text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
shovel_latencies <- function(dir1, dir2) {
  # Summarise shovel latency in both directions between two log directories.
  # Returns a data frame of latency quantiles, one column per direction,
  # with columns named "<src> to <dst>".
  probs <- c(0, .1, .5, .95, .99, 1)
  forward <- shovel_latency(dir1, dir2)
  backward <- shovel_latency(dir2, dir1)
  result <- data.frame(quantile(forward$latency, probs),
                       quantile(backward$latency, probs))
  # paste() defaults to sep = " ", matching the original labels.
  names(result) <- c(paste(dir1, "to", dir2),
                     paste(dir2, "to", dir1))
  result
}
files_to_df <- function(dir, filter, col2) {
  # Read every space-separated "TAG value" file in `dir` whose name matches
  # `filter` and bind them into one data frame.
  #
  # dir    -- directory to scan
  # filter -- filename pattern passed to list.files(); NOTE(review): callers
  #           pass glob-like strings ("*publish") although `pattern` is a
  #           regex — this only matches because TRE ignores the dangling
  #           leading "*", leaving the regex "publish".
  # col2   -- name for the value column (the first column is always "TAG")
  files <- list.files(path = dir, pattern = filter, full.names = TRUE)
  # Fail early with a clear message; the original fell through to a
  # confusing `names(NULL)` error when no files matched.
  if (length(files) == 0) {
    stop("no files matching '", filter, "' found in ", dir, call. = FALSE)
  }
  # `header` spelled out in full: the original relied on partial argument
  # matching ("head=FALSE"), an anti-pattern lintr flags.
  frames <- do.call(rbind, lapply(files, read.csv, header = FALSE, sep = " "))
  names(frames) <- c("TAG", col2)
  frames
}
shovel_latency <- function(source, sink) {
  # Join publish events from `source` with consume events from `sink` on
  # their shared TAG and compute per-message latency (CON - PUB), ordered
  # by consume timestamp.
  pub <- files_to_df(source, "*publish", "PUB")
  con <- files_to_df(sink, "*consume", "CON")
  joined <- merge(pub, con, by = "TAG")
  joined$latency <- joined$CON - joined$PUB
  joined[order(joined$CON), ]
}
|
{"hexsha": "4de85547b988db24081b059e528a053f6b62c9bd", "size": 839, "ext": "r", "lang": "R", "max_stars_repo_path": "priv/shovel.r", "max_stars_repo_name": "russelldb/rabl", "max_stars_repo_head_hexsha": "9aa140ef5ec09959393adba5a98321b6df8323e1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2017-08-17T15:13:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-18T16:26:41.000Z", "max_issues_repo_path": "priv/shovel.r", "max_issues_repo_name": "russelldb/rabl", "max_issues_repo_head_hexsha": "9aa140ef5ec09959393adba5a98321b6df8323e1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "priv/shovel.r", "max_forks_repo_name": "russelldb/rabl", "max_forks_repo_head_hexsha": "9aa140ef5ec09959393adba5a98321b6df8323e1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-08-22T10:56:20.000Z", "max_forks_repo_forks_event_max_datetime": "2018-03-06T00:08:55.000Z", "avg_line_length": 34.9583333333, "max_line_length": 70, "alphanum_fraction": 0.6233611442, "num_tokens": 290}
|
"""
    replica_fidelity(df::DataFrame; p_field = :hproj, skip = 0)
Compute the fidelity of the average coefficient vector and the projector defined in
`p_field` from the result of replica [`lomc!()`](@ref) passed as argument `df`,
using replicas `_1` and `_2`.
Calls [`ratio_of_means()`](@ref) to perform a blocking analysis
on a ratio of the means of separate time series and returns a
[`RatioBlockingResult`](@ref).
The first `skip` steps in the time series are skipped.
The fidelity of states `|ψ⟩` and `|ϕ⟩` is defined as
```math
F(ψ,ϕ) = \\frac{|⟨ψ|ϕ⟩|^2}{⟨ψ|ψ⟩⟨ϕ|ϕ⟩} .
```
Specifically, `replica_fidelity` computes
```math
F(\\mathbf{v},⟨\\mathbf{c}⟩) =
    \\frac{⟨(\\mathbf{c}_1⋅\\mathbf{v})(\\mathbf{v}⋅\\mathbf{c}_2)⟩}
    {⟨\\mathbf{c}_1⋅\\mathbf{c}_2⟩} ,
```
where `v` is the projector specified by `p_field`, which is assumed to be normalised to
unity with the two-norm (i.e. `v⋅v == 1`), and ``\\mathbf{c}_1`` and ``\\mathbf{c}_2``
are two replica coefficient vectors.
"""
function replica_fidelity(df::DataFrame; p_field = :hproj, skip = 0, args...)
    # Replica-resolved projection columns, e.g. :hproj_1 and :hproj_2.
    p_field_1 = Symbol(p_field, :_1)
    p_field_2 = Symbol(p_field, :_2)
    # Numerator time series ⟨(c₁⋅v)(v⋅c₂)⟩; drop the first `skip` steps.
    fid_num = conj(getproperty(df, p_field_1)) .* getproperty(df, p_field_2)
    fid_num = fid_num[skip+1:end]
    # Denominator time series ⟨c₁⋅c₂⟩ over the same steps.
    fid_den = df.c1_dot_c2[skip+1:end]
    # Forward any extra keyword arguments to the blocking analysis.
    return ratio_of_means(fid_num, fid_den; args...)
end
|
{"hexsha": "9a35c381078e7c49d40cf7184ac038096600eea0", "size": 1369, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/StatsTools/fidelity.jl", "max_stars_repo_name": "joachimbrand/Rimu.jl", "max_stars_repo_head_hexsha": "ee5237794c82e7dc83a9562768cf37c3979c7f55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-08-03T05:13:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T00:05:01.000Z", "max_issues_repo_path": "src/StatsTools/fidelity.jl", "max_issues_repo_name": "joachimbrand/Rimu.jl", "max_issues_repo_head_hexsha": "ee5237794c82e7dc83a9562768cf37c3979c7f55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 122, "max_issues_repo_issues_event_min_datetime": "2020-09-16T00:53:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T11:19:19.000Z", "max_forks_repo_path": "src/StatsTools/fidelity.jl", "max_forks_repo_name": "joachimbrand/Rimu.jl", "max_forks_repo_head_hexsha": "ee5237794c82e7dc83a9562768cf37c3979c7f55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1142857143, "max_line_length": 87, "alphanum_fraction": 0.6705624543, "num_tokens": 481}
|
import Statistics.LinearRegression
import Statistics.Sample
import qualified Data.Vector.Unboxed as U
import Control.Monad.Random
import Control.Monad
import Control.Applicative
import System.Random.MWC
import System.Random.MWC.Distributions
import qualified Data.Packed.Vector as V
import Graphics.Rendering.Plot
-- Run the windowed regression tests, then the convergence, robustness
-- and variance checks.
main :: IO ()
main =
  mapM_ test [1..10] >> test_convergence >> test_robust >> test_variances
-- Fit the exact linear relation y = 100x + 2000 over the k-th window of
-- ten million x values and print both regression variants.
test k = do
    putStrLn "linearRegression:"
    print (linearRegression xs ys)
    putStrLn "linearRegressionTLS:"
    print (linearRegressionTLS xs ys)
  where
    n  = 10000000
    xs = U.fromList [k*n + 1 .. (k+1)*n]
    ys = U.map (\x -> x*100 + 2000) xs
-- Start from a plain least-squares fit on data containing two outliers
-- (the zeros) and show how one `converge` pass moves the estimate.
test_convergence = do
    putStrLn "Initial iteration:"
    print firstPass
    putStrLn "Successive iteration:"
    print (converge ep xs ys firstPass)
  where
    xs        = U.fromList [1..10]
    ys        = U.fromList ([1..5] ++ [0,0] ++ [8..10])
    firstPass = linearRegression xs ys
    ep        = defaultEstimationParameters
-- | Draw @n@ samples from a normal distribution with the given mean and
-- standard deviation, using a system-seeded MWC generator.
getNormals :: Double -> Double -> Int -> IO [Double]
getNormals mean std n =
  withSystemRandom . asGenIO $ \rng -> replicateM n (normal mean std rng)
-- Build a figure showing the raw observations plus the three fitted lines
-- (plain least squares, converged non-robust, robust) for visual comparison.
-- NOTE(review): the source's indentation was lost in extraction; the
-- do-block nesting below is reconstructed and should be confirmed against
-- the original file.
testFigure :: U.Vector Double -> U.Vector Double -> (EstimatedRelation, EstimatedRelation, EstimatedRelation) -> Figure ()
testFigure xs ys (simple, non_robust, robust) = do
  -- The plot package consumes Data.Packed vectors, so convert from unboxed.
  let vxs = V.fromList . U.toList $ xs
  let vys = V.fromList . U.toList $ ys
  -- One dataset: the scatter of points and one line per fitted relation.
  let dataset = (vxs, [ point vys Cross, line_func simple, line_func non_robust, line_func robust ])
  withTitle . setText $ "linreg test"
  setPlots 1 1
  withPlot (1,1) $ do
    addAxis XAxis (Side Lower) $ do
      setTicks Minor (TickNumber 5)
      withAxisLine $ do
        setLineWidth 1.0
    addAxis YAxis (Side Lower) $ do
      setTicks Minor (TickNumber 5)
      withAxisLine $ do
        setLineWidth 1.0
    setDataset dataset
    setRangeFromData XAxis Lower Linear
    setRangeFromData YAxis Lower Linear
    setLegend True NorthEast Inside
  where
    -- Render an estimated (intercept, slope) pair as a drawable line.
    line_func (alpha,beta) = line ((\x -> alpha + beta*x) :: Function) (1.0 :: LineWidth)
-- Build a dataset that is mostly linear (y ≈ x with unit noise) plus a
-- cluster of far-away points, compare the plain / converged / robust fits,
-- and render all three to a PNG.
test_robust = do
    putStrLn "generating random dataset for robust fit:"
    -- Draw in the same order as before so the RNG usage is unchanged.
    first_xs <- getNormals 0.0 10.0 800
    first_ys_errs <- getNormals 0.0 1.0 800
    let first_ys = zipWith (+) first_xs first_ys_errs
    last_xs <- getNormals 50.0 (sqrt 50) 200
    last_ys <- getNormals 0.0 (sqrt 50) 200
    let xs = U.fromList (first_xs ++ last_xs)
        ys = U.fromList (first_ys ++ last_ys)
    putStrLn "robustFit test results:"
    (simple, non_robust, robust) <- evalRandIO (randTest xs ys)
    let report label value = putStrLn label >> print value
    report "linearRegression on dataset:" simple
    report "convergedRegression on dataset:" non_robust
    report "robustFit on dataset:" robust
    putStrLn ("Image output is at " ++ filename)
    writeFigure PNG filename (800,800) (testFigure xs ys (simple, non_robust, robust))
  where
    filename = "test_robust.png"
-- | Compute all three fits; only robustFit needs the randomness monad.
randTest :: MonadRandom m => U.Vector Double -> U.Vector Double -> m (EstimatedRelation,EstimatedRelation,EstimatedRelation)
randTest xs ys = do
    robust <- robustFit ep xs ys
    let simple = linearRegression xs ys
    -- NOTE(review): converge is seeded with a fixed (0.0,0.001) guess; the
    -- original's trailing "simple" comment suggests seeding with the plain
    -- fit was considered — confirm which was intended.
    let non_robust = converge ep xs ys (0.0,0.001)
    return (simple, non_robust, robust)
  where
    ep = defaultEstimationParameters
-- Check the variance estimates: fit y = x plus N(0,10) noise over
-- x in [-100,100], then report the fit, the (root) MSE and the parameter
-- distributions.
test_variances :: IO ()
test_variances = do
    putStrLn "generating random dataset for variance test:"
    offsets <- liftM U.fromList (getNormals 0 10 (U.length xs))
    let ys = U.zipWith (+) xs offsets
        ab = linearRegression xs ys
    putStrLn ("estimated fit should be (0,1). It is:" ++ show ab)
    let mse = linearRegressionMSE ab xs ys
    putStrLn ("Calculated MSE of sampled data should be an estimate of 10. it is:" ++ (show . sqrt $ mse))
    let dsts = linearRegressionDistributions ab xs ys
    -- NOTE(review): "1.4777-4" in the message below looks like mangled
    -- scientific notation (1.4777e-4); string kept verbatim here.
    putStrLn ("Calculated distributions of the linear fit are linear transformed StudentT distributions with scalings that are estimates of (0.5,1.4777-4). They are:" ++ show dsts)
  where
    xs = U.fromList [-100..100]
|
{"hexsha": "46352d50aba46457bad827276912f89ddd472832", "size": 4214, "ext": "hs", "lang": "Haskell", "max_stars_repo_path": "tests/linreg.hs", "max_stars_repo_name": "alpmestan/statistics-linreg", "max_stars_repo_head_hexsha": "14c2f10088d1914b0303c191ec0d7243c8eb83ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2016-01-09T12:21:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-13T01:29:03.000Z", "max_issues_repo_path": "tests/linreg.hs", "max_issues_repo_name": "alpmestan/statistics-linreg", "max_issues_repo_head_hexsha": "14c2f10088d1914b0303c191ec0d7243c8eb83ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-02-23T12:40:19.000Z", "max_issues_repo_issues_event_max_datetime": "2017-02-23T13:05:49.000Z", "max_forks_repo_path": "tests/linreg.hs", "max_forks_repo_name": "alpmestan/statistics-linreg", "max_forks_repo_head_hexsha": "14c2f10088d1914b0303c191ec0d7243c8eb83ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6605504587, "max_line_length": 180, "alphanum_fraction": 0.677978168, "num_tokens": 1160}
|
[STATEMENT]
lemma invariantQCharacterizationAfterApplyBackjump_1:
assumes
"InvariantConsistent (getM state)"
"InvariantUniq (getM state)"
"InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)" and
"InvariantWatchListsUniq (getWatchList state)" and
"InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)"
"InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)" and
"InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)" and
"InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)" and
"InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)" and
"InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)" and
"InvariantUniqC (getC state)"
"getC state = [opposite (getCl state)]"
"InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))"
"InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))"
"getConflictFlag state"
"InvariantCFalse (getConflictFlag state) (getM state) (getC state)"
"InvariantCEntailed (getConflictFlag state) F0 (getC state)" and
"InvariantClCharacterization (getCl state) (getC state) (getM state)" and
"InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)" and
"InvariantClCurrentLevel (getCl state) (getM state)"
"currentLevel (getM state) > 0"
"isUIP (opposite (getCl state)) (getC state) (getM state)"
shows
"let state'' = (applyBackjump state) in
InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
let ?l = "getCl state"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
let ?level = "getBackjumpLevel state"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
let ?prefix = "prefixToLevel ?level (getM state)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
let ?state' = "state\<lparr> getConflictFlag := False, getQ := [], getM := ?prefix \<rparr>"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
let ?state'' = "setReason (opposite (getCl state)) (length (getF state) - 1) ?state'"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
let ?state'1 = "assertLiteral (opposite ?l) False ?state'"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
let ?state''1 = "assertLiteral (opposite ?l) False ?state''"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
have "?level < elementLevel ?l (getM state)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getBackjumpLevel state < elementLevel (getCl state) (getM state)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
goal (1 subgoal):
1. getBackjumpLevel state < elementLevel (getCl state) (getM state)
[PROOF STEP]
using isMinimalBackjumpLevelGetBackjumpLevel[of "state"]
[PROOF STATE]
proof (prove)
using this:
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
\<lbrakk>InvariantUniq (getM state); InvariantCFalse (getConflictFlag state) (getM state) (getC state); InvariantClCharacterization (getCl state) (getC state) (getM state); InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state); InvariantClCurrentLevel (getCl state) (getM state); InvariantUniqC (getC state); getConflictFlag state; isUIP (opposite (getCl state)) (getC state) (getM state); 0 < currentLevel (getM state)\<rbrakk> \<Longrightarrow> isMinimalBackjumpLevel (getBackjumpLevel state) (opposite (getCl state)) (getC state) (getM state)
goal (1 subgoal):
1. getBackjumpLevel state < elementLevel (getCl state) (getM state)
[PROOF STEP]
unfolding isMinimalBackjumpLevel_def
[PROOF STATE]
proof (prove)
using this:
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
\<lbrakk>InvariantUniq (getM state); InvariantCFalse (getConflictFlag state) (getM state) (getC state); InvariantClCharacterization (getCl state) (getC state) (getM state); InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state); InvariantClCurrentLevel (getCl state) (getM state); InvariantUniqC (getC state); getConflictFlag state; isUIP (opposite (getCl state)) (getC state) (getM state); 0 < currentLevel (getM state)\<rbrakk> \<Longrightarrow> isBackjumpLevel (getBackjumpLevel state) (opposite (getCl state)) (getC state) (getM state) \<and> (if set (getC state) \<noteq> {opposite (getCl state)} then \<exists>ll. ll el getC state \<and> elementLevel (opposite ll) (getM state) = getBackjumpLevel state else getBackjumpLevel state = 0)
goal (1 subgoal):
1. getBackjumpLevel state < elementLevel (getCl state) (getM state)
[PROOF STEP]
unfolding isBackjumpLevel_def
[PROOF STATE]
proof (prove)
using this:
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
\<lbrakk>InvariantUniq (getM state); InvariantCFalse (getConflictFlag state) (getM state) (getC state); InvariantClCharacterization (getCl state) (getC state) (getM state); InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state); InvariantClCurrentLevel (getCl state) (getM state); InvariantUniqC (getC state); getConflictFlag state; isUIP (opposite (getCl state)) (getC state) (getM state); 0 < currentLevel (getM state)\<rbrakk> \<Longrightarrow> (isLastAssertedLiteral (opposite (opposite (getCl state))) (oppositeLiteralList (getC state)) (elements (getM state)) \<and> 0 \<le> getBackjumpLevel state \<and> getBackjumpLevel state < elementLevel (opposite (opposite (getCl state))) (getM state) \<and> (\<forall>l'. l' el getC state \<and> l' \<noteq> opposite (getCl state) \<longrightarrow> elementLevel (opposite l') (getM state) \<le> getBackjumpLevel state)) \<and> (if set (getC state) \<noteq> {opposite (getCl state)} then \<exists>ll. ll el getC state \<and> elementLevel (opposite ll) (getM state) = getBackjumpLevel state else getBackjumpLevel state = 0)
goal (1 subgoal):
1. getBackjumpLevel state < elementLevel (getCl state) (getM state)
[PROOF STEP]
by (simp add: Let_def)
[PROOF STATE]
proof (state)
this:
getBackjumpLevel state < elementLevel (getCl state) (getM state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
hence "?level < currentLevel (getM state)"
[PROOF STATE]
proof (prove)
using this:
getBackjumpLevel state < elementLevel (getCl state) (getM state)
goal (1 subgoal):
1. getBackjumpLevel state < currentLevel (getM state)
[PROOF STEP]
using elementLevelLeqCurrentLevel[of "?l" "getM state"]
[PROOF STATE]
proof (prove)
using this:
getBackjumpLevel state < elementLevel (getCl state) (getM state)
elementLevel (getCl state) (getM state) \<le> currentLevel (getM state)
goal (1 subgoal):
1. getBackjumpLevel state < currentLevel (getM state)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
getBackjumpLevel state < currentLevel (getM state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
hence "InvariantQCharacterization (getConflictFlag ?state') (getQ ?state') (getF ?state') (getM ?state')"
"InvariantConflictFlagCharacterization (getConflictFlag ?state') (getF ?state') (getM ?state')"
[PROOF STATE]
proof (prove)
using this:
getBackjumpLevel state < currentLevel (getM state)
goal (1 subgoal):
1. InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
[PROOF STEP]
unfolding InvariantQCharacterization_def
[PROOF STATE]
proof (prove)
using this:
getBackjumpLevel state < currentLevel (getM state)
goal (1 subgoal):
1. \<not> getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<longrightarrow> (\<forall>l. l el getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = (\<exists>c. c el getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<and> isUnitClause c l (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))))) &&& InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
[PROOF STEP]
unfolding InvariantConflictFlagCharacterization_def
[PROOF STATE]
proof (prove)
using this:
getBackjumpLevel state < currentLevel (getM state)
goal (1 subgoal):
1. \<not> getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<longrightarrow> (\<forall>l. l el getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = (\<exists>c. c el getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<and> isUnitClause c l (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))))) &&& getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = formulaFalse (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
using \<open>InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))\<close>
[PROOF STATE]
proof (prove)
using this:
getBackjumpLevel state < currentLevel (getM state)
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
goal (1 subgoal):
1. \<not> getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<longrightarrow> (\<forall>l. l el getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = (\<exists>c. c el getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<and> isUnitClause c l (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))))) &&& getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = formulaFalse (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
using \<open>InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))\<close>
[PROOF STATE]
proof (prove)
using this:
getBackjumpLevel state < currentLevel (getM state)
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
goal (1 subgoal):
1. \<not> getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<longrightarrow> (\<forall>l. l el getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = (\<exists>c. c el getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<and> isUnitClause c l (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))))) &&& getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = formulaFalse (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
unfolding InvariantNoDecisionsWhenConflict_def
[PROOF STATE]
proof (prove)
using this:
getBackjumpLevel state < currentLevel (getM state)
\<forall>level'<currentLevel (getM state). \<not> formulaFalse (getF state) (elements (prefixToLevel level' (getM state)))
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
goal (1 subgoal):
1. \<not> getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<longrightarrow> (\<forall>l. l el getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = (\<exists>c. c el getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<and> isUnitClause c l (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))))) &&& getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = formulaFalse (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
unfolding InvariantNoDecisionsWhenUnit_def
[PROOF STATE]
proof (prove)
using this:
getBackjumpLevel state < currentLevel (getM state)
\<forall>level'<currentLevel (getM state). \<not> formulaFalse (getF state) (elements (prefixToLevel level' (getM state)))
\<forall>level'<currentLevel (getM state). \<nexists>clause literal. clause el getF state \<and> isUnitClause clause literal (elements (prefixToLevel level' (getM state)))
goal (1 subgoal):
1. \<not> getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<longrightarrow> (\<forall>l. l el getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = (\<exists>c. c el getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<and> isUnitClause c l (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))))) &&& getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = formulaFalse (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
unfolding applyBackjump_def
[PROOF STATE]
proof (prove)
using this:
getBackjumpLevel state < currentLevel (getM state)
\<forall>level'<currentLevel (getM state). \<not> formulaFalse (getF state) (elements (prefixToLevel level' (getM state)))
\<forall>level'<currentLevel (getM state). \<nexists>clause literal. clause el getF state \<and> isUnitClause clause literal (elements (prefixToLevel level' (getM state)))
goal (1 subgoal):
1. \<not> getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<longrightarrow> (\<forall>l. l el getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = (\<exists>c. c el getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) \<and> isUnitClause c l (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))))) &&& getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) = formulaFalse (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (elements (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
by (auto simp add: Let_def set_conv_nth)
[PROOF STATE]
proof (state)
this:
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
have "InvariantConsistent (?prefix @ [(opposite ?l, False)])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
goal (1 subgoal):
1. InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
[PROOF STEP]
using InvariantConsistentAfterApplyBackjump[of "state" "F0"]
[PROOF STATE]
proof (prove)
using this:
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
\<lbrakk>InvariantConsistent (getM state); InvariantUniq (getM state); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state); getConflictFlag state; InvariantCFalse (getConflictFlag state) (getM state) (getC state); InvariantUniqC (getC state); InvariantCEntailed (getConflictFlag state) F0 (getC state); InvariantClCharacterization (getCl state) (getC state) (getM state); InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state); InvariantClCurrentLevel (getCl state) (getM state); 0 < currentLevel (getM state); isUIP (opposite (getCl state)) (getC state) (getM state)\<rbrakk> \<Longrightarrow> let state' = applyBackjump state in InvariantConsistent (getM state')
goal (1 subgoal):
1. InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
[PROOF STEP]
using assertLiteralEffect
[PROOF STATE]
proof (prove)
using this:
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
\<lbrakk>InvariantConsistent (getM state); InvariantUniq (getM state); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state); getConflictFlag state; InvariantCFalse (getConflictFlag state) (getM state) (getC state); InvariantUniqC (getC state); InvariantCEntailed (getConflictFlag state) F0 (getC state); InvariantClCharacterization (getCl state) (getC state) (getM state); InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state); InvariantClCurrentLevel (getCl state) (getM state); 0 < currentLevel (getM state); isUIP (opposite (getCl state)) (getC state) (getM state)\<rbrakk> \<Longrightarrow> let state' = applyBackjump state in InvariantConsistent (getM state')
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> getM (assertLiteral ?l ?d ?state) = getM ?state @ [(?l, ?d)]
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> getF (assertLiteral ?l ?d ?state) = getF ?state
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> getSATFlag (assertLiteral ?l ?d ?state) = getSATFlag ?state
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> isPrefix (getQ ?state) (getQ (assertLiteral ?l ?d ?state))
goal (1 subgoal):
1. InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
[PROOF STEP]
unfolding applyBackjump_def
[PROOF STATE]
proof (prove)
using this:
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
\<lbrakk>InvariantConsistent (getM state); InvariantUniq (getM state); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state); getConflictFlag state; InvariantCFalse (getConflictFlag state) (getM state) (getC state); InvariantUniqC (getC state); InvariantCEntailed (getConflictFlag state) F0 (getC state); InvariantClCharacterization (getCl state) (getC state) (getM state); InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state); InvariantClCurrentLevel (getCl state) (getM state); 0 < currentLevel (getM state); isUIP (opposite (getCl state)) (getC state) (getM state)\<rbrakk> \<Longrightarrow> let state' = let l = getCl state; level = getBackjumpLevel state; state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel level (getM state)\<rparr> in Let (if 0 < level then setReason (opposite l) (length (getF state) - 1) state' else state') (assertLiteral (opposite l) False) in InvariantConsistent (getM state')
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> getM (assertLiteral ?l ?d ?state) = getM ?state @ [(?l, ?d)]
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> getF (assertLiteral ?l ?d ?state) = getF ?state
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> getSATFlag (assertLiteral ?l ?d ?state) = getSATFlag ?state
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> isPrefix (getQ ?state) (getQ (assertLiteral ?l ?d ?state))
goal (1 subgoal):
1. InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
[PROOF STEP]
unfolding setReason_def
[PROOF STATE]
proof (prove)
using this:
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
\<lbrakk>InvariantConsistent (getM state); InvariantUniq (getM state); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state); getConflictFlag state; InvariantCFalse (getConflictFlag state) (getM state) (getC state); InvariantUniqC (getC state); InvariantCEntailed (getConflictFlag state) F0 (getC state); InvariantClCharacterization (getCl state) (getC state) (getM state); InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state); InvariantClCurrentLevel (getCl state) (getM state); 0 < currentLevel (getM state); isUIP (opposite (getCl state)) (getC state) (getM state)\<rbrakk> \<Longrightarrow> let state' = let l = getCl state; level = getBackjumpLevel state; state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel level (getM state)\<rparr> in Let (if 0 < level then state'\<lparr>getReason := getReason state'(opposite l \<mapsto> length (getF state) - 1)\<rparr> else state') (assertLiteral (opposite l) False) in InvariantConsistent (getM state')
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> getM (assertLiteral ?l ?d ?state) = getM ?state @ [(?l, ?d)]
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> getF (assertLiteral ?l ?d ?state) = getF ?state
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> getSATFlag (assertLiteral ?l ?d ?state) = getSATFlag ?state
\<lbrakk>InvariantWatchListsContainOnlyClausesFromF (getWatchList ?state) (getF ?state); InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state)\<rbrakk> \<Longrightarrow> isPrefix (getQ ?state) (getQ (assertLiteral ?l ?d ?state))
goal (1 subgoal):
1. InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
[PROOF STEP]
by (auto simp add: Let_def split: if_split_asm)
[PROOF STATE]
proof (state)
this:
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
have "InvariantWatchCharacterization (getF ?state') (getWatch1 ?state') (getWatch2 ?state') (getM ?state')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
[PROOF STEP]
using InvariantWatchCharacterizationInBackjumpPrefix[of "state"]
[PROOF STATE]
proof (prove)
using this:
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state) \<Longrightarrow> let l = getCl state; level = getBackjumpLevel state; prefix = prefixToLevel level (getM state); state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefix\<rparr> in InvariantWatchCharacterization (getF state') (getWatch1 state') (getWatch2 state') (getM state')
goal (1 subgoal):
1. InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state) \<Longrightarrow> let l = getCl state; level = getBackjumpLevel state; prefix = prefixToLevel level (getM state); state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefix\<rparr> in InvariantWatchCharacterization (getF state') (getWatch1 state') (getWatch2 state') (getM state')
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
goal (1 subgoal):
1. InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
[PROOF STEP]
by (simp add: Let_def)
[PROOF STATE]
proof (state)
this:
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
have "\<not> opposite ?l el (getQ ?state'1)" "\<not> opposite ?l el (getQ ?state''1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
using assertedLiteralIsNotUnit[of "?state'" "opposite ?l" "False"]
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
goal (1 subgoal):
1. \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
using assertedLiteralIsNotUnit[of "?state''" "opposite ?l" "False"]
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<lbrakk>InvariantConsistent (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsUniq (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsCharacterization (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesEl (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) 
(length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesDiffer (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchCharacterization (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, 
getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
goal (1 subgoal):
1. \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
using \<open>InvariantQCharacterization (getConflictFlag ?state') (getQ ?state') (getF ?state') (getM ?state')\<close>
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<lbrakk>InvariantConsistent (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsUniq (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsCharacterization (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesEl (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) 
(length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesDiffer (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchCharacterization (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, 
getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
goal (1 subgoal):
1. \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
using \<open>InvariantConsistent (?prefix @ [(opposite ?l, False)])\<close>
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<lbrakk>InvariantConsistent (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsUniq (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsCharacterization (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesEl (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) 
(length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesDiffer (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchCharacterization (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, 
getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
goal (1 subgoal):
1. \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
using \<open>InvariantWatchCharacterization (getF ?state') (getWatch1 ?state') (getWatch2 ?state') (getM ?state')\<close>
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<lbrakk>InvariantConsistent (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsUniq (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsCharacterization (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesEl (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) 
(length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesDiffer (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchCharacterization (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, 
getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
goal (1 subgoal):
1. \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
unfolding applyBackjump_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<lbrakk>InvariantConsistent (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsUniq (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsCharacterization (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesEl (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) 
(length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesDiffer (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchCharacterization (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, 
getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
goal (1 subgoal):
1. \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
unfolding setReason_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel 
(getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = 
assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
goal (1 subgoal):
1. \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel 
(getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = 
assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>) in opposite (getCl state) \<notin> set (getQ state') - set (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
goal (1 subgoal):
1. \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& \<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))
[PROOF STEP]
by (auto simp add: Let_def split: if_split_asm)
[PROOF STATE]
proof (state)
this:
\<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
hence "removeAll (opposite ?l) (getQ ?state'1) = getQ ?state'1"
"removeAll (opposite ?l) (getQ ?state''1) = getQ ?state''1"
[PROOF STATE]
proof (prove)
using this:
\<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
goal (1 subgoal):
1. removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
using removeAll_id[of "opposite ?l" "getQ ?state'1"]
[PROOF STATE]
proof (prove)
using this:
\<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
opposite (getCl state) \<notin> set (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) \<Longrightarrow> removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
goal (1 subgoal):
1. removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
using removeAll_id[of "opposite ?l" "getQ ?state''1"]
[PROOF STATE]
proof (prove)
using this:
\<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
opposite (getCl state) \<notin> set (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) \<Longrightarrow> removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
opposite (getCl state) \<notin> set (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) \<Longrightarrow> removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
goal (1 subgoal):
1. removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
unfolding setReason_def
[PROOF STATE]
proof (prove)
using this:
\<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
\<not> opposite (getCl state) el getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))
opposite (getCl state) \<notin> set (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) \<Longrightarrow> removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
opposite (getCl state) \<notin> set (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))) \<Longrightarrow> removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))
goal (1 subgoal):
1. removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) &&& removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
using InvariantWatchCharacterizationInBackjumpPrefix[of "state"]
[PROOF STATE]
proof (prove)
using this:
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state) \<Longrightarrow> let l = getCl state; level = getBackjumpLevel state; prefix = prefixToLevel level (getM state); state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefix\<rparr> in InvariantWatchCharacterization (getF state') (getWatch1 state') (getWatch2 state') (getM state')
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
using InvariantQCharacterizationAfterAssertLiteral[of "?state'" "opposite ?l" "False"]
[PROOF STATE]
proof (prove)
using this:
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state) \<Longrightarrow> let l = getCl state; level = getBackjumpLevel state; prefix = prefixToLevel level (getM state); state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefix\<rparr> in InvariantWatchCharacterization (getF state') (getWatch1 state') (getWatch2 state') (getM state')
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in InvariantQCharacterization (getConflictFlag state') (removeAll (opposite (getCl state)) (getQ state')) (getF state') (getM state')
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
using InvariantQCharacterizationAfterAssertLiteral[of "?state''" "opposite ?l" "False"]
[PROOF STATE]
proof (prove)
using this:
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state) \<Longrightarrow> let l = getCl state; level = getBackjumpLevel state; prefix = prefixToLevel level (getM state); state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefix\<rparr> in InvariantWatchCharacterization (getF state') (getWatch1 state') (getWatch2 state') (getM state')
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in InvariantQCharacterization (getConflictFlag state') (removeAll (opposite (getCl state)) (getQ state')) (getF state') (getM state')
\<lbrakk>InvariantConsistent (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsUniq (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsCharacterization (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesEl (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) 
(length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesDiffer (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchCharacterization (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantConflictFlagCharacterization (getConflictFlag (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM 
(setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantQCharacterization (getConflictFlag (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getQ (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) in InvariantQCharacterization (getConflictFlag state') (removeAll (opposite (getCl state)) (getQ state')) (getF state') (getM state')
goal (1 subgoal):
1. let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
unfolding applyBackjump_def
[PROOF STATE]
proof (prove)
using this:
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))) = getQ (assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state) \<Longrightarrow> let l = getCl state; level = getBackjumpLevel state; prefix = prefixToLevel level (getM state); state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefix\<rparr> in InvariantWatchCharacterization (getF state') (getWatch1 state') (getWatch2 state') (getM state')
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in InvariantQCharacterization (getConflictFlag state') (removeAll (opposite (getCl state)) (getQ state')) (getF state') (getM state')
\<lbrakk>InvariantConsistent (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsUniq (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchListsCharacterization (getWatchList (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesEl (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) 
(length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchesDiffer (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantWatchCharacterization (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch1 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getWatch2 (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantConflictFlagCharacterization (getConflictFlag (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM 
(setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))); InvariantQCharacterization (getConflictFlag (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getQ (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getF (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) (getM (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (setReason (opposite (getCl state)) (length (getF state) - 1) (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) in InvariantQCharacterization (getConflictFlag state') (removeAll (opposite (getCl state)) (getQ state')) (getF state') (getM state')
goal (1 subgoal):
1. let state'' = let l = getCl state; level = getBackjumpLevel state; state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel level (getM state)\<rparr> in Let (if 0 < level then setReason (opposite l) (length (getF state) - 1) state' else state') (assertLiteral (opposite l) False) in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
unfolding setReason_def
[PROOF STATE]
proof (prove)
using this:
InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
InvariantConsistent (prefixToLevel (getBackjumpLevel state) (getM state) @ [(opposite (getCl state), False)])
InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))
removeAll (opposite (getCl state)) (getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))) = getQ (assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))
InvariantConsistent (getM state)
InvariantUniq (getM state)
InvariantWatchListsContainOnlyClausesFromF (getWatchList state) (getF state)
InvariantWatchListsUniq (getWatchList state)
InvariantWatchListsCharacterization (getWatchList state) (getWatch1 state) (getWatch2 state)
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchesDiffer (getF state) (getWatch1 state) (getWatch2 state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state)
InvariantConflictFlagCharacterization (getConflictFlag state) (getF state) (getM state)
InvariantQCharacterization (getConflictFlag state) (getQ state) (getF state) (getM state)
InvariantUniqC (getC state)
getC state = [opposite (getCl state)]
InvariantNoDecisionsWhenUnit (getF state) (getM state) (currentLevel (getM state))
InvariantNoDecisionsWhenConflict (getF state) (getM state) (currentLevel (getM state))
getConflictFlag state
InvariantCFalse (getConflictFlag state) (getM state) (getC state)
InvariantCEntailed (getConflictFlag state) F0 (getC state)
InvariantClCharacterization (getCl state) (getC state) (getM state)
InvariantCllCharacterization (getCl state) (getCll state) (getC state) (getM state)
InvariantClCurrentLevel (getCl state) (getM state)
0 < currentLevel (getM state)
isUIP (opposite (getCl state)) (getC state) (getM state)
InvariantWatchCharacterization (getF state) (getWatch1 state) (getWatch2 state) (getM state) \<Longrightarrow> let l = getCl state; level = getBackjumpLevel state; prefix = prefixToLevel level (getM state); state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefix\<rparr> in InvariantWatchCharacterization (getF state') (getWatch1 state') (getWatch2 state') (getM state')
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantWatchCharacterization (getF 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantConflictFlagCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)); InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>) in InvariantQCharacterization (getConflictFlag state') (removeAll (opposite (getCl state)) (getQ state')) (getF state') (getM state')
\<lbrakk>InvariantConsistent (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>) @ [(opposite (getCl state), False)]); InvariantWatchListsContainOnlyClausesFromF (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchListsUniq (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchListsCharacterization (getWatchList (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason 
(state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchesEl (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchesDiffer (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel 
(getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantWatchCharacterization (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantConflictFlagCharacterization 
(getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)); InvariantQCharacterization (getConflictFlag (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getQ (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>)) (getF (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) 
\<mapsto> length (getF state) - 1)\<rparr>)) (getM (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>))\<rbrakk> \<Longrightarrow> let state' = assertLiteral (opposite (getCl state)) False (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state), getReason := getReason (state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel (getBackjumpLevel state) (getM state)\<rparr>)(opposite (getCl state) \<mapsto> length (getF state) - 1)\<rparr>) in InvariantQCharacterization (getConflictFlag state') (removeAll (opposite (getCl state)) (getQ state')) (getF state') (getM state')
goal (1 subgoal):
1. let state'' = let l = getCl state; level = getBackjumpLevel state; state' = state\<lparr>getConflictFlag := False, getQ := [], getM := prefixToLevel level (getM state)\<rparr> in Let (if 0 < level then state'\<lparr>getReason := getReason state'(opposite l \<mapsto> length (getF state) - 1)\<rparr> else state') (assertLiteral (opposite l) False) in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
[PROOF STEP]
by (auto simp add: Let_def)
[PROOF STATE]
proof (state)
this:
let state'' = applyBackjump state in InvariantQCharacterization (getConflictFlag state'') (getQ state'') (getF state'') (getM state'')
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 68503, "file": "SATSolverVerification_ConflictAnalysis", "length": 65}
|
#=
Code related with input output (IO) of .nc files directly to/from ClimArrays
utilizing the NCDatasets.jl package and a buttload of convenience code.
An initial version of parts of this code was taken from:
https://github.com/rafaqz/GeoData.jl
=#
using NCDatasets: NCDatasets, NCDataset
export NCDatasets, NCDataset
export nckeys, ncdetails, globalattr, ncsize
export ncread, ncwrite
# Map a ClimArray dimension type to the variable name conventionally used
# for it inside `.nc` files. Generic `Dim`s fall back to their
# DimensionalData name; unstructured grids (`Coord`) map to "cell".
dim_to_commonname(::Lat) = "lat"
dim_to_commonname(::Lon) = "lon"
dim_to_commonname(::Time) = "time"
dim_to_commonname(::Pre) = "level"
dim_to_commonname(D::Dim) = string(DimensionalData.name(D))
dim_to_commonname(::Coord) = "cell"
# Variable names under which an unstructured-grid cell dimension may appear
# in a `.nc` file. NOTE(review): consumed by the included read/write files,
# which are not visible here.
const POSSIBLE_CELL_NAMES = ("ncells", "cell", "rgrid", "grid")
"""
nckeys(file::String)
Return all keys of the `.nc` file in `file`.
"""
function nckeys(path::String)
NCDataset(path) do ds
return keys(ds)
end
end
nckeys(a::NCDataset) = keys(a)
"""
ncdetails(file, io = stdout)
Print details about the `.nc` file in `file` on `io`.
"""
function ncdetails(file, io = stdout)
NCDataset(file) do ds
show(io, MIME"text/plain"(), ds)
end
end
ncdetails(ds::NCDataset, io = stdout) = show(io, MIME"text/plain"(), ds)
"""
ncsize(file, var)
Return the size of the variable of the `.nc` file without actually loading any data.
"""
function ncsize(file, var)
NCDataset(file) do ds
return size(ds[var])
end
end
"""
globalattr(file::String) → Dict
Return the global attributes of the .nc file.
"""
function globalattr(file::String)
NCDataset(file) do ds
return Dict(ds.attrib)
end
end
#########################################################################
# Imports
#########################################################################
include("netcdf_read.jl")
include("netcdf_write.jl")
|
{"hexsha": "0587b5a6931e37e0cfe627f5903cdfacbd2e1481", "size": 1789, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/io/netcdf.jl", "max_stars_repo_name": "Balinus/ClimateTypes.jl", "max_stars_repo_head_hexsha": "35b5e8f85638b7f1d3127b7a446de38afba2c6b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/io/netcdf.jl", "max_issues_repo_name": "Balinus/ClimateTypes.jl", "max_issues_repo_head_hexsha": "35b5e8f85638b7f1d3127b7a446de38afba2c6b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/io/netcdf.jl", "max_forks_repo_name": "Balinus/ClimateTypes.jl", "max_forks_repo_head_hexsha": "35b5e8f85638b7f1d3127b7a446de38afba2c6b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9275362319, "max_line_length": 84, "alphanum_fraction": 0.6383454444, "num_tokens": 476}
|
import Bio.SeqUtils.ProtParam
import os
import numpy as np
# Name of the benchmark set being processed; GetFeatureVectors special-cases
# the value 'IGHV' (drops the Germ_HV_IGHV3-23* features).
SET_NAME = 'MMP-cluster'
# When True, only heavy (H) chains are parsed/encoded; light chains skipped.
IF_ONLY_HEAVY = False
# NOTE(review): the counts/paths below are not referenced by any function in
# this module — presumably consumed by calling scripts; confirm before use.
CNT_DB = 2
CNT_TARGET = 1
REFERENCE_PATH_TESTCASE = './testCase/MMP-cluster/reference-PDB/'
TARGETING_PATH_TESTCASE = './testCase/MMP-cluster/targeting-MMP/'
TARGET_DESIRE_SIZE = 166 #44 #MMP-cluster
# Chothia numbering definition for CDR regions:
# chain ('L'/'H') -> CDR index ('1'/'2'/'3') -> inclusive [start, end] numbers.
CHOTHIA_CDR = {'L': {'1': [24, 34], '2': [50, 56], '3': [89, 97]}, 'H':{'1': [26, 32], '2': [52, 56], '3': [95, 102]}}
#################################################################################################################
# function ReadAminoAndNum:
# Read in the Chothia number reference and targeting files. Store the numbering and putative germline.
#
# Input: targeting_direct, reference_direct
# Output:1. dictionary of Amino, {'L': {}, 'H': {}}
# 2. dictionary of Num , {'L': {}, 'H': {}}
# 3. dictionary of Germ , {'L': {'V': {}, 'J':{}}, 'H': {'V': {}, 'J':{}}}
# 4. list of DatasetName, [dh, dm, p1,....]
# 5. list of DatasetSize, [ , , ,...]
#################################################################################################################
def ReadAminoNumGerm(targeting_direct, reference_direct):
    """Parse Chothia-numbered antibody text files from two directories.

    Reads every '*.txt' file in `reference_direct` followed by
    `targeting_direct`. Within a file:
      - lines starting with 'L' or 'H' carry one residue: chain type, its
        (possibly letter-suffixed) Chothia number, and the amino acid;
      - lines starting with '#|': every 4th such line supplies the putative
        V and J germlines from pipe-separated fields 2 and 4
        (NOTE(review): the inline comment says "second time of #|" while the
        code tests `cnt_pattern % 4 == 0` — confirm which is intended);
      - a line starting with '/' terminates the current chain record.

    Returns (Amino, Num, Germ, DatasetName, DatasetSize): Amino/Num map
    chain -> seq_name -> residue/number lists; Germ maps chain -> 'V'/'J'
    -> seq_name -> germline string; DatasetName/DatasetSize give the
    per-file dataset tag and sequence count (halved when both chains are
    present, i.e. IF_ONLY_HEAVY is False).
    """
    Amino = {'L': {}, 'H': {}}
    Num ={'L': {}, 'H': {}}
    Germ = {'L': {'V': {}, 'J':{}}, 'H': {'V': {}, 'J':{}}}
    DatasetName = []
    DatasetSize = []
    targeting_filenames = sorted(os.listdir(targeting_direct))
    reference_filenames = sorted(os.listdir(reference_direct))
    for i, name in enumerate(reference_filenames + targeting_filenames):
        if not name.endswith('.txt'):
            continue
        # reference files come first in the enumeration; pick the directory
        if i < len(reference_filenames):
            direct = reference_direct
        else:
            direct = targeting_direct
        with open(direct + name, 'r') as fi:
            data = fi.readlines()
        # dataset tag = file-name prefix before the first '_'
        DatasetName.append(name.split('_')[0])
        cnt_pattern = 0
        cnt_seq = 0
        tmp_num = []
        tmp_seq = []
        tmp_germ_V = ' '
        tmp_germ_J = ' '
        buff = ''  # NOTE(review): never used afterwards
        for j in range(len(data)):
            # if chain begin
            if data[j][0] =='L' or data[j][0] =='H':
                L_H = data[j][0]
                tmp_seq.append(data[j].split()[-1])
                # 3 fields: plain number; otherwise join number + insertion letter
                if len(data[j].split()) == 3:
                    tmp_num.append(data[j].split()[-2])
                else:
                    tmp_num.append(data[j].split()[1] + data[j].split()[-2])
            # second time of #|, line of germline
            if data[j][0]=='#' and data[j][1] == '|':
                cnt_pattern += 1
                if (cnt_pattern % 4) == 0:
                    tmp_germ_V = data[j].split("|")[2]
                    tmp_germ_J = data[j].split("|")[4]
            # time of \\, ending a sequence, need \\ to present \
            if data[j][0] == '/':
                # paired L/H chains share one sequence index, hence the halving
                if IF_ONLY_HEAVY:
                    seq_name = name.split('_')[0] + '_' + str(cnt_seq)
                else:
                    seq_name = name.split('_')[0] + '_' + str(int(cnt_seq / 2))
                cnt_seq += 1
                Amino[L_H][seq_name] = tmp_seq
                Num[L_H][seq_name] =tmp_num
                Germ[L_H]['V'][seq_name] = tmp_germ_V
                Germ[L_H]['J'][seq_name] = tmp_germ_J
                # if not tmp_germ_V.startswith('IGHV3-23'):
                #     print(data[j - 8])
                #     print(seq_name)
                #     print(tmp_germ_V, tmp_germ_J)
                tmp_num = []
                tmp_seq = []
                tmp_germ_V = ' '
                tmp_germ_J = ' '
        if IF_ONLY_HEAVY:
            DatasetSize.append(cnt_seq)
        else:
            DatasetSize.append(int(cnt_seq / 2))
    return Amino, Num, Germ, DatasetName, DatasetSize
#################################################################################################################
# function GetOneHotGerm:
# Transform the stored putative germline into one-hot encoded features.
#
# Input: Germ, DatasetSize, DatasetName
# Output: 1. array of OneHotGerm, [[seq1 onehot], [seq2 onehot], [seq3 onehot], ...]
# 2. list of GermFeatureNames according to one hot, [LV_IGLV1*1, LV_IGLV1*2,....
# LJ_XXXX,
# HV_XXXX,
# HJ_XXXX ...]
#################################################################################################################
def GetOneHotGerm(Germ, DatasetSize, DatasetName):
    """One-hot encode the putative V/J germline assignment of every sequence.

    Feature names look like 'Germ_<chain><segment>_<germline>' and are
    ordered chain -> segment -> sorted germline value. Light-chain features
    are skipped entirely when IF_ONLY_HEAVY is True.

    Returns (OneHotGerm, GermFeatureNames): one encoded row per sequence,
    datasets concatenated in DatasetName order.
    """
    GermFeatureNames = []
    for chain in Germ:
        if IF_ONLY_HEAVY and chain == 'L':
            continue
        for segment in Germ[chain]:
            # one feature per distinct germline seen for this chain/segment
            for germline in sorted(set(Germ[chain][segment].values())):
                GermFeatureNames.append('Germ_' + chain + segment + '_' + germline)
    OneHotGerm = []
    for ds_idx, ds_name in enumerate(DatasetName):
        for j in range(int(DatasetSize[ds_idx])):
            seq_name = ds_name + '_' + str(j)
            row = []
            for feat in GermFeatureNames:
                chain = feat.split('_')[1][0]
                segment = feat.split('_')[1][1]
                row.append(1 if Germ[chain][segment][seq_name] == feat.split('_')[2] else 0)
            OneHotGerm.append(row)
    return OneHotGerm, GermFeatureNames
#################################################################################################################
# function ReadCanonTemp:
# Read in the template file (default PIGS) and store it.
#
# Output: 1. dictionary of CanonTemp, {'L': {'1': {'1':[]}, '2': {'1':[]}, '3': {'1':[]}}, 'H': {'1': {'1':[]}, '2': {'1':[]}, '3': {'1':[]}}}
#################################################################################################################
def ReadCanonTemp(canonical_direct):
    """Read the canonical-structure template file (default PIGS format).

    Each line looks like '<chain><cdr> <type> <len> [<pos> <residues>]...',
    e.g. 'L1 1 10 24 A'. The first two characters select chain and CDR,
    field 1 is the canonical class, and the remaining fields are stored
    verbatim as one template version for that class.

    Returns CanonTemp[chain][cdr][class] -> list of template field-lists.
    """
    CanonTemp = {'L': {'1': {'1': []}, '2': {'1': []}, '3': {'1': []}},
                 'H': {'1': {'1': []}, '2': {'1': []}, '3': {'1': []}}}
    with open(canonical_direct, 'r') as fi:
        for line in fi:
            fields = line.split()
            chain, cdr = line[0], line[1]
            # one class may have several alternative template versions
            CanonTemp[chain][cdr].setdefault(fields[1], []).append(fields[2:])
    return CanonTemp
#################################################################################################################
# function GetCanon:
# Assign each sequence witht the predicted type of canonical structure according to the template.
#
# Input: Amino, Num
# Output: 1. dictionary of CanonTemp, {'L': {'1': {'1':[]}, '2': {'1':[]}, '3': {'1':[]}}, 'H': {'1': {'1':[]}, '2': {'1':[]}, '3': {'1':[]}}}
# optional: PIGS / Chothia
#################################################################################################################
def GetCanon(canonical_direct, Amino, Num):
    """Assign a canonical-structure class to every CDR of every sequence.

    For each sequence, chain and CDR, the Chothia CDR length is computed
    from Num and matched against the template table from `canonical_direct`;
    templates may additionally restrict specific Chothia positions to sets
    of allowed residues. Returns Canon[chain][cdr][seq_name] = class string,
    with '0' when no template matched.
    """
    CanonTemp = ReadCanonTemp(canonical_direct)
    Canon = {'L': {'1': {}, '2': {}, '3': {}}, 'H': {'1': {}, '2': {}, '3': {}}}
    # for every sequence
    for seq_name in Num['H']:
        for L_H in Canon:
            if IF_ONLY_HEAVY:
                if L_H == 'L':
                    continue
            for j in Canon[L_H]:
                # count residues whose (letter-stripped) Chothia number falls
                # inside this CDR's boundaries -> CDR length
                cnt_len = 0
                for k in Num[L_H][seq_name]:
                    if k[-1]>='A'and k[-1]<='Z':
                        num_i = int(k[:-1])
                    else:
                        num_i = int(k)
                    if num_i >= CHOTHIA_CDR[L_H][j][0] and num_i <= CHOTHIA_CDR[L_H][j][1]:
                        cnt_len += 1
                length = cnt_len
                # for every type number on specific CDR region
                for k in CanonTemp[L_H][j]:
                    ############## same type have diff version of template
                    for m in range(len(CanonTemp[L_H][j][k])):
                        # if have matched CDR length, then give zero type
                        if CanonTemp[L_H][j][k][m][0] == str(length):
                            # check if length is the only restriction
                            if len(CanonTemp[L_H][j][k][m]) == 1:
                                Canon[L_H][j][seq_name] = k
                            # check for each position with in specific motif
                            else:
                                # template tail = (position, allowed residues) pairs
                                restriction = CanonTemp[L_H][j][k][m][1:]
                                for l in range(0,len(restriction),2):
                                    pos = CanonTemp[L_H][j][k][m][l+1]
                                    # index of the number
                                    if pos not in Num[L_H][seq_name]:
                                        break
                                    else:
                                        id = int(Num[L_H][seq_name].index(pos))
                                        # NOTE(review): `s` is assigned but never used
                                        s=CanonTemp[L_H][j][k][m][l + 2]
                                        if Amino[L_H][seq_name][id] not in CanonTemp[L_H][j][k][m][l+2]:
                                            break
                                    # NOTE(review): this assignment sits inside the
                                    # position loop, so the class is recorded once the
                                    # positions checked so far match and a later
                                    # `break` does not undo it — confirm whether a
                                    # full-match-only (for/else) was intended.
                                    Canon[L_H][j][seq_name] = k
                # if no match canonical structure found, then append 0
                if seq_name not in Canon[L_H][j]:
                    Canon[L_H][j][seq_name] = '0'
    return Canon
#################################################################################################################
# function GetOneHotCanon:
# Similar to GetOneHotGerm, transform the stored canonical structure into one-hot encoded features.
#
# Input: Amino, Num, DatasetSize, DatasetName
# Output: 1. array of OneHotCanon, [[seq1 onehot], [seq2 onehot], [seq3 onehot], ...]
# 2. list of CanonFeatureNames according to one hot, [Canon_L1_1, Canon_L1_2,....
# Canon_L2_1,
# Canon_L3_1,
# Canon_H1_1,
# Canon_H2_1,
# Canon_H3_1,...]
#################################################################################################################
def GetOneHotCanon(canonical_direct, Amino, Num, DatasetSize, DatasetName):
    """One-hot encode the predicted canonical-structure class of every CDR.

    Feature names look like 'Canonical_<chain><cdr>_<class>' (class '0'
    means no template matched). Light-chain features are skipped when
    IF_ONLY_HEAVY is True.

    Returns (OneHotCanon, CanonFeatureNames): one encoded row per sequence,
    datasets concatenated in DatasetName order.
    """
    Canon = GetCanon(canonical_direct, Amino, Num)
    CanonFeatureNames = []
    for chain in Canon:
        if IF_ONLY_HEAVY and chain == 'L':
            continue
        # cdr iterates the CDR indices '1', '2', '3'
        for cdr in Canon[chain]:
            for cls in sorted(set(Canon[chain][cdr].values())):
                CanonFeatureNames.append('Canonical_' + chain + cdr + '_' + cls)
    OneHotCanon = []
    for ds_idx, ds_name in enumerate(DatasetName):
        for j in range(int(DatasetSize[ds_idx])):
            seq_name = ds_name + '_' + str(j)
            row = []
            for feat in CanonFeatureNames:
                chain = feat.split('_')[1][0]
                cdr = feat.split('_')[1][1]
                row.append(1 if Canon[chain][cdr][seq_name] == feat.split('_')[2] else 0)
            OneHotCanon.append(row)
    return OneHotCanon, CanonFeatureNames
#################################################################################################################
# function GetCDRH3:
# Take the CDR-H3 of each seqeunce.
#
# Input: Amino, Num
# Output: 1. dictionary of CDRH3, {}
#################################################################################################################
def GetCDRH3(Amino, Num):
    """Extract the CDR-H3 amino-acid string of every heavy chain.

    A residue belongs to CDR-H3 when its Chothia number (letter suffix
    stripped) lies inside CHOTHIA_CDR['H']['3'], inclusive on both ends.

    Returns a dict seq_name -> CDR-H3 string (may be empty).
    """
    start, end = CHOTHIA_CDR['H']['3']
    CDRH3 = {}
    for seq_name in Amino['H']:
        numbers = Num['H'][seq_name]
        pieces = []
        for i in range(len(numbers)):
            label = numbers[i]
            # strip a trailing insertion letter (e.g. '100A') before parsing
            numeric = int(label[:-1]) if 'A' <= label[-1] <= 'Z' else int(label)
            if start <= numeric <= end:
                pieces.append(Amino['H'][seq_name][i])
        CDRH3[seq_name] = ''.join(pieces)
    return CDRH3
#################################################################################################################
# function GetCDRH3PI:
# Calculate the pI value for each sequence
#
# Input: CDRH3
# Output: 1. dictionary of PI, {}
#################################################################################################################
def GetCDRH3PI(CDRH3):
    """Compute the isoelectric point (pI) of each CDR-H3 sequence.

    Parameters:
        CDRH3: dict mapping seq_name -> CDR-H3 amino-acid string.
    Returns:
        dict mapping seq_name -> pI (float), or -1 as a sentinel when
        Biopython cannot compute a pI (e.g. empty or gap-containing input).

    Fixes: removed the unused `void` list, and narrowed the bare `except:`
    (which also swallowed KeyboardInterrupt/SystemExit) to `Exception`.
    """
    PI_CDRH3 = {}
    for seq_name in CDRH3:
        prot = Bio.SeqUtils.ProtParam.ProteinAnalysis(CDRH3[seq_name])
        try:
            PI_CDRH3[seq_name] = prot.isoelectric_point()
        except Exception:
            # sentinel for sequences whose pI cannot be computed
            PI_CDRH3[seq_name] = -1
    return PI_CDRH3
#################################################################################################################
# function GetPIBin:
# Halve the bin of pI following the binning method using sequence's pI information.
#
# Input: PI_CDRH3
# Output: 1. a list of PITheresholds, []
#################################################################################################################
def GetPIBin(PI_CDRH3):
    """Adaptively bin the pI axis by recursive halving.

    Starting from thresholds [0, 7, 14], any bin strictly containing more
    than 10% of the sequences is split at its midpoint; splitting repeats
    until every bin is small enough or narrower than 2 * PITolerance.

    Parameters:
        PI_CDRH3: dict mapping seq_name -> pI value (float).
    Returns:
        Sorted list of bin-edge floats (always contains 0.0, 7.0, 14.0).

    Bug fix: the original loop condition
    (`while cnt > tenPercent or len(PITheresholds) == 3`) never terminated
    when no bin could be split while only the initial three thresholds
    existed (e.g. empty input, or all pI values sitting exactly on an
    edge). The loop now exits as soon as a full pass performs no split,
    which is equivalent on all previously terminating inputs.
    """
    PITheresholds = [0.0, 7.0, 14.0]
    tenPercent = 0.1 * len(PI_CDRH3)
    PITolerance = 0.3
    while True:
        split_made = False
        for i in range(1, len(PITheresholds)):
            # leave alone bins already narrower than twice the tolerance
            if (PITheresholds[i] - PITheresholds[i - 1]) < (2 * PITolerance):
                continue
            cnt = 0
            for seq in PI_CDRH3:
                if PITheresholds[i - 1] < PI_CDRH3[seq] < PITheresholds[i]:
                    cnt += 1
            # check if the bin overflows ten percent of the population
            if cnt > tenPercent:
                # halve the overfull bin and restart the scan
                PITheresholds.append((PITheresholds[i - 1] + PITheresholds[i]) / 2.0)
                PITheresholds = sorted(PITheresholds)
                split_made = True
                break
        if not split_made:
            break
    return PITheresholds
#################################################################################################################
# function GetOneHotPI:
# Transform the pI values into one-hot encoded pI bin features.
#
# Input: CDRH3, DatasetSize, DatasetName
# Output: 1. array of OneHotPI, [[seq1 onehot],
# [seq2 onehot],
# [seq3 onehot],
# ...]
# 2. list of PIFeatureNames according to one hot, [PI_bin1, PI_bin2, PI_bin3...]
#################################################################################################################
def GetOneHotPI(CDRH3, DatasetSize, DatasetName):
    """One-hot encode each sequence's CDR-H3 pI into adaptive pI bins.

    Bins come from GetPIBin over the pI values of all sequences; feature
    names look like 'PI_<low>-<high>'. A sequence gets a 1 in the first
    bin whose inclusive [low, high] range contains its pI.

    Returns (OneHotPI, PIFeatureNames): one row per sequence, datasets
    concatenated in DatasetName order.
    """
    pi_values = GetCDRH3PI(CDRH3)
    thresholds = GetPIBin(pi_values)
    PIFeatureNames = ['PI_' + str(thresholds[b - 1]) + '-' + str(thresholds[b])
                      for b in range(1, len(thresholds))]
    OneHotPI = []
    for ds_idx, ds_name in enumerate(DatasetName):
        for j in range(int(DatasetSize[ds_idx])):
            seq_name = ds_name + '_' + str(j)
            row = [0] * len(PIFeatureNames)
            value = pi_values[seq_name]
            for b in range(1, len(thresholds)):
                # inclusive on both ends; first matching bin wins
                if float(thresholds[b - 1]) <= value <= float(thresholds[b]):
                    row[b - 1] = 1
                    break
            OneHotPI.append(row)
    return OneHotPI, PIFeatureNames
#################################################################################################################
# function GetPositionalMotifFreq:
# Count the frequency of each possible frequent possitional motif for each dataset.
#
# Input: CDRH3
# Output: 1. dictionary of MotifFreq, {'r1':{}, 'r2':{},'t1':{}, 't2':{}, 't3':{}, 't4':{}, 't5':{}, 't6':{}, 't7':{}, 't8':{}}
#################################################################################################################
def GetPositionalMotifFreq(CDRH3):
    """Count positional-motif occurrences of every CDR-H3, per dataset.

    A positional motif is the string '<start_index>_<substring>' for
    substring lengths 2 through 9. Counts are accumulated per dataset
    (dataset tag = part of seq_name before the first '_', which must be
    one of the predefined keys below).

    Returns (MotifFreq, MotifDict): MotifFreq[dataset][motif] -> count,
    MotifDict[seq_name] -> list of motifs present in that sequence.

    Bug fix: the inner loop previously ran `range(len(seq) - i)`, skipping
    the last valid start position for every motif length (and never
    emitting a full-length motif); it now uses `range(len(seq) - i + 1)`
    so all start positions are covered.
    """
    MotifFreq = {'r1': {}, 'r2': {}, 't1': {}, 't2': {}, 't3': {}, 't4': {},
                 't5': {}, 't6': {}, 't7': {}, 't8': {}}
    MotifDict = {}
    for seq_name in CDRH3:
        MotifDict[seq_name] = []
        f_name = seq_name.split('_')[0]
        seq = CDRH3[seq_name]
        # motif lengths 2..9
        for i in range(2, 10):
            if i > len(seq):
                continue
            for j in range(len(seq) - i + 1):
                PostionalMotif = str(j) + '_' + seq[j:j + i]
                MotifDict[seq_name].append(PostionalMotif)
                MotifFreq[f_name][PostionalMotif] = MotifFreq[f_name].get(PostionalMotif, 0) + 1
    return MotifFreq, MotifDict
#################################################################################################################
# function GetImpMotif (Version 1.0):
# Take only the most 2 frequent motif in each data set, top 2 * 10 set * 9 length = 180
#
# Input: MotifFreq
# Output: 1. list of ImpMotif, [motif1, motif2, ...]
#################################################################################################################
def GetImpMotif(MotifFreq):
    """Select the (up to) 2 most frequent positional motifs per dataset/group.

    For each dataset, motifs are grouped by the numeric prefix of
    '<prefix>_<substring>' for prefix values 2..10, the two highest-count
    motifs of each group are taken, and the union is returned sorted.

    NOTE(review): the prefix being grouped on is the motif's *start
    position*, while the banner comment above speaks of motif *length* —
    so motifs at positions 0 and 1 are never selected. Behavior preserved
    as-is; confirm intent before changing.
    """
    Top2 = 2
    selected = []
    for f_name in MotifFreq:
        freq_table = MotifFreq[f_name]
        for i in range(2, 11):
            bucket = {m: c for m, c in freq_table.items() if m.split('_')[0] == str(i)}
            ranked = sorted(bucket.items(), key=lambda kv: kv[1], reverse=True)
            selected.extend(entry[0] for entry in ranked[:Top2])
    return list(sorted(set(selected)))
#################################################################################################################
# function GetCDRH3Motif:
# Assign present frequent motif for each sequence
#
# Input: ImpMotif, CDRH3
# Output: 1. dictionary of Motif_CDRH3, {}
#################################################################################################################
def GetCDRH3Motif(ImpMotif, CDRH3, MotifDict):
    """Mark, for each sequence, which frequent motifs it contains.

    Returns Motif_CDRH3[seq_name] -> multi-hot list aligned with ImpMotif
    (1 when the motif occurs in MotifDict[seq_name], else 0).
    """
    Motif_CDRH3 = {}
    for seq_name in CDRH3:
        present = MotifDict[seq_name]
        Motif_CDRH3[seq_name] = [1 if motif in present else 0 for motif in ImpMotif]
    return Motif_CDRH3
#################################################################################################################
# function MultiHotMotif:
# Transfer motif information for each sequence to multi-hot encoded features.
#
# Input: CDRH3, DatasetSize, DatasetName
# Output: 1. array of MultiHotMotif, [[seq1 multihot], [seq2 multihot], [seq3 multihot],...]
# 2. list of MotifFeatureNames according to multi hot, [Motif1, Motif2, ...]
#################################################################################################################
def MultiHotMotif(CDRH3, DatasetSize, DatasetName):
    """Multi-hot encode frequent CDR-H3 positional motifs per sequence.

    Pipeline: count positional motifs, keep the most frequent ones, and
    mark their presence per sequence. Feature names are 'Motif_<motif>'.

    Returns (MultiHotMotif, MotifFeatureNames): one row per sequence,
    datasets concatenated in DatasetName order.
    """
    MotifFreq, MotifDict = GetPositionalMotifFreq(CDRH3)
    frequent = GetImpMotif(MotifFreq)
    motif_hits = GetCDRH3Motif(frequent, CDRH3, MotifDict)
    MotifFeatureNames = ['Motif_' + motif for motif in frequent]
    MultiHotMotif = []
    for ds_idx, ds_name in enumerate(DatasetName):
        for j in range(int(DatasetSize[ds_idx])):
            MultiHotMotif.append(motif_hits[ds_name + '_' + str(j)])
    return MultiHotMotif, MotifFeatureNames
#################################################################################################################
# function GetFeatureVectors:
# Combine germline, canonical structure, pI, motif features to feature vectors
#
# Input: OneHotGerm, GermFeatureNames, OneHotCanon, CanonFeatureNames, OneHotPI, PIFeatureNames, MultiHotMotif, MotifFeatureNames
# Output: 1. AllFeatureVectors for every sequence, [[seq1 LV, LJ, HV, HJ, L1, L2, L3, L1, L2, L3, pI, motif1, motif2, motifi...],
# [seq2 LV, LJ, HV, HJ, L1, L2, L3, L1, L2, L3, pI, motif1, motif2, motifi...],
# ...]
#
# 2. AllFeatureNames [LV, LJ, HV, HJ, L1, L2, L3, L1, L2, L3, pI, motif1, motif2, motifi...]
#################################################################################################################
def GetFeatureVectors(OneHotGerm, GermFeatureNames,
                      OneHotCanon, CanonFeatureNames,
                      OneHotPI, PIFeatureNames,
                      MultiHotMotif, MotifFeatureNames):
    """Concatenate germline, canonical, pI and motif encodings per sequence.

    All four inputs must be row-aligned (same sequence order). When
    SET_NAME == 'IGHV', a second feature matrix is also produced with every
    'Germ_HV_IGHV3-23*' column removed; otherwise the "excluded" outputs
    alias the full ones.

    Returns (AllFeatureVectors, AllFeatureNames,
             ExcludeIGHVVectors, ExcludeFeatureNames) with the vectors as
    numpy arrays.
    """
    AllFeatureNames = GermFeatureNames + CanonFeatureNames + PIFeatureNames + MotifFeatureNames
    AllFeatureVectors = np.array([
        OneHotGerm[i] + OneHotCanon[i] + OneHotPI[i] + MultiHotMotif[i]
        for i in range(len(OneHotGerm))
    ])
    ExcludeIGHVVectors = AllFeatureVectors
    ExcludeFeatureNames = AllFeatureNames
    if SET_NAME == 'IGHV':
        keep = [i for i, name in enumerate(AllFeatureNames)
                if not name.startswith('Germ_HV_IGHV3-23')]
        ExcludeFeatureNames = [AllFeatureNames[i] for i in keep]
        ExcludeIGHVVectors = AllFeatureVectors[:, keep]
    return AllFeatureVectors, AllFeatureNames, ExcludeIGHVVectors, ExcludeFeatureNames
if __name__=='__main__':
    # Ad-hoc smoke test: parse one directory of Chothia-numbered files.
    # NOTE(review): reference and targeting paths are identical here, so
    # ReadAminoNumGerm processes every file twice (once per role).
    targeting_direct = '../testCase-MMP/data/IGHV/'
    reference_direct = '../testCase-MMP/data/IGHV/'
    Amino, Num, Germ, DatasetName, DatasetSize = ReadAminoNumGerm(targeting_direct, reference_direct)
|
{"hexsha": "39353bc3e62c84bff9ded7c8804063acfe682985", "size": 23004, "ext": "py", "lang": "Python", "max_stars_repo_path": "ASAP/FeatureExtraction.py", "max_stars_repo_name": "HassounLab/ASAP", "max_stars_repo_head_hexsha": "fc02471cd352da1a7783ea48a5caf7874fe4910a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-03-16T06:02:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-16T03:32:04.000Z", "max_issues_repo_path": "ASAP/FeatureExtraction.py", "max_issues_repo_name": "HassounLab/ASAP", "max_issues_repo_head_hexsha": "fc02471cd352da1a7783ea48a5caf7874fe4910a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ASAP/FeatureExtraction.py", "max_forks_repo_name": "HassounLab/ASAP", "max_forks_repo_head_hexsha": "fc02471cd352da1a7783ea48a5caf7874fe4910a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-13T11:45:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-07T09:45:53.000Z", "avg_line_length": 43.1594746717, "max_line_length": 144, "alphanum_fraction": 0.4532689967, "include": true, "reason": "import numpy", "num_tokens": 5621}
|
[STATEMENT]
lemma minus_eq: "x - y = abs_nat (rep_nat x - rep_nat y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x - y = abs_nat (rep_nat x - rep_nat y)
[PROOF STEP]
by (metis abs_minus rep_inverse)
|
{"llama_tokens": 92, "file": "Polynomials_Term_Order", "length": 1}
|
import cv2
import pandas as pd
from face_alignment_1 import face_alignment
from face_base import find_face
from face_base import license_detection_Rough
from face_base import license_detection_Detailed
from smooth_sharpen import smooth
from smooth_sharpen import sharpen
from face_base import divide_image
from face_base import face_wipeoff
from PIL import Image
import pytesseract
import numpy as np
from dfg import rotate_image
import os
import ocr
import shutil
import numpy as np
from PIL import Image
from glob import glob
import imutils
# Batch ID-card OCR pipeline over test_images2/: detect a face, crop the
# card region (coarse then detailed — presumably guided by the face box;
# confirm in face_base), rotate via the lower half's OCR result, OCR the
# upper and lower halves, and write recognized text plus images into
# test_result2/<image name>/.
image_files = glob('test_images2/*.*')
result_dir = 'test_result2'
# start each run with an empty result directory
if os.path.exists(result_dir):
    shutil.rmtree(result_dir)
os.mkdir(result_dir)
for image_file in sorted(image_files):
    print(image_file)
    img = np.array(Image.open(image_file).convert('RGB'))
    width,height,layer = img.shape
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face,face_plus,img,img_gray = find_face(img)
    # skip images where face detection reports 'No'
    if face_plus == 'No':
        continue
    print('secenon')
    lincese ,lincese_gray = license_detection_Rough(img,img_gray,face_plus)
    # cv2.imshow('license_ori', lincese)
    # cv2.waitKey(0)
    face,face_plus,img,img_gray = find_face(lincese)
    upper,lower = divide_image(lincese,face_plus)
    result_lower, lower = ocr.model(lower)
    # rotate using the lower-half OCR output; tag == 0 appears to signal
    # failure (skip the image) — confirm in dfg.rotate_image
    lincese,lincese_gray,tag = rotate_image(lower,lincese,lincese_gray)
    if tag == 0:
        continue
    # cv2.imshow('upper', upper)
    # cv2.waitKey(0)
    # cv2.imshow('lower', lower)
    # cv2.waitKey(0)
    # cv2.imshow('license', lincese)
    # cv2.waitKey(0)
    # lincese_gray = cv2.resize(lincese_gray, (400,247), interpolation=cv2.INTER_CUBIC)
    # lincese = cv2.resize(lincese, (400,247), interpolation=cv2.INTER_CUBIC)
    # cv2.imshow('resize',lincese_gray)
    # cv2.waitKey(0)
    # cv2.imshow('resize', lincese)
    # cv2.waitKey(0)
    # second, detailed crop pass on the rotated image
    face,face_plus,img,img_gray = find_face(lincese)
    print('22')
    lincese ,lincese_gray = license_detection_Detailed(lincese,lincese_gray,face_plus)
    print('3')
    lincese_gray_noface = face_wipeoff(lincese_gray,face_plus)
    print('4')
    face, face_plus, img, img_gray = find_face(lincese)
    # cv2.imshow('bb',bb)
    # cv2.waitKey(0)
    print('1')
    upper,lower = divide_image(lincese,face_plus)
    # cv2.imshow('upper',upper)
    # cv2.waitKey(0)
    # cv2.imshow('lower',lower)
    # cv2.waitKey(0)
    # cv2.imwrite('upper.png',upper)
    # cv2.imwrite('lower.png',lower)
    # one result folder per input image, recreated from scratch
    output_dir = os.path.join(result_dir, os.path.splitext(os.path.split(image_file)[-1])[0])
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    result_upper, image_result_upper = ocr.model(upper)
    output_file = os.path.join(output_dir, 'result_upper.png')
    cv2.imwrite(output_file, image_result_upper)
    result_lower, image_result_lower = ocr.model(lower)
    output_file = os.path.join(output_dir, 'result_lower.png')
    cv2.imwrite(output_file, image_result_lower)
    print('1')
    result, image_framed = ocr.model(lincese)
    # cv2.imshow('img',image_framed)
    # cv2.waitKey(0)
    # output_file = os.path.join(output_dir, 'result.png')
    # cv2.imwrite(output_file, image_framed)
    # Collect text lines: from the lower half keep only 18-character runs of
    # digits (optionally ending in 'X'), i.e. Chinese-ID-number shaped
    # strings; from the upper half keep every recognized line.
    # NOTE(review): `list` shadows the builtin of the same name.
    list = []
    for key in result_lower:
        length=len(result_lower[key][1])
        idnumber=[]
        for i in range(length):
            # print((i == length-1) and (result_lower[key][1][i] == 'X'),i == length-1,result_lower[key][1][i] == 'X')
            if result_lower[key][1][i].isdigit() or (i == length-1 and result_lower[key][1][i] == 'X'):
                idnumber.append(result_lower[key][1][i])
        print(idnumber)
        if idnumber!=[] and len(idnumber)==18:
            list.append(idnumber)
        # list.append(result_lower[key][1])
    for key in result_upper:
        list.append(result_upper[key][1])
    # output_dir = os.path.join(result_dir, os.path.splitext(os.path.split(image_file)[-1])[0])
    # if os.path.exists(output_dir):
    #     shutil.rmtree(output_dir)
    # os.mkdir(output_dir)
    # write the collected text, one entry per line
    output_file = os.path.join(output_dir,'info.txt')
    file = open(output_file, 'w')
    for fp in list:
        file.write(str(fp))
        file.write('\n')
    file.close()
    output_file = os.path.join(output_dir, 'image.png')
    cv2.imwrite(output_file, lincese)
    print(' ')
|
{"hexsha": "37f4419008e2d47b043dbe9ead82398c78862287", "size": 4357, "ext": "py", "lang": "Python", "max_stars_repo_path": "performance_new.py", "max_stars_repo_name": "zacQin/ID_card_identification_2", "max_stars_repo_head_hexsha": "b359e8b0d26352ce359b280c36f2ffa837aa9611", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "performance_new.py", "max_issues_repo_name": "zacQin/ID_card_identification_2", "max_issues_repo_head_hexsha": "b359e8b0d26352ce359b280c36f2ffa837aa9611", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "performance_new.py", "max_forks_repo_name": "zacQin/ID_card_identification_2", "max_forks_repo_head_hexsha": "b359e8b0d26352ce359b280c36f2ffa837aa9611", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7300613497, "max_line_length": 118, "alphanum_fraction": 0.6779894423, "include": true, "reason": "import numpy", "num_tokens": 1180}
|
c
c Program runs the subroutine iri_sm to obtain IRI13 densities
c along an L-shell.
c
c dlg June 3, 2009 fixed issue with trying to calculate bridge for locations
c below the F2 peak along the selected L-shell
c dlg June 11, 2009 added switchon feature to field aligned bridge function
c so that the equatorial density would be reached without
c having to use hugh power-law function factors when the
c topside fitted power-law was above equatorial density
c at the equator.
c
      subroutine iri_ps_bridge(rr,al,alatr,amlt,itime,eq_iri_ps_trough,
     &     transh,rf2,alpha,dno,co,switchh,switchw,istat)
c
c     Compute the parameters of a power-law "bridge" density function
c     joining the IRI topside ionosphere to the equatorial density
c     eq_iri_ps_trough along the field line L = al.
c
c     Arguments (as used below; units presumed, verify against callers):
c       rr        in  - radial distance of the evaluation point (Re)
c       al        in  - L-shell of the field line
c       alatr     in  - latitude in radians; only its sign (hemisphere)
c                       is used here
c       amlt      in  - magnetic local time (hours)
c       itime     in  - two-element time array passed through to iri_sm
c       eq_iri_ps_trough in - equatorial density the bridge must reach
c       transh    out - height (km) of the topside transition point
c       rf2       out - radial distance (Re) of the F2 peak on this L
c       alpha,dno,co out - bridge power law  n(h) = dno*h**(-alpha) + co
c       switchh,switchw out - switch-on height and width for the bridge
c       istat     out - 0 = bridge computed, -1 = no bridge required
c
      real re,tot_delh,delh,rr,rstart,amltrad
      real rsample1,rsample2
      parameter (re=6371.0,tot_delh=600.0/re,delh=5.0)
      parameter (r=260.0/re+1.0)
c      parameter (r=260.0/re+1.0,delr=delh/re)
      parameter (amltrad=3.1415927/12.0)
c
      real outf(20,100),oarr(50),alatr,along
      real dens,hs,dens_old,rf3
      real delrr,refden
      real amlt,rs,al
      real ro,transh,delhh,alpha,term1
      real eqh,ano,eq_iri_ps_trough,co,fract
      real dent
      real diffr,delr,cosr1,alatr1,cosr2,alatr2
      real ansample1,ansample2,rloc,rpos,rf2
      real diffold,rlocold,an1old,an2old,dl,ahemisphere
      real switchh,switchw
      real*8 dno,dntransh,dtransh,dalpha
      integer*4 itime(2)
      integer istat,icount,iflag
      common /irioutput/ rz12,f107,neiri,nhoiri,nheiri,noiri
c      print*,'entering iri_ps_bridge',rr,al,amlt,itime,eq_iri_ps_trough
c     istat must be either 0 or -1 as it is used in an equation later
      istat=0
      dl=al
c     !Trevor Garner found error assuming north only, now pass latitude
      ahemisphere=sign(1.0,alatr)
c     get height and density of the f2 peak
c      cosrl=amin1(sqrt(r/al),1.0)
c      alatr=acos(cosrl) !Trevor Garner found error assuming north only, now pass lat
      along=amod((amlt+12.0),24.0)*amltrad
c     iterate iri_sm at the dipole latitude of each trial radius so the
c     sample stays on the field line L = al
      cosrl=amin1(sqrt(r/al),1.0)
      alatrl=acos(cosrl)*ahemisphere
      call iri_sm(alatrl,along,r,itime,outf,oarr)
      r2=oarr(2)/re+1.0
      cosrl=amin1(sqrt(r2/al),1.0)
      alatrl=acos(cosrl)*ahemisphere
      call iri_sm(alatrl,along,r2,itime,outf,oarr)
      r2=oarr(2)/re+1.0
      cosrl=amin1(sqrt(r2/al),1.0)
      alatrl=acos(cosrl)*ahemisphere
      call iri_sm(alatrl,along,r2,itime,outf,oarr)
c     approximate the F2 peak along the L-shell=al
      rf2=oarr(2)/re+1.0
c     If L-shell is at or below "r", the starting radial distance
c     for searching for the maximum negative slope above the f2 peak,
c     then the L-shell provided is exclusively an ionospheric issue
c     and we need to pass back parameters that will minimize the hassle
c     associated with the rest of the calculation for density, which
c     will necessarily exclude the bridge density anyway.
c      print*,'f2 peak at:',rf2,al
      if(rr.le.rf2) then
         istat=-1
c      print*,'No bridge required, istat=-1 ',rs,al
         return
      endif
c     In an effort to reduce the cals to iri2007 the following is used
c     to approximate the point of maximum negative slope in the topside
c     ionosphere. This has been obtained from a linear fit to this location
c     (derived from the search algorithm above) as a function of returned
c     rz12 value from IRI2007. That analysis obtained this relationship:
c     ro = (1.05454+-0.000102) + (8.62678e-5+-1.20975e-6)*rz12
      ro = 1.05454 + 8.62678e-5*rz12
c      print*,'fieldaligned_bridge:',rz12,ro,rf2
c     keep the transition point above the F2 peak
      if (ro .le. rf2) ro=rf2+0.01
      transh=(ro-1.0)*re
      diffh=1.0
      diffr=diffh/re
      ah1=transh-diffh
      ah2=transh+diffh
      r1=ah1/re+1.0
      r2=ah2/re+1.0
c     get the density at the maximum slope height
      cosrl=amin1(sqrt(ro/al),1.0)
      alatrl=acos(cosrl)*ahemisphere
      call iri_sm(alatrl,along,ro,itime,outf,oarr)
      antransh=outf(1,1)
c     setup for use of densities and heights of the locations
c     on either side of the point of maximum negative slope.
c     Since only calculating ro from a fitted function, need to separately
c     determine the ionospheric densities above and below to support initial
c     calculation of the power law function.
      cosrl=amin1(sqrt(r1/al),1.0)
      alatrl=acos(cosrl)*ahemisphere
      call iri_sm(alatrl,along,r1,itime,outf,oarr)
      an1=outf(1,1)
c      print*,'an1: ',alatrl,along,r1,an1,al
      cosrl=amin1(sqrt(r2/al),1.0)
      alatrl=acos(cosrl)*ahemisphere
      call iri_sm(alatrl,along,r2,itime,outf,oarr)
      an2=outf(1,1)
c      print*,'an2: ',alatrl,along,r2,an2,al
      if(al.le.r2) then
         istat=-1
c      print*,'No bridge required, istat=-1 ',al,r2
         return
      endif
      eqh=(al-1.0)*re
c      print*,'bridge=',ah1,ah2,eqh,transh,antransh
c      print*,'      =',an1,an2,eq_iri_ps_trough
c     initial power-law fit through the two sample points (ah1,an1),(ah2,an2)
      alpha=-alog10(an1/an2)/alog10(ah1/ah2)
      ano=an1*ah1**alpha
c      print*,'intial alpha,ano:',alpha,ano
c     an3 = initial power law evaluated at the equator
      an3=ano*eqh**(-alpha)
c      print*,'setup:',an3,eq_iri_ps_trough
c     set up use of switch term that will not function by default
      switchh=eqh*2.0
      switchw=eqh/10.0
      if (eq_iri_ps_trough .ge. an3) then
         if(an2.le.eq_iri_ps_trough) then
c      print*,'inverse IRI-eq:'
c     equatorial density exceeds the topside sample: fit directly from
c     the transition point to the equator
            alpha=alog10(antransh/eq_iri_ps_trough)/alog10(transh/eqh)
            ano=antransh*transh**alpha
            dno=ano
         else
c      print*,'greater than or equal too'
c     offset the power law so it reaches the equatorial density
            co=eq_iri_ps_trough - an3
            alpha=-alog10((an1-co)/(an2-co))/alog10(ah1/ah2)
            ano=(an1-co)*ah1**alpha
            dno=ano
         endif
      else
c      print*,'less than'
c     keep initial alpha and ano values
c     provide switch values that bring the bridge function to the equatorial
c     density at the equator
         switchh=transh+(eqh-transh)/2.0
         switchw=(eqh-transh)/2.0
         dno=ano
         co=0.0
      endif
c      print*,'final=',dno,alpha,co
c      print*,'     =',ah1,ah2,eqh,an1,an2,eq_iri_ps_trough
c      print*,dens_old,dens,delh,hs
c      print*,'leaving iri_ps_bridge',alpha,ano,transh,switchh,switchw,co
      return
      end
|
{"hexsha": "e1af3dc2fde1b36994efefcf94e54ded7dc68d2c", "size": 5940, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "PyGCPM/__data/libgcpm/gcpm/iri_ps_bridge.for", "max_stars_repo_name": "mattkjames7/PyGCPM", "max_stars_repo_head_hexsha": "90d1c29b82b7b286f570eb49f7bf7618ddc4717b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PyGCPM/__data/libgcpm/gcpm/iri_ps_bridge.for", "max_issues_repo_name": "mattkjames7/PyGCPM", "max_issues_repo_head_hexsha": "90d1c29b82b7b286f570eb49f7bf7618ddc4717b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PyGCPM/__data/libgcpm/gcpm/iri_ps_bridge.for", "max_forks_repo_name": "mattkjames7/PyGCPM", "max_forks_repo_head_hexsha": "90d1c29b82b7b286f570eb49f7bf7618ddc4717b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9411764706, "max_line_length": 82, "alphanum_fraction": 0.682996633, "num_tokens": 2032}
|
\cleardoublepage%
\phantomsection\addcontentsline{toc}{chapter}{Introduction}%
\chapter*{Introduction}
As evidenced by Figure~\ref{fig:donald} and a number of films including \emph{Eternal Sunshine of the Spotless Mind} (2004) and \emph{The Discovery} (2017), the idea of directly connecting our brains to machines has captured popular fascination. It is no longer limited to futuristic genres of science fiction, where such ideas had already taken root much earlier---at least since the 1950s. In \citeApos{anderson1957callmejoe} \emph{Call Me Joe}, for example, a wheelchair-bound man uses a `psionic' head-mounted device to project his neural activity across great distances, allowing him to live an all-too-real life in another being's body.\nocite{anderson1957callmejoe} \citeA{roelfsema2018mindreadingwriting} mention a novel on this topic going as far back as the early 1930s---the same decade in which, allegedly, even Nikola Tesla intended to investigate a `thought projector', allowing thoughts to be visualised, albeit not directly from the brain itself---the principle was supposedly for the optic nerve to be bidirectional, allowing brain activity representing imagined visuals to be read off the retina \cite{tesla1993fantastic}. Whereas this particular idea obviously did not come to fruition, we can look back on a number of actual milestones in the past near-century that illustrate remarkable progress in the field of \emph{neurotechnology}---an inclusive term essentially referring to any form of technology that monitors or manipulates activity in the central nervous system (CNS). Progress in this field is ongoing and accelerating, moving what was previously science fiction ever closer to reality. 
In the more recent past, research has moved outside of the traditional laboratories into more naturalistic settings \cite{makeig2009mobi}, direct-to-consumer neurotechnology has become widely available to the general public \cite{ienca2018brainleaks}, and internationally renowned companies have begun conducting and funding research in both medical and consumer applications, increasing public awareness even further \cite{musk2019,moses2019fbspeech}.
From science fiction, to popular entertainment, to reality: It is the main argument of this dissertation that \emph{neuroadaptive technology}---defined further below---has now sufficiently progressed to warrant both widespread interest and widespread concern. To that end, the works in this dissertation serve to examine and demonstrate the current reality with respect to neuroadaptive technology. This dissertation first describes some of the capabilities of neuroadaptive technology and how it can be used, both on a conceptual level and with respect to already-published work, highlighting the advantages of such technology as well as a number of potential risks. Tools are then introduced to support the analysis of an experimental demonstration of neuroadaptive technology. As this demonstration shows, it is now possible to implement control based on the brain activity of \emph{unwitting} participants, who remain unaware of having any influence even as their brain activity guides a virtual object. Furthermore, the final chapter demonstrates that neuroadaptive technology can access subjective value-related processes. These and other demonstrations illustrate how neuroadaptive technology can greatly benefit human-computer interaction by realising goal-oriented and supportive behaviours without requiring any effort from the user. At the same time, they illustrate how these applications require consideration of the user's rights with respect to, among other issues, informed consent, outcome responsibility, and privacy of thought.
\clearpage%
\phantomsection\addcontentsline{toc}{section}{A Brief History of Brain-Computer Interfacing}%
\section*{A Brief History of Brain-Computer Interfacing}%
One of the earliest milestones in the development of neurotechnology was achieved on July 6\textsuperscript{th}, 1924, when \citeA{berger1929humaneeg} first observed electrical activity in a human brain, using a technique that had previously been performed only on animals. A recording of such electrical brain activity over time is, following Berger's suggestion, called an \emph{electroencephalogram}, with the technique in general being referred to as \emph{electroencephalography}, and the abbreviation EEG being used for either of these two words. Already at that time it was known that this electrical brain activity was influenced by outside stimuli, such as a bright light shone into the eyes of the animal under investigation. Berger, however, was specifically interested in the influence of internal changes on the recorded EEG: he speculated that human EEG recordings might be used to diagnose medical conditions on the basis of pathological activity, and cautiously noted first indications---in his own son's EEG---that different intensities of mental activity led to visible changes in the recorded curves.
We now know that EEG does indeed reflect internal cognitive processes. Another significant development in that regard is the use of the \emph{event-related potential} (ERP) technique \cite{luck2014erp}. This was the first of a number of techniques that allowed researchers to systematically and accurately associate the brain's neuroelectric activity with specific events, and investigate this activity as a function of these events' physical or conceptual properties. Whereas the first such studies were probably performed in the late 1930s (\citeNP{davis1939erp} as cited by \citeNP{luck2014erp}), the utility of the technique was greatly improved by the later use of computers which could automatically gather multiple stimulus-response pairs and average the responses together, thus cancelling out brain activity that was not related to the event. This technique revealed clear cognitive components to the observed activity \cite<e.g.,>{walter1964cnv}.
The ERP technique thus allowed responses to specific events to be interpreted on the basis of a post hoc analysis of all gathered data. The first step towards interpreting event-related brain activity in \emph{real time} was taken in the 1970s, by \citeA{vidal1973direct}. In order to identify certain patterns of brain activity immediately following their occurrence, Vidal suggested `treating the experiment as a signal detection problem' \cite{vidal1977}: with continuous access to an ongoing EEG recording, a computer classified incoming data as belonging to one of four categories, based on previously learned (and continuously updated) decision strategies. Specifically, Vidal's apparatus flashed a bright chequerboard pattern in order to elicit activity in the visual cortex. Due to the retinotopic mapping of the visual cortex, this activity had a different spatial distribution depending on whether the human participant was looking at a point to the left, right, top, or bottom of the flashing pattern. The computer could decode this from the recorded brain activity in real time, allowing the participant to control the movement of an object on a computer screen in four directions.
With this project, Vidal coined the term \emph{brain-computer interface} (BCI; \citeNP{vidal1973direct}), now referring to any system that translates a measurement of CNS activity into artificial input to a computer, `thereby changing the ongoing interactions between the CNS and its external or internal environment' \cite{wolpaw2012newsun}. Where natural communication channels rely on muscular activity (e.g. to write, type, gesture, speak) or on hormonal changes (e.g. internal signalling, pheromones), a BCI thus establishes a different, part-artificial communication channel that bypasses these faculties, and provides a computer with real-time access to an interpretation of our mental states to the extent that they can be decoded from our brain activity.
Vidal speculated upon a wide range of potential future applications of BCI technology, including general neuroscientific research, computer-assisted learning tuned to optimal brain states, and, perhaps somewhat tongue-in-cheek, controlling spaceships. But it was in two of the fields he suggested that BCI first gained widespread attention: human-computer communication and neuroprosthetic control. In particular, BCI technology offered a unique potential to support paralysed or otherwise motor-impaired patients \cite{wolpaw2002}. It was these people, not students or astronauts, who stood to benefit the most from this technology. Therefore, the primary focus of BCI research has long been on developing a practical means for direct, brain-based communication and control. This has resulted in a number of different mental speller devices \cite<e.g.,>{farwell1988,treder2011gazeindepbci} and brain-actuated prostheses \cite<e.g.,>{mullerputz2008ssvepprosthesis,vansteensel2016alsimplant}, allowing patients to e.g. write letters \cite{birbaumer1999spelling}, control wheelchairs \cite{iturrate2009p300wheelchair}, browse the internet \cite{mugler2010p300browser}, paint \cite{muenssinger2010brainpainting}, or move artificial limbs \cite{wolpaw2008prosthetic} using only their brain activity.
These and other applications have been improved throughout the past decades, in particular through improved reliability of the BCI methodology itself. Due to the non-stationarity of EEG activity, internal and environmental artefacts, and the general difficulty people can have in learning to modulate specific brain activity in and of itself, early applications sometimes required the user to be trained for many months before being able to meaningfully control a BCI system \cite{birbaumer2006commcontrol}. A major paradigm shift occurred when methods of machine learning were applied to BCI at the start of the current millennium \cite<e.g.,>{ramoser2000,blankertz2002singletrial,lotte2007classificationreview}. As opposed to users training to generate specific machine-mandated and machine-detectable patterns in their EEG, machine learning techniques allowed the training effort to be shifted to the computer: based on a large number of recorded samples, the machine could learn to extract more complex patterns from the user's EEG. These patterns could then also reflect less forced, less artificial, more natural aspects of human cognition such as imagined movement.
As a generic example, a BCI pipeline may consist of the following components. First, a \emph{training set} is recorded, containing brain activity that is indicative of at least two different mental states. This need not be done using EEG; magnetoencephalography, functional near-infrared spectroscopy, and functional magnetic resonance imaging are commonly used as well \cite<e.g.,>{mellinger2007megbci,solovey2012brainput,lorenz2016automaticneuroscientist}. These recordings usually represent a continuous stream of brain activity, from which the relevant segments must be extracted. A series of processing steps therefore reduces these segments to \emph{features}. The different mental states, now represented by different \emph{classes} of features, can then be described by the distributions of their corresponding features. A \emph{classifier} is then \emph{trained} or \emph{calibrated} on these features, learning their distributions. This classifier is then capable of \emph{classifying} newly incoming data as belonging to one of the previously-learned classes, based on where the newly extracted features of the incoming data fall within the previously-learned distributions.
% Even with these improved machine learning techniques, only few patients actually use BCI devices. When even a small amount of muscle control remains, it is generally preferred to use this over BCI systems \cite{pasqualotto2015bciveye}, while patients for whom a BCI was thought to be the only viable option, i.e. completely locked-in patients, may in fact no longer have sufficient mental function to operate a BCI \cite{ramosmurguialday2011listoclis,birbaumer2012silence}. Despite significant successes, therefore, research targeting people with disabilities appears to be declining, with many publications now focusing on the possibilities BCI can offer to the healthy population \cite{eddy2019bcitrends}.
As these and other machine learning techniques allowed complex natural patterns of brain activity to be detected in real time, some of the ideas already speculated upon by Berger and Vidal were slowly rekindled: that this methodology could be used to detect and decode different naturally-occurring mental states, allowing computers to support us in our everyday tasks. These types of applications appeared to have been largely forgotten due to the BCI research community's focus on medical interventions, to the point that they were in fact excluded from a widely accepted definition of BCI at the time \cite{wolpaw2002}. At that same time, however, the field of human-computer interaction had a long history of exploring different naturalistic communication and interaction techniques \cite<e.g.,>{jacob2008realitybased}, and it was in this community that in 2008, different research groups presented the concept of using naturally-occurring mental states in human-computer interaction scenarios \cite{girouard2008fnirshci,cutrell2008passiveinput,zander2008bcinteraction}. In particular, Zander and colleagues presented a form of EEG-based `passive control', in which the addition of a BCI pipeline, which could detect and correct perceived errors without requiring additional voluntary actions from the users, led to a significant performance increase in an otherwise regular human-computer interaction scenario \cite{zander2008bcinteraction}. Zander's subsequently proposed formal categorisation of BCI applications expanded the prevailing definitions to include this category of \emph{passive BCI} systems, thus introducing the term \cite{zander2008enhancing,zander2011,krol2018interactivity}.
In passive BCI systems, the communication channel that is established carries input to the computer that was not intended as such by the human. For example, when a human operator becomes fatigued over time, or temporarily overburdened by increased task demands, this may lead to a detectable change in their brain activity, allowing a computer to automatically implement supportive measures. In such a case, the operator did not explicitly instruct the system to do so, nor did they voluntarily manipulate their brain activity; nonetheless, through this brain activity, the operator did provide input that resulted in these measures being taken. \emph{Implicit input} refers to input that was not intended as such by the human, but is nonetheless used as input by the computer \cite{schmidt2000,rotting2009implicit,zander2014implicit}.
Over time, the reintroduction of these ideas changed the field of BCI research, which had long stressed volitional communication and control. BCI researchers were initially divided on the question whether or not passive BCI systems should be considered examples of brain-computer interfacing at all \cite{nijboer2013asilomarsurvey}, and it was criticised that passive BCI's reliance on `intention' cannot be neuroscientifically operationalised \cite{wolpaw2012newsun}. However, as more applications and theories concerning passive BCI and implicit input were presented \cite<e.g.,>{rotting2009implicit,girouard2010fnirshci,zander2012context,kirchner2013brainreading}, the formal definition of BCI was updated in 2012 to embrace the concept \cite{wolpaw2012newsun}. Passive BCI applications were furthermore identified as one of the guiding principles for future BCI research \cite{brunner2015horizon2020}, and in the past years, the relative portion of research targeting people with disabilities appears to be declining, with an increasing number of publications now focusing on the opportunities passive BCI can offer to the healthy population \cite{eddy2019bcitrends}.
At present, new machine learning methods continue to be developed and existing methods continue to be improved, providing increased reliability and opening up new applications for BCI technology \cite{lotte2018classificationreview}. For example, adaptive classifiers continuously update their parameters allowing them to track changing feature distributions \cite{shenoy2006adaptiveclassification,lotte2018classificationreview}, and transfer learning allows classifiers trained in one condition to be used in another, e.g. across sessions, across tasks, or across participants \cite{pan2010transferlearning,lotte2018classificationreview}. Furthermore, EEG hardware has become increasingly accessible to the general public \cite{ienca2018brainleaks}, tickling the public imagination, as e.g. evident from the various hackathons being organised in the field \cite{guger2019hackathons}. Whereas direct, explicit control continues to be a popular paradigm, human-computer interaction based on implicit input---i.e. \emph{implicit interaction}---is an avenue where BCI technology can have a truly unique impact. The most recent development in this field is the move towards neuroadaptive technology.
\phantomsection\addcontentsline{toc}{section}{Neuroadaptive Technology}%
\section*{Neuroadaptive Technology}%
What kinds of neurotechnology have authors of hard science fiction conceived of more recently, as possible future applications? Here is an excerpt from the Hugo-nominated novel \emph{Blindsight} \cite{watts2006blindsight}:
\begin{quote}
Szpindel cleared his throat. ``Try this one.''
The feed showed what she saw: a small black triangle on a white background. In the next instant it shattered into a dozen identical copies, and a dozen dozen. The proliferating brood rotated around the center screen, geometric primitives ballroom-dancing in precise formation, each sprouting smaller triangles from its tips, fractalizing, rotating, evolving into an infinite, intricate tilework...
A sketchpad, I realized. An interactive eyewitness reconstruction, without the verbiage. Susan's own pattern-matching wetware reacted to what she saw---\emph{no, there were more of them; no, the orientation's wrong; yes, that's it, but bigger}---and Szpindel's machine picked those reactions right out of her head and amended the display in realtime. It was a big step up from that half-assed workaround called \emph{language}. The easily-impressed might have even called it mind-reading.
\end{quote}
The implication here\footnote{Confirmed through personal correspondence.} is that our brains (our `pattern-matching wetware') cannot help but react to the stimuli we perceive. When presented with something, our brains inevitably interpret it and produce an internal response, even if no explicit (e.g. verbal) response is required. The device described here essentially uses a passive BCI, detecting and interpreting these automatic responses. This implicit input is then used to adjust the display in a closed-loop fashion and to reconstruct, step by step, what Susan thinks she saw.
This is a prime example of neuroadaptive technology. Pending a more formal, peer-reviewed definition, neuroadaptive technology refers to any technology that uses implicit input obtained from brain activity in order to adapt itself, e.g. to enable control or interaction. The term `neuroadaptive technology' itself as representing this line of research was suggested by Scott Makeig and chosen by consensus at the Passive BCI Community Meeting in Delmenhorst, 2014, attended by experts from different fields working on similar or otherwise overlapping research, including physiological computing, cybernetics, brain-computer interfacing, computational neuroscience, neuroergonomics, and human-computer interaction. The term appears to have first been used, with largely this same meaning, in 2003 \cite{hettinger2003neuroadaptive}, even before passive BCI became a more prominent term. These days, passive BCI, referring to the interface itself, can more strictly be seen as a tool which can enable technology to be neuroadaptive.
To illustrate the concept in more detail as it may presently be understood, let us turn to a similar, more tangible example: imagine reading a neuroadaptive electronic book. The appearance is that of any other electronic book. As a human being, you are, to varying degrees, sympathetic to the characters in the story and sensitive to their various fates: when the fate of a beloved character appears to take a turn for the worse, you sympathise and become saddened. All this is a natural, involuntary reaction to the story's progress, and, in this example, is reflected in detectable changes in your brain activity. Our neuroadaptive book receives this emotional state as implicit input, and, being an electronic book, it also knows what page is currently being read and what happens on that page. Connecting your sudden change in emotional state with the context in which it appeared---our beloved character's setback---the book can infer your positive attitude towards this character. It can now re-write the upcoming pages on the fly to take advantage of this newly-gained information, and can continue to do so page after page, compiling a story uniquely catered to your implicitly communicated mindset as you keep reading.
Since the story's adaptations are happening on upcoming pages based on implicit input, the reader could potentially be wholly unaware of what is happening in the background, and yet, it is the input coming from that same reader that is somehow guiding the story. This means that, to the reader, the experience may be no different from that of any other book: the neuroadaptive experience requires no conscious voluntary actions, but simply happens based on activity that occurs naturally while reading. As such, however, the reader is at the mercy of the neuroadaptive logic, which may or may not be in line with the user's wishes: a reader who may want a happy story could instead be served their own personal worst ending.
Furthermore, an adaptive story, as it is committed to the book's pages, may reveal sensitive information when read back by someone else. A reader in whose individualised version evil prevailed, for example, may not want others to know their apparently preferred outcome.
Finally, neuroadaptivity allows us to imagine an interesting scenario where the book does not have enough information to continue the plot line. When a decision is to be made between different paths but the preferences of the reader are unclear, the book could decide to postpone the decision and instead insert a chapter the primary purpose of which is not for the reader to be further entertained, but for the book to obtain further information regarding the reader's preferences. A number of situations can be presented simply to gauge the reader's responses, on the basis of which the necessary information to continue the main story can be inferred.
This example of a neuroadaptive book will stay with us throughout this dissertation, as it highlights a number of important aspects of neuroadaptive technology. It illustrates, for example, one of its main benefits: the implicit nature of the input means that the user does not have to exert any effort for this additional communication channel to be maintained. This makes it particularly useful in scenarios where high mental demand is placed on the operator, either to widen the human-computer communication bottleneck and make the interaction more symmetrical \cite{suchman1987hmcproblems,tufte1990}, to detect and alleviate the mental load using e.g. adaptive automation \cite{byrne1996adaptiveauto}, or to promote or sustain specific mental states.
For example, \citeA{kohlmorgen2007} has demonstrated how neuroadaptive technology can detect mental load during driving and automatically adjust secondary tasks to better suit the driver's current state, as one illustration of many possible uses in neuroergonomics and human-computer interaction \cite<e.g.,>{frey2016visualcomfort,mehta2013neuroergonomicsreview}. Vidal's suggestion to automatically detect mental states and tune adaptive learning systems accordingly has also been demonstrated to be feasible. \citeA{yuksel2016bach} presented a neuroadaptive learning system that automatically increased the difficulty level for students practising a musical piece whenever workload levels dropped below an individually-determined threshold. \citeA{walter2017adaptivelearning} demonstrated an arithmetic learning environment that both increased or decreased difficulty according to a measure of workload. In entertainment, \citeA{ewing2016tetris} introduced a game that uses implicit input in order to maximise the player's engagement; \citeA{krol2017meyendtris} proposed a similar concept using two separate dimensions of implicit input, thus additionally introducing an element of mental state balancing to the game. Entertainment overlaps with art in \citeauthor{ramchurn2019brainfilm}'s \citeyear{ramchurn2019brainfilm} proposal for a neuroadaptive film, switching between different narratives and sound designs based on a brain-based measure of a viewer's attention. Neuroadaptive technology has also been suggested to help with the contemplation of art itself \cite{krol2018museum}, or to infer personal preferences with respect to cultural heritage items in order to provide implicit tags or recommendations in real time \cite{karran2015culturalheritage}. 
We have also seen these developments in the context of neuroscientific research \cite{lorenz2017neuroadaptivebayesian}, where a neuroadaptive experimental design has been used to intelligently present different audiovisual stimuli in order to identify those stimuli that elicit the maximal response from the participant \cite{lorenz2016automaticneuroscientist}. Even tasks that are normally done using explicitly communicated commands, such as the control of a cursor or robotic arm, may be performed using neuroadaptive technology using implicit input elicited by movements of the cursor or robotic arm \cite{zander2014implicit,iturrate2015teaching}.
As such research illustrates and often emphasises, neuroadaptivity allows technology to support the user without placing any additional burden on them: the driver, for example, is automatically supported in real time without being required to undertake any explicit actions that would distract them from their main task, and visitors of the museum or movie spectators are given an individualised experience that they can focus on without explicitly needing to indicate their preferences at every turn.
The unique benefits of neuroadaptive technology, however, should be contrasted with its potential risks, which are of a similarly unique nature. The uniquely beneficial fact that neuroadaptive technology allows communication to take place without additional effort on the user's side, also means that it can happen outside of the user's awareness altogether. Furthermore, it may not be possible for users to limit or otherwise control the scope of this communication. Brain activity---the data at the heart of all neuroadaptive technology---is liable to contain more information than what is needed as input to a particular application. Additional information could be gathered accidentally, as e.g. incidental findings indicative of epilepsy \cite{acharya2013eegepilepsy} may be found in the recorded data, or, bad actors may deliberately attempt to obtain information outside of the bounds of necessity: imagine, for example, a neuroadaptive movie streaming service that also records your responses to advertisements. Furthermore, by design, the reciprocal nature of the system adaptations will be in a position to affect the mental states of the user. By and large, they will likely be designed to promote or sustain specific desirable mental states, such as a workload equilibrium or optimal learning engagement. A potential danger, however, lies in a mismatch between the system's target state and what states are acceptable or healthy for the user. Goal-oriented adaptive mechanisms can be said to constitute the system's own agenda \cite{fairclough2017intadapt}, and this agenda may or may not correspond to that of the user. These issues are compounded by the fact that implicit, not explicit, input is used: the user may have no control over the information that is being provided, and may be unaware of the use that is being made of the recorded data.
Where issues related to the safety and privacy of neural data, informed consent, and transparency have been discussed recently in the context of brain-computer interfacing, this has primarily been done in the context of physiological or neural data in general and BCI-based explicit control in particular \cite<e.g.,>{fairclough2014confidential,ienca2016ethics,yuste2017ethical,kellmeyer2018bigbraindata}. Any discussion of neuroadaptive technology must deal with these issues and the unique additional concerns they raise in the context of implicit control.
\phantomsection\addcontentsline{toc}{section}{Current Issues Addressed in this Dissertation}%
\section*{Current Issues Addressed in this Dissertation}%
The highly interdisciplinary nature of the field of neuroadaptive technology has caused relevant research to span different communities, and its rapid development has left it without a shared terminology concerning a number of key developments. Part~\ref{part:concepts} therefore presents a perspective on previous research, highlighting different ways in which implicit input has been used and can be used to enable neuroadaptive technology. In particular, it focuses on one particularly powerful method that has been independently implemented a number of times, but deserves our collective attention.
Specifically, Chapter~\ref{chapter:pbci} first reviews existing passive BCI research and applications, and categorises them based on a dimension that has an important bearing on how the technology is used, or can potentially be used: interactivity, i.e., the technology's ability to respond to input---implicit input, in this case. The more interactive a technological system is, the more responsive it is, the more autonomous, and the better capable of adaptation. The theoretical zero point on this scale is the method of mental state assessment itself: a system that has the ability to decode a person's mental states, but does not use the obtained information for any interaction with that same person. Following this, the suggested categories of increasing levels of interactivity are open-loop adaptation, closed-loop adaptation, and finally automated adaptation, also known as intelligent adaptation \cite{fairclough2017intadapt}. An example of an open-loop adaptation is the correction of an error: when an operator commits or perceives an error, this can be decoded from their brain activity, and a system with direct access to the relevant implicit input could thus immediately correct the perceived mistake. In the case of closed-loop adaptation, the actions performed by the system on the basis of implicit input feed back to the user and influence the brain activity that triggered the adaptive action in the first place. This, for example, is implemented in adaptive automation systems where an implicit measure of workload is used to adjust automation levels in order to again influence the workload that is being monitored. In the last category, neuroadaptive systems use models to represent their user's implicit input along any number of dimensions, and base their responses on the information present in that model using goal-oriented control logic. This decouples the control logic from immediate mental states, and grants the system more autonomy to respond in different ways.
The interactivity perspective thus finally points towards systems that, given their autonomy, can also autonomously gather implicit input from their users. This method can make neuroadaptive technology particularly versatile. Chapter~\ref{chapter:cp}, therefore, considers this method in more detail. It reviews a number of works that have used a specific sequence of steps in their research: the autonomous elicitation of a brain response, the subsequent automated interpretation of this response, and finally, an instance of learning on the basis of this decoded interpretation. This sequence has been used by different researchers independently of each other, but, it is argued, gains particular relevance in the largely unexplored context of implicit interaction. In order to collectively discuss some of the technical and ethical issues that arise from this method, Chapter~\ref{chapter:cp} first proposes a definition that covers these previously disparate implementations, and suggests \emph{cognitive probing} as a label to refer to the method.
Another issue concerns some of the fundamental difficulties of working with machine learning methods applied to brain data. Even with a clear conceptual understanding of what the technology is intended to do, care must be taken to validate the neural processes underlying the technology's actual functioning. For example, to the extent that cognitive probing is to be based on cortical processes taking place in the brain itself, it should be ruled out that the classifier makes use of non-cortical activity such as eye blinks or other muscular artefacts which do feature prominently in the EEG. This applies to all forms of neuroadaptive technology. Therefore, Part~\ref{part:tools} introduces two tools to help validate both the methods we use and the experiments we conduct in the field of neuroadaptive technology.
EEG is measured at the scalp, and although each electrode is at a spatially distinct location, it picks up electrical activity from all parts of the brain simultaneously---that is, all parts that generate activity at a sufficient scale for it to be measurable at the scalp. Because of this, EEG has a poor spatial resolution, and a significant amount of processing is required to interpret the recorded data. Unfortunately it is not possible to evaluate the analytical methods applied to EEG data against a ground truth, since no ground truth is available for EEG data. Instead, researchers turn to simulations of EEG data, where a ground truth can be manually constructed, allowing the results of newly developed methods to be compared to a known factual reference. Chapter~\ref{chapter:sereega} therefore presents SEREEGA (Simulating Event-Related EEG Activity), the first-of-its-kind free and open source toolbox designed to streamline and standardise the simulation of event-related EEG data. Using an architecture and feature set that covers and extends the vast majority of EEG simulation methods employed by researchers today, SEREEGA provides a scripting language and EEGLAB-based GUI \cite{delorme2004eeglab} to simulate realistic EEG data, thus providing a ground truth to evaluate and validate EEG analysis methods and pipelines.
Chapter~\ref{chapter:visualisation}, subsequently, uses SEREEGA to simulate data with a known ground truth in order to validate a source localisation method that visualises what areas of the brain a classifier focuses on. This is important information. Where many researchers rely on standardised experimental paradigms to elicit known cortical processes, this does not guarantee that these cortical processes are also targeted by the classifier. Similarly, post hoc analyses of recorded data to demonstrate that certain cortical processes were indeed elicited, for example through ERP analyses, provide no proof that these same processes contributed significantly to classification. In both cases, it is possible that the classifier instead focused primarily on other, more distinctive brain activity, including artefactual activity. Chapter~\ref{chapter:visualisation} therefore introduces a method that combines blind source separation with the filter weights produced by different types of classifiers, allowing these weights to be visualised in source space. The neurophysiologically uninterpretable filter weights are first transformed into interpretable patterns \cite{haufe2014}, and subsequently distributed onto the sources in a virtual brain such that each brain area's relative contribution to the classifier can be visualised. These so-called relevance weights can thus be used to analyse classifiers and inform statements as to which cortical processes, exactly, contributed to classification. Aside from that, this method also opens up new possibilities for classifiers to be used in neuroscientific research in general, making BCI methodology accessible to a wider audience.
Part~\ref{part:validations}, finally, presents two validation studies based on the concepts from Part~\ref{part:concepts}, supported by the methods from Part~\ref{part:tools}.
The first study, presented in Chapter~\ref{chapter:nat}, shows that it is possible to use cognitive probes to realise implicit cursor control. Participants observed a cursor on a screen that was initially moving randomly. Each movement served as a cognitive probe, eliciting a response from the observer that could be decoded in real time from their brain activity. This response contained information pertaining to their interpretation of each cursor movement, judging them as either appropriate or not with respect to reaching a desired target. Using this information, a user model could be generated that allowed the preferred movement directions to be inferred. Over time, the cursor was then steered towards the preferred target. Importantly, participants were unaware of having any influence over the cursor, even though it was their brain activity that enabled its goal-oriented behaviour. An analysis of the classifier supported this conclusion. As such, this demonstrated how even a quintessential case of explicit control---the movement of a cursor---can in fact be done implicitly, using cognitive probing as described in Chapter~\ref{chapter:cp}.
The final chapter, Chapter~\ref{chapter:salval}, dives deeper into the just-mentioned implicit cursor control paradigm in order to further investigate which cognitive processes contributed to what extent to classification. A new experiment was designed to dissociate cognitive processes related to visual perception (salience) on the one hand, and subjective value interpretations (valence) on the other. As we will see, both these processes are indeed present in the data, but separate classifiers can be constructed to focus primarily on one or the other. The visualisation method presented in Chapter~\ref{chapter:visualisation} allows us to localise the cognitive activity related to these separate processes in different cortical areas. Using appropriate classifier designs as confirmed by visualisation or other methods, it is thus possible to access brain activity related to subjective valence processing.
This conclusion emphasises that neuroadaptive technology can elicit and have access to human cognition in a goal-oriented fashion without these humans being aware of having any influence, or, indeed, of being influenced. As much as science fiction may have inspired speculation as to the possibilities of neurotechnology, and as much as speculation can remain useful to illustrate the possibilities---as in the example of the neuroadaptive book---previously fantastical speculations and possibilities have now largely left the realm of science fiction, and their legal, societal, and ethical implications must be given due consideration going forward.
|
{"hexsha": "f3ec389636dca74cfae5a3e3817c289ea5155e84", "size": 39695, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "introduction.tex", "max_stars_repo_name": "lrkrol/dissertation", "max_stars_repo_head_hexsha": "548167344fada64384f95d23be67a48ee08f7449", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "introduction.tex", "max_issues_repo_name": "lrkrol/dissertation", "max_issues_repo_head_hexsha": "548167344fada64384f95d23be67a48ee08f7449", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "introduction.tex", "max_forks_repo_name": "lrkrol/dissertation", "max_forks_repo_head_hexsha": "548167344fada64384f95d23be67a48ee08f7449", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 409.2268041237, "max_line_length": 2415, "alphanum_fraction": 0.826955536, "num_tokens": 7879}
|
import os
import numpy as np
import torch
from torch import nn
import gin
from sparse_causal_model_learner_rl.trainable.fcnet import build_activation
@gin.configurable
class AbstractCombinedModel(nn.Module):
    """Base class for modules that evaluate `n_models` networks side by side.

    Only flat (1-D) input and output shapes are supported; the scalar
    feature dimensions are stored for use by subclasses.
    """

    def __init__(self, n_models, input_shape, output_shape):
        super(AbstractCombinedModel, self).__init__()
        # Reject anything but 1-D feature shapes up front.
        assert len(input_shape) == 1, input_shape
        assert len(output_shape) == 1, output_shape
        self.input_dim = input_shape[0]
        self.output_dim = output_shape[0]
        self.n_models = n_models
@gin.configurable
class CombinedLinearLayer(nn.Module):
    """Compute many linear layers of a single shape in a single pass.
    Input shape: [batch_dim, in_features, n_models]
    Output shape: [batch_dim, out_features, n_models]
    Equation (for one model m): y[:, :, m] = W[:, :, m] x[:, :, m] + b[:, m]
    """
    def __init__(self, in_features, out_features, n_models):
        super(CombinedLinearLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.n_models = n_models
        # One weight matrix and one bias vector per model, stacked along
        # the trailing (model) dimension. Values are overwritten by
        # reset_parameters() below.
        self.weight = nn.Parameter(torch.zeros(self.out_features,
                                               self.in_features,
                                               self.n_models))
        self.bias = nn.Parameter(torch.zeros(self.out_features, self.n_models))
        self.reset_parameters()
    def __repr__(self):
        return f"CombinedLinearLayer(inf={self.in_features}, outf={self.out_features}, n_models={self.n_models})"
    def weight_by_model(self, idx):
        # View (no copy) of model idx's [out_features, in_features] weight.
        return self.weight[:, :, idx]
    def bias_by_model(self, idx):
        # View (no copy) of model idx's [out_features] bias.
        return self.bias[:, idx]
    def reset_parameters(self, apply_fcn=nn.Linear.reset_parameters):
        # Re-initialise every model's slice with the stock nn.Linear scheme.
        # Resetter is a throwaway nn.Module exposing .weight/.bias views so
        # that apply_fcn (an unbound nn.Linear method by default) can
        # initialise them in place; writing to the views updates the shared
        # stacked parameters.
        class Resetter(nn.Module):
            def __init__(self, w, b):
                super(Resetter, self).__init__()
                self.weight = w
                self.bias = b
        for m in range(self.n_models):
            obj = Resetter(self.weight_by_model(m),
                           self.bias_by_model(m))
            apply_fcn(obj)
    def forward(self, x):
        # x: [batch, in_features, n_models] -> [batch, out_features, n_models].
        # The einsum applies each model's W to its own input slice; the bias
        # is broadcast over the batch dimension.
        w, b = self.weight, self.bias
        x = torch.einsum('bim,oim->bom', x, w) + b.view(1, *b.shape)
        return x
@gin.configurable
class CombinedQuadraticLayer(CombinedLinearLayer):
    """Compute many quadratic layers of a single shape in a single pass.
    Input shape: [batch_dim, in_features, n_models]
    Output shape: [batch_dim, out_features, n_models]
    Equation (for one model): y = x^TAx+Wx+b
    """
    def __init__(self, **kwargs):
        # The parent constructor calls reset_parameters() once, before
        # qweight exists; the hasattr() guard in reset_parameters() makes
        # that first call skip the quadratic term. It is then called again
        # below once qweight has been created.
        super(CombinedQuadraticLayer, self).__init__(**kwargs)
        # A in y = x^T A x + Wx + b: one [out, in, in] quadratic form per
        # model, stacked along the trailing (model) dimension.
        self.qweight = nn.Parameter(torch.zeros(self.out_features,
                                                self.in_features,
                                                self.in_features,
                                                self.n_models))
        self.reset_parameters()
    def __repr__(self):
        return f"CombinedQuadraticLayer(inf={self.in_features}, outf={self.out_features}, n_models={self.n_models})"
    def weight_by_model(self, idx):
        # Linear weight slice [out_features, in_features] for model idx.
        return self.weight[:, :, idx]
    def bias_by_model(self, idx):
        # Bias slice [out_features] for model idx.
        return self.bias[:, idx]
    def qweight_by_model(self, idx):
        # Quadratic form slice [out_features, in_features, in_features].
        return self.qweight[:, :, :, idx]
    def reset_parameters(self, apply_fcn=nn.Linear.reset_parameters, qscaler=0.01):
        # Linear part reuses the parent's nn.Linear-style initialisation;
        # the quadratic part is drawn from a normal distribution scaled
        # down by qscaler so it starts as a small perturbation.
        super(CombinedQuadraticLayer, self).reset_parameters(apply_fcn=apply_fcn)
        if hasattr(self, 'qweight'):
            self.qweight.data = torch.randn(self.out_features, self.in_features, self.in_features, self.n_models) * qscaler
    def forward(self, x):
        # Linear term Wx + b from the parent, then add each model's
        # quadratic term x^T A x via einsum.
        out = super(CombinedQuadraticLayer, self).forward(x)
        out += torch.einsum('bim,bjm,oijm->bom', x, x, self.qweight)
        return out
@gin.configurable
class FCCombinedModel(AbstractCombinedModel):
    """Fully-connected combined model: n_models independent MLPs in one pass.

    Layers are CombinedLinearLayer-style modules operating on tensors of
    shape [batch, features, n_models]. With input_reshape=True the model
    instead takes a flat [batch, input_dim] tensor, broadcasts it to every
    model, and treats each scalar output component as its own model.
    """
    def __init__(self, hidden_sizes, activation_cls=nn.ReLU,
                 input_reshape=False,
                 layers=CombinedLinearLayer,
                 skipconns=None,
                 add_input_batchnorm=False,
                 **kwargs):
        self.hidden_sizes = hidden_sizes
        self.input_reshape = input_reshape
        if self.input_reshape:
            # One model per scalar output: reinterpret output_shape[0] as
            # the number of models, each producing a single value.
            assert len(kwargs['output_shape']) == 1
            kwargs['n_models'] = kwargs['output_shape'][0]
            kwargs['output_shape'] = (1,)
        super(FCCombinedModel, self).__init__(**kwargs)
        # Feature sizes after each layer (hidden layers + output layer).
        self.act_dims = self.hidden_sizes + [self.output_dim]
        # activation_cls may be a single factory (applied after every hidden
        # layer, none on the output), a per-layer list, or None (no
        # activations at all).
        if callable(activation_cls):
            self.activation = [build_activation(activation_cls, features=f * self.n_models)
                               for f in self.act_dims[:-1]] + [None]
        elif isinstance(activation_cls, list):
            self.activation = [build_activation(act_cls, features=f * self.n_models)
                               if act_cls is not None else None
                               for f, act_cls in zip(self.act_dims, activation_cls)]
        elif activation_cls is None:
            self.activation = [None] * (len(self.hidden_sizes) + 1)
        else:
            raise NotImplementedError
        # Register stateful activations as attributes so torch tracks them.
        for i, act in enumerate(self.activation):
            if act is not None:
                setattr(self, 'act%02d' % (i + 1), act)
        # skipconns[i] == True adds a residual connection around layer i.
        if skipconns is None:
            skipconns = [False] * len(self.activation)
        self.skipconns = skipconns
        print(self.skipconns)
        assert len(self.activation) == len(self.hidden_sizes) + 1, (self.activation,
                                                                    self.hidden_sizes)
        # Full layer dimension chain: input -> hidden... -> output.
        self.dims = [self.input_dim] + self.hidden_sizes + [self.output_dim]
        print(self.dims, self.n_models)
        self.fc = []
        # layers may be a single factory (reused for every layer) or a
        # per-layer list; anything else is rejected.
        if callable(layers):
            layers = [layers] * (len(self.dims) - 1)
        if isinstance(layers, list):
            assert len(layers) == len(self.dims) - 1, (len(layers), len(self.dims))
        else:
            raise NotImplementedError
        self.layers = layers
        for i in range(1, len(self.dims)):
            self.fc.append(self.layers[i - 1](
                in_features=self.dims[i - 1],
                out_features=self.dims[i],
                n_models=self.n_models))
            # for torch to keep track of variables
            # NOTE(review): the f-prefix is redundant here -- f'fc%02d' has
            # no braces, so this is plain %-formatting of the literal.
            setattr(self, f'fc%02d' % i, self.fc[-1])
        if add_input_batchnorm:
            # Only used in forward() when input_reshape is enabled.
            self.bn = nn.BatchNorm1d(self.input_dim)
    def __repr__(self, *args, **kwargs):
        orig = super(FCCombinedModel, self).__repr__(*args, **kwargs)
        return f"{orig} input_dim={self.input_dim} output_dim={self.output_dim} skips={self.skipconns} act={self.activation} hidden_sizes={self.hidden_sizes} layers={self.layers}"
    def forward(self, x):
        if self.input_reshape:
            if hasattr(self, 'bn'):
                x = self.bn(x)
            # Broadcast the flat input to every model:
            # [batch, input_dim] -> [batch, input_dim, n_models].
            x = x.view(*x.shape, 1).expand(*[-1] * len(x.shape), self.n_models)
        for i, fc in enumerate(self.fc):
            x_inp = x
            x = fc(x)
            if self.activation[i] is not None:
                x = self.activation[i](x)
            if self.skipconns[i]:
                # Residual connection; assumes fc preserves the feature dim.
                x = x + x_inp
        assert x.shape[1] == self.output_dim, (x.shape, self.output_dim, self.n_models)
        assert x.shape[2] == self.n_models
        if self.output_dim == 1:
            # Collapse the singleton feature dim: [batch, 1, n_models] ->
            # [batch, n_models], matching the input_reshape convention.
            x = x.view(x.shape[0], x.shape[2])
        return x
|
{"hexsha": "2e70128a1770db0d3268059518577a4ebfc5794a", "size": 7584, "ext": "py", "lang": "Python", "max_stars_repo_path": "sparse_causal_model_learner_rl/trainable/combined.py", "max_stars_repo_name": "sergeivolodin/causality-disentanglement-rl", "max_stars_repo_head_hexsha": "5a41b4a2e3d85fa7e9c8450215fdc6cf954df867", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-11T05:26:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T06:12:58.000Z", "max_issues_repo_path": "sparse_causal_model_learner_rl/trainable/combined.py", "max_issues_repo_name": "sergeivolodin/causality-disentanglement-rl", "max_issues_repo_head_hexsha": "5a41b4a2e3d85fa7e9c8450215fdc6cf954df867", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-04-30T16:29:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-26T07:32:18.000Z", "max_forks_repo_path": "sparse_causal_model_learner_rl/trainable/combined.py", "max_forks_repo_name": "sergeivolodin/causality-disentanglement-rl", "max_forks_repo_head_hexsha": "5a41b4a2e3d85fa7e9c8450215fdc6cf954df867", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1105527638, "max_line_length": 180, "alphanum_fraction": 0.5751582278, "include": true, "reason": "import numpy", "num_tokens": 1684}
|
"""Answer to Exercise 1.4
Author: Yuhuang Hu
Email : yuhuang.hu@ini.uzh.ch
"""
from __future__ import print_function
import numpy as np
import keras.backend as K
# define list of placeholders for variables
N = 3
theta = [K.placeholder(shape=(), dtype=np.float32) for i in range(N+1)]
x = K.placeholder(shape=(), dtype=np.float32)
# Compute function
y = theta[-1]
for i in range(N):
y += theta[i]*x**(i+1)
# compile function
fun = K.function(inputs=theta+[x], outputs=[y])
# setup example
# y = theta_2*x^3+theta_1*x^2+theta_0*x+theta_3
Theta = [1, 2, 3, 4, 5]
X = 5
print (fun(Theta+[X])[0])
# Compute individual gradient
grad_collector = [K.gradients(y, th)[0] for th in theta]
grad_fun = K.function(inputs=theta+[x], outputs=grad_collector)
# Evaluate each gradient
print (grad_fun(Theta+[X]))
|
{"hexsha": "4737dda146e89a2050d6e4e270dc51e895062989", "size": 813, "ext": "py", "lang": "Python", "max_stars_repo_path": "session_01/ex-1-4.py", "max_stars_repo_name": "PnS2018/exercise-solutions", "max_stars_repo_head_hexsha": "156c07a4cf92f3b6b8af1ac7608a957eba5deba6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "session_01/ex-1-4.py", "max_issues_repo_name": "PnS2018/exercise-solutions", "max_issues_repo_head_hexsha": "156c07a4cf92f3b6b8af1ac7608a957eba5deba6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "session_01/ex-1-4.py", "max_forks_repo_name": "PnS2018/exercise-solutions", "max_forks_repo_head_hexsha": "156c07a4cf92f3b6b8af1ac7608a957eba5deba6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.8292682927, "max_line_length": 71, "alphanum_fraction": 0.6912669127, "include": true, "reason": "import numpy", "num_tokens": 258}
|
import tensorflow as tf
import numpy as np
from PIL import Image
import imageio
import cv2
import glob
from skvideo.io import FFmpegWriter as VideoWriter
image_shape = (160, 576)
filename = 'um_000004.png'
image_file = './data/data_road/testing/image_2/' + filename
def get_input_image(path):
    """Generator yielding a single (success, image) pair.

    The image at `path` is resized to the module-level `image_shape`
    (height, width) and yielded as a numpy array with success=True.
    """
    img = Image.open(path)
    print(img.size)
    resized = img.resize((image_shape[1], image_shape[0]))
    yield True, np.array(resized)
def get_video_frame(video_path):
    """Yield (success, frame) pairs for every frame of a video file.

    Frames are converted BGR->RGB and cropped to the top
    image_shape[0]*3 rows (full width); iteration stops at end of video.
    """
    vidcap = cv2.VideoCapture(video_path)
    success = True
    while success:
        success, image = vidcap.read()
        if not success:
            break
        # OpenCV decodes frames as BGR; downstream code expects RGB.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # image = image[:image_shape[0]*5, 300:image_shape[1]*2 + 300]
        image = image[0:image_shape[0]*3, 0:]
        # image = cv2.resize(image, (image_shape[1], image_shape[0]))
        yield success, image
def create_mask(image):
    """Make every pure-black pixel ((0, 0, 0, 255)) fully transparent.

    Pixels that are exactly opaque black are the zeroed-out background of
    the segmentation overlay; clearing their alpha lets Image.paste()
    composite only the segmented region. Vectorized with numpy instead of
    the original per-pixel Python loop (same result, far faster).

    :param image: PIL image (any mode; converted to RGBA).
    :return: new RGBA PIL image with background pixels transparent.
    """
    image = image.convert("RGBA")
    pixels = np.array(image)
    # Boolean [H, W] mask of pixels equal to opaque black on all channels.
    background = np.all(pixels == (0, 0, 0, 255), axis=-1)
    pixels[background] = (0, 0, 0, 0)
    return Image.fromarray(pixels, mode="RGBA")
def get_mask_from_inference(im_softmax, image, threshold):
    """Build a transparent overlay mask from the network's softmax output.

    :param im_softmax: list whose first element is the flattened softmax
        output; column 1 is taken as the 'road' class probability.
    :param image: the RGB frame the inference was run on (numpy array).
    :param threshold: probability above which a pixel counts as road.
    :return: RGBA PIL image where non-road pixels are transparent.
        NOTE(review): segment_mask is (0, 255, 0) at road pixels, so the
        np.where() keeps only the green channel of the original image
        there -- presumably intentional to tint the overlay green; confirm.
    """
    # Reshape the flat per-pixel probabilities back to image layout.
    segment = im_softmax[0][:, 1].reshape(image.shape[0], image.shape[1], 1)
    # Thresholded boolean map dotted with [[0, 255, 0]] -> green where road.
    segment_mask = np.dot(segment > threshold, np.array([[0, 255, 0]]))
    im_mask = np.where(segment_mask, image, 0)
    im_mask = Image.fromarray(im_mask)
    # Zero (black) pixels become transparent so paste() skips them.
    im_mask = create_mask(im_mask)
    return im_mask
# video_writer = cv2.VideoWriter('segmented.avi', cv2.VideoWriter_fourcc(*'XVID'), 24, (1920, 800), True)
video_writer = VideoWriter('video_segmented.mp4')

with tf.Session() as sess:
    # Load the trained segmentation model from the latest checkpoint.
    saver = tf.train.import_meta_graph('my_segmentation_model.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./'))

    # Look up the input/output tensors of the restored graph.
    graph = tf.get_default_graph()
    image_input = graph.get_tensor_by_name('image_input:0')
    keep_prob = graph.get_tensor_by_name('keep_prob:0')
    logits = graph.get_tensor_by_name('fcn_logits:0')

    # Build the softmax op ONCE, outside the loop. The original called
    # tf.nn.softmax(logits) inside sess.run() on every frame, which adds a
    # new node to the TF1 graph per iteration and grows memory (and slows
    # down sess.run) without bound.
    softmax_op = tf.nn.softmax(logits)

    inputs = get_video_frame('./data/video.m4v')
    # inputs = get_input_image(image_file)
    count = 0
    for _, image in inputs:
        print(image.shape)
        print(count)
        feed_dict = { image_input: [image], keep_prob: 1.0 }

        # Run inference on the frame.
        im_softmax = sess.run([softmax_op], feed_dict)

        # Extract the road mask and composite it over the frame.
        mask = get_mask_from_inference(im_softmax, image, 0.5)
        image = Image.fromarray(image)
        image.paste(mask, (0, 0), mask)
        image = np.array(image)
        video_writer.writeFrame(image)
        count += 1

# Close the writer so ffmpeg flushes and finalises the output file; the
# original never closed it, which can leave the mp4 truncated.
video_writer.close()
|
{"hexsha": "f192c9b245ac0d7dcfb2808dd7cd4bdc198c5e1a", "size": 3134, "ext": "py", "lang": "Python", "max_stars_repo_path": "run.py", "max_stars_repo_name": "daltonrenaldo/CarND-Semantic-Segmentation", "max_stars_repo_head_hexsha": "720d89a125449f74697e6da4ed59e5934959f306", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run.py", "max_issues_repo_name": "daltonrenaldo/CarND-Semantic-Segmentation", "max_issues_repo_head_hexsha": "720d89a125449f74697e6da4ed59e5934959f306", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run.py", "max_forks_repo_name": "daltonrenaldo/CarND-Semantic-Segmentation", "max_forks_repo_head_hexsha": "720d89a125449f74697e6da4ed59e5934959f306", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3404255319, "max_line_length": 105, "alphanum_fraction": 0.6403956605, "include": true, "reason": "import numpy", "num_tokens": 830}
|
"""
Case 27:
This case study a three bus system with 1 machine (One d- One q-: 4th order model), a VSM of 19 states and an infinite source.
The test changes botht he voltage magnitude and phase angle of the source bus.
"""
##################################################
############### LOAD DATA ########################
##################################################
# Use the sme test data as Test 09
include(joinpath(TEST_FILES_DIR, "data_tests/test09.jl"))
##################################################
############### SOLVE PROBLEM ####################
##################################################
####### Changing magnitude of voltage at source bus #########

# Simulation time span and source bus shared by all test cases below.
tspan = (0.0, 20.0);
case_source = collect(PSY.get_components(PSY.Source, threebus_sys))[1]

"""
Run one source-bus perturbation case: build the simulation with the given
model type and perturbation, check the initial condition against
`test09_x0_init`, solve with the given solver, and read the rotor angle
series. Test files are removed afterwards. The four test cases below were
previously four verbatim copies of this body.
"""
function _run_test27_case(model_type, solver, perturbation)
    path = (joinpath(pwd(), "test-27"))
    !isdir(path) && mkdir(path)
    try
        # Define Simulation Problem
        sim = Simulation(
            model_type,
            threebus_sys, # system
            path,
            tspan,
            perturbation,
        )
        # Test Initial Condition
        diff_val = [0.0]
        res = get_init_values_for_comparison(sim)
        for (k, v) in test09_x0_init
            diff_val[1] += LinearAlgebra.norm(res[k] - v)
        end
        @test (diff_val[1] < 1e-3)
        # Solve problem
        execute!(sim, solver, dtmax = 0.02)
        results = read_results(sim)
        # Obtain data for angles
        series = get_state_series(results, ("generator-103-1", :θ_oc))
    finally
        @info("removing test files")
        rm(path, force = true, recursive = true)
    end
end

# Define Fault using Callbacks: step the source voltage magnitude.
V_source_change = SourceBusVoltageChange(1.0, case_source, :V_ref, 1.02)

@testset "Test 27 Source Bus Voltage Magnitude Perturbation ResidualModel" begin
    _run_test27_case(ResidualModel, IDA(), V_source_change)
end

@testset "Test 27 Source Bus Voltage Magnitude Perturbation MassMatrixModel" begin
    _run_test27_case(MassMatrixModel, Rodas4(), V_source_change)
end

####### Changing angle of voltage at source bus #########

# Define Fault using Callbacks: step the source voltage angle.
V_source_change = SourceBusVoltageChange(1.0, case_source, :θ_ref, 0.1)

@testset "Test 27 Source Bus Voltage Angle Perturbation ResidualModel" begin
    _run_test27_case(ResidualModel, IDA(), V_source_change)
end

@testset "Test 27 Source Bus Voltage Angle Perturbation MassMatrixModel" begin
    _run_test27_case(MassMatrixModel, Rodas4(), V_source_change)
end
|
{"hexsha": "a4346c53f5bb5b84afb74cfac8653c613f634c68", "size": 4862, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_case27_source_bus_voltage_change.jl", "max_stars_repo_name": "tavovalmo/PowerSimulationsDynamics.jl", "max_stars_repo_head_hexsha": "61ba0433ab89caa37f9cf2caedaa2bcc2566591c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_case27_source_bus_voltage_change.jl", "max_issues_repo_name": "tavovalmo/PowerSimulationsDynamics.jl", "max_issues_repo_head_hexsha": "61ba0433ab89caa37f9cf2caedaa2bcc2566591c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_case27_source_bus_voltage_change.jl", "max_forks_repo_name": "tavovalmo/PowerSimulationsDynamics.jl", "max_forks_repo_head_hexsha": "61ba0433ab89caa37f9cf2caedaa2bcc2566591c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2891566265, "max_line_length": 126, "alphanum_fraction": 0.5561497326, "num_tokens": 1224}
|
[STATEMENT]
lemma dlts_rel_eq[unfolded vimage2p_def]:
"BNF_Def.vimage2p un_DLTS un_DLTS (rel_fun (=) (rel_option (=))) = (=)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. BNF_Def.vimage2p un_DLTS un_DLTS (rel_map (=)) = (=)
[PROOF STEP]
by (auto simp add: vimage2p_def pmf.rel_eq option.rel_eq fun.rel_eq fun_eq_iff dlts.expand)
|
{"llama_tokens": 161, "file": "Probabilistic_System_Zoo_Probabilistic_Hierarchy", "length": 1}
|
import os
import re
import shutil
import subprocess
from subprocess import CalledProcessError
from cStringIO import StringIO
import nibabel as nb
import numpy as np
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from django.forms.models import (
ModelMultipleChoiceField
)
# from form_utils.forms import BetterModelForm
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Button
from crispy_forms.bootstrap import TabHolder, Tab
from .models import Collection, Image, User, StatisticMap, BaseStatisticMap, \
Atlas, NIDMResults, NIDMResultStatisticMap
from django.forms.forms import Form
from django.forms.fields import FileField
import tempfile
from neurovault.apps.statmaps.utils import (
split_filename, get_paper_properties,
detect_4D, split_4D_to_3D, memory_uploadfile,
is_thresholded, not_in_mni,
splitext_nii_gz)
from neurovault.apps.statmaps.nidm_results import NIDMUpload
from django import forms
from django.utils.encoding import smart_str
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.files.base import ContentFile
from django.forms.widgets import HiddenInput
from neurovault import settings
from gzip import GzipFile
from file_resubmit.admin import AdminResubmitFileWidget
from guardian.shortcuts import get_objects_for_user
# Create the form class.
# Crispy-forms tab layout for CollectionForm: each entry is
# (tab_id, {'fields': [model field names...], 'legend': tab label}).
# CollectionForm.__init__ filters each 'fields' list against Meta.exclude
# before building the TabHolder.
collection_fieldsets = [
    ('Essentials', {'fields': ['name',
                               'DOI',
                               'description',
                               'full_dataset_url',
                               'contributors',
                               'private'],
                    'legend': 'Essentials'}),
    ('Participants', {'fields': ['subject_age_mean',
                                 'subject_age_min',
                                 'subject_age_max',
                                 'handedness',
                                 'proportion_male_subjects',
                                 'inclusion_exclusion_criteria',
                                 'number_of_rejected_subjects',
                                 'group_comparison',
                                 'group_description'],
                      'legend': 'Subjects'}),
    ('ExperimentalDesign', {
        'fields': ['type_of_design',
                   'number_of_imaging_runs',
                   'number_of_experimental_units',
                   'length_of_runs',
                   'length_of_blocks',
                   'length_of_trials',
                   'optimization',
                   'optimization_method'],
        'legend': 'Design'}),
    ('MRI_acquisition', {'fields': ['scanner_make',
                                    'scanner_model',
                                    'field_strength',
                                    'pulse_sequence',
                                    'parallel_imaging',
                                    'field_of_view',
                                    'matrix_size',
                                    'slice_thickness',
                                    'skip_distance',
                                    'acquisition_orientation',
                                    'order_of_acquisition',
                                    'repetition_time',
                                    'echo_time',
                                    'flip_angle'],
                         'legend': 'Acquisition'}),
    ('IntersubjectRegistration', {'fields': [
        'used_intersubject_registration',
        'intersubject_registration_software',
        'intersubject_transformation_type',
        'nonlinear_transform_type',
        'transform_similarity_metric',
        'interpolation_method',
        'object_image_type',
        'functional_coregistered_to_structural',
        'functional_coregistration_method',
        'coordinate_space',
        'target_resolution',
        'used_smoothing',
        'smoothing_type',
        'smoothing_fwhm',
        'resampled_voxel_size'],
        'legend': 'Registration'}),
    ('Preprocessing', {
        'fields': ['software_package',
                   'software_version',
                   'order_of_preprocessing_operations',
                   'quality_control',
                   'used_b0_unwarping',
                   'b0_unwarping_software',
                   'used_slice_timing_correction',
                   'slice_timing_correction_software',
                   'used_motion_correction',
                   'motion_correction_software',
                   'motion_correction_reference',
                   'motion_correction_metric',
                   'motion_correction_interpolation',
                   'used_motion_susceptibiity_correction'],
        'legend': 'Preprocessing'}),
    ('IndividualSubjectModeling', {
        'fields': ['intrasubject_model_type',
                   'intrasubject_estimation_type',
                   'intrasubject_modeling_software',
                   'hemodynamic_response_function',
                   'used_temporal_derivatives',
                   'used_dispersion_derivatives',
                   'used_motion_regressors',
                   'used_reaction_time_regressor',
                   'used_orthogonalization',
                   'orthogonalization_description',
                   'used_high_pass_filter',
                   'high_pass_filter_method',
                   'autocorrelation_model'],
        'legend': '1st Level'}),
    ('GroupModeling', {
        'fields': ['group_model_type',
                   'group_estimation_type',
                   'group_modeling_software',
                   'group_inference_type',
                   'group_model_multilevel',
                   'group_repeated_measures',
                   'group_repeated_measures_method'],
        'legend': '2nd Level'}),
]
# Per-field display priority for collection metadata (1 = most important).
# NOTE(review): contains keys not present in collection_fieldsets above
# (e.g. 'group_statistic_type', 'group_smoothness_fwhm') -- presumably
# consumed by a different renderer; confirm against the templates.
collection_row_attrs = {
    'echo_time': {'priority': 1},
    'number_of_rejected_subjects': {'priority': 2},
    'inclusion_exclusion_criteria': {'priority': 3},
    'group_comparison': {'priority': 1},
    'subject_age_max': {'priority': 2},
    'used_dispersion_derivatives': {'priority': 3},
    'used_intersubject_registration': {'priority': 1},
    'intrasubject_estimation_type': {'priority': 1},
    'field_of_view': {'priority': 2},
    'order_of_preprocessing_operations': {'priority': 2},
    'smoothing_type': {'priority': 1},
    'subject_age_min': {'priority': 2},
    'length_of_blocks': {'priority': 2},
    'used_orthogonalization': {'priority': 1},
    'used_b0_unwarping': {'priority': 2},
    'used_temporal_derivatives': {'priority': 2},
    'software_package': {'priority': 1},
    'scanner_model': {'priority': 1},
    'high_pass_filter_method': {'priority': 2},
    'proportion_male_subjects': {'priority': 2},
    'number_of_imaging_runs': {'priority': 2},
    'interpolation_method': {'priority': 2},
    'group_repeated_measures_method': {'priority': 3},
    'motion_correction_software': {'priority': 3},
    'used_motion_regressors': {'priority': 2},
    'functional_coregistered_to_structural': {'priority': 2},
    'motion_correction_interpolation': {'priority': 3},
    'optimization_method': {'priority': 3},
    'hemodynamic_response_function': {'priority': 2},
    'group_model_type': {'priority': 1},
    'used_slice_timing_correction': {'priority': 1},
    'intrasubject_modeling_software': {'priority': 2},
    'resampled_voxel_size': {'priority': 3},
    'object_image_type': {'priority': 1},
    'group_description': {'priority': 2},
    'functional_coregistration_method': {'priority': 3},
    'length_of_trials': {'priority': 2},
    'handedness': {'priority': 2},
    'used_motion_correction': {'priority': 1},
    'pulse_sequence': {'priority': 1},
    'used_high_pass_filter': {'priority': 1},
    'orthogonalization_description': {'priority': 2},
    'acquisition_orientation': {'priority': 2},
    'order_of_acquisition': {'priority': 3},
    'group_repeated_measures': {'priority': 1},
    'motion_correction_reference': {'priority': 3},
    'group_model_multilevel': {'priority': 3},
    'number_of_experimental_units': {'priority': 2},
    'type_of_design': {'priority': 1},
    'coordinate_space': {'priority': 1},
    'transform_similarity_metric': {'priority': 3},
    'repetition_time': {'priority': 1},
    'slice_thickness': {'priority': 1},
    'length_of_runs': {'priority': 2},
    'software_version': {'priority': 1},
    'autocorrelation_model': {'priority': 2},
    'b0_unwarping_software': {'priority': 3},
    'intersubject_transformation_type': {'priority': 1},
    'quality_control': {'priority': 3},
    'used_smoothing': {'priority': 1},
    'smoothing_fwhm': {'priority': 1},
    'intrasubject_model_type': {'priority': 1},
    'matrix_size': {'priority': 2},
    'optimization': {'priority': 2},
    'group_inference_type': {'priority': 1},
    'subject_age_mean': {'priority': 1},
    'used_motion_susceptibiity_correction': {'priority': 3},
    'group_statistic_type': {'priority': 2},
    'skip_distance': {'priority': 2},
    'used_reaction_time_regressor': {'priority': 2},
    'group_modeling_software': {'priority': 2},
    'parallel_imaging': {'priority': 3},
    'intersubject_registration_software': {'priority': 2},
    'nonlinear_transform_type': {'priority': 2},
    'field_strength': {'priority': 1},
    'group_estimation_type': {'priority': 1},
    'target_resolution': {'priority': 1},
    'slice_timing_correction_software': {'priority': 3},
    'scanner_make': {'priority': 1},
    'group_smoothness_fwhm': {'priority': 1},
    'flip_angle': {'priority': 2},
    'group_statistic_parameters': {'priority': 3},
    'motion_correction_metric': {'priority': 3},
}
class ContributorCommaSepInput(forms.Widget):
    """Text input rendering a list of contributor User pks as a
    comma-separated list of usernames."""

    def render(self, name, value, attrs=None):
        """Render the widget.

        ``value`` is either a string (re-display after a validation
        error) or an iterable of User primary keys; pks that no longer
        resolve to a user are silently skipped.
        """
        final_attrs = self.build_attrs(attrs, type='text', name=name)
        # Use isinstance over ``type(value) == unicode`` so unicode
        # subclasses (e.g. Django safe strings) are not iterated char
        # by char; also accept byte strings.
        if not isinstance(value, basestring) and value is not None:
            out_vals = []
            for val in value:
                try:
                    out_vals.append(str(User.objects.get(pk=val).username))
                except Exception:
                    # unknown or malformed pk -- skip it rather than fail
                    continue
            value = ', '.join(out_vals)
        # smart_str handles None/'' fine, so the value can be written
        # unconditionally (the original if/else branches were identical).
        final_attrs['value'] = smart_str(value)
        return mark_safe(u'<input%s />' % flatatt(final_attrs))
class ContributorCommaField(ModelMultipleChoiceField):
    """Accepts a comma-separated list of usernames and cleans it to a
    queryset of matching User objects."""
    widget = ContributorCommaSepInput

    def clean(self, value):
        """Validate every listed username exists; return the matching
        queryset (empty list when the field is optional and blank).

        Raises ValidationError for a missing required value or an
        unknown username.
        """
        if self.required and not value:
            raise ValidationError(self.error_messages['required'])
        elif not self.required and not value:
            return []
        # Tolerate trailing/duplicated commas: drop empty fragments so
        # "alice, bob," does not fail on an empty username.
        split_vals = [v.strip() for v in value.split(',') if v.strip()]
        for name in split_vals:
            # exists() avoids fetching rows just to count them
            if not self.queryset.filter(username=name).exists():
                raise ValidationError("User %s does not exist." % name)
        return self.queryset.filter(username__in=split_vals)
class CollectionForm(ModelForm):
    """Form for editing Collection metadata.

    Non-owner variant: ownership, privacy and contributor fields are
    excluded (see OwnerCollectionForm). Renders as a crispy-forms tabbed
    layout built from the module-level ``collection_fieldsets``.
    """

    class Meta:
        exclude = ('owner', 'private_token', 'contributors', 'private')
        model = Collection

    # fieldsets = study_fieldsets
    # row_attrs = study_row_attrs

    def clean(self):
        """Require either a name or a DOI.

        When a DOI is given, name/authors/paper_url/journal_name are
        resolved from it and any pending "name" error is dropped.
        """
        cleaned_data = super(CollectionForm, self).clean()
        doi = self.cleaned_data['DOI']
        # normalize a whitespace-only DOI to None
        if doi.strip() == '':
            self.cleaned_data['DOI'] = None
        if self.cleaned_data['DOI']:
            self.cleaned_data['DOI'] = self.cleaned_data['DOI'].strip()
            try:
                self.cleaned_data["name"], self.cleaned_data["authors"], self.cleaned_data[
                    "paper_url"], _, self.cleaned_data["journal_name"] = get_paper_properties(self.cleaned_data['DOI'].strip())
            # NOTE(review): bare except -- any failure (network, parse,
            # unexpected tuple shape) is reported as an unresolvable DOI
            except:
                self._errors["DOI"] = self.error_class(
                    ["Could not resolve DOI"])
            else:
                # name was derived from the DOI; a missing-name error
                # from field validation is no longer relevant
                if "name" in self._errors:
                    del self._errors["name"]
        elif "name" not in cleaned_data or not cleaned_data["name"]:
            self._errors["name"] = self.error_class(
                ["You need to set the name or the DOI"])
            self._errors["DOI"] = self.error_class(
                ["You need to set the name or the DOI"])
        return cleaned_data

    def __init__(self, *args, **kwargs):
        super(CollectionForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-horizontal'
        self.helper.layout = Layout()
        tab_holder = TabHolder()
        # NOTE(review): this mutates the module-level collection_fieldsets
        # entries in place (fs[1]['fields'] is reassigned on the shared dict)
        for fs in collection_fieldsets:
            # manually enforce field exclusion
            fs[1]['fields'] = [
                v for v in fs[1]['fields'] if v not in self.Meta.exclude]
            tab_holder.append(Tab(fs[1]['legend'], *fs[1]['fields']))
        self.helper.layout.extend([tab_holder, Submit(
            'submit', 'Save', css_class="btn-large offset2")])
class OwnerCollectionForm(CollectionForm):
    """Owner-facing CollectionForm: additionally exposes the privacy flag
    and a comma-separated contributors field."""
    contributors = ContributorCommaField(
        queryset=None, required=False, help_text="Select other NeuroVault users to add as contributes to the collection.  Contributors can add, edit and delete images in the collection.")

    class Meta():
        exclude = ('owner', 'private_token')
        model = Collection
        widgets = {
            'private': forms.RadioSelect
        }

    def __init__(self, *args, **kwargs):
        super(OwnerCollectionForm, self).__init__(*args, **kwargs)
        # contributors may be any user except the collection owner
        self.fields['contributors'].queryset = User.objects.exclude(
            pk=self.instance.owner.pk)
class ImageValidationMixin(object):
    """Form mixin that validates and normalizes uploaded brain images.

    Handles three upload shapes:
    - paired lh/rh surface files: converted via FreeSurfer command-line
      tools to a volumetric nii.gz ribbon projection;
    - a volumetric file (.nii.gz/.nii/.img[+.hdr]): checked to be a
      readable 3D NIfTI and converted to .nii.gz if needed;
    - AFNI 4D files: split into 3D sub-bricks for later saving
      (see ``afni_subbricks`` / ``afni_tmp``).
    """

    def __init__(self, *args, **kwargs):
        super(ImageValidationMixin, self).__init__()
        # list of (label, path) sub-bricks produced for AFNI 4D uploads
        self.afni_subbricks = []
        # temp dir kept alive until the sub-bricks are saved
        self.afni_tmp = None

    def clean_and_validate(self, cleaned_data):
        """Validate/convert the uploaded file(s) in ``cleaned_data``.

        Mutates ``cleaned_data`` (and ``self._errors``) in place and
        returns it. Requires FREESURFER_HOME in the environment for the
        surface branch.
        """
        print "enter clean_and_validate"
        file = cleaned_data.get('file')
        surface_left_file = cleaned_data.get('surface_left_file')
        surface_right_file = cleaned_data.get('surface_right_file')
        if surface_left_file and surface_right_file and not file:
            # Surface upload: 'file' is allowed to be absent since it is
            # generated below; drop any pending error about it.
            if "file" in self._errors.keys():
                del self._errors["file"]
            cleaned_data["data_origin"] = 'surface'
            tmp_dir = tempfile.mkdtemp()
            try:
                new_name = cleaned_data["name"] + ".nii.gz"
                ribbon_projection_file = os.path.join(tmp_dir, new_name)
                inputs_dict = {"lh": "surface_left_file",
                               "rh": "surface_right_file"}
                intent_dict = {"lh": "CortexLeft",
                               "rh": "CortexRight"}
                for hemi in ["lh", "rh"]:
                    print hemi
                    surface_file = cleaned_data.get(inputs_dict[hemi])
                    _, ext = splitext_nii_gz(surface_file.name)
                    if not ext.lower() in [".mgh", ".curv", ".gii", ".nii", ".nii.gz"]:
                        self._errors[inputs_dict[hemi]] = self.error_class(
                            ["Doesn't have proper extension"]
                        )
                        del cleaned_data[inputs_dict[hemi]]
                        return cleaned_data
                    # copy the upload to a real file so FreeSurfer tools
                    # can read it
                    infile = os.path.join(tmp_dir, hemi + ext)
                    print "write " + hemi
                    print surface_file.file
                    surface_file.open()
                    surface_file = StringIO(surface_file.read())
                    with open(infile, 'w') as fd:
                        surface_file.seek(0)
                        shutil.copyfileobj(surface_file, fd)
                    try:
                        # non-GIFTI inputs are first converted to GIFTI
                        if ext.lower() != ".gii":
                            out_gii = os.path.join(tmp_dir, hemi + '.gii')
                            subprocess.check_output(
                                [os.path.join(os.environ['FREESURFER_HOME'],
                                              "bin", "mris_convert"),
                                 "-c", infile,
                                 os.path.join(os.environ['FREESURFER_HOME'],
                                              "subjects", "fsaverage", "surf",
                                              hemi + ".white"),
                                 out_gii])
                        else:
                            out_gii = infile
                        gii = nb.load(out_gii)
                        # 163842 vertices per hemisphere == fsaverage mesh
                        if gii.darrays[0].dims != [163842]:
                            self._errors[inputs_dict[hemi]] = self.error_class(
                                ["Doesn't have proper dimensions - are you sure it's fsaverage?"]
                            )
                            del cleaned_data[inputs_dict[hemi]]
                            return cleaned_data
                        # fix intent
                        old_dict = gii.meta.metadata
                        old_dict['AnatomicalStructurePrimary'] = intent_dict[hemi]
                        gii.meta = gii.meta.from_dict(old_dict)
                        gii.to_filename(os.path.join(tmp_dir, hemi + '.gii'))
                        # resample from fsaverage onto the MNI surface
                        subprocess.check_output(
                            [os.path.join(os.environ['FREESURFER_HOME'],
                                          "bin", "mri_surf2surf"),
                             "--s", "fsaverage",
                             "--hemi", hemi,
                             "--srcsurfval",
                             os.path.join(tmp_dir, hemi+'.gii'),
                             "--trgsubject", "ICBM2009c_asym_nlin",
                             "--trgsurfval",
                             os.path.join(tmp_dir, hemi+'.MNI.gii')])
                    except CalledProcessError, e:
                        raise RuntimeError(str(e.cmd) + " returned code " +
                                           str(e.returncode) + " with output " + e.output)
                # keep the converted fsaverage GIFTIs as the stored
                # surface files
                cleaned_data['surface_left_file'] = memory_uploadfile(
                    os.path.join(tmp_dir, 'lh.gii'),
                    new_name[:-7] + ".fsaverage.lh.func.gii", None)
                cleaned_data['surface_right_file'] = memory_uploadfile(
                    os.path.join(tmp_dir, 'rh.gii'),
                    new_name[:-7] + ".fsaverage.rh.func.gii", None)
                print "surf2vol"
                try:
                    # project both hemispheres into a single volume
                    subprocess.check_output(
                        [os.path.join(os.environ['FREESURFER_HOME'],
                                      "bin", "mri_surf2vol"),
                         "--subject", "ICBM2009c_asym_nlin",
                         "--o",
                         ribbon_projection_file[:-3],
                         "--so",
                         os.path.join(os.environ['FREESURFER_HOME'],
                                      "subjects", "ICBM2009c_asym_nlin", "surf", "lh.white"),
                         os.path.join(tmp_dir, 'lh.MNI.gii'),
                         "--so",
                         os.path.join(os.environ['FREESURFER_HOME'],
                                      "subjects", "ICBM2009c_asym_nlin", "surf", "rh.white"),
                         os.path.join(tmp_dir, 'rh.MNI.gii')])
                except CalledProcessError, e:
                    raise RuntimeError(str(e.cmd) + " returned code " +
                                       str(e.returncode) + " with output " + e.output)
                #fix one voxel offset
                nii = nb.load(ribbon_projection_file[:-3])
                affine = nii.affine
                affine[0, 3] -= 1
                nb.Nifti1Image(nii.get_data(), affine).to_filename(ribbon_projection_file)
                cleaned_data['file'] = memory_uploadfile(
                    ribbon_projection_file, new_name, None)
            finally:
                shutil.rmtree(tmp_dir)
        elif file:
            # check extension of the data file
            _, fname, ext = split_filename(file.name)
            if not ext.lower() in [".nii.gz", ".nii", ".img"]:
                self._errors["file"] = self.error_class(
                    ["Doesn't have proper extension"]
                )
                del cleaned_data["file"]
                return cleaned_data
            # prepare file to loading into memory
            file.open()
            fileobj = file.file
            if file.name.lower().endswith(".gz"):
                fileobj = GzipFile(filename=file.name, mode='rb',
                                   fileobj=fileobj)
            file_map = {'image': nb.FileHolder(file.name, fileobj)}
            try:
                tmp_dir = tempfile.mkdtemp()
                # ANALYZE-style .img uploads additionally need a .hdr
                if ext.lower() == ".img":
                    hdr_file = cleaned_data.get('hdr_file')
                    if hdr_file:
                        # check extension of the hdr file
                        _, _, hdr_ext = split_filename(hdr_file.name)
                        if not hdr_ext.lower() in [".hdr"]:
                            self._errors["hdr_file"] = self.error_class(
                                ["Doesn't have proper extension"])
                            del cleaned_data["hdr_file"]
                            return cleaned_data
                        else:
                            hdr_file.open()
                            file_map["header"] = nb.FileHolder(hdr_file.name,
                                                               hdr_file.file)
                    else:
                        self._errors["hdr_file"] = self.error_class(
                            [".img file requires .hdr file"]
                        )
                        del cleaned_data["hdr_file"]
                        return cleaned_data
                # check if it is really nifti
                try:
                    # print file_map
                    if "header" in file_map:
                        nii = nb.Nifti1Pair.from_file_map(file_map)
                    else:
                        nii = nb.Nifti1Image.from_file_map(file_map)
                except Exception as e:
                    raise
                # detect AFNI 4D files and prepare 3D slices
                if nii is not None and detect_4D(nii):
                    self.afni_subbricks = split_4D_to_3D(nii, tmp_dir=tmp_dir)
                else:
                    # number of non-degenerate (size > 1) dimensions
                    squeezable_dimensions = len([a for a in nii.shape if a not in [0, 1]])
                    if squeezable_dimensions != 3:
                        self._errors["file"] = self.error_class(
                            ["4D files are not supported.\n "
                             "If it's multiple maps in one "
                             "file please split them and "
                             "upload separately"])
                        del cleaned_data["file"]
                        return cleaned_data
                    # convert to nii.gz if needed
                    if (ext.lower() != ".nii.gz"
                            or squeezable_dimensions < len(nii.shape)):
                        # convert pseudo 4D to 3D
                        if squeezable_dimensions < len(nii.shape):
                            new_data = np.squeeze(nii.get_data())
                            nii = nb.Nifti1Image(new_data, nii.get_affine(),
                                                 nii.get_header())
                        # Papaya does not handle float64, but by converting
                        # files we loose precision
                        # if nii.get_data_dtype() == np.float64:
                        # ii.set_data_dtype(np.float32)
                        new_name = fname + ".nii.gz"
                        nii_tmp = os.path.join(tmp_dir, new_name)
                        nb.save(nii, nii_tmp)
                        print "updating file in cleaned_data"
                        cleaned_data['file'] = memory_uploadfile(
                            nii_tmp, new_name, cleaned_data['file']
                        )
            finally:
                try:
                    if self.afni_subbricks:
                        # keep temp dir for AFNI slicing
                        self.afni_tmp = tmp_dir
                    else:
                        shutil.rmtree(tmp_dir)
                except OSError as exc:
                    if exc.errno != 2:  # code 2 - no such file or directory
                        raise  # re-raise exception
        elif not getattr(self, 'partial', False):
            # Skip validation error if this is a partial update from the API
            raise ValidationError("Couldn't read uploaded file")
        return cleaned_data
class ImageForm(ModelForm, ImageValidationMixin):
    """Base ModelForm for Image uploads.

    File validation and format conversion are delegated to
    ImageValidationMixin.clean_and_validate().
    """
    hdr_file = FileField(
        required=False, label='.hdr part of the map (if applicable)', widget=AdminResubmitFileWidget)

    def __init__(self, *args, **kwargs):
        # both bases are initialized explicitly (the mixin does not
        # forward args through super)
        ImageValidationMixin.__init__(self, *args, **kwargs)
        ModelForm.__init__(self, *args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-horizontal'
        self.helper.form_tag = False

    class Meta:
        model = Image
        exclude = []
        widgets = {
            'file': AdminResubmitFileWidget,
            'hdr_file': AdminResubmitFileWidget,
            'data_origin': HiddenInput
        }

    def clean(self, **kwargs):
        cleaned_data = super(ImageForm, self).clean()
        # NOTE(review): clean_tags is resolved at call time as a
        # module-level name; it is not visible in this chunk -- confirm it
        # is defined elsewhere in the module.
        cleaned_data["tags"] = clean_tags(cleaned_data)
        return self.clean_and_validate(cleaned_data)
class StatisticMapForm(ImageForm):
    """Upload form for statistic maps.

    On top of the generic image validation it runs two QA checks on the
    converted volume -- thresholding detection and MNI-space coverage --
    and records their results in cleaned_data. Both checks can be
    bypassed via ignore_file_warning and are skipped for ROI/mask maps
    (map_type == "R") and surface-derived volumes.
    """

    def __init__(self, *args, **kwargs):
        super(StatisticMapForm, self).__init__(*args, **kwargs)
        self.helper.form_tag = False
        self.helper.add_input(Submit('submit', 'Submit'))

    def clean(self, **kwargs):
        cleaned_data = super(StatisticMapForm, self).clean()
        django_file = cleaned_data.get("file")
        cleaned_data["is_valid"] = True #This will be only saved if the form will validate
        cleaned_data["tags"] = clean_tags(cleaned_data)
        # print cleaned_data
        if "data_origin" in cleaned_data.keys() and cleaned_data["data_origin"] == "surface":
            # volume was generated by our own surface pipeline -- QA
            # checks are not meaningful, record best-case values
            cleaned_data["is_thresholded"] = False
            cleaned_data["not_mni"] = False
            cleaned_data["perc_bad_voxels"] = 0
            cleaned_data["brain_coverage"] = 100
        elif django_file and "file" not in self._errors and "hdr_file" not in self._errors:
            # re-open the converted nii.gz entirely in memory for QA
            django_file.open()
            fileobj = StringIO(django_file.read())
            django_file.seek(0)
            gzfileobj = GzipFile(
                filename=django_file.name, mode='rb', fileobj=fileobj)
            nii = nb.Nifti1Image.from_file_map(
                {'image': nb.FileHolder(django_file.name, gzfileobj)})
            cleaned_data["is_thresholded"], ratio_bad = is_thresholded(nii)
            cleaned_data["perc_bad_voxels"] = ratio_bad*100.0
            if cleaned_data["is_thresholded"] and not cleaned_data.get("ignore_file_warning") and cleaned_data.get("map_type") != "R":
                self._errors["file"] = self.error_class(
                    ["This map seems to be thresholded (%.4g%% of voxels are zeros). Please use an unthresholded version of the map if possible." % (cleaned_data["perc_bad_voxels"])])
                if cleaned_data.get("hdr_file"):
                    self._errors["hdr_file"] = self.error_class(
                        ["This map seems to be thresholded (%.4g%% of voxels are zeros). Please use an unthresholded version of the map if possible." % (cleaned_data["perc_bad_voxels"])])
                # expose the (normally hidden) override checkbox so the
                # user can resubmit anyway
                self.fields[
                    "ignore_file_warning"].widget = forms.CheckboxInput()
            else:
                cleaned_data["not_mni"], cleaned_data["brain_coverage"], cleaned_data[
                    "perc_voxels_outside"] = not_in_mni(nii, target_template_image=cleaned_data["target_template_image"])
                if cleaned_data["not_mni"] and not cleaned_data.get("ignore_file_warning") and cleaned_data.get(
                        "map_type") != "R":
                    self._errors["file"] = self.error_class(
                        ["This map seems not to be in the MNI space (%.4g%% of meaningful voxels are outside of the brain). Please use transform your data to MNI space." % (cleaned_data["perc_voxels_outside"])])
                    if cleaned_data.get("hdr_file"):
                        self._errors["hdr_file"] = self.error_class(
                            ["This map seems not to be in the MNI space (%.4g%% of meaningful voxels are outside of the brain). Please use transform your data to MNI space." % (cleaned_data["perc_voxels_outside"])])
                    self.fields[
                        "ignore_file_warning"].widget = forms.CheckboxInput()
        # ROI/mask maps: the QA flags do not apply, drop them entirely
        if cleaned_data.get("map_type") == "R":
            if "not_mni" in cleaned_data:
                del cleaned_data["not_mni"]
            if "is_thresholded" in cleaned_data:
                del cleaned_data["is_thresholded"]
        return cleaned_data

    class Meta(ImageForm.Meta):
        model = StatisticMap
        fields = ('name', 'collection', 'description', 'map_type', 'modality', 'target_template_image', 'cognitive_paradigm_cogatlas',
                  'cognitive_contrast_cogatlas', 'cognitive_paradigm_description_url', 'analysis_level', 'number_of_subjects', 'contrast_definition', 'figure',
                  'file', 'ignore_file_warning', 'hdr_file', 'tags', 'statistic_parameters',
                  'smoothness_fwhm', 'is_thresholded', 'perc_bad_voxels', 'is_valid', 'data_origin')
        widgets = {
            'file': AdminResubmitFileWidget,
            'hdr_file': AdminResubmitFileWidget,
            'is_thresholded': HiddenInput,
            'ignore_file_warning': HiddenInput,
            'perc_bad_voxels': HiddenInput,
            'not_mni': HiddenInput,
            'brain_coverage': HiddenInput,
            'perc_voxels_outside': HiddenInput,
            'is_valid': HiddenInput,
            'data_origin': HiddenInput
        }

    def save_afni_slices(self, commit):
        """Save one StatisticMap per AFNI sub-brick produced during
        validation, copying all shared field values from the form; the
        temp dir holding the bricks is always removed afterwards.
        Returns the parent collection."""
        try:
            orig_img = self.instance
            for n, (label, brick) in enumerate(self.afni_subbricks):
                brick_fname = os.path.split(brick)[-1]
                mfile = memory_uploadfile(brick, brick_fname, orig_img.file)
                brick_img = StatisticMap(name='%s - %s' % (orig_img.name, label), collection=orig_img.collection,
                                         file=mfile)
                for field in set(self.Meta.fields) - set(['file', 'hdr_file', 'name', 'collection']):
                    if field in self.cleaned_data:
                        setattr(brick_img, field, self.cleaned_data[field])
                brick_img.save()
            return orig_img.collection
        finally:
            try:
                shutil.rmtree(self.afni_tmp)
            except OSError as exc:
                if exc.errno != 2:
                    raise

    def save(self, commit=True):
        # AFNI 4D uploads become several images; note this path returns
        # the collection instead of a single saved instance
        if self.afni_subbricks:
            return self.save_afni_slices(commit)
        else:
            return super(StatisticMapForm, self).save(commit=commit)
class AtlasForm(ImageForm):
    """Upload form for atlases; adds the label description file."""

    class Meta(ImageForm.Meta):
        model = Atlas
        fields = ('name', 'collection', 'description', 'figure',
                  'file', 'hdr_file', 'label_description_file', 'tags')
class PolymorphicImageForm(ImageForm):
    """ImageForm that adapts its field set to the concrete polymorphic
    type of the instance (atlas, NIDM-derived map, or statistic map) and
    delegates cleaning to the matching concrete form class."""

    def __init__(self, *args, **kwargs):
        super(PolymorphicImageForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-horizontal'
        self.helper.label_class = 'col-lg-2'
        self.helper.field_class = 'col-lg-8'
        if self.instance.polymorphic_ctype is not None:
            if self.instance.polymorphic_ctype.model == 'atlas':
                self.fields = AtlasForm.base_fields
            elif self.instance.polymorphic_ctype.model == 'nidmresultstatisticmap':
                # NOTE(review): the collection owner is passed as the
                # first positional argument, which for a ModelForm is the
                # ``data`` parameter -- looks suspect; confirm intent.
                self.fields = NIDMResultStatisticMapForm(self.instance.collection.owner,
                                                         instance=self.instance).fields
            else:
                self.fields = StatisticMapForm.base_fields

    def clean(self, **kwargs):
        # Infer the concrete form class from the fields currently present
        # (label_description_file -> atlas, map_type -> statistic map).
        if "label_description_file" in self.fields.keys():
            use_form = AtlasForm
        elif "map_type" in self.fields.keys():
            use_form = StatisticMapForm
        else:
            raise Exception("unknown image type! %s" % str(self.fields.keys()))
        # build a delegate form sharing this form's data and error state
        new_instance = use_form(self)
        new_instance.cleaned_data = self.cleaned_data
        new_instance._errors = self._errors
        self.fields = new_instance.fields
        return new_instance.clean()
class EditStatisticMapForm(StatisticMapForm):
    """StatisticMapForm variant for editing: restricts the collection
    choices to collections the requesting user may change (superusers
    see all collections)."""

    def __init__(self, *args, **kwargs):
        # ``user`` is a required extra kwarg injected by the view; pop it
        # so ModelForm.__init__ never sees it (idiomatic replacement for
        # the original kwargs['user'] + del pair).
        user = kwargs.pop('user')
        super(EditStatisticMapForm, self).__init__(*args, **kwargs)
        if user.is_superuser:
            self.fields['collection'].queryset = Collection.objects.all()
        else:
            self.fields['collection'].queryset = get_objects_for_user(
                user, 'statmaps.change_collection')
class AddStatisticMapForm(StatisticMapForm):
    """StatisticMapForm for the add view: no collection field (the target
    collection comes from the URL) and surface uploads are allowed."""

    class Meta(StatisticMapForm.Meta):
        fields = ('name', 'description', 'map_type', 'modality', 'target_template_image', 'cognitive_paradigm_cogatlas',
                  'cognitive_contrast_cogatlas', 'cognitive_paradigm_description_url', 'analysis_level', 'number_of_subjects', 'contrast_definition', 'figure',
                  'file', 'ignore_file_warning', 'hdr_file', 'surface_left_file', 'surface_right_file', 'tags', 'statistic_parameters',
                  'smoothness_fwhm', 'is_thresholded', 'perc_bad_voxels', 'data_origin')
class EditAtlasForm(AtlasForm):
    """AtlasForm variant for editing: exposes all model fields and
    restricts the collection choices to collections the requesting user
    may change (superusers see all collections)."""

    def __init__(self, *args, **kwargs):
        # ``user`` is a required extra kwarg injected by the view; pop it
        # so ModelForm.__init__ never sees it (idiomatic replacement for
        # the original kwargs['user'] + del pair).
        user = kwargs.pop('user')
        super(EditAtlasForm, self).__init__(*args, **kwargs)
        self.helper.form_tag = True
        self.helper.add_input(Submit('submit', 'Submit'))
        if user.is_superuser:
            self.fields['collection'].queryset = Collection.objects.all()
        else:
            self.fields['collection'].queryset = get_objects_for_user(
                user, 'statmaps.change_collection')

    class Meta(AtlasForm.Meta):
        # un-exclude everything AtlasForm.Meta excluded
        exclude = ()
class SimplifiedStatisticMapForm(EditStatisticMapForm):
    """Reduced-field EditStatisticMapForm (essential metadata only)."""

    class Meta(EditStatisticMapForm.Meta):
        fields = ('name', 'collection', 'description', 'map_type', 'modality', 'target_template_image', 'cognitive_paradigm_cogatlas',
                  'cognitive_contrast_cogatlas', 'cognitive_paradigm_description_url', 'file', 'ignore_file_warning', 'hdr_file', 'tags', 'is_thresholded',
                  'perc_bad_voxels')
class NeuropowerStatisticMapForm(EditStatisticMapForm):
    """EditStatisticMapForm variant for the Neuropower integration:
    analysis_level and number_of_subjects become mandatory."""

    def __init__(self, *args, **kwargs):
        super(NeuropowerStatisticMapForm, self).__init__(*args, **kwargs)
        self.fields['analysis_level'].required = True
        self.fields['number_of_subjects'].required = True

    class Meta(EditStatisticMapForm.Meta):
        # 'map_type' appeared twice in the original tuple; duplicate removed.
        fields = ('name', 'collection', 'description', 'map_type',
                  'modality', 'target_template_image', 'analysis_level',
                  'number_of_subjects', 'cognitive_paradigm_cogatlas',
                  'cognitive_contrast_cogatlas',
                  'cognitive_paradigm_description_url', 'file',
                  'ignore_file_warning', 'hdr_file', 'tags',
                  'is_thresholded', 'perc_bad_voxels')
class UploadFileForm(Form):
    """Plain form for batch uploads; accepts only .zip or .gz archives."""
    # TODO Need to upload in a temp directory
    # (upload_to="images/%s/%s"%(instance.collection.id, filename))
    file = FileField(required=False)

    def __init__(self, *args, **kwargs):
        super(UploadFileForm, self).__init__(*args, **kwargs)
        # NOTE(review): shadows the class-level field attribute on the
        # instance; the bound field lives in self.fields['file'], so the
        # form still works -- confirm this attribute is read anywhere.
        self.file = ''

    def clean(self):
        """Reject any upload whose extension is not .zip or .gz."""
        cleaned_data = super(UploadFileForm, self).clean()
        file = cleaned_data.get("file")
        if file:
            ext = os.path.splitext(file.name)[1]
            ext = ext.lower()
            if ext not in ['.zip', '.gz']:
                raise ValidationError("Not allowed filetype!")
        # Bug fix: the original implicitly returned None; older Django
        # versions use clean()'s return value as cleaned_data.
        return cleaned_data
class PathOnlyWidget(forms.Widget):
    """Read-only widget: renders a file field as a link to its URL."""

    def render(self, name, value, attrs=None):
        # ``value`` exposes a .url attribute; the same URL serves as both
        # the href and the link text.
        url = value.url
        markup = '<a target="_blank" href="%s">%s</a><br /><br />' % (url, url)
        return mark_safe(markup)
class MapTypeListWidget(forms.Widget):
    """Renders a map_type choice as a hidden input plus its bold label."""

    def render(self, name, value, attrs=None):
        # Resolve the display label for the stored choice key. As in the
        # original, .pop() on the filtered list raises if the key is
        # absent from MAP_TYPE_CHOICES.
        labels = [label for key, label in BaseStatisticMap.MAP_TYPE_CHOICES
                  if key == value]
        map_type = labels.pop()
        hidden = '<input type="hidden" name="%s" value="%s" />' % (name, value)
        return mark_safe('%s<strong>%s</strong><br /><br />' % (hidden, map_type))
class NIDMResultsValidationMixin(object):
    """Shared zip validation for NIDM Results (used by both the web form
    and the API path, where 'partial' updates may omit the zip)."""

    def clean_and_validate(self, data):
        zip_file = data.get('zip_file')
        partial = getattr(self, 'partial', False)
        # validate whenever a zip was supplied, and always on full updates
        if (zip_file and partial) or (not partial):
            return self.clean_and_validate_zip_file(data, zip_file)
        return data

    def clean_and_validate_zip_file(self, data, zip_file):
        """Parse the uploaded NIDM zip, derive a unique name, validate
        its statistic maps, replace any previous extraction of this
        instance, and attach the (spm12-fixed) ttl file to ``data``.

        Raises ValidationError when the zip is unreadable or a contained
        statistic map fails model validation.
        """
        # make sure the zip file has a unique name
        base_subdir = os.path.split(data['zip_file'].name)[-1].replace(
            '.nidm.zip',
            '')
        nres = NIDMResults.objects.filter(collection=data['collection'],
                                          name__startswith=base_subdir + ".nidm").count()
        # don't count current instance
        if self.instance.pk is not None and nres != 0:
            nres -= 1
        safe_name = '{0}_{1}.nidm'.format(base_subdir, nres)
        data['name'] = base_subdir + ".nidm" if nres == 0 else safe_name
        data['zip_file'].name = zip_file.name = data['name'] + ".zip"
        try:
            self.nidm = NIDMUpload(zip_file)
        except Exception, e:
            raise ValidationError(
                "The NIDM file was not readable: {0}".format(e)
            )
        try:
            self.clean_nidm(data)
        except Exception, e:
            raise ValidationError(e)
        # delete existing images and files when changing file
        if self.instance.pk is not None:
            for statmap in self.instance.nidmresultstatisticmap_set.all():
                statmap.delete()
            cdir = os.path.dirname(self.instance.zip_file.path)
            if os.path.isdir(cdir):
                shutil.rmtree(cdir)
        ttl_name = os.path.split(self.nidm.ttl.filename)[-1]
        data['ttl_file'] = InMemoryUploadedFile(
            # fix ttl for spm12
            file=ContentFile(self.nidm.fix_spm12_ttl(
                self.nidm.zip.read(self.nidm.ttl))),
            field_name='file',
            name=ttl_name,
            content_type='text/turtle',
            size=self.nidm.ttl.file_size,
            charset='utf-8'
        )
        return data

    def clean_nidm(self, cleaned_data):
        """Dry-run validation: build unsaved NIDMResultStatisticMap
        instances (with a placeholder file path) for every map in the
        upload and run model field/uniqueness validation on them."""
        for s in self.nidm.statmaps:
            s['fname'] = os.path.split(s['file'])[-1]
            s['statmap'] = NIDMResultStatisticMap(name=s['name'])
            s['statmap'].collection = cleaned_data['collection']
            s['statmap'].description = cleaned_data['description']
            s['statmap'].map_type = s['type']
            s['statmap'].nidm_results = self.instance
            # placeholder -- the real file is attached at save time
            s['statmap'].file = 'images/1/foo/bar/'
            try:
                s['statmap'].clean_fields(exclude=('nidm_results', 'file'))
                s['statmap'].validate_unique()
            except Exception, e:
                import traceback
                raise ValidationError(
                    "There was a problem validating the Statistic Maps " +
                    "for this NIDM Result: \n{0}\n{1}".format(e, traceback.format_exc()))
def save_nidm_statmaps(nidm, instance):
    """Persist the statistic maps extracted from a NIDM upload.

    Attaches each extracted file to its prepared statmap model, saves
    it, copies the extraction into the instance's storage directory and
    removes the upload's temp files.
    """
    for s in nidm.statmaps:
        s['statmap'].nidm_results = instance
        # Read in binary mode and close the handle deterministically (the
        # original left an open text-mode handle to the garbage collector).
        with open(s['file'], 'rb') as fp:
            s['statmap'].file = ContentFile(fp.read(),
                                            name=os.path.split(s['file'])[-1])
        s['statmap'].save()
    dest = os.path.dirname(instance.zip_file.path)
    nidm.copy_to_dest(dest)
    nidm.cleanup()
def handle_update_ttl_urls(instance):
    """Rewrite prov:atLocation URLs in the instance's ttl file so they
    point at the map's NeuroVault URL instead of the original local
    file:/ path.

    NOTE(review): raises AttributeError if the pattern is not found in
    the ttl (re.search returns None) -- unchanged from the original.
    """
    ttl_content = instance.ttl_file.file.read()
    fname = os.path.basename(
        instance.nidmresultstatisticmap_set.first().file.name)
    # Escape the filename before interpolating it into the pattern: map
    # names routinely contain regex metacharacters such as '.'
    # (e.g. "foo.nii.gz").
    ttl_regx = re.compile(r'(prov:atLocation\ \")(file:\/.*\/)?(' +
                          re.escape(fname) + r')(\"\^\^xsd\:anyURI\ \;)')
    hdr, urlprefix, nifti, ftr = re.search(ttl_regx, ttl_content).groups()
    if not urlprefix:
        urlprefix = ""
    base_url = settings.DOMAIN_NAME
    replace_path = base_url + os.path.join(
        instance.collection.get_absolute_url(), instance.name) + '/'
    updated_ttl = ttl_content.replace(hdr + urlprefix, hdr + replace_path)
    instance.ttl_file.file.close()
    # the with-statement closes the file; the explicit close() the
    # original had inside the block was redundant
    with open(instance.ttl_file.path, 'w') as ttlf:
        ttlf.write(updated_ttl)
class NIDMResultsForm(forms.ModelForm, NIDMResultsValidationMixin):
    """Upload/edit form for a NIDM Results zip.

    On new uploads (or when the zip is replaced) the archive is parsed
    and validated via NIDMResultsValidationMixin; on save the extracted
    statistic maps are persisted and the ttl URLs rewritten.
    """

    class Meta:
        model = NIDMResults
        widgets = {
            'is_valid': forms.HiddenInput()
        }
        exclude = []

    def __init__(self, *args, **kwargs):
        super(NIDMResultsForm, self).__init__(*args, **kwargs)
        # ttl_file is derived from the zip: hidden on create, shown as a
        # plain link when editing an existing instance
        for fld in ['ttl_file']:
            if self.instance.pk is None:
                self.fields[fld].widget = HiddenInput()
            else:
                self.fields[fld].widget = PathOnlyWidget()
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-horizontal'
        self.helper.form_tag = True
        self.helper.add_input(Submit('submit', 'Submit'))
        self.helper.add_input(
            Button('delete', 'Delete',
                   onclick='window.location.href=window.location.href+"/delete"'))
        self.nidm = None
        self.new_statmaps = []
        if self.instance.pk is not None:
            # name and collection are fixed once the instance exists
            self.fields['name'].widget = HiddenInput()
            if self.fields.get('collection'):
                self.fields['collection'].widget = HiddenInput()

    def clean(self):
        cleaned_data = super(NIDMResultsForm, self).clean()
        cleaned_data["tags"] = clean_tags(cleaned_data)
        # only process new uploads or replaced zips
        if self.instance.pk is None or 'zip_file' in self.changed_data:
            self.cleaned_data = self.clean_and_validate(cleaned_data)
        # Bug fix: the original returned None implicitly.
        return self.cleaned_data

    def save(self, commit=True):
        # Bug fix: ``do_update`` was previously assigned only inside the
        # conditional, so saving an existing instance without replacing
        # its zip raised UnboundLocalError on the check below.
        do_update = (self.instance.pk is None
                     or 'zip_file' in self.changed_data)
        nidm_r = super(NIDMResultsForm, self).save(commit)
        if commit and do_update:
            self.save_nidm()
            self.update_ttl_urls()
        return nidm_r

    def update_ttl_urls(self):
        handle_update_ttl_urls(self.instance)

    def save_nidm(self):
        # only re-extract statmaps when a (new) zip was actually uploaded
        if self.nidm and 'zip_file' in self.changed_data:
            save_nidm_statmaps(self.nidm, self.instance)
        # todo: rewrite ttl
class NIDMViewForm(forms.ModelForm):
    """Read-only presentation form for an existing NIDMResults instance."""

    class Meta:
        model = NIDMResults
        exclude = ['is_valid']

    def __init__(self, *args, **kwargs):
        super(NIDMViewForm, self).__init__(*args, **kwargs)
        # show the stored files as plain links
        for fld in ['ttl_file', 'zip_file']:
            self.fields[fld].widget = PathOnlyWidget()
        # everything else is rendered read-only
        for fld in self.fields:
            self.fields[fld].widget.attrs['readonly'] = 'readonly'
        self.fields['name'].widget = HiddenInput()
        if self.fields.get('collection'):
            self.fields['collection'].widget = HiddenInput()
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-horizontal'
        self.helper.form_tag = True
class NIDMResultStatisticMapForm(ImageForm):
    """Form for a statistic map that belongs to a NIDM Results upload;
    mostly read-only since the data comes from the parsed archive."""

    class Meta():
        model = NIDMResultStatisticMap
        fields = ('name', 'collection', 'description', 'map_type', 'figure',
                  'file', 'tags', 'nidm_results')

    def __init__(self, *args, **kwargs):
        super(NIDMResultStatisticMapForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'form-horizontal'
        # problem with exclude() and fields()
        self.fields['hdr_file'].widget = HiddenInput()
        if self.instance.pk is None:
            self.fields['file'].widget = HiddenInput()
        else:
            # existing instance: render all fields read-only
            for fld in self.fields:
                self.fields[fld].widget.attrs['readonly'] = 'readonly'
            # 'disabled' causes the values to not be sent in the POST (?)
            # self.fields[fld].widget.attrs['disabled'] = 'disabled'
            if self.fields.get('nidm_results'):
                self.fields['nidm_results'].widget = HiddenInput()
            self.fields['map_type'].widget = MapTypeListWidget()
            self.fields['file'].widget = PathOnlyWidget()
class EditNIDMResultStatisticMapForm(NIDMResultStatisticMapForm):
    """Edit form for a NIDM-derived statistic map.

    NOTE(review): ``user`` is accepted but currently unused -- kept for
    signature compatibility with the views that instantiate this form.
    """

    def __init__(self, user, *args, **kwargs):
        super(EditNIDMResultStatisticMapForm, self).__init__(*args, **kwargs)

    def clean_tags(self):
        """
        Force all tags to lowercase.
        """
        # Bug fix: Django field cleaners read from self.cleaned_data;
        # the original called self.get(), which forms do not define.
        tags = self.cleaned_data.get('tags', None)
        if tags:
            tags = [t.lower() for t in tags]
        return tags
|
{"hexsha": "4af7e47020393d0cace1572802a7ed6a315bed7d", "size": 45749, "ext": "py", "lang": "Python", "max_stars_repo_path": "neurovault/apps/statmaps/forms.py", "max_stars_repo_name": "aphroditepv/NeuroVault", "max_stars_repo_head_hexsha": "14dbe45c24897f250938716c8d6a015a3b06df93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neurovault/apps/statmaps/forms.py", "max_issues_repo_name": "aphroditepv/NeuroVault", "max_issues_repo_head_hexsha": "14dbe45c24897f250938716c8d6a015a3b06df93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neurovault/apps/statmaps/forms.py", "max_forks_repo_name": "aphroditepv/NeuroVault", "max_forks_repo_head_hexsha": "14dbe45c24897f250938716c8d6a015a3b06df93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0873965041, "max_line_length": 215, "alphanum_fraction": 0.5567334805, "include": true, "reason": "import numpy", "num_tokens": 9719}
|
# Benchmark single-image inference latency of SqueezeNet 1.1 on CPU.
import torch
import numpy as np
import time
import torchvision
# Fetches the pretrained model through torch hub (requires network access
# on first run; cached afterwards).
model = torch.hub.load('pytorch/vision:v0.6.0', 'squeezenet1_1', pretrained=True)
model.eval()
import urllib
# NOTE(review): `url` (and the urllib import) are never used -- nothing is
# downloaded here, so "cat.png" must already exist in the working
# directory.  The dog.jpg URL looks like a leftover from the upstream
# PyTorch hub example; confirm the intended fixture.
url, filename = ("https://github.com/pytorch/hub/raw/master/dog.jpg", "cat.png")
from PIL import Image
from torchvision import transforms
input_image = Image.open(filename)
# Standard ImageNet preprocessing: resize, center-crop to 224x224,
# convert to tensor, normalize with ImageNet channel statistics.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Pin the CPU thread count so timings are comparable across runs.
torch.set_num_threads(4)
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
# Warm-up inference pass, excluded from the timed loop below.
with torch.no_grad():
    out = model(input_batch)
# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes
repeat=10
numpy_time = np.zeros(repeat)  # per-iteration latency, milliseconds
for i in range(0,repeat):
    start_time = time.time()
    with torch.no_grad():
        out = model(input_batch)
    elapsed_ms = (time.time() - start_time) * 1000
    numpy_time[i] = elapsed_ms
# Report mean latency and its standard deviation over the timed runs.
print("pytorch Squeezenet v1.1 %-19s (%s)" % ("%.2f ms" % np.mean(numpy_time), "%.2f ms" % np.std(numpy_time)))
#_, index = torch.max(out, 1)
#percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100
#with open('mobilenet-v2-labels.txt') as f:
#    labels = [line.strip() for line in f.readlines()]
#_, indices = torch.sort(out, descending=True)
#percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100
#[print(labels[idx], percentage[idx].item()) for idx in indices[0][:5]]
|
{"hexsha": "b97423bd557e8b4487fba99aeb1c69c0ac77e920", "size": 1563, "ext": "py", "lang": "Python", "max_stars_repo_path": "squeezenet-v1.1-pytorch.py", "max_stars_repo_name": "tom-gall/torch-bench", "max_stars_repo_head_hexsha": "8d2a938c21637d4305d88a7b2c42d860672d5be2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "squeezenet-v1.1-pytorch.py", "max_issues_repo_name": "tom-gall/torch-bench", "max_issues_repo_head_hexsha": "8d2a938c21637d4305d88a7b2c42d860672d5be2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "squeezenet-v1.1-pytorch.py", "max_forks_repo_name": "tom-gall/torch-bench", "max_forks_repo_head_hexsha": "8d2a938c21637d4305d88a7b2c42d860672d5be2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4905660377, "max_line_length": 112, "alphanum_fraction": 0.7050543826, "include": true, "reason": "import numpy", "num_tokens": 437}
|
// Copyright 2016 Yahoo Inc.
// Licensed under the terms of the Apache 2.0 license.
// Please see LICENSE file in the project root for terms.
#ifndef CAFFE_DISTRI_SOCKET_HPP_
#define CAFFE_DISTRI_SOCKET_HPP_
#include <stdio.h>
#include <map>
#include <string>
#include <vector>
#include <boost/thread.hpp>
#include <boost/bind.hpp>
#include "threadpool.hpp"
#include "caffe/caffe.hpp"
#include "caffe/common.hpp"
#include "caffe/util/blocking_queue.hpp"
using std::vector;
using std::map;
using std::string;
namespace caffe {
class SocketChannel;
// Owns the listening side of the socket transport and the set of
// per-peer channels it serves.
class SocketAdapter {
 public:
  // Listening port; declared volatile in the original -- presumably
  // written by the server thread once bound (confirm in the .cpp).
  volatile int port;
  explicit SocketAdapter(vector<shared_ptr<SocketChannel> > * channels);
  vector<shared_ptr<SocketChannel> > *channels;
  void start_sockt_srvr();
  // Returns this adapter's endpoint formatted as "<hostname>:<port>".
  string address() {
    char host_name[256];
    gethostname(host_name, sizeof(host_name));
    char endpoint[512];
    snprintf(endpoint, sizeof(endpoint), "%s:%d", host_name, port);
    return string(endpoint);
  }
};
// Payload type tag: gradient diffs (DIFF) vs. parameter data (DATA).
enum message_type {DIFF, DATA};
// A received message parked on a BlockingQueue until it is consumed by
// SocketChannel::read_next().
class QueuedMessage {
 public:
  int rank;           // sender rank; indexes the SocketBuffer vector in read_next()
  int iter_count_;    // iteration counter carried for logging in read_next()
  message_type type;  // DIFF or DATA
  int size;           // payload size in bytes (memcpy'd in read_next())
  uint8_t* buffer;    // payload bytes; freed by the consumer in read_next()
  QueuedMessage(int _rank, int iter_count, message_type type, int size, uint8_t* buffer);
};
// Associates a memory region with one peer channel plus the metadata
// (rank, message type, iteration) used to route transfers to it.
class SocketBuffer {
 public:
  SocketBuffer(int rank, int iterCount, message_type mt, SocketChannel* channel,
               uint8_t* buffer, size_t size, uint8_t* addr);
  // Destination address that received payloads are copied into
  // (see SocketChannel::read_next()).
  uint8_t* addr() const {
    return addr_;
  }
  // Backing buffer -- presumably the outgoing payload; Write()'s
  // implementation is not visible here, confirm in the .cpp.
  uint8_t* buffer() const {
    return buffer_;
  }
  // Payload size in bytes.  (The top-level const on the return type has
  // no effect; kept as-is.)
  const size_t size() const {
    return size_;
  }
  // Synchronously writes content to remote peer
  void Write();
  SocketBuffer* Read();
  //protected:
  SocketChannel* channel_;  // channel to the peer this buffer belongs to
  uint8_t* addr_;           // copy-in destination for received payloads
  uint8_t* buffer_;
  /*const*/ size_t size_;
  int rank;                 // peer rank associated with this buffer
  message_type mt_;         // DIFF or DATA
  int iterCount_;           // iteration label, used by info() below
  // Human-readable label for log output.
  string info() {
    std::stringstream sstm;
    sstm << "Iteration: " << iterCount_;
    return sstm.str();
  }
};
// One connection to a remote peer.  Received messages land on shared
// static per-type queues and are consumed through read_next().
class SocketChannel {
 private:
  int connect_to_peer(string to_peer, string to_port);
 public:
  SocketChannel();
  ~SocketChannel();
  void Connect(string peer);
  int client_fd;    // presumably the outbound-connection fd -- confirm in .cpp
  caffe::BlockingQueue<QueuedMessage*> receive_queue;
  int serving_fd;   // presumably the accepted inbound fd -- confirm in .cpp
  int port_no;
  string peer_name;
  size_t size;
  mutable boost::mutex write_mutex_;
  // Human-readable connection summary used in log output.
  string peer_info() {
    std::stringstream sstm;
    sstm << "peer_name: " << peer_name << " port_no: " << port_no
         << " client_fd: " << client_fd << " serving_fd: " << serving_fd;
    return sstm.str();
  }
  static boost::threadpool::pool tp;
  // Incoming messages are demultiplexed by message_type into these two
  // process-wide queues.
  static caffe::BlockingQueue<QueuedMessage*> global_diff_receive_queue;
  static caffe::BlockingQueue<QueuedMessage*> global_data_receive_queue;
  // Blocks until the next message of type `mt` arrives, copies its
  // payload into the sending peer's SocketBuffer (selected by rank),
  // frees the queued message, and returns that buffer.
  static shared_ptr<SocketBuffer> read_next(const vector<shared_ptr<SocketBuffer> > &buffers, const message_type &mt) {
    // Pop the message from local queue
    // (the reinterpret_cast is redundant: pop() already yields QueuedMessage*)
    QueuedMessage* qm =
        reinterpret_cast<QueuedMessage*>(
            (mt == DIFF ? global_diff_receive_queue : global_data_receive_queue)
            .pop(string("trying to get message from queue")));
    LOG(INFO) << "Iteration: " << qm->iter_count_ << " got a message from: " << " , " << buffers[qm->rank]->channel_->peer_info();
    shared_ptr<SocketBuffer> sb_sptr = buffers[qm->rank];
    memcpy(sb_sptr->addr_, qm->buffer, qm->size);
    // Free up the buffer and the wrapper object
    // NOTE(review): if qm->buffer was allocated with new[], this must be
    // `delete[] qm->buffer` -- scalar delete on an array is undefined
    // behavior.  Confirm at the allocation site.
    delete qm->buffer;
    delete qm;
    return sb_sptr;
  }
};
// Thin wrapper around a TCP socket descriptor with helpers for
// length-prefixed integer/string exchange.
class Socket {
 public:
  // Client connection to host:port, or a listening socket when
  // `listen` is true -- exact behavior lives in the .cpp.
  explicit Socket(const string &host, int port, bool listen);
  ~Socket();
  int descriptor() { return fd_; }
  // Accepts one incoming connection wrapped in a new Socket.
  shared_ptr<Socket> accept();
  size_t read(void *buff, size_t size);
  size_t write(void *buff, size_t size);
  // Reads a raw 8-byte integer (no endianness conversion; both ends
  // must share byte order).
  uint64_t readInt() {
    // TODO loop for partial reads or writes
    uint64_t value;
    CHECK_EQ(read(&value, sizeof(uint64_t)), sizeof(uint64_t));
    return value;
  }
  // Writes a raw 8-byte integer; aborts (CHECK) on short write.
  void writeInt(uint64_t value) {
    CHECK_EQ(write(&value, sizeof(uint64_t)), sizeof(uint64_t));
  }
  // Reads a string framed as <uint64 length><bytes>.
  string readStr() {
    size_t size = readInt();
    string str(size, ' ');
    CHECK_EQ(read(&str[0], size), size);
    return str;
  }
  // Writes a string framed as <uint64 length><bytes>.
  void writeStr(const string &str) {
    writeInt(str.size());
    CHECK_EQ(write(const_cast<void*>(reinterpret_cast<const void *>
                   (str.c_str())), str.size()), str.size());
  }
 protected:
  // Wraps an already-open descriptor (used by accept()).
  explicit Socket(int fd) : fd_(fd) { }
  int fd_;
  DISABLE_COPY_AND_ASSIGN(Socket);
};
} // namespace caffe
#endif
|
{"hexsha": "89b03e5d38ca134fa1bea49b5bfa3d76a6a2bae1", "size": 4473, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "caffe-distri/include/util/socket.hpp", "max_stars_repo_name": "jenniew/IntelCaffeOnSpark_mirror", "max_stars_repo_head_hexsha": "7b79ff25d5eed5f472ea7b1572f9c7fa9dcdc46c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "caffe-distri/include/util/socket.hpp", "max_issues_repo_name": "jenniew/IntelCaffeOnSpark_mirror", "max_issues_repo_head_hexsha": "7b79ff25d5eed5f472ea7b1572f9c7fa9dcdc46c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "caffe-distri/include/util/socket.hpp", "max_forks_repo_name": "jenniew/IntelCaffeOnSpark_mirror", "max_forks_repo_head_hexsha": "7b79ff25d5eed5f472ea7b1572f9c7fa9dcdc46c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.625, "max_line_length": 131, "alphanum_fraction": 0.6686787391, "num_tokens": 1136}
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian, MultiCategoricalDistribution, \
RobotARCategoricalDistribution
from a2c_ppo_acktr.utils import init
import gym
from models.blocks import RMCBlock
class Flatten(nn.Module):
    """Collapse every non-batch dimension into one: (N, ...) -> (N, -1)."""

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class Policy(nn.Module):
    """Actor-critic policy: a feature-extracting base network plus an
    action-distribution head chosen from the action space type."""

    def __init__(self, obs_shape, action_space, base=None, base_kwargs=None):
        super(Policy, self).__init__()
        if base_kwargs is None:
            base_kwargs = {}
        if base is None:
            # Default base by observation rank: image obs -> CNN,
            # flat vector obs -> MLP.
            if len(obs_shape) == 3:
                base = CNNBase
            elif len(obs_shape) == 1:
                base = MLPBase
            else:
                raise NotImplementedError
        self.base = base(obs_shape[0], **base_kwargs)

        space_kind = action_space.__class__.__name__
        if space_kind == "Discrete":
            self.dist = Categorical(self.base.output_size, action_space.n)
        elif space_kind == "Box":
            self.dist = DiagGaussian(self.base.output_size,
                                     action_space.shape[0])
        elif space_kind == "MultiBinary":
            self.dist = Bernoulli(self.base.output_size,
                                  action_space.shape[0])
        elif isinstance(action_space, gym.spaces.MultiDiscrete):
            self.dist = MultiCategoricalDistribution(
                self.base.output_size,
                int(np.sum(action_space.nvec)),
                action_space.nvec)
        else:
            raise NotImplementedError

    @property
    def is_recurrent(self):
        return self.base.is_recurrent

    @property
    def recurrent_hidden_state_size(self):
        """Size of rnn_hx."""
        return self.base.recurrent_hidden_state_size

    def forward(self, inputs, rnn_hxs, masks):
        raise NotImplementedError

    def act(self, inputs, rnn_hxs, masks, deterministic=False):
        """Pick an action (distribution mode when deterministic, else a
        sample) and return (value, action, log_probs, rnn_hxs)."""
        value, features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
        dist = self.dist(features)
        action = dist.mode() if deterministic else dist.sample()
        action_log_probs = dist.log_probs(action)
        dist_entropy = dist.entropy().mean()  # computed but not returned
        return value, action, action_log_probs, rnn_hxs

    def get_value(self, inputs, rnn_hxs, masks):
        """Critic value only."""
        value, _, _ = self.base(inputs, rnn_hxs, masks)
        return value

    def evaluate_actions(self, inputs, rnn_hxs, masks, action):
        """Value, log-prob and entropy for externally supplied actions."""
        value, features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
        dist = self.dist(features)
        action_log_probs = dist.log_probs(action)
        dist_entropy = dist.entropy().mean()
        return value, action_log_probs, dist_entropy, rnn_hxs
class NNBase(nn.Module):
    """Common base for policy networks: hidden-size bookkeeping plus an
    optional GRU whose hidden state is reset where `masks` is zero."""

    def __init__(self, recurrent, recurrent_input_size, hidden_size):
        super(NNBase, self).__init__()
        self._hidden_size = hidden_size
        self._recurrent = recurrent
        if recurrent:
            self.gru = nn.GRU(recurrent_input_size, hidden_size)
            # Orthogonal weights / zero biases, standard for RL GRUs.
            for name, param in self.gru.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0)
                elif 'weight' in name:
                    nn.init.orthogonal_(param)

    @property
    def is_recurrent(self):
        return self._recurrent

    @property
    def recurrent_hidden_state_size(self):
        # 1 (a dummy size) when there is no GRU at all.
        if self._recurrent:
            return self._hidden_size
        return 1

    @property
    def output_size(self):
        return self._hidden_size

    def _forward_gru(self, x, hxs, masks):
        """Run the GRU over x, zeroing hidden state where masks == 0.

        Two layouts are supported: single-step input of shape (N, -1)
        (when x and hxs have the same first dimension), or a rollout
        flattened from (T, N, -1) to (T * N, -1).
        """
        if x.size(0) == hxs.size(0):
            # Single step: mask the hidden state, run one GRU step.
            x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
            x = x.squeeze(0)
            hxs = hxs.squeeze(0)
        else:
            # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)
            N = hxs.size(0)
            T = int(x.size(0) / N)
            # unflatten
            x = x.view(T, N, x.size(1))
            # Same deal with masks
            masks = masks.view(T, N)
            # Let's figure out which steps in the sequence have a zero for any agent
            # We will always assume t=0 has a zero in it as that makes the logic cleaner
            has_zeros = ((masks[1:] == 0.0) \
                            .any(dim=-1)
                            .nonzero()
                            .squeeze()
                            .cpu())
            # +1 to correct the masks[1:]
            if has_zeros.dim() == 0:
                # Deal with scalar
                has_zeros = [has_zeros.item() + 1]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()
            # add t=0 and t=T to the list
            has_zeros = [0] + has_zeros + [T]
            hxs = hxs.unsqueeze(0)
            outputs = []
            # Between consecutive reset points no mask is zero, so each
            # segment can be fed to the GRU in a single call.
            for i in range(len(has_zeros) - 1):
                # We can now process steps that don't have any zeros in masks together!
                # This is much faster
                start_idx = has_zeros[i]
                end_idx = has_zeros[i + 1]
                rnn_scores, hxs = self.gru(
                    x[start_idx:end_idx],
                    hxs * masks[start_idx].view(1, -1, 1))
                outputs.append(rnn_scores)
            # assert len(outputs) == T
            # x is a (T, N, -1) tensor
            x = torch.cat(outputs, dim=0)
            # flatten
            x = x.view(T * N, -1)
            hxs = hxs.squeeze(0)
        return x, hxs
class CNNBase(NNBase):
    """Nature-DQN style CNN feature extractor for image observations."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        # Orthogonal init with ReLU gain for the conv/linear torso.
        def relu_init(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0),
                        nn.init.calculate_gain('relu'))

        self.main = nn.Sequential(
            relu_init(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
            relu_init(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
            relu_init(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),
            relu_init(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())

        # Plain orthogonal init (unit gain) for the value head.
        def plain_init(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0))

        self.critic_linear = plain_init(nn.Linear(hidden_size, 1))
        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        # Pixel inputs arrive in [0, 255]; scale into [0, 1].
        features = self.main(inputs / 255.0)
        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)
        return self.critic_linear(features), features, rnn_hxs
class MLPBase(NNBase):
    """Two independent tanh MLP towers (actor and critic) over flat inputs."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=64):
        super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)
        if recurrent:
            # The GRU output becomes the towers' input.
            num_inputs = hidden_size

        def ortho_init(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0), np.sqrt(2))

        self.actor = nn.Sequential(
            ortho_init(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
            ortho_init(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
        self.critic = nn.Sequential(
            ortho_init(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
            ortho_init(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
        self.critic_linear = ortho_init(nn.Linear(hidden_size, 1))
        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        features = inputs
        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)
        hidden_critic = self.critic(features)
        hidden_actor = self.actor(features)
        return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs
class Surreal(NNBase):
    """Configurable MLP actor-critic base ('Surreal' style).

    config keys:
      act: 'relu' or 'tanh' (default 'tanh') -- hidden activation.
      rec: recurrent hidden size (default 100).
      fc:  hidden layer widths as a string, e.g. '300, 200' or '300 200'.
    """

    def __init__(self, num_inputs, recurrent=False, config=None):
        if config is None:
            config = dict(rec=100, fc='300, 200', act='tanh')
        act_name = config.get('act', 'tanh')
        rec = config.get('rec', 100)
        # Accept comma- and/or whitespace-separated width lists.  The
        # original used str.split() alone, which turned the default
        # '300, 200' into ['300,', '200'] and crashed in int().
        fc = [int(width) for width in config['fc'].replace(',', ' ').split()]
        act = nn.ReLU() if act_name == 'relu' else nn.Tanh()
        super(Surreal, self).__init__(recurrent, num_inputs, rec)
        if recurrent:
            num_inputs = rec
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0), np.sqrt(2))
        layers = []
        hiddens = [num_inputs] + fc
        for in_dim, out_dim in zip(hiddens[:-1], hiddens[1:]):
            layers.append(init_(nn.Linear(in_dim, out_dim)))
            layers.append(act)
        # NOTE(review): actor and critic wrap the *same* layer objects,
        # so their weights are shared (unlike MLPBase, which builds two
        # independent towers).  Preserved as-is -- confirm the sharing
        # is intentional.
        self.actor = nn.Sequential(*layers)
        self.critic = nn.Sequential(*layers)
        self.critic_linear = init_(nn.Linear(fc[-1], 1))
        self.train()
        self.fc = fc

    @property
    def output_size(self):
        return self.fc[-1]

    def forward(self, inputs, rnn_hxs, masks):
        """Return (value, actor features, rnn_hxs) for `inputs`."""
        x = inputs
        if self.is_recurrent:
            x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
        hidden_critic = self.critic(x)
        hidden_actor = self.actor(x)
        return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs
class OpenAI(NNBase):
    """Shared-torso MLP base (OpenAI style): one fully connected stack
    feeds both the policy features and the value head.

    config keys:
      act: 'relu' or 'tanh' (default 'tanh') -- hidden activation.
      rec: recurrent hidden size (default 100).
      fc:  hidden layer widths as a string, e.g. '300, 200' or '300 200'.
    """

    def __init__(self, num_inputs, recurrent=False, config=None):
        if config is None:
            config = dict(rec=100, fc='300, 200', act='tanh')
        act_name = config.get('act', 'tanh')
        rec = config.get('rec', 100)
        # Accept comma- and/or whitespace-separated width lists.  The
        # original used str.split() alone, which turned the default
        # '300, 200' into ['300,', '200'] and crashed in int().
        fc = [int(width) for width in config['fc'].replace(',', ' ').split()]
        act = nn.ReLU() if act_name == 'relu' else nn.Tanh()
        assert len(fc) > 0
        super(OpenAI, self).__init__(recurrent, fc[-1], rec)
        num_outputs = fc[-1]
        if recurrent:
            num_outputs = rec
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0), np.sqrt(2))
        layers = []
        hiddens = [num_inputs] + fc
        for in_dim, out_dim in zip(hiddens[:-1], hiddens[1:]):
            layers.append(init_(nn.Linear(in_dim, out_dim)))
            layers.append(act)
        self.shared = nn.Sequential(*layers)
        self.critic_linear = init_(nn.Linear(num_outputs, 1))
        self.train()
        self.fc = fc
        self.num_outputs = num_outputs

    @property
    def output_size(self):
        return self.num_outputs

    def forward(self, inputs, rnn_hxs, masks):
        """Run the shared torso (and optional GRU); return
        (value, features, rnn_hxs)."""
        x = self.shared(inputs)
        if self.is_recurrent:
            x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
        return self.critic_linear(x), x, rnn_hxs
class MLP_ATTN(NNBase):
    """Attention base: per-group linear embeddings of observation slices
    feed an RMC attention block, whose output goes through a Surreal head."""

    def __init__(self, obs_space, attention_dim=128, embedding_dim=100, recurrent=False, hidden_size=100, dims=None):
        super(MLP_ATTN, self).__init__(recurrent, embedding_dim , hidden_size)
        # dims maps observation-group name -> slice width; widths must
        # tile the whole observation vector exactly.
        assert dims and int(sum([d for d in dims.values()])) == obs_space
        self.num_heads = 3
        self.N = len(dims)
        self.embed_dim = self.num_heads * attention_dim
        self.attn_module = RMCBlock(embedding_dim, self.embed_dim, self.num_heads, attention_dim, self.N)
        self.dims = dims
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0))
        # One linear embedding per observation group.
        self.moduleList = nn.ModuleList([init_(nn.Linear(dim, embedding_dim)) for dim in dims.values()])
        # NOTE(review): Surreal's third parameter is a config *dict*, but
        # an int (hidden_size) is passed here; Surreal then calls
        # config.get(...) on that int and would fail.  Confirm intended
        # configuration.
        self.base = Surreal(attention_dim, recurrent, hidden_size)
        self.train()

    @property
    def output_size(self):
        return self.base.output_size

    def forward(self, inputs, rnn_hxs, masks):
        # Slice the flat observation into groups and embed each one.
        _features = []
        i = 0
        for module, dim in zip(self.moduleList, self.dims.values()):
            _features.append(module(inputs[:, i:i + dim]))
            i += dim
        # (batch, num_groups, embedding_dim) through ReLU and attention.
        x = torch.stack(_features, 1)
        x = nn.functional.relu(x)
        x = self.attn_module(x)
        return self.base(x, rnn_hxs, masks)
|
{"hexsha": "dd26b2c3fc1a10d6c606502d393e9e764e0095ff", "size": 11889, "ext": "py", "lang": "Python", "max_stars_repo_path": "a2c_ppo_acktr/model.py", "max_stars_repo_name": "ava6969/AR-Project", "max_stars_repo_head_hexsha": "d34369178d41ac79d73710a2b6681dbde3910e9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "a2c_ppo_acktr/model.py", "max_issues_repo_name": "ava6969/AR-Project", "max_issues_repo_head_hexsha": "d34369178d41ac79d73710a2b6681dbde3910e9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "a2c_ppo_acktr/model.py", "max_forks_repo_name": "ava6969/AR-Project", "max_forks_repo_head_hexsha": "d34369178d41ac79d73710a2b6681dbde3910e9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6196808511, "max_line_length": 126, "alphanum_fraction": 0.5773403987, "include": true, "reason": "import numpy", "num_tokens": 2953}
|
function bpsksys(bin,f)
% BPSKSYS  Simulate BPSK modulation and demodulation and plot the result.
%   bpsksys(BIN, F) modulates the bit vector BIN onto cosine carriers of
%   frequency F (bit 1 carries a 180-degree phase shift), recovers the
%   bits by sampling each bit period, and plots input bits, the PSK
%   waveform and the demodulated bits.
%   Example: bpsksys([0 1 0 1 1 0 1 1 0],3)
disp('========================================');
disp(' HAM DIEU CHE DICH 2 PHA: BPSK');
disp(' VI DU:bpsksys([0 1 0 1 1 0 1 1 0],3)');
disp('Written by Nguyen Hoang Minh DHCNTPHCM. he..he..');
disp('========================================');
% Use defaults only when arguments are missing.  (The original
% unconditionally overwrote bin and f, silently ignoring the caller's
% inputs.)
if nargin < 1, bin = [0 1 0 1 1 0 1 1 1 0]; end
if nargin < 2, f = 3; end
k = 1000;                      % samples per bit period
t = 0:2*pi/(k-1):2*pi;
L = length(bin);
sig0 = cos(f*t);               % carrier for bit 0
sig1 = cos(f*t + pi);          % carrier for bit 1 (phase-shifted by pi)
bit1 = ones(1,k); bit0 = zeros(1,k);
mbit = []; mcw = [];
for n = 1:L
    if bin(n) == 0
        cw = sig0; bit = bit0;
    else
        cw = sig1; bit = bit1;
    end
    mbit = [mbit bit];
    mcw = [mcw cw];
end
psk = mcw;
%================================================
% Demodulation: sample the first sample of each bit period.  At t=0 the
% carriers take the values +1 (bit 0) and -1 (bit 1).
s = length(mcw); mrec = [];
% The original loop ran m = 0:k:s, so its first access mcw(0) is an
% invalid index in 1-based MATLAB; start at 1 instead.  The plain else
% also guarantees rec is always assigned.
for m = 1:k:s
    if mcw(m) < 0
        rec = bit1;
    else
        rec = bit0;
    end
    mrec = [mrec rec];
end
depsk = mrec;
subplot(3,1,1);plot(mbit,'r','linewidth',2);axis([0 k*L -0.5 1.5]);grid on;title('Data in');
subplot(3,1,2);plot(psk,'m','linewidth',1.5);axis([0 k*L -1.5 1.5]);grid on;title('PSK modulation');
subplot(3,1,3);plot(depsk,'g','linewidth',2);axis([0 k*L -.5 1.5]);grid on;title('PSK demodulation,Data out');
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/30770-digital-analog-modulation/SignalModulations/Unfinished/bpsksys.m"}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from unittest import TestCase
from itertools import chain
import numpy as np
from numpy.lib import NumpyVersion
import sys
sys.path.append('../')
from fpq.vector import *
import fpq.fp
class TestVector(TestCase):
def test_is_valid_format(self):
# float : uint8
self.assertTrue(is_valid_format(np.float16, np.uint8, 2))
self.assertTrue(is_valid_format(np.float32, np.uint8, 2))
self.assertTrue(is_valid_format(np.float64, np.uint8, 2))
for nbits in chain(range(2), range(3,9)):
self.assertFalse(is_valid_format(np.float16, np.uint8, nbits))
self.assertFalse(is_valid_format(np.float32, np.uint8, nbits))
self.assertFalse(is_valid_format(np.float64, np.uint8, nbits))
# float16 : uint16
for nbits in range(2,7):
self.assertTrue(is_valid_format(np.float16, np.uint16, nbits))
for nbits in chain(range(2), range(8,17)):
self.assertFalse(is_valid_format(np.float16, np.uint16, nbits))
# float16 : uint32
for nbits in range(7,13):
self.assertTrue(is_valid_format(np.float16, np.uint32, nbits))
for nbits in chain(range(7), range(13,33)):
self.assertFalse(is_valid_format(np.float16, np.uint32, nbits))
# float16 : uint64
for nbits in range(65):
self.assertFalse(is_valid_format(np.float16, np.uint64, nbits))
# float32 : uint16
for nbits in range(2,7):
self.assertTrue(is_valid_format(np.float32, np.uint16, nbits))
for nbits in chain(range(2), range(8,17)):
self.assertFalse(is_valid_format(np.float32, np.uint16, nbits))
# float32 : uint32
for nbits in range(2,15):
self.assertTrue(is_valid_format(np.float32, np.uint32, nbits))
for nbits in chain(range(2), range(16,33)):
self.assertFalse(is_valid_format(np.float32, np.uint32, nbits))
# float32 : uint64
for nbits in range(15,26):
self.assertTrue(is_valid_format(np.float32, np.uint64, nbits))
for nbits in chain(range(15), range(26,65)):
self.assertFalse(is_valid_format(np.float32, np.uint64, nbits))
# float64 : uint16
for nbits in range(2,7):
self.assertTrue(is_valid_format(np.float64, np.uint16, nbits))
for nbits in chain(range(2), range(7,17)):
self.assertFalse(is_valid_format(np.float64, np.uint16, nbits))
# float64 : uint32
for nbits in range(2,15):
self.assertTrue(is_valid_format(np.float64, np.uint32, nbits))
for nbits in chain(range(2), range(15,33)):
self.assertFalse(is_valid_format(np.float64, np.uint32, nbits))
# float64 : uint64
for nbits in range(2,31):
self.assertTrue(is_valid_format(np.float64, np.uint64, nbits))
for nbits in chain(range(2), range(31,65)):
self.assertFalse(is_valid_format(np.float64, np.uint64, nbits))
def test_calc_breakdown_of_uint(self):
# uint8
expected = (2,2,2,2)
actual = calc_breakdown_of_uint(dtype=np.uint8, nbits=2)
self.assertTrue(isinstance(actual, tuple))
self.assertTrue(np.array_equal(actual, expected))
# uint16
expected = ((2, 2, 2, 10),
(2, 3, 3, 8),
(2, 4, 4, 6),
(2, 5, 5, 4),
(2, 6, 6, 2))
for i, nbits in enumerate(range(2,7)):
actual = calc_breakdown_of_uint(dtype=np.uint16, nbits=nbits)
self.assertTrue(isinstance(actual, tuple))
self.assertTrue(np.array_equal(actual, expected[i]))
# uint32
expected = ((2, 2, 2, 26), (2, 3, 3, 24), (2, 4, 4, 22),
(2, 5, 5, 20), (2, 6, 6, 18), (2, 7, 7, 16),
(2, 8, 8, 14), (2, 9, 9, 12), (2, 10, 10, 10),
(2, 11, 11, 8), (2, 12, 12, 6), (2, 13, 13, 4),
(2, 14, 14, 2))
for i, nbits in enumerate(range(2,15)):
actual = calc_breakdown_of_uint(dtype=np.uint32, nbits=nbits)
self.assertTrue(isinstance(actual, tuple))
self.assertTrue(np.array_equal(actual, expected[i]))
# uint64
expected = ((2, 2, 2, 58), (2, 3, 3, 56), (2, 4, 4, 54),
(2, 5, 5, 52), (2, 6, 6, 50), (2, 7, 7, 48),
(2, 8, 8, 46), (2, 9, 9, 44),(2, 10, 10, 42),
(2, 11, 11, 40), (2, 12, 12, 38), (2, 13, 13, 36),
(2, 14, 14, 34), (2, 15, 15, 32), (2, 16, 16, 30),
(2, 17, 17, 28), (2, 18, 18, 26), (2, 19, 19, 24),
(2, 20, 20, 22), (2, 21, 21, 20), (2, 22, 22, 18),
(2, 23, 23, 16), (2, 24, 24, 14), (2, 25, 25, 12),
(2, 26, 26, 10), (2, 27, 27, 8), (2, 28, 28, 6),
(2, 29, 29, 4), (2, 30, 30, 2))
for i, nbits in enumerate(range(2,31)):
actual = calc_breakdown_of_uint(dtype=np.uint64, nbits=nbits)
self.assertTrue(isinstance(actual, tuple))
self.assertTrue(np.array_equal(actual, expected[i]))
@unittest.skipIf(NumpyVersion(np.__version__) < '1.11.2', 'not supported in this numpy version')
def test_encoding_decoding_between_vec16_and_uint32(self):
dtypes = (np.float16, np.uint32)
nbits = 10
expected = np.array([-50, 30, 20], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, dtypes[1]))
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[10, 20, 30],
[-40, 30, 20]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-50, 30, 20]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 90],
[-50, 30, 20]]],
[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-80, 30, 20]]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
def test_encoding_decoding_between_vec32_and_uint32(self):
dtypes = (np.float32, np.uint32)
nbits = 10
expected = np.array([-50, 30, 20], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, dtypes[1]))
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[10, 20, 30],
[-40, 30, 20]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-50, 30, 20]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 90],
[-50, 30, 20]]],
[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-80, 30, 20]]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
def test_encoding_decoding_between_vec32_and_uint64(self):
dtypes = (np.float32, np.uint64)
nbits = 20
expected = np.array([-50, 30, 20], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, dtypes[1]))
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[10, 20, 30],
[-40, 30, 20]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-50, 30, 20]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 90],
[-50, 30, 20]]],
[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-80, 30, 20]]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
def test_encoding_decoding_between_vec64_and_uint64(self):
    """Round-trip float64 vectors through uint64 packing at 20 bits per component."""
    src_dtype, packed_dtype = np.float64, np.uint64
    nbits = 20

    def roundtrip(expected):
        # Encode, check the packed representation, decode, and compare.
        enc = encode_vec_to_uint(expected, dtype=packed_dtype, nbits=nbits)
        if expected.ndim == 1:
            # A single vector packs into a scalar unsigned integer.
            self.assertTrue(isinstance(enc, packed_dtype))
        else:
            # Batched input packs into an ndarray of unsigned integers.
            self.assertTrue(isinstance(enc, np.ndarray))
            self.assertTrue(enc.dtype == packed_dtype)
        dec = decode_uint_to_vec(enc, dtype=src_dtype, nbits=nbits)
        self.assertTrue(isinstance(dec, np.ndarray))
        self.assertTrue(dec.dtype == src_dtype)
        self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))

    # 1-D: a single vector.
    roundtrip(np.array([-50, 30, 20], dtype=src_dtype))
    # 2-D: a batch of vectors.
    roundtrip(np.array([[10, 20, 30],
                        [-40, 30, 20]], dtype=src_dtype))
    # 3-D batch.
    roundtrip(np.array([[[10, 20, 30],
                         [-40, 30, 20]],
                        [[10, 20, 60],
                         [-50, 30, 20]]], dtype=src_dtype))
    # 4-D batch.
    roundtrip(np.array([[[[10, 20, 30],
                          [-40, 30, 20]],
                         [[10, 20, 90],
                          [-50, 30, 20]]],
                        [[[10, 20, 30],
                          [-40, 30, 20]],
                         [[10, 20, 60],
                          [-80, 30, 20]]]], dtype=src_dtype))
def test_encoding_decoding_between_vec_and_uint_by_ogl(self):
    """Round-trip float64 vectors through uint64 packing with the OpenGL snorm codec."""
    encoder = fpq.fp.encode_fp_to_ogl_snorm
    decoder = fpq.fp.decode_ogl_snorm_to_fp
    src_dtype, packed_dtype = np.float64, np.uint64
    nbits = 20

    def roundtrip(expected):
        # Encode with the explicit codec, check the packed form, decode, compare.
        enc = encode_vec_to_uint(expected, dtype=packed_dtype, nbits=nbits, encoder=encoder)
        if expected.ndim == 1:
            # A single vector packs into a scalar unsigned integer.
            self.assertTrue(isinstance(enc, packed_dtype))
        else:
            # Batched input packs into an ndarray of unsigned integers.
            self.assertTrue(isinstance(enc, np.ndarray))
            self.assertTrue(enc.dtype == packed_dtype)
        dec = decode_uint_to_vec(enc, dtype=src_dtype, nbits=nbits, decoder=decoder)
        self.assertTrue(isinstance(dec, np.ndarray))
        self.assertTrue(dec.dtype == src_dtype)
        self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))

    # 1-D: a single vector.
    roundtrip(np.array([-50, 30, 20], dtype=src_dtype))
    # 2-D: a batch of vectors.
    roundtrip(np.array([[10, 20, 30],
                        [-40, 30, 20]], dtype=src_dtype))
    # 3-D batch.
    roundtrip(np.array([[[10, 20, 30],
                         [-40, 30, 20]],
                        [[10, 20, 60],
                         [-50, 30, 20]]], dtype=src_dtype))
    # 4-D batch.
    roundtrip(np.array([[[[10, 20, 30],
                          [-40, 30, 20]],
                         [[10, 20, 90],
                          [-50, 30, 20]]],
                        [[[10, 20, 30],
                          [-40, 30, 20]],
                         [[10, 20, 60],
                          [-80, 30, 20]]]], dtype=src_dtype))
def test_encoding_decoding_between_vec_and_uint_by_d3d(self):
    """Round-trip float64 vectors through uint64 packing with the Direct3D snorm codec."""
    encoder = fpq.fp.encode_fp_to_d3d_snorm
    decoder = fpq.fp.decode_d3d_snorm_to_fp
    src_dtype, packed_dtype = np.float64, np.uint64
    nbits = 20

    def roundtrip(expected):
        # Encode with the explicit codec, check the packed form, decode, compare.
        enc = encode_vec_to_uint(expected, dtype=packed_dtype, nbits=nbits, encoder=encoder)
        if expected.ndim == 1:
            # A single vector packs into a scalar unsigned integer.
            self.assertTrue(isinstance(enc, packed_dtype))
        else:
            # Batched input packs into an ndarray of unsigned integers.
            self.assertTrue(isinstance(enc, np.ndarray))
            self.assertTrue(enc.dtype == packed_dtype)
        dec = decode_uint_to_vec(enc, dtype=src_dtype, nbits=nbits, decoder=decoder)
        self.assertTrue(isinstance(dec, np.ndarray))
        self.assertTrue(dec.dtype == src_dtype)
        self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))

    # 1-D: a single vector.
    roundtrip(np.array([-50, 30, 20], dtype=src_dtype))
    # 2-D: a batch of vectors.
    roundtrip(np.array([[10, 20, 30],
                        [-40, 30, 20]], dtype=src_dtype))
    # 3-D batch.
    roundtrip(np.array([[[10, 20, 30],
                         [-40, 30, 20]],
                        [[10, 20, 60],
                         [-50, 30, 20]]], dtype=src_dtype))
    # 4-D batch.
    roundtrip(np.array([[[[10, 20, 30],
                          [-40, 30, 20]],
                         [[10, 20, 90],
                          [-50, 30, 20]]],
                        [[[10, 20, 30],
                          [-40, 30, 20]],
                         [[10, 20, 60],
                          [-80, 30, 20]]]], dtype=src_dtype))
|
{"hexsha": "b6380a98f9598dd7ea19c4ccb85b2996a18a5da7", "size": 20869, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_vector.py", "max_stars_repo_name": "Hasenpfote/fpq", "max_stars_repo_head_hexsha": "3154ed1b1d5eca08255e8359b5027439af43691c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_vector.py", "max_issues_repo_name": "Hasenpfote/fpq", "max_issues_repo_head_hexsha": "3154ed1b1d5eca08255e8359b5027439af43691c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-09T07:56:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-09T07:56:22.000Z", "max_forks_repo_path": "test/test_vector.py", "max_forks_repo_name": "Hasenpfote/fpq", "max_forks_repo_head_hexsha": "3154ed1b1d5eca08255e8359b5027439af43691c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.5375854214, "max_line_length": 100, "alphanum_fraction": 0.55249413, "include": true, "reason": "import numpy,from numpy", "num_tokens": 5675}
|
'''
Finding the best fit linear slope for a dataset example
'''
from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
# test data
# Six (x, y) sample points used to fit and visualize the regression line.
xs = np.array([1,2,3,4,5,6], dtype=np.float64)
ys = np.array([5,4,6,5,6,7], dtype=np.float64)
# generate best fit slope based on averages and square means
def best_fit_slope_and_intercept(xs, ys):
    """Least-squares slope m and intercept b for the line y = m*x + b.

    Uses the closed-form population formula:
    m = (mean(x)*mean(y) - mean(x*y)) / (mean(x)^2 - mean(x^2)).
    """
    mean_x = mean(xs)
    mean_y = mean(ys)
    slope = (mean_x * mean_y - mean(xs * ys)) / (mean_x ** 2 - mean(xs ** 2))
    intercept = mean_y - slope * mean_x
    return slope, intercept
# y = mx + c
m,b = best_fit_slope_and_intercept(xs, ys)
# Evaluate the fitted line at each sample x for plotting.
regression_line = [(m * x) + b for x in xs] # create list of y values
# predictions
# Extrapolate the fitted line to an unseen x value.
predict_x = 8
predict_y = (m * predict_x) + b
# plot
plt.scatter(xs, ys)
plt.scatter(predict_x, predict_y, color='g')  # prediction highlighted in green
plt.plot(xs, regression_line)
plt.show()
|
{"hexsha": "c00bc499d6e4dc7e449723463e969d6e60c6f6d5", "size": 854, "ext": "py", "lang": "Python", "max_stars_repo_path": "Regression/Linear Regression/sklearn/best-fit-line.py", "max_stars_repo_name": "adam-bhaiji/machine-learning", "max_stars_repo_head_hexsha": "4ea97d6f802791077b8a19ccc2678cff8edcb630", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Regression/Linear Regression/sklearn/best-fit-line.py", "max_issues_repo_name": "adam-bhaiji/machine-learning", "max_issues_repo_head_hexsha": "4ea97d6f802791077b8a19ccc2678cff8edcb630", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Regression/Linear Regression/sklearn/best-fit-line.py", "max_forks_repo_name": "adam-bhaiji/machine-learning", "max_forks_repo_head_hexsha": "4ea97d6f802791077b8a19ccc2678cff8edcb630", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.8604651163, "max_line_length": 83, "alphanum_fraction": 0.6721311475, "include": true, "reason": "import numpy", "num_tokens": 263}
|
import networkx as nx
import matplotlib.pyplot as plt
class Top_Sort:
    """Topological sort of a directed graph read from a file, with visualization
    of both the input graph and the resulting linear order.

    The input is assumed to be a DAG; cycles are not detected.
    """

    def __topologicalSortUtil(self, v, visited, stack, G):
        """Depth-first visit of vertex v; pushes v after everything reachable from it."""
        # Mark the current node as visited.
        visited[v] = True
        # Recur only over the vertices adjacent to this vertex.
        # (Bug fix: the original iterated over *all* vertices via
        # range(len(G.nodes())), ignoring the edge structure entirely.)
        for w in G.successors(v):
            if not visited[w]:
                self.__topologicalSortUtil(w, visited, stack, G)
        # Post-order push: v ends up before everything it points to.
        stack.append(v)

    def __topologicalSort(self, G):
        """Return the vertices of G in topological order (DFS finish-time based)."""
        # Track visited status per node label; node labels need not be 0..n-1
        # (the original indexed a list by node label, which breaks for
        # arbitrary labels).
        visited = {v: False for v in G.nodes()}
        stack = []
        # Start a DFS from every not-yet-visited vertex.
        for v in G.nodes():
            if not visited[v]:
                self.__topologicalSortUtil(v, visited, stack, G)
        # Reversed finish order is a topological order.
        return stack[::-1]

    def __CreateGraph(self, filename):
        """Build a DiGraph from a file: first line is the edge count, then one
        'u v' edge per line (integer vertex labels)."""
        G = nx.DiGraph()
        f = open(filename)
        n = int(f.readline())
        for _ in range(n):
            adj_list = list(map(int, (f.readline()).split()))
            G.add_edge(adj_list[0], adj_list[1])
        f.close()  # release the file handle (previously leaked)
        return G

    def __CreateResultGraph(self, sorted_list):
        """Draw the sorted vertices as a chain: start green, end red, rest blue."""
        D = nx.DiGraph()
        for i in range(len(sorted_list) - 1):
            D.add_edge(sorted_list[i], sorted_list[i + 1])
        pos = nx.spring_layout(D)
        val_map = {}
        val_map[sorted_list[0]] = 'green'
        val_map[sorted_list[len(sorted_list) - 1]] = 'red'
        values = [val_map.get(node, 'blue') for node in D.nodes()]
        options = {
            "node_color": values,
            "edge_color": "#000000",
            "width": 3,
            "edge_cmap": plt.cm.Blues,
            "with_labels": True,
        }
        nx.draw(D, pos, **options)

    def __DrawGraph(self, G):
        """Draw G with a spring layout and return the computed node positions."""
        pos = nx.spring_layout(G)
        options = {
            "node_color": "#A0CBE2",
            "edge_color": "#000000",
            "width": 3,
            "edge_cmap": plt.cm.Blues,
            "with_labels": True,
        }
        nx.draw(G, pos, **options)  # with_labels=True shows node labels in the output
        return pos

    def topological_sort(self, filename):
        """Public entry point: read the graph, show it, sort it, show the order."""
        G = self.__CreateGraph(filename=filename)
        plt.figure("Input Graph")
        pos = self.__DrawGraph(G)
        plt.figure("Graph after Topological Sort")
        sorted_list = self.__topologicalSort(G=G)
        self.__CreateResultGraph(sorted_list)
        plt.show()
|
{"hexsha": "8c8f3fd5798c79a9cbda0e540bc92ad515412067", "size": 2828, "ext": "py", "lang": "Python", "max_stars_repo_path": "graph_algo_vis/topological_sort.py", "max_stars_repo_name": "Akarsh654/Graph-Algorithms-Package", "max_stars_repo_head_hexsha": "ceb417ca5e79ca6a26d709aea47ceddb1dbffc4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-04T10:45:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-11T04:06:33.000Z", "max_issues_repo_path": "graph_algo_vis/topological_sort.py", "max_issues_repo_name": "Akarsh654/Graph-Algorithms-Package", "max_issues_repo_head_hexsha": "ceb417ca5e79ca6a26d709aea47ceddb1dbffc4c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graph_algo_vis/topological_sort.py", "max_forks_repo_name": "Akarsh654/Graph-Algorithms-Package", "max_forks_repo_head_hexsha": "ceb417ca5e79ca6a26d709aea47ceddb1dbffc4c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8837209302, "max_line_length": 101, "alphanum_fraction": 0.5809759547, "include": true, "reason": "import networkx", "num_tokens": 701}
|
# BSD Licensed, Copyright (c) 2006-2008 MetaCarta, Inc.
from TileCache.Layer import MetaLayer
import osgeo.gdal as gdal
import osgeo.gdal_array as gdalarray
import numpy
import PIL
class GDAL(MetaLayer):
    """
    The GDAL Layer allows you to set up any GDAL datasource in TileCache.
    Areas not covered by the image will be transparent in formats which
    support transparency. The GDAL transparency is maintained. All bands
    of an image are read from the source file at this time.
    This Layer does not support images where north is not up.
    Special effort is taken when the GeoTransform on the image is the default
    (0.0, 1.0, 0.0, 0.0, 0.0, 1.0): In that case, the geotransform is
    replaced with (0.0, 1.0, 0.0, self.ds.RasterYSize, 0.0, -1.0) . This allows
    one to use the GDAL layer with non-georeferenced images: Simply specify a
    bbox=0,0,size_x,size_y, and then you can use the image in TileCache. This is
    likely a better idea than using the Image layer, if you can install GDAL,
    since GDAL may be more efficient in managing subsetting of files, especially
    geographic sized ones, due to its ability to support overviews on files it is
    reading.
    This layer depends on:
    * GDAL 1.5 with Python Bindings
    * PIL
    * numpy
    """

    config_properties = [
        {'name':'name', 'description': 'Name of Layer'},
        {'name':'file', 'description': 'GDAL-readable file path.'},
    ] + MetaLayer.config_properties

    def __init__ (self, name, file = None, **kwargs):
        # name: layer name; file: path to any GDAL-readable raster.
        MetaLayer.__init__(self, name, **kwargs)
        self.ds = gdal.Open(file)
        self.geo_transform = self.ds.GetGeoTransform()
        # Rotated (non-north-up) rasters are rejected outright.
        if self.geo_transform[2] != 0 or self.geo_transform[4] != 0:
            raise Exception("Image is not 'north-up', can not use.")
        # Identity geotransform => treat as a plain, non-georeferenced image:
        # put the origin at the bottom-left by flipping the y axis.
        if self.geo_transform == (0.0, 1.0, 0.0, 0.0, 0.0, 1.0):
            self.geo_transform = (0.0, 1.0, 0.0, self.ds.RasterYSize, 0.0, -1.0)
        size = [self.ds.RasterXSize, self.ds.RasterYSize]
        xform = self.geo_transform
        # Geographic extent [minx, miny, maxx, maxy] of the raster.
        # NOTE(review): the rotation terms xform[2]/xform[4] are guaranteed 0
        # by the check above, so those cross terms contribute nothing here.
        self.data_extent = [
            xform[0] + self.ds.RasterYSize * xform[2],
            xform[3] + self.ds.RasterYSize * xform[5],
            xform[0] + self.ds.RasterXSize * xform[1],
            xform[3] + self.ds.RasterXSize * xform[4]
        ]

    def renderTile(self, tile):
        """Render one tile from the raster and return its encoded image bytes.

        The result is also stored on tile.data. Tiles wholly outside the
        raster's extent come back fully transparent. (Python 2 code: uses
        StringIO.)
        """
        import PIL.Image as PILImage
        import StringIO
        bounds = tile.bounds()
        im = None
        # If the image is entirely outside the bounds, don't bother doing anything with it:
        # just return an 'empty' tile.
        if not (bounds[2] < self.data_extent[0] or bounds[0] > self.data_extent[2] or
                bounds[3] < self.data_extent[1] or bounds[1] > self.data_extent[3]):
            tile_offset_left = tile_offset_top = 0
            target_size = tile.size()
            # Pixel-space window of the raster corresponding to the tile bounds.
            # Note geo_transform[5] is negative (north-up), so off_y counts from
            # the top edge.
            off_x = int((bounds[0] - self.geo_transform[0]) / self.geo_transform[1]);
            off_y = int((bounds[3] - self.geo_transform[3]) / self.geo_transform[5]);
            width_x = int(((bounds[2] - self.geo_transform[0]) / self.geo_transform[1]) - off_x);
            width_y = int(((bounds[1] - self.geo_transform[3]) / self.geo_transform[5]) - off_y);
            # Prevent from reading off the sides of an image
            # Right edge: clamp the read window and shrink the output width
            # proportionally.
            if off_x + width_x > self.ds.RasterXSize:
                oversize_right = off_x + width_x - self.ds.RasterXSize
                target_size = [
                    target_size[0] - int(float(oversize_right) / width_x * target_size[0]),
                    target_size[1]
                ]
                width_x = self.ds.RasterXSize - off_x
            # Left edge: shift the paste offset right and shrink the output.
            if off_x < 0:
                oversize_left = -off_x
                tile_offset_left = int(float(oversize_left) / width_x * target_size[0])
                target_size = [
                    target_size[0] - int(float(oversize_left) / width_x * target_size[0]),
                    target_size[1],
                ]
                width_x = width_x + off_x
                off_x = 0
            # Bottom edge: clamp and shrink the output height.
            if off_y + width_y > self.ds.RasterYSize:
                oversize_bottom = off_y + width_y - self.ds.RasterYSize
                target_size = [
                    target_size[0],
                    target_size[1] - round(float(oversize_bottom) / width_y * target_size[1])
                ]
                width_y = self.ds.RasterYSize - off_y
            # Top edge: shift the paste offset down and shrink the output.
            if off_y < 0:
                oversize_top = -off_y
                tile_offset_top = int(float(oversize_top) / width_y * target_size[1])
                target_size = [
                    target_size[0],
                    target_size[1] - int(float(oversize_top) / width_y * target_size[1]),
                ]
                width_y = width_y + off_y
                off_y = 0
            # Read every band of the clamped window, resampled to the target size.
            bands = self.ds.RasterCount
            array = numpy.zeros((target_size[1], target_size[0], bands), numpy.uint8)
            for i in range(bands):
                array[:,:,i] = gdalarray.BandReadAsArray(self.ds.GetRasterBand(i+1), off_x, off_y, width_x, width_y, target_size[0], target_size[1])
            im = PIL.Image.fromarray(array)
        # Composite the (possibly partial) raster window onto a transparent tile.
        big = PIL.Image.new("RGBA", tile.size(), (0,0,0,0))
        if im:
            big.paste(im, (tile_offset_left, tile_offset_top))
        buffer = StringIO.StringIO()
        big.save(buffer, self.extension)
        buffer.seek(0)
        tile.data = buffer.read()
        return tile.data
|
{"hexsha": "59c21bc90fa1ad0f3fe8af04876449cb5a6ef39d", "size": 5594, "ext": "py", "lang": "Python", "max_stars_repo_path": "public/cgi/tilecache/TileCache/Layers/GDAL.py", "max_stars_repo_name": "l34marr/mapwarper", "max_stars_repo_head_hexsha": "0bc844c6a9f002157d0c67d2f792b986be0f9133", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-01T14:37:39.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-01T14:37:39.000Z", "max_issues_repo_path": "public/cgi/tilecache/TileCache/Layers/GDAL.py", "max_issues_repo_name": "miklobit/mapwarper", "max_issues_repo_head_hexsha": "8c395dfcc66cd3b0f858584e2cf1e2f74cf21da7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2017-07-05T16:33:52.000Z", "max_issues_repo_issues_event_max_datetime": "2017-07-19T03:07:55.000Z", "max_forks_repo_path": "public/cgi/tilecache/TileCache/Layers/GDAL.py", "max_forks_repo_name": "miklobit/mapwarper", "max_forks_repo_head_hexsha": "8c395dfcc66cd3b0f858584e2cf1e2f74cf21da7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7462686567, "max_line_length": 148, "alphanum_fraction": 0.5709688952, "include": true, "reason": "import numpy", "num_tokens": 1454}
|
#!/usr/local/bin/python3
# use age for lineaer regression
# accuracy 0.7890
# kaggle score 0.7655 (same as female alone)
import sys # pylint: disable=unused-import
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import accuracy_score
import warnings
warnings.simplefilter(action='ignore', category=RuntimeWarning)
# load data
train = pd.read_csv('../input/train.csv')
test = pd.read_csv("../input/test.csv")

#-------- main

# fill missing values: impute Age with the training-set mean in both splits
mean_age = train.Age.mean()
train.Age.fillna(mean_age, inplace=True)
test.Age.fillna(mean_age, inplace=True)

# feature creation: 0/1 gender indicator
train['is_female'] = train.Sex.apply(lambda sex: 1 if sex == 'female' else 0)
test['is_female'] = test.Sex.apply(lambda sex: 1 if sex == 'female' else 0)

# print(train.describe())

# keep only the model features (plus the target on the training side)
train = train[['Age', 'is_female', 'Pclass', 'Survived']]
x_train = train.drop('Survived', axis=1)
y_train = train.Survived
x_test = test[x_train.columns]
print(x_train.head(20))

# fit ordinary least squares and round predictions to 0/1 labels
model = LinearRegression()
model.fit(x_train, y_train)

pred_train = np.round(model.predict(x_train)).astype(int)
print('accuracy', accuracy_score(y_train, pred_train))

pred_test = np.round(model.predict(x_test)).astype(int)

predicted = pd.DataFrame({
    "PassengerId": test.PassengerId,
    "Survived": pred_test
})
predicted.to_csv('../input/submission.csv', index=False)
|
{"hexsha": "8e295f83c178ec1659214a83d64cba60a85d1f8f", "size": 1440, "ext": "py", "lang": "Python", "max_stars_repo_path": "src-examples/003-age.py", "max_stars_repo_name": "peterorum/kaggle-titanic", "max_stars_repo_head_hexsha": "ae2d5f6fb62ecfee0c2c9f473c3c9d6e7ded836f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src-examples/003-age.py", "max_issues_repo_name": "peterorum/kaggle-titanic", "max_issues_repo_head_hexsha": "ae2d5f6fb62ecfee0c2c9f473c3c9d6e7ded836f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src-examples/003-age.py", "max_forks_repo_name": "peterorum/kaggle-titanic", "max_forks_repo_head_hexsha": "ae2d5f6fb62ecfee0c2c9f473c3c9d6e7ded836f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2631578947, "max_line_length": 77, "alphanum_fraction": 0.7402777778, "include": true, "reason": "import numpy", "num_tokens": 372}
|
#this file contains a common tracking code for both elevator and rover
#It checks variable from file config.npy to figure out its own type
import time
from datetime import datetime
import subprocess
import numpy as np
from numpy import linalg
from numpy.linalg import inv
import math
import cmath
import linalgfunc
import pdb
import os
import serial
import sys, glob
import random
import Adafruit_BBIO.GPIO as GPIO
import pickle
#Libraries made for convenience
from analog import Analog
from motion_tracking_socket3D import MotionTrackingSocket3D
from led import LED
from trigger_socket import TriggerSocket
from motor_system import MotorSystem
import my_functions as mf
global pi  # no-op at module scope (kept from the original); pi is a module global
pi = np.pi
def initialize():
    """Allocate the module-level logging buffers for one experiment run.

    Side effects only: defines the globals below; returns nothing.
    """
    global num_iteration, t_Iter, y_all, y_hpf_all, u_all, p_all, \
        utotal_all, motor_commands_all, x_ground_truth_all, time_all
    num_iteration = 200          # number of control-loop iterations
    t_Iter = 0.5                 # seconds per iteration
    fs = 1 / t_Iter              # sampling frequency (computed for reference)
    # Per-iteration logs: intensity, filtered intensity, control inputs,
    # perturbations, totals, motor commands, mocap pose, timestamps.
    y_all = np.zeros(num_iteration)
    y_hpf_all = np.zeros(num_iteration)
    u_all = np.zeros((num_iteration, 3))
    p_all = np.zeros((num_iteration, 2))
    utotal_all = np.zeros((num_iteration, 2))
    motor_commands_all = np.zeros((num_iteration, 2))
    x_ground_truth_all = np.zeros((num_iteration, 6))
    time_all = np.zeros(num_iteration)
def setup():
    """Initialize hardware interfaces, verify gimbal/mocap alignment, and move
    the gimbal to the robot's initial pose.

    Defines many module globals (receiver, Gimbal, motion_socket, ...). Raises
    if the encoder yaw and motion-capture yaw disagree by more than 1 degree.
    """
    global receiver
    receiver = Analog()  # light-intensity receiver (ADC)
    global Gimbal
    Gimbal = MotorSystem()
    Gimbal.TakeGroundPosition()
    global motion_socket
    motion_socket = MotionTrackingSocket3D()  # external motion-tracking feed
    global MyRobotName
    MyRobotName = mf.read_file("my_type.txt").split()[0]
    global initial_pitch
    global initial_yaw
    global perturbation_factor
    # Per-robot tuning: initial gimbal pose and perturbation divisor.
    if MyRobotName == 'Rover':
        initial_pitch = 7
        initial_yaw = 7
        perturbation_factor = 4
        from underlying_robot import Robot
        global myBot
        myBot = Robot(motion_socket, MyRobotName, 3, 0.6)
    elif MyRobotName == 'Elevator':
        initial_pitch = 6
        initial_yaw = -8
        perturbation_factor = 3
    # Robot-specific config file, e.g. "<name>_config.txt".
    MyRobotName2 = mf.read_file("my_name.txt").split()[0]
    local_config_file_name = MyRobotName2 + '_config.txt'
    s = mf.read_file(local_config_file_name)
    local_config = s.split(' ')
    global bias_angle
    bias_angle = float(local_config[8])  # mounting yaw offset; 180 => reversed frame
    global receiver_sum_angle
    global base_sum_angle
    # Running totals of commanded pitch (receiver) and yaw (base) angles.
    receiver_sum_angle = initial_pitch
    base_sum_angle = initial_yaw
    global communication_flag
    communication_flag = int(mf.read_file("communication_flag.txt"))
    if communication_flag == 0:
        # Plain LED beacon, no data link.
        global txLED
        txLED = LED()
        txLED.on()
    else:
        # Full communication stack.
        from receiver_handle import ReceiverHandle
        global RxRoutine
        # NOTE(review): `scan` is not defined anywhere in this file — this
        # branch looks like a latent NameError; confirm against other scripts.
        RxRoutine = ReceiverHandle(scan[1])
        global TxRoutine
        # NOTE(review): TransmissionHandle is not imported in this file either.
        TxRoutine = TransmissionHandle()
    # Cross-check the gimbal encoder yaw against the motion-capture yaw.
    yaw1 = Gimbal.get_yaw()
    x = motion_socket.x
    if bias_angle == 180:
        yaw2 = x[0] % 360 - 180  # unwrap into the robot's reversed frame
    else:
        yaw2 = x[0]
    #pdb.set_trace()
    if abs(yaw1 - yaw2) > 1.0:
        # Misaligned: shut everything down safely before aborting.
        motion_socket.stop()
        Gimbal.Deactivate()
        txLED.off()
        raise Exception("Sorry, the robot is not aligned, please correct the orientation: ", yaw2)
    # Drive to the initial pose and report both encoder and mocap readings.
    Gimbal.WriteAbsoluteAngles([initial_yaw, initial_pitch])
    x = motion_socket.x
    pitch = Gimbal.get_pitch()
    yaw = Gimbal.get_yaw()
    print('Reached absolute yaw at ', yaw, ' degrees, and absolute pitch at ', pitch, ' degrees')
    if bias_angle == 180:
        yaw = x[0] % 360 - 180
    else:
        yaw = x[0]
    print('From Motion Tracking System yaw = ', yaw, ' and pitch = ', x[1])
def trigger_setup():
    """Block until the synchronized start trigger arrives, then record t_START.

    Defines globals my_trigger, t_START, toc. For the Rover, also hands the
    received motion parameters (duty, idle time) to the drive object.
    """
    current_time = time.time()
    print("Current time: %f" % (current_time))
    global my_trigger
    my_trigger = TriggerSocket()
    print("Waiting for the starting trigger on ", MyRobotName)
    global t_START
    # The trigger carries the agreed start instant plus rover motion parameters.
    t_START, duty, tIdle = my_trigger.waitForTrigger()
    mf.wait_till(t_START)  # wait out the remaining time to the common start
    global toc
    toc = time.time()
    print("Process triggered at time ", datetime.fromtimestamp(toc).strftime('%Y %m %d_%I:%M:%S.%f %p'), ' on ', MyRobotName)
    if MyRobotName == 'Rover':
        # Start the rover's background motion with the received parameters.
        myBot.duty = duty
        myBot.idle_time = tIdle
        myBot.motion_state = True
def closing_setup():
    """Shut down hardware and persist the run's logs to disk.

    Writes an .npz with all logged arrays, plus (on the communication path)
    pickles of received data, and records the output filenames in a text file.
    """
    Gimbal.Deactivate()
    # Output filenames, all stamped with the trigger time `toc`.
    file_name = MyRobotName + '_3D_ExSeeking_data'
    txt_file_name = file_name + '_recent_files_name.txt'
    zip_name = file_name + datetime.fromtimestamp(toc).strftime('_%Y-%m-%d_%I:%M_%p.npz')
    received_data_pkl_file_name = file_name + '_received_data' + datetime.fromtimestamp(toc).strftime('_%Y-%m-%d_%I:%M_%p.pkl')
    iteration_num_pkl_file_name = file_name + '_iteration_nums' + datetime.fromtimestamp(toc).strftime('_%Y-%m-%d_%I:%M_%p.pkl')
    file2write = open(txt_file_name, 'w')
    file2write.write(zip_name + ' ')
    if communication_flag == 0:
        txLED.off()
    else:
        # NOTE(review): indentation reconstructed — this whole branch only makes
        # sense on the communication path, since RxRoutine exists only there.
        RxRoutine.stop()
        TxRoutine.deactivate_transmission()
        file2write.write(received_data_pkl_file_name + ' ')
        file2write.write(iteration_num_pkl_file_name)
        iteration_nums = RxRoutine.iteration_nums
        received_data = RxRoutine.received_data
        #np.save('recent_file_name.npy',common_file_name)
        f = open(iteration_num_pkl_file_name, "wb")
        pickle.dump(iteration_nums, f)
        f.close()
        f = open(received_data_pkl_file_name, "wb")
        pickle.dump(received_data, f)
        f.close()
    file2write.close()
    # Persist every per-iteration log array in one compressed archive.
    np.savez(zip_name, u_all=u_all, y_hpf_all=y_hpf_all, y_all=y_all, time_all=time_all, \
        motor_commands_all=motor_commands_all, x_ground_truth_all=x_ground_truth_all, theta_all=theta)
    # Tell the trigger host this robot is finished, then release the socket.
    message = MyRobotName + " is Done!"
    my_trigger.sendFinisherFlag(message.encode())
    my_trigger.Deactivate()
    if MyRobotName == 'Rover':
        myBot.takeGroundPosition()
    motion_socket.stop()
#Variables Initialization
# Top-level experiment script: set up hardware, compute the initial
# extremum-seeking perturbation, and take the first measurement.
initialize()
setup()
y = 0
u2 = 0
u3 = 0
u = [0, u2, u3]  # control input vector [unused, azimuth, elevation]
timer = np.zeros(num_iteration + 1)
theta = np.zeros(num_iteration + 1)       # measured gimbal pitch per iteration
scan_psi = np.zeros(num_iteration + 1)
scan_theta = np.zeros(num_iteration + 1)
theta[0] = initial_pitch
scan_theta[0] = theta[0]
# ReceiverStepper.rotateMotor(-theta[0])
# receiver_sum_angle = receiver_sum_angle -theta[0]
interval = np.zeros(num_iteration)
fs = 1 / t_Iter # Sampling frequency
fp = fs / perturbation_factor #perturbation frequency
#omega = fp*pi*2
K = 5  # extremum-seeking gain
phase1 = 0 #Azimuthal phase
phase2 = 90 #elevation phase
A = 3 #Amplitude of the ES
# Initial perturbation components (sinusoids 90 degrees apart).
p1 = A * mf.sind(phase1)
p2 = A * mf.sind(phase2)
u_total = [0, p1, p2]
previous_alpha_bias = 0
previous_beta_bias = 0
alpha_bias = p2  # elevation perturbation bias
beta_bias = p1   # azimuth perturbation bias
# Convert the control + perturbation into incremental motor commands.
motor_commands = mf.generate_motor_commands_old(theta[0], previous_alpha_bias, previous_beta_bias, u, alpha_bias, beta_bias)
Motor_command_receiver = motor_commands[0, 0]
Motor_command_base = motor_commands[0, 1]
base_sum_angle = base_sum_angle + Motor_command_base
receiver_sum_angle = receiver_sum_angle + Motor_command_receiver
motor_commands_all[0] = [Motor_command_base, Motor_command_receiver]
# Synchronize with the other robot(s), then log the first sample.
trigger_setup()
set_time = t_START + t_Iter
y_all[0] = receiver.getIntensity()
x_ground_truth_all[0] = motion_socket.x
tdiff_min = 1000  # tracks the smallest remaining slack per iteration
# Main extremum-seeking control loop: one gimbal command + one measurement
# per t_Iter period, all logged for post-processing.
for i in range(1, num_iteration):
    #print 'i= %d' %(i)
    #u = [0,0,0]
    Gimbal.ApplyMotorCommandsSync([Motor_command_base, Motor_command_receiver])
    theta[i] = Gimbal.get_pitch()
    y = receiver.getIntensity()
    y_all[i] = y
    # High-pass filter: subtract a (up to) 20-sample moving average.
    lim = max(1, i - 20)
    y_hpf = y - np.mean(y_all[lim:i + 1])
    y_hpf_all[i] = y_hpf
    # Demodulate with the perturbation to estimate the gradient (gain K).
    u1 = -K * y_hpf * p1
    u2 = -K * y_hpf * p2
    u = [0, u1, u2]
    # Advance the sinusoidal perturbation by one step of frequency fp.
    previous_alpha_bias = p2
    previous_beta_bias = p1
    p1 = A * mf.sind(phase1 + i * 2 * fp * t_Iter * 180)
    p2 = A * mf.sind(phase2 + i * 2 * fp * t_Iter * 180)
    alpha_bias = p2
    beta_bias = p1
    if i == -1:  # disabled debug hook
        pdb.set_trace()
    motor_commands = mf.generate_motor_commands_old(theta[i], previous_alpha_bias, previous_beta_bias, u, alpha_bias, beta_bias)
    Motor_command_receiver = motor_commands[0, 0]
    Motor_command_base = motor_commands[0, 1]
    base_sum_angle = base_sum_angle + Motor_command_base
    receiver_sum_angle = receiver_sum_angle + Motor_command_receiver
    time_all[i] = set_time - t_START
    # Sleep until the next scheduled iteration instant; tDiff is the slack.
    tDiff = mf.wait_till(set_time)
    if tDiff < tdiff_min:
        tdiff_min = tDiff
    #print "Iteration: %d, Scan_radius: %d, Angle %d" %(i,scan_radius,bias)
    x_ground_truth_all[i] = motion_socket.x
    motor_commands_all[i] = [Motor_command_base, Motor_command_receiver]
    set_time = set_time + t_Iter
    print("Iteration: %d / %d \r" % (i, num_iteration))
    if bias_angle == 180:
        yaw = x_ground_truth_all[i, 0] % 360 - 180  # unwrap into reversed frame
    else:
        yaw = x_ground_truth_all[i, 0]
    print('From Motion Tracking System yaw = ', yaw, ' and pitch = ', x_ground_truth_all[i, 1], ' tDiff ', tDiff)
print('Minimum wait was: ', tdiff_min)
closing_setup()
print('Done!')
|
{"hexsha": "57e362dfec7baf2a2199c9a511f6bc1b1693aba6", "size": 8811, "ext": "py", "lang": "Python", "max_stars_repo_path": "ExperimentCode/STAC_3D_Air_ExSeeking.py", "max_stars_repo_name": "pratapbhanusolanki/tmech2021", "max_stars_repo_head_hexsha": "8b3d23f3c384482da2f3143b7abd33ac6b65d911", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ExperimentCode/STAC_3D_Air_ExSeeking.py", "max_issues_repo_name": "pratapbhanusolanki/tmech2021", "max_issues_repo_head_hexsha": "8b3d23f3c384482da2f3143b7abd33ac6b65d911", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ExperimentCode/STAC_3D_Air_ExSeeking.py", "max_forks_repo_name": "pratapbhanusolanki/tmech2021", "max_forks_repo_head_hexsha": "8b3d23f3c384482da2f3143b7abd33ac6b65d911", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7941176471, "max_line_length": 128, "alphanum_fraction": 0.6936783566, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2398}
|
import pytest
def test_x_minus_xt():
    """Pairwise position differences should have shape (n, n, 3) for n points."""
    import jax
    import jax.numpy as jnp
    import sake
    prng_key = jax.random.PRNGKey(2666)
    positions = jax.random.normal(key=prng_key, shape=(5, 3))
    diffs = sake.functional.get_x_minus_xt(positions)
    assert diffs.shape == (5, 5, 3)
def test_x_minus_xt_norm():
    """Norms of the pairwise differences should have shape (n, n, 1)."""
    import jax
    import jax.numpy as jnp
    import sake
    prng_key = jax.random.PRNGKey(2666)
    positions = jax.random.normal(key=prng_key, shape=(5, 3))
    diffs = sake.functional.get_x_minus_xt(positions)
    norms = sake.functional.get_x_minus_xt_norm(diffs)
    assert norms.shape == (5, 5, 1)
def test_h_cat_ht():
    """Pairwise feature concatenation should double the feature axis: (n, n, 2d)."""
    import jax
    import jax.numpy as jnp
    import sake
    prng_key = jax.random.PRNGKey(2666)
    features = jax.random.normal(key=prng_key, shape=(5, 3))
    paired = sake.functional.get_h_cat_ht(features)
    assert paired.shape == (5, 5, 6)
|
{"hexsha": "a23192e13d6b38aa0240402b81bcc56cd7cba53f", "size": 863, "ext": "py", "lang": "Python", "max_stars_repo_path": "sake/tests/test_functional.py", "max_stars_repo_name": "yuanqing-wang/sake", "max_stars_repo_head_hexsha": "9968aeb51ced20e47646762d15416d38b59f8102", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-16T15:11:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T15:11:47.000Z", "max_issues_repo_path": "sake/tests/test_functional.py", "max_issues_repo_name": "yuanqing-wang/sake", "max_issues_repo_head_hexsha": "9968aeb51ced20e47646762d15416d38b59f8102", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sake/tests/test_functional.py", "max_forks_repo_name": "yuanqing-wang/sake", "max_forks_repo_head_hexsha": "9968aeb51ced20e47646762d15416d38b59f8102", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7666666667, "max_line_length": 69, "alphanum_fraction": 0.6778679027, "include": true, "reason": "import jax", "num_tokens": 271}
|
#-*- encoding: utf-8 -*-
import argparse
from tkinter.constants import TRUE
import numpy as np
import tkinter as tk
from tkinter.ttk import Label
from multiprocessing import Process, Queue
import time
class App(object):
    """Minimal Tk window showing keyword-spotting output.

    Text is displayed via a StringVar; a producer process may feed new
    words through the multiprocessing queue (see ``get_text``).
    """

    def __init__(self, queue):
        self.q = queue
        self.root = tk.Tk()
        self.word = Label(self.root)
        self.txt_placeholder = tk.StringVar()
        self._set_text('yesgo')  # initial placeholder text
        theme = '#1C1C1C'
        self._set_root(theme)
        self._set_label(theme)
        # NOTE: queue polling (self.get_text) is not started automatically.

    def mainloop(self):
        """Enter the Tk event loop (blocks until the window closes)."""
        self.root.mainloop()

    def _set_root(self, color):
        """Configure window geometry, title and background colour."""
        self.root.geometry('200x50')
        self.root.title('Keywords spotting')
        self.root.config(background=color)

    def _set_label(self, color):
        """Style the label and bind it to the text placeholder."""
        self.word.config(
            width=20,
            font=("Times", 40, 'bold'),
            textvariable=self.txt_placeholder,
            background=color,
            foreground='#FCFAF2'
        )
        self.word.pack(anchor='center', ipady=10)

    def _set_text(self, txt):
        """Replace the currently displayed text."""
        self.txt_placeholder.set(txt)

    def get_text(self):
        """Poll the queue once, then reschedule itself via Tk's after()."""
        if not self.q.empty():
            self._set_text(self.q.get())
        self.word.after(1, self.get_text)
def push(q):
    """Test producer: put the numbers 0..99 on the queue, one every 0.5 s."""
    for idx in range(100):
        q.put(str(idx))
        print('push %d' % idx)
        time.sleep(0.5)
if __name__ == '__main__':
    # CLI stub: `--mode` is parsed but not used below (yet).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m',
        '--mode',
        type=str,
        default='sdData',
    )
    args = parser.parse_args()
    ##################### init #####################
    # Shared queue through which a producer process could feed text to the GUI.
    q = Queue()
    app = App(q)
    # p = Process(target=push, args=[q])
    # p.start()
    # Blocks until the window is closed.
    app.mainloop()
    # p.join()
|
{"hexsha": "5c7564a235a93b0b9da82ce27267878bfe7f5c6b", "size": 2021, "ext": "py", "lang": "Python", "max_stars_repo_path": "zynq/tmp.py", "max_stars_repo_name": "Roxbili/kws-demo", "max_stars_repo_head_hexsha": "7e0674f1407572fc8f148293b23fa20a5164bc5e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "zynq/tmp.py", "max_issues_repo_name": "Roxbili/kws-demo", "max_issues_repo_head_hexsha": "7e0674f1407572fc8f148293b23fa20a5164bc5e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "zynq/tmp.py", "max_forks_repo_name": "Roxbili/kws-demo", "max_forks_repo_head_hexsha": "7e0674f1407572fc8f148293b23fa20a5164bc5e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6463414634, "max_line_length": 60, "alphanum_fraction": 0.5343889164, "include": true, "reason": "import numpy", "num_tokens": 478}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 16:45:40 2020

@author: ogurcan

Draw a shell-model interaction network as a bipartite graph: shell
wavenumbers sit on the unit circle, triad nodes on smaller inner circles.
"""
import ast
import networkx as nx
import h5py as h5
import matplotlib.pylab as plt
import numpy as np

# nwflname='run-GOY/nwfile.pkl'
# nwflname='run-WS04-static/nwfile.pkl'
nwflname = 'run-NW04-static/nwfile.pkl'

gr = nx.read_gpickle(nwflname)
# one bipartite set holds the shell wavenumbers, the other the triad labels
kns = nx.bipartite.sets(gr)[0]
kn = np.sort(np.array([l for l in kns]))
strs = nx.bipartite.sets(gr)[1]
N = kn.shape[0]

# place the N shells evenly on the unit circle
pn = {kn[l]: np.array([np.cos(2*l*np.pi/N), np.sin(2*l*np.pi/N)]) for l in range(N)}
pt = dict()
for l in strs:
    # Triad labels are stringified tuples, e.g. "(0, 1, 2)". Parse them with
    # ast.literal_eval instead of exec()-ing arbitrary node names, which is
    # both safer and clearer.
    a = ast.literal_eval(l)
    if (a[2] - a[0]) == 2:
        # triads spanning exactly two shells are drawn at radius 0.6
        pt[l] = 0.6*np.array([np.cos(2*(a[0]+1)*np.pi/N), np.sin(2*(a[0]+1)*np.pi/N)])
    else:
        # all other triads at radius 0.4
        pt[l] = 0.4*np.array([np.cos(2*(a[0]+1)*np.pi/N), np.sin(2*(a[0]+1)*np.pi/N)])

plt.figure(figsize=(6, 6))
nx.draw_networkx_nodes(kn, pos=pn, node_size=300)
nx.draw_networkx_nodes(strs, pos=pt, node_shape='<', node_size=20)
pos = dict(pn, **pt)
nx.draw_networkx_edges(gr, pos=pos, width=0.5)
ln = {kn[l]: l for l in range(N)}
nx.draw_networkx_labels(gr.subgraph(kn), pos=pn, labels=ln, font_size=10, font_color='w')
|
{"hexsha": "695b48de342ba24b51eddfa1a0191132c77977d7", "size": 1095, "ext": "py", "lang": "Python", "max_stars_repo_path": "draw_bipartite_nw.py", "max_stars_repo_name": "gurcani/dycon", "max_stars_repo_head_hexsha": "64313471a9222682dce12f8623eb5d0563a8bb5c", "max_stars_repo_licenses": ["CECILL-B"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "draw_bipartite_nw.py", "max_issues_repo_name": "gurcani/dycon", "max_issues_repo_head_hexsha": "64313471a9222682dce12f8623eb5d0563a8bb5c", "max_issues_repo_licenses": ["CECILL-B"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "draw_bipartite_nw.py", "max_forks_repo_name": "gurcani/dycon", "max_forks_repo_head_hexsha": "64313471a9222682dce12f8623eb5d0563a8bb5c", "max_forks_repo_licenses": ["CECILL-B"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4166666667, "max_line_length": 85, "alphanum_fraction": 0.6639269406, "include": true, "reason": "import numpy,import networkx", "num_tokens": 416}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: 2018-02-20 14:46:10
# matplotlib.use('Qt5Agg')
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
def make_fig(sph):
    """Build the interactive 2x2 online-plot figure.

    sph: session-parameter handler providing SUBJECT_NAME, SUBJECT_WEIGHT
    and SESSION_DATETIME for the title. Returns (figure, axes) where axes
    is (bars, psych, chron, vars, vars-twin).
    """
    plt.ion()
    fig = plt.figure()  # figsize=(19.2, 10.8), dpi=100)
    bars_ax = plt.subplot2grid((2, 2), (0, 0), rowspan=1, colspan=1)
    psych_ax = plt.subplot2grid((2, 2), (0, 1), rowspan=1, colspan=1)
    chron_ax = plt.subplot2grid((2, 2), (1, 0), rowspan=1, colspan=1)
    vars_ax = plt.subplot2grid((2, 2), (1, 1), rowspan=1, colspan=1)
    vars_twin = vars_ax.twinx()
    fig.canvas.draw_idle()
    plt.show()
    fig.suptitle(f"{sph.SUBJECT_NAME} - {sph.SUBJECT_WEIGHT}gr - {sph.SESSION_DATETIME}")  # noqa
    return (fig, (bars_ax, psych_ax, chron_ax, vars_ax, vars_twin))
def update_fig(f, axes, tph):
    """Refresh all four panels from the current trial state and save a PNG
    snapshot next to the session data file."""
    ax_bars, ax_psych, ax_chron, ax_vars, ax_vars2 = axes
    plot_bars(get_barplot_data(tph), ax=ax_bars)
    plot_psych(get_psych_data(tph), ax=ax_psych)
    plot_chron(get_chron_data(tph), ax=ax_chron)
    plot_vars(get_vars_data(tph), ax=ax_vars, ax2=ax_vars2)
    plt.pause(0.001)
    snapshot = Path(tph.data_file_path).parent / "online_plot.png"
    f.savefig(snapshot)
def get_barplot_data(tph):
    """Collect the session counters displayed in the horizontal bar panel.

    tph: trial-parameter handler; reads trial_num, rc/ac trial counters,
    ntrials_correct, water_delivered and elapsed_time.
    """
    return {
        "trial_num": tph.trial_num,
        "ntrials_repeated": tph.rc.ntrials,
        "ntrials_adaptive": tph.ac.ntrials,
        "ntrials_correct": tph.ntrials_correct,
        "ntrials_err": tph.trial_num - tph.ntrials_correct,
        "water_delivered": np.round(tph.water_delivered, 3),
        "time_from_start": tph.elapsed_time,
    }
def get_psych_data(tph):
    """Proportion of counter-clockwise responses per signed contrast.

    Returns (sorted signed contrasts, proportion of responses < 0 for each);
    contrasts with no trials yet map to 0.
    """
    contrasts = np.array(tph.contrast_set)
    contrasts = np.append(contrasts, [-c for c in contrasts if c != 0])
    contrasts = np.sort(contrasts)
    signed = np.array(tph.signed_contrast_buffer)
    responses = np.array(tph.response_side_buffer)
    n_ccw = np.array([sum(responses[signed == c] < 0) for c in contrasts])
    n_trials = np.array([sum(signed == c) for c in contrasts])
    prop_ccw = [ccw / n if n != 0 else 0 for ccw, n in zip(n_ccw, n_trials)]
    return contrasts, prop_ccw
def get_chron_data(tph):
    """Median response time per signed contrast.

    Returns (sorted signed contrasts, median RT for each); contrasts with
    no trials yet (NaN median) map to 0.
    """
    contrasts = tph.contrast_set.copy()
    contrasts.extend([-c for c in contrasts])
    contrasts = np.sort(contrasts)
    signed = np.array(tph.signed_contrast_buffer)
    rt_buffer = np.array(tph.response_time_buffer)
    medians = [np.median(rt_buffer[signed == c]) for c in contrasts]
    medians = [m if not np.isnan(m) else 0 for m in medians]
    return contrasts, medians
def get_vars_data(tph):
    """Scalar session variables plus ambient-sensor readings.

    median_rt is in milliseconds (buffer values * 1000); sensor values come
    straight from tph.as_data.
    """
    return {
        "median_rt": np.median(tph.response_time_buffer) * 1000,
        "prop_correct": tph.ntrials_correct / tph.trial_num,
        "Temperature_C": tph.as_data["Temperature_C"],
        "AirPressure_mb": tph.as_data["AirPressure_mb"],
        "RelativeHumidity": tph.as_data["RelativeHumidity"],
    }
# plotters
def plot_bars(bar_data, ax=None):
    """Draw the summary panel as stacked horizontal bars.

    Rows (bottom to top): 0 = water delivered, 1 = performance
    (correct/error), 2 = trial types (repeated/adaptive), 3 = session
    duration (text only). bar_data comes from get_barplot_data().
    """
    if ax is None:
        # f = plt.figure()  # figsize=(19.2, 10.8), dpi=100)
        ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
    ax.cla()

    def make_bar_texts(ax, ypos, vars):
        # Annotate each stacked segment with its value. The for-else clause
        # runs once after the loop finishes and prints the running total
        # (`left`) in a faded style; note its x-position reuses the *last*
        # segment's width (`var`) for the offset.
        left = 0
        for var in vars:
            ax.text(
                left + (var * 0.15),
                ypos,
                str(var),
                color="black",
                fontweight="bold",
                size="x-large",
            )
            left += var
        else:
            ax.text(
                left + (var * 0.15),
                ypos,
                str(left),
                color="black",
                fontweight="bold",
                size="x-large",
                alpha=0.5,
            )

    width = 0.75
    # y-tick labels, bottom row first
    xlabels = [
        "Water\nDelivered\n(µl)",
        "Performance",
        "Trial\nTypes",
        "Session\nDuration",
    ]
    # y is only used to scale the x-axis (max(y) below), not plotted directly
    y = [
        bar_data["trial_num"],
        bar_data["ntrials_correct"],
        bar_data["water_delivered"],
        0,
    ]
    x = range(len(xlabels))  # the x locations for the groups
    # row 3: zero-width bar, duration shown as text only
    ax.barh(3, 0, width, color="black")
    # ax.barh(0, bar_data['trial_num'], width, color="gray")
    ax.text(
        max(y) / 10,
        3,
        str(bar_data["time_from_start"]),
        color="black",
        fontweight="bold",
        size="x-large",
    )
    # row 2: repeated + adaptive trials stacked
    ax.barh(2, bar_data["ntrials_repeated"], width, color="pink", label="Repeated")
    ax.barh(
        2,
        bar_data["ntrials_adaptive"],
        width,
        left=bar_data["ntrials_repeated"],
        color="orange",
        label="Adaptive",
    )
    make_bar_texts(ax, 2, [bar_data["ntrials_repeated"], bar_data["ntrials_adaptive"]])
    # row 1: correct + error trials stacked
    ax.barh(1, bar_data["ntrials_correct"], width, color="green", label="Correct")
    ax.barh(
        1,
        bar_data["ntrials_err"],
        width,
        left=bar_data["ntrials_correct"],
        color="red",
        label="Error",
    )
    make_bar_texts(ax, 1, [bar_data["ntrials_correct"], bar_data["ntrials_err"]])
    # row 0: water delivered, annotated past the bar's end
    ax.barh(0, bar_data["water_delivered"], width, color="blue")
    ax.text(
        bar_data["water_delivered"] + 1,
        0,
        str(bar_data["water_delivered"]),
        color="blue",
        fontweight="bold",
        size="x-large",
    )
    ax.set_yticks([i for i in x])
    ax.set_yticklabels(xlabels, minor=False)
    # leave 20% headroom on the x-axis
    ax.set_xlim([0, max(y) + (max(y) * 0.2)])
    ax.legend()
    ax.figure.canvas.draw_idle()
def plot_psych(psych_data, ax=None):
    """Draw the psychometric curve: proportion of CCW responses vs signed
    contrast, with reference lines at y=0.5 and x=0."""
    if ax is None:
        # f = plt.figure()  # figsize=(19.2, 10.8), dpi=100)
        ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
    ax.cla()
    contrasts, prop_ccw = psych_data
    # NaNs (no data yet) are drawn as 0
    prop_ccw = [0 if np.isnan(p) else p for p in prop_ccw]
    ax.plot(contrasts, prop_ccw, c="k", label="CCW responses", marker="o", ls="-")
    ax.axhline(0.5, color="gray", ls="--", alpha=0.5)
    ax.axvline(0.0, color="gray", ls="--", alpha=0.5)
    ax.set_ylim([-0.1, 1.1])
    ax.legend(loc="best")
    ax.grid()
    ax.figure.canvas.draw_idle()
def plot_chron(chron_data, ax=None):
    """Draw the chronometric curve: median response time vs signed
    contrast, with reference lines at y=0.5 and x=0."""
    if ax is None:
        # f = plt.figure()  # figsize=(19.2, 10.8), dpi=100)
        ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
    ax.cla()
    contrasts, medians = chron_data
    # NaNs (no data yet) are drawn as 0
    medians = [0 if np.isnan(m) else m for m in medians]
    ax.plot(contrasts, medians, c="k", label="Median time to respond", marker="o", ls="-")
    ax.axhline(0.5, color="gray", ls="--", alpha=0.5)
    ax.axvline(0.0, color="gray", ls="--", alpha=0.5)
    ax.legend(loc="best")
    ax.grid()
    ax.figure.canvas.draw_idle()
def plot_vars(vars_data, ax=None, ax2=None):
    """Bar chart of scalar session variables on two y-scales.

    Left axis: median RT (scaled /10) and temperature; right (twin) axis,
    clamped to [0, 1.1]: relative humidity and proportion correct.
    """
    if ax is None:
        # f = plt.figure()  # figsize=(19.2, 10.8), dpi=100)
        ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
        ax2 = ax.twinx()
    if ax2 is None:
        ax2 = ax.twinx()
    ax.cla()
    ax2.cla()
    # ax.figure.tight_layout()  # or right y-label is slightly clipped
    bar_width = 0.75
    slots = [0, 1, 2, 3, 4]
    rt_scaled = vars_data["median_rt"] / 10
    frac_correct = vars_data["prop_correct"]
    temperature = vars_data["Temperature_C"]
    humidity = vars_data["RelativeHumidity"] / 100
    ax.bar(slots[0], rt_scaled, bar_width, color="cyan", label="Median RT (10^1ms)")
    ax.bar(slots[1], temperature, bar_width, color="magenta", label="Temperature (ºC)")
    ax2.bar(slots[3], humidity, bar_width, color="yellow", label="Relative humidity")
    ax2.bar(slots[4], frac_correct, bar_width, color="black", label="Proportion correct")
    ax2.set_ylim([0, 1.1])
    ax.legend(loc="lower left")
    ax2.legend(loc="lower right")
    ax.figure.canvas.draw_idle()
    ax2.figure.canvas.draw_idle()
if __name__ == "__main__":
    # Module is import-only; plotting is driven by the task runner.
    pass
|
{"hexsha": "2d9a67f1574b1c6efa64e702f1b1155de9dee2f3", "size": 8074, "ext": "py", "lang": "Python", "max_stars_repo_path": "tasks/_iblrig_tasks_trainingChoiceWorld/online_plots.py", "max_stars_repo_name": "int-brain-lab/iblr", "max_stars_repo_head_hexsha": "18569278fc2d8cd3266adb2a5f660a43f8f2582e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2018-08-07T21:56:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T17:53:37.000Z", "max_issues_repo_path": "tasks/_iblrig_tasks_trainingChoiceWorld/online_plots.py", "max_issues_repo_name": "int-brain-lab/iblr", "max_issues_repo_head_hexsha": "18569278fc2d8cd3266adb2a5f660a43f8f2582e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 360, "max_issues_repo_issues_event_min_datetime": "2018-07-24T16:35:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T15:28:56.000Z", "max_forks_repo_path": "tasks/_iblrig_tasks_trainingChoiceWorld/online_plots.py", "max_forks_repo_name": "int-brain-lab/iblr", "max_forks_repo_head_hexsha": "18569278fc2d8cd3266adb2a5f660a43f8f2582e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-03-12T16:25:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-06T10:30:24.000Z", "avg_line_length": 29.4671532847, "max_line_length": 99, "alphanum_fraction": 0.6015605648, "include": true, "reason": "import numpy", "num_tokens": 2491}
|
""" This file implements the GA algorithm and acts as main(). """
# standard library
import multiprocessing as mp
import subprocess as sp
import logging
import glob
import shutil
import os
import time
import sys
from traceback import print_exc
from json import dumps, dump
from copy import deepcopy, copy
# external libraries
import numpy as np
from pkg_resources import require
# matador modules
import matador.compute
import matador.compute.slurm
from matador.scrapers.castep_scrapers import (
res2dict,
castep2dict,
cell2dict,
param2dict,
)
from matador.export import doc2res
from matador.export.utils import generate_hash
from matador.fingerprints.similarity import get_uniq_cursor
from matador.fingerprints.pdf import PDFFactory
from matador.utils.chem_utils import get_formula_from_stoich, get_root_source
from matador.hull import QueryConvexHull
# ilustrado modules
from .adapt import adapt
from .generation import Generation
from .fitness import FitnessCalculator
from .util import strip_useless, LOG, NewbornProcess
__version__ = require("ilustrado")[0].version
# As this class has many settings that are hacked directly into __dict__, disable these warnings.
# pylint: disable=access-member-before-definition
# pylint: disable=attribute-defined-outside-init
# pylint: disable bad-continuation
class ArtificialSelector:
""" ArtificialSelector takes an initial gene pool
and applies a genetic algorithm to optimise some
fitness function.
Keyword Arguments:
gene_pool (list(dict)) : initial cursor to use as "Generation 0",
seed (str) : seed name of cell and param files for CASTEP,
seed_prefix (str) : if not specifying a seed, this name will prefix all runs
fitness_metric (str) : currently either 'hull' or 'test',
hull (QueryConvexHull) : matador QueryConvexHull object to calculate distances,
res_path (str) : path to folder of res files to create hull, if no hull object passed
mutation_rate (float) : rate at which to perform single-parent mutations (DEFAULT: 0.5)
crossover_rate (float) : rate at which to perform crossovers (DEFAULT: 0.5)
num_generations (int) : number of generations to breed before quitting (DEFAULT: 5)
num_survivors (int) : number of structures to survive to next generation for breeding
(DEFAULT: 10)
population (int) : number of structures to breed in any given generation
(DEFAULT: 25)
failure_ratio (int) : maximum number of attempts per success (DEFAULT: 5)
elitism (float) : fraction of next generation to be comprised of elite
structures from previous generation (DEFAULT: 0.2)
best_from_stoich (bool) : whether to always include the best structure from a
stoichiomtery in the next generation,
mutations (list(str)) : list of mutation names to use,
structure_filter (fn(doc)) : any function that takes a matador doc and returns True
or False,
check_dupes (bool) : if True, filter relaxed structures for uniqueness on-the-fly (DEFAULT: True)
check_dupes_hull (bool) : compare pdf with all hull structures (DEFAULT: True)
sandbagging (bool) : whether or not to disfavour nearby compositions (DEFAULT: False)
minsep_dict (dict) : dictionary containing element-specific minimum separations, e.g.
{('K', 'K'): 2.5, ('K', 'P'): 2.0}. These should only be set such that
atoms do not overlap; let the DFT deal with bond lengths. No effort is made
to push apart atoms that are too close, the trial will simply be discarded. (DEFAULT: None)
max_num_mutations (int) : maximum number of mutations to perform on a single structure,
max_num_atoms (int) : most atoms allowed in a structure post-mutation/crossover,
nodes (list(str)) : list of node names to run on,
ncores (int or list(int)) : specifies the number of cores used by listed `nodes` per thread,
nprocs (int) : total number of processes,
recover_from (str) : recover from previous run_hash, by default ilustrado will recover
if it finds only one run hash in the folder
load_only (bool) : only load structures, do not continue breeding (DEFAULT: False)
executable (str) : path to DFT binary (DEFAULT: castep)
compute_mode (str) : either `direct`, `slurm`, `manual` (DEFAULT: direct)
max_num_nodes (int) : amount of array jobs to run per generation in `slurm` mode,
walltime_hrs (int) : maximum walltime for a SLURM array job,
slurm_template (str) : path to template slurm script that includes module loads etc,
entrypoint (str) : path to script that initialised this object, such that it can
be called by SLURM
debug (bool) : maximum printing level
testing (bool) : run test code only if true
verbosity (int) : extra printing level,
loglevel (str) : follows std library logging levels.
"""
def __init__(self, **kwargs):
""" This is the main entrypoint. Initialises parameters,
gene pool and begins the GA.
"""
prop_defaults = {
# important, required parameters
"gene_pool": None,
"seed": None,
"seed_prefix": None,
"fitness_metric": "hull",
"hull": None,
"res_path": None,
# recovery and loading parameters
"recover_from": None,
"load_only": False,
# GA numerical parameters
"mutation_rate": 1.0,
"crossover_rate": 0.0,
"num_generations": 5,
"num_survivors": 10,
"population": 25,
"elitism": 0.2,
"max_num_mutations": 3,
"max_num_atoms": 30,
# other GA options
"best_from_stoich": True,
"mutations": None,
"structure_filter": None,
"check_dupes": True,
"check_dupes_hull": True,
"failure_ratio": 5,
"sandbagging": False,
"minsep_dict": None,
# logistical and compute parameters
"compute_mode": "direct",
"ase_calculator": None,
"nodes": None,
"ncores": None,
"nprocs": 1,
"relaxer_params": None,
"executable": "castep",
"max_num_nodes": None,
"walltime_hrs": None,
"slurm_template": None,
"entrypoint": None,
# debug and logging parameters
"debug": False,
"testing": False,
"emt": False,
"verbosity": 0,
"loglevel": "info",
}
# cache current params to reload again later
self.current_params = deepcopy(prop_defaults)
self.current_params.update(kwargs)
self.__dict__.update(prop_defaults)
self.__dict__.update(kwargs)
splash_screen = (
r" _ _ _ _" + "\n"
r" (_)| | | | | |" + "\n"
r" _ | | _ _ ___ | |_ _ __ __ _ __| | ___" + "\n"
r" | || || | | |/ __|| __|| '__| / _` | / _` | / _ \ " + "\n"
r" | || || |_| |\__ \| |_ | | | (_| || (_| || (_) |" + "\n"
r" |_||_| \__,_||___/ \__||_| \__,_| \__,_| \___/" + "\n\n"
"****************************************************\n"
)
print("\033[92m\033[1m")
print("\n" + splash_screen)
print("\033[0m")
print("Loading harsh realities of life...", end="")
# post-load checks
if self.relaxer_params is None:
self.relaxer_params = dict()
self.next_gen = None
if isinstance(self.ncores, list):
if len(self.ncores) != len(self.nodes):
raise RuntimeError(
"Length mismatch between ncores and nodes list: {} vs {}".format(
self.ncores, self.nodes
)
)
# set up computing resource
if self.compute_mode not in ("slurm", "direct", "manual"):
raise RuntimeError("`compute_mode` must be one of `slurm`, `direct`, `manual`.")
if self.compute_mode == "slurm":
errors = []
if not isinstance(self.walltime_hrs, int):
errors.append(
"`walltime_hrs` specified incorrectly {}".format(self.walltime_hrs)
)
elif not self.walltime_hrs > 0:
errors.append(
"`walltime_hrs` specified incorrectly {}".format(self.walltime_hrs)
)
if not isinstance(self.max_num_nodes, int):
errors.append(
"`max_num_nodes` specified incorrectly {}".format(
self.max_num_nodes
)
)
elif not self.max_num_nodes > 0:
errors.append(
"`max_num_nodes` specified incorrectly {}".format(
self.max_num_nodes
)
)
if not isinstance(self.slurm_template, str):
errors.append(
"`slurm_template` must be a valid path, not {}".format(
self.slurm_template
)
)
elif not os.path.isfile(self.slurm_template):
errors.append(
"`slurm_template` file {} does not exist".format(
self.slurm_template
)
)
if errors:
raise RuntimeError(
"Invalid specification for `compute_mode='slurm'`, errors: \n{}".format(
"\n".join(errors)
)
)
self.slurm_dict = matador.compute.slurm.get_slurm_env()
if self.compute_mode == "direct":
if self.nodes is not None:
if self.nprocs != len(self.nodes):
logging.warning(
"Specified procs {} being replaced by number of nodes {}".format(
self.nprocs, len(self.nodes)
)
)
self.nprocs = len(self.nodes)
# set up GA logistics
self.run_hash = generate_hash()
self.generations = [] # list to store all generations
self.num_elite = int(self.elitism * self.num_survivors)
self.num_accepted = self.num_survivors - self.num_elite
self.max_attempts = self.failure_ratio * self.population
if self.num_survivors > self.population + self.num_elite:
raise RuntimeError(
"More survivors than total population: {} vs {}".format(
self.num_survivors, self.population + self.num_elite
)
)
if self.num_accepted > self.population:
raise RuntimeError(
"More accepted than total population: {} vs {}".format(
self.num_accepted, self.population + self.num_elite
)
)
if self.mutations is not None and isinstance(self.mutations, str):
self.mutations = [self.mutations]
else:
self.mutations = ["permute_atoms", "random_strain", "nudge_positions", "vacancy", "transmute_atoms"]
try:
from VoronoiNetwork import Vornetclass
self.mutations.append("voronoi")
except ImportError:
LOG.warning("Disabling Voronoi mutation.")
pass
if not isinstance(self.max_num_mutations, int) and self.max_num_mutations < 0:
raise RuntimeError(
"`max_num_mutations` must be >= 0, not {}".format(
self.max_num_mutations
)
)
if not isinstance(self.max_num_atoms, int) and self.max_num_atoms < 1:
raise RuntimeError(
"`max_num_atoms` must be >= 1, not {}".format(self.max_num_atoms)
)
# recover from specified run
if self.recover_from is not None:
if isinstance(self.recover_from, str):
self.run_hash = self.recover_from.split("/")[-1]
# try to look for gen0 files, if multiple are found, safely exit
else:
gen0_files = glob.glob("*gen0.json")
if len(gen0_files) > 1:
raise SystemExit(
"Several incomplete runs found in this folder, please tidy up before re-running."
)
if len(gen0_files) == 1:
self.run_hash = gen0_files[0].split("/")[-1].replace("-gen0.json", "")
self.recover_from = self.run_hash
else:
print("No recovery possible, starting fresh run.")
# set up logging
numeric_loglevel = getattr(logging, self.loglevel.upper(), None)
if not isinstance(numeric_loglevel, int):
raise SystemExit(
self.loglevel,
"is an invalid log level, please use either `info`, `debug` or `warning`.",
)
file_handler = logging.FileHandler(self.run_hash + ".log", mode="a")
file_handler.setLevel(numeric_loglevel)
file_handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s | %(levelname)8s: %(message)s")
)
LOG.addHandler(file_handler)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(numeric_loglevel)
stream_handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s | %(levelname)8s: %(message)s")
)
LOG.addHandler(stream_handler)
LOG.info("Starting up ilustrado {}".format(__version__))
# initialise fitness calculator
if self.fitness_metric == "hull" and self.hull is None:
if self.res_path is not None and os.path.isfile(self.res_path):
res_files = glob.glob("{}/*.res".format(self.res_path))
if not res_files:
raise SystemExit("No structures found in {}".format(self.res_path))
self.cursor = []
for res in res_files:
self.cursor.append(res2dict(res))
self.hull = QueryConvexHull(cursor=self.cursor)
raise SystemExit(
"Need to pass a QueryConvexHull object to use hull distance metric."
)
if self.fitness_metric in ["dummy", "hull_test"]:
self.testing = True
if self.testing and self.compute_mode == "slurm":
raise SystemExit("Please use `compute_mode=direct` for testing.")
print("Done!")
self.fitness_calculator = FitnessCalculator(
fitness_metric=self.fitness_metric,
hull=self.hull,
sandbagging=self.sandbagging,
debug=self.debug,
)
LOG.debug("Successfully initialised fitness calculator.")
# if we're checking hull pdfs too, make this list now
if self.check_dupes_hull:
print("Computing extra PDFs from hull...")
PDFFactory(self.hull.cursor)
self.extra_pdfs = deepcopy(self.hull.cursor)
# remove pdf object from cursor so generation can be serialized
for ind, _ in enumerate(self.hull.cursor):
del self.hull.cursor[ind]["pdf"]
else:
self.extra_pdfs = None
LOG.info("Successfully initialised similarity lists.")
if self.recover_from is not None:
print("Attempting to recover from run {}".format(self.run_hash))
if isinstance(self.recover_from, str):
LOG.info(
"Attempting to recover from previous run {}".format(self.run_hash)
)
self.recover()
if not self.load_only:
self.start()
def start(self):
""" Start running GA. """
print("Initialising quantum mechanics...", end=" ")
# read parameters for relaxation from seed files
if self.seed is not None:
seed = self.seed
errors = []
self.cell_dict, success_cell = cell2dict(seed, db=False)
self.param_dict, success_param = param2dict(seed, db=False)
if not success_cell:
errors.append("Failed to read cell file: {}".format(self.cell_dict))
if not success_param:
errors.append("Failed to read param file: {}".format(self.param_dict))
if errors:
raise RuntimeError("{}".format(errors.join("\n")))
else:
self.seed = "ilustrado"
if self.seed_prefix is not None:
self.seed = self.seed_prefix
self.cell_dict = {}
self.param_dict = {}
print("Done!\n")
LOG.debug("Successfully initialised cell and param files.")
if self.recover_from is None:
self.seed_generation_0(self.gene_pool)
if self.debug:
print(self.nodes)
if self.nodes is not None:
LOG.info("Running on nodes: {}".format(" ".join(self.nodes)))
elif self.compute_mode == "slurm":
LOG.info("Running through SLURM queue")
else:
LOG.info("Running on localhost only")
if self.debug:
print(
"Current number of generations: {}. Target number: {}".format(
len(self.generations), self.num_generations
)
)
# run GA self.num_generations
while len(self.generations) < self.num_generations:
self.breed_generation()
LOG.info("Successfully bred generation {}".format(len(self.generations)))
assert len(self.generations) == self.num_generations
self.finalise_files_for_export()
print("Reached target number of generations!")
print("Completed GA!")
LOG.info("Reached target number of generations!")
LOG.info("Completed GA!")
    def breed_generation(self):
        """ Build next generation from mutations/crossover of current and
        perform relaxations if necessary.

        Flow: (1) create an empty Generation if none is pending, (2) fill it
        via continuous_birth (direct) or batch_birth (slurm/manual), (3) rank,
        optionally clean, apply elitism, then dump and reset via
        reset_and_dump.
        """
        # initialise next_gen
        if self.next_gen is None:
            self.next_gen = Generation(
                self.run_hash,
                len(self.generations),
                self.num_survivors,
                self.num_accepted,
                fitness_calculator=self.fitness_calculator,
            )
        # newborns is a list of structures, initially raw then relaxed
        if self.compute_mode == "direct":
            self.continuous_birth()
        elif self.compute_mode in ("slurm", "manual"):
            self.batch_birth()
        # an undersized generation is tolerated with a warning only
        if len(self.next_gen) < self.population:
            LOG.warning("Next gen is smaller than desired population.")
            # assert len(self.next_gen) >= self.population
        self.next_gen.rank()
        LOG.info("Ranked structures in generation {}".format(len(self.generations)))
        # duplicate/garbage removal is skipped in testing mode
        if not self.testing:
            cleaned = self.next_gen.clean()
            LOG.info(
                "Cleaned structures in generation {}, removed {}".format(
                    len(self.generations), cleaned
                )
            )
        self.enforce_elitism()
        self.reset_and_dump()
        print(self.generations[-1])
def write_unrelaxed_generation(self):
""" Perform mutations and write res files for the resulting
structures. Additionally, dump an unrelaxed json file.
"""
while len(self.next_gen) < self.max_attempts:
newborn = self.birth_new_structure()
self.next_gen.birth(newborn)
for newborn in self.next_gen:
newborn = strip_useless(newborn)
doc2res(newborn, newborn["source"][0], info=False)
self.next_gen.dump("unrelaxed")
    def batch_birth(self):
        """ Assess whether a generation has been relaxed already. This is done by
        checking for the existence of a file called <run_hash>-genunrelaxed.json.

        If so, match the relaxations up with the cached unrelaxed structures
        and rank them ready for the next generation.

        If not, create a new generation of structures, dump the unrelaxed structures to file,
        create the jobscripts to relax them, submit them and the job to check up on the relaxations,
        then exit.
        """
        print("Beginning birthing of generation {}...".format(len(self.generations)))
        fname = "{}-genunrelaxed.json".format(self.run_hash)
        if os.path.isfile(fname):
            LOG.info("Found existing generation to be relaxed...")
            # load the unrelaxed structures into a dummy generation
            assert os.path.isfile(fname)
            unrelaxed_gen = Generation(
                self.run_hash,
                len(self.generations),
                self.num_survivors,
                self.num_accepted,
                dumpfile=fname,
                fitness_calculator=None,
            )
            # check to see which unrelaxed structures completed successfully
            LOG.info("Scanning for completed relaxations...")
            for _, newborn in enumerate(unrelaxed_gen):
                completed_castep_filename = "completed/{}.castep".format(newborn["source"][0])
                completed_res_filename = "completed/{}.res".format(newborn["source"][0])
                doc = None
                s = None
                if os.path.isfile(completed_castep_filename):
                    # NOTE(review): castep2dict is given the *res* filename
                    # here even though the branch is gated on the .castep file
                    # existing -- possibly it should receive
                    # completed_castep_filename; confirm against the
                    # matador.scrapers.castep_scrapers API.
                    doc, s = castep2dict(completed_res_filename, db=True)
                elif os.path.isfile(completed_res_filename):
                    doc, s = res2dict(completed_res_filename, db=True)
                    # if we find a res file in a completed folder, assumed it was relaxed
                    doc["optimised"] = True
                # if all was a success, then "birth" the structure, after checking for uniqueness
                if s and isinstance(doc, dict):
                    newborn = strip_useless(newborn)
                    doc = strip_useless(doc)
                    newborn.update(doc)
                    assert newborn.get("parents") is not None
                    LOG.info("Scraping result for {}".format(newborn["source"][0]))
                    self.scrape_result(newborn)
                else:
                    LOG.warning(
                        "Failed to add {}, data found: {}".format(newborn["source"][0], doc)
                    )
            # if there are not enough unrelaxed structures after that run, clean up then resubmit
            LOG.info(
                "Found {} structures out of target {}".format(
                    len(self.next_gen), self.population
                )
            )
            if len(self.next_gen) < self.population:
                LOG.info("Initialising new relaxation jobs...")
                num_remaining = matador.compute.reset_job_folder()
                # check if we can even finish this generation
                if num_remaining < self.population - len(self.next_gen):
                    LOG.warning(
                        "There were too many failures, not enough remaining calculations to reach target."
                    )
                    LOG.warning(
                        "Consider restarting with a larger allowed failure_ratio."
                    )
                    raise SystemExit(
                        "Failed to return enough successful structures to continue, exiting..."
                    )
                if self.compute_mode == "slurm":
                    # adjust number of nodes so we don't get stuck in the queue
                    # NOTE(review): the log line below prints max_num_nodes
                    # *before* it is reassigned on the next line, so it logs
                    # the pre-adjustment value -- likely the two lines were
                    # meant to be swapped.
                    if self.max_num_nodes > num_remaining:
                        LOG.info("Adjusted max num nodes to {}".format(self.max_num_nodes))
                        self.max_num_nodes = self.population - len(self.next_gen)
                    self.slurm_submit_relaxations_and_monitor()
                LOG.info("Exiting monitor...")
                exit(0)
            # otherwise, remove unfinished structures from job file and release control of this generation
            else:
                LOG.info("Found enough structures to continue!".format())
                count = 0
                for doc in unrelaxed_gen:
                    structure = doc["source"][0] + ".res"
                    if os.path.isfile(structure):
                        os.remove(structure)
                        count += 1
                LOG.info("Removed {} structures from job folder.".format(count))
                return
        # otherwise, generate a new unrelaxed generation and submit
        else:
            LOG.info("Initialising new generation...")
            self.write_unrelaxed_generation()
            if self.compute_mode == "slurm":
                self.slurm_submit_relaxations_and_monitor()
            LOG.info("Exiting monitor...")
            exit(0)
    def slurm_submit_relaxations_and_monitor(self):
        """ Prepare and submit the slurm jobs for this generation: an array
        job that runs the relaxations via run3, plus a single monitor job
        (this entrypoint script itself) that depends on the array job and
        scrapes its results when it completes.

        Raises:
            SystemExit: if any submission fails; all jobs matching this
                run's hash are cancelled first.
        """
        LOG.info("Preparing to submit slurm scripts...")
        relax_fname = "{}_relax.job".format(self.run_hash)
        # override jobname with this run's hash to allow for selective job killing
        self.slurm_dict["SLURM_JOB_NAME"] = self.run_hash
        compute_string = "run3 {}".format(self.seed)
        matador.compute.slurm.write_slurm_submission_script(
            relax_fname,
            self.slurm_dict,
            compute_string,
            self.walltime_hrs,
            template=self.slurm_template,
        )
        # no point requesting more array tasks than allowed relaxation attempts
        if self.max_num_nodes > self.max_attempts:
            self.max_num_nodes = self.max_attempts
            LOG.info("Adjusted max num nodes to {}".format(self.max_num_nodes))
        # prepare script to read in results: re-run this entrypoint, appending
        # its stdout/stderr to the ilustrado log files (walltime 1 hr)
        monitor_fname = "{}_monitor.job".format(self.run_hash)
        compute_string = "python {} >> ilustrado.out 2>> ilustrado.err".format(
            self.entrypoint
        )
        matador.compute.slurm.write_slurm_submission_script(
            monitor_fname,
            self.slurm_dict,
            compute_string,
            1,
            template=self.slurm_template,
        )
        # submit jobs, if any exceptions, cancel all jobs
        try:
            array_job_id = matador.compute.slurm.submit_slurm_script(
                relax_fname, num_array_tasks=self.max_num_nodes
            )
            LOG.info("Submitted job array: {}".format(array_job_id))
            # the monitor job only starts after the whole array job finishes
            monitor_job_id = matador.compute.slurm.submit_slurm_script(
                monitor_fname, depend_on_job=array_job_id
            )
            LOG.info("Submitted monitor job: {}".format(monitor_job_id))
        except Exception as exc:
            LOG.error("Something went wrong, trying to cancel all jobs: {}".format(exc))
            output = matador.compute.slurm.scancel_all_matching_jobs(name=self.run_hash)
            LOG.error("scancel output: {}".format(output))
            raise SystemExit("Something went wrong, please check the log file.")
    def continuous_birth(self):
        """ Create new generation and relax "as they come", filling the compute
        resources allocated.

        Newborn structures are bred and dispatched to free cores/nodes as they
        become available, until either the target population is reached or
        ``self.max_attempts`` relaxations have been used. Each relaxation runs
        in its own ``mp.Process`` and reports back through its own queue.
        """
        newborns = []
        procs = []
        # queues is a list of mp.Queues where return values will end up
        queues = []
        # work out the initially free compute resources; a node of None means
        # "run locally"
        if self.nodes is None:
            free_nodes = self.nprocs * [None]
            if isinstance(self.ncores, list):
                free_cores = self.nprocs * [None]
            else:
                free_cores = self.nprocs * [self.ncores]
        else:
            free_nodes = deepcopy(self.nodes)
            if isinstance(self.ncores, list):
                free_cores = deepcopy(self.ncores)
            else:
                free_cores = len(self.nodes) * [self.ncores]
        attempts = 0
        # print the header of the live status table
        print("Computing generation {}:".format(len(self.generations)))
        print(89 * "─")
        print(
            "{:^25} {:^10} {:^10} {:^10} {:^30}".format(
                "ID", "Formula", "# atoms", "Status", "Mutations"
            )
        )
        print(89 * "─")
        # print any recovered structures that already exist
        if self.next_gen:
            for _, structure in enumerate(self.next_gen):
                print(
                    "{:^25} {:^10} {:^10} {:^10} {:^30}".format(
                        structure["source"][0],
                        get_formula_from_stoich(structure["stoichiometry"]),
                        structure["num_atoms"],
                        "Recovered",
                        ", ".join(structure["mutations"]),
                    )
                )
            # reserve the recovered names so new structures don't reuse them
            self.used_sources = [doc["source"][0] for doc in self.next_gen]
        else:
            self.used_sources = []
        try:
            finished = False
            while attempts < self.max_attempts and not finished:
                # if we've reached the target popn, try to kill remaining processes nicely
                if len(self.next_gen) >= self.population:
                    finished = True
                    # while there are still processes running, try to kill them with kill files
                    # that should end the job at the completion of the next CASTEP run
                    self._kill_all_gently(procs, newborns, queues)
                # are we using all nodes? if not, start some processes
                elif len(procs) < self.nprocs and len(self.next_gen) < self.population:
                    # generate structure
                    newborn = self.birth_new_structure()
                    newborn_id = len(newborns)
                    newborns.append(newborn)
                    # clear up and assess CPU resources
                    node = free_nodes.pop()
                    ncores = free_cores.pop()
                    # actually relax structure (or not, if testing is turned on)
                    # TODO: refactor to be more general
                    if self.ase_calculator:
                        from ilustrado.util import AseRelaxation
                        queues.append(mp.Queue())
                        relaxer = AseRelaxation(newborns[-1], queues[-1], calculator=self.ase_calculator)
                    else:
                        # FakeComputeTask mimics ComputeTask without running CASTEP
                        if self.testing:
                            from ilustrado.util import FakeComputeTask as ComputeTask
                        else:
                            from matador.compute import ComputeTask
                        queues.append(mp.Queue())
                        relaxer = ComputeTask(
                            ncores=ncores,
                            nnodes=None,
                            node=node,
                            res=newborns[-1],
                            param_dict=self.param_dict,
                            cell_dict=self.cell_dict,
                            verbosity=1,
                            killcheck=True,
                            reopt=False,
                            executable=self.executable,
                            output_queue=queues[-1],
                            start=False,
                            **self.relaxer_params
                        )
                    # store proc object with structure ID, node name, output queue and number of cores
                    procs.append(
                        NewbornProcess(
                            newborn_id,
                            node,
                            mp.Process(target=relaxer.relax),
                            ncores=ncores,
                        )
                    )
                    procs[-1].process.start()
                    LOG.info(
                        "Initialised relaxation for newborn {} on node {} with {} cores.".format(
                            ", ".join(newborns[-1]["source"]), node, ncores
                        )
                    )
                # are we using all nodes? if so, are they all still running?
                elif (
                    all([proc.process.is_alive() for proc in procs])
                    and len(procs) == self.nprocs
                ):
                    # poll processes every second
                    time.sleep(1)
                # so we were using all nodes, but some have died...
                else:
                    LOG.debug("Suspected at least one dead node")
                    # then find the dead ones, collect their results and
                    # delete them so we're no longer using all nodes
                    found_node = False
                    for ind, proc in enumerate(procs):
                        if not proc.process.is_alive():
                            LOG.debug("Found dead node {}".format(proc.node))
                            try:
                                result = queues[ind].get(timeout=60)
                            except Exception:
                                # treat a silent process as a failed relaxation
                                result = False
                                LOG.warning(
                                    "Node {} failed to write to queue for newborn {}".format(
                                        proc.node,
                                        ", ".join(newborns[proc.newborn_id]["source"]),
                                    )
                                )
                            if isinstance(result, dict):
                                self.scrape_result(result, proc=proc, newborns=newborns)
                            try:
                                procs[ind].process.join(timeout=10)
                                LOG.debug(
                                    "Process {proc.newborn_id} on node {proc.node} died gracefully.".format(
                                        proc=proc
                                    )
                                )
                            except Exception:
                                LOG.warning(
                                    "Process {proc.newborn_id} on node {proc.node} has not died gracefully.".format(
                                        proc=proc
                                    )
                                )
                                procs[ind].process.terminate()
                                LOG.warning(
                                    "Process {proc.newborn_id} on node {proc.node} terminated forcefully.".format(
                                        proc=proc
                                    )
                                )
                            # NOTE(review): the node is only recycled when a
                            # result came back — confirm failed nodes are
                            # intentionally retired for the rest of the gen
                            if result is not False:
                                free_nodes.append(proc.node)
                                free_cores.append(proc.ncores)
                            del procs[ind]
                            del queues[ind]
                            attempts += 1
                            found_node = True
                            break
                    # new_free_nodes, new_free_cores, found_node, extra_attempts = self._collect_from_nodes(
                    # procs, newborns, queues
                    # )
                    # attempts += extra_attempts
                    # if new_free_nodes:
                    # free_nodes.append(new_free_nodes)
                    # free_cores.append(new_free_cores)
                    if not found_node:
                        time.sleep(10)
            break
        except Exception as exc:
            LOG.warning("Something has gone terribly wrong...")
            LOG.error("Exception caught:", exc_info=True)
            print_exc()
            # clean up on error/interrupt
            if len(procs) > 1:
                self.kill_all(procs)
            raise exc
        LOG.info("No longer breeding structures in this generation.")
        # clean up at end either way
        # NOTE(review): a single leftover process is never killed here
        # (> 1 rather than >= 1) — confirm whether that is intended
        if len(procs) > 1:
            LOG.info(
                "Trying to kill {} on {} processes.".format(self.executable, len(procs))
            )
            self.kill_all(procs)
        if attempts >= self.max_attempts:
            LOG.warning("Failed to return enough successful structures to continue...")
            print(
                "Failed to return enough successful structures to continue, exiting..."
            )
            exit()
def enforce_elitism(self):
""" Add elite structures from previous generations
to bourgeoisie of current generation, through the merit
of their ancestors alone.
"""
# add random elite structures from previous gen
if self.num_elite <= len(self.generations[-1].bourgeoisie):
probabilities = (
np.asarray([doc["fitness"] for doc in self.generations[-1].bourgeoisie])
+ 0.0001
)
probabilities /= np.sum(probabilities)
elites = deepcopy(
np.random.choice(
self.generations[-1].bourgeoisie,
self.num_elite,
replace=False,
p=probabilities,
)
)
else:
elites = deepcopy(self.generations[-1].bourgeoisie)
if self.debug:
for doc in elites:
print(
"Adding doc {} at {} eV/atom".format(
" ".join(doc["text_id"]), doc["hull_distance"]
)
)
self.next_gen.set_bourgeoisie(
elites=elites, best_from_stoich=self.best_from_stoich
)
LOG.info("Added elite structures from previous generation to next gen.")
LOG.info("New length of next gen: {}.".format(len(self.next_gen)))
LOG.info(
"New length of bourgeoisie: {}.".format(len(self.next_gen.bourgeoisie))
)
def reset_and_dump(self):
""" Add now complete generation to generation list, reset
the next_gen variable and write dump files.
"""
# copy next generation to list of generations
self.generations.append(copy(self.next_gen))
# reset next_gen ready for, well, the next gen
self.next_gen = None
assert self.generations[-1] is not None
LOG.info(
"Added current generation {} to generation list.".format(
len(self.generations) - 1
)
)
# remove interim dump file and create new ones for populace and bourgeoisie
self.generations[-1].dump(len(self.generations) - 1)
self.generations[-1].dump_bourgeoisie(len(self.generations) - 1)
if os.path.isfile("{}-gencurrent.json".format(self.run_hash)):
os.remove("{}-gencurrent.json".format(self.run_hash))
if os.path.isfile("{}-genunrelaxed.json".format(self.run_hash)):
os.remove("{}-genunrelaxed.json".format(self.run_hash))
LOG.info(
"Dumped generation file for generation {}".format(len(self.generations) - 1)
)
def birth_new_structure(self):
""" Generate a new structure from current settings.
Returns:
dict: newborn structure to be optimised
"""
possible_parents = (
self.generations[-1].populace
if len(self.generations) == 1
else self.generations[-1].bourgeoisie
)
newborn = adapt(
possible_parents,
self.mutation_rate,
self.crossover_rate,
mutations=self.mutations,
max_num_mutations=self.max_num_mutations,
max_num_atoms=self.max_num_atoms,
structure_filter=self.structure_filter,
minsep_dict=self.minsep_dict,
debug=self.debug,
)
newborn_source_id = len(self.next_gen)
if self.compute_mode == "direct":
while (
"{}-GA-{}-{}x{}".format(
self.seed, self.run_hash, len(self.generations), newborn_source_id
)
in self.used_sources
):
newborn_source_id += 1
self.used_sources.append(
"{}-GA-{}-{}x{}".format(
self.seed, self.run_hash, len(self.generations), newborn_source_id
)
)
newborn["source"] = [
"{}-GA-{}-{}x{}".format(
self.seed, self.run_hash, len(self.generations), newborn_source_id
)
]
LOG.info(
"Initialised newborn {} with mutations ({})".format(
", ".join(newborn["source"]), ", ".join(newborn["mutations"])
)
)
return newborn
    def scrape_result(self, result, proc=None, newborns=None):
        """ Check process for result and scrape into self.next_gen if successful,
        with duplicate detection if desired. If the optional arguments are provided,
        extra logging info will be found when running in `direct` mode.

        A status line (Relaxed/Duplicate/Failed) is always printed for the
        structure. Duplicates are appended to ``<run_hash>-dupe.json`` and
        failures to ``<run_hash>-failed.json``.

        Parameters:
            result (dict): containing output from process

        Keyword Arguments:
            proc (tuple) : standard process tuple from above,
            newborns (list): of new structures to append result to.
        """
        if self.debug:
            if proc is not None:
                print(proc)
            print(dumps(result, sort_keys=True))
        if result.get("optimised"):
            status = "Relaxed"
            if proc is not None:
                LOG.debug(
                    "Newborn {} successfully optimised".format(
                        ", ".join(newborns[proc.newborn_id]["source"])
                    )
                )
            # fall back to the provenance recorded at birth if the relaxer
            # did not propagate parents/mutations through
            # NOTE(review): this branch dereferences ``proc``/``newborns`` —
            # confirm callers never pass proc=None with parentless results
            if result.get("parents") is None:
                LOG.warning(
                    "Failed to get parents for newborn {}.".format(
                        ", ".join(newborns[proc.newborn_id]["source"])
                    )
                )
                result["parents"] = newborns[proc.newborn_id]["parents"]
                result["mutations"] = newborns[proc.newborn_id]["mutations"]
            result = strip_useless(result)
            dupe = False
            if self.check_dupes:
                # compare against all previous generations (and extra PDFs)
                dupe = self.is_newborn_dupe(result, extra_pdfs=self.extra_pdfs)
            if dupe:
                status = "Duplicate"
                if proc is not None:
                    LOG.debug(
                        "Newborn {} is a duplicate and will not be included.".format(
                            ", ".join(newborns[proc.newborn_id]["source"])
                        )
                    )
                else:
                    LOG.debug(
                        "Newborn {} is a duplicate and will not be included.".format(
                            result["source"][0]
                        )
                    )
                # keep a record of rejected duplicates for post-mortem
                with open(self.run_hash + "-dupe.json", "a") as f:
                    dump(result, f, sort_keys=False, indent=2)
            if not dupe:
                self.next_gen.birth(result)
                if proc is not None:
                    LOG.info(
                        "Newborn {} added to next generation.".format(
                            ", ".join(newborns[proc.newborn_id]["source"])
                        )
                    )
                else:
                    LOG.info(
                        "Newborn {} added to next generation.".format(
                            result["source"][0]
                        )
                    )
            LOG.info("Current generation size: {}".format(len(self.next_gen)))
            # refresh the interim dump so an interrupted run can recover
            self.next_gen.dump("current")
            LOG.debug("Dumping json file for interim generation...")
        else:
            status = "Failed"
            result = strip_useless(result)
            # keep a record of failed relaxations for post-mortem
            with open(self.run_hash + "-failed.json", "a") as f:
                dump(result, f, sort_keys=False, indent=2)
        print(
            "{:^25} {:^10} {:^10} {:^10} {:^30}".format(
                result["source"][0],
                get_formula_from_stoich(result["stoichiometry"]),
                result["num_atoms"],
                status,
                ", ".join(result["mutations"]),
            )
        )
def kill_all(self, procs):
""" Loop over processes and kill them all.
Parameters:
procs (list): list of :obj:`NewbornProcess` in form documented above.
"""
for proc in procs:
if self.nodes is not None:
sp.run(
["ssh", proc.node, "pkill {}".format(self.executable)],
timeout=15,
stdout=sp.DEVNULL,
shell=False,
)
proc.process.terminate()
    def recover(self):
        """ Attempt to recover previous generations from files in cwd
        named '<run_hash>_gen{}.json'.format(gen_idx).

        Reloads every completed generation, rebuilds or reloads each
        generation's bourgeoisie, and, when an interim dump exists (and we
        are not in slurm mode), reloads the incomplete current generation
        into ``self.next_gen``.
        """
        if not os.path.isfile(("{}-gen0.json").format(self.run_hash)):
            exit("Failed to load run, files missing for {}".format(self.run_hash))
        # an interim dump implies the last generation did not finish
        if (
            os.path.isfile(("{}-gencurrent.json").format(self.run_hash))
            and self.compute_mode != "slurm"
        ):
            incomplete = True
            LOG.info("Found incomplete generation for {}".format(self.run_hash))
        else:
            incomplete = False
        try:
            i = 0
            # load consecutive generation dumps until one is missing
            while os.path.isfile("{}-gen{}.json".format(self.run_hash, i)):
                LOG.info(
                    "Trying to load generation {} from run {}.".format(i, self.run_hash)
                )
                fname = "{}-gen{}.json".format(self.run_hash, i)
                self.generations.append(
                    Generation(
                        self.run_hash,
                        i,
                        self.num_survivors,
                        self.num_accepted,
                        dumpfile=fname,
                        fitness_calculator=None,
                    )
                )
                LOG.info(
                    "Successfully loaded {} structures into generation {} from run {}.".format(
                        len(self.generations[-1]), i, self.run_hash
                    )
                )
                i += 1
            print("Recovered from run {}".format(self.run_hash))
            LOG.info("Successfully loaded run {}.".format(self.run_hash))
        except Exception:
            print_exc()
            LOG.error(
                "Something went wrong when reloading run {}".format(self.run_hash)
            )
            exit("Something went wrong when reloading run {}".format(self.run_hash))
        if not self.generations:
            raise SystemExit("No generations found!")
        for i, _ in enumerate(self.generations):
            if not self.testing:
                # generation 0 is the seed pool and is never cleaned
                if i != 0:
                    removed = self.generations[i].clean()
                    LOG.info(
                        "Removed {} structures from generation {}".format(removed, i)
                    )
            # the latest generation needs its elites re-drawn from the one
            # before; earlier generations reload their saved bourgeoisie
            if i == len(self.generations) - 1 and len(self.generations) > 1:
                if self.num_elite <= len(self.generations[-2].bourgeoisie):
                    # generate elites with probability proportional to their fitness, but ensure every p is non-zero
                    probabilities = (
                        np.asarray(
                            [doc["fitness"] for doc in self.generations[-2].bourgeoisie]
                        )
                        + 0.0001
                    )
                    probabilities /= np.sum(probabilities)
                    elites = deepcopy(
                        np.random.choice(
                            self.generations[-2].bourgeoisie,
                            self.num_elite,
                            replace=False,
                            p=probabilities,
                        )
                    )
                else:
                    elites = deepcopy(self.generations[-2].bourgeoisie)
                self.generations[i].set_bourgeoisie(
                    best_from_stoich=self.best_from_stoich, elites=elites
                )
            else:
                bourge_fname = "{}-gen{}-bourgeoisie.json".format(self.run_hash, i)
                if os.path.isfile(bourge_fname):
                    self.generations[i].load_bourgeoisie(bourge_fname)
                else:
                    # no saved bourgeoisie: recompute it from the populace
                    self.generations[i].set_bourgeoisie(
                        best_from_stoich=self.best_from_stoich
                    )
            LOG.info(
                "Bourgeoisie contains {} structures: generation {}".format(
                    len(self.generations[i].bourgeoisie), i
                )
            )
            assert len(self.generations[i]) >= 1
            assert len(self.generations[i].bourgeoisie) >= 1
        if incomplete:
            LOG.info(
                "Trying to load incomplete generation from run {}.".format(
                    self.run_hash
                )
            )
            fname = "{}-gen{}.json".format(self.run_hash, "current")
            self.next_gen = Generation(
                self.run_hash,
                len(self.generations),
                self.num_survivors,
                self.num_accepted,
                dumpfile=fname,
                fitness_calculator=self.fitness_calculator,
            )
            LOG.info(
                "Successfully loaded {} structures into current generation ({}) from run {}.".format(
                    len(self.next_gen), len(self.generations), self.run_hash
                )
            )
            assert len(self.next_gen) >= 1
def seed_generation_0(self, gene_pool):
""" Set up first generation from gene pool.
Parameters:
gene_pool (list(dict)): list of structure with which to seed generation.
"""
self.gene_pool = gene_pool
for ind, parent in enumerate(self.gene_pool):
if "_id" in parent:
del self.gene_pool[ind]["_id"]
# check gene pool is sensible
errors = []
if not isinstance(self.gene_pool, list):
errors.append("Initial gene pool not a list: {}".format(self.gene_pool))
if not len(self.gene_pool) >= 1:
errors.append(
"Initial gene pool not long enough: {}".format(self.gene_pool)
)
if errors:
raise SystemExit("Initial genee pool is not sensible: \n".join(errors))
generation = Generation(
self.run_hash,
0,
len(gene_pool),
len(gene_pool),
fitness_calculator=self.fitness_calculator,
populace=self.gene_pool,
)
generation.rank()
generation.set_bourgeoisie(best_from_stoich=False)
LOG.info(
"Successfully initialised generation 0 with {} members".format(
len(generation)
)
)
generation.dump(0)
generation.dump_bourgeoisie(0)
print(generation)
self.generations.append(generation)
def is_newborn_dupe(self, newborn, extra_pdfs=None):
""" Check each generation for a duplicate structure to the current newborn,
using PDF calculator from matador.
Parameters:
newborn (dict): new structure to screen against the existing,
Keyword Arguments:
extra_pdfs (list(dict)): any extra PDFs to compare to, e.g. other hull structures
not used to seed any generation
Returns:
bool: True if duplicate, else False.
"""
for ind, gen in enumerate(self.generations):
if ind == 0:
if gen.is_dupe(newborn, extra_pdfs=extra_pdfs):
return True
else:
if gen.is_dupe(newborn):
return True
return False
    def finalise_files_for_export(self):
        """ Move unique structures from gen1 onwards to folder "<run_hash>-results".

        Uniqueness is decided by matador's similarity filter; each unique
        structure is written as a res file, and its CASTEP output is copied
        alongside if one exists in ``completed/``.
        """
        path = "{}-results".format(self.run_hash)
        os.makedirs(path.format(self.run_hash), exist_ok=True)
        LOG.info("Moving unique files to {}-results/...".format(self.run_hash))
        # flatten all generations after the seed pool into a single cursor
        cursor = [struc for gen in self.generations[1:] for struc in gen]
        uniq_inds, _, _, _, = get_uniq_cursor(cursor, projected=True)
        cursor = [cursor[ind] for ind in uniq_inds]
        for doc in cursor:
            source = get_root_source(doc)
            if not source:
                LOG.warning("Issue writing {}".format(doc["source"]))
                continue
            else:
                doc2res(
                    doc, "{}/{}".format(path, source), overwrite=False, hash_dupe=False
                )
            # copy the corresponding CASTEP output file if it exists
            if os.path.isfile("completed/{}".format(source.replace(".res", ".castep"))):
                shutil.copy(
                    "completed/{}".format(source.replace(".res", ".castep")),
                    "{}/{}".format(path, source.replace(".res", ".castep")),
                )
def _kill_all_gently(self, procs, newborns, queues):
""" Kill all running processes.
Parameters:
procs (list): list of `:obj:NewbornProcess` objects.
newborns (list): list of corresponding structures.
queues (list): list of queues that were collecting results.
"""
kill_attempts = 0
while procs and kill_attempts < 5:
for ind, proc in enumerate(procs):
# create kill file so that matador will stop next finished CASTEP
filename = "{}.kill".format(newborns[proc.newborn_id]["source"][0])
with open(filename, "w"):
pass
# wait 1 minute for CASTEP run
if proc.process.join(timeout=60) is not None:
result = queues[ind].get(timeout=60)
if isinstance(result, dict):
self.scrape_result(result, proc=proc, newborns=newborns)
del procs[ind]
kill_attempts += 1
if kill_attempts >= 5:
for ind, proc in enumerate(procs):
proc.process.terminate()
del procs[ind]
|
{"hexsha": "6ab847203d3bc7ba87dafbd477b6e149a4f38561", "size": 55285, "ext": "py", "lang": "Python", "max_stars_repo_path": "ilustrado/ilustrado.py", "max_stars_repo_name": "ml-evs/ilustrado", "max_stars_repo_head_hexsha": "3121ecaff9cb517f3946b2283bf50dce499caad9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-10-31T20:54:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T16:39:43.000Z", "max_issues_repo_path": "ilustrado/ilustrado.py", "max_issues_repo_name": "ml-evs/ilustrado", "max_issues_repo_head_hexsha": "3121ecaff9cb517f3946b2283bf50dce499caad9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ilustrado/ilustrado.py", "max_forks_repo_name": "ml-evs/ilustrado", "max_forks_repo_head_hexsha": "3121ecaff9cb517f3946b2283bf50dce499caad9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-11-29T11:34:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-12T12:31:48.000Z", "avg_line_length": 42.2345301757, "max_line_length": 128, "alphanum_fraction": 0.5186397757, "include": true, "reason": "import numpy", "num_tokens": 10717}
|
#!/usr/bin/env python3
import numpy
import psycopg2
import dummy
from psycopg2.extensions import register_adapter
from psycopg2.extras import Json
# Start a postgres database via Docker
# docker run -ti --rm --name word_psql -e POSTGRES_PASSWORD=mikolov -p 5433:5432 postgres:10.5
def adapt_numpy_ndarray(numpy_ndarray):
    # psycopg2 adapter: serialise the ndarray as a JSON list for storage.
    as_list = numpy_ndarray.tolist()
    return Json(as_list)
connection = psycopg2.connect("host=localhost user=postgres password=mikolov port=5433")
# Bug fix: the adapter was previously registered under the misspelt name
# `addapt_numpy_ndarray`, which raised a NameError before any query ran.
register_adapter(numpy.ndarray, adapt_numpy_ndarray)
cursor = connection.cursor()
cursor.execute('CREATE TABLE embeddings (key varchar, embedding jsonb);')
connection.commit()


#########
# Write #
#########
for key, emb in dummy.embeddings():
    cursor.execute('INSERT INTO embeddings (key, embedding) VALUES (%s, %s)', [key, emb])
connection.commit()


########
# Read #
########
for key, _ in dummy.embeddings():
    cursor.execute('SELECT key, embedding FROM embeddings WHERE key=%s', (key,))
    data = cursor.fetchone()
    value = numpy.array(data[1])
    assert isinstance(value, numpy.ndarray)
    # `is 50` relied on CPython int interning; compare by value instead
    assert len(value) == 50

# Clean up the demo table and close the connection
cursor.execute('DROP TABLE embeddings')
connection.commit()
connection.close()
|
{"hexsha": "569d149543b48f87f8cef3ac17874c6a46e198a6", "size": 1170, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/psql_example.py", "max_stars_repo_name": "krokodilj/word_embedding_storage", "max_stars_repo_head_hexsha": "206c14cee1af0768b6e187167333dcccf0095e9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2018-09-02T23:55:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T00:51:08.000Z", "max_issues_repo_path": "examples/psql_example.py", "max_issues_repo_name": "krokodilj/word_embedding_storage", "max_issues_repo_head_hexsha": "206c14cee1af0768b6e187167333dcccf0095e9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-09-03T06:56:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-02T09:57:42.000Z", "max_forks_repo_path": "examples/psql_example.py", "max_forks_repo_name": "krokodilj/word_embedding_storage", "max_forks_repo_head_hexsha": "206c14cee1af0768b6e187167333dcccf0095e9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-09-03T08:14:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-25T10:18:33.000Z", "avg_line_length": 25.4347826087, "max_line_length": 94, "alphanum_fraction": 0.7230769231, "include": true, "reason": "import numpy", "num_tokens": 272}
|
[STATEMENT]
lemma lt_tail_max:
assumes "tail p \<noteq> 0" and "v \<in> keys p" and "v \<prec>\<^sub>t lt p"
shows "v \<preceq>\<^sub>t lt (tail p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v \<preceq>\<^sub>t lt (tail p)
[PROOF STEP]
proof (rule lt_max_keys, simp add: keys_tail assms(2))
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. v \<noteq> lt p
[PROOF STEP]
from assms(3)
[PROOF STATE]
proof (chain)
picking this:
v \<prec>\<^sub>t lt p
[PROOF STEP]
show "v \<noteq> lt p"
[PROOF STATE]
proof (prove)
using this:
v \<prec>\<^sub>t lt p
goal (1 subgoal):
1. v \<noteq> lt p
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
v \<noteq> lt p
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 334, "file": "Polynomials_MPoly_Type_Class_Ordered", "length": 5}
|
import numpy as np
import os
import bilby.core.prior
from bilby.core.prior import PriorDict
import redback.model_library
from redback.utils import logger
def get_priors(model, times=None, y=None, yerr=None, dt=None, **kwargs):
    """Load the default prior set for a given model.

    For prompt-emission models the priors are built from the data ranges
    (placeholder defaults are substituted when no data are given); for every
    other model they are read from the bundled ``priors/<model>.prior`` file.

    :param model: name of the model whose priors are required.
    :param times: time array; only used for prompt models.
    :param y: count/flux array; only used for prompt models.
    :param yerr: uncertainty array; only used for prompt models.
    :param dt: bin widths; defaults to ones; only used for prompt models.
    :return: a PriorDict (empty if no matching prior file was found).
    """
    prompt_prior_functions = dict(gaussian=get_gaussian_priors, skew_gaussian=get_skew_gaussian_priors,
                                  skew_exponential=get_skew_exponential_priors, fred=get_fred_priors,
                                  fred_extended=get_fred_extended_priors)

    if model in redback.model_library.modules_dict['prompt_models']:
        # fall back to placeholder data spanning a generic burst
        if times is None:
            times = np.array([0, 100])
        if y is None:
            y = np.array([1, 1e6])
        if yerr is None:
            yerr = np.array([1, 1e3])
        if dt is None:
            dt = np.ones(len(times))
        # prompt priors are set on count rates rather than fluxes
        rate = y * dt
        priors = prompt_prior_functions[model](times=times, y=rate, yerr=yerr)
        priors['background_rate'] = bilby.core.prior.LogUniform(minimum=np.min(rate), maximum=np.max(rate),
                                                                name='background_rate')
        return priors

    priors = PriorDict()
    try:
        filename = os.path.join(os.path.dirname(__file__), 'priors', f'{model}.prior')
        priors.from_file(filename)
    except FileNotFoundError as e:
        logger.warning(e)
        logger.warning('Returning empty PriorDict.')
    return priors
def get_prompt_priors(model, times, y, yerr, **kwargs):
    """Build the prior set for a prompt-emission model.

    :param model: name of the prompt model (currently only 'gaussian').
    :param times: time array used for timing-related prior bounds.
    :param y: data array used for amplitude prior bounds.
    :param yerr: uncertainty array used for amplitude prior bounds.
    :return: a PriorDict for the requested model, or None if unknown.
    """
    if model == 'gaussian':
        # bug fix: the constructed priors were previously discarded, so this
        # function always returned None
        return get_gaussian_priors(times=times, y=y, yerr=yerr, **kwargs)
    return None
def get_gaussian_priors(times, y, yerr, **kwargs):
    """Priors for a Gaussian pulse: amplitude, width (sigma) and centre (t_0).

    Bounds are data-driven: the amplitude spans the smallest uncertainty to
    the data maximum, sigma spans three time bins to the full duration, and
    t_0 spans the observation window.

    :param times: time array.
    :param y: count/flux array.
    :param yerr: uncertainty array.
    :return: a PriorDict with 'amplitude', 'sigma' and 't_0' entries.
    """
    # smallest bin width and total span of the light curve
    dt = np.min(np.diff(times))
    duration = times[-1] - times[0]
    priors = bilby.core.prior.PriorDict()
    priors['amplitude'] = bilby.core.prior.LogUniform(minimum=np.min(yerr), maximum=np.max(y),
                                                      name='amplitude', latex_label=r'$A$')
    priors['sigma'] = bilby.core.prior.LogUniform(minimum=3*dt, maximum=duration, name="sigma", latex_label=r"$\sigma$")
    priors['t_0'] = bilby.core.prior.Uniform(minimum=times[0], maximum=times[-1], name="t_0", latex_label=r"$t_0$")
    return priors
def get_skew_gaussian_priors(times, y, yerr, **kwargs):
    """Priors for a skew-Gaussian pulse: the Gaussian priors with the single
    sigma replaced by separate rise and fall widths.

    :param times: time array.
    :param y: count/flux array.
    :param yerr: uncertainty array.
    :return: a PriorDict with 'amplitude', 't_0', 'sigma_rise' and
        'sigma_fall' entries.
    """
    priors = get_gaussian_priors(times=times, y=y, yerr=yerr, **kwargs)
    # bug fix: the original label list was missing a comma, so the two
    # string literals concatenated into one element and zip() produced only
    # a single pair — sigma_fall was never created; the fall label was also
    # a copy-paste of the rise label
    latex_labels = [r"$\sigma_{\mathrm{rise}}$", r"$\sigma_{\mathrm{fall}}$"]
    for latex_label, part in zip(latex_labels, ['rise', 'fall']):
        priors[f'sigma_{part}'] = bilby.core.prior.LogUniform(
            minimum=priors['sigma'].minimum, maximum=priors['sigma'].maximum,
            name=f"sigma_{part}", latex_label=latex_label)
    del priors['sigma']
    return priors
def get_skew_exponential_priors(times, y, yerr, **kwargs):
    """Priors for a skew-exponential pulse: the Gaussian priors with the
    single sigma replaced by separate rise and fall timescales.

    :param times: time array.
    :param y: count/flux array.
    :param yerr: uncertainty array.
    :return: a PriorDict with 'amplitude', 't_0', 'tau_rise' and 'tau_fall'
        entries.
    """
    priors = get_gaussian_priors(times=times, y=y, yerr=yerr, **kwargs)
    # bug fix: as in get_skew_gaussian_priors, a missing comma concatenated
    # the two labels into one element, so tau_fall was never created and the
    # fall label was a copy-paste of the rise label
    latex_labels = [r"$\tau_{\mathrm{rise}}$", r"$\tau_{\mathrm{fall}}$"]
    for latex_label, part in zip(latex_labels, ['rise', 'fall']):
        priors[f'tau_{part}'] = bilby.core.prior.LogUniform(
            minimum=priors['sigma'].minimum, maximum=priors['sigma'].maximum,
            name=f"tau_{part}", latex_label=latex_label)
    del priors['sigma']
    return priors
def get_fred_priors(times, y, yerr, **kwargs):
    """Priors for a FRED (fast-rise exponential-decay) pulse.

    :param times: time array; bounds the pulse start time delta.
    :param y: count/flux array; bounds the amplitude.
    :param yerr: uncertainty array; bounds the amplitude.
    :return: a PriorDict with 'amplitude', 'tau', 'psi' and 'delta' entries.
    """
    priors = bilby.core.prior.PriorDict()
    priors['amplitude'] = bilby.core.prior.LogUniform(minimum=np.min(yerr), maximum=np.max(y),
                                                      name='amplitude', latex_label=r'$A$')
    # bug fix: tau's name/latex were copy-pasted from the t_0 prior; also
    # give psi and delta the same name + latex_label convention as the rest
    # of this module
    priors['tau'] = bilby.core.prior.Uniform(minimum=1e-3, maximum=1e3, name="tau", latex_label=r"$\tau$")
    priors['psi'] = bilby.core.prior.Uniform(minimum=1e-3, maximum=1e3, name="psi", latex_label=r"$\psi$")
    priors['delta'] = bilby.core.prior.Uniform(minimum=times[0], maximum=times[-1], name="delta", latex_label=r"$\delta$")
    return priors
def get_fred_extended_priors(times, y, yerr, **kwargs):
    """Priors for the extended FRED model: the FRED priors plus the gamma
    and nu shape parameters.

    :param times: time array, forwarded to :func:`get_fred_priors`.
    :param y: count/flux array, forwarded to :func:`get_fred_priors`.
    :param yerr: uncertainty array, forwarded to :func:`get_fred_priors`.
    :return: a PriorDict with the FRED entries plus 'gamma' and 'nu'.
    """
    priors = get_fred_priors(times=times, y=y, yerr=yerr, **kwargs)
    priors['gamma'] = bilby.core.prior.LogUniform(minimum=1e-3, maximum=1e3, name=r"$\gamma$")
    # bug fixes: the nu latex string was unterminated (r"$\nu"), and the
    # priors dict was never returned, so callers always received None
    priors['nu'] = bilby.core.prior.LogUniform(minimum=1e-3, maximum=1e3, name=r"$\nu$")
    return priors
|
{"hexsha": "7b4dad189e93fa8a84816256f8531367d1945685", "size": 4050, "ext": "py", "lang": "Python", "max_stars_repo_path": "redback/priors.py", "max_stars_repo_name": "nikhil-sarin/redback", "max_stars_repo_head_hexsha": "b0023b770a3c0a25a18c4f6ff1a07339be7f83fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-11T10:03:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T10:51:54.000Z", "max_issues_repo_path": "redback/priors.py", "max_issues_repo_name": "nikhil-sarin/redback", "max_issues_repo_head_hexsha": "b0023b770a3c0a25a18c4f6ff1a07339be7f83fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2022-03-03T07:59:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-03T08:01:32.000Z", "max_forks_repo_path": "redback/priors.py", "max_forks_repo_name": "nikhil-sarin/redback", "max_forks_repo_head_hexsha": "b0023b770a3c0a25a18c4f6ff1a07339be7f83fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T12:24:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:24:59.000Z", "avg_line_length": 44.5054945055, "max_line_length": 120, "alphanum_fraction": 0.6325925926, "include": true, "reason": "import numpy", "num_tokens": 1122}
|
# Riduzione della dimensionalità
Fino ad ora abbiamo visto come le feature siano importanti per poter definire un algoritmo in grado di eseguire il proprio compito imparando dai dati. Il problema è che potremmo trovarci in condizioni in cui sfortunatamente abbiamo troppe feature e troppi pochi dati (troppe colonne e troppe poche righe), oppure ci viene richiesto un numero massimo di feature da usare per addestrare il nostro algoritmo. In tal caso possiamo utilizzare **algoritmi che ci aiutino a ridurre le dimensioni del nostro dataset considerando solo le feature rilevanti, anche senza sapere quale modello useremo; inoltre questi metodi si applicano a dataset di contesti diversi come testi, immagini e molto altro**.
## decomposizione ai valori singolari (SVD)
La __[decomposizione ai valori singolari](https://it.wikipedia.org/wiki/Decomposizione_ai_valori_singolari)__ si basa su nozioni geometriche al fine di fattorizzare la matrice di partenza in matrici più semplici e che mi fornisca informazioni sulle proprietà di ogni componente che stiamo considerando.Dal punto di vista matematico si ha:
\begin{equation}
\Large M_{n \times m} = U_{n \times n} D_{n \times m} V^{\dagger}_{m \times m}
\end{equation}
dove $ M_{n \times m}$ è la nostra matrice di partenza con n righe e m colonne, $U_{n \times n}$ è una matrice unitaria ortogonale, $D_{n \times m}$ è una matrice singolare diagonale $n\times m$, e $V^{\dagger}_{m \times m}$ è la trasposta coniugata di una matrice unitaria ortogonale.<br>
Dal punto di vista pratico quello che ci interessa è la matrice $D$ poiché i suoi valori sulla diagonali rappresentano la varianza di ogni singola componente, a cosa ci serve questo? Per capirlo facciamo un esempio
```python
import numpy as np
#la nostra matrice di partenza
M = np.matrix([[1, 5, 6 ], [3, 4, 19], [2,7,24]])
U, D, V = np.linalg.svd(M)
#Trasformo solo D poiché essendo diagonale numpy per risparmiare memoria
#vi ritorna un array 1D poiché per le operazioni hanno lo stesso comportamento
print(f'Matrix U:\n {U}\n Matrix D :\n {np.diag(D)}\n Matrix V :\n {V}')
```
Matrix U:
[[-0.22097491 -0.91114121 0.34783874]
[-0.60034273 0.4081545 0.68774887]
[-0.76860828 -0.05684721 -0.63718891]]
Matrix D :
[[32.61541883 0. 0. ]
[ 0. 3.45287401 0. ]
[ 0. 0. 1.14547607]]
Matrix V :
[[-0.1091269 -0.27246326 -0.95595768]
[ 0.05781499 -0.96181284 0.26753223]
[ 0.99234507 0.02607372 -0.12071212]]
Ora mettiamo l'ipotesi di voler considerare solo le due componenti più importanti per ricostruire il dataset: per farlo vediamo che cosa succede togliendo un valore dalla diagonale. Ricordate che vogliamo $M \approx UDV^{\dagger}$; per fare in modo che non ci siano problemi di dimensionalità nel prodotto matriciale successivo, alla matrice $U$ si toglie la colonna relativa al valore rimosso, mentre alla matrice $V^{\dagger}$ si toglie la riga corrispondente.
```python
#eliminate the first value of D, U and V lose the column
#@ means dot product in numpy
#original calculus gives me the original M
print(f'original matrix obtained with all features:\n {U @ np.diag(D) @ V}')
#i remove the first colmn of U and last row of V
print(f'matrix obtained eliminating the first element in diagonal {D[0]}:\n'
f'{U[: ,1:] @ np.diag(D[1:]) @ V[:2, :]}')
#i remove the middle column of U and middle row of V
print(f'matrix obtained eliminating the second element in diagonal {D[1]}:\n'
f'{U[: ,[0,2]] @ np.diag(D[[0,2]]) @ V[[0,2], :]}')
##i remove the last column of U and the last row of V
print(f'matrix obtained eliminating the third element in diagonal {D[2]}:\n'
f'{U[: ,[0,1]] @ np.diag(D[:2]) @ V[[0,1], :]}')
```
original matrix obtained with all features:
[[ 1. 5. 6.]
[ 3. 4. 19.]
[ 2. 7. 24.]]
matrix obtained eliminating the first element in diagonal 32.6154188335189:
[[ 0.36635519 0.47395901 3.114092 ]
[-0.10824657 -1.14170015 -1.13647509]
[-0.02077816 0.75549322 -0.00762631]]
matrix obtained eliminating the second element in diagonal 3.4528740055280096:
[[ 1.18188917 1.97408316 6.84167133]
[ 2.91852099 5.35548866 18.6229652 ]
[ 2.01134829 6.81120935 24.0525129 ]]
matrix obtained eliminating the third element in diagonal 1.1454760652624991:
[[ 0.60460909 4.98961116 6.04809665]
[ 2.21823068 3.97945913 19.09509699]
[ 2.72429743 7.0190308 23.91189408]]
Da come possiamo vedere che se noi togliamo il valore più piccolo dalla matrice diagonale, la nuova matrice ricostruita sarà molto vicina alla matrice ottenuta, in tal caso la decomposizione viene chiamata __[TruncatedSVD](https://langvillea.people.cofc.edu/DISSECTION-LAB/Emmie%27sLSI-SVDModule/p5module.html)__.<br>
Da qui possiamo capire che, qualora volessimo sapere quali componenti sono più rilevanti, è sufficiente selezionare i valori della diagonale più alti fino ad averne il numero desiderato.<br>
## PCA
La PCA (Principal Component Analysis) sfrutta proprio questo algoritmo di SVD permettendo di mappare il nostro problema proiettandolo su uno spazio più piccolo con la condizione di conservare quanto più possibile la norma dei nostri vettori, sfruttando proprio la varianza di ogni singola feature associata ottenuta attraverso la matrice diagonale; attenzione che ora le nostre nuove feature sono chiamate **principal component**, se avete dubbi consultate __[qui](https://medium.com/analytics-vidhya/what-is-principal-component-analysis-cf880cf95a0c)__.<br>
Poiché __[scikit](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html)__ ha già implementato la sua funzione useremo quella.
```python
import time
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix, classification_report
#classification data
diabetes = pd.read_csv('../data/diabetes2.csv')
X_diabetes, y_diabetes = diabetes.drop('Outcome', axis = 1).values, diabetes.Outcome.values
target_names = ["Not Diabetes", "Diabetes"]
print(f'Original data: {X_diabetes.shape[0]} dati, {X_diabetes.shape[1]} feature')
#let's find the 5 most valuable feature
diabetes_pca = PCA(n_components=5)
#fit the data
diabetes_pca.fit(X_diabetes)
#trasform the data
X_pca = diabetes_pca.transform(X_diabetes)
print(f'Reduced data: {X_pca.shape[0]} dati, {X_pca.shape[1]} feature')
print(f'Reduced data PCA output: \n {X_pca}')
print("PCA : ")
print(f'- components: \n{diabetes_pca.components_} \n'
f'- explained variance: \n {diabetes_pca.explained_variance_} \n'
f'- explained variance ratio: \n {diabetes_pca.explained_variance_ratio_} \n'
f'- singular values: \n {diabetes_pca.singular_values_}\n'
f'- noise variance values: {diabetes_pca.noise_variance_}' )
print('-'*80)
#prepare the data,
print("The reduced data ha been divided to train and test in 80% traing, 20% testing")
X_pca_train, X_pca_test, y_diabetes_train, y_diabetes_test = train_test_split(
X_pca, y_diabetes, random_state=0, test_size = 0.2)
print('Allenaimo un Gradient Boosting Classifier sui dati ridotti:')
tree = GradientBoostingClassifier()
start = time.time()
tree.fit(X_pca_train, y_diabetes_train)
end = time.time()
print(f"Time taken to train Gradient Boosting Classifier on reduced data: {end - start}s ")
plot_confusion_matrix(tree, X_pca_test, y_diabetes_test, display_labels=target_names)
plt.title("Confusion matrix of classification")
plt.show()
print(classification_report(y_diabetes_test, tree.predict(X_pca_test), target_names= target_names))
```
# Non-negative matrix factorization (NMF o NNMF)
La PCA presenta un modo di ridurre la dimensionalità del dataset, ma è presente un problema: la possibilità che la ricostruzione della matrice dia dei valori negativi ed in genere i valori negativi sono difficili da interptretare ed analizzare, per questo l'obiettivo della __[NMF](https://scikit-learn.org/stable/modules/decomposition.html#non-negative-matrix-factorization-nmf-or-nnmf)__ è quello di fattorizzare la matrice imponendo che gli autovalori e i vettori delle matrici fattorizzati siano tutti positivi, poiché questo implica avere un maggiorn numero possibile di modi di fattorizzare, in genere la condizione che si pone è che la distanza matriciale tra la decomposizione e l'originale sia quanto **più vicina secondo la distanza di Frobenius definita anche come** __[norma matriciale](https://it.wikipedia.org/wiki/Norma_matriciale)__, **si introducono inoltre termini di regoralizzazione o si usano altre metriche per assicurare un risultato flessibile e quanto meno divergente, per saperne di più guardate** __[qui](https://scikit-learn.org/stable/modules/decomposition.html#nmf-with-a-beta-divergence)__.
Usiamo ora il modello sul diabetes dataset.
```python
from sklearn.decomposition import NMF
nmf = NMF(n_components=5, verbose = 0, max_iter=500, init= 'nndsvda' )
nmf.fit(X_diabetes)
#trasform the data
X_NMF = nmf.transform(X_diabetes)
print(f'Reduced data: {X_NMF.shape[0]} dati, {X_NMF.shape[1]} feature')
print("NMF : ")
print(f'- components: \n{nmf.components_} \n'
f'- reguralization: {nmf.regularization} \n'
f'- reconstruction error: {nmf.reconstruction_err_}\n'
f'- iterations: {nmf.n_iter_}')
print('-'*80)
#prepare the data,
print("The reduced data ha been divided to train and test in 80% traing, 20% testing")
X_NMF_train, X_NMF_test, y_diabetes_train, y_diabetes_test = train_test_split(
X_NMF, y_diabetes, random_state=0, test_size = 0.2)
print('Allenaimo un Gradient Boosting Classifier sui dati ridotti:')
tree = GradientBoostingClassifier()
start = time.time()
tree.fit(X_NMF_train, y_diabetes_train)
end = time.time()
print(f"Time taken to train Gradient Boosting Classifier on reduced data: {end - start}s ")
plot_confusion_matrix(tree, X_NMF_test, y_diabetes_test, display_labels=target_names)
plt.title("Confusion matrix of classification")
plt.show()
print(classification_report(y_diabetes_test, tree.predict(X_NMF_test), target_names= target_names))
```
## Latent Dirichlet Allocation (LDA)
L' __[LDA](https://scikit-learn.org/stable/modules/decomposition.html#latent-dirichlet-allocation-lda)__ è un algoritmo di riduzione dimensionale che è __[probabilistico generativo](https://ichi.pro/it/modelli-grafici-probabilistici-generativi-vs-discriminativi-40857457895478)__, la differenza da quelli discriminativi è che in questo caso noi cerchiamo di determinare una distribuzione di probabilità attraverso cui possiamo determinare quale sia la probabilità associata a quell'evento. Tradotto in matematica i modelli discriminativi determinano $P(Y|X)$, mentre quelli generativi $P(Y,X)$, questo permette in futuro anche di generare anche valori con una certa probabilità associata e in genere non sono limitati alla mera classificazione, per dettagli guardate qui un __[video sulle GAN](https://www.youtube.com/watch?v=8L11aMN5KY8)__, che sono modelli generativi.<br>
***Attenti però che questi modelli sono meno precisi poiché assumono che i dati siano i.i.d., condizione che nei discriminativi può anche non essere vera!***<br>
Tornando alla LDA quello che succede è che questo algoritmo cervca di capire dai dati quale sia la struttura sottostante leggendone solo una parte, facendo ciò quello che succede è che divide per categorie la struttura e in base a ciò considera solo le categorie più rilevanti al fine di poterne ricreare la struttura completa, **questo algoritmo permette l'apprendimento "online" ovvero ogni singolo nuovo dato può essere usato per allenare il modello e adattarlo in maniera istantanea ai possibili cambiamenti, se invece volete riallenare il modello solo quanto un certo numero di dati è raggiunto potete usare "batch"**.
```python
from sklearn.decomposition import LatentDirichletAllocation
lda = LatentDirichletAllocation(n_components=5, n_jobs=-1)
lda.fit(X_diabetes)
#trasform the data
X_lda = lda.transform(X_diabetes)
print(f'Reduced data: {X_lda.shape[0]} dati, {X_lda.shape[1]} feature')
print("LDA : ")
print(f'- components: \n{lda.components_} \n'
f'- bound_: {lda.bound_} \n'
f'- exp dirichlet components:\n {lda.exp_dirichlet_component_}\n'
f'- iterations: {lda.n_iter_}')
print('-'*80)
#prepare the data,
print("The reduced data ha been divided to train and test in 80% traing, 20% testing")
X_lda_train, X_lda_test, y_diabetes_train, y_diabetes_test = train_test_split(
X_lda, y_diabetes, random_state=0, test_size = 0.2)
print('Allenaimo un Gradient Boosting Classifier sui dati ridotti:')
tree = GradientBoostingClassifier()
start = time.time()
tree.fit(X_lda_train, y_diabetes_train)
end = time.time()
print(f"Time taken to train Gradient Boosting Classifier on reduced data: {end - start}s ")
plot_confusion_matrix(tree, X_lda_test, y_diabetes_test, display_labels=target_names)
plt.title("Confusion matrix of classification")
plt.show()
print(classification_report(y_diabetes_test, tree.predict(X_lda_test), target_names= target_names))
```
In questo notebook abbiamo quindi visto come possiamo utilizzare alcune tecniche per ridurre la dimensione del nostro dataset con la condizione di riuscire a usare modelli che riescano a performare quanto meglio possibile; sono presenti molte altre tecniche, per saperne di più consultate la __[guida di scikit sulla dimensionality reduction](https://scikit-learn.org/stable/modules/decomposition.html#decompositions)__.
***
COMPLIMENTI AVETE FINITO LA LEZIONE SU PCA LDA E NMF, A PRESTO!
|
{"hexsha": "3a8523ed7f36fcf78498c3c7f1700c26c8f2ad5b", "size": 67801, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "3.machine learning/7-PCA_LDA_NMF.ipynb", "max_stars_repo_name": "matinator/Starting-Finance-Club-Torino", "max_stars_repo_head_hexsha": "8abda7caa769f2dc237c4ff520ef1b40038a5f65", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-18T15:45:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-18T15:45:39.000Z", "max_issues_repo_path": "3.machine learning/.ipynb_checkpoints/7-PCA_LDA_NMF-checkpoint.ipynb", "max_issues_repo_name": "matinator/Starting-Finance-Club-Torino", "max_issues_repo_head_hexsha": "8abda7caa769f2dc237c4ff520ef1b40038a5f65", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "3.machine learning/.ipynb_checkpoints/7-PCA_LDA_NMF-checkpoint.ipynb", "max_forks_repo_name": "matinator/Starting-Finance-Club-Torino", "max_forks_repo_head_hexsha": "8abda7caa769f2dc237c4ff520ef1b40038a5f65", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 127.2063789869, "max_line_length": 14132, "alphanum_fraction": 0.8373622808, "converted": true, "num_tokens": 4013}
|
# -*- coding: utf-8 -*-
## @package inversetoon.batch.generate_isophote_scene
#
# Isophote scene generator.
# @author tody
# @date 2015/07/31
import numpy as np
from inversetoon.batch.batch import normalDataSetBatch
from inversetoon.core.silhouette import silhoutteCurve
from inversetoon.io.image import loadNormal
from inversetoon.core.isophote import isophoteCurves
from inversetoon.cv.light import computeIllumination
from inversetoon.data.isophote_mesh import IsophoteMesh
from inversetoon.data.scene import Scene
from inversetoon.io.isophote import saveSceneData
from inversetoon import datasets
def computeIsophoteCurves(N_32F, L, S_8U):
    """Extract isophote curves from a normal image lit from direction L.

    Parameters:
        N_32F: normal image; used to compute illumination and attached to
            every returned curve.
        L: light direction vector, forwarded to computeIllumination and
            stored on each curve.
        S_8U: silhouette mask, forwarded to isophoteCurves as M_8U.

    Returns:
        (illumination, curves): the illumination image and the list of
        isophote curves, each with its normal image and light direction set.
    """
    illumination = computeIllumination(N_32F, L)
    curves = isophoteCurves(illumination, M_8U=S_8U)
    for curve in curves:
        curve.setNormalImage(N_32F)
        curve.setLightDir(L)
    return illumination, curves
def normalToIsophoteFile(normal_file, scene_file, L1=np.array([-0.5, 0.5, 0.2]), L2=np.array([0.5, 0.5, 0.2])):
    """Build an isophote scene from a normal-map file and save it.

    Loads the normal map, extracts the silhouette curve, computes isophote
    curves under the two light directions L1 and L2, assembles them into an
    IsophoteMesh, and writes the resulting Scene to scene_file.

    Parameters:
        normal_file: path of the input normal image.
        scene_file: path where the serialized scene is written.
        L1, L2: light direction vectors for the two isophote passes.
    """
    N_32F, A_8U = loadNormal(normal_file)
    silhoutte_curve, S_8U = silhoutteCurve(A_8U)
    silhoutte_curve.setNormalImage(N_32F)
    _, curves_L1 = computeIsophoteCurves(N_32F, L1, S_8U)
    _, curves_L2 = computeIsophoteCurves(N_32F, L2, S_8U)
    # Concatenate the curves from both light directions into one list.
    all_curves = [*curves_L1, *curves_L2]
    mesh = IsophoteMesh(silhoutte_curve, all_curves)
    saveSceneData(scene_file, Scene(mesh, normal_file))
def datasetFunc(data_name):
    """Batch callback: convert one dataset entry's normal map into an isophote scene file."""
    normalToIsophoteFile(
        datasets.normal.dataFile(data_name),
        datasets.isophote.dataFile(data_name),
    )
# Entry point: run the isophote-scene generation over every normal-map dataset entry.
if __name__ == '__main__':
    normalDataSetBatch(datasetFunc)
|
{"hexsha": "ea9395b920c5fda52d509d7db5510bae8728f14f", "size": 1774, "ext": "py", "lang": "Python", "max_stars_repo_path": "inversetoon/batch/generate_isophote_scene.py", "max_stars_repo_name": "tody411/InverseToon", "max_stars_repo_head_hexsha": "bc5b922cae9bbf99ed1f020c93b1577c4747ff92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2015-10-01T16:00:39.000Z", "max_stars_repo_stars_event_max_datetime": "2016-06-08T13:24:34.000Z", "max_issues_repo_path": "inversetoon/batch/generate_isophote_scene.py", "max_issues_repo_name": "tody411/InverseToon", "max_issues_repo_head_hexsha": "bc5b922cae9bbf99ed1f020c93b1577c4747ff92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inversetoon/batch/generate_isophote_scene.py", "max_forks_repo_name": "tody411/InverseToon", "max_forks_repo_head_hexsha": "bc5b922cae9bbf99ed1f020c93b1577c4747ff92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5666666667, "max_line_length": 111, "alphanum_fraction": 0.7626832018, "include": true, "reason": "import numpy", "num_tokens": 564}
|
# import standard plotting and animation
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.ticker import FormatStrFormatter
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from IPython.display import clear_output
# import standard libraries
import math
import time
import copy
from inspect import signature
class Visualizer:
    '''
    animators for time series
    '''
    #### animate exponential average ####
    def animate_exponential_ave(self,x,y,savepath,**kwargs):
        """Render and save an animation overlaying an exponential average on a series.

        Args:
            x: 1-D numpy array holding the raw time series (drawn in black).
            y: sequence of exponential-average values; frame k draws y[:k]
               in orange.  Its length determines the number of frames.
            savepath: output path for the rendered video (encoded with libx264,
               so ffmpeg must be available).
            **kwargs: optional 'fps' (frames per second, default 50).

        Side effects: shows rendering progress via print, saves the animation
        to disk, and clears the notebook cell output when finished.
        """
        # produce figure
        fig = plt.figure(figsize = (9,4))
        # three columns: thin empty margins on the sides, the plot in the middle
        gs = gridspec.GridSpec(1, 3, width_ratios=[1,7,1])
        ax = plt.subplot(gs[0]); ax.axis('off')
        ax1 = plt.subplot(gs[1]);
        ax2 = plt.subplot(gs[2]); ax2.axis('off')
        artist = fig
        # view limits (x padded by 3 samples, y padded by 15% of the data range)
        xmin = -3
        xmax = len(x) + 3
        ymin = np.min(x)
        ymax = np.max(x)
        ygap = (ymax - ymin)*0.15
        ymin -= ygap
        ymax += ygap
        # start animation
        num_frames = len(y)
        print ('starting animation rendering...')
        def animate(k):
            # clear panels
            ax1.cla()
            # print rendering update every 25 frames
            if np.mod(k+1,25) == 0:
                print ('rendering animation frame ' + str(k+1) + ' of ' + str(num_frames))
            if k == num_frames - 1:
                print ('animation rendering complete!')
                time.sleep(1.5)
                clear_output()
            # plot x (the full raw series is redrawn on every frame)
            ax1.plot(np.arange(1,x.size + 1),x,alpha = 1,c = 'k',linewidth = 2,zorder = 2);
            # plot exponential average - initial conditions
            if k == 1:
                ax1.plot(np.arange(1,2), y[:1], alpha = 0.75, c = 'darkorange',linewidth = 4,zorder = 3);
            # plot moving average - everything after and including initial conditions
            if k > 1:
                # plot the first k average values so the orange curve grows over time
                ax1.plot(np.arange(1,k+1),y[:k],alpha = 0.7,c = 'darkorange',linewidth = 4,zorder = 3);
            # label axes
            ax1.set_xlim([xmin,xmax])
            ax1.set_ylim([ymin,ymax])
            return artist,
        anim = animation.FuncAnimation(fig, animate ,frames=num_frames, interval=num_frames, blit=True)
        # produce animation and save
        fps = 50
        if 'fps' in kwargs:
            fps = kwargs['fps']
        anim.save(savepath, fps=fps, extra_args=['-vcodec', 'libx264'])
        clear_output()
|
{"hexsha": "37db2cafcfd8adcbccde2258facb71389010d387", "size": 2605, "ext": "py", "lang": "Python", "max_stars_repo_path": "posts/dynamic_systems_unlimited_memory/library/exponential_average_animator.py", "max_stars_repo_name": "jermwatt/blog", "max_stars_repo_head_hexsha": "3dd0d464d7a17c1c7a6508f714edc938dc3c03e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-04-17T23:55:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-08T02:18:49.000Z", "max_issues_repo_path": "posts/dynamic_systems_unlimited_memory/library/exponential_average_animator.py", "max_issues_repo_name": "jermwatt/blog", "max_issues_repo_head_hexsha": "3dd0d464d7a17c1c7a6508f714edc938dc3c03e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "posts/dynamic_systems_unlimited_memory/library/exponential_average_animator.py", "max_forks_repo_name": "jermwatt/blog", "max_forks_repo_head_hexsha": "3dd0d464d7a17c1c7a6508f714edc938dc3c03e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-04-10T22:46:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-06T09:16:30.000Z", "avg_line_length": 32.9746835443, "max_line_length": 105, "alphanum_fraction": 0.5443378119, "include": true, "reason": "import numpy", "num_tokens": 639}
|
#
# Base solver class
#
import pybamm
import numpy as np
from scipy import optimize
from scipy.sparse import issparse
class DaeSolver(pybamm.BaseSolver):
    """Solve a discretised model.

    Parameters
    ----------
    rtol : float, optional
        The relative tolerance for the solver (default is 1e-6).
    atol : float, optional
        The absolute tolerance for the solver (default is 1e-6).
    root_method : str, optional
        The method to use to find initial conditions (default is "lm")
    root_tol : float, optional
        The tolerance for the initial-condition solver (default is 1e-6).
    max_steps: int, optional
        The maximum number of steps the solver will take before terminating
        (default is 1000).
    """

    def __init__(
        self,
        method=None,
        rtol=1e-6,
        atol=1e-6,
        root_method="lm",
        root_tol=1e-6,
        max_steps=1000,
    ):
        super().__init__(method, rtol, atol)
        self.root_method = root_method
        self.root_tol = root_tol
        self.max_steps = max_steps

    @property
    def root_method(self):
        # Name of the scipy.optimize.root method used to find consistent ICs.
        return self._root_method

    @root_method.setter
    def root_method(self, method):
        self._root_method = method

    @property
    def root_tol(self):
        # Tolerance passed to the initial-condition root finder.
        return self._root_tol

    @root_tol.setter
    def root_tol(self, tol):
        self._root_tol = tol

    @property
    def max_steps(self):
        # Maximum number of solver steps before terminating.
        return self._max_steps

    @max_steps.setter
    def max_steps(self, max_steps):
        self._max_steps = max_steps

    def compute_solution(self, model, t_eval):
        """Calculate the solution of the model at specified times.

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model whose solution to calculate. Must have attributes rhs and
            initial_conditions
        t_eval : numeric type
            The times at which to compute the solution

        Returns
        -------
        (solution, solve_time, termination) : the solution object, the wall
        time taken by the integration, and the reason integration stopped.
        """
        timer = pybamm.Timer()

        solve_start_time = timer.time()
        pybamm.logger.info("Calling DAE solver")
        solution = self.integrate(
            self.residuals,
            self.y0,
            t_eval,
            events=self.event_funs,
            mass_matrix=model.mass_matrix.entries,
            jacobian=self.jacobian,
        )
        solve_time = timer.time() - solve_start_time

        # Identify the event that caused termination
        termination = self.get_termination_reason(solution, self.events)

        return solution, solve_time, termination

    def set_up(self, model):
        """Unpack model, perform checks, simplify and calculate jacobian.

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model whose solution to calculate. Must have attributes rhs and
            initial_conditions

        Raises
        ------
        :class:`pybamm.SolverError`
            If the model contains any algebraic equations (in which case a DAE solver
            should be used instead)
        """
        # create simplified rhs, algebraic and event expressions
        concatenated_rhs = model.concatenated_rhs
        concatenated_algebraic = model.concatenated_algebraic
        events = model.events

        if model.use_simplify:
            # set up simplification object, for re-use of dict
            simp = pybamm.Simplification()
            pybamm.logger.info("Simplifying RHS")
            concatenated_rhs = simp.simplify(concatenated_rhs)
            pybamm.logger.info("Simplifying algebraic")
            concatenated_algebraic = simp.simplify(concatenated_algebraic)
            pybamm.logger.info("Simplifying events")
            events = {name: simp.simplify(event) for name, event in events.items()}

        if model.use_jacobian:
            # Create Jacobian from concatenated rhs and algebraic
            y = pybamm.StateVector(
                slice(0, np.size(model.concatenated_initial_conditions))
            )
            # set up Jacobian object, for re-use of dict
            jacobian = pybamm.Jacobian()
            pybamm.logger.info("Calculating jacobian")
            jac_rhs = jacobian.jac(concatenated_rhs, y)
            jac_algebraic = jacobian.jac(concatenated_algebraic, y)
            jac = pybamm.SparseStack(jac_rhs, jac_algebraic)
            model.jacobian = jac
            model.jacobian_rhs = jac_rhs
            model.jacobian_algebraic = jac_algebraic

            if model.use_simplify:
                pybamm.logger.info("Simplifying jacobian")
                jac_algebraic = simp.simplify(jac_algebraic)
                jac = simp.simplify(jac)

            if model.use_to_python:
                pybamm.logger.info("Converting jacobian to python")
                jac_algebraic = pybamm.EvaluatorPython(jac_algebraic)
                jac = pybamm.EvaluatorPython(jac)

            def jac_alg_fn(t, y):
                return jac_algebraic.evaluate(t, y)

        else:
            jac = None
            jac_alg_fn = None

        if model.use_to_python:
            pybamm.logger.info("Converting RHS to python")
            concatenated_rhs = pybamm.EvaluatorPython(concatenated_rhs)
            pybamm.logger.info("Converting algebraic to python")
            concatenated_algebraic = pybamm.EvaluatorPython(concatenated_algebraic)
            pybamm.logger.info("Converting events to python")
            events = {
                name: pybamm.EvaluatorPython(event) for name, event in events.items()
            }

        # Calculate consistent initial conditions for the algebraic equations
        def rhs(t, y):
            return concatenated_rhs.evaluate(t, y, known_evals={})[0][:, 0]

        def algebraic(t, y):
            return concatenated_algebraic.evaluate(t, y, known_evals={})[0][:, 0]

        if len(model.algebraic) > 0:
            y0 = self.calculate_consistent_initial_conditions(
                rhs, algebraic, model.concatenated_initial_conditions[:, 0], jac_alg_fn
            )
        else:
            # can use DAE solver to solve ODE model
            y0 = model.concatenated_initial_conditions[:, 0]

        # Create functions to evaluate residuals
        def residuals(t, y, ydot):
            pybamm.logger.debug(
                "Evaluating residuals for {} at t={}".format(model.name, t)
            )
            y = y[:, np.newaxis]
            rhs_eval, known_evals = concatenated_rhs.evaluate(t, y, known_evals={})
            # reuse known_evals so shared subexpressions are evaluated only once
            alg_eval = concatenated_algebraic.evaluate(t, y, known_evals=known_evals)[0]
            # turn into 1D arrays
            rhs_eval = rhs_eval[:, 0]
            alg_eval = alg_eval[:, 0]
            return (
                np.concatenate((rhs_eval, alg_eval)) - model.mass_matrix.entries @ ydot
            )

        # Create event-dependent function to evaluate events
        def event_fun(event):
            def eval_event(t, y):
                return event.evaluate(t, y)

            return eval_event

        event_funs = [event_fun(event) for event in events.values()]

        # Create function to evaluate jacobian
        if jac is not None:

            def jacobian(t, y):
                return jac.evaluate(t, y, known_evals={})[0]

        else:
            jacobian = None

        # Add the solver attributes
        # Note: these are the (possibly) converted to python version rhs, algebraic
        # etc. The expression tree versions of these are attributes of the model
        self.y0 = y0
        self.rhs = rhs
        self.algebraic = algebraic
        self.residuals = residuals
        self.events = events
        self.event_funs = event_funs
        self.jacobian = jacobian

    def calculate_consistent_initial_conditions(
        self, rhs, algebraic, y0_guess, jac=None
    ):
        """
        Calculate consistent initial conditions for the algebraic equations through
        root-finding

        Parameters
        ----------
        rhs : method
            Function that takes in t and y and returns the value of the differential
            equations
        algebraic : method
            Function that takes in t and y and returns the value of the algebraic
            equations
        y0_guess : array-like
            Array of the user's guess for the initial conditions, used to initialise
            the root finding algorithm
        jac : method
            Function that takes in t and y and returns the value of the jacobian for the
            algebraic equations

        Returns
        -------
        y0_consistent : array-like, same shape as y0_guess
            Initial conditions that are consistent with the algebraic equations (roots
            of the algebraic equations)
        """
        pybamm.logger.info("Start calculating consistent initial conditions")

        # Split y0_guess into differential and algebraic
        len_rhs = rhs(0, y0_guess).shape[0]
        y0_diff, y0_alg_guess = np.split(y0_guess, [len_rhs])

        def root_fun(y0_alg):
            "Evaluates algebraic using y0_diff (fixed) and y0_alg (changed by algo)"
            y0 = np.concatenate([y0_diff, y0_alg])
            out = algebraic(0, y0)
            pybamm.logger.debug(
                "Evaluating algebraic equations at t=0, L2-norm is {}".format(
                    np.linalg.norm(out)
                )
            )
            return out

        if jac:
            if issparse(jac(0, y0_guess)):

                def jac_fn(y0_alg):
                    """
                    Evaluates jacobian using y0_diff (fixed) and y0_alg (varying)
                    """
                    y0 = np.concatenate([y0_diff, y0_alg])
                    return jac(0, y0)[:, len_rhs:].toarray()

            else:

                def jac_fn(y0_alg):
                    """
                    Evaluates jacobian using y0_diff (fixed) and y0_alg (varying)
                    """
                    y0 = np.concatenate([y0_diff, y0_alg])
                    return jac(0, y0)[:, len_rhs:]

        else:
            jac_fn = None

        # Find the values of y0_alg that are roots of the algebraic equations
        sol = optimize.root(
            root_fun,
            y0_alg_guess,
            jac=jac_fn,
            method=self.root_method,
            tol=self.root_tol,
        )
        # Return full set of consistent initial conditions (y0_diff unchanged)
        y0_consistent = np.concatenate([y0_diff, sol.x])

        # FIX: residuals in sol.fun may be negative, so compare their absolute
        # values against the tolerance.  The previous check `sol.fun < tol`
        # would wrongly accept arbitrarily large negative residuals.
        if sol.success and np.all(np.abs(sol.fun) < self.root_tol * len(sol.x)):
            pybamm.logger.info("Finish calculating consistent initial conditions")
            return y0_consistent
        elif not sol.success:
            raise pybamm.SolverError(
                "Could not find consistent initial conditions: {}".format(sol.message)
            )
        else:
            raise pybamm.SolverError(
                """
                Could not find consistent initial conditions: solver terminated
                successfully, but maximum solution error ({}) above tolerance ({})
                """.format(
                    np.max(np.abs(sol.fun)), self.root_tol * len(sol.x)
                )
            )

    def integrate(
        self, residuals, y0, t_eval, events=None, mass_matrix=None, jacobian=None
    ):
        """
        Solve a DAE model defined by residuals with initial conditions y0.

        Parameters
        ----------
        residuals : method
            A function that takes in t, y and ydot and returns the residuals of the
            equations
        y0 : numeric type
            The initial conditions
        t_eval : numeric type
            The times at which to compute the solution
        events : method, optional
            A function that takes in t and y and returns conditions for the solver to
            stop
        mass_matrix : array_like, optional
            The (sparse) mass matrix for the chosen spatial method.
        jacobian : method, optional
            A function that takes in t, y and ydot and returns the Jacobian

        Subclasses must implement this to drive a concrete DAE integrator.
        """
        raise NotImplementedError
|
{"hexsha": "ba14d26d2cbf5f62e0a15317efedce6c4a381e69", "size": 12151, "ext": "py", "lang": "Python", "max_stars_repo_path": "pybamm/solvers/dae_solver.py", "max_stars_repo_name": "htorodriguez/PyBaMM", "max_stars_repo_head_hexsha": "91e051e8ce287824b41f238ae39f3208606228ff", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-29T19:06:04.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-29T19:06:04.000Z", "max_issues_repo_path": "pybamm/solvers/dae_solver.py", "max_issues_repo_name": "htorodriguez/PyBaMM", "max_issues_repo_head_hexsha": "91e051e8ce287824b41f238ae39f3208606228ff", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pybamm/solvers/dae_solver.py", "max_forks_repo_name": "htorodriguez/PyBaMM", "max_forks_repo_head_hexsha": "91e051e8ce287824b41f238ae39f3208606228ff", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0172910663, "max_line_length": 88, "alphanum_fraction": 0.5867006831, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2655}
|
// Copyright 2014 BVLC and contributors.
#include <algorithm>
#include <vector>
#include <cmath>
#include "google/protobuf/descriptor.h"
#include "google/protobuf/descriptor.h"
#include "caffe/layer.hpp"
#include "caffe/util/rng.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/flow_augmentation_layer.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <boost/random.hpp>
#include <boost/random/normal_distribution.hpp>
#include <iostream>
#include <fstream>
#include <omp.h>
using std::max;
namespace caffe {
// Runs once at layer construction: validates that a crop size was supplied
// and freezes the blob shapes (per-iteration Reshape is disabled below).
template <typename Dtype>
void FlowAugmentationLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top)
{
  // Cropping is mandatory: the output flow is always crop_height x crop_width.
  CHECK_GT(this->layer_param_.augmentation_param().crop_width(),0) << "Please enter crop width if you want to perform augmentation";
  CHECK_GT(this->layer_param_.augmentation_param().crop_height(),0) << "Please enter crop height if you want to perform augmentation";
  // Output shape depends only on the fixed crop size, so Reshape is run once
  // at setup and skipped on subsequent iterations.
  this->layer_param_.set_reshape_every_iter(false);
  LOG(WARNING) << "FlowAugmentationLayer only runs Reshape only on setup";
}
// Validates the input/output blob counts and allocates the output blob plus
// per-batch augmentation-coefficient caches.  Only called at setup time
// (LayerSetUp disables per-iteration reshape).
template <typename Dtype>
void FlowAugmentationLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top)
{
  CHECK_EQ(bottom.size(), 3) << "Flow augmentation layer takes three input blobs: FlowField, Img1TransfParams, Img2TransfParams";
  CHECK_EQ(top.size(), 1) << "Flow augmentation layer outputs one output blob: Augmented Flow";
  const int num = bottom[0]->num();
  const int channels = bottom[0]->channels();
  //const int height = bottom[0]->height();
  //const int width = bottom[0]->width();
  CHECK_EQ(channels, 2) << "Flow data must have two channels";
  // The augmented flow is emitted at the configured crop size.
  cropped_width_ = this->layer_param_.augmentation_param().crop_width();
  cropped_height_ = this->layer_param_.augmentation_param().crop_height();
  (top)[0]->Reshape(num,channels, cropped_height_, cropped_width_);
  //test_coeffs_.ReshapeLike(*bottom[1]);
  //test_coeffs_.ShareData(*bottom[1]); //reuse
  // Set up coeff blobs, mirroring the shapes of the two transform-parameter inputs
  all_coeffs1_.ReshapeLike(*bottom[1]);
  all_coeffs2_.ReshapeLike(*bottom[2]);
  // How many params exist in general?  Taken from the AugmentationCoeff proto.
  AugmentationCoeff coeff;
  num_params_ = coeff.GetDescriptor()->field_count();
  // = Coeff transformation matrix cache for one batch
  // NOTE(review): one tTransMat entry per batch image; layout is defined in
  // AugmentationLayerBase -- confirm there if sizing changes.
  coeff_matrices1_.reset(new SyncedMemory(num * sizeof(typename AugmentationLayerBase<Dtype>::tTransMat)));
  coeff_matrices2_.reset(new SyncedMemory(num * sizeof(typename AugmentationLayerBase<Dtype>::tTransMat)));
}
// CPU forward pass is intentionally unsupported; this layer aborts if run
// without a GPU implementation (the GPU path lives in a separate source file).
template <typename Dtype>
void FlowAugmentationLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top)
{
  LOG(FATAL) << "Forward CPU Augmentation not implemented.";
}
// In CPU-only builds, stub out the (unavailable) GPU entry points.
#ifdef CPU_ONLY
STUB_GPU(FlowAugmentationLayer);
#endif
// Instantiate float/double templates and register the layer with Caffe's
// layer factory under the type name "FlowAugmentation".
INSTANTIATE_CLASS(FlowAugmentationLayer);
REGISTER_LAYER_CLASS(FlowAugmentation);
} // namespace caffe
|
{"hexsha": "51b628a82efb1f6c3966844c539fc04d5284773f", "size": 2942, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/caffe/layers/flow_augmentation_layer.cpp", "max_stars_repo_name": "AyaLotfy/flownet2", "max_stars_repo_head_hexsha": "e3e3dd043d9a65bc8727429938a0d88539f906fd", "max_stars_repo_licenses": ["FSFAP"], "max_stars_count": 1081.0, "max_stars_repo_stars_event_min_datetime": "2017-04-25T11:46:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:24:45.000Z", "max_issues_repo_path": "src/caffe/layers/flow_augmentation_layer.cpp", "max_issues_repo_name": "AyaLotfy/flownet2", "max_issues_repo_head_hexsha": "e3e3dd043d9a65bc8727429938a0d88539f906fd", "max_issues_repo_licenses": ["FSFAP"], "max_issues_count": 220.0, "max_issues_repo_issues_event_min_datetime": "2017-04-28T04:47:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-27T09:49:43.000Z", "max_forks_repo_path": "src/caffe/layers/flow_augmentation_layer.cpp", "max_forks_repo_name": "AyaLotfy/flownet2", "max_forks_repo_head_hexsha": "e3e3dd043d9a65bc8727429938a0d88539f906fd", "max_forks_repo_licenses": ["FSFAP"], "max_forks_count": 361.0, "max_forks_repo_forks_event_min_datetime": "2017-04-26T02:16:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T04:21:09.000Z", "avg_line_length": 31.6344086022, "max_line_length": 136, "alphanum_fraction": 0.7447314752, "num_tokens": 721}
|
from typing import Type
import torch
from torch import nn
import numpy as np
from nes import NES, Policy, default_config
from nes.config import default_config, Config
# Mutable configuration object built from the library defaults; the
# @config-decorated classes below register their sections onto it.
config = Config(default_config)
class Ackley(Policy):
    """Policy whose parameters are a 2-D point (x, y).

    The reward is the negated Ackley function, so the best achievable
    reward is 0 at the origin.
    """

    def __init__(self):
        super().__init__()
        # Two free parameters; NES perturbs them directly, so autograd is off.
        self.params = nn.Parameter(torch.rand(2), requires_grad=False)

    def evaluate(self):
        """Return the reward (negative Ackley value) at the current params."""
        x = self.params[0]
        y = self.params[1]
        first_term = -20 * torch.exp(-0.2 * torch.sqrt(0.5 * (x**2 + y**2)))
        # FIX: use torch.cos for y as well. The original called np.cos on a
        # torch tensor, which silently converts CPU tensors to ndarrays and
        # raises on CUDA tensors.
        second_term = -torch.exp(0.5 * (torch.cos(2 * np.pi * x)
                                        + torch.cos(2 * np.pi * y))) + np.e + 20
        return -(second_term + first_term).item()
# Configuration sections registered on the module-level `config` object via
# the @config decorator; NES(config) reads them by section name.

@config('policy')
class PolicyConfig():
    # Class (not instance) of the policy to optimize.
    policy: Type[Policy] = Ackley


@config('optimizer')
class OptimizerConfig():
    lr: float = 0.02
    # Optimizer class applied to the policy parameters.
    optim_type: Type[torch.optim.Optimizer] = torch.optim.Adam


@config('nes')
class NESConfig():
    n_step: int = 300            # number of generations to run
    l2_decay: float = 0.0        # L2 penalty on parameters
    population_size: int = 256   # perturbations per generation
    sigma: float = 0.2           # stddev of parameter noise
    seed: int = 123123           # RNG seed for reproducibility
if __name__ == '__main__':
    nes = NES(config)

    # Hook invoked after every optimization step; `self` is the NES instance
    # (the hook machinery passes it through), so we can read the current
    # generation counter and re-evaluate the policy for logging.
    @nes.optimize.add_hook()
    def after_optimize(self, *args, **kwargs):
        reward = self.policy.evaluate()
        print(f'Generation: {self.gen} Reward: {reward}')

    nes.train()
|
{"hexsha": "3029cfbf1f3d3dbd3184923d7a64433b295b34ec", "size": 1250, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/ackley.py", "max_stars_repo_name": "goktug97/nes-torch", "max_stars_repo_head_hexsha": "016f2618d2f5019718c62359eebb9fd939647607", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-04-17T09:35:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T08:35:26.000Z", "max_issues_repo_path": "examples/ackley.py", "max_issues_repo_name": "goktug97/nes-torch", "max_issues_repo_head_hexsha": "016f2618d2f5019718c62359eebb9fd939647607", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/ackley.py", "max_forks_repo_name": "goktug97/nes-torch", "max_forks_repo_head_hexsha": "016f2618d2f5019718c62359eebb9fd939647607", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3214285714, "max_line_length": 88, "alphanum_fraction": 0.6416, "include": true, "reason": "import numpy", "num_tokens": 350}
|
import numpy, logging
from sys import exit
from Classes.DotData import DotData
from Operations.Shari_Operations.localize.xpopMerge import xpopMerge
from Operations.Shari_Operations.localize.Scenario import GetSelectionScenarios, GetScenarios
from Operations.MiscUtil import MakeAlphaNum, Dict, Sfx, progress, AddFileSfx
def mergeSims( scenario, Ddata = '../Data/Shari_Data/sim/', simsOut = 'simsOut3', nreplicas = 5,
               thinExt = '.thin', thinSfx = '',
               selpop = None, getio = None ):
    """Gathers per-SNP information, for all replicas of a given scenario, and outputs it in a
    single DotData where each line gives info for one SNP.

    Specifically, reads simulation and Sweep output, collects columns needed for the composite
    likelihood test (chrom, base pair position, genetic distance, anc frequencies for 3
    populations, xpop for each pair, and iHS, iHH_A and iHH_D for the selected population).

    Input params:

       scenario - an object of class Scenario, indicating the simulation scenario (either neutral
          or a selection scenario) from which all replicas were simulated.
       nreplicas - the number of replicas simulated under this scenario.
          Each replica represents a chromosome region, with a set of SNPs on it.
       Ddata - the directory under which the simulations and the Sweep analysis results live.
          Under this directory we expect to find:
             iHS analysis results, under power_ihs/
             XP-EHH analysis results, under power_xpop
             simulation output giving SNP positions
       thinExt - the extension appended to simulation files that describe the SNPs in the
          simulated replica.  Sometimes we create simulations and then thin them under different
          thinning models (to simulate SNP ascertainment by the various stages of HapMap); these
          differently thinned versions of the same simulations might be stored in simulation
          files with different extensions.
       thinSfx - the suffix appended to the power_ihs and power_xpop directory names, telling
          where to find iHS and XP-EHH analyses of the simulations.  When we analyze the same
          simulations after applying different thinning scenarios, the iHS and XP-EHH analyses
          for each thinning scenario go into a separate set of directories.
       selpop - for the neutral scenario only, which population's iHS file to load.
       getio - if truthy, do no work; instead return a dict describing the rule's inputs,
          outputs and file descriptions (pipeline introspection mode).

    Output params:

       Ddata - under Ddata writes a DotData named merged_scenName.data, where each line gives
          info for one SNP, with the following columns (type of data is float unless stated
          otherwise):

          CHROM_POS 1 - physical (basepair) position of the SNP within its replica.
             Note that one merged file contains SNPs for a set of replicas (all for the same
             scenario), so there could be multiple SNPs with the same position.  The replica
             number is given in the Chrom column.
          FREQ1 1 - derived allele frequency in pop 1 ( European )
          FREQ1 4 - derived allele frequency in pop 4 ( EastAsian )
          FREQ1 5 - derived allele frequency in pop 5 ( WestAfrican )

          R AllEHH logratio Deviation European_WestAfrican - XP-EHH score to the right of the
             SNP, between European and WestAfrican pops, normalized to the neutral background.
             Analogously for the next five columns:
          L AllEHH logratio Deviation European_WestAfrican
          R AllEHH logratio Deviation EastAsian_European
          L AllEHH logratio Deviation EastAsian_European
          R AllEHH logratio Deviation EastAsian_WestAfrican
          L AllEHH logratio Deviation EastAsian_WestAfrican

          SNP pos (cM) European_WestAfrican - genetic map position of this SNP, within its
             replica (the European_WestAfrican suffix is irrelevant).
          SNP pos (bases) European_WestAfrican - physical (basepair) position of this SNP
             within its replica (the European_WestAfrican suffix is irrelevant).
          Chrom European_WestAfrican - the replica from which this SNP comes; can be nan
             (the European_WestAfrican suffix is irrelevant).

          Chrom - the replica from which this SNP comes; can be nan
          SNP pos (bases) - physical (basepair) position of this SNP within its replica.
          SNP pos (cM) - genetic map position of this SNP within its replica.

          Both iHH_A - sum of iHH_A for both directions from this SNP
          Both iHH_D - sum of iHH_D for both directions from this SNP
          Both iHS - the value in 'Both Unstandardised iHS' (below), but binned by derived
             allele frequency and normalized within the bin.
          Left iHH_D - iHH_D to the left of the SNP (the raw integral value).
             Analogously for the next three:
          Right iHH_D
          Left iHH_A
          Right iHH_A
          Both Unstandardised iHS - log( (iHH_A_left + iHH_A_right) / ( iHH_D_left + iHH_D_right ) )
             ( see also 'Both iHS' column for the standardized iHS score )
    """

    # selpop may only override the analyzed population for the neutral scenario.
    assert selpop == None or scenario.is_neutral()

    DataDir = Ddata + '/'
    SimDir = DataDir + simsOut + thinSfx + '/'

    # Scenario-specific subdirectory/name, e.g. '10ky/sel20_1' or 'neutral'.
    if not scenario.is_neutral():
        scenName = 'sel%d_%d' % ( scenario.mutFreq, scenario.mutPop )
        scenDir = str( scenario.mutAge ) + 'ky/' + scenName
    else:
        scenName = 'neutral'
        scenDir = 'neutral'

    popName = {1:'European',4:'EastAsian',5:'WestAfrican'}

    # iHS results for the selected population (or `selpop`/pop 1 for neutral).
    ihsSignifTsv = DataDir + 'power_ihs' + thinSfx + '/' + scenDir + '/ihs_sig_' + \
        popName[ scenario.mutPop if not scenario.is_neutral() else ( selpop if selpop != None else 1 ) ] + '.tsv'

    # XP-EHH results, one file per population pair.
    xpopSignifTsv = [ DataDir + 'power_xpop' + thinSfx + '/' + scenDir + '/xpop_significance_' + popPair + '.tsv'
                      for popPair in ( 'EastAsian_WestAfrican', 'EastAsian_European', 'European_WestAfrican' ) ]

    # Per-replica, per-population SNP position files.
    posFiles = [ SimDir + scenDir + '/' + str(ichrom) + '_' + scenName + '.pos-%d%s' % ( pop, thinExt )
                 for ichrom in range( nreplicas ) for pop in ( 1, 4, 5 ) ]

    # NOTE(review): `isNeutral()` here vs `is_neutral()` elsewhere — presumably
    # both exist on Scenario; confirm against the Scenario class.
    ageSfx = '%dky' % ( scenario.mutAge if not scenario.isNeutral() else 10 )

    mergedDotData = AddFileSfx( Ddata + 'merged.data/', ageSfx, scenario.scenName(), selpop, thinSfx )

    # Human-readable descriptions of the output columns, for pipeline metadata.
    fileDescrs = \
        { mergedDotData :
          ( 'Various per-snp statistics for SNPs in scenario $scenario, replicas 0-$nreplicas.',
            ( ( 'CHROM_POS 1', 'physical (basepair) position of the SNP within its replica. '
                'Note that one merged file contains SNPs for a set of replicas (all for the same scenario), '
                'so there could be multiple SNPs with the same position. The replica number '
                'is given in the Chrom column. ' ),
              ( 'FREQ1 1', 'derived allele frequency in pop 1 ( European )' ),
              ( 'R AllEHH logratio Deviation European_WestAfrican', 'XP-EHH score to the R of the SNP, '
                'between European and WestAfrican pops, normalized to the neutral background.' ),
              ( 'SNP pos (cM) European_WestAfrican', 'genetic map SNP position' ),
              ( 'SNP pos (bases) European_WestAfrican', 'physical SNP position' ),
              ( 'Chrom European_WestAfrican', 'chromosome (or replica number)' ),
              ( 'Chrom', 'chromosome (or replica number)' ),
              ( 'SNP pos (bases)', 'physical SNP position' ),
              ( 'SNP pos (cM)', 'genetic map SNP position' ),
              ( 'Both iHH_A', 'sum of iHH_A scores for both sides' ),
              ( 'Both iHH_D', 'sum of iHH_D scores for both sides' ),
              ( 'Both iHS', 'sum of iHS scores for both sides' ),
              ( ' Left iHH_D', 'iHH_D score to the left of the SNP' ),
              ( 'Right iHH_D', 'iHH_D score to the right of the SNP' ),
              ( 'Left iHH_A', 'iHH_A score to the left of the SNP' ),
              ( 'Right iHH_A', 'iHH_A score to the right of the SNP' ),
              ( 'Both Unstandardised iHS', 'sum of unstandardized iHS scores for both sides' ) ) ) }

    # Introspection mode: report inputs/outputs without doing any work.
    if getio: return dict( depends_on = posFiles + [ ihsSignifTsv ] + xpopSignifTsv, creates = mergedDotData,
                           mediumRuleNameSfx = scenario.scenDir(),
                           fileDescrs = fileDescrs )

    ncausal = 0

    # Sweep writes '-' for missing values; map those to nan on load.
    dashFixer = lambda v: v if v != '-' else numpy.nan

    # Load iHS of selected pop
    ihsAll = DotData(SVPath = ihsSignifTsv,ToLoad=['Chrom','SNP pos (bases)','SNP pos (cM)','Both iHH_A','Both iHH_D','Both iHS','Left iHH_D','Right iHH_D','Left iHH_A','Right iHH_A','Both Unstandardised iHS'], SVValueFixer = dashFixer)
    ihsAllChrom = ihsAll.Chrom

    # Load xpop values (all three population pairs merged on SNP position).
    xpopAll = xpopMerge( *xpopSignifTsv )
    logging.info( 'done with xpopMerge' )

    xpopAll = xpopAll[['R AllEHH logratio Deviation European_WestAfrican','L AllEHH logratio Deviation European_WestAfrican','R AllEHH logratio Deviation EastAsian_European','L AllEHH logratio Deviation EastAsian_European','R AllEHH logratio Deviation EastAsian_WestAfrican',
                       'L AllEHH logratio Deviation EastAsian_WestAfrican','SNP pos (cM) European_WestAfrican','SNP pos (bases) European_WestAfrican','Chrom European_WestAfrican']]
    xpopAllChrom = xpopAll['Chrom European_WestAfrican']

    replicates = []

    xpopIdx = 0
    ihsIdx = 0

    for ichrom in range(nreplicas):

        progress( 'Merging replicas', ichrom, nreplicas, freq = 1 )
        logging.info( 'looking at replica %d of %d' % ( ichrom, nreplicas ) )

        # Load in pos files for this replica.
        # They give, for each SNP in the replica, its physical (basepair) position within the replica,
        # and the frequency of the derived and the ancestral alleles.
        pos1, pos4, pos5 = [ DotData(SVPath=SimDir + scenDir + '/' + str(ichrom) + '_' + scenName + '.pos-%d%s' % ( pop, thinExt),
                                     SVSkipFirstLines = 1, SVHeader = False,
                                     names = ['SNP','CHROM', 'CHROM_POS', 'ALLELE1', 'FREQ1', 'ALLELE2', 'FREQ2' ]) for pop in ( 1, 4, 5 ) ]

        assert pos1.numCols() == pos4.numCols() == pos5.numCols()
        posBlank = ((numpy.nan,)*pos1.numCols(),)*3

        logging.info( 'Loaded pos files for chrom ' + str( ichrom ) + ': ' + str( len(pos1) ) + 'snps' )
        assert set(pos1.CHROM_POS) == set(pos4.CHROM_POS) == set(pos5.CHROM_POS)
        logging.info( 'pos file sizes are: %d, %d, %d' % ( len( pos1 ), len( pos4 ), len( pos5 ) ) )
        logging.info( 'Merging on position...' )

        # Align the three populations' rows on physical SNP position.
        posAll = DotData.mergeOnKeyCols((pos1,pos4,pos5),('CHROM_POS',)*3,posBlank, suffixes = (' 1',' 4',' 5'))
        logging.info( 'Done merging.' )
        logging.info( 'type(posAll) is ' + str( type( posAll ) ) )
        print len(posAll)

        # Tag every SNP with its replica number.
        chrom = numpy.ones(len(posAll))*ichrom
        newChrom = DotData(Columns = [chrom,],names=['newChrom',])
        print newChrom
        posAll = posAll[['CHROM_POS 1','FREQ1 1','FREQ1 4','FREQ1 5']]
        posAll.hstack(newChrom)
        logging.info( 'added replica number column' )
        print posAll
        posAllBlank = (numpy.nan,)*posAll.numCols()

        # 10-16-08 ADDED CHROM TO MERGED OUTPT ( not now used -- can be removed? )

        #
        # From the xpop and ihs significance results, get just the rows for SNPs in the
        # current replica
        #

        #while xpopIdx < len( xpopAllChrom ) and xpopAllChrom[ xpopIdx ] == ichrom: xpopIdx += 1
        #xpop = xpopAll[ :xpopIdx ]
        xpop = xpopAll[ xpopAllChrom == ichrom ]
        logging.info( 'selected xpop for replica %d' % ichrom )
        xpopBlank = (numpy.nan,)*xpop.numCols()

        #while ihsIdx < len( ihsAllChrom ) and ihsAllChrom[ ihsIdx ] == ichrom: ihsIdx += 1
        #ihs = ihsAll[ :ihsIdx ]
        ihs = ihsAll[ ihsAllChrom == ichrom ]
        logging.info( 'selected ihs for replica %d' % ichrom )
        ihsBlank = (numpy.nan,)*ihs.numCols()

        # if not set( ihs[ 'SNP pos (bases)' ] ).issubset( set( posAll['CHROM_POS 1'] ) ):
        #     print 'bad positions: ', set( posAll['CHROM_POS 1'] ) - set( ihs[ 'SNP pos (bases)' ] )
        # assert set( ihs[ 'SNP pos (bases)' ] ).issubset( set( posAll['CHROM_POS 1'] ) ), "bad iHS file " + ihsSignifTsv

        logging.info( 'merging replica %d' % ichrom )
        # Outer-merge positions, XP-EHH rows and iHS rows on physical position.
        Data = DotData.mergeOnKeyCols((posAll,xpop,ihs),('CHROM_POS 1','SNP pos (bases) European_WestAfrican','SNP pos (bases)'),
                                      blanks = (posAllBlank,xpopBlank,ihsBlank), suffixes = ('pos',' xpop',' ihs'),
                                      verbose = True )
        logging.info( 'done merging replica %d; now have %d records' % ( ichrom, len( Data ) ) )

        # Drop rows for SNPs that were absent from the .pos files.
        Data = Data[ numpy.invert( numpy.isnan( Data[ 'CHROM_POS 1' ] ) ) ]
        logging.info( 'done removing snp info for SNPs not in all .pos files for replica %d; now have %d records'
                      % ( ichrom, len( Data ) ) )

        replicates.append(Data)
        logging.info( 'now have ' + str( len( replicates ) ) + ' replicates.' )

    # endloop: for each replica

    logging.info( 'Stacking replicates...' )
    allData = reduce( lambda x, y: x.vstack(y), replicates)
    logging.info( 'Saving merged SNP info to ' + mergedDotData )
    allData.save( mergedDotData )
    logging.info( 'Finished mergeSims()' )

    # print scen + ' ncausal: ' + str(ncausal)
def DefineRulesTo_MergeSims( pr, mutAges, mutPops, mutFreqs, noNeutral, nreplicas,
Ddata, simsOut, thinExt = '.thin', thinSfx = '' ):
"""Pipeline generator: for each scenario, create a rule to merge SNP info for all SNPs in each replica within that scenario,
into a single table.
"""
for scenario in ( GetSelectionScenarios if noNeutral else GetScenarios)( mutAges, mutPops, mutFreqs ):
print 'generating rule for scenario ', scenario
pr.addInvokeRule( invokeFn = mergeSims,
invokeArgs = Dict( 'scenario nreplicas Ddata simsOut thinExt thinSfx' ) )
|
{"hexsha": "0466cff04c72a86c7320f9131d3a336c0541bc81", "size": 12885, "ext": "py", "lang": "Python", "max_stars_repo_path": "old/Operations/Shari_Operations/localize/mergeSims.py", "max_stars_repo_name": "broadinstitute/cms", "max_stars_repo_head_hexsha": "4743ffd3feac08f02be7719c82b3371cb94a4d6b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2015-05-18T14:39:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-22T12:57:07.000Z", "max_issues_repo_path": "old/Operations/Shari_Operations/localize/mergeSims.py", "max_issues_repo_name": "broadinstitute/cms", "max_issues_repo_head_hexsha": "4743ffd3feac08f02be7719c82b3371cb94a4d6b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2015-04-13T20:48:02.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-19T07:27:30.000Z", "max_forks_repo_path": "old/Operations/Shari_Operations/localize/mergeSims.py", "max_forks_repo_name": "broadinstitute/cms", "max_forks_repo_head_hexsha": "4743ffd3feac08f02be7719c82b3371cb94a4d6b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2016-03-31T06:56:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-30T16:35:45.000Z", "avg_line_length": 51.130952381, "max_line_length": 272, "alphanum_fraction": 0.6918897943, "include": true, "reason": "import numpy", "num_tokens": 3775}
|
import warnings
from typing import Union
import numpy as np
from scipy.special import betaln
from scipy.special import psi, polygamma
from autoconf import cached_property
from ..messages.abstract import AbstractMessage
def grad_betaln(ab):
    """Row-wise gradient of betaln at each (a, b) pair in `ab`.

    For a row (a, b) the result is (psi(a) - psi(a+b), psi(b) - psi(a+b)).
    `ab` has shape (n, 2); the result has the same shape.
    """
    row_totals = ab.sum(axis=1, keepdims=True)
    return psi(ab) - psi(row_totals)
def jac_grad_betaln(ab):
    """Row-wise Jacobian of grad_betaln.

    For each (a, b) row of `ab` (shape (n, 2)) returns the 2x2 matrix of
    second derivatives of betaln; the result has shape (n, 2, 2).
    """
    trigamma_total = polygamma(1, ab.sum(axis=1, keepdims=True))
    diagonal = polygamma(1, ab) - trigamma_total   # d2/da2 and d2/db2 terms
    off_diagonal = -trigamma_total[:, 0]           # mixed partial d2/(da db)
    stacked = [[diagonal[:, 0], off_diagonal], [off_diagonal, diagonal[:, 1]]]
    return np.array(stacked).T
def inv_beta_suffstats(lnX, ln1X):
    """Solve for (a, b) of a Beta distribution given its expected sufficient
    statistics, i.e. find a, b such that

        psi(a) - psi(a + b) = lnX
        psi(b) - psi(a + b) = ln1X

    Parameters
    ----------
    lnX, ln1X : scalar or array-like
        Expected values of log(X) and log(1 - X).

    Returns
    -------
    a, b
        Scalars if `lnX` is scalar, otherwise arrays with the shape of `lnX`.
    """
    _lnX, _ln1X = np.ravel(lnX), np.ravel(ln1X)
    lnXs = np.c_[_lnX, _ln1X]

    # Find initial starting location
    Gs = np.exp(lnXs)
    dG = 1 - Gs.sum(axis=1, keepdims=True)
    ab = np.maximum(1, (1 + Gs / dG) / 2)

    # 5 Newton Raphson iterations is generally enough
    for i in range(5):
        f = grad_betaln(ab) - lnXs
        jac = jac_grad_betaln(ab)
        ab += np.linalg.solve(jac, - f)

    if np.any(ab < 0):
        warnings.warn(
            "invalid negative parameters found for inv_beta_suffstats, "
            "clamping value to 0.5",
            RuntimeWarning
        )
        # BUG FIX: the clipped result was previously assigned to `b`, which
        # was immediately overwritten below, so the clamp had no effect.
        ab = np.clip(ab, 0.5, None)

    shape = np.shape(lnX)
    if shape:
        a = ab[:, 0].reshape(shape)
        b = ab[:, 1].reshape(shape)
    else:
        a, b = ab[0, :]

    return a, b
class BetaMessage(AbstractMessage):
    """
    Models a Beta distribution as an exponential-family message.

    Natural parameters are (alpha - 1, beta - 1), sufficient statistics are
    (log x, log(1 - x)), and the support is the unit interval.
    """
    log_base_measure = 0
    _support = ((0, 1),)
    _min = 0
    _max = 1
    _range = 1
    _parameter_support = ((0, np.inf), (0, np.inf))

    def __init__(
            self,
            alpha=0.5,
            beta=0.5,
            log_norm=0,
            id_=None
    ):
        self.alpha = alpha
        self.beta = beta
        super().__init__(
            alpha,
            beta,
            log_norm=log_norm,
            id_=id_
        )

    def value_for(self, unit: float) -> float:
        # BUG FIX: was `raise NotImplemented()` — NotImplemented is a sentinel
        # constant, not an exception, so calling/raising it raised TypeError.
        # NotImplementedError is the correct exception for an unsupported op.
        raise NotImplementedError()

    @cached_property
    def log_partition(self) -> np.ndarray:
        # log B(alpha, beta): the log normalizing constant of the Beta pdf.
        return betaln(*self.parameters)

    @cached_property
    def natural_parameters(self) -> np.ndarray:
        return self.calc_natural_parameters(
            self.alpha,
            self.beta
        )

    @staticmethod
    def calc_natural_parameters(
            alpha: Union[float, np.ndarray],
            beta: Union[float, np.ndarray]
    ) -> np.ndarray:
        """Map (alpha, beta) to natural parameters (alpha - 1, beta - 1)."""
        return np.array([alpha - 1, beta - 1])

    @staticmethod
    def invert_natural_parameters(
            natural_parameters: np.ndarray
    ) -> np.ndarray:
        """Inverse of calc_natural_parameters."""
        return natural_parameters + 1

    @classmethod
    def invert_sufficient_statistics(
            cls, sufficient_statistics: np.ndarray
    ) -> np.ndarray:
        # Recover (a, b) from E[log x], E[log(1-x)] via Newton's method.
        a, b = inv_beta_suffstats(*sufficient_statistics)
        return cls.calc_natural_parameters(a, b)

    @classmethod
    def to_canonical_form(cls, x: np.ndarray) -> np.ndarray:
        # Sufficient statistics of the Beta family.
        return np.array([np.log(x), np.log1p(-x)])

    @cached_property
    def mean(self) -> Union[np.ndarray, float]:
        return self.alpha / (self.alpha + self.beta)

    @cached_property
    def variance(self) -> Union[np.ndarray, float]:
        return (
                self.alpha * self.beta
                / (self.alpha + self.beta) ** 2
                / (self.alpha + self.beta + 1)
        )

    def sample(self, n_samples=None):
        """Draw Beta variates; prepend a sample axis when n_samples is given."""
        a, b = self.parameters
        shape = (n_samples,) + self.shape if n_samples else self.shape
        return np.random.beta(a, b, size=shape)

    def kl(self, dist):
        """KL divergence KL(dist || self) between two Beta messages."""
        # TODO check this is correct
        # https://arxiv.org/pdf/0911.4863.pdf
        if self._support != dist._support:
            raise TypeError('Support does not match')

        aP, bP = dist.parameters
        aQ, bQ = self.parameters
        return (
                betaln(aQ, bQ) - betaln(aP, bP)
                - (aQ - aP) * psi(aP)
                - (bQ - bP) * psi(bP)
                + (aQ - aP + bQ - bP) * psi(aP + bP)
        )

    def logpdf_gradient(self, x):
        # d/dx log p(x) = (a-1)/x - (b-1)/(1-x); note (b-1)/(x-1) == -(b-1)/(1-x).
        logl = self.logpdf(x)
        a, b = self.parameters
        gradl = (a - 1) / x + (b - 1) / (x - 1)
        return logl, gradl

    def logpdf_gradient_hessian(self, x):
        # Log-density with its first and second derivative w.r.t. x.
        logl = self.logpdf(x)
        a, b = self.parameters
        ax, bx = (a - 1) / x, (b - 1) / (x - 1)
        gradl = ax + bx
        hessl = -ax / x - bx / (x - 1)
        return logl, gradl, hessl
|
{"hexsha": "1b31637d78b6fd5c2ef87ba5c8d333e14156e153", "size": 4524, "ext": "py", "lang": "Python", "max_stars_repo_path": "autofit/messages/beta.py", "max_stars_repo_name": "caoxiaoyue/PyAutoFit", "max_stars_repo_head_hexsha": "819cd2acc8d4069497a161c3bb6048128e44d828", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 39, "max_stars_repo_stars_event_min_datetime": "2019-01-24T10:45:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T09:37:59.000Z", "max_issues_repo_path": "autofit/messages/beta.py", "max_issues_repo_name": "caoxiaoyue/PyAutoFit", "max_issues_repo_head_hexsha": "819cd2acc8d4069497a161c3bb6048128e44d828", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 260, "max_issues_repo_issues_event_min_datetime": "2018-11-27T12:56:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:08:59.000Z", "max_forks_repo_path": "autofit/messages/beta.py", "max_forks_repo_name": "caoxiaoyue/PyAutoFit", "max_forks_repo_head_hexsha": "819cd2acc8d4069497a161c3bb6048128e44d828", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2018-11-30T16:49:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T17:39:29.000Z", "avg_line_length": 26.3023255814, "max_line_length": 72, "alphanum_fraction": 0.5506189213, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1296}
|
import numpy
from crystal_util import bragg_calc2
import scipy.constants as codata
def crystal_shadow(filename, str, phot_in):
    '''
    #+
    # Singapore Synchrotron Light Source (SSLS)
    # :Author: X.J. Yu, slsyxj@nus.edu.sg
    # :Name: crystal_shadow
    # :Purpose: create a shadow data file for any crystal
    # :Input:
    #   filename: file name to write
    #   str: output dictionary from bragg_calc2 (note: parameter name shadows
    #        the builtin ``str``; kept for backward compatibility)
    #   phot_in: photon energy array (eV)
    #-
    '''
    # Unpack the bragg_calc2 result dictionary.
    RN = str["rn"]
    D_SPACING = str["dspacing"]
    nbatom = str["nbatom"]
    atnum = str["atnum"]
    TEMPER = str["temper"]
    G_0 = str["G_0"]
    G = str["G"]
    G_BAR = str["G_BAR"]
    f0coeff = numpy.array(str["f0coeff"])
    NPOINT = str["npoint"]  # kept (validates key presence); not otherwise used
    energy = numpy.array(str["energy"])
    fp = numpy.array(str["f1"])
    fpp = numpy.array(str["f2"])
    zcol = numpy.array(str["zcol"])
    fcol = numpy.array(str["fraction"])
    UCOL = numpy.array(str["unique_AtomicName"])
    LCOL = numpy.array(str["list_AtomicName"])

    CI = 0.0 + 1.0j
    TOANGS = codata.h * codata.c / codata.e * 1e10  # hc/e in angstrom*eV
    TOCM = TOANGS * 1e-8
    TWOPI = 2 * numpy.pi

    # NOTE(review): `phot` is re-bound by the interpolation loop below, so the
    # geometry section further down actually uses the LAST energy in phot_in,
    # not the first — preserved as-is; confirm intended behavior.
    phot = phot_in[0]  # ;first energy
    F1 = numpy.zeros((len(phot_in), nbatom), dtype=float)
    F2 = numpy.zeros((len(phot_in), nbatom), dtype=float)

    # F000[j]: actual number of electrons carried by each atom (sum of the f0
    # coefficients' constant part), X.J. Yu, slsyxj@nus.edu.sg
    F000 = numpy.zeros(nbatom, dtype=float)
    for j in range(nbatom):
        icentral = int(f0coeff.shape[1] / 2)
        F000[j] = f0coeff[j, icentral]
        for i in range(icentral):
            F000[j] += f0coeff[j, i]

    BOOL_UCOL = UCOL[0] == ''  # True for a "normal" (non-charged-atom) crystal

    # Linearly interpolate f' and f'' onto each requested photon energy.
    for i, phot in enumerate(phot_in):
        for j, ienergy in enumerate(energy):
            if ienergy > phot:
                break
        nener = j - 1  # index of the tabulated energy just below `phot`
        for j in range(nbatom):
            F1[i, j] = fp[j, nener] + (fp[j, nener + 1] - fp[j, nener]) * \
                (phot - energy[nener]) / (energy[nener + 1] - energy[nener])
            F2[i, j] = fpp[j, nener] + (fpp[j, nener + 1] - fpp[j, nener]) * \
                (phot - energy[nener]) / (energy[nener + 1] - energy[nener])

    # Forward structure factor F(0,0,0) at the last interpolated energy.
    # Charged atoms carry F000 electrons (not atnum), and fractional occupancy
    # is folded in here; occupancy for G/G_BAR handled in bragg_calc.
    F_0 = 0.0 + 0.0j
    for j in range(nbatom):
        FN = F000[j] + F1[i, j] + CI * F2[i, j]
        if BOOL_UCOL:  # normal crystal
            F_0 += FN * numpy.sum(numpy.where(zcol == atnum[j], fcol, 0.0))
        else:
            # complex compound crystals: the same element may appear with
            # different charges (O2-, O1.5-), hence different f0 coefficients
            F_0 += FN * numpy.sum(numpy.where(LCOL == UCOL[j], fcol, 0.0))

    R_LAM0 = TOCM / phot  # wavelength in cm (at the last energy; see NOTE above)
    SIN_GRA = R_LAM0 / 2 / D_SPACING
    # Legacy refraction-corrected Bragg angle computation; the results are not
    # written to the output file but are kept to preserve behavior (warnings
    # for out-of-range input, etc.).
    theta = numpy.arcsin(SIN_GRA)
    REFRAC = (1.0 + 0.0j) - R_LAM0 * R_LAM0 * RN * F_0 / TWOPI
    DELTA = 1.0 - REFRAC.real
    BETA = -REFRAC.imag
    THETA_B = R_LAM0 / (1.0 - (DELTA / (SIN_GRA * SIN_GRA))) / 2.0 / D_SPACING

    # Quadratic coefficients for f0(sin(theta)/lambda) interpolation, one row
    # per atom, written to the output file for SHADOW to evaluate.
    C_TMP = numpy.zeros((nbatom, 3), dtype=float)
    if BOOL_UCOL:  # normal crystal
        for j in range(nbatom):
            zcol = numpy.where(zcol == atnum[j], j + 1, zcol)  # 1-based fortran index
    else:
        for j in range(nbatom):
            zcol = numpy.where(LCOL == UCOL[j], j + 1, zcol)  # 1-based fortran index

    # Sample f0 at 0.9x, 1.0x, 1.1x of sin(theta_B)/lambda and fit a parabola.
    ratio = numpy.array([0.9, 1.0, 1.1]) * SIN_GRA / (TOANGS / phot)
    F0 = numpy.zeros((nbatom, 3), dtype=float)
    for j in range(nbatom):
        icentral = len(f0coeff[0])
        icentral = int(icentral / 2)
        F0[j, :] = f0coeff[j, icentral]
        for jj in range(icentral):
            F0[j, :] += f0coeff[j, jj] * \
                numpy.exp(-1.0 * f0coeff[j, jj + icentral + 1] * ratio * ratio)
        # polyfit returns highest power first; reverse to (c0, c1, c2) order
        C_TMP[j, :] = numpy.polyfit(ratio, F0[j, :], 2)[::-1]

    # Write the SHADOW crystal file. The `with` statement owns closing the
    # file (the previous explicit close() calls inside it were redundant).
    with open(filename, "w") as file:
        try:
            file.write(("-1 %g %g\n") % (RN, D_SPACING))
            file.write(("%i " * 3 + "%.3lf\n") % (nbatom, len(zcol), len(phot_in), TEMPER[0]))
            for j in range(nbatom):
                file.write(("%g (%.6g, %.6g) (%.6g, %.6g)\n") % (
                    F000[j], G[j].real, G[j].imag, G_BAR[j].real, G_BAR[j].imag))
                file.write(("%g " * 3 + "\n") % (C_TMP[j, 0], C_TMP[j, 1], C_TMP[j, 2]))
            for j in range(len(zcol)):
                file.write(("%i %g\n") % (zcol[j], fcol[j]))
            for iphot in range(len(phot_in)):
                file.write("%g \n" % (phot_in[iphot]))
                for j in range(nbatom):
                    file.write(("%g " * 2 + "\n") % (F1[iphot, j], F2[iphot, j]))
            print("Shadow File written to disk: %s \n" % filename)
        except Exception as exc:
            # Chain the original exception instead of discarding it.
            raise Exception("crystal_shadow.py: Shadow file creation failure!\n") from exc
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description='Calculation structure factor')
    parser.add_argument('-n', '--name', dest='descriptor', default=['YB66'], type=str,
                        nargs=1, help='Crystal name')
    # BUG FIX: was nargs=1, but three Miller indices are consumed below
    # (args.m[0..2] raised IndexError whenever -m was actually supplied).
    parser.add_argument('-m', '--m', metavar='H K L', default=[0, 0, 6], type=int,
                        nargs=3, help='Miller indices [H, K, L]')
    parser.add_argument('-e', '--e', dest='EngRange', default=[2006, 2194, 0.5], type=float,
                        nargs=3, help='[emin,emax,estep]')
    # BUG FIX: was nargs=3, but only a single filename is read ([0]).
    parser.add_argument('-s', '--SHADOWFILE', dest='SHADOW_NAME', default=[""], type=str,
                        nargs=1, help='SHADOW filename')
    args = parser.parse_args()

    descriptor = args.descriptor[0]
    HMILLER = args.m[0]
    KMILLER = args.m[1]
    LMILLER = args.m[2]
    ENERGY = args.EngRange[0]
    ENERGY_END = args.EngRange[1]
    estep = args.EngRange[2]
    NPOINTS = int((ENERGY_END - ENERGY) / estep + 1)
    SHADOW_NAME = args.SHADOW_NAME[0]
    energy = numpy.linspace(ENERGY, ENERGY_END, NPOINTS)

    print("Using crystal descriptor: ", descriptor)
    bragg_dictionary = bragg_calc2(descriptor=descriptor, hh=HMILLER, kk=KMILLER, ll=LMILLER,
                                   temper=1.0, emin=ENERGY, emax=ENERGY_END, estep=estep,
                                   fileout=None)
    if SHADOW_NAME == '':
        # Default output name, e.g. YB66_006_sha.dat (spurious f-prefix on a
        # %-formatted string removed; no behavioral change).
        SHADOW_NAME = '%s_%d%d%d_sha.dat' % (descriptor, HMILLER, KMILLER, LMILLER)
    crystal_shadow(SHADOW_NAME, bragg_dictionary, energy)
|
{"hexsha": "9b4ce491a6202ecb1215b6d4e9a7b015df4deac9", "size": 7210, "ext": "py", "lang": "Python", "max_stars_repo_path": "yb66/create_shadowfile.py", "max_stars_repo_name": "91902078/yb66", "max_stars_repo_head_hexsha": "ece7f637ac8bacb1ba51a6f1f6f1f2e9cdb91bd9", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "yb66/create_shadowfile.py", "max_issues_repo_name": "91902078/yb66", "max_issues_repo_head_hexsha": "ece7f637ac8bacb1ba51a6f1f6f1f2e9cdb91bd9", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "yb66/create_shadowfile.py", "max_forks_repo_name": "91902078/yb66", "max_forks_repo_head_hexsha": "ece7f637ac8bacb1ba51a6f1f6f1f2e9cdb91bd9", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0555555556, "max_line_length": 126, "alphanum_fraction": 0.5436893204, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2294}
|
/*=============================================================================
Copyright (c) 2002 2004 2006 Joel de Guzman
Copyright (c) 2004 Eric Niebler
http://spirit.sourceforge.net/
Use, modification and distribution is subject to the Boost Software
License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#include <boost/spirit/include/classic_attribute.hpp>
#include <boost/spirit/include/classic_chset.hpp>
#include <boost/spirit/include/classic_core.hpp>
#include <boost/spirit/include/classic_if.hpp>
#include <boost/spirit/include/classic_lazy.hpp>
#include <boost/spirit/include/classic_loops.hpp>
#include <boost/spirit/include/phoenix1_primitives.hpp>
#include "actions.hpp"
#include "block_tags.hpp"
#include "grammar_impl.hpp"
#include "parsers.hpp"
#include "phrase_tags.hpp"
#include "scoped.hpp"
#include "state.hpp"
#include "stream.hpp"
#include "template_tags.hpp"
#include "utils.hpp"
namespace quickbook
{
namespace cl = boost::spirit::classic;
struct list_stack_item
{
// Is this the root of the context
// (e.g. top, template, table cell etc.)
enum list_item_type
{
syntactic_list, // In a list marked up '*' or '#'
top_level, // At the top level of a parse
// (might be a template body)
nested_block // Nested in a block element.
} type;
unsigned int indent; // Indent of list marker
// (or paragraph if not in a list)
unsigned int indent2; // Indent of paragraph
char mark; // List mark, '\0' if not in a list.
// Example of inside a list:
//
// |indent
// * List item
// |indent2
explicit list_stack_item(list_item_type r)
: type(r), indent(0), indent2(0), mark('\0')
{
}
explicit list_stack_item(
char mark_, unsigned int indent_, unsigned int indent2_)
: type(syntactic_list)
, indent(indent_)
, indent2(indent2_)
, mark(mark_)
{
}
};
struct block_types
{
enum values
{
none,
code,
list,
paragraph
};
};
    // Rules and mutable parser state shared by the main quickbook grammar.
    // One instance lives for the duration of a parse; the rule members are
    // wired up elsewhere, the data members track indentation/list/markup
    // state as block-level constructs are recognized.
    struct main_grammar_local
    {
        ////////////////////////////////////////////////////////////////////////
        // Local actions

        void start_blocks_impl(parse_iterator first, parse_iterator last);
        void start_nested_blocks_impl(
            parse_iterator first, parse_iterator last);
        void end_blocks_impl(parse_iterator first, parse_iterator last);
        void check_indentation_impl(parse_iterator first, parse_iterator last);
        void check_code_block_impl(parse_iterator first, parse_iterator last);
        void plain_block(string_iterator first, string_iterator last);
        void list_block(
            string_iterator first,
            string_iterator mark_pos,
            string_iterator last);
        void clear_stack();

        ////////////////////////////////////////////////////////////////////////
        // Local members

        cl::rule<scanner> template_phrase, top_level, indent_check,
            paragraph_separator, inside_paragraph, code, code_line, blank_line,
            hr, inline_code, skip_inline_code, template_, attribute_template,
            template_body, code_block, skip_code_block, macro, template_args,
            template_args_1_4, template_arg_1_4, template_inner_arg_1_4,
            brackets_1_4, template_args_1_5, template_arg_1_5,
            template_arg_1_5_content, template_inner_arg_1_5, brackets_1_5,
            template_args_1_6, template_arg_1_6, template_arg_1_6_content,
            break_, command_line_macro_identifier, dummy_block,
            line_dummy_block, square_brackets, error_brackets, skip_escape;

        struct block_context_closure
            : cl::closure<block_context_closure, element_info::context>
        {
            // Mask used to determine whether or not an element is a block
            // element.
            member1 is_block_mask;
        };

        cl::rule<scanner> simple_markup, simple_markup_end;
        cl::rule<scanner> paragraph;
        cl::rule<scanner> list;
        cl::rule<scanner, block_context_closure::context_t>
            syntactic_block_item;
        cl::rule<scanner> common;
        cl::rule<scanner> element;

        // state
        std::stack<list_stack_item> list_stack; // open list/indent levels
        unsigned int list_indent;  // indent of the current list level
        bool no_eols;              // true when newlines are disallowed
        element_info::context context;
        char mark;                 // Simple markup's deliminator
        bool still_in_block;       // Inside a syntatic block

        // transitory state
        block_types::values block_type;
        element_info info;
        element_info::type_enum element_type;

        // state
        quickbook::state& state_;

        ////////////////////////////////////////////////////////////////////////
        // Local constructor

        main_grammar_local(quickbook::state& state)
            : list_stack()
            , list_indent(0)
            , no_eols(true)
            , context(element_info::in_top_level)
            , mark('\0')
            , state_(state)
        {
        }
    };
    // Scoped action wrapped around the body of a markup element
    // ([section ...], [table ...], etc.).
    //
    // start() decides whether the element just looked up (l.info) may be
    // parsed here; result() turns parse failures into error messages
    // (or soft failures for pre-1.7 phrase elements); success()/failure()
    // publish the element's type back to the grammar via l.element_type;
    // cleanup() undoes the source-mode push from start().
    struct process_element_impl : scoped_action_base
    {
        process_element_impl(main_grammar_local& l_)
            : l(l_), pushed_source_mode_(false), element_context_error_(false)
        {
        }

        bool start()
        {
            // This element doesn't exist in the current language version.
            if (qbk_version_n < l.info.qbk_version) return false;

            // The element is not allowed in this context.
            if (!(l.info.type & l.context)) {
                if (qbk_version_n < 107u) {
                    // Pre-1.7: silently fail so the text is treated as
                    // plain content.
                    return false;
                }
                else {
                    // 1.7+: parse it anyway, but report in result().
                    element_context_error_ = true;
                }
            }

            // Snapshot the element info; l.info is transitory and may be
            // overwritten while the element body is parsed.
            info_ = l.info;

            if (info_.type != element_info::phrase &&
                info_.type != element_info::maybe_block) {
                // Block elements flush any paragraph in progress first.
                paragraph_action para(l.state_);
                para();
            }

            assert(l.state_.values.builder.empty());

            if (l.state_.source_mode_next &&
                info_.type != element_info::maybe_block) {
                // Apply a pending [source-mode] to this element only.
                l.state_.push_tagged_source_mode(l.state_.source_mode_next);
                pushed_source_mode_ = true;
                l.state_.source_mode_next = 0;
            }

            return true;
        }

        // Decide how a parse result for the element body is reported.
        // Returns true when the surrounding rule should count as matched.
        template <typename ResultT, typename ScannerT>
        bool result(ResultT r, ScannerT const& scan)
        {
            if (element_context_error_) {
                error_message_action error(
                    l.state_, "Element not allowed in this context.");
                error(scan.first, scan.first);
                return true;
            }
            else if (r) {
                return true;
            }
            else if (
                qbk_version_n < 107u && info_.type & element_info::in_phrase) {
                // Old versions of quickbook had a soft fail
                // for unparsed phrase elements.
                return false;
            }
            else {
                // Parse error in body.
                error_action error(l.state_);
                error(scan.first, scan.first);
                return true;
            }
        }

        void success(parse_iterator, parse_iterator)
        {
            l.element_type = info_.type;
        }
        void failure() { l.element_type = element_info::nothing; }

        void cleanup()
        {
            if (pushed_source_mode_) l.state_.pop_tagged_source_mode();
        }

        main_grammar_local& l;
        element_info info_;              // copy of l.info taken in start()
        bool pushed_source_mode_;        // did start() push a source mode?
        bool element_context_error_;     // report "not allowed" in result()
    };
struct scoped_paragraph : scoped_action_base
{
scoped_paragraph(quickbook::state& state_)
: state(state_), pushed(false)
{
}
bool start()
{
state.push_tagged_source_mode(state.source_mode_next);
pushed = true;
state.source_mode_next = 0;
return true;
}
void cleanup()
{
if (pushed) state.pop_tagged_source_mode();
}
quickbook::state& state;
bool pushed;
};
struct in_list_impl
{
main_grammar_local& l;
explicit in_list_impl(main_grammar_local& l_) : l(l_) {}
bool operator()() const
{
return !l.list_stack.empty() &&
l.list_stack.top().type == list_stack_item::syntactic_list;
}
};
template <typename T, typename M>
struct set_scoped_value_impl : scoped_action_base
{
typedef M T::*member_ptr;
explicit set_scoped_value_impl(T& l_, member_ptr ptr_)
: l(l_), ptr(ptr_), saved_value()
{
}
bool start(M const& value)
{
saved_value = l.*ptr;
l.*ptr = value;
return true;
}
void cleanup() { l.*ptr = saved_value; }
T& l;
member_ptr ptr;
M saved_value;
};
    // Parser factory wrapping set_scoped_value_impl: while the decorated
    // parser runs, the member reached via `ptr` on `l` holds the supplied
    // value, and is restored afterwards (see scoped_no_eols et al. in
    // init_main()).
    template <typename T, typename M>
    struct set_scoped_value : scoped_parser<set_scoped_value_impl<T, M> >
    {
        typedef set_scoped_value_impl<T, M> impl;

        set_scoped_value(T& l, typename impl::member_ptr ptr)
            : scoped_parser<impl>(impl(l, ptr))
        {
        }
    };
////////////////////////////////////////////////////////////////////////////
// Local grammar
    // Build the main quickbook grammar: top-level blocks, paragraphs,
    // lists, code blocks, templates, macros, escapes and simple markup.
    // The qbk_ver guards select behaviour per quickbook language version.
    // Rule order inside alternations is significant (Spirit classic uses
    // first-match, not longest-match), so rules are kept exactly as
    // ordered here.
    void quickbook_grammar::impl::init_main()
    {
        main_grammar_local& local = cleanup_.add(new main_grammar_local(state));

        // Global Actions
        quickbook::element_action element_action(state);
        quickbook::paragraph_action paragraph_action(state);

        phrase_end_action end_phrase(state);
        raw_char_action raw_char(state);
        plain_char_action plain_char(state);
        escape_unicode_action escape_unicode(state);

        simple_phrase_action simple_markup(state);

        break_action break_(state);
        do_macro_action do_macro(state);

        error_action error(state);
        element_id_warning_action element_id_warning(state);

        scoped_parser<to_value_scoped_action> to_value(state);
        scoped_parser<scoped_paragraph> scope_paragraph(state);

        quickbook_strict strict_mode(state);

        // Local Actions
        scoped_parser<process_element_impl> process_element(local);
        in_list_impl in_list(local);

        set_scoped_value<main_grammar_local, bool> scoped_no_eols(
            local, &main_grammar_local::no_eols);
        set_scoped_value<main_grammar_local, element_info::context>
            scoped_context(local, &main_grammar_local::context);
        set_scoped_value<main_grammar_local, bool> scoped_still_in_block(
            local, &main_grammar_local::still_in_block);

        member_action<main_grammar_local> check_indentation(
            local, &main_grammar_local::check_indentation_impl);
        member_action<main_grammar_local> check_code_block(
            local, &main_grammar_local::check_code_block_impl);
        member_action<main_grammar_local> start_blocks(
            local, &main_grammar_local::start_blocks_impl);
        member_action<main_grammar_local> start_nested_blocks(
            local, &main_grammar_local::start_nested_blocks_impl);
        member_action<main_grammar_local> end_blocks(
            local, &main_grammar_local::end_blocks_impl);

        // clang-format off

        // phrase/phrase_start is used for an entirely self-contained
        // phrase. For example, any remaining anchors are written out
        // at the end instead of being saved for any following content.
        phrase_start =
            inline_phrase                       [end_phrase]
            ;

        // nested_phrase is used for a phrase nested inside square
        // brackets.
        nested_phrase =
            state.values.save()
            [
                scoped_context(element_info::in_phrase)
                [*(~cl::eps_p(']') >> local.common)]
            ]
            ;

        // paragraph_phrase is like a nested_phrase but is also terminated
        // by a paragraph end.
        paragraph_phrase =
            state.values.save()
            [
                scoped_context(element_info::in_phrase)
                [*(~cl::eps_p(phrase_end) >> local.common)]
            ]
            ;

        // extended_phrase is like a paragraph_phrase but allows some block
        // elements.
        extended_phrase =
            state.values.save()
            [
                scoped_context(element_info::in_conditional)
                [*(~cl::eps_p(phrase_end) >> local.common)]
            ]
            ;

        // inline_phrase is used a phrase that isn't nested inside
        // brackets, but is not self contained. An example of this
        // is expanding a template, which is parsed separately but
        // is part of the paragraph that contains it.
        inline_phrase =
            state.values.save()
            [   qbk_ver(107u)
            >>  local.template_phrase
            |   qbk_ver(0, 107u)
            >>  scoped_context(element_info::in_phrase)
                [*local.common]
            ]
            ;

        table_title_phrase =
            state.values.save()
            [
                scoped_context(element_info::in_phrase)
                [   *(  ~cl::eps_p(space >> (']' | '[' >> space >> '['))
                    >>  local.common
                    )
                ]
            ]
            ;

        inside_preformatted =
            scoped_no_eols(false)
            [   paragraph_phrase
            ]
            ;

        // Phrase templates can contain block tags, but can't contain
        // syntatic blocks.
        local.template_phrase =
            scoped_context(element_info::in_top_level)
            [   *(  (local.paragraph_separator >> space >> cl::anychar_p)
                                                [error("Paragraph in phrase template.")]
                |   local.common
                )
            ]
            ;

        // Top level blocks
        block_start =
            (*eol)                              [start_blocks]
            >>  (   *(  local.top_level
                    >>  !(  qbk_ver(106u)
                        >>  cl::ch_p(']')
                        >>  cl::eps_p           [error("Mismatched close bracket")]
                        )
                    )
                )                               [end_blocks]
            ;

        // Blocks contains within an element, e.g. a table cell or a footnote.
        inside_paragraph =
            state.values.save()
            [   cl::eps_p                       [start_nested_blocks]
            >>  (   qbk_ver(107u)
                >>  (*eol)
                >>  (*local.top_level)
                |   qbk_ver(0, 107u)
                >>  local.inside_paragraph
                )                               [end_blocks]
            ]
            ;

        // indent_check (via check_indentation_impl) decides whether the
        // line starts code, a list item or a paragraph, publishing the
        // decision in local.block_type before the alternation runs.
        local.top_level =
                cl::eps_p(local.indent_check)
            >>  (   cl::eps_p(ph::var(local.block_type) == block_types::code)
                >>  local.code
                |   cl::eps_p(ph::var(local.block_type) == block_types::list)
                >>  local.list
                |   cl::eps_p(ph::var(local.block_type) == block_types::paragraph)
                >>  (   local.hr
                    |   local.paragraph
                    )
                )
            >>  *eol
            ;

        local.indent_check =
            (   *cl::blank_p
            >>  !(  (cl::ch_p('*') | '#')
                >>  *cl::blank_p)
            )                                   [check_indentation]
            ;

        local.paragraph =
                                                // Usually superfluous call
                                                // for paragraphs in lists.
            cl::eps_p                           [paragraph_action]
            >>  scope_paragraph()
                [
                    scoped_context(element_info::in_top_level)
                    [   scoped_still_in_block(true)
                        [   local.syntactic_block_item(element_info::is_contextual_block)
                        >>  *(  cl::eps_p(ph::var(local.still_in_block))
                            >>  local.syntactic_block_item(element_info::is_block)
                            )
                        ]
                    ]
                ]                               [paragraph_action]
            ;

        local.list =
                *cl::blank_p
            >>  (cl::ch_p('*') | '#')
            >>  (*cl::blank_p)
            >>  scoped_context(element_info::in_list_block)
                [   scoped_still_in_block(true)
                    [   *(  cl::eps_p(ph::var(local.still_in_block))
                        >>  local.syntactic_block_item(element_info::is_block)
                        )
                    ]
                ]
            ;

        local.syntactic_block_item =
                local.paragraph_separator       [ph::var(local.still_in_block) = false]
            |   (cl::eps_p(~cl::ch_p(']')) | qbk_ver(0, 107u))
                                                [ph::var(local.element_type) = element_info::nothing]
            >>  local.common

                // If the element is a block, then a newline will end the
                // current syntactic block.
                //
                // Note that we don't do this for lists in 1.6, as it causes
                // the list block to end. The support for nested syntactic
                // blocks in 1.7 will fix that. Although it does mean the
                // following line will need to be indented.
            >>  !(  cl::eps_p(in_list) >> qbk_ver(106u, 107u)
                |   cl::eps_p
                    (
                        ph::static_cast_<int>(local.syntactic_block_item.is_block_mask) &
                        ph::static_cast_<int>(ph::var(local.element_type))
                    )
                >>  eol                         [ph::var(local.still_in_block) = false]
                )
            ;

        local.paragraph_separator =
                cl::eol_p
            >>  cl::eps_p
                (   *cl::blank_p
                >>  (   cl::eol_p
                    |   cl::end_p
                    |   cl::eps_p(in_list) >> (cl::ch_p('*') | '#')
                    )
                )
            >>  *eol
            ;

        // Blocks contains within an element, e.g. a table cell or a footnote.
        local.inside_paragraph =
            scoped_context(element_info::in_nested_block)
            [   *(  local.paragraph_separator   [paragraph_action]
                |   ~cl::eps_p(']')
                >>  local.common
                )
            ]                                   [paragraph_action]
            ;

        local.hr =
                cl::str_p("----")
            >>  state.values.list(block_tags::hr)
                [   (   qbk_ver(106u)
                    >>  *(line_comment | (cl::anychar_p - (cl::eol_p | '[' | ']')))
                    |   qbk_ver(0, 106u)
                    >>  *(line_comment | (cl::anychar_p - (cl::eol_p | "[/")))
                    )
                >>  *eol
                ]                               [element_action]
            ;

        local.element
            =   '['
            >>  (   cl::eps_p(cl::punct_p)
                >>  elements                    [ph::var(local.info) = ph::arg1]
                |   elements                    [ph::var(local.info) = ph::arg1]
                >>  (cl::eps_p - (cl::alnum_p | '_'))
                )
            >>  process_element()
                [   state.values.list(ph::var(local.info.tag))
                    [   cl::lazy_p(*ph::var(local.info.rule))
                    >>  space
                    >>  ']'
                    ]                           [element_action]
                ]
            ;

        local.code =
            state.values.list(code_tags::code_block)
            [(  local.code_line
            >>  *(*local.blank_line >> local.code_line)
            )                                   [state.values.entry(ph::arg1, ph::arg2)]
            ]                                   [element_action]
            >>  *eol
            ;

        local.code_line =
            (   *cl::blank_p
            >>  ~cl::eps_p(cl::eol_p)
            )                                   [check_code_block]
            >>  cl::eps_p(ph::var(local.block_type) == block_types::code)
            >>  *(cl::anychar_p - cl::eol_p)
            >>  (cl::eol_p | cl::end_p)
            ;

        local.blank_line =
            *cl::blank_p >> cl::eol_p
            ;

        local.common =
                local.macro
            |   local.element
            |   local.template_
            |   local.break_
            |   local.code_block
            |   local.inline_code
            |   local.simple_markup
            |   escape
            |   comment
            |   strict_mode
            >>  (   local.error_brackets        [error("Invalid template/tag (strict mode)")]
                |   cl::eps_p('[')              [error("Mismatched open bracket (strict mode)")]
                >>  cl::anychar_p
                |   cl::eps_p(']')              [error("Mismatched close bracket (strict mode)")]
                >>  cl::anychar_p
                )
            |   qbk_ver(106u)
            >>  local.square_brackets
            |   cl::space_p                     [raw_char]
            |   cl::anychar_p                   [plain_char]
            ;

        // Used to skip over content without producing output (e.g. inside
        // 1.6 template arguments); mirrors the shape of local.common.
        skip_entity =
            '['
                // For escaped templates:
            >>  !(space >> cl::ch_p('`') >> (cl::alpha_p | '_'))
            >>  *(~cl::eps_p(']') >> skip_entity)
            >>  !cl::ch_p(']')
            |   local.skip_code_block
            |   local.skip_inline_code
            |   local.skip_escape
            |   comment
            |   (cl::anychar_p - '[' - ']')
            ;

        local.square_brackets =
            (   cl::ch_p('[')                   [plain_char]
            >>  paragraph_phrase
            >>  (   cl::ch_p(']')               [plain_char]
                |   cl::eps_p                   [error("Missing close bracket")]
                )
            |   cl::ch_p(']')                   [plain_char]
            >>  cl::eps_p                       [error("Mismatched close bracket")]
            )
            ;

        local.error_brackets =
                cl::ch_p('[')                   [plain_char]
            >>  (   local.error_brackets
                |   (cl::anychar_p - ']')
                )
            >>  cl::ch_p(']')
            ;

        local.macro =
            cl::eps_p
            (   (   state.macro
                >>  ~cl::eps_p(cl::alpha_p | '_')
                                                // must not be followed by alpha or underscore
                )
                &   macro_identifier            // must be a valid macro for the current version
            )
            >>  state.macro                     [do_macro]
            ;

        local.template_ =
            (   '['
            >>  space
            >>  state.values.list(template_tags::template_)
                [   local.template_body
                >>  ']'
                ]
            )                                   [element_action]
            ;

        local.attribute_template =
            (   '['
            >>  space
            >>  state.values.list(template_tags::attribute_template)
                [   local.template_body
                >>  ']'
                ]
            )                                   [element_action]
            ;

        local.template_body =
            (   cl::str_p('`')
            >>  cl::eps_p(cl::punct_p)
            >>  state.templates.scope
                                                [state.values.entry(ph::arg1, ph::arg2, template_tags::escape)]
                                                [state.values.entry(ph::arg1, ph::arg2, template_tags::identifier)]
            >>  !(  qbk_ver(106u)
                                                [error("Templates with punctuation names can't be escaped in quickbook 1.6+")]
                |   strict_mode
                                                [error("Templates with punctuation names can't be escaped (strict mode)")]
                )
            |   cl::str_p('`')
            >>  state.templates.scope
                                                [state.values.entry(ph::arg1, ph::arg2, template_tags::escape)]
                                                [state.values.entry(ph::arg1, ph::arg2, template_tags::identifier)]
            |   cl::eps_p(cl::punct_p)
            >>  state.templates.scope
                                                [state.values.entry(ph::arg1, ph::arg2, template_tags::identifier)]
            |   state.templates.scope
                                                [state.values.entry(ph::arg1, ph::arg2, template_tags::identifier)]
            >>  cl::eps_p(hard_space)
            )
            >>  space
            >>  !local.template_args
            ;

        local.template_args =
                qbk_ver(106u) >> local.template_args_1_6
            |   qbk_ver(105u, 106u) >> local.template_args_1_5
            |   qbk_ver(0, 105u) >> local.template_args_1_4
            ;

        local.template_args_1_4 = local.template_arg_1_4 >> *(".." >> local.template_arg_1_4);

        local.template_arg_1_4 =
            (   cl::eps_p(*cl::blank_p >> cl::eol_p)
            >>  local.template_inner_arg_1_4    [state.values.entry(ph::arg1, ph::arg2, template_tags::block)]
            |   local.template_inner_arg_1_4    [state.values.entry(ph::arg1, ph::arg2, template_tags::phrase)]
            )
            ;

        local.template_inner_arg_1_4 =
            +(local.brackets_1_4 | (cl::anychar_p - (cl::str_p("..") | ']')))
            ;

        local.brackets_1_4 =
            '[' >> local.template_inner_arg_1_4 >> ']'
            ;

        local.template_args_1_5 = local.template_arg_1_5 >> *(".." >> local.template_arg_1_5);

        local.template_arg_1_5 =
            (   cl::eps_p(*cl::blank_p >> cl::eol_p)
            >>  local.template_arg_1_5_content  [state.values.entry(ph::arg1, ph::arg2, template_tags::block)]
            |   local.template_arg_1_5_content  [state.values.entry(ph::arg1, ph::arg2, template_tags::phrase)]
            )
            ;

        local.template_arg_1_5_content =
            +(local.brackets_1_5 | ('\\' >> cl::anychar_p) | (cl::anychar_p - (cl::str_p("..") | '[' | ']')))
            ;

        local.template_inner_arg_1_5 =
            +(local.brackets_1_5 | ('\\' >> cl::anychar_p) | (cl::anychar_p - (cl::str_p('[') | ']')))
            ;

        local.brackets_1_5 =
            '[' >> local.template_inner_arg_1_5 >> ']'
            ;

        local.template_args_1_6 = local.template_arg_1_6 >> *(".." >> local.template_arg_1_6);

        local.template_arg_1_6 =
            (   cl::eps_p(*cl::blank_p >> cl::eol_p)
            >>  local.template_arg_1_6_content  [state.values.entry(ph::arg1, ph::arg2, template_tags::block)]
            |   local.template_arg_1_6_content  [state.values.entry(ph::arg1, ph::arg2, template_tags::phrase)]
            )
            ;

        local.template_arg_1_6_content =
            +   (   ~cl::eps_p("..") >> skip_entity )
            ;

        local.break_
            =   (   '['
                >>  space
                >>  "br"
                >>  space
                >>  ']'
                )                               [break_]
                ;

        local.inline_code =
            '`' >> state.values.list(code_tags::inline_code)
            [(
               *(cl::anychar_p -
                    (   '`'
                    |   (cl::eol_p >> *cl::blank_p >> cl::eol_p)
                                                // Make sure that we don't go
                    )                           // past a single block
                ) >> cl::eps_p('`')
            )                                   [state.values.entry(ph::arg1, ph::arg2)]
            >>  '`'
            ]                                   [element_action]
            ;

        local.skip_inline_code =
            '`'
            >> *(cl::anychar_p -
                    (   '`'
                    |   (cl::eol_p >> *cl::blank_p >> cl::eol_p)
                                                // Make sure that we don't go
                    )                           // past a single block
                )
            >> !cl::ch_p('`')
            ;

        local.skip_code_block =
                "```"
            >>  ~cl::eps_p("`")
            >>  (   (!( *(*cl::blank_p >> cl::eol_p)
                    >>  (   *(  "````" >> *cl::ch_p('`')
                            |   (   cl::anychar_p
                                -   (*cl::space_p >> "```" >> ~cl::eps_p("`"))
                                )
                            )
                        >>  !(*cl::blank_p >> cl::eol_p)
                        )
                    >>  (*cl::space_p >> "```")
                    ))
                |   *cl::anychar_p
                )
            |   "``"
            >>  ~cl::eps_p("`")
            >>  (   (   *(*cl::blank_p >> cl::eol_p)
                    >>  (   *(  "```" >> *cl::ch_p('`')
                            |   (   cl::anychar_p
                                -   (*cl::space_p >> "``" >> ~cl::eps_p("`"))
                                )
                            )
                        >>  !(*cl::blank_p >> cl::eol_p)
                        )
                    >>  (*cl::space_p >> "``")
                    )
                |   *cl::anychar_p
                )
            ;

        local.code_block =
                "```"
            >>  ~cl::eps_p("`")
            >>  (   state.values.list(code_tags::inline_code_block)
                    [   *(*cl::blank_p >> cl::eol_p)
                    >>  (   *(  "````" >> *cl::ch_p('`')
                            |   (   cl::anychar_p
                                -   (*cl::space_p >> "```" >> ~cl::eps_p("`"))
                                )
                            )
                        >>  !(*cl::blank_p >> cl::eol_p)
                        )                       [state.values.entry(ph::arg1, ph::arg2)]
                    >>  (*cl::space_p >> "```")
                    ]                           [element_action]
                |   cl::eps_p                   [error("Unfinished code block")]
                >>  *cl::anychar_p
                )
            |   "``"
            >>  ~cl::eps_p("`")
            >>  (   state.values.list(code_tags::inline_code_block)
                    [   *(*cl::blank_p >> cl::eol_p)
                    >>  (   *(  "```" >> *cl::ch_p('`')
                            |   (   cl::anychar_p
                                -   (*cl::space_p >> "``" >> ~cl::eps_p("`"))
                                )
                            )
                        >>  !(*cl::blank_p >> cl::eol_p)
                        )                       [state.values.entry(ph::arg1, ph::arg2)]
                    >>  (*cl::space_p >> "``")
                    ]                           [element_action]
                |   cl::eps_p                   [error("Unfinished code block")]
                >>  *cl::anychar_p
                )
            ;

        local.simple_markup =
                cl::chset<>("*/_=")             [ph::var(local.mark) = ph::arg1]
            >>  cl::eps_p(cl::graph_p)          // graph_p must follow first mark
            >>  lookback
                [   cl::anychar_p               // skip back over the markup
                >>  ~cl::eps_p(cl::ch_p(boost::ref(local.mark)))
                                                // first mark not be preceeded by
                                                // the same character.
                >>  (cl::space_p | cl::punct_p | cl::end_p)
                                                // first mark must be preceeded
                                                // by space or punctuation or the
                                                // mark character or a the start.
                ]
            >>  state.values.save()
                [
                    to_value()
                    [
                        cl::eps_p((state.macro & macro_identifier) >> local.simple_markup_end)
                    >>  state.macro             [do_macro]
                    |   ~cl::eps_p(cl::ch_p(boost::ref(local.mark)))
                    >>  +(  ~cl::eps_p
                            (   lookback [~cl::ch_p(boost::ref(local.mark))]
                            >>  local.simple_markup_end
                            )
                        >>  cl::anychar_p       [plain_char]
                        )
                    ]
                >>  cl::ch_p(boost::ref(local.mark))
                                                [simple_markup]
                ]
            ;

        local.simple_markup_end
            =   (   lookback[cl::graph_p]       // final mark must be preceeded by
                                                // graph_p
                >>  cl::ch_p(boost::ref(local.mark))
                >>  ~cl::eps_p(cl::ch_p(boost::ref(local.mark)))
                                                // final mark not be followed by
                                                // the same character.
                >>  (cl::space_p | cl::punct_p | cl::end_p)
                                                // final mark must be followed by
                                                // space or punctuation
                )
            |   '['
            |   "'''"
            |   '`'
            |   phrase_end
            ;

        escape =
                cl::str_p("\\n")                [break_]
            |   cl::str_p("\\ ")                // ignore an escaped space
            |   '\\' >> cl::punct_p             [plain_char]
            |   "\\u" >> cl::repeat_p(4) [cl::chset<>("0-9a-fA-F")]
                                                [escape_unicode]
            |   "\\U" >> cl::repeat_p(8) [cl::chset<>("0-9a-fA-F")]
                                                [escape_unicode]
            |   ("'''" >> !eol)
            >>  state.values.save()
                [   (*(cl::anychar_p - "'''"))  [state.values.entry(ph::arg1, ph::arg2, phrase_tags::escape)]
                >>  (   cl::str_p("'''")
                    |   cl::eps_p               [error("Unclosed boostbook escape.")]
                    )                           [element_action]
                ]
            ;

        local.skip_escape =
                cl::str_p("\\n")
            |   cl::str_p("\\ ")
            |   '\\' >> cl::punct_p
            |   "\\u" >> cl::repeat_p(4) [cl::chset<>("0-9a-fA-F")]
            |   "\\U" >> cl::repeat_p(8) [cl::chset<>("0-9a-fA-F")]
            |   ("'''" >> !eol)
            >>  (*(cl::anychar_p - "'''"))
            >>  (   cl::str_p("'''")
                |   cl::eps_p
                )
            ;

        raw_escape =
                cl::str_p("\\n")                [error("Newlines invalid here.")]
            |   cl::str_p("\\ ")                // ignore an escaped space
            |   '\\' >> cl::punct_p             [raw_char]
            |   "\\u" >> cl::repeat_p(4) [cl::chset<>("0-9a-fA-F")]
                                                [escape_unicode]
            |   "\\U" >> cl::repeat_p(8) [cl::chset<>("0-9a-fA-F")]
                                                [escape_unicode]
            |   ('\\' >> cl::anychar_p)         [error("Invalid escape.")]
                                                [raw_char]
            |   ("'''" >> !eol)                 [error("Boostbook escape invalid here.")]
            >>  (*(cl::anychar_p - "'''"))
            >>  (   cl::str_p("'''")
                |   cl::eps_p                   [error("Unclosed boostbook escape.")]
                )
            ;

        attribute_template_body =
            space
            >>  *(  ~cl::eps_p(space >> cl::end_p | comment)
                >>  (   cl::eps_p
                        (   cl::ch_p('[')
                        >>  space
                        >>  (   cl::eps_p(cl::punct_p)
                            >>  elements
                            |   elements
                            >>  (cl::eps_p - (cl::alnum_p | '_'))
                            )
                        )                       [error("Elements not allowed in attribute values.")]
                    >>  local.square_brackets
                    |   local.attribute_template
                    |   cl::eps_p(cl::ch_p('[')) [error("Unmatched template in attribute value.")]
                    >>  local.square_brackets
                    |   raw_escape
                    |   cl::anychar_p           [raw_char]
                    )
                )
            >>  space
            ;

        attribute_value_1_7 =
            state.values.save() [
                +(  ~cl::eps_p(']' | cl::space_p | comment)
                >>  (   cl::eps_p
                        (   cl::ch_p('[')
                        >>  space
                        >>  (   cl::eps_p(cl::punct_p)
                            >>  elements
                            |   elements
                            >>  (cl::eps_p - (cl::alnum_p | '_'))
                            )
                        )                       [error("Elements not allowed in attribute values.")]
                    >>  local.square_brackets
                    |   local.attribute_template
                    |   cl::eps_p(cl::ch_p('['))[error("Unmatched template in attribute value.")]
                    >>  local.square_brackets
                    |   raw_escape
                    |   cl::anychar_p           [raw_char]
                    )
                )
            ]
            ;

        //
        // Command line
        //

        command_line =
            state.values.list(block_tags::macro_definition)
            [   *cl::space_p
            >>  local.command_line_macro_identifier
                                                [state.values.entry(ph::arg1, ph::arg2)]
            >>  *cl::space_p
            >>  !(  '='
                >>  *cl::space_p
                >>  to_value() [ inline_phrase ]
                >>  *cl::space_p
                )
            >>  cl::end_p
            ]                                   [element_action]
            ;

        local.command_line_macro_identifier =
                qbk_ver(106u)
            >>  +(cl::anychar_p - (cl::space_p | '[' | '\\' | ']' | '='))
            |   +(cl::anychar_p - (cl::space_p | ']' | '='))
            ;

        // Miscellaneous stuff

        // Follows an alphanumeric identifier - ensures that it doesn't
        // match an empty space in the middle of the identifier.
        hard_space =
            (cl::eps_p - (cl::alnum_p | '_')) >> space
            ;

        space =
            *(cl::space_p | comment)
            ;

        blank =
            *(cl::blank_p | comment)
            ;

        eol = blank >> cl::eol_p
            ;

        phrase_end =
            ']'
            |   cl::eps_p(ph::var(local.no_eols))
            >>  cl::eol_p >> *cl::blank_p >> cl::eol_p
            ;                                   // Make sure that we don't go
                                                // past a single block, except
                                                // when preformatted.

        comment =
            "[/" >> *(local.dummy_block | (cl::anychar_p - ']')) >> ']'
            ;

        local.dummy_block =
            '[' >> *(local.dummy_block | (cl::anychar_p - ']')) >> ']'
            ;

        line_comment =
            "[/" >> *(local.line_dummy_block | (cl::anychar_p - (cl::eol_p | ']'))) >> ']'
            ;

        local.line_dummy_block =
            '[' >> *(local.line_dummy_block | (cl::anychar_p - (cl::eol_p | ']'))) >> ']'
            ;

        macro_identifier =
                qbk_ver(106u)
            >>  +(cl::anychar_p - (cl::space_p | '[' | '\\' | ']'))
            |   qbk_ver(0, 106u)
            >>  +(cl::anychar_p - (cl::space_p | ']'))
            ;

        // clang-format on
    }
////////////////////////////////////////////////////////////////////////////
// Indentation Handling
template <typename Iterator> int indent_length(Iterator first, Iterator end)
{
int length = 0;
for (; first != end; ++first) {
if (*first == '\t') {
// hardcoded tab to 4 for now
length = length + 4 - (length % 4);
}
else {
++length;
}
}
return length;
}
    // Called once at the start of a document's block-level parse.
    // Pushes the sentinel item that marks the bottom of the list stack.
    void main_grammar_local::start_blocks_impl(parse_iterator, parse_iterator)
    {
        list_stack.push(list_stack_item(list_stack_item::top_level));
    }
    // Called before parsing blocks nested inside an element (e.g. a table
    // cell or footnote body).  Pushes a nested_block sentinel so the
    // nested content gets its own list context.
    void main_grammar_local::start_nested_blocks_impl(
        parse_iterator, parse_iterator)
    {
        // If this nested block is part of a list, then tell the
        // output state.
        state_.in_list = state_.explicit_list;
        state_.explicit_list = false;

        list_stack.push(list_stack_item(list_stack_item::nested_block));
    }
    // Called when a run of blocks ends: closes any lists still open, then
    // removes the sentinel pushed by the matching start_*_blocks_impl.
    void main_grammar_local::end_blocks_impl(parse_iterator, parse_iterator)
    {
        clear_stack();
        list_stack.pop();
    }
    // Semantic action for local.indent_check.  The matched range covers a
    // line's leading blanks plus an optional list marker ('*' or '#') and
    // the blanks after it.  Dispatches to list_block() when a marker is
    // present, otherwise to plain_block(); both set block_type, which the
    // local.top_level alternation then tests.
    void main_grammar_local::check_indentation_impl(
        parse_iterator first_, parse_iterator last_)
    {
        string_iterator first = first_.base();
        string_iterator last = last_.base();
        // Position of the list marker within the matched prefix, if any.
        auto mark_pos = string_view(first, last - first).find_first_of("*#");

        if (mark_pos == string_view::npos) {
            plain_block(first, last);
        }
        else {
            list_block(first, first + mark_pos, last);
        }
    }
    // Semantic action for local.code_line: a line continues the current
    // code block only while it is indented deeper than the enclosing
    // item's content indentation (indent2).
    void main_grammar_local::check_code_block_impl(
        parse_iterator first, parse_iterator last)
    {
        unsigned int new_indent = indent_length(first.base(), last.base());

        block_type = (new_indent > list_stack.top().indent2)
                         ? block_types::code
                         : block_types::none;
    }
    // Classify a line with no list marker, setting block_type to code or
    // paragraph, and closing any syntactic lists the line has outdented
    // past.  [first, last) is the line's leading whitespace.
    //
    // For quickbook >= 1.6 the indentation rules are relative to the
    // enclosing list item; for older versions any open lists are simply
    // closed and a non-empty indent means a code block.
    void main_grammar_local::plain_block(
        string_iterator first, string_iterator last)
    {
        if (qbk_version_n >= 106u) {
            unsigned int new_indent = indent_length(first, last);

            if (new_indent > list_stack.top().indent2) {
                // Deeper than the current item's content: code block at
                // the top level, just more paragraph text inside an
                // element.
                if (list_stack.top().type != list_stack_item::nested_block) {
                    block_type = block_types::code;
                }
                else {
                    block_type = block_types::paragraph;
                }
            }
            else {
                // Close every list the line has outdented past.
                while (list_stack.top().type ==
                           list_stack_item::syntactic_list &&
                       new_indent < list_stack.top().indent) {
                    state_.end_list_item();
                    state_.end_list(list_stack.top().mark);
                    list_stack.pop();
                    list_indent = list_stack.top().indent;
                }

                if (list_stack.top().type == list_stack_item::syntactic_list &&
                    new_indent == list_stack.top().indent) {
                    // If the paragraph is aligned with the list item's marker,
                    // then end the current list item if that's aligned (or to
                    // the left of) the parent's paragraph.
                    //
                    // i.e.
                    //
                    // * Level 1
                    //   * Level 2
                    //
                    //   Still Level 2
                    //
                    // vs.
                    //
                    // * Level 1
                    //   * Level 2
                    //
                    // Back to Level 1
                    list_stack_item save = list_stack.top();
                    list_stack.pop();

                    assert(
                        list_stack.top().type != list_stack_item::syntactic_list
                            ? new_indent >= list_stack.top().indent
                            : new_indent > list_stack.top().indent);

                    if (new_indent <= list_stack.top().indent2) {
                        state_.end_list_item();
                        state_.end_list(save.mark);
                        list_indent = list_stack.top().indent;
                    }
                    else {
                        // Still inside the saved list: put it back.
                        list_stack.push(save);
                    }
                }

                block_type = block_types::paragraph;
            }

            if (qbk_version_n == 106u &&
                list_stack.top().type == list_stack_item::syntactic_list) {
                detail::outerr(state_.current_file, first)
                    << "Paragraphs in lists aren't supported in quickbook 1.6."
                    << std::endl;
                ++state_.error_count;
            }
        }
        else {
            // Pre-1.6 behaviour: close all lists; any indentation at the
            // top level starts a code block.
            clear_stack();

            if (list_stack.top().type != list_stack_item::nested_block &&
                last != first)
                block_type = block_types::code;
            else
                block_type = block_types::paragraph;
        }
    }
    // Classify a line that begins with a list marker and update the list
    // stack: open a new (nested) list, continue the current one, or close
    // lists the marker has outdented past.  [first, mark_pos) is the
    // indentation before the marker, [first, last) includes the marker
    // and trailing blanks (so indent2 is the item's content indent).
    void main_grammar_local::list_block(
        string_iterator first, string_iterator mark_pos, string_iterator last)
    {
        unsigned int new_indent = indent_length(first, mark_pos);
        unsigned int new_indent2 = indent_length(first, last);
        char list_mark = *mark_pos;

        if (list_stack.top().type == list_stack_item::top_level &&
            new_indent > 0) {
            // An indented marker at the top level is just code, not a list.
            block_type = block_types::code;
            return;
        }

        if (list_stack.top().type != list_stack_item::syntactic_list ||
            new_indent > list_indent) {
            // Deeper than the current item: open a nested list.
            list_stack.push(
                list_stack_item(list_mark, new_indent, new_indent2));
            state_.start_list(list_mark);
        }
        else if (new_indent == list_indent) {
            // Same level: next item of the current list.
            state_.end_list_item();
        }
        else {
            // Outdented: close lists until the indentation matches.
            //
            // This should never reach root, since the first list
            // has indentation 0.
            while (list_stack.top().type == list_stack_item::syntactic_list &&
                   new_indent < list_stack.top().indent) {
                state_.end_list_item();
                state_.end_list(list_stack.top().mark);
                list_stack.pop();
            }

            state_.end_list_item();
        }

        list_indent = new_indent;

        if (list_mark != list_stack.top().mark) {
            // e.g. a '#' item appearing in a '*' list.
            detail::outerr(state_.current_file, first)
                << "Illegal change of list style.\n";
            detail::outwarn(state_.current_file, first)
                << "Ignoring change of list style." << std::endl;
            ++state_.error_count;
        }

        state_.start_list_item();
        block_type = block_types::list;
    }
    // Close every syntactic list that is still open, leaving only the
    // top_level/nested_block sentinel on the stack.
    void main_grammar_local::clear_stack()
    {
        while (list_stack.top().type == list_stack_item::syntactic_list) {
            state_.end_list_item();
            state_.end_list(list_stack.top().mark);
            list_stack.pop();
        }
    }
}
|
{"hexsha": "6d9368f3dce4cad94c39807b3a8969c8ee205966", "size": 47914, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tools/quickbook/src/main_grammar.cpp", "max_stars_repo_name": "cpp-pm/boost", "max_stars_repo_head_hexsha": "38c6c8c07f2fcc42d573b10807fef27ec14930f8", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1155.0, "max_stars_repo_stars_event_min_datetime": "2015-01-10T19:04:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:30:30.000Z", "max_issues_repo_path": "tools/quickbook/src/main_grammar.cpp", "max_issues_repo_name": "cpp-pm/boost", "max_issues_repo_head_hexsha": "38c6c8c07f2fcc42d573b10807fef27ec14930f8", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 618.0, "max_issues_repo_issues_event_min_datetime": "2015-01-02T01:39:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T15:18:40.000Z", "max_forks_repo_path": "tools/quickbook/src/main_grammar.cpp", "max_forks_repo_name": "cpp-pm/boost", "max_forks_repo_head_hexsha": "38c6c8c07f2fcc42d573b10807fef27ec14930f8", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 228.0, "max_forks_repo_forks_event_min_datetime": "2015-01-13T12:55:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T11:11:05.000Z", "avg_line_length": 36.271006813, "max_line_length": 111, "alphanum_fraction": 0.4355094544, "num_tokens": 10070}
|
"""
Name: Pham Tuan Anh
Class: K63-K2
MSSV: 18020116
You should understand the code you write.
"""
import numpy as np
import cv2
import sys
def q_0(input_file, output_file, delay=1):
    """Load an image, preview it in a window, and save a copy.

    :param input_file: path of the image to read.
    :param output_file: path the image is written back to.
    :param delay: milliseconds passed to ``cv2.waitKey`` while the
        preview window is shown.
    :return: None.  Exits the process if the image cannot be read.
    """
    image = cv2.imread(input_file, cv2.IMREAD_COLOR)
    if image is None:
        sys.exit("Could not read the image")

    cv2.imshow('Apple image', image)
    cv2.waitKey(delay)
    cv2.imwrite(output_file, image)

# Note on cv2.waitKey(a): it waits for at least `a` milliseconds.  If the
# user presses any key during that interval the program stops waiting;
# otherwise it continues once at least `a` ms have elapsed.
# Reference: https://web.archive.org/web/20120122022754/http://opencv.willowgarage.com/wiki/documentation/c/highgui/WaitKey
def q_1(input_file):
    """Print an image's dimensions and per-channel means.

    ``cv2.imread`` returns pixels in BGR channel order, so the image is
    converted to YCrCb and RGB before the statistics are taken.
    """
    image = cv2.imread(input_file, cv2.IMREAD_COLOR)
    if image is None:
        sys.exit("Could not read the image")

    height, width, depth = image.shape
    print("height={}, width={}, depth={}".format(height, width, depth))

    # Channel means in the YCrCb colour space.
    ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
    avgY, avgCr, avgCb = (np.mean(ycrcb[:, :, c]) for c in range(3))
    print("YCrCb")
    print("Average of Y: %.2f" % avgY)
    print("Average of Cr: %.2f" % avgCr)
    print("Average of Cb: %.2f" % avgCb)

    # Channel means in the RGB colour space.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    avgR, avgG, avgB = (np.mean(rgb[:, :, c]) for c in range(3))
    print("RGB")
    print("Average of R: %.2f" % avgR)
    print("Average of G: %.2f" % avgG)
    print("Average of B: %.2f" % avgB)
def q_2(input_file):
    """Crop two fixed regions from the image and save them to ./result.

    One region holds the in-focus apple, the other the blurred apple;
    both are previewed until a key is pressed.
    """
    image = cv2.imread(input_file, cv2.IMREAD_COLOR)
    if image is None:
        sys.exit("Could not read the image")

    # Region containing the sharp apple (rows 297:471, cols 363:539).
    clear_apple = image[297:471, 363:539]
    cv2.imshow("Clear apple", clear_apple)
    cv2.imwrite("./result/clear_apple.png", clear_apple)

    # Region containing the blurred apple (rows 39:127, cols 90:176).
    blurred_apple = image[39:127, 90:176]
    cv2.imshow("Blurred apple", blurred_apple)
    cv2.imwrite("./result/blurred_apple.png", blurred_apple)

    cv2.waitKey(0)
if __name__ == "__main__":
    # Run the three exercises in order; paths are relative to the
    # script's working directory and ./result must already exist.
    q_0('./sample_data/apple.png', './result/test_apple.png', 1000)
    q_1('./sample_data/chromatic_aberration.png')
    q_2("./sample_data/apple.png")
|
{"hexsha": "8366a93a3aa0cab3464b3dd04bc0f9ffbf67d56e", "size": 2478, "ext": "py", "lang": "Python", "max_stars_repo_path": "week02/week02.py", "max_stars_repo_name": "ptanh2k/int3404", "max_stars_repo_head_hexsha": "ad39ce61b768ef7b55936561c13bfa1c3adf0e92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "week02/week02.py", "max_issues_repo_name": "ptanh2k/int3404", "max_issues_repo_head_hexsha": "ad39ce61b768ef7b55936561c13bfa1c3adf0e92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "week02/week02.py", "max_forks_repo_name": "ptanh2k/int3404", "max_forks_repo_head_hexsha": "ad39ce61b768ef7b55936561c13bfa1c3adf0e92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0842105263, "max_line_length": 121, "alphanum_fraction": 0.6198547215, "include": true, "reason": "import numpy", "num_tokens": 809}
|
! Driver program exercising the B-spline plotting helpers below. Most of
! the plotting calls are commented out; only the cubic-spline-derivative
! plot currently runs.
program bspline_tests
  use utils
  use bspline
  use finite_elements
  use ogpf
  implicit none
  !---------------
  ! Variables
  !---------------
  ! NOTE(review): total_tests/passed_tests are initialized but never
  ! updated or reported, and x/result/true are declared but unused —
  ! apparent leftovers from an earlier test harness. y is host-associated
  ! into plot_linear_spline and plot_linear_spline_deriv below.
  integer :: i,j,total_tests, passed_tests
  real(wp), allocatable :: y(:),x(:)
  real(wp) :: result,true
  !---------------
  ! Logic
  !---------------
  total_tests = 1
  passed_tests = 0
  write(*,*)
  write(*,*) "!-----------------------"
  write(*,*) "! Testing linear splines"
  write(*,*) "!-----------------------"
  write(*,*)
  !call plot_linear_spline()
  write(*,*)
  write(*,*) "!----------------------------------"
  write(*,*) "! Testing linear spline derivatives"
  write(*,*) "!----------------------------------"
  write(*,*)
  !call plot_linear_spline_deriv()
  write(*,*)
  write(*,*) "!----------------------"
  write(*,*) "! Testing cubic splines"
  write(*,*) "!----------------------"
  write(*,*)
  !call plot_cubic_spline(cubic_spline_basis)
  write(*,*)
  write(*,*) "!---------------------------------"
  write(*,*) "! Testing cubic spline derivatives"
  write(*,*) "!---------------------------------"
  write(*,*)
  ! Only active test: plot cubic basis-function derivatives.
  call plot_cubic_spline(cubic_spline_basis_deriv)
contains
! Plot cubic B-spline basis functions (or their derivatives) for several
! basis indices k, using gnuplot via the ogpf wrapper.
!
! s: external evaluator forwarded to set_inner_product (e.g.
!    cubic_spline_basis or cubic_spline_basis_deriv).
subroutine plot_cubic_spline(s)
  real(wp), external :: s
  integer, parameter :: i = 101   ! samples per curve
  integer, parameter :: n = 7
  integer, parameter :: m = n+2   ! number of nodes in the grid
  integer :: j,k
  real(wp) :: y(m)                ! local node grid (shadows the host y)
  real(wp) :: x(i),fx(i),h
  type(gpf):: gp
  type(inner_product_obj) :: ipo
  ! Uniform nodes on [0, 1]; h is the (constant) spacing.
  y = linspace(0.0_wp,1.0_wp,m)
  h = y(2) - y(1)
  write(*,*) "Node grid:"
  write(*,'(F10.5)', advance="no") y
  write(*,*)
  ! Each stanza below: pick a basis index k, sample its support, and plot.
  ! The x ranges are chosen to cover the support of basis function k.
  k = 0
  call set_inner_product(ipo,y,k,n,constant_fun,s,.true.)
  x = linspace(y(1),y(3),i)
  do j = 1,i
    fx(j) = ipo%eval(x(j))
  end do
  call gp%title('Cubic B-Spline Phi_0')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
  k = 1
  call set_inner_product(ipo,y,k,n,constant_fun,s,.true.)
  x = linspace(y(1),y(4),i)
  do j = 1,i
    fx(j) = ipo%eval(x(j))
  end do
  call gp%title('Cubic B-Spline Phi_1')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
  k = 2
  call set_inner_product(ipo,y,k,n,constant_fun,s,.true.)
  x = linspace(y(1),y(5),i)
  do j = 1,i
    fx(j) = ipo%eval(x(j))
  end do
  call gp%title('Cubic B-Spline Phi_2')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
  ! Right-edge basis functions (supports truncated at the boundary).
  k = 7
  call set_inner_product(ipo,y,k,n,constant_fun,s,.true.)
  x = linspace(y(m-3),y(m),i)
  do j = 1,i
    fx(j) = ipo%eval(x(j))
  end do
  call gp%title('Cubic B-Spline Phi_7')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
  k = 8
  call set_inner_product(ipo,y,k,n,constant_fun,s,.true.)
  x = linspace(y(m-2),y(m),i)
  do j = 1,i
    fx(j) = ipo%eval(x(j))
  end do
  call gp%title('Cubic B-Spline Phi_8')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
end subroutine plot_cubic_spline
! Plot linear (hat) B-spline basis functions for a few indices k via the
! inner-product object's eval.
!
! NOTE(review): unlike plot_cubic_spline, this subroutine does not declare
! a local y — it assigns the program-level allocatable y by host
! association (allocation-on-assignment).
subroutine plot_linear_spline()
  integer, parameter :: i = 101   ! samples per curve
  integer, parameter :: n = 7
  integer, parameter :: m = n+2   ! number of nodes
  integer :: j,k
  real(wp) :: x(i),fx(i),h
  type(gpf):: gp
  type(inner_product_obj) :: ipo
  y = linspace(0.0_wp,1.0_wp,m)
  h = y(2) - y(1)
  ! --------------
  ! B-Spline Tests
  ! --------------
  ! ---------
  ! Test One
  ! ---------
  write(*,*) "Node grid:"
  write(*,'(F10.5)', advance="no") y
  write(*,*)
  k = 0
  x = linspace(y(1),y(3),i)
  call set_inner_product(ipo,y,k,n,&
      constant_fun,linear_spline_basis,.false.)
  do j = 1, i
    fx(j) = ipo%eval(x(j))
  end do
  call gp%title('Linear B-Spline Phi_0')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
  k = 1
  x = linspace(y(2),y(4),i)
  call set_inner_product(ipo,y,k,n,&
      constant_fun,linear_spline_basis,.false.)
  do j = 1, i
    fx(j) = ipo%eval(x(j))
  end do
  call gp%title('Linear B-Spline Phi_1')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
  ! NOTE(review): k = 6 here but the plot title says Phi_7 — possible
  ! off-by-one in either the index or the label; confirm against the
  ! basis-numbering convention used by set_inner_product.
  k = 6
  x = linspace(y(m-2),y(m),i)
  call set_inner_product(ipo,y,k,n,&
      constant_fun,linear_spline_basis,.false.)
  do j = 1, i
    fx(j) = ipo%eval(x(j))
  end do
  call gp%title('Linear B-Spline Phi_7')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
end subroutine plot_linear_spline
! Plot piecewise-constant derivatives of linear hat functions, computed
! inline: +1/h on the rising segment, -1/h on the falling segment, 0
! elsewhere.
!
! NOTE(review): several oddities to confirm —
!  * y is the host (program-level) allocatable, assigned here;
!  * the branch `x(j) == size(y)` compares a coordinate in [0, 1] against
!    the node COUNT (9), so it can never be true on this grid — likely a
!    leftover or a bug;
!  * the same if-chain is duplicated three times; only k and the x range
!    differ between stanzas;
!  * as in plot_linear_spline, k = 6 is plotted under the title Phi_7.
subroutine plot_linear_spline_deriv()
  integer, parameter :: i = 101   ! samples per curve
  integer, parameter :: n = 7
  integer, parameter :: m = n+2   ! number of nodes
  integer :: j,k
  real(wp) :: x(i),fx(i),h
  type(gpf):: gp
  y = linspace(0.0_wp,1.0_wp,m)
  h = y(2) - y(1)
  write(*,*) "Node grid:"
  write(*,'(F10.5)', advance="no") y
  write(*,*)
  k = 0
  x = linspace(y(1),y(3),i)
  do j = 1, i
    if ( x(j) == 0 ) then
      fx(j) = 1.0/h
    elseif ( x(j) == size(y) ) then
      fx(j) = -1.0/h
    elseif ( x(j) <= y( k+2 ) ) then
      fx(j) = 1.0/h               ! rising segment of the hat
    elseif ( x(j) <= y( k+3 ) ) then
      fx(j) = -1.0/h              ! falling segment of the hat
    else
      fx(j) = 0.0/h               ! outside the support (evaluates to 0)
      !write(*,*) "Error ", x
    end if
  end do
  call gp%title('Linear B-Spline Phi_0 Deriv')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
  k = 1
  x = linspace(y(2),y(4),i)
  do j = 1, i
    if ( x(j) == 0 ) then
      fx(j) = 1.0/h
    elseif ( x(j) == size(y) ) then
      fx(j) = -1.0/h
    elseif ( x(j) <= y( k+2 ) ) then
      fx(j) = 1.0/h
    elseif ( x(j) <= y( k+3 ) ) then
      fx(j) = -1.0/h
    else
      fx(j) = 0.0/h
      !write(*,*) "Error ", x
    end if
  end do
  call gp%title('Linear B-Spline Phi_1 Deriv')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
  k = 6
  x = linspace(y(m-2),y(m),i)
  do j = 1, i
    if ( x(j) == 0 ) then
      fx(j) = 1.0/h
    elseif ( x(j) == size(y) ) then
      fx(j) = -1.0/h
    elseif ( x(j) <= y( k+2 ) ) then
      fx(j) = 1.0/h
    elseif ( x(j) <= y( k+3 ) ) then
      fx(j) = -1.0/h
    else
      fx(j) = 0.0/h
      !write(*,*) "Error ", x
    end if
  end do
  call gp%title('Linear B-Spline Phi_7 Deriv')
  call gp%options('set key top right; set grid')
  call gp%plot(x,fx,'title "B(x)" with lines lt 1 lw 1')
end subroutine plot_linear_spline_deriv
! Constant weight function f(x) = 1, passed to set_inner_product so the
! plotted quantity is the bare basis function (or derivative) itself.
function constant_fun(x)
  real(wp) :: constant_fun
  real(wp), intent(in) :: x   ! unused; present to satisfy the expected interface
  constant_fun = 1.0_wp
end function constant_fun
end program bspline_tests
|
{"hexsha": "5f3aa56bc74f068580f03c2b7c0125d2ce0f15b8", "size": 6488, "ext": "f95", "lang": "FORTRAN", "max_stars_repo_path": "tests/bspline_tests.f95", "max_stars_repo_name": "AndreTGMello/numerical-analysis-course", "max_stars_repo_head_hexsha": "e2b4b6e7c74e8db9f4f637e7bab5b73ef119a23f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/bspline_tests.f95", "max_issues_repo_name": "AndreTGMello/numerical-analysis-course", "max_issues_repo_head_hexsha": "e2b4b6e7c74e8db9f4f637e7bab5b73ef119a23f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/bspline_tests.f95", "max_forks_repo_name": "AndreTGMello/numerical-analysis-course", "max_forks_repo_head_hexsha": "e2b4b6e7c74e8db9f4f637e7bab5b73ef119a23f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3909774436, "max_line_length": 65, "alphanum_fraction": 0.5493218249, "num_tokens": 2327}
|
"""Generates server for backend API"""
from flask import Flask, request
import json
import numpy as np
from skimage.transform import resize
from . import predict as pred
from . import transform_data as td
from . import config as cf
flask_app = Flask(__name__)
host = cf.HOST
port = cf.BACKEND_PORT
# load model
learn = pred.fetch_learner()
@flask_app.route('/api/predict', methods=['POST'])
def predict():
    """Obtain an image-segmentation prediction for a POSTed image.

    Expects a JSON body with:
        contents: the image as a nested list of uint8 pixel values
            (NOT base64, despite the original inline note).
        content_type: opaque value passed through to the response.

    Returns a JSON string with 'content_type' (echoed), 'rf' (the ratio of
    original image area to prediction area, used downstream to rescale
    measured sizes), and 'yimage_list' (the predicted mask as a nested list).
    """
    # Fix: the docstring above was previously placed AFTER a leftover debug
    # print('okay'), so it was a dead string statement rather than __doc__;
    # the debug print has been removed.
    content = request.get_json()
    im = np.asarray(content['contents'], dtype=np.uint8)

    # Grayscale input: replicate to 3 channels so the model gets RGB-shaped data.
    if len(im.shape) == 2:
        im = td.make_3channel(im)
    # Bilinear (order=1) downscale to the model's input size.
    img = resize(im, (192, 256), order=1)
    img = td.fastai_image(img)

    # Predict, then upscale the mask with nearest-neighbor (order=0) so the
    # label values are not interpolated; scale labels into 0/255 range.
    prediction = pred.predict_segment(learn, img).astype(np.uint8)
    prediction = 255 * resize(prediction, (576, 768), order=0)
    prediction = prediction.astype(np.uint8)

    # Area ratio mapping prediction pixels back to original-image units.
    resizefactor = (
        im.shape[0] * im.shape[1]
        / (prediction.shape[0] * prediction.shape[1])
    )
    return json.dumps({
        'content_type': content['content_type'],
        'rf': resizefactor,
        'yimage_list': prediction.tolist()
    })
@flask_app.route('/api/get_size_distr', methods=['POST'])
def get_size_distr():
    """Compute the particle size distribution of a predicted mask.

    The POSTed JSON carries 'data_pred', itself a JSON string holding the
    mask ('yimage_list'), the resize factor ('rf'), and the original
    'content_type'. Connected regions are labeled and their sizes rescaled
    back to original-image units before being returned as JSON.
    """
    payload = request.get_json()
    prediction = json.loads(payload['data_pred'])

    # Label connected regions in the predicted mask to measure region sizes.
    mask = np.asarray(prediction['yimage_list'], dtype=np.uint8)
    labeled, unique, size_distr = pred.get_size_distr(mask)

    # Rescale sizes back to the original image resolution.
    size_distr = size_distr * prediction['rf']

    return json.dumps({
        'content_type': prediction['content_type'],
        'labeled_list': labeled.tolist(),
        'unique_list': unique.tolist(),
        'size_distr_list': size_distr.tolist()
    })
if __name__ == '__main__':
    # Development entry point: Flask's built-in server with debug enabled.
    # Not suitable for production; run behind a proper WSGI server there.
    flask_app.run(debug=True, host=host, port=port)
|
{"hexsha": "4ed2f0338406cf9225b2f58e1286a41932193780", "size": 2261, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/backend_api.py", "max_stars_repo_name": "lwang94/sem_size_analysis", "max_stars_repo_head_hexsha": "803251cdcab3d8304a365df9ac5879fcd9346270", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-03-11T08:19:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-25T09:14:11.000Z", "max_issues_repo_path": "src/backend_api.py", "max_issues_repo_name": "lwang94/sem_size_analysis", "max_issues_repo_head_hexsha": "803251cdcab3d8304a365df9ac5879fcd9346270", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-03-11T08:19:34.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-11T08:19:34.000Z", "max_forks_repo_path": "src/backend_api.py", "max_forks_repo_name": "lwang94/sem_size_analysis", "max_forks_repo_head_hexsha": "803251cdcab3d8304a365df9ac5879fcd9346270", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-12T08:23:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-12T08:23:31.000Z", "avg_line_length": 28.2625, "max_line_length": 75, "alphanum_fraction": 0.6660769571, "include": true, "reason": "import numpy", "num_tokens": 544}
|
//=========================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//=========================================================================
#include "Write.h"
#include "smtk/attribute/Attribute.h"
#include "smtk/attribute/FileItem.h"
#include "smtk/attribute/IntItem.h"
#include "smtk/attribute/ResourceItem.h"
#include "smtk/attribute/StringItem.h"
#include "smtk/session/vtk/Resource.h"
#include "smtk/session/vtk/Session.h"
#include "smtk/session/vtk/Write_xml.h"
#include "smtk/session/vtk/json/jsonResource.h"
#include "smtk/session/vtk/operators/Export.h"
#include "smtk/extension/vtk/source/vtkResourceMultiBlockSource.h"
#include "smtk/common/Paths.h"
#include "smtk/model/SessionIOJSON.h"
//force to use filesystem version 3
#define BOOST_FILESYSTEM_VERSION 3
#include <boost/filesystem.hpp>
using namespace smtk::model;
namespace
{
// Append the UUID recorded on \a data's information object to \a uuids.
// No-op when \a data is null. Fix: the original converted the UUID to a
// string and re-parsed it via the UUID(string) constructor — a needless
// round-trip; the UUID is now appended directly.
void RetrievePreservedUUID(vtkDataObject* data, std::vector<smtk::common::UUID>& uuids)
{
  if (!data)
    return;

  uuids.push_back(vtkResourceMultiBlockSource::GetDataObjectUUID(data->GetInformation()));
}

// Depth-first traversal of a (possibly nested) multiblock dataset,
// collecting the preserved UUID of every block, including \a data itself.
void RetrievePreservedUUIDsRecursive(vtkDataObject* data, std::vector<smtk::common::UUID>& uuids)
{
  RetrievePreservedUUID(data, uuids);

  vtkMultiBlockDataSet* mbds = vtkMultiBlockDataSet::SafeDownCast(data);
  if (mbds)
  {
    const int nb = mbds->GetNumberOfBlocks();
    for (int i = 0; i < nb; ++i)
    {
      RetrievePreservedUUIDsRecursive(mbds->GetBlock(i), uuids);
    }
  }
}
} // namespace
namespace smtk
{
namespace session
{
namespace vtk
{
// The write operation can run only when the base-class checks pass and at
// least one resource is associated with the operation's parameters.
bool Write::ableToOperate()
{
  return this->Superclass::ableToOperate() &&
    this->parameters()->associations()->numberOfValues() >= 1;
}
// Serialize the associated vtk-session resource to disk: a JSON record of
// the resource plus one data file per model (either exported to Exodus or
// copied from the model's original "url"). Returns FAILED on any
// serialization, export, or copy error.
Write::Result Write::operateInternal()
{
  auto resourceItem = this->parameters()->associations();

  smtk::session::vtk::Resource::Ptr rsrc =
    std::dynamic_pointer_cast<smtk::session::vtk::Resource>(resourceItem->value());

  // Serialize resource into a set of JSON records:
  smtk::model::SessionIOJSON::json j = rsrc;
  if (j.is_null())
  {
    return this->createResult(smtk::operation::Operation::Outcome::FAILED);
  }

  // Walk every model's multiblock data and record each block's UUID so
  // they can be re-associated on read.
  std::vector<smtk::common::UUID> preservedUUIDs;
  smtk::common::UUIDs modelIds = rsrc->entitiesMatchingFlags(smtk::model::MODEL_ENTITY);
  for (const auto& id : modelIds)
  {
    smtk::model::Model dataset = smtk::model::Model(rsrc, id);
    EntityHandle handle = rsrc->session()->toEntity(dataset);
    vtkMultiBlockDataSet* mbds = handle.object<vtkMultiBlockDataSet>();
    RetrievePreservedUUIDsRecursive(mbds, preservedUUIDs);
  }

  std::vector<std::string> preservedUUIDsStr;
  preservedUUIDsStr.reserve(preservedUUIDs.size());
  for (auto& id : preservedUUIDs)
  {
    preservedUUIDsStr.push_back(id.toString());
  }
  j["preservedUUIDs"] = preservedUUIDsStr;

  // Model data files are written next to the resource file, named by the
  // model's UUID plus the session's default extension.
  std::string fileDirectory = smtk::common::Paths::directory(rsrc->location()) + "/";

  std::vector<std::string> modelFiles;
  for (const auto& id : modelIds)
  {
    smtk::model::Model dataset = smtk::model::Model(rsrc, id);
    std::string modelFile =
      fileDirectory + id.toString() + rsrc->session()->defaultFileExtension(dataset);

    // Exodus export is compiled out; the else-branch (file copy) runs.
    static const bool exportToExodus = false;
    if (exportToExodus)
    {
      Export::Ptr exportOp = Export::create();
      exportOp->parameters()->findString("filetype")->setValue("");
      exportOp->parameters()->associate(dataset.entityRecord());
      exportOp->parameters()->findFile("filename")->setValue(modelFile);
      Result exportOpResult = exportOp->operate(Key());
      if (exportOpResult->findInt("outcome")->value() != static_cast<int>(Outcome::SUCCEEDED))
      {
        smtkErrorMacro(log(), "Cannot export file \"" << modelFile << "\".");
        return this->createResult(smtk::operation::Operation::Outcome::FAILED);
      }
    }
    else
    {
      // Copy the model's original data file alongside the resource; skip
      // the copy when the destination already exists.
      std::string url = dataset.stringProperty("url")[0];
      if (!boost::filesystem::is_regular_file(url))
      {
        smtkErrorMacro(log(), "Cannot copy file \"" << url << "\".");
        return this->createResult(smtk::operation::Operation::Outcome::FAILED);
      }
      if (!boost::filesystem::is_regular_file(modelFile))
      {
        boost::filesystem::copy_file(url, modelFile);
      }
    }
    // Record the relative filename (not the full path) in the JSON.
    modelFiles.push_back(id.toString() + rsrc->session()->defaultFileExtension(dataset));
  }
  j["modelFiles"] = modelFiles;

  // Write JSON records to the specified URL:
  smtk::model::SessionIOJSON::saveModelRecords(j, rsrc->location());

  // Add the mesh file to the result's list of additional files
  auto result = this->createResult(smtk::operation::Operation::Outcome::SUCCEEDED);
  for (const auto& modelFile : modelFiles)
  {
    result->findFile("additional files")->appendValue(modelFile);
  }

  return result;
}
// Return the XML (from the generated Write_xml header) that describes this
// operation's parameter and result attributes.
const char* Write::xmlDescription() const
{
  return Write_xml;
}
// After a successful write, each associated resource matches its on-disk
// state, so mark every one of them clean.
void Write::markModifiedResources(Write::Result& /*unused*/)
{
  auto associated = this->parameters()->associations();
  for (auto it = associated->begin(); it != associated->end(); ++it)
  {
    auto rsrc = std::dynamic_pointer_cast<smtk::resource::Resource>(*it);
    // The resource is now unmodified relative to its persistent state.
    rsrc->setClean(true);
  }
}
// Free-function convenience wrapper: write \a resource with a
// default-configured Write operation and report success.
bool write(const smtk::resource::ResourcePtr& resource)
{
  auto writeOp = Write::create();
  writeOp->parameters()->associate(resource);
  auto outcome = writeOp->operate()->findInt("outcome")->value();
  return outcome == static_cast<int>(Write::Outcome::SUCCEEDED);
}
} // namespace vtk
} // namespace session
} // namespace smtk
|
{"hexsha": "4f3791f4fba2390855c87d6be9ee13266e60e6f9", "size": 5949, "ext": "cxx", "lang": "C++", "max_stars_repo_path": "smtk/session/vtk/operators/Write.cxx", "max_stars_repo_name": "jcfr/SMTK", "max_stars_repo_head_hexsha": "0069ea37f8f71a440b8f10a157b84a56ca004551", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 40.0, "max_stars_repo_stars_event_min_datetime": "2015-02-21T19:55:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T13:13:05.000Z", "max_issues_repo_path": "smtk/session/vtk/operators/Write.cxx", "max_issues_repo_name": "jcfr/SMTK", "max_issues_repo_head_hexsha": "0069ea37f8f71a440b8f10a157b84a56ca004551", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": 127.0, "max_issues_repo_issues_event_min_datetime": "2015-01-15T20:55:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-19T17:34:15.000Z", "max_forks_repo_path": "smtk/session/vtk/operators/Write.cxx", "max_forks_repo_name": "jcfr/SMTK", "max_forks_repo_head_hexsha": "0069ea37f8f71a440b8f10a157b84a56ca004551", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": 27.0, "max_forks_repo_forks_event_min_datetime": "2015-03-04T14:17:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-23T01:05:42.000Z", "avg_line_length": 28.8786407767, "max_line_length": 97, "alphanum_fraction": 0.675575727, "num_tokens": 1503}
|
import numpy as np
import torch
from torch.utils.data import Dataset
from torchsparse import SparseTensor
from torchsparse.utils import sparse_quantize
import lidar_det.utils.jrdb_transforms as jt
import lidar_det.utils.utils_box3d as ub3d
from .utils import collate_sparse_tensors, boxes_to_target
# from .utils import get_prediction_target
__all__ = [
"JRDBDet3D",
"NuScenesDet3D",
]
class _DatasetBase(Dataset):
    """Shared base for 3D-detection datasets.

    Subclasses implement `_get_handle` / `_get_data` and set class tables
    (`_ave_lwh`, `_dist_thresh`, `_nc`, `_inds_to_cls`). `__getitem__`
    voxelizes the point cloud into a SparseTensor and, in training splits,
    encodes per-point regression targets for every class.
    """

    def __init__(self, data_dir, split, cfg):
        # voxel_size may be a scalar (cubic voxels) or a 3-list (per-axis)
        vs = cfg["voxel_size"]
        voxel_size = (
            np.array(vs, dtype=np.float32)
            if isinstance(vs, list)
            else np.array([vs, vs, vs], dtype=np.float32)
        )
        self._voxel_size = voxel_size.reshape(3, 1)
        # large positive offset so voxel coordinates are non-negative
        self._voxel_offset = np.array([1e5, 1e5, 1e4], dtype=np.int32).reshape(3, 1)
        self._num_points = cfg["num_points"]      # cap on voxels per training sample
        self._na = cfg["num_anchors"]             # anchors per point (A)
        self._no = cfg["num_ori_bins"]            # orientation bins for box encoding
        self._canonical = cfg["canonical"]
        self._included_classes = cfg["included_classes"]
        self._additional_features = cfg["additional_features"]
        self._nsweeps = cfg["nsweeps"]
        self._augmentation = cfg["augmentation"]
        self.__training = "train" in split  # loss will be computed
        self.__split = split
        self.__handle = self._get_handle(data_dir, split)

    def _get_handle(self, data_dir, split):
        # Subclass hook: return an indexable handle over raw frames.
        raise NotImplementedError

    def _get_data(self, data_dict, training=True):
        # Subclass hook: return (pc, boxes, boxes_cls, pc_offset, addi_feats).
        raise NotImplementedError

    def _do_augmentation(self, pc, boxes):
        # Random global scale (±5%) and rotation about the z axis, applied
        # consistently to both points and ground-truth boxes.
        # random scale
        scale_factor = np.random.uniform(0.95, 1.05)
        pc *= scale_factor

        # random rotation
        theta = np.random.uniform(0, 2 * np.pi)
        rot_mat = np.array(
            [
                [np.cos(theta), np.sin(theta), 0],
                [-np.sin(theta), np.cos(theta), 0],
                [0, 0, 1],
            ],
            dtype=np.float32,
        )
        pc = rot_mat @ pc

        if boxes is not None and len(boxes) > 0:
            boxes[:, :6] *= scale_factor
            boxes[:, :3] = boxes[:, :3] @ rot_mat.T
            boxes[:, 6] += theta  # yaw rotates with the cloud

        return pc, boxes

    @property
    def split(self):
        return self.__split  # used by trainer.py

    def __len__(self):
        return len(self.__handle)

    def __getitem__(self, idx):
        data_dict = self.__handle[idx]
        pc, boxes_gt, boxes_gt_cls, pc_offset, addi_feats = self._get_data(data_dict)

        if self.__training and self._augmentation:
            pc, boxes_gt = self._do_augmentation(pc, boxes_gt)

        # voxel coordinate
        pc_voxel = np.round(pc / self._voxel_size) + self._voxel_offset
        pc_voxel = pc_voxel.T
        inds, inverse_map = sparse_quantize(
            pc_voxel, feats=None, labels=None, return_index=True, return_invs=True,
        )  # NOTE all this does is find indices of non-duplicating elements

        # for nuScenes with multisweep, only do prediction for keyframe voxels
        if "pc_dt" in data_dict:
            pc_dt = data_dict["pc_dt"]
            # points with the smallest time delta belong to the keyframe
            pc_kfmask = pc_dt == pc_dt.min()
            net_input_kfmask = pc_kfmask[inds]
            net_input_kfmask[inverse_map[pc_kfmask]] = 1
            # print("pc_kfmask", pc_kfmask.shape, pc_kfmask.sum())
            # print("net_input_kfmask", net_input_kfmask.shape, net_input_kfmask.sum())
        else:
            pc_kfmask = None
            net_input_kfmask = None

        # upper cap on memory consumption
        if self.__training and len(inds) > self._num_points:
            kept_inds = np.random.choice(len(inds), self._num_points, replace=False)
            inds = inds[kept_inds]
            if net_input_kfmask is not None:
                net_input_kfmask = net_input_kfmask[kept_inds]

        # per-voxel features: xyz, optionally concatenated extra channels
        input_feat = (
            pc.T[inds]
            if addi_feats is None
            else np.concatenate((pc.T[inds], addi_feats.T[inds]), axis=1)
        )  # (N, C)
        net_input = SparseTensor(input_feat, pc_voxel[inds])
        if net_input_kfmask is not None:
            net_input_kfmask = torch.from_numpy(net_input_kfmask).bool()

        params_dict = {
            "ave_lwh": self._ave_lwh,
            "canonical": self._canonical,
            "voxel_offset": self._voxel_offset,
            "voxel_size": self._voxel_size,
            "class_mapping": self._inds_to_cls,
            "dist_thresh": self._dist_thresh,
        }

        data_dict.update(
            {
                "net_input": net_input,
                "net_input_kfmask": net_input_kfmask,
                # "inverse_map": inverse_map,
                "points": pc,  # (3, N)
                "points_offset": pc_offset,  # (3,)
                "points_kfmask": pc_kfmask,  # (N, )
                "num_voxels": len(inds),
                "additional_features": addi_feats,  # (C, N) or None
                "boxes_gt": boxes_gt,  # (B, 7) or None
                "boxes_gt_cls": boxes_gt_cls,  # (B,) or None
                "params": params_dict,
            }
        )

        if not self.__training:
            return data_dict

        # assigning target for each class independently
        N = len(inds)
        A = self._na
        S = self._nc
        # probe boxes_to_target once to learn the encoded channel count C
        btmp = boxes_to_target(np.ones((1, 7)), self._ave_lwh[0], A, self._no)
        C = btmp.shape[-1]
        closest_box_inds = -1 * np.ones((N, S), dtype=np.int32)
        boxes_matched = np.zeros((N, S, 7), dtype=np.float32)
        boxes_encoded = np.zeros((N, A, S, C), dtype=np.float32)
        if boxes_gt is not None:
            for icls in range(self._nc):
                # match each kept point to its nearest box of this class
                cmask = boxes_gt_cls == icls
                boxes_gt_c = boxes_gt[cmask]
                if len(boxes_gt_c) == 0:
                    continue

                closest_box_inds_c, _ = ub3d.find_closest_boxes(pc, boxes_gt_c)
                closest_box_inds_c = closest_box_inds_c[inds]
                boxes_matched_c = boxes_gt_c[closest_box_inds_c]

                closest_box_inds[:, icls] = closest_box_inds_c
                boxes_matched[:, icls, :] = boxes_matched_c
                boxes_encoded[:, :, icls, :] = boxes_to_target(
                    boxes_matched_c, self._ave_lwh[icls], A, self._no
                )

        boxes_matched = torch.from_numpy(boxes_matched)
        boxes_encoded = torch.from_numpy(boxes_encoded)
        # boxes_cls = (
        #     torch.from_numpy(boxes_gt_cls[closest_box_inds])
        #     if boxes_gt_cls is not None
        #     else None
        # )

        data_dict.update(
            {
                "boxes_matched": boxes_matched,  # (N, S, 7)
                "boxes_encoded": boxes_encoded,  # (N, A, S, C)
                # "boxes_cls": boxes_cls,  # (N,)
                "closest_box_inds": closest_box_inds,  # (N, S)
            }
        )

        return data_dict

    def collate_batch(self, batch):
        # Custom collation: SparseTensors get sparse collation, tensors are
        # concatenated, "params" is shared (taken from the first sample),
        # everything else is gathered into a list.
        rtn_dict = {}
        for k, v in batch[0].items():
            if isinstance(v, SparseTensor):
                rtn_dict[k] = collate_sparse_tensors([sample[k] for sample in batch])
            elif isinstance(v, torch.Tensor):
                rtn_dict[k] = torch.cat([sample[k] for sample in batch], dim=0)
            elif k == "params":
                if k not in rtn_dict:
                    rtn_dict[k] = v
            else:
                rtn_dict[k] = [sample[k] for sample in batch]

        return rtn_dict
class JRDBDet3D(_DatasetBase):
    """JRDB dataset for single-class (pedestrian) 3D detection."""

    def __init__(self, *args, **kwargs):
        super(JRDBDet3D, self).__init__(*args, **kwargs)
        self._ave_lwh = [(0.9, 0.5, 1.7)]   # average pedestrian box (l, w, h)
        self._dist_thresh = [(0.5, 0.7)]
        self._nc = 1
        self._inds_to_cls = ["pedestrian"]  # not used

    def _get_handle(self, data_dir, split):
        """Return a JRDB frame handle for `split`, holding out a fixed set
        of sequences as the validation split."""
        from .handles.jrdb_handle import JRDBHandleDet3D

        jrdb_val_seq = [
            "clark-center-2019-02-28_1",
            "gates-ai-lab-2019-02-08_0",
            "huang-2-2019-01-25_0",
            "meyer-green-2019-03-16_0",
            "nvidia-aud-2019-04-18_0",
            "tressider-2019-03-16_1",
            "tressider-2019-04-26_2",
        ]

        if split == "train":
            return JRDBHandleDet3D(data_dir, "train", exclude_sequences=jrdb_val_seq)
        elif split == "val":
            return JRDBHandleDet3D(data_dir, "train", sequences=jrdb_val_seq)
        elif split == "train_val":
            return JRDBHandleDet3D(data_dir, "train")
        elif split == "test":
            return JRDBHandleDet3D(data_dir, "test")
        else:
            raise RuntimeError(f"Invalid split: {split}")

    def _get_data(self, data_dict):
        """Return (pc, boxes, boxes_cls, pc_offset, addi_feats) in the base
        frame. Boxes are None for unlabeled (test) frames."""
        # point cloud in base frame
        pc_upper = data_dict["pc_upper"]
        pc_lower = data_dict["pc_lower"]
        pc_upper = jt.transform_pts_upper_velodyne_to_base(pc_upper)
        pc_lower = jt.transform_pts_lower_velodyne_to_base(pc_lower)
        pc = np.concatenate([pc_upper, pc_lower], axis=1)  # (3, N)
        pc_offset = np.zeros(3, dtype=np.float32)

        if "label_str" not in data_dict.keys():
            return pc, None, None, pc_offset, None

        # bounding box in base frame
        boxes, _ = ub3d.string_to_boxes(data_dict["label_str"])

        # Filter out corrupted annotations with a non-positive dimension.
        # Fix: this previously used `.min(axis=1).astype(np.bool)`; the
        # `np.bool` alias was removed in NumPy 1.24 and raises
        # AttributeError there. `.all(axis=1)` is equivalent and returns
        # a boolean mask directly.
        valid_mask = (boxes[:, 3:6] > 0.0).all(axis=1)
        boxes = boxes[valid_mask]
        boxes_cls = np.zeros(len(boxes), dtype=np.int32)

        return pc, boxes, boxes_cls, pc_offset, None
class NuScenesDet3D(_DatasetBase):
    """nuScenes dataset for multi-class 3D detection (10 detection classes),
    with optional restriction to a configured subset of classes."""

    def __init__(self, *args, **kwargs):
        super(NuScenesDet3D, self).__init__(*args, **kwargs)
        # Per-class average box dimensions (l, w, h), index-aligned with
        # self._inds_to_cls below.
        self._ave_lwh = [
            (0.50, 2.53, 0.98),
            (1.70, 0.60, 1.28),
            (11.23, 2.93, 3.47),
            (4.62, 1.95, 1.73),
            (6.37, 2.85, 3.19),
            (2.11, 0.77, 1.47),
            (0.73, 0.67, 1.77),
            (0.41, 0.41, 1.07),
            (12.29, 2.90, 3.87),
            (6.93, 2.51, 2.84),
        ]  # from nusc.list_category()
        # Per-class distance thresholds, same index order as _ave_lwh.
        self._dist_thresh = [
            (0.6, 2.63),
            (0.7, 1.8),
            (3.03, 11.33),
            (2.05, 4.72),
            (2.95, 6.47),
            (0.87, 2.21),
            (0.77, 0.83),
            (0.51, 0.71),
            (3.0, 12.39),
            (2.61, 7.03),
        ]
        self._nc = 10
        # Raw nuScenes category names -> detection class ("void" = ignored).
        self._cls_mapping = {
            "animal": "void",
            "human.pedestrian.personal_mobility": "void",
            "human.pedestrian.stroller": "void",
            "human.pedestrian.wheelchair": "void",
            "movable_object.debris": "void",
            "movable_object.pushable_pullable": "void",
            "static_object.bicycle_rack": "void",
            "vehicle.emergency.ambulance": "void",
            "vehicle.emergency.police": "void",
            "movable_object.barrier": "barrier",
            "vehicle.bicycle": "bicycle",
            "vehicle.bus.bendy": "bus",
            "vehicle.bus.rigid": "bus",
            "vehicle.car": "car",
            "vehicle.construction": "construction_vehicle",
            "vehicle.motorcycle": "motorcycle",
            "human.pedestrian.adult": "pedestrian",
            "human.pedestrian.child": "pedestrian",
            "human.pedestrian.construction_worker": "pedestrian",
            "human.pedestrian.police_officer": "pedestrian",
            "movable_object.trafficcone": "traffic_cone",
            "vehicle.trailer": "trailer",
            "vehicle.truck": "truck",
        }
        self._cls_to_inds = {
            "void": -1,
            "barrier": 0,
            "bicycle": 1,
            "bus": 2,
            "car": 3,
            "construction_vehicle": 4,
            "motorcycle": 5,
            "pedestrian": 6,
            "traffic_cone": 7,
            "trailer": 8,
            "truck": 9,
        }
        self._inds_to_cls = [
            "barrier",
            "bicycle",
            "bus",
            "car",
            "construction_vehicle",
            "motorcycle",
            "pedestrian",
            "traffic_cone",
            "trailer",
            "truck",
        ]
        # Sanity check: the forward and inverse class tables must agree.
        for i, c in enumerate(self._inds_to_cls):
            assert self._cls_to_inds[c] == i

        # customized classes: when cfg restricts the class set, rebuild all
        # per-class tables so indices are contiguous over the subset, and
        # remap every excluded raw category to "void".
        nc = len(self._included_classes)
        if nc > 0:
            cls_to_inds = {"void": -1}
            inds_to_cls = []
            dist_thresh = []
            ave_lwh = []
            for i, c in enumerate(self._included_classes):
                cls_to_inds[c] = i
                inds_to_cls.append(c)
                idx = self._cls_to_inds[c]
                dist_thresh.append(self._dist_thresh[idx])
                ave_lwh.append(self._ave_lwh[idx])

            for k, c in self._cls_mapping.items():
                if c not in self._included_classes:
                    self._cls_mapping[k] = "void"

            self._nc = nc
            self._cls_to_inds = cls_to_inds
            self._inds_to_cls = inds_to_cls
            self._dist_thresh = dist_thresh
            self._ave_lwh = ave_lwh

    def _get_handle(self, data_dir, split):
        from .handles.nuscenes_handle import NuScenesHandle

        # return NuScenesHandle(data_dir, split, mini=True, nsweeps=self._nsweeps)
        return NuScenesHandle(data_dir, split, mini=False, nsweeps=self._nsweeps)

    def _get_data(self, data_dict):
        """Return (pc, boxes, boxes_cls, pc_mean, addi_feats); the cloud is
        mean-centered and pc_mean records the removed offset."""
        # point cloud in global frame
        pc = data_dict["pc"].points[:3]  # (3, N)

        # center point cloud
        pc_mean = pc.mean(axis=1, keepdims=True)
        pc -= pc_mean

        # additional features, as configured
        addi_feats = []
        if "intensity" in self._additional_features:
            # normalize intensity from [0, 255] to [-0.5, 0.5]
            intensity = (data_dict["pc"].points[3] / 255.0) - 0.5
            addi_feats.append(intensity)
        if "pc_dt" in data_dict and "time" in self._additional_features:
            addi_feats.append(data_dict["pc_dt"])
        addi_feats = np.stack(addi_feats, axis=0) if len(addi_feats) > 0 else None

        if len(data_dict["anns"]) == 0:
            return pc, None, None, pc_mean, addi_feats

        # keep only annotations whose category maps to a detection class
        boxes = []
        boxes_cls = []
        for ann in data_dict["anns"]:
            cls_str = self._cls_mapping[ann["category_name"]]
            if cls_str != "void":
                box, _ = ub3d.box_from_nuscenes(ann)
                boxes.append(box)
                boxes_cls.append(self._cls_to_inds[cls_str])
        boxes = np.array(boxes, dtype=np.float32)
        boxes_cls = np.array(boxes_cls, dtype=np.int32)
        # shift box centers by the same offset removed from the cloud
        if boxes.shape[0] > 0:
            boxes[:, :3] = boxes[:, :3] - pc_mean.T

        return pc, boxes, boxes_cls, pc_mean, addi_feats
|
{"hexsha": "aefd0432893f1bb8cf05501e2190a9ed1549c3e0", "size": 14667, "ext": "py", "lang": "Python", "max_stars_repo_path": "lidar_det/dataset/dataset_det3d.py", "max_stars_repo_name": "VisualComputingInstitute/Person_MinkUNet", "max_stars_repo_head_hexsha": "fa39764245a022740c0a3d8c85026532fff93e74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-10-15T13:40:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T06:24:07.000Z", "max_issues_repo_path": "lidar_det/dataset/dataset_det3d.py", "max_issues_repo_name": "VisualComputingInstitute/Person_MinkUNet", "max_issues_repo_head_hexsha": "fa39764245a022740c0a3d8c85026532fff93e74", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-29T23:54:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T21:00:57.000Z", "max_forks_repo_path": "lidar_det/dataset/dataset_det3d.py", "max_forks_repo_name": "VisualComputingInstitute/Person_MinkUNet", "max_forks_repo_head_hexsha": "fa39764245a022740c0a3d8c85026532fff93e74", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-20T13:44:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-30T00:13:58.000Z", "avg_line_length": 35.1726618705, "max_line_length": 87, "alphanum_fraction": 0.5493284244, "include": true, "reason": "import numpy", "num_tokens": 3909}
|
#Ref: Sreenivas Sarwar Anik

"""
1st approach: Perform CLAHE
# Equalize light by performing CLAHE on the Luminance channel
# The equalize part already covered as part of previous tutorials about CLAHE
# This kind of works but you can still see shading after the correction.

2nd approach:
Apply rolling ball background subtraction
"""

import cv2
import numpy as np

img = cv2.imread("images/Alloy_gradient.jpg", 1)

# Convert to LAB so CLAHE can be applied to the luminance (L) channel
# only, leaving the color channels (a, b) untouched.
lab_img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab_img)

clahe = cv2.createCLAHE(clipLimit=3, tileGridSize=(8,8))
clahe_img = clahe.apply(l)

# Recombine the equalized luminance with the original color channels and
# convert back to BGR for display.
CLAHE_img = cv2.merge((clahe_img,a,b))
corrected_image = cv2.cvtColor(CLAHE_img, cv2.COLOR_LAB2BGR)

cv2.imshow("Original image", img)
cv2.imshow("Corrected image", corrected_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

############################################################
"""
#2nd method
# https://pypi.org/project/opencv-rolling-ball/
#
# pip install opencv-rolling-ball
# Only works with 8 bit grey

A local background value is determined for every pixel by averaging over a
very large ball around the pixel. This value is then subtracted from
the original image, removing large spatial variations of the
background intensities. The radius should be set to at least the size of the
largest object that is not part of the background.
"""

import cv2
from cv2_rolling_ball import subtract_background_rolling_ball
from matplotlib import pyplot as plt

# rolling-ball background subtraction requires an 8-bit grayscale image
img = cv2.imread("images/Alloy_gradient.jpg", 0)
# ball radius; should exceed the largest foreground feature size
radius=30
final_img, background = subtract_background_rolling_ball(img, radius, light_background=True,
                                     use_paraboloid=False, do_presmooth=True)

#optionally perform CLAHE to equalize histogram for better segmentation
#otherwise the image may appear washed out.
clahe = cv2.createCLAHE(clipLimit=3, tileGridSize=(8,8))
clahe_img = clahe.apply(final_img)

#cv2.imshow("Original image", img)
cv2.imshow("Background image", background)
cv2.imshow("AFter background subtraction", final_img)
cv2.imshow("After CLAHE", clahe_img)

cv2.waitKey(0)
cv2.destroyAllWindows()
|
{"hexsha": "a61464a33dbd30b8d213af5309136d0e728b54b5", "size": 2098, "ext": "py", "lang": "Python", "max_stars_repo_path": "117_shading_correction_using_rolling_ball.py", "max_stars_repo_name": "Data-Laboratory/WorkExamples", "max_stars_repo_head_hexsha": "27e58207e664da7813673e6792c0c30c0a5bf74c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-15T22:27:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T22:27:27.000Z", "max_issues_repo_path": "117_shading_correction_using_rolling_ball.py", "max_issues_repo_name": "Data-Laboratory/WorkExamples", "max_issues_repo_head_hexsha": "27e58207e664da7813673e6792c0c30c0a5bf74c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "117_shading_correction_using_rolling_ball.py", "max_forks_repo_name": "Data-Laboratory/WorkExamples", "max_forks_repo_head_hexsha": "27e58207e664da7813673e6792c0c30c0a5bf74c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1388888889, "max_line_length": 92, "alphanum_fraction": 0.7469018112, "include": true, "reason": "import numpy", "num_tokens": 528}
|
# copyright (C) 2013 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from phonopy.structure.tetrahedron_method import TetrahedronMethod
def get_tetrahedra_frequencies(gp,
                               mesh,
                               grid_address,
                               relative_grid_address,
                               gp_ir_index,
                               frequencies,
                               grid_order=None,
                               lang='C'):
    """Return frequencies at vertices of tetrahedra around grid point gp

    Parameters
    ----------
    gp : int
        Grid index of the centre point.
    mesh : ndarray
        Mesh numbers.
        shape=(3, ), dtype='int_'
    grid_address : ndarray
        Grid address in integers.
        shape=(prod(mesh), 3), dtype='int_', order='C'
    relative_grid_address : ndarray
        Relative grid addresses from the centre (i.e., gp)
        shape=(24, 4, 3), dtype='int_', order='C'
    gp_ir_index : ndarray
        Grid index to ir-grid index. The ir-grid index is
        range(len(ir-grid-points)).
        shape=(prod(mesh), ), dtype='int_'
    frequencies : ndarray
        Phonon frequencies on ir-grid points.
        shape=(ir-grid-points, num_band)
        dtype='double'
    grid_order : list of int, optional
        This controls how grid addresses are stored either C style or
        Fortran style. Only used by the Python implementation; when None,
        the C-style default [1, mesh[0], mesh[0] * mesh[1]] is used.
    lang : str, 'C' or else, optional
        With 'C', C implementation is used. Otherwise Python implementation
        runs.

    Returns
    -------
    ndarray
        Frequencies at tetrahedra vertices.
        shape=(num_bands, 24, 4), dtype='double', order='C'

    """
    if lang == 'C':
        try:
            # Availability check only; the helper re-imports the extension.
            import phonopy._phonopy as phonoc  # noqa: F401
            return _get_tetrahedra_frequencies_C(gp,
                                                 mesh,
                                                 grid_address,
                                                 relative_grid_address,
                                                 gp_ir_index,
                                                 frequencies)
        except ImportError:
            pass
    # Python fallback. Callers on the C path pass grid_order=None, which
    # would crash np.dot in the Python implementation; reconstruct the
    # default C-style grid order in that case.
    if grid_order is None:
        grid_order = [1, mesh[0], mesh[0] * mesh[1]]
    return _get_tetrahedra_frequencies_Py(gp,
                                          mesh,
                                          grid_address,
                                          relative_grid_address,
                                          gp_ir_index,
                                          frequencies,
                                          grid_order)
def _get_tetrahedra_frequencies_C(gp,
                                  mesh,
                                  grid_address,
                                  relative_grid_address,
                                  gp_ir_index,
                                  frequencies):
    """Collect tetrahedra vertex frequencies using the C extension.

    The extension fills a buffer of shape (num_gp, num_band, 24, 4);
    here a single grid point is requested and the leading axis is
    stripped from the result.
    """
    import phonopy._phonopy as phonoc

    num_band = frequencies.shape[1]
    buf = np.zeros((1, num_band, 24, 4), dtype='double')
    phonoc.tetrahedra_frequencies(buf,
                                  np.array([gp], dtype='int_'),
                                  mesh,
                                  grid_address,
                                  gp_ir_index,
                                  relative_grid_address,
                                  frequencies)
    # Return a C-contiguous copy for the single requested grid point.
    return np.array(buf[0], dtype='double', order='C')
def _get_tetrahedra_frequencies_Py(gp,
mesh,
grid_address,
relative_grid_address,
gp_ir_index,
frequencies,
grid_order):
t_frequencies = np.zeros((frequencies.shape[1], 24, 4), dtype='double')
for i, t in enumerate(relative_grid_address):
address = t + grid_address[gp]
neighbors = np.dot(address % mesh, grid_order)
t_frequencies[:, i, :] = frequencies[gp_ir_index[neighbors]].T
return t_frequencies
class TetrahedronMesh(object):
    """Iterator over irreducible grid points yielding tetrahedron-method
    integration weights at a set of frequency points.

    NOTE(review): ``set()`` must be called before iterating; ``__init__``
    does not create the TetrahedronMethod instance (``self._tm``) or the
    frequency points — confirm call order against callers.
    """

    def __init__(self,
                 cell,
                 frequencies,  # only at ir-grid-points
                 mesh,
                 grid_address,
                 grid_mapping_table,
                 ir_grid_points,
                 grid_order=None,
                 lang='C'):
        """Linear tetrahedron method on uniform mesh for phonons

        Parameters
        ----------
        cell : PhonopyAtoms
            Primitive cell used to calculate frequencies
        frequencies: ndarray
            Phonon frequences on ir-grid points
            shape=(num_ir_grid_points, num_band)
            dtype='double'
        mesh : ndarray or list of int
            Mesh numbers for grids
            shape=(3,)
            dtype='int_'
        grid_address : ndarray
            Addresses of all grid points given by GridPoints class.
            shape=(prod(mesh), 3)
            dtype='int_'
        grid_mapping_table : ndarray
            Mapping of grid points to irreducible grid points given by
            GridPoints class.
            shape=(prod(mesh),)
            dtype='int_'
        ir_grid_points : ndarray
            Irreducible gird points given by GridPoints class.
            shape=(len(np.unique(grid_mapping_table)),)
            dtype='int_'
        grid_order : list of int, optional
            This controls how grid addresses are stored either C style or
            Fortran style.
        lang : str, 'C' or else, optional
            With 'C', C implementation is used. Otherwise Python implementation
            runs.

        """
        self._cell = cell
        self._frequencies = frequencies
        self._mesh = np.array(mesh, dtype='int_')
        self._grid_address = grid_address
        self._grid_mapping_table = grid_mapping_table
        self._lang = lang
        # The C implementation computes grid indexing itself; the Python
        # implementation needs an explicit linearization order.
        if lang == 'C':
            self._grid_order = None
        else:
            if grid_order is None:
                # C-style default: x fastest.
                self._grid_order = [1, mesh[0], mesh[0] * mesh[1]]
            else:
                self._grid_order = grid_order
        self._ir_grid_points = ir_grid_points
        # Attributes below are filled lazily by _prepare()/set().
        self._gp_ir_index = None
        self._tm = None
        self._tetrahedra_frequencies = None
        self._integration_weights = None
        self._relative_grid_address = None
        self._frequency_points = None
        self._value = None
        self._grid_point_count = 0
        self._prepare()

    def __iter__(self):
        return self

    def __next__(self):
        """Return integration weights for the next ir-grid point.

        Returns
        -------
        ndarray
            shape=(num_frequency_points, num_band), normalized by the
            total number of grid points.

        NOTE(review): the SAME array object (self._integration_weights)
        is returned on every iteration — copy it if accumulating results
        across iterations.
        """
        if self._grid_point_count == len(self._ir_grid_points):
            raise StopIteration
        else:
            gp = self._ir_grid_points[self._grid_point_count]
            self._set_tetrahedra_frequencies(gp)
            # One tetrahedron-method run per band.
            for ib, frequencies in enumerate(self._tetrahedra_frequencies):
                self._tm.set_tetrahedra_omegas(frequencies)
                self._tm.run(self._frequency_points, value=self._value)
                iw = self._tm.get_integration_weight()
                self._integration_weights[:, ib] = iw
            # All columns were overwritten just above, so this divides
            # only freshly assigned values.
            self._integration_weights /= np.prod(self._mesh)
            self._grid_point_count += 1
            return self._integration_weights

    def next(self):
        # Python 2 iterator protocol compatibility.
        return self.__next__()

    def get_integration_weights(self):
        # Weights from the most recent iteration step (None before set()).
        return self._integration_weights

    def get_frequency_points(self):
        return self._frequency_points

    def set(self,
            value='I',
            division_number=201,
            frequency_points=None):
        """Configure the sampling and reset the iterator.

        Parameters
        ----------
        value : str, optional
            Passed through to TetrahedronMethod.run (e.g. 'I').
        division_number : int, optional
            Number of frequency points when frequency_points is None.
        frequency_points : array_like, optional
            Explicit frequency sampling points; overrides division_number.
        """
        self._grid_point_count = 0
        self._value = value
        if frequency_points is None:
            # Default sampling spans the full range of input frequencies.
            max_frequency = np.amax(self._frequencies)
            min_frequency = np.amin(self._frequencies)
            self._frequency_points = np.linspace(min_frequency,
                                                 max_frequency,
                                                 division_number,
                                                 dtype='double')
        else:
            self._frequency_points = np.array(frequency_points, dtype='double')
        num_band = self._frequencies.shape[1]
        num_freqs = len(self._frequency_points)
        self._integration_weights = np.zeros((num_freqs, num_band),
                                             dtype='double')
        reciprocal_lattice = np.linalg.inv(self._cell.get_cell())
        self._tm = TetrahedronMethod(reciprocal_lattice, mesh=self._mesh)
        self._relative_grid_address = self._tm.get_tetrahedra()

    def _prepare(self):
        # Build the map: grid point index -> position of its irreducible
        # representative within ir_grid_points.
        ir_gp_indices = {}
        for i, gp in enumerate(self._ir_grid_points):
            ir_gp_indices[gp] = i
        self._gp_ir_index = np.zeros_like(self._grid_mapping_table)
        for i, gp in enumerate(self._grid_mapping_table):
            self._gp_ir_index[i] = ir_gp_indices[gp]

    def _set_tetrahedra_frequencies(self, gp):
        # Gather frequencies at the 24x4 tetrahedra vertices around gp.
        self._tetrahedra_frequencies = get_tetrahedra_frequencies(
            gp,
            self._mesh,
            self._grid_address,
            self._relative_grid_address,
            self._gp_ir_index,
            self._frequencies,
            grid_order=self._grid_order,
            lang=self._lang)
|
{"hexsha": "1bf12919db14c3e8a1d34e4870e7d62281257c31", "size": 11237, "ext": "py", "lang": "Python", "max_stars_repo_path": "phonopy/phonon/tetrahedron_mesh.py", "max_stars_repo_name": "ttadano/phonopy", "max_stars_repo_head_hexsha": "8c03955b2636b22b86e9324f5afcfa36396fa988", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-10T20:15:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-10T20:15:31.000Z", "max_issues_repo_path": "phonopy/phonon/tetrahedron_mesh.py", "max_issues_repo_name": "ttadano/phonopy", "max_issues_repo_head_hexsha": "8c03955b2636b22b86e9324f5afcfa36396fa988", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "phonopy/phonon/tetrahedron_mesh.py", "max_forks_repo_name": "ttadano/phonopy", "max_forks_repo_head_hexsha": "8c03955b2636b22b86e9324f5afcfa36396fa988", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7482758621, "max_line_length": 79, "alphanum_fraction": 0.5472991012, "include": true, "reason": "import numpy", "num_tokens": 2169}
|
\section{Modal pomsets}
In order to perform a sharper analysis of dependency, we present an alternate
semantics using modal pomsets defined below. Modal pomsets make a formal
distinction between strong order and weak order.
\begin{definition}
A \emph{modal (memory model) pomset} is a tuple
$(\Event, {\sle}, {\gtN},
\labeling)$, such that
\begin{itemize}
\item $(\Event, {\gtN},
\labeling)$ is a (memory model) pomset, and
\item ${\sle} \subseteq {\gtN}$ is a partial order.
\end{itemize}
\end{definition}
We write $\bEv\slt\aEv$ when $\bEv\sle\aEv$ and $\bEv\neq\aEv$, and similarly for $\gtN$. Thus, $(\sle \cup \reco)^{*} \subseteq {\gtN}$.
We list out a few observations to illustrate the relationship between \tvalpom s and pomsets. We are given a \tvalpom,
$(\Event, {\sle}, {\gtN}, \labeling)$. Then:
\begin{itemize}
\item $(\Event, {\gtN},\labeling)$ is a pomset with the same reads-from relation.
\item Let $\reco$ be the restriction of $\gtN$ to conflicting actions on the same location. Then, $(\Event, {\sle}, (\sle \cup \reco)^{*}, \labeling)$ is a \tvalpom, and $(\sle \cup \reco)^{*} \subseteq {\gtN}$.
\end{itemize}
\paragraph*{Changes to definitions}
The definition of the semantics of programs using \tvalpom\ largely follows the one using pomsets. We sketch the changes to definitions below.
\begin{itemize}
\item
We say that $\bEv$ \emph{fulfills $\aEv$ on $\aLoc$} if $\bEv$ writes
$\aVal$ to $\aLoc$, $\aEv$ reads $\aVal$ from $\aLoc$,
\begin{itemize}
\item $\bEv \slt \aEv$, and
\item if an event $\cEv$ writes to $\aLoc$ then either $\cEv \gtN \bEv$ or $\aEv \gtN \cEv$.
\end{itemize}
\item
Augmentation has to include ${\slt}$. i.e
$\aPS'$ is an \emph{augmentation} of $\aPS$ if $\Event'=\Event$,
${\labeling'}={\labeling}$, ${\sle'}\supseteq{\sle}$, and
${\gtN'}\supseteq{\gtN}$.
\item The definitions of substitution, restriction and the filtering
operations stay the same, with $\sle$ carried over unchanged. For
example, substitution is defined as follows:
Let $\aPSS\aSub$ be the set $\aPSS'$ where $\aPS'\in\aPSS'$ whenever
there is $\aPS\in\aPSS$ such that:
$\Event' = \Event$,
${\sle'} = {\sle}$,
${\gtN'} = {\gtN}$,
and
$\labeling'(\aEv) = (\bForm\aSub \mid \aAct)$ when $\labeling(\aEv) = (\bForm \mid \aAct)$.
\item In composition, we require ${\sle'}\supseteq{\sle^1}\cup{\sle^2}$
\item The changes to the definition \ref{def:prefix} of prefixing are as follows. The key changes are that synchronization and dependency enforce $\slt$ whereas coherence only enforces $\gtN$.
\begin{itemize}
\item ${\sle'}\supseteq{\sle}$.
% \item Item 5b changes to: if $\aEv$ is a write then either $\cEv\slt'\aEv$
% or $\labelingForm'(\aEv)$ implies $\labelingForm(\aEv)$.
\item 5b changes to: if $\bEv$ and $\aEv$ are \external actions in conflict, then $\bEv \gtN' \aEv$,
% \item Item \ref{pre-coherence} changes to:
% if $\aAct$ is a write that conflicts with $\labelingAct(\aEv)$
% then $\cEv \gtN' \aEv$,
\item Items 5a, 5c, 5d, and 5e change to impose $\slt$ order: e.g.\ if $\aAct$ is an acquire or $\labelingAct(\aEv)$ is a release then $\cEv \slt' \aEv$.
\end{itemize}
\end{itemize}
We use $\tsem{\aCmd}$ to stand for the \tvalpom\ semantics of $\aCmd$.
\subsection{Generators. } Modal pomsets provide a characterization of generators from section~\ref{sec:sc}.
Recall that \emph{generators} in the pomset semantics are pomsets that are minimal with respect to augmentation and implication. These generators are induced by pomsets that are minimal with respect to augmentation and implication in the \tvalpom\ semantics in the following sense.
$(\Event, {\gtN},\labeling)$ is a generator for $\sem{\aCmd}$
if there exists $(\Event, \slt, {\gtN},\labeling) \in \tsem{\aCmd}$ minimal w.r.t.~augmentation and implication, and $\gtN = (\sle \cup \reco)^{*}$.
Furthermore, any strong order that is outside of program order must be induced by a reads-from. In the two-thread case, we can state the latter
property as follows: suppose $\aEv$ and $\bEv$ are not related by program
order and $\aEv\slt\bEv$; then there exist $\bEv'$ that reads-from $\aEv'$
such that $\aEv\xpox\aEv'$, $\bEv'\xpox\bEv$ and
$\aEv \slt \aEv' \slt \bEv' \slt \bEv$.
\subsection{Closure properties}
The fine grain analysis of dependency in the modal semantics allows us to establish some closure properties of the semantics of programs.
We consider programs of the
form $\vec{\aLoc}\GETS\vec{0}\SEMI\FENCE\SEMI\aCmd$, where $\aCmd$ is
restriction-free. Thus, all memory locations are initialized to $0$,
initialization happens-before the execution of any command.
We say that $\aPS' = \aPS\restrict{\Event'}$ when
$\Event' \subseteq \Event$,
${\labeling'} = {\labeling}\restrict{\Event'}$, and
${\le'} = {\le}\restrict{\Event'}$.
% ${\gtN'} = {\gtN}\restrict{\Event'}$.
\begin{definition}
Let $(\aPS \after \aEv) = {\{ \bEv\in\Event \mid \aEv \le \bEv
\}}$ be the set of events that follow $\aEv$ in $\aPS$.
\end{definition}
The semantics of read is ``input''-enabled, since it permits the read of any visible value. Thus, any racy read in a program can be replaced by a read of an earlier value (w.r.t.~$\reco$), even while the races with existing independent writes are maintained. A canonical example to keep in mind for this lemma is the program:
\begin{align*}
(y\GETS 0 \SEMI \aReg \GETS y \SEMI x \GETS 1)
\PAR
(x\GETS 0 \SEMI \bReg \GETS x \SEMI y \GETS 1)
\end{align*}
with both registers getting value $1$ via the execution:
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{y}{0}}{}
\event{ry1}{\DR{y}{1}}{right=of wy0}
\event{wx1}{\DW{x}{1}}{right=of ry1}
\event{wx0}{\DW{x}{0}}{below=of wy0}
\event{rx1}{\DR{x}{1}}{right=of wx0}
\event{wy1}{\DW{y}{1}}{right=of rx1}
\rf{wx1}{rx1}
\rf{wy1}{ry1}
\wk{wx0}{rx1}
\wk{wy0}{ry1}
\end{tikzdisplay}
The lemma constructs the execution:
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{y}{0}}{}
\event{ry1}{\DR{y}{0}}{right=of wy0}
\event{wx1}{\DW{x}{1}}{right=of ry1}
\event{wx0}{\DW{x}{0}}{below=of wy0}
\event{rx1}{\DR{x}{0}}{right=of wx0}
\event{wy1}{\DW{y}{1}}{right=of rx1}
\rf{wx0}{rx1}
\rf{wy0}{ry1}
\wk{rx1}{wx1}
\wk{ry1}{wy1}
\end{tikzdisplay}
\begin{lemma}\label{inputen}
%Let $\aCmd = \vec{\aLoc}\GETS\vec{0}\SEMI \FENCE\SEMI (\aCmd^1 \PAR \cdots \PAR \aCmd^n)$.
Let $\aPS \in \tsem{\aCmd}$ be a top level pomset.
Let $\aEv \in \aPS$ read from write event $\bEv$ on $\aLoc$, $\neg(\bEv \xhb \aEv)$.
Then, there exists $\bPS \in \tsem{\aCmd}$ such that:
\begin{itemize}
%\item $(\exists \aEv' \in \Event_{\bPS})$ such that $
%\Event_{\bPS}$ is the disjoint union of $\Event_{\aPS} \setminus
%(\aPS \after \aEv))$ and $(\bPS \after \aEv')$.
\item $\aEv'$ reads from $\aLoc$, with matching write event $\bEv'$, such that $\bEv' \xeco \bEv$ in $\bPS$
\item The restriction of $\sle$ in $\aPS$ to $\Event_{\aPS} \setminus (\aPS \after \aEv)$ agrees with the restriction of $\sle$ in $\bPS$ to $\Event_{\bPS} \setminus (\aPS \after \aEv)$ in $\bPS$.
\item The restriction of $\le$ in $\aPS$ to $\Event_{\aPS} \setminus (\aPS \after \aEv)$ agrees with the restriction of $\le$ in $\bPS$ to $\Event_{\bPS} \setminus (\aPS \after \aEv)$ in $\bPS$.
\end{itemize}
\end{lemma}
\begin{proof}
The form of $\aCmd$ ensures that there is always a write to $\aLoc$ that is related by $\xhb$ to any read. Thus, there is at least one other write that can satisfy the read recorded as $\aEv$.
The key observation behind the proof is that a change in a prefixing read action can only affect the events that are dependent on it, i.e., those that follow the read action in the $\slt$ order.
\end{proof}
In the following lemma, invert the $\reco$ relationship between a read and a write. A canonical example to keep in mind for this lemma is the program:
\begin{align*}
(y\GETS 0 \SEMI x \GETS 1 \SEMI \aReg \GETS y)
\PAR (x\GETS 0 \SEMI y \GETS 1 \SEMI \bReg \GETS x)
\end{align*}
with both registers getting value $0$ via the execution:
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{y}{0}}{}
\event{wx1}{\DW{x}{1}}{right=of wy0}
\event{ry0}{\DR{y}{0}}{right=of wx1}
\event{wx0}{\DW{x}{0}}{below=of wy0}
\event{wy1}{\DW{y}{1}}{right=of wx0}
\event{rx0}{\DR{x}{0}}{right=of wy1}
\rf[bend right]{wx0}{rx0}
\rf[bend left]{wy0}{ry0}
\wk{rx0}{wx1}
\wk{ry0}{wy1}
\wk{wx0}{wx1}
\wk{wy0}{wy1}
\end{tikzdisplay}
The lemma constructs the execution:
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{y}{0}}{}
\event{wx1}{\DW{x}{1}}{right=of wy0}
\event{ry0}{\DR{y}{1}}{right=of wx1}
\event{wx0}{\DW{x}{0}}{below=of wy0}
\event{wy1}{\DW{y}{1}}{right=of wx0}
\event{rx0}{\DR{x}{1}}{right=of wy1}
\rf{wx1}{rx0}
\rf{wy1}{ry0}
\wk{wx0}{wx1}
\wk{wy0}{wy1}
\end{tikzdisplay}
\begin{lemma}\label{removerw}
Let $\aPS \in \tsem{\aCmd}$ be a top-level pomset.
Let $\bEv \in \aPS$ be a write on $\aLoc$.
Let $\aEv \in \aPS$ read from $\aLoc$ such that $\aEv \xeco \bEv$ and $\neg(\aEv \slt \bEv)$. Then, there exists $\bPS \in \tsem{\aCmd}$ such that:
\begin{itemize}
\item $\aEv' \in \bPS \setminus \aPS$ reads from $\aLoc$, with matching write $\bEv$.
\item The restriction of $\sle$ in $\aPS$ to $\Event_{\aPS} \setminus (\aPS\ \after\ \aEv)$ agrees with the restriction of $\sle$ in $\bPS$ to $\Event_{\bPS} \setminus (\aPS\ \after\ \aEv)$.
\end{itemize}
\end{lemma}
\begin{proof}
The proof proceeds similar to the above proof; in this case, replace the value read in $\aEv$ to come from $\bEv$.
\end{proof}
Any new event $\bEv'$ in $\bPS \after \aEv'$ reading from $\aLoc$ cannot have a matching write event $\bEv'' \xeco \bEv$ since that implies $\bEv' \xeco \bEv$ and a $\reco$ cycle $\bEv \slt \aEv \slt \aEv' \xeco \bEv$. Thus, the above lemma can be iterated if the new pomset has any further reads that precede $\bEv$ in $\reco$, so we can finally derive a pomset with no reads and writes satisfying the hypothesis of the lemma.
The $\reco$ order between writes that are not related by $\lt$ can be reversed.
A canonical example to keep in mind for this lemma is the program:
\begin{align*}
(x\GETS 1)
\PAR (x\GETS 0)
\end{align*}
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{x}{1}}{}
\event{wx0}{\DW{x}{0}}{right=of wy0}
\wk{wy0}{wx0}
\end{tikzdisplay}
The lemma constructs the execution:
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{x}{1}}{}
\event{wx0}{\DW{x}{0}}{right=of wy0}
\wk{wx0}{wy0}
\end{tikzdisplay}
\begin{lemma}\label{cohww}
Let $\aPS \in \tsem{\aCmd}$ be a top-level pomset. Let $\bEv, \aEv$ be writes to $\aLoc$ such that:
\begin{itemize}
\item $\bEv\gtN \aEv$
\item for all writes $\cEv$ to $\aLoc$ such that $ \bEv \gtN \cEv \gtN \aEv$, it is the case that $ \neg(\cEv \slt \aEv)$ and $\neg(\cEv \xpox \aEv)$
\end{itemize}
Then, there exists $\bPS \in \tsem{\aCmd}$ such that $\Event_{\aPS} = \Event_{\bPS}$, $\sle_{\aPS} = \sle_{\bPS}$, and
$\aEv \gtN \bEv$ in $\bPS$.
\end{lemma}
\begin{proof}
We show how to interchange $\aEv, \bEv$ adjacent in $\gtN$, i.e., we assume that $\neg(\exists \cEv) \ \bEv \gtN \cEv \gtN \aEv$. The full proof follows by induction.
Since $\sem{\aCmd}$ is augmentation closed, it suffices to show that we can build $\bPS$ while satisfying the constraints between $\slt,\gtN$. We list the changes below.
\begin{itemize}
\item $\aEv \gtN \bEv$ in $\bPS$
\item For all reads $\cEv$ matched to $\aEv$, change from $\bEv \gtN \cEv$ in $\aPS$ to $\cEv \gtN \bEv$ in $\bPS$
\item For all reads $\cEv$ matched to $\bEv$, change from $\cEv \gtN \aEv$ in $\aPS$ to $\aEv \gtN \cEv$ in $\bPS$
\popQED
\end{itemize}
\end{proof}
\section{Proof of DRF}\label{drfproof}
In this section of the appendix, we develop a proof of DRF for \tvalpom s. By the results in the earlier section, it yields DRF for the pomset semantics, since the races are identical in both models.
In the rest of this section, we assume that $\aPS$ is a generator for
$\tsem{\aCmd}$.
We prove:
\begin{description}
\item[DRF1: ] If $\aPS$ does not have a race, $\aPS \in \tsemsc{\aCmd}$.
\item[DRF2: ] If $\aPS$ has a race, then there exists $\bPS\in \tsemClosed{\aCmd}$ such that $\bPS \in \tsemsc{\aCmd}$ and has a race.
\end{description}
\paragraph*{Proof of DRF1}
We first show that if $\aPS \in \tsem{\aCmd} \setminus \tsemsc{\aCmd}$, then $\aPS$ has a race. By assumption, there is a cycle in $\rpox \cup \slt \cup \xeco$. Let this cycle be $\aEv_0, \aEv'_0, \aEv_1, \aEv'_1, \ldots, \aEv_n, \aEv'_n, \aEv_0$ where for all $i$, $\aEv_i \xpox \aEv'_i$ and $\aEv'_i \not\xpox \aEv'_{i+1}$.
If for all $i$, $\aEv'_i \xhb \aEv'_{i+1}$, then the above is a cycle in $\rhb$, which is a contradiction.
So, there is at least one $i$ such that $\aEv'_i \not\xhb \aEv'_{i+1}$. There are two cases to consider.
\begin{itemize}
\item $\aEv'_i \xeco \aEv'_{i+1}$. In this case, there is a race.
\item $\aEv'_i \slt \aEv'_{i+1}$. In this case, $\aEv'_i$ is a write and $\aEv'_{i+1}$ is a conflicting read, so there is a race.
\end{itemize}
\paragraph*{Proof of DRF2}
We define the size of a pomset as follows: $\size(\aPS)$ is the number of events in $\aPS$. Since we are considering loop-free programs, there is an $\aPS \in \tsemsc{\aCmd}$ of maximum size, which we identify as $\size(\aCmd)$.
We prove by induction on $\size(\aCmd) - \size(\bPS)$ that given $(\aPS, \bPS)$ such that:
\begin{itemize}
\item $\bPS$ is a prefix of some $\aPS' \in \tsemsc{\aCmd}$
\item $\bPS$ is a prefix of $\aPS$ under all of $\xpox,\gtN,\lt$
\item $\aPS$ has a race
\end{itemize}
there exists $\bPS\in \tsem{\aCmd}$ that demonstrates the race.
The required theorem follows by setting $\bPS$ to be the empty pomset.
For the base case, $\size(\bPS) = \size(\aPS)$. In this case, $\aPS$ is the required witness.
Otherwise, consider a maximal sequential prefix, extending $\bPS$, w.r.t.~all of $\rpox,\reco,\slt$. If it strictly contains $\bPS$, result follows from induction hypothesis.
If not, $\bPS$ is already maximal. Consider the set of all events in $\aPS \setminus \bPS$ that are minimal w.r.t.~$\rhb$. In particular, these events will also be minimal w.r.t.~$\rpox$.
If one of these events, say $\aEv$, is a write, we proceed as follows. Using $\rhb$-minimality of $\aEv$, we deduce $\rpox$-minimality of $\aEv$. Using the generator properties, we deduce that $\aEv$ is $\slt$-minimal. Using lemma~\ref{removerw}, we build $\aPS_1$ from $\aPS$ without changing $\bPS$ to ensure that there is no read $\bEv \in \aPS_1 \setminus \bPS$ such that $\bEv \xeco \aEv$. Using lemma~\ref{cohww}, we build $\aPS_2$ from $\aPS_1$ without changing $\bPS$ to ensure that there is no write $\bEv \in \aPS_2 \setminus \bPS$ such that $\bEv \xeco \aEv$. Thus, $\aEv$ is $\reco$-minimal in $\aPS_2 \setminus \bPS$. The result follows from the induction hypothesis by considering $(\aPS_2,\bPS_1)$, where $\bPS_1$ is obtained from $\bPS$ by adding $\aEv$.
So, we can assume that all events in $\aPS \setminus \bPS$, say $\aEv_0, \ldots, \aEv_n$ that are minimal w.r.t.~$\rhb$ are reads, and we have events
$\aEv'_0, \aEv'_1, \ldots, \aEv'_n, \aEv_0$ such that:
\[
\begin{array}{lrl}
\aEv_i \xpox\ \aEv'_i \\
\aEv'_i \ (\reco\ \cup \slt) \ \aEv_{(i+1)\mod n}
\end{array}
\]
Let $\bEv$ be the matching write for $\aEv_{(i+1)\mod n}$. If $\bEv \in \bPS$, then by $\reco$ prefix closure of $\bPS$, $\bEv \xeco\ \aEv'_i$ and $\aEv_{(i+1)\mod n} \reco\ \aEv'_i$, which is a contradiction to $\reco$ being a partial order per location. So, we can assume that $\aEv'_i \ \slt \ \aEv_{(i+1)\mod n}$.
We proceed as follows. We use lemma~\ref{inputen} on the pomset $\aPS$ and read $\aEv_{(i+1)\mod n}$ and write $\aEv'_i$ to construct $\cPS$ that changes the value read in $\aEv_{j}$ to a value from $\bPS$. $\dPS$ is derived adding the modified read yielded by lemma~\ref{inputen} to $\bPS$. Result follows by induction hypothesis since $\dPS$ is a prefix of $\cPS$ under all of $\xpox,\lt, \reco$, $\cPS$ has a race, and $\size(\dPS) = \size(\bPS) + 1$.
\endinput
\begin{comment}
Operation Implementation
Relaxed read ldr
Relaxed write str
Acquiring read ldar
Releasing write stlr
Fence dmb.sy
\end{comment}
\begin{comment}
ob does not contradict eco
ob does not contradict (co cap po):
Suppose that wx1 po wx2 then it cannot be that wx2 ob wx1.
We know that wx1 co wx2 by SC-PER-LOC
% Case 1. w1 is read externally, then we have
% wx1 rfe r
% and
% r fre w2
% so
% wx1 obs+ wx2
% which contradicts EXTERNAL
% Case 2. wx1 is not read externally.
We show this by contradiction
Assume
wx1 co wx2
and
wx2 ob wx1
Note that
po supseteq dob cup aob cup bob
So in order to get order into wx1, we must have
wx2 (ob?; obs; ob?; obs; ob?) wx1
Note that we cannot have dob or bob into wx1 after obs, since then we would
also have it into wx2, creating a cycle in EXTERNAL. This holds because both
dob and bob are closed on the right w.r.t. coi
So it must be that
wx2 (ob?; obs; ob?; wx0; coe) wx1,
in which case we also have wx0 coe wx2, contradicting EXTERNAL
or
wx2 (ob?; obs; ob?; rx0; fre) wx1
in which case we also have rx0 fre wx2, contradicting EXTERNAL
Internal reads do not need to respect ob:
Arm allows the following:
Ra1 -ctrl-> Wx1 -rfi-> Rx1 ---> Wb1 if(a){x=1}; b=x
| |
Wa1 <-------------------------- Rb1 a=b
Suppose that wx1 po rx2 and rx2 is read externally.
Then it cannot be that rx2 ob wx1.
Case 1: if wx1 co wx2, then we have wx1 coe wx2 rfe rx2, contradicting EXTERNAL
Case 2: if wx2 co wx1, then we have rx2 fr wx1, contradicting SC-PER-LOC
Suppose that rx1 po wx2 and rx1 is read externally.
Then it cannot be that wx2 ob rx1.
Case 1: if wx2 co wx1, then wx2 co wx1 rf rx1 po wx2, contradicting SC-PER-LOC
Case 2: if wx1 co wx2, for a contradiction, suppose wx2 ob rx1.
then we need another thread involved to get order from wx2 to rx1.
To get order into the read, there are several options:
- use cross thread read, then dob; but dob does not include reads in it's domain.
An attempt to do this is something like:
Wx1 x=1
|
Ra2 -ctrl-> Rx1 - - -> Wx2 if(a){r=x}; x=2
| |
Wa2 <----------------- Rx2 a=x
But the ctrl dependency is not included in ob between reads.
- use cross thread read then barrier, but then you contradict EXTERNAL
- create and ob edge from Rx2 to Wx1.
An attempt to do this is,
Wx1 <-------------- Ra1
| | But cannot get Wx2 --> Wa1 without a barrier
Rx1 - - -> Wx2 ---> Wa1
Wx1 <----- Rx2
| | contradicts SC-PER-LOC
Rx1 - - -> Wx2
Other examples to type in:
Allowed:
Rx1 -> Wy0 Wy1
Ry1 -> Wz0 Wz1
Rz1 -> Wx0 Wx1
Forbidden:
Rx1 -> Wy0 Wy1
Ry1 -> Wx0 Wx1
\end{comment}
\begin{comment}
\citet{DBLP:journals/pacmpl/PodkopaevLV19} define the \emph{Intermediate
Memory Model (IMM)} and provide efficient implementations of the IMM into
several processor architectures, including TSO, ARMv8 and Power.
In this section, we show that any execution allowed by a sublanguage of the
IMM is also allowed by our semantics. The sublanguage we consider bans
loops, read-modify-write (RMW) operations, and fences. In addition, we take
the set of memory locations, $\Loc$, to be finite. Syntactically, we drop
the superscript \textsf{rlx} on relaxed reads and writes; in addition, we use
structured conditionals rather than the more general \textsf{goto}. We refer
to this sublanguage as $\muIMM$.
$\muIMM$ programs sit in the restriction-free fragment of our language, where
all memory locations are initialized to $0$ and parallel-composition occurs
only at top level. In other words, $\muIMM$ programs have the form
\begin{displaymath}
{\aLoc_1}\GETS{0}\SEMI
\cdots\SEMI
{\aLoc_m}\GETS{0}\SEMI
(\aCmd^1 \PAR \cdots \PAR \aCmd^n)
\end{displaymath}
where $\aCmd^1$, \ldots, $\aCmd^n$ do not include either composition or
restriction.
Due to space limitations, we do not include a full description of the IMM.
The broad strokes of the argument given here should be clear, but interested
readers will need to refer to \citep{DBLP:journals/pacmpl/PodkopaevLV19} for
details.
\end{comment}
\endinput
\section{Proof of DRF}
For any $\aPS$, then $\closed(\aPS)$ is set enriched with useless reads
(preserving augmentation closure) and where we remove any event whose
precondition is not a tautology.
For top level programs:
\begin{displaymath}
\semClosed{\VAR\vec{\aLoc}\SEMI
\vec{\aLoc}\GETS\vec{0}\SEMI
\vec{\bLoc}\GETS\vec{0}\SEMI
\FENCE\SEMI
(\aCmd^1 \PAR \cdots \PAR \aCmd^n)}
=
\VAR\vec{\aLoc}\SEMI
\vec{\aLoc}\GETS\vec{0}\SEMI
\vec{\bLoc}\GETS\vec{0}\SEMI
\FENCE\SEMI
(\semClosed{\aCmd^1} \PAR \cdots \PAR \semClosed{\aCmd^n})
\end{displaymath}
\begin{definition}
A thread: top level component of a parallel composition
\end{definition}
\begin{definition}
$\aPS$ is a generator of $\semClosed{\aCmd}$ if for all $\bPS \in \semClosed{\aCmd}$ such that $\aPS$ augments $\bPS$, $\aPS = \bPS$.
\end{definition}
Since the program we consider are loop free, for any command $\aCmd$, the size of the pomsets in $\aCmd$ are bounded by a constant, that we denote by $\size(\aCmd)$.
\section{Generators for semantics of programs with parallel composition}
All generators $\aPS$ satisfy the following factorization of cross-thread $\lt$.
\begin{lemma}\label{pargen}
Consider the subset of pomsets of $\semClosed{\aCmd \PAR \bCmd}$ that are $\aLoc$-closed for all $\aLoc$.
Let $\aPS$ be any generator.
%\begin{itemize}
% \item
Let $\aEv\lt\bEv$ and $\aEv \in \semClosed{\aCmd}$ and $\bEv \in \semClosed{\bCmd} $.
Then there is a write $\aEv' \in \semClosed{\aCmd}$, and a read $\bEv' \in \semClosed{\bCmd}$ such that $\bEv'$ reads-from $\aEv'$ and $\aEv \lt \aEv' \lt \bEv' \lt \bEv$.
%\item $\aEv \gtN \bEv$ only if $ \aEv [\lt \cup (\le; \reco;\le)^{\star}]
%\bEv$.
% \item If $\aEv\lt\bEv$ and $\aEv, \bEv \in \semClosed{\aCmd}$,
%then there exists
%There exists a release action $\aEv'$ in $\sem{\aCmd}$, a
%matching acquire action $\bEv'$ in $\sem{\bCmd}$ such that $
%\aEv \lt \aEv'$, $\bEv' \lt \bEv$ and $\aEv' \lt \bEv'$.
\end{lemma}
The proof of lemma~\ref{cohsat} yields the following two corollaries.
\begin{corollary}\label{cohrw}
Let $\aPS \in \sem{\aCmd}$ be a generator. Let
\begin{itemize}
\item $\bEv'$ be a read from $\aLoc$ with matching write $\bEv$. \item $\aEv$ be a write to $\aLoc$ such that $\bEv' \gtN \aEv$. \item For all writes $\cEv$ to $\aLoc$ such that $ \bEv \gtN \cEv \gtN \aEv$, it is the case that $ \neg(\bEv' \lt \cEv)$ and $\neg(\bEv \xpox \cEv)$
\end{itemize}
Then, there exists $\bPS \in \sem{\aCmd}$, also a generator, such that $\Event_{\aPS} = \Event_{\bPS}$, $\le_{\aPS} = \le_{\bPS}$, and $\aEv \gtN \bEv'$ in $\bPS$.
\end{corollary}
\begin{corollary}\label{cohwr}
Let $\aPS \in \sem{\aCmd}$ be a generator. Let
\begin{itemize}
\item $\aEv'$ read from $\aLoc$ with matching write $\aEv$.
\item $\bEv$ be a write to $\aLoc$ such that $\bEv \gtN \aEv'$. \item For all writes $\cEv$ to $\aLoc$ such that $ \bEv \gtN \cEv \gtN \aEv$ and $\cEv \not= \aEv$, it is the case that $ \neg(\cEv \lt \aEv')$ and $\neg(\cEv \xpox \aEv)$.
\end{itemize}
Then, there exists $\bPS \in \sem{\aCmd}$, also a generator, such that:
$\Event_{\aPS} = \Event_{\bPS}$, $\le_{\aPS} = \le_{\bPS}$, and
$\aEv' \gtN \bEv$ in $\bPS$.
\end{corollary}
===============good lemma. Not used. ==================
\begin{definition}
$ \aEv \xeco \bEv$ if both $\aEv$ and $\bEv$ touch the same location, at least one is a write, and $\aEv \xird \bEv$ or $\aEv \xrb \bEv$ or $\aEv\xird \bEv$ or $\bEv \gtN \aEv$.
\end{definition}
By lemma~\ref{extendob}, if $\aEv \not=\aEv'$, we deduce $\aEv \xob \bEv'$, and thus $\aEv \xob \bEv$. If $\bEv \not=\bEv'$, we deduce $\aEv' \xob \bEv$ and thus $\aEv \xob \bEv$.
Thus, if $\aEv \not=\aEv'$ or $\bEv \not=\bEv'$, then there is a cycle $\aEv \xob \bEv \xob \cEv \xob \cEv' \xob \aEv$.
So we can assume that $\aEv' = \aEv$, $\bEv' = \bEv$ and
\[ \aEv \xeco \bEv \xob \cEv \xob \cEv' \xeco \aEv \]
where all of $\aEv, \bEv, \cEv, \cEv'$ access the same location and at least one of $\aEv,\bEv$ is a write, at least one of $\aEv,\cEv'$ is a write, and at least one of $\bEv,\cEv$ is a write.
We reason by cases.
\begin{itemize}
\item If $\cEv'$ is a write or both $(\aEv, \bEv)$ are writes.
We deduce that $\bEv \xeco \cEv' \xeco \aEv$ and thus $\bEv \xeco \aEv$.
\item $\cEv'$ is a read. $\aEv$ is a write. $\bEv$ is a read.
In this case $\cEv$ is a write. From $\cEv \xob \aEv$, we deduce $\cEv \xeco \aEv$. Combining with $\bEv \xeco \cEv$, we deduce that $\bEv \xeco \aEv$.
\end{itemize}
In either case, there is a contradiction $\aEv \xeco \bEv \xeco \aEv$.
Consider the write $\cEv'$ fulfilling $\aEv$.
$\cEv' (\xobi \cap \xeco) \aEv$, since $\aEv$ is not an $\rrfi$ event.
Also,
So, we can assume that $\aEv \xpox \bEv$, and the situation is:
\[ \cEv' \xobi \cEv (\xpox \cap \xobi) \aEv (\xeco \cap \xpox) \bEv (\xpox \cap \xobi) \cEv'' \xobi \cEv' \]
By lemma~\ref{extendob}, if $\cEv \not= \aEv$, $\cEv \xob \bEv$, and we have a cycle in $\xob$.
Similarly, if $\bEv \not= \cEv''$, $\aEv \xob \cEv''$, and we have a cycle in $\xob$. So, the situation is:
\[ \cEv' \xobi \aEv (\xeco \cap \xpox) \bEv \xobi \cEv' \]
$\cEv',\aEv,\bEv$ are events on same variable. The above is a cycle in $\xeco$.
|
{"hexsha": "82cb4609778b8971fbcad66f82cde7d5f54642d1", "size": 25732, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "drf-proof.tex", "max_stars_repo_name": "chicago-relaxed-memory/memory-model", "max_stars_repo_head_hexsha": "fd606fdb6a04685d9bb0bee61a5641e4623b10be", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-08-13T02:36:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-25T12:46:13.000Z", "max_issues_repo_path": "drf-proof.tex", "max_issues_repo_name": "chicago-relaxed-memory/memory-model", "max_issues_repo_head_hexsha": "fd606fdb6a04685d9bb0bee61a5641e4623b10be", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "drf-proof.tex", "max_forks_repo_name": "chicago-relaxed-memory/memory-model", "max_forks_repo_head_hexsha": "fd606fdb6a04685d9bb0bee61a5641e4623b10be", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.045751634, "max_line_length": 774, "alphanum_fraction": 0.6574304368, "num_tokens": 9238}
|
from pathlib import Path
from torchvision import transforms as trans
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
import cv2
import bcolz
import pickle
import mxnet as mx
from tqdm import tqdm
def load_bin(path, rootdir, transform, image_size=(112, 112)):
    """Decode a pickled verification set (''.bin'') into a bcolz carray on disk.

    Args:
        path: path to the pickle containing (bins, issame_list), where each
            entry of ``bins`` is an encoded image buffer.
        rootdir: directory (pathlib.Path) for the bcolz carray; created if
            missing.  The same-pair flags are saved next to it as
            ``<rootdir>_list.npy``.
        transform: torchvision transform mapping a PIL image to a CHW tensor.
        image_size: (height, width) of each decoded image.  Default is a
            tuple rather than a list to avoid a shared mutable default.

    Returns:
        (data, issame_list): the filled bcolz carray and the pair flags.
    """
    if not rootdir.exists():
        rootdir.mkdir()
    # Close the pickle file deterministically (the original leaked the handle).
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f, encoding='bytes')
    data = bcolz.fill([len(bins), 3, image_size[0], image_size[1]],
                      dtype=np.float32,
                      rootdir=rootdir,
                      mode='w')
    for i, _bin in enumerate(bins):
        img = mx.image.imdecode(_bin).asnumpy()
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img = Image.fromarray(img.astype(np.uint8))
        data[i, ...] = transform(img)
        # Progress report every 1000 images (same cadence as the original,
        # which incremented the loop index manually before testing it).
        if (i + 1) % 1000 == 0:
            print('loading bin', i + 1)
    print(data.shape)
    np.save(str(rootdir) + '_list', np.array(issame_list))
    return data, issame_list
def load_mx_rec(rec_path):
    """Unpack an MXNet RecordIO training set into per-label JPEG folders.

    Reads ``train.idx``/``train.rec`` under ``rec_path`` and writes each
    record as ``imgs/<label>/<idx>.jpg``.  Record 0 holds the header whose
    first label field is the number of records.
    """
    out_dir = rec_path / 'imgs'
    if not out_dir.exists():
        out_dir.mkdir()
    reader = mx.recordio.MXIndexedRecordIO(str(rec_path / 'train.idx'),
                                           str(rec_path / 'train.rec'), 'r')
    header, _ = mx.recordio.unpack(reader.read_idx(0))
    max_idx = int(header.label[0])
    for idx in tqdm(range(1, max_idx)):
        header, img = mx.recordio.unpack_img(reader.read_idx(idx))
        label = int(header.label[0])
        # Reverse the channel axis (BGR -> RGB) before building the PIL image.
        pil_img = Image.fromarray(img[:, :, ::-1])
        label_dir = out_dir / str(label)
        if not label_dir.exists():
            label_dir.mkdir()
        pil_img.save(label_dir / '{}.jpg'.format(idx), quality=95)
if __name__ == '__main__':
    # Dataset root containing train.idx/train.rec and the verification bins.
    rec_path = Path('/workspace/jiangby/project/datasets/faces_glintasia')
    load_mx_rec(rec_path)
    # Map PIL images to tensors normalised to [-1, 1] per channel.
    test_transform = trans.Compose([
        trans.ToTensor(),
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])
    for name in ['agedb_30', 'cfp_fp', 'lfw', 'cfp_ff']:
        load_bin(rec_path / (name + '.bin'), rec_path / name, test_transform)
|
{"hexsha": "befddee8c70f73b644bf6265f151977b92700580", "size": 2274, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/preprocess.py", "max_stars_repo_name": "leon2milan/faceRecognition", "max_stars_repo_head_hexsha": "c69271c9f808a63fa7dbb856e7726a59a4817515", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-25T10:40:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-25T10:42:01.000Z", "max_issues_repo_path": "data/preprocess.py", "max_issues_repo_name": "leon2milan/imageRecognition", "max_issues_repo_head_hexsha": "c69271c9f808a63fa7dbb856e7726a59a4817515", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/preprocess.py", "max_forks_repo_name": "leon2milan/imageRecognition", "max_forks_repo_head_hexsha": "c69271c9f808a63fa7dbb856e7726a59a4817515", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9402985075, "max_line_length": 77, "alphanum_fraction": 0.6024626209, "include": true, "reason": "import numpy", "num_tokens": 601}
|
# inc_data_dfg.r
# Figure data: paired light/dark colours, two value series, and labels for
# four subject areas (columns a-d throughout).

# Light shades, one per subject area.
myC1a <- rgb(251, 212, 150, maxColorValue = 255)
myC2a <- rgb(237, 153, 118, maxColorValue = 255)
myC3a <- rgb(179, 213, 148, maxColorValue = 255)
myC4a <- rgb(112, 200, 230, maxColorValue = 255)

# Saturated shades, same order.
myC1b <- rgb(243, 178, 40, maxColorValue = 255)
myC2b <- rgb(220, 62, 42, maxColorValue = 255)
myC3b <- rgb(109, 182, 68, maxColorValue = 255)
myC4b <- rgb(0, 163, 218, maxColorValue = 255)

myColours1 <- c(myC1a, myC2a, myC3a, myC4a)
myColours2 <- c(myC1b, myC2b, myC3b, myC4b)

# First series: two values per subject area, as a 2 x 4 matrix.
a <- c(418.7, 418.7)
b <- c(768.0, 768.0)
c <- c(436.1, 436.1)
d <- c(476.7, 478.7)
x <- as.matrix(data.frame(a, b, c, d))

# Second series, same layout.
a <- c(0, 148.6)
b <- c(0, 271.4)
c <- c(0, 154.7)
d <- c(0, 185.8)
y <- as.matrix(data.frame(a, b, c, d))

# Labels matching the column order of x and y.
w1 <- "Humanities and social sciences"
w2 <- "Life sciences"
w3 <- "Natural sciences"
w4 <- "Engineering"
labelling <- c(w1, w2, w3, w4)
|
{"hexsha": "91f539f31b1351f54fcb246d510a658ed1c58d80", "size": 761, "ext": "r", "lang": "R", "max_stars_repo_path": "src/scripts/inc_data_dfg.r", "max_stars_repo_name": "wilsonify/data-visualization", "max_stars_repo_head_hexsha": "4a4295a59f666625f4a47b2ad6a6f1eb06f9e8d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/scripts/inc_data_dfg.r", "max_issues_repo_name": "wilsonify/data-visualization", "max_issues_repo_head_hexsha": "4a4295a59f666625f4a47b2ad6a6f1eb06f9e8d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/scripts/inc_data_dfg.r", "max_forks_repo_name": "wilsonify/data-visualization", "max_forks_repo_head_hexsha": "4a4295a59f666625f4a47b2ad6a6f1eb06f9e8d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2692307692, "max_line_length": 74, "alphanum_fraction": 0.7043363995, "num_tokens": 356}
|
import sys
import numpy as np
# Command-line arguments: path to the reference alignment file, then the
# hypothesis alignment file (one "uttid label label ..." line each).
reff=sys.argv[1]
hybf=sys.argv[2]
def read_f(fname):
    """Parse an alignment file into {utt_id: np.array of int labels}.

    Each line is "<uttid> <label> <label> ...", whitespace-separated.
    """
    alignments = {}
    with open(fname, 'r') as fin:
        for line in fin:
            fields = line.split()
            alignments[fields[0]] = np.array([int(v) for v in fields[1:]])
    return alignments
# Load reference and hypothesis alignments keyed by utterance id.
ref=read_f(reff)
hyb=read_f(hybf)
def cal_acc(x, y):
    """Frame-level accuracy between two integer label sequences.

    The sequences may differ in length by up to two frames; they are compared
    over their common prefix.  (The original compared the full arrays, which
    numpy cannot broadcast when the lengths differ.)

    Args:
        x: reference labels (1-D numpy array).
        y: hypothesis labels (1-D numpy array).
    Returns:
        Fraction of matching frames in the common prefix; 0.0 for empty input.
    """
    assert abs(len(x) - len(y)) < 3
    m = min(len(x), len(y))
    if m == 0:
        return 0.0
    matches = x[:m] == y[:m]
    return np.count_nonzero(matches) / m
assert len(ref) == len(hyb)
# Per-utterance accuracies; each is printed as it is computed.
per_utt = []
for k in ref:
    acc = cal_acc(ref[k], hyb[k])
    per_utt.append(acc)
    print(k, acc)
cnt = len(per_utt)
acc_cnt = sum(per_utt)
print('Total: cnt{} acc{}'.format(str(cnt), str(acc_cnt/cnt)))
|
{"hexsha": "045d1bc35eeef9dc4a12797fa681a20ecab35391", "size": 651, "ext": "py", "lang": "Python", "max_stars_repo_path": "egs/codeswitching/asr/local_jqg01/data/phone/acc.py", "max_stars_repo_name": "luyizhou4/espnet", "max_stars_repo_head_hexsha": "a408b9372df3f57ef33b8a378a8d9abc7f872cf5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "egs/codeswitching/asr/local_jqg01/data/phone/acc.py", "max_issues_repo_name": "luyizhou4/espnet", "max_issues_repo_head_hexsha": "a408b9372df3f57ef33b8a378a8d9abc7f872cf5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "egs/codeswitching/asr/local_jqg01/data/phone/acc.py", "max_forks_repo_name": "luyizhou4/espnet", "max_forks_repo_head_hexsha": "a408b9372df3f57ef33b8a378a8d9abc7f872cf5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.0833333333, "max_line_length": 62, "alphanum_fraction": 0.5837173579, "include": true, "reason": "import numpy", "num_tokens": 211}
|
#!/usr/bin/env python
# coding: utf-8
#
# Neng Lu
# nengl@student.unimelb.edu.au
# ANU & Unimelb
# Canberra, Australia
#
# Version: 1.0
# First version 14 May, 2020
# Last modified 22 May, 2020
import numpy as np
import math
from osgeo import gdal
from osgeo import osr
def testimport():
    """Smoke-test helper confirming the module can be imported and called."""
    message = "It works!"
    print(message)
#-----------------------------------------------------------#
# data=(ny,nx) version
def array2geotiff_yx(fname, data, latRange, lonRange, dtype):
    """Write an (n_lat, n_lon) elevation array to a single-band GeoTIFF.

    Parameters:
        fname: output file name.
        data: elevation array of shape (n_lat, n_lon).
        latRange: [minlat, maxlat].
        lonRange: [minlon, maxlon].
        dtype: GDAL type constant, e.g. gdal.GDT_Byte or gdal.GDT_Float32.
    """
    n_rows, n_cols = data.shape[0], data.shape[1]
    lon_min, lon_max = lonRange[0], lonRange[1]
    lat_min, lat_max = latRange[0], latRange[1]
    pixel_w = (lon_max - lon_min) / float(n_cols)
    pixel_h = (lat_max - lat_min) / float(n_rows)
    # GDAL geotransform: origin at the upper-left corner, y decreasing.
    geotransform = (lon_min, pixel_w, 0, lat_max, 0, -pixel_h)
    dst = gdal.GetDriverByName('GTiff').Create(fname, n_cols, n_rows, 1, dtype)
    dst.SetGeoTransform(geotransform)
    dst.GetRasterBand(1).WriteArray(data)
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)  # WGS84 geographic lat/lon
    dst.SetProjection(srs.ExportToWkt())
    dst.FlushCache()
def get_extent(extent_s, res_deg):
    """Snap a rough lon/lat extent onto the Earth2014 cell-centre grid.

    The global grid has cell centres at -180+res/2 .. 180-res/2 (lon) and
    -90+res/2 .. 90-res/2 (lat).  The requested corners are rounded to the
    nearest cell centres; when a snapped index span comes out even, the upper
    index is reduced by one cell (matching get_data's cropping).

    Input:
        extent_s: rough extent (minlon, maxlon, minlat, maxlat)
        res_deg:  grid interval of the Earth2014 data, in degrees
    Output:
        extent_t: snapped extent (minlon, maxlon, minlat, maxlat)
    """
    lats = np.arange((-90 + res_deg / 2), (90 - res_deg / 4), res_deg)
    lons = np.arange((-180 + res_deg / 2), (180 - res_deg / 4), res_deg)
    minlon1, minlat1 = lons.min(), lats.min()
    minlon2, maxlon2, minlat2, maxlat2 = extent_s
    # np.around uses round-half-to-even, so .5 offsets snap to the even index.
    minX_index = np.around((minlon2 - minlon1) / res_deg).astype(int)
    maxX_index = np.around((maxlon2 - minlon1) / res_deg).astype(int)
    if (maxX_index - minX_index) % 2 == 0:
        maxX_index = maxX_index - 1
    minY_index = np.around((minlat2 - minlat1) / res_deg).astype(int)
    maxY_index = np.around((maxlat2 - minlat1) / res_deg).astype(int)
    if (maxY_index - minY_index) % 2 == 0:
        maxY_index = maxY_index - 1
    extent_t = (lons[minX_index], lons[maxX_index],
                lats[minY_index], lats[maxY_index])
    return extent_t
def get_data(data_s, extent_s, res_deg):
    """Crop a global Earth2014-style grid to a snapped target extent.

    The extent is snapped exactly as in get_extent.  The input grid is
    flipped before indexing, i.e. row 0 of data_s is taken to be the
    northernmost latitude; the crop is flipped back to the same ordering.

    Input:
        data_s: global grid, array of shape (n_lat, n_lon)
        extent_s: rough extent (minlon, maxlon, minlat, maxlat)
        res_deg: grid interval in degrees
    Output:
        extent_t: snapped extent of the target area
        data_t: cropped copy of the data for the target area
    """
    lats = np.arange((-90 + res_deg / 2), (90 - res_deg / 4), res_deg)
    lons = np.arange((-180 + res_deg / 2), (180 - res_deg / 4), res_deg)
    minlon1, minlat1 = lons.min(), lats.min()
    minlon2, maxlon2, minlat2, maxlat2 = extent_s
    minX_index = np.around((minlon2 - minlon1) / res_deg).astype(int)
    maxX_index = np.around((maxlon2 - minlon1) / res_deg).astype(int)
    if (maxX_index - minX_index) % 2 == 0:
        maxX_index = maxX_index - 1
    minY_index = np.around((minlat2 - minlat1) / res_deg).astype(int)
    maxY_index = np.around((maxlat2 - minlat1) / res_deg).astype(int)
    if (maxY_index - minY_index) % 2 == 0:
        maxY_index = maxY_index - 1
    extent_t = (lons[minX_index], lons[maxX_index],
                lats[minY_index], lats[maxY_index])
    # Flip to south-up order so row index i corresponds to lats[i], crop,
    # then flip back.  The copy keeps the result independent of data_s.
    data_m = np.flipud(data_s.copy())
    data_t = np.flipud(data_m[minY_index:(maxY_index + 1),
                              minX_index:(maxX_index + 1)])
    return extent_t, data_t
#-----------------------------------------------------------#
def cal_dis_LngLat(lon1, lat1, lon2, lat2):
    """Great-circle distance in km between two lon/lat points.

    Uses the spherical law of cosines with the Earth radius 6378.137 km.
    """
    latitude1 = (math.pi / 180) * lat1
    latitude2 = (math.pi / 180) * lat2
    longitude1 = (math.pi / 180) * lon1
    longitude2 = (math.pi / 180) * lon2
    R = 6378.137  # Earth radius used by the original, km
    cos_angle = (math.sin(latitude1) * math.sin(latitude2)
                 + math.cos(latitude1) * math.cos(latitude2)
                 * math.cos(longitude2 - longitude1))
    return math.acos(cos_angle) * R
def cal_azi_LngLat(lon1, lat1, lon2, lat2):
    """Initial great-circle bearing from point 1 to point 2, degrees in [0, 360)."""
    lat1_rad = lat1 * math.pi / 180
    lon1_rad = lon1 * math.pi / 180
    lat2_rad = lat2 * math.pi / 180
    lon2_rad = lon2 * math.pi / 180
    # Standard forward-azimuth formula components.
    y = math.sin(lon2_rad - lon1_rad) * math.cos(lat2_rad)
    x = (math.cos(lat1_rad) * math.sin(lat2_rad)
         - math.sin(lat1_rad) * math.cos(lat2_rad)
         * math.cos(lon2_rad - lon1_rad))
    bearing = math.atan2(y, x) * 180 / math.pi
    return float((bearing + 360.0) % 360.0)
def cal_azi(x1, y1, x2, y2):
    """Compass-style azimuth of (x2, y2) from (x1, y1) in planar coordinates.

    0 degrees points along +y, 90 along +x; result is in [0, 360).
    """
    dy = y2 - y1
    dx = x2 - x1
    angle_deg = math.atan2(dy, dx) * 180 / math.pi
    return float((-angle_deg + 90.0) % 360.0)
def cal_dis(x1, y1, x2, y2):
    """Euclidean distance between two planar points."""
    dy = y2 - y1
    dx = x2 - x1
    return math.sqrt(dy ** 2 + dx ** 2)
def cal_azi_river_LngLat(river_xy):
    """Per-vertex bearing along a polyline of (lon, lat) points.

    azi[i] is the bearing from vertex i to vertex i+1; the final entry stays
    0 because there is no following vertex.
    """
    lons = river_xy[:, 0]
    lats = river_xy[:, 1]
    n = len(lons)
    azi = np.zeros(n)
    for i in range(n - 1):
        azi[i] = cal_azi_LngLat(lons[i], lats[i], lons[i + 1], lats[i + 1])
    return azi
def cal_azi_river(river_xy):
    """Per-vertex planar azimuth along a polyline of (x, y) points.

    azi[i] is the azimuth from vertex i to vertex i+1; the final entry stays
    0 because there is no following vertex.
    """
    xs = river_xy[:, 0]
    ys = river_xy[:, 1]
    n = len(xs)
    azi = np.zeros(n)
    for i in range(n - 1):
        azi[i] = cal_azi(xs[i], ys[i], xs[i + 1], ys[i + 1])
    return azi
|
{"hexsha": "2a0c20d9d8981d579851969e26d9464e203e0ae5", "size": 5689, "ext": "py", "lang": "Python", "max_stars_repo_path": "qixiang/functions.py", "max_stars_repo_name": "NengLu/UWG_QiXiang", "max_stars_repo_head_hexsha": "b2a5782c7794cf2505c696075f437097b03b7662", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-03-27T00:07:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T09:35:07.000Z", "max_issues_repo_path": "qixiang/functions.py", "max_issues_repo_name": "NengLu/UWG_QiXiang", "max_issues_repo_head_hexsha": "b2a5782c7794cf2505c696075f437097b03b7662", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qixiang/functions.py", "max_forks_repo_name": "NengLu/UWG_QiXiang", "max_forks_repo_head_hexsha": "b2a5782c7794cf2505c696075f437097b03b7662", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6055555556, "max_line_length": 133, "alphanum_fraction": 0.6248901389, "include": true, "reason": "import numpy", "num_tokens": 1857}
|
import pandas as pd
import numpy as np
import umap
import sklearn.cluster as cluster
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
import spacy
import unicodedata
import matplotlib.pyplot as plt
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
JULIA_VARIABLE_CSV_PATH = "ExperimentData/JuliaVariableData.csv"
CLUSTER_LABEL_CSV_PATH = "clusteringLabels.csv"
KMEANS_CLUSTER_LABEL_CSV_PATH = "ExperimentData/KmeansCluster.csv"
KMEANS_CLUSTER_TRUTH_CSV_PATH = "ExperimentData/KmeanClusterTruths.csv"
KMEANS_PREDICTED_CSV_PATH = "ExperimentData/KmeansPredicted.csv"
PREDICTED_UMAP_CSV_PATH = "ExperimentData/simPredictedUmapClusters.csv"
def createWord2Vec(data):
    """Embed each phrase with spaCy's en_core_web_md model.

    Returns a numpy array with one document vector per input phrase.
    """
    nlp = spacy.load('en_core_web_md')
    vectors = [nlp(phrase).vector for phrase in data]
    return np.asarray(vectors)
def useUMAP(tokenList):
    """Cluster embeddings with DBSCAN, both on the raw vectors and on a UMAP projection.

    Returns (raw DBSCAN labels, DBSCAN-on-UMAP labels) as numpy arrays;
    DBSCAN marks noise points with label -1.
    """
    vectors = np.asarray(tokenList)
    raw_clusterer = DBSCAN(eps=0.3, min_samples=2).fit(vectors)
    reducer = umap.UMAP(random_state=42).fit(vectors)
    embedded = reducer.transform(tokenList)
    umap_clusterer = DBSCAN(eps=0.3, min_samples=2).fit(embedded)
    return np.asarray(raw_clusterer.labels_), np.asarray(umap_clusterer.labels_)
def writeUMAP_DBSCAN_CSV(subj_array, labels, umapLabels, labelsSimArray,
                         uMapLabelsSimArray, OutSampleLabelsSimArray,
                         OutSampleUMAPSimArray):
    """Write one CSV row per node with its cluster labels and similarity scores.

    Columns: node, DBSCAN label, UMAP+DBSCAN label, in-cluster similarity for
    each labelling, out-of-cluster similarity for each labelling.  Output is
    written to CLUSTER_LABEL_CSV_PATH.
    """
    logging.info("Writing CSV")
    rows = ["node,labels,umapLabels,dbscanSim,UMAPsim,out_sampleDBSCAN,out_sampleUMAP"]
    for i in range(len(labels)):
        rows.append(",".join([
            str(subj_array[i]),
            str(labels[i]),
            str(umapLabels[i]),
            str(labelsSimArray[i]),
            str(uMapLabelsSimArray[i]),
            str(OutSampleLabelsSimArray[i]),
            str(OutSampleUMAPSimArray[i]),
        ]))
    # Single join instead of repeated string += (quadratic for large inputs).
    with open(CLUSTER_LABEL_CSV_PATH, 'w') as filetowrite:
        filetowrite.write("\n".join(rows) + "\n")
def generatePairs(labels, umapLabels, data):
    """Compute, for each word, average pairwise spaCy similarity inside and
    outside its cluster, under both the DBSCAN labelling (`labels`) and the
    UMAP+DBSCAN labelling (`umapLabels`).

    Returns four parallel lists (one entry per word in `data`):
    in-cluster DBSCAN sim, in-cluster UMAP sim, out-of-cluster DBSCAN sim,
    out-of-cluster UMAP sim.

    NOTE(review): the in-cluster averages divide by (cluster size - 1),
    which assumes every cluster has at least two members — verify inputs
    exclude singletons.  Re-running nlp() inside the double loop is also
    O(n^2) model invocations; vectors could be precomputed once per word.
    """
    nlp = spacy.load('en_core_web_md')
    labelsSimArray = []
    uMapLabelsSimArray = []
    OutSampleLabelsSimArray = []
    OutSampleUMAPSimArray = []
    # Running sums, reset after each word's row is emitted.
    labels_sim = 0;
    umapLabels_sim = 0;
    outsample_labels_sim = 0;
    outsample_umap_sim = 0;
    for i in range(len(data)):
        logging.info("Iterating Word " + str(i))
        for j in range(len(data)):
            if i != j:
                token1 = nlp(data[i])
                token2 = nlp(data[j])
                # Accumulate similarity into the in-cluster or out-of-cluster
                # bucket for each of the two labellings independently.
                if(labels[i] == labels[j]):
                    labels_sim += token1.similarity(token2)
                if(umapLabels[i] == umapLabels[j]):
                    umapLabels_sim += token1.similarity(token2)
                if(labels [i] != labels[j]):
                    outsample_labels_sim += token1.similarity(token2)
                if(umapLabels[i] != umapLabels[j]):
                    outsample_umap_sim += token1.similarity(token2)
            if j == len(data)-1:
                # Finished comparing word i against all others: convert the
                # sums into averages.  count(label)-1 excludes word i itself.
                labelsSimArray.append(float(labels_sim/(list(labels).count(labels[i])-1)))
                uMapLabelsSimArray.append(float(umapLabels_sim/(list(umapLabels).count(umapLabels[i])-1)))
                # If every word shares word i's cluster there is no out-of-cluster
                # population; 1 is used as a sentinel average.
                if len(labels)-list(labels).count(labels[i]) == 0:
                    OutSampleLabelsSimArray.append(1)
                else:
                    OutSampleLabelsSimArray.append(float(outsample_labels_sim/(len(labels)-1-list(labels).count(labels[i]))))
                if len(umapLabels)-list(umapLabels).count(umapLabels[i]) == 0:
                    OutSampleUMAPSimArray.append(1)
                else:
                    OutSampleUMAPSimArray.append(float(outsample_umap_sim/(len(umapLabels)-1-list(umapLabels).count(umapLabels[i]))))
                # NOTE(review): the out-of-cluster denominator subtracts one more
                # than the in-cluster count — confirm the intended population size.
                labels_sim = 0;
                umapLabels_sim = 0;
                outsample_labels_sim = 0;
                outsample_umap_sim = 0;
    return labelsSimArray, uMapLabelsSimArray, OutSampleLabelsSimArray, OutSampleUMAPSimArray
def createCluster(svoFile):
    """End-to-end clustering: read SVO triples, embed the subject and object
    nodes, cluster them, score similarities, and write the label CSV."""
    svo_data = pd.read_csv(svoFile)
    all_nodes = list(svo_data["subject"]) + list(svo_data["object"])
    vectors = createWord2Vec(all_nodes)
    # DBSCAN on raw vectors and on a UMAP embedding.
    labels, umap_labels = useUMAP(vectors)
    # In- and out-of-cluster similarity scores for both labellings.
    in_db, in_umap, out_db, out_umap = \
        generatePairs(labels, umap_labels, all_nodes)
    # Persist everything for the downstream experiments.
    writeUMAP_DBSCAN_CSV(all_nodes, labels, umap_labels, in_db,
                         in_umap, out_db, out_umap)
def cleanVariables(variableArray):
    """Normalise variable names in place: commas and underscores become
    spaces, and Greek letters are spelt out via containsGreek.

    Mutates and returns the same list.
    """
    for idx in range(len(variableArray)):
        cleaned = str(variableArray[idx]).replace(",", " ")
        cleaned = cleaned.replace("_", " ")
        variableArray[idx] = containsGreek(cleaned)
    return variableArray
def containsGreek(inputString):
    """Replace each Greek letter with its capitalised Unicode name plus a
    trailing space, e.g. 'α' -> 'Alpha '.  Other characters are untouched.

    The name word is taken from position 3 of the Unicode character name
    ("GREEK SMALL LETTER ALPHA" -> "ALPHA").
    """
    greek_chars = [ch for ch in inputString
                   if "GREEK" in unicodedata.name(ch)]
    for ch in greek_chars:
        word = unicodedata.name(ch).split(" ")[3]
        inputString = inputString.replace(ch, word.lower().capitalize() + " ")
    return inputString
def useKmeans(trainTokenList, K_size, variableTokenList):
    """Fit UMAP on the training vectors, then K-means in the embedded space.

    Returns (labels for the training vectors, predicted cluster ids for
    variableTokenList).
    """
    print(type(trainTokenList), type(K_size), type(variableTokenList))
    reducer = umap.UMAP(random_state=42).fit(np.asarray(trainTokenList))
    train_embedding = reducer.transform(trainTokenList)
    variable_embedding = reducer.transform(variableTokenList)
    model = KMeans(n_clusters=K_size, random_state=0).fit(train_embedding)
    return model.labels_, model.predict(variable_embedding)
def writeCSV(variable_array, predictedLabels, fileName):
    """Write variable/cluster pairs as a two-column CSV.

    Commas inside a variable name are replaced with spaces so every row stays
    two columns.

    Args:
        variable_array: sequence of variable names.
        predictedLabels: parallel sequence of cluster ids.
        fileName: output path.
    """
    logging.info("generating CSV " + fileName)
    rows = ["variable,cluster"]
    for name, label in zip(variable_array, predictedLabels):
        # str() first so non-string names cannot break .replace.
        rows.append(str(name).replace(",", " ") + "," + str(label))
    # Single join instead of repeated string += (quadratic for large inputs).
    with open(fileName, 'w') as filetowrite:
        filetowrite.write("\n".join(rows) + "\n")
def groupNodesByCluster(umapData):
    """Group node names by their 'umapLabels' cluster id.

    Args:
        umapData: DataFrame with 'node' and 'umapLabels' columns; noise rows
            (label -1) are skipped, matching the original's 0..max scan.
    Returns:
        List where element i is the list of nodes in cluster i, preserving
        row order within each cluster.
    """
    labels = list(umapData["umapLabels"])
    nodes = list(umapData["node"])
    # Hoist the list() conversions out of the former nested loops (was O(n*k)).
    cluster_count = max(labels) + 1
    clusteredNodes = [[] for _ in range(cluster_count)]
    for label, node in zip(labels, nodes):
        if label >= 0:  # skip DBSCAN noise rows labelled -1
            clusteredNodes[label].append(node)
    return clusteredNodes
def groupNodesByKMeansCluster(kMeansData):
    """Group variable names by their 'cluster' id.

    Args:
        kMeansData: DataFrame with 'variable' and 'cluster' columns.
    Returns:
        List where element i is the list of variables in cluster i,
        preserving row order within each cluster.
    """
    clusters = list(kMeansData["cluster"])
    variables = list(kMeansData["variable"])
    # Hoist the list() conversions out of the former nested loops (was O(n*k)).
    cluster_count = max(clusters) + 1
    clusteredNodes = [[] for _ in range(cluster_count)]
    for cluster_id, variable in zip(clusters, variables):
        if cluster_id >= 0:
            clusteredNodes[cluster_id].append(variable)
    return clusteredNodes
def getSimilarityLabels(clusteredNodes, variable_array):
    """Assign each variable to the cluster whose members have the highest
    mean spaCy similarity to it.

    Returns a list of cluster indices parallel to variable_array.  Ties keep
    the lowest-numbered cluster (strict > comparison).
    """
    nlp = spacy.load('en_core_web_md')
    labels = []
    for count, variable in enumerate(variable_array):
        logging.info("Comparing Variable No: " + str(count))
        variable_token = nlp(variable)
        best_average = -9000  # sentinel lower than any real similarity
        best_label = 0
        for cluster_no, members in enumerate(clusteredNodes):
            total = 0
            for node in members:
                total += variable_token.similarity(nlp(node))
            average = total / len(members)
            if average > best_average:
                best_average = average
                best_label = cluster_no
        labels.append(best_label)
    return labels
def calculateKMeansAccuracy():
    """Compare predicted K-means clusters against the labelled variables and
    log the fraction of matches."""
    labeled = list(pd.read_csv(JULIA_VARIABLE_CSV_PATH)["KMeansLabels"])
    predicted = list(pd.read_csv(KMEANS_PREDICTED_CSV_PATH)["cluster"])
    matches = 0
    for i in range(len(predicted)):
        if labeled[i] == predicted[i]:
            matches += 1
    logging.info("KMeans Accuracy is : " + str(float(matches/len(predicted))))
def calculateSimAccuracy():
    """Compare similarity-based cluster assignments against the labelled
    variables and log the fraction of matches."""
    labeled = list(pd.read_csv(JULIA_VARIABLE_CSV_PATH)["DBSCANLabels"])
    predicted = list(pd.read_csv(PREDICTED_UMAP_CSV_PATH)["cluster"])
    matches = 0
    for i in range(len(predicted)):
        if labeled[i] == predicted[i]:
            matches += 1
    logging.info("Similar Cluster Assignment Accuracy is : " + str(float(matches/len(predicted))))
def runKMeansExp():
    """Experiment: train K-means on the UMAP-clustered nodes (noise removed),
    predict clusters for the labelled variables, write both CSVs, and log
    accuracy against the hand labels."""
    variableData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
    umapData = pd.read_csv(CLUSTER_LABEL_CSV_PATH)
    # Drop DBSCAN noise rows (label -1) before training.
    umapData = umapData[umapData.umapLabels != -1]
    kmeansTrainData = list(umapData["node"])
    variable_array = list(variableData["variable"])
    variable_array = cleanVariables(variable_array)
    variableTokenList = createWord2Vec(variable_array)
    trainTokenList = createWord2Vec(kmeansTrainData)
    print(len(trainTokenList))  # NOTE(review): leftover debug print
    # NOTE(review): K is the maximum label, not max+1 — confirm this
    # off-by-one is intended.
    K_size = max(list(umapData["umapLabels"]))
    trainLabels, predictedLabels = useKmeans(trainTokenList, K_size, variableTokenList)
    writeCSV(kmeansTrainData, trainLabels, KMEANS_CLUSTER_LABEL_CSV_PATH)
    writeCSV(variable_array, predictedLabels, KMEANS_PREDICTED_CSV_PATH)
    calculateKMeansAccuracy()
def runUMapSimilarityExp():
    """Experiment: assign each labelled variable to the UMAP cluster whose
    members are most similar to it, write the assignments, and log accuracy."""
    variableData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
    umapData = pd.read_csv(CLUSTER_LABEL_CSV_PATH)
    # Drop DBSCAN noise rows (label -1).
    umapData = umapData[umapData.umapLabels != -1]
    variable_array = list(variableData["variable"])
    variable_array = cleanVariables(variable_array)
    clusteredNodes = groupNodesByCluster(umapData)
    labels = getSimilarityLabels(clusteredNodes, variable_array)
    writeCSV(variable_array, labels, PREDICTED_UMAP_CSV_PATH)
    calculateSimAccuracy()
def getAverageSimilarity(variable_array, clusteredNodes, predictedLabels):
    """Mean spaCy similarity between each variable and the members of its
    assigned cluster.

    Returns a list of floats parallel to variable_array.
    """
    nlp = spacy.load('en_core_web_md')
    averages = []
    for i in range(len(variable_array)):
        members = clusteredNodes[predictedLabels[i]]
        total = 0
        for word in members:
            total += nlp(word).similarity(nlp(variable_array[i]))
        averages.append(float(total / len(members)))
    return averages
def runCombinationExp():
    """Experiment: K-means assignment plus per-variable confidence.

    Trains K-means on the UMAP-clustered nodes, predicts clusters for the
    labelled variables, computes each variable's mean similarity to its
    assigned cluster, plots accuracy vs. confident assignments, and returns
    the per-variable similarity averages.
    """
    variableData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
    umapData = pd.read_csv(CLUSTER_LABEL_CSV_PATH)
    # Drop DBSCAN noise rows (label -1) before training.
    umapData = umapData[umapData.umapLabels != -1]
    kmeansTrainData = list(umapData["node"])
    variable_array = list(variableData["variable"])
    variable_array = cleanVariables(variable_array)
    variableTokenList = createWord2Vec(variable_array)
    trainTokenList = createWord2Vec(kmeansTrainData)
    # NOTE(review): K is the maximum label, not max+1 — same off-by-one
    # question as runKMeansExp; confirm intended.
    K_size = max(list(umapData["umapLabels"]))
    trainLabels, predictedLabels = useKmeans(trainTokenList, K_size, variableTokenList)
    writeCSV(kmeansTrainData, trainLabels, KMEANS_CLUSTER_LABEL_CSV_PATH)
    clusteredNodes = groupNodesByKMeansCluster(pd.read_csv(KMEANS_CLUSTER_LABEL_CSV_PATH))
    averageSimArray = getAverageSimilarity(variable_array, clusteredNodes, predictedLabels)
    writeCSV(variable_array, predictedLabels, KMEANS_PREDICTED_CSV_PATH)
    graphCombinationExp(averageSimArray)
    return averageSimArray
def graphCombinationExp(averageSimArray):
    """Plot accuracy vs. normalised confident-assignment count while sweeping
    a similarity threshold from 0.01 to 0.95, and mark the curves' first
    crossing point.

    Args:
        averageSimArray: per-variable mean similarity to its assigned cluster
            (parallel to the rows of the truth CSVs).
    """
    labeledData = pd.read_csv(JULIA_VARIABLE_CSV_PATH)
    predictedData = pd.read_csv(KMEANS_CLUSTER_TRUTH_CSV_PATH)
    labeled = list(labeledData["KMeansLabels"])
    predicted = list(predictedData["cluster"])
    thresholdArray = []
    accuracy = []
    numberOfAssignments = []
    threshold = .01
    while threshold < .95:
        assignmentCount = 0
        denominatorCount = 0
        for i in range(len(predicted)):
            if averageSimArray[i] > threshold:
                denominatorCount += 1
                if labeled[i] == predicted[i]:
                    assignmentCount += 1
        # Accuracy among assignments above the threshold; 1.0 when none qualify.
        if denominatorCount != 0:
            accuracy.append(float(assignmentCount/denominatorCount))
        else:
            accuracy.append(1.0)
        numberOfAssignments.append(float(assignmentCount/len(predicted)))
        thresholdArray.append(threshold)
        threshold += .02
    # Normalise so the assignment curve starts at 1.
    numberOfAssignments = np.divide(np.asarray(numberOfAssignments), numberOfAssignments[0])
    plt.figure(0)
    plt.title("Accuracy vs Normalized True Assignments")
    plt.plot(thresholdArray, accuracy, color="blue", label="Accuracy")
    plt.plot(thresholdArray, numberOfAssignments, color="orange", label="Normalized True Assigns" )
    plt.legend(loc="upper right")
    plt.xticks(np.arange(0, 1, step=0.1))
    plt.xlabel("Similarity Threshold")
    plt.ylabel("Normalized Values")
    # Sign changes of (assignments - accuracy) locate curve crossings.
    idx = np.argwhere(np.diff(np.sign(numberOfAssignments - accuracy))).flatten()
    # The original used int(idx), which raises unless exactly one crossing
    # exists; guard and use the first crossing instead.
    if idx.size > 0:
        cross = int(idx[0])
        plt.plot(thresholdArray[cross], numberOfAssignments[cross], 'ro')
        logging.info("Intersection Threshold is: " + str(thresholdArray[cross]))
    else:
        logging.info("No intersection between accuracy and assignment curves.")
|
{"hexsha": "2589dbf56797c92d3681ef3ca913920f5c6a86a7", "size": 13358, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/semanticClustering/experimentUtil.py", "max_stars_repo_name": "mikiec84/SemanticModels.jl", "max_stars_repo_head_hexsha": "f81baf0789cc547375f300429d0fd49c866d5339", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/semanticClustering/experimentUtil.py", "max_issues_repo_name": "mikiec84/SemanticModels.jl", "max_issues_repo_head_hexsha": "f81baf0789cc547375f300429d0fd49c866d5339", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/semanticClustering/experimentUtil.py", "max_forks_repo_name": "mikiec84/SemanticModels.jl", "max_forks_repo_head_hexsha": "f81baf0789cc547375f300429d0fd49c866d5339", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1375291375, "max_line_length": 133, "alphanum_fraction": 0.6796676149, "include": true, "reason": "import numpy", "num_tokens": 3258}
|
######
#
# 2-dimensional stuff.
#
"""
    regulargrid2d(box, res)

Build a regular triangulated grid over the axis-aligned 2D box
`box = (xmin, ymin, xmax, ymax)` with `res = (rx, ry)` sample points per axis.

Every grid cell is split by its diagonal into a "down" and an "up" triangle.
Returns the tuple `(ps, vs, es, fs)`:

- `ps`: `rx*ry × 3` point coordinates (the z-column is zero),
- `vs`: 1-based vertex indices,
- `es`: 1-based edge endpoint pairs (horizontal, vertical, then diagonal block),
- `fs`: 1-based face vertex triples (down faces, then up faces).
"""
function regulargrid2d(box, res)
    xmin, ymin, xmax, ymax = box
    rx, ry = res
    dx = (xmax-xmin)/(rx-1)
    dy = (ymax-ymin)/(ry-1)
    vs_cnt = rx*ry # vertices count
    # Horizontal + vertical + diagonal edges. There is exactly one diagonal
    # per cell, i.e. (rx-1)*(ry-1) of them; the previous (rx-1)*(rx-1) term
    # was a typo that mis-sized `es` whenever rx != ry (BoundsError for
    # rx < ry, spurious all-ones rows after the .+1 shift for rx > ry).
    es_cnt = (rx-1)*ry + rx*(ry-1) + (rx-1)*(ry-1)
    fs_cnt = 2*(rx-1)*(ry-1) # down + up
    ps = zeros(vs_cnt, 3) # points
    vs = zeros(Int,vs_cnt,1) # vertices
    es = zeros(Int,es_cnt,2) # edges
    fs = zeros(Int,fs_cnt,3) # faces
    # rows, columns and values for edge-vertex adjacency
    # (filled below but currently not returned)
    e_rows = zeros(Int, 2*es_cnt)
    e_cols = zeros(Int, 2*es_cnt)
    e_vals = zeros(Int, 2*es_cnt)
    # rows, columns and values for face-edge adjacency
    # (filled below but currently not returned)
    f_rows = zeros(Int, 3*fs_cnt)
    f_cols = zeros(Int, 3*fs_cnt)
    f_vals = zeros(Int, 3*fs_cnt)
    esc = 1
    fsc = 1
    for r in 0:(ry-1)
        for c in 0:(rx-1)
            # 0-based vertex indices of the current cell's corners
            v00 = r*rx + c
            v01 = v00 + 1
            v10 = v00 + rx
            v11 = v01 + rx
            # horizontal edges
            he00 = r*(rx-1) + c
            he01 = he00 + 1
            he10 = he00 + (rx-1)
            he11 = he01 + (rx-1)
            # vertical edges (offset past the horizontal block)
            ve00 = (rx-1)*ry + r*rx + c
            ve01 = ve00 + 1
            ve10 = ve00 + rx
            ve11 = ve01 + rx
            # diagonal edges (offset past horizontal + vertical blocks)
            de00 = (rx-1)*ry + rx*(ry-1) + r*(rx-1) + c
            de01 = de00 + 1
            de10 = de00 + (rx-1)
            de11 = de01 + (rx-1)
            # down faces
            df00 = r*(rx-1) + c
            df01 = df00 + 1
            df10 = df00 + (rx-1)
            df11 = df01 + (rx-1)
            # up faces (offset past the down-face block)
            uf00 = (rx-1)*(ry-1) + r*(rx-1) + c
            uf01 = uf00 + 1
            uf10 = uf00 + (rx-1)
            uf11 = uf01 + (rx-1)
            # setting points
            ps[v00+1,:] = [xmin + c*dx, ymin + r*dy, 0.0]
            # setting vertices
            vs[v00+1] = v00
            # Setting edges and faces; guards keep us off the far boundary.
            if c < (rx-1) # horizontal edges
                es[he00+1,:] = [v00,v01]
                e_rows[esc] = he00; e_cols[esc] = v00; e_vals[esc] = -1; esc = esc + 1
                e_rows[esc] = he00; e_cols[esc] = v01; e_vals[esc] = 1; esc = esc + 1
            end
            if r < (ry-1) # vertical edges
                es[ve00+1,:] = [v00,v10]
                e_rows[esc] = ve00; e_cols[esc] = v00; e_vals[esc] = -1; esc = esc + 1
                e_rows[esc] = ve00; e_cols[esc] = v10; e_vals[esc] = 1; esc = esc + 1
            end
            if r < (ry-1) && c < (rx-1)
                fs[df00+1,:] = [v00,v01,v11] # down faces
                f_rows[fsc] = df00; f_cols[fsc] = he00; f_vals[fsc] = 1; fsc = fsc + 1
                f_rows[fsc] = df00; f_cols[fsc] = ve01; f_vals[fsc] = 1; fsc = fsc + 1
                f_rows[fsc] = df00; f_cols[fsc] = de00; f_vals[fsc] = -1; fsc = fsc + 1
                fs[uf00+1,:] = [v11,v10,v00] # up faces
                f_rows[fsc] = uf00; f_cols[fsc] = de00; f_vals[fsc] = 1; fsc = fsc + 1
                f_rows[fsc] = uf00; f_cols[fsc] = he10; f_vals[fsc] = -1; fsc = fsc + 1
                f_rows[fsc] = uf00; f_cols[fsc] = ve00; f_vals[fsc] = -1; fsc = fsc + 1
                es[de00+1,:] = [v00, v11] # diagonal edges
                e_rows[esc] = de00; e_cols[esc] = v00; e_vals[esc] = -1; esc = esc + 1
                e_rows[esc] = de00; e_cols[esc] = v11; e_vals[esc] = 1; esc = esc + 1
            end
        end
    end
    # Convert the 0-based indices to Julia's 1-based convention on return.
    ps, vs.+1, es.+1, fs.+1
end
######
#
# 3-dimensional stuff.
#
"""
    regulargrid3d(box, res)

Sample the axis-aligned 3D box `box = (xmin, ymin, zmin, xmax, ymax, zmax)`
on a regular lattice with `res = (resx, resy, resz)` points per axis and
split every cubic cell into six tetrahedra.

Returns `(ps, ts)` where `ps` holds the point coordinates and `ts` the
1-based tetrahedron vertex indices (six rows per cell).
"""
function regulargrid3d(box, res)
    xmin, ymin, zmin, xmax, ymax, zmax = box
    nx, ny, nz = res
    nxy = nx*ny
    npts = nxy*nz
    stepx = (xmax-xmin)/(nx-1)
    stepy = (ymax-ymin)/(ny-1)
    stepz = (zmax-zmin)/(nz-1)
    pts = zeros(npts, 3)
    verts = zeros(Int, npts, 1)
    tets = zeros(Int, 6*(nx-1)*(ny-1)*(nz-1), 4)
    row = 1
    for iz in 0:(nz-1), iy in 0:(ny-1), ix in 0:(nx-1)
        vid = nxy*iz + nx*iy + ix
        verts[vid+1] = vid
        pts[vid+1, :] = [xmin + ix*stepx, ymin + iy*stepy, zmin + iz*stepz]
        # Only lattice points that own a full cell spawn tetrahedra.
        if ix < (nx-1) && iy < (ny-1) && iz < (nz-1)
            # 0-based ids of the eight cube corners: bottom face c0..c3,
            # top face c4..c7 (same layout, one layer up).
            c0 = vid
            c1 = c0 + 1
            c2 = c0 + nx + 1
            c3 = c0 + nx
            c4 = c0 + nxy
            c5 = c1 + nxy
            c6 = c2 + nxy
            c7 = c3 + nxy
            tets[row, :]   = [c2, c0, c3, c7]
            tets[row+1, :] = [c0, c2, c6, c7]
            tets[row+2, :] = [c4, c0, c6, c7]
            tets[row+3, :] = [c6, c0, c1, c2]
            tets[row+4, :] = [c0, c6, c1, c4]
            tets[row+5, :] = [c6, c5, c1, c4]
            row += 6
        end
    end
    # Shift tetrahedron indices to Julia's 1-based convention.
    pts, tets.+1
end
"""
    compute_subfaces(faces)

For each row of `faces` (a face given by `k` vertex indices), enumerate the
`k` subfaces obtained by dropping one vertex at a time, canonicalize each by
sorting its indices, and stack the distinct subfaces (first-occurrence order)
into a single matrix with `k-1` columns.
"""
function compute_subfaces(faces::Array{Int,2})
    nfaces, nverts = size(faces)
    # Flat generator order matches the original loop: faces outer, dropped
    # vertex inner; each entry is a sorted 1×(nverts-1) row matrix.
    rows = [sort(faces[[f], setdiff(1:nverts, v)], dims=2)
            for f in 1:nfaces for v in 1:nverts]
    vcat(unique(rows)...)
end
|
{"hexsha": "fe1872dfa6338fc417bd459aebb035654079647d", "size": 5394, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/grids.jl", "max_stars_repo_name": "valerocar/LevelSets.jl", "max_stars_repo_head_hexsha": "9cfc4c0bb2cb80a0d96dca81431daf50aa1f703c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/grids.jl", "max_issues_repo_name": "valerocar/LevelSets.jl", "max_issues_repo_head_hexsha": "9cfc4c0bb2cb80a0d96dca81431daf50aa1f703c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/grids.jl", "max_forks_repo_name": "valerocar/LevelSets.jl", "max_forks_repo_head_hexsha": "9cfc4c0bb2cb80a0d96dca81431daf50aa1f703c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2962962963, "max_line_length": 87, "alphanum_fraction": 0.4391916945, "num_tokens": 1998}
|
[STATEMENT]
lemma less_eq_multiset_empty_left[simp]:
shows "{#} \<le> M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {#} \<le> M
[PROOF STEP]
by (simp add: subset_eq_imp_le_multiset)
|
{"llama_tokens": 88, "file": null, "length": 1}
|
[STATEMENT]
lemma orthogonal_complement_orthogonal_complement_closure_cspan:
\<open>orthogonal_complement (orthogonal_complement S) = closure (cspan S)\<close> for S :: \<open>'a::chilbert_space set\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. orthogonal_complement (orthogonal_complement S) = closure (cspan S)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. orthogonal_complement (orthogonal_complement S) = closure (cspan S)
[PROOF STEP]
have \<open>orthogonal_complement (orthogonal_complement S) = orthogonal_complement (orthogonal_complement (closure (cspan S)))\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. orthogonal_complement (orthogonal_complement S) = orthogonal_complement (orthogonal_complement (closure (cspan S)))
[PROOF STEP]
by (simp flip: orthogonal_complement_of_closure orthogonal_complement_of_cspan)
[PROOF STATE]
proof (state)
this:
orthogonal_complement (orthogonal_complement S) = orthogonal_complement (orthogonal_complement (closure (cspan S)))
goal (1 subgoal):
1. orthogonal_complement (orthogonal_complement S) = closure (cspan S)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
orthogonal_complement (orthogonal_complement S) = orthogonal_complement (orthogonal_complement (closure (cspan S)))
goal (1 subgoal):
1. orthogonal_complement (orthogonal_complement S) = closure (cspan S)
[PROOF STEP]
have \<open>\<dots> = closure (cspan S)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. orthogonal_complement (orthogonal_complement (closure (cspan S))) = closure (cspan S)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
orthogonal_complement (orthogonal_complement (closure (cspan S))) = closure (cspan S)
goal (1 subgoal):
1. orthogonal_complement (orthogonal_complement S) = closure (cspan S)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
orthogonal_complement (orthogonal_complement S) = closure (cspan S)
[PROOF STEP]
show \<open>orthogonal_complement (orthogonal_complement S) = closure (cspan S)\<close>
[PROOF STATE]
proof (prove)
using this:
orthogonal_complement (orthogonal_complement S) = closure (cspan S)
goal (1 subgoal):
1. orthogonal_complement (orthogonal_complement S) = closure (cspan S)
[PROOF STEP]
by -
[PROOF STATE]
proof (state)
this:
orthogonal_complement (orthogonal_complement S) = closure (cspan S)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 842, "file": "Complex_Bounded_Operators_Complex_Inner_Product", "length": 10}
|
import os
import sys
from functools import partial
import numpy as np
import pytest
import scipy
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from scipy.stats import norm
from respy import RespyCls
from respy.fortran.interface import resfort_interface
from respy.pre_processing.data_processing import process_dataset
from respy.python.estimate.estimate_python import pyth_criterion
from respy.python.evaluate.evaluate_python import pyth_contributions
from respy.python.interface import get_scales_magnitudes
from respy.python.record.record_estimation import _spectral_condition_number
from respy.python.shared.shared_auxiliary import create_draws
from respy.python.shared.shared_auxiliary import dist_class_attributes
from respy.python.shared.shared_auxiliary import extract_cholesky
from respy.python.shared.shared_auxiliary import get_conditional_probabilities
from respy.python.shared.shared_auxiliary import get_emaxs_of_subsequent_period
from respy.python.shared.shared_auxiliary import get_optim_paras
from respy.python.shared.shared_auxiliary import ols
from respy.python.shared.shared_auxiliary import read_draws
from respy.python.shared.shared_auxiliary import replace_missing_values
from respy.python.shared.shared_auxiliary import transform_disturbances
from respy.python.shared.shared_constants import DECIMALS
from respy.python.shared.shared_constants import IS_F2PY
from respy.python.shared.shared_constants import MISSING_FLOAT
from respy.python.shared.shared_constants import TEST_RESOURCES_BUILD
from respy.python.shared.shared_constants import TOL
from respy.python.simulate.simulate_auxiliary import sort_edu_spec
from respy.python.simulate.simulate_auxiliary import sort_type_info
from respy.python.simulate.simulate_python import pyth_simulate
from respy.python.solve.solve_auxiliary import get_endogenous_variable
from respy.python.solve.solve_auxiliary import get_exogenous_variables
from respy.python.solve.solve_auxiliary import get_predictions
from respy.python.solve.solve_auxiliary import get_simulated_indicator
from respy.python.solve.solve_auxiliary import pyth_backward_induction
from respy.python.solve.solve_auxiliary import StateSpace
from respy.python.solve.solve_python import pyth_solve
from respy.python.solve.solve_risk import construct_emax_risk
from respy.tests.codes.auxiliary import simulate_observed
from respy.tests.codes.auxiliary import write_draws
from respy.tests.codes.auxiliary import write_edu_start
from respy.tests.codes.auxiliary import write_interpolation_grid
from respy.tests.codes.auxiliary import write_lagged_start
from respy.tests.codes.auxiliary import write_types
from respy.tests.codes.random_model import generate_random_model
assert_allclose = partial(np.testing.assert_allclose, rtol=TOL, atol=TOL)
assert_almost_equal = partial(np.testing.assert_almost_equal, decimal=DECIMALS)
if IS_F2PY:
sys.path.insert(0, str(TEST_RESOURCES_BUILD))
import f2py_interface as fort_debug
@pytest.mark.skipif(not IS_F2PY, reason="No F2PY available")
class TestClass(object):
""" This class groups together some tests.
"""
    def test_1(self):
        """ Compare the evaluation of the criterion function for the ambiguity
        optimization and the simulated expected future value between the FORTRAN and
        PYTHON implementations. These tests are set up as a separate test case due to
        the large setup cost to construct the ingredients for the interface.
        """
        # Generate constraint periods
        constr = {"program": {"version": "python"}}
        # Generate random initialization file
        params_spec, options_spec = generate_random_model(point_constr=constr)
        respy_obj = RespyCls(params_spec, options_spec)
        respy_obj = simulate_observed(respy_obj)
        # Extract class attributes
        (
            state_space,
            states_all,
            mapping_state_idx,
            periods_rewards_systematic,
            periods_emax,
            num_periods,
            num_draws_emax,
            edu_spec,
            optim_paras,
            num_types,
        ) = dist_class_attributes(
            respy_obj,
            "state_space",
            "states_all",
            "mapping_state_idx",
            "periods_rewards_systematic",
            "periods_emax",
            "num_periods",
            "num_draws_emax",
            "edu_spec",
            "optim_paras",
            "num_types",
        )
        # Sample draws: standard normal draws are transformed with the
        # Cholesky factor of the shock covariance matrix.
        draws_emax_standard = np.random.multivariate_normal(
            np.zeros(4), np.identity(4), num_draws_emax
        )
        draws_emax_risk = transform_disturbances(
            draws_emax_standard, np.zeros(4), optim_paras["shocks_cholesky"]
        )
        # Sampling of random period and admissible state index
        period = np.random.choice(range(num_periods))
        k = np.random.choice(range(state_space.states_per_period[period]))
        # Select systematic rewards
        rewards_systematic = periods_rewards_systematic[period, k, :]
        # Evaluation of simulated expected future values. Limit to one individual as the
        # Fortran version.
        rewards_period = state_space.get_attribute_from_period("rewards", period)[k]
        emaxs_period = state_space.get_attribute_from_period("emaxs", period)[k, :4]
        # Column 3 of the state record appears to hold years of schooling,
        # compared here against the schooling cap -- TODO confirm.
        max_education_period = (
            state_space.get_attribute_from_period("states", period)[k, 3]
            >= edu_spec["max"]
        )
        py = construct_emax_risk(
            rewards_period[-2:],
            rewards_period[:4],
            emaxs_period,
            draws_emax_risk,
            optim_paras["delta"],
            max_education_period,
        )
        # The Fortran wrapper reconstructs the same quantity from the flat
        # period-level arrays rather than the StateSpace object.
        f90 = fort_debug.wrapper_construct_emax_risk(
            num_periods,
            num_draws_emax,
            period,
            k,
            draws_emax_risk,
            rewards_systematic,
            periods_emax,
            states_all,
            mapping_state_idx,
            edu_spec["start"],
            edu_spec["max"],
            optim_paras["delta"],
            optim_paras["coeffs_common"],
            optim_paras["coeffs_a"],
            optim_paras["coeffs_b"],
            num_types,
        )
        assert_allclose(py, f90)
    def test_2(self):
        """ Compare results between FORTRAN and PYTHON of selected hand-crafted
        functions: state space creation and the OLS helper. (The docstring's
        reference to test_97() appears stale -- no such test exists here.)
        """
        for _ in range(33):
            # Create grid of admissible state space values.
            num_edu_start = np.random.choice(range(1, 3))
            num_periods = np.random.randint(1, 15)
            num_types = np.random.randint(1, 3)
            edu_spec = {}
            edu_spec["start"] = np.random.choice(
                range(1, 10), size=num_edu_start, replace=False
            ).tolist()
            edu_spec["max"] = max(edu_spec["start"]) + np.random.randint(1, 5)
            min_idx = edu_spec["max"] + 1
            # FORTRAN
            base_args = (num_periods, num_types)
            state_space = StateSpace(*base_args, edu_spec["start"], edu_spec["max"])
            py_a, py_c, _, _ = state_space._get_fortran_counterparts()
            py_b = state_space.states_per_period
            py_d = py_b.max()
            fort_a, fort_b, fort_c, fort_d = fort_debug.wrapper_create_state_space(
                *base_args, edu_spec["start"], edu_spec["max"], min_idx
            )
            # Ensure equivalence
            rslts = [[fort_a, py_a], [fort_b, py_b], [fort_c, py_c], [fort_d, py_d]]
            for obj in rslts:
                # Slice Fortran output to shape of Python output.
                if isinstance(obj[0], np.ndarray):
                    obj[0] = obj[0][tuple(map(slice, obj[1].shape))]
                assert_allclose(obj[0], obj[1])
        for _ in range(100):
            # Draw random request for testing purposes
            num_covars = np.random.randint(2, 10)
            num_agents = np.random.randint(100, 1000)
            tiny = np.random.normal(size=num_agents)
            beta = np.random.normal(size=num_covars)
            # Generate sample: first column is the intercept, outcome is the
            # linear index plus noise.
            exog = np.random.sample((num_agents, num_covars))
            exog[:, 0] = 1
            endog = np.dot(exog, beta) + tiny
            # Run OLS
            beta_result = ols(y=endog, x=exog)
            # Check parameters
            py = beta_result
            f90 = fort_debug.wrapper_get_coefficients(
                endog, exog, num_covars, num_agents
            )
            assert_almost_equal(py, f90)
            # Check prediction
            py = exog.dot(beta_result)
            f90 = fort_debug.wrapper_point_predictions(exog, f90, num_agents)
            assert_almost_equal(py, f90)
    def test_3(self):
        """ Compare results between FORTRAN and PYTHON of selected functions:
        normal pdf, SVD, pseudo-inverse, inverse, determinant, trace, clipping,
        and the spectral condition number.
        """
        for _ in range(10):
            # Draw random requests for testing purposes.
            num_draws_emax = np.random.randint(2, 1000)
            dim = np.random.randint(1, 6)
            matrix = np.random.uniform(size=dim ** 2).reshape(dim, dim)
            # A Gram matrix, so `cov` is symmetric positive semi-definite.
            cov = np.dot(matrix, matrix.T)
            # PDF of normal distribution; square the last argument so the
            # scale parameter is guaranteed non-negative.
            args = np.random.normal(size=3)
            args[-1] **= 2
            f90 = fort_debug.wrapper_normal_pdf(*args)
            py = norm.pdf(*args)
            assert_almost_equal(py, f90)
            # Singular Value Decomposition
            py = scipy.linalg.svd(matrix)
            f90 = fort_debug.wrapper_svd(matrix, dim)
            for i in range(3):
                assert_allclose(py[i], f90[i])
            # Pseudo-Inverse
            py = np.linalg.pinv(matrix)
            f90 = fort_debug.wrapper_pinv(matrix, dim)
            assert_allclose(py, f90)
            # Inverse
            py = np.linalg.inv(cov)
            f90 = fort_debug.wrapper_inverse(cov, dim)
            assert_allclose(py, f90)
            # Determinant
            py = np.linalg.det(cov)
            f90 = fort_debug.wrapper_determinant(cov)
            assert_allclose(py, f90)
            # Trace
            py = np.trace(cov)
            f90 = fort_debug.wrapper_trace(cov)
            assert_allclose(py, f90)
            # Random normal deviates. This only tests the interface, requires
            # visual inspection in IPYTHON notebook as well.
            fort_debug.wrapper_standard_normal(num_draws_emax)
            # Clipping values below and above bounds.
            num_values = np.random.randint(1, 10000)
            lower_bound = np.random.randn()
            upper_bound = lower_bound + np.random.ranf()
            values = np.random.normal(size=num_values)
            f90 = fort_debug.wrapper_clip_value(
                values, lower_bound, upper_bound, num_values
            )
            py = np.clip(values, lower_bound, upper_bound)
            assert_almost_equal(py, f90)
            # Spectral condition number
            py = _spectral_condition_number(cov)
            fort = fort_debug.wrapper_spectral_condition_number(cov)
            assert_almost_equal(py, fort)
    def test_4(self):
        """ Testing the core functions of the solution step for the equality of results
        between the PYTHON and FORTRAN implementations: state space creation,
        systematic rewards, and the backward induction procedure.
        """
        params_spec, options_spec = generate_random_model()
        respy_obj = RespyCls(params_spec, options_spec)
        # Ensure that backward induction routines use the same grid for the
        # interpolation.
        write_interpolation_grid(respy_obj)
        # Extract class attributes
        (
            num_periods,
            edu_spec,
            optim_paras,
            num_draws_emax,
            seed_emax,
            is_debug,
            is_interpolated,
            num_points_interp,
            optimizer_options,
            file_sim,
            num_types,
        ) = dist_class_attributes(
            respy_obj,
            "num_periods",
            "edu_spec",
            "optim_paras",
            "num_draws_emax",
            "seed_emax",
            "is_debug",
            "is_interpolated",
            "num_points_interp",
            "optimizer_options",
            "file_sim",
            "num_types",
        )
        shocks_cholesky = optim_paras["shocks_cholesky"]
        coeffs_common = optim_paras["coeffs_common"]
        coeffs_home = optim_paras["coeffs_home"]
        coeffs_edu = optim_paras["coeffs_edu"]
        coeffs_a = optim_paras["coeffs_a"]
        coeffs_b = optim_paras["coeffs_b"]
        delta = optim_paras["delta"]
        type_spec_shifts = optim_paras["type_shifts"]
        type_spec_shares = optim_paras["type_shares"]
        min_idx = edu_spec["max"] + 1
        # Check the state space creation.
        state_space = StateSpace(
            num_periods, num_types, edu_spec["start"], edu_spec["max"], optim_paras
        )
        states_all, mapping_state_idx, _, _ = state_space._get_fortran_counterparts()
        pyth = (
            states_all,
            state_space.states_per_period,
            mapping_state_idx,
            state_space.states_per_period.max(),
        )
        f2py = fort_debug.wrapper_create_state_space(
            num_periods, num_types, edu_spec["start"], edu_spec["max"], min_idx
        )
        for i in range(4):
            # Slice Fortran output to shape of Python output.
            if isinstance(f2py[i], np.ndarray):
                f2py_reduced = f2py[i][tuple(map(slice, pyth[i].shape))]
            else:
                f2py_reduced = f2py[i]
            assert_allclose(pyth[i], f2py_reduced)
        # Check the calculation of systematic rewards (third counterpart).
        _, _, pyth, _ = state_space._get_fortran_counterparts()
        f2py = fort_debug.wrapper_calculate_rewards_systematic(
            num_periods,
            state_space.states_per_period,
            states_all,
            state_space.states_per_period.max(),
            coeffs_common,
            coeffs_a,
            coeffs_b,
            coeffs_edu,
            coeffs_home,
            type_spec_shares,
            type_spec_shifts,
        )
        assert_allclose(pyth, f2py)
        # Carry some results from the systematic rewards calculation for future use and
        # create the required set of disturbances.
        periods_draws_emax = create_draws(
            num_periods, num_draws_emax, seed_emax, is_debug
        )
        # Save result for next test.
        periods_rewards_systematic = pyth.copy()
        # Fix for hardcoded myopic agents.
        # NOTE(review): the Python run below uses this near-zero delta while
        # the Fortran call receives the original `delta` extracted earlier --
        # this looks inconsistent; confirm it is intended.
        optim_paras["delta"] = 0.00000000000000001
        # Check backward induction procedure.
        state_space = pyth_backward_induction(
            periods_draws_emax,
            state_space,
            is_debug,
            is_interpolated,
            num_points_interp,
            optim_paras,
            file_sim,
            False,
        )
        _, _, _, pyth = state_space._get_fortran_counterparts()
        f2py = fort_debug.wrapper_backward_induction(
            num_periods,
            False,
            state_space.states_per_period.max(),
            periods_draws_emax,
            num_draws_emax,
            state_space.states_per_period,
            periods_rewards_systematic,
            mapping_state_idx,
            states_all,
            is_debug,
            is_interpolated,
            num_points_interp,
            edu_spec["start"],
            edu_spec["max"],
            shocks_cholesky,
            delta,
            coeffs_common,
            coeffs_a,
            coeffs_b,
            file_sim,
            False,
        )
        assert_allclose(pyth, f2py)
    def test_5(self):
        """ This methods ensures that the core functions yield the same results across
        implementations: solve, simulate, likelihood contributions, and the
        criterion function are each compared between PYTHON, F2PY, and FORTRAN.
        """
        params_spec, options_spec = generate_random_model()
        respy_obj = RespyCls(params_spec, options_spec)
        # Ensure that backward induction routines use the same grid for the
        # interpolation.
        max_states_period = write_interpolation_grid(respy_obj)
        # Extract class attributes
        (
            num_periods,
            edu_spec,
            optim_paras,
            num_draws_emax,
            is_debug,
            is_interpolated,
            num_points_interp,
            is_myopic,
            num_agents_sim,
            num_draws_prob,
            tau,
            seed_sim,
            num_agents_est,
            optimizer_options,
            file_sim,
            num_types,
            num_paras,
        ) = dist_class_attributes(
            respy_obj,
            "num_periods",
            "edu_spec",
            "optim_paras",
            "num_draws_emax",
            "is_debug",
            "is_interpolated",
            "num_points_interp",
            "is_myopic",
            "num_agents_sim",
            "num_draws_prob",
            "tau",
            "seed_sim",
            "num_agents_est",
            "optimizer_options",
            "file_sim",
            "num_types",
            "num_paras",
        )
        min_idx = edu_spec["max"] + 1
        shocks_cholesky = optim_paras["shocks_cholesky"]
        coeffs_common = optim_paras["coeffs_common"]
        coeffs_home = optim_paras["coeffs_home"]
        coeffs_edu = optim_paras["coeffs_edu"]
        coeffs_a = optim_paras["coeffs_a"]
        coeffs_b = optim_paras["coeffs_b"]
        delta = optim_paras["delta"]
        type_spec_shares = optim_paras["type_shares"]
        type_spec_shifts = optim_paras["type_shifts"]
        # Write out random components and interpolation grid to align the three
        # implementations.
        max_draws = max(num_agents_sim, num_draws_emax, num_draws_prob)
        write_types(type_spec_shares, num_agents_sim)
        write_edu_start(edu_spec, num_agents_sim)
        write_draws(num_periods, max_draws)
        write_lagged_start(num_agents_sim)
        # It is critical that the model is simulated after all files have been written
        # to the disk because they are picked up in the subroutines.
        respy_obj = simulate_observed(respy_obj)
        periods_draws_emax = read_draws(num_periods, num_draws_emax)
        periods_draws_prob = read_draws(num_periods, num_draws_prob)
        periods_draws_sims = read_draws(num_periods, num_agents_sim)
        fort, _ = resfort_interface(respy_obj, "simulate")
        state_space = pyth_solve(
            is_interpolated,
            num_points_interp,
            num_periods,
            is_debug,
            periods_draws_emax,
            edu_spec,
            optim_paras,
            file_sim,
            num_types,
        )
        (
            states_all,
            mapping_state_idx,
            periods_rewards_systematic,
            periods_emax,
        ) = state_space._get_fortran_counterparts()
        py = (
            periods_rewards_systematic,
            state_space.states_per_period,
            mapping_state_idx,
            periods_emax,
            states_all,
        )
        f2py = fort_debug.wrapper_solve(
            is_interpolated,
            num_points_interp,
            num_draws_emax,
            num_periods,
            is_myopic,
            is_debug,
            periods_draws_emax,
            min_idx,
            edu_spec["start"],
            edu_spec["max"],
            coeffs_common,
            coeffs_a,
            coeffs_b,
            coeffs_edu,
            coeffs_home,
            shocks_cholesky,
            delta,
            file_sim,
            max_states_period,
            num_types,
            type_spec_shares,
            type_spec_shifts,
        )
        # Compare all five solution objects pairwise against both backends.
        assert_allclose(py[0], fort[0])
        assert_allclose(py[1], fort[1])
        assert_allclose(py[2], fort[2])
        assert_allclose(py[3], fort[3])
        assert_allclose(py[4], fort[4])
        assert_allclose(py[0], f2py[0])
        assert_allclose(py[1], f2py[1])
        assert_allclose(py[2], f2py[2])
        assert_allclose(py[3], f2py[3])
        assert_allclose(py[4], f2py[4])
        (
            states_all,
            mapping_state_idx,
            periods_rewards_systematic,
            periods_emax,
        ) = state_space._get_fortran_counterparts()
        simulated_data = pyth_simulate(
            state_space,
            num_agents_sim,
            periods_draws_sims,
            seed_sim,
            file_sim,
            edu_spec,
            optim_paras,
            is_debug,
        )
        py = simulated_data.copy().fillna(MISSING_FLOAT).values
        # NOTE(review): this first `data_array` assignment is immediately
        # overwritten below; the processed dataset is discarded -- confirm
        # whether that was intended.
        data_array = process_dataset(respy_obj).to_numpy()
        # It is very important to cut the data array down to the size of the
        # estimation sample for the calculation of contributions.
        data_array = py[: num_agents_est * num_periods, :]
        f2py = fort_debug.wrapper_simulate(
            periods_rewards_systematic,
            mapping_state_idx,
            periods_emax,
            states_all,
            num_periods,
            num_agents_sim,
            periods_draws_sims,
            seed_sim,
            file_sim,
            edu_spec["start"],
            edu_spec["max"],
            edu_spec["share"],
            edu_spec["lagged"],
            optim_paras["coeffs_common"],
            optim_paras["coeffs_a"],
            optim_paras["coeffs_b"],
            shocks_cholesky,
            delta,
            num_types,
            type_spec_shares,
            type_spec_shifts,
            is_debug,
        )
        assert_allclose(py, f2py)
        # We have to cut the simulated data to `num_agents_est` as the Python
        # implementation calculates the likelihood contributions for all agents in the
        # data.
        simulated_data = simulated_data.loc[
            simulated_data.Identifier.lt(num_agents_est)
        ]
        py = pyth_contributions(
            state_space, simulated_data, periods_draws_prob, tau, optim_paras
        )
        num_obs_agent = np.bincount(simulated_data.Identifier.to_numpy())
        f2py = fort_debug.wrapper_contributions(
            periods_rewards_systematic,
            mapping_state_idx,
            periods_emax,
            states_all,
            data_array,
            periods_draws_prob,
            tau,
            num_periods,
            num_draws_prob,
            num_agents_est,
            num_obs_agent,
            num_types,
            edu_spec["start"],
            edu_spec["max"],
            shocks_cholesky,
            delta,
            type_spec_shares,
            type_spec_shifts,
        )
        assert_allclose(py, f2py)
        # Evaluation of criterion function
        x0 = get_optim_paras(optim_paras, num_paras, "all", is_debug)
        py = pyth_criterion(
            x0,
            is_interpolated,
            num_points_interp,
            is_debug,
            simulated_data,
            tau,
            periods_draws_emax,
            periods_draws_prob,
            state_space,
        )
        f2py = fort_debug.wrapper_criterion(
            x0,
            is_interpolated,
            num_draws_emax,
            num_periods,
            num_points_interp,
            is_myopic,
            is_debug,
            data_array,
            num_draws_prob,
            tau,
            periods_draws_emax,
            periods_draws_prob,
            states_all,
            state_space.states_per_period,
            mapping_state_idx,
            max_states_period,
            num_agents_est,
            num_obs_agent,
            num_types,
            edu_spec["start"],
            edu_spec["max"],
            edu_spec["share"],
            type_spec_shares,
            type_spec_shifts,
            num_paras,
        )
        assert_allclose(py, f2py)
    def test_6(self):
        """ Further tests for the interpolation routines: the exogenous and
        endogenous variable construction and the prediction step are compared
        between the PYTHON and FORTRAN implementations.
        """
        params_spec, options_spec = generate_random_model()
        respy_obj = RespyCls(params_spec, options_spec)
        respy_obj = simulate_observed(respy_obj)
        # Extract class attributes
        (
            periods_rewards_systematic,
            mapping_state_idx,
            seed_prob,
            periods_emax,
            num_periods,
            states_all,
            num_points_interp,
            edu_spec,
            num_draws_emax,
            is_myopic,
            is_debug,
            is_interpolated,
            optim_paras,
            optimizer_options,
            file_sim,
            num_types,
        ) = dist_class_attributes(
            respy_obj,
            "periods_rewards_systematic",
            "mapping_state_idx",
            "seed_prob",
            "periods_emax",
            "num_periods",
            "states_all",
            "num_points_interp",
            "edu_spec",
            "num_draws_emax",
            "is_myopic",
            "is_debug",
            "is_interpolated",
            "optim_paras",
            "optimizer_options",
            "file_sim",
            "num_types",
        )
        shocks_cholesky = optim_paras["shocks_cholesky"]
        # Reconstruct the covariance from its Cholesky factor.
        shocks_cov = shocks_cholesky.dot(shocks_cholesky.T)
        coeffs_common = optim_paras["coeffs_common"]
        coeffs_a = optim_paras["coeffs_a"]
        coeffs_b = optim_paras["coeffs_b"]
        delta = optim_paras["delta"]
        # Add some additional objects required for the interfaces to the functions.
        period = np.random.choice(num_periods)
        periods_draws_emax = create_draws(
            num_periods, num_draws_emax, seed_prob, is_debug
        )
        draws_emax_standard = periods_draws_emax[period, :, :]
        draws_emax_risk = transform_disturbances(
            draws_emax_standard, np.zeros(4), shocks_cholesky
        )
        # Initialize Python version and solve.
        state_space = StateSpace(
            num_periods, num_types, edu_spec["start"], edu_spec["max"], optim_paras
        )
        # Integrate periods_emax in state_space: drop the NaN / missing-value
        # padding before stacking onto the four zero columns.
        state_space.emaxs = np.column_stack(
            (
                np.zeros((state_space.num_states, 4)),
                periods_emax[~np.isnan(periods_emax) & (periods_emax != MISSING_FLOAT)],
            )
        )
        # Fill emaxs_a - emaxs_home in the requested period
        states_period = state_space.get_attribute_from_period("states", period)
        # Do not get the emaxs from the previous period if we are in the last one.
        if period != state_space.num_periods - 1:
            state_space.emaxs = get_emaxs_of_subsequent_period(
                states_period, state_space.indexer, state_space.emaxs, edu_spec["max"]
            )
        num_states = state_space.states_per_period[period]
        shifts = np.random.randn(4)
        # Slight modification of request which assures that the interpolation code is
        # working.
        num_points_interp = min(num_points_interp, num_states)
        # Get the IS_SIMULATED indicator for the subset of points which are used for the
        # prediction model.
        is_simulated = get_simulated_indicator(
            num_points_interp, num_states, period, is_debug
        )
        # Unpack necessary attributes
        rewards_period = state_space.get_attribute_from_period("rewards", period)
        emaxs_period = state_space.get_attribute_from_period("emaxs", period)[:, :4]
        max_education = (
            state_space.get_attribute_from_period("states", period)[:, 3]
            >= edu_spec["max"]
        )
        # Construct the exogenous variables for all points of the state space.
        exogenous, max_emax = get_exogenous_variables(
            rewards_period, emaxs_period, shifts, optim_paras["delta"], max_education
        )
        # Align output between Python and Fortran version.
        py = (exogenous, max_emax)
        f90 = fort_debug.wrapper_get_exogenous_variables(
            period,
            num_periods,
            num_states,
            periods_rewards_systematic,
            shifts,
            mapping_state_idx,
            periods_emax,
            states_all,
            edu_spec["start"],
            edu_spec["max"],
            delta,
            coeffs_common,
            coeffs_a,
            coeffs_b,
            num_types,
        )
        assert_almost_equal(py[0], f90[0])
        assert_almost_equal(py[1], f90[1])
        # Construct endogenous variable so that the prediction model can be fitted.
        endogenous = get_endogenous_variable(
            rewards_period,
            emaxs_period,
            max_emax,
            is_simulated,
            draws_emax_risk,
            optim_paras["delta"],
            max_education,
        )
        f90 = fort_debug.wrapper_get_endogenous_variable(
            period,
            num_periods,
            num_states,
            periods_rewards_systematic,
            mapping_state_idx,
            periods_emax,
            states_all,
            is_simulated,
            num_draws_emax,
            max_emax,
            draws_emax_risk,
            edu_spec["start"],
            edu_spec["max"],
            shocks_cov,
            delta,
            coeffs_common,
            coeffs_a,
            coeffs_b,
        )
        assert_almost_equal(endogenous, replace_missing_values(f90))
        py = get_predictions(endogenous, exogenous, max_emax, is_simulated)
        f90 = fort_debug.wrapper_get_predictions(
            endogenous,
            exogenous,
            max_emax,
            is_simulated,
            num_points_interp,
            num_states,
            file_sim,
            False,
        )
        # This assertion fails if a column is all zeros.
        # NOTE(review): as written the guard only runs the assertion when the
        # whole matrix is zero, i.e. almost never; the intent looks like
        # `exogenous.any(axis=0).all()` (no all-zero column) -- confirm.
        if not exogenous.any(axis=0).any():
            assert_array_almost_equal(py, f90)
    def test_7(self):
        """ This is a special test for shared functions related to the interpolation
        setup: the random-choice helper and the simulated-indicator routine are
        checked across implementations using a grid written to disk.
        """
        # Impose constraints
        point_constr = {"num_periods": np.random.randint(2, 5)}
        params_spec, options_spec = generate_random_model(point_constr=point_constr)
        respy_obj = RespyCls(params_spec, options_spec)
        # Extract class attributes
        is_debug, num_periods = dist_class_attributes(
            respy_obj, "is_debug", "num_periods"
        )
        # Write out a grid for the interpolation
        max_states_period = write_interpolation_grid(respy_obj)
        # Draw random request for testing
        num_states = np.random.randint(1, max_states_period)
        candidates = list(range(num_states))
        period = np.random.randint(1, num_periods)
        num_points_interp = np.random.randint(1, num_states + 1)
        # Check function for random choice and make sure that there are no duplicates.
        args = (candidates, num_states, num_points_interp)
        f90 = fort_debug.wrapper_random_choice(*args)
        assert_equal(len(set(f90)), len(f90))
        assert_equal(len(f90), num_points_interp)
        # Check the standard cases of the function.
        args = (num_points_interp, num_states, period, is_debug, num_periods)
        f90 = fort_debug.wrapper_get_simulated_indicator(*args)
        assert_equal(len(f90), num_states)
        # NOTE(review): `np.all(f90)` collapses to a single bool, so this check
        # is almost always trivially True; `np.isin(f90, [0, 1]).all()` is
        # probably what was meant -- confirm.
        assert_equal(np.all(f90) in [0, 1], True)
        # Test the standardization across PYTHON, F2PY, and FORTRAN implementations.
        # This is possible as we write out an interpolation grid to disk which is used
        # for both functions.
        base_args = (num_points_interp, num_states, period, is_debug)
        args = base_args
        py = get_simulated_indicator(*args)
        args = base_args + (num_periods,)
        f90 = fort_debug.wrapper_get_simulated_indicator(*args)
        assert_array_equal(f90, 1 * py)
        os.unlink(".interpolation.respy.test")
        # Special case where number of interpolation points are same as the number of
        # candidates. In that case the returned indicator should be all TRUE.
        args = (num_states, num_states, period, True, num_periods)
        f90 = fort_debug.wrapper_get_simulated_indicator(*args)
        assert_equal(sum(f90), num_states)
def test_8(self):
    """Check that the Python and Fortran Cholesky constructions agree."""
    # Random parameter vector for the covariance block.
    params = np.random.uniform(size=54)

    # Build the Cholesky factor with both implementations.
    chol_py = extract_cholesky(params, info=0)
    chol_f90 = fort_debug.wrapper_extract_cholesky(params)

    # Both implementations must produce the exact same factor.
    np.testing.assert_equal(chol_f90, chol_py)
def test_9(self):
    """Check the scaling-magnitude helper against its Fortran counterpart."""
    for _ in range(1000):
        n_free = np.random.randint(1, 100)
        draws = np.random.uniform(-1000.0, 1000.0, size=n_free)

        scales_py = get_scales_magnitudes(draws)
        scales_f90 = fort_debug.wrapper_get_scales_magnitude(draws, n_free)

        assert_almost_equal(scales_py, scales_f90)
def test_10(self):
    """Check the per-individual observation counts against Fortran."""
    for _ in range(2):
        params_spec, options_spec = generate_random_model()
        model_obj = RespyCls(params_spec, options_spec)
        model_obj = simulate_observed(model_obj)

        n_agents = model_obj.get_attr("num_agents_est")
        data = process_dataset(model_obj).to_numpy()

        # Count rows per individual identifier (column 0).
        counts_py = np.bincount(data[:, 0].astype(int))
        counts_f90 = fort_debug.wrapper_get_num_obs_agent(data, n_agents)

        assert_almost_equal(counts_py, counts_f90)
def test_11(self):
    """Check the conditional type probabilities against Fortran."""
    for _ in range(1000):
        n_types = np.random.randint(1, 10)
        start_edu = np.random.randint(10, 100)
        shares = np.random.normal(0, 1, size=n_types * 2)

        probs_py = get_conditional_probabilities(shares, np.array([start_edu]))
        probs_f90 = fort_debug.wrapper_get_conditional_probabilities(
            shares, np.array([start_edu]), n_types
        )

        # Probabilities must sum to one and agree across implementations.
        assert_almost_equal(np.sum(probs_py), 1.0)
        assert_almost_equal(probs_py, probs_f90)
def test_12(self):
    """ Testing the functionality introduced to ensure that the simulation is
    independent of the order of initial conditions and types in the initialization
    file.
    """
    num_elements = np.random.randint(1, 11)
    input_array = np.random.normal(size=num_elements)
    # We first check the sorting implementation.
    py = sorted(input_array)
    f90 = fort_debug.wrapper_sorted(input_array, num_elements)
    assert_equal(py, f90)
    # Draw a random model to obtain education and type specifications.
    params_spec, options_spec = generate_random_model()
    respy_obj = RespyCls(params_spec, options_spec)
    edu_spec, optim_paras, num_types = dist_class_attributes(
        respy_obj, "edu_spec", "optim_paras", "num_types"
    )
    # Sorting of the education specification must agree across implementations.
    args = (edu_spec["start"], edu_spec["share"], edu_spec["max"])
    f90 = fort_debug.wrapper_sort_edu_spec(*args)
    py = sort_edu_spec(edu_spec)
    for i, label in enumerate(["start", "share", "max"]):
        assert_equal(py[label], f90[i])
    # Sorting of the type information must agree across implementations.
    py = sort_type_info(optim_paras, num_types)
    f90 = fort_debug.wrapper_sort_type_info(optim_paras["type_shares"], num_types)
    for i, label in enumerate(["order", "shares"]):
        assert_equal(py[label], f90[i])
|
{"hexsha": "6607969d6fc0a3b92bf5d95498a56a0d038ebe2d", "size": 35693, "ext": "py", "lang": "Python", "max_stars_repo_path": "respy/tests/test_f2py.py", "max_stars_repo_name": "tobiasraabe/respy_for_ma", "max_stars_repo_head_hexsha": "405f40851b176705fe924220fba606263d47f3d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "respy/tests/test_f2py.py", "max_issues_repo_name": "tobiasraabe/respy_for_ma", "max_issues_repo_head_hexsha": "405f40851b176705fe924220fba606263d47f3d6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "respy/tests/test_f2py.py", "max_forks_repo_name": "tobiasraabe/respy_for_ma", "max_forks_repo_head_hexsha": "405f40851b176705fe924220fba606263d47f3d6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8322274882, "max_line_length": 91, "alphanum_fraction": 0.6043762082, "include": true, "reason": "import numpy,from numpy,import scipy,from scipy", "num_tokens": 7733}
|
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!! SUBROUTINES FOR BOUNDARY CONDITIONS !!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine apply_BC()
!> Apply boundary conditions to the conserved-variable array U1D at both
!! ends of the 1D grid (cell 1 = piston side, cell nz = wall side) and copy
!! them into the predictor/corrector arrays U1D_p / U1D_c.
!! Layout: ion species i occupies equations 3*(i-1)+1..3*(i-1)+3
!! (continuity, momentum, energy); the electron energy equation sits at
!! index neqi+1.  All fields (U1D, F1D, rho, u, p, r, lm, ...) live in the
!! `constants` module.
use constants
implicit none
integer :: i
!boundary conditions for reflection at piston side
do i = 1, nspec
U1D(1,3*(i-1)+1) = rho(1,i)!U1D(2,3*(i-1)+1) - lm * F1D(2,3*(i-1)+1) !continuity
U1D(1,3*(i-1)+2) = rho(1,i) * u(1,i) !momentum
U1D(1,3*(i-1)+3) = p(1,i) / (g-1) + 0.5 * rho(1,i) * u(1,i)**2!U1D(2,3*(i-1)+3) - lm * F1D(2,3*(i-1)+3) !energy
enddo
!now electrons: outflow-style update from the neighbouring cell
U1D(1,neqi+1) = U1D(2,neqi+1) - lm * F1D(2,neqi+1)
if (.not.(geom=="spherical")) then
!boundary condition for reflection at wall side (slab geometry)
do i = 1, nspec
U1D(nz,3*(i-1)+1) = U1D(nz-1,3*(i-1)+1) - lm * F1D(nz-1,3*(i-1)+1) !continuity
U1D(nz,3*(i-1)+2) = 0. !momentum (reflecting wall: zero velocity)
U1D(nz,3*(i-1)+3) = U1D(nz-1,3*(i-1)+3) - lm * F1D(nz-1,3*(i-1)+3) !energy
enddo
!now electrons
U1D(nz,neqi+1) = U1D(nz-1,neqi+1)
else !spherical geometry: include the geometric source term ~ 2/r
!boundary condition for reflection at wall side
do i = 1, nspec
U1D(nz,3*(i-1)+1) = U1D(nz-1,3*(i-1)+1) - lm * F1D(nz-1,3*(i-1)+1) &
- 2. * dtm_ / r(nz) * phi * F1D(nz-1,3*(i-1)+1)
U1D(nz,3*(i-1)+2) = 0. !momentum
U1D(nz,3*(i-1)+3) = U1D(nz-1,3*(i-1)+3) - lm * F1D(nz-1,3*(i-1)+3) &
- 2. * dtm_ / r(nz) * phi * F1D(nz-1,3*(i-1)+3)
enddo
!now electrons
U1D(nz,neqi+1) = U1D(nz-1,neqi+1)
endif
!BC for predictor and corrector arrays: mirror the fixed boundary cells
U1D_p(1,:) = U1D(1,:)
U1D_c(1,:) = U1D(1,:)
U1D_p(nz,:) = U1D(nz,:)
U1D_c(nz,:) = U1D(nz,:)
end subroutine apply_BC
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!! SUBROUTINE ART_VISCOSITY !!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine artviscosity()
!> Set the artificial-viscosity coefficients eps_visc(:,1:neqi+1) on the grid.
!! Slab geometry: uniform value eps_visc_max everywhere.
!! Spherical geometry: power-law profile eps = r**a * 10**b, obtained from a
!! straight-line fit in log10(r)-log10(eps) space through the two endpoints
!! (eps_visc_max at the outer radius r(nz)... see note below) so that the
!! coefficient is reduced by the factor eps_compress at the other end.
!! All referenced fields live in the `constants` module.
use constants
implicit none
integer :: k
real*8 :: a, b, y1, y2, x1, x2
if (geom=="slab") then
  do k = 1, nz
    eps_visc(k,1:neqi+1) = eps_visc_max
  enddo
else !spherical
  !Fit log10(eps) = a*log10(r) + b through the endpoints
  !(r(nz), eps_visc_max) and (r(1), eps_visc_max/eps_compress).
  y1 = log10(eps_visc_max)
  y2 = log10(eps_visc_max / eps_compress)
  x1 = log10(r(nz))
  x2 = log10(r(1))
  b = ( y1*x2 - y2*x1 ) / ( x2 - x1 )
  a = ( y1 - b ) / x1
  do k = 1,nz
    eps_visc(k,1:neqi+1) = r(k)**a * 10**b
  enddo
  write(*,*) "eps_visc(1,1) = ", eps_visc(1,1), "eps_visc(nz,1) = ", eps_visc(nz,1)
endif
end subroutine artviscosity
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!! SUBROUTINE INITSPACEGRID !!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine initspacegrid()
!> Build the radial grid r(1:nz), running from the outer boundary L + rmin
!! down to rmin (dr is negative), and locate the index nz0 of the shell
!! interface r0(2,2) on that grid.  All fields (r, r0, L, rmin, dr, nz,
!! nregions, nz0) live in the `constants` module.
use constants
implicit none
integer :: k
r(1) = L + rmin
r0(:,nregions+1) = r0(:,nregions+1) + rmin !shift the outermost region boundary by rmin as well
do k = 2, nz
  r(k) = r(k-1) + dr !note that dr is negative
enddo
!pin the innermost cell exactly to rmin (avoids accumulated rounding)
r(nz) = rmin
!find out position of shell in r
k = 1
do while (r(k)>=r0(2,2)) !remember that r starts at Rmax
  k = k + 1
enddo
nz0 = k
end subroutine initspacegrid
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!! SUBROUTINE INITVARIABLES !!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine initvariables()
!> Initialize the primitive ion fields (rho, u, p, T) on the grid from the
!! per-region inputs temp0/den0/vel0 and the region boundaries r0.
!! Cells that fall outside every region for a species keep zero values.
!! All fields live in the `constants` module.
use constants
implicit none
integer :: m,j,k
!map the region-wise initial values onto the grid for each species
do j = 1, nspec
do m = 1, nregions
do k = 1, nz
if ( r(k) > r0(j,m) .and. r(k) <= r0(j,m+1)) then
T0(k,j) = temp0(j,m)
N0(k,j) = den0(j,m)
V0(k,j) = vel0(j,m)
endif
enddo
enddo
enddo
rho = 0.
u = 0.
p = 0.
T = 0.
nz00 = nz0 - dnz
!ions: derive mass density, velocity, pressure and temperature fields
do k = 1, nz
do j = 1, nspec
rho(k,j) = N0(k,j) * mi(j)
u(k,j) = V0(k,j)
p(k,j) = rho(k,j) / mi(j) * T0(k,j) !ideal gas p = n*T (T read in energy units, see read_files)
T(k,j) = T0(k,j)
enddo
enddo
end subroutine initvariables
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!! SUBROUTINE DO_SMOOTHING !!!!!!!!!!!!!!!!!!!!!!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine do_smoothing()
!> Smooth the steepest density, velocity and temperature gradients across
!! material interfaces by replacing the profile with a power law in radius
!! (a straight-line fit in log10(r) vs log10(quantity)) over a stencil of
!! nsmooth / vsmooth / tsmooth cells respectively.  Afterwards the ion
!! pressures are recomputed from the smoothed density and temperature.
!! No-op unless the global flag `smoothing` (module `constants`) is set.
!!
!! Bug fixes relative to the previous version:
!!  * the density section had no bounds guard, so a steepest gradient near
!!    the grid edges produced out-of-bounds accesses on rho/r;
!!  * the velocity section checked the bounds with tsmooth although its
!!    stencil extends vsmooth cells;
!!  * the ok flags are now reset when a later edge candidate replaces
!!    kgrad, so a stale ok=1 from an earlier candidate (or an earlier
!!    species) can no longer enable smoothing at an invalid point.
use constants
implicit none
integer :: m, j, k, kgrad(2)=0
real*8 :: a, b, y1, y2, x1, x2
real*8 :: grad, mingrad, maxgrad
real*8 :: nA(2)=0., nB(2)=0., uA(2)=0., uB(2)=0., TA(2)=0., TB(2)=0.
integer :: sgn1(2) = (/0,1/), sgn2(2) = (/1,0/), ok(2) = 0
real*8, parameter :: epsilon = 1.e-10 !floor for |u| so log10 stays finite
real*8 :: vel(nz)
if (smoothing) then
  !now smooth out interface between nz0 and nz0+nsmooth
  !------------------------
  write(*,*) "density smoothing function"
  write(*,*) "--------------------------"
  !for each ion species, find position of max and min gradients, then smooth out density
  ! NOTE(review): rho(k+1,j) may be zero for cells outside a species' region
  ! (see initvariables); the ratio then overflows -- confirm inputs here are
  ! strictly positive.
  ok = 0
  do j = 1, nspec
    maxgrad = 0.
    mingrad = 1.e20
    do k = 1, nz - 1
      grad = rho(k,j) / rho(k+1,j)
      if (grad > maxgrad) then
        maxgrad = grad
        kgrad(1) = k
        nA(1) = rho(k,j) / mi(j)
        nB(1) = rho(k+1,j) / mi(j)
        if ( k+nsmooth < nz .and. k-nsmooth > 0 ) then
          ok(1) = 1
        else
          ok(1) = 0
        endif
      endif
      if (grad < mingrad) then
        mingrad = grad
        kgrad(2) = k
        nA(2) = rho(k,j) / mi(j)
        nB(2) = rho(k+1,j) / mi(j)
        if ( k+nsmooth < nz .and. k-nsmooth > 0 ) then
          ok(2) = 1
        else
          ok(2) = 0
        endif
      endif
    enddo
    do m = 1,2
      if(ok(m).ne.1) cycle !stencil would leave the grid: skip this point
      do k = kgrad(m)-sgn1(m)*nsmooth, kgrad(m) + sgn2(m)*nsmooth
        !now smooth out interface: linear fit in log-log space
        y1 = log10( nA(m) )
        y2 = log10( nB(m) )
        x1 = log10( r(kgrad(m) - sgn1(m)*nsmooth) ) !note r is in micron
        x2 = log10( r(kgrad(m) + sgn2(m)*nsmooth) ) !note r is in micron
        b = ( y1*x2 - y2*x1 ) / ( x2 - x1 )
        a = ( y1 - b ) / x1
        rho(k,j) = ( 10**( a*log10(r(k)) + b ) ) * mi(j)
      enddo
    enddo
  enddo
  write(*,*) "velocity smoothing function"
  write(*,*) "--------------------------"
  !for each ion species, find position of max and min gradients, then smooth out velocity
  ok = 0
  do j = 1, nspec
    maxgrad = 0.
    mingrad = 1.e20
    vel = max(epsilon,abs(u(:,j))) !work on |u| floored at epsilon
    do k = 1, nz - 1
      grad = vel(k) / vel(k+1)
      if (grad > maxgrad) then
        maxgrad = grad
        kgrad(1) = k
        uA(1) = vel(k)
        uB(1) = vel(k+1)
        !bug fix: bounds must use vsmooth (the stencil below), not tsmooth
        if ( k+vsmooth < nz .and. k-vsmooth > 0 ) then
          ok(1) = 1
        else
          ok(1) = 0
        endif
      endif
      if (grad < mingrad) then
        mingrad = grad
        kgrad(2) = k
        uA(2) = vel(k)
        uB(2) = vel(k+1)
        if ( k+vsmooth < nz .and. k-vsmooth > 0 ) then
          ok(2) = 1
        else
          ok(2) = 0
        endif
      endif
    enddo
    do m = 1,2
      if(ok(m).ne.1) cycle !because it is a bad point...
      do k = kgrad(m)-sgn1(m)*vsmooth, kgrad(m) + sgn2(m)*vsmooth
        !now smooth out interface
        y1 = log10( uA(m) )
        y2 = log10( uB(m) )
        x1 = log10( r(kgrad(m) - sgn1(m)*vsmooth) ) !note r is in micron
        x2 = log10( r(kgrad(m) + sgn2(m)*vsmooth) ) !note r is in micron
        b = ( y1*x2 - y2*x1 ) / ( x2 - x1 )
        a = ( y1 - b ) / x1
        !NOTE(review): sign forced negative (flow assumed inward?) -- confirm
        u(k,j) = - ( 10**( a*log10(r(k)) + b ) )
      enddo
    enddo
  enddo
  write(*,*) "temperature smoothing function"
  write(*,*) "------------------------------"
  !for each ion species, find position of max and min gradients, then smooth out temperature
  ok = 0
  do j = 1, nspec
    maxgrad = 0.
    mingrad = 1.e20
    do k = 1, nz - 1
      grad = T(k,j) / T(k+1,j)
      if (grad > maxgrad) then
        maxgrad = grad
        kgrad(1) = k
        TA(1) = T(k,j)
        TB(1) = T(k+1,j)
        if ( k+tsmooth < nz .and. k-tsmooth > 0 ) then
          ok(1) = 1
        else
          ok(1) = 0
        endif
      endif
      if (grad < mingrad) then
        mingrad = grad
        kgrad(2) = k
        TA(2) = T(k,j)
        TB(2) = T(k+1,j)
        if ( k+tsmooth < nz .and. k-tsmooth > 0 ) then
          ok(2) = 1
        else
          ok(2) = 0
        endif
      endif
    enddo
    do m = 1,2
      if(ok(m).ne.1) cycle !because it is a bad point...
      do k = kgrad(m)-sgn1(m)*tsmooth, kgrad(m) + sgn2(m)*tsmooth
        !now smooth out interface
        y1 = log10( TA(m) )
        y2 = log10( TB(m) )
        x1 = log10( r(kgrad(m) - sgn1(m)*tsmooth) ) !note r is in micron
        x2 = log10( r(kgrad(m) + sgn2(m)*tsmooth) ) !note r is in micron
        b = ( y1*x2 - y2*x1 ) / ( x2 - x1 )
        a = ( y1 - b ) / x1
        T(k,j) = ( 10**( a*log10(r(k)) + b ) )
      enddo
    enddo
  enddo
  !now update pressure after all this smoothing...
  do j = 1, nspec
    p(:,j) = rho(:,j)/mi(j) * T(:,j)
  enddo
endif
end subroutine do_smoothing
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!------------- SUBROUTINE READ_FILES -----------------------
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine read_files()
!> Restart reader: load the grid r.dat, per-species velocity/density/
!! temperature files (vel<j>.dat, rho<j>.dat, temp<j>.dat for j = 1..nspec+1,
!! electrons last) and, if present, efield.dat.  Missing efield.dat is only
!! a warning; any other missing file stops the run.  Finally the boundary
!! position L is set from r(1) and the boundary temperatures are averaged
!! across species.  Arrays r, u, rho, T, Efield and scalars live in the
!! `constants` module.
use constants
implicit none
real*8, dimension(nz) :: var
real*8 :: Tmean
integer :: i,k,c1,c2,c3,ierr
character(len=20) :: filename, x1
character(len=8) :: fmt ! format descriptor
fmt = '(I1)' ! single-digit integer used to build the per-species file suffix
!c1/c2/c3 count the velocity/density/temperature files read so far
c1 = 0
c2 = 0
c3 = 0
write(*,*) "--------------------------------"
!loop index i selects which file to read: 1 -> grid, last -> efield,
!then nspec+1 velocity files, nspec+1 density files, nspec+1 temperature files
do i = 1,(nspec+1)*3+2
if (i==1) then
filename = 'r.dat'
write(*,*) "reading ", filename
write(*,*)
open(unit=unt, file = filename, action = 'read', iostat = ierr)
if(ierr/=0) then
write(*,*) 'problems in opening file r.dat'
stop
else
do k = 1, nz
read(unt,'(E20.10E3)') r(k)
enddo
endif
close(unt)
else if (i==(nspec+1)*3+2) then
filename = 'efield.dat'
write(*,*) "reading ", filename
open(unit=unt, file = filename, action = 'read', iostat = ierr)
if(ierr/=0) then
write(*,*) ' warning - efield.dat does not exist'
else
do k = 1, nz
read(unt,'(E20.10E3)') Efield(k)
enddo
endif
close(unt)
else if (i>1 .and. i<=(nspec+1)+1) then
c1 = c1 +1
write (x1,fmt) c1
filename = trim('vel'//trim(x1)//'.dat')
write(*,*) "reading ", filename
open(unit=unt, file = filename, action = 'read', iostat = ierr)
if(ierr/=0) then
write(*,*) 'problems in opening file', filename
stop
else
do k = 1, nz
read(unt,'(E20.10E3)') u(k,c1) !m/s
enddo
endif
close(unt)
if (i==nspec+2) write(*,*)
else if (i>(nspec+1)+1 .and. i<=2*(nspec+1)+1) then
c2 = c2 +1
write (x1,fmt) c2
filename = trim('rho'//trim(x1)//'.dat')
write(*,*) "reading ", filename
open(unit=unt, file = filename, action = 'read', iostat = ierr)
if(ierr/=0) then
write(*,*) 'problems in opening file', filename
stop
else
do k = 1, nz
read(unt,'(E20.10E3)') rho(k,c2) !kg/cm3
enddo
endif
close(unt)
if (i==2*(nspec+1)+1) write(*,*)
else if (i>2*(nspec+1)+1 .and. i<=3*(nspec+1)+1) then
c3 = c3 +1
write (x1,fmt) c3
filename = trim('temp'//trim(x1)//'.dat')
write(*,*) "reading ", filename
open(unit=unt, file = filename, action = 'read', iostat = ierr)
if(ierr/=0) then
write(*,*) 'problems in opening file', filename
stop
else
do k = 1, nz
read(unt,'(E20.10E3)') T(k,c3) !Joule
enddo
endif
close(unt)
if (i==3*(nspec+1)+1) write(*,*)
endif
enddo
write(*,*) "--------------------------------"
!define position of boundary
L = r(1)
!make temperature equal at border (average over ions + electrons)
Tmean = sum(T(1,:))/(nspec+1)
T(1,:) = Tmean
end subroutine read_files
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!---------------------- SUBROUTINE DEFINE FLUXES ----------------------------
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine define_fluxes()
!> Build the conserved-variable array U1D and flux array F1D for all ion
!! species plus the electron energy equation, enforcing quasi-neutrality
!! (electron mass density from the sum of Z_j * n_j) and the zero-current
!! condition (electron velocity from the net ion current).
!! Layout: species j occupies columns 3*(j-1)+1..3 (mass, momentum, energy);
!! the electron energy equation sits at column neqi+1.  All fields live in
!! the `constants` module.
use constants
implicit none
integer :: k,j
U1D = 0.
F1D = 0.
R1D = 0.
write(*,*)
write(*,*) " WARNING - forcing quasi-neutrality and zero-current conditions"
write(*,*)
rho(:,nspec+1) = 0.
u(:,nspec+1) = 0.
!electrons: accumulate charge density and current over all ion species
do k = 1, nz
do j = 1, nspec
rho(k,nspec+1) = rho(k,nspec+1) + me * Zi(j) * rho(k,j) / mi(j) !quasi-neutrality
u(k,nspec+1) = u(k,nspec+1) + Zi(j) * rho(k,j) * u(k,j) / mi(j) !zero-current condition
enddo
if(.not.(restart)) then !take species 1 as reference
p(k,nspec+1) = rho(k,nspec+1) / me * T(k,1) !electron pressure
T(k,nspec+1) = T(k,1) !electron temperature
else !we have read the electron temperature from file
p(k,nspec+1) = rho(k,nspec+1) / me * T(k,nspec+1) !electron pressure
endif
enddo
! correct for electron velocity: divide the accumulated current by n_e
u(:,nspec+1) = u(:,nspec+1) / ( rho(:,nspec+1) / me )
! apply BC at piston side
! do j = 1, nspec+1
! u(1,j) = V0(1,j)!0.
! enddo
! finally, calculate U1D and F1D for all species (Euler equations,
! ideal-gas closure with adiabatic index g)
do j = 1, nspec
U1D(:,3*(j-1)+1) = rho(:,j)
U1D(:,3*(j-1)+2) = rho(:,j) * u(:,j)
U1D(:,3*(j-1)+3) = p(:,j) / (g-1) + 0.5 * rho(:,j) * u(:,j)**2
F1D(:,3*(j-1)+1) = rho(:,j) * u(:,j)
F1D(:,3*(j-1)+2) = rho(:,j) * u(:,j)**2 + p(:,j)
F1D(:,3*(j-1)+3) = u(:,j) * ( g / (g-1) * p(:,j) + 0.5 * rho(:,j) * u(:,j)**2 )
enddo
!electrons: only the energy equation is carried (column neqi+1)
U1D(:,neqi+1) = p(:,nspec+1) / (g-1) + 0.5 * rho(:,nspec+1) * u(:,nspec+1)**2
F1D(:,neqi+1) = u(:,nspec+1) * ( g / (g-1) * p(:,nspec+1) + 0.5 * rho(:,nspec+1) * u(:,nspec+1)**2 )
end subroutine define_fluxes
|
{"hexsha": "b192a1a98622e6ccbcc8d6dd4902404bf3265198", "size": 13210, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "ibc.f90", "max_stars_repo_name": "cbellei/mION", "max_stars_repo_head_hexsha": "378d1f5ea8c419fdf063947ff54643a580b82174", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ibc.f90", "max_issues_repo_name": "cbellei/mION", "max_issues_repo_head_hexsha": "378d1f5ea8c419fdf063947ff54643a580b82174", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ibc.f90", "max_forks_repo_name": "cbellei/mION", "max_forks_repo_head_hexsha": "378d1f5ea8c419fdf063947ff54643a580b82174", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5208333333, "max_line_length": 113, "alphanum_fraction": 0.4665404996, "num_tokens": 5144}
|
using PastaQ
using ITensors
using Printf
# 1. Prepation of a thermal state
#
# In this example, we show how to prepare the finite-temperature state
# of a many-body system:
#
# ρ̂(β) = exp(-β Ĥ)
#
# where Ĥ is the Hamiltonian and β is the inverse temperature.
# We specifically consider the one-dimensional Ising model
#
# H = - ∑ᵢ σᶻ(i) σᶻ(i+1) - B ∑ᵢ σˣ(i)
#
# where B a the transverse magnetic field.
# 1a. Custom gates
#
# In order to build the thermal density operator, we implement the
# simplest flavor of imaginary-time evolution, breaking the operator
# exp(-βĤ) into a set of two-qubit and single-qubit gates, corresponding
# to the Ising interactions and the transverse field respectively. The
# time evolution to inverse temperature β is broken into elementary steps
# of size τ, where a gate is applied for each term appearing in the Hamiltonian.
#
# In this example, the quantum gates are not contained in the gate set of PastaQ.
# In order to extend, it is ony required to define the gate matrices using a
# format analogous to standard gates defined in gates.jl.
import PastaQ: gate
# Two-qubit imaginary-time gate for the Ising bond term: exp(τ Z⊗Z).
gate(::GateName"expτZZ"; τ::Float64) = exp(τ * kron(gate("Z"), gate("Z")))
# Single-qubit imaginary-time gate for the transverse field: exp(τ B X).
gate(::GateName"expτX"; τ::Float64, B::Float64) = exp(τ * B * gate("X"))
# 1b. Generating the thermal state
N = 10 # Number of spins
B = 1.0 # Transverse magnetic field
β = 1.0 # Inverse temperature
τ = 0.005 # Trotter step
# Depth of the circuit
depth = β ÷ τ
# Ising interactions
zz_layer = [("expτZZ", (j, j + 1), (τ=τ,)) for j in 1:(N - 1)]
# Transverse field
x_layer = [("expτX", j, (τ=τ, B=B)) for j in 1:N]
# Build the gate structure
circuit = []
for d in 1:depth
append!(circuit, zz_layer)
append!(circuit, x_layer)
end
#
# 2. Run imaginary-time evolution towards the zero temperature
# ground state.
#
# 2a. Ground state energy with DMRG
#
# We compute the ground state energy by running DMRG
# on the Hamiltonian MPO, whose algorithm is implemented in
# ITensors.jl.
# In order to generate the MPO for the Hamiltonian, we leverage
# the ITensors.jl `AutoMPO()` function, which automatically
# generates the local MPO tensors from a set of pre-defined operators.
# Build the transverse-field Ising Hamiltonian as an MPO and find its
# ground-state energy with DMRG (reference value for the β → ∞ limit).
sites = siteinds("Qubit", N)
ampo = AutoMPO()
for j in 1:(N - 1)
  # Ising ZZ interactions
  ampo .+= -1, "Z", j, "Z", j + 1
end
for j in 1:N
  # Transverse field X
  ampo .+= -B, "X", j
end
# Generate Hamiltonian MPO
H = MPO(ampo, sites)
# Density-matrix renormalization group settings
dmrg_iter = 5 # DMRG sweeps
dmrg_cutoff = 1E-10 # Truncation cutoff
Ψ0 = randomMPS(sites) # Initial state
sweeps = Sweeps(dmrg_iter)
maxdim!(sweeps, 10, 20, 30, 40, 50, 100) # Bond-dimension schedule per sweep
cutoff!(sweeps, dmrg_cutoff)
# Run
println("Running DMRG to get ground state of transverse field Ising model:")
E, Ψ = dmrg(H, Ψ0, sweeps)
@printf("\nGround state energy: %.8f \n", E)
println("\n---------------------------------------\n")
#
# 2b. Run the imaginary-time circuit
#
β = 5.0 # Inverse temperature
Δ = 0.5 # Intermediate time-step
# NOTE(review): this `depth` is recomputed but never used afterwards — the
# `circuit` applied below was built earlier with the first `depth` — confirm
# the intended step size.
depth = Δ ÷ τ # Depth of the circuit
steps = β ÷ Δ # Total number of circuit applications
# Initialize the density operator to the identity MPO (infinite temperature)
ρ = PastaQ.identity_mpo(H)
println("Running imaginary time evolution to approximate the density matrix ρ = exp(-βH):")
for b in 1:steps
  # Run the circuit
  global ρ = runcircuit(ρ, circuit; cutoff=1E-12)
  # Normalize the density operator
  normalize!(ρ)
  # Measure the energy at the current effective inverse temperature
  E_th = inner(ρ, H)
  @printf("β = %.1f : tr(ρH) = %.8f \n", (Δ * b), E_th)
end
|
{"hexsha": "7422babd0596338ae3f20a9e8334e63dc399f890", "size": 3475, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/5_finitetemperature.jl", "max_stars_repo_name": "GTorlai/PastaQ.jl", "max_stars_repo_head_hexsha": "fc5ae805681df2de8d3fe105b600156997140c18", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2020-09-30T01:13:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T05:47:50.000Z", "max_issues_repo_path": "examples/5_finitetemperature.jl", "max_issues_repo_name": "GTorlai/PastaQ.jl", "max_issues_repo_head_hexsha": "fc5ae805681df2de8d3fe105b600156997140c18", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 148, "max_issues_repo_issues_event_min_datetime": "2020-09-29T23:22:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:41:33.000Z", "max_forks_repo_path": "examples/5_finitetemperature.jl", "max_forks_repo_name": "GTorlai/PastaQ.jl", "max_forks_repo_head_hexsha": "fc5ae805681df2de8d3fe105b600156997140c18", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-09-30T15:09:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T21:40:29.000Z", "avg_line_length": 27.8, "max_line_length": 91, "alphanum_fraction": 0.6863309353, "num_tokens": 1113}
|
% mycorr2 modified version of the 2D correlation
% for the use with im2col and col2im
% see GETPOINT
%
%
% $Id: mycorr2.m,v 2.0 2003/06/19 12:06:52 svoboda Exp $
% Note: It is written this way in order to gain speed. The clarity of the code suffers accordingly.
function R = mycorr2(X,G,Gn,Gn2)
% MYCORR2  column-wise correlation scores of patch columns X with a template.
%   X   - matrix whose columns are vectorized patches (as from im2col)
%   G   - template patch (not used directly here; kept for interface
%         compatibility with callers that precompute Gn/Gn2 from it)
%   Gn  - precomputed zero-mean template, see commented lines below
%   Gn2 - precomputed template norm term, see commented lines below
%   R   - one score per column of X
%
% NOTE(review): the denominator uses smX = sum(mXn.^2) WITHOUT a sqrt, so
% this is not the textbook normalised cross-correlation; presumably a
% deliberate speed/ranking shortcut — confirm against GETPOINT usage.
% Gn = G-mean(G);
% Gn2 = sqrt(sum(Gn.^2));
mX = repmat(mean(X),size(X,1),1); % per-column means, replicated to X's size
mXn = X - mX; % zero-mean columns
smX = sum(mXn.^2); % per-column sum of squares
numerator = (mXn'*Gn)';
denominator = smX*Gn2;
R = numerator./denominator;
return
|
{"author": "strawlab", "repo": "MultiCamSelfCal", "sha": "0a26c88c63d8513eab76553033a9a6fb15ba6575", "save_path": "github-repos/MATLAB/strawlab-MultiCamSelfCal", "path": "github-repos/MATLAB/strawlab-MultiCamSelfCal/MultiCamSelfCal-0a26c88c63d8513eab76553033a9a6fb15ba6575/MultiCamSelfCal/FindingPoints/mycorr2.m"}
|
from pyscipopt import Sepa, Conshdlr, SCIP_RESULT, SCIP_STAGE
from time import time
import networkx as nx
import numpy as np
from utils.scip_models import maxcut_mccormic_model, MccormickCycleSeparator
from utils.misc import get_separator_cuts_applied
from utils.data import get_gnn_data
import os
import torch
import pickle
class SepaSampler(Sepa):
    """SCIP separator plug-in that only records data and never adds cuts.

    Every time SCIP invokes ``sepaexeclp`` the current solver state is
    snapshotted together with the previously taken cut-selection action,
    accumulating an episode of (state, action[, reward, next_state]) tuples
    in ``self.data_list`` for later imitation / reinforcement learning.
    """

    def __init__(self, G, x, y, name='Sampler', hparams=None):
        """
        Sample scip.Model state every time self.sepaexeclp is invoked and
        store the generated data objects in ``self.data_list``.

        :param G: networkx graph of the MaxCut instance.
        :param x, y: SCIP variables of the McCormick formulation.
        :param name: separator name; also used as prefix for the output files.
        :param hparams: optional hyper-parameter dict (paths, reward function,
            scaling factors, sample format). Read-only; missing keys fall
            back to defaults. (Was a mutable ``{}`` default before.)
        """
        hparams = hparams if hparams is not None else {}
        self.G = G
        self.x = x
        self.y = y
        self.name = name
        self.hparams = hparams

        # Episode buffer of collected samples.
        # (The previous version initialized this attribute twice.)
        self.data_list = []
        self.nsamples = 0

        # Output locations for the recorded episode and the solver stats.
        self.datapath = hparams.get('data_abspath', 'data')
        self.savedir = hparams.get('relative_savedir', 'examples')
        self.savedir = os.path.join(self.datapath, self.savedir)
        self.data_filepath = os.path.join(self.savedir, self.name + '_scip_state.pkl')
        self.stats_filepath = os.path.join(self.savedir, self.name + '_stats.pkl')

        # saving mode: 'episode' | 'state'
        # 'episode': save all the state-action pairs in a single file,
        #            as a Batch object.
        # 'state': save each state-action pair in a separate file
        #          as a Data object.
        self.saving_mode = hparams.get('saving_mode', 'episode')

        # Reward shaping configuration (see get_reward()).
        self.reward_func = hparams.get('reward_func', 'db_integral_credit')
        self.db_scale = hparams.get('db_scale', 1.0)
        self.lpiter_scale = hparams.get('lpiter_scale', 1.0)

        # Rolling (state, action) of the previous separation round.
        self.prev_action = None
        self.prev_state = None
        self.time_spent = 0
        self.finished_episode = False

        # Sample layout: 'sa' -> (state, action); 'sars' -> (s, a, r, s').
        self.sample_format = hparams.get('sample_format', "sars")

        # Per-round solver statistics, appended by update_stats().
        self.stats = {
            'ncuts': [],
            'ncuts_applied': [],
            'solving_time': [],
            'processed_nodes': [],
            'gap': [],
            'lp_rounds': [],
            'lp_iterations': [],
            'dualbound': []
        }

    def sepaexeclp(self):
        """SCIP callback: record a sample, add no cuts."""
        self.sample()
        return {"result": SCIP_RESULT.DIDNOTRUN}

    def update_stats(self):
        """Append the current solver statistics to ``self.stats``."""
        # collect statistics at the beginning of each round, starting from the second round.
        # the statistics are collected before taking any action, and refer to the last round.
        # NOTE: the last update must be done after the solver terminates optimization,
        # outside of this module, by calling McCormicCycleSeparator.update_stats() one more time.
        self.stats['ncuts'].append(self.model.getNCuts())
        self.stats['ncuts_applied'].append(self.model.getNCutsApplied())
        self.stats['solving_time'].append(self.model.getSolvingTime())
        self.stats['processed_nodes'].append(self.model.getNNodes())
        self.stats['gap'].append(self.model.getGap())
        self.stats['lp_rounds'].append(self.model.getNLPs())
        self.stats['lp_iterations'].append(self.model.getNLPIterations())
        self.stats['dualbound'].append(self.model.getDualbound())

    def get_reward(self):
        """
        Compute the action-wise reward according to ``self.reward_func``.

        :return: np.ndarray shaped like ``self.prev_action['activity']``.
        :raises ValueError: if ``self.reward_func`` is not a known option.
            (Previously an unknown option silently returned None.)
        """
        # Dual-bound progress made in the last round, and its LP-iteration cost.
        db_improvement = np.abs(self.stats['dualbound'][-1] - self.stats['dualbound'][-2]) * self.db_scale
        lp_iterations = (self.stats['lp_iterations'][-1] - self.stats['lp_iterations'][-2]) * self.lpiter_scale
        activity = self.prev_action['activity']
        if self.reward_func == 'db_improvement':
            return np.full_like(activity, fill_value=db_improvement)
        elif self.reward_func == 'db_integral':
            return np.full_like(activity, fill_value=- db_improvement * lp_iterations)
        elif self.reward_func == 'db_improvement_credit':
            return db_improvement * (1 + activity)
        elif self.reward_func == 'db_integral_credit':
            return db_improvement * lp_iterations * (activity - 1)
        elif self.reward_func == 'db_lpiter_fscore':
            # compute the harmonic average of p=db_improvement and q=1/lp_iterations
            # fscore = p*q/(p+q)
            fscore = db_improvement / (db_improvement * lp_iterations + 1)
            # this fscore will be high iff both of its elements are high,
            # i.e. great dual bound improvement in a few lp iterations
            return np.full_like(activity, fill_value=fscore)
        elif self.reward_func == 'db_lpiter_fscore_credit':
            # compute the fscore as above,
            # and assign the credit to the active constraints only.
            fscore = db_improvement / (db_improvement * lp_iterations + 1)
            return fscore * (1 + activity)
        raise ValueError('unknown reward_func: {}'.format(self.reward_func))

    def sample(self):
        """Snapshot the solver state and append the previous transition."""
        t0 = time()
        self.update_stats()
        cur_state = self.model.getState(state_format='tensor', get_available_cuts=True, query=self.prev_action)
        # Once a previous round exists we can emit a complete sample.
        if self.prev_action is not None:
            action = self.prev_action['applied']
            reward = self.get_reward()
            if self.sample_format == 'sa':
                data = (self.prev_state, action)
            elif self.sample_format == 'sars':
                # TODO verify
                data = (self.prev_state, action, reward, cur_state)
            else:
                # Previously an unknown format crashed later with a NameError.
                raise ValueError('unknown sample_format: {}'.format(self.sample_format))
            self.data_list.append(data)
        # termination condition. TODO: should never happen here
        if self.model.getGap() == 0:
            self.finished_episode = True
        self.prev_action = cur_state['cut_names']
        self.prev_state = cur_state
        self.time_spent += time() - t0

    def close(self):
        """ query the last action, build the last state-action pair of the episode,
        and save the episode to file """
        if not self.finished_episode and self.prev_action is not None:
            self.finished_episode = True
            self.update_stats()
            self.model.isInLPRows(self.prev_action)  # TODO this function doesn't really work.
            # NOTE: the terminal sample is always a (state, action) pair,
            # regardless of self.sample_format.
            data = (self.prev_state.copy(), self.prev_action.copy())
            self.data_list.append(data)
        self.save_data()

    def save_data(self):
        """Pickle the collected episode to ``self.data_filepath``."""
        if not os.path.exists(self.savedir):
            os.makedirs(self.savedir)
        with open(self.data_filepath, 'wb') as f:
            pickle.dump(self.data_list, f)
        print('Saved data to: ', self.data_filepath)

    def save_stats(self):
        """Pickle the collected statistics to ``self.stats_filepath``."""
        if not os.path.exists(self.savedir):
            os.makedirs(self.savedir)
        with open(self.stats_filepath, 'wb') as f:
            pickle.dump(self.stats, f)
        print('Saved stats to: ', self.stats_filepath)
def testSepaSampler():
    """Smoke test: solve a small MaxCut instance with the cycle separator
    attached, record an episode with SepaSampler, and reload the episode
    through a torch-geometric DataLoader."""
    import sys
    if '--mixed-debug' in sys.argv:
        import ptvsd
        port = 3000
        # ptvsd.enable_attach(secret='my_secret', address =('127.0.0.1', port))
        ptvsd.enable_attach(address=('127.0.0.1', port))
        ptvsd.wait_for_attach()
    # Build a random weighted Barabasi-Albert MaxCut instance.
    n = 20
    m = 10
    seed = 223
    G = nx.barabasi_albert_graph(n, m, seed=seed)
    nx.set_edge_attributes(G, {e: np.random.normal() for e in G.edges}, name='weight')
    model, x, y = maxcut_mccormic_model(G, use_general_cuts=False)
    # model.setRealParam('limits/time', 1000 * 1)
    """ Define a controller and appropriate callback to add user's cuts """
    hparams = {'max_per_root': 2000,
               'max_per_round': 20,
               'criterion': 'random',
               'forcecut': False,
               'cuts_budget': 2000,
               'policy': 'default'
               }
    # Cycle-inequality separator generates the cuts to be sampled.
    cycle_sepa = MccormickCycleSeparator(G=G, x=x, y=y, hparams=hparams)
    model.includeSepa(cycle_sepa, "MLCycles", "Generate cycle inequalities for MaxCut using McCormic variables exchange",
                      priority=1000000,
                      freq=1)
    # Sampler runs at lower priority so it observes each round's outcome.
    sampler = SepaSampler(G=G, x=x, y=y, name='samplertest')
    model.includeSepa(sampler, sampler.name,
                      "Reinforcement learning separator",
                      priority=100000,
                      freq=1)
    # SCIP separation limits and a 5-minute wall-clock limit.
    model.setIntParam('separating/maxcuts', 20)
    model.setIntParam('separating/maxcutsroot', 100)
    model.setIntParam('separating/maxstallroundsroot', -1)
    model.setIntParam('separating/maxroundsroot', 2100)
    model.setRealParam('limits/time', 300)
    # model.setLongintParam('limits/nodes', 1)
    model.optimize()
    cycle_sepa.finish_experiment()
    stats = cycle_sepa.stats
    print("Solved using user's cutting-planes callback. Objective {}".format(model.getObjVal()))
    cycle_cuts_applied = -1
    # TODO: avrech - find a more elegant way to retrive cycle_cuts_applied
    cuts, cuts_applied = get_separator_cuts_applied(model, 'MLCycles')
    # model.printStatistics()
    print('cycles added: ', cuts, ', cycles applied: ', cuts_applied)
    # print(ci_cut.stats)
    print('total cuts applied: ', model.getNCutsApplied())
    print('separation time frac: ', stats['cycles_sepa_time'][-1] / stats['solving_time'][-1])
    print('cuts applied vs time', stats['total_ncuts_applied'])
    print('finish')
    sampler.save_data()
    # Reload the recorded episode and batch it with torch-geometric.
    from torch_geometric.data import DataLoader
    data_list = torch.load(sampler.data_filepath)
    from experiments.imitation.cutting_planes_dataset import CuttingPlanesDataset
    dataset = CuttingPlanesDataset(sampler.savedir, savefile=False)
    loader = DataLoader(dataset, batch_size=2, follow_batch=['x_s', 'x_t'])
    batch = next(iter(loader))
    print('finished')
# Run the standalone smoke test when executed as a script.
if __name__ == '__main__':
    testSepaSampler()
|
{"hexsha": "edef21bea54b66f4f52385a9864670493fdf9d4b", "size": 9723, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/samplers.py", "max_stars_repo_name": "avrech/learning2cut", "max_stars_repo_head_hexsha": "c0febe84db5097413823821510a4ae3c996dec93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-09T05:26:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T09:27:22.000Z", "max_issues_repo_path": "utils/samplers.py", "max_issues_repo_name": "avrech/learning2cut", "max_issues_repo_head_hexsha": "c0febe84db5097413823821510a4ae3c996dec93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-08-08T20:46:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T01:00:47.000Z", "max_forks_repo_path": "utils/samplers.py", "max_forks_repo_name": "avrech/learning2cut", "max_forks_repo_head_hexsha": "c0febe84db5097413823821510a4ae3c996dec93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-18T23:27:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-18T23:27:45.000Z", "avg_line_length": 41.9094827586, "max_line_length": 121, "alphanum_fraction": 0.6303609997, "include": true, "reason": "import numpy,import networkx", "num_tokens": 2314}
|
"""
Models for the joint probability distribution.
"""
from abc import ABC, abstractmethod
import numpy as np
import scipy.integrate as integrate
from virocon.distributions import ConditionalDistribution
from virocon.intervals import NumberOfIntervalsSlicer
__all__ = ["GlobalHierarchicalModel"]
class MultivariateModel(ABC):
    """Abstract base class for statistical models of several variables.

    Concrete subclasses must implement the joint density and distribution
    functions, their one-dimensional marginals, and a sampling routine.
    """

    @abstractmethod
    def pdf(self, *args, **kwargs):
        """Joint probability density function."""

    @abstractmethod
    def cdf(self, *args, **kwargs):
        """Joint cumulative distribution function."""

    @abstractmethod
    def marginal_pdf(self, *args, **kwargs):
        """Probability density function of a single dimension."""

    @abstractmethod
    def marginal_cdf(self, *args, **kwargs):
        """Cumulative distribution function of a single dimension."""

    @abstractmethod
    def marginal_icdf(self, *args, **kwargs):
        """Inverse cumulative distribution (quantile) function of a single dimension."""

    @abstractmethod
    def draw_sample(self, *args, **kwargs):
        """Draw a random sample of length n."""
class GlobalHierarchicalModel(MultivariateModel):
    """
    Hierarchical probabilistic model.

    Probabilistic model that covers the complete range of an environmental
    variable ("global"), following a particular hierarchical dependence
    structure. The factorization describes a hierarchy where a random
    variable with index i can only depend upon random variables with
    indices less than i [1]_ .

    Parameters
    ----------
    dist_descriptions : list of dict
        One description per dimension. Mandatory key: "distribution".
        Optional keys: "intervals", "conditional_on", "parameters".

    Attributes
    ----------
    distributions : list
        The distributions used in the GlobalHierachicalModel.
    conditional_on : list
        Indicates the dependencies between the variables of the model. One
        entry per distribution/dimension. Contains either None or int. If the
        ith entry is None, the ith distribution is unconditional. If the ith
        entry is an int j, the ith distribution depends on the jth dimension.
    interval_slicers : list
        One interval slicer per dimension. The interval slicer used for
        slicing the intervals of the corresponding dimension, when necessary
        during fitting.
    n_dim : int
        The number of dimensions, i.e. the number of variables of the model.

    References
    ----------
    .. [1] Haselsteiner, A.F.; Sander, A.; Ohlendorf, J.H.; Thoben, K.D. (2020)
        Global hierarchical models for wind and wave contours: physical
        interpretations of the dependence functions. OMAE 2020, Fort Lauderdale,
        USA. Proceedings of the 39th International Conference on Ocean,
        Offshore and Arctic Engineering.

    Examples
    --------
    Create a Hs-Tz model and fit it to the available data. The following
    example follows the methodology of OMAE2020 [1]_ .

    Example 1.1:
    Load the predefined OMAE 2020 model of Hs-Tz.

    >>> from virocon import (GlobalHierarchicalModel, get_OMAE2020_Hs_Tz,
    ...                      read_ec_benchmark_dataset)
    >>> data = read_ec_benchmark_dataset("datasets/ec-benchmark_dataset_D_1year.txt")
    >>> dist_descriptions, fit_descriptions, semantics = get_OMAE2020_Hs_Tz()
    >>> ghm = GlobalHierarchicalModel(dist_descriptions)
    >>> ghm.fit(data, fit_descriptions=fit_descriptions)

    Example 1.2:
    Create the same OMAE 2020 model manually.

    >>> from virocon import (DependenceFunction, ExponentiatedWeibullDistribution,
    ...                      LogNormalDistribution, WidthOfIntervalSlicer)
    >>> def _asymdecrease3(x, a, b, c):
    ...    return a + b / (1 + c * x)
    >>> def _lnsquare2(x, a, b, c):
    ...    return np.log(a + b * np.sqrt(np.divide(x, 9.81)))
    >>> bounds = [(0, None),
    ...           (0, None),
    ...           (None, None)]
    >>> sigma_dep = DependenceFunction(_asymdecrease3, bounds=bounds)
    >>> mu_dep = DependenceFunction(_lnsquare2, bounds=bounds)
    >>> dist_description_hs = {"distribution" : ExponentiatedWeibullDistribution(),
    ...                        "intervals" : WidthOfIntervalSlicer(width=0.5,
    ...                                                            min_n_points=50)
    ...                        }
    >>> dist_description_tz = {"distribution" : LogNormalDistribution(),
    ...                        "conditional_on" : 0,
    ...                        "parameters" : {"sigma" : sigma_dep,
    ...                                        "mu": mu_dep,
    ...                                        },
    ...                        }
    >>> dist_descriptions = [dist_description_hs, dist_description_tz]
    >>> fit_description_hs = {"method" : "wlsq", "weights" : "quadratic"}
    >>> fit_descriptions = [fit_description_hs, None]
    >>> semantics = {"names" : ["Significant wave height", "Zero-crossing wave period"],
    ...              "symbols" : ["H_s", "T_z"],
    ...              "units" : ["m", "s"]
    ...              }
    >>> ghm = GlobalHierarchicalModel(dist_descriptions)
    >>> ghm.fit(data, fit_descriptions=fit_descriptions)

    """

    # The only keys a dist_description may contain; anything else is rejected.
    _dist_description_keys = {
        "distribution",
        "intervals",
        "conditional_on",
        "parameters",
    }

    def __init__(self, dist_descriptions):
        self.distributions = []
        self.conditional_on = []
        self.interval_slicers = []
        self.n_dim = len(dist_descriptions)
        self._check_dist_descriptions(dist_descriptions)
        for dist_desc in dist_descriptions:
            dist = dist_desc["distribution"]
            # Default slicer is used when fitting a distribution that others
            # are conditioned on and no explicit slicer was provided.
            self.interval_slicers.append(
                dist_desc.get("intervals", NumberOfIntervalsSlicer(n_intervals=10))
            )

            if "conditional_on" in dist_desc:
                self.conditional_on.append(dist_desc["conditional_on"])
                # Wrap the distribution so its parameters become functions of
                # the conditioning variable.
                dist = ConditionalDistribution(dist, dist_desc["parameters"])
                self.distributions.append(dist)
            else:
                self.conditional_on.append(None)
                self.distributions.append(dist)

        # The hierarchy requires an unconditional root distribution.
        if self.conditional_on[0] is not None:
            raise RuntimeError(
                "Illegal state encountered. The first dimension "
                "has to be independent, but was conditional on "
                f"{self.conditional_on[0]}."
            )

    def __repr__(self):
        name = "GlobalHierarchicalModel"
        dists = repr(self.distributions)
        cond_on = repr(self.conditional_on)
        return f"{name}(distributions={dists}, conditional_on={cond_on})"

    def _check_dist_descriptions(self, dist_descriptions):
        """Validate the dist_descriptions passed to __init__.

        Raises ValueError on a missing mandatory key, a conditional
        distribution without parameters, or an unknown key.
        """
        for i, dist_desc in enumerate(dist_descriptions):
            if not "distribution" in dist_desc:
                raise ValueError(
                    "Mandatory key 'distribution' missing in "
                    f"dist_description for dimension {i}"
                )

            if "conditional_on" in dist_desc and not "parameters" in dist_desc:
                raise ValueError(
                    "For conditional distributions the "
                    "dist_description key 'parameters' "
                    f"is mandatory but was missing for dimension {i}."
                )

            unknown_keys = set(dist_desc).difference(self._dist_description_keys)
            if len(unknown_keys) > 0:
                raise ValueError(
                    "Unknown key(s) in dist_description for "
                    f"dimension {i}."
                    f"Known keys are {self._dist_description_keys}, "
                    f"but found {unknown_keys}."
                )

    def _split_in_intervals(self, data, dist_idx, conditioning_idx):
        """Slice the data of dimension dist_idx into intervals of the
        conditioning dimension.

        Returns the per-interval data, the interval centers and the
        interval boundaries.
        """
        slicer = self.interval_slicers[conditioning_idx]
        conditioning_data = data[:, conditioning_idx]
        interval_slices, interval_centers, interval_boundaries = slicer.slice_(
            conditioning_data
        )

        dist_data = [data[int_slice, dist_idx] for int_slice in interval_slices]

        return dist_data, interval_centers, interval_boundaries

    def _check_and_fill_fit_desc(self, fit_descriptions):
        """Validate fit_descriptions and fill in defaults.

        Returns a new list; the caller's list and dicts are not mutated.
        """
        default_fit_desc = {"method": "mle", "weights": None}
        if fit_descriptions is None:
            fit_descriptions = [default_fit_desc for i in range(self.n_dim)]
        else:
            if len(fit_descriptions) != self.n_dim:
                raise ValueError(
                    "fit_description must have one entry per dimension, but "
                    f"a length of {len(fit_descriptions)} != {self.n_dim} was found."
                )
            # Work on a copy so the caller's argument is left untouched.
            fit_descriptions = list(fit_descriptions)
            for i in range(len(fit_descriptions)):
                if fit_descriptions[i] is None:
                    fit_descriptions[i] = default_fit_desc
                else:
                    fit_desc = dict(fit_descriptions[i])
                    if not "method" in fit_desc:
                        raise ValueError(
                            "Mandatory key 'method' missing in "
                            f"fit_description for dimension {i}."
                        )
                    if not "weights" in fit_desc:
                        fit_desc["weights"] = None
                    fit_descriptions[i] = fit_desc

        return fit_descriptions

    def fit(self, data, fit_descriptions=None):
        """
        Fit joint model to data.

        Method of estimating the parameters of a probability distribution to
        given data.

        Parameters
        ----------
        data : array-like
            The data that should be used to fit the joint model.
            Shape: (number of realizations, n_dim)
        fit_descriptions : list of dict, optional
            One fit description per dimension (or None for defaults). Each
            dict supports the keys "method" (mandatory) and "weights".
            Defaults to None, i.e. maximum likelihood for every dimension.
        """
        data = np.array(data)
        fit_descriptions = self._check_and_fill_fit_desc(fit_descriptions)

        if data.shape[-1] != self.n_dim:
            raise ValueError(
                "The dimension of data does not match the "
                "dimension of the model. "
                f"The model has {self.n_dim} dimensions, "
                f"but the data has {data.shape[-1]} dimensions."
            )

        for i in range(self.n_dim):
            dist = self.distributions[i]
            conditioning_idx = self.conditional_on[i]
            fit_method = fit_descriptions[i]["method"]
            weights = fit_descriptions[i]["weights"]

            if conditioning_idx is None:
                dist.fit(data[:, i], fit_method, weights)
            else:
                # Fit the conditional distribution interval-wise: slice the
                # data along the conditioning dimension first.
                (
                    dist_data,
                    conditioning_data,
                    conditioning_interval_boundaries,
                ) = self._split_in_intervals(data, i, conditioning_idx)
                # dist_data is a list of ndarray
                # and conditioning_data is a list of interval points
                dist.fit(
                    dist_data,
                    conditioning_data,
                    conditioning_interval_boundaries,
                    fit_method,
                    weights,
                )

            self.distributions[
                i
            ] = dist  # TODO is the writeback necessary? -> probably not

    def pdf(self, x):
        """
        Joint probability density function.

        Parameters
        ----------
        x : array_like
            Points at which the pdf is evaluated.
            Shape: (n, n_dim), where n is the number of points at which the
            pdf should be evaluated.

        Returns
        -------
        ndarray of shape (n,) with the joint density at each point.
        """
        # Ensure that x is a 2D float array. Forcing float here prevents
        # integer input from producing an integer density buffer, which
        # would silently truncate the results.
        x = np.array(x)
        if x.ndim == 1:
            x = np.array([x])
        x = np.asarray_chkfinite(x, dtype=float)

        fs = np.empty_like(x)
        # Root dimension is unconditional by construction (see __init__).
        fs[:, 0] = self.distributions[0].pdf(x[:, 0])
        for i in range(1, self.n_dim):
            if self.conditional_on[i] is None:
                fs[:, i] = self.distributions[i].pdf(x[:, i])
            else:
                cond_idx = self.conditional_on[i]
                fs[:, i] = self.distributions[i].pdf(x[:, i], given=x[:, cond_idx])

        # Chain rule: the joint density is the product of the
        # (conditional) densities along the hierarchy.
        return np.prod(fs, axis=-1)

    def cdf(self, x):
        """
        Joint cumulative distribution function.

        Evaluated by numerically integrating the joint pdf; this can be slow.

        Parameters
        ----------
        x : array_like
            Points at which the cdf is evaluated.
            Shape: (n, n_dim), where n is the number of points at which the
            cdf should be evaluated.
        """
        # Ensure that x is a 2D float numpy array.
        x = np.array(x)
        if x.ndim == 1:
            x = np.array([x])
        x = np.asarray_chkfinite(x, dtype=float)

        n_dim = self.n_dim
        integral_order = list(range(n_dim))

        # scipy.integrate.nquad expects one scalar argument per dimension,
        # so wrap the joint pdf accordingly.
        def get_integral_func():
            arg_order = integral_order

            def integral_func(*args):
                assert len(args) == n_dim
                # sort arguments as expected by pdf (the models order)
                x = np.array(args)[np.argsort(arg_order)].reshape((1, n_dim))
                return self.pdf(x)

            return integral_func

        # All variables are assumed non-negative (environmental variables).
        lower_integration_limits = [0] * n_dim

        integral_func = get_integral_func()

        p = np.empty(len(x))
        for i in range(len(x)):
            integration_limits = [
                (lower_integration_limits[j], x[i, j]) for j in range(n_dim)
            ]
            p[i], _ = integrate.nquad(integral_func, integration_limits)

        return p

    def marginal_pdf(self, x, dim):
        """
        Marginal probability density function.

        Parameters
        ----------
        x : array_like
            Points at which the pdf is evaluated.
            Shape: 1-dimensional
        dim : int
            The dimension for which the marginal is calculated.
        """
        # Force a 1D float array so the result buffer below is float-typed
        # even for integer input.
        x = np.atleast_1d(np.asarray(x, dtype=float))

        if self.conditional_on[dim] is None:
            # the distribution is not conditional -> it is the marginal
            return self.distributions[dim].pdf(x)

        # the distribution is conditional
        # thus we integrate over the joint pdf to get the marginal
        n_dim = self.n_dim
        integral_order = list(range(n_dim))
        del integral_order[dim]  # we do not integrate over the dim'th variable
        integral_order = integral_order[::-1]  # we integrate over last dimensions first

        # scipy.integrate.nquad expects one argument per dimension
        # thus we have to wrap the (joint) pdf
        def get_integral_func():
            arg_order = integral_order + [dim]

            def integral_func(*args):
                assert len(args) == n_dim
                # sort arguments as expected by pdf (the models order)
                x = np.array(args)[np.argsort(arg_order)].reshape((1, n_dim))
                return self.pdf(x)

            return integral_func

        # TODO make limits a property of the distributions?
        # "for var in integral_order append limits"
        # but for now we simplify that all vars have the same limits
        limit = (0, np.inf)
        limits = [limit] * (n_dim - 1)

        f = np.empty_like(x)
        integral_func = get_integral_func()
        for i, x_i in enumerate(x):
            result, _ = integrate.nquad(integral_func, ranges=limits, args=[x_i])
            f[i] = result

        return f

    def marginal_cdf(self, x, dim):
        """
        Marginal cumulative distribution function.

        Parameters
        ----------
        x : array_like
            Points at which the cdf is evaluated.
            Shape: 1-dimensional
        dim : int
            The dimension for which the marginal is calculated.
        """
        # Force a 1D float array so the result buffer below is float-typed
        # even for integer input.
        x = np.atleast_1d(np.asarray(x, dtype=float))

        if self.conditional_on[dim] is None:
            # the distribution is not conditional -> it is the marginal
            return self.distributions[dim].cdf(x)

        # the distribution is conditional
        # thus we integrate over the joint pdf to get the marginal pdf
        # and then integrate the marginal pdf to get the marginal cdf
        n_dim = self.n_dim
        integral_order = list(range(n_dim))
        del integral_order[dim]
        integral_order = integral_order[::-1]  # we integrate over last dimensions first
        integral_order = integral_order + [
            dim
        ]  # finally we integrate over the dim'th var

        # scipy.integrate.nquad expects one argument per dimension
        # thus we have to wrap the (joint) pdf
        def get_integral_func():
            arg_order = integral_order

            def integral_func(*args):
                assert len(args) == n_dim
                # sort arguments as expected by pdf (the models order)
                x = np.array(args)[np.argsort(arg_order)].reshape((1, n_dim))
                return self.pdf(x)

            return integral_func

        # TODO make limits (or lower limit) a property of the distributions?
        limit = (0, np.inf)
        limits = [limit] * (n_dim - 1)

        F = np.empty_like(x)
        integral_func = get_integral_func()
        for i, x_i in enumerate(x):
            result, _ = integrate.nquad(integral_func, ranges=limits + [(0, x_i)])
            F[i] = result

        return F

    def marginal_icdf(self, p, dim, precision_factor=1):
        """
        Marginal inverse cumulative distribution function.

        Estimates the marginal icdf by drawing a Monte-Carlo sample.

        Parameters
        ----------
        p : array_like
            Probabilities for which the icdf is evaluated.
            Shape: 1-dimensional
        dim : int
            The dimension for which the marginal is calculated.
        precision_factor : float
            Precision factor that determines the size of the sample to draw.
            A sample is drawn of which on average precision_factor * 100
            realizations exceed the quantile. Minimum sample size is 100000.
            Defaults to 1.0
        """
        p = np.array(p)
        if self.conditional_on[dim] is None:
            # the distribution is not conditional -> it is the marginal
            return self.distributions[dim].icdf(p)

        # Size the sample so that, on average, nr_exceeding_points
        # realizations lie beyond the most extreme requested quantile.
        p_min = np.min(p)
        p_max = np.max(p)
        nr_exceeding_points = 100 * precision_factor
        p_small = np.min([p_min, 1 - p_max])
        n = int((1 / p_small) * nr_exceeding_points)
        n = max([n, 100000])

        sample = self.draw_sample(n)
        x = np.quantile(sample[:, dim], p)
        return x

    def draw_sample(self, n):
        """
        Draw a random sample of size n.

        Parameters
        ----------
        n : int
            Sample size.

        Returns
        -------
        ndarray of shape (n, n_dim).
        """
        samples = np.zeros((n, self.n_dim))
        for i in range(self.n_dim):
            cond_idx = self.conditional_on[i]
            dist = self.distributions[i]
            if cond_idx is None:
                samples[:, i] = dist.draw_sample(n)
            else:
                # NOTE(review): one sample per conditioning value is drawn;
                # the first argument 1 is the per-value sample size expected
                # by ConditionalDistribution — confirm against its API.
                conditioning_values = samples[:, cond_idx]
                samples[:, i] = dist.draw_sample(1, conditioning_values)
        return samples
|
{"hexsha": "890b56743d134853b9f30ed0048b7801d2c5471c", "size": 19902, "ext": "py", "lang": "Python", "max_stars_repo_path": "virocon/jointmodels.py", "max_stars_repo_name": "ahaselsteiner/viroconcom", "max_stars_repo_head_hexsha": "69b903dabde1de73c85b5648f66523b9c151996e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2021-06-02T17:03:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T16:19:53.000Z", "max_issues_repo_path": "virocon/jointmodels.py", "max_issues_repo_name": "virocon-organization/viroconcom", "max_issues_repo_head_hexsha": "186d768a7f39788b827173467febb038044199c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2018-10-09T16:22:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-21T15:06:26.000Z", "max_forks_repo_path": "virocon/jointmodels.py", "max_forks_repo_name": "virocon-organization/viroconcom", "max_forks_repo_head_hexsha": "186d768a7f39788b827173467febb038044199c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-10-16T19:23:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-16T11:50:22.000Z", "avg_line_length": 33.3366834171, "max_line_length": 88, "alphanum_fraction": 0.5568284595, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4205}
|
"""Least squares fitting.
Implements a penalised least-squares fit.
It puts point data onto the mesh.
The penalty term (or smoothing term) is controlled by the smoothing
parameter alpha.
With a value of alpha=0, the fit function will attempt
to interpolate as closely as possible in the least-squares sense.
With values alpha > 0, a certain amount of smoothing will be applied.
A positive alpha is essential in cases where there are too few
data points.
A negative alpha is not allowed.
A typical value of alpha is 1.0e-6
Ole Nielsen, Stephen Roberts, Duncan Gray, Christopher Zoppou
Geoscience Australia, 2004.
TO DO
* test geo_ref, geo_spatial
IDEAS
* (DSG-) Change the interface of fit, so a domain object can
be passed in. (I don't know if this is feasible). If could
save time/memory.
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as num
from . import fitsmooth
import sys
from builtins import str
from builtins import range
from past.builtins import basestring
from anuga.abstract_2d_finite_volumes.neighbour_mesh import Mesh
from anuga.caching import cache
from anuga.geospatial_data.geospatial_data import Geospatial_data, \
ensure_absolute
from anuga.fit_interpolate.general_fit_interpolate import FitInterpolate
from anuga.utilities.sparse import Sparse_CSR
from anuga.utilities.numerical_tools import ensure_numeric
from anuga.utilities.cg_solve import conjugate_gradient
from anuga.config import default_smoothing_parameter as DEFAULT_ALPHA
import anuga.utilities.log as log
# Python 2.7 Hack
try:
from exceptions import Exception
except:
pass
class TooFewPointsError(Exception):
    """Raised when there are too few data points for an unsmoothed fit."""
class VertsWithNoTrianglesError(Exception):
    """Raised when the mesh contains vertices that belong to no triangle."""
# ----------------------------------------------
# C code to build interpolation matrices
# ----------------------------------------------
class Fit(FitInterpolate):
    """Penalised least-squares fit of point data onto the vertices of a mesh.

    Builds the normal equations (AtA, Atz) from point data, adds a
    smoothing term D weighted by alpha, and solves the resulting system
    with a conjugate-gradient solver.
    """

    def __init__(self,
                 vertex_coordinates=None,
                 triangles=None,
                 mesh=None,
                 mesh_origin=None,
                 alpha=None,
                 verbose=False,
                 cg_precon='Jacobi',
                 use_c_cg=True):
        """
        Padarn Note 05/12/12: This documentation should probably
        be updated to account for the fact that the fitting is now
        done in C. I wasn't sure what details were necessary though.

        Fit data at points to the vertices of a mesh.

        Inputs:

          vertex_coordinates: List of coordinate pairs [xi, eta] of
          points constituting a mesh (or an m x 2 numeric array or
              a geospatial object)
              Points may appear multiple times
              (e.g. if vertices have discontinuities)

          triangles: List of 3-tuples (or a numeric array) of
              integers representing indices of all vertices in the mesh.

          mesh_origin: A geo_reference object or 3-tuples consisting of
              UTM zone, easting and northing.
              If specified vertex coordinates are assumed to be
              relative to their respective origins.

          alpha: Smoothing parameter; None selects DEFAULT_ALPHA.

          cg_precon: Preconditioner for the conjugate-gradient solver.

          use_c_cg: Use the C implementation of conjugate gradient.

          Note: Don't supply a vertex coords as a geospatial object and
              a mesh origin, since geospatial has its own mesh origin.

        Usage,
        To use this in a blocking way, call build_fit_subset, with z info,
        and then fit, with no point coord, z info.
        """
        # Initialise variables
        if alpha is None:
            self.alpha = DEFAULT_ALPHA
        else:
            self.alpha = alpha

        FitInterpolate.__init__(self,
                                vertex_coordinates,
                                triangles,
                                mesh,
                                mesh_origin=mesh_origin,
                                verbose=verbose)

        # Accumulators for the normal equations, filled by
        # _build_matrix_AtA_Atz (possibly over several blocks of points).
        self.AtA = None
        self.Atz = None
        self.D = None
        self.point_count = 0

        # NOTE PADARN: NEEDS FIXING - currently need smoothing matrix
        # even if alpha is zero, due to C function expecting it. This
        # could and should be removed.
        if verbose:
            log.critical('Building smoothing matrix')
        self.D = self._build_smoothing_matrix_D()

        bd_poly = self.mesh.get_boundary_polygon()
        self.mesh_boundary_polygon = ensure_numeric(bd_poly)

        self.cg_precon = cg_precon
        self.use_c_cg = use_c_cg

    def _build_coefficient_matrix_B(self,
                                    verbose=False):
        """
        Build final coefficient matrix B = AtA + alpha * D from AtA and D.
        """
        msize = self.mesh.number_of_nodes

        self.B = fitsmooth.build_matrix_B(self.D,
                                          self.AtA, self.alpha)

        # Convert self.B matrix to CSR format
        self.B = Sparse_CSR(data=num.array(self.B[0]),
                            Colind=num.array(self.B[1]),
                            rowptr=num.array(self.B[2]),
                            m=msize, n=msize)
        # NOTE PADARN: The above step could potentially be removed
        # and the sparse matrix worked with directly in C. Not sure
        # if this would be worthwhile.

    def _build_smoothing_matrix_D(self):
        """Build m x m smoothing matrix, where
        m is the number of basis functions phi_k (one per vertex)

        The smoothing matrix is defined as

        D = D1 + D2

        where

        [D1]_{k,l} = \int_\Omega
           \frac{\partial \phi_k}{\partial x}
           \frac{\partial \phi_l}{\partial x}\,
           dx dy

        [D2]_{k,l} = \int_\Omega
           \frac{\partial \phi_k}{\partial y}
           \frac{\partial \phi_l}{\partial y}\,
           dx dy

        The derivatives \frac{\partial \phi_k}{\partial x},
        \frac{\partial \phi_k}{\partial x} for a particular triangle
        are obtained by computing the gradient a_k, b_k for basis function k

        NOTE PADARN: All of this is now done in an external C function, and the
        result is stored in a Capsule object, meaning the entries cannot be directly
        accessed.
        """
        # NOTE PADARN: Should the input arguments here be checked - making
        # sure that they are floats? Not sure if this is done elsewhere.
        # NOTE PADARN: Should global coordinates be used for the smoothing
        # matrix, or is this not important?
        return fitsmooth.build_smoothing_matrix(self.mesh.triangles,
                                                self.mesh.areas, self.mesh.vertex_coordinates)

    # NOTE PADARN: This function was added to emulate behavior of the original
    # class not using external C functions. This method is dangerous as D could
    # be very large - it was added as it is used in a unit test.
    def get_D(self):
        return fitsmooth.return_full_D(self.D, self.mesh.number_of_nodes)

    # NOTE PADARN: This function was added to emulate behavior of the original
    # class so as to pass a unit test. It is completely unneeded.
    def build_fit_subset(self, point_coordinates, z=None, attribute_name=None,
                         verbose=False, output='dot'):
        # Thin alias kept for backward compatibility with old callers/tests.
        self._build_matrix_AtA_Atz(
            point_coordinates, z, attribute_name, verbose, output)

    def _build_matrix_AtA_Atz(self, point_coordinates, z=None, attribute_name=None,
                              verbose=False, output='dot'):
        """Build:
        AtA  m x m  interpolation matrix, and,
        Atz  m x a  interpolation matrix where,

        m is the number of basis functions phi_k (one per vertex)
        a is the number of data attributes

        This algorithm uses a quad tree data structure for fast binning of
        data points.

        If Ata is None, the matrices AtA and Atz are created.

        This function can be called again and again, with sub-sets of
        the point coordinates.  Call fit to get the results.

        Preconditions
        z and points are numeric
        Point_coordindates and mesh vertices have the same origin.

        The number of attributes of the data points does not change
        """
        if isinstance(point_coordinates, Geospatial_data):
            point_coordinates = point_coordinates.get_data_points(
                absolute=True)

        # Convert input to numeric arrays
        if z is not None:
            z = ensure_numeric(z, float)
        else:
            msg = 'z not specified'
            assert isinstance(point_coordinates, Geospatial_data), msg
            z = point_coordinates.get_attributes(attribute_name)

        point_coordinates = ensure_numeric(point_coordinates, float)
        npts = len(z)
        z = num.array(z)
        # NOTE PADARN : This copy might be needed to
        # make sure memory is contig - would be better to read in C..
        z = z.copy()

        self.point_count += z.shape[0]

        zdim = 1
        if len(z.shape) != 1:
            zdim = z.shape[1]

        [AtA, Atz] = fitsmooth.build_matrix_AtA_Atz_points(self.root.root,
                                                           self.mesh.number_of_nodes,
                                                           self.mesh.triangles,
                                                           num.array(point_coordinates), z, zdim, npts)

        if verbose and output == 'dot':
            print('\b.', end=' ')
            sys.stdout.flush()
        if zdim == 1:
            Atz = num.array(Atz[0])
        else:
            Atz = num.array(Atz).transpose()

        # First block of points initialises the accumulators; subsequent
        # blocks are merged into them in C.
        if self.AtA is None and self.Atz is None:
            self.AtA = AtA
            self.Atz = Atz
        else:
            fitsmooth.combine_partial_AtA_Atz(self.AtA, AtA,
                                              self.Atz, Atz, zdim, self.mesh.number_of_nodes)

    def fit(self, point_coordinates_or_filename=None, z=None,
            verbose=False,
            point_origin=None,
            attribute_name=None,
            max_read_lines=1e7):
        """Fit a smooth surface to given 1d array of data points z.

        The smooth surface is computed at each vertex in the underlying
        mesh using the formula given in the module doc string.

        Inputs:
        point_coordinates_or_filename: The co-ordinates of the data points.
              A filename of a .pts file or a
              List of coordinate pairs [x, y] of
              data points or an nx2 numeric array or a Geospatial_data object
              or points file filename
          z: Single 1d vector or array of data at the point_coordinates.

        Returns the fitted vertex attributes (solution of the
        conjugate-gradient solve).
        """
        # NOTE PADARN 29/03/13: File reading from C has been removed. Now
        # the input is either a set of points, or a filename which is then
        # handled by the Geospatial_data object

        if verbose:
            print('Fit.fit: Initializing')

        # Use blocking to load in the point info
        if isinstance(point_coordinates_or_filename, basestring):
            msg = "Don't set a point origin when reading from a file"
            assert point_origin is None, msg
            filename = point_coordinates_or_filename

            G_data = Geospatial_data(filename,
                                     max_read_lines=max_read_lines,
                                     load_file_now=False,
                                     verbose=verbose)

            for i, geo_block in enumerate(G_data):

                # Build the array
                points = geo_block.get_data_points(absolute=True)
                z = geo_block.get_attributes(attribute_name=attribute_name)

                self._build_matrix_AtA_Atz(points, z, attribute_name, verbose)

            point_coordinates = None

            if verbose:
                print('')
        else:
            point_coordinates = point_coordinates_or_filename

        # This condition either means a filename was read or the function
        # received a None as input
        if point_coordinates is None:
            if verbose:
                log.critical('Fit.fit: Warning: no data points in fit')
            msg = 'No interpolation matrix.'
            assert self.AtA is not None, msg
            assert self.Atz is not None
        else:
            point_coordinates = ensure_absolute(point_coordinates,
                                                geo_reference=point_origin)
            # if isinstance(point_coordinates,Geospatial_data) and z is None:
            # z will come from the geo-ref

            self._build_matrix_AtA_Atz(
                point_coordinates, z, verbose=verbose, output='counter')

        # Check sanity: without smoothing, an underdetermined system
        # (fewer points than vertices) cannot be solved.
        m = self.mesh.number_of_nodes  # Nbr of basis functions (1/vertex)
        n = self.point_count
        if n < m and self.alpha == 0.0:
            msg = 'ERROR (least_squares): Too few data points\n'
            msg += 'There are only %d data points and alpha == 0. ' % n
            msg += 'Need at least %d\n' % m
            msg += 'Alternatively, set smoothing parameter alpha to a small '
            msg += 'positive value,\ne.g. 1.0e-3.'
            raise TooFewPointsError(msg)

        self._build_coefficient_matrix_B(verbose)
        loners = self.mesh.get_lone_vertices()
        # FIXME - make this as error message.
        # test with
        # Not_yet_test_smooth_att_to_mesh_with_excess_verts.
        if len(loners) > 0:
            msg = 'WARNING: (least_squares): \nVertices with no triangles\n'
            msg += 'All vertices should be part of a triangle.\n'
            msg += 'In the future this will be inforced.\n'
            msg += 'The following vertices are not part of a triangle;\n'
            msg += str(loners)
            log.critical(msg)
            #raise VertsWithNoTrianglesError(msg)
        return conjugate_gradient(self.B, self.Atz, self.Atz,
                                  imax=2 * len(self.Atz) + 1000, use_c_cg=self.use_c_cg,
                                  precon=self.cg_precon)
# poin_coordiantes can also be a points file name
def fit_to_mesh(point_coordinates,
                vertex_coordinates=None,
                triangles=None,
                mesh=None,
                point_attributes=None,
                alpha=DEFAULT_ALPHA,
                verbose=False,
                mesh_origin=None,
                data_origin=None,
                max_read_lines=None,
                attribute_name=None,
                use_cache=False,
                cg_precon='Jacobi',
                use_c_cg=True):
    """Wrapper around internal function _fit_to_mesh for use with caching.

    point_coordinates can also be the name of a points file; when
    use_cache is True and a filename is given, the file is registered as a
    cache dependency so the cache is invalidated if it changes on disk.
    """

    args = (point_coordinates, )
    kwargs = {'vertex_coordinates': vertex_coordinates,
              'triangles': triangles,
              'mesh': mesh,
              'point_attributes': point_attributes,
              'alpha': alpha,
              'verbose': verbose,
              'mesh_origin': mesh_origin,
              'data_origin': data_origin,
              'max_read_lines': max_read_lines,
              'attribute_name': attribute_name,
              'cg_precon': cg_precon,
              'use_c_cg': use_c_cg
              }

    if use_cache is True:
        if isinstance(point_coordinates, basestring):
            # We assume that point_coordinates is the name of a .csv/.txt
            # file which must be passed onto caching as a dependency
            # (in case it has changed on disk)
            dep = [point_coordinates]
        else:
            dep = None

        return cache(_fit_to_mesh,
                     args, kwargs,
                     verbose=verbose,
                     compression=False,
                     dependencies=dep)
    else:
        # Removed a stray no-op string expression that sat between the
        # call and the return here.
        return _fit_to_mesh(*args, **kwargs)
# point_coordinates can also be a points file name
def _fit_to_mesh(point_coordinates,
                 vertex_coordinates=None,
                 triangles=None,
                 mesh=None,
                 point_attributes=None,
                 alpha=DEFAULT_ALPHA,
                 verbose=False,
                 mesh_origin=None,
                 data_origin=None,
                 max_read_lines=None,
                 attribute_name=None,
                 cg_precon='Jacobi',
                 use_c_cg=True):
    """Fit a smooth surface to a triangulation, given data points with
    attributes.

    Inputs:
        vertex_coordinates: List of coordinate pairs [xi, eta] of
        points constituting a mesh (or an m x 2 numeric array or
            a geospatial object)
            Points may appear multiple times
            (e.g. if vertices have discontinuities)

        triangles: List of 3-tuples (or a numeric array) of
        integers representing indices of all vertices in the mesh.

        point_coordinates: List of coordinate pairs [x, y] of data points
        (or an nx2 numeric array). This can also be a .csv/.txt/.pts
        file name.

        alpha: Smoothing parameter.

        mesh_origin: A geo_reference object or 3-tuples consisting of
            UTM zone, easting and northing.
            If specified vertex coordinates are assumed to be
            relative to their respective origins.

        point_attributes: Vector or array of data at the
                          point_coordinates.

    Returns the fitted vertex attributes.
    """
    if mesh is None:
        # FIXME(DSG): Throw errors if triangles or vertex_coordinates
        # are None
        # Normalise the raw mesh description before constructing the mesh.
        triangles = ensure_numeric(triangles, int)
        vertex_coordinates = ensure_absolute(vertex_coordinates,
                                             geo_reference=mesh_origin)

        if verbose:
            log.critical('_fit_to_mesh: Building mesh')
        mesh = Mesh(vertex_coordinates, triangles)

        # The mesh was just created, so an integrity check is unnecessary.

    fitter = Fit(mesh=mesh,
                 verbose=verbose,
                 alpha=alpha,
                 cg_precon=cg_precon,
                 use_c_cg=use_c_cg)

    # Add the value checking stuff that's in least squares.
    # Maybe this stuff should get pushed down into Fit.
    # at least be a method of Fit.
    # Or intigrate it into the fit method, saving teh max and min's
    # as att's.
    return fitter.fit(point_coordinates,
                      point_attributes,
                      point_origin=data_origin,
                      max_read_lines=max_read_lines,
                      attribute_name=attribute_name,
                      verbose=verbose)
def fit_to_mesh_file(mesh_file, point_file, mesh_output_file,
                     alpha=DEFAULT_ALPHA, verbose=False,
                     expand_search=False,
                     precrop=False,
                     display_errors=True):
    """Fit point attributes to a mesh and write the result to a mesh file.

    Given a mesh file (tsh) and a point attribute file, fit
    point attributes to the mesh and write a mesh file with the
    results.

    Note: the points file needs titles. If you want anuga to use the tsh file,
    make sure the title is elevation.

    NOTE: Throws IOErrors, for a variety of file problems.

    mesh_file: Name of the input mesh (.tsh) file.
    point_file: Name of the point/attribute data file.
    mesh_output_file: Name of the mesh file to write the fitted result to.
    alpha: Smoothing parameter passed on to fit_to_mesh.
    verbose: If True, log progress messages.
    expand_search, precrop: Accepted for backward compatibility; not used
        in this function body.
    display_errors: If True, log file errors before re-raising them.
    """
    from anuga.load_mesh.loadASCII import import_mesh_file, \
        export_mesh_file, concatinate_attributelist

    try:
        mesh_dict = import_mesh_file(mesh_file)
    except IOError as e:
        if display_errors:
            log.critical("Could not load bad file: %s" % str(e))
        # Re-raise the original IOError so callers keep the message and
        # traceback (previously a fresh, message-less IOError was raised).
        raise

    vertex_coordinates = mesh_dict['vertices']
    triangles = mesh_dict['triangles']
    # Normalise the attribute containers to plain lists so they can be
    # extended in place below.
    if isinstance(mesh_dict['vertex_attributes'], num.ndarray):
        old_point_attributes = mesh_dict['vertex_attributes'].tolist()
    else:
        old_point_attributes = mesh_dict['vertex_attributes']
    if isinstance(mesh_dict['vertex_attribute_titles'], num.ndarray):
        old_title_list = mesh_dict['vertex_attribute_titles'].tolist()
    else:
        old_title_list = mesh_dict['vertex_attribute_titles']
    if verbose:
        log.critical('tsh file %s loaded' % mesh_file)

    # load in the points file
    try:
        geo = Geospatial_data(point_file, verbose=verbose)
    except IOError as e:
        if display_errors:
            log.critical("Could not load bad file: %s" % str(e))
        raise  # Re-raise original exception

    point_coordinates = geo.get_data_points(absolute=True)
    title_list, point_attributes = concatinate_attributelist(
        geo.get_all_attributes())

    # Use the mesh's geo reference as origin, if one is present.
    if 'geo_reference' in mesh_dict and \
            not mesh_dict['geo_reference'] is None:
        mesh_origin = mesh_dict['geo_reference'].get_origin()
    else:
        mesh_origin = None

    if verbose:
        log.critical("points file loaded")
    if verbose:
        log.critical("fitting to mesh")
    f = fit_to_mesh(point_coordinates,
                    vertex_coordinates,
                    triangles,
                    None,
                    point_attributes,
                    alpha=alpha,
                    verbose=verbose,
                    data_origin=None,
                    mesh_origin=mesh_origin)
    if verbose:
        log.critical("finished fitting to mesh")

    # convert array to list of lists
    new_point_attributes = f.tolist()
    # FIXME have this overwrite attributes with the same title - DSG
    # Put the newer attributes last
    if old_title_list != []:
        old_title_list.extend(title_list)
        # FIXME can this be done a faster way? - DSG
        for i in range(len(old_point_attributes)):
            old_point_attributes[i].extend(new_point_attributes[i])
        mesh_dict['vertex_attributes'] = old_point_attributes
        mesh_dict['vertex_attribute_titles'] = old_title_list
    else:
        mesh_dict['vertex_attributes'] = new_point_attributes
        mesh_dict['vertex_attribute_titles'] = title_list

    if verbose:
        log.critical("exporting to file %s" % mesh_output_file)
    try:
        export_mesh_file(mesh_output_file, mesh_dict)
    except IOError as e:
        if display_errors:
            log.critical("Could not write file %s", str(e))
        raise  # Re-raise original exception
|
{"hexsha": "55e1a374ba0859ac219f7d7b84a558a37cfae057", "size": 22547, "ext": "py", "lang": "Python", "max_stars_repo_path": "anuga/fit_interpolate/fit.py", "max_stars_repo_name": "samcom12/anuga_core", "max_stars_repo_head_hexsha": "f4378114dbf02d666fe6423de45798add5c42806", "max_stars_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "anuga/fit_interpolate/fit.py", "max_issues_repo_name": "samcom12/anuga_core", "max_issues_repo_head_hexsha": "f4378114dbf02d666fe6423de45798add5c42806", "max_issues_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "anuga/fit_interpolate/fit.py", "max_forks_repo_name": "samcom12/anuga_core", "max_forks_repo_head_hexsha": "f4378114dbf02d666fe6423de45798add5c42806", "max_forks_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0752, "max_line_length": 103, "alphanum_fraction": 0.5947576174, "include": true, "reason": "import numpy", "num_tokens": 4673}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 28 22:43:10 2016
@author: kevin

Exploratory analysis script (Python 2): compares per-cycle 'Called'
intensity columns between failed and normal sequencing runs using
pandas/matplotlib boxplots, and demonstrates plotly and ggplot plotting.
"""
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly
import plotly.plotly as py  # NOTE(review): deprecated module in modern plotly (moved to chart_studio)
from plotly.graph_objs import *
plotly.tools.set_credentials_file(username='kevyin', api_key='n3c33j5hac')  # NOTE(review): hard-coded API credential committed to source
from ggplot import *
pd.set_option('display.mpl_style', 'default') # Make the graphs a bit prettier
#%%
# Load gzip-compressed CSV tables of per-cycle data.
df_failed = pd.read_csv('data/failed_cycle123_2.txt.gz')
df_normal = pd.read_csv('data/normal_cycle123_2.txt.gz')
df_failed.dtypes
df_normal.dtypes
#%%
plt.figure(1)
# Columns whose names start with 'Called' carry the called intensities.
called_int_cols = df_failed.columns[df_failed.columns.str.match('Called')]
# Mean of the called-intensity columns per (run, lane, cycle, read);
# failed runs plotted on the left, normal on the right, sharing the y axis.
df_failed_byLaneCycle = df_failed.groupby(['RunFolder', 'Lane', 'Cycle','Read'])[called_int_cols].mean()
ax1 = plt.subplot(1,2,1)
df_failed_byLaneCycle.boxplot()
df_normal_byLaneCycle = df_normal.groupby(['RunFolder', 'Lane', 'Cycle','Read'])[called_int_cols].mean()
plt.subplot(1,2,2, sharey=ax1)
df_normal_byLaneCycle.boxplot()
#%%
# plotly
# trace0 = Scatter(
#     x=[1, 2, 3, 4],
#     y=[10, 15, 13, 17]
# )
# trace1 = Scatter(
#     x=[16, 12, 13, 14],
#     y=[16, 5, 11, 9]
# )
# data = Data([trace0, trace1])
#
# py.plot(data, filename = 'basic-line')
#%%
# ggplot
# Demonstration plots on the ggplot package's bundled diamonds/mpg datasets.
ggplot(diamonds, aes(x='price', color='clarity')) + \
    geom_density() + \
    scale_color_brewer(type='div', palette=7) + \
    facet_wrap('cut')
#
# Python 2 print statements render the plot objects.
print ggplot(mpg, aes(x='class', y='hwy')) + geom_boxplot()
print ggplot(mpg, aes(x='class', y='hwy')) + geom_boxplot() + facet_wrap('manufacturer')
print ggplot(diamonds, aes('pd.cut(carat, bins=10, labels=range(10))', 'price')) + geom_boxplot()
# Order clarity categories from worst (I1) to best (IF) before plotting.
diamonds['clarity'] = pd.Categorical(diamonds['clarity'], ordered=True,
                                     categories='I1 SI2 SI1 VS2 VS1 VVS2 VVS1 IF'.split())
print ggplot(diamonds, aes(x='clarity', y='price')) + geom_boxplot()
|
{"hexsha": "2354f83250ffb1469da963b2fce7fc5a9916b98c", "size": 1870, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples-python27/analyse_example.py", "max_stars_repo_name": "kevyin/sitta", "max_stars_repo_head_hexsha": "e2504dc6dddb57742deb22d6ce881925b59070c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples-python27/analyse_example.py", "max_issues_repo_name": "kevyin/sitta", "max_issues_repo_head_hexsha": "e2504dc6dddb57742deb22d6ce881925b59070c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples-python27/analyse_example.py", "max_forks_repo_name": "kevyin/sitta", "max_forks_repo_head_hexsha": "e2504dc6dddb57742deb22d6ce881925b59070c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7142857143, "max_line_length": 104, "alphanum_fraction": 0.6748663102, "include": true, "reason": "import numpy", "num_tokens": 586}
|
%% loftLinQuad2hex
% Below is a demonstration of the features of the |loftLinQuad2hex| function
%%
% Reset the workspace, figures and command window before running the demo.
clear; close all; clc;
%% Syntax
% |[varargout]=loftLinQuad2hex(Fq,Vq,Vq2,numSteps);|
%% Description
% UNDOCUMENTED
%% Examples
%
%%
%
% <<gibbVerySmall.gif>>
%
% _*GIBBON*_
% <www.gibboncode.org>
%
% _Kevin Mattheus Moerman_, <gibbon.toolbox@gmail.com>
%%
% _*GIBBON footer text*_
%
% License: <https://github.com/gibbonCode/GIBBON/blob/master/LICENSE>
%
% GIBBON: The Geometry and Image-based Bioengineering add-On. A toolbox for
% image segmentation, image-based modeling, meshing, and finite element
% analysis.
%
% Copyright (C) 2006-2022 Kevin Mattheus Moerman and the GIBBON contributors
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
|
{"author": "gibbonCode", "repo": "GIBBON", "sha": "8178520664a6148db939eaea87e75b3cba4f2b4f", "save_path": "github-repos/MATLAB/gibbonCode-GIBBON", "path": "github-repos/MATLAB/gibbonCode-GIBBON/GIBBON-8178520664a6148db939eaea87e75b3cba4f2b4f/docs/HELP_loftLinQuad2hex.m"}
|
import pdb
import sys
from functools import reduce

import numpy as np
import pandas as pd
from prompt_toolkit import prompt
from tabulate import tabulate

from ..metadata_interface import *
from ..common import *
class ReplUi(object):
    """Command-line (REPL) UI for collecting metadata ground truth.

    Presents raw building-point metadata to a human expert and gathers
    answers of three kinds: a point tagset, a full character-level BIO
    parsing, or the set of all tagsets. Answers are stored through the
    metadata-interface helpers (insert_groundtruth, query_labels, ...).
    """

    def __init__(self, all_tagsets, pgid=None):
        # pgid: project/group identifier attached to stored labels.
        self._init_brick(all_tagsets)
        self.pgid = pgid

    def _init_brick(self, all_tagsets):
        """Build the vocabulary of valid tagsets and of individual tags."""
        # TODO: Read below from an external file
        non_brick_tagsets = ['none',
                             'rightidentifier',
                             'leftidentifier',
                             'unknown',
                             'pump_flow_status',
                             'networkadapter',
                             'analog_input_sensor',
                             'analog_output_setpoint',
                             'binary_input_sensor',
                             'binary_output_setpoint',
                             'multistate_input_sensor',
                             'multistate_output_setpoint',
                             ]
        # left identifier: contraints meaning of left tagset
        # right identifier: contraints meaning of right tagset
        #TODO: Create a dict with dummy values to speed up lookup if needed.
        self.all_tagsets = all_tagsets + non_brick_tagsets
        # Split every tagset on '_' and flatten into the unique tag set.
        splitter = lambda s: s.split('_')
        adder = lambda x, y: x + y
        self.all_tags = list(set(reduce(adder, map(splitter, self.all_tagsets), [])))

    def display_target(self, srcid, building):
        """Print the raw metadata for srcid; raise if it is not in the DB."""
        if not RawMetadata.objects(srcid=srcid, building=building):
            raise Exception('Srcid {0} not found in our DB'.format(srcid))
        print_rawmetadata(srcid, building)

    def normalize_tagset(self, raw_tagset):
        # Join whitespace-separated words with underscores.
        tagset = '_'.join(raw_tagset.split()) # TODO: Capitalize if necessary.
        return tagset

    def print_sentence_with_pos(self, sentence, base=0):
        """Print `sentence` with a column-index ruler above it.

        The ruler is printed one digit row per decimal place so multi-digit
        positions line up vertically with the characters below.
        """
        num_levels = int(np.log10(len(sentence))) + 1
        for level in reversed(list(range(0, num_levels))):
            divider = np.power(10, level)
            line = ''
            for i, c in enumerate(sentence[base:]):
                istr = str(i + base)
                if len(istr) <= level :
                    curr_digit = '0'
                else:
                    curr_digit = istr[len(istr) - level - 1]
                # Suppress leading zeros on the higher ruler rows.
                if curr_digit == '0' and level > 0:
                    line += ' '
                else:
                    line += curr_digit
            print(line)
        print(sentence[base:])

    def validate_tagset(self, tagset):
        # The part before '-' is the tagset proper; the suffix is a
        # domain-specific qualifier that is ignored for validation.
        if tagset.split('-')[0] in self.all_tagsets:
            return True
        else:
            return False

    def validate_label(self, label):
        """Return True iff every '_'-separated tag in `label` is known."""
        label = label.split('-')[0] # Removing domain-specific names.
        for tag in label.split('_'):
            if tag not in self.all_tags:
                return False
        return True

    def make_bio_tuples(self, word, label):
        """Expand (word, label) into per-character BIO pairs.

        'O' labels every character as 'O'; otherwise the first character
        gets 'B_<label>' and the rest 'I_<label>'.
        """
        tup = []
        if label == 'O':
            return [[c, 'O'] for c in word]
        else:
            tup.append([word[0], 'B_' + label])
            for c in word[1:]:
                tup.append([c, 'I_' + label])
            return tup

    def commands(self, cmd):
        """Handle '@'-prefixed REPL commands (currently only 'debug')."""
        if cmd == 'debug':
            pdb.set_trace()
            return 'debug'
        else:
            raise Exception('Unknown commands: {0}'.format(cmd))

    def get_input(self, task):
        """Prompt the user for `task` ('receive_label', 'end_idx', or
        'alltagsets') and return the parsed response.

        An input starting with '@' is dispatched to self.commands().
        """
        if task == 'receive_label':
            msg = 'label: '
        elif task == 'end_idx':
            msg = 'end_idx: '
        elif task == 'alltagsets':
            msg = 'all tagsets: '
        inp = prompt(msg)
        if inp and inp[0] == '@':
            return self.commands(inp[1:])
        elif task == 'receive_label':
            return self.parse_label(inp)
        elif task == 'end_idx':
            # Empty input means "no end index".
            if not inp:
                return None
            else:
                return int(inp)
        elif task == 'alltagsets':
            # Collect tagsets one per prompt until the user types 'done'.
            found_tagsets = []
            if self.validate_tagset(inp):
                found_tagsets.append(inp)
            else:
                print('incorrect tagset: {0}'.format(inp))
            while True:
                print('current tagsets: {0}'.format(found_tagsets))
                inp = prompt(msg)
                if inp == 'done':
                    break
                elif self.validate_tagset(inp):
                    found_tagsets.append(inp)
                else:
                    print('incorrect tagset: {0}'.format(inp))
                    continue
            return list(set(found_tagsets))

    def parse_label(self, label):
        """Expand shorthand labels: '' -> 'O', 'l'/'r' -> identifiers."""
        if label == '':
            return 'O'
        elif label == 'l':
            return 'leftidentifier'
        elif label == 'r':
            return 'rightidentifier'
        else:
            return label

    def get_answer_point_tagset(self, srcid, building):
        """Prompt for and return a normalized point tagset."""
        point_tagset = prompt('Point TagSet: ')
        point_tagset = self.normalize_tagset(point_tagset)
        return point_tagset

    def get_answer_full_parsing(self, srcid, building):
        """Interactively label every metadata sentence of srcid with BIO tags.

        Returns the accumulated fullparsing dict
        (metadata_type -> list of [char, BIO-label] pairs).
        """
        print('Instruction:')
        done = False
        labeled_metadata = query_labels(
            pgid=self.pgid,
            srcid=srcid,
            building=building,
        ).upsert_one(
            srcid=srcid,
            building=building,
        )
        fullparsing = labeled_metadata[FULL_PARSING]
        metadatas = RawMetadata.objects(srcid=srcid, building=building)\
            .first().metadata
        for metadata_type, sentence in metadatas.items():
            base_idx = 0
            while base_idx < len(sentence):
                print('=================================')
                # 1. Print the entire raw metadata
                print_rawmetadata(srcid, building)
                # 2. Print the labeled data so far.
                print('***************Labeled******************')
                parsed = fullparsing.get(metadata_type, [])
                print('Metadata Type: {0}'.format(metadata_type))
                # NOTE(review): requires pandas imported as `pd` at module
                # level — confirm it is available in this file's imports.
                labeled_df = pd.DataFrame({
                    'words': [row[0] for row in parsed],
                    'labels': [row[1] for row in parsed]
                })
                print(tabulate(labeled_df, headers='keys', tablefmt='psql'))
                # 3. Print the unlabeled data so far.
                print('***************Unlabeled******************')
                print('Metadata Type: {0}'.format(metadata_type))
                self.print_sentence_with_pos(sentence, base_idx)
                # 4. Specify which parts to be labeled
                while True:
                    try:
                        end_idx = self.get_input('end_idx')
                        if end_idx == 'debug':
                            continue
                        elif not end_idx:
                            # Empty input labels the single current
                            # character as 'O'.
                            end_idx = base_idx
                            curr_word = sentence[base_idx:end_idx + 1]
                            label = 'O'
                        else:
                            end_idx = int(end_idx)
                            if end_idx < base_idx:
                                raise Exception('end_idx is to low as {0}'
                                                .format(end_idx))
                            curr_word = sentence[base_idx:end_idx + 1]
                            print('-- Curr word: {0}'.format(curr_word))
                            # 5. Specify what the label is
                            while True:
                                label = self.get_input('receive_label')
                                # 5.1. Validate if the label is right according to Brick.
                                if self.validate_label(label):
                                    break
                                else:
                                    print('Not a valid label: {0}'
                                          .format(label))
                        # 6. update the data set.
                        parsed += self.make_bio_tuples(curr_word, label)
                        fullparsing[metadata_type] = parsed
                        base_idx = end_idx + 1
                        break
                    except KeyboardInterrupt:
                        print('Interrupted')
                        sys.exit(0)
                    except Exception as e:
                        # Report and re-prompt on any other input error.
                        print(e)
                        continue
        return fullparsing

    def get_answer_all_tagsets(self, srcid, building):
        """Display the point's metadata and collect its full tagset list."""
        print('=================================')
        print_rawmetadata(srcid, building)
        print_fullparsing(srcid, building)
        received_tagsets = self.get_input('alltagsets')
        return received_tagsets

    def get_answer(self, srcid, building, example_type):
        """Dispatch to the answer-collection routine for `example_type`."""
        if example_type == POINT_TAGSET:
            return self.get_answer_point_tagset(srcid, building)
        elif example_type == FULL_PARSING:
            return self.get_answer_full_parsing(srcid, building)
        elif example_type == ALL_TAGSETS:
            return self.get_answer_all_tagsets(srcid, building)
        else:
            raise Exception('UI for {0} is not implemented yet'
                            .format(example_type))
        # NOTE(review): unreachable — every branch above returns or raises.
        print('done for {0}'.format(srcid))

    def ask_example(self, srcid, building, example_types=[]):
        """Collect and store answers of each requested example type."""
        self.display_target(srcid, building)
        answers = {}
        for example_type in example_types:
            answer = self.get_answer(srcid, building, example_type)
            if answer: #TODO: Do I really need this condition?
                insert_groundtruth(srcid, building, self.pgid, **{example_type: answer})

    def store_example(self, srcid, building, answers):
        """Persist a dict of already-collected answers for srcid."""
        insert_groundtruth(srcid, building, self.pgid, **answers)
|
{"hexsha": "912532dd7c028573ea6415185fa702001034d53d", "size": 9845, "ext": "py", "lang": "Python", "max_stars_repo_path": "plastering/uis/cmdline_ui.py", "max_stars_repo_name": "MingzheWu418/plastering", "max_stars_repo_head_hexsha": "322531e934c3acf2ecc8f520b37a6d255b9959c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2018-09-19T01:16:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T14:35:36.000Z", "max_issues_repo_path": "plastering/uis/cmdline_ui.py", "max_issues_repo_name": "MingzheWu418/plastering", "max_issues_repo_head_hexsha": "322531e934c3acf2ecc8f520b37a6d255b9959c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-04-12T18:37:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:27:55.000Z", "max_forks_repo_path": "plastering/uis/cmdline_ui.py", "max_forks_repo_name": "MingzheWu418/plastering", "max_forks_repo_head_hexsha": "322531e934c3acf2ecc8f520b37a6d255b9959c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2019-03-05T23:44:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T07:29:31.000Z", "avg_line_length": 38.6078431373, "max_line_length": 89, "alphanum_fraction": 0.4936515998, "include": true, "reason": "import numpy", "num_tokens": 1964}
|
################################
# EvoMan FrameWork - V1.0 2016 #
# Author: Karine Miras #
# karine.smiras@gmail.com #
################################
import sys
import numpy
import random
import Base
from Base.SpriteConstants import *
from Base.SpriteDefinition import *
from sensors import Sensors
# Stage configuration for this enemy's level.
tilemap = 'evoman/map2.tmx'
timeexpire = 1500 # game run limit

# enemy 4 sprite, heatman
class Enemy(pygame.sprite.Sprite):
    """Enemy 4 ('heatman'): jumps, turns into a fireball when attacked,
    and fires 3-bullet volleys toward the player."""

    def __init__(self, location,*groups):
        super(Enemy, self).__init__(*groups)
        self.spriteDefinition = SpriteDefinition('evoman/images/EnemySprites.png', 0, 0, 43, 59)
        self.updateSprite(SpriteConstants.STANDING, SpriteConstants.LEFT)
        self.rect = pygame.rect.Rect(location, self.image.get_size())
        self.direction = -1          # -1 = facing left, 1 = facing right
        self.max_life = 100
        self.life = self.max_life
        self.resting = 0             # 1 while standing on a platform
        self.dy = 0                  # vertical velocity
        self.twists = []             # bullets fired so far
        self.alternate = 1
        self.fireflash = 0           # >0 while in fireball mode
        self.imune = 0               # 1 while immune to player shots
        self.rect.x = 550
        self.timeenemy = 0           # internal attack timer
        self.hurt = 0
        self.shooting = 0
        self.gun_cooldown = 0
        self.rect.right = 580

    def update(self, dt, game):
        """Advance the enemy one frame: decide attacks (static script or AI
        controller), move, resolve collisions, and fire bullets."""
        if game.time==1:
            # puts enemy in random initial position
            if game.randomini == 'yes':
                self.rect.x = numpy.random.choice([640,500,400,300])

        # defines game mode for player action
        if game.enemymode == 'static': # enemy controlled by static movements
            # atack1: jump, atack2: reset timer/turn, atack3: shoot,
            # atack4: fireball dash (derived from the internal timer).
            if self.timeenemy == 2:
                atack1 = 1
            else:
                atack1 = 0
            if self.timeenemy> 50:
                atack2 = 1
            else:
                atack2 = 0
            if self.timeenemy == 3:
                atack3 = 1
            else:
                atack3 = 0
            if (self.fireflash>=1 and self.fireflash <=40):
                atack4 = 1
            else:
                atack4 = 0
        elif game.enemymode == 'ai': # player controlled by AI algorithm
            # calls the controller providing game sensors
            actions = game.enemy_controller.control(self.sensors.get(game), game.econt)
            if len(actions) < 4:
                game.print_logs("ERROR: Enemy 1 controller must return 4 decision variables.")
                sys.exit(0)
            atack1 = actions[0]
            atack2 = actions[1]
            atack3 = actions[2]
            atack4 = actions[3]

        # Shooting is only allowed when the gun cooldown has expired.
        if atack3 == 1 and not self.gun_cooldown:
            atack3 = 1
        else:
            atack3 = 0

        # if the 'start game' marker is 1
        if game.start == 1:

            self.timeenemy += 1 # increments enemy timer

            last = self.rect.copy() # copies last position state of the enemy

            # when player atacks, enemy turns into fire and goes towards his direction
            if game.player.atacked == 1 and self.fireflash == 0:
                self.fireflash = 100
            else:
                self.fireflash = max(0,self.fireflash -1)

            if atack4 == 1:
                # Fireball dash: fast horizontal movement.
                self.rect.x += self.direction * 600 * dt
                if self.fireflash == 1:
                    self.direction = self.direction * -1
                if self.rect.colliderect(game.player.rect):
                    self.fireflash = 0

            # otherwise he just keeps shooting towards the player direction
            elif self.fireflash == 0:

                if atack1 == 1 and self.resting == 1:
                    self.dy = -900  # jump impulse
                    self.resting = 0

                self.imune = 0 # enemy is not imune to player's shooting anymore

                # images of the enemy standing up
                if self.direction == -1:
                    self.updateSprite(SpriteConstants.STANDING, SpriteConstants.LEFT)
                else:
                    self.updateSprite(SpriteConstants.STANDING, SpriteConstants.RIGHT)

                # reinicializes timer and turns to the players direction
                if atack2 == 1:
                    self.timeenemy = 1
                    if game.enemymode == 'static':
                        if game.player.rect.right < self.rect.left:
                            self.direction = -1
                        elif game.player.rect.left > self.rect.right:
                            self.direction = 1
                    else:
                        self.direction = self.direction *-1

            # checks collision of the player with the enemy
            if self.rect.colliderect(game.player.rect):

                # choses what sprite penalise according to config
                if game.contacthurt == "player":
                    game.player.life = max(0, game.player.life-(game.level*0.3))
                if game.contacthurt == "enemy":
                    game.enemy.life = max(0, game.enemy.life-(game.level*0.3))

                # pushes player when he collides with the enemy
                game.player.rect.x += self.direction * 50 * dt

                # limits the player to stand on the screen space even being pushed
                if game.player.rect.x < 60:
                    game.player.rect.x = 60
                if game.player.rect.x > 620:
                    game.player.rect.x = 620

                # sets flag to change the player image when he is hurt
                game.player.hurt = 5

            # gravity
            self.dy = min(400, self.dy + 100)
            self.rect.y += self.dy * dt

            # controls screen walls and platforms limits agaist enemy
            new = self.rect
            self.resting = 0
            for cell in game.tilemap.layers['triggers'].collide(new, 'blockers'):
                blockers = cell['blockers']
                # l/r/t/b flags say which side of the cell blocks movement.
                if 'l' in blockers and last.right <= cell.left and new.right > cell.left:
                    new.right = cell.left
                if 'r' in blockers and last.left >= cell.right and new.left < cell.right:
                    new.left = cell.right
                if 't' in blockers and last.bottom <= cell.top and new.bottom > cell.top:
                    self.resting = 1
                    new.bottom = cell.top
                    self.dy = 0
                if 'b' in blockers and last.top >= cell.bottom and new.top < cell.bottom:
                    new.top = cell.bottom

            # enemy shoots 3 bullets
            if atack3 == 1:

                self.shooting = 5
                self.gun_cooldown = 5

                # if enemy is not turned into fire, shoots, otherwise stops the time counter for a while.
                if self.fireflash == 0:

                    # bullets sound effect
                    if game.sound == "on" and game.playermode == "human":
                        sound = pygame.mixer.Sound('evoman/sounds/scifi011.wav')
                        c = pygame.mixer.Channel(3)
                        c.set_volume(10)
                        c.play(sound)

                    for i in range (0,3):
                        self.twists.append(Bullet_e4((self.rect.x ,self.rect.y ), self.direction, i, len(self.twists), game.sprite_e))
                else :
                    self.timeenemy -= 1

            self.gun_cooldown = max(0, self.gun_cooldown - dt) # decreases time for bullets limitation.

            # changes bullets images according to the enemy direction
            if self.shooting > 0:
                if self.direction == -1:
                    self.updateSprite(SpriteConstants.SHOOTING, SpriteConstants.LEFT)
                else:
                    self.updateSprite(SpriteConstants.SHOOTING, SpriteConstants.RIGHT)

            self.shooting -= 1
            self.shooting = max(0,self.shooting)

            # changes the image when enemy is hurt and imune, as a fireball
            if self.imune == 1:
                # Alternate fireball frames every other tick.
                if game.time%2==0:
                    self.image = pygame.image.load('evoman/images/fireball.png')
                else:
                    self.image = pygame.image.load('evoman/images/fireball2.png')

            self.hurt -=1

    def updateSprite(self, state, direction):
        # Swap the current image for the (state, direction) sprite frame.
        self.image = self.spriteDefinition.getImage(state, direction)
# enemy bullets
class Bullet_e4(pygame.sprite.Sprite):
    """One of the three homing-arc bullets fired by enemy 4.

    Each bullet (index n = 0..2) rises and then drops toward the player's
    position, with per-bullet offsets so the volley spreads out.
    """

    image = pygame.image.load('evoman/images/bullet_l.png')

    def __init__(self, location, direction, n, n_twist, *groups):
        super(Bullet_e4, self).__init__(*groups)
        self.rect = pygame.rect.Rect(location, self.image.get_size())
        self.direction = direction   # -1 = travelling left, 1 = right
        self.lifespan = 30           # frames to live after landing
        self.n= n                    # bullet index within the volley (0..2)
        self.n_twist = n_twist       # slot in game.enemy.twists

    def update(self, dt, game):
        """Move the bullet one frame; kill it on expiry, off-screen, or
        when it hits the player (damaging and pushing the player)."""
        # puts the bullets in positions relative to the player. They go from the enemy to where the player is.
        # aux_x: horizontal stop offset; aux_y: where the arc turns downward.
        if self.n == 0:
            aux_x = 50
            aux_y = (abs(game.player.rect.x - game.enemy.rect.x)*0.55)
        elif self.n == 1:
            aux_x = 20
            aux_y = (abs(game.player.rect.x - game.enemy.rect.x)*0.60)
        elif self.n == 2:
            aux_x = -10
            aux_y = (abs(game.player.rect.x - game.enemy.rect.x)*0.65)

        # bullets axis x movement
        if self.direction == -1:
            if self.rect.x > game.player.rect.left + aux_x:
                self.rect.x += self.direction * 650 * dt
        else:
            if self.rect.x < game.player.rect.right - aux_x:
                self.rect.x += self.direction * 650 * dt

        # bullets axis y movements
        if self.direction == -1:
            if self.rect.x > game.player.rect.left + aux_y:
                self.rect.y -= 500 * dt
            else:
                self.rect.y += 700 * dt
        else:
            if self.rect.x < game.player.rect.right - aux_y-10:
                self.rect.y -= 500 * dt
            else:
                self.rect.y += 700 * dt

        # prevents bullets from passing through the floor
        self.rect.y = min(410,self.rect.y)

        # removes old bullets
        if self.rect.y == 410:
            self.lifespan -= 1
        if self.lifespan < 0:
            self.kill()
            game.enemy.twists[self.n_twist] = None
            return
        if self.rect.right<1 or self.rect.left>736 or self.rect.top <1 or self.rect.bottom>512 :
            self.kill()
            game.enemy.twists[self.n_twist] = None
            return

        # checks collision of enemy's bullet with the player
        if self.rect.colliderect(game.player.rect):

            # player loses life points, accoring to the difficulty level of the game (the more difficult, the more it loses).
            game.player.life = max(0, game.player.life-(game.level*0.3))

            # pushes player when he collides with the enemy
            game.player.rect.x += self.direction * 100 * dt

            # limits the player to stand on the screen space even being pushed
            if game.player.rect.x < 60:
                game.player.rect.x = 60
            if game.player.rect.x > 620:
                game.player.rect.x = 620

            # sets flag to change the player image when he is hurt
            game.player.hurt = 5
|
{"hexsha": "a84eab0590d1492a10d5111b6cfabee07fdf57e3", "size": 11183, "ext": "py", "lang": "Python", "max_stars_repo_path": "evoman/enemy4.py", "max_stars_repo_name": "ChristophHoenes/EWoMan2", "max_stars_repo_head_hexsha": "c3a117ba1b217d8b4a8f678a5cb4fda471134bfa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "evoman/enemy4.py", "max_issues_repo_name": "ChristophHoenes/EWoMan2", "max_issues_repo_head_hexsha": "c3a117ba1b217d8b4a8f678a5cb4fda471134bfa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evoman/enemy4.py", "max_forks_repo_name": "ChristophHoenes/EWoMan2", "max_forks_repo_head_hexsha": "c3a117ba1b217d8b4a8f678a5cb4fda471134bfa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-23T03:30:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-23T03:30:27.000Z", "avg_line_length": 34.0945121951, "max_line_length": 134, "alphanum_fraction": 0.5281230439, "include": true, "reason": "import numpy", "num_tokens": 2527}
|
import random
from typing import Tuple
import discord
import numpy
from discord.ext import commands
from .base_cog import BaseCog
from ..utils.converters import BoolConverter
from ..utils.exceptions import CommandError
class PUBGCog(BaseCog):
    """PUBG commands"""

    EMOJI = "<:pubghelm:565522877902749726>"
    # Squad used by !crate when no player names are given.
    DEFAULT_SQUAD = ["Simon", "Hugo", "Travis", "Steve"]

    @commands.command(
        name="drop", aliases=["roulette", "plane"], description="u fucking wot"
    )
    async def drop(self, ctx: commands.Context, map_:str=None, hot: BoolConverter(["hot", "h"])=False):
        """
        Chooses random drop location on a given map.

        map_: Map name ('miramar' or 'erangel'); an error message lists the
            options when omitted or invalid.
        hot: If True, restrict the choice to the first `hot_idx` (contested)
            locations of the map.
        """
        # Per-map drop locations; the first `hot_idx` entries are the
        # "hot" (high-traffic) spots.
        MAPS = {
            "miramar": {
                "locations":
                [
                    "El Pozo",
                    "Pecado",
                    "San Martín",
                    "Hacienda del Patrón",
                    "Campo Militar",
                    "Los Leones",
                    "Monte Nuevo",
                    "El Azahar",
                    "Cruz del Valle",
                    "Tierra Bronca",
                    "Torre Ahumada",
                    "Impala",
                    "La Cobrería",
                ],
                "hot_idx": 6
            },
            "erangel": {
                "locations":
                [
                    "South George",
                    "North George",
                    "Yasnaya",
                    "School",
                    "Pochinki",
                    "Mylta",
                    "Mylta Power",
                    "Military Base",
                    "Novorepnoye",
                    "Lipovka",
                    "Prison",
                    "Shelter",
                    "Primorsk",
                    "Gatka",
                    "Zharki",
                ],
                "hot_idx": 9
            }
        }

        if not map_:
            _maps = ",".join([f"**`{m}`**" for m in MAPS.keys()])
            raise CommandError(f"No map specified! Choose one of: {_maps}")

        # Get PUBG map
        pubgmap = MAPS.get(map_.lower())

        # Raise exception if map cannot be found
        if not pubgmap:
            raise CommandError("Invalid map!")

        # Get list of locations for selected map
        locations = pubgmap.get("locations")

        # Determine drop location selection logic
        if hot:
            hot_idx = pubgmap.get("hot_idx")
            location = random.choice(locations[:hot_idx])
        else:
            location = random.choice(locations)

        await ctx.send(location)

    # Start of Crate command
    @commands.command(
        name="crate",
        aliases=["crateplay", "dibs", "airdrop"],
        description="nah mate ur not getting the awm",
        usage="<name1>, <name2>, ...[namelast] OR 'c'"
    )
    async def crate(self, ctx: commands.Context, *players):
        """
        Distributes airdrop loot among a squad.

        players: Squad member names; 'c'/'channel' pulls names from the
            caller's voice channel; 'tts' anywhere triggers text-to-speech
            playback of the result.
        """
        # Make players iterable a mutable object
        players = list(players)

        # 'tts' is an option flag, not a player name.
        tts = "tts" in players
        if tts:
            players.remove("tts")

        # Resort to default squad if no players arguments
        if not players:
            squad = self.DEFAULT_SQUAD

        # Get players from ctx.author's voice channel
        elif players[0] in ["channel", "c", "ch", "chanel"]:
            try:
                squad = await self.get_usernames_in_voice_channel(ctx, nick=True)
            except AttributeError:
                # ctx.author.voice is None when not connected.
                raise CommandError(
                    f"Must be connected to a voice channel to use `{players[0]}` argument."
                )
            else:
                if len(squad) < 2:
                    raise CommandError(
                        "A minimum of 2 users must be connected to the voice channel!"
                    )

        # At least 2 players must be specified
        elif len(players) == 1:
            raise CommandError("Can't roll crate for 1 player.")

        else:
            squad = players

        # Limit names to one word
        squad = [name.split(" ")[0] for name in squad]

        # Determines size of squad and distributes guns accordingly.
        # Returns size of squad and gun list containing n=squadsize lists.
        gunsplit, armorsplit = await self.roll_guns(squad)

        output = await self.generate_crate_text(squad, gunsplit, armorsplit)

        if tts:
            # Play the crate result through the sound cog's TTS pipeline.
            sc = self.bot.get_cog("SoundCog")
            filename = await sc._do_create_tts_file(output[3:-3], "en", "pubgcrate", overwrite=True)
            await ctx.invoke(sc.play, filename)

        await ctx.send(output)

    async def roll_guns(self, squad: list) -> Tuple[list, list]:
        """Randomly partition the crate's guns and equipment among `squad`.

        Returns (gunsplit, armorsplit): per-player numpy sub-arrays of the
        shuffled gun and equipment lists. NOTE: shuffles `squad` in place.
        """
        _CRATEGUNS_ALL = [
            "AWM",
            "AUG",
            "Groza",
            "MK14",
            "Ghillie",
            "Helm",
            "Vest",
            "M249",
        ]
        # First four entries are the guns; the rest is equipment.
        GUNS = _CRATEGUNS_ALL[:4]
        EQUIPMENT = list(set(_CRATEGUNS_ALL) - set(GUNS))

        # Shuffle lists
        random.shuffle(squad)
        random.shuffle(GUNS)
        random.shuffle(EQUIPMENT)

        # Divide lists by len(squad)
        squadsize = len(squad)
        gunsplit = numpy.array_split(GUNS, squadsize)
        armorsplit = numpy.array_split(EQUIPMENT, squadsize)

        # Reroll if one person gets 4 items in a 3-man squad.
        if squadsize == 3:
            while any([True if len(list(guns)+list(armor))>=4 else False for guns, armor in zip(gunsplit, armorsplit)]):
                random.shuffle(gunsplit)
                random.shuffle(armorsplit)

        return gunsplit, armorsplit

    async def generate_crate_text(self, squad: list, gunsplit: list, armorsplit: list) -> str:
        """
        Creates output message for !crate command.
        """
        if squad[0].isdigit():
            # Sort squad numerically
            # NOTE(review): list.sort() on strings sorts lexicographically,
            # not numerically — confirm intended for multi-digit names.
            squad.sort()
        msg = "```"
        # Pad names so loot columns line up in the code block.
        _spc = len(max(squad, key=len)) + 1
        for idx, player in enumerate(squad):
            if player.islower():
                player = player.capitalize()
            name_spc = " "*(_spc-len(player))
            gun = " ".join(gunsplit[idx])
            equipment = " ".join(armorsplit[idx])
            msg += f"{player}:{name_spc} {gun} {equipment}\n"
        msg += "```"
        return msg
|
{"hexsha": "e9a7fb41cbd993bf2ee336b2a08c05aa8dc9bb82", "size": 6678, "ext": "py", "lang": "Python", "max_stars_repo_path": "vjemmie/cogs/pubg_cog.py", "max_stars_repo_name": "PederHA/vjemmie", "max_stars_repo_head_hexsha": "e3742380d3ea06de90f8227a0934569f8fd02b5c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-30T02:43:27.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-30T02:43:27.000Z", "max_issues_repo_path": "vjemmie/cogs/pubg_cog.py", "max_issues_repo_name": "PederHA/vjemmie", "max_issues_repo_head_hexsha": "e3742380d3ea06de90f8227a0934569f8fd02b5c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-09-20T14:07:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T01:18:23.000Z", "max_forks_repo_path": "vjemmie/cogs/pubg_cog.py", "max_forks_repo_name": "PederHA/vjemmie", "max_forks_repo_head_hexsha": "e3742380d3ea06de90f8227a0934569f8fd02b5c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0594059406, "max_line_length": 121, "alphanum_fraction": 0.4764899671, "include": true, "reason": "import numpy", "num_tokens": 1457}
|
/*
Copyright 2013 Adobe
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
*/
/*************************************************************************************************/
#include <adobe/config.hpp>
#include <functional>
#include <iostream>
#include <sstream>
#include <utility>
#define BOOST_TEST_MAIN
#include <boost/test/unit_test.hpp>
#include <boost/type_traits/is_same.hpp>
#include <adobe/test/check_regular.hpp>
#include <adobe/test/check_less_than_comparable.hpp>
#include <adobe/test/check_null.hpp>
#include <adobe/copy_on_write.hpp>
#include <adobe/memory.hpp>
namespace {
template <typename T>
class noisy_allocator;

/*
    Specialization for void: an allocator cannot allocate objects of type
    void, so only the nested types required by the allocator / rebind
    machinery are provided.

    Fix: `pointer` was declared as a data member (`void* pointer;`) instead
    of the nested typedef the allocator interface requires.
*/
template <>
class noisy_allocator<void> {
public:
    typedef void* pointer;
    typedef const void* const_pointer;
    typedef void value_type;
    template <class U>
    struct rebind {
        typedef noisy_allocator<U> other;
    };
    // The allocator is stateless, so all instances compare equal.
    friend inline bool operator==(const noisy_allocator&, const noisy_allocator&) { return true; }
    friend inline bool operator!=(const noisy_allocator&, const noisy_allocator&) { return false; }
};
// Running tally of allocations performed by noisy_allocator.
//   count == true  : record one allocation, return the tally *before* it.
//   count == false : reset the tally to zero, return the value it had.
std::size_t noisy_check_allocation(bool count = false) {
    static std::size_t total_s = 0;
    const std::size_t before = total_s;
    total_s = count ? total_s + 1 : 0;
    return before;
}
// Running tally of deallocations performed by noisy_allocator.
//   count == true  : record one deallocation, return the tally *before* it.
//   count == false : reset the tally to zero, return the value it had.
std::size_t noisy_check_deallocation(bool count = false) {
    static std::size_t total_s = 0;
    const std::size_t before = total_s;
    total_s = count ? total_s + 1 : 0;
    return before;
}
/*
    Allocator that logs every allocation/deallocation to std::cout and bumps
    the file-global noisy_check_allocation()/noisy_check_deallocation()
    tallies, so the tests below can verify how many heap operations a
    copy_on_write instantiation performs.  Stateless: all instances compare
    equal, so memory from one instance may be released through another.
*/
template <typename T>
class noisy_allocator {
public:
    typedef std::size_t size_type;
    typedef std::ptrdiff_t difference_type;
    typedef T* pointer;
    typedef const T* const_pointer;
    typedef T& reference;
    typedef const T& const_reference;
    typedef T value_type;

    template <typename U>
    struct rebind {
        typedef noisy_allocator<U> other;
    };

    noisy_allocator() {}
    template <typename U>
    noisy_allocator(const noisy_allocator<U>&) {}

    pointer address(reference x) const { return &x; }
    const_pointer address(const_reference x) const { return &x; }

    pointer allocate(size_type n, noisy_allocator<void>::const_pointer = 0) {
        if (n > max_size())
            throw std::bad_alloc();

        // nothrow form so we can translate failure into bad_alloc ourselves.
        void* raw(::operator new(n * sizeof(T), std::nothrow));
        if (!raw)
            throw std::bad_alloc();

        std::cout << " alloc @ " << raw << "; sizeof(T): " << sizeof(T);
        if (n != 1)
            std::cout << "; n: " << n;
        std::cout << std::endl;

        noisy_check_allocation(true);
        return static_cast<pointer>(raw);
    }

    void deallocate(pointer p, size_type) {
        ::operator delete(p, std::nothrow);
        std::cout << "dealloc @ " << static_cast<void*>(p) << "; sizeof(T): " << sizeof(T)
                  << std::endl;
        noisy_check_deallocation(true);
    }

    size_type max_size() const { return size_type(-1) / sizeof(T); }

    void construct(pointer p, const T& x) { adobe::construct(p, x); }
    void destroy(pointer p) { adobe::destroy(p); }

    friend inline bool operator==(const noisy_allocator&, const noisy_allocator&) {
        return true;
    }
    friend inline bool operator!=(const noisy_allocator&, const noisy_allocator&) {
        return false;
    }
};
/*
    make_value<R>(x): construct a value of type R from x.  The test driver
    uses this to build CowType::value_type instances from small longs.
*/
template <typename R, typename T>
R make_value(const T& x) {
    return R(x);
}

/*
    Specialization for std::string: format the integer as decimal text.
    Uses std::ostringstream (output-only stream) and returns the stream's
    string directly instead of copy-constructing a second one; the required
    <sstream> header is now included explicitly rather than transitively.
*/
template <>
std::string make_value(const long& x) {
    std::ostringstream s;
    s << x;
    return s.str();
}
/*
    Exercises adobe::copy_on_write<T, Allocator> (CowType): regular-type
    semantics, ordering, move semantics, copy/move assignment, write()/read()
    and swap.  When CowType's allocator is noisy_allocator, every scope is
    followed by a check that the expected number of allocations and
    deallocations occurred; the noisy_check_* calls inside the checks also
    reset the tallies for the next scope.
*/
template <typename CowType>
void test_copy_on_write() {
    // is_noisy: true iff this instantiation counts heap traffic through
    // noisy_allocator (the allocation checks below only run in that case).
    enum {
        is_noisy = boost::is_same<typename CowType::allocator_type,
                                  noisy_allocator<typename CowType::value_type>>::value
    };
    // mv: builds a CowType::value_type from a long (see make_value above).
    typename CowType::value_type (*mv)(const long&) =
        &make_value<typename CowType::value_type, long>;
    if (is_noisy) {
        // reset counters
        noisy_check_allocation();
        noisy_check_deallocation();
        std::cout << "Testing " << typeid(CowType).name() << "...\n";
    }
    // Test default constructor
    { CowType value_0; }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 1, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 0, "deallocation count mismatch");
    }
    // Test basic concept requirements
    {
        CowType value_1(mv(1)); // allocation
        CowType value_2(mv(2)); // allocation
        CowType value_3(mv(3)); // allocation
        // regular
        adobe::check_regular(value_1);
        // operator<
        adobe::check_less_than_comparable(value_1, value_2, value_3, std::less<CowType>());
        // operator>
        adobe::check_less_than_comparable(value_3, value_2, value_1, std::greater<CowType>());
        CowType value_test(mv(1)); // allocation
        BOOST_CHECK_MESSAGE(value_1 == value_test, "equality of non-identical values");
        BOOST_CHECK_MESSAGE(value_2 != value_test, "equality of non-identical values");
        BOOST_CHECK(value_test.unique_instance());
        value_test = value_2; // deallocation
        BOOST_CHECK(!value_test.unique_instance());
        BOOST_CHECK(value_test.identity(value_2));
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 4, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 4, "deallocation count mismatch");
    }
    // Test basic move semantics
    {
        CowType value_1(mv(42)); // allocation
        CowType value_2(mv(21)); // allocation
        CowType value_move(std::move(value_1));
        BOOST_CHECK_MESSAGE(value_move != value_1, "move failure");
        value_move = std::move(value_2); // deallocation
        BOOST_CHECK_MESSAGE(value_move != value_2, "move failure");
        BOOST_CHECK_MESSAGE(value_1 == value_2, "move failure"); // both should be object_m == 0
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 2, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 2, "deallocation count mismatch");
    }
    // Test custom allocator constructor and set
    {
        typename CowType::allocator_type my_allocator;
        CowType value_4(my_allocator); // allocation
        value_4.write() = mv(4);
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 1, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 1, "deallocation count mismatch");
    }
    // Test copy-assignment using null object_m
    {
        CowType foo(mv(1)); // allocation
        CowType bar(std::move(foo));
        foo = mv(2); // allocation
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 2, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 2, "deallocation count mismatch");
    }
    // Test copy-assignment using non-null object_m
    {
        CowType foo(mv(5)); // allocation
        CowType bar(foo);
        BOOST_CHECK(bar.identity(foo));
        bar = mv(6); // allocation
        BOOST_CHECK(bar.unique_instance() && foo.unique_instance());
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 2, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 2, "deallocation count mismatch");
    }
    // Test move-assignment using null object_m
    {
        CowType foo(mv(1)); // allocation
        CowType bar(std::move(foo));
        typename CowType::value_type value(mv(2));
        foo = std::move(value); // allocation
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 2, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 2, "deallocation count mismatch");
    }
    // Test move-assignment using unique instance
    {
        CowType foo(mv(1)); // allocation
        typename CowType::value_type value(mv(2));
        foo = std::move(value);
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 1, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 1, "deallocation count mismatch");
    }
    // Test move-assignment using new allocation
    {
        CowType foo(mv(1)); // allocation
        CowType bar(foo);
        typename CowType::value_type value(mv(2));
        foo = std::move(value); // allocation
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 2, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 2, "deallocation count mismatch");
    }
    // Test write() using unique instance
    {
        CowType foo(mv(1)); // allocation
        foo.write() = typename CowType::value_type(mv(2));
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 1, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 1, "deallocation count mismatch");
    }
    // Test write() using new allocation
    {
        CowType foo(mv(1)); // allocation
        CowType bar(foo);
        foo.write() = typename CowType::value_type(mv(2)); // allocation
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 2, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 2, "deallocation count mismatch");
    }
    // Test read()
    {
        CowType foo(mv(1)); // allocation
        BOOST_CHECK_MESSAGE(foo.read() == typename CowType::value_type(mv(1)), "read error");
        BOOST_CHECK_MESSAGE(static_cast<typename CowType::value_type>(foo) ==
                                typename CowType::value_type(mv(1)),
                            "read error");
        BOOST_CHECK_MESSAGE(*foo == typename CowType::value_type(mv(1)), "read error");
        BOOST_CHECK_MESSAGE(*(foo.operator->()) == typename CowType::value_type(mv(1)),
                            "read error");
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 1, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 1, "deallocation count mismatch");
    }
    // Test swap
    {
        CowType foo(mv(1)); // allocation
        CowType bar(mv(2)); // allocation
        swap(foo, bar);
        BOOST_CHECK_MESSAGE(foo.read() == typename CowType::value_type(mv(2)), "swap error");
        BOOST_CHECK_MESSAGE(bar.read() == typename CowType::value_type(mv(1)), "swap error");
    }
    // Check
    if (is_noisy) {
        BOOST_CHECK_MESSAGE(noisy_check_allocation() == 2, "allocation count mismatch");
        BOOST_CHECK_MESSAGE(noisy_check_deallocation() == 2, "deallocation count mismatch");
    }
}
} // namespace
// Prints the mangled type names of two copy_on_write instantiations.
// NOTE(review): the requires_std_rtti() assertions are commented out;
// presumably that API is not available in this build — confirm before
// re-enabling.
BOOST_AUTO_TEST_CASE(CowType_allocator_rtti) {
    using namespace adobe;
    {
        using cow_t = copy_on_write<int>;
        std::cout << typeid(cow_t).name() << '\n';
        // BOOST_CHECK(!t.requires_std_rtti());
    }
    {
        using cow_t = copy_on_write<int, std::allocator<int>>;
        std::cout << typeid(cow_t).name() << '\n';
        // BOOST_CHECK(t.requires_std_rtti());
    }
}
// Runs the full test battery over two value types (int and std::string)
// crossed with the three allocator choices: the default (capture_allocator),
// std::allocator, and the counting noisy_allocator.  Only the noisy
// instantiations trigger the allocation-count checks in test_copy_on_write.
BOOST_AUTO_TEST_CASE(copy_on_write) {
    // test nonmovable type with capture_allocator
    test_copy_on_write<adobe::copy_on_write<int>>();
    // test nonmovable type with std::allocator
    test_copy_on_write<adobe::copy_on_write<int, std::allocator<int>>>();
    // test nonmovable type with noisy_allocator
    test_copy_on_write<adobe::copy_on_write<int, noisy_allocator<int>>>();
    // test movable type with capture_allocator
    test_copy_on_write<adobe::copy_on_write<std::string>>();
    // test movable type with std::allocator
    test_copy_on_write<adobe::copy_on_write<std::string, std::allocator<std::string>>>();
    // test movable type with noisy_allocator
    test_copy_on_write<adobe::copy_on_write<std::string, noisy_allocator<std::string>>>();
}
// The void specialization is stateless, so any two instances must compare
// equal (and never unequal).
BOOST_AUTO_TEST_CASE(void_equality) {
    BOOST_CHECK(noisy_allocator<void>() == noisy_allocator<void>());
    BOOST_CHECK(!(noisy_allocator<void>() != noisy_allocator<void>()));
}
|
{"hexsha": "16bf19b666b45c335620c1929ee363acf726825a", "size": 12506, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/unit_tests/copy_on_write/cow_test.cpp", "max_stars_repo_name": "ilelann/adobe_source_libraries", "max_stars_repo_head_hexsha": "82224d13335398dfebfc77addabab28c4296ecba", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/unit_tests/copy_on_write/cow_test.cpp", "max_issues_repo_name": "ilelann/adobe_source_libraries", "max_issues_repo_head_hexsha": "82224d13335398dfebfc77addabab28c4296ecba", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/unit_tests/copy_on_write/cow_test.cpp", "max_forks_repo_name": "ilelann/adobe_source_libraries", "max_forks_repo_head_hexsha": "82224d13335398dfebfc77addabab28c4296ecba", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-06-18T12:25:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-18T12:25:12.000Z", "avg_line_length": 31.1094527363, "max_line_length": 99, "alphanum_fraction": 0.6344154806, "num_tokens": 2964}
|
# Build an element of F from coefficients in the polynomial basis: the
# result is sum(coeffs[i] * gen(F)^(i-1)).  Implemented with a running
# power of the generator instead of re-exponentiating at every step.
(F::FqFiniteField)(coeffs::Array{T,1}) where {T<:Union{Integer,fmpz}} = begin
    g = gen(F)
    pw = g^0  # multiplicative identity of F
    acc = zero(F)
    for c in coeffs
        acc += c * pw
        pw *= g
    end
    acc
end
# Same coefficient-vector constructor as above, for the word-sized
# (nmod) finite-field type.
(F::FqNmodFiniteField)(coeffs::Array{T,1}) where {T<:Union{Integer,fmpz}} = begin
    g = gen(F)
    pw = g^0  # multiplicative identity of F
    acc = zero(F)
    for c in coeffs
        acc += c * pw
        pw *= g
    end
    acc
end
# A single square root of `a` (errors if none exists); see `root` below.
sqrt(a::FinFieldElem) = root(a, 2)
# http://trac.sagemath.org/ticket/7931
# http://sagenb.org/src/rings/finite_rings/element_base.pyx
# Convenience overload: promote a machine-integer exponent to fmpz.
root(a::FinFieldElem, n::Integer) = root(a, fmpz(n))
# Return one n-th root of `a` in its parent field K (of order q), raising an
# error when no such root exists.  Strategy (cf. the Sage references above):
# reduce the exponent modulo q-1, invert the part of it coprime to q-1 via
# the extended gcd, and handle the remaining gcd prime power by prime power
# using discrete logarithms.
root(a::FinFieldElem, n::fmpz) = begin
    if iszero(a)
        # 0 has the single n-th root 0 for positive n; undefined otherwise.
        n <= 0 && throw(DomainError())
        return a
    end
    K = parent(a)
    q = order(K)
    if n < 0
        # x^(-n)-th roots of a are n-th roots of 1/a.
        a = inv(a)
        n = -n
    elseif n == 0
        # x^0 == 1, so only a == 1 has (any nonzero element as) a 0-th root.
        a == 1 || throw(DomainError())
        return a
    end
    if isone(a)
        GCD = gcd(n, q-1)
        GCD == 1 && return a
        g = gen(K) # TODO: The generator is guaranteed to be a multiplicative generator only if the field is generated by a Conway polynomial.
        q1overn = (q-1) ÷ GCD
        nthroot = g^q1overn
        return nthroot
    end
    m = n % (q-1)
    m == 0 && error("$(a) has no $(n)th root in $(K)")
    # GCD = α*m + β*(q-1), so 1/m = α/GCD (mod q-1)
    GCD, α, β = gcdx(m, q-1)
    GCD == 1 && return a^α
    m = GCD
    q1overn = (q-1) ÷ m
    # a is an m-th power iff a^((q-1)/m) == 1.
    a^q1overn != 1 && error("$(a) has no $(n)th root in $(K)")
    b = a^α
    F = [(fmpz(p), e) for (p, e) in factor(BigInt(m))]
    g = gen(K) # TODO: The generator is guaranteed to be a multiplicative generator only if the field is generated by a Conway polynomial.
    for (r, v) in F
        # q-1 = r^k * h with r not dividing h.
        k, h = remove(q-1, r)
        z = h * invmod(-h, r^v)::typeof(h)
        x = (1 + z) ÷ (r^v)
        if k == 1
            b = b^x
        else
            # NOTE(review): assumes log(x, base, ord) computes a discrete
            # logarithm of x to the given base of the given order — confirm.
            t = log(b^h, g^(r^v * h), r^(k-v))
            b = b^x * g^(-z*t)
        end
    end
    b
end
# All square roots of `a` (possibly empty); see `roots` below.
sqrts(a::FinFieldElem) = roots(a, 2)
# http://trac.sagemath.org/ticket/7931
# http://sagenb.org/src/rings/finite_rings/element_base.pyx
# Convenience overload: promote a machine-integer exponent to fmpz.
roots(a::FinFieldElem, n::Integer) = roots(a, fmpz(n))
# Return all n-th roots of `a` in its parent field K (of order q); the result
# is empty when `a` has no n-th root.  Mirrors `root` above, but returns the
# whole solution coset b, b*w, b*w^2, ..., where w is a primitive m-th root
# of unity and m = gcd(n, q-1).
#
# Fix: the n == 0 branch previously computed the answer (all nonzero field
# elements, since x^0 == 1 for every x != 0) but fell through without
# returning it, continuing into the isone(a) branch instead.
roots(a::T, n::fmpz) where {T<:FinFieldElem} = begin
    if iszero(a)
        # 0 has the single n-th root 0 for positive n; undefined otherwise.
        n <= 0 && throw(DomainError())
        return [a]
    end
    K = parent(a)
    q = order(K)
    if n < 0
        # x^(-n)-th roots of a are n-th roots of 1/a.
        a = inv(a)
        n = -n
    elseif n == 0
        a == 1 || throw(DomainError())
        # Every nonzero element satisfies x^0 == 1.
        e = elements(K)
        return e[e .!= 0]
    end
    if isone(a)
        GCD = gcd(n, q-1)
        GCD == 1 && return [a]
        g = gen(K) # TODO: The generator is guaranteed to be a multiplicative generator only if the field is generated by a Conway polynomial.
        q1overn = (q-1) ÷ GCD
        nthroot = g^q1overn
        return [nthroot^i for i in 0:(GCD-1)]
    end
    m = n % (q-1)
    m == 0 && return T[]
    # GCD = α*m + β*(q-1), so 1/m = α/GCD (mod q-1)
    GCD, α, β = gcdx(m, q-1)
    GCD == 1 && return [a^α]
    m = GCD
    q1overn = (q-1) ÷ m
    # a is an m-th power iff a^((q-1)/m) == 1.
    a^q1overn != 1 && return T[]
    b = a^α
    F = [(fmpz(p), e) for (p, e) in factor(BigInt(m))]
    g = gen(K) # TODO: The generator is guaranteed to be a multiplicative generator only if the field is generated by a Conway polynomial.
    for (r, v) in F
        # q-1 = r^k * h with r not dividing h.
        k, h = remove(q-1, r)
        z = h * invmod(-h, r^v)::typeof(h)
        x = (1 + z) ÷ (r^v)
        if k == 1
            b = b^x
        else
            t = log(b^h, g^(r^v * h), r^(k-v))
            b = b^x * g^(-z*t)
        end
    end
    # b is one m-th root; multiplying by powers of a primitive m-th root of
    # unity yields all of them.
    nthroot = g^q1overn
    L = [b]
    for _ in 1:(m-1)
        b *= nthroot
        push!(L, b)
    end
    L
end
# Enumerate every element of the finite field F by iterating over all
# degree(F)-tuples of coefficients (via the @forcartesian macro) and mapping
# each tuple through the coefficient constructor defined above.
elements(F::Union{FqFiniteField,FqNmodFiniteField}) = begin
    p = characteristic(F)
    k = degree(F)
    e = typeof(zero(F))[]
    @forcartesian c [p for _ in 1:k] begin
        # @forcartesian yields 1-based tuples; shift to 0-based coefficients.
        push!(e, F(c .- 1))
    end
    e
end
# Draw a random element of F by choosing each coordinate in the degree-k
# polynomial basis from 0:(p-1), where p is the field characteristic.
rand(F::Union{FqFiniteField,FqNmodFiniteField}) =
    F([rand(0:(characteristic(F) - 1)) for _ in 1:degree(F)])
|
{"hexsha": "4d5f260871c97890f932d0f3d8d7d78401b4d7a7", "size": 3966, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Math/Algebra/field.jl", "max_stars_repo_name": "Samayel/Brainstorm.jl", "max_stars_repo_head_hexsha": "9d83bb0a104973e498ba4ca84b0a27ede6c053ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-12-22T17:56:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T21:13:36.000Z", "max_issues_repo_path": "src/Math/Algebra/field.jl", "max_issues_repo_name": "Samayel/Brainstorm.jl", "max_issues_repo_head_hexsha": "9d83bb0a104973e498ba4ca84b0a27ede6c053ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Math/Algebra/field.jl", "max_forks_repo_name": "Samayel/Brainstorm.jl", "max_forks_repo_head_hexsha": "9d83bb0a104973e498ba4ca84b0a27ede6c053ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8915662651, "max_line_length": 142, "alphanum_fraction": 0.5047907211, "num_tokens": 1454}
|
(*
File: HOL/Computational_Algebra/Squarefree.thy
Author: Manuel Eberl <manuel@pruvisto.org>
Squarefreeness and decomposition of ring elements into square part and squarefree part
*)
section \<open>Squarefreeness\<close>
theory Squarefree
imports Primes
begin
(* TODO: Generalise to n-th powers *)
(* An element is squarefree iff every x whose square divides it is a unit. *)
definition squarefree :: "'a :: comm_monoid_mult \<Rightarrow> bool" where
  "squarefree n \<longleftrightarrow> (\<forall>x. x ^ 2 dvd n \<longrightarrow> x dvd 1)"
(* Introduction/destruction rules unfolding the definition. *)
lemma squarefreeI: "(\<And>x. x ^ 2 dvd n \<Longrightarrow> x dvd 1) \<Longrightarrow> squarefree n"
  by (auto simp: squarefree_def)
lemma squarefreeD: "squarefree n \<Longrightarrow> x ^ 2 dvd n \<Longrightarrow> x dvd 1"
  by (auto simp: squarefree_def)
lemma not_squarefreeI: "x ^ 2 dvd n \<Longrightarrow> \<not>x dvd 1 \<Longrightarrow> \<not>squarefree n"
  by (auto simp: squarefree_def)
lemma not_squarefreeE [case_names square_dvd]:
  "\<not>squarefree n \<Longrightarrow> (\<And>x. x ^ 2 dvd n \<Longrightarrow> \<not>x dvd 1 \<Longrightarrow> P) \<Longrightarrow> P"
  by (auto simp: squarefree_def)
(* 0 is never squarefree: 0^2 dvd 0, but 0 is not a unit in a semiring_1. *)
lemma not_squarefree_0 [simp]: "\<not>squarefree (0 :: 'a :: comm_semiring_1)"
  by (rule not_squarefreeI[of 0]) auto
(* In a factorial semiring, squarefreeness has three equivalent
   characterisations: no prime square divides n; every prime factor has
   multiplicity 1; every prime has multiplicity at most 1. *)
lemma squarefree_factorial_semiring:
  assumes "n \<noteq> 0"
  shows "squarefree (n :: 'a :: factorial_semiring) \<longleftrightarrow> (\<forall>p. prime p \<longrightarrow> \<not>p ^ 2 dvd n)"
  unfolding squarefree_def
proof safe
  assume *: "\<forall>p. prime p \<longrightarrow> \<not>p ^ 2 dvd n"
  fix x :: 'a assume x: "x ^ 2 dvd n"
  {
    assume "\<not>is_unit x"
    moreover from assms and x have "x \<noteq> 0" by auto
    ultimately obtain p where "p dvd x" "prime p"
      using prime_divisor_exists by blast
    with * have "\<not>p ^ 2 dvd n" by blast
    moreover from \<open>p dvd x\<close> have "p ^ 2 dvd x ^ 2" by (rule dvd_power_same)
    ultimately have "\<not>x ^ 2 dvd n" by (blast dest: dvd_trans)
    with x have False by contradiction
  }
  thus "is_unit x" by blast
qed auto
lemma squarefree_factorial_semiring':
  assumes "n \<noteq> 0"
  shows "squarefree (n :: 'a :: factorial_semiring) \<longleftrightarrow>
           (\<forall>p\<in>prime_factors n. multiplicity p n = 1)"
proof (subst squarefree_factorial_semiring [OF assms], safe)
  fix p assume "\<forall>p\<in>#prime_factorization n. multiplicity p n = 1" "prime p" "p^2 dvd n"
  with assms show False
    by (cases "p dvd n")
       (auto simp: prime_factors_dvd power_dvd_iff_le_multiplicity not_dvd_imp_multiplicity_0)
qed (auto intro!: multiplicity_eqI simp: power2_eq_square [symmetric])
lemma squarefree_factorial_semiring'':
  assumes "n \<noteq> 0"
  shows "squarefree (n :: 'a :: factorial_semiring) \<longleftrightarrow>
           (\<forall>p. prime p \<longrightarrow> multiplicity p n \<le> 1)"
  by (subst squarefree_factorial_semiring'[OF assms]) (auto simp: prime_factors_multiplicity)
(* Closure properties of squarefreeness: units, 1, negation, divisors,
   factors of products, primes, coprime products, and powers. *)
lemma squarefree_unit [simp]: "is_unit n \<Longrightarrow> squarefree n"
proof (rule squarefreeI)
  fix x assume "x^2 dvd n" "n dvd 1"
  hence "is_unit (x^2)" by (rule dvd_unit_imp_unit)
  thus "is_unit x" by (simp add: is_unit_power_iff)
qed
lemma squarefree_1 [simp]: "squarefree (1 :: 'a :: algebraic_semidom)"
  by simp
lemma squarefree_minus [simp]: "squarefree (-n :: 'a :: comm_ring_1) \<longleftrightarrow> squarefree n"
  by (simp add: squarefree_def)
lemma squarefree_mono: "a dvd b \<Longrightarrow> squarefree b \<Longrightarrow> squarefree a"
  by (auto simp: squarefree_def intro: dvd_trans)
lemma squarefree_multD:
  assumes "squarefree (a * b)"
  shows "squarefree a" "squarefree b"
  by (rule squarefree_mono[OF _ assms], simp)+
lemma squarefree_prime_elem:
  assumes "prime_elem (p :: 'a :: factorial_semiring)"
  shows "squarefree p"
proof -
  from assms have "p \<noteq> 0" by auto
  show ?thesis
  proof (subst squarefree_factorial_semiring [OF \<open>p \<noteq> 0\<close>]; safe)
    fix q assume *: "prime q" "q^2 dvd p"
    with assms have "multiplicity q p \<ge> 2" by (intro multiplicity_geI) auto
    thus False using assms \<open>prime q\<close> prime_multiplicity_other[of q "normalize p"]
      by (cases "q = normalize p") simp_all
  qed
qed
lemma squarefree_prime:
  assumes "prime (p :: 'a :: factorial_semiring)"
  shows "squarefree p"
  using assms by (intro squarefree_prime_elem) auto
(* Squarefreeness is preserved under products of coprime factors (each
   prime can occur in at most one factor, with multiplicity 1 there). *)
lemma squarefree_mult_coprime:
  fixes a b :: "'a :: factorial_semiring_gcd"
  assumes "coprime a b" "squarefree a" "squarefree b"
  shows "squarefree (a * b)"
proof -
  from assms have nz: "a * b \<noteq> 0" by auto
  show ?thesis unfolding squarefree_factorial_semiring'[OF nz]
  proof
    fix p assume p: "p \<in> prime_factors (a * b)"
    with nz have "prime p"
      by (simp add: prime_factors_dvd)
    have "\<not> (p dvd a \<and> p dvd b)"
    proof
      assume "p dvd a \<and> p dvd b"
      with \<open>coprime a b\<close> have "is_unit p"
        by (auto intro: coprime_common_divisor)
      with \<open>prime p\<close> show False
        by simp
    qed
    moreover from p have "p dvd a \<or> p dvd b" using nz
      by (auto simp: prime_factors_dvd prime_dvd_mult_iff)
    ultimately show "multiplicity p (a * b) = 1" using nz p assms(2,3)
      by (auto simp: prime_elem_multiplicity_mult_distrib prime_factors_multiplicity
                     not_dvd_imp_multiplicity_0 squarefree_factorial_semiring')
  qed
qed
lemma squarefree_prod_coprime:
  fixes f :: "'a \<Rightarrow> 'b :: factorial_semiring_gcd"
  assumes "\<And>a b. a \<in> A \<Longrightarrow> b \<in> A \<Longrightarrow> a \<noteq> b \<Longrightarrow> coprime (f a) (f b)"
  assumes "\<And>a. a \<in> A \<Longrightarrow> squarefree (f a)"
  shows "squarefree (prod f A)"
  using assms
  by (induction A rule: infinite_finite_induct)
     (auto intro!: squarefree_mult_coprime prod_coprime_right)
lemma squarefree_powerD: "m > 0 \<Longrightarrow> squarefree (n ^ m) \<Longrightarrow> squarefree n"
  by (cases m) (auto dest: squarefree_multD)
lemma squarefree_power_iff:
  "squarefree (n ^ m) \<longleftrightarrow> m = 0 \<or> is_unit n \<or> (squarefree n \<and> m = 1)"
proof safe
  assume "squarefree (n ^ m)" "m > 0" "\<not>is_unit n"
  show "m = 1"
  proof (rule ccontr)
    assume "m \<noteq> 1"
    with \<open>m > 0\<close> have "n ^ 2 dvd n ^ m" by (intro le_imp_power_dvd) auto
    from this and \<open>\<not>is_unit n\<close> have "\<not>squarefree (n ^ m)" by (rule not_squarefreeI)
    with \<open>squarefree (n ^ m)\<close> show False by contradiction
  qed
qed (auto simp: is_unit_power_iff dest: squarefree_powerD)
(* Executable (naive, quadratic) check for squarefreeness of naturals. *)
definition squarefree_nat :: "nat \<Rightarrow> bool" where
  [code_abbrev]: "squarefree_nat = squarefree"
lemma squarefree_nat_code_naive [code]:
  "squarefree_nat n \<longleftrightarrow> n \<noteq> 0 \<and> (\<forall>k\<in>{2..n}. \<not>k ^ 2 dvd n)"
proof safe
  assume *: "\<forall>k\<in>{2..n}. \<not> k\<^sup>2 dvd n" and n: "n > 0"
  show "squarefree_nat n" unfolding squarefree_nat_def
  proof (rule squarefreeI)
    fix k assume k: "k ^ 2 dvd n"
    have "k dvd n" by (rule dvd_trans[OF _ k]) auto
    with n have "k \<le> n" by (intro dvd_imp_le)
    with bspec[OF *, of k] k have "\<not>k > 1" by (intro notI) auto
    moreover from k and n have "k \<noteq> 0" by (intro notI) auto
    ultimately have "k = 1" by presburger
    thus "is_unit k" by simp
  qed
qed (auto simp: squarefree_nat_def squarefree_def intro!: Nat.gr0I)
(* square_part n: the largest (normalized) element whose square divides n;
   each prime p contributes p ^ (multiplicity p n div 2). *)
definition square_part :: "'a :: factorial_semiring \<Rightarrow> 'a" where
  "square_part n = (if n = 0 then 0 else
     normalize (\<Prod>p\<in>prime_factors n. p ^ (multiplicity p n div 2)))"
lemma square_part_nonzero:
  "n \<noteq> 0 \<Longrightarrow> square_part n = normalize (\<Prod>p\<in>prime_factors n. p ^ (multiplicity p n div 2))"
  by (simp add: square_part_def)
lemma square_part_0 [simp]: "square_part 0 = 0"
  by (simp add: square_part_def)
lemma square_part_unit [simp]: "is_unit x \<Longrightarrow> square_part x = 1"
  by (auto simp: square_part_def prime_factorization_unit)
lemma square_part_1 [simp]: "square_part 1 = 1"
  by simp
lemma square_part_0_iff [simp]: "square_part n = 0 \<longleftrightarrow> n = 0"
  by (simp add: square_part_def)
(* Auxiliary facts: normalize, multiplicity and prime_factorization are
   invariant under negation in a ring. *)
lemma normalize_uminus [simp]:
  "normalize (-x :: 'a :: {normalization_semidom, comm_ring_1}) = normalize x"
  by (rule associatedI) auto
lemma multiplicity_uminus_right [simp]:
  "multiplicity (x :: 'a :: {factorial_semiring, comm_ring_1}) (-y) = multiplicity x y"
proof -
  have "multiplicity x (-y) = multiplicity x (normalize (-y))"
    by (rule multiplicity_normalize_right [symmetric])
  also have "\<dots> = multiplicity x y" by simp
  finally show ?thesis .
qed
lemma multiplicity_uminus_left [simp]:
  "multiplicity (-x :: 'a :: {factorial_semiring, comm_ring_1}) y = multiplicity x y"
proof -
  have "multiplicity (-x) y = multiplicity (normalize (-x)) y"
    by (rule multiplicity_normalize_left [symmetric])
  also have "\<dots> = multiplicity x y" by simp
  finally show ?thesis .
qed
lemma prime_factorization_uminus [simp]:
  "prime_factorization (-x :: 'a :: {factorial_semiring, comm_ring_1}) = prime_factorization x"
  by (rule prime_factorization_cong) simp_all
lemma square_part_uminus [simp]:
  "square_part (-x :: 'a :: {factorial_semiring, comm_ring_1}) = square_part x"
  by (simp add: square_part_def)
lemma prime_multiplicity_square_part:
  assumes "prime p"
  shows "multiplicity p (square_part n) = multiplicity p n div 2"
proof (cases "n = 0")
  case False
  thus ?thesis unfolding square_part_nonzero[OF False] multiplicity_normalize_right
    using finite_prime_divisors[of n] assms
    by (subst multiplicity_prod_prime_powers)
       (auto simp: not_dvd_imp_multiplicity_0 prime_factors_dvd multiplicity_prod_prime_powers)
qed auto
lemma square_part_square_dvd [simp, intro]: "square_part n ^ 2 dvd n"
proof (cases "n = 0")
  case False
  thus ?thesis
    by (intro multiplicity_le_imp_dvd)
       (auto simp: prime_multiplicity_square_part prime_elem_multiplicity_power_distrib)
qed auto
lemma prime_multiplicity_le_imp_dvd:
  assumes "x \<noteq> 0" "y \<noteq> 0"
  shows "x dvd y \<longleftrightarrow> (\<forall>p. prime p \<longrightarrow> multiplicity p x \<le> multiplicity p y)"
  using assms by (auto intro: multiplicity_le_imp_dvd dvd_imp_multiplicity_le)
lemma dvd_square_part_iff: "x dvd square_part n \<longleftrightarrow> x ^ 2 dvd n"
proof (cases "x = 0"; cases "n = 0")
  assume nz: "x \<noteq> 0" "n \<noteq> 0"
  thus ?thesis
    by (subst (1 2) prime_multiplicity_le_imp_dvd)
       (auto simp: prime_multiplicity_square_part prime_elem_multiplicity_power_distrib)
qed auto
(* squarefree_part n: the cofactor of square_part n ^ 2 in n, so that
   n = squarefree_part n * square_part n ^ 2 (squarefree_decompose). *)
definition squarefree_part :: "'a :: factorial_semiring \<Rightarrow> 'a" where
  "squarefree_part n = (if n = 0 then 1 else n div square_part n ^ 2)"
lemma squarefree_part_0 [simp]: "squarefree_part 0 = 1"
  by (simp add: squarefree_part_def)
lemma squarefree_part_unit [simp]: "is_unit n \<Longrightarrow> squarefree_part n = n"
  by (auto simp add: squarefree_part_def)
lemma squarefree_part_1 [simp]: "squarefree_part 1 = 1"
  by simp
lemma squarefree_decompose: "n = squarefree_part n * square_part n ^ 2"
  by (simp add: squarefree_part_def)
lemma squarefree_part_uminus [simp]:
  assumes "x \<noteq> 0"
  shows "squarefree_part (-x :: 'a :: {factorial_semiring, comm_ring_1}) = -squarefree_part x"
proof -
  have "-(squarefree_part x * square_part x ^ 2) = -x"
    by (subst squarefree_decompose [symmetric]) auto
  also have "\<dots> = squarefree_part (-x) * square_part (-x) ^ 2" by (rule squarefree_decompose)
  finally have "(- squarefree_part x) * square_part x ^ 2 =
                  squarefree_part (-x) * square_part x ^ 2" by simp
  thus ?thesis using assms by (subst (asm) mult_right_cancel) auto
qed
lemma squarefree_part_nonzero [simp]: "squarefree_part n \<noteq> 0"
  using squarefree_decompose[of n] by (cases "n \<noteq> 0") auto
lemma prime_multiplicity_squarefree_part:
  assumes "prime p"
  shows "multiplicity p (squarefree_part n) = multiplicity p n mod 2"
proof (cases "n = 0")
  case False
  hence n: "n \<noteq> 0" by auto
  have "multiplicity p n mod 2 + 2 * (multiplicity p n div 2) = multiplicity p n" by simp
  also have "\<dots> = multiplicity p (squarefree_part n * square_part n ^ 2)"
    by (subst squarefree_decompose[of n]) simp
  also from assms n have "\<dots> = multiplicity p (squarefree_part n) + 2 * (multiplicity p n div 2)"
    by (subst prime_elem_multiplicity_mult_distrib)
       (auto simp: prime_elem_multiplicity_power_distrib prime_multiplicity_square_part)
  finally show ?thesis by (subst (asm) add_right_cancel) simp
qed auto
lemma prime_multiplicity_squarefree_part_le_Suc_0 [intro]:
  assumes "prime p"
  shows "multiplicity p (squarefree_part n) \<le> Suc 0"
  by (simp add: assms prime_multiplicity_squarefree_part)
lemma squarefree_squarefree_part [simp, intro]: "squarefree (squarefree_part n)"
  by (subst squarefree_factorial_semiring'')
     (auto simp: prime_multiplicity_squarefree_part_le_Suc_0)
lemma squarefree_decomposition_unique:
  assumes "square_part m = square_part n"
  assumes "squarefree_part m = squarefree_part n"
  shows "m = n"
  by (subst (1 2) squarefree_decompose) (simp_all add: assms)
lemma normalize_square_part [simp]: "normalize (square_part x) = square_part x"
  by (simp add: square_part_def)
(* square_part of powers: x^(2n) contributes x^n; x^(2n+1) additionally
   contributes square_part x. *)
lemma square_part_even_power': "square_part (x ^ (2 * n)) = normalize (x ^ n)"
proof (cases "x = 0")
  case False
  have "normalize (square_part (x ^ (2 * n))) = normalize (x ^ n)" using False
    by (intro multiplicity_eq_imp_eq)
       (auto simp: prime_multiplicity_square_part prime_elem_multiplicity_power_distrib)
  thus ?thesis by simp
qed (auto simp: power_0_left)
lemma square_part_even_power: "even n \<Longrightarrow> square_part (x ^ n) = normalize (x ^ (n div 2))"
  by (subst square_part_even_power' [symmetric]) auto
lemma square_part_odd_power': "square_part (x ^ (Suc (2 * n))) = normalize (x ^ n * square_part x)"
proof (cases "x = 0")
  case False
  have "normalize (square_part (x ^ (Suc (2 * n)))) = normalize (square_part x * x ^ n)"
  proof (rule multiplicity_eq_imp_eq, goal_cases)
    case (3 p)
    hence "multiplicity p (square_part (x ^ Suc (2 * n))) =
             (2 * (n * multiplicity p x) + multiplicity p x) div 2"
      by (subst prime_multiplicity_square_part)
         (auto simp: False prime_elem_multiplicity_power_distrib algebra_simps simp del: power_Suc)
    also from 3 False have "\<dots> = multiplicity p (square_part x * x ^ n)"
      by (subst div_mult_self4) (auto simp: prime_multiplicity_square_part
        prime_elem_multiplicity_mult_distrib prime_elem_multiplicity_power_distrib)
    finally show ?case .
  qed (insert False, auto)
  thus ?thesis by (simp add: mult_ac)
qed auto
lemma square_part_odd_power:
  "odd n \<Longrightarrow> square_part (x ^ n) = normalize (x ^ (n div 2) * square_part x)"
  by (subst square_part_odd_power' [symmetric]) auto
end
|
{"author": "seL4", "repo": "isabelle", "sha": "e1ab32a3bb41728cd19541063283e37919978a4c", "save_path": "github-repos/isabelle/seL4-isabelle", "path": "github-repos/isabelle/seL4-isabelle/isabelle-e1ab32a3bb41728cd19541063283e37919978a4c/src/HOL/Computational_Algebra/Squarefree.thy"}
|
# adapted from https://github.com/yangheng95/LC-ABSA/blob/c945a94e0f86116c5578245aa9ad36c46c7b9c4a/models/lc_apc/lcf_bert.py
# according to
import copy
from argparse import Namespace
from typing import Dict
import numpy as np
import torch
import torch.nn as nn
from transformers.modeling_bert import BertPooler, BertSelfAttention
from NewsSentiment.consts import *
from NewsSentiment.dataset import FXDataset
from NewsSentiment.layers.attention import FXBertSelfAttention
from NewsSentiment.models.FXBaseModel import FXBaseModel
class GlobalContext(nn.Module):
    """Stub module that only records how many global-context sequences each
    document provides; its forward pass is not implemented yet."""

    def __init__(self, global_context_seqs_per_doc):
        super().__init__()
        self.global_context_seqs_per_doc = global_context_seqs_per_doc

    def forward(self, inputs):
        # Intentionally a no-op (returns None), matching the original stub.
        pass
class SelfAttention(nn.Module):
    """Applies FXBertSelfAttention with an all-zero additive attention mask
    (so every position stays attendable) followed by a tanh."""

    def __init__(self, config, opt):
        super().__init__()
        self.opt = opt
        self.config = config
        self.SA = FXBertSelfAttention(
            hidden_size=config.hidden_size,
            num_attention_heads=config.num_attention_heads,
            attention_probs_dropout_prob=0.1,
        )
        self.tanh = torch.nn.Tanh()

    def forward(self, inputs):
        # Zero additive mask of shape (batch, 1, 1, max_seq_len): adds
        # nothing to the attention scores, i.e. masks out no position.
        mask_shape = (inputs.size(0), 1, 1, self.opt.max_seq_len)
        zero_mask = torch.tensor(
            np.zeros(mask_shape, dtype=np.float32), dtype=torch.float32
        ).to(self.opt.device)
        attended = self.SA(inputs, zero_mask)[0]
        return self.tanh(attended)
class LCF_BERT2Dual(FXBaseModel):
    """
    While lcf.py:LCF_BERT is the implementation as implemented in PyTorch-ABSA repository, this implementation here
    (LCF_BERT2Dual) is following the implementation as in the author's repository, which according to
    https://github.com/yangheng95/LC-ABSA/issues/10#issuecomment-670301603 has seen some more improvements compared to
    the version from PyTorch-ABSA
    """

    @staticmethod
    def get_language_models():
        # a single (default) language model serves both contexts
        return (get_default_lm(),)

    @staticmethod
    def get_input_field_ids():
        # fields consumed by forward(): combined text+target sequence (with
        # segment ids) for the global context, plus plain text and target
        # sequences for the local context
        return [
            (get_default_lm(), FIELD_TEXT_THEN_TARGET_IDS_WITH_SPECIAL_TOKENS),
            (
                get_default_lm(),
                FIELD_TEXT_THEN_TARGET_IDS_WITH_SPECIAL_TOKENS_SEGMENT_IDS,
            ),
            (get_default_lm(), FIELD_TEXT_IDS_WITH_SPECIAL_TOKENS),
            (get_default_lm(), FIELD_TARGET_IDS_WITH_SPECIAL_TOKENS),
            (get_default_lm(), FIELD_TEXT_IDS_WITH_SPECIAL_TOKENS_TARGET_MASK),
        ]

    def __init__(self, transformer_models: Dict, opt: Namespace):
        """Build the dual-context LCF model.

        :param transformer_models: mapping from language-model name to the
            instantiated transformer model.
        :param opt: run options; this class reads dropout, SRD, max_seq_len,
            device, polarities_dim and local_context_focus.
        """
        super(LCF_BERT2Dual, self).__init__()
        bert = transformer_models[get_default_lm()]
        self.bert4global = bert
        # note that we use a second bert here, which should slightly improve performance
        # cf. https://github.com/yangheng95/LC-ABSA/#tips
        # self.bert4local = copy.deepcopy(bert)
        # we can't do this on scc because even for batch size = only 16 we run out of
        # memory. because of that, we use the same bert for both local and global
        # (just as in lcf.py)
        self.bert4local = bert
        self.opt = opt
        self.dropout = nn.Dropout(self.opt.dropout)
        self.bert_SA = SelfAttention(bert.config, self.opt)
        # projects the concatenated (local, global) features back to hidden_size
        self.linear2 = nn.Linear(bert.config.hidden_size * 2, bert.config.hidden_size)
        # self.linear3 = nn.Linear(bert.config.hidden_size * 3, bert.config.hidden_size)
        self.bert_pooler = BertPooler(bert.config)
        self.dense = nn.Linear(bert.config.hidden_size, self.opt.polarities_dim)

    def feature_dynamic_mask(self, text_local_indices, aspect_indices):
        """Context-features Dynamic Mask (CDM).

        Returns a float32 tensor of shape (batch, max_seq_len, hidden_size)
        that is 1 inside the SRD window around the target and 0 outside it.
        """
        texts = text_local_indices.cpu().numpy()
        asps = aspect_indices.cpu().numpy()
        mask_len = self.opt.SRD
        hidden_size = self.bert4local.config.hidden_size
        masked_text_raw_indices = np.ones(
            (text_local_indices.size(0), self.opt.max_seq_len, hidden_size),
            dtype=np.float32,
        )
        for text_i, asp_i in zip(range(len(texts)), range(len(asps))):
            # -2 presumably removes the two special tokens from the target
            # length -- TODO confirm tokenizer convention
            asp_len = np.count_nonzero(asps[asp_i]) - 2
            try:
                # position in the text of the target's first real token
                asp_begin = np.argwhere(texts[text_i] == asps[asp_i][1])[0][0]
            except IndexError:
                # target not found in the text: leave this sample fully unmasked
                continue
            mask_begin = max(asp_begin - mask_len, 0)
            # zero out everything left of the SRD window ...
            for i in range(mask_begin):
                # NOTE: the original used the removed alias np.float here;
                # float32 matches the dtype of masked_text_raw_indices
                masked_text_raw_indices[text_i][i] = np.zeros(
                    (hidden_size,), dtype=np.float32
                )
            # ... and everything right of it
            for j in range(asp_begin + asp_len + mask_len, self.opt.max_seq_len):
                masked_text_raw_indices[text_i][j] = np.zeros(
                    (hidden_size,), dtype=np.float32
                )
        masked_text_raw_indices = torch.from_numpy(masked_text_raw_indices)
        return masked_text_raw_indices.to(self.opt.device)

    def feature_dynamic_weighted(self, text_local_indices, aspect_indices):
        """Context-features Dynamic Weighting (CDW).

        Like :meth:`feature_dynamic_mask` but instead of a hard 0/1 mask the
        context is down-weighted linearly with its distance from the target.
        """
        texts = text_local_indices.cpu().numpy()
        asps = aspect_indices.cpu().numpy()
        masked_text_raw_indices = np.ones(
            (
                text_local_indices.size(0),
                self.opt.max_seq_len,
                self.bert4local.config.hidden_size,
            ),
            dtype=np.float32,
        )
        for text_i, asp_i in zip(range(len(texts)), range(len(asps))):
            # -2 presumably removes the two special tokens -- TODO confirm
            asp_len = np.count_nonzero(asps[asp_i]) - 2
            try:
                asp_begin = np.argwhere(texts[text_i] == asps[asp_i][1])[0][0]
                # center position of the target span
                asp_avg_index = (asp_begin * 2 + asp_len) / 2
            except IndexError:
                # target not found in the text: keep weights at 1
                continue
            text_len = np.count_nonzero(texts[text_i])
            distances = np.zeros(text_len, dtype=np.float32)
            # skip first and last non-zero position (presumably special tokens)
            for i in range(1, text_len - 1):
                if abs(i - asp_avg_index) + asp_len / 2 > self.opt.SRD:
                    # outside the SRD window: weight decays with distance
                    distances[i] = 1 - (
                        abs(i - asp_avg_index) + asp_len / 2 - self.opt.SRD
                    ) / text_len
                else:
                    distances[i] = 1
            for i in range(len(distances)):
                masked_text_raw_indices[text_i][i] = (
                    masked_text_raw_indices[text_i][i] * distances[i]
                )
        masked_text_raw_indices = torch.from_numpy(masked_text_raw_indices)
        return masked_text_raw_indices.to(self.opt.device)

    def forward(self, inputs):
        """Classify sentiment; returns logits of shape (batch, polarities_dim)."""
        text_target_bert_indices = FXDataset.get_input_by_params(
            inputs, get_default_lm(), FIELD_TEXT_THEN_TARGET_IDS_WITH_SPECIAL_TOKENS,
        )
        text_target_bert_segments_ids = FXDataset.get_input_by_params(
            inputs,
            get_default_lm(),
            FIELD_TEXT_THEN_TARGET_IDS_WITH_SPECIAL_TOKENS_SEGMENT_IDS,
        )
        text_local_indices = FXDataset.get_input_by_params(
            inputs, get_default_lm(), FIELD_TEXT_IDS_WITH_SPECIAL_TOKENS
        )
        aspect_indices = FXDataset.get_input_by_params(
            inputs, get_default_lm(), FIELD_TARGET_IDS_WITH_SPECIAL_TOKENS
        )
        # bert: global context sees text+target, local context sees text only
        global_context_features = self.invoke_language_model(
            self.bert4global,
            input_ids=text_target_bert_indices,
            token_type_ids=text_target_bert_segments_ids,
        )
        local_context_features = self.invoke_language_model(
            self.bert4local, text_local_indices
        )
        # local-context focus mask
        if self.opt.local_context_focus == "cdm":
            lcf_matrix = self.feature_dynamic_mask(text_local_indices, aspect_indices)
        elif self.opt.local_context_focus == "cdw":
            lcf_matrix = self.feature_dynamic_weighted(
                text_local_indices, aspect_indices
            )
        else:
            # previously this fell through and crashed with a NameError below
            raise ValueError(
                "unknown local_context_focus: %r" % self.opt.local_context_focus
            )
        # LCF layer: damp the local features, self-attend, fuse with global
        lcf_features = torch.mul(local_context_features, lcf_matrix)
        lcf_features = self.bert_SA(lcf_features)
        cat_features = torch.cat((lcf_features, global_context_features), dim=-1)
        cat_features = self.linear2(cat_features)
        cat_features = self.dropout(cat_features)
        pooled_out = self.bert_pooler(cat_features)
        dense_out = self.dense(pooled_out)
        return dense_out
|
{"hexsha": "f79d7e2ced217adb6732defe255e2c5276aae52f", "size": 8304, "ext": "py", "lang": "Python", "max_stars_repo_path": "NewsSentiment/models/singletarget/lcf2.py", "max_stars_repo_name": "fhamborg/NewsMTSC", "max_stars_repo_head_hexsha": "5a8f88d7fbb921090e984cc378b02d75524c1025", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2021-04-09T11:53:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:41:13.000Z", "max_issues_repo_path": "NewsSentiment/models/singletarget/lcf2.py", "max_issues_repo_name": "fhamborg/NewsMTSC", "max_issues_repo_head_hexsha": "5a8f88d7fbb921090e984cc378b02d75524c1025", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2021-05-25T12:44:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-12T08:51:58.000Z", "max_forks_repo_path": "NewsSentiment/models/singletarget/lcf2.py", "max_forks_repo_name": "fhamborg/NewsMTSC", "max_forks_repo_head_hexsha": "5a8f88d7fbb921090e984cc378b02d75524c1025", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-05-23T11:40:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T03:13:26.000Z", "avg_line_length": 40.3106796117, "max_line_length": 124, "alphanum_fraction": 0.6368015414, "include": true, "reason": "import numpy", "num_tokens": 1893}
|
c
c-----------------------------------------------------------------------
c subroutine: r8tx
c radix 8 iteration subroutine
c-----------------------------------------------------------------------
c
      subroutine r8tx(nxtlt, nthpo, lengt, cr0, cr1, cr2, cr3, cr4,
     * cr5, cr6, cr7, ci0, ci1, ci2, ci3, ci4, ci5, ci6, ci7)
      dimension cr0(2), cr1(2), cr2(2), cr3(2), cr4(2), cr5(2), cr6(2),
     * cr7(2), ci1(2), ci2(2), ci3(2), ci4(2), ci5(2), ci6(2),
     * ci7(2), ci0(2)
      common /con2/ pi2, p7
c
c     one radix-8 butterfly stage over complex data held as separate
c     real (cr0..cr7) and imaginary (ci0..ci7) arrays, updated in place.
c       nxtlt - number of butterflies per twiddle group in this stage
c       nthpo - total number of points in the transform
c       lengt - span of the current stage (also the inner-loop stride)
c     pi2 and p7 come from common /con2/ and are set by the caller
c     (presumably 2*pi and sqrt(2)/2 -- confirm in the driver routine).
c
      scale = pi2/float(lengt)
      do 30 j=1,nxtlt
c     twiddle factors for this group: (c1,s1) = (cos,sin) of the base
c     angle, and powers 2..7 built via the angle-addition identities.
      arg = float(j-1)*scale
      c1 = cos(arg)
      s1 = sin(arg)
      c2 = c1**2 - s1**2
      s2 = c1*s1 + c1*s1
      c3 = c1*c2 - s1*s2
      s3 = c2*s1 + s2*c1
      c4 = c2**2 - s2**2
      s4 = c2*s2 + c2*s2
      c5 = c2*c3 - s2*s3
      s5 = c3*s2 + s3*c2
      c6 = c3**2 - s3**2
      s6 = c3*s3 + c3*s3
      c7 = c3*c4 - s3*s4
      s7 = c4*s3 + s4*c3
      do 20 k=j,nthpo,lengt
c     first half of the butterfly: sums/differences across a stride of 4
      ar0 = cr0(k) + cr4(k)
      ar1 = cr1(k) + cr5(k)
      ar2 = cr2(k) + cr6(k)
      ar3 = cr3(k) + cr7(k)
      ar4 = cr0(k) - cr4(k)
      ar5 = cr1(k) - cr5(k)
      ar6 = cr2(k) - cr6(k)
      ar7 = cr3(k) - cr7(k)
      ai0 = ci0(k) + ci4(k)
      ai1 = ci1(k) + ci5(k)
      ai2 = ci2(k) + ci6(k)
      ai3 = ci3(k) + ci7(k)
      ai4 = ci0(k) - ci4(k)
      ai5 = ci1(k) - ci5(k)
      ai6 = ci2(k) - ci6(k)
      ai7 = ci3(k) - ci7(k)
c     second half: combine across a stride of 2, mixing real/imaginary
c     parts where a factor of i (sqrt(-1)) is involved
      br0 = ar0 + ar2
      br1 = ar1 + ar3
      br2 = ar0 - ar2
      br3 = ar1 - ar3
      br4 = ar4 - ai6
      br5 = ar5 - ai7
      br6 = ar4 + ai6
      br7 = ar5 + ai7
      bi0 = ai0 + ai2
      bi1 = ai1 + ai3
      bi2 = ai0 - ai2
      bi3 = ai1 - ai3
      bi4 = ai4 + ar6
      bi5 = ai5 + ar7
      bi6 = ai4 - ar6
      bi7 = ai5 - ar7
      cr0(k) = br0 + br1
      ci0(k) = bi0 + bi1
c     j = 1 means all twiddles are 1: take the multiply-free path at 10
      if (j.le.1) go to 10
c     general case: apply twiddle factors (cN,sN) to each output;
c     p7 scales the odd outputs (the 45-degree rotations)
      cr1(k) = c4*(br0-br1) - s4*(bi0-bi1)
      ci1(k) = c4*(bi0-bi1) + s4*(br0-br1)
      cr2(k) = c2*(br2-bi3) - s2*(bi2+br3)
      ci2(k) = c2*(bi2+br3) + s2*(br2-bi3)
      cr3(k) = c6*(br2+bi3) - s6*(bi2-br3)
      ci3(k) = c6*(bi2-br3) + s6*(br2+bi3)
      tr = p7*(br5-bi5)
      ti = p7*(br5+bi5)
      cr4(k) = c1*(br4+tr) - s1*(bi4+ti)
      ci4(k) = c1*(bi4+ti) + s1*(br4+tr)
      cr5(k) = c5*(br4-tr) - s5*(bi4-ti)
      ci5(k) = c5*(bi4-ti) + s5*(br4-tr)
      tr = -p7*(br7+bi7)
      ti = p7*(br7-bi7)
      cr6(k) = c3*(br6+tr) - s3*(bi6+ti)
      ci6(k) = c3*(bi6+ti) + s3*(br6+tr)
      cr7(k) = c7*(br6-tr) - s7*(bi6-ti)
      ci7(k) = c7*(bi6-ti) + s7*(br6-tr)
      go to 20
c     twiddle-free path (j = 1): same outputs with cN = 1, sN = 0
   10 cr1(k) = br0 - br1
      ci1(k) = bi0 - bi1
      cr2(k) = br2 - bi3
      ci2(k) = bi2 + br3
      cr3(k) = br2 + bi3
      ci3(k) = bi2 - br3
      tr = p7*(br5-bi5)
      ti = p7*(br5+bi5)
      cr4(k) = br4 + tr
      ci4(k) = bi4 + ti
      cr5(k) = br4 - tr
      ci5(k) = bi4 - ti
      tr = -p7*(br7+bi7)
      ti = p7*(br7-bi7)
      cr6(k) = br6 + tr
      ci6(k) = bi6 + ti
      cr7(k) = br6 - tr
      ci7(k) = bi6 - ti
   20 continue
   30 continue
      return
      end
|
{"hexsha": "9cc4f591faa587671eb683fe461fa74c1ca8f09d", "size": 3366, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "iraf.v2161/math/ieee/chap1/r8tx.f", "max_stars_repo_name": "ysBach/irafdocgen", "max_stars_repo_head_hexsha": "b11fcd75cc44b01ae69c9c399e650ec100167a54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-01T15:19:09.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-02T16:48:42.000Z", "max_issues_repo_path": "iraf.v2161/math/ieee/chap1/r8tx.f", "max_issues_repo_name": "ysBach/irafdocgen", "max_issues_repo_head_hexsha": "b11fcd75cc44b01ae69c9c399e650ec100167a54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-11-30T13:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-02T19:40:25.000Z", "max_forks_repo_path": "iraf.v2161/math/ieee/chap1/r8tx.f", "max_forks_repo_name": "ysBach/irafdocgen", "max_forks_repo_head_hexsha": "b11fcd75cc44b01ae69c9c399e650ec100167a54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1666666667, "max_line_length": 72, "alphanum_fraction": 0.376114082, "num_tokens": 1436}
|
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#ifndef _UNOTOOLS_LOCALEDATAWRAPPER_HXX
#define _UNOTOOLS_LOCALEDATAWRAPPER_HXX
#include <tools/string.hxx>
#include <com/sun/star/i18n/XLocaleData2.hpp>
#include <com/sun/star/i18n/LocaleItem.hpp>
#include <com/sun/star/i18n/reservedWords.hpp>
#include <unotools/readwritemutexguard.hxx>
#include "unotools/unotoolsdllapi.h"
#ifndef BOOST_SHARED_PTR_HPP_INCLUDED
#include <boost/shared_ptr.hpp>
#endif
namespace com { namespace sun { namespace star {
namespace lang {
class XMultiServiceFactory;
}
}}}
class Date;
class Time;
class CalendarWrapper;
/// Order of the day, month and year fields in a numeric date format.
enum DateFormat {
    MDY,
    DMY,
    YMD
};
/// Measurement system of a locale (metric vs. US customary units).
enum MeasurementSystem {
    MEASURE_METRIC,
    MEASURE_US
};
/** Wrapper around the i18n XLocaleData2 service.

    Caches per-locale data (separators, currency symbols and formats, date
    formats, reserved words, digit grouping, default calendar) and offers
    simple date/time/number/currency formatting helpers on top of it.
    Copying is disabled; access to the caches goes through aMutex. */
class UNOTOOLS_DLLPUBLIC LocaleDataWrapper
{
    static sal_uInt8 nLocaleDataChecking; // 0:=dontknow, 1:=yes, 2:=no
    ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory > xSMgr;
    ::com::sun::star::uno::Reference< ::com::sun::star::i18n::XLocaleData2 > xLD;
    ::com::sun::star::lang::Locale aLocale;
    ::boost::shared_ptr< ::com::sun::star::i18n::Calendar > xDefaultCalendar;
    ::com::sun::star::i18n::LocaleDataItem aLocaleDataItem;
    ::com::sun::star::uno::Sequence< ::rtl::OUString > aReservedWordSeq;
    ::com::sun::star::uno::Sequence< sal_Int32 > aGrouping;
    // cached items (filled lazily; the bLocaleDataItemValid/bReservedWordValid
    // flags below record whether the corresponding cache is up to date)
    String aLocaleItem[::com::sun::star::i18n::LocaleItem::COUNT];
    String aReservedWord[::com::sun::star::i18n::reservedWords::COUNT];
    String aCurrSymbol;
    String aCurrBankSymbol;
    int nDateFormat;
    int nLongDateFormat;
    sal_uInt16 nCurrPositiveFormat;
    sal_uInt16 nCurrNegativeFormat;
    sal_uInt16 nCurrDigits;
    sal_Bool bLocaleDataItemValid;
    sal_Bool bReservedWordValid;
    mutable ::utl::ReadWriteMutex aMutex;
    // dummies, to be implemented or provided by XML locale data
    sal_Unicode cCurrZeroChar;
    // not implemented, prevent usage
    LocaleDataWrapper( const LocaleDataWrapper& );
    LocaleDataWrapper& operator=( const LocaleDataWrapper& );
    // whenever Locale changes
    void invalidateData();
    void getOneLocaleItemImpl( sal_Int16 nItem );
    const String& getOneLocaleItem( sal_Int16 nItem ) const;
    void getOneReservedWordImpl( sal_Int16 nWord );
    const String& getOneReservedWord( sal_Int16 nWord ) const;
    void getCurrSymbolsImpl();
    void getCurrFormatsImpl();
    void scanCurrFormatImpl( const String& rCode,
        xub_StrLen nStart, xub_StrLen& nSign,
        xub_StrLen& nPar, xub_StrLen& nNum,
        xub_StrLen& nBlank, xub_StrLen& nSym );
    void getDateFormatsImpl();
    DateFormat scanDateFormatImpl( const String& rCode );
    void getDefaultCalendarImpl();
    sal_Unicode* ImplAddFormatNum( sal_Unicode* pBuf,
        sal_Int64 nNumber, sal_uInt16 nDecimals,
        sal_Bool bUseThousandSep, sal_Bool bTrailingZeros ) const;
    void getDigitGroupingImpl();
public:
    LocaleDataWrapper(
        const ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory > & xSF,
        const ::com::sun::star::lang::Locale& rLocale
        );
    ~LocaleDataWrapper();
    /** Get the service factory, meant to be able to create a CalendarWrapper
        from a LocaleDataWrapper. Note that the service factory may be
        non-existent if this LocaleDataWrapper was created without one and
        lives "on the grassland". The CalendarWrapper ctor can handle that
        though. */
    const ::com::sun::star::uno::Reference<
        ::com::sun::star::lang::XMultiServiceFactory > & getServiceFactory()
        const { return xSMgr; }
    /// set a new Locale to request
    void setLocale( const ::com::sun::star::lang::Locale& rLocale );
    /// get current requested Locale
    const ::com::sun::star::lang::Locale& getLocale() const;
    /// get current loaded Locale, which might differ from the requested Locale
    ::com::sun::star::lang::Locale getLoadedLocale() const;
    // Wrapper implementations of service LocaleData
    ::com::sun::star::i18n::LanguageCountryInfo getLanguageCountryInfo() const;
    ::com::sun::star::i18n::LocaleDataItem getLocaleItem() const;
    ::com::sun::star::uno::Sequence< ::com::sun::star::i18n::Calendar > getAllCalendars() const;
    /// NOTE: this wraps XLocaleData2::getAllCurrencies2() in fact.
    ::com::sun::star::uno::Sequence< ::com::sun::star::i18n::Currency2 > getAllCurrencies() const;
    ::com::sun::star::uno::Sequence< ::com::sun::star::i18n::FormatElement > getAllFormats() const;
    ::com::sun::star::uno::Sequence< ::com::sun::star::i18n::Implementation > getCollatorImplementations() const;
    ::com::sun::star::uno::Sequence< ::rtl::OUString > getTransliterations() const;
    ::com::sun::star::i18n::ForbiddenCharacters getForbiddenCharacters() const;
    ::com::sun::star::uno::Sequence< ::rtl::OUString > getReservedWord() const;
    ::com::sun::star::uno::Sequence< ::com::sun::star::lang::Locale > getAllInstalledLocaleNames() const;
    /// same as the wrapper implementation but static
    static ::com::sun::star::uno::Sequence< ::com::sun::star::lang::Locale > getInstalledLocaleNames();
    /** Get LanguageTypes for all installed locales which are unambiguous
        convertible back and forth between locale ISO strings and MS-LCID
        LanguageType. Upon the first time the function is called when
        locale data checking is enabled, messages are shown for locales not
        matching, excluding already known problems.
        (e.g. used in number formatter dialog init)
     */
    static ::com::sun::star::uno::Sequence< sal_uInt16 > getInstalledLanguageTypes();
    /// maps the LocaleData string to the International enum
    MeasurementSystem mapMeasurementStringToEnum( const String& rMS ) const;
    /// Convenience method to obtain the default calendar.
    const ::boost::shared_ptr< ::com::sun::star::i18n::Calendar > getDefaultCalendar() const;
    /// Convenience method to obtain the day names of the default calendar.
    const ::com::sun::star::uno::Sequence< ::com::sun::star::i18n::CalendarItem > getDefaultCalendarDays() const;
    /// Convenience method to obtain the month names of the default calendar.
    const ::com::sun::star::uno::Sequence< ::com::sun::star::i18n::CalendarItem > getDefaultCalendarMonths() const;
    /** Obtain digit grouping. The usually known grouping by thousands (#,###)
        is actually only one of possible groupings. Another one, for example,
        used in India is group by 3 and then by 2 indefinitely (#,##,###). The
        integer sequence returned here specifies grouping from right to left
        (!), with a 0 entry designating the end of rules and the previous value
        to be repeated indefinitely. Hence the sequence {3,0} specifies the
        usual grouping by thousands, whereas the sequence {3,2,0} specifies
        Indian grouping. The sal_Int32* getConstArray() can be passed directly
        to the ::rtl::math::doubleToString() methods as argument for the
        pGroups parameter. */
    const ::com::sun::star::uno::Sequence< sal_Int32 > getDigitGrouping() const;
    // Functionality of class International methods, LocaleItem
    inline const String& getDateSep() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::DATE_SEPARATOR ); }
    inline const String& getNumThousandSep() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::THOUSAND_SEPARATOR ); }
    inline const String& getNumDecimalSep() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::DECIMAL_SEPARATOR ); }
    inline const String& getTimeSep() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::TIME_SEPARATOR ); }
    inline const String& getTime100SecSep() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::TIME_100SEC_SEPARATOR ); }
    inline const String& getListSep() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::LIST_SEPARATOR ); }
    inline const String& getQuotationMarkStart() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::SINGLE_QUOTATION_START ); }
    inline const String& getQuotationMarkEnd() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::SINGLE_QUOTATION_END ); }
    inline const String& getDoubleQuotationMarkStart() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::DOUBLE_QUOTATION_START ); }
    inline const String& getDoubleQuotationMarkEnd() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::DOUBLE_QUOTATION_END ); }
    inline const String& getMeasurementSystem() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::MEASUREMENT_SYSTEM ); }
    inline MeasurementSystem getMeasurementSystemEnum() const
        { return mapMeasurementStringToEnum( getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::MEASUREMENT_SYSTEM ) ); }
    inline const String& getTimeAM() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::TIME_AM ); }
    inline const String& getTimePM() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::TIME_PM ); }
    inline const String& getLongDateDayOfWeekSep() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::LONG_DATE_DAY_OF_WEEK_SEPARATOR ); }
    inline const String& getLongDateDaySep() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::LONG_DATE_DAY_SEPARATOR ); }
    inline const String& getLongDateMonthSep() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::LONG_DATE_MONTH_SEPARATOR ); }
    inline const String& getLongDateYearSep() const
        { return getOneLocaleItem( ::com::sun::star::i18n::LocaleItem::LONG_DATE_YEAR_SEPARATOR ); }
    // currency
    const String& getCurrSymbol() const;
    const String& getCurrBankSymbol() const;
    sal_uInt16 getCurrPositiveFormat() const;
    sal_uInt16 getCurrNegativeFormat() const;
    sal_uInt16 getCurrDigits() const;
    // simple date and time formatting
    DateFormat getDateFormat() const;
    DateFormat getLongDateFormat() const;
    /// only numerical values of Gregorian calendar
    String getDate( const Date& rDate ) const;
    String getTime( const Time& rTime, sal_Bool bSec = sal_True,
        sal_Bool b100Sec = sal_False ) const;
    String getDuration( const Time& rTime,
        sal_Bool bSec = sal_True, sal_Bool b100Sec = sal_False ) const;
    /** The CalendarWrapper already <b>MUST</b>
        have loaded a calendar.
        @param nDisplayDayOfWeek
            0 := abbreviated name
            1 := full name
        @param bDayOfMonthWithLeadingZero
            <FALSE/> := without leading zero
            <TRUE/> := with leading zero if <10
        @param nDisplayMonth
            0 := abbreviated name
            1 := full name
        @param bTwoDigitYear
            <FALSE/> := full year
            <TRUE/> := year % 100
     */
    String getLongDate( const Date& rDate,
        CalendarWrapper& rCal,
        sal_Int16 nDisplayDayOfWeek = 1,
        sal_Bool bDayOfMonthWithLeadingZero = sal_False,
        sal_Int16 nDisplayMonth = 1,
        sal_Bool bTwoDigitYear = sal_False
        ) const;
    /** Simple number formatting
        @param nNumber
            value * 10**nDecimals
        @param bTrailingZeros
            </sal_True>  := always display trailing zeros in
                decimal places, even if integer value.
            </sal_False> := trailing zeros are only displayed
                if the value is not an integer value.
     */
    String getNum( sal_Int64 nNumber, sal_uInt16 nDecimals,
        sal_Bool bUseThousandSep = sal_True,
        sal_Bool bTrailingZeros = sal_True ) const;
    /// "Secure" currency formatted string.
    String getCurr( sal_Int64 nNumber, sal_uInt16 nDecimals,
        const String& rCurrencySymbol,
        sal_Bool bUseThousandSep = sal_True ) const;
    /** Default currency formatted string, use with
        care as default currency may change in any
        locale, for example, DEM -> EUR */
    String getCurr( sal_Int64 nNumber, sal_uInt16 nDecimals,
        sal_Bool bUseThousandSep = sal_True ) const
        { return getCurr( nNumber, nDecimals,
            getCurrSymbol(), bUseThousandSep ); }
    // dummy returns, to be implemented
    inline sal_Unicode getCurrZeroChar() const
        { return cCurrZeroChar; }
    inline sal_Bool isNumLeadingZero() const
        { return sal_True; }
    /// standard decimal places
    inline sal_uInt16 getNumDigits() const
        { return 2; }
    inline sal_Bool isNumTrailingZeros() const
        { return sal_True; }
    // reserved words
    inline const String& getTrueWord() const
        { return getOneReservedWord( ::com::sun::star::i18n::reservedWords::TRUE_WORD ); }
    inline const String& getFalseWord() const
        { return getOneReservedWord( ::com::sun::star::i18n::reservedWords::FALSE_WORD ); }
    /// return a quarter string matching nQuarter (0..3) => "1st quarter" .. "4th quarter"
    inline const String& getQuarterWord( sal_Int16 nQuarter ) const
        { return getOneReservedWord( ::com::sun::star::i18n::reservedWords::QUARTER1_WORD + nQuarter ); }
    inline const String& getAboveWord() const
        { return getOneReservedWord( ::com::sun::star::i18n::reservedWords::ABOVE_WORD ); }
    inline const String& getBelowWord() const
        { return getOneReservedWord( ::com::sun::star::i18n::reservedWords::BELOW_WORD ); }
    /// return a quarter abbreviation string matching nQuarter (0..3) => "Q1" .. "Q2"
    inline const String& getQuarterAbbreviation( sal_Int16 nQuarter ) const
        { return getOneReservedWord( ::com::sun::star::i18n::reservedWords::QUARTER1_ABBREVIATION + nQuarter ); }
    /** Return whether locale data checks are enabled.
        Checks are enabled if the environment variable
        OOO_ENABLE_LOCALE_DATA_CHECKS is set to 'Y' or 'Yes' (or any other
        string starting with 'Y') or '1'.
        Also used in conjunction with the number formatter. */
    static inline bool areChecksEnabled()
        {
            if (nLocaleDataChecking == 0)
                evaluateLocaleDataChecking();
            return nLocaleDataChecking == 1;
        }
    /** Append locale info to string, used with locale data checking.
        A string similar to "de_DE requested\n en_US loaded" is appended. */
    String& appendLocaleInfo( String& rDebugMsg ) const;
    /** Ouput a message during locale data checking. The (UTF-8) string is
        written to stderr and in a non-product build or if DBG_UTIL is enabled
        also raised as an assertion message box. */
    static void outputCheckMessage( const String& rMsg );
    static void outputCheckMessage( const char* pStr);
private:
    static void evaluateLocaleDataChecking();
};
#endif // _UNOTOOLS_LOCALEDATAWRAPPER_HXX
|
{"hexsha": "ff38aabf8fb292176a2e15ecf3ff226223fd1537", "size": 17970, "ext": "hxx", "lang": "C++", "max_stars_repo_path": "main/unotools/inc/unotools/localedatawrapper.hxx", "max_stars_repo_name": "Grosskopf/openoffice", "max_stars_repo_head_hexsha": "93df6e8a695d5e3eac16f3ad5e9ade1b963ab8d7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 679.0, "max_stars_repo_stars_event_min_datetime": "2015-01-06T06:34:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T01:06:03.000Z", "max_issues_repo_path": "main/unotools/inc/unotools/localedatawrapper.hxx", "max_issues_repo_name": "Grosskopf/openoffice", "max_issues_repo_head_hexsha": "93df6e8a695d5e3eac16f3ad5e9ade1b963ab8d7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 102.0, "max_issues_repo_issues_event_min_datetime": "2017-11-07T08:51:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T12:13:49.000Z", "max_forks_repo_path": "main/unotools/inc/unotools/localedatawrapper.hxx", "max_forks_repo_name": "Grosskopf/openoffice", "max_forks_repo_head_hexsha": "93df6e8a695d5e3eac16f3ad5e9ade1b963ab8d7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 331.0, "max_forks_repo_forks_event_min_datetime": "2015-01-06T11:40:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T04:07:51.000Z", "avg_line_length": 49.9166666667, "max_line_length": 141, "alphanum_fraction": 0.6070116861, "num_tokens": 4155}
|
# Duration gamma-regression test suite: run each gammaReg sub-suite once.
include(joinpath("gammaReg", "chosenVariables_inverse_test.jl"))
include(joinpath("gammaReg", "chosenVariables_log_test.jl"))
include(joinpath("gammaReg", "research_inverse_test.jl"))
# NOTE(review): the original included research_inverse_test.jl a second time,
# which ran that suite twice.  The naming pattern above suggests
# research_log_test.jl was intended as the fourth include -- restore it once
# that file is confirmed to exist.
|
{"hexsha": "34f1547617077eabe04ba581b89be1a6617c35c6", "size": 243, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/dataAnalysis/Duration/gammaReg_test.jl", "max_stars_repo_name": "AlexLsn/CSO", "max_stars_repo_head_hexsha": "b1e2eb949003d9a2ea865581da554ea4ca7c8cf7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-24T17:51:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-30T01:22:10.000Z", "max_issues_repo_path": "test/dataAnalysis/Duration/gammaReg_test.jl", "max_issues_repo_name": "AlexLsn/CSO", "max_issues_repo_head_hexsha": "b1e2eb949003d9a2ea865581da554ea4ca7c8cf7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/dataAnalysis/Duration/gammaReg_test.jl", "max_forks_repo_name": "AlexLsn/CSO", "max_forks_repo_head_hexsha": "b1e2eb949003d9a2ea865581da554ea4ca7c8cf7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-11T19:55:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T19:55:37.000Z", "avg_line_length": 40.5, "max_line_length": 64, "alphanum_fraction": 0.7983539095, "num_tokens": 58}
|
"""
sciwebvis.material
------------------
:copyright: 2015, Juan David Adarve. See AUTHORS for more details
:license: 3-clause BSD, see LICENSE for more details
"""
import numpy as np
from jinja2 import Environment, PackageLoader
from .JSRenderable import JSRenderable
from .color import Color
# from .util import generateID
# Public API of this module.
__all__ = ['Material', 'PointMaterial',
    'WireframeMaterial', 'TextureMaterial', 'ShaderMaterial']
# template Environment object
# Jinja2 environment that loads the Javascript snippet templates shipped in
# the sciwebvis "templates" package directory; used by the render() methods.
_templateEnv = Environment(loader=PackageLoader('sciwebvis', 'templates'))
class Material(JSRenderable):
    """Abstract base class for all materials.

    Subclasses override render() to emit a Javascript snippet and
    addToFigure() to register any data the material needs with a figure.
    """

    def __init__(self):
        # identifier for this material; starts as None and is set externally
        # through the ID setter (presumably by the figure -- confirm)
        self.__ID = None

    def render(self):
        # to be overridden by subclasses
        pass

    def addToFigure(self, fig):
        # to be overridden by subclasses
        pass

    @property
    def ID(self):
        return self.__ID

    @ID.setter
    def ID(self, value):
        self.__ID = value
class PointMaterial(Material):
    """
    Material used to render points.
    """

    def __init__(self, fig=None, **kwargs):
        """Creates a new point material.

        Parameters
        ----------
        fig : Figure, optional.
            Figure object to which this material is attached. Defaults to None.

        Kwargs
        ------
        pointSize : int, optional.
            Point size. Defaults to 5.
        color : Color, optional.
            Point color. Defaults to Color().
        """
        super(PointMaterial, self).__init__()

        self.__properties = dict()
        self.__properties['pointSize'] = kwargs.pop('pointSize', 5)
        self.__properties['color'] = kwargs.pop('color', Color())

        # identity comparison is the idiomatic (and safe) None check
        if fig is not None:
            fig.addMaterial(self)

    def addToFigure(self, fig):
        # nothing to do for this material: it carries no figure-level data
        pass

    def render(self):
        """Render the pointMaterial.js template with this material's properties."""
        materialTemplate = _templateEnv.get_template('js/pointMaterial.js')
        return materialTemplate.render(pointSize = self.__properties['pointSize'],
            color = self.__properties['color'].render())
class WireframeMaterial(Material):
    """Material used to render meshes as wireframes."""

    def __init__(self, fig=None, **kwargs):
        """Creates a new wireframe material

        Parameters
        ----------
        fig : Figure, optional.
            Figure object to which this material is attached. Defaults to None.

        Kwargs
        ------
        color : Color, optional.
            Line color. Defaults to Color().
        lineWidth : int, optional.
            Line width. Defaults to 1.
        transparent : bool, optional.
            Whether lines are rendered transparent. Defaults to True.
        """
        super(WireframeMaterial, self).__init__()

        self.__properties = dict()
        self.__properties['color'] = kwargs.pop('color', Color())
        self.__properties['lineWidth'] = kwargs.pop('lineWidth', 1)
        # stored as the lowercase string 'true'/'false' for the JS template
        self.__properties['transparent'] = str(kwargs.pop('transparent', True)).lower()

        # identity comparison is the idiomatic (and safe) None check
        if fig is not None:
            fig.addMaterial(self)

    def addToFigure(self, fig):
        # nothing to do: this material carries no figure-level data
        pass

    def render(self):
        """Render the wireframeMaterial.js template with this material's properties."""
        materialTemplate = _templateEnv.get_template('js/wireframeMaterial.js')
        return materialTemplate.render(lineWidth=self.__properties['lineWidth'],
            color=self.__properties['color'].render(),
            transparent=self.__properties['transparent'])
class TextureMaterial(Material):
    """Material that textures a surface with an image."""

    def __init__(self, fig=None, **kwargs):
        """Creates a new texture material.

        Parameters
        ----------
        fig : Figure, optional.
            Figure object to which this material is attached. Defaults to None.

        Kwargs
        ------
        texture : ndarray.
            Image texture to use by the material

        Raises
        ------
        KeyError: if texture kwarg is not present.
        TypeError: if texture is not a numpy ndarray.
        """
        super(TextureMaterial, self).__init__()

        self.__properties = dict()

        # the original code checked for the key and then wrapped the pop in a
        # try/except KeyError that could never trigger; a single membership
        # test expresses the same contract
        if 'texture' not in kwargs:
            raise KeyError('texture argument not set')

        tex = kwargs.pop('texture')
        # isinstance (instead of an exact type() comparison) also accepts
        # ndarray subclasses, which behave the same for our purposes
        if not isinstance(tex, np.ndarray):
            raise TypeError('texture parameter should be numpy ndarray')

        self.__properties['texture_data'] = tex

        # add material to figure
        if fig is not None:
            self.addToFigure(fig)

    def addToFigure(self, fig):
        # register the texture data with the figure; keep the handle the
        # figure returns for use by render()
        self.__properties['texture'] = fig.addData(self.__properties['texture_data'])

    def render(self):
        """Render the textureMaterial.js template with the registered texture."""
        materialTemplate = _templateEnv.get_template('js/textureMaterial.js')
        return materialTemplate.render(texture=self.__properties['texture'])
class ShaderMaterial(Material):
    def __init__(self, fig=None, **kwargs):
        """Creates a new shader material.

        Parameters
        ----------
        fig : Figure, optional.
            Figure object to which this material is attached. Defaults to None.

        Kwargs
        ------
        vertex : string
            Vertex shader code.
        fragment : string
            Fragment shader code.
        """
        super(ShaderMaterial, self).__init__()

        # NOTE(review): these are stored but never used -- render() below
        # returns a hard-coded shader pair instead of self.__vertex /
        # self.__fragment.  Also, unlike the other materials, a supplied
        # fig is ignored here.  Both look unfinished; confirm before relying
        # on this class.
        self.__vertex = kwargs.pop('vertex')
        self.__fragment = kwargs.pop('fragment')

    def render(self):
        # returns a fixed point-sprite shader; does NOT use the vertex/
        # fragment code passed to __init__ (see note above)
        return """
        SCIWIS.ShaderMaterial({
            vertex: 'uniform vec4 color;uniform float pointSize;attribute vec4 vcolor;/* output color to fragment shader */varying vec4 vertexColor;void main() { gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0); gl_PointSize = pointSize; /* pixels */ /*vertexColor = color;*/ vertexColor = vcolor;}',
            fragment: 'varying vec4 vertexColor;void main() { /* point radius */ float r = length(gl_PointCoord - vec2(0.5, 0.5)); gl_FragColor = vertexColor; if(r > 0.5) { discard; }}',
            transparent: true,
            uniforms: {pointSize : {type : 'f', value : 10}, color : {type : 'v4', value : new THREE.Vector4(1.0, 0.0, 0.0, 1.0)}}
        })
        """

    def addToFigure(self, fig):
        # nothing to register with the figure
        pass
|
{"hexsha": "d310ea6709e311c84e9d23986e03e95b5ceb3595", "size": 5959, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/sciwebvis/material.py", "max_stars_repo_name": "jadarve/sciwebvis", "max_stars_repo_head_hexsha": "887268b310067809a7a7495952f76b1e70aeed64", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/sciwebvis/material.py", "max_issues_repo_name": "jadarve/sciwebvis", "max_issues_repo_head_hexsha": "887268b310067809a7a7495952f76b1e70aeed64", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/sciwebvis/material.py", "max_forks_repo_name": "jadarve/sciwebvis", "max_forks_repo_head_hexsha": "887268b310067809a7a7495952f76b1e70aeed64", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5751072961, "max_line_length": 334, "alphanum_fraction": 0.5908709515, "include": true, "reason": "import numpy", "num_tokens": 1290}
|
import numpy as np
from collections import Counter
from sklearn.preprocessing import StandardScaler
def min_max_normalize(X):
    """Min-Max normalization function

    Scales every feature (column) of ``X`` into [0, 1] in place:

        X = (X - Xmin) / (Xmax - Xmin)

    Parameters
    ----------
    X : numpy.ndarray of shape (samples, features)
        Modified in place (matching the original behavior) and returned.

    Returns
    -------
    numpy.ndarray
        The same array object, normalized column-wise.  Constant columns
        (Xmax == Xmin) map to 0 instead of producing NaN/inf, which the
        original per-column loop did.
    """
    col_min = X.min(axis=0)
    col_range = X.max(axis=0) - col_min
    # Guard against division by zero for constant columns.
    col_range[col_range == 0] = 1
    # Vectorized across all columns; the original looped column by column.
    X[:] = (X - col_min) / col_range
    return X
def standardize(X):
    """Standardize each feature (column) of ``X`` in place according to

        x = (x - u) / s

    where ``u`` is the column mean and ``s`` the population standard
    deviation (np.std default, ddof=0), as in the original loop.

    Parameters
    ----------
    X : numpy.ndarray of shape (samples, features)
        Modified in place (matching the original behavior) and returned.

    Returns
    -------
    numpy.ndarray
        The same array object, standardized column-wise.  Constant columns
        (s == 0) map to 0 instead of producing NaN, which the original
        per-column loop did.
    """
    col_mean = X.mean(axis=0)
    col_std = X.std(axis=0)
    # Guard against division by zero on constant columns.
    col_std[col_std == 0] = 1
    # Vectorized across all columns; the original looped column by column.
    X[:] = (X - col_mean) / col_std
    return X
def metrics(y_true, y_pred):
    """Compute accuracy, f1-score, precision and recall for binary labels.

    Pairs are classified exactly as the confusion-matrix cases:
    (1, 1) -> TP, (0, 0) -> TN, (1, 0) -> FN, anything else -> FP.
    Any ratio whose denominator is zero is reported as 0.  Accuracy is a
    percentage; the other metrics are plain ratios.

    Returns a dict with keys 'f1-score', 'precision', 'recall' and
    'accuracy'.
    """
    true_pos = true_neg = false_pos = false_neg = 0.0
    for truth, guess in zip(y_true, y_pred):
        if truth == 1 and guess == 1:
            true_pos += 1
        elif truth == 0 and guess == 0:
            true_neg += 1
        elif truth == 1 and guess == 0:
            false_neg += 1
        else:
            false_pos += 1

    def safe_ratio(numerator, denominator):
        # Mirrors the original's try/except ZeroDivisionError -> 0.
        return numerator / denominator if denominator else 0

    precision = safe_ratio(true_pos, true_pos + false_pos)
    recall = safe_ratio(true_pos, true_pos + false_neg)
    fscore = safe_ratio(2 * precision * recall, precision + recall)
    total = true_pos + true_neg + false_pos + false_neg
    accuracy = safe_ratio(100 * (true_pos + true_neg), total)
    return {
        'f1-score': fscore,
        'precision': precision,
        'recall': recall,
        'accuracy': accuracy,
    }
if __name__ == "__main__":
    # Quick smoke test for the helpers above.
    # BUGFIX: the deprecated alias np.float was removed in NumPy 1.24;
    # use the builtin float for the dtype instead.
    a = np.arange(1, 21, dtype=float).reshape(-1, 4)
    print(a)
    # print(min_max_normalize(a))
    # print(standardize(a))
    # print(StandardScaler().fit_transform(a))
    actual = [1, 1, 0, 1, 0, 0, 1, 0, 0, 0]
    predicted = [1, 0, 0, 1, 0, 0, 1, 1, 1, 0]
    print(Counter(zip(actual, predicted)))
    print(metrics(actual, predicted))
|
{"hexsha": "1f0fdbe4391269c67fb3e0acaf22c4268dd4f5ff", "size": 2004, "ext": "py", "lang": "Python", "max_stars_repo_path": "Assignment-2/utils.py", "max_stars_repo_name": "PranjalGupta2199/py-classifier", "max_stars_repo_head_hexsha": "e74be0349d755201b7265f125ef81fceec174f64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Assignment-2/utils.py", "max_issues_repo_name": "PranjalGupta2199/py-classifier", "max_issues_repo_head_hexsha": "e74be0349d755201b7265f125ef81fceec174f64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Assignment-2/utils.py", "max_forks_repo_name": "PranjalGupta2199/py-classifier", "max_forks_repo_head_hexsha": "e74be0349d755201b7265f125ef81fceec174f64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8571428571, "max_line_length": 58, "alphanum_fraction": 0.5314371257, "include": true, "reason": "import numpy", "num_tokens": 623}
|
import tvm
from tvm import topi
import numpy as np
import torch
import torchvision
from torch.autograd import Variable
from torchvision import transforms
from tvm.tensor_graph.nn.layers import Layer
from tvm.tensor_graph.nn.functional import dense, gemm
from tvm.tensor_graph.core import compute, GraphTensor, GraphOp, GraphNode
def internel_SCRNN(inputs, B, U, V, fc_weight, state_h, state_c, alpha = 0.5):
  '''
  One step of the SCRN (structurally constrained recurrent network) cell,
  built out of tensor_graph compute ops.

  Shape reference (with the default sizes used by ``SCRNN``):
  # state_h : [batch, 128=num_units]
  # state_c : [batch, 64=context_units]
  # inputs: [batch, 28*28=input_size]
  # B : [28*28, 64]
  # (inputs @ B) : [batch, 64]
  # context_state : [batch, 64] -> next state_c
  # concated: [batch, 64+28*28+128]
  # FC layer 64+28*28+128 -> 128
  # hidden_state : [batch, 128]
  # U: [128, 128]
  # V: [64, 128]
  # new_h: [batch, 128] -> next state_h

  Returns (new_h, context_state): the next hidden and context states.
  '''
  batch, input_size = inputs.shape
  _, num_units = state_h.shape
  __, context_units = state_c.shape
  # context_state = (1 - alpha) * (inputs @ self.B) + alpha * state_c
  input_at_B = gemm(inputs, B, transposeA=False, transposeB=False)
  def _inner_state_c(batch, context_units, input_at_B, state_c, requires_grad=True):
    # Moving-average-style mix of the projected input and the old context.
    return compute([batch, context_units],
            lambda i, j: (1-alpha) * input_at_B[i, j] + alpha * state_c[i, j],
            name="state_c",
            requires_grad=requires_grad)
  context_state = GraphOp([batch, context_units], [], [input_at_B, state_c],
                _inner_state_c, name="context_state")
  # concated = torch.cat([context_state, inputs, state_h], dim=1)
  def _inner_concated(batch, cat_dim, context_state, inputs, state_h, requires_grad=True):
    # Concatenation expressed as nested if_then_else over the column index:
    # columns [0, context_units) come from context_state, the next
    # input_size columns from inputs, and the rest from state_h.
    return compute([batch, cat_dim],
        lambda i, j: tvm.te.if_then_else(j < context_units,
                context_state[i, j],
                tvm.te.if_then_else(j < context_units+input_size,
                    inputs[i, j-context_units],
                    state_h[i, j-context_units-input_size])),
        name="concated",
        requires_grad=requires_grad)
  concated = GraphOp([batch, context_units+input_size+num_units], [], [context_state, inputs, state_h],
            _inner_concated, name="concated")
  # concated: [batch, 64+28*28+128], FC layer 64+28*28+128 -> 128
  # hidden_state = torch.sigmoid(self.fc(concated))
  fc_ed = gemm(concated, fc_weight, transposeA=False, transposeB=False)
  def _inner_hidden_state(batch, num_units, fc_ed, requires_grad=True):
    return compute([batch, num_units],
        lambda i, j: tvm.te.sigmoid(fc_ed[i, j]),
        name="sigmoid",
        requires_grad=requires_grad)
  hidden_state = GraphOp([batch, num_units], [], [fc_ed], _inner_hidden_state, name="sigmoid")
  # new_h = hidden_state @ self.U + context_state @ self.V
  h_at_U = gemm(hidden_state, U, transposeA=False, transposeB=False)
  c_at_V = gemm(context_state, V, transposeA=False, transposeB=False)
  def _inner_new_h(batch, num_units, h_at_U, c_at_V, requires_grad=True):
    return compute([batch, num_units],
        lambda i, j: h_at_U[i, j] + c_at_V[i, j],
        name="new_h",
        requires_grad=requires_grad)
  new_h = GraphOp([batch, num_units], [], [h_at_U, c_at_V], _inner_new_h, name="new_h")
  return new_h, context_state
class SCRNN(Layer):
  """SCRN recurrent cell with a 10-way dense classification head.

  Parameter shapes:
    B  : [input_size, context_units]   input -> context projection
    U  : [num_units, num_units]        hidden -> output contribution
    V  : [context_units, num_units]    context -> output contribution
    fc : [num_units + context_units + input_size, num_units]
  """
  def __init__(self, num_units=128,context_units=64, input_size=28*28):
    super(SCRNN, self).__init__()
    # Learnable parameters of the recurrent cell.
    concat_size = num_units + context_units + input_size
    self.B = GraphTensor([input_size, context_units], name="B", requires_grad=True)
    self.U = GraphTensor([num_units, num_units], name="U", requires_grad=True)
    self.V = GraphTensor([context_units, num_units], name="V", requires_grad=True)
    self.fc_weight = GraphTensor([concat_size, num_units], name="fc_weight", requires_grad=True)
    # Classifier weight applied to the new hidden state.
    self.weight_for_classify = GraphTensor([10, num_units], name="weight_for_classify", requires_grad=True)
  def forward(self, x, old_h, old_c):
    """Run one SCRNN step, then classify the resulting hidden state.

    old_h: [batch, num_units] hidden state; old_c: [batch, context_units]
    context state.  Returns (logits, new_h, new_c).
    """
    new_h, new_c = internel_SCRNN(x, self.B, self.U, self.V, self.fc_weight, old_h, old_c)
    logits = dense(new_h, self.weight_for_classify, bias=None)
    return logits, new_h, new_c
def get_model():
  """Factory returning a freshly constructed SCRNN with default sizes."""
  return SCRNN()
|
{"hexsha": "34f0818143f0849a44af93e6bf5eb355e969015c", "size": 4585, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/tvm/tensor_graph/testing/models/SCRNN.py", "max_stars_repo_name": "QinHan-Erin/AMOS", "max_stars_repo_head_hexsha": "634bf48edf4015e4a69a8c32d49b96bce2b5f16f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2022-03-18T07:29:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T14:54:32.000Z", "max_issues_repo_path": "python/tvm/tensor_graph/testing/models/SCRNN.py", "max_issues_repo_name": "QinHan-Erin/AMOS", "max_issues_repo_head_hexsha": "634bf48edf4015e4a69a8c32d49b96bce2b5f16f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/tvm/tensor_graph/testing/models/SCRNN.py", "max_forks_repo_name": "QinHan-Erin/AMOS", "max_forks_repo_head_hexsha": "634bf48edf4015e4a69a8c32d49b96bce2b5f16f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-18T08:26:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T06:02:48.000Z", "avg_line_length": 46.3131313131, "max_line_length": 123, "alphanum_fraction": 0.6285714286, "include": true, "reason": "import numpy", "num_tokens": 1238}
|
#pragma once
#include <polyfem/ProblemWithSolution.hpp>
#include <Eigen/Dense>
#include <vector>
#include <string>
namespace polyfem
{
	// Forward declaration; the full definition lives elsewhere in polyfem.
	class State;
	// Problem whose solution/rhs are driven by a set of kernels
	// (see n_kernels_ / kernel_distance_ / kernel_weights_ below);
	// configured at runtime via set_parameters().
	class KernelProblem : public ProblemWithSolution
	{
	public:
		KernelProblem(const std::string &name);
		// Evaluate the solution at point pt and time t.
		VectorNd eval_fun(const VectorNd &pt, const double t) const override;
		// Autodiff overload used to obtain gradients of the solution.
		AutodiffGradPt eval_fun(const AutodiffGradPt &pt, const double t) const override;
		// Hessian overload is intentionally unsupported: asserts in debug
		// builds and returns a dummy one-element value otherwise.
		AutodiffHessianPt eval_fun(const AutodiffHessianPt &pt, const double t) const override
		{
			assert(false);
			return AutodiffHessianPt(1);
		}
		// Assemble the right-hand side at the given points for the given
		// formulation into val.
		void rhs(const AssemblerUtils &assembler, const std::string &formulation, const Eigen::MatrixXd &pts, const double t, Eigen::MatrixXd &val) const override;
		// Read formulation / kernel settings from the JSON parameters.
		void set_parameters(const json &params) override;
		bool is_scalar() const override;
		// Raw, non-owning pointer to the solver state; set externally.
		State *state;
	private:
		std::string formulation_ = "Laplacian";
		int n_kernels_ = 5;
		double kernel_distance_ = 0.05;
		Eigen::VectorXd kernel_weights_;
	};
} // namespace polyfem
|
{"hexsha": "d9dfe8e1c10e6d0cc2f73e5b366248c1da21c672", "size": 997, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/problem/KernelProblem.hpp", "max_stars_repo_name": "danielepanozzo/polyfem", "max_stars_repo_head_hexsha": "34a7719c2a3874b7ecc865c28d8b3d9bbdf7d0ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 228.0, "max_stars_repo_stars_event_min_datetime": "2018-11-23T19:32:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T10:30:51.000Z", "max_issues_repo_path": "src/problem/KernelProblem.hpp", "max_issues_repo_name": "danielepanozzo/polyfem", "max_issues_repo_head_hexsha": "34a7719c2a3874b7ecc865c28d8b3d9bbdf7d0ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14.0, "max_issues_repo_issues_event_min_datetime": "2019-03-11T22:44:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T14:50:35.000Z", "max_forks_repo_path": "src/problem/KernelProblem.hpp", "max_forks_repo_name": "danielepanozzo/polyfem", "max_forks_repo_head_hexsha": "34a7719c2a3874b7ecc865c28d8b3d9bbdf7d0ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 45.0, "max_forks_repo_forks_event_min_datetime": "2018-12-31T02:04:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T02:42:01.000Z", "avg_line_length": 24.3170731707, "max_line_length": 157, "alphanum_fraction": 0.7492477432, "num_tokens": 268}
|
import re
import inspect
import time
import pandas as pd
import numpy as np
import ipywidgets as ipw
import traitlets as tra
from multiprocessing import Process
from datetime import datetime
from IPython import display
from collections.abc import Iterator
try:
from utils import frontend as utils
from processing import EnvHandeler
except ImportError:
from .utils import frontend as utils
from .processing import EnvHandeler
class WidgetCell(ipw.Button):
    '''
    WidgetCell(use_iloc: bool=False, index: Any, column: Any,
        owner: pd.DataFrame, out: ipw.Output=None)

    Child of ipywidgets' Button class for representing a cell of
    a data frame by the WidgetDf class.  Clicking the button prints the
    cell's value to the attached Output widget.

    See ``ipywidgets.Button`` and ``WidgetDf`` for more information.

    Parameters:
    -----------
    use_iloc (bool): Whether iloc should be used to locate the cell
        value in the passed DataFrame ('owner' argument) instead of
        loc (default is False).
    index (Any): Index of the desired cell.
    column (Any): Column of the desired cell.
    owner (pd.DataFrame): DataFrame from which to locate the cell's
        value.
    out (ipw.Output): Ipywidgets' Output widget in which the cell's
        value will be displayed upon being clicked.  If None, a fresh
        Output is created per cell.  (The original default
        ``out=ipw.Output()`` was evaluated once at import time, silently
        sharing one Output between every cell.)
    '''
    def __init__(self,
                 use_iloc: bool=False,
                 index: 'Any'=None,
                 column: 'Any'=None,
                 owner: pd.DataFrame=None,
                 out: ipw.Output=None,
                 **kwargs):
        super().__init__(**kwargs)
        self.use_iloc = use_iloc
        self.owner = owner
        # BUGFIX: avoid the shared mutable default (see class docstring).
        self.out = ipw.Output() if out is None else out
        self.add_traits(
            value=tra.Any(),
            index=tra.Any(),
            column=tra.Any(),
        )
        self.index = index
        self.column = column
        # Re-read the cell value whenever the addressed cell changes.
        self.observe(self.update, names=[
            'column', 'index',
        ])
        self.on_click(self.click)
        self.update(self.getvalue())
    @property
    def fromowner_(self) -> bool:
        '''
        True: cell values are always read from ``self.owner``.

        NOTE(review): the original body was ``self.use_loc or
        self.use_iloc``, but no ``use_loc`` attribute is ever defined, so
        accessing this property raised AttributeError.  ``getvalue``
        unconditionally reads from the owner (via loc or iloc), hence
        the constant -- confirm the intended semantics.
        '''
        return True
    @property
    def loc_(self) -> 'pd.core.indexing._LocIndexer|pd.core.indexing._iLocIndexer':
        '''
        self.owner.iloc if self.use_iloc else self.owner.loc
        '''
        return self.owner.iloc if self.use_iloc else self.owner.loc
    def getvalue(self, value: 'Any'=None) -> 'Any':
        '''
        self.getvalue(value: Any=None) -> Any

        Returns the value of the cell in ``self.owner`` with the index
        ``self.index`` and column ``self.column``.

        Parameters:
        -----------
        value (Any): Unused; kept for signature compatibility.  (The
            original computed a fallback from it and then discarded the
            result; that dead code is removed.  The parameter also absorbs
            the change dict traitlets passes to observers via ``update``.)
        '''
        return self.loc_[self.index, self.column]
    def setvalue(self, *args, **kwargs) -> None:
        '''
        self.setvalue(*args, **kwargs) -> None

        Inplace method for setting the 'value' trait using
        ``self.getvalue``.  See ``self.getvalue`` for more information.
        '''
        self.value = self.getvalue(*args, **kwargs)
    def update(self, value: 'Any'=None) -> None:
        '''
        self.update(value: Any=None) -> None

        Updates the 'value', 'description' and 'tooltip' traits.  Also
        used as a traitlets observer callback, in which case ``value`` is
        a change dict (ignored by ``getvalue``).
        '''
        self.setvalue(value)
        self.setdesc()
    def getdesc(self) -> str:
        '''
        self.getdesc() -> str

        Returns the appropriate description and tooltip.
        See ``utils.usename`` for more information.
        '''
        return utils.usename(self.value)
    def setdesc(self) -> None:
        '''
        self.setdesc() -> None

        Inplace method for setting the 'description' and 'tooltip' traits.
        See ``self.getdesc`` for more information.
        '''
        self.description = self.tooltip = self.getdesc()
    def click(self, button: 'WidgetCell') -> None:
        '''
        self.click(button: WidgetCell) -> None

        on_click handler: displays the value of the cell (via
        ``utils.showobj``) in the 'out' attribute under an
        "<index> - <column>" heading.

        Parameters:
        -----------
        button (WidgetCell): Should generally be self.
        '''
        head = f'{button.index} - {button.column}'
        with self.out:
            print('\n'+inspect.cleandoc(f'''
            {head}
            {"="*len(head)}
            '''))
            utils.showobj(button.value)
class WidgetDf(ipw.VBox):
    '''
    WidgetDf(data: pd.DataFrame, out: ipw.Output, **kwargs)

    Widget for representing pandas Data Frames. It inherits from
    ipywidgets' VBox class. Each cell is represented by a WidgetCell
    object.

    See ``ipywidgets.VBox`` and ``WidgetCell`` for more information.

    Parameters:
    -----------
    data (pd.DataFrame): Any pandas DataFrame, used as the 'owner'
        attribute of each cell.
    out (ipw.Output): Any ipw.Output object, used as the 'out'
        attribute of each cell (default is ``ipw.Output()``).
        NOTE(review): this default is evaluated once at import time, so
        every WidgetDf created without an explicit ``out`` shares one
        Output widget -- confirm this is intended.
    **kwargs: Key word arguments used to initialise the parent
        (ipw.VBox).
    '''
    # Fixed pixel widths for cell buttons and index/column labels.
    cell_layout = ipw.Layout(width='150px')
    index_layout = ipw.Layout(width='100px')
    column_layout = ipw.Layout(width=cell_layout.width)
    def __init__(self,
                 data: pd.DataFrame,
                 out: ipw.Output=ipw.Output(),
                 **kwargs):
        super().__init__(**kwargs)
        self.add_traits(data=tra.Any())
        self.data = data
        self.out = out
        self.clear_button = utils.ClearButton(self.out)
        self.setbuttonbox()
        self.setchildren()
        # Rebuild the whole grid whenever the DataFrame is replaced.
        self.observe(self.setchildren, names='data')
    @property
    def loc_(self) -> pd.core.indexing._LocIndexer:
        '''
        self.data.loc
        '''
        return self.data.loc
    @property
    def iloc_(self) -> pd.core.indexing._iLocIndexer:
        '''
        self.data.iloc
        '''
        return self.data.iloc
    def _ipython_display_(self) -> None:
        # Render the grid, the button row and the output area together.
        display.display(super(), self.button_box, self.out)
    def getbuttonbox(self) -> ipw.HBox:
        '''
        self.getbuttonbox() -> ipw.HBox

        Returns an ipywidgets HBox containing the 'clear_button'.
        '''
        return ipw.HBox((self.clear_button,))
    def setbuttonbox(self) -> None:
        '''
        self.setbuttonbox() -> None

        Inplace method for setting the 'button_box' attribute using
        ``self.getbuttonbox``.

        See ``self.getbuttonbox`` for more information.
        '''
        self.button_box = self.getbuttonbox()
    def getcell(self, index: 'Any', column: 'Any') -> WidgetCell:
        '''
        self.getcell(index: Any, column: Any) -> WidgetCell

        Returns a WidgetCell object representing the cell in ``self.data``
        at the given column and index.

        See ``WidgetCell`` for more information.

        Parameters:
        -----------
        index (Any): Index of the cell in self.data.
        column (Any): Column of the cell in self.data.
        '''
        return WidgetCell(
            use_iloc=False,
            owner=self.data,
            index=index,
            column=column,
            out=self.out,
            layout=self.__class__.cell_layout
        )
    def getindex(self, index: 'Any') -> ipw.Label:
        '''
        self.getindex(index: Any) -> ipw.Label

        Returns a Label widget whose value is the string representation
        of the passed index item (an item of ``self.data.index``).
        '''
        return ipw.Label(str(index), layout=self.__class__.index_layout)
    def getcolumn(self, column: 'Any') -> ipw.Label:
        '''
        self.getcolumn(column: Any) -> ipw.Label

        Returns a Label widget whose value is the string representation
        of the passed column name (an item of ``self.data.columns``).
        '''
        return ipw.Label(str(column), layout=self.__class__.column_layout)
    def getrow(self, index: 'Any') -> tuple:
        '''
        self.getrow(index: Any) -> tuple

        Returns the widgets for one row of ``self.data``: the index label
        followed by one WidgetCell per column.

        See ``self.getindex`` and ``self.getcell`` for more information.

        Parameters:
        ----------
        index (Any): Item in ``self.data.index``.
        '''
        return (self.getindex(index), *tuple(pd.Series(self.data.columns).apply(
            lambda col: self.getcell(index, col)
        ).values))
    def getrows(self) -> tuple:
        '''
        self.getrows() -> tuple

        Returns a tuple of tuples, with each inner tuple generated by
        ``self.getrow`` for one index item.

        See ``self.getrow`` for more information.
        '''
        return tuple(pd.Series(self.data.index).apply(
            lambda i: self.getrow(i)
        ).values)
    def getchildren(self) -> tuple:
        '''
        self.getchildren() -> tuple

        Returns the tuple used as the 'children' trait: the column header
        row followed by one HBox per data row.

        See ``self.getcolumns`` and ``self.getrows`` for more information.
        '''
        return (self.getcolumns(), *utils.hboxes(self.getrows()))
    def getcolumns(self) -> ipw.HBox:
        '''
        self.getcolumns() -> ipw.HBox

        Returns an ipywidgets HBox of Label widgets representing the
        column names of ``self.data``.

        Note, the 0th child of the returned HBox represents the index
        name. If the index is unnamed, the value of the 0th child is
        blank.
        '''
        inam = self.data.index.name
        return ipw.HBox(
            [self.getcolumn('') if inam is None else self.getcolumn(inam)] +
            [self.getcolumn(col) for col in self.data.columns]
        )
    def setchildren(self, *args) -> None:
        '''
        self.setchildren(*args) -> None

        Inplace method for setting the 'children' trait; ``*args`` absorbs
        the change dict when this is invoked as a traitlets observer.

        See ``self.getchildren`` for more information.
        '''
        self.children = self.getchildren()
    def itercells(self) -> Iterator:
        '''
        itercells(self) -> Iterator

        Yields each WidgetCell by row then column (skipping the header
        row, and each row's leading index label).
        '''
        for row in self.children[1:]:
            for cell in row.children[1:]:
                yield cell
    def changeout(self, out: ipw.Output) -> None:
        '''
        self.changeout(out: ipw.Output) -> None

        Inplace method for safely changing the 'out' attribute of self,
        the clear button, and every cell.

        Parameters:
        -----------
        out (ipw.Output): New Output widget in which to display cell
            values when clicked.
        '''
        self.out = out
        self.clear_button.out = out
        for cell in self.itercells():
            cell.out = self.out
class WidgetEnv(WidgetDf, EnvHandeler):
    '''
    WidgetEnv(*args, **kwargs)

    Widget for representing EnvHandeler objects. It inherits from the
    WidgetDf and EnvHandeler classes.

    See ``WidgetDf`` and ``EnvHandeler`` for more information.

    Parameters:
    -----------
    *args: Positional arguments used to initialise the EnvHandeler
        parent.
    **kwargs: Key word arguments used to initialise the EnvHandeler
        parent.
    '''
    def __init__(self, *args, **kwargs):
        EnvHandeler.__init__(self, *args, **kwargs)
        # The widget grid mirrors the handler's DataFrame.
        WidgetDf.__init__(self, self.df)
        self.add_traits(last_updated=tra.Any())
        self.last_updated = None
        self.setupdatebutton()
    @utils.inthread
    def update(self, *args, **kwargs) -> None:
        '''
        self.update(*args, **kwargs) -> None

        Wrapper around the EnvHandeler parent's 'update': after the
        parent's update, ``self.data`` is refreshed from ``self.df`` and
        'last_updated' is set via ``datetime.now``.

        Note, this function is decorated with ``utils.inthread``, hence
        it will run in its own thread.

        Parameters:
        -----------
        *args: Positional arguments passed to the parent's update method.
        **kwargs: Key word arguments passed to the parent's update method.
        '''
        super().update(*args, **kwargs)
        self.data = self.df
        self.last_updated = datetime.now()
    def getupdatebutton(self, *args, **kwargs) -> utils.UpdateButton:
        '''
        self.getupdatebutton(*args, **kwargs) -> utils.UpdateButton

        Returns a ``utils.UpdateButton`` that calls the 'update' method
        (with the given arguments) upon being clicked.

        Parameters:
        -----------
        *args: Positional arguments forwarded to 'update' on click.
        **kwargs: Key word arguments forwarded to 'update' on click.
        '''
        button = utils.UpdateButton()
        button.on_click(lambda button: self.update(*args, **kwargs))
        return button
    def setupdatebutton(self, *args, **kwargs) -> None:
        '''
        self.setupdatebutton(*args, **kwargs) -> None

        Creates the 'update_button' and appends it to the children of the
        'button_box' attribute.

        Parameters:
        -----------
        *args: Positional arguments passed to ``self.getupdatebutton``.
        **kwargs: Key word arguments passed to ``self.getupdatebutton``.
        '''
        # BUGFIX: the original called ``self.getupdatebutton(*args, *kwargs)``,
        # which unpacks the dict's KEYS as extra positional arguments
        # instead of forwarding the keyword arguments.
        self.update_button = self.getupdatebutton(*args, **kwargs)
        self.button_box.children += (self.update_button,)
    def subenv(self, *args, new_output: bool=True, **kwargs) -> 'WidgetEnv':
        '''
        self.subenv(*args, new_output: bool=True, **kwargs) -> WidgetEnv

        Wrapper around the EnvHandeler parent's 'subenv' in which the
        resulting WidgetEnv is optionally given a new Output widget as
        its 'out' attribute.

        Parameters:
        -----------
        *args: Positional arguments passed to ``super().subenv``.
        new_output (bool): Whether the returned WidgetEnv should have its
            own, new, output.  If False, output from both self and the
            returned WidgetEnv will share an output.
        **kwargs: Key word arguments passed to ``super().subenv``.
        '''
        env = super().subenv(*args, **kwargs)
        if new_output:
            env.changeout(ipw.Output())
        return env
class AutoWidgetEnv(WidgetEnv):
    '''
    AutoWidgetEnv(*args, interval: float=5, start: bool=True, **kwargs)

    Child class of ``WidgetEnv``. Is able to automatically update itself
    periodically in the background.

    See ``WidgetEnv`` for more information.

    Parameters:
    -----------
    *args: Positional arguments passed to the parent's constructor.
    interval (float): Number of seconds between updates (default is 5).
    start (bool): Whether to start automatic updates on initialisation.
        ``self.start`` can be used to commence automatic updating after
        the fact (default is True).
    **kwargs: Key word arguments passed to the parent's constructor.
    '''
    def __init__(self,
                 *args,
                 interval: float=5,
                 start:bool=True,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.interval = interval
        # While True, periodic update() calls become no-ops.
        self.paused = False
        self.setpausebutton()
        self.start() if start else None
    def update(self, *args, **kwargs) -> None:
        '''
        self.update(*args, **kwargs) -> None

        Wrapper around the parent's 'update' method which only calls it
        while ``self.paused`` is False.

        See ``WidgetEnv.update`` for more information.

        Parameters:
        -----------
        *args: Positional arguments passed to ``super().update``.
        **kwargs: Key word arguments passed to ``super().update``.
        '''
        if not self.paused:
            super().update(*args, **kwargs)
    def start(self, *args, **kwargs) -> None:
        '''
        self.start(*args, **kwargs) -> None:

        Commences automatic updating via ``utils.runperiodic``.

        See ``WidgetEnv.update`` and ``utils.runperiodic`` for more
        information.

        Parameters:
        -----------
        *args: Positional arguments passed to ``super().update``.
        **kwargs: Key word arguments passed to ``super().update``.
        '''
        utils.runperiodic(
            func=self.update,
            interval=self.interval
        )(*args, **kwargs)
    def stop(self):
        # NOTE(review): neither 'update_process' nor 'setupdateprocess' is
        # defined in this file's classes, so calling stop() will raise
        # AttributeError unless EnvHandeler provides them -- verify.
        self.update_process.terminate()
        self.setupdateprocess()
    def getpausebutton(self, **kwargs) -> utils.PausePlayButton:
        '''
        self.getpausebutton(**kwargs) -> utils.PausePlayButton

        Returns a pause/play button that toggles ``self.paused`` to the
        button's own 'paused' state when clicked.
        '''
        pause_button = utils.PausePlayButton(self.paused, **kwargs)
        pause_button.on_click(
            lambda button: setattr(self, 'paused', button.paused)
        )
        return pause_button
    def setpausebutton(self, **kwargs) -> None:
        '''
        self.setpausebutton(**kwargs) -> None

        Creates the 'pause_button' and appends it to the children of the
        'button_box' attribute.
        '''
        self.pause_button = self.getpausebutton(**kwargs)
        self.button_box.children += (self.pause_button,)
|
{"hexsha": "96cc1d9b841b3d3afd6650a6955a42b1f1b936a6", "size": 19165, "ext": "py", "lang": "Python", "max_stars_repo_path": "interface.py", "max_stars_repo_name": "nuki111/env_explore", "max_stars_repo_head_hexsha": "b5dfa05fbcfb0126e246e4ef4eb5a392a8615cf0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "interface.py", "max_issues_repo_name": "nuki111/env_explore", "max_issues_repo_head_hexsha": "b5dfa05fbcfb0126e246e4ef4eb5a392a8615cf0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "interface.py", "max_forks_repo_name": "nuki111/env_explore", "max_forks_repo_head_hexsha": "b5dfa05fbcfb0126e246e4ef4eb5a392a8615cf0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9862306368, "max_line_length": 88, "alphanum_fraction": 0.5551787112, "include": true, "reason": "import numpy", "num_tokens": 4185}
|
[STATEMENT]
lemma funas_ctxt_of_gctxt_conv [simp]:
"funas_ctxt (ctxt_of_gctxt C) = funas_gctxt C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. funas_ctxt (ctxt_of_gctxt C) = funas_gctxt C
[PROOF STEP]
by (induct C) (auto simp flip: funas_gterm_gterm_of_term)
|
{"llama_tokens": 132, "file": "Regular_Tree_Relations_Util_Ground_Ctxt", "length": 1}
|
import os
import json
from pdf2words import document
import numpy as np
import re
from operator import itemgetter
from collections import OrderedDict
class name_scoring:
    def __init__(self):
        """State for clustering page-1 words and scoring candidate
        name clusters."""
        # Words collected while walking page 1 (filled by forming()).
        self.top_words = []
        # Groups of adjacent words, i.e. candidate name lines.
        self.clusters = []
        # Per-cluster scores (filled by scoring()).
        self.score = []
        self.flg = 0
        # Per-cluster average glyph height and vertical position
        # (filled by scoring()).
        self.size = []
        self.location = []
        # Page midpoints used for position-based scoring (set in forming()).
        self.position_x = 0
        self.position_y = 0
        # Vertical tolerance for same-line grouping (set in forming()).
        self.thresh_y=0
    def forming(self, obj):
        """Group the words of the document's first page into clusters.

        Loads ``obj`` (a pdf2words JSON document) and walks the words of
        page 1.  Words in the top half of the page are grouped into
        ``self.clusters`` using vertical (``self.thresh_y``) and
        horizontal-spacing heuristics; also sets ``self.position_x`` /
        ``self.position_y`` (page quadrant midpoints used later by
        scoring()) and appends every visited word to ``self.top_words``.
        """
        word_group = []
        d = document.Document()
        d.load(json_object=obj)
        wordpage = d._document['pages'][0]
        height = int(wordpage._height)
        width = int(wordpage._width)
        # Same-line tolerance: 0.8% of the page height.
        self.thresh_y = 0.008 * height
        half_h = height//2
        self.position_x = width//2
        self.position_y = half_h//2
        # flag counts how many words the current cluster has absorbed.
        flag = 0
        #iterating through all words of 1st page
        for word in wordpage._words:
            x0 = word._x0
            x1 = word._x1
            y0 = word._y0
            y1 = word._y1
            # extracting words in top half of first page
            if(y1 <= half_h):
                if(flag > 1):
                    #visting minimum 2 words in a cluster
                    if(flag == 2):
                        diff1_x = word_group[1]._x0-word_group[0]._x1
                        diff1_y = word_group[1]._y1-word_group[0]._y1
                        #next line encountered at 2nd word
                        if(abs(diff1_y) > self.thresh_y):
                            val = word_group.pop()
                            self.clusters.append(word_group)
                            word_group = []
                            word_group.append(val)
                            word_group.append(word)
                            flag = 1
                    if(flag >= 2):
                        diff2_x = x0 - temp_x
                        diff2_y = y1 - temp_y1
                        #grouping words in particular cluster based on minimum difference
                        if(abs(diff2_y) <= self.thresh_y):
                            # Horizontal gap tolerance scaled by mean glyph height.
                            thresh_x = (((y1-y0) + (temp_y1-temp_y0))/2)*0.55
                            if(abs(diff1_x-diff2_x) <= thresh_x):
                                word_group.append(word)
                                diff1_x = diff2_x
                            elif(abs(diff1_x) > abs(diff2_x)):
                                val = word_group.pop()
                                self.clusters.append(word_group)
                                word_group = []
                                word_group.append(val)
                                word_group.append(word)
                                flag = 1
                            else:
                                self.clusters.append(word_group)
                                word_group = []
                                word_group.append(word)
                                flag = 0
                        #next line encountered at 3rd word
                        elif(abs(diff2_y) > self.thresh_y):
                            self.clusters.append(word_group)
                            word_group = []
                            word_group.append(word)
                            flag = 0
            else:
                word_group.append(word)
            # Remember this word's right edge and vertical extent for the
            # next iteration's gap computations.
            temp_x = x1
            temp_y1 = y1
            temp_y0 = y0
            self.top_words.append(word)
            flag += 1
        # Flush the trailing, still-open cluster.
        self.clusters.append(word_group)
        word_group = []
    def clean(self):
        """Filter and normalise ``self.clusters`` in place.

        Keeps clusters containing honorific markers (Mr./Mrs./M/S. ...)
        verbatim; drops clusters containing ';', '*' or ',' or short
        all-non-letter tokens; splits clusters at ':', '/' or '-'
        delimiters via the local helper.  The surviving/derived clusters
        replace ``self.clusters``.
        """
        new_clusters = []
        def splitbydelimeter(symbl, cluster, i):
            # Split `cluster` around position i.  If the token at i IS the
            # bare delimiter, it is dropped; otherwise the split keeps it
            # with the appropriate side.  Empty fragments are discarded.
            new_cluster = []
            if(len(cluster) > i+1):
                if(cluster[i]._text != symbl):
                    temp = cluster[:i]
                    if(temp != []):
                        new_cluster.append(temp)
                    temp = cluster[i:]
                    if(temp != []):
                        new_cluster.append(temp)
                else:
                    temp = cluster[:i]
                    if(temp != []):
                        new_cluster.append(temp)
                    temp = cluster[i+1:]
                    if(temp != []):
                        new_cluster.append(temp)
            else:
                if(cluster[i]._text != symbl):
                    temp = cluster[:i+1]
                    if(temp != []):
                        new_cluster.append(temp)
                else:
                    temp = cluster[:i]
                    if(temp != []):
                        new_cluster.append(temp)
            return new_cluster
        for cluster in self.clusters:
            temp = []
            flag = 0
            for i in range(len(cluster)):
                # keep the cluster that have words with substrings (MR.|Mr.|M/S.|Mrs.)
                if(re.search('(MR\.|Mr\.|M\/S\.|Mrs\.|Miss.|Dr\.|messrs|Smt\.|S\/O.)', cluster[i]._text) is not None):
                    temp = cluster
                    break
                # removing words with special characters ' , ; * ' and
                elif(re.search('[;*,]', cluster[i]._text) is not None):
                    temp = []
                    flag = 1
                    break
                # remove clusters with only numbers of length <= 3
                elif(re.search('^[^A-Za-z:&-\/]+$', cluster[i]._text) is not None):
                    if(len(cluster[i]._text) <= 3):
                        temp = []
                        flag = 1
                        break
                    continue
                # dealing with words having ' : - /'
                # every thing before colon in one cluster
                # every thing before colon in another cluster
                elif(':' in cluster[i]._text):
                    new_clusters += splitbydelimeter(':', cluster, i)
                    flag = 1
                    break
                elif('/' in cluster[i]._text):
                    new_clusters += splitbydelimeter('/', cluster, i)
                    flag = 1
                    break
                elif('-' in cluster[i]._text):
                    new_clusters += splitbydelimeter('-', cluster, i)
                    flag = 1
                    break
                # cluster remains as it is
                else:
                    temp.append(cluster[i])
            if(flag == 0):
                if(temp != []):
                    new_clusters.append(temp)
        self.clusters = new_clusters
def read(self, path):
with open(path, 'r') as f:
l = f.readlines()
l = [x.strip('\n') for x in l]
return l
    def scoring(self):
        """Score every cluster in ``self.clusters`` as a candidate name.

        Appends one numeric score per cluster to ``self.score`` (and the
        cluster's mean glyph size / vertical location to ``self.size`` /
        ``self.location``).  Scores combine: cluster length, position
        relative to (``self.position_x``, ``self.position_y``), membership
        of word lists loaded from ``pdf2words/scoring_data`` (cities,
        first/last names, bank/address terms, company suffixes), presence
        of digits, single-word penalty, upper-case bonus and honorific
        bonus.  Finally the top-5 clusters get extra size- and
        location-based bonuses.

        NOTE(review): the city/name word lists are re-read from disk on
        every loop iteration — hoisting them above the loop looks safe,
        but confirm the files cannot change mid-run before doing so.
        """
        for cluster in self.clusters:
            scr = 0
            cities = self.read('pdf2words/scoring_data/cities.txt')
            first_names = self.read('pdf2words/scoring_data/first_names.txt')
            last_names = self.read('pdf2words/scoring_data/last_names.txt')
            # concatenated text, and upper-cased per-word list for lookups
            words = ''.join([x._text for x in cluster])
            l = [x._text.upper() for x in cluster]
            # checking for cluster length
            if(len(cluster) >= 3):
                scr += 2
            # position based scoring
            x_measure = 0
            y_measure = 0
            size = 0
            length = len(cluster)
            for i in range(len(cluster)):
                # glyph height accumulated for the mean size
                size = size+abs(cluster[i]._y0-cluster[i]._y1)
                y_measure = y_measure+(cluster[i]._y0+cluster[i]._y1)//2
                # x_measure is the midpoint of first word's x0 and last word's x1
                if(i == 0):
                    x_measure += cluster[i]._x0
                elif(i == length-1):
                    x_measure += cluster[i]._x1
            x_measure = x_measure//2
            y_measure = y_measure//length
            size = size//length
            self.size.append(size)
            self.location.append(y_measure)
            #print(x_measure,y_measure,self.position_x,self.position_y,cluster[0]._text)
            # quadrant bonus relative to the reference position
            if(self.position_x >= x_measure and self.position_y >= y_measure):
                scr += 6
            elif(self.position_x <= x_measure and self.position_y >= y_measure):
                scr += 2
            elif(self.position_x >= x_measure and self.position_y <= y_measure):
                scr += 1
            else:
                scr += 0
            # checking for cities
            flag = 0
            for i in cities:
                if(i.upper() in l):
                    flag = 1
                    break
            if(flag == 1):
                scr -= 2.5
            # checking for firstnames
            flag = 0
            for i in first_names:
                if(i.upper() in l):
                    flag = 1
                    break
            if(flag == 1):
                scr += 2
            # checking for lastnames
            flag = 0
            for i in last_names:
                if(i.upper() in l):
                    flag = 1
                    break
            if(flag == 1):
                scr += 3
            # checking for commonly occurring bank words
            common = self.read('pdf2words/scoring_data/bank_terms.txt')
            flag = 0
            for i in common:
                # terms of length >= 4 match as whole-word membership;
                # shorter terms require an exact word match
                if(len(i) >= 4 and i.upper() in l):
                    flag += 1
                else:
                    for x in l:
                        if(i.upper() == x):
                            flag += 1
            if(flag >= 1):
                scr -= (5*(flag))
            # checking for commonly occurring address words
            common = self.read('pdf2words/scoring_data/addr_terms.txt')
            flag = 0
            for i in common:
                if(len(i) >= 4 and i.upper() in l):
                    flag += 1
                else:
                    for x in l:
                        if(i.upper() == x):
                            flag += 1
            if(flag >= 1):
                scr -= (5*(flag))
            # checking for company suffix
            company = self.read('pdf2words/scoring_data/company_suffix.txt')
            flag = 0
            for i in company:
                if i.upper() in l:
                    flag += 1
            if(flag >= 1):
                scr += 3
            # checking for numeric values
            if(re.search('[0-9]', words) is not None):
                scr -= 5
            # checking clusters with only one word
            if(len(cluster) == 1):
                scr -= 3
            # checking for case sensitive
            if(words.isupper()):
                scr += 1
            # honorific bonus
            if(re.search('(MR\.|Mr\.|M\/S\.|Mrs\.|Miss.|Dr\.|messrs|Smt\.)', words) is not None):
                scr += 3
            self.score.append(scr)
        # processing top 5 clusters for more accuracy
        index = np.argsort(np.asarray(self.score))
        index = index[::-1]
        index = index[:5]
        location = {}
        size = {}
        for i in index:
            size[self.size[i]] = i
            location[self.location[i]] = i
        size = OrderedDict(sorted(size.items(), reverse=True))
        location = OrderedDict(sorted(location.items()))
        # 1.) size based scoring: larger mean glyph size earns a larger bonus
        cnt = 2.5
        if(len(size) == 5):
            for i in size:
                if(cnt == 2.5):
                    temp = i
                    self.score[size[i]] += cnt
                else:
                    # decay the bonus only when the size gap is significant
                    if(abs(temp-i) >= self.thresh_y):
                        cnt -= 0.5
                        self.score[size[i]] += cnt
                    else:
                        self.score[size[i]] += cnt
        # 2.) location based scoring: higher on the page earns a larger bonus
        cnt = 2.5
        for i in location:
            self.score[location[i]] += cnt
            cnt -= 0.5
def print_c(self, clusters):
for cluster in clusters:
for words in cluster:
print(words._text+' ', end='')
print()
def get_name(self):
Z = [[scr, cluster] for scr, cluster in zip(self.score, self.clusters)]
Z = list(reversed(sorted(Z, key=itemgetter(0))))
clusters = [i[1] for i in Z[:1]]
name = ''
for words in clusters[0]:
name += words._text+' '
return name
def get_bbox(self, cluster):
#bbox=[x1,x2,x3,x4]
bbox = [0, 0, 0, 0]
bbox[1] = cluster[0]._y0
bbox[3] = cluster[-1]._y1
for i in cluster:
if(i._x0 > bbox[0]):
bbox[0] = i._x0
if(i._x1 > bbox[2]):
bbox[2] = i._x1
return bbox
    def check_acc(self, json_path):
        """Compare the top-scoring cluster against the ground-truth name.

        Loads expected names from ``pdf2words/scoring_data/original.txt``
        (one ``<file> <name>`` entry per line, keyed by file stem) and sets
        ``self.flg``: 1 when a ground-truth name exists for this file, 2
        when the best cluster matches it (either the truth is a substring
        of the found words, or at least two found words occur in the truth).
        """
        # reverse sorting clusters based on score
        Z = [[scr, cluster] for scr, cluster in zip(self.score, self.clusters)]
        Z = list(reversed(sorted(Z, key=itemgetter(0))))
        clusters = [i[1] for i in Z[:3]]
        # printing clusters and corresponding score
        '''cnt = 0
        for cluster in clusters:
            for words in cluster:
                print(words._text+' ', end='')
            print(Z[cnt][0])
            cnt += 1'''
        # comparing with original data
        # NOTE(review): `dict` shadows the builtin; rename on the next
        # behavioural change.
        dict = {}
        answers = self.read('pdf2words/scoring_data/original.txt')
        for i in answers:
            temp = i.split(' ')
            dict[temp[0]] = ' '.join(temp[1:])
        # only the single best cluster is checked
        for cluster in clusters[:1]:
            words = ''
            for word in cluster:
                words += word._text+' '
            if(dict[json_path.split('.')[0]] != 'None'):
                self.flg = 1
            # original name is a substring of found words
            if(dict[json_path.split('.')[0]].upper() in words.upper()):
                self.flg = 2
                break
            # 2 or more found words occur in original
            if(sum([1 if(word._text.upper() in dict[json_path.split('.')[0]].upper()) else 0 for word in cluster]) >= 2):
                self.flg = 2
                break
|
{"hexsha": "76f32185fa21baf1417552938d4d82dcabbb2772", "size": 14011, "ext": "py", "lang": "Python", "max_stars_repo_path": "flask/pdf2words/name_score.py", "max_stars_repo_name": "ishan-modi/docker", "max_stars_repo_head_hexsha": "768ccc3450043d41d1de21ebef28aef6ce4d6149", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "flask/pdf2words/name_score.py", "max_issues_repo_name": "ishan-modi/docker", "max_issues_repo_head_hexsha": "768ccc3450043d41d1de21ebef28aef6ce4d6149", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flask/pdf2words/name_score.py", "max_forks_repo_name": "ishan-modi/docker", "max_forks_repo_head_hexsha": "768ccc3450043d41d1de21ebef28aef6ce4d6149", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-20T12:39:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-11T10:08:16.000Z", "avg_line_length": 30.3926247289, "max_line_length": 125, "alphanum_fraction": 0.4245235886, "include": true, "reason": "import numpy", "num_tokens": 2985}
|
import os
import sys
import re
import importlib
import torch
import numpy as np
import random
# @NOTE: https://stackoverflow.com/a/1176023/2425365
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z])([A-Z])')
def to_camel_case(name: str):
    """Convert a CamelCase identifier to snake_case.

    NOTE(review): despite its name, this function converts *to*
    underscore-separated lowercase (per the linked answer); the name is
    kept for caller compatibility.
    """
    result = name
    for word_break_re in (first_cap_re, all_cap_re):
        result = word_break_re.sub(r'\1_\2', result)
    return result.lower()
def import_usr_dir(usr_dir):
    """Import the directory at ``usr_dir`` as a Python module.

    The directory's parent is temporarily prepended to ``sys.path`` so
    the leaf directory name is importable, then removed again.
    """
    expanded = os.path.expanduser(usr_dir).rstrip("/")
    parent_dir, module_name = os.path.split(os.path.abspath(expanded))
    sys.path.insert(0, parent_dir)
    importlib.import_module(module_name)
    sys.path.pop(0)
def set_seeds(seed):
    """
    Set the seed value for PyTorch (CPU and CUDA), NumPy and Python.

    Important for reproducible runs!

    :param seed: seed value; ``None`` leaves all generators untouched
    :return:
    """
    if seed is None:
        return
    for seeder in (torch.manual_seed,
                   torch.cuda.manual_seed_all,
                   np.random.seed,
                   random.seed):
        seeder(seed)
def minibatch_generator(*args, minibatch_size=5):
    """Yield aligned random minibatches from one or more 2-D arrays.

    Each of ``total_len // minibatch_size`` batches is a tuple holding one
    row-slice per input array, all selected by the *same* random index
    vector (sampled with replacement), so corresponding rows stay aligned.

    Fix: the original sampled an index vector before the loop and again
    after every yield, leaving one extra, unused draw after the final
    batch.  Sampling once at the top of each iteration yields the exact
    same batch sequence for a given RNG seed without the wasted draw.

    :param args: arrays sharing the same first-dimension length,
        indexable as ``item[idx, :]``
    :param minibatch_size: number of rows per batch
    """
    total_len = len(args[0])
    for _ in range(total_len // minibatch_size):
        minibatch_idx = np.random.choice(total_len, minibatch_size)
        yield tuple(item[minibatch_idx, :] for item in args)
|
{"hexsha": "8fe83935cfaf217f9386df3c82c3d4380d1ef93b", "size": 1225, "ext": "py", "lang": "Python", "max_stars_repo_path": "torchrl/utils/misc.py", "max_stars_repo_name": "srikarym/torchrl", "max_stars_repo_head_hexsha": "fee98e78ac1657a2c9a4063dd8d63ba207a121e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-27T19:00:32.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-19T03:18:28.000Z", "max_issues_repo_path": "torchrl/utils/misc.py", "max_issues_repo_name": "srikarym/torchrl", "max_issues_repo_head_hexsha": "fee98e78ac1657a2c9a4063dd8d63ba207a121e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "torchrl/utils/misc.py", "max_forks_repo_name": "srikarym/torchrl", "max_forks_repo_head_hexsha": "fee98e78ac1657a2c9a4063dd8d63ba207a121e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0, "max_line_length": 69, "alphanum_fraction": 0.7208163265, "include": true, "reason": "import numpy", "num_tokens": 339}
|
#!/usr/bin/env python
# coding=utf-8
import torch.nn.functional as F
from scipy.spatial.distance import cdist
from utils.utils_pytorch import *
import matplotlib.pyplot as plt
from utils.general import plot_cm
import sys
sys.path.append("..")
from datasets.utils_dataset import merge_images_labels
def test_ac(tg_model, X_valid_total, Y_valid_total, evalset, testloader, order, iteration, args):
if args.dataset == 'cifar100':
acc_old, acc_new, acc_total = test_cifar100(tg_model, X_valid_total, Y_valid_total, evalset, testloader, order, iteration, args)
elif args.dataset == 'tinyimagenet' or 'cub_imagenet':
acc_old, acc_new, acc_total = test_imgstyle(tg_model, X_valid_total, Y_valid_total, evalset, testloader, order, iteration, args)
return acc_old, acc_new, acc_total
def test_cifar100(tg_model, X_valid_total, Y_valid_total, evalset, testloader, order, iteration, args):
    """Evaluate an incremental-learning model on CIFAR-100 splits.

    Selects old-class and new-class validation subsets for the given task
    ``iteration`` (class ranges depend on whether the first group size
    ``args.nb_cl_fg`` equals the per-task size ``args.nb_cl``), computes
    image-level accuracy on each via ``compute_accuracy_all_images``, and
    per-class/total accuracy on ``testloader`` via
    ``compute_accuracy_per_class``.  Prints summaries and returns
    ``(acc_old, acc_new, acc_total)``.
    """
    tg_model.eval()
    order_list = list(order)
    # Determine which label ranges count as "old" vs "new" for this task.
    if iteration == 0:
        # first task: old and new both refer to the initial class group
        indices_test_subset_old = np.array([i in order[range(0, args.nb_cl_fg)] for i in Y_valid_total])
        indices_test_subset_new = np.array([i in order[range(0, args.nb_cl_fg)] for i in Y_valid_total])
        start_new_class = 0
        end_new_class = args.nb_cl_fg
    else:
        if args.nb_cl_fg == args.nb_cl:
            indices_test_subset_old = np.array([i in order[range(0, iteration * args.nb_cl)] for i in Y_valid_total])
            indices_test_subset_new = np.array([i in order[range(iteration * args.nb_cl, (iteration + 1) * args.nb_cl)] for i in Y_valid_total])
            start_new_class = iteration * args.nb_cl
            end_new_class = (iteration + 1) * args.nb_cl
        else:
            indices_test_subset_old = np.array([i in order[range(0, args.nb_cl_fg + (iteration-1) * args.nb_cl)] for i in Y_valid_total])
            indices_test_subset_new = np.array([i in order[range(args.nb_cl_fg + (iteration-1) * args.nb_cl, args.nb_cl_fg + iteration * args.nb_cl)] for i in Y_valid_total])
            start_new_class = args.nb_cl_fg + (iteration-1) * args.nb_cl
            end_new_class = args.nb_cl_fg + iteration * args.nb_cl
    ### compute old classes accuracy
    X_valid_old = X_valid_total[indices_test_subset_old]
    Y_valid_old = Y_valid_total[indices_test_subset_old]
    # remap original labels to their position in the class order
    map_Y_valid_old = np.array([order_list.index(i) for i in Y_valid_old])
    evalset.data = X_valid_old.astype('uint8')
    evalset.targets = map_Y_valid_old
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,num_workers=2)
    acc_old = compute_accuracy_all_images(tg_model, evalloader)
    ### compute new classes accuracy
    X_valid_new = X_valid_total[indices_test_subset_new]
    Y_valid_new = Y_valid_total[indices_test_subset_new]
    map_Y_valid_new = np.array([order_list.index(i) for i in Y_valid_new])
    evalset.data = X_valid_new.astype('uint8')
    evalset.targets = map_Y_valid_new
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,num_workers=2)
    acc_new = compute_accuracy_all_images(tg_model, evalloader)
    ### compute total acc and each class acc
    acc_total, old_class_acc_mean, new_class_acc_mean, total_class_acc_mean = compute_accuracy_per_class(tg_model, testloader, start_new_class, end_new_class, args, iteration)
    print('Old images ac: {:.2f} % New images ac: {:.2f} % Total images ac: {:.2f} % '.format(acc_old, acc_new, acc_total))
    print('Old classes ac: {:.2f} % New classes ac: {:.2f} % Total classes ac: {:.2f} % '.format(old_class_acc_mean, new_class_acc_mean, total_class_acc_mean))
    return acc_old, acc_new, acc_total
def test_imgstyle(tg_model, X_valid_total, Y_valid_total, evalset, testloader, order, iteration, args):
    """Evaluate an incremental-learning model on image-folder datasets.

    Same split/accuracy logic as ``test_cifar100``, but the eval dataset
    is fed via ``merge_images_labels`` and ``evalset.imgs``/``samples``
    (ImageFolder-style) rather than raw arrays.  Returns
    ``(acc_old, acc_new, acc_total)``.
    """
    tg_model.eval()
    order_list = list(order)
    # Determine which label ranges count as "old" vs "new" for this task.
    if iteration == 0:
        indices_test_subset_old = np.array([i in order[range(0, args.nb_cl_fg)] for i in Y_valid_total])
        indices_test_subset_new = np.array([i in order[range(0, args.nb_cl_fg)] for i in Y_valid_total])
        start_new_class = 0
        end_new_class = args.nb_cl_fg
    else:
        if args.nb_cl_fg == args.nb_cl:
            indices_test_subset_old = np.array([i in order[range(0, iteration * args.nb_cl)] for i in Y_valid_total])
            indices_test_subset_new = np.array([i in order[range(iteration * args.nb_cl, (iteration + 1) * args.nb_cl)] for i in Y_valid_total])
            start_new_class = iteration * args.nb_cl
            end_new_class = (iteration + 1) * args.nb_cl
        else:
            indices_test_subset_old = np.array([i in order[range(0, args.nb_cl_fg + (iteration-1) * args.nb_cl)] for i in Y_valid_total])
            indices_test_subset_new = np.array([i in order[range(args.nb_cl_fg + (iteration-1) * args.nb_cl, args.nb_cl_fg + iteration * args.nb_cl)] for i in Y_valid_total])
            start_new_class = args.nb_cl_fg + (iteration-1) * args.nb_cl
            end_new_class = args.nb_cl_fg + iteration * args.nb_cl
    ### compute old classes accuracy
    X_valid_old = X_valid_total[indices_test_subset_old]
    Y_valid_old = Y_valid_total[indices_test_subset_old]
    map_Y_valid_old = np.array([order_list.index(i) for i in Y_valid_old])
    eval_set_old = merge_images_labels(X_valid_old, map_Y_valid_old)
    evalset.imgs = evalset.samples = eval_set_old
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,num_workers=2)
    acc_old = compute_accuracy_all_images(tg_model, evalloader)
    ### compute new classes accuracy
    X_valid_new = X_valid_total[indices_test_subset_new]
    Y_valid_new = Y_valid_total[indices_test_subset_new]
    map_Y_valid_new = np.array([order_list.index(i) for i in Y_valid_new])
    eval_set_new = merge_images_labels(X_valid_new, map_Y_valid_new)
    evalset.imgs = evalset.samples = eval_set_new
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,num_workers=2)
    acc_new = compute_accuracy_all_images(tg_model, evalloader)
    ### compute total acc and each class acc
    acc_total, old_class_acc_mean, new_class_acc_mean, total_class_acc_mean = compute_accuracy_per_class(tg_model, testloader, start_new_class, end_new_class, args, iteration)
    print('Old images ac: {:.2f} % New images ac: {:.2f} % Total images ac: {:.2f} % '.format(acc_old, acc_new, acc_total))
    print('Old classes ac: {:.2f} % New classes ac: {:.2f} % Total classes ac: {:.2f} % '.format(old_class_acc_mean, new_class_acc_mean, total_class_acc_mean))
    return acc_old, acc_new, acc_total
def compute_accuracy_all_images(tg_model, evalloader):
    """Top-1 accuracy (%) of ``tg_model`` over every batch in ``evalloader``.

    Batches are moved with ``.cuda()``, so a CUDA device is required.
    """
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs, targets = inputs.cuda(), targets.cuda()
            n_seen += targets.size(0)
            probs = F.softmax(tg_model(inputs), dim=1)
            _, predicted = probs.max(1)
            n_correct += predicted.eq(targets).sum().item()
    return 100. * n_correct / n_seen
def compute_accuracy_per_class(tg_model, evalloader, start_new_class, end_new_class, args, task_id):
    """Image-level and per-class accuracies over ``evalloader``.

    Tracks per-class hit counts for classes ``0..end_new_class-1`` and
    returns ``(cnn_acc, old_class_acc_mean, new_class_acc_mean,
    total_class_acc_mean)`` where "old" covers classes before
    ``start_new_class`` and "new" the range
    ``start_new_class..end_new_class``.  Optionally plots a confusion
    matrix (``args.plot_cm``) and writes per-class accuracies to an Excel
    file under ``args.tensorboard_base_path``.
    """
    correct = 0
    total = 0
    all_targets = []
    all_predicted = []
    outputs_old_classes = []
    per_label_acc = np.zeros(end_new_class)
    per_label_counts = np.zeros(end_new_class)
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(evalloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            total += targets.size(0)
            all_targets.append(targets.cpu())
            outputs = tg_model(inputs)
            outputs = F.softmax(outputs, dim=1)
            outputs_old_classes.append(outputs[:,0:end_new_class].cpu())
            _, predicted = outputs.max(1)
            correct += predicted.eq(targets).sum().item()
            all_predicted.append(predicted.cpu())
            # accumulate per-class hits and supports
            for c in range(end_new_class):
                pos = (targets == c)
                per_label_acc[c] += (pos * (predicted == c)).sum().item()
                per_label_counts[c] += pos.sum().item()
    cnn_acc = 100.*correct/total
    per_label_counts = np.maximum(per_label_counts, 1) # avoid div 0
    per_label_acc /= per_label_counts
    total_class_acc_mean = 100.*per_label_acc.mean()
    if start_new_class == 0:
        # first task: there are no "old" classes yet
        old_class_acc_mean = 0
    else:
        old_class_acc_mean = 100. * per_label_acc[0:start_new_class].mean()
    new_class_acc_mean = 100. * per_label_acc[start_new_class:end_new_class].mean()
    ### plot confusion matrix
    if args.plot_cm:
        labels = np.array(list(range(1,end_new_class + 1)))
        plot_cm(np.concatenate(all_targets), np.concatenate(all_predicted), labels,
                vmax=50, title='Confusion matrix')
        save_cm_path = args.tensorboard_base_path + 'task{}'.format(task_id)
        os.makedirs(save_cm_path, exist_ok=True)
        plt.savefig(save_cm_path + '/confusion_matrix.jpg')
    ### save per class accuracy to excel
    import pandas as pd
    df = pd.DataFrame(per_label_acc)
    save_excel_path = args.tensorboard_base_path + 'task{}'.format(task_id)
    os.makedirs(save_excel_path, exist_ok=True)
    df.to_excel(save_excel_path +'/per_class_acc.xlsx', index=False)
    return cnn_acc, old_class_acc_mean, new_class_acc_mean, total_class_acc_mean
def test_cifar100_and_plot_cm(tg_model, X_valid_total, Y_valid_total, X_valid_ori, Y_valid_ori, evalset, testloader, order, order_list, iteration, args):
    """CIFAR-100 evaluation that also renders a confusion matrix.

    Computes accuracy on the original class group, the current task's
    classes, and the cumulative test set (the last via
    ``compute_accuracy_WI_and_plot_cm``).  Returns
    ``(acc_old, acc_cur, acc)``.
    """
    tg_model.eval()
    # if iteration>start_iter:
    #     ## joint classifiers
    #     #num_old_classes = ref_model.fc.out_features
    #     tg_model.fc.weight.data[:num_old_classes] = ref_model.fc.weight.data
    #     tg_model.fc.bias.data[:num_old_classes] = ref_model.fc.bias.data
    print("##############################################################")
    # Calculate validation error of model on the original classes:
    map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
    # print('Computing accuracy on the original batch of classes...')
    evalset.data = X_valid_ori.astype('uint8')
    evalset.targets = map_Y_valid_ori
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,
                                             num_workers=2)
    acc_old = compute_accuracy_WI(tg_model, evalloader, 0, args.nb_cl_fg + (iteration-1) * args.nb_cl)
    print('Old classes accuracy: {:.2f} %'.format(acc_old))
    ##
    # Select the current task's validation subset.
    if iteration == 0:
        indices_test_subset_cur = np.array(
            [i in order[range(0, args.nb_cl_fg)] for i in Y_valid_total])
    else:
        indices_test_subset_cur = np.array(
            [i in order[range(args.nb_cl_fg + (iteration-1) * args.nb_cl, args.nb_cl_fg + iteration * args.nb_cl)] for i in Y_valid_total])
    X_valid_cur = X_valid_total[indices_test_subset_cur]
    Y_valid_cur = Y_valid_total[indices_test_subset_cur]
    map_Y_valid_cur = np.array([order_list.index(i) for i in Y_valid_cur])
    # print('Computing accuracy on the original batch of classes...')
    evalset.data = X_valid_cur.astype('uint8')
    evalset.targets = map_Y_valid_cur
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,
                                             num_workers=2)
    acc_cur = compute_accuracy_WI(tg_model, evalloader, 0, args.nb_cl * (iteration + 1))
    print('New classes accuracy: {:.2f} %'.format(acc_cur))
    # Calculate validation error of model on the cumul of classes:
    acc = compute_accuracy_WI_and_plot_cm(tg_model, testloader, 0, args.nb_cl * (iteration + 1), args)
    print('Total accuracy: {:.2f} %'.format(acc))
    print("##############################################################")
    return acc_old, acc_cur, acc
def test_tiny_or_crossd_save_per_class_acc(tg_model, X_valid_total, Y_valid_total, X_valid_ori, Y_valid_ori, evalset, testloader, order, order_list, iteration, args):
    """Image-folder evaluation that also saves per-class accuracy tables.

    Computes old-class, new-class and cumulative accuracies via
    ``compute_accuracy_WI_per_class_acc`` (which writes per-class results
    to Excel) and returns ``(acc_old, acc_cur, acc)``.
    """
    tg_model.eval()
    # if iteration>start_iter:
    #     ## joint classifiers
    #     #num_old_classes = ref_model.fc.out_features
    #     tg_model.fc.weight.data[:num_old_classes] = ref_model.fc.weight.data
    #     tg_model.fc.bias.data[:num_old_classes] = ref_model.fc.bias.data
    print("##############################################################")
    # Calculate validation error of model on the original classes:
    map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
    # print('Computing accuracy on the original batch of classes...')
    ori_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori)
    evalset.imgs = evalset.samples = ori_eval_set
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,
                                             num_workers=2)
    if iteration == 0:
        acc_old, old_class_mean = compute_accuracy_WI_per_class_acc(tg_model, evalloader, 0, args.nb_cl_fg, args, iteration)
        print('Old classes accuracy: {:.2f} % old Per Class mean: {:.2f}%'.format(acc_old, old_class_mean))
    else:
        acc_old, old_class_mean = compute_accuracy_WI_per_class_acc(tg_model, evalloader, 0, args.nb_cl_fg + (iteration - 1) * args.nb_cl , args, iteration)
        print('Old classes accuracy: {:.2f} % old Per Class mean: {:.2f}%'.format(acc_old, old_class_mean))
    # acc_old = compute_accuracy_WI(tg_model, evalloader, 0, args.nb_cl_fg + (iteration-1) * args.nb_cl)
    # print('Old classes accuracy: {:.2f} %'.format(acc_old))
    # indices_test_subset_cur = np.array(
    #     [i in order[range(iteration * args.nb_cl, (iteration + 1) * args.nb_cl)] for i in Y_valid_total])
    # Select the current task's validation subset.
    if iteration == 0:
        indices_test_subset_cur = np.array([i in order[range(0, args.nb_cl_fg)] for i in Y_valid_total])
    else:
        indices_test_subset_cur = np.array([i in order[range(args.nb_cl_fg + (iteration-1) * args.nb_cl, args.nb_cl_fg + iteration * args.nb_cl)] for i in Y_valid_total])
    X_valid_cur = X_valid_total[indices_test_subset_cur]
    Y_valid_cur = Y_valid_total[indices_test_subset_cur]
    map_Y_valid_cur = np.array([order_list.index(i) for i in Y_valid_cur])
    current_eval_set = merge_images_labels(X_valid_cur, map_Y_valid_cur)
    evalset.imgs = evalset.samples = current_eval_set
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,
                                             num_workers=2)
    if iteration == 0:
        acc_cur, new_class_mean = compute_accuracy_WI_per_class_acc(tg_model, evalloader, 0, args.nb_cl_fg, args, iteration, new_class_per_acc=True)
        print('new accuracy: {:.2f} % new per class mean: {:.2f}%'.format(acc_cur, new_class_mean))
    else:
        acc_cur, new_class_mean = compute_accuracy_WI_per_class_acc(tg_model, evalloader, args.nb_cl_fg, args.nb_cl_fg + args.nb_cl * iteration, args, iteration, new_class_per_acc=True)
        print('new accuracy: {:.2f} % new per class mean: {:.2f}%'.format(acc_cur, new_class_mean))
    # NOTE(review): this recomputation overwrites the acc_cur computed just
    # above — confirm whether the WI_per_class value was meant to be kept.
    acc_cur = compute_accuracy_WI(tg_model, evalloader, 0, args.nb_cl_fg + args.nb_cl * (iteration + 1))
    print('New classes accuracy: {:.2f} %'.format(acc_cur))
    # Calculate validation error of model on the cumul of classes:
    if iteration == 0:
        acc, class_mean = compute_accuracy_WI_per_class_acc(tg_model, testloader, 0, args.nb_cl_fg, args, iteration)
        print('Total accuracy: {:.2f} % Total per class mean: {:.2f}%'.format(acc, class_mean))
        print("##############################################################")
    else:
        acc, class_mean = compute_accuracy_WI_per_class_acc(tg_model, testloader, 0, args.nb_cl_fg + args.nb_cl * iteration, args, iteration)
        print('Total accuracy: {:.2f} % Total per class mean: {:.2f}%'.format(acc, class_mean))
        print("##############################################################")
    return acc_old, acc_cur, acc
def compute_accuracy_WI_per_class_acc(tg_model, evalloader, start_class, end_class, args, task_id, new_class_per_acc=False):
    """Image-level accuracy plus a per-class mean over ``evalloader``.

    Per-class accuracies for classes ``0..end_class-1`` are written to an
    Excel file under ``args.tensorboard_base_path``.  Returns
    ``(cnn_acc, new_class_acc_mean)`` when ``new_class_per_acc`` is set
    (mean over ``start_class..end_class``), otherwise
    ``(cnn_acc, class_acc_mean)`` over all tracked classes.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #tg_feature_model.eval()
    correct = 0
    total = 0
    all_targets = []
    all_predicted = []
    outputs_old_classes = []
    per_label_acc = np.zeros(end_class)
    per_label_counts = np.zeros(end_class)
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(evalloader):
            inputs, targets = inputs.to(device), targets.to(device)
            total += targets.size(0)
            all_targets.append(targets.cpu())
            #targets = targets - start_class
            outputs = tg_model(inputs)
            #outputs = outputs[:, start_class: end_class]
            outputs = F.softmax(outputs, dim=1)
            # outputs = F.softmax(outputs[:,0:end_class], dim=1)
            outputs_old_classes.append(outputs[:,0:end_class].cpu())
            _, predicted = outputs.max(1)
            correct += predicted.eq(targets).sum().item()
            all_predicted.append(predicted.cpu())
            # accumulate per-class hits and supports
            for c in range(end_class):
                pos = (targets == c)
                per_label_acc[c] += (pos * (predicted == c)).sum().item()
                per_label_counts[c] += pos.sum().item()
    cnn_acc = 100.*correct/total
    per_label_counts = np.maximum(per_label_counts, 1) # avoid div 0
    per_label_acc /= per_label_counts
    class_acc_mean = 100.*per_label_acc.mean()
    per_label_acc_dict = {i: per_label_acc[i] for i in range(end_class)}
    # print (per_label_acc_dict)
    # print (class_acc_mean)
    import pandas as pd
    df = pd.DataFrame(per_label_acc)
    save_excel_path = args.tensorboard_base_path + 'task{}'.format(task_id)
    os.makedirs(save_excel_path, exist_ok=True)
    df.to_excel(save_excel_path +'/per_class_acc.xlsx', index=False)
    if new_class_per_acc:
        new_class_acc_mean = 100.*per_label_acc[start_class:end_class].mean()
        return cnn_acc, new_class_acc_mean
    else:
        return cnn_acc, class_acc_mean
def test_tiny_or_crossd(tg_model, X_valid_total, Y_valid_total, X_valid_ori, Y_valid_ori, evalset, testloader, order, order_list, iteration, args):
    """Image-folder evaluation on original, current-task and cumulative sets.

    Uses ``merge_images_labels`` to feed ImageFolder-style eval sets and
    ``compute_accuracy_WI`` for the accuracy computation.  Returns
    ``(acc_old, acc_cur, acc)``.
    """
    tg_model.eval()
    # if iteration>start_iter:
    #     ## joint classifiers
    #     #num_old_classes = ref_model.fc.out_features
    #     tg_model.fc.weight.data[:num_old_classes] = ref_model.fc.weight.data
    #     tg_model.fc.bias.data[:num_old_classes] = ref_model.fc.bias.data
    print("##############################################################")
    # Calculate validation error of model on the original classes:
    map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
    # print('Computing accuracy on the original batch of classes...')
    ori_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori)
    evalset.imgs = evalset.samples = ori_eval_set
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,
                                             num_workers=2)
    acc_old = compute_accuracy_WI(tg_model, evalloader, 0, args.nb_cl * (iteration + 1))
    print('Old classes accuracy: {:.2f} %'.format(acc_old))
    ##
    # indices_test_subset_cur = np.array(
    #     [i in order[range(iteration * args.nb_cl, (iteration + 1) * args.nb_cl)] for i in Y_valid_total])
    # Select the current task's validation subset.
    if iteration == 0:
        indices_test_subset_cur = np.array(
            [i in order[range(0, args.nb_cl_fg)] for i in Y_valid_total])
    else:
        indices_test_subset_cur = np.array(
            [i in order[range(args.nb_cl_fg + (iteration-1) * args.nb_cl, args.nb_cl_fg + iteration * args.nb_cl)] for i in Y_valid_total])
    X_valid_cur = X_valid_total[indices_test_subset_cur]
    Y_valid_cur = Y_valid_total[indices_test_subset_cur]
    map_Y_valid_cur = np.array([order_list.index(i) for i in Y_valid_cur])
    # print('Computing accuracy on the original batch of classes...')
    current_eval_set = merge_images_labels(X_valid_cur, map_Y_valid_cur)
    evalset.imgs = evalset.samples = current_eval_set
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,
                                             num_workers=2)
    acc_cur = compute_accuracy_WI(tg_model, evalloader, 0, args.nb_cl * (iteration + 1))
    print('New classes accuracy: {:.2f} %'.format(acc_cur))
    # Calculate validation error of model on the cumul of classes:
    acc = compute_accuracy_WI(tg_model, testloader, 0, args.nb_cl * (iteration + 1))
    print('Total accuracy: {:.2f} %'.format(acc))
    print("##############################################################")
    return acc_old, acc_cur, acc
def test_tiny_or_crossd_oracle(tg_model, X_valid_total, Y_valid_total, X_valid_ori, Y_valid_ori, evalset, testloader, order, order_list, iteration, args):
    """Oracle variant of ``test_tiny_or_crossd``.

    Identical structure, except the "current" subset is fixed to classes
    ``args.nb_cl_pre .. args.nb_cl_fg`` in the class order rather than
    being derived from ``iteration``.  Returns ``(acc_old, acc_cur, acc)``.
    """
    tg_model.eval()
    # if iteration>start_iter:
    #     ## joint classifiers
    #     #num_old_classes = ref_model.fc.out_features
    #     tg_model.fc.weight.data[:num_old_classes] = ref_model.fc.weight.data
    #     tg_model.fc.bias.data[:num_old_classes] = ref_model.fc.bias.data
    print("##############################################################")
    # Calculate validation error of model on the original classes:
    map_Y_valid_ori = np.array([order_list.index(i) for i in Y_valid_ori])
    # print('Computing accuracy on the original batch of classes...')
    ori_eval_set = merge_images_labels(X_valid_ori, map_Y_valid_ori)
    evalset.imgs = evalset.samples = ori_eval_set
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,
                                             num_workers=2)
    acc_old = compute_accuracy_WI(tg_model, evalloader, 0, args.nb_cl * (iteration + 1))
    print('Old classes accuracy: {:.2f} %'.format(acc_old))
    ##
    # Oracle subset: a fixed class window independent of `iteration`.
    indices_test_subset_cur = np.array(
        [i in order[range(args.nb_cl_pre,args.nb_cl_fg)] for i in Y_valid_total])
    # indices_test_subset_cur = np.array(
    #     [i in order[range(60,80)] for i in Y_valid_total])
    X_valid_cur = X_valid_total[indices_test_subset_cur]
    Y_valid_cur = Y_valid_total[indices_test_subset_cur]
    map_Y_valid_cur = np.array([order_list.index(i) for i in Y_valid_cur])
    # print('Computing accuracy on the original batch of classes...')
    current_eval_set = merge_images_labels(X_valid_cur, map_Y_valid_cur)
    evalset.imgs = evalset.samples = current_eval_set
    evalloader = torch.utils.data.DataLoader(evalset, batch_size=args.eval_batch_size, shuffle=False,
                                             num_workers=2)
    acc_cur = compute_accuracy_WI(tg_model, evalloader, 0, args.nb_cl * (iteration + 1))
    print('New classes accuracy: {:.2f} %'.format(acc_cur))
    # Calculate validation error of model on the cumul of classes:
    acc = compute_accuracy_WI(tg_model, testloader, 0, args.nb_cl * (iteration + 1))
    print('Total accuracy: {:.2f} %'.format(acc))
    print("##############################################################")
    return acc_old, acc_cur, acc
def compute_accuracy(tg_model, tg_feature_model, class_means, evalloader, scale=None, print_info=True, device=None):
    """Evaluate three classifiers in one pass over ``evalloader``.

    - CNN: softmax of ``tg_model`` (optionally rescaled by ``scale``);
    - iCaRL: nearest ``class_means[:, :, 0]`` in squared-Euclidean
      distance on ``tg_feature_model`` features;
    - NCM: same, against ``class_means[:, :, 1]``.

    Returns ``[cnn_acc, icarl_acc, ncm_acc]`` as percentages.
    """
    if device is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    tg_model.eval()
    tg_feature_model.eval()
    #evalset = torchvision.datasets.CIFAR100(root='./data', train=False,
    #                                        download=False, transform=transform_test)
    #evalset.test_data = input_data.astype('uint8')
    #evalset.test_labels = input_labels
    #evalloader = torch.utils.data.DataLoader(evalset, batch_size=128,
    #                                         shuffle=False, num_workers=2)
    correct = 0
    correct_icarl = 0
    correct_ncm = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(evalloader):
            inputs, targets = inputs.to(device), targets.to(device)
            total += targets.size(0)
            outputs = tg_model(inputs)
            outputs = F.softmax(outputs, dim=1)
            if scale is not None:
                # scale is a 1 x num_classes row vector of per-class divisors
                assert(scale.shape[0] == 1)
                assert(outputs.shape[1] == scale.shape[1])
                outputs = outputs / scale.repeat(outputs.shape[0], 1).type(torch.FloatTensor).to(device)
            _, predicted = outputs.max(1)
            correct += predicted.eq(targets).sum().item()
            outputs_feature = np.squeeze(tg_feature_model(inputs)).cpu()
            # Compute score for iCaRL
            sqd_icarl = cdist(class_means[:,:,0].T, outputs_feature, 'sqeuclidean')
            score_icarl = torch.from_numpy((-sqd_icarl).T).to(device)
            _, predicted_icarl = score_icarl.max(1)
            correct_icarl += predicted_icarl.eq(targets).sum().item()
            # Compute score for NCM
            sqd_ncm = cdist(class_means[:,:,1].T, outputs_feature, 'sqeuclidean')
            score_ncm = torch.from_numpy((-sqd_ncm).T).to(device)
            _, predicted_ncm = score_ncm.max(1)
            correct_ncm += predicted_ncm.eq(targets).sum().item()
            # print(sqd_icarl.shape, score_icarl.shape, predicted_icarl.shape, \
            #     sqd_ncm.shape, score_ncm.shape, predicted_ncm.shape)
    if print_info:
        print("  top 1 accuracy CNN            :\t\t{:.2f} %".format(100.*correct/total))
        print("  top 1 accuracy iCaRL          :\t\t{:.2f} %".format(100.*correct_icarl/total))
        print("  top 1 accuracy NCM            :\t\t{:.2f} %".format(100.*correct_ncm/total))
    cnn_acc = 100.*correct/total
    icarl_acc = 100.*correct_icarl/total
    ncm_acc = 100.*correct_ncm/total
    return [cnn_acc, icarl_acc, ncm_acc]
def compute_accuracy_CNN(tg_model, evalloader):
    """Top-1 accuracy (%) of ``tg_model``'s softmax output over ``evalloader``.

    Runs on CUDA when available, otherwise on CPU.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #tg_feature_model.eval()
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            n_seen += targets.size(0)
            probs = F.softmax(tg_model(inputs), dim=1)
            _, predicted = probs.max(1)
            n_correct += predicted.eq(targets).sum().item()
    #print("  top 1 accuracy CNN            :\t\t{:.2f} %".format(100.*correct/total))
    return 100. * n_correct / n_seen
def compute_accuracy_WI_and_plot_cm(tg_model, evalloader, start_class, end_class, args):
    """Top-1 accuracy of `tg_model`; optionally renders a confusion matrix.

    When ``args.load_task1_ours`` is truthy, a 60-class confusion matrix is
    drawn via ``plot_cm`` and saved to disk.  `start_class`/`end_class` are
    accepted for signature compatibility but not used by the active path.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    true_labels = []
    pred_labels = []
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            true_labels.append(targets.cpu())
            probs = F.softmax(tg_model(inputs), dim=1)
            preds = probs.argmax(dim=1)
            num_correct += (preds == targets).sum().item()
            pred_labels.append(preds.cpu())
    cnn_acc = 100. * num_correct / num_seen
    if args.load_task1_ours:
        # Confusion matrix over the first 60 classes (task-1 evaluation).
        labels = np.array(list(range(60)))
        ax, sigma, sigma_norm = plot_cm(np.concatenate(true_labels), np.concatenate(pred_labels), labels,
                                        vmax=50, title='Confusion matrix')
        plt.savefig('cifar100_6tasks_task1_ours.jpg')
        plt.show()
    return cnn_acc
def compute_accuracy_WI(tg_model, evalloader, start_class, end_class):
    """Top-1 accuracy (percent) of `tg_model` over the whole label index.

    `start_class`/`end_class` are accepted for signature compatibility but
    are not used by the active code path.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    true_labels = []
    pred_labels = []
    outputs_old_classes = []         # retained from the original (unused)
    per_label_acc = np.zeros(60)     # retained from the original (unused)
    per_label_counts = np.zeros(60)  # retained from the original (unused)
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            true_labels.append(targets.cpu())
            probs = F.softmax(tg_model(inputs), dim=1)
            preds = probs.argmax(dim=1)
            num_correct += (preds == targets).sum().item()
            pred_labels.append(preds.cpu())
    return 100. * num_correct / num_seen
def compute_accuracy_Version1(tg_model, evalloader, nb_cl, nclassifier, iteration):
    """Accuracy with an ensemble of side classifiers.

    The model emits ``nb_cl * nclassifier`` logits per incremental step;
    the `nclassifier` heads belonging to each step are summed into a
    single ``nb_cl``-wide slot before a softmax and argmax.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            logits = tg_model(inputs, side_fc=True)
            real_classes = int(logits.size(1) / nclassifier)
            num_steps = iteration + 1
            fused = torch.zeros(logits.size(0), real_classes).to(device)
            # Fold the nclassifier heads of each step into one nb_cl slot.
            for step in range(num_steps):
                lo = nb_cl * nclassifier * step
                for _ in range(nclassifier):
                    hi = lo + nb_cl
                    fused[:, step * nb_cl:(step + 1) * nb_cl] += logits[:, lo:hi]
                    lo = hi
            probs = F.softmax(fused, dim=1)
            preds = probs.argmax(dim=1)
            num_correct += (preds == targets).sum().item()
    return 100. * num_correct / num_seen
def compute_discrepancy(tg_model, evalloader, nb_cl, nclassifier, iteration, discrepancy):
    """Accumulate per-sample, per-step disagreement between side classifiers.

    For each incremental step, every unordered pair of that step's
    `nclassifier` softmax heads contributes the L1 distance between their
    class distributions.  Values are summed in place into `discrepancy`
    (rows = samples, columns = steps), which is also returned.

    NOTE(review): rows are addressed with ``targets.size(0) * batch_idx``,
    which is only correct when every batch has the same size; a smaller
    final batch would write to the wrong rows.  TODO confirm the loader
    uses drop_last or an evenly divisible dataset.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #tg_feature_model.eval()
    total = 0
    nstep = iteration + 1  # number of incremental steps evaluated so far
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(evalloader):
            inputs, targets = inputs.to(device), targets.to(device)
            total += targets.size(0)
            #targets = targets - start_class
            outputs = tg_model(inputs, side_fc=True)
            ##
            for i in range(nstep):
                # Columns [start_index, start_index + nb_cl*nclassifier) hold
                # this step's classifier heads, nb_cl logits each.
                start_index = nb_cl*nclassifier*i
                for iter_1 in range(nclassifier):
                    outputs_1 = outputs[:, (start_index + nb_cl * iter_1):(start_index + nb_cl * (iter_1 + 1))]
                    outputs_1 = F.softmax(outputs_1, dim=1)
                    for iter_2 in range(iter_1 + 1, nclassifier):
                        outputs_2 = outputs[:, (start_index + nb_cl * iter_2):(start_index + nb_cl * (iter_2 + 1))]
                        outputs_2 = F.softmax(outputs_2, dim=1)
                        # L1 distance between the two heads' distributions.
                        discrepancy[targets.size(0)*batch_idx:targets.size(0)*(batch_idx+1),i] += torch.sum(torch.abs(outputs_1 - outputs_2), 1)
    return discrepancy
def compute_accuracy_Side(tg_model, evalloader, nb_cl, nclassifier, iteration, inds):
    """Accuracy using the side-classifier group selected by `inds`.

    The `nclassifier` heads of the selected step are averaged, softmaxed
    and arg-maxed to produce an ``nb_cl``-way prediction per sample.

    NOTE(review): ``start`` is computed from a *slice* of `inds`, so unless
    every entry of the slice is the same scalar this yields an array-valued
    slice bound for ``outputs[:, start:end]`` — TODO confirm `inds` is
    constant within a batch (or one scalar per batch).
    NOTE(review): the slicing of `inds` by ``batch_idx*targets.size(0)``
    also assumes a constant batch size, like compute_discrepancy above.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #tg_feature_model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(evalloader):
            inputs, targets = inputs.to(device), targets.to(device)
            total += targets.size(0)
            #targets = targets - start_class
            outputs = tg_model(inputs, side_fc=True)
            # Step indices assigned to the samples of this batch.
            batch_inds = inds[batch_idx*targets.size(0):(batch_idx+1)*targets.size(0)]
            real_classes = int(outputs.size(1)/nclassifier)  # unused here; kept from original
            nstep = iteration+1  # unused here; kept from original
            outputs_sum = torch.zeros(outputs.size(0), nb_cl).to(device)
            ##
            start = nb_cl*nclassifier*batch_inds
            for j in range(nclassifier):
                end = start+nb_cl
                outputs_sum += outputs[:, start:end]
                start = end
            # Mean over the step's heads, then softmax + argmax.
            outputs_sum = outputs_sum/nclassifier
            outputs_sum = F.softmax(outputs_sum, dim=1)
            _, predicted = outputs_sum.max(1)
            correct += predicted.eq(targets).sum().item()
    cnn_acc = 100. * correct / total
    return cnn_acc
def compute_accuracy_Step1(tg_model, evalloader, start_class, end_class):
    """Accuracies of two 20-way heads and of their logit-sum fusion.

    Returns ``(acc_head1, acc_head2, acc_fusion)`` in percent.  The fusion
    sums raw (pre-softmax) logits of the two heads.  `start_class` and
    `end_class` are accepted for signature compatibility.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    hits = [0, 0, 0]  # head 1, head 2, fusion
    num_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            logits = tg_model(inputs, cls_fc=True)
            head1 = logits[:, :20]
            head2 = logits[:, 20:40]
            preds1 = F.softmax(head1, dim=1).argmax(dim=1)
            hits[0] += (preds1 == targets).sum().item()
            preds2 = F.softmax(head2, dim=1).argmax(dim=1)
            hits[1] += (preds2 == targets).sum().item()
            preds3 = (head1 + head2).argmax(dim=1)
            hits[2] += (preds3 == targets).sum().item()
    return 100. * hits[0] / num_seen, 100. * hits[1] / num_seen, 100. * hits[2] / num_seen
def compute_accuracy_Step2(tg_model, evalloader, start_class, end_class):
    """Accuracies of two (old+new) 40-way head pairings and their average.

    The model's 80 logits split into two copies, each with a 20-way "old"
    head and a 20-way "new" head.  Returns accuracies (percent) for copy 1,
    copy 2, and for the element-wise averaged heads.  `start_class` and
    `end_class` are accepted for signature compatibility.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    hits = [0, 0, 0]
    num_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            logits = tg_model(inputs, cls_fc=True)
            old1, old2 = logits[:, :20], logits[:, 20:40]
            new1, new2 = logits[:, 40:60], logits[:, 60:80]
            mean_old = (old1 + old2) / 2
            mean_new = (new1 + new2) / 2
            for slot, (old_part, new_part) in enumerate(
                    [(old1, new1), (old2, new2), (mean_old, mean_new)]):
                probs = F.softmax(torch.cat((old_part, new_part), dim=1), dim=1)
                hits[slot] += (probs.argmax(dim=1) == targets).sum().item()
    return 100. * hits[0] / num_seen, 100. * hits[1] / num_seen, 100. * hits[2] / num_seen
def compute_accuracy_AIG_Cls(tg_model, cls_model, evalloader, start_class, end_class):
    """Top-1 accuracy when backbone features (``cls_fc=False``) are scored
    by the external head `cls_model`.

    `start_class`/`end_class` are accepted for signature compatibility.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            feats = tg_model(inputs, cls_fc=False)
            probs = F.softmax(cls_model(feats), dim=1)
            num_correct += (probs.argmax(dim=1) == targets).sum().item()
    return 100. * num_correct / num_seen
def compute_accuracy_AIG_Semantic(tg_model, policy_model, evalloader, start_class, end_class):
    """Top-1 accuracy of the gated model plus accuracy of the gate head.

    `policy_model` returns ``(gates, gate_logits)``; the gates are passed
    to `tg_model`.  Targets are shifted by `start_class` so labels are
    local to the current task.  Returns ``(model_acc, gate_acc)`` in
    percent.  `end_class` is accepted for signature compatibility.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_correct_gates = 0
    num_seen = 0
    temperature = 1
    with torch.no_grad():
        for inputs, targets in evalloader:
            num_seen += targets.size(0)
            targets = (targets - start_class).to(device)
            inputs = inputs.to(device)
            gates, gate_logits = policy_model(inputs, temperature=temperature)
            probs = F.softmax(tg_model(inputs, gates), dim=1)
            num_correct += (probs.argmax(dim=1) == targets).sum().item()
            gate_probs = F.softmax(gate_logits, dim=1)
            num_correct_gates += (gate_probs.argmax(dim=1) == targets).sum().item()
    return 100. * num_correct / num_seen, 100. * num_correct_gates / num_seen
def compute_accuracy_AIG_Semantic_Cls(tg_model, cls_model, policy_model, evalloader, start_class, end_class):
    """Top-1 accuracy of the gated backbone + external head, plus accuracy
    of the gate head itself.

    `policy_model` returns ``(gates, gate_logits)``; gated features from
    ``tg_model(..., cls_fc=False)`` are scored by `cls_model`.  Targets are
    shifted by `start_class`.  Returns ``(model_acc, gate_acc)`` percent.
    `end_class` is accepted for signature compatibility.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_correct_gates = 0
    num_seen = 0
    temperature = 1
    with torch.no_grad():
        for inputs, targets in evalloader:
            num_seen += targets.size(0)
            targets = (targets - start_class).to(device)
            inputs = inputs.to(device)
            gates, gate_logits = policy_model(inputs, temperature=temperature)
            feats = tg_model(inputs, gates, cls_fc=False)
            probs = F.softmax(cls_model(feats), dim=1)
            num_correct += (probs.argmax(dim=1) == targets).sum().item()
            gate_probs = F.softmax(gate_logits, dim=1)
            num_correct_gates += (gate_probs.argmax(dim=1) == targets).sum().item()
    return 100. * num_correct / num_seen, 100. * num_correct_gates / num_seen
def compute_accuracy_Policy_Step1(tg_model, cls_model, evalloader, start_class, end_class):
    """Accuracy of ungated features scored by `cls_model`, restricted to the
    ``[start_class, end_class)`` slice of the softmax output.

    Targets are shifted by `start_class` so they index into that slice.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            targets = targets - start_class
            feats = tg_model(inputs, gates=None, cls_fc=False)
            probs = F.softmax(cls_model(feats), dim=1)
            preds = probs[:, start_class:end_class].argmax(dim=1)
            num_correct += (preds == targets).sum().item()
    return 100. * num_correct / num_seen
def compute_accuracy_Policy_Step1_Gated(tg_model, policy_model, cls_model, evalloader, start_class, end_class):
    """Gated variant of compute_accuracy_Policy_Step1.

    Gates from `policy_model` steer the backbone; the resulting features
    are scored by `cls_model` on the ``[start_class, end_class)`` softmax
    slice.  Targets are shifted by `start_class`.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    temperature = 1
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            targets = targets - start_class
            new_gates = policy_model(inputs, temperature=temperature)
            feats = tg_model(inputs, gates=new_gates, cls_fc=False)
            probs = F.softmax(cls_model(feats), dim=1)
            preds = probs[:, start_class:end_class].argmax(dim=1)
            num_correct += (preds == targets).sum().item()
    return 100. * num_correct / num_seen
def compute_accuracy_Policy_Step2(tg_model, old_cls_model, new_cls_model, evalloader, start_class, end_class):
    """Accuracy when old and new classifier logits are concatenated and the
    ``[start_class, end_class)`` slice is scored.

    Targets are shifted by `start_class` so they index into that slice.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            targets = targets - start_class
            feats = tg_model(inputs, gates=None, cls_fc=False)
            merged = torch.cat((old_cls_model(feats), new_cls_model(feats)), 1)
            probs = F.softmax(merged[:, start_class:end_class], dim=1)
            num_correct += (probs.argmax(dim=1) == targets).sum().item()
    return 100. * num_correct / num_seen
def compute_accuracy_Policy_Step2_Gated(tg_model, policy_model, old_cls_model, new_cls_model, evalloader, start_class, end_class):
    """Gated variant of compute_accuracy_Policy_Step2.

    Gates from `policy_model` steer the backbone; old and new classifier
    logits are concatenated and the ``[start_class, end_class)`` slice is
    scored.  Targets are shifted by `start_class`.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    temperature = 1
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            targets = targets - start_class
            gates = policy_model(inputs, temperature=temperature)
            feats = tg_model(inputs, gates=gates, cls_fc=False)
            merged = torch.cat((old_cls_model(feats), new_cls_model(feats)), 1)
            probs = F.softmax(merged[:, start_class:end_class], dim=1)
            num_correct += (probs.argmax(dim=1) == targets).sum().item()
    return 100. * num_correct / num_seen
def compute_accuracy_AIG_Original(tg_model, evalloader, start_class, end_class, gates):
    """Accuracy of an AIG model evaluated with or without gate openings.

    When ``gates`` compares equal to True the model returns a second value
    (its gate activations), which is discarded here.  Scoring uses the
    ``[start_class, end_class)`` softmax slice; targets are shifted by
    `start_class` to match.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            targets = targets - start_class
            # Comparison with == (not `is`) kept: original also matched 1.
            if gates == True:
                logits, _ = tg_model(inputs, temperature=1, openings=gates)
            else:
                logits = tg_model(inputs, temperature=1, openings=gates)
            probs = F.softmax(logits, dim=1)
            preds = probs[:, start_class:end_class].argmax(dim=1)
            num_correct += (preds == targets).sum().item()
    return 100. * num_correct / num_seen
def compute_accuracy_AIG_2(common_model, specific_model, cls_model, evalloader, start_class, end_class):
    """Accuracy of a common backbone + task-specific head + classifier.

    Features from ``common_model(..., side=False)`` pass through
    `specific_model` and are scored by `cls_model`.  Targets are shifted
    by `start_class`.  `end_class` is accepted for signature compatibility.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            targets = targets - start_class
            shared_feats = common_model(inputs, side=False)
            logits = cls_model(specific_model(shared_feats))
            probs = F.softmax(logits, dim=1)
            num_correct += (probs.argmax(dim=1) == targets).sum().item()
    return 100. * num_correct / num_seen
def compute_accuracy_AIG_Step2(common_model, task1_specific_model, task2_specific_model, task1_cls_model, task2_cls_model, evalloader, start_class, end_class):
    """Accuracy over two task branches sharing one backbone.

    Each branch applies its specific head and classifier to the shared
    features; the two logit sets are concatenated and the
    ``[start_class, end_class)`` slice is softmaxed and scored.  Targets
    are shifted by `start_class`.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    num_correct = 0
    num_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            targets = targets - start_class
            shared_feats = common_model(inputs, side=False)
            task1_logits = task1_cls_model(task1_specific_model(shared_feats))
            task2_logits = task2_cls_model(task2_specific_model(shared_feats))
            merged = torch.cat((task1_logits, task2_logits), 1)
            probs = F.softmax(merged[:, start_class:end_class], dim=1)
            num_correct += (probs.argmax(dim=1) == targets).sum().item()
    return 100. * num_correct / num_seen
def compute_accuracy_without_FC(tg_model, evalloader, fc_cls, pool_classifers):
    """Accuracy when backbone output is scored by the current head `fc_cls`
    plus any previously frozen heads in `pool_classifers`.

    Old heads' logits are prepended (iterating the pool in reverse) so
    class indices follow the incremental ordering.  Puts all modules into
    eval mode, prints the top-1 accuracy, and returns it in percent.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    tg_model.eval()
    fc_cls.eval()
    for frozen_head in pool_classifers:
        frozen_head.eval()
    num_correct = 0
    num_seen = 0
    with torch.no_grad():
        for inputs, targets in evalloader:
            inputs = inputs.to(device)
            targets = targets.to(device)
            num_seen += targets.size(0)
            feats = tg_model(inputs)
            logits = fc_cls(feats)
            for frozen_head in reversed(pool_classifers):
                logits = torch.cat((frozen_head(feats), logits), 1)
            probs = F.softmax(logits, dim=1)
            num_correct += (probs.argmax(dim=1) == targets).sum().item()
    print(" top 1 accuracy CNN :\t\t{:.2f} %".format(100.*num_correct/num_seen))
    return 100. * num_correct / num_seen
|
{"hexsha": "283b22edd98537c21a1d18544d4b1a22ce2c20a2", "size": 49089, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/compute_accuracy.py", "max_stars_repo_name": "xmengxin/MFGR", "max_stars_repo_head_hexsha": "ba807d0f52c0eb00d330eaa9bcef56c1343d2588", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-09-02T08:43:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T17:13:43.000Z", "max_issues_repo_path": "utils/compute_accuracy.py", "max_issues_repo_name": "xmengxin/MFGR", "max_issues_repo_head_hexsha": "ba807d0f52c0eb00d330eaa9bcef56c1343d2588", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/compute_accuracy.py", "max_forks_repo_name": "xmengxin/MFGR", "max_forks_repo_head_hexsha": "ba807d0f52c0eb00d330eaa9bcef56c1343d2588", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-19T06:26:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T06:26:18.000Z", "avg_line_length": 46.7069457659, "max_line_length": 185, "alphanum_fraction": 0.6389822567, "include": true, "reason": "from scipy", "num_tokens": 12033}
|
r"""
Module defining Pyclaw geometry objects.
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import warnings
import six
from six.moves import range
from six.moves import zip
# Warning text emitted by the deprecated c_edges/p_edges accessors below.
deprec_message = "'edges' has been deprecated; please use 'nodes' instead."
# ============================================================================
# Default function definitions
# ============================================================================
# Default mapc2p functions
def identity_map_1d(x):
    """Identity computational-to-physical map for 1D grids (1-tuple out)."""
    return (x,)
def identity_map_2d(x, y):
    """Identity computational-to-physical map for 2D grids."""
    return (x, y)
def identity_map_3d(x, y, z):
    """Identity computational-to-physical map for 3D grids."""
    return (x, y, z)
# Dispatch table: number of dimensions (as a string) -> identity mapping,
# used as the default Grid.mapc2p whenever a dimension is added.
identity_map={'1': identity_map_1d,
              '2': identity_map_2d,
              '3': identity_map_3d}
class Grid(object):
r"""
Representation of a single grid.
:Dimension information:
Each dimension has an associated name with it that can be accessed via
that name such as ``grid.x.num_cells`` which would access the x dimension's
number of cells.
:Properties:
If the requested property has multiple values, a list will be returned
with the corresponding property belonging to the dimensions in order.
:Initialization:
Input:
- *dimensions* - (list of :class:`Dimension`) Dimensions that are to
be associated with this grid
Output:
- (:class:`grid`) Initialized grid object
A PyClaw grid is usually constructed from a tuple of PyClaw Dimension objects:
>>> from clawpack.pyclaw.geometry import Dimension, Grid
>>> x = Dimension(0.,1.,10,name='x')
>>> y = Dimension(-1.,1.,25,name='y')
>>> grid = Grid((x,y))
>>> print(grid)
2-dimensional domain (x,y)
No mapping
Extent: [0.0, 1.0] x [-1.0, 1.0]
Cells: 10 x 25
We can query various properties of the grid:
>>> grid.num_dim
2
>>> grid.num_cells
[10, 25]
>>> grid.lower
[0.0, -1.0]
>>> grid.delta # Returns [dx, dy]
[0.1, 0.08]
A grid can be extended to higher dimensions using the add_dimension() method:
>>> z=Dimension(-2.0,2.0,21,name='z')
>>> grid.add_dimension(z)
>>> grid.num_dim
3
>>> grid.num_cells
[10, 25, 21]
Coordinates
===========
We can get the x, y, and z-coordinate arrays of cell nodes and centers from the grid.
Properties beginning with 'c' refer to the computational (unmapped) domain, while
properties beginning with 'p' refer to the physical (mapped) domain. For grids with
no mapping, the two are identical. Also note the difference between 'center' and
'centers'.
>>> import numpy as np
>>> np.set_printoptions(precision=2) # avoid doctest issues with roundoff
>>> grid.c_center([1,2,3])
array([ 0.15, -0.8 , -1.33])
>>> grid.p_nodes[0][0,0,0]
0.0
>>> grid.p_nodes[1][0,0,0]
-1.0
>>> grid.p_nodes[2][0,0,0]
-2.0
It's also possible to get coordinates for ghost cell arrays:
>>> x = Dimension(0.,1.,5,name='x')
>>> grid1d = Grid([x])
>>> grid1d.c_centers
[array([0.1, 0.3, 0.5, 0.7, 0.9])]
>>> grid1d.c_centers_with_ghost(2)
[array([-0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3])]
Mappings
========
A grid mapping can be used to solve in a domain that is not rectangular,
or to adjust the local spacing of grid cells. For instance, we can
use smaller cells on the left and larger cells on the right by doing:
>>> double = lambda xarr : np.array([x**2 for x in xarr])
>>> grid1d.mapc2p = double
>>> grid1d.p_centers
array([0.01, 0.09, 0.25, 0.49, 0.81])
Note that the 'nodes' (or nodes) of the mapped grid are the mapped values
of the computational nodes. In general, they are not the midpoints between
mapped centers:
>>> grid1d.p_nodes
array([0. , 0.04, 0.16, 0.36, 0.64, 1. ])
"""
def __getattr__(self,key):
# Provide dimension attribute lists when requested from Grid object.
# Note that this only gets called when one requests an attribute
# that the grid itself doesn't possess.
if key in ['num_cells','lower','upper','delta','units','centers','nodes',
'on_lower_boundary','on_upper_boundary']:
return self.get_dim_attribute(key)
else:
raise AttributeError("'Grid' object has no attribute '"+key+"'")
# ========== Property Definitions ========================================
@property
def num_dim(self):
r"""(int) - Number of dimensions"""
return len(self._dimensions)
@property
def dimensions(self):
r"""(list) - List of :class:`Dimension` objects defining the
grid's extent and resolution"""
return [getattr(self,name) for name in self._dimensions]
@property
def c_centers(self):
r"""(list of ndarray(...)) - List containing the arrays locating
the computational locations of cell centers, see
:meth:`_compute_c_centers` for more info."""
self._compute_c_centers()
return self._c_centers
@property
def c_nodes(self):
r"""(list of ndarray(...)) - List containing the arrays locating
the computational locations of cell nodes, see
:meth:`_compute_c_nodes` for more info."""
self._compute_c_nodes()
return self._c_nodes
@property
def p_centers(self):
r"""(list of ndarray(...)) - List containing the arrays locating
the physical locations of cell centers, see
:meth:`_compute_p_centers` for more info."""
self._compute_p_centers()
return self._p_centers
@property
def p_nodes(self):
r"""(list of ndarray(...)) - List containing the arrays locating
the physical locations of cell nodes, see
:meth:`_compute_p_nodes` for more info."""
self._compute_p_nodes()
return self._p_nodes
@property
def mapc2p(self):
return self._mapc2p
@mapc2p.setter
def mapc2p(self,mapc2p):
self._mapc2p = mapc2p
self._clear_cached_values()
# ========== Class Methods ===============================================
def __init__(self,dimensions):
r"""
Instantiate a Grid object
See :class:`Grid` for more info.
"""
# ========== Attribute Definitions ===================================
r"""(func) - Coordinate mapping function"""
self.gauges = []
r"""(list) - List of gauges' indices to be filled by add_gauges
method.
"""
self.gauge_file_names = []
r"""(list) - List of file names to write gauge values to"""
self.gauge_files = []
r"""(list) - List of file objects to write gauge values to"""
self.gauge_dir_name = '_gauges'
r"""(string) - Name of the output directory for gauges. If the
`Controller` class is used to run the application, this directory by
default will be created under the `Controller` `outdir` directory.
"""
self._p_centers = None
self._p_nodes = None
self._c_centers = None
self._c_nodes = None
# Dimension parsing
if isinstance(dimensions,Dimension):
dimensions = [dimensions]
self._dimensions = []
for dim in dimensions:
self.add_dimension(dim)
super(Grid,self).__init__()
def _clear_cached_values(self):
self._p_centers = None
self._p_nodes = None
self._c_centers = None
self._c_nodes = None
# ========== Dimension Manipulation ======================================
def add_dimension(self,dimension):
r"""
Add the specified dimension to this patch
:Input:
- *dimension* - (:class:`Dimension`) Dimension to be added
"""
# Add dimension to name list and as an attribute
if dimension.name in self._dimensions:
raise Exception('Unable to add dimension. A dimension'+
' of the same name: {name}, already exists.'
.format(name=dimension.name))
self._dimensions.append(dimension.name)
setattr(self,dimension.name,dimension)
self._clear_cached_values()
# Reset mapping as it presumably makes no sense now
self.mapc2p = identity_map[str(self.num_dim)]
def get_dim_attribute(self,attr):
r"""
Returns a tuple of all dimensions' attribute attr
"""
return [getattr(dim,attr) for dim in self.dimensions]
def __copy__(self):
return self.__class__(self)
def __str__(self):
output = "%s-dimensional domain " % str(self.num_dim)
output += "("+",".join([dim.name for dim in self.dimensions])+")\n"
if self.mapc2p in list(identity_map.values()):
output += "No mapping\n"
output += "Extent: "
else:
output += "Mapping function: "+self.mapc2p.__name__+"\n"
output += "Computational domain: "
output += " x ".join(["[{:.2}, {:.2}]".format(dim.lower, dim.upper)
for dim in self.dimensions])
output += "\n"
output += "Cells: "
output += " x ".join(["{}".format(dim.num_cells) for dim in self.dimensions])
return output
# ========== Coordinates =============================================
def _compute_c_centers(self, recompute=False):
r"""Calculate the coordinates of the centers in the computational domain.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or (self._c_centers is None) or \
any([c is None for c in self.get_dim_attribute('_centers')]):
index = np.indices(self.num_cells)
self._c_centers = []
for i,center_array in enumerate(self.get_dim_attribute('centers')):
self._c_centers.append(center_array[index[i,...]])
def _compute_c_nodes(self, recompute=False):
r"""Calculate the coordinates of the nodes in the computational domain.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or (self._c_nodes is None) or \
any([c is None for c in self.get_dim_attribute('_nodes')]):
index = np.indices(n+1 for n in self.num_cells)
self._c_nodes = []
for i,edge_array in enumerate(self.get_dim_attribute('nodes')):
self._c_nodes.append(edge_array[index[i,...]])
def _compute_p_centers(self, recompute=False):
r"""Calculate the coordinates of the centers in the physical domain.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or (self._p_centers is None) or \
any([c is None for c in self.get_dim_attribute('_centers')]):
self._compute_c_centers(recompute=recompute)
self._p_centers = self.mapc2p(*self._c_centers)
def _compute_p_nodes(self, recompute=False):
r"""Calculate the coordinates of the nodes (corners) in the physical domain.
:Input:
- *recompute* - (bool) Whether to force a recompute of the arrays
"""
if recompute or (self._p_nodes is None) or \
any([c is None for c in self.get_dim_attribute('_nodes')]):
self._compute_c_nodes(recompute=recompute)
self._p_nodes = self.mapc2p(*self._c_nodes)
def c_center(self,ind):
r"""Compute center of computational cell with index ind."""
index = [np.array(i) for i in ind]
return np.array([self.c_centers[i][index] for i in range(self.num_dim)])
def p_center(self,ind):
r"""Compute center of physical cell with index ind."""
return self.mapc2p(*self.c_center(ind))
def c_centers_with_ghost(self, num_ghost):
r"""
Calculate the coordinates of the cell centers, including
ghost cells, in the computational domain.
:Input:
- *num_ghost* - (int) Number of ghost cell layers
"""
index = np.indices(n+2*num_ghost for n in self.num_cells)
centers = []
for i,dim in enumerate(self.dimensions):
center_array = dim.centers_with_ghost(num_ghost)
centers.append(center_array[index[i,...]])
return centers
def c_nodes_with_ghost(self, num_ghost):
r"""
Calculate the coordinates of the cell nodes (corners), including
ghost cells, in the computational domain.
:Input:
- *num_ghost* - (int) Number of ghost cell layers
"""
index = np.indices(n+2*num_ghost+1 for n in self.num_cells)
nodes = []
for i,dim in enumerate(self.dimensions):
edge_array = dim.nodes_with_ghost(num_ghost)
nodes.append(edge_array[index[i,...]])
return nodes
def p_centers_with_ghost(self,num_ghost):
return self.mapc2p(*self.c_centers_with_ghost(num_ghost))
def p_nodes_with_ghost(self,num_ghost):
return self.mapc2p(*self.c_nodes_with_ghost(num_ghost))
# ========================================================================
# Edges: deprecated; will be removed in 6.0
@property
def c_edges(self):
warnings.warn(deprec_message)
return self.c_nodes
@property
def p_edges(self):
warnings.warn(deprec_message)
return self.p_nodes
def p_edges_with_ghost(self,num_ghost):
warnings.warn(deprec_message)
return self.p_nodes_with_ghost(num_ghost)
def c_edges_with_ghost(self, num_ghost):
warnings.warn(deprec_message)
return self.c_nodes_with_ghost(num_ghost)
# ========================================================================
# ========================================================================
# Gauges
# ========================================================================
def add_gauges(self,gauge_coords):
r"""
Determine the cell indices of each gauge and make a list of all gauges
with their cell indices.
"""
for gauge in gauge_coords:
# Check if gauge belongs to this grid:
if all(self.lower[n]<=gauge[n]<self.upper[n] for n in range(self.num_dim)):
# Set indices relative to this grid
gauge_index = [int(round((gauge[n]-self.lower[n])/self.delta[n]))
for n in range(self.num_dim)]
gauge_file_name = 'gauge'+'_'.join(str(coord) for coord in gauge)+'.txt'
self.gauge_file_names.append(gauge_file_name)
self.gauges.append(gauge_index)
def setup_gauge_files(self,outdir):
r"""
Creates and opens file objects for gauges.
"""
import os
gauge_path = os.path.join(outdir,self.gauge_dir_name)
if not os.path.exists(gauge_path):
try:
os.makedirs(gauge_path)
except OSError:
print("gauge directory already exists, ignoring")
for gauge in self.gauge_file_names:
gauge_file = os.path.join(gauge_path,gauge)
if os.path.isfile(gauge_file):
os.remove(gauge_file)
self.gauge_files.append(open(gauge_file,'a'))
    def plot(self,num_ghost=0,mapped=True,mark_nodes=False,mark_centers=False):
        r"""Make a plot of the grid.

        By default the plot uses the mapping grid.mapc2p and does not show
        any ghost cells.  This can be modified via the arguments `mapped`
        and `num_ghost`.

        :Input:
         - *num_ghost* - (int) Number of ghost cell layers to draw; drawn
           semi-transparent behind the interior mesh
         - *mapped* - (bool) Plot in physical (mapped) coordinates
         - *mark_nodes* - (bool) Mark each cell node with a red dot
         - *mark_centers* - (bool) Mark each cell center with a blue dot

        Returns a handle to the plot axis object.  Only implemented for
        two-dimensional grids.
        """
        import matplotlib.pyplot as plt
        if self.num_dim == 2:
            fig, ax = plt.subplots(1,1)
            if num_ghost>0:
                # Draw the ghost-extended mesh first, faded, so the interior
                # mesh drawn below overlays it.
                if mapped:
                    xe, ye = self.p_nodes_with_ghost(num_ghost)
                else:
                    xe, ye = self.c_nodes_with_ghost(num_ghost)
                p = ax.pcolormesh(xe,ye,0*xe,edgecolors='k',cmap='bwr',alpha=0.2)
                p.set_clim(-1,1)
            if mapped:
                xe, ye = self.p_nodes
                xc, yc = self.p_centers
            else:
                xe, ye = self.c_nodes
                xc, yc = self.c_centers
            # Cell data is all zeros; only the mesh edges are of interest.
            p = ax.pcolormesh(xe,ye,0*xe,edgecolors='k',cmap='bwr')
            p.set_clim(-1,1)
            if mark_nodes:
                ax.plot(xe,ye,'or')
            if mark_centers:
                ax.plot(xc,yc,'ob')
            ax.axis('equal')
            ax.set_xlabel(self.dimensions[0].name)
            ax.set_ylabel(self.dimensions[1].name)
            return ax
        else:
            raise Exception('Grid plotting implemented for 2D grids only.')
def _check_validity(self):
for dim in self.dimensions:
dim._check_validity()
assert type(self.num_cells) is int, 'Dimension.num_cells must be an integer'
assert type(self.lower) is float, 'Dimension.lower must be a float'
assert type(self.upper) is float, 'Dimension.upper must be a float'
assert self.num_cells>0, 'Dimension.num_cells must be positive'
assert self.upper > self.lower, 'Dimension.upper must be greater than lower'
# ============================================================================
# Dimension Object
# ============================================================================
class Dimension(object):
    r"""
    Basic class representing a dimension of a Patch object

    :Initialization:

    Required arguments, in order:
     - *lower* - (float) Lower extent of dimension
     - *upper* - (float) Upper extent of dimension
     - *num_cells* - (int) Number of cells

    Optional (keyword) arguments:
     - *name* - (string) string Name of dimension
     - *units* - (string) Type of units, used for informational purposes only

    Output:
     - (:class:`Dimension`) - Initialized Dimension object

    Example:

    >>> from clawpack.pyclaw.geometry import Dimension
    >>> x = Dimension(0.,1.,100,name='x')
    >>> print(x)
    Dimension x: (num_cells,delta,[lower,upper]) = (100,0.01,[0.0,1.0])
    >>> x.name
    'x'
    >>> x.num_cells
    100
    >>> x.delta
    0.01
    >>> x.nodes[0]
    0.0
    >>> x.nodes[1]
    0.01
    >>> x.nodes[-1]
    1.0
    >>> x.centers[-1]
    0.995
    >>> len(x.centers)
    100
    >>> len(x.nodes)
    101
    """

    @property
    def delta(self):
        r"""(float) - Size of an individual, computational cell"""
        return (self.upper-self.lower) / float(self.num_cells)

    # ========== Edges: deprecated; will be removed in 6.0 ===================
    @property
    def edges(self):
        # Deprecated alias for :attr:`nodes`.
        warnings.warn(deprec_message)
        return self.nodes

    def edges_with_ghost(self,num_ghost):
        # Deprecated alias for :meth:`nodes_with_ghost`.
        warnings.warn(deprec_message)
        return self.nodes_with_ghost(num_ghost)

    # ========== Centers and nodes ===========================================
    @property
    def nodes(self):
        r"""(ndarray(:)) - Location of all cell node (edge) coordinates
        for this dimension"""
        if self._nodes is None:
            # Vectorized form of: nodes[i] = lower + i*delta, i = 0..num_cells
            self._nodes = self.lower + np.arange(self.num_cells+1)*self.delta
        return self._nodes

    @property
    def centers(self):
        r"""(ndarray(:)) - Location of all cell center coordinates
        for this dimension"""
        if self._centers is None:
            # Vectorized form of: centers[i] = lower + (i+0.5)*delta
            self._centers = self.lower + (np.arange(self.num_cells)+0.5)*self.delta
        return self._centers

    @property
    def lower(self):
        r"""(float) - Lower extent of this dimension."""
        return self._lower

    @lower.setter
    def lower(self,lower):
        self._lower = float(lower)
        self._centers = None   # Reset cached arrays
        self._nodes = None
        self._check_validity()

    @property
    def upper(self):
        r"""(float) - Upper extent of this dimension."""
        return self._upper

    @upper.setter
    def upper(self,upper):
        self._upper = float(upper)
        self._centers = None   # Reset cached arrays
        self._nodes = None
        self._check_validity()

    @property
    def num_cells(self):
        r"""(int) - Number of cells in this dimension."""
        return self._num_cells

    @num_cells.setter
    def num_cells(self,num_cells):
        self._num_cells = int(num_cells)
        self._centers = None   # Reset cached arrays
        self._nodes = None
        self._check_validity()

    def centers_with_ghost(self,num_ghost):
        r"""(ndarray(:)) - Location of all cell center coordinates
        for this dimension, including centers of ghost cells."""
        centers = self.centers
        # Ghost centers extend the regular spacing beyond each boundary.
        pre  = self.lower+(np.arange(-num_ghost,0)+0.5)*self.delta
        post = self.upper + self.delta * (np.arange(num_ghost) + 0.5)
        return np.hstack((pre,centers,post))

    def nodes_with_ghost(self,num_ghost):
        r"""(ndarray(:)) - Location of all node (edge) coordinates
        for this dimension, including nodes of ghost cells."""
        nodes = self.nodes
        pre  = np.linspace(self.lower-num_ghost*self.delta,self.lower-self.delta,num_ghost)
        post = np.linspace(self.upper+self.delta, self.upper+num_ghost*self.delta,num_ghost)
        return np.hstack((pre,nodes,post))

    def __init__(self, lower, upper, num_cells, name='x',
                 on_lower_boundary=None,on_upper_boundary=None, units=None):
        r"""
        Create a Dimension object.

        See :class:`Dimension` for full documentation
        """
        if isinstance(lower,six.string_types):
            raise Exception('Passing dimension name as first argument is deprecated. \
                             Pass it as a keyword argument instead.')

        # Lazily-computed coordinate caches; invalidated by the setters.
        self._nodes = None
        self._centers = None
        self._centers_with_ghost = None
        self._nodes_with_ghost = None

        self._lower = float(lower)
        self._upper = float(upper)
        self._num_cells = int(num_cells)
        self.name = name
        self.on_lower_boundary = on_lower_boundary
        self.on_upper_boundary = on_upper_boundary
        self.units = units
        self._check_validity()

    def _check_validity(self):
        # Validate the scalar invariants of a single dimension.
        assert isinstance(self.num_cells,int), 'Dimension.num_cells must be an integer; got %s' % type(self.num_cells)
        assert isinstance(self.lower,float), 'Dimension.lower must be a float'
        assert isinstance(self.upper,float), 'Dimension.upper must be a float'
        assert self.num_cells>0, 'Dimension.num_cells must be positive'
        assert self.upper > self.lower, 'Dimension.upper must be greater than lower'

    def __str__(self):
        output = "Dimension %s" % self.name
        if self.units:
            output += " (%s)" % self.units
        output += ":  (num_cells,delta,[lower,upper]) = (%s,%s,[%s,%s])" \
            % (self.num_cells,self.delta,self.lower,self.upper)
        return output

    def __len__(self):
        return self.num_cells
# ============================================================================
# Pyclaw Patch object definition
# ============================================================================
class Patch(object):
    """
    :Global Patch information:

    Each patch has a value for :attr:`level` and :attr:`patch_index`.
    """

    # ========== Global properties ===========================================
    @property
    def num_cells_global(self):
        r"""(list) - List of the number of cells in each dimension"""
        return self.get_dim_attribute('num_cells')

    @property
    def lower_global(self):
        r"""(list) - Lower coordinate extents of each dimension"""
        return self.get_dim_attribute('lower')

    @property
    def upper_global(self):
        r"""(list) - Upper coordinate extents of each dimension"""
        return self.get_dim_attribute('upper')

    @property
    def num_dim(self):
        r"""(int) - Number of dimensions"""
        return len(self._dimensions)

    @property
    def dimensions(self):
        r"""(list) - List of :class:`Dimension` objects defining the
        grid's extent and resolution"""
        return [getattr(self,name) for name in self._dimensions]

    @property
    def delta(self):
        r"""(list) - List of computational cell widths"""
        return self.get_dim_attribute('delta')

    @property
    def name(self):
        r"""(list) - List of names of each dimension"""
        return self._dimensions

    def __init__(self,dimensions):
        self.level = 1
        r"""(int) - AMR level this patch belongs to, ``default = 1``"""
        self.patch_index = 1
        r"""(int) - Patch number of current patch, ``default = 1``"""

        if isinstance(dimensions,Dimension):
            dimensions = [dimensions]
        self._dimensions = []
        for dim in dimensions:
            # A stand-alone patch covers the whole domain, so it touches
            # every boundary in every dimension.
            dim.on_lower_boundary = True
            dim.on_upper_boundary = True
            self.add_dimension(dim)
        self.grid = Grid(dimensions)

        super(Patch,self).__init__()

    def add_dimension(self,dimension):
        r"""
        Add the specified dimension to this patch

        :Input:
         - *dimension* - (:class:`Dimension`) Dimension to be added
        """
        # Add dimension to name list and as an attribute
        if dimension.name in self._dimensions:
            raise Exception('Unable to add dimension. A dimension'+
                            ' of the same name: {name}, already exists.'
                            .format(name=dimension.name))
        self._dimensions.append(dimension.name)
        setattr(self,dimension.name,dimension)

    def get_dim_attribute(self,attr):
        r"""
        Returns a list of all dimensions' attribute attr
        """
        return [getattr(getattr(self,name),attr) for name in self._dimensions]

    def __deepcopy__(self,memo=None):
        r"""Deep-copy the patch, preserving the grid mapping function."""
        import copy
        # A default of None avoids the shared-mutable-default pitfall, and
        # the memo dict is now actually threaded through the copy.
        if memo is None:
            memo = {}
        # Constructing through __class__ already runs __init__; the old
        # explicit second __init__ call deep-copied the dimensions twice.
        result = self.__class__(copy.deepcopy(self.dimensions,memo))
        result.grid.mapc2p = self.grid.mapc2p

        for attr in ('level','patch_index'):
            setattr(result,attr,copy.deepcopy(getattr(self,attr)))

        return result

    def __str__(self):
        output = "Patch %s:\n" % self.patch_index
        output += '\n'.join((str(getattr(self,dim)) for dim in self._dimensions))
        return output
# ============================================================================
# Pyclaw Domain object definition
# ============================================================================
class Domain(object):
    r"""
    A Domain is a list of Patches.

    A Domain may be initialized in the following ways:

        1. Using 3 arguments, which are in order
            - A list of the lower boundaries in each dimension
            - A list of the upper boundaries in each dimension
            - A list of the number of cells to be used in each dimension

        2. Using a single argument, which is
            - A list of dimensions; or
            - A list of patches.

    :Examples:

        >>> from clawpack import pyclaw
        >>> domain = pyclaw.Domain( (0.,0.), (1.,1.), (100,100))
        >>> print(domain.num_dim)
        2
        >>> print(domain.grid.num_cells)
        [100, 100]
    """

    @property
    def num_dim(self):
        r"""(int) - :attr:`Patch.num_dim` of base patch"""
        return self._get_base_patch_attribute('num_dim')

    @property
    def patch(self):
        r"""(:class:`Patch`) - First patch is returned"""
        return self.patches[0]

    @property
    def grid(self):
        r"""(list) - :attr:`Patch.grid` of base patch"""
        return self._get_base_patch_attribute('grid')

    def __init__(self,*arg):
        if len(arg)>1:
            # Initialization form 1: (lower, upper, num_cells).
            lower = arg[0]
            upper = arg[1]
            n     = arg[2]
            dims  = []
            names = ['x','y','z']
            # One name per dimension (zip truncates anyway, but the slice
            # should not over-reach by one as it previously did).
            names = names[:len(n)]
            for low,up,nn,name in zip(lower,upper,n,names):
                dims.append(Dimension(low,up,nn,name=name))
            self.patches = [Patch(dims)]
        else:
            # Initialization form 2: a Dimension/Patch or a list of them.
            geom = arg[0]
            if not isinstance(geom,list) and not isinstance(geom,tuple):
                geom = [geom]
            if isinstance(geom[0],Patch):
                self.patches = geom
            elif isinstance(geom[0],Dimension):
                self.patches = [Patch(geom)]

    def _get_base_patch_attribute(self, name):
        r"""
        Return base patch attribute name

        :Output:
         - (id) - Value of attribute from ``self.patches[0]``
        """
        return getattr(self.patches[0],name)

    def __deepcopy__(self,memo=None):
        r"""Deep-copy the domain by deep-copying its patches."""
        import copy
        # None default avoids a shared mutable default; constructing via
        # __class__ already runs __init__, so the old second call (which
        # deep-copied the patches twice) is gone.
        if memo is None:
            memo = {}
        return self.__class__(copy.deepcopy(self.patches,memo))
if __name__ == "__main__":
    # Run the doctests embedded in the docstrings of this module.
    import doctest
    doctest.testmod()
|
{"hexsha": "d2c12e020c3cbffbe52ef5a2a6737b90293c766c", "size": 29397, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyclaw/geometry.py", "max_stars_repo_name": "BrisaDavis/pyclaw", "max_stars_repo_head_hexsha": "439e5b3f8f1d3892578368c17c4ad584fda706c2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pyclaw/geometry.py", "max_issues_repo_name": "BrisaDavis/pyclaw", "max_issues_repo_head_hexsha": "439e5b3f8f1d3892578368c17c4ad584fda706c2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pyclaw/geometry.py", "max_forks_repo_name": "BrisaDavis/pyclaw", "max_forks_repo_head_hexsha": "439e5b3f8f1d3892578368c17c4ad584fda706c2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5895883777, "max_line_length": 118, "alphanum_fraction": 0.5683913324, "include": true, "reason": "import numpy", "num_tokens": 6709}
|
#ifndef BOOST_MPL_AUX_MSVC_ETI_BASE_HPP_INCLUDED
#define BOOST_MPL_AUX_MSVC_ETI_BASE_HPP_INCLUDED

// Copyright Aleksey Gurtovoy 2001-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.

// $Id: msvc_eti_base.hpp,v 1.2 2009/02/16 01:51:05 wdong-pku Exp $
// $Date: 2009/02/16 01:51:05 $
// $Revision: 1.2 $

#include <boost/mpl/aux_/is_msvc_eti_arg.hpp>
#include <boost/mpl/aux_/config/eti.hpp>
#include <boost/mpl/aux_/config/gcc.hpp>
#include <boost/mpl/aux_/config/workaround.hpp>

namespace boost { namespace mpl { namespace aux {

// msvc_eti_base<T> is a safe replacement for deriving directly from T on
// compilers affected by MSVC's "early template instantiation" (ETI) bug,
// where T may erroneously be the placeholder type 'int' during parsing.

#if defined(BOOST_MPL_CFG_MSVC_70_ETI_BUG)

// Non-ETI case: T is a real type, so inherit from it as usual.
template< bool > struct msvc_eti_base_impl
{
    template< typename T > struct result_
        : T
    {
        typedef T type;
    };
};

// ETI case: provide a dummy result carrying the nested names that MPL
// metafunctions commonly expect, so instantiation does not fail.
template<> struct msvc_eti_base_impl<true>
{
    template< typename T > struct result_
    {
        typedef result_ type;
        typedef result_ first;
        typedef result_ second;
        typedef result_ tag;
        enum { value = 0 };
    };
};

// Dispatch on whether T is the ETI placeholder.
template< typename T > struct msvc_eti_base
    : msvc_eti_base_impl< is_msvc_eti_arg<T>::value >
        ::template result_<T>
{
};

#else // !BOOST_MPL_CFG_MSVC_70_ETI_BUG

template< typename T > struct msvc_eti_base
    : T
{
#if BOOST_WORKAROUND(BOOST_MPL_CFG_GCC, BOOST_TESTED_AT(0x0304))
    // Work around a GCC 3.x issue by declaring a default constructor.
    msvc_eti_base();
#endif
    typedef T type;
};

#endif

// Explicit specialization for the ETI placeholder type itself.
template<> struct msvc_eti_base<int>
{
    typedef msvc_eti_base type;
    typedef msvc_eti_base first;
    typedef msvc_eti_base second;
    typedef msvc_eti_base tag;
    enum { value = 0 };
};

}}}

#endif // BOOST_MPL_AUX_MSVC_ETI_BASE_HPP_INCLUDED
|
{"hexsha": "4fbf226090e4518546a97c8a6309253c44196e1c", "size": 1756, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "lshkit/trunk/3rd-party/boost/boost/mpl/aux_/msvc_eti_base.hpp", "max_stars_repo_name": "wzj1695224/BinClone", "max_stars_repo_head_hexsha": "3b6dedb9a1f08be6dbcdce8f3278351ef5530ed8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 21.0, "max_stars_repo_stars_event_min_datetime": "2015-05-22T09:22:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-06T18:54:07.000Z", "max_issues_repo_path": "lshkit/trunk/3rd-party/boost/boost/mpl/aux_/msvc_eti_base.hpp", "max_issues_repo_name": "mrfarhadi/BinClone", "max_issues_repo_head_hexsha": "035c20ab27ec00935c12ce54fe9c52bba4aaeff2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2020-05-21T08:43:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-21T08:43:19.000Z", "max_forks_repo_path": "lshkit/trunk/3rd-party/boost/boost/mpl/aux_/msvc_eti_base.hpp", "max_forks_repo_name": "mrfarhadi/BinClone", "max_forks_repo_head_hexsha": "035c20ab27ec00935c12ce54fe9c52bba4aaeff2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11.0, "max_forks_repo_forks_event_min_datetime": "2015-09-08T20:56:14.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-22T12:52:45.000Z", "avg_line_length": 22.5128205128, "max_line_length": 67, "alphanum_fraction": 0.7027334852, "num_tokens": 521}
|
# 导入包
import zipfile
import paddle
import paddle.fluid as fluid
import matplotlib.pyplot as plt
import matplotlib.image as mping
from PIL import Image
import json
import numpy as np
import cv2
import sys
import time
import h5py
# import scipy.io as io
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import scipy
from matplotlib import cm as CM
from paddle.utils.plot import Ploter
# Timestamp used at the end of the script to report total training time.
start = time.time()

# Load the annotation labels that correspond to each image.
# NOTE(review): the file handle is never closed; consider a 'with' block.
f = open('data/data1917/train.json',encoding='utf-8')
content = json.load(f)
print(content.keys())
print('info:',content['info'])
print('stage:',content['stage'])
print('split:',content['split'])
print(content['annotations'][0].keys())
print(content['annotations'][0]['type'])
print(content['annotations'][0][ 'id'])
print(content['annotations'][0]['ignore_region'])
print(content['annotations'][0]['name'])
print(content['annotations'][0]['num'])

# Strip the leading 'stage1/' prefix from every annotation's image path.
# NOTE(review): str.lstrip strips a *character set*, not a prefix; this works
# here only because a '/' follows the prefix -- confirm if paths change.
for j in range(len(content['annotations'])):
    content['annotations'][j]['name'] = content['annotations'][j]['name'].lstrip('stage1').lstrip('/')
print(content['annotations'][1]['name'])
# Read the image list from the training archive.
zfile = zipfile.ZipFile("data/train_new.zip")
l = []  # l stores the paths of all images in the train split
for fname in zfile.namelist()[1:]:
    # print(fname)
    l.append(fname)
print(l[3])
name = l[3]
im = Image.open(name)
plt.imshow(im)

# Look up the annotation entries for this image.
for j in range(len(content['annotations'])):
    if content['annotations'][j]['name'] == name:
        print('id = ',content['annotations'][j]['id'])  # image id
        ann = content['annotations'][j]['annotation']
        print(ann)  # annotations are x,y,w,h boxes; some have only x,y points
        print('有标注的个数:',len(ann))

# Visualize the crop corresponding to the second annotation box.
lab = 1
box = (ann[lab]['x'],ann[lab]['y'],ann[lab]['x']+ann[lab]['w'],ann[lab]['y']+ann[lab]['h'])
new_img = im.crop(box=box)
plt.imshow(new_img)

# Draw every annotation box on the image, pixel by pixel.
width = im.size[0]   # image width
height = im.size[1]  # image height
print(width,height)
for a in range(len(ann)):  # iterate over all annotations
    for x in range(width):
        for y in range(height):
            # r,g,b = im.getpixel((x,y))
            if(x > (ann[a]['x']-5) and x < (ann[a]['x']+5) and y > ann[a]['y'] and y < (ann[a]['y']+ann[a]['h'])):
                im.putpixel((x,y),(255,0,0))  # left edge: red line from (x,y) to (x,y+h), ~5 px wide
            if(x > (ann[a]['x']+ann[a]['w']-5) and x < (ann[a]['x']+ann[a]['w']+5) and y > ann[a]['y'] and y < (ann[a]['y']+ann[a]['h'])):
                im.putpixel((x,y),(255,0,0))  # right edge: red line from (x+w,y) to (x+w,y+h)
            if(y > (ann[a]['y']-5) and y < (ann[a]['y']+5) and x > ann[a]['x'] and x < (ann[a]['x']+ann[a]['w'])):
                im.putpixel((x,y),(255,0,0))  # top edge: red line from (x,y) to (x+w,y)
            if(y > (ann[a]['y']+ann[a]['h']-5) and y < (ann[a]['y']+ann[a]['h']+5) and x > ann[a]['x'] and x < (ann[a]['x']+ann[a]['w'])):
                im.putpixel((x,y),(255,0,0))  # bottom edge: red line from (x,y+h) to (x+w,y+h)
plt.imshow(im)
# Classify images by pixel dimensions to identify their capture source.
l_set = []
s_2560_1920 = [] # boxes   fisheye elevator cam   63 images
s_928_576 = []   # points  vending machine        248 images
s_1024_768 = []  # points  street                 302
s_640_480 = []   # points  indoor                 92
s_2048_2048 =[]  # boxes   fisheye elevator cam   41
s_1080_1618 =[]  # filtered out                   1
s_1920_1080 = [] # boxes   supermarket            1240
s_1440_1080 =[]  # filtered out                   1
s_1920_1200 =[]  # boxes   street                 12
for inde in range(2000):
    imm = Image.open(content['annotations'][inde]['name'])
    l_set.append(imm.size)
    if imm.size == (2560, 1920):s_2560_1920.append(content['annotations'][inde]['name'])
    elif imm.size == (928, 576):s_928_576.append(content['annotations'][inde]['name'])
    elif imm.size == (1024, 768):s_1024_768.append(content['annotations'][inde]['name'])
    elif imm.size == (640, 480):s_640_480.append(content['annotations'][inde]['name'])
    elif imm.size == (2048, 2048):s_2048_2048.append(content['annotations'][inde]['name'])
    elif imm.size == (1080, 1618):s_1080_1618.append(content['annotations'][inde]['name'])
    elif imm.size == (1920, 1080):s_1920_1080.append(content['annotations'][inde]['name'])
    elif imm.size == (1440, 1080):s_1440_1080.append(content['annotations'][inde]['name'])
    elif imm.size == (1920, 1200):s_1920_1200.append(content['annotations'][inde]['name'])
print(len(l_set))
sett = set(l_set)
print(sett)
print(len(s_2560_1920),len(s_928_576),len(s_1024_768),len(s_640_480),len(s_2048_2048),len(s_1080_1618),len(s_1920_1080),len(s_1440_1080),len(s_1920_1200))
print(s_1440_1080)
print(s_1080_1618)
# print(s_1024_768)

# Collect every sample whose people are annotated with points (no box).
# NOTE(review): the loop variable 'f' shadows the open file handle above.
point_l = []
for f in range(2000):
    if 'w' not in content['annotations'][f]['annotation'][0]:
        point_l.append(content['annotations'][f]['name'])
# for p_name in point_l:
#     print(p_name)
print(len(point_l))

# For point-style annotations, show how a single point marks each person.
# name1 = 'train/b179764112252559b76a59db9fa18021.jpg'
name1 = point_l[1]
im1 = Image.open(name1)
for j in range(len(content['annotations'])):
    if content['annotations'][j]['name'] == name1:
        print('id = ',content['annotations'][j]['id'])
        ann1 = content['annotations'][j]['annotation']
        # print(ann1)
        print('有标注的个数:',len(ann1))
for a in range(len(ann1)):
    for x in range(im1.size[0]):
        for y in range(im1.size[1]):
            if(x > (ann1[a]['x']-10) and x < (ann1[a]['x']+10) and y > ann1[a]['y']-10 and y < (ann1[a]['y']+10)):  # pixels within +/-10 of the point
                im1.putpixel((x,y),(255,0,0))  # paint the selected pixels red
plt.imshow(im1)

# Ground-truth point list for the image annotated above.
gt = []
for a in range(len(ann1)):
    gt.append([ann1[a]['x'],ann1[a]['y']])
print(gt)
gt = np.array(gt)
print(gt.shape)
# Generate a density map with a Gaussian filter transformation.
def gaussian_filter_density(gt):
    """Generate a crowd-density map from a binary head-annotation map.

    Each nonzero pixel in ``gt`` is treated as one person and smoothed with
    a Gaussian kernel, so the integral of the returned map approximates the
    number of annotated points (up to boundary truncation).

    :param gt: 2-D array with 1 at each annotated head position, 0 elsewhere.
    :return: float32 density map with the same shape as ``gt``.
    """
    # The scipy.ndimage.filters namespace was deprecated and removed in
    # SciPy 1.15; import from the stable public location instead.
    from scipy.ndimage import gaussian_filter

    density = np.zeros(gt.shape, dtype=np.float32)
    gt_count = np.count_nonzero(gt)
    # With no annotated points the all-zero map is returned unchanged.
    if gt_count == 0:
        return density

    # (x, y) coordinates of every annotated point.
    pts = np.array(list(zip(np.nonzero(gt)[1].ravel(), np.nonzero(gt)[0].ravel())))
    for i, pt in enumerate(pts):
        pt2d = np.zeros(gt.shape, dtype=np.float32)
        pt2d[pt[1], pt[0]] = 1.
        if gt_count > 1:
            # Fixed kernel width; the adaptive (KNN distance based) sigma
            # from the original CSRNet recipe was disabled in this script.
            sigma = 25
        else:
            sigma = np.average(np.array(gt.shape)) / 2. / 2.  # case: 1 point
        # Convolve the single-point impulse with the Gaussian kernel.
        density += gaussian_filter(pt2d, sigma, mode='constant')
    return density
# Build a binary head map for the chosen image and turn it into a density map.
print(gt.shape)
img = plt.imread(name1)
k = np.zeros((img.shape[0], img.shape[1]))
for i in range(0, len(gt)):
    # Guard against annotations that fall outside the image bounds.
    if int(gt[i][1]) < img.shape[0] and int(gt[i][0]) < img.shape[1]:
        k[int(gt[i][1]), int(gt[i][0])] = 1
# generate density map
k = gaussian_filter_density(k)

# Visualize the density map; its integral approximates the person count.
print(k.shape)
groundtruth = np.asarray(k)
# groundtruth = groundtruth.resize((80,60))
print(groundtruth.shape)
plt.imshow(groundtruth,cmap=CM.jet)
print("Sum = " ,np.sum(groundtruth))
# print(groundtruth[0][59:100])
# Image preprocessing.
def picture_opt(img,ann):
    """Resize an image to the 640x480 training size and convert its
    annotations to density-map coordinates.

    Box annotations (dicts containing 'w'/'h') are reduced to a single head
    point (horizontal box centre, 20 px below the top edge); point
    annotations are used as-is.  All coordinates are rescaled to the 640x480
    frame and divided by 8 to match the network output stride.

    :param img: PIL image.
    :param ann: list of dicts with keys 'x','y' and optionally 'w','h'.
    :return: (normalized image array, list of (x, y) ground-truth points)
    """
    size_x,size_y = img.size
    train_img_size = (640,480)
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same
    # filter and has been available since Pillow 2.7.
    img = img.resize(train_img_size,Image.LANCZOS)
    img = np.array(img)
    img = img / 255.0

    gt = []
    for b_l in range(len(ann)):
        # Box annotation: collapse the box to an approximate head point.
        if 'w' in ann[b_l].keys():
            x = (ann[b_l]['x']+(ann[b_l]['x']+ann[b_l]['w']))/2
            y = ann[b_l]['y']+20
            x = (x*640/size_x)/8
            y = (y*480/size_y)/8
            gt.append((x,y))
        else:
            # Point annotation: rescale the point directly.
            x = ann[b_l]['x']
            y = ann[b_l]['y']
            x = (x*640/size_x)/8
            y = (y*480/size_y)/8
            gt.append((x,y))

    # Return the resized, normalized image and the rescaled points.
    return img,gt
# Density-map construction helper.
def ground(img, gt):
    """Build the ground-truth density map for a resized image.

    A binary map is laid out at 1/8 of the image resolution (matching the
    network output stride) and then smoothed with gaussian_filter_density so
    each annotated point contributes unit mass.
    """
    rows = int(img.shape[0] / 8)
    cols = int(img.shape[1] / 8)
    density = np.zeros((rows, cols))
    for px, py in gt:
        row, col = int(py), int(px)
        # Ignore points that fall outside the downsampled map.
        if row < rows and col < cols:
            density[row, col] = 1
    # generate density map
    return gaussian_filter_density(density)
# Demonstrate box-to-point conversion on one annotated image.
qt = []
img = Image.open(content['annotations'][2]['name'])
ann = content['annotations'][2]['annotation']
print(img.size)
temp = img.resize((80, 60),Image.ANTIALIAS)
im,qt = picture_opt(img,ann)
print(im.shape)
print(qt)
# Mark each converted point in red on the 80x60 thumbnail.
for a in range(len(qt)):
    for x in range(temp.size[0]):
        for y in range(temp.size[1]):
            if(x > (qt[a][0]-1) and x < (qt[a][0]+1) and y > qt[a][1]-1 and y < (qt[a][1]+1)):  # pixels within +/-1 of the point
                temp.putpixel((x,y),(255,0,0))  # paint the selected pixels red
plt.imshow(temp)
k = ground(im,qt)
# Training data generator definition.
def train_set():
    def inner():
        # Yields (image, density_map) pairs, blanking out ignore regions.
        for ig_index in range(2000):  # iterate over all images
            # Skip degenerate samples (only 2-3 annotations) and two
            # known-bad images.
            if len(content['annotations'][ig_index]['annotation']) == 2: continue
            if len(content['annotations'][ig_index]['annotation']) == 3: continue
            if content['annotations'][ig_index]['name'] == 'train/8538edb45aaf7df78336aa5b49001be6.jpg': continue
            if content['annotations'][ig_index]['name'] == 'train/377df0a7a9abc44e840e938521df3b54.jpg': continue
            if content['annotations'][ig_index]['ignore_region']:  # fill all ignore regions with zero pixels
                ig_list = []   # vertices of ignore region 1
                ig_list1 = []  # vertices of ignore region 2
                # print(content['annotations'][ig_index]['ignore_region'])
                if len(content['annotations'][ig_index]['ignore_region']) == 1:  # at most 2 ignore regions per image; this is the 1-region case
                    # print('ig1',ig_index)
                    ign_rge = content['annotations'][ig_index]['ignore_region'][0]  # data of the first ignore region
                    for ig_len in range(len(ign_rge)):  # walk the polygon's vertex list
                        ig_list.append([ign_rge[ig_len]['x'], ign_rge[ig_len]['y']])  # collect each vertex's x,y into ig_list
                    ig_cv_img = cv2.imread(content['annotations'][ig_index]['name'])  # read the image with cv2
                    pts = np.array(ig_list, np.int32)  # convert to ndarray as required by fillPoly
                    cv2.fillPoly(ig_cv_img, [pts], (0, 0, 0), cv2.LINE_AA)  # fill the ignore region with zero pixels
                    ig_img = Image.fromarray(cv2.cvtColor(ig_cv_img, cv2.COLOR_BGR2RGB))  # cv2 -> PIL
                    ann = content['annotations'][ig_index]['annotation']  # read all annotation info
                    ig_im, gt = picture_opt(ig_img, ann)
                    k = ground(ig_im, gt)
                    groundtruth = np.asarray(k)
                    groundtruth = groundtruth.T.astype('float32')
                    ig_im = ig_im.transpose().astype('float32')
                    yield ig_im, groundtruth
                if len(content['annotations'][ig_index]['ignore_region']) == 2:  # the 2-region case
                    # print('ig2',ig_index)
                    ign_rge = content['annotations'][ig_index]['ignore_region'][0]
                    ign_rge1 = content['annotations'][ig_index]['ignore_region'][1]
                    for ig_len in range(len(ign_rge)):
                        ig_list.append([ign_rge[ig_len]['x'], ign_rge[ig_len]['y']])
                    for ig_len1 in range(len(ign_rge1)):
                        ig_list1.append([ign_rge1[ig_len1]['x'], ign_rge1[ig_len1]['y']])
                    ig_cv_img2 = cv2.imread(content['annotations'][ig_index]['name'])
                    pts = np.array(ig_list, np.int32)
                    pts1 = np.array(ig_list1, np.int32)
                    cv2.fillPoly(ig_cv_img2, [pts], (0, 0, 0), cv2.LINE_AA)
                    cv2.fillPoly(ig_cv_img2, [pts1], (0, 0, 0), cv2.LINE_AA)
                    ig_img2 = Image.fromarray(cv2.cvtColor(ig_cv_img2, cv2.COLOR_BGR2RGB))  # cv2 -> PIL
                    ann = content['annotations'][ig_index]['annotation']  # read all annotation info
                    ig_im, gt = picture_opt(ig_img2, ann)
                    k = ground(ig_im, gt)
                    # NOTE(review): the next line overwrites the density map
                    # computed above with all zeros -- this looks like leftover
                    # debug code; confirm whether it is intentional.
                    k = np.zeros((int(ig_im.shape[0] / 8), int(ig_im.shape[1] / 8)))
                    groundtruth = np.asarray(k)
                    groundtruth = groundtruth.T.astype('float32')
                    ig_im = ig_im.transpose().astype('float32')
                    yield ig_im, groundtruth
            else:
                # No ignore region: use the image as-is.
                # print('else',ig_index,content['annotations'][ig_index]['name'])
                img = Image.open(content['annotations'][ig_index]['name'])
                ann = content['annotations'][ig_index]['annotation']  # read all annotation info
                im, gt = picture_opt(img, ann)
                k = ground(im, gt)
                groundtruth = np.asarray(k)
                groundtruth = groundtruth.T.astype('float32')
                im = im.transpose().astype('float32')
                yield im, groundtruth
    return inner
BATCH_SIZE= 2  # samples per batch (original comment said 10; actual value is 2)
# Shuffled, batched reader over the training generator.
train_reader = paddle.batch(
    paddle.reader.shuffle(
        train_set(), buf_size=5),
    batch_size=BATCH_SIZE)
def crowd_deconv_without_bn(img):
    # VGG-16-style frontend: three conv groups with 2x2 pooling (overall
    # stride 8), followed by 512-filter conv layers.  Returns the feature
    # map consumed by dilations_cnn.
    x = img
    x = fluid.layers.conv2d(input=x, num_filters=64, filter_size=3, padding=1, act='relu')
    x = fluid.layers.batch_norm(input=x, act='relu')
    x = fluid.layers.conv2d(input=x, num_filters=64, filter_size=3, padding=1, act='relu')
    print('3-64-2', x.shape)
    x = fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2)
    x = fluid.layers.dropout(x=x, dropout_prob=0.25)
    print('pool', x.shape)
    x = fluid.layers.conv2d(input=x, num_filters=128, filter_size=3, padding=1, act=None)
    x = fluid.layers.batch_norm(input=x, act='relu')
    x = fluid.layers.conv2d(input=x, num_filters=128, filter_size=3, padding=1, act='relu')
    print('3-128-2', x.shape)
    x = fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2)
    x = fluid.layers.dropout(x=x, dropout_prob=0.25)
    x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=1, act='relu')
    x = fluid.layers.batch_norm(input=x, act='relu')
    x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=1, act=None)
    x = fluid.layers.batch_norm(input=x, act='relu')
    x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=1, act='relu')
    print('3-256-3', x.shape)
    x = fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2)
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    # x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu')
    # x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu')
    # x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1,act='relu' )
    # x = fluid.layers.pool2d(input=x, pool_size=3, pool_stride=1, pool_padding=1)
    # x = fluid.layers.pool2d(input=x, pool_size=2, pool_stride=2)
    # x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu')
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1, act='relu')
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=1)
    x = fluid.layers.batch_norm(input=x, act=None)
    print('3-512-3', x.shape)
    # x = fluid.layers.pool2d(input=x, pool_size=3, pool_stride=2, pool_padding=1)
    # x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    print('clowd_net output shape:', x.shape)
    return x
def dilations_cnn(VGG_16_net):
    # CSRNet-style backend: dilated 3x3 convolutions (dilation=2) that
    # shrink the channel count 512 -> 64, then a final 1x1 conv producing
    # the single-channel density map.
    x = VGG_16_net
    print(x.shape)
    x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=2, dilation=2, act='relu')
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=2, dilation=2, act='relu')
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    x = fluid.layers.conv2d(input=x, num_filters=512, filter_size=3, padding=2, dilation=2, act='relu')
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    x = fluid.layers.conv2d(input=x, num_filters=256, filter_size=3, padding=2, dilation=2, act='relu')
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    x = fluid.layers.conv2d(input=x, num_filters=128, filter_size=3, padding=2, dilation=2, act='relu')
    x = fluid.layers.dropout(x=x, dropout_prob=0.5)
    x = fluid.layers.conv2d(input=x, num_filters=64, filter_size=3, padding=2, dilation=2, act='relu')
    x = fluid.layers.conv2d(input=x, num_filters=1, filter_size=1, act=None)
    print(x.shape)
    return x
# Build the network graph: 3x640x480 input image -> 1x80x60 density map.
img_size = [3,640,480]
images = fluid.layers.data(name='images',shape=img_size,dtype='float32')
label = fluid.layers.data(name='label',shape=[1,80,60],dtype='float32')
VGG = crowd_deconv_without_bn(images)
predict = dilations_cnn(VGG)
# Per-pixel RMSE-style loss between predicted and ground-truth densities.
squar = fluid.layers.square_error_cost(input=predict, label=label)
cost = fluid.layers.sqrt(squar, name=None)
print(cost.shape)
avg_cost = fluid.layers.mean(cost)
print(avg_cost.shape)

# Create the optimizer; two common choices are listed, pick one.
# Momentum optimizer with learning rate and momentum:
# optimizer = fluid.optimizer.Momentum(
#     learning_rate=0.001,
#     momentum=0.8)
optimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-6)
# optimizer = fluid.optimizer.SGD(learning_rate=1e-5)
optimizer.minimize(avg_cost)
print('优化')
startup_program = fluid.default_startup_program()
main_program = fluid.default_main_program()
# test_program = fluid.default_main_program().clone(for_test=True)
#optimized = fluid.transpiler.memory_optimize(input_program=fluid.default_main_program(), print_log=False)

# Choose the training device (CPU here; flip use_cuda for GPU).
use_cuda = False
# use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# Create the executor; the place is fixed at program initialization.
exe = fluid.Executor(place)
# Initialize the executor (runs parameter initialization).
exe.run(startup_program)
feeder = fluid.DataFeeder(feed_list=[images, label],place=place)

# Model-saving configuration and live cost plotting.
model_save_dir = 'renliuyuce_model6'
train_prompt = "Train cost"
cost_ploter = Ploter(train_prompt)
def event_handler_plot(ploter_title, step, cost):
    # Record the latest training cost and refresh the live plot.
    cost_ploter.append(ploter_title, step, cost)
    cost_ploter.plot()
# Train for only 1 epoch -- just to exercise the pipeline end to end.
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
EPOCH_NUM = 1
# Start training.
lists = []
step = 0
for epochs in range(EPOCH_NUM):
    # Training pass over the shuffled reader.
    for batch_id, train_data in enumerate(train_reader()):  # iterate the reader with a batch index
        train_cost, sult, lab, vgg = exe.run(program=main_program,  # run the main program
                                             feed=feeder.feed(train_data),  # feed one batch of data
                                             fetch_list=[avg_cost, predict, label, VGG])  # fetch the loss and network outputs
        if step % 10 == 0:
            event_handler_plot(train_prompt, step, train_cost[0])
        # print(batch_id)
        if batch_id % 100 == 0:  # report training progress every 100 batches
            # Predicted vs. ground-truth person counts per sample.
            p = [np.sum(pre) for pre in sult]
            l = [np.sum(pre) for pre in lab]
            print(p, l, np.sum(sult), np.sum(lab))
            print('Pass:%d, Batch:%d, Cost:%0.5f' % (epochs, batch_id, train_cost[0]))
        step += 1
    # Save the inference model.
    if model_save_dir is not None:
        fluid.io.save_inference_model(model_save_dir, ['images'], [predict], exe)
print('训练模型保存完成!')
end = time.time()
print(time.strftime('V100训练用时:%M分%S秒', time.localtime(end - start)))
# Single-image test: load one test image, run the saved inference model,
# and visualize the predicted density map.
import numpy as np
from PIL import Image
import paddle.fluid as fluid
import matplotlib.pyplot as plt
import zipfile
test_zfile = zipfile.ZipFile("data/test_new.zip")
l_test = []
# Collect entry names, skipping the first one - presumably the archive's
# root directory; verify against the actual zip layout.
for test_fname in test_zfile.namelist()[1:]:
    l_test.append(test_fname)
# NOTE(review): names come from the zip namelist but are opened from the
# filesystem - this assumes the archive was extracted to the working
# directory beforehand. TODO confirm.
test_img = Image.open(l_test[0])
plt.imshow(test_img)
test_img = test_img.resize((640, 480))
test_im = np.array(test_img)
test_im = test_im / 255.0
# transpose() reverses axes (H, W, C) -> (C, W, H), then a batch dim is
# added to match the network's (1, 3, 640, 480) input.
test_im = test_im.transpose().reshape(1, 3, 640, 480).astype('float32')
use = True
place1 = fluid.CUDAPlace(0) if use else fluid.CPUPlace()
# Define an executor for inference.
infer_exe = fluid.Executor(place1)
inference_scope = fluid.core.Scope()  # scope the inference graph runs in
model_save_dir = 'renliuyuce_model6'
with fluid.scope_guard(inference_scope):
    # Load the trained inference model from model_save_dir.
    [inference_program,   # program used for prediction
     feed_target_names,   # names of the variables that must be fed
     fetch_targets] = fluid.io.load_inference_model(model_save_dir,  # fetch_targets: variables holding the results
                                                    infer_exe)       # executor that runs the inference model
    results = infer_exe.run(inference_program,                     # run the prediction program
                            feed={feed_target_names[0]: test_im},  # feed the image to predict
                            fetch_list=fetch_targets)              # get the prediction
    result = results[0][0][0]
    print(result)
    # NOTE(review): `CM` is not defined in this chunk - presumably
    # matplotlib.cm imported elsewhere; confirm.
    plt.imshow(result, cmap=CM.jet)
    # Sum over the density map approximates the head count.
    print(np.sum(results[0]))
# Batch test + CSV export: run the saved model over 100 test samples and
# write results7.csv with one row per sample (image name, predicted count).
import numpy as np
from PIL import Image
import paddle.fluid as fluid
import matplotlib.pyplot as plt
import zipfile
test_zfile = zipfile.ZipFile("data/data1917/test_new.zip")
l_test = []
for test_fname in test_zfile.namelist()[1:]:
    # print(fname)
    l_test.append(test_fname)
use = True
place1 = fluid.CUDAPlace(0) if use else fluid.CPUPlace()
infer_exe = fluid.Executor(place1)
inference_scope = fluid.core.Scope()
model_save_dir = 'renliuyuce_model6'
# Maps cleaned image name -> integer predicted head count.
data_dict = {}
with fluid.scope_guard(inference_scope):
    [inference_program,
     feed_target_names,
     fetch_targets] = fluid.io.load_inference_model(model_save_dir, infer_exe)
    # Only the first 100 test images are evaluated.
    for index in range(100):
        test_img = Image.open(l_test[index])
        test_img = test_img.resize((640, 480))
        test_im = np.array(test_img)
        test_im = test_im / 255.0
        # Axis reversal (H, W, C) -> (C, W, H) plus a leading batch dim.
        test_im = test_im.transpose().reshape(1, 3, 640, 480).astype('float32')
        # NOTE(review): lstrip strips a *character set*, not a prefix -
        # names whose basename starts with 't'/'e'/'s' would be
        # over-stripped. Confirm test file naming before reuse.
        l_test[index] = l_test[index].lstrip('test').lstrip('/')
        results = infer_exe.run(inference_program,                     # run the prediction program
                                feed={feed_target_names[0]: test_im},  # feed the image to predict
                                fetch_list=fetch_targets)              # get the prediction
        # print(people)
        # Total predicted count = sum over the density map.
        people = np.sum(results)
        print(index, l_test[index], int(people))
        data_dict[l_test[index]] = int(people)
import csv
with open('results7.csv', 'w') as csvfile:
    fieldnames = ['id', 'predicted']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for k, v in data_dict.items():
        writer.writerow({'id': k, 'predicted': v})
|
{"hexsha": "9965c3ec361436e9af0e16d7f8b897c3185be918", "size": 21130, "ext": "py", "lang": "Python", "max_stars_repo_path": "crowd_density_detection.py", "max_stars_repo_name": "ArseneLupinhb/crowd_density_detection", "max_stars_repo_head_hexsha": "a4f4e955319926a57dcfc0b446f6d40b0449df30", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-06T13:17:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-07T01:34:38.000Z", "max_issues_repo_path": "crowd_density_detection.py", "max_issues_repo_name": "ArseneLupinhb/crowd_density_detection", "max_issues_repo_head_hexsha": "a4f4e955319926a57dcfc0b446f6d40b0449df30", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "crowd_density_detection.py", "max_forks_repo_name": "ArseneLupinhb/crowd_density_detection", "max_forks_repo_head_hexsha": "a4f4e955319926a57dcfc0b446f6d40b0449df30", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6080246914, "max_line_length": 154, "alphanum_fraction": 0.6647420729, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 7279}
|
from collections import defaultdict
import numpy as np
from datetime import datetime
from graph import Graph
from graph import FloatVec
from graph import LongVec
from graph import LongPair
from graph import PairVec
from graph import constrainedGreedyAdditiveEdgeContraction
import progressbar
import math
def constant_length(list_seq, timesteps, num_fea, start=False):
    """Force a sequence of feature vectors to a fixed number of timesteps.

    Shorter sequences are zero-padded at the front; longer ones are trimmed.

    Inputs:
        list_seq  -- List of feature vectors.
        timesteps -- Constant number of timesteps to be enforced.
        num_fea   -- Number of features in each feature vector.
        start     -- If true, keep the beginning of over-long sequences,
                     otherwise keep the end.
    """
    seq = np.array(list_seq)
    # An empty sequence becomes all-zero padding.
    if seq.size == 0:
        return np.zeros((timesteps, num_fea))
    rows, cols = seq.shape
    assert cols == num_fea
    if rows == timesteps:
        return seq
    if rows > timesteps:
        # Trim: keep the head or the tail depending on `start`.
        return seq[:timesteps, :] if start else seq[-timesteps:, :]
    # Pad: prepend zero rows until the sequence is `timesteps` long.
    padding = np.zeros((timesteps - rows, num_fea))
    return np.vstack((padding, seq))
def renumber_track_ids(track_ids):
    """Remap track ids to consecutive integers ordered by first appearance.

    The first id seen becomes 0, the next distinct id becomes 1, and so on.

    :param track_ids: Sequence of (possibly sparse) track ids.
    :return: List of remapped ids, parallel to the input.
    """
    unique_ids, first_pos = np.unique(track_ids, return_index=True)
    # Order the unique ids by where they first occur in the input.
    ordered = unique_ids[np.argsort(first_pos)]
    lookup = {old: new for new, old in enumerate(ordered)}
    return [lookup[tid] for tid in track_ids]
def _make_tracklets(detections, track_ids):
tracklets = defaultdict(list)
num_tracklets = np.max(track_ids) + 1
assert(len(detections) == len(track_ids))
for d, tid in zip(detections, track_ids):
tracklets[tid].append(d)
# make sure to sort by frame
for tid in tracklets:
tracklets[tid].sort(key=lambda x:x['frame'])
return list(tracklets.values())
def _find_edge_pairs(tracklets, max_frame_diff):
pairs = []
start_frames = np.array([int(t[0]['frame']) for t in tracklets])
stop_frames = np.array([int(t[-1]['frame']) for t in tracklets])
lengths = np.array([len(t) for t in tracklets])
length_based_max_diffs = np.clip(2 * lengths, 0, max_frame_diff)
for tid1, start in enumerate(start_frames):
diffs = start - stop_frames
max_diffs = np.clip(length_based_max_diffs, 0, length_based_max_diffs[tid1])
# TODO: Make this parameterized from strategy
tid0 = np.argwhere(np.logical_and(diffs > 0, diffs <= max_frame_diff)) #diffs <= max_diffs))
pairs += [(t[0], tid1) for t in tid0]
return pairs
def _find_constraints(tracklets):
constraints = []
frame_ranges = np.array([
[int(t[0]['frame']), int(t[-1]['frame'])] for t in tracklets
])
for tid0, (start0, stop0) in enumerate(frame_ranges):
in_range = np.logical_and(frame_ranges >= start0, frame_ranges <= stop0)
in_range = np.logical_or(in_range[:, 0], in_range[:, 1])
tid1 = np.argwhere(in_range)
constraints += [(tid0, t[0]) for t in tid1 if t[0] != tid0]
return constraints
def _tracklets_to_ids(tracklets, track_ids):
    """Flatten tracklets back into a detection list with per-detection ids.

    :param tracklets: List of tracklets (lists of detections).
    :param track_ids: One id per tracklet, parallel to `tracklets`.
    :return: Tuple (detections, det_ids) where det_ids are renumbered to
        consecutive integers via renumber_track_ids.
    """
    assert len(tracklets) == len(track_ids)
    detections = []
    det_ids = []
    for tracklet, tid in zip(tracklets, track_ids):
        detections.extend(tracklet)
        det_ids.extend([tid] * len(tracklet))
    det_ids = renumber_track_ids(det_ids)
    return (detections, det_ids)
def join_tracklets(
        detections,
        track_ids,
        max_frame_diff,
        weight_strategy
):
    """Run one iteration of tracklet merging via constrained multicut.

    Pipeline: renumber ids -> group detections into tracklets -> propose
    candidate edges (frame gap <= max_frame_diff) -> forbid merges between
    temporally overlapping tracklets -> score candidate edges with
    `weight_strategy` -> solve with constrained greedy additive edge
    contraction -> flatten merged tracklets back into a detection list.

    :param detections: List of detection dicts (each must contain 'frame').
    :param track_ids: Track id per detection, parallel to `detections`.
    :param max_frame_diff: Maximum frame gap for a candidate join.
    :param weight_strategy: Object whose `compute(tracklets, pairs)` returns
        one weight per candidate pair.
    :return: Tuple (new_dets, new_ids, pairs, weights, is_cut, constraints),
        where is_cut[i] tells whether candidate pair i ended up separated.
    """
    print(f"{datetime.now()}: Renumbering track IDs...")
    new_ids = renumber_track_ids(track_ids)
    print(f"{datetime.now()}: Creating tracklets...")
    tracklets = _make_tracklets(detections, new_ids)
    print(f"{datetime.now()}: Finding edges...")
    pairs = _find_edge_pairs(tracklets, max_frame_diff)
    print(f"{datetime.now()}: Finding constraints...")
    constraints = _find_constraints(tracklets)
    print(f"{datetime.now()}: Computing edge weights...")
    weights = weight_strategy.compute(tracklets, pairs)
    print(f"{datetime.now()}: Constructing graph...")
    # One graph vertex per tracklet; edges are the candidate joins.
    graph = Graph()
    graph.insertVertices(len(set(new_ids)))
    for p0, p1 in pairs:
        graph.insertEdge(int(p0), int(p1))
    # Copy weights/constraints into the C++ vector types expected by the solver.
    weights_vec = FloatVec()
    for w in weights:
        weights_vec.append(w)
    constraints_vec = PairVec()
    for c0, c1 in constraints:
        constraints_vec.append(LongPair(int(c0), int(c1)))
    arg = LongVec()
    print(f"{datetime.now()}: Solving graph...")
    # `arg` receives the component label per vertex (tracklet).
    constrainedGreedyAdditiveEdgeContraction(graph, weights_vec, constraints_vec, arg)
    print(f"{datetime.now()}: Aggregating edge cut status...")
    # An edge is "cut" when its endpoints were assigned different components.
    is_cut = [arg[int(p0)] != arg[int(p1)] for p0, p1 in pairs]
    print(f"{datetime.now()}: Converting back to detection list...")
    new_dets, new_ids = _tracklets_to_ids(tracklets, arg)
    print(f"{datetime.now()}: Iteration complete!")
    return (new_dets, new_ids, pairs, weights, is_cut, constraints)
|
{"hexsha": "c314ff96234ee596398413560ee7b440f5f60bcc", "size": 4941, "ext": "py", "lang": "Python", "max_stars_repo_path": "deploy_python/openem/tracking/graph_utils.py", "max_stars_repo_name": "openem-team/openem", "max_stars_repo_head_hexsha": "45222c9c77084eacab278da25a8734ae7d43f677", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-01-23T23:58:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-30T19:42:35.000Z", "max_issues_repo_path": "deploy_python/openem/tracking/graph_utils.py", "max_issues_repo_name": "openem-team/openem", "max_issues_repo_head_hexsha": "45222c9c77084eacab278da25a8734ae7d43f677", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-03-20T15:21:41.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-18T18:49:38.000Z", "max_forks_repo_path": "deploy_python/openem/tracking/graph_utils.py", "max_forks_repo_name": "openem-team/openem", "max_forks_repo_head_hexsha": "45222c9c77084eacab278da25a8734ae7d43f677", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-08T17:39:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-09T01:27:17.000Z", "avg_line_length": 38.3023255814, "max_line_length": 100, "alphanum_fraction": 0.6593806922, "include": true, "reason": "import numpy", "num_tokens": 1238}
|
import numpy as np
import keras
from keras import backend as K
from keras.layers.core import Dense
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import Model
from keras.applications import imagenet_utils
from sklearn.metrics import confusion_matrix
from PIL import Image
mobile = keras.applications.mobilenet.MobileNet()
def prepare_image(file, img_path='images/', target_size=(224, 224)):
    """Load an image from disk and preprocess it for MobileNet inference.

    Generalized: the previously hard-coded 'images/' directory and 224x224
    size are now defaulted parameters, so callers are unaffected.

    Args:
        file: Image file name, resolved relative to `img_path`.
        img_path: Directory containing the image (default 'images/').
        target_size: (height, width) the image is resized to; MobileNet's
            default input is 224x224.

    Returns:
        A MobileNet-preprocessed array with a leading batch dimension.
    """
    img = image.load_img(img_path + file, target_size=target_size)
    img_array = image.img_to_array(img)
    # Add the batch dimension expected by Keras models.
    img_array_expanded_dims = np.expand_dims(img_array, axis=0)
    return keras.applications.mobilenet.preprocess_input(img_array_expanded_dims)
|
{"hexsha": "8867d713464dc5a67cd98650434be2f7e58d5600", "size": 781, "ext": "py", "lang": "Python", "max_stars_repo_path": "server/model.py", "max_stars_repo_name": "codergab/Vusion", "max_stars_repo_head_hexsha": "44ed404112943531e645db5e4cb03fe44d08a2ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "server/model.py", "max_issues_repo_name": "codergab/Vusion", "max_issues_repo_head_hexsha": "44ed404112943531e645db5e4cb03fe44d08a2ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "server/model.py", "max_forks_repo_name": "codergab/Vusion", "max_forks_repo_head_hexsha": "44ed404112943531e645db5e4cb03fe44d08a2ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5416666667, "max_line_length": 81, "alphanum_fraction": 0.8130601793, "include": true, "reason": "import numpy", "num_tokens": 165}
|
using LazyArrays, ArrayLayouts, LinearAlgebra, FillArrays
import LazyArrays: materialize!, MemoryLayout, triangulardata, LazyLayout, UnknownLayout, LazyMatrix
# used to test general matrix backends
# MyMatrix wraps a plain Matrix but subtypes LazyMatrix, so the generic
# lazy mul/ldiv code paths (LazyLayout dispatch) are exercised on it.
struct MyMatrix{T} <: LazyMatrix{T}
    A::Matrix{T}
end

# Array-like constructors and conversions so generic code can allocate
# and convert MyMatrix values.
MyMatrix{T}(::UndefInitializer, n::Int, m::Int) where T = MyMatrix{T}(Array{T}(undef, n, m))
MyMatrix(A::AbstractMatrix{T}) where T = MyMatrix{T}(Matrix{T}(A))
Base.convert(::Type{MyMatrix{T}}, A::MyMatrix{T}) where T = A
Base.convert(::Type{MyMatrix{T}}, A::MyMatrix) where T = MyMatrix(convert(AbstractArray{T}, A.A))
Base.convert(::Type{MyMatrix}, A::MyMatrix)= A
Base.convert(::Type{AbstractArray{T}}, A::MyMatrix) where T = MyMatrix(convert(AbstractArray{T}, A.A))
Base.convert(::Type{AbstractMatrix{T}}, A::MyMatrix) where T = MyMatrix(convert(AbstractArray{T}, A.A))
Base.convert(::Type{MyMatrix{T}}, A::AbstractArray{T}) where T = MyMatrix{T}(A)
Base.convert(::Type{MyMatrix{T}}, A::AbstractArray) where T = MyMatrix{T}(convert(AbstractArray{T}, A))
Base.convert(::Type{MyMatrix}, A::AbstractArray{T}) where T = MyMatrix{T}(A)

# Indexing, mutation and size delegate to the wrapped Matrix.
Base.getindex(A::MyMatrix, kj...) = A.A[kj...]
Base.getindex(A::MyMatrix, ::Colon, j::Integer) = A.A[:,j]
Base.getindex(A::MyMatrix, ::Colon, j::AbstractVector) = MyMatrix(A.A[:,j])
Base.setindex!(A::MyMatrix, v, kj...) = setindex!(A.A, v, kj...)
Base.size(A::MyMatrix) = size(A.A)

# similar/factorize let solves materialize results and factor the dense data.
Base.similar(::Type{MyMatrix{T}}, m::Int, n::Int) where T = MyMatrix{T}(undef, m, n)
Base.similar(::MyMatrix{T}, m::Int, n::Int) where T = MyMatrix{T}(undef, m, n)
Base.similar(::MyMatrix, ::Type{T}, m::Int, n::Int) where T = MyMatrix{T}(undef, m, n)
LinearAlgebra.factorize(A::MyMatrix) = factorize(A.A)
# MyLazyArray wraps an Array of any dimension and explicitly declares a
# LazyLayout memory layout, exercising lazy dispatch without being a
# LazyMatrix subtype.
struct MyLazyArray{T,N} <: LazyArray{T,N}
    data::Array{T,N}
end

Base.size(A::MyLazyArray) = size(A.data)
Base.getindex(A::MyLazyArray, j::Int...) = A.data[j...]
LazyArrays.MemoryLayout(::Type{<:MyLazyArray}) = LazyLayout()
LinearAlgebra.factorize(A::MyLazyArray) = factorize(A.data)
# Test suite: checks that the LazyLayout-backed wrappers above interoperate
# with apply(*)/apply(\), ldiv!, structured matrices and broadcast arrays.
@testset "lazymul/ldiv tests" begin
    # Multiplication: lazy wrappers against plain arrays, adjoints,
    # triangular/diagonal wrappers, and Apply/Broadcast arrays.
    @testset "*" begin
        A = randn(5,5)
        B = randn(5,5)
        x = randn(5)
        @test MyMatrix(A)*x ≈ apply(*,MyMatrix(A),x) ≈ A*x
        @test MemoryLayout(MyMatrix(A)) isa LazyLayout
        @test all(MyMatrix(A)*MyMatrix(A) .=== apply(*,MyMatrix(A),MyMatrix(A)))
        @test all(MyMatrix(A)*A .=== apply(*,MyMatrix(A),A))
        @test all(A*MyMatrix(A) .=== apply(*,A,MyMatrix(A)))
        @test MyMatrix(A)*MyMatrix(A) ≈ MyMatrix(A)*A ≈ A*MyMatrix(A) ≈ A^2
        @test MyMatrix(A)*MyMatrix(A)*MyMatrix(A) ≈ apply(*,MyMatrix(A),MyMatrix(A),MyMatrix(A)) ≈ A^3
        @test all(UpperTriangular(A) * MyMatrix(A) .=== apply(*,UpperTriangular(A), MyMatrix(A)))
        @test all(MyMatrix(A) * UpperTriangular(A) .=== apply(*, MyMatrix(A),UpperTriangular(A)))
        @test all(Diagonal(A) * MyMatrix(A) .=== apply(*,Diagonal(A), MyMatrix(A)))
        @test all(MyMatrix(A) * Diagonal(A) .=== apply(*, MyMatrix(A),Diagonal(A)))
        @test all(MyMatrix(A)' * x .=== apply(*,MyMatrix(A)',x))
        @test all(MyMatrix(A)' * MyMatrix(A)' .=== apply(*,MyMatrix(A)', MyMatrix(A)'))
        @test all(MyMatrix(A)' * A' .=== apply(*,MyMatrix(A)', A'))
        @test all(A' * MyMatrix(A)' .=== apply(*,MyMatrix(A)', MyMatrix(A)'))
        @test all(MyMatrix(A)' * MyMatrix(A) .=== apply(*,MyMatrix(A)', MyMatrix(A)))
        @test all(MyMatrix(A)' * A .=== apply(*,MyMatrix(A)', A))
        @test all(MyMatrix(A) * MyMatrix(A)' .=== apply(*,MyMatrix(A), MyMatrix(A)'))
        @test all(A * MyMatrix(A)' .=== apply(*,A, MyMatrix(A)'))
        @test all(UpperTriangular(A) * MyMatrix(A) .=== apply(*,UpperTriangular(A), MyMatrix(A)))
        @test all(MyMatrix(A) * UpperTriangular(A) .=== apply(*, MyMatrix(A),UpperTriangular(A)))
        @test all(Diagonal(A) * MyMatrix(A)' .=== apply(*,Diagonal(A), MyMatrix(A)'))
        @test all(MyMatrix(A)' * Diagonal(A) .=== apply(*,MyMatrix(A)',Diagonal(A)))
        @test all(UpperTriangular(A) * MyMatrix(A)' .=== apply(*,UpperTriangular(A), MyMatrix(A)'))
        @test all(MyMatrix(A)' * UpperTriangular(A) .=== apply(*,MyMatrix(A)',UpperTriangular(A)))
        @test ApplyArray(\, MyMatrix(A), x)[1,1] ≈ (A\x)[1]
        @test MyMatrix(A)\x ≈ apply(\,MyMatrix(A),x) ≈ copyto!(similar(x),Ldiv(A,copy(x))) ≈ A\x
        @test eltype(applied(\,MyMatrix(A),x)) == eltype(apply(\,MyMatrix(A),x)) == eltype(MyMatrix(A)\x) == Float64
        @test MyMatrix(A)\MyMatrix(B) ≈ MyMatrix(A)\B ≈ apply(\,MyMatrix(A),B) ≈ copyto!(similar(B),Ldiv(A,copy(B))) ≈ A\B
        @test eltype(applied(\,MyMatrix(A),B)) == eltype(apply(\,MyMatrix(A),B)) == eltype(MyMatrix(A)\B) == Float64
        @test MyMatrix(A) * ApplyArray(exp,B) ≈ apply(*, MyMatrix(A),ApplyArray(exp,B)) ≈ A*exp(B)
        @test ApplyArray(exp,A) * MyMatrix(B) ≈ apply(*, ApplyArray(exp,A), MyMatrix(B)) ≈ exp(A)*B
        @test ApplyArray(exp,A) * ApplyArray(exp,B) ≈ apply(*, ApplyArray(exp,A),ApplyArray(exp,B)) ≈ exp(A)*exp(B)
        @test MyMatrix(A) * BroadcastArray(exp,B) ≈ apply(*, MyMatrix(A),BroadcastArray(exp,B)) ≈ A*exp.(B)
        @test BroadcastArray(exp,A) * MyMatrix(B) ≈ apply(*, BroadcastArray(exp,A), MyMatrix(B)) ≈ exp.(A)*B
        @test BroadcastArray(exp,A) * BroadcastArray(exp,B) ≈ apply(*, BroadcastArray(exp,A),BroadcastArray(exp,B)) ≈ exp.(A)*exp.(B)
    end
    # Left division: out-of-place and in-place (ldiv!), square and
    # rectangular, plus dimension-mismatch errors.
    @testset "\\" begin
        A = randn(5,5)
        B = randn(5,5)
        x = randn(5)
        @test MyMatrix(A) \ x == apply(\, MyMatrix(A), x)
        @test ldiv!(MyMatrix(A), copy(x)) == materialize!(Ldiv(MyMatrix(A), copy(x)))
        @test MyMatrix(A) \ x ≈ ldiv!(MyMatrix(A), copy(x)) ≈ A\x
        @test MyMatrix(A) \ B == apply(\, MyMatrix(A), B)
        @test ldiv!(MyMatrix(A), copy(B)) == materialize!(Ldiv(MyMatrix(A), copy(B)))
        @test MyMatrix(A) \ B ≈ MyMatrix(A) \ MyMatrix(B) ≈ ldiv!(MyMatrix(A), copy(B)) ≈ A\B
        @test_broken ldiv!(MyMatrix(A), MyMatrix(copy(B))) ≈ A\B
        C = randn(5,3)
        @test all(MyMatrix(C)\x .=== apply(\,MyMatrix(C),x))
        @test MyMatrix(C)\x ≈ C\x
        @test all(MyMatrix(C)\B .=== apply(\,MyMatrix(C),B))
        @test MyMatrix(C)\B ≈ C\B
        @test_throws DimensionMismatch apply(\,MyMatrix(C),randn(4))
        @test_throws DimensionMismatch apply(\,MyMatrix(C),randn(4,3))
    end
    # MyLazyArray: apply results stay lazy (ApplyVector/ApplyMatrix) and
    # layouts propagate through wrappers/views.
    @testset "Lazy" begin
        A = MyLazyArray(randn(2,2))
        B = MyLazyArray(randn(2,2))
        x = MyLazyArray(randn(2))
        @test apply(*,A,x) isa ApplyVector
        @test apply(*,A,Array(x)) isa ApplyVector
        @test apply(*,Array(A),x) isa ApplyVector
        @test apply(*,A,x) ≈ apply(*,Array(A),x) ≈ apply(*,A,Array(x)) ≈ Array(A)*Array(x)
        @test apply(*,A,B) isa ApplyMatrix
        @test apply(*,A,Array(B)) isa ApplyMatrix
        @test apply(*,Array(A),B) isa ApplyMatrix
        @test apply(*,A,B) ≈ apply(*,Array(A),B) ≈ apply(*,A,Array(B)) ≈ Array(A)*Array(B)
        @test apply(\,A,x) isa ApplyVector
        @test apply(\,A,Array(x)) isa ApplyVector
        @test apply(\,Array(A),x) isa ApplyVector
        @test apply(\,A,x) ≈ apply(\,Array(A),x) ≈ apply(\,A,Array(x)) ≈ Array(A)\Array(x)
        @test apply(\,A,B) isa ApplyMatrix
        @test apply(\,A,Array(B)) isa ApplyMatrix
        @test apply(\,Array(A),B) isa ApplyMatrix
        @test apply(\,A,B) ≈ apply(\,Array(A),B) ≈ apply(\,A,Array(B)) ≈ Array(A)\Array(B)
        Ap = applied(*,A,x)
        @test copyto!(similar(Ap), Ap) == A*x
        @test copyto!(similar(Ap,BigFloat), Ap) ≈ A*x
        @test MemoryLayout(typeof(Diagonal(x))) isa DiagonalLayout{LazyLayout}
        @test MemoryLayout(typeof(Diagonal(ApplyArray(+,x,x)))) isa DiagonalLayout{LazyLayout}
        @test MemoryLayout(typeof(Diagonal(1:6))) isa DiagonalLayout{UnknownLayout}
        @test MemoryLayout(typeof(A')) isa LazyLayout
        @test MemoryLayout(typeof(transpose(A))) isa LazyLayout
        @test MemoryLayout(typeof(view(A,1:2,1:2))) isa LazyLayout
        @test MemoryLayout(typeof(reshape(A,4))) isa LazyLayout
    end
    # Multiplying by a QR Q factor.
    @testset "QR" begin
        B = MyMatrix(randn(3,3))
        Q = qr(randn(3,3)).Q
        @test Q * B ≈ Q*B.A
    end
    # Method-ambiguity regressions: adjoint/transpose vectors times lazy vectors.
    @testset "ambiguities" begin
        A = randn(5,5)
        b = MyLazyArray(randn(5))
        c = randn(5)
        c̃ = complex.(c)
        @test A*b isa ApplyVector{Float64,typeof(*)}
        @test UpperTriangular(A)*b isa ApplyVector{Float64,typeof(*)}
        @test A*b ≈ A*Vector(b)
        @test UpperTriangular(A)*b ≈ UpperTriangular(A)*Vector(b)
        @test c'b ≈ c̃'b ≈ c'Vector(b)
        @test transpose(c)b ≈ transpose(c̃)b ≈ transpose(c)Vector(b)
    end
    # InvMatrix products behave like left division.
    @testset "InvMatrix" begin
        A = randn(5,5)
        B = randn(5,5)
        b = MyLazyArray(randn(5))
        M = ApplyArray(*, B, b)
        @test InvMatrix(A) * b ≈ A \ b
        @test InvMatrix(A) * M ≈ A \ B * b
        @test ArrayLayouts.ldiv(MyMatrix(A), M) ≈ A\ B * b
    end
    # copy/map/inv on structured wrappers of lazy vectors, and the
    # corresponding layout dispatch rules.
    @testset "Tri/Diagonal" begin
        b = MyLazyArray(randn(5))
        c = MyLazyArray(randn(4))
        d = MyLazyArray(randn(3))
        @test copy(Diagonal(b)) == Diagonal(copy(b))
        @test map(copy, Diagonal(b)) == Diagonal(copy(b))
        @test inv(Diagonal(b)) == inv(Diagonal(b.data))
        @test inv(Diagonal(b)) isa Diagonal{Float64,<:BroadcastVector}
        @test copy(Tridiagonal(c, b, c)) == Tridiagonal(copy(c), copy(b), copy(c))
        @test copy(Tridiagonal(c, b, c, d)) == Tridiagonal(copy(c), copy(b), copy(c), copy(d))
        @test copy(Tridiagonal(c, b, c, d)).du2 == d
        @test map(copy, Tridiagonal(c, b, c)) == Tridiagonal(copy(c), copy(b), copy(c))
        @test map(copy, Tridiagonal(c, b, c, d)) == Tridiagonal(copy(c), copy(b), copy(c), copy(d))
        @test map(copy, Tridiagonal(c, b, c, d)).du2 == d
        @test MemoryLayout(Tridiagonal(c, b, c)) isa TridiagonalLayout{LazyLayout,LazyLayout,LazyLayout}
        @test MemoryLayout(SymTridiagonal(b, c)) isa SymTridiagonalLayout{LazyLayout,LazyLayout}
        @test MemoryLayout(Bidiagonal(b, c, :U)) isa BidiagonalLayout{LazyLayout,LazyLayout}
        @test LazyArrays.tridiagonallayout(UnknownLayout(), UnknownLayout(), LazyLayout()) isa TridiagonalLayout{LazyLayout,LazyLayout,LazyLayout}
        @test LazyArrays.tridiagonallayout(UnknownLayout(), LazyLayout(), UnknownLayout()) isa TridiagonalLayout{LazyLayout,LazyLayout,LazyLayout}
        @test LazyArrays.tridiagonallayout(LazyLayout(), UnknownLayout(), UnknownLayout()) isa TridiagonalLayout{LazyLayout,LazyLayout,LazyLayout}
        @test LazyArrays.tridiagonallayout(UnknownLayout(), LazyLayout(), LazyLayout()) isa TridiagonalLayout{LazyLayout,LazyLayout,LazyLayout}
        @test LazyArrays.tridiagonallayout(LazyLayout(), UnknownLayout(), LazyLayout()) isa TridiagonalLayout{LazyLayout,LazyLayout,LazyLayout}
        @test LazyArrays.tridiagonallayout(LazyLayout(), LazyLayout(), UnknownLayout()) isa TridiagonalLayout{LazyLayout,LazyLayout,LazyLayout}
        @test LazyArrays.symtridiagonallayout(UnknownLayout(), LazyLayout()) isa SymTridiagonalLayout{LazyLayout,LazyLayout}
        @test LazyArrays.symtridiagonallayout(LazyLayout(), UnknownLayout()) isa SymTridiagonalLayout{LazyLayout,LazyLayout}
        @test LazyArrays.bidiagonallayout(UnknownLayout(), LazyLayout()) isa BidiagonalLayout{LazyLayout,LazyLayout}
        @test LazyArrays.bidiagonallayout(LazyLayout(), UnknownLayout()) isa BidiagonalLayout{LazyLayout,LazyLayout}
    end
    # Zeros short-circuits nested lazy broadcasts.
    @testset "Nested" begin
        a = MyLazyArray(randn(5))
        @test a .\ rand(5) .* Zeros(5) ≡ Zeros(5)
        @test broadcast(*, Zeros(5), Base.broadcasted(\, a, rand(5))) ≡ Zeros(5)
    end
    # Applied products on the left of \.
    @testset "inv" begin
        A = randn(5,5)
        B = randn(5,5)
        M = ApplyArray(*, A, B)
        b = randn(5)
        @test M \ MyLazyArray(b) ≈ M \ b
    end
    # Identity (Eye) times lazy arrays.
    @testset "Diagonal Fill" begin
        b = randn(5)
        B = randn(5,5)
        @test Eye(5) * MyLazyArray(b) == b
        @test MyLazyArray(B) * Eye(5) == B
    end
    # Views of broadcast expressions re-wrapped as BroadcastArray.
    @testset "LazyBroadcast" begin
        a = MyLazyArray(randn(5))
        b = a .^ 2
        @test BroadcastArray(view(b,1:3)) == Vector(a)[1:3] .^2
    end
    # Mixed Apply/Broadcast products and divisions.
    @testset "Apply*Broadcast" begin
        A = randn(5,5)
        B = randn(5,5)
        @test ApplyArray(*, A, B) * BroadcastArray(*, A, B) ≈ (A*B) * (A .* B)
        @test BroadcastArray(*, A, B) * ApplyArray(*, A, B) ≈ (A .* B) * (A*B)
        @test ApplyArray(*, A, B) \ BroadcastArray(*, A, B) ≈ (A*B) \ (A .* B)
        @test BroadcastArray(*, A, B) \ ApplyArray(*, A, B) ≈ (A .* B) \ (A * B)
        @test BroadcastArray(*, A, B) \ BroadcastArray(*, A, B) ≈ (A .* B) \ (A .* B)
    end
    # Indexing an applied product with a vector of CartesianIndex.
    @testset "CartesianIndex view" begin
        A = randn(5,5)
        B = randn(5,5)
        M = ApplyArray(*, A, B)
        @test layout_getindex(M,[CartesianIndex(1,2),CartesianIndex(3,3)]) == M[[CartesianIndex(1,2),CartesianIndex(3,3)]] == [M[1,2], M[3,3]]
    end
end
|
{"hexsha": "577332913ad0e72ab8c772e55a3ecced602470c3", "size": 12905, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/lazymultests.jl", "max_stars_repo_name": "johnbcoughlin/LazyArrays.jl", "max_stars_repo_head_hexsha": "6fcc9b900aa147222b259037fd48d4698ad1ad54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/lazymultests.jl", "max_issues_repo_name": "johnbcoughlin/LazyArrays.jl", "max_issues_repo_head_hexsha": "6fcc9b900aa147222b259037fd48d4698ad1ad54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/lazymultests.jl", "max_forks_repo_name": "johnbcoughlin/LazyArrays.jl", "max_forks_repo_head_hexsha": "6fcc9b900aa147222b259037fd48d4698ad1ad54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.6346153846, "max_line_length": 146, "alphanum_fraction": 0.6082913599, "num_tokens": 4157}
|
"""
The functions in this module calculate different graph-level properties.
The first function is a wrapper that
subsamples networks from a list of null models to output a dataframe of set sizes.
"""
__author__ = 'Lisa Rottjers'
__email__ = 'lisa.rottjers@kuleuven.be'
__status__ = 'Development'
__license__ = 'Apache 2.0'
import pandas as pd
import networkx as nx
from random import sample
import numpy as np
import os
def generate_graph_frame(networks, random, degree, fractions, core, perm):
    """
    Estimates graph-level properties of all networks provided in
    the network, random and degree collections.

    The random and degree dicts are structured as follows:
    ---Entry per original network (keyed like `networks`)
    ---List of permutations per original network (length n in generate_null)
    The 'core' entries are structured as follows:
    ---Dict of all shared fractions (keys from `fractions`)
    ---Dict keyed by core prevalence (keys from `core`)
    ---List of permuted network sets

    Returns a pandas dataframe with one row per (network, property),
    covering the input networks, `perm` subsampled sets from each null
    model, and (if requested) the core-constrained null models.

    :param networks: Dict of input networks, keyed by file path
    :param random: Dictionary with permuted input networks without preserved degree distribution
    :param degree: Dictionary with permuted input networks with preserved degree distribution
    :param fractions: List with fractions of shared interactions
    :param core: List with prevalence of shared interactions
    :param perm: Number of sets to take from null models
    :return: Pandas dataframe with graph-level properties
    """
    # Create empty pandas dataframe
    results = pd.DataFrame(columns=['Network', 'Name', 'Group', 'Network type',
                                    'Conserved fraction',
                                    'Prevalence of conserved fraction',
                                    'Property', 'Value'])
    for x in networks:
        group = os.path.basename(x)
        # Properties of the observed input networks.
        results = _generate_graph_rows(name='Input', data=results, group=group,
                                       networks=networks[x], fraction=None, prev=None, perm=None)
        # Subsample one permuted network per replicate from each null model,
        # repeated `perm` times.
        for i in range(perm):
            degreeperm = [sample(degree[x]['degree'][r], 1)[0] for r in range(len(degree[x]['degree']))]
            results = _generate_graph_rows(name='Degree', data=results, group=group,
                                           networks=degreeperm, fraction=None, prev=None, perm=i)
            randomperm = [sample(random[x]['random'][r], 1)[0] for r in range(len(random[x]['random']))]
            results = _generate_graph_rows(name='Random', data=results, group=group,
                                           networks=randomperm, fraction=None, prev=None, perm=i)
        if fractions:
            # Bug fix: the null-model dicts are keyed by the full path `x`
            # (as in every other access in this function, including
            # random[x]['core'] below), not by the basename `group`; using
            # `group` here would raise KeyError whenever the path has a
            # directory component.
            num_models = len(random[x]['core'][fractions[0]][core[0]])
            for frac in fractions:
                for c in core:
                    for i in range(num_models):
                        degreeperm = degree[x]['core'][frac][c][i]
                        randomperm = random[x]['core'][frac][c][i]
                        results = _generate_graph_rows(name='Degree', data=results, group=group,
                                                       networks=degreeperm, fraction=frac, prev=c, perm=None)
                        results = _generate_graph_rows(name='Random', data=results, group=group,
                                                       networks=randomperm, fraction=frac, prev=c, perm=None)
    return results
def _generate_graph_rows(data, name, group, networks, fraction, prev, perm):
    """
    Generates Pandas rows with network measures for a list of networks.

    :param data: Pandas dataframe the new rows are appended to
    :param name: Name for the list of NetworkX objects
    :param group: Name for grouping NetworkX objects
    :param networks: List of (name, NetworkX graph) tuples
    :param fraction: If a null model with core is provided, adds the core fraction to the row
    :param prev: If a null model with core is provided, adds the core prevalence to the row
    :param perm: Iteration of graph subsampling, necessary for permutation testing
    :return: Pandas dataframe with added rows
    """
    full_name = name + ' networks'
    if fraction:
        name += ' size: ' + str(fraction) + ' prev:' + str(prev)
    properties = generate_graph_properties(networks)
    # Collect all rows first and concatenate once: DataFrame.append was
    # deprecated in pandas 1.4 and removed in 2.0, and appending row-by-row
    # is quadratic.
    rows = []
    for prop_name in properties:
        for network in properties[prop_name]:
            rows.append({'Network': name,
                         'Name': network[0],
                         'Group': group,
                         'Network type': full_name,
                         'Conserved fraction': fraction,
                         'Prevalence of conserved fraction': prev,
                         'Property': prop_name,
                         'Value': network[1],
                         'iteration': perm})
    if rows:
        data = pd.concat([data, pd.DataFrame(rows)], ignore_index=True)
    return data
def generate_graph_properties(networks):
    """
    Computes graph-level properties for each network.

    If a graph is not connected, distance-based properties (diameter,
    radius, average shortest path length) are calculated on its largest
    connected component.

    :param networks: List of (name, NetworkX graph) tuples
    :return: Dict mapping property name -> list of (name, value) tuples;
        value is None for empty graphs.
    """
    properties = dict()
    property_names = ['Assortativity', 'Connectivity', 'Diameter', 'Radius', 'Average shortest path length']
    for prop in property_names:
        properties[prop] = list()
    for network in networks:
        net_name, graph = network[0], network[1]
        if len(graph.nodes) > 0:
            properties['Assortativity'].append((net_name,
                                                nx.degree_pearson_correlation_coefficient(graph)))
            properties['Connectivity'].append((net_name,
                                               nx.average_node_connectivity(graph)))
            if nx.is_connected(graph):
                target = graph
            else:
                # Fall back to the largest connected component for
                # distance-based measures.
                components = list(nx.connected_components(graph))
                target = nx.subgraph(graph, max(components, key=len))
            properties['Diameter'].append((net_name, nx.diameter(target)))
            properties['Radius'].append((net_name, nx.radius(target)))
            properties['Average shortest path length'].append((net_name,
                                                               nx.average_shortest_path_length(target)))
        else:
            # Bug fix: the original appended a bare None here, while every
            # consumer (e.g. _generate_graph_rows) indexes network[0] and
            # network[1]; keep the (name, value) tuple shape with a None value.
            for prop in property_names:
                properties[prop].append((net_name, None))
    return properties
|
{"hexsha": "da0638de422e26f8cf322702447473851bc7b08e", "size": 7733, "ext": "py", "lang": "Python", "max_stars_repo_path": "anuran/graphvals.py", "max_stars_repo_name": "ramellose/anuran", "max_stars_repo_head_hexsha": "8541f9cedbca00981257564fb8562d46fa5f5cab", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-03-05T16:11:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T17:56:57.000Z", "max_issues_repo_path": "anuran/graphvals.py", "max_issues_repo_name": "ramellose/anuran", "max_issues_repo_head_hexsha": "8541f9cedbca00981257564fb8562d46fa5f5cab", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-05T11:28:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-24T09:13:35.000Z", "max_forks_repo_path": "anuran/graphvals.py", "max_forks_repo_name": "ramellose/anuran", "max_forks_repo_head_hexsha": "8541f9cedbca00981257564fb8562d46fa5f5cab", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-03T07:50:44.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-03T07:50:44.000Z", "avg_line_length": 51.5533333333, "max_line_length": 112, "alphanum_fraction": 0.6080434501, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1508}
|
// Copyright 2017 The Ray Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ray/core_worker/transport/thread_pool_manager.h"
#include <boost/asio/post.hpp>
namespace ray {
namespace core {
/// Wraps a thread-pool to block posts until the pool has free slots. This is used
/// by the SchedulingQueue to provide backpressure to clients.
///
/// \param max_concurrency Maximum number of tasks allowed to run at once;
///        also the number of threads in the underlying boost pool.
BoundedExecutor::BoundedExecutor(int max_concurrency)
    : num_running_(0), max_concurrency_(max_concurrency), pool_(max_concurrency) {}
/// Posts work to the pool, blocking if no free threads are available.
///
/// Waits until `num_running_ < max_concurrency_` (see ThreadsAvailable),
/// reserves a slot while holding `mu_`, then schedules `fn` on the pool.
/// The slot is released on the pool thread after `fn` returns.
void BoundedExecutor::PostBlocking(std::function<void()> fn) {
  // LockWhen blocks until the condition holds; absl evaluates the
  // ThreadsAvailable predicate with mu_ held.
  mu_.LockWhen(absl::Condition(this, &BoundedExecutor::ThreadsAvailable));
  num_running_ += 1;
  mu_.Unlock();
  boost::asio::post(pool_, [this, fn]() {
    fn();
    // Release the reserved slot so blocked posters can proceed.
    absl::MutexLock lock(&mu_);
    num_running_ -= 1;
  });
}
/// Stop the thread pool.
void BoundedExecutor::Stop() { pool_.stop(); }
/// Join the thread pool.
void BoundedExecutor::Join() { pool_.join(); }
/// Predicate used as the absl::Condition in PostBlocking. It reads
/// num_running_, so it must only be evaluated while mu_ is held (absl's
/// Condition machinery guarantees this for the PostBlocking call site).
bool BoundedExecutor::ThreadsAvailable() { return num_running_ < max_concurrency_; }
/// Build one bounded executor per declared concurrency group and index it
/// both by the group's name and by every function descriptor assigned to it.
PoolManager::PoolManager(const std::vector<ConcurrencyGroup> &concurrency_groups,
                         const int32_t default_group_max_concurrency) {
  for (const auto &group : concurrency_groups) {
    auto executor = std::make_shared<BoundedExecutor>(group.max_concurrency);
    // Allow lookup by any function descriptor declared in this group.
    for (const auto &fd : group.function_descriptors) {
      functions_to_thread_pool_index_[fd->ToString()] = executor;
    }
    name_to_thread_pool_index_[group.name] = executor;
  }
  // If max concurrency of default group is 1, the tasks of default group
  // will be performed in main thread instead of any executor pool.
  if (default_group_max_concurrency > 1) {
    default_thread_pool_ =
        std::make_shared<BoundedExecutor>(default_group_max_concurrency);
  }
}
/// Look up the executor pool a task should run on.
///
/// \param concurrency_group_name Explicit group name; when non-empty it must
///        name a declared group (checked via RAY_CHECK).
/// \param fd Function descriptor used as a fallback key when no group name
///        was supplied.
/// \return The matching pool, or default_thread_pool_ (which may be null when
///         the default group's max concurrency is 1 — see the constructor).
std::shared_ptr<BoundedExecutor> PoolManager::GetPool(
    const std::string &concurrency_group_name, ray::FunctionDescriptor fd) {
  if (!concurrency_group_name.empty()) {
    auto it = name_to_thread_pool_index_.find(concurrency_group_name);
    /// TODO(qwang): Fail the user task.
    RAY_CHECK(it != name_to_thread_pool_index_.end());
    return it->second;
  }
  /// Code path of that this task wasn't specified in a concurrency group
  /// additionally. Use the predefined concurrency group.
  // Single lookup instead of find-then-operator[], which hashed the key and
  // called fd->ToString() twice.
  const auto func_it = functions_to_thread_pool_index_.find(fd->ToString());
  if (func_it != functions_to_thread_pool_index_.end()) {
    return func_it->second;
  }
  return default_thread_pool_;
}
/// Stop and join the thread pools that the pool manager owns.
///
/// The default pool (if any) is stopped and joined first. The named group
/// pools are then all stopped before any of them is joined, so none keeps
/// accepting work while its siblings shut down.
void PoolManager::Stop() {
  if (default_thread_pool_) {
    RAY_LOG(DEBUG) << "Default pool is stopping.";
    default_thread_pool_->Stop();
    RAY_LOG(INFO) << "Default pool is joining. If the 'Default pool is joined.' "
                     "message is not printed after this, the worker is probably "
                     "hanging because the actor task is running an infinite loop.";
    default_thread_pool_->Join();
    RAY_LOG(INFO) << "Default pool is joined.";
  }
  // Stop every named pool first ...
  for (const auto &it : name_to_thread_pool_index_) {
    it.second->Stop();
  }
  // ... then join them all.
  for (const auto &it : name_to_thread_pool_index_) {
    it.second->Join();
  }
}
} // namespace core
} // namespace ray
|
{"hexsha": "5bfa82965a83e80a21f8aacf43d8ba2ff28fa889", "size": 3885, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/ray/core_worker/transport/thread_pool_manager.cc", "max_stars_repo_name": "daobook/ray", "max_stars_repo_head_hexsha": "af9f1ef4dc160e0671206556b387f8017f3c3930", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 33.0, "max_stars_repo_stars_event_min_datetime": "2020-05-27T14:25:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T06:11:30.000Z", "max_issues_repo_path": "src/ray/core_worker/transport/thread_pool_manager.cc", "max_issues_repo_name": "daobook/ray", "max_issues_repo_head_hexsha": "af9f1ef4dc160e0671206556b387f8017f3c3930", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 107.0, "max_issues_repo_issues_event_min_datetime": "2021-01-23T08:03:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T06:24:02.000Z", "max_forks_repo_path": "src/ray/core_worker/transport/thread_pool_manager.cc", "max_forks_repo_name": "daobook/ray", "max_forks_repo_head_hexsha": "af9f1ef4dc160e0671206556b387f8017f3c3930", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2020-08-06T15:53:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T03:31:31.000Z", "avg_line_length": 36.308411215, "max_line_length": 86, "alphanum_fraction": 0.7132561133, "num_tokens": 917}
|
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
C code and some fundamental functions
'''
from pyscf.lib import parameters
param = parameters
from pyscf.lib import numpy_helper
from pyscf.lib import linalg_helper
from pyscf.lib import logger
from pyscf.lib.misc import *
from pyscf.lib.numpy_helper import *
from pyscf.lib.linalg_helper import *
from pyscf.lib import chkfile
from pyscf.lib import diis
from pyscf.lib.misc import StreamObject
|
{"hexsha": "162173341e35fbc0c6ce22b666ae3dd0f368b269", "size": 449, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/__init__.py", "max_stars_repo_name": "gmwang18/pyscf", "max_stars_repo_head_hexsha": "fcd6877751661c8a9743c1c872a4a2b65f6dd7ac", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/__init__.py", "max_issues_repo_name": "gmwang18/pyscf", "max_issues_repo_head_hexsha": "fcd6877751661c8a9743c1c872a4a2b65f6dd7ac", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/__init__.py", "max_forks_repo_name": "gmwang18/pyscf", "max_forks_repo_head_hexsha": "fcd6877751661c8a9743c1c872a4a2b65f6dd7ac", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.45, "max_line_length": 43, "alphanum_fraction": 0.7928730512, "include": true, "reason": "import numpy", "num_tokens": 108}
|
import numpy as np
import torch
import torch.nn as nn
from mbmf import utils
class HybridAgent(nn.Module):
    """Hybrid model-based / model-free agent.

    Combines an ensemble of SAC agents with an MPC planner: the SAC agents
    roll candidate actions through the learned ensemble dynamics model, and
    the per-timestep (mean, std) of their proposed actions seeds the
    planner's action distribution.

    Args:
        sac_agents: iterable of SAC agents (model-free proposal policies).
        planner: MPC planner callable; invoked as
            ``planner(state, action_mean=..., action_std=..., is_torch=...)``.
        ensemble_model: learned dynamics ensemble used to roll states forward.
        buffer: replay buffer used for SAC updates (may be ``None``).
        action_dim: dimensionality of the action space.
        L: optional logger forwarded to the planner.
        stochastic: stochastic-action flag (see ``toggle_stochastic``).
        update_sac: whether to update the SAC agents after each action.
        warm_up: when True, skip SAC proposals and let the planner start
            from its default zero-mean Gaussian.
        n_sac_updates: number of SAC update rounds per forward call.
        cem_std: fixed action std used when ``use_stds`` is False.
        device: torch device string.
    """
    def __init__(self,
                 sac_agents,
                 planner,
                 ensemble_model,
                 buffer,
                 action_dim,
                 L=None,
                 stochastic=False,
                 update_sac=True,
                 warm_up=False,
                 n_sac_updates=1,
                 cem_std=1.0,
                 device='cpu'):
        super().__init__()
        self.sac_agents = sac_agents
        self.planner = planner
        self.ensemble_model = ensemble_model
        self.buffer = buffer
        self.action_dim = action_dim
        self.n_sac_updates = n_sac_updates
        self.cem_std = cem_std
        self.L = L
        self.stochastic = stochastic
        self.update_sac = update_sac
        self.warm_up = warm_up
        self.device = device
        self._global_step = 0  # counts completed SAC update rounds
    def toggle_updates(self, update_sac):
        """Enable/disable SAC agent updates after each forward call."""
        self.update_sac = update_sac
    def toggle_stochastic(self, stochastic):
        """Enable/disable the stochastic-action flag."""
        self.stochastic = stochastic
    def toggle_warm_up(self, warm_up):
        """ warm up means just init zero mean Gausian """
        self.warm_up = warm_up
    def forward(self, state, use_stds=True):
        """Select an action for ``state`` via the planner, optionally seeded
        by the SAC agents' proposed trajectories."""
        # TODO make use_stds a param
        # TODO revisit trajectory weighting (disabled for now)
        weighting = False
        if self.warm_up:
            # No SAC proposals: planner uses its default zero-mean Gaussian.
            action = self.planner(state.squeeze(), action_mean=None, action_std=None, is_torch=torch.is_tensor(state))
        else:
            n_agents = len(self.sac_agents)
            if weighting:
                weights = np.zeros(n_agents)
            mus = np.zeros((self.planner.plan_horizon, n_agents, self.action_dim))
            pis = np.zeros((self.planner.plan_horizon, n_agents, self.action_dim))
            # (state_dim, ) -> (n_agents, ensemble_size, state_dim)
            state_tensor = torch.tensor(state).float().to(self.device)
            state_tensor = state_tensor.unsqueeze(0).unsqueeze(0).repeat(n_agents, self.planner.ensemble_size, 1)
            # Roll each agent's policy through the learned dynamics model.
            for t in range(self.planner.plan_horizon):
                # average across ensemble -> (n_agents, state_dim)
                avg_state_tensor = state_tensor.mean(dim=1)
                # FIXME convert all numpy
                np_avg_state_tensor = avg_state_tensor.detach().cpu().numpy()
                # store actions taken (n_agents, action_dim)
                actions = np.zeros((n_agents, self.action_dim))
                for a, agent in enumerate(self.sac_agents):
                    # agent states -> (state_dim, )
                    agent_states = np_avg_state_tensor[a, :]
                    # select action (deterministic mu, sampled pi)
                    mu, pi = agent.select_and_sample_action(agent_states)
                    actions[a, :] = mu
                    mus[t, a, :] = mu
                    pis[t, a, :] = pi
                # propagate actions: actions -> (n_agents, action_dim)
                actions = torch.tensor(actions).float().to(self.device)
                if weighting:
                    # average over ensemble
                    rewards = self.planner.reward_measure(state_tensor.mean(1).to(self.device), actions=actions)
                    weights += rewards.detach().cpu().numpy()
                # NOTE(review): the double squeeze(2) below assumes a singleton
                # batch dim in the model output -- confirm against
                # ensemble_model.forward_agents.
                state_tensor = self.ensemble_model.forward_agents(avg_state_tensor, actions).squeeze(2)
                state_tensor = state_tensor.squeeze(2)
            # mus -> (plan_horizon, n_agents, action_dim) -> (plan_horizon, action_dim)
            if not weighting:
                action_mean = np.mean(mus, axis=1)
            else:
                action_mean = np.average(mus, axis=1, weights=weights)
            action_mean = torch.tensor(action_mean).float().to(self.device)
            if use_stds:
                # Seed the planner's std with the disagreement between agents.
                action_std = np.std(mus, axis=1)
                action_std = torch.tensor(action_std).float().to(self.device)
                action_std = torch.clamp(action_std, -10**6, 1.0)
            else:
                action_std = torch.ones_like(action_mean) * self.cem_std
            state = torch.tensor(state).float().squeeze().to(self.device)
            # (Removed leftover debug prints and a stray `bib` token that
            # raised NameError whenever this branch executed.)
            action = self.planner(state, action_mean=action_mean, action_std=action_std, is_torch=True, L=self.L)
        # NOTE should seperate this out
        if self.update_sac and self.buffer is not None:
            for _ in range(self.n_sac_updates):
                for sac_agent in self.sac_agents:
                    sac_agent.update(self.buffer, None, self._global_step)
            self._global_step += 1
        return action
|
{"hexsha": "bfa53c0f20474ec4d4fd045c5796a033922752fc", "size": 5487, "ext": "py", "lang": "Python", "max_stars_repo_path": "mbmf/control/hybrid.py", "max_stars_repo_name": "BerenMillidge/state-augmentation", "max_stars_repo_head_hexsha": "b3834b3a99be9854ecf83b6d83e9fbb2184f4861", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mbmf/control/hybrid.py", "max_issues_repo_name": "BerenMillidge/state-augmentation", "max_issues_repo_head_hexsha": "b3834b3a99be9854ecf83b6d83e9fbb2184f4861", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mbmf/control/hybrid.py", "max_forks_repo_name": "BerenMillidge/state-augmentation", "max_forks_repo_head_hexsha": "b3834b3a99be9854ecf83b6d83e9fbb2184f4861", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8255033557, "max_line_length": 119, "alphanum_fraction": 0.5538545653, "include": true, "reason": "import numpy", "num_tokens": 1170}
|
"""
FMA Helpers
"""
from typing import List, Optional, Tuple, Union
import numpy as np
import pandas as pd
_LIST_GENRE_COLUMNS: Tuple[str, ...] = ("track_genres", "track_genres_all")
def join_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Flatten a multi-level column index into underscore-joined names.

    Mutates ``df`` in place and returns it so calls can be chained.
    """
    flattened = []
    for level_values in df.columns:
        flattened.append("_".join(level_values))
    df.columns = flattened
    return df
def sort_list_genres(row: pd.Series) -> pd.Series:
    """Reorder each list-of-genres field so the row's top genre comes first.

    Within the ``_LIST_GENRE_COLUMNS`` fields (``track_genres`` and
    ``track_genres_all``), the entry equal to ``track_genre_top`` is moved to
    the front; every other genre keeps its original relative order.
    """
    def rank(genre: str, column: str) -> int:
        # The top genre sorts before position 0; the rest keep their index.
        if genre == row["track_genre_top"]:
            return -1
        return row[column].index(genre)

    for column in _LIST_GENRE_COLUMNS:
        value = row[column]
        if isinstance(value, list):
            row[column] = sorted(value, key=lambda g: rank(g, column))
    return row
def column_filter(
    df: pd.DataFrame,
    column: str,
    values: Optional[Union[str, List[str]]] = None,
) -> pd.DataFrame:
    """Keep rows whose ``column`` value matches ``values``, case-insensitively.

    ``values`` may be a single string or a list of strings. When it is None,
    the frame is returned unchanged.
    """
    if values is None:
        return df
    if isinstance(values, str):
        wanted = [values.lower()]
    else:
        wanted = [v.lower() for v in values]
    mask = df[column].str.lower().isin(wanted).values
    return df[mask]
class TrackLicenseFilter:
    """Base class for predicates that select dataframe rows by track license."""

    def filter(self, track_license: pd.Series) -> np.ndarray:
        # Subclasses must return a boolean mask aligned with the series.
        raise NotImplementedError()

    def __call__(self, df: pd.DataFrame) -> pd.DataFrame:
        mask = self.filter(df["track_license"].str.lower())
        return df[mask]
class PdTrackLicenseFilter(TrackLicenseFilter):
    """Selects tracks whose license text mentions "public domain"."""

    def filter(self, track_license: pd.Series) -> np.ndarray:
        normalized = track_license.astype(str).str.strip().str.lower()
        return normalized.str.contains("public domain").values
|
{"hexsha": "fec2cd0ae9f2b6198c2cb3ef0f41999539a84a3a", "size": 1706, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/fma/data/_helpers.py", "max_stars_repo_name": "TariqAHassan/wav2rec", "max_stars_repo_head_hexsha": "8d3f33291f246d80a4935cf7aa2cc75f110d9c15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-11-12T03:58:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T08:13:30.000Z", "max_issues_repo_path": "experiments/fma/data/_helpers.py", "max_issues_repo_name": "TariqAHassan/wav2rec", "max_issues_repo_head_hexsha": "8d3f33291f246d80a4935cf7aa2cc75f110d9c15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/fma/data/_helpers.py", "max_forks_repo_name": "TariqAHassan/wav2rec", "max_forks_repo_head_hexsha": "8d3f33291f246d80a4935cf7aa2cc75f110d9c15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-12T03:58:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-12T03:58:05.000Z", "avg_line_length": 26.65625, "max_line_length": 76, "alphanum_fraction": 0.6307151231, "include": true, "reason": "import numpy", "num_tokens": 417}
|
(* This code is copyrighted by its authors; it is distributed under *)
(* the terms of the LGPL license (see LICENSE and description files) *)
(****************************************************************************)
(* *)
(* *)
(* Solange Coupet-Grimal & Line Jakubiec-Jamet *)
(* *)
(* *)
(* Laboratoire d'Informatique Fondamentale de Marseille *)
(* CMI et Faculté des Sciences de Luminy *)
(* *)
(* e-mail:{Solange.Coupet,Line.Jakubiec}@lif.univ-mrs.fr *)
(* *)
(* *)
(*                           Developed in Coq v6                            *)
(* Ported to Coq v7 *)
(* Translated to Coq v8 *)
(* *)
(*                             July 12th 2005                               *)
(* *)
(****************************************************************************)
(* Lib_Plus.v *)
(****************************************************************************)
Require Export Lib_Minus.
(* (n + m) - m = n: subtracting what was just added is the identity on nat. *)
Lemma plus_opp : forall n m : nat, n + m - m = n.
intros n m; elim (plus_comm m n); apply minus_plus.
Qed.
Hint Immediate plus_opp.
(* Successor expressed as addition: S n = n + 1. *)
Lemma S_plus : forall n : nat, S n = n + 1.
intro; elim plus_comm; auto with arith.
Qed.
Hint Immediate S_plus.
(* Adding a strictly positive n on the left strictly increases m. *)
Lemma lt_plus : forall n m : nat, 0 < n -> m < n + m.
simple induction n; simple induction m; auto with arith.
intros.
simpl in |- *; apply lt_n_S.
elim plus_comm; simpl in |- *.
elim plus_comm; apply H0; auto with arith.
Qed.
Hint Immediate lt_plus.
(* Truncated subtraction never exceeds addition: n - m <= n + m. *)
Lemma le_minus_plus : forall n m : nat, n - m <= n + m.
simple induction n; auto with arith.
Qed.
Hint Immediate le_minus_plus.
(* When n is below both m and p, the subtraction can be moved across the
   sum: (m - n) + p = m + (p - n). *)
Lemma le_le_assoc_plus_minus :
 forall n m p : nat, n <= m -> n <= p -> m - n + p = m + (p - n).
intros.
elim H.
elim minus_n_n; simpl in |- *; elim le_plus_minus; auto with arith.
intros.
elim minus_Sn_m; simpl in |- *.
apply eq_S; auto with arith.
assumption.
Qed.
Hint Immediate le_le_assoc_plus_minus.
(* Mixed monotonicity of +: n <= p and m < q imply n + m < p + q. *)
Lemma le_lt_plus : forall n m p q : nat, n <= p -> m < q -> n + m < p + q.
intros.
apply lt_le_trans with (n + q).
apply plus_lt_compat_l; try trivial.
apply plus_le_compat_r; try trivial.
Qed.
(* A sum of naturals is zero only when both summands are zero. *)
Lemma plus_eq_zero : forall a b : nat, a + b = 0 -> a = 0 /\ b = 0.
intros a b H.
split; apply sym_equal; apply le_n_O_eq; elim H; auto with arith.
Qed.
Hint Immediate plus_eq_zero.
(* Transpose the left addend across <=: from n + m <= p derive m <= p - n. *)
Lemma le_transp_l : forall n m p : nat, n + m <= p -> m <= p - n.
simple induction n; intros.
simpl in H; elim minus_n_O; assumption.
elim H0.
elim plus_comm; rewrite plus_opp; auto with arith.
intros.
simpl in |- *; apply H; auto with arith.
Qed.
Hint Immediate le_transp_l.
(* Symmetric form of le_transp_l: from n + m <= p derive n <= p - m. *)
Lemma le_transp_r : forall n m p : nat, n + m <= p -> n <= p - m.
intros.
apply le_transp_l.
elim plus_comm; assumption.
Qed.
Hint Immediate le_transp_r.
|
{"author": "coq-contribs", "repo": "fairisle", "sha": "e36087a6b7e52ef3c6dcfdeba1298e8fde1260a0", "save_path": "github-repos/coq/coq-contribs-fairisle", "path": "github-repos/coq/coq-contribs-fairisle/fairisle-e36087a6b7e52ef3c6dcfdeba1298e8fde1260a0/Libraries/Lib_Arithmetic/Lib_Plus.v"}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.