text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
[STATEMENT]
lemma knows'_sub_knows: "knows' A evs <= knows A evs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. knows' A evs \<subseteq> knows A evs
[PROOF STEP]
by (auto simp: knows_decomp)
|
{"llama_tokens": 81, "file": null, "length": 1}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 26 14:44:34 2018
Workdir = F:\jTKount\1226
Filename = feature_sel.py
Describe: Some basic methods for feature selection;
Reference: Luo Bin; blog:http://www.cnblogs.com/hhh5460/p/5186226.html
@author: OrenLi1042420545
"""
import numpy as np
import pandas as pd
from sklearn import datasets
from minepy import MINE
from scipy.spatial.distance import pdist, squareform
from sklearn.model_selection import cross_val_score, ShuffleSplit
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Lasso, Ridge
from sklearn.metrics import r2_score
from collections import defaultdict
from stability_selection import randomized_lasso
from sklearn.feature_selection import RFE
def distcorr(X, Y):
    """Distance correlation between two one-dimensional samples.

    Accepts array-likes; 1-D inputs are promoted to column vectors.

    >>> a = [1,2,3,4,5]
    >>> b = np.array([1,2,9,4,4])
    >>> distcorr(a, b)
    0.762676242417
    """
    x = np.atleast_1d(X)
    y = np.atleast_1d(Y)
    # Promote flat vectors to (n, 1) column matrices for pdist.
    if np.prod(x.shape) == len(x):
        x = x[:, None]
    if np.prod(y.shape) == len(y):
        y = y[:, None]
    x = np.atleast_2d(x)
    y = np.atleast_2d(y)
    n = x.shape[0]
    if y.shape[0] != n:
        raise ValueError('Number of samples must match')
    # Pairwise-distance matrices, then double centering.
    dist_x = squareform(pdist(x))
    dist_y = squareform(pdist(y))
    cen_x = dist_x - dist_x.mean(axis=0)[None, :] - dist_x.mean(axis=1)[:, None] + dist_x.mean()
    cen_y = dist_y - dist_y.mean(axis=0)[None, :] - dist_y.mean(axis=1)[:, None] + dist_y.mean()
    # Squared distance covariance / variances.
    denom = float(n * n)
    cov_xy = (cen_x * cen_y).sum() / denom
    var_x = (cen_x * cen_x).sum() / denom
    var_y = (cen_y * cen_y).sum() / denom
    return np.sqrt(cov_xy) / np.sqrt(np.sqrt(var_x) * np.sqrt(var_y))
if __name__ == '__main__':
    # ##########################################################################
    # Configuration.
    # data_name: which sklearn toy dataset to load: 'house', 'iris', 'cancer'
    data_name = 'house'
    # method_choise: which family of feature-selection methods to run (2-5)
    method_choise = 2
    # method_name2 (method_choise == 2, univariate scores):
    #   'pearson', 'MIC', 'Distance', 'Model_based'
    method_name2 = 'Model_based'
    # method_name3 (method_choise == 3, linear models): 'linear', 'lasso', 'ridge'
    method_name3 = 'ridge'
    # method_name4 (method_choise == 4, random forest): 'MDI', 'MDA'
    method_name4 = 'MDI'
    # method_name5 (method_choise == 5): 'Stab_sel', 'RFE'
    method_name5 = 'RFE'
    # ##########################################################################
    # load data
    # NOTE: the output column is named 'FeatrueImportance' (sic); kept for
    # backward compatibility with downstream consumers of this script.
    re_feMol = pd.DataFrame()
    if data_name == 'house':
        # NOTE(review): load_boston was removed in scikit-learn 1.2; this
        # script requires an older sklearn -- confirm the pinned version.
        LSdat = datasets.load_boston()
    elif data_name == 'iris':
        LSdat = datasets.load_iris()
    elif data_name == 'cancer':
        LSdat = datasets.load_breast_cancer()
    # ##########################################################################
    # get into model
    if method_choise == 2:
        # 2. Univariate feature selection: score each feature independently
        # against the target.
        for i in range(LSdat.data.shape[1]):
            if method_name2 == 'pearson':
                # Pearson correlation between target and feature i.
                re_feMol.loc[i, 'FeatrueImportance'] = np.corrcoef(
                    LSdat.target, LSdat.data[:, i])[0, 1]
            elif method_name2 == 'MIC':  # notice H0
                # Maximal information coefficient (minepy).
                m = MINE()
                m.compute_score(LSdat.target, LSdat.data[:, i])
                re_feMol.loc[i, 'FeatrueImportance'] = m.mic()
            elif method_name2 == 'Distance':
                re_feMol.loc[i, 'FeatrueImportance'] = distcorr(
                    LSdat.target, LSdat.data[:, i])
            elif method_name2 == 'Model_based':
                # Cross-validated R^2 of a small forest trained on feature i
                # alone.
                rf = RandomForestRegressor(n_estimators=20, max_depth=4)
                re_feMol.loc[i, 'FeatrueImportance'] = np.mean(cross_val_score(
                    rf, LSdat.data[:, i:i+1], LSdat.target, scoring='r2',
                    cv = ShuffleSplit(n_splits=10,
                                      test_size=0.1, random_state=0)))
            re_feMol.loc[i, 'ind'] = 'Feature_' + str(i)
    # ##########################################################################
    elif method_choise == 3:
        # 3. Linear models with/without regularization; coefficients on
        # standardized features serve as importances.
        scaler = StandardScaler()
        if method_name3 == 'linear':
            lr = LinearRegression()
            lr.fit(scaler.fit_transform(LSdat.data), LSdat.target)
            re_feMol['FeatrueImportance'] = lr.coef_
        elif method_name3 == 'lasso':
            lasso = Lasso(alpha=.3)
            lasso.fit(scaler.fit_transform(LSdat.data), LSdat.target)
            re_feMol['FeatrueImportance'] = lasso.coef_
        elif method_name3 == 'ridge':
            ridge = Ridge(alpha=10)
            ridge.fit(scaler.fit_transform(LSdat.data), LSdat.target)
            re_feMol['FeatrueImportance'] = ridge.coef_
        re_feMol['ind'] = ['Feature_' + str(i) for i in range(LSdat.data.shape[1])]
    # ##########################################################################
    elif method_choise == 4:
        # 4. Random forest importances.
        if method_name4 == 'MDI':
            # Mean decrease in impurity: built-in sklearn importances.
            rf = RandomForestRegressor()
            rf.fit(LSdat.data, LSdat.target)
            re_feMol['FeatrueImportance'] = rf.feature_importances_
        if method_name4 == 'MDA':
            # Mean decrease in accuracy: drop in held-out R^2 after permuting
            # one feature at a time.
            rf = RandomForestRegressor()
            names = LSdat.feature_names
            scores = defaultdict(list)
            for train_idx, test_idx in ShuffleSplit(n_splits=10,
                                                    test_size=0.1,
                                                    random_state=0).split(LSdat.data):
                rf.fit(LSdat.data[train_idx], LSdat.target[train_idx])
                acc = r2_score(LSdat.target[test_idx],
                               rf.predict(LSdat.data[test_idx]))
                for i in range(LSdat.data.shape[1]):
                    X_t = LSdat.data[test_idx, :].copy()
                    np.random.shuffle(X_t[:, i])
                    shuff_acc = r2_score(LSdat.target[test_idx], rf.predict(X_t))
                    scores[names[i]].append((acc-shuff_acc)/acc)
            # dict preserves insertion order (py3.7+), so this stays aligned
            # with feature order.
            re_feMol['FeatrueImportance'] = [np.mean(score) for k, score in scores.items()]
        # Assign labels for both MDI and MDA branches.
        re_feMol['ind'] = ['Feature_' + str(i) for i in range(LSdat.data.shape[1])]
    # ##########################################################################
    elif method_choise == 5:
        if method_name5 == 'Stab_sel':
            # Stability selection via randomized lasso.
            rlasso = randomized_lasso.RandomizedLasso(alpha=0.025)
            rlasso.fit(LSdat.data, LSdat.target)
            re_feMol['FeatrueImportance'] = rlasso.coef_
        if method_name5 == 'RFE':
            # Recursive feature elimination down to 1 feature; convert ranking
            # (1 = best) into a descending importance score.
            lr = LinearRegression()
            rfe = RFE(lr, n_features_to_select=1)
            rfe.fit(LSdat.data, LSdat.target)
            re_feMol['FeatrueImportance'] = LSdat.data.shape[1] - rfe.ranking_
        # BUGFIX: 'ind' was previously only assigned on the Stab_sel path, so
        # running with method_name5 == 'RFE' made set_index('ind') below raise
        # KeyError. Assign it once for both branches.
        re_feMol['ind'] = ['Feature_' + str(i) for i in range(LSdat.data.shape[1])]
    # ##########################################################################
    # table format: index by feature label, sort by |importance| descending.
    re_feMol = re_feMol.set_index('ind')
    re_feMol['sort_help'] = re_feMol['FeatrueImportance'].abs()
    re_feMol = re_feMol.sort_values(
        by = 'sort_help', ascending=False).drop('sort_help', axis=1)
|
{"hexsha": "95f1539d1f23fc6a99efcc9b23585f22e454a8a0", "size": 7239, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_sel.py", "max_stars_repo_name": "kala-oro/Feature_select", "max_stars_repo_head_hexsha": "611aaa7e4ebfeebdf030a90aced3218fae4bc66c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-27T07:11:13.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-27T07:39:53.000Z", "max_issues_repo_path": "feature_sel.py", "max_issues_repo_name": "kala-oro/Feature_select", "max_issues_repo_head_hexsha": "611aaa7e4ebfeebdf030a90aced3218fae4bc66c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "feature_sel.py", "max_forks_repo_name": "kala-oro/Feature_select", "max_forks_repo_head_hexsha": "611aaa7e4ebfeebdf030a90aced3218fae4bc66c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.9627329193, "max_line_length": 92, "alphanum_fraction": 0.5263157895, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1850}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sparse operators"""
import numpy as np
import scipy.sparse as sp
import tvm
from tvm import relay, te
from .. import nn
from ..util import traverse_inline
def sparse_dense(data, weight_data, weight_indices, weight_indptr):
    """
    Computes sparse-dense matrix multiplication of `data` and
    `(weight_data, weight_indices, weight_indptr).T`

    Parameters
    ----------
    data : tvm.te.Tensor
        2-D with shape [M, K], float32

    weight_data : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        3-D with shape [num_blocks, bs_r, bs_c] (BSR)

    weight_indices : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        1-D with shape [num_blocks] (BSR)

    weight_indptr : tvm.te.Tensor
        1-D with shape [N + 1] (CSR) or
        1-D with shape [(N + 1) // bs_r] (BSR)

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [M, N]
    """
    # Note: the docstring previously documented a `cfg` parameter that this
    # function never took; it has been removed. The compute is delegated to
    # the generic topi implementation -- only the schedule is cuda-specific.
    return nn.sparse_dense(data, weight_data, weight_indices, weight_indptr)
def schedule_sparse_dense(outs):
    """Create a CUDA schedule for sparse dense.

    Walks the op graph of `outs` and, for each op tagged
    "sparse_dense_bsrmm", binds its axes to CUDA block/thread axes and
    rfactors the inner reduction across threadIdx.x.
    """
    # pylint:disable=invalid-name
    s = te.create_schedule([x.op for x in outs])
    def _callback(op):
        if op.tag == "sparse_dense_bsrmm":
            # The bsrmm op's first input is the per-block partial product.
            y_bsrmm = op.input_tensors[0]
            assert y_bsrmm.op.tag == "sparse_dense_bsrmm_block"
            out = s.outputs[0].output(0)
            if op not in s.outputs:
                # Fuse the intermediate reshape into the final output stage.
                y_reshape = op.output(0)
                s[y_reshape].compute_at(s[out], s[out].op.axis[1])
            (_, c) = s[y_bsrmm].op.reduce_axis
            # One CUDA block per (m, n) output tile.
            (m_o, n_o) = s[out].op.axis
            s[out].bind(m_o, te.thread_axis("blockIdx.x"))
            s[out].bind(n_o, te.thread_axis("blockIdx.y"))
            s[y_bsrmm].compute_at(s[out], n_o)
            thread_x = te.thread_axis("threadIdx.x")
            # Split the reduction across threads via rfactor, then make only
            # thread 0 perform the final store.
            y_bsrmm_factored = s.rfactor(y_bsrmm, c)
            tx = s[y_bsrmm].op.reduce_axis[0]
            s[y_bsrmm].bind(tx, thread_x)
            s[y_bsrmm_factored].compute_at(s[y_bsrmm], tx)
            s[y_bsrmm].set_store_predicate(thread_x.var.equal(0))
            s[out].set_store_predicate(thread_x.var.equal(0))
    traverse_inline(s, outs[0].op, _callback)
    return s
def schedule_cuda_transpose(s, out):
    """Schedule for transpose on the gpu.

    Roughly follows this:
    https://developer.nvidia.com/blog/efficient-matrix-transpose-cuda-cc/, but
    without the padding for shared memory. For better performance, we could
    rewrite it in tir to add the padding.
    """
    def _callback(op):
        # pylint: disable=invalid-name
        m, n = s[op].op.axis
        warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
        # Tile the output into warp_size x warp_size tiles, one per block.
        no, ni = s[op].split(n, factor=warp_size)
        mo, mi = s[op].split(m, factor=warp_size)
        s[op].reorder(mo, no, mi, ni)
        s[op].bind(mo, te.thread_axis("blockIdx.x"))
        s[op].bind(no, te.thread_axis("blockIdx.y"))
        # Stage the input tile through shared memory.
        c = s.cache_read(op.input_tensors[0], "shared", op)
        s[c].compute_at(s[op], no)
        thread_x = te.thread_axis("threadIdx.x")
        thread_y = te.thread_axis("threadIdx.y")
        s[op].bind(ni, thread_x)
        # This is a hack to make the scheduling language realize that this axis
        # can be scheduled.
        a, _ = s[c].split(s[c].op.axis[1], factor=1)
        s[c].bind(a, thread_x)
        # Use 4 warps per block. Slightly faster than 1 warp per block
        ao, _ = s[op].split(mi, nparts=4)
        s[op].bind(ao, thread_y)
        ao, _ = s[c].split(s[c].op.axis[0], nparts=4)
        s[c].bind(ao, thread_y)
    traverse_inline(s, out.op, _callback)
def sparse_dense_tir(data, w_data, w_indices, w_indptr):
    """Compute data * w^T.

    Actually computes (w * data^T) ^ T as data needs to be in column-major
    format for performance reasons.

    Good resources:
    Yang, Carl, Aydın Buluç, and John D. Owens. "Design principles for sparse
    matrix multiplication on the GPU." European Conference on Parallel
    Processing. Springer, Cham, 2018. <- This code is basically row-split from here.
    Gale, Trevor, et al. "Sparse GPU Kernels for Deep Learning." arXiv preprint
    arXiv:2006.10901 (2020).

    Profile with
    `/opt/nvidia/nsight-compute/2020.1.2/ncu -k default_function_kernel1
    --section '.*' -s 1 -c 1 venv/bin/python3 test_topi_sparse.py manual`
    with either default_function_kernel0 for the transpose or
    default_function_kernel1 for the multiply.
    """
    def ceil_div(a, b):
        # Integer ceiling division, used for grid sizing.
        return (a + (b - 1)) // b
    def gen_ir(data, w_data, w_indices, w_indptr, out):
        # Build the multiply kernel with the TIR ir_builder.
        # pylint: disable=invalid-name
        # TODO(tkonolige): use tensorcores for block multiply
        # TODO(tkonolige): use vectorize on loads
        # TODO(tkonolige): seperate implementation if M is small
        # TODO(tkonolige): seperate implementation for large block sizes
        ib = tvm.tir.ir_builder.create()
        warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
        # `data` arrives transposed, so shape[1] is the dense-row count M.
        m = data.shape[1]
        nb = w_indptr.shape[0] - 1
        nnzb = w_data.shape[0]
        # treat csr like block size 1 bsr
        if len(w_data.shape) == 1:
            bs_n = 1
            bs_k = 1
        else:
            bs_n = w_data.shape[1]
            bs_k = w_data.shape[2]
        bs_m = bs_n
        mb = m // bs_m
        mi = warp_size
        assert (
            mb >= mi
        ), "Number of block rows in dense matrix must be larger than warp size: {} vs {}.".format(
            warp_size, m
        )
        mo = ceil_div(mb, mi)
        ni = 1  # TODO(tkonolige): how do I compute the number of warps per block?
        no = ceil_div(nb, ni)
        rowlength_bi = warp_size
        # Launch geometry: blockIdx.x over dense block-row tiles, blockIdx.y
        # over sparse block-rows, threadIdx.x over a warp, threadIdx.y over
        # warps within a block (currently ni == 1).
        bx = te.thread_axis("blockIdx.x")
        ib.scope_attr(bx, "thread_extent", mo)
        by = te.thread_axis("blockIdx.y")
        ib.scope_attr(by, "thread_extent", no)
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", warp_size)
        warp = te.thread_axis("threadIdx.y")
        ib.scope_attr(warp, "thread_extent", ni)
        out_ptr = ib.buffer_ptr(out)
        data_ptr = ib.buffer_ptr(data)
        w_data_ptr = ib.buffer_ptr(w_data, shape=(nnzb, bs_n, bs_k))
        w_indices_ptr = ib.buffer_ptr(w_indices)
        w_indptr_ptr = ib.buffer_ptr(w_indptr)
        n_index = by * ni + warp
        m_index = bx * mi + tx
        row_start = w_indptr_ptr[n_index]
        # Guaranteed to be evenly divisible
        rowlength_bo = ceil_div(w_indptr_ptr[n_index + 1] - row_start, rowlength_bi)
        # thread local storage for bs_m x bs_n block
        block = ib.allocate(data.dtype, (bs_m, bs_n), name="block", scope="local")
        indices = ib.allocate(w_indices.dtype, (rowlength_bi,), name="indices", scope="warp")
        data_cache = ib.allocate(data.dtype, (mi, bs_m, bs_k), name="data_cache", scope="local")
        w_data_cache = ib.allocate(
            w_data.dtype, (rowlength_bi, bs_n, bs_k), name="w_data_cache", scope="warp"
        )
        # zero block
        with ib.for_range(0, bs_m, name="x", for_type="unroll") as x:
            with ib.for_range(0, bs_n, name="y", for_type="unroll") as y:
                block[x, y] = 0.0
        # compute into thread local storage using warp_size chunks
        with ib.for_range(0, rowlength_bo, name="bb") as bb:
            elem_idx = bb * rowlength_bi + tx
            # Cache indices. Guaranteed to be multiple of warp_size.
            # NOTE(review): `indices` is allocated with extent rowlength_bi but
            # is indexed with elem_idx, which exceeds that extent when bb > 0;
            # compare with w_data_cache below which indexes with tx. Looks
            # suspicious -- TODO confirm against upstream.
            indices[elem_idx] = w_indices_ptr[row_start + elem_idx]
            # cache dense matrix
            # each thread has a row
            # TODO: ideally we could vectorize this
            with ib.for_range(0, rowlength_bi, name="bi") as bi:
                with ib.for_range(0, bs_m, name="x", for_type="unroll") as x:
                    with ib.for_range(0, bs_k, name="z", for_type="unroll") as z:
                        # This memory access should be out of bounds when
                        # m_index >= mb (which occurs when the dense matrix
                        # rows % 32 != 0), but it seems to work just fine...
                        data_cache[bi, x, z] = data_ptr[indices[bi] * bs_k + z, m_index * bs_m + x]
            # cache w_data
            elem_idx = bb * rowlength_bi + tx
            with ib.for_range(0, bs_n, name="y", for_type="unroll") as y:
                with ib.for_range(0, bs_k, name="z", for_type="unroll") as z:
                    w_data_cache[tx, y, z] = w_data_ptr[row_start + elem_idx, y, z]
            with ib.for_range(0, mi, name="i") as i:
                # thread local block matmul
                with ib.for_range(0, bs_m, name="x", for_type="unroll") as x:
                    with ib.for_range(0, bs_n, name="y", for_type="unroll") as y:
                        with ib.for_range(0, bs_k, name="z", for_type="unroll") as z:
                            block[x, y] += data_cache[i, x, z] * w_data_cache[i, y, z]
        # store results
        with ib.for_range(0, bs_m, name="x", for_type="unroll") as x:
            with ib.for_range(0, bs_n, name="y", for_type="unroll") as y:
                with ib.if_scope(m_index < mb):
                    with ib.if_scope(n_index < nb):
                        # It doesn't seem like we would be getting coalesced
                        # writes here, but it doesn't seem to matter
                        out_ptr[m_index * bs_m + x, n_index * bs_n + y] = block[x, y]
        return ib.get()
    # Transpose data into column-major form for the kernel (see docstring).
    data_t = tvm.topi.transpose(data)
    # handle csr
    if len(w_data.shape) == 1:
        blocksize = 1
    else:
        blocksize = w_data.shape[1]
    out_shape = (data_t.shape[1], (w_indptr.shape[0] - 1) * blocksize)
    out_buf = tvm.tir.decl_buffer(out_shape, data.dtype, "out_buf")
    out = te.extern(
        [out_shape],
        [data_t, w_data, w_indices, w_indptr, data],
        lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
        dtype=data.dtype,
        out_buffers=[out_buf],
        name="sparse_dense_gpu",
        tag="sparse_dense_gpu",
    )
    return out
def sparse_dense_padded(data, weight_data, weight_indices, weight_indptr):
    """
    Computes sparse-dense matrix multiplication of `data` and
    `(weight_data, weight_indices, weight_indptr).T`

    This variation uses a padded matrix where all row lengths are a multiple of the warp size.

    Parameters
    ----------
    data : tvm.te.Tensor
        2-D with shape [M, K], float32

    weight_data : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        3-D with shape [num_blocks, bs_r, bs_c] (BSR)

    weight_indices : tvm.te.Tensor
        1-D with shape [nnz] (CSR) or
        1-D with shape [num_blocks] (BSR)

    weight_indptr : tvm.te.Tensor
        1-D with shape [N + 1] (CSR) or
        1-D with shape [(N + 1) // bs_r] (BSR)

    Returns
    -------
    output : tvm.te.Tensor
        2-D with shape [M, N]
    """
    # Note: the docstring previously documented a `cfg` parameter that this
    # function never took; it has been removed.
    return sparse_dense_tir(data, weight_data, weight_indices, weight_indptr)
def schedule_sparse_dense_padded(outs):
    """Create schedule for the padded sparse dense variant."""
    main_op = outs[0].op
    transposed = main_op.input_tensors[0]
    # XXX: this will fail if we don't include the data_t Tensor in the schedule
    # ops. Maybe create_schedule should do some analysis so this isn't
    # necessary
    sched = te.create_schedule([main_op, transposed.op])
    schedule_cuda_transpose(sched, transposed)
    return sched
def pad_sparse_matrix(matrix, blocksize):
    """Pad rows of a BSR sparse matrix so every block-row's length is a
    multiple of `blocksize`.

    Padding blocks are explicit zeros (stored at column-block 0), so the
    padded matrix is numerically equal to the input.

    Parameters
    ----------
    matrix : scipy.sparse.bsr_matrix
        Input matrix; not modified, a padded copy is returned.
    blocksize : int
        Target multiple for the number of stored blocks per block-row.

    Returns
    -------
    scipy.sparse.bsr_matrix
        Padded matrix with the same shape and values as `matrix`.
    """
    assert isinstance(matrix, sp.bsr_matrix)
    bsr = matrix.blocksize[0]
    # Number of block rows.
    n = matrix.shape[0] // bsr
    # FIX: was sized matrix.shape[0] (element rows) instead of the number of
    # block rows; only the first n entries were ever used.
    new_entries = np.zeros(n, dtype=matrix.indptr.dtype)
    for i in range(n):
        row_length = matrix.indptr[i + 1] - matrix.indptr[i]
        if row_length % blocksize != 0:
            new_entries[i] = blocksize - (row_length % blocksize)
    additional = np.sum(new_entries)
    indices = np.zeros(matrix.indices.shape[0] + additional, dtype=matrix.indices.dtype)
    data = np.zeros(
        (matrix.data.shape[0] + additional, matrix.data.shape[1], matrix.data.shape[2]),
        dtype=matrix.data.dtype,
    )
    indptr = np.zeros(n + 1, dtype=matrix.indptr.dtype)
    indptr[: matrix.indptr.shape[0]] = matrix.indptr
    for i in range(n):
        # New row extent = old extent + padding for this row.
        indptr[i + 1] = indptr[i] + new_entries[i] + (matrix.indptr[i + 1] - matrix.indptr[i])
        # Copy the original blocks; the trailing new_entries[i] slots stay
        # zero (index 0, zero data).
        indices[indptr[i] : indptr[i + 1] - new_entries[i]] = matrix.indices[
            matrix.indptr[i] : matrix.indptr[i + 1]
        ]
        data[indptr[i] : indptr[i + 1] - new_entries[i], :, :] = matrix.data[
            matrix.indptr[i] : matrix.indptr[i + 1], :, :
        ]
    return sp.bsr_matrix((data, indices, indptr), matrix.shape)
@nn.sparse_dense_alter_layout.register(["cuda", "gpu"])
def _alter_sparse_dense_layout(_attrs, inputs, _tinfos, _out_type):
    """On cuda targets, use alter_op_layout to swap the default sparse_dense
    implementation for one that operates on a padded matrix. We also pad the
    weight matrix so every block-row length is a multiple of the warp size.

    Returns the rewritten call, or None to leave the op unchanged when the
    weights are not all constants.
    """
    # Only rewrite when all three weight components are compile-time
    # constants, since padding happens at compile time.
    if (
        isinstance(inputs[1], relay.Constant)
        and isinstance(inputs[2], relay.Constant)
        and isinstance(inputs[3], relay.Constant)
    ):
        sparse_matrix = sp.bsr_matrix(
            (inputs[1].data.asnumpy(), inputs[2].data.asnumpy(), inputs[3].data.asnumpy())
        )
        warp_size = int(tvm.target.Target.current(allow_none=False).thread_warp_size)
        sparse_matrix = pad_sparse_matrix(sparse_matrix, warp_size)
        return relay.nn._make.sparse_dense_padded(
            inputs[0],
            relay.Constant(tvm.nd.array(sparse_matrix.data)),
            relay.Constant(tvm.nd.array(sparse_matrix.indices)),
            relay.Constant(tvm.nd.array(sparse_matrix.indptr)),
        )
    return None
|
{"hexsha": "3fd6fbebc62fc2d310c3fbc601211a283369eab9", "size": 14803, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/tvm/topi/cuda/sparse.py", "max_stars_repo_name": "mycpuorg/tvm", "max_stars_repo_head_hexsha": "bf99e9a60818e4cd5bc67a471325fc4fdc92fc82", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2017-08-29T02:26:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T06:13:33.000Z", "max_issues_repo_path": "python/tvm/topi/cuda/sparse.py", "max_issues_repo_name": "mycpuorg/tvm", "max_issues_repo_head_hexsha": "bf99e9a60818e4cd5bc67a471325fc4fdc92fc82", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/tvm/topi/cuda/sparse.py", "max_forks_repo_name": "mycpuorg/tvm", "max_forks_repo_head_hexsha": "bf99e9a60818e4cd5bc67a471325fc4fdc92fc82", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-03-27T01:02:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-29T00:32:31.000Z", "avg_line_length": 38.7513089005, "max_line_length": 99, "alphanum_fraction": 0.6133891779, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3976}
|
# Graph-structured networks share the AbstractNetwork oracle interface.
abstract type GraphNetwork <: AbstractNetwork end

"""
    evaluate(::AbstractNetwork, state)
    (nn::AbstractNetwork)(state) = evaluate(nn, state)

Evaluate the neural network as an MCTS oracle on a single state.

Note, however, that evaluating state positions one at a time is slow and so you
may want to use a `BatchedOracle` along with an inference server that uses
[`evaluate_batch`](@ref).
"""
function evaluate(nn::GraphNetwork, state)
  gspec = game_spec(nn)
  # Mask of legal actions for this state.
  actions_mask = GI.actions_mask(GI.init(gspec, state))
  x = GI.graph_state(gspec, state)
  a = Float32.(actions_mask)
  xnet, anet = convert_input_tuple(nn, (x, a))
  # Forward pass on a singleton batch; unwrap the singleton outputs.
  net_output = forward_normalized(nn, [xnet], anet)
  p, v, _ = from_singletons.(convert_output_tuple(nn, net_output))
  # Return the policy restricted to legal actions, and the scalar value.
  return (p[actions_mask], v[1])
end
"""
evaluate_batch(::AbstractNetwork, batch)
Evaluate the neural network as an MCTS oracle on a batch of states at once.
Take a list of states as input and return a list of `(P, V)` pairs as defined in the
MCTS oracle interface.
"""
function evaluate_batch(nn::GraphNetwork, batch)
gspec = game_spec(nn)
X = Flux.batch((GI.graph_state(gspec, b) for b in batch))
X = Flux.batch(Vector{typeof(X[1])}(X))
A = Flux.batch((GI.actions_mask(GI.init(gspec, b)) for b in batch))
Xnet, Anet = convert_input_tuple(nn, (X, Float32.(A)))
P, V, _ = convert_output_tuple(nn, forward_normalized(nn, Xnet, Anet))
return [(P[A[:,i],i], V[1,i]) for i in eachindex(batch)]
end
|
{"hexsha": "582dd4d948ec395a40304bd79988131ba7f09a7d", "size": 1542, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/networks/graph_network.jl", "max_stars_repo_name": "laurium-labs/AlphaZero.jl", "max_stars_repo_head_hexsha": "60aca46aaf1960258a5fff39a17b6afa021dbb1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/networks/graph_network.jl", "max_issues_repo_name": "laurium-labs/AlphaZero.jl", "max_issues_repo_head_hexsha": "60aca46aaf1960258a5fff39a17b6afa021dbb1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/networks/graph_network.jl", "max_forks_repo_name": "laurium-labs/AlphaZero.jl", "max_forks_repo_head_hexsha": "60aca46aaf1960258a5fff39a17b6afa021dbb1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0454545455, "max_line_length": 84, "alphanum_fraction": 0.7101167315, "num_tokens": 426}
|
using Test, YaoBlocks, YaoArrayRegister
# Constructors: rotation gates are single-qubit primitives whose angle type
# must be real and follows the axis argument.
@testset "test constructor" for T in [Float16, Float32, Float64]
    # NOTE: type should follow the axis
    @test RotationGate(X, 0.1) isa PrimitiveBlock{1}
    @test_throws TypeError RotationGate{1, Complex{T}, XGate} # will not accept non-real type
    @test Rx(T(0.1)) isa RotationGate{1, T, XGate}
    @test Ry(T(0.1)) isa RotationGate{1, T, YGate}
    @test Rz(T(0.1)) isa RotationGate{1, T, ZGate}
end

# Matrix representations: each rotation R_G(θ) should equal exp(-im*θ/2 * G).
@testset "test matrix" begin
    theta = 2.0
    for (DIRECTION, MAT) in [
        (X, [cos(theta/2) -im*sin(theta/2); -im*sin(theta/2) cos(theta/2)]),
        (Y, [cos(theta/2) -sin(theta/2); sin(theta/2) cos(theta/2)]),
        (Z, [exp(-im*theta/2) 0;0 exp(im*theta/2)]),
        (CNOT, exp(-mat(CNOT)/2*theta*im |> Matrix)),
        (control(2, (1,), 2=>X), exp(-mat(CNOT)/2*theta*im |> Matrix))]
        @test mat(RotationGate(DIRECTION, theta)) ≈ MAT
    end
end

# Applying a gate to a register should agree with direct matrix multiplication.
@testset "test apply" begin
    r = rand_state(1)
    @test state(apply!(copy(r), Rx(0.1))) ≈ mat(Rx(0.1)) * state(r)
end

# Parameter dispatch: setting, counting, and arithmetic updates of the angle.
@testset "test dispatch" begin
    @test dispatch!(Rx(0.1), 0.3) == Rx(0.3)
    @test nparameters(Rx(0.1)) == 1
    @testset "test $op" for op in [+, -, *, /]
        @test dispatch!(op, Rx(0.1), π) == Rx(op(0.1, π))
    end
    # Too many parameters must be rejected.
    @test_throws AssertionError dispatch!(Rx(0.1), (0.2, 0.3))
end

# Adjoint of a rotation negates the angle; copy produces an equal, distinct gate.
@testset "adjoints" begin
    @test Rx(0.1)' == Rx(-0.1)
    @test Rx(0.2)' == Rx(-0.2)
    @test copy(Rx(0.1)) == Rx(0.1)
    g = Rx(0.1) # creates a new one
    @test copy(g) !== g
end
|
{"hexsha": "f937e986306700ffef21db51fcf3c1fcf1bee61f", "size": 1533, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/primitive/rotation_gate.jl", "max_stars_repo_name": "yihong-zhang/YaoBlocks.jl", "max_stars_repo_head_hexsha": "9bd8f309b5c258968fb5ce4c2f12fc5e854d8b68", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/primitive/rotation_gate.jl", "max_issues_repo_name": "yihong-zhang/YaoBlocks.jl", "max_issues_repo_head_hexsha": "9bd8f309b5c258968fb5ce4c2f12fc5e854d8b68", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/primitive/rotation_gate.jl", "max_forks_repo_name": "yihong-zhang/YaoBlocks.jl", "max_forks_repo_head_hexsha": "9bd8f309b5c258968fb5ce4c2f12fc5e854d8b68", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.66, "max_line_length": 93, "alphanum_fraction": 0.5805609915, "num_tokens": 577}
|
#' add
#'
#' Add two matrices: \code{ret = alpha*x + beta*y}.
#'
#' @param transx,transy Should x/y be transposed?
#' @param alpha,beta Scalars.
#' @param x,y Input data.
#' @param ret Either \code{NULL} or an already allocated fml matrix of the same
#' class and type as \code{x}.
#' @return Returns the matrix sum.
#'
#' @rdname linalg-add
#' @name add
#'
#' @useDynLib fmlr R_linalg_add
#'
#' @export
linalg_add = function(transx=FALSE, transy=FALSE, alpha=1, beta=1, x, y, ret=NULL) {
  # Both operands must be fml matrices.
  check_is_mat(x)
  check_is_mat(y)

  # Coerce scalar options to the types the C layer expects.
  transx = as.logical(transx)
  transy = as.logical(transy)
  alpha = as.double(alpha)
  beta = as.double(beta)

  quiet = check_inputs(ret, x, y)
  if (is.null(ret)) {
    ret = setret(x)
  }

  .Call(R_linalg_add, get_backend(x), x$get_type(), transx, transy, alpha, beta, x$data_ptr(), y$data_ptr(), ret$data_ptr())

  if (quiet) invisible(ret) else ret
}
#' matmult
#'
#' Multiply two matrices: \code{ret = alpha*x*y}.
#'
#' @param transx,transy Should x/y be transposed?
#' @param alpha Scalar.
#' @param x,y Input data.
#' @param ret Either \code{NULL} or an already allocated fml matrix of the same
#' class and type as \code{x}.
#' @return Returns the matrix product.
#'
#' @rdname linalg-matmult
#' @name matmult
#'
#' @useDynLib fmlr R_linalg_matmult
#'
#' @export
linalg_matmult = function(transx=FALSE, transy=FALSE, alpha=1, x, y, ret=NULL) {
  # Either side of the product may be a matrix or a vector.
  if (!is_mat(x) && !is_vec(x))
    stop("argument 'x' must be a matrix or vector type")
  if (!is_mat(y) && !is_vec(y))
    stop("argument 'y' must be a matrix or vector type")

  transx = as.logical(transx)
  transy = as.logical(transy)
  alpha = as.double(alpha)

  quiet = check_inputs(ret, x, y, check_class=FALSE)
  if (is.null(ret)) {
    # Result is a vector whenever either operand is one.
    ret = setret(x, vec=is_vec(x) || is_vec(y))
  }

  .Call(R_linalg_matmult, get_backend(x), x$get_type(), transx, transy, alpha, x$get_class(), x$data_ptr(), y$get_class(), y$data_ptr(), ret$data_ptr())

  if (quiet) invisible(ret) else ret
}
#' @useDynLib fmlr R_linalg_crossprod
# Shared worker for crossprod/tcrossprod; xpose selects which one.
linalg_crossprods = function(x, ret, alpha, xpose) {
  check_is_mat(x)

  xpose = as.logical(xpose)
  alpha = as.double(alpha)

  quiet = check_inputs(ret, x)
  if (is.null(ret)) {
    ret = setret(x)
  }

  .Call(R_linalg_crossprod, get_backend(x), x$get_type(), xpose, alpha, x$data_ptr(), ret$data_ptr())

  if (quiet) invisible(ret) else ret
}
#' crossprod
#'
#' Compute crossproducts.
#'
#' @param alpha Number to scale the crossproduct by.
#' @param x Input data.
#' @param ret Either \code{NULL} or an already allocated fml matrix of the same
#' class and type as \code{x}.
#' @return Returns the crossproduct.
#'
#' @rdname linalg-crossprod
#' @name crossprod
NULL
#' @rdname linalg-crossprod
#' @export
linalg_crossprod = function(alpha=1, x, ret=NULL) {
  # alpha * t(x) %*% x
  linalg_crossprods(x, ret, alpha, xpose=FALSE)
}
#' @rdname linalg-crossprod
#' @export
linalg_tcrossprod = function(alpha=1, x, ret=NULL) {
  # alpha * x %*% t(x)
  linalg_crossprods(x, ret, alpha, xpose=TRUE)
}
#' xpose
#'
#' Matrix transpose.
#'
#' @param x Input data.
#' @param ret Either \code{NULL} or an already allocated fml matrix of the same
#' class and type as \code{x}.
#' @return Returns the xpose.
#'
#' @rdname linalg-xpose
#' @name xpose
#' @useDynLib fmlr R_linalg_xpose
#'
#' @export
linalg_xpose = function(x, ret=NULL) {
  check_is_mat(x)

  quiet = check_inputs(ret, x)
  if (is.null(ret)) {
    ret = setret(x)
  }

  .Call(R_linalg_xpose, get_backend(x), x$get_type(), x$data_ptr(), ret$data_ptr())

  if (quiet) invisible(ret) else ret
}
#' lu
#'
#' LU factorization. The factorization occurs in-place.
#'
#' @param x Input data, overwritten by its LU factorization.
#' @return Returns \code{NULL}.
#'
#' @rdname linalg-lu
#' @name lu
#' @useDynLib fmlr R_linalg_lu
#'
#' @export
linalg_lu = function(x) {
  check_is_mat(x)
  # In-place: x is overwritten by its LU factorization.
  .Call(R_linalg_lu, get_backend(x), x$get_type(), x$data_ptr())
  invisible(NULL)
}
#' det
#'
#' Determinant
#'
#' @param x Input data, overwritten by its LU factorization.
#'
#' @return Returns a list containing the modulus and the sign.
#'
#' @rdname linalg-det
#' @name det
#' @useDynLib fmlr R_linalg_det
#'
#' @export
linalg_det = function(x) {
  check_is_mat(x)
  # Returns the C layer's result directly (modulus and sign); x is
  # overwritten by its LU factorization as a side effect.
  .Call(R_linalg_det, get_backend(x), x$get_type(), x$data_ptr())
}
#' trace
#'
#' Matrix trace, i.e. the sum of the diagonal elements.
#'
#' @param x Input data.
#' @return Returns the trace.
#'
#' @rdname linalg-trace
#' @name trace
#' @useDynLib fmlr R_linalg_trace
#'
#' @export
linalg_trace = function(x) {
  check_is_mat(x)
  # Sum of the diagonal elements, computed by the backend.
  .Call(R_linalg_trace, get_backend(x), x$get_type(), x$data_ptr())
}
#' svd
#'
#' Computes the singular value decomposition.
#'
#' @details
#' You will need to initialize the return objects \code{s} and/or \code{u} and
#' \code{vt} manually. See the example.
#'
#' @param x Input data. The input values are overwritten.
#' @param s Singular values.
#' @param u,vt The left/right singular vectors. Should both be \code{NULL} or
#' matrices of the same backend and fundamental type as \code{x}.
#'
#' @examples
#' suppressMessages(library(fmlr))
#' x = cpumat(3, 2)
#' x$fill_linspace(1, 6)
#'
#' s = cpuvec()
#' linalg_svd(x, s)
#' s$info()
#' s$print()
#'
#' @rdname linalg-svd
#' @name svd
#' @useDynLib fmlr R_linalg_svd
#'
#' @export
linalg_svd = function(x, s, u=NULL, vt=NULL) {
  check_is_mat(x)
  check_is_vec(s)
  check_type_consistency(x, s)

  # u and vt must be supplied together (full SVD) or both omitted
  # (singular values only).
  if (!is.null(u) && !is.null(vt))
    check_inputs(x, u, vt)
  else if (!is.null(u) || !is.null(vt))
    stop("must pass neither u and vt or both u and vt")

  if (is.null(u)) {
    .Call(R_linalg_svd, get_backend(x), x$get_type(), x$data_ptr(), s$data_ptr(), NULL, NULL)
  } else {
    .Call(R_linalg_svd, get_backend(x), x$get_type(), x$data_ptr(), s$data_ptr(), u$data_ptr(), vt$data_ptr())
  }

  invisible(NULL)
}
#' eigen
#'
#' Computes the eigenvalues and/or eigenvectors
#'
#' @details
#' You will need to initialize the return objects \code{values} and/or
#' \code{vectors} manually. See the example.
#'
#' @param x Input data. The input values are overwritten.
#' @param values The eigenvalues.
#' @param vectors The eigenvectors.
#'
#' @rdname linalg-eigen
#' @name eigen
#' @useDynLib fmlr R_linalg_eigen_sym
#'
#' @export
linalg_eigen_sym = function(x, values, vectors=NULL) {
  check_is_mat(x)
  check_is_vec(values)
  check_type_consistency(x, values)

  # Eigenvectors are optional; validate only when requested.
  if (!is.null(vectors))
    check_inputs(x, vectors)

  if (is.null(vectors)) {
    .Call(R_linalg_eigen_sym, get_backend(x), x$get_type(), x$data_ptr(), values$data_ptr(), NULL)
  } else {
    .Call(R_linalg_eigen_sym, get_backend(x), x$get_type(), x$data_ptr(), values$data_ptr(), vectors$data_ptr())
  }

  invisible(NULL)
}
#' invert
#'
#' Invert a matrix.
#'
#' @param x Input data, overwritten by the inverse.
#' @return Returns \code{NULL}.
#'
#' @rdname linalg-invert
#' @name invert
#' @useDynLib fmlr R_linalg_invert
#'
#' @export
linalg_invert = function(x) {
  check_is_mat(x)
  # In-place: x is overwritten by its inverse.
  .Call(R_linalg_invert, get_backend(x), x$get_type(), x$data_ptr())
  invisible(NULL)
}
#' solve
#'
#' Solve a system of equations.
#'
#' @param x Input data. The input values are overwritten.
#' @param y The RHS, overwritten by the solution.
#'
#' @rdname linalg-solve
#' @name solve
#' @useDynLib fmlr R_linalg_solve
#'
#' @export
linalg_solve = function(x, y) {
  check_is_mat(x)
  check_type_consistency(x, y)

  if (is_vec(y)) {
    # A distributed matrix can not be paired with a plain vector RHS.
    if (is_mpimat(x))
      stop("can not mix vector with mpimat")
    class = CLASS_VEC
  } else {
    check_class_consistency(x, y)
    class = CLASS_MAT
  }

  # x is overwritten during factorization; y is overwritten by the solution.
  .Call(R_linalg_solve, get_backend(x), x$get_type(), x$data_ptr(), class, y$data_ptr())
  invisible(NULL)
}
#' qr
#'
#' Computes the compact QR factorization.
#'
#' @param x Input data. The input values are overwritten.
#' @param qraux Auxilliary data for the compact QR.
#'
#' @rdname linalg-qr
#' @name qr
#' @useDynLib fmlr R_linalg_qr
#'
#' @export
linalg_qr = function(x, qraux)
{
  # Compact QR of 'x', computed in place; 'qraux' receives the auxiliary
  # scalars needed later by linalg_qr_Q() to rebuild the explicit Q.
  check_is_mat(x)
  check_is_vec(qraux)
  check_type_consistency(x, qraux)
  
  backend = get_backend(x)
  .Call(R_linalg_qr, backend, x$get_type(), x$data_ptr(), qraux$data_ptr())
  invisible(NULL)
}
#' qr_Q
#'
#' Computes the Q matrix from the compact QR factorization.
#'
#' @param QR The compact QR factorization. The return from \code{qr()}.
#' @param qraux Auxilliary data for the compact QR.
#' @param Q The output Q matrix.
#' @param work A workspace vector.
#'
#' @rdname linalg-qr-Q
#' @name qr_Q
#' @useDynLib fmlr R_linalg_qr_Q
#'
#' @export
linalg_qr_Q = function(QR, qraux, Q, work)
{
  # Recover the explicit Q factor from a compact QR factorization.
  check_inputs(QR, Q)
  check_is_vec(qraux)
  check_is_vec(work)
  check_type_consistency(QR, qraux, work)
  
  backend = get_backend(QR)
  .Call(R_linalg_qr_Q, backend, QR$get_type(), QR$data_ptr(), qraux$data_ptr(), Q$data_ptr(), work$data_ptr())
  invisible(NULL)
}
#' qr_R
#'
#' Computes the R matrix from the compact QR factorization.
#'
#' @param QR The compact QR factorization. The return from \code{qr()}.
#' @param R The output R matrix.
#'
#' @rdname linalg-qr-R
#' @name qr_R
#' @useDynLib fmlr R_linalg_qr_R
#'
#' @export
linalg_qr_R = function(QR, R)
{
  # Extract the R factor from a compact QR factorization into 'R'.
  check_inputs(QR, R)
  backend = get_backend(QR)
  .Call(R_linalg_qr_R, backend, QR$get_type(), QR$data_ptr(), R$data_ptr())
  invisible(NULL)
}
#' lq
#'
#' Computes the compact LQ factorization.
#'
#' @param x Input data. The input values are overwritten.
#' @param lqaux Auxilliary data for the compact LQ.
#'
#' @rdname linalg-lq
#' @name lq
#' @useDynLib fmlr R_linalg_lq
#'
#' @export
linalg_lq = function(x, lqaux)
{
  # Compact LQ of 'x', computed in place; 'lqaux' receives the auxiliary
  # scalars needed later by linalg_lq_Q() to rebuild the explicit Q.
  check_is_mat(x)
  check_is_vec(lqaux)
  check_type_consistency(x, lqaux)
  
  backend = get_backend(x)
  .Call(R_linalg_lq, backend, x$get_type(), x$data_ptr(), lqaux$data_ptr())
  invisible(NULL)
}
#' lq_L
#'
#' Computes the L matrix from the compact LQ factorization.
#'
#' @param LQ The compact LQ factorization. The return from \code{lq()}.
#' @param L The output L matrix.
#'
#' @rdname linalg-lq-L
#' @name lq_L
#' @useDynLib fmlr R_linalg_lq_L
#'
#' @export
linalg_lq_L = function(LQ, L)
{
  # Extract the L factor from a compact LQ factorization into 'L'.
  check_inputs(LQ, L)
  backend = get_backend(LQ)
  .Call(R_linalg_lq_L, backend, LQ$get_type(), LQ$data_ptr(), L$data_ptr())
  invisible(NULL)
}
#' lq_Q
#'
#' Computes the Q matrix from the compact LQ factorization.
#'
#' @param LQ The compact LQ factorization. The return from \code{lq()}.
#' @param lqaux Auxilliary data for the compact LQ.
#' @param Q The output Q matrix.
#' @param work A workspace vector.
#'
#' @rdname linalg-lq-Q
#' @name lq_Q
#' @useDynLib fmlr R_linalg_lq_Q
#'
#' @export
linalg_lq_Q = function(LQ, lqaux, Q, work)
{
  # Recover the explicit Q factor from a compact LQ factorization.
  check_inputs(LQ, Q)
  check_is_vec(lqaux)
  check_is_vec(work)
  check_type_consistency(LQ, lqaux, work)
  
  backend = get_backend(LQ)
  .Call(R_linalg_lq_Q, backend, LQ$get_type(), LQ$data_ptr(), lqaux$data_ptr(), Q$data_ptr(), work$data_ptr())
  invisible(NULL)
}
#' qrsvd
#'
#' QR/LQ-based SVD. Useful for very tall/skinny or short/wide data.
#'
#' @param x Input data. The input values are overwritten.
#' @param s Singular values.
#' @param u,vt The left/right singular vectors. Should both be \code{NULL} or
#' matrices of the same backend and fundamental type as \code{x}.
#'
#' @rdname linalg-qrsvd
#' @name qrsvd
#' @useDynLib fmlr R_linalg_qrsvd
#'
#' @export
linalg_qrsvd = function(x, s, u=NULL, vt=NULL)
{
  check_is_mat(x)
  check_is_vec(s)
  check_type_consistency(x, s)
  
  # Either both singular-vector outputs are supplied, or neither is.
  want_uv = !is.null(u) && !is.null(vt)
  if (want_uv)
    check_inputs(x, u, vt)
  else if (!is.null(u) || !is.null(vt))
    stop("must supply either both 'u' and 'vt' or neither")  # FIX: message was garbled
  
  if (want_uv)
    .Call(R_linalg_qrsvd, get_backend(x), x$get_type(), x$data_ptr(), s$data_ptr(), u$data_ptr(), vt$data_ptr())
  else
    .Call(R_linalg_qrsvd, get_backend(x), x$get_type(), x$data_ptr(), s$data_ptr(), NULL, NULL)
  invisible(NULL)
}
#' cpsvd
#'
#' "Crossproducts" SVD.
#'
#' @details
#' Computes the approximate SVD via the eigenvalue decomposition of
#' \code{crossprod(x)} if the input is tall/skinny and \code{tcrossprod(x)}
#' otherwise.
#'
#' @param x Input data. The input values are overwritten.
#' @param s Singular values.
#' @param u,vt The left/right singular vectors. Should both be \code{NULL} or
#' matrices of the same backend and fundamental type as \code{x}.
#'
#' @rdname linalg-cpsvd
#' @name cpsvd
#' @useDynLib fmlr R_linalg_cpsvd
#'
#' @export
linalg_cpsvd = function(x, s, u=NULL, vt=NULL)
{
  check_is_mat(x)
  check_is_vec(s)
  check_type_consistency(x, s)
  
  # Either both singular-vector outputs are supplied, or neither is.
  want_uv = !is.null(u) && !is.null(vt)
  if (want_uv)
    check_inputs(x, u, vt)
  else if (!is.null(u) || !is.null(vt))
    stop("must supply either both 'u' and 'vt' or neither")  # FIX: message was garbled
  
  if (want_uv)
    .Call(R_linalg_cpsvd, get_backend(x), x$get_type(), x$data_ptr(), s$data_ptr(), u$data_ptr(), vt$data_ptr())
  else
    .Call(R_linalg_cpsvd, get_backend(x), x$get_type(), x$data_ptr(), s$data_ptr(), NULL, NULL)
  invisible(NULL)
}
#' chol
#'
#' Compute the lower-triangular Cholesky factor.
#'
#' @param x Input data, overwritten by its lower-triangular Cholesky factor.
#' @return Returns \code{NULL}.
#'
#' @rdname linalg-chol
#' @name chol
#' @useDynLib fmlr R_linalg_chol
#'
#' @export
linalg_chol = function(x)
{
  # Overwrites 'x' with its lower-triangular Cholesky factor, in place.
  check_is_mat(x)
  backend = get_backend(x)
  .Call(R_linalg_chol, backend, x$get_type(), x$data_ptr())
  invisible(NULL)
}
#' norms
#'
#' Norms.
#'
#' @param x Input data. The data is un-modified except for \code{norm_2()}.
#'
#' @return The requested norm.
#'
#' @rdname linalg-norm
#' @name norm
#'
#' @useDynLib fmlr R_linalg_norm
NULL
#' @rdname linalg-norm
#' @export
linalg_norm_1 = function(x)
{
  # LAPACK-style "1" norm (maximum absolute column sum).
  check_is_mat(x)
  norm_type = "1"
  .Call(R_linalg_norm, get_backend(x), x$get_type(), x$data_ptr(), norm_type)
}
#' @rdname linalg-norm
#' @export
linalg_norm_I = function(x)
{
  # LAPACK-style "I" (infinity) norm: maximum absolute row sum.
  check_is_mat(x)
  norm_type = "I"
  .Call(R_linalg_norm, get_backend(x), x$get_type(), x$data_ptr(), norm_type)
}
#' @rdname linalg-norm
#' @export
linalg_norm_F = function(x)
{
  # Frobenius norm.
  check_is_mat(x)
  norm_type = "F"
  .Call(R_linalg_norm, get_backend(x), x$get_type(), x$data_ptr(), norm_type)
}
#' @rdname linalg-norm
#' @export
linalg_norm_M = function(x)
{
  # LAPACK-style "M" norm: maximum absolute element value.
  check_is_mat(x)
  norm_type = "M"
  .Call(R_linalg_norm, get_backend(x), x$get_type(), x$data_ptr(), norm_type)
}
#' @rdname linalg-norm
#' @export
linalg_norm_2 = function(x)
{
  # Spectral ("2") norm.  Note: unlike the other norms, this one
  # modifies the input data (see the roxygen block above).
  check_is_mat(x)
  norm_type = "2"
  .Call(R_linalg_norm, get_backend(x), x$get_type(), x$data_ptr(), norm_type)
}
#' Condition Number
#'
#' Condition numbers.
#'
#' @param x Input data. The data is modified in each case.
#'
#' @return The requested condition number.
#'
#' @rdname linalg-cond
#' @name cond
#'
#' @useDynLib fmlr R_linalg_cond
NULL
#' @rdname linalg-cond
#' @export
linalg_cond_1 = function(x)
{
  # Condition number with respect to the 1-norm; modifies 'x'.
  check_is_mat(x)
  norm_type = "1"
  .Call(R_linalg_cond, get_backend(x), x$get_type(), x$data_ptr(), norm_type)
}
#' @rdname linalg-cond
#' @export
linalg_cond_I = function(x)
{
  # Condition number with respect to the infinity norm; modifies 'x'.
  check_is_mat(x)
  norm_type = "I"
  .Call(R_linalg_cond, get_backend(x), x$get_type(), x$data_ptr(), norm_type)
}
#' @rdname linalg-cond
#' @export
linalg_cond_2 = function(x)
{
  # Condition number with respect to the 2-norm; modifies 'x'.
  check_is_mat(x)
  norm_type = "2"
  .Call(R_linalg_cond, get_backend(x), x$get_type(), x$data_ptr(), norm_type)
}
#' dot
#'
#' Compute vector dot product i.e. \code{sum(x*y)}.
#'
#' @param x,y Input data. If not \code{NULL}, \code{y} should be of the same
#' class and type as \code{x}.
#' @return Returns the dot product.
#'
#' @rdname linalg-dot
#' @name dot
#'
#' @useDynLib fmlr R_linalg_dot
#'
#' @export
linalg_dot = function(x, y=NULL)
{
  # Vector dot product.  With 'y' omitted the C layer receives NULL
  # (presumably computing the dot of 'x' with itself -- see the backend).
  check_is_vec(x)
  
  if (is.null(y))
  {
    .Call(R_linalg_dot, get_backend(x), x$get_type(), x$data_ptr(), NULL)
  }
  else
  {
    check_is_vec(y)
    # NOTE(review): sibling functions validate with check_type_consistency();
    # confirm whether backend-only checking is intentional here.
    check_backend_consistency(x, y)
    .Call(R_linalg_dot, get_backend(x), x$get_type(), x$data_ptr(), y$data_ptr())
  }
}
#' trinv
#'
#' Invert a triangular matrix.
#'
#' @param upper Is the matrix upper triangular? Otherwise only the lower
#' triangle will be referenced.
#' @param unit_diag Is the matrix unit diagonal?
#' @param x Input data, overwritten by the inverse.
#' @return Returns \code{NULL}.
#'
#' @rdname linalg-trinv
#' @name trinv
#' @useDynLib fmlr R_linalg_trinv
#'
#' @export
linalg_trinv = function(upper, unit_diag, x)
{
  # In-place inverse of a triangular matrix; the flags select which
  # triangle is referenced and whether the diagonal is implicitly 1.
  check_is_mat(x)
  .Call(R_linalg_trinv, get_backend(x), x$get_type(),
    as.logical(upper), as.logical(unit_diag), x$data_ptr())
  invisible(NULL)
}
#' rsvd
#'
#' SVD approximation via random projections.
#'
#' @param seed Seed for the random generator.
#' @param k The number of components to estimate. Integer > 0.
#' @param q Exponent (see paper if you really care). Values of 1 or 2 are good.
#' @param x Input data. The input values are overwritten.
#' @param s Singular values.
#' @param u,vt The left/right singular vectors. Should both be \code{NULL} or
#' matrices of the same backend and fundamental type as \code{x}.
#'
#' @references Halko, Nathan, Per-Gunnar Martinsson, and Joel A. Tropp. "Finding
#' structure with randomness: Probabilistic algorithms for constructing
#' approximate matrix decompositions." SIAM review 53, no. 2 (2011): 217-288.
#'
#' @rdname linalg-rsvd
#' @name rsvd
#' @useDynLib fmlr R_linalg_rsvd
#'
#' @export
linalg_rsvd = function(seed, k, q, x, s, u=NULL, vt=NULL)
{
  seed = as.integer(seed)
  k = as.integer(k)
  q = as.integer(q)
  
  check_is_mat(x)
  check_is_vec(s)
  check_type_consistency(x, s)
  
  # Either both singular-vector outputs are supplied, or neither is.
  want_uv = !is.null(u) && !is.null(vt)
  if (want_uv)
    check_inputs(x, u, vt)
  else if (!is.null(u) || !is.null(vt))
    stop("must supply either both 'u' and 'vt' or neither")  # FIX: message was garbled
  
  if (want_uv)
    .Call(R_linalg_rsvd, get_backend(x), x$get_type(), seed, k, q, x$data_ptr(), s$data_ptr(), u$data_ptr(), vt$data_ptr())
  else
    .Call(R_linalg_rsvd, get_backend(x), x$get_type(), seed, k, q, x$data_ptr(), s$data_ptr(), NULL, NULL)
  invisible(NULL)
}
|
{"hexsha": "4a2a4c508b6bcd57adff06495b298a263f3bca97", "size": 17680, "ext": "r", "lang": "R", "max_stars_repo_path": "R/linalg.r", "max_stars_repo_name": "fml-fam/fmlr", "max_stars_repo_head_hexsha": "7a9c8030435b9921fc832b27ef5f174a40c7792b", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-02-06T21:06:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-23T22:34:08.000Z", "max_issues_repo_path": "R/linalg.r", "max_issues_repo_name": "wrathematics/fmlr", "max_issues_repo_head_hexsha": "7a9c8030435b9921fc832b27ef5f174a40c7792b", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-02-19T17:27:46.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-09T00:30:36.000Z", "max_forks_repo_path": "R/linalg.r", "max_forks_repo_name": "wrathematics/fmlr", "max_forks_repo_head_hexsha": "7a9c8030435b9921fc832b27ef5f174a40c7792b", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6401468788, "max_line_length": 152, "alphanum_fraction": 0.6642533937, "num_tokens": 5548}
|
from collections import defaultdict
import PIL.Image as Im
import numpy as np
from .constants import *
def extract_table(table, origin):
    """Crop the grid of symbol images out of a full-table screenshot.

    Returns a list of num_rows*2 rows, each a list of num_cols PIL crops,
    laid out on the fixed grid anchored at `origin`.
    """
    ox, oy = origin
    rows = []
    for ri in range(num_rows * 2):
        y = oy + table_offset_y * ri
        rows.append([
            table.crop((ox + table_offset_x * ci, y,
                        ox + table_offset_x * ci + symbol_width,
                        y + symbol_height))
            for ci in range(num_cols)
        ])
    return rows
def cmp_hist(a, b):
    """Return the L1 (city-block) distance between two equal-length histograms."""
    assert len(a) == len(b)
    total = 0
    for left, right in zip(a, b):
        total += abs(left - right)
    return total
def match(im, wiggle=False):
    """Return the symbol name in `gnd` whose template best matches crop `im`.

    With wiggle=True the comparison is repeated at small x/y offsets,
    presumably to tolerate screen regions that are not pixel-aligned --
    confirm against the capture code.
    """
    im = np.array(im).astype(int)
    # Search window: +/-4 px horizontally, +/-6 px vertically when wiggling.
    mx, my = (4, 6) if wiggle else (0, 0)
    def dif(a, b):
        # Best (lowest) mean squared pixel difference over all offsets.
        bs = float("inf")
        for dy in range(-my, my+1):
            for dx in range(-mx, mx+1):
                # Overlapping sub-rectangles of a and b for this offset.
                fi, li = max(0, dy), min(symbol_height, symbol_height + dy)
                fj, lj = max(0, dx), min(symbol_width, symbol_width + dx)
                h, w = li-fi, lj-fj
                sa = a[fi:li, fj:lj]
                sb = b[0:h, 0:w]
                d = np.sum((sa-sb)**2)
                # Normalize by overlap area so offsets compare fairly.
                f = d / (w*h)
                bs = min(bs, f)
        return bs
    #d = sorted([(dif(im, sm), sym) for sym, sm in gnd.items()])
    #sym = d[0][1]
    sym = min(gnd.items(), key=lambda sym_im: dif(im, sym_im[1]))[0]
    # Collapse any 'B...' symbol and 'ES' to the blank marker 'BL'.
    # NOTE(review): presumably these all denote "no card here" -- confirm.
    if sym[0] == "B" or sym == "ES":
        sym = 'BL'
    return sym
def extract_cap(cap):
    """Parse a full game screenshot into the board state.

    Returns (side, rose, dst, cols, drag):
      side - the three side-cell symbols ('BL' = empty, 'XX' present too)
      rose - the rose-cell symbol
      dst  - the three destination (foundation) top-card symbols
      cols - per-column lists of card symbols, top to bottom
      drag - one flag per entry of DRAGONS; True when zero copies remain
             on the board (the asserts below show counts are 0 or 4)
    """
    tab = extract_table(cap, table_top_left)
    cols = [[] for _ in range(num_cols)]
    for ci in range(num_cols):
        for ri in range(len(tab)):
            m = match(tab[ri][ci])
            if m != "BL":
                assert m in valid_cards
                cols[ci].append(m)
            else:
                # First blank cell terminates the column.
                break
        print("column %d done, %d cards" % (ci, len(cols[ci])))
    #pdb.set_trace()
    rose = cap.crop((rose_x, rose_y, rose_x+symbol_width, rose_y+symbol_height))
    rose = match(rose, wiggle=True)
    print("rose done")
    side, dst = [], []
    # Side cells occupy the first three slots of the top row.
    for i in range(3):
        x = table_top_left[0] + table_offset_x * i
        img = cap.crop((x, rose_y, x+symbol_width, rose_y+symbol_height))
        sym = match(img, wiggle=True)
        assert sym in (valid_cards | set(['BL', 'XX']))
        side.append(sym)
    print("side done")
    # Destination cells occupy slots 5..7 of the top row.
    for i in range(3):
        x = table_top_left[0] + table_offset_x * (5 + i)
        img = cap.crop((x, rose_y, x+symbol_width, rose_y+symbol_height))
        sym = match(img, wiggle=True)
        assert sym in (valid_cards | set(['BL']))
        dst.append(sym)
    print("dest done")
    #pdb.set_trace()
    #sanity
    #assert not any(len(col) > 5 for col in cols)
    # Sanity-check multiplicities across columns and side cells.
    occ = defaultdict(int)
    for col in cols:
        for sym in col:
            occ[sym] += 1
    for sym in side:
        occ[sym] += 1
    for sym, oc in occ.items():
        if sym in DRAGONS:
            # Dragons appear four times each, or not at all.
            assert oc == 0 or oc == 4
        elif sym != "BL" and sym != "XX":
            assert oc == 1
    for sym in dst:
        if sym != 'BL':
            # Every card ranked below a foundation's top card must be off the board.
            c = sym[1]
            v = int(sym[0])
            for lv in range(1, v):
                assert occ["%d%s" % (lv, c)] == 0
    #dragons
    drag = []
    for sym in DRAGONS:
        if occ[sym] == 0:
            drag.append(True)
        else:
            drag.append(False)
    print("extraction finished")
    return side, rose, dst, cols, drag
def load_ground():
    """Load the ground-truth symbol templates from shenzhen_ground.png.

    Returns a dict mapping symbol name -> PIL template image, cropped from
    the fixed grid layout listed below.
    """
    syms = """\
1b,1g,1r,2b,2g,2r,3b,3g
3r,4b,4g,4r,5b,5g,5r,6b
6g,6r,7b,7g,7r,8b,8g,8r
9b,9g,9r,BL,Bl,ES,GR,RE
RO,WH,XX,B0"""
    syms = [l.strip().split(',') for l in syms.split('\n')]
    ground_im = Im.open("shenzhen_ground.png").convert("RGB")
    # FIX: renamed local 'map' -> 'templates'; it shadowed the builtin.
    templates = {}
    for ri, row in enumerate(syms):
        for ci, sym in enumerate(row):
            img = ground_im.crop((ci*symbol_width, ri*symbol_height, (ci+1)*symbol_width, (ri+1)*symbol_height))
            templates[sym] = img
    return templates
def make_quilt(table):
    """Paste (symbol, image) pairs into one grid image plus a text layout.

    `table` is an iterable of (symbol_name, PIL_image) pairs, filled
    left-to-right, top-to-bottom, num_cols per row.
    """
    quilt = Im.new("RGB", (num_cols*symbol_width, num_rows*symbol_height))
    pieces = []
    for idx, (sym, im) in enumerate(table):
        ri, ci = divmod(idx, num_cols)
        quilt.paste(im, box=(ci*symbol_width, ri*symbol_height))
        pieces.append(sym + " ")
        if ci == num_cols - 1:
            pieces.append('\n')
    return quilt, "".join(pieces)
# Module-level template table used by match(): symbol name -> int-typed
# pixel array, loaded once at import time.
gnd = load_ground()
gnd = {sym: np.array(im).astype(int) for sym, im in gnd.items()}
|
{"hexsha": "f2445ae30445f8b2fbba1f9e6693020e275f1c2b", "size": 4401, "ext": "py", "lang": "Python", "max_stars_repo_path": "solver/vision.py", "max_stars_repo_name": "pyrolitic/shenzhen_io_solitaire_solver", "max_stars_repo_head_hexsha": "04c8a32ab6faff215422867bf5c3e7f446dbdb61", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-07-08T03:19:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T23:53:43.000Z", "max_issues_repo_path": "solver/vision.py", "max_issues_repo_name": "pyrolitic/shenzhen_io_solitaire_solver", "max_issues_repo_head_hexsha": "04c8a32ab6faff215422867bf5c3e7f446dbdb61", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-03-18T05:21:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T05:22:29.000Z", "max_forks_repo_path": "solver/vision.py", "max_forks_repo_name": "pyrolitic/shenzhen_io_solitaire_solver", "max_forks_repo_head_hexsha": "04c8a32ab6faff215422867bf5c3e7f446dbdb61", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7647058824, "max_line_length": 112, "alphanum_fraction": 0.5137468757, "include": true, "reason": "import numpy", "num_tokens": 1335}
|
[STATEMENT]
lemma Crypt_synth_eq [simp]:
"Key K \<notin> H ==> (Crypt K X \<in> synth H) = (Crypt K X \<in> H)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Key K \<notin> H \<Longrightarrow> (Crypt K X \<in> synth H) = (Crypt K X \<in> H)
[PROOF STEP]
by blast
|
{"llama_tokens": 115, "file": "Inductive_Confidentiality_DolevYao_Message", "length": 1}
|
import numpy as np
import scipy.signal as signal2
import math
import wave
try:
import pylab
except ImportError:
pass
import operator
from .process import *
from . import *
class ChromagramProcess(SimpleProcess):
    """Fold a spectral matrix onto chroma (pitch-class) bins.

    Consumes the input spectrogram and produces a matrix with `nbin`
    chroma rows (12 = one per semitone of the chromatic scale).
    """

    def run(self):
        # Transpose to (freq, time) and drop the DC (zero-frequency) bin,
        # which carries no pitch information.
        Magnitude = self.matrix.data
        freqs = self.matrix.freqs
        Magnitude = np.asarray(Magnitude.T)[1:,]  # remove zero bin
        freqs = freqs[1:]

        # FIX: forward the tuning option; self.A_tune was collected by
        # getOptions() (like self.nbin) but never actually used.
        CS = chromagram(Magnitude, freqs, self.nbin, A5=self.A_tune)

        # Sign flip + transpose retained from the original implementation,
        # presumably for display orientation -- confirm with the painter.
        x = np.asarray(-CS.T)
        mat = Matrix(x, self.matrix.sample_rate, Matrix.K_SPECTROGRAM)

        chromatic_scale = [
            'A', 'A#/Bb', 'B',
            'C', 'C#/Db', 'D',
            'D#/Eb', 'E', 'F',
            'F#/Gb', 'G', 'G#/Ab']
        # Label the y axis with note names when the bin count permits.
        if self.nbin == 12:
            mat.ylabel_fptr = lambda idx: chromatic_scale[idx]
        if self.nbin == 120:
            # Ten sub-bins per semitone.
            mat.ylabel_fptr = lambda idx: chromatic_scale[idx//10] if idx < 120 else chromatic_scale[-1]
        return mat

    @staticmethod
    def getOptions():
        """Returns a list of ProcessOptionsSpecifier."""
        opts = [
            SpectralMatrixInSpecifier(),
            MatrixOutSpecifier(),
            ProcessOptionsSpecifier(store="nbin", dtype=int,
                min=6, max=120, default=12,
                help="number of chroma bins"),
            ProcessOptionsSpecifier(store="A_tune", dtype=int,
                min=0, default=880,
                help="Tuning Frequency A5"),
        ]
        return opts
def specgram(signal, fs, step, logN, clip_exp):
    """Magnitude spectrogram in dB via pylab.specgram; drops the DC bin.

    Returns (Magnitude, freqs) with the zero-frequency row removed and
    power values floored at 10**clip_exp before the log.
    """
    signal = signal / signal.max()  # normalize to peak amplitude 1
    nfft = 2 ** logN
    Pxx, freqs, _, _ = pylab.specgram(signal,
                                      Fs=fs, Fc=0, scale='linear',
                                      window=np.hamming(nfft), NFFT=nfft,
                                      noverlap=nfft - step)
    np.clip(Pxx, 10 ** clip_exp, np.inf, out=Pxx)
    magnitude = 20 * np.log10(abs(Pxx[1:, ]))  # was np.log
    return magnitude, freqs[1:, ]  # skip zero bin
def specgram2(signal, Fs, logN, step, window=np.hamming, clip=10**-16):
    """
    Functional equivalent of mlab.specgram, computed one frame at a time.

    Matrix operations are converted to in-place row operations so that on
    memory-constrained systems no massive matrices are copied.  Output is
    the magnitude periodogram in dB.

    Parameters
    ----------
    signal : 1-D array-like of samples
    Fs     : sample rate in Hz
    logN   : FFT size is 2**logN
    step   : hop size in samples between frames
    window : callable producing a window of a given length (default Hamming)
    clip   : floor applied to the power before log10, avoiding log10(0)

    Returns
    -------
    (result, freqs) : result has shape (num_frames, 2**logN // 2 + 1);
    freqs holds the corresponding non-negative bin frequencies in Hz.
    """
    Nfft = 2**logN
    x = np.asarray(signal)
    n = len(x)

    # Frame count: ceil-divide so a trailing partial frame is kept and
    # zero-padded below.
    # FIX: the original used floor division inside np.ceil, silently
    # dropping trailing samples that did not fill a whole hop.
    tmp = int(max(0, -((n - Nfft) // -step)))
    num_frames = tmp + 1
    xlen = tmp*step + Nfft
    if n < xlen:
        # Zero-pad so the final frame is complete.
        x = np.resize(x, (xlen,))
        x[n:] = 0

    wind = window(Nfft)
    windloss = (np.abs(wind)**2).sum()
    scale_factor = 2            # one-sided spectrum: double interior bins
    scale_by_freq = 1.0/(Fs*windloss)

    Nout = Nfft//2 + 1  # include zero bin
    # FIX: allocate exactly num_frames rows.  The original allocated
    # n//step + 1 rows with np.empty but only wrote num_frames of them,
    # returning uninitialized garbage in the remainder.
    result = np.empty((num_frames, Nout))
    for i in range(num_frames):
        idx = i*step
        y = x[idx:idx+Nfft] * wind
        y = np.fft.fft(y, n=Nfft, axis=0)[:Nout]
        np.absolute(y, out=y)
        y *= scale_by_freq
        y[1:-1] *= scale_factor
        result[i] = y.real

    # dB scale, in place.
    np.clip(result, clip, np.inf, out=result)
    np.log10(result, out=result)
    result *= 20

    freqs = np.fft.fftfreq(Nfft, 1/Fs)[:Nout]
    freqs[-1] *= -1  # sign-correct the Nyquist bin
    return result, freqs
def chromagram(Magnitude, freqs, nbin=12, A5=880):
    """Fold a magnitude spectrogram onto `nbin` chroma (pitch-class) bins.

    Each frequency row of `Magnitude` is assigned to its nearest chroma
    bin (nearest fractional log2 position, tuned so A5 lands on bin 0),
    and each chroma bin's output is the mean of its member rows.  The
    result is normalized by its global maximum, as before.  A bin with no
    member frequencies yields NaN (unchanged from the original behavior).
    """
    st = 2**(1/float(nbin))  # ratio between adjacent chroma bins (semitone for nbin=12)
    # Fractional part of log2 of each chroma center frequency.
    tune = [np.log2(A5*st**i) for i in range(nbin)]
    chroma = np.asarray(tune) - np.asarray([int(v) for v in tune])
    nchroma = len(chroma)
    # Fractional part of log2 of each spectrogram frequency.
    # NOTE(review): int() truncates toward zero, so frequencies below 1 Hz
    # would not floor correctly; callers pass audio-band bins, so this holds.
    freqschroma = np.asarray(np.log2(freqs)) - np.asarray([int(np.log2(f)) for f in freqs])
    # Log-frequency distance from every spectrogram bin to every chroma center,
    # then the nearest chroma bin per frequency row.  (Replaces the original
    # flipud/reversed/min(enumerate(...)) dance, which reduced to exactly this.)
    CD = np.abs(freqschroma[:, None] - chroma[None, :])
    min_index = np.argmin(CD, axis=1)
    CS = np.zeros((nchroma, Magnitude.shape[1]))
    for i in range(nchroma):
        # Frequency rows assigned to chroma bin i.
        members = [row for row, b in enumerate(min_index) if b == i]
        AIndex = np.zeros((len(members), Magnitude.shape[1]))
        for t, row in enumerate(members):
            AIndex[t, :] = Magnitude[row, :]
        # Column-wise mean over the member rows (NaN when members is empty).
        CS[i, :] = AIndex.mean(axis=0)
    # Normalize the chromagram by its global maximum.
    return CS / CS.max()
|
{"hexsha": "ba8a994f69cd84a9dd0c353f789c92916ed5c311", "size": 5452, "ext": "py", "lang": "Python", "max_stars_repo_path": "SigProc/chromagram.py", "max_stars_repo_name": "nsetzer/SigProc", "max_stars_repo_head_hexsha": "c944d9a21bf90107374fe9d04cad6e44f52c7b0a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SigProc/chromagram.py", "max_issues_repo_name": "nsetzer/SigProc", "max_issues_repo_head_hexsha": "c944d9a21bf90107374fe9d04cad6e44f52c7b0a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SigProc/chromagram.py", "max_forks_repo_name": "nsetzer/SigProc", "max_forks_repo_head_hexsha": "c944d9a21bf90107374fe9d04cad6e44f52c7b0a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-25T00:37:53.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-25T00:37:53.000Z", "avg_line_length": 31.3333333333, "max_line_length": 104, "alphanum_fraction": 0.5887747616, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1507}
|
/**
* @file llfloaterflickr.cpp
* @brief Implementation of llfloaterflickr
* @author cho@lindenlab.com
*
* $LicenseInfo:firstyear=2013&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2013, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
#include "llviewerprecompiledheaders.h"
#include "llfloaterflickr.h"
#include "llagent.h"
#include "llagentui.h"
#include "llcheckboxctrl.h"
#include "llcombobox.h"
#include "llflickrconnect.h"
#include "llfloaterreg.h"
#include "lliconctrl.h"
#include "llimagefiltersmanager.h"
#include "llresmgr.h" // LLLocale
#include "llsdserialize.h"
#include "llloadingindicator.h"
#include "llslurl.h"
#include "lltrans.h"
#include "llsnapshotlivepreview.h"
#include "llfloaterbigpreview.h"
#include "llviewerregion.h"
#include "llviewercontrol.h"
#include "llviewermedia.h"
#include "lltabcontainer.h"
#include "llviewerparcelmgr.h"
#include "llviewerregion.h"
#include <boost/regex.hpp>
#include "llspinctrl.h"
#include "llviewernetwork.h"
#include "llnotificationsutil.h"
#include "exoflickr.h"
#include "exoflickrauth.h"
#include "llnotificationsutil.h"
#include "llviewernetwork.h"
// Register the photo/account panels with the XUI panel factory under the
// names referenced by the floater's XML definition.
static LLPanelInjector<LLFlickrPhotoPanel> t_panel_photo("llflickrphotopanel");
static LLPanelInjector<LLFlickrAccountPanel> t_panel_account("llflickraccountpanel");
// <FS:Ansariel> Don't assume we're always in Second Life
// Default tag prefix and machine-tag namespace used when composing uploads.
const std::string DEFAULT_TAG_TEXT = "Firestorm ";
static std::string FLICKR_MACHINE_TAGS_NAMESPACE = "secondlife";
// </FS:Ansariel>
///////////////////////////
//LLFlickrPhotoPanel///////
///////////////////////////
// Constructor: null out all child-widget pointers (they are resolved in
// postBuild()) and register the commit callbacks referenced from XUI.
LLFlickrPhotoPanel::LLFlickrPhotoPanel() :
mResolutionComboBox(NULL),
mRefreshBtn(NULL),
mBtnPreview(NULL),
mWorkingLabel(NULL),
mThumbnailPlaceholder(NULL),
mTitleTextBox(NULL),
mDescriptionTextBox(NULL),
mLocationCheckbox(NULL),
mTagsTextBox(NULL),
mRatingComboBox(NULL),
mBigPreviewFloater(NULL),
mPostButton(NULL)
{
	mCommitCallbackRegistrar.add("SocialSharing.SendPhoto", boost::bind(&LLFlickrPhotoPanel::onSend, this));
	mCommitCallbackRegistrar.add("SocialSharing.RefreshPhoto", boost::bind(&LLFlickrPhotoPanel::onClickNewSnapshot, this));
	mCommitCallbackRegistrar.add("SocialSharing.BigPreview", boost::bind(&LLFlickrPhotoPanel::onClickBigPreview, this));
}
// Destructor: tear down the live snapshot preview (if any) and persist the
// user's last-used resolution settings.
LLFlickrPhotoPanel::~LLFlickrPhotoPanel()
{
	if(mPreviewHandle.get())
	{
		mPreviewHandle.get()->die();
	}
	// <FS:Ansariel> Store settings at logout
	gSavedSettings.setS32("FSLastSnapshotToFlickrResolution", getChild<LLComboBox>("resolution_combobox")->getCurrentIndex());
	gSavedSettings.setS32("FSLastSnapshotToFlickrWidth", getChild<LLSpinCtrl>("custom_snapshot_width")->getValue().asInteger());
	gSavedSettings.setS32("FSLastSnapshotToFlickrHeight", getChild<LLSpinCtrl>("custom_snapshot_height")->getValue().asInteger());
	// </FS:Ansariel>
}
// Resolve all child widgets after the XUI hierarchy has been built, wire up
// their callbacks, restore last-used settings, and populate the filter list.
BOOL LLFlickrPhotoPanel::postBuild()
{
	setVisibleCallback(boost::bind(&LLFlickrPhotoPanel::onVisibilityChange, this, _2));
	mResolutionComboBox = getChild<LLUICtrl>("resolution_combobox");
	mResolutionComboBox->setCommitCallback(boost::bind(&LLFlickrPhotoPanel::updateResolution, this, TRUE));
	mFilterComboBox = getChild<LLUICtrl>("filters_combobox");
	mFilterComboBox->setCommitCallback(boost::bind(&LLFlickrPhotoPanel::updateResolution, this, TRUE));
	mRefreshBtn = getChild<LLUICtrl>("new_snapshot_btn");
	mBtnPreview = getChild<LLButton>("big_preview_btn");
	mWorkingLabel = getChild<LLUICtrl>("working_lbl");
	mThumbnailPlaceholder = getChild<LLUICtrl>("thumbnail_placeholder");
	mTitleTextBox = getChild<LLUICtrl>("photo_title");
	mDescriptionTextBox = getChild<LLUICtrl>("photo_description");
	mLocationCheckbox = getChild<LLUICtrl>("add_location_cb");
	mTagsTextBox = getChild<LLUICtrl>("photo_tags");
	// <FS:Ansariel> Don't assume we're always in Second Life
	//mTagsTextBox->setValue(DEFAULT_TAG_TEXT);
	// Seed the tags with the grid identifier (quoted when not Second Life).
	mTagsTextBox->setValue(DEFAULT_TAG_TEXT + (LLGridManager::instance().isInSecondLife() ? "secondlife" : ("\"" + LLGridManager::instance().getGridId()) + "\"") + " ");
	// </FS:Ansariel>
	mRatingComboBox = getChild<LLUICtrl>("rating_combobox");
	mPostButton = getChild<LLUICtrl>("post_photo_btn");
	mCancelButton = getChild<LLUICtrl>("cancel_photo_btn");
	mBigPreviewFloater = dynamic_cast<LLFloaterBigPreview*>(LLFloaterReg::getInstance("big_preview"));
	// <FS:Ansariel> FIRE-15112: Allow custom resolution for SLShare
	getChild<LLSpinCtrl>("custom_snapshot_width")->setCommitCallback(boost::bind(&LLFlickrPhotoPanel::updateResolution, this, TRUE));
	getChild<LLSpinCtrl>("custom_snapshot_height")->setCommitCallback(boost::bind(&LLFlickrPhotoPanel::updateResolution, this, TRUE));
	getChild<LLCheckBoxCtrl>("keep_aspect_ratio")->setCommitCallback(boost::bind(&LLFlickrPhotoPanel::updateResolution, this, TRUE));
	// Restore the settings persisted by the destructor.
	getChild<LLComboBox>("resolution_combobox")->setCurrentByIndex(gSavedSettings.getS32("FSLastSnapshotToFlickrResolution"));
	getChild<LLSpinCtrl>("custom_snapshot_width")->setValue(gSavedSettings.getS32("FSLastSnapshotToFlickrWidth"));
	getChild<LLSpinCtrl>("custom_snapshot_height")->setValue(gSavedSettings.getS32("FSLastSnapshotToFlickrHeight"));
	// </FS:Ansariel>
	// Update filter list
	std::vector<std::string> filter_list = LLImageFiltersManager::getInstance()->getFiltersList();
	LLComboBox* filterbox = static_cast<LLComboBox *>(mFilterComboBox);
	for (U32 i = 0; i < filter_list.size(); i++)
	{
		filterbox->add(filter_list[i]);
	}
	return LLPanel::postBuild();
}
// virtual
// Handle snapshot lifecycle notifications from the live preview.
S32 LLFlickrPhotoPanel::notify(const LLSD& info)
{
	if (info.has("snapshot-updating"))
	{
		// Snapshot is being regenerated; nothing to do yet.
		return 1;
	}
	if (!info.has("snapshot-updated"))
	{
		return 0;
	}
	// A fresh snapshot is available: re-enable the send/post/save buttons.
	updateControls();
	// The refresh button starts hidden; reveal it once the first snapshot
	// has been taken.
	LLUICtrl* refresh_button = getRefreshBtn();
	if (!refresh_button->getVisible())
	{
		refresh_button->setVisible(true);
	}
	return 1;
}
// Per-frame draw: gate the controls on upload state, keep the big preview
// attached, render the thumbnail, and maintain the Post button state.
void LLFlickrPhotoPanel::draw()
{
    LLSnapshotLivePreview * previewp = static_cast<LLSnapshotLivePreview *>(mPreviewHandle.get());
    // Enable interaction only if no transaction with the service is on-going (prevent duplicated posts)
    bool no_ongoing_connection = !(LLFlickrConnect::instance().isTransactionOngoing());
    // <FS:Ansariel> Exodus' flickr upload
    LLFlickrConnect::EConnectionState connection_state = LLFlickrConnect::instance().getConnectionState();
    no_ongoing_connection &= (connection_state != LLFlickrConnect::FLICKR_CONNECTION_IN_PROGRESS &&
        connection_state != LLFlickrConnect::FLICKR_CONNECTION_FAILED &&
        connection_state != LLFlickrConnect::FLICKR_NOT_CONNECTED);
    // </FS:Ansariel>
    mCancelButton->setEnabled(no_ongoing_connection);
    mTitleTextBox->setEnabled(no_ongoing_connection);
    mDescriptionTextBox->setEnabled(no_ongoing_connection);
    mTagsTextBox->setEnabled(no_ongoing_connection);
    mRatingComboBox->setEnabled(no_ongoing_connection);
    mResolutionComboBox->setEnabled(no_ongoing_connection);
    mFilterComboBox->setEnabled(no_ongoing_connection);
    mRefreshBtn->setEnabled(no_ongoing_connection);
    mBtnPreview->setEnabled(no_ongoing_connection);
    mLocationCheckbox->setEnabled(no_ongoing_connection);
    // Reassign the preview floater if we have the focus and the preview exists
    if (hasFocus() && isPreviewVisible())
    {
        attachPreview();
    }
    // Toggle the button state as appropriate
    bool preview_active = (isPreviewVisible() && mBigPreviewFloater->isFloaterOwner(getParentByType<LLFloater>()));
    mBtnPreview->setToggleState(preview_active);
    // Display the preview if one is available
    if (previewp && previewp->getThumbnailImage())
    {
        const LLRect& thumbnail_rect = mThumbnailPlaceholder->getRect();
        const S32 thumbnail_w = previewp->getThumbnailWidth();
        const S32 thumbnail_h = previewp->getThumbnailHeight();
        // calc preview offset within the preview rect: center it both ways
        const S32 local_offset_x = (thumbnail_rect.getWidth() - thumbnail_w) / 2 ;
        const S32 local_offset_y = (thumbnail_rect.getHeight() - thumbnail_h) / 2 ;
        S32 offset_x = thumbnail_rect.mLeft + local_offset_x;
        S32 offset_y = thumbnail_rect.mBottom + local_offset_y;
        gGL.matrixMode(LLRender::MM_MODELVIEW);
        // Apply floater transparency to the texture unless the floater is focused.
        F32 alpha = getTransparencyType() == TT_ACTIVE ? 1.0f : getCurrentTransparency();
        LLColor4 color = LLColor4::white;
        gl_draw_scaled_image(offset_x, offset_y,
            thumbnail_w, thumbnail_h,
            previewp->getThumbnailImage(), color % alpha);
    }
    // Update the visibility of the working (computing preview) label
    mWorkingLabel->setVisible(!(previewp && previewp->getSnapshotUpToDate()));
    // Enable Post if we have a preview to send and no on going connection being processed
    mPostButton->setEnabled(no_ongoing_connection && (previewp && previewp->getSnapshotUpToDate()));
    // Draw the rest of the panel on top of it
    LLPanel::draw();
}
// Resolve the weak handle to the live snapshot preview (may be NULL).
LLSnapshotLivePreview* LLFlickrPhotoPanel::getPreviewView()
{
	return static_cast<LLSnapshotLivePreview*>(mPreviewHandle.get());
}
// When the panel becomes visible, either refresh the existing live preview
// or lazily construct one configured for web-sized PNG snapshots.
void LLFlickrPhotoPanel::onVisibilityChange(BOOL visible)
{
	if (visible)
	{
		if (mPreviewHandle.get())
		{
			LLSnapshotLivePreview* preview = getPreviewView();
			if(preview)
			{
				LL_DEBUGS() << "opened, updating snapshot" << LL_ENDL;
				preview->updateSnapshot(TRUE);
			}
		}
		else
		{
			// First time shown: create the preview covering the whole root view.
			LLRect full_screen_rect = getRootView()->getRect();
			LLSnapshotLivePreview::Params p;
			p.rect(full_screen_rect);
			LLSnapshotLivePreview* previewp = new LLSnapshotLivePreview(p);
			mPreviewHandle = previewp->getHandle();
			previewp->setContainer(this);
			previewp->setSnapshotType(LLSnapshotModel::SNAPSHOT_WEB);
			previewp->setSnapshotFormat(LLSnapshotModel::SNAPSHOT_FORMAT_PNG);
			previewp->setThumbnailSubsampled(TRUE); // We want the preview to reflect the *saved* image
			previewp->setAllowRenderUI(FALSE); // We do not want the rendered UI in our snapshots
			previewp->setAllowFullScreenPreview(FALSE); // No full screen preview in SL Share mode
			previewp->setThumbnailPlaceholderRect(mThumbnailPlaceholder->getRect());
			updateControls();
		}
	}
}
void LLFlickrPhotoPanel::onClickNewSnapshot()
{
LLSnapshotLivePreview* previewp = getPreviewView();
if (previewp)
{
previewp->updateSnapshot(TRUE);
}
}
void LLFlickrPhotoPanel::onClickBigPreview()
{
    // Toggle the stand-alone "big preview" floater on and off.
    if (!isPreviewVisible())
    {
        attachPreview();
        LLFloaterReg::showInstance("big_preview");
    }
    else
    {
        LLFloaterReg::hideInstance("big_preview");
    }
}
bool LLFlickrPhotoPanel::isPreviewVisible()
{
    // The big preview counts as visible only when its floater both exists
    // and is currently shown.
    if (!mBigPreviewFloater)
    {
        return false;
    }
    return mBigPreviewFloater->getVisible();
}
void LLFlickrPhotoPanel::attachPreview()
{
    // Hand the current live preview over to the big preview floater.
    if (!mBigPreviewFloater)
    {
        return;
    }
    mBigPreviewFloater->setPreview(getPreviewView());
    mBigPreviewFloater->setFloaterOwner(getParentByType<LLFloater>());
}
void LLFlickrPhotoPanel::onSend()
{
    // Subscribe to Flickr connection-state events before starting the
    // upload, so state changes during the post are observed.
    LLEventPumps::instance().obtain("FlickrConnectState").stopListening("LLFlickrPhotoPanel"); // just in case it is already listening
    LLEventPumps::instance().obtain("FlickrConnectState").listen("LLFlickrPhotoPanel", boost::bind(&LLFlickrPhotoPanel::onFlickrConnectStateChange, this, _1));
    // <FS:Ansariel> Exodus' flickr upload
    sendPhoto();
    // </FS:Ansariel>
}
bool LLFlickrPhotoPanel::onFlickrConnectStateChange(const LLSD& data)
{
switch (data.get("enum").asInteger())
{
case LLFlickrConnect::FLICKR_CONNECTED:
sendPhoto();
break;
case LLFlickrConnect::FLICKR_POSTED:
LLEventPumps::instance().obtain("FlickrConnectState").stopListening("LLFlickrPhotoPanel");
// <FS:Ansariel> FIRE-15948: Don't close floater after each post and retain entered text
//clearAndClose();
break;
}
return false;
}
void LLFlickrPhotoPanel::sendPhoto()
{
// Get the title, description, and tags
std::string title = mTitleTextBox->getValue().asString();
std::string description = mDescriptionTextBox->getValue().asString();
std::string tags = mTagsTextBox->getValue().asString();
// Add the location if required
bool add_location = mLocationCheckbox->getValue().asBoolean();
if (add_location)
{
// Get the SLURL for the location
LLSLURL slurl;
LLAgentUI::buildSLURL(slurl);
std::string slurl_string = slurl.getSLURLString();
std::string photo_link_text = "Visit this location";// at [] in Second Life";
std::string parcel_name = LLViewerParcelMgr::getInstance()->getAgentParcelName();
if (!parcel_name.empty())
{
boost::regex pattern = boost::regex("\\S\\.[a-zA-Z]{2,}");
boost::match_results<std::string::const_iterator> matches;
if(!boost::regex_search(parcel_name, matches, pattern))
{
photo_link_text += " at " + parcel_name;
}
}
// <FS:Ansariel> Don't assume we're always in Second Life
//photo_link_text += " in Second Life";
if (LLGridManager::instance().isInSecondLife())
{
photo_link_text += " in Second Life";
}
else
{
photo_link_text += " in \"" + LLGridManager::instance().getGridLabel() + "\"";
}
// </FS:Ansariel>
slurl_string = "<a href=\"" + slurl_string + "\">" + photo_link_text + "</a>";
// Add it to the description (pretty crude, but we don't have a better option with photos)
if (description.empty())
description = slurl_string;
else
description = description + "\n\n" + slurl_string;
// Also add special "machine tags" with location metadata
const LLVector3& agent_pos_region = gAgent.getPositionAgent();
LLViewerRegion* region = gAgent.getRegion();
LLParcel* parcel = LLViewerParcelMgr::getInstance()->getAgentParcel();
if (region && parcel)
{
S32 pos_x = S32(agent_pos_region.mV[VX]);
S32 pos_y = S32(agent_pos_region.mV[VY]);
S32 pos_z = S32(agent_pos_region.mV[VZ]);
std::string parcel_name = LLViewerParcelMgr::getInstance()->getAgentParcelName();
std::string region_name = region->getName();
// <FS:Ansariel> Don't assume we're always in Second Life
if (!LLGridManager::instance().isInSecondLife())
{
FLICKR_MACHINE_TAGS_NAMESPACE = LLGridManager::instance().getGridId();
}
// </FS:Ansariel>
if (!region_name.empty())
{
tags += llformat(" \"%s:region=%s\"", FLICKR_MACHINE_TAGS_NAMESPACE.c_str(), region_name.c_str());
}
if (!parcel_name.empty())
{
tags += llformat(" \"%s:parcel=%s\"", FLICKR_MACHINE_TAGS_NAMESPACE.c_str(), parcel_name.c_str());
}
tags += llformat(" \"%s:x=%d\"", FLICKR_MACHINE_TAGS_NAMESPACE.c_str(), pos_x);
tags += llformat(" \"%s:y=%d\"", FLICKR_MACHINE_TAGS_NAMESPACE.c_str(), pos_y);
tags += llformat(" \"%s:z=%d\"", FLICKR_MACHINE_TAGS_NAMESPACE.c_str(), pos_z);
}
}
// Get the content rating
int content_rating = mRatingComboBox->getValue().asInteger();
// Get the image
LLSnapshotLivePreview* previewp = getPreviewView();
// <FS:Ansariel> Exodus' flickr upload
LLFlickrConnect::instance().setConnectionState(LLFlickrConnect::FLICKR_POSTING);
LLSD params;
params["title"] = title;
params["safety_level"] = content_rating;
params["tags"] = tags;
params["description"] = description;
exoFlickr::uploadPhoto(params, previewp->getFormattedImage().get(), boost::bind(&LLFlickrPhotoPanel::uploadCallback, this, _1, _2));
// </FS:Ansariel>
updateControls();
}
void LLFlickrPhotoPanel::clearAndClose()
{
mTitleTextBox->setValue("");
mDescriptionTextBox->setValue("");
LLFloater* floater = getParentByType<LLFloater>();
if (floater)
{
floater->closeFloater();
if (mBigPreviewFloater)
{
mBigPreviewFloater->closeOnFloaterOwnerClosing(floater);
}
}
}
void LLFlickrPhotoPanel::updateControls()
{
    // Refresh the panel widgets from the current preview state.
    LLSnapshotLivePreview* previewp = getPreviewView();
    // got_snap is only used for the debug log line below.
    BOOL got_snap = previewp && previewp->getSnapshotUpToDate();
    // *TODO: Separate maximum size for Web images from postcards
    LL_DEBUGS() << "Is snapshot up-to-date? " << got_snap << LL_ENDL;
    // FALSE: recompute sizes without forcing a snapshot refresh.
    updateResolution(FALSE);
}
void LLFlickrPhotoPanel::updateResolution(BOOL do_update)
{
    // Apply the resolution and filter chosen in the combo boxes to the live
    // preview. do_update == TRUE forces a snapshot refresh when the size or
    // filter actually changed.
    LLComboBox* combobox = static_cast<LLComboBox *>(mResolutionComboBox);
    LLComboBox* filterbox = static_cast<LLComboBox *>(mFilterComboBox);
    // The resolution combo value is a notation-serialized LLSD array "[w,h]".
    std::string sdstring = combobox->getSelectedValue();
    LLSD sdres;
    std::stringstream sstream(sdstring);
    LLSDSerialize::fromNotation(sdres, sstream, sdstring.size());
    S32 width = sdres[0];
    S32 height = sdres[1];
    // Note : index 0 of the filter drop down is assumed to be "No filter" in whichever locale
    std::string filter_name = (filterbox->getCurrentIndex() ? filterbox->getSimple() : "");
    LLSnapshotLivePreview * previewp = static_cast<LLSnapshotLivePreview *>(mPreviewHandle.get());
    if (previewp && combobox->getCurrentIndex() >= 0)
    {
        // <FS:Ansariel> FIRE-15112: Allow custom resolution for SLShare; moved up
        checkAspectRatio(width);
        S32 original_width = 0 , original_height = 0 ;
        previewp->getSize(original_width, original_height) ;
        // Size encoding from the combo: 0,0 = current window size,
        // -1,-1 = custom spinner values, anything else = fixed preset.
        if (width == 0 || height == 0)
        {
            // take resolution from current window size
            LL_DEBUGS() << "Setting preview res from window: " << gViewerWindow->getWindowWidthRaw() << "x" << gViewerWindow->getWindowHeightRaw() << LL_ENDL;
            previewp->setSize(gViewerWindow->getWindowWidthRaw(), gViewerWindow->getWindowHeightRaw());
        }
        // <FS:Ansariel> FIRE-15112: Allow custom resolution for SLShare
        else if (width == -1 || height == -1)
        {
            // take resolution from custom size
            LLSpinCtrl* width_spinner = getChild<LLSpinCtrl>("custom_snapshot_width");
            LLSpinCtrl* height_spinner = getChild<LLSpinCtrl>("custom_snapshot_height");
            S32 custom_width = width_spinner->getValue().asInteger();
            S32 custom_height = height_spinner->getValue().asInteger();
            // checkImageSize() may clamp/adjust the values; write them back
            // to the spinners when they were changed.
            if (checkImageSize(previewp, custom_width, custom_height, TRUE, previewp->getMaxImageSize()))
            {
                width_spinner->set(custom_width);
                height_spinner->set(custom_height);
            }
            LL_DEBUGS() << "Setting preview res from custom: " << custom_width << "x" << custom_height << LL_ENDL;
            previewp->setSize(custom_width, custom_height);
        }
        // </FS:Ansariel>
        else
        {
            // use the resolution from the selected pre-canned drop-down choice
            LL_DEBUGS() << "Setting preview res selected from combo: " << width << "x" << height << LL_ENDL;
            previewp->setSize(width, height);
        }
        // <FS:Ansariel> FIRE-15112: Allow custom resolution for SLShare; moved up
        //checkAspectRatio(width);
        // Re-read the (possibly adjusted) preview size and refresh if it
        // differs from what the preview had before.
        previewp->getSize(width, height);
        if ((original_width != width) || (original_height != height))
        {
            previewp->setSize(width, height);
            if (do_update)
            {
                //previewp->updateSnapshot(TRUE);
                previewp->updateSnapshot(TRUE, TRUE);
                updateControls();
            }
        }
        // Get the old filter, compare to the current one "filter_name" and set if changed
        std::string original_filter = previewp->getFilter();
        if (original_filter != filter_name)
        {
            previewp->setFilter(filter_name);
            if (do_update)
            {
                previewp->updateSnapshot(FALSE, TRUE);
                updateControls();
            }
        }
    }
    // <FS:Ansariel> FIRE-15112: Allow custom resolution for SLShare
    // "[i-1,i-1]" is the notation form of the custom-resolution entry.
    BOOL custom_resolution = static_cast<LLComboBox *>(mResolutionComboBox)->getSelectedValue().asString() == "[i-1,i-1]";
    getChild<LLSpinCtrl>("custom_snapshot_width")->setEnabled(custom_resolution);
    getChild<LLSpinCtrl>("custom_snapshot_height")->setEnabled(custom_resolution);
    getChild<LLCheckBoxCtrl>("keep_aspect_ratio")->setEnabled(custom_resolution);
    // </FS:Ansariel>
}
void LLFlickrPhotoPanel::checkAspectRatio(S32 index)
{
    // Decide whether the preview must keep the window's aspect ratio:
    //   0  -> "current window" resolution: always keep it
    //  -1  -> custom resolution: honor the "keep aspect ratio" checkbox
    // else -> predefined resolution: never keep it
    BOOL keep_aspect;
    switch (index)
    {
    case 0:
        keep_aspect = TRUE;
        break;
    // <FS:Ansariel> FIRE-15112: Allow custom resolution for SLShare
    case -1:
        keep_aspect = getChild<LLCheckBoxCtrl>("keep_aspect_ratio")->get();
        break;
    // </FS:Ansariel>
    default:
        keep_aspect = FALSE;
        break;
    }
    LLSnapshotLivePreview* previewp = getPreviewView();
    if (previewp)
    {
        previewp->mKeepAspectRatio = keep_aspect;
    }
}
LLUICtrl* LLFlickrPhotoPanel::getRefreshBtn()
{
    // Accessor for the panel's refresh button.
    return mRefreshBtn;
}
// <FS:Ansariel> Exodus' flickr upload
void LLFlickrPhotoPanel::onOpen(const LLSD& key)
{
    // Reauthorise if necessary.
    LLFlickrConnect::instance().setConnectionState(LLFlickrConnect::FLICKR_CONNECTION_IN_PROGRESS);
    // NOTE(review): result is intentionally not stored; presumably
    // exoFlickrAuth manages its own lifetime - confirm it deletes itself
    // after invoking the callback.
    new exoFlickrAuth(boost::bind(&LLFlickrPhotoPanel::flickrAuthResponse, this, _1, _2));
}
void LLFlickrPhotoPanel::uploadCallback(bool success, const LLSD& response)
{
    // Completion callback for the direct (Exodus) Flickr upload.
    if (!success || response["stat"].asString() != "ok")
    {
        LLFlickrConnect::instance().setConnectionState(LLFlickrConnect::FLICKR_POST_FAILED);
        return;
    }
    LLFlickrConnect::instance().setConnectionState(LLFlickrConnect::FLICKR_POSTED);
    LLSD args;
    args["ID"] = response["photoid"];
    LLNotificationsUtil::add("ExodusFlickrUploadComplete", args);
}
void LLFlickrPhotoPanel::flickrAuthResponse(bool success, const LLSD& response)
{
    // Result of the (re)authentication attempt started in onOpen().
    if (success)
    {
        LLFlickrConnect::instance().setConnectionState(LLFlickrConnect::FLICKR_CONNECTED);
        return;
    }
    // Complain about failed auth here.
    LL_WARNS("Flickr") << "Flickr authentication failed." << LL_ENDL;
    LLFlickrConnect::instance().setConnectionState(LLFlickrConnect::FLICKR_CONNECTION_FAILED);
}
// </FS:Ansariel>
// <FS:Ansariel> FIRE-15112: Allow custom resolution for SLShare
BOOL LLFlickrPhotoPanel::checkImageSize(LLSnapshotLivePreview* previewp, S32& width, S32& height, BOOL isWidthChanged, S32 max_value)
{
    // Adjust width/height in place so they respect the window aspect ratio
    // (when the preview requires it) and the maximum image size.
    // Returns TRUE when either value was changed.
    const S32 requested_w = width;
    const S32 requested_h = height;
    if (previewp && previewp->mKeepAspectRatio)
    {
        const S32 window_w = gViewerWindow->getWindowWidthRaw();
        const S32 window_h = gViewerWindow->getWindowHeightRaw();
        if (window_w < 1 || window_h < 1)
        {
            return FALSE;
        }
        // Lock the requested size to the aspect ratio of the current window,
        // recomputing whichever dimension the user did not just edit.
        const F32 aspect_ratio = (F32)window_w / window_h;
        if (isWidthChanged)
        {
            height = ll_round(width / aspect_ratio);
        }
        else
        {
            width = ll_round(height * aspect_ratio);
        }
        // Clamp the larger dimension to max_value and rescale the other.
        if (width > max_value || height > max_value)
        {
            if (width > height)
            {
                width = max_value;
                height = (S32)(width / aspect_ratio);
            }
            else
            {
                height = max_value;
                width = (S32)(height * aspect_ratio);
            }
        }
    }
    return (requested_w != width || requested_h != height);
}
// </FS:Ansariel>
///////////////////////////
//LLFlickrAccountPanel//////
///////////////////////////
LLFlickrAccountPanel::LLFlickrAccountPanel() :
mAccountCaptionLabel(NULL),
mAccountNameLabel(NULL),
mPanelButtons(NULL),
mConnectButton(NULL),
mDisconnectButton(NULL)
{
    // Wire the XUI commit callbacks and track show/hide to manage event
    // listeners (see onVisibilityChange). Widget pointers are filled in
    // postBuild().
    mCommitCallbackRegistrar.add("SocialSharing.Connect", boost::bind(&LLFlickrAccountPanel::onConnect, this));
    mCommitCallbackRegistrar.add("SocialSharing.Disconnect", boost::bind(&LLFlickrAccountPanel::onDisconnect, this));
    setVisibleCallback(boost::bind(&LLFlickrAccountPanel::onVisibilityChange, this, _2));
}
BOOL LLFlickrAccountPanel::postBuild()
{
    // Cache widget pointers declared in the panel's XUI definition.
    mAccountCaptionLabel = getChild<LLTextBox>("account_caption_label");
    mAccountNameLabel = getChild<LLTextBox>("account_name_label");
    mPanelButtons = getChild<LLUICtrl>("panel_buttons");
    mConnectButton = getChild<LLUICtrl>("connect_btn");
    mDisconnectButton = getChild<LLUICtrl>("disconnect_btn");
    return LLPanel::postBuild();
}
void LLFlickrAccountPanel::draw()
{
    // Reflect the connection state on the buttons every frame.
    LLFlickrConnect::EConnectionState connection_state = LLFlickrConnect::instance().getConnectionState();
    // 'Disconnect' is unavailable while a disconnect is already running.
    mDisconnectButton->setEnabled(connection_state != LLFlickrConnect::FLICKR_DISCONNECTING);
    // 'Connect' is unavailable while a connection attempt is running.
    mConnectButton->setEnabled(connection_state != LLFlickrConnect::FLICKR_CONNECTION_IN_PROGRESS);
    LLPanel::draw();
}
void LLFlickrAccountPanel::onVisibilityChange(BOOL visible)
{
    // Attach event-pump listeners while the panel is visible and detach
    // them when it is hidden, so a hidden panel does not react to events.
    if(visible)
    {
        // stopListening first, in case a listener is already registered.
        LLEventPumps::instance().obtain("FlickrConnectState").stopListening("LLFlickrAccountPanel");
        LLEventPumps::instance().obtain("FlickrConnectState").listen("LLFlickrAccountPanel", boost::bind(&LLFlickrAccountPanel::onFlickrConnectStateChange, this, _1));
        LLEventPumps::instance().obtain("FlickrConnectInfo").stopListening("LLFlickrAccountPanel");
        LLEventPumps::instance().obtain("FlickrConnectInfo").listen("LLFlickrAccountPanel", boost::bind(&LLFlickrAccountPanel::onFlickrConnectInfoChange, this));
        //Connected
        if(LLFlickrConnect::instance().isConnected())
        {
            showConnectedLayout();
        }
        //Check if connected (show disconnected layout in meantime)
        else
        {
            showDisconnectedLayout();
        }
        // Kick off a connection check when we are not connected and no
        // attempt is currently in flight.
        if ((LLFlickrConnect::instance().getConnectionState() == LLFlickrConnect::FLICKR_NOT_CONNECTED) ||
            (LLFlickrConnect::instance().getConnectionState() == LLFlickrConnect::FLICKR_CONNECTION_FAILED))
        {
            LLFlickrConnect::instance().checkConnectionToFlickr();
        }
    }
    else
    {
        LLEventPumps::instance().obtain("FlickrConnectState").stopListening("LLFlickrAccountPanel");
        LLEventPumps::instance().obtain("FlickrConnectInfo").stopListening("LLFlickrAccountPanel");
    }
}
bool LLFlickrAccountPanel::onFlickrConnectStateChange(const LLSD& data)
{
    // Switch the panel layout to match the current connection state.
    if (!LLFlickrConnect::instance().isConnected())
    {
        showDisconnectedLayout();
        return false;
    }
    //In process of disconnecting so leave the layout as is
    if (data.get("enum").asInteger() != LLFlickrConnect::FLICKR_DISCONNECTING)
    {
        showConnectedLayout();
    }
    return false;
}
bool LLFlickrAccountPanel::onFlickrConnectInfoChange()
{
    // Update the account-name label from the freshly loaded Flickr info.
    LLSD info = LLFlickrConnect::instance().getInfo();
    //Strings of format [http://www.somewebsite.com Click Me] become clickable text
    std::string clickable_name;
    if (info.has("link") && info.has("name"))
    {
        clickable_name.append("[").append(info["link"].asString()).append(" ").append(info["name"].asString()).append("]");
    }
    mAccountNameLabel->setText(clickable_name);
    return false;
}
void LLFlickrAccountPanel::showConnectButton()
{
    // Swap the button pair to "connect" mode; no-op when already shown.
    if (mConnectButton->getVisible())
    {
        return;
    }
    mConnectButton->setVisible(TRUE);
    mDisconnectButton->setVisible(FALSE);
}
void LLFlickrAccountPanel::hideConnectButton()
{
    // Swap the button pair to "disconnect" mode; no-op when already hidden.
    if (!mConnectButton->getVisible())
    {
        return;
    }
    mConnectButton->setVisible(FALSE);
    mDisconnectButton->setVisible(TRUE);
}
void LLFlickrAccountPanel::showDisconnectedLayout()
{
    // Disconnected state: caption text, cleared account name, connect button.
    mAccountCaptionLabel->setText(getString("flickr_disconnected"));
    mAccountNameLabel->setText(std::string(""));
    showConnectButton();
}
void LLFlickrAccountPanel::showConnectedLayout()
{
    // Connected state: request the account info (label is filled in by the
    // FlickrConnectInfo event) and show the disconnect button.
    LLFlickrConnect::instance().loadFlickrInfo();
    mAccountCaptionLabel->setText(getString("flickr_connected"));
    hideConnectButton();
}
void LLFlickrAccountPanel::onConnect()
{
    // "Connect" button: force a fresh connection attempt (true = re-check).
    LLFlickrConnect::instance().checkConnectionToFlickr(true);
}
void LLFlickrAccountPanel::onDisconnect()
{
    // "Disconnect" button: tear down the Flickr connection.
    LLFlickrConnect::instance().disconnectFromFlickr();
}
////////////////////////
//LLFloaterFlickr///////
////////////////////////
LLFloaterFlickr::LLFloaterFlickr(const LLSD& key) : LLFloater(key),
                                                     mFlickrPhotoPanel(NULL),
                                                     mStatusErrorText(NULL),
                                                     mStatusLoadingText(NULL),
                                                     mStatusLoadingIndicator(NULL)
{
    // Widget pointers are resolved in postBuild(); here we only register
    // the cancel callback used by the XUI.
    mCommitCallbackRegistrar.add("SocialSharing.Cancel", boost::bind(&LLFloaterFlickr::onCancel, this));
}
void LLFloaterFlickr::onClose(bool app_quitting)
{
LLFloaterBigPreview* big_preview_floater = dynamic_cast<LLFloaterBigPreview*>(LLFloaterReg::getInstance("big_preview"));
if (big_preview_floater)
{
big_preview_floater->closeOnFloaterOwnerClosing(this);
}
LLFloater::onClose(app_quitting);
}
void LLFloaterFlickr::onCancel()
{
LLFloaterBigPreview* big_preview_floater = dynamic_cast<LLFloaterBigPreview*>(LLFloaterReg::getInstance("big_preview"));
if (big_preview_floater)
{
big_preview_floater->closeOnFloaterOwnerClosing(this);
}
closeFloater();
}
BOOL LLFloaterFlickr::postBuild()
{
    // Keep tab of the Photo Panel
    mFlickrPhotoPanel = static_cast<LLFlickrPhotoPanel*>(getChild<LLUICtrl>("panel_flickr_photo"));
    // Connection status widgets
    mStatusErrorText = getChild<LLTextBox>("connection_error_text");
    mStatusLoadingText = getChild<LLTextBox>("connection_loading_text");
    mStatusLoadingIndicator = getChild<LLUICtrl>("connection_loading_indicator");
    // <FS:Ansariel> Exodus' flickr upload
    // The account tab is not used with the direct upload path - remove it.
    getChild<LLTabContainer>("tabs")->removeTabPanel(getChild<LLPanel>("panel_flickr_account"));
    // </FS:Ansariel>
    return LLFloater::postBuild();
}
void LLFloaterFlickr::showPhotoPanel()
{
    // Bring the photo tab to the front of its tab container.
    LLTabContainer* parent = dynamic_cast<LLTabContainer*>(mFlickrPhotoPanel->getParent());
    if (parent)
    {
        parent->selectTabPanel(mFlickrPhotoPanel);
    }
    else
    {
        LL_WARNS() << "Cannot find panel container" << LL_ENDL;
    }
}
void LLFloaterFlickr::draw()
{
    // Map the Flickr connection state machine onto the status widgets every
    // frame: hide everything first, then show the widget matching the state.
    if (mStatusErrorText && mStatusLoadingText && mStatusLoadingIndicator)
    {
        mStatusErrorText->setVisible(false);
        mStatusLoadingText->setVisible(false);
        mStatusLoadingIndicator->setVisible(false);
        LLFlickrConnect::EConnectionState connection_state = LLFlickrConnect::instance().getConnectionState();
        std::string status_text;
        switch (connection_state)
        {
        case LLFlickrConnect::FLICKR_NOT_CONNECTED:
            // No status displayed when first opening the panel and no connection done
        case LLFlickrConnect::FLICKR_CONNECTED:
            // When successfully connected, no message is displayed
        case LLFlickrConnect::FLICKR_POSTED:
            // No success message to show since we actually close the floater after successful posting completion
            break;
        case LLFlickrConnect::FLICKR_CONNECTION_IN_PROGRESS:
            // Connection loading indicator
            mStatusLoadingText->setVisible(true);
            status_text = LLTrans::getString("SocialFlickrConnecting");
            mStatusLoadingText->setValue(status_text);
            mStatusLoadingIndicator->setVisible(true);
            break;
        case LLFlickrConnect::FLICKR_POSTING:
            // Posting indicator
            mStatusLoadingText->setVisible(true);
            status_text = LLTrans::getString("SocialFlickrPosting");
            mStatusLoadingText->setValue(status_text);
            mStatusLoadingIndicator->setVisible(true);
            break;
        case LLFlickrConnect::FLICKR_CONNECTION_FAILED:
            // Error connecting to the service
            mStatusErrorText->setVisible(true);
            status_text = LLTrans::getString("SocialFlickrErrorConnecting");
            mStatusErrorText->setValue(status_text);
            break;
        case LLFlickrConnect::FLICKR_POST_FAILED:
            // Error posting to the service
            mStatusErrorText->setVisible(true);
            status_text = LLTrans::getString("SocialFlickrErrorPosting");
            mStatusErrorText->setValue(status_text);
            break;
        case LLFlickrConnect::FLICKR_DISCONNECTING:
            // Disconnecting loading indicator
            mStatusLoadingText->setVisible(true);
            status_text = LLTrans::getString("SocialFlickrDisconnecting");
            mStatusLoadingText->setValue(status_text);
            mStatusLoadingIndicator->setVisible(true);
            break;
        case LLFlickrConnect::FLICKR_DISCONNECT_FAILED:
            // Error disconnecting from the service
            mStatusErrorText->setVisible(true);
            status_text = LLTrans::getString("SocialFlickrErrorDisconnecting");
            mStatusErrorText->setValue(status_text);
            break;
        }
    }
    LLFloater::draw();
}
// <FS:Ansariel> Exodus' flickr upload
void LLFloaterFlickr::onOpen(const LLSD& key)
{
    // Forward the open event so the photo panel can (re)authenticate.
    mFlickrPhotoPanel->onOpen(key);
}
// </FS:Ansariel>
|
{"hexsha": "e58709995c964f54429f62ade4e538b2f6ffa229", "size": 32598, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "indra/newview/llfloaterflickr.cpp", "max_stars_repo_name": "SaladDais/LLUDP-Encryption", "max_stars_repo_head_hexsha": "8a426cd0dd154e1a10903e0e6383f4deb2a6098a", "max_stars_repo_licenses": ["ISC"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-01-29T07:10:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T07:10:03.000Z", "max_issues_repo_path": "indra/newview/llfloaterflickr.cpp", "max_issues_repo_name": "bloomsirenix/Firestorm-manikineko", "max_issues_repo_head_hexsha": "67e1bb03b2d05ab16ab98097870094a8cc9de2e7", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "indra/newview/llfloaterflickr.cpp", "max_forks_repo_name": "bloomsirenix/Firestorm-manikineko", "max_forks_repo_head_hexsha": "67e1bb03b2d05ab16ab98097870094a8cc9de2e7", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-10-01T22:22:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T22:22:27.000Z", "avg_line_length": 33.6408668731, "max_line_length": 166, "alphanum_fraction": 0.7274986195, "num_tokens": 8313}
|
import FreeCAD
import numpy as np
from array import array
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
def convertToRGBAArray(RGBint):
    """Split a packed 0xRRGGBB integer into an (R, G, B, A) tuple.

    The alpha component is always forced to 0xff (fully opaque).
    """
    red = (RGBint >> 16) & 255
    green = (RGBint >> 8) & 255
    blue = RGBint & 255
    return (red, green, blue, 0xff)
class PixelContainer:
    """In-memory RGB framebuffer backed by PIL with drawing primitives
    (pixels, rectangles, lines, text) and a dirty flag for change tracking."""
    # Bytes per pixel (R, G, B, padding).
    # NOTE(review): not referenced in this class as shown - possibly used by callers.
    NUM_COLOR_COMPONENTS = 4
    def __init__(self, resolutionX, resolutionY):
        # "RGBX" = true color with a padding byte (no real alpha channel).
        self.image = Image.new(mode="RGBX", size=(resolutionX, resolutionY))
        self.draw = ImageDraw.Draw(self.image)
        # Default font; can be replaced via setActiveFont().
        self.font = ImageFont.truetype("truetype/ttf-bitstream-vera/VeraIt.ttf", 32)
        # Set by every drawing call; consumed (and reset) by dirty().
        self.modified = False
    def _colToTup(self, color):
        # Convert a color object with .r/.g/.b/.a attributes to a PIL tuple.
        return (color.r, color.g, color.b, color.a)
    def clear(self, color):
        """Fill the whole image with `color` (opaque black when falsy)."""
        self.modified = True
        if not color:
            col = (0, 0, 0, 255)
        else:
            col = self._colToTup(color)
        (x,y) = self.image.size
        self.image.paste( (col[0], col[1], col[2], col[3]), [0, 0, x, y] )
    def setPixel_ARGB32(self, pixel):
        # Set a single pixel from a packed 0xRRGGBB integer (pixel.rgba).
        self.modified = True
        self.image.putpixel((pixel.pos.x, pixel.pos.y), convertToRGBAArray(pixel.rgba))
    # interpret subwindow as closed interval on both sides
    def setSubWindowPixels_ARGB32(self, subWindowData):
        """Blit a rectangular pixel stream into the image at (p1, p2)."""
        self.modified = True
        xmin = min(subWindowData.p1.x, subWindowData.p2.x)
        xmax = max(subWindowData.p1.x, subWindowData.p2.x)
        ymin = min(subWindowData.p1.y, subWindowData.p2.y)
        ymax = max(subWindowData.p1.y, subWindowData.p2.y)
        #FreeCAD.Console.PrintMessage("Type is" + str(type(subWindowData.rgbaStream)) + "\n")
        #0xRRGGBBff
        # Pack the 32-bit values into a native byte buffer, then let PIL's
        # raw decoder reinterpret them.
        buf = array('I')
        for rgba32 in subWindowData.rgbaStream:
            #FreeCAD.Console.PrintMessage("r" + str( (rgba32 >> 24) & 0xff ) +
            #                            "g" + str( (rgba32 >> 16) & 0xff ) +
            #                            "b" + str( (rgba32 >> 8) & 0xff ) + "\n")
            buf.append(rgba32)
        assert(buf.itemsize == 4)
        # NOTE(review): assumes the platform's 'I' byte order lines up with
        # the "BGRX" raw decoder so channels land correctly - verify on
        # big-endian targets.
        im = Image.frombuffer("RGBX", (xmax - xmin + 1, ymax - ymin + 1), bytes(buf), 'raw', "BGRX", 0, 1)
        self.image.paste(im, (xmin, ymin))
    def drawRectangle(self, rectangle):
        """Draw an outlined (and optionally filled) axis-aligned rectangle."""
        self.modified = True
        col = self._colToTup(rectangle.pixelColor)
        fill = None
        if rectangle.filled:
            fill = col
        self.draw.rectangle( [ (rectangle.p1.x, rectangle.p1.y), (rectangle.p2.x, rectangle.p2.y) ], fill = fill, outline = col )
    def drawLine(self, lineData):
        """Draw a straight line segment between p1 and p2."""
        self.modified = True
        col = self._colToTup(lineData.pixelColor)
        self.draw.line([ (lineData.p1.x, lineData.p1.y), (lineData.p2.x, lineData.p2.y) ], fill = col)
    def toString(self):
        """Return the raw pixel bytes, flipped vertically.

        NOTE(review): the flip suggests the consumer expects a bottom-left
        origin - confirm against the caller.
        """
        return self.image.transpose(Image.FLIP_TOP_BOTTOM).tobytes()
    def setActiveFont(self, fontData):
        """Load the font at fontData.path/size, falling back to the default
        bundled font on failure."""
        try:
            self.font = ImageFont.truetype(fontData.path, fontData.size)
        except IOError:
            FreeCAD.Console.PrintError("Could not open font file: " + fontData.path + "\n")
            self.font = ImageFont.truetype("truetype/ttf-bitstream-vera/VeraIt.ttf", fontData.size)
    def drawText(self, textData):
        """Render textData.text at textData.pos with the active font."""
        self.modified = True
        col = self._colToTup(textData.color)
        self.draw.text((textData.pos.x, textData.pos.y), textData.text, font = self.font, fill=col)
    def getTextSize(self, txt, fontData):
        """Return (width, height) of txt rendered in the given (or active) font."""
        if fontData.path:
            font = ImageFont.truetype(fontData.path, fontData.size)
            return self.draw.textsize(text = txt, font = font)
        else:
            return self.draw.textsize(text = txt, font = self.font)
    def dirty(self):
        """Return True exactly once after any drawing call (test-and-clear)."""
        if self.modified:
            self.modified = False
            return True
        return False
|
{"hexsha": "d53a4377aba6703c70080de04025ad286a11925f", "size": 3772, "ext": "py", "lang": "Python", "max_stars_repo_path": "FPPixelContainer.py", "max_stars_repo_name": "dliess/FreeCADFrontPanelSimulation", "max_stars_repo_head_hexsha": "f1ae7dc2b57e19202b479621077e515a59e1beec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-08-07T06:36:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T21:48:47.000Z", "max_issues_repo_path": "FPPixelContainer.py", "max_issues_repo_name": "dliess/FreeCADFrontPanelSimulation", "max_issues_repo_head_hexsha": "f1ae7dc2b57e19202b479621077e515a59e1beec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-03-07T23:52:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-31T10:27:45.000Z", "max_forks_repo_path": "FPPixelContainer.py", "max_forks_repo_name": "dliess/FreeCADFrontPanelSimulation", "max_forks_repo_head_hexsha": "f1ae7dc2b57e19202b479621077e515a59e1beec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-19T22:50:42.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-19T22:50:42.000Z", "avg_line_length": 37.3465346535, "max_line_length": 129, "alphanum_fraction": 0.6015376458, "include": true, "reason": "import numpy", "num_tokens": 1029}
|
# standard imports
import matplotlib.pyplot as plt
import numpy as np
# custom imports
from SDCA.sdca4crf.utils import entropy, kullback_leibler, logsubtractexp, subtractexp_scalar
class SequenceMarginals:
    """Represent anything that is decomposable over the nodes and edges of a sequential model.
    It can be a score, a conditional probability p(y|x) under the form of MARGINALS or
    LOG-MARGINALS (in which case self.islog=True), the ascent direction, the derivative of the KL
    or the entropy."""
    def __init__(self, unary, binary, log):
        # unary: (T, K) array over the T nodes of the chain;
        # binary: (T-1, K, K) array over the edges;
        # log: True when the stored values are log-domain.
        self.length = unary.shape[0]
        self.nb_labels = unary.shape[1]
        self.unary = unary
        self.binary = binary
        self.islog = log
        if self.length == 0:
            raise ValueError("Sequences of length 0 are not accepted.")
        if self.length != binary.shape[0] + 1:
            raise ValueError("Wrong length of marginals: %i vs %i"
                             % (unary.shape[0], binary.shape[0] + 1))
        if self.nb_labels != binary.shape[1] \
                or self.nb_labels != binary.shape[2]:
            raise ValueError("Wrong alphabet size: %i vs (%i, %i)"
                             % (unary.shape[1], binary.shape[1], binary.shape[2]))
    def __str__(self):
        return "unary: \n" + np.array_str(self.unary) \
               + "\n binary: \n" + np.array_str(self.binary)
    def __repr__(self):
        return "unary: \n" + np.array_repr(self.unary) \
               + "\n binary: \n" + np.array_repr(self.binary)
    def display(self, alphabet):
        """Plot the unary marginals (and summed binary marginals) with
        matplotlib, labeling axes with the given alphabet."""
        alength = len(alphabet)
        plt.matshow(self.unary)
        plt.xticks(range(alength), [alphabet[x] for x in range(alength)])
        plt.colorbar(fraction=0.046, pad=0.04)
        plt.title("unary marginals")
        if self.length > 1:
            plt.matshow(self.binary.sum(axis=0))
            plt.xticks(range(alength), [alphabet[x] for x in range(alength)])
            plt.yticks(range(alength), [alphabet[x] for x in range(alength)])
            plt.colorbar(fraction=0.046, pad=0.04)
            plt.title("sum of binary marginals")
    #########################################
    # Special operations
    #########################################
    def log(self):
        # Elementwise log; result is flagged as log-domain.
        return SequenceMarginals(np.log(self.unary), np.log(self.binary), log=True)
    def exp(self):
        # Elementwise exp; result is flagged as probability-domain.
        return SequenceMarginals(np.exp(self.unary), np.exp(self.binary), log=False)
    def log_reduce_exp(self, to_add):
        """Stable log(sum(exp(joint)) + to_add) via the max-shift trick.

        The joint over the chain is represented by the binaries minus the
        double-counted interior unaries (junction-tree correction); the
        length-1 and length-2 chains degenerate to the unary / binary alone.
        """
        if self.length == 1:  # the joint is the unary
            themax = np.amax(self.unary)
            return themax + np.log(np.sum(np.exp(self.unary - themax)
                                          + to_add * np.exp(-themax)))
        elif self.length == 2:  # the joint is the binary
            themax = np.amax(self.binary)
            return themax + np.log(np.sum(np.exp(self.binary - themax))
                                   + to_add * np.exp(-themax))
        else:
            themax = max(np.amax(self.unary[1:-1]), np.amax(self.binary))
            return themax + np.log(np.sum(np.exp(self.binary - themax))
                                   - np.sum(np.exp(self.unary[1:-1] - themax))
                                   + to_add * np.exp(-themax))
    def convex_combination(self, other, s):
        """Return (1-s)*self + s*other"""
        # In log-domain the combination is done with logaddexp for stability.
        if s == 0:
            return self
        if s == 1:
            return other
        if self.islog:
            unary = np.logaddexp(np.log(1 - s) + self.unary, np.log(s) + other.unary)
            binary = np.logaddexp(np.log(1 - s) + self.binary, np.log(s) + other.binary)
        else:
            unary = (1 - s) * self.unary + s * other.unary
            binary = (1 - s) * self.binary + s * other.binary
        return SequenceMarginals(unary=unary, binary=binary, log=self.islog)
    def logsubtractexp(self, other):
        """Return the ascent direction without numerical issue"""
        # Returns (log|self - other|, sign(self - other)) component-wise.
        unary, usign = logsubtractexp(self.unary, other.unary)
        binary, bsign = logsubtractexp(self.binary, other.binary)
        logvalue = SequenceMarginals(unary=unary, binary=binary, log=True)
        signs = SequenceMarginals(unary=usign, binary=bsign, log=False)
        return logvalue, signs
    #########################################
    # Typical arithmetic operations
    #########################################
    def combine(self, other, ufunc):
        # Apply a numpy ufunc elementwise to both components.
        unary = ufunc(self.unary, other.unary)
        binary = ufunc(self.binary, other.binary)
        return SequenceMarginals(unary, binary, self.islog)
    def subtract(self, other):
        return self.combine(other, np.subtract)
    def multiply(self, other):
        return self.combine(other, np.multiply)
    def multiply_scalar(self, scalar):
        return SequenceMarginals(scalar * self.unary, scalar * self.binary, self.islog)
    #########################################
    # Assertion operations
    #########################################
    def is_density(self, integral=1):
        # True when every node and every edge distribution sums to `integral`.
        return np.isclose(np.sum(self.unary, axis=1), integral).all() \
               and np.isclose(np.sum(self.binary, axis=(1, 2)), integral).all()
    def is_consistent(self):
        """Check that the binary marginals marginalize to the unaries on
        both sides (prints which check fails)."""
        if self.length == 1:
            return True
        ans = True
        from_left_binary = np.sum(self.binary, axis=1)
        from_right_binary = np.sum(self.binary, axis=2)
        if not np.isclose(from_left_binary, self.unary[1:]).all():
            ans = False
            print("Left inconsistent with unary.")
        if not np.isclose(from_right_binary, self.unary[:-1]).all():
            ans = False
            print("Right inconsistent with unary.")
        if not np.isclose(from_right_binary[1:], from_left_binary[:-1]).all():
            ans = False
            print("Left inconsistent with right.")
        return ans
    #########################################
    # Information theory
    #########################################
    def entropy(self):
        """Entropy of the chain distribution: H(binaries) - H(interior
        unaries) for length > 2 (computed in log-domain for stability)."""
        returnlog = False
        if self.length == 1:
            return entropy(self.unary, returnlog=returnlog)
        elif self.length == 2:
            return entropy(self.binary, returnlog=returnlog)
        else:
            cliques = entropy(self.binary, returnlog=True)
            separations = entropy(self.unary[1:-1], returnlog=True)
            return subtractexp_scalar(cliques, separations)
    def kullback_leibler(self, other):
        """KL divergence between two chain distributions of equal length,
        with the same clique/separator decomposition as entropy()."""
        returnlog = False
        if self.length != other.length:
            raise ValueError("Not the same sequence length %i %i" % (self.length, other.length))
        if self.length == 1:
            return kullback_leibler(self.unary, other.unary, returnlog=returnlog)
        elif self.length == 2:
            return kullback_leibler(self.binary, other.binary, returnlog=returnlog)
        else:
            cliques = kullback_leibler(self.binary, other.binary, returnlog=True)
            separations = kullback_leibler(self.unary[1:-1], other.unary[1:-1],
                                           returnlog=True)
            return subtractexp_scalar(cliques, separations)
|
{"hexsha": "8a6e232487cc300fab9ee577831c5be3da498dcd", "size": 7139, "ext": "py", "lang": "Python", "max_stars_repo_path": "SDCA/sdca4crf/parameters/sequence_marginals.py", "max_stars_repo_name": "Yaakoubi/Struct-CKN", "max_stars_repo_head_hexsha": "fa007fa71310866584bdf2e5b038e6663b94e965", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-30T13:42:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-30T13:42:56.000Z", "max_issues_repo_path": "SDCA/sdca4crf/parameters/sequence_marginals.py", "max_issues_repo_name": "Yaakoubi/Struct-CKN", "max_issues_repo_head_hexsha": "fa007fa71310866584bdf2e5b038e6663b94e965", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SDCA/sdca4crf/parameters/sequence_marginals.py", "max_forks_repo_name": "Yaakoubi/Struct-CKN", "max_forks_repo_head_hexsha": "fa007fa71310866584bdf2e5b038e6663b94e965", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-16T22:00:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T20:08:57.000Z", "avg_line_length": 39.8826815642, "max_line_length": 97, "alphanum_fraction": 0.5626838493, "include": true, "reason": "import numpy", "num_tokens": 1667}
|
function avgNEES = ANEES(trans_err, rot_err, err_sigma)
% ANEES  Average Normalized Estimation Error Squared over a trajectory.
%
%   trans_err : translation error components, one column per time step
%   rot_err   : rotation error components, one column per time step
%   err_sigma : per-component standard deviations (same column layout,
%               rotation rows first, matching [rot_err; trans_err])
%   avgNEES   : mean over all steps of  e' * inv(Cov) * e  with a
%               diagonal covariance Cov = diag(err_sigma.^2)
stateErr = [rot_err; trans_err];
stateVar = err_sigma.^2;            % diagonal covariance entries
stepNum  = size(stateErr, 2);
avgNEES  = 0;
for i = 1:stepNum
    % The covariance is diagonal, so inv(diag(v)) * e is simply e ./ v.
    % Element-wise division avoids building and inverting a matrix each
    % step (the original used inv(diag(...)), which is O(n^3) and less
    % numerically robust) while producing the identical result.
    avgNEES = avgNEES + (1/stepNum) * (stateErr(:,i)' * (stateErr(:,i) ./ stateVar(:,i)));
end
end
|
{"author": "yuzhou42", "repo": "MSCKF", "sha": "d95d90c85b24f27001bd0ecdce8739b6e602b6df", "save_path": "github-repos/MATLAB/yuzhou42-MSCKF", "path": "github-repos/MATLAB/yuzhou42-MSCKF/MSCKF-d95d90c85b24f27001bd0ecdce8739b6e602b6df/KITTI Trials/ANEES.m"}
|
import urllib
import urllib.request
import cv2
import os
import numpy as np
from multiprocessing.dummy import Pool as ThreadPool
import itertools
# Global counter assigning each downloaded image a unique, monotonically
# increasing file number across all categories (updated by loadImage and
# read by store_raw_images when seeding itertools.count).
pic_num = 1
def store_raw_images(paths, links):
    """Download every image listed at each ImageNet URL-list endpoint.

    For each (link, path) pair, fetches the newline-separated list of image
    URLs from *link* and downloads them concurrently (32 worker threads)
    into directory *path* via loadImage, numbering files from the global
    pic_num counter.
    """
    global pic_num
    for link, path in zip(links, paths):
        os.makedirs(path, exist_ok=True)
        # BUG FIX: the original did str(resp.read()) and split('\\n'), which
        # splits the *repr* of the bytes object and leaves "b'" / trailing
        # "'" garbage on the first and last URLs.  Decode properly instead.
        image_urls = urllib.request.urlopen(link).read().decode('utf-8', errors='replace')
        pool = ThreadPool(32)
        pool.starmap(loadImage,
                     zip(itertools.repeat(path), image_urls.splitlines(), itertools.count(pic_num)))
        pool.close()
        pool.join()
def loadImage(path, link, counter):
    """Download one image from *link* to <path>/<counter>.jpg.

    Bumps the global pic_num past *counter*, then re-reads and re-writes the
    file with OpenCV (normalizing it to a valid JPEG).  Any failure is
    printed and swallowed so one bad URL does not stop the batch.
    """
    global pic_num
    if pic_num < counter:
        pic_num = counter + 1
    target = path + "/" + str(counter) + ".jpg"
    try:
        urllib.request.urlretrieve(link, target)
        img = cv2.imread(target)
        if img is None:
            return
        cv2.imwrite(target, img)
        print(counter)
    except Exception as e:
        print(str(e))
def removeInvalid(dirPaths):
    """Delete downloaded images that are pixel-identical to a known placeholder.

    Compares every file in each directory of *dirPaths* against every image
    in the 'invalid' directory; a file whose shape and pixels match a
    placeholder is removed.  Read errors (e.g. non-image files) are printed
    and skipped.
    """
    for dirPath in dirPaths:
        for img_name in os.listdir(dirPath):
            current_image_path = str(dirPath) + '/' + str(img_name)
            for bad_name in os.listdir('invalid'):
                try:
                    placeholder = cv2.imread('invalid/' + str(bad_name))
                    candidate = cv2.imread(current_image_path)
                    same_shape = placeholder.shape == candidate.shape
                    if same_shape and not np.bitwise_xor(placeholder, candidate).any():
                        os.remove(current_image_path)
                        break
                except Exception as e:
                    print(str(e))
def main():
    """Download six ImageNet categories, then prune placeholder images."""
    categories = [
        ('pets', 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n01318894'),
        ('furniture', 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n03405725'),
        ('people', 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n07942152'),
        ('food', 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n00021265'),
        ('hotdog', 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n07697537'),
        ('jackfruit', 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n12400720'),
    ]
    paths = [name for name, _ in categories]
    links = [url for _, url in categories]
    store_raw_images(paths, links)
    removeInvalid(paths)


if __name__ == "__main__":
    main()
|
{"hexsha": "83861e5226a0638b9d779f1f8da1c759469b8a74", "size": 2392, "ext": "py", "lang": "Python", "max_stars_repo_path": "images/get_images.py", "max_stars_repo_name": "adamshamsudeen/not-jackfruit", "max_stars_repo_head_hexsha": "245a9e6e1cbc1c48ca8050cbc0c87510b50210be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-02-17T23:46:54.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-29T12:58:15.000Z", "max_issues_repo_path": "images/get_images.py", "max_issues_repo_name": "adamshamsudeen/not-jackfruit", "max_issues_repo_head_hexsha": "245a9e6e1cbc1c48ca8050cbc0c87510b50210be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "images/get_images.py", "max_forks_repo_name": "adamshamsudeen/not-jackfruit", "max_forks_repo_head_hexsha": "245a9e6e1cbc1c48ca8050cbc0c87510b50210be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3243243243, "max_line_length": 110, "alphanum_fraction": 0.5848662207, "include": true, "reason": "import numpy", "num_tokens": 553}
|
import numpy as np
class environment():
    """Grid-world lawn-mowing environment.

    The lawn is a 2-D numpy array of 3-character tile strings.  Character 0
    is the ground type ('8' = tall grass, '9' = wood/impassable, '1' = cut
    grass); characters 1 and 2 record which side the agent last entered and
    exited the tile from ('1'=N, '2'=S, '3'=E, '4'=W).  The class applies
    actions to the state array and tracks the agent's position, heading and
    move count (the reward is the negated move count).
    """

    def __init__(self, agent_position, agent_direction, environment_shape):
        """
        agent_position    -- two-element [row, col] coordinate pair
        agent_direction   -- one of the cardinal characters 'N', 'S', 'E', 'W'
        environment_shape -- shape (rows, cols) of the environment matrix
        """
        self.environment_shape = environment_shape
        self.agent_position = agent_position
        self.agent_direction = agent_direction
        self.moves = 0  # actions taken so far; get_reward() returns -moves

    def __can_occupy(self, tile):
        # The agent may enter any tile that is not wood ('9').
        return tile[0] != '9'

    def __cut_the_grass(self, environment_state):
        # If the tile under the agent is tall grass ('8'), replace it with
        # cut grass ('1').  Returns the (mutated) state array.
        if environment_state[self.agent_position[0], self.agent_position[1]][0] == '8':
            current_tile = environment_state[self.agent_position[0], self.agent_position[1]]
            environment_state[self.agent_position[0], self.agent_position[1]] = \
                self.__modify_tile(current_tile, '1', 0)
        return environment_state

    def __modify_tile(self, tile, new_value, position):
        # Return a copy of the 3-character tile string with new_value at
        # index `position`.  Strings rather than mutable lists are used to
        # keep one-hot encoding of the lawn simple.
        if position == 0:
            return new_value + tile[1] + tile[2]
        elif position == 1:
            return tile[0] + new_value + tile[2]
        elif position == 2:
            return tile[0] + tile[1] + new_value
        else:
            # BUG FIX: concatenating the int index raised TypeError instead
            # of the intended Exception; convert to str first.
            raise Exception('invalid tile index ' + str(position))

    def take_action(self, action, environment_state):
        # Apply action 1/2/3 (advance / pivot cw / pivot ccw), count the
        # move, and return the updated environment state.
        if action == 1:
            updated_environment_state = self.advance(environment_state)
        elif action == 2:
            updated_environment_state = self.pivot_clockwise(environment_state)
        elif action == 3:
            updated_environment_state = self.pivot_counterclockwise(environment_state)
        else:
            # BUG FIX: str() added — 'invalid action_id ' + int raised TypeError.
            raise Exception('invalid action_id ' + str(action))
        self.moves += 1
        return updated_environment_state

    # Below: the actions the agent can take and how they modify the environment.

    def advance(self, environment_state):
        # Advance one space in the direction the agent is currently facing.
        # Marks the exit side on the tile being left (char 2) and the entry
        # side on the tile being entered (char 1), respecting grid bounds and
        # impassable tiles.  Returns the updated environment state.
        current_tile = environment_state[self.agent_position[0], self.agent_position[1]]
        if self.agent_direction == 'N':
            environment_state[self.agent_position[0], self.agent_position[1]] = \
                self.__modify_tile(current_tile, '1', 2)  # exit from north
            if self.agent_position[0]-1 != -1 and self.__can_occupy(environment_state[self.agent_position[0]-1, self.agent_position[1]]):
                self.agent_position[0] -= 1  # advance
                current_tile = environment_state[self.agent_position[0], self.agent_position[1]]
                environment_state[self.agent_position[0], self.agent_position[1]] = \
                    self.__modify_tile(current_tile, '2', 1)  # enter from south
        elif self.agent_direction == 'S':
            environment_state[self.agent_position[0], self.agent_position[1]] = \
                self.__modify_tile(current_tile, '2', 2)  # exit from south
            if self.agent_position[0]+1 != self.environment_shape[0] and self.__can_occupy(environment_state[self.agent_position[0]+1, self.agent_position[1]]):
                self.agent_position[0] += 1  # advance
                current_tile = environment_state[self.agent_position[0], self.agent_position[1]]
                environment_state[self.agent_position[0], self.agent_position[1]] = \
                    self.__modify_tile(current_tile, '1', 1)  # enter from north
        elif self.agent_direction == 'E':
            environment_state[self.agent_position[0], self.agent_position[1]] = \
                self.__modify_tile(current_tile, '3', 2)  # exit from east
            if self.agent_position[1]+1 != self.environment_shape[1] and self.__can_occupy(environment_state[self.agent_position[0], self.agent_position[1]+1]):
                self.agent_position[1] += 1  # advance
                current_tile = environment_state[self.agent_position[0], self.agent_position[1]]
                environment_state[self.agent_position[0], self.agent_position[1]] = \
                    self.__modify_tile(current_tile, '4', 1)  # enter from west
        elif self.agent_direction == 'W':
            environment_state[self.agent_position[0], self.agent_position[1]] = \
                self.__modify_tile(current_tile, '4', 2)  # exit from west
            if self.agent_position[1]-1 != -1 and self.__can_occupy(environment_state[self.agent_position[0], self.agent_position[1]-1]):
                self.agent_position[1] -= 1  # advance
                current_tile = environment_state[self.agent_position[0], self.agent_position[1]]
                environment_state[self.agent_position[0], self.agent_position[1]] = \
                    self.__modify_tile(current_tile, '3', 1)  # enter from east
        else:
            raise Exception('unknown direction')
        environment_state = self.__cut_the_grass(environment_state)  # cut tall grass under the agent
        return environment_state

    def pivot_clockwise(self, environment_state):
        # Rotate the agent's heading 90° clockwise; state is unchanged.
        if self.agent_direction == 'N':
            self.agent_direction = 'E'
        elif self.agent_direction == 'S':
            self.agent_direction = 'W'
        elif self.agent_direction == 'E':
            self.agent_direction = 'S'
        elif self.agent_direction == 'W':
            self.agent_direction = 'N'
        else:
            raise Exception('unknown direction')
        return environment_state

    def pivot_counterclockwise(self, environment_state):
        # Rotate the agent's heading 90° counterclockwise; state is unchanged.
        if self.agent_direction == 'N':
            self.agent_direction = 'W'
        elif self.agent_direction == 'S':
            self.agent_direction = 'E'
        elif self.agent_direction == 'E':
            self.agent_direction = 'N'
        elif self.agent_direction == 'W':
            self.agent_direction = 'S'
        else:
            raise Exception('unknown direction')
        return environment_state

    # Below: getters used to interface with the simulation class.

    def get_done_condition(self, environment_state):
        # True when the lawn is fully mowed (no '800' tall-grass tiles left).
        return ('800' not in environment_state)

    def get_action_space(self):
        # Tuple of possible action ids:
        #   1 = 'advance', 2 = 'pivot_clockwise', 3 = 'pivot_counterclockwise'
        return (1, 2, 3)

    def get_position(self):
        # Return the agent's current (row, col) position.
        # BUG FIX: previously returned self.x, self.y — attributes that were
        # never defined, so this raised AttributeError.
        return self.agent_position[0], self.agent_position[1]

    def get_direction(self):
        # Return the agent's current cardinal direction.
        # BUG FIX: previously returned self.direction, which was never defined.
        return self.agent_direction

    def get_reward(self):
        # Reward is the negated number of moves taken so far.
        return -self.moves
|
{"hexsha": "3affc83aa69008ed5dbdd5fe12c6cb02dc6df7f2", "size": 8323, "ext": "py", "lang": "Python", "max_stars_repo_path": "lawn_mowing_environment.py", "max_stars_repo_name": "JacobZuliani/Efficient-Lawn-Mowing-with-Deep-Reinforcement-Learning", "max_stars_repo_head_hexsha": "7c508b242579270cedab354061709fb97355c58a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lawn_mowing_environment.py", "max_issues_repo_name": "JacobZuliani/Efficient-Lawn-Mowing-with-Deep-Reinforcement-Learning", "max_issues_repo_head_hexsha": "7c508b242579270cedab354061709fb97355c58a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lawn_mowing_environment.py", "max_forks_repo_name": "JacobZuliani/Efficient-Lawn-Mowing-with-Deep-Reinforcement-Learning", "max_forks_repo_head_hexsha": "7c508b242579270cedab354061709fb97355c58a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.3986928105, "max_line_length": 192, "alphanum_fraction": 0.6548119668, "include": true, "reason": "import numpy", "num_tokens": 1774}
|
// (c) Copyright 2008 Samuel Debionne.
//
// Distributed under the MIT Software License. (See accompanying file
// license.txt) or copy at http://www.opensource.org/licenses/mit-license.php)
//
// See http://code.google.com/p/fsc-sdk/ for the library home page.
//
// $Revision: $
// $History: $
/// \file sim_connect.hpp
/// Sim Connect c++ API
#if !defined(__FSX_SIM_CONNECT_HPP__)
#define __FSX_SIM_CONNECT_HPP__
#if _MSC_VER > 1000
#pragma once
#endif // _MSC_VER > 1000
#include <cassert>
#include <windows.h>
#include <SimConnect.h>
#include <boost/shared_ptr.hpp>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/sequenced_index.hpp>
#include <boost/utility.hpp>
#include <assert.hpp>
// Call a SimConnect function and, in debug builds, record its stringized
// form so that packet IDs in later exception messages can be mapped back to
// the originating call (see sim_connect::get_record).
// NOTE(review): when used as 'return SIM_CONNECT_CALL(f(...));' the
// add_record statement sits after the return and never executes — confirm
// whether recording is intended to happen in that position.
#if ! defined(NDEBUG)
#define SIM_CONNECT_CALL(x) \
	x; \
	add_record(#x)
#else
// BUG FIX: the release definition used to expand to *nothing*, turning
// every 'return SIM_CONNECT_CALL(f(...));' into 'return ;' — the SimConnect
// call was dropped entirely and non-void returns became ill-formed.  The
// macro must still evaluate its argument.
#define SIM_CONNECT_CALL(x) x
#endif
namespace fsx {
namespace detail {
// One remembered SimConnect call: the packet ID (as returned by
// SimConnect_GetLastSentPacketID) paired with a human-readable description
// of the call (the stringized form produced by SIM_CONNECT_CALL).  Stored
// in sim_connect::records to resolve error packet IDs back to calls.
struct record
{
	record(DWORD _id, std::string _descr) : id_(_id), descr_(_descr) {}

	DWORD id_;            // packet/send ID of the recorded call
	std::string descr_;   // textual description used for error reporting
};
} //namespace detail
/// Sim connect object. Encapuslate the SimConnect handle.
/// Sim connect object. Encapsulates the SimConnect handle and keeps a short
/// history of recent calls so erroneous packet IDs can be mapped back to a
/// call description.
class sim_connect //: public boost::noncopyable
{
	typedef boost::shared_ptr<void> handle_t;

public:
	/// Used to send a request to the Flight Simulator server to open up communications with a new client.
	HRESULT open(LPCSTR _szName, HWND _hWnd, DWORD _UserEventWin32, HANDLE _hEventHandle, DWORD _dwConfigIndex)
	{
		HANDLE hsc;
		HRESULT hr = SimConnect_Open(&hsc, _szName, _hWnd, _UserEventWin32, _hEventHandle, _dwConfigIndex);

		// Wrap the raw handle so SimConnect_Close runs exactly once when the
		// last copy of the handle is released.
		if (hr == S_OK)
			hsc_ = handle_t(hsc, SimConnect_Close);

		return hr;
	}

	/// Used to request that the communication with the server is ended.
	void close() {hsc_.reset(); }

	/// Return true if the sim_connect object is currently connected, false otherwise.
	bool is_open()
	{
		// BUG FIX: 'return hsc_;' relied on an implicit smart-pointer-to-bool
		// conversion that modern shared_ptr (explicit operator bool) forbids.
		return static_cast<bool>(hsc_);
	}

	/// \name Dispatch
	//@{
	/// Used to process the next SimConnect message received through the specified callback function.
	HRESULT call_dispatch(DispatchProc _pfcnDispatch, void * _pContext = NULL)
	{
		return SimConnect_CallDispatch(handle(), _pfcnDispatch, _pContext);
	}

	/// Used to process the next SimConnect message received, without the use of a callback function.
	HRESULT get_next_dispatch(SIMCONNECT_RECV** _ppData, DWORD* _pcbData)
	{
		return SimConnect_GetNextDispatch(handle(), _ppData, _pcbData);
	}
	//@}

	/// \name Notification groups
	//@{
	/// Used to add an individual client defined event to a notification group.
	HRESULT add_client_event_to_notification_group(SIMCONNECT_NOTIFICATION_GROUP_ID _GroupID, SIMCONNECT_CLIENT_EVENT_ID _EventID, BOOL _bMaskable = FALSE)
	{
		return SIM_CONNECT_CALL(SimConnect_AddClientEventToNotificationGroup(handle(), _GroupID, _EventID, _bMaskable));
	}

	/// Used to set the priority of a notification group.
	HRESULT set_notification_group_priority(SIMCONNECT_NOTIFICATION_GROUP_ID _GroupID, DWORD _dwPriority)
	{
		return SIM_CONNECT_CALL(SimConnect_SetNotificationGroupPriority(handle(), _GroupID, _dwPriority));
	}

	/// Used to request events are transmitted from a notification group, when the simulation is in Dialog Mode.
	HRESULT request_notification_group(SIMCONNECT_NOTIFICATION_GROUP_ID _GroupID, DWORD _dwReserved = 0, DWORD _dwFlags = 0)
	{
		return SIM_CONNECT_CALL(SimConnect_RequestNotificationGroup(handle(), _GroupID, _dwReserved, _dwFlags));
	}
	//@}

	/// \name Client data
	//@{
	/// Used to add an offset and a size in bytes, or a type, to a client data definition.
	HRESULT add_to_client_data_definition(SIMCONNECT_CLIENT_DATA_DEFINITION_ID _DefineID, DWORD _dwOffset, DWORD _dwSizeOrType, float _fEpsilon = 0.0f, DWORD _dwDatumID = SIMCONNECT_UNUSED)
	{
		return SIM_CONNECT_CALL(SimConnect_AddToClientDataDefinition(handle(), _DefineID, _dwOffset, _dwSizeOrType, _fEpsilon, _dwDatumID));
	}

	/// Deprecated misspelled alias kept for backward compatibility with
	/// existing callers; forwards to add_to_client_data_definition.
	HRESULT add_to_client_data_defintinion(SIMCONNECT_CLIENT_DATA_DEFINITION_ID _DefineID, DWORD _dwOffset, DWORD _dwSizeOrType, float _fEpsilon = 0.0f, DWORD _dwDatumID = SIMCONNECT_UNUSED)
	{
		return add_to_client_data_definition(_DefineID, _dwOffset, _dwSizeOrType, _fEpsilon, _dwDatumID);
	}

	/// Used to clear the definition of the specified client data.
	HRESULT clear_client_data_definition(SIMCONNECT_CLIENT_DATA_DEFINITION_ID _DefineID)
	{
		return SIM_CONNECT_CALL(SimConnect_ClearClientDataDefinition(handle(), _DefineID));
	}

	/// Used to associate an ID with a named client data area.
	HRESULT map_client_data_name_to_id(const char* _szClientDataName, SIMCONNECT_CLIENT_DATA_ID _ClientDataID)
	{
		return SIM_CONNECT_CALL(SimConnect_MapClientDataNameToID(handle(), _szClientDataName, _ClientDataID));
	}

	/// Used to request that the data in an area created by another client be sent to this client.
	HRESULT request_client_data(SIMCONNECT_CLIENT_DATA_ID _ClientDataID, SIMCONNECT_DATA_REQUEST_ID _RequestID, SIMCONNECT_CLIENT_DATA_DEFINITION_ID _DefineID, SIMCONNECT_CLIENT_DATA_PERIOD _Period = SIMCONNECT_CLIENT_DATA_PERIOD_ONCE, SIMCONNECT_CLIENT_DATA_REQUEST_FLAG _Flags = 0, DWORD _dwOrigin = 0, DWORD _dwInterval = 0, DWORD _dwLimit = 0)
	{
		return SIM_CONNECT_CALL(SimConnect_RequestClientData(handle(), _ClientDataID, _RequestID, _DefineID, _Period, _Flags, _dwOrigin, _dwInterval, _dwLimit));
	}
	//@}

	/// \name Data definitions
	//@{
	/// Used to add a Flight Simulator simulation variable name to a client defined object definition.
	HRESULT add_to_data_definition(SIMCONNECT_DATA_DEFINITION_ID _DefineID, const char* _szDatumName, const char* _szUnitsName, SIMCONNECT_DATATYPE _DatumType = SIMCONNECT_DATATYPE_FLOAT64, float _fEpsilon = 0, DWORD _dwDatumID = SIMCONNECT_UNUSED)
	{
		return SIM_CONNECT_CALL(SimConnect_AddToDataDefinition(handle(), _DefineID, _szDatumName, _szUnitsName, _DatumType, _fEpsilon, _dwDatumID));
	}

	/// Used to remove all simulation variables from a client defined object.
	HRESULT clear_data_definition(SIMCONNECT_DATA_DEFINITION_ID _DefineID)
	{
		return SIM_CONNECT_CALL(SimConnect_ClearDataDefinition(handle(), _DefineID));
	}

	/// Used to write one or more units of data to a client data area.
	HRESULT set_client_data(SIMCONNECT_CLIENT_DATA_ID _ClientDataID, SIMCONNECT_CLIENT_DATA_DEFINITION_ID _DefineID, DWORD _dwFlags, DWORD _dwReserved, DWORD _cbUnitSize, void* _pDataSet)
	{
		return SIM_CONNECT_CALL(SimConnect_SetClientData(handle(), _ClientDataID, _DefineID, _dwFlags, _dwReserved, _cbUnitSize, _pDataSet));
	}
	//@}

	/// \name Sim objects
	//@{
	/// Used to make changes to the data properties of an object.
	HRESULT set_data_on_sim_object(SIMCONNECT_DATA_DEFINITION_ID _DefineID, SIMCONNECT_OBJECT_ID _ObjectID, SIMCONNECT_DATA_SET_FLAG _Flags, DWORD _dwArrayCount, DWORD _cbUnitSize, void* _pDataSet)
	{
		return SIM_CONNECT_CALL(SimConnect_SetDataOnSimObject(handle(), _DefineID, _ObjectID, _Flags, _dwArrayCount, _cbUnitSize, _pDataSet));
	}

	/// Used to request when the SimConnect client is to receive data values for a specific object.
	HRESULT request_data_on_sim_object(SIMCONNECT_DATA_REQUEST_ID _RequestID, SIMCONNECT_DATA_DEFINITION_ID _DefineID, SIMCONNECT_OBJECT_ID _ObjectID, SIMCONNECT_PERIOD _Period, SIMCONNECT_DATA_REQUEST_FLAG _Flags = 0, DWORD _dwOrigin = 0, DWORD _dwInterval = 0, DWORD _dwLimit = 0)
	{
		return SIM_CONNECT_CALL(SimConnect_RequestDataOnSimObject(handle(), _RequestID, _DefineID, _ObjectID, _Period, _Flags, _dwOrigin, _dwInterval, _dwLimit));
	}

	/// Used to retrieve information about simulation objects of a given type that are within a specified radius of the user's aircraft.
	HRESULT request_data_on_sim_object_type(SIMCONNECT_DATA_REQUEST_ID _RequestID, SIMCONNECT_DATA_DEFINITION_ID _DefineID, DWORD _dwRadiusMeters, SIMCONNECT_SIMOBJECT_TYPE _Type)
	{
		return SIM_CONNECT_CALL(SimConnect_RequestDataOnSimObjectType(handle(), _RequestID, _DefineID, _dwRadiusMeters, _Type));
	}
	//@}

	/// \name Client Event
	//@{
	/// Used to associate a client defined event ID with a Flight Simulator event name.
	HRESULT map_client_event_to_sim_event(SIMCONNECT_CLIENT_EVENT_ID _EventID, const char* _szEventName)
	{
		return SIM_CONNECT_CALL(SimConnect_MapClientEventToSimEvent(handle(), _EventID, _szEventName));
	}

	/// Used to request that the Flight Simulator server transmit to all SimConnect clients the specified client event.
	HRESULT transmit_client_event(SIMCONNECT_OBJECT_ID _ObjectID, SIMCONNECT_CLIENT_EVENT_ID _EventID, DWORD _dwData, SIMCONNECT_NOTIFICATION_GROUP_ID _GroupID, SIMCONNECT_EVENT_FLAG _Flags)
	{
		return SIM_CONNECT_CALL(SimConnect_TransmitClientEvent(handle(), _ObjectID, _EventID, _dwData, _GroupID, _Flags));
	}

	/// Used to remove a client defined event from a notification group.
	HRESULT remove_client_event(SIMCONNECT_NOTIFICATION_GROUP_ID _GroupID, SIMCONNECT_CLIENT_EVENT_ID _EventID)
	{
		return SIM_CONNECT_CALL(SimConnect_RemoveClientEvent(handle(), _GroupID, _EventID));
	}
	//@}

	/// \name System event
	//@{
	/// Used to request that a specific system event is notified to the client.
	HRESULT subscribe_to_system_event(SIMCONNECT_CLIENT_EVENT_ID _EventID, const char* _szSystemEventName)
	{
		return SIM_CONNECT_CALL(SimConnect_SubscribeToSystemEvent(handle(), _EventID, _szSystemEventName));
	}

	/// Used to request that notifications are no longer received for the specified system event.
	HRESULT unsubscribe_from_system_event(SIMCONNECT_CLIENT_EVENT_ID _EventID)
	{
		return SIM_CONNECT_CALL(SimConnect_UnsubscribeFromSystemEvent(handle(), _EventID));
	}

	/// Used to turn requests for event information from the server on and off.
	HRESULT set_system_event_state(SIMCONNECT_CLIENT_EVENT_ID _EventID, SIMCONNECT_STATE _dwState)
	{
		return SIM_CONNECT_CALL(SimConnect_SetSystemEventState(handle(), _EventID, _dwState));
	}

	/// Used to access a number of Flight Simulator system components.
	HRESULT set_system_state(const char* _szState, DWORD _dwInteger, float _fFloat, char* _szString)
	{
		return SIM_CONNECT_CALL(SimConnect_SetSystemState(handle(), _szState, _dwInteger, _fFloat, _szString));
	}
	//@}

	/// \name Input event
	//@{
	/// Used to connect input events (such as keystrokes, joystick or mouse movements) with the sending of appropriate event notifications.
	HRESULT map_input_event_to_client_event(SIMCONNECT_INPUT_GROUP_ID _GroupID, const char* _szInputDefinition, SIMCONNECT_CLIENT_EVENT_ID _DownEventID, DWORD _dwDownValue = 0, SIMCONNECT_CLIENT_EVENT_ID _UpEventID =(SIMCONNECT_CLIENT_EVENT_ID)SIMCONNECT_UNUSED, DWORD _dwUpValue = 0, BOOL _bMaskable = FALSE)
	{
		return SIM_CONNECT_CALL(SimConnect_MapInputEventToClientEvent(handle(), _GroupID, _szInputDefinition, _DownEventID, _dwDownValue, _UpEventID, _dwUpValue, _bMaskable));
	}

	/// Used to remove an input event from a specified input group object.
	HRESULT remove_input_event(SIMCONNECT_INPUT_GROUP_ID _GroupID, const char* _szInputDefinition)
	{
		return SIM_CONNECT_CALL(SimConnect_RemoveInputEvent(handle(), _GroupID, _szInputDefinition));
	}
	//@}

	/// Used to remove all the client defined events from a notification group.
	HRESULT clear_notification_group(SIMCONNECT_NOTIFICATION_GROUP_ID _GroupID)
	{
		return SIM_CONNECT_CALL(SimConnect_ClearNotificationGroup(handle(), _GroupID));
	}

	/// Used to request the creation of a reserved data area for this client.
	HRESULT create_client_data(SIMCONNECT_CLIENT_DATA_ID _ClientDataID, DWORD _dwSize, SIMCONNECT_CREATE_CLIENT_DATA_FLAG _Flags)
	{
		return SIM_CONNECT_CALL(SimConnect_CreateClientData(handle(), _ClientDataID, _dwSize, _Flags));
	}

	/// \name Flight and flight plan
	//@{
	/// Used to load an existing flight file.
	HRESULT flight_load(const char* _szFileName)
	{
		return SIM_CONNECT_CALL(SimConnect_FlightLoad(handle(), _szFileName));
	}

	/// Used to load an existing flight plan.
	HRESULT flight_plan_load(const char* _szFileName)
	{
		return SIM_CONNECT_CALL(SimConnect_FlightPlanLoad(handle(), _szFileName));
	}

	/// Used to save the current state of a flight to a flight file.
	HRESULT flight_save(const char* _szFileName, const char* _szTitle, const char* _szDescription, DWORD _dwFlags)
	{
		return SIM_CONNECT_CALL(SimConnect_FlightSave(handle(), _szFileName, _szTitle, _szDescription, _dwFlags));
	}
	//@}

	/// Used to request a specific keyboard TAB-key combination applies only to this client.
	HRESULT request_reserved_key(SIMCONNECT_CLIENT_EVENT_ID _EventID, const char* _szKeyChoice1, const char* _szKeyChoice2 = "", const char* _szKeyChoice3 = "")
	{
		return SimConnect_RequestReservedKey(handle(), _EventID, _szKeyChoice1, _szKeyChoice2, _szKeyChoice3);
	}

	/// Used to request information from a number of Flight Simulator system components.
	HRESULT request_system_state(SIMCONNECT_DATA_REQUEST_ID _RequestID, const char* _szState)
	{
		return SimConnect_RequestSystemState(handle(), _RequestID, _szState);
	}

	/// \name Input group
	//@{
	/// Used to set the priority for a specified input group object.
	HRESULT set_input_group_priority(SIMCONNECT_INPUT_GROUP_ID _GroupID, DWORD _dwPriority)
	{
		return SimConnect_SetInputGroupPriority(handle(), _GroupID, _dwPriority);
	}

	/// Used to turn requests for input event information from the server on and off.
	HRESULT set_input_group_state(SIMCONNECT_INPUT_GROUP_ID _GroupID, DWORD _dwState)
	{
		return SimConnect_SetInputGroupState(handle(), _GroupID, _dwState);
	}

	/// Used to remove all the input events from a specified input group object.
	HRESULT clear_input_group(SIMCONNECT_INPUT_GROUP_ID _GroupID)
	{
		return SIM_CONNECT_CALL(SimConnect_ClearInputGroup(handle(), _GroupID));
	}
	//@}

	/// \name Variable length strings
	//@{
	/// Used to assist in adding variable length strings to a structure.
	static HRESULT insert_string(char* _pDest, DWORD _cbDest, void** _ppEnd, DWORD* _pcbStringV, const char* _pSource)
	{
		return SimConnect_InsertString(_pDest, _cbDest, _ppEnd, _pcbStringV, _pSource);
	}

	/// Used to assist in retrieving variable length strings from a structure.
	static HRESULT retrieve_string(SIMCONNECT_RECV* _pData, DWORD _cbData, void* _pStringV, char** _ppszString, DWORD* _pcbString)
	{
		return SimConnect_RetrieveString(_pData, _cbData, _pStringV, _ppszString, _pcbString);
	}
	//@}

	/// \name Tracking Errors
	//@{
	/// Returns the ID of the last packet sent to the SimConnect server.
	HRESULT get_last_sent_packet_id(DWORD& dwSendID)
	{
		return SimConnect_GetLastSentPacketID(handle(), &dwSendID);
	}

	/// Given the ID of an erroneous packet, return the description string of that call.
	std::string get_record(DWORD _id) const
	{
		// BUG FIX: the lookup reference was named 'or', which is a reserved
		// alternative operator token in standard C++ (it only compiled under
		// permissive MSVC modes); renamed to 'ordered'.
		const ordered_records_t& ordered = boost::multi_index::get<1>(records);
		ordered_records_t::const_iterator it = ordered.find(_id);

		if (it != ordered.end())
			return it->descr_;
		else
			return "Description not found";
	}
	//@}

private:
	// Owning handle to the SimConnect session; empty when not connected.
	handle_t hsc_;

	// Raw handle accessor; precondition: the session is open.
	HANDLE handle() const
	{
		FSC_PRECONDITION((bool) hsc_);
		return hsc_.get();
	}

	// The most recent calls list type: a sequence (for LRU eviction order)
	// with a hashed unique index on the packet ID (for get_record lookups).
	typedef boost::multi_index::multi_index_container<
		detail::record,
		boost::multi_index::indexed_by<
			boost::multi_index::sequenced<>,
			boost::multi_index::hashed_unique<BOOST_MULTI_INDEX_MEMBER(detail::record, DWORD, id_)> >
	> records_t;

	typedef records_t::nth_index<1>::type ordered_records_t;

	// The maximum number of recorded calls kept for error tracking.
	static const size_t max_records = 10;
	records_t records;

	// Record the last sent packet ID along with the call's description,
	// evicting the oldest entry once max_records is exceeded.
	void add_record(std::string _desc)
	{
		DWORD id;
		if (S_OK == SimConnect_GetLastSentPacketID(handle(), &id))
		{
			records.push_front(detail::record(id, _desc));

			if (records.size() > max_records)
				records.pop_back();
		}
	}
};
} //namespace fsx
#endif //__FSX_SIM_CONNECT_HPP__
|
{"hexsha": "0f769089a0e49cc0e928046a815a18c06019a2e8", "size": 16888, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "fsc/include/fsx/sim_connect.hpp", "max_stars_repo_name": "gbucknell/fsc-sdk", "max_stars_repo_head_hexsha": "11b7cda4eea35ec53effbe37382f4b28020cd59d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fsc/include/fsx/sim_connect.hpp", "max_issues_repo_name": "gbucknell/fsc-sdk", "max_issues_repo_head_hexsha": "11b7cda4eea35ec53effbe37382f4b28020cd59d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fsc/include/fsx/sim_connect.hpp", "max_forks_repo_name": "gbucknell/fsc-sdk", "max_forks_repo_head_hexsha": "11b7cda4eea35ec53effbe37382f4b28020cd59d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6464646465, "max_line_length": 348, "alphanum_fraction": 0.7146494552, "num_tokens": 3969}
|
# Python libraries
import argparse, os
import torch
import sys
root_dir = os.path.abspath(__file__).split('examples')[0]
sys.path.insert(0, root_dir )
# Lib files
import lib.utils as utils
import lib.medloaders as medical_loaders
import lib.medzoo as medzoo
import lib.train as train
from lib.losses3D import DiceLoss, WeightedCrossEntropyLoss
from batchgenerators.augmentations.crop_and_pad_augmentations import crop
from batchgenerators.dataloading import MultiThreadedAugmenter, SingleThreadedAugmenter
from batchgenerators.examples.brats2017.config import brats_preprocessed_folder, num_threads_for_brats_example
from batchgenerators.transforms import Compose
from batchgenerators.utilities.data_splitting import get_split_deterministic
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
from batchgenerators.dataloading.data_loader import DataLoader
from batchgenerators.augmentations.utils import pad_nd_image
from batchgenerators.transforms.spatial_transforms import SpatialTransform_2, MirrorTransform
from batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, GammaTransform
from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform
from lib.medloaders.miccai_2020_ribfrac import MICCAI2020_RIBFRAC, MICCAI2020_RIBFRAC_DataLoader3D
# Pin training to the first visible GPU and fix the PyTorch RNG seed so runs
# are reproducible (the same seed is also passed to utils.reproducibility in
# main()).
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
seed = 1777777
torch.manual_seed(seed)
def main():
    """Entry point: wire up data loaders, model, loss and trainer, then train.

    Builds the MedicalZoo dataset generators, a patch-based 3-D rib-fracture
    dataloader wrapped in a batchgenerators augmentation pipeline, a Dice
    loss, and the Trainer that runs the optimization loop.
    """
    args = get_arguments()
    utils.reproducibility(args, seed)  # re-seed all RNGs from the module-level seed
    # utils.make_dirs(args.save)
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    # Note the hard-coded dataset path — presumably machine-specific; verify
    # before running on another host.
    # training_generator, val_generator, full_volume, affine = medical_loaders.generate_datasets(args,
    training_generator, val_generator, full_volume, affine, dataset = medical_loaders.generate_datasets(args,
                                                                        path='/data/hejy/MedicalZooPytorch_2cls/datasets')
    model, optimizer = medzoo.create_model(args)
    # Dice loss over 2 classes with sigmoid (not softmax) normalization and
    # uniform class weights placed on the GPU.
    criterion = DiceLoss(classes=2, skip_index_after=args.classes, weight = torch.tensor([1, 1]).cuda(), sigmoid_normalization=True)
    # criterion = WeightedCrossEntropyLoss()
    if args.cuda:
        model = model.cuda()
        # model.restore_checkpoint(args.pretrained)

    # 3-D patch loader fed through a single-threaded augmenter built from
    # get_train_transform (spatial/intensity augmentations).
    dataloader_train = MICCAI2020_RIBFRAC_DataLoader3D(dataset, args.batchSz, args.dim, num_threads_in_multithreaded=2)
    tr_transforms = get_train_transform(args.dim)
    training_generator_aug = SingleThreadedAugmenter(dataloader_train, tr_transforms,)

    trainer = train.Trainer(args, model, criterion, optimizer, train_data_loader=training_generator,
                            valid_data_loader=val_generator, lr_scheduler=None, dataset = dataset, train_data_loader_aug=training_generator_aug)
    trainer.training()
def get_train_transform(patch_size):
    """Build the training-time augmentation pipeline for 3D patches.

    The first transform (SpatialTransform_2) crops/deforms down to
    ``patch_size``; every later transform only changes intensities, so no
    further border artifacts are introduced. Each spatial augmentation fires
    with probability 0.1 per sample; intensity augmentations with 0.15.

    :param patch_size: target spatial patch shape, e.g. (256, 256, 256)
    :return: a batchgenerators ``Compose`` of all transforms
    """
    fifteen_deg = 15 / 360. * 2 * np.pi  # rotation limit, 15 degrees in radians
    transforms = [
        SpatialTransform_2(
            patch_size, [i // 2 for i in patch_size],
            do_elastic_deform=True, deformation_scale=(0, 0.25),
            do_rotation=True,
            angle_x=(-fifteen_deg, fifteen_deg),
            angle_y=(-fifteen_deg, fifteen_deg),
            angle_z=(-fifteen_deg, fifteen_deg),
            do_scale=True, scale=(0.75, 1.25),
            border_mode_data='constant', border_cval_data=0,
            border_mode_seg='constant', border_cval_seg=0,
            order_seg=1, order_data=3,
            random_crop=False,
            p_el_per_sample=0.1, p_rot_per_sample=0.1, p_scale_per_sample=0.1
        ),
        # mirror along all three spatial axes
        MirrorTransform(axes=(0, 1, 2)),
        # brightness scaling for 15% of samples
        BrightnessMultiplicativeTransform((0.7, 1.5), per_channel=True, p_per_sample=0.15),
        # nonlinear intensity (gamma) transform, plain and on the inverted image
        GammaTransform(gamma_range=(0.5, 2), invert_image=False, per_channel=True, p_per_sample=0.15),
        GammaTransform(gamma_range=(0.5, 2), invert_image=True, per_channel=True, p_per_sample=0.15),
        # additive Gaussian noise
        GaussianNoiseTransform(noise_variance=(0, 0.05), p_per_sample=0.15),
        # blurring, to make the model robust to blurry inputs
        GaussianBlurTransform(blur_sigma=(0.5, 1.5), different_sigma_per_channel=True,
                              p_per_channel=0.5, p_per_sample=0.15),
    ]
    return Compose(transforms)
def get_arguments():
    """Parse the command-line arguments for rib-fracture training.

    Also derives ``args.save`` (the checkpoint output directory) from the
    model name and ``--model_save_dir_suff``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchSz', type=int, default=4)
    parser.add_argument('--dataset_name', type=str, default="ribfrac")
    # Patch shape fed to the 3D loader; earlier experiment shapes kept as history.
    parser.add_argument('--dim', nargs="+", type=int, default=(256,256,256))#(128, 128, 48))#(256,256,256))#(256,256,256))#(512,512,96))#(256,256,256))#(64,64,48))#(384,384,128)) #(192,192,96)) # (64,64,48))#(128, 128, 48)) # # patch_shapes = [(64, 128, 128), (96, 128, 128),(64, 160, 160), (96, 160, 160), (64, 192, 192), (96, 192, 192)]
    parser.add_argument('--nEpochs', type=int, default=300)
    parser.add_argument('--inChannels', type=int, default=1)
    parser.add_argument('--inModalities', type=int, default=1)
    parser.add_argument('--samples_train', type=int, default=1200)
    parser.add_argument('--samples_val', type=int, default=100)
    parser.add_argument('--classes', type=int, default=2)
    parser.add_argument('--threshold', default=0.8, type=float)
    # NOTE(review): help text appears copy-pasted from --normalization — confirm.
    parser.add_argument('--augmentation', default='no', type=str,
                        help='Tensor normalization: options max, mean, global')
    parser.add_argument('--normalization', default='global_mean', type=str,
                        help='Tensor normalization: options max, mean, global')
    # NOTE(review): no type= given, so any value passed on the CLI arrives as a
    # (truthy) string; only the Python default is really False.
    parser.add_argument('--loadData', default=False)
    parser.add_argument('--terminal_show_freq', default=1)
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--split', default=1, type=float, help='Select percentage of training data(default: 0.8)')
    parser.add_argument('--lr', default=1e-2, type=float,
                        help='learning rate (default: 1e-3)')
    # NOTE(review): type=float with a list default — only the default is a list;
    # a CLI-supplied value would parse as a single float.
    parser.add_argument('--lrstep', default=[110, 200], type=float,
                        help='lr decay step ')
    # NOTE(review): store_true with default=True means this flag can never be
    # disabled from the command line.
    parser.add_argument('--cuda', action='store_true', default=True)
    parser.add_argument('--model', type=str, default='UNET3D',#"SKIPDENSENET3D",#" #'VNET2',#"RESNET3DVAE",#,"RESNETMED3D",#"HIGHRESNET",#'DENSENET1',#'DENSEVOXELNET',#
                        choices=('VNET', 'VNET2', 'UNET3D', 'DENSENET1', 'DENSENET2', 'DENSENET3', 'HYPERDENSENET'))
    parser.add_argument('--opt', type=str, default='sgd',
                        choices=('sgd', 'adam', 'rmsprop'))
    parser.add_argument('--log_dir', type=str,
                        default='../runs/')
    # Suffix appended to the checkpoint directory name; earlier experiment
    # suffixes are kept below as commented-out history.
    parser.add_argument('--model_save_dir_suff', type=str,
                        # default='_512x512x96_thresh0.6_weight1_sample200')
                        # default='_512x512x96_thresh0.6_weight1_sample1200')
                        # default='_256x256x256_thresh0.6_weight1_sample400')
                        # default='_256x256x256_thresh0.6_weight1_sample400_sigmoid')
                        # default='_256x256x256_thresh0.6_weight1_sample1200_sigmoid')
                        # default='_256x256x256_thresh0.8_weight1_sample1200_sigmoid')
                        default='_256x256x256_thresh0.8_weight1_sample1200_sigmoid_aug_nocrop')
                        # default='_256x256x256_thresh0.6_weight1_sample400_sigmoid_aug_nocrop')
                        # default='_64x64x48_thresh0.1_weight1_sample400')
                        # default='_128x128x48_thresh0.1_weight1_sample400_softmax_debug')
                        # default='_128x128x48_thresh0.1_weight1_sample400_softmax')
                        # default='_128x128x48_thresh0.1_weight1_sample400_sigmoid_augdebug')
                        # default='_128x128x48_thresh0.1_weight0.1_sample400_softmax')
                        # default='_128x128x48_thresh0.1_weight1_sample400_wce')
                        # default='_128x128x48_thresh0.1_weight1_sample400_sigmoid_aug_nocrop')
                        # default='_test')
    parser.add_argument('--pretrained',
                        # default='/data/hejy/MedicalZooPytorch/saved_models/UNET3D_checkpoints/UNET3D_64x64x48_0.1_weight0.1/UNET3D_64x64x48_0.1_weight0.1_BEST.pth',
                        # default='/data/hejy/MedicalZooPytorch/saved_models/UNET3D_checkpoints/UNET3D_128x128x48_thresh0.3_weight0.01_sample400_epoch600_test_val+/UNET3D_128x128x48_thresh0.3_weight0.01_sample400_epoch600_test_val+_BEST.pth',
                        # default = '/data/hejy/MedicalZooPytorch_2cls/saved_models/UNET3D_checkpoints/2cls_UNET3D_512x512x96_thresh0.6_weight1_sample200/2cls_UNET3D_512x512x96_thresh0.6_weight1_sample200_BEST.pth',
                        # default='/data/hejy/MedicalZooPytorch_2cls/saved_models/UNET3D_checkpoints/2cls_UNET3D_128x128x48_thresh0.1_weight1_sample400_softmax/2cls_UNET3D_128x128x48_thresh0.1_weight1_sample400_softmax_BEST.pth',
                        # default='/data/hejy/MedicalZooPytorch/saved_models/UNET3D_checkpoints/UNET3D_128x128x48_thresh0.3_weight0.01_sample400/UNET3D_128x128x48_thresh0.3_weight0.01_sample400_BEST.pth',
                        # default='/data/hejy/MedicalZooPytorch/saved_models/UNET3D_checkpoints/UNET3D_128x128x48_thresh0.3_sample400_epoch600_test_val+_wce/UNET3D_128x128x48_thresh0.3_sample400_epoch600_test_val+_wce_BEST.pth',
                        default='/data/hejy/MedicalZooPytorch_2cls/saved_models/UNET3D_checkpoints/2cls_UNET3D_256x256x256_thresh0.6_weight1_sample400_sigmoid_aug_nocrop/2cls_UNET3D_256x256x256_thresh0.6_weight1_sample400_sigmoid_aug_nocrop_last_epoch_copy.pth',
                        type=str, metavar='PATH',
                        help='path to pretrained model')
    args = parser.parse_args()
    # Checkpoint directory: <root>/<model>_checkpoints/2cls_<model><suffix>
    # args.save = '/data/hejy/MedicalZooPytorch/saved_models/' + args.model + '_checkpoints/' + args.model + '_{}_{}_'.format(
    #     utils.datestr(), args.dataset_name)
    args.save = '/data/hejy/MedicalZooPytorch_2cls/saved_models/' + args.model + '_checkpoints/' + '2cls_'+ args.model + args.model_save_dir_suff
    return args
# Script entry point.
if __name__ == '__main__':
    main()
|
{"hexsha": "33b8bf4d48e66aed806e60365f01eccb944c34a0", "size": 11651, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/train_ribfrac_aug.py", "max_stars_repo_name": "eynaij/MedicalZooPytorch_RibFrac", "max_stars_repo_head_hexsha": "720cd2a3b7e62a47ed35b9e41e15db92e802ffb8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-20T15:59:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-12T09:28:35.000Z", "max_issues_repo_path": "examples/train_ribfrac_aug.py", "max_issues_repo_name": "eynaij/MedicalZooPytorch_RibFrac", "max_issues_repo_head_hexsha": "720cd2a3b7e62a47ed35b9e41e15db92e802ffb8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-17T02:38:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-17T02:38:05.000Z", "max_forks_repo_path": "examples/train_ribfrac_aug.py", "max_forks_repo_name": "eynaij/MedicalZooPytorch_RibFrac", "max_forks_repo_head_hexsha": "720cd2a3b7e62a47ed35b9e41e15db92e802ffb8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 60.6822916667, "max_line_length": 346, "alphanum_fraction": 0.7019998283, "include": true, "reason": "import numpy", "num_tokens": 3045}
|
import numpy as np
import matplotlib.pyplot as plt
# scipy.ndimage.filters is a long-deprecated alias namespace (removed in
# SciPy 1.15); import the functions straight from scipy.ndimage instead.
from scipy.ndimage import gaussian_filter
from scipy.ndimage import gaussian_filter1d

# NOTE(review): the 'seaborn-bright' style name was removed in matplotlib 3.6
# (newer releases ship it as 'seaborn-v0_8-bright') — confirm the pinned
# matplotlib version before upgrading.
plt.style.use('seaborn-bright')

# Directory holding one sub-folder of precomputed .npy arrays per experiment.
savedir = '/scratch/ws/1/haja565a-workspace2/quant/'

# Experiments to overlay in the plot.
expNames = ['700g12', '700g13', '700g14', '700g15', '700g16', '700g17']
# Confinement radius r_c per experiment (used in the legend labels).
expConRad = {'700j17': 25, '700j18': 20, '700j19': 15, '700g13': 25, '700g14': 20, '700g16': 15, '700g11': 35, '700g12': 30, '700g15': 17.5,
             '700g17': 12.5, '700g18': 10, '700g19': 7.5}
# L_i per experiment — currently unused by the plot below (legend shows r_c only).
expLi = {'700j17': 7500, '700j18': 7500, '700j19': 7500, '700g13': 10000, '700g14': 10000, '700g16': 10000, '700g11': 10000, '700g12': 10000, '700g15': 10000,
         '700g17': 10000, '700g18': 10000, '700g19': 10000}

# Load the precomputed rotational/translational order parameters and time axes.
Dictrotorder = {}
Dicttransorder = {}
Dicttime = {}
for exp in expNames:
    Dictrotorder[exp] = np.load(savedir + exp + "/rotOrder.npy")
    Dicttransorder[exp] = np.load(savedir + exp + "/transOrder.npy")
    Dicttime[exp] = np.load(savedir + exp + "/timearray.npy")

# Plot the (heavily smoothed) rotational order parameter of every experiment.
fig, ax = plt.subplots()
for exp in expNames:
    ax.plot(Dicttime[exp], gaussian_filter(Dictrotorder[exp], sigma=100),
            label=r"$r_c = %s$" % expConRad[exp])
ax.legend()
ax.set_xlabel('time')
ax.set_ylabel(r'$O_R$')
ax.set_xlim(0, 200)
ax.set_ylim(-1, 1)
ax.grid()
plt.savefig(savedir + 'rotOrdermix.png')
|
{"hexsha": "d528a6bc9f2e496896c2b55b0b90884fbcc3c2f0", "size": 1404, "ext": "py", "lang": "Python", "max_stars_repo_path": "multiquantorder.py", "max_stars_repo_name": "harishpjain/cell_growth_division", "max_stars_repo_head_hexsha": "2e4b56a443bfd253a2d2b75656cbceb688f5ce04", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "multiquantorder.py", "max_issues_repo_name": "harishpjain/cell_growth_division", "max_issues_repo_head_hexsha": "2e4b56a443bfd253a2d2b75656cbceb688f5ce04", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "multiquantorder.py", "max_forks_repo_name": "harishpjain/cell_growth_division", "max_forks_repo_head_hexsha": "2e4b56a443bfd253a2d2b75656cbceb688f5ce04", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5454545455, "max_line_length": 159, "alphanum_fraction": 0.6588319088, "include": true, "reason": "import numpy,from scipy", "num_tokens": 547}
|
import numpy as np
from pathlib import Path
from gensim.models.fasttext import FastText as FT_gensim
from gensim.test.utils import datapath
class WordEmbeddingUtils:
    """Utilities for working with a gensim FastText word-embedding model.

    The model is loaded once at construction time from ``models/`` under the
    current working directory.
    """

    def __init__(self):
        super().__init__()
        self.read_wv_model()

    def read_wv_model(self, model_name='embeddings_one_gram_fast_tweets_only'):
        """Load the FastText model named *model_name* from ``models/`` and
        store it on ``self.model_gensim``."""
        model_path = str(Path.cwd().joinpath('models', model_name))
        self.model_gensim = FT_gensim.load(model_path)

    def check_bigram(self, word):
        """Return True when *word* consists of exactly two space-separated tokens."""
        return len(word.split(' ')) == 2

    def get_bi_grams_vector(self, string):
        """Return the embedding of a bigram as the mean of its two word vectors.

        :param string: two words separated by a single space
        :return: averaged embedding vector
        """
        first, second = string.split(' ')
        return np.mean([self.model_gensim.wv.get_vector(first),
                        self.model_gensim.wv.get_vector(second)],
                       axis=0)

    def compute_cosine_similarity(self, words):
        """Return the cosine similarity between the embeddings of two words.

        :param words: sequence of two words (unigrams or bigrams)
        """
        vec_a = self.get_word_vector(words[0])
        vec_b = self.get_word_vector(words[1])
        norm_product = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
        return np.matmul(vec_a, vec_b) / norm_product

    def get_word_vector(self, word):
        """Return the embedding of *word*, averaging the two tokens when it is
        a bigram."""
        if self.check_bigram(word):
            return self.get_bi_grams_vector(word)
        return self.model_gensim.wv.get_vector(word)
|
{"hexsha": "846612fd8fcb1b01bc004a51c18deb27556cb091", "size": 2366, "ext": "py", "lang": "Python", "max_stars_repo_path": "topic_modeling/word_embedings_utils.py", "max_stars_repo_name": "espoirMur/balobi_nini", "max_stars_repo_head_hexsha": "b68b9af4c84ec0f5b38ae8ba52d5f0d32b41ead3", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-30T08:03:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-30T08:03:10.000Z", "max_issues_repo_path": "topic_modeling/word_embedings_utils.py", "max_issues_repo_name": "espoirMur/balobi_nini", "max_issues_repo_head_hexsha": "b68b9af4c84ec0f5b38ae8ba52d5f0d32b41ead3", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2020-09-23T14:05:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-04T22:40:41.000Z", "max_forks_repo_path": "topic_modeling/word_embedings_utils.py", "max_forks_repo_name": "espoirMur/balobi_nini", "max_forks_repo_head_hexsha": "b68b9af4c84ec0f5b38ae8ba52d5f0d32b41ead3", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-29T10:38:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-29T10:38:13.000Z", "avg_line_length": 31.1315789474, "max_line_length": 79, "alphanum_fraction": 0.6014370245, "include": true, "reason": "import numpy", "num_tokens": 527}
|
import proto.filestream_pb2_grpc as f_pb2_grpc
import proto.filestream_pb2 as f_pb2
import numpy as np
import grpc
def run():
    """Connect to the local FileStream gRPC server and print every streamed
    response until the stream ends or the RPC fails."""
    stub = f_pb2_grpc.FileStreamServiceStub(
        grpc.insecure_channel('127.0.0.1:50000'))
    print('Receiver started successfully')
    while True:
        try:
            responses = stub.FileStreamResponse(
                f_pb2.FileRequest(msg='requesting file'))
            for response in responses:
                print(response)
        except grpc.RpcError as err:
            print(err.details())
            break
# Script entry point.
if __name__ == '__main__':
    run()
|
{"hexsha": "6036c6ed7c2c878cfed03251a284b0adc42428ab", "size": 601, "ext": "py", "lang": "Python", "max_stars_repo_path": "local_server/Receiver.py", "max_stars_repo_name": "Birkenpapier/FileTransfer", "max_stars_repo_head_hexsha": "aef42aa6f81419c5cc37b4f513cb7b6450cbd648", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "local_server/Receiver.py", "max_issues_repo_name": "Birkenpapier/FileTransfer", "max_issues_repo_head_hexsha": "aef42aa6f81419c5cc37b4f513cb7b6450cbd648", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "local_server/Receiver.py", "max_forks_repo_name": "Birkenpapier/FileTransfer", "max_forks_repo_head_hexsha": "aef42aa6f81419c5cc37b4f513cb7b6450cbd648", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.724137931, "max_line_length": 83, "alphanum_fraction": 0.693843594, "include": true, "reason": "import numpy", "num_tokens": 157}
|
##
## Software PI-Net: Pose Interacting Network for Multi-Person Monocular 3D Pose Estimation
## Copyright Inria and UPC
## Year 2021
## Contact : wen.guo@inria.fr
##
## The software PI-Net is provided under MIT License.
##
import os
import os.path as osp
import sys
import numpy as np
class Config:
    """Central configuration for PI-Net training and evaluation.

    All values are class attributes; a single instance (``cfg``) is created at
    module level and mutated via :meth:`set_args`.
    """

    trainset = ['MuCo']
    testset = 'MuPoTS_skeleton'

    ## directory
    cur_dir = osp.dirname(os.path.abspath(__file__))
    root_dir = osp.join(cur_dir, '../')
    this_dir = cur_dir.split(osp.sep)[-1]
    data_dir = osp.join(root_dir, 'data')
    output_dir = osp.join(root_dir, 'output')
    model_dir_link = osp.join(cur_dir, 'snapshot')

    ## model setting, input & output
    resnet_type = 50  # one of 50, 101, 152
    input_shape = (256, 256)
    output_shape = (input_shape[0]//4, input_shape[1]//4)
    depth_dim = 64
    bbox_3d_shape = (2000, 2000, 2000)  # depth, height, width
    pixel_mean = (0.485, 0.456, 0.406)
    pixel_std = (0.229, 0.224, 0.225)

    # rnn setting
    shuffle_rate = 0.75

    ## DATA
    nb_test_seq = 4
    pair_index_path_muco = osp.join(data_dir, trainset[0], 'data', 'annotations/MuCo_id2pairId.json')
    pair_index_path = osp.join(data_dir, testset, 'data', 'MuPoTS-3D_id2pairId.json')
    train_annot_path = osp.join(data_dir, trainset[0], 'data', 'annotations/MuCo-3DHP_with_posenent_result_filter.json')
    val_annot_path = osp.join(data_dir, testset, 'data', 'MuPoTS-3D_with_posenet_result.json')  # 'MuPoTS-3D.json')

    ## training config
    batch_size = 4  # 32
    end_epoch = 26  # 31 # 21
    lr = 1e-5  # 5e-5
    lr_strategy = "const"  # or "poly"
    # poly schedule parameters
    end_lr = lr * 1e-3
    power = 0.9
    # const (step) schedule parameters
    lr_dec_epoch = [17, 21]
    lr_dec_factor = 10

    ## log
    snapshot_iter = 1
    suffix = this_dir + '_bz' + str(batch_size) + '_lr' + str(lr)
    model_dir = osp.join(output_dir, 'snapshot', suffix)
    vis_dir = osp.join(output_dir, 'vis', suffix)
    tensorboard_log_dir = osp.join(output_dir, 'tensorboard_log', suffix)
    result_dir = osp.join(output_dir, 'result', suffix)
    log_dir = osp.join(output_dir, 'log', suffix)

    ## testing config
    test_batch_size = 1
    flip_test = False  # True
    use_gt_info = True  # False
    vis_A = False  # True

    ## others
    num_thread = 20
    gpu_ids = '0'
    num_gpus = 1
    continue_train = False

    def set_args(self, gpu_ids, bz, lr, snapshot_iter, nb_crossval_split=None, continue_train=False, vis_A=False):
        """Apply command-line overrides and (re)derive every dependent path.

        :param gpu_ids: comma-separated GPU id string, exported to CUDA_VISIBLE_DEVICES
        :param bz: batch size override (falsy to keep the default)
        :param lr: learning rate override (falsy to keep the default)
        :param snapshot_iter: checkpoint frequency override
        :param nb_crossval_split: when set, switch to MuPoTS cross-validation
            annotations for that split and retag the output suffix
        :param continue_train: resume from the latest checkpoint
        :param vis_A: enable extra visualisation output
        """
        if gpu_ids:
            self.gpu_ids = gpu_ids
        self.num_gpus = len(self.gpu_ids.split(','))
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu_ids
        self.continue_train = continue_train
        self.vis_A = vis_A
        if bz:
            self.batch_size = bz
        if lr:
            self.lr = lr
        if snapshot_iter:
            self.snapshot_iter = snapshot_iter
        if self.lr_strategy == "poly":
            self.suffix = self.this_dir + '_bz' + str(self.batch_size) + '_polylr' + str(self.lr)
        elif self.lr_strategy == "const":
            self.suffix = self.this_dir + '_bz' + str(self.batch_size) + '_lr' + str(self.lr)
        if nb_crossval_split:
            self.trainset = ['MuPoTS_skeleton']
            self.nb_crossval_split = nb_crossval_split
            self.train_annot_path = osp.join(self.data_dir, self.trainset[0], 'data', 'annotations_crossval', 'mupots_crossval_train' + str(self.nb_crossval_split) + '.json')
            self.val_annot_path = osp.join(self.data_dir, self.testset, 'data', 'annotations_crossval', 'mupots_crossval_val' + str(self.nb_crossval_split) + '.json')
            self.suffix = self.this_dir + '_' + str(self.nb_crossval_split) + '_bz' + str(self.batch_size) + '_lr' + str(self.lr)
        # Re-derive every output path from the (possibly updated) suffix.
        self.model_dir = osp.join(self.output_dir, 'snapshot', self.suffix)
        self.vis_dir = osp.join(self.output_dir, 'vis', self.suffix)
        self.tensorboard_log_dir = osp.join(self.output_dir, 'tensorboard_log', self.suffix)
        self.result_dir = osp.join(self.output_dir, 'result', self.suffix)
        self.log_dir = osp.join(self.output_dir, 'log', self.suffix)
        # Fix: use self.* rather than the module-level singleton `cfg`, so the
        # method also works on any instance other than the global one.
        make_folder(self.model_dir)
        make_folder(self.log_dir)
        make_folder(self.result_dir)
        make_folder(self.vis_dir)
        print('>>> Using GPU: {}'.format(self.gpu_ids))
        print('>>> bz: {}, lr: {}, snapshot: {}'.format(self.batch_size, self.lr, self.snapshot_iter))
# Instantiate the global configuration singleton and register project paths.
cfg = Config()
print('>>> path:', cfg.cur_dir)

sys.path.insert(0, osp.join(cfg.root_dir))
sys.path.insert(0, osp.join(cfg.root_dir, 'data'))

from utils.dir_utils import add_pypath, make_folder, link_file

# Make every dataset package importable.
add_pypath(osp.join(cfg.data_dir))
for dataset_name in cfg.trainset:
    add_pypath(osp.join(cfg.data_dir, dataset_name))
add_pypath(osp.join(cfg.data_dir, cfg.testset))
|
{"hexsha": "8497160dbbabb7a5ac0052b0f7903a871bbdfc40", "size": 4772, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/config.py", "max_stars_repo_name": "GUO-W/PI-Net", "max_stars_repo_head_hexsha": "0c93a05d3aa277a80101f69ad196e5d6c8edba76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-21T14:07:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T07:39:00.000Z", "max_issues_repo_path": "model/config.py", "max_issues_repo_name": "GUO-W/PI-Net", "max_issues_repo_head_hexsha": "0c93a05d3aa277a80101f69ad196e5d6c8edba76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/config.py", "max_forks_repo_name": "GUO-W/PI-Net", "max_forks_repo_head_hexsha": "0c93a05d3aa277a80101f69ad196e5d6c8edba76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7076923077, "max_line_length": 174, "alphanum_fraction": 0.6548616932, "include": true, "reason": "import numpy", "num_tokens": 1360}
|
# -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2020 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from bokeh.plotting import figure, output_file, show, curdoc, ColumnDataSource
from bokeh.layouts import column
from bokeh.palettes import Spectral11, viridis
import numpy as np
import pandas as pd
def timetrace_plot(time, data,
                   filename='out.html',
                   legend=None,
                   x_label='',
                   y_label='',
                   legend_location=None,
                   backend='webgl',
                   **kwargs):
    """Classic time vs. value plot, one line per column of *data*.

    :param time: shared x-axis values
    :param data: 2-D array, one species per column
    :param filename: output HTML file
    :param legend: optional per-species legend labels
    :param legend_location: optional bokeh legend location string
    :param backend: bokeh output backend (default 'webgl')
    """
    # output to static HTML file
    output_file(filename)

    # Pick a palette large enough for all species.
    num_species = data.shape[1]
    palette = viridis(num_species) if num_species > 11 else Spectral11[:num_species]

    tooltips = [
        ("index", "$index"),
        ("(t,c)", "($x, $y)"),
        ("species", "$name"),
    ]

    fig = figure(tooltips=tooltips, **kwargs)
    for idx in range(num_species):
        label = legend[idx] if legend is not None else 'Compound {}'.format(idx)
        source = ColumnDataSource(data=dict(
            time=time,
            data=data[:, idx],
        ))
        fig.line(x='time', y='data', line_color=palette[idx], source=source,
                 name=label, legend_label=label)

    if legend_location is not None:
        fig.legend.location = legend_location
    fig.legend.click_policy = "hide"

    # Axis labels and rendering backend.
    fig.xaxis.axis_label = x_label
    fig.yaxis.axis_label = y_label
    fig.output_backend = backend

    show(fig)
def boxplot(df, filename):
    """Render a log-scale box-and-whisker plot with one box per DataFrame column.

    Adapted from: https://bokeh.pydata.org/en/latest/docs/gallery/boxplot.html

    :param df: DataFrame; each column becomes one category on the x-axis.
        Columns whose mean is NaN (e.g. all-NaN columns) are dropped.
    :param filename: output HTML file.
    """
    # Categories to plot. (A previously-built `not_nan` list was dead code and
    # relied on the unreliable `e is not np.nan` identity check; removed.)
    cats = df.mean().dropna().index.values

    # find the quartiles and IQR for each category
    groups = df[cats]
    q1 = groups.quantile(q=0.25)
    q2 = groups.quantile(q=0.5)
    q3 = groups.quantile(q=0.75)
    iqr = q3 - q1
    upper = q3 + 1.5 * iqr
    lower = q1 - 1.5 * iqr

    qmin = groups.quantile(q=0.00)
    qmax = groups.quantile(q=1.00)
    # Clamp whiskers to the observed data range.
    upper = [min([x, y]) for (x, y) in zip(list(qmax.loc[:]), upper)]
    lower = [max([x, y]) for (x, y) in zip(list(qmin.loc[:]), lower)]

    p = figure(tools="",
               plot_height=1000,
               plot_width=20*len(cats),
               y_axis_type="log",
               background_fill_color="#efefef",
               y_range=(qmin.min(), qmax.max()),
               x_range=cats,
               toolbar_location=None)

    # stems (whiskers)
    p.segment(cats, upper, cats, q3, line_color="black")
    p.segment(cats, lower, cats, q1, line_color="black")

    # boxes (upper and lower quartile halves)
    p.vbar(cats, 0.7, q2, q3, fill_color="#E08E79", line_color="black")
    p.vbar(cats, 0.7, q1, q2, fill_color="#3B8686", line_color="black")

    p.xgrid.grid_line_color = None
    p.ygrid.grid_line_color = "white"
    p.grid.grid_line_width = 2
    p.xaxis.major_label_text_font_size = "12pt"
    p.xaxis.major_label_orientation = np.pi / 2.

    output_file(filename)
    show(p)
def plot_population_per_variable(data,
                                 filename,
                                 stride = 1,
                                 variables=None,
                                 y_label='concentration',
                                 x_label='time',
                                 **kwargs):
    """Plot every solution of a population, producing one figure per variable.

    :param data: DataFrame with 'solution_id', 'time' and one column per variable
    :param filename: format string with a ``{}`` slot for the variable name
    :param stride: plot every ``stride``-th point (e.g. 100 keeps 1% of points)
    :param variables: columns to plot; defaults to all data columns
    """
    figures = OrderedDict()
    by_solution = data.groupby('solution_id')

    tooltips = [
        ("index", "$index"),
        ("(t,c)", "($x, $y)"),
        ("species", "$name"),
    ]

    selected = data.columns if variables is None else variables

    for name in selected:
        # Bookkeeping columns are never plotted.
        if name in ('solution_id', 'time'):
            continue
        fig = figure(tooltips=tooltips, **kwargs)
        for _, solution in by_solution:
            thinned = solution.iloc[::stride]
            fig.line(thinned['time'], thinned[name],
                     line_alpha=0.2)
        fig.title.text = name
        figures[name] = fig
        output_file(filename.format(name))
        # Axis labels.
        fig.xaxis.axis_label = x_label
        fig.yaxis.axis_label = y_label
        show(fig)

    curdoc().clear()
|
{"hexsha": "d1165f1de31c20ec6d809f27f8ae0590964cbd46", "size": 6057, "ext": "py", "lang": "Python", "max_stars_repo_path": "skimpy/viz/plotting.py", "max_stars_repo_name": "AQ18/skimpy", "max_stars_repo_head_hexsha": "435fc50244f2ca815bbb39d525a82a4692f5c0ac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-11-05T10:59:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T01:38:31.000Z", "max_issues_repo_path": "skimpy/viz/plotting.py", "max_issues_repo_name": "AQ18/skimpy", "max_issues_repo_head_hexsha": "435fc50244f2ca815bbb39d525a82a4692f5c0ac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2022-01-27T10:23:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T18:16:06.000Z", "max_forks_repo_path": "skimpy/viz/plotting.py", "max_forks_repo_name": "AQ18/skimpy", "max_forks_repo_head_hexsha": "435fc50244f2ca815bbb39d525a82a4692f5c0ac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-08-04T17:01:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T01:38:32.000Z", "avg_line_length": 29.5463414634, "max_line_length": 95, "alphanum_fraction": 0.5497771174, "include": true, "reason": "import numpy", "num_tokens": 1424}
|
[STATEMENT]
lemma req_neq_pro [iff]: "req A r n I B \<noteq> pro B' ofr A' r' I' (cons M L) J C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. req A r n I B \<noteq> pro B' ofr A' r' I' \<lbrace>M, L\<rbrace> J C
[PROOF STEP]
by (auto simp: req_def pro_def)
|
{"llama_tokens": 123, "file": null, "length": 1}
|
#!/usr/bin/env python
# Copyright (c) 2014, Robot Control and Pattern Recognition Group, Warsaw University of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import roslib; roslib.load_manifest('barrett_hand_controller')
import sys
import rospy
import math
import std_msgs.msg
import tf
from tf import *
from tf.transformations import *
from tf2_msgs.msg import *
import PyKDL
import tf_conversions.posemath as pm
import copy
from scipy import optimize
import random
import numpy as np
right_makrer_id=2
def locateMarker(T_T2_7, T_C_M):
    # Estimate the constant transform T_7_M between the wrist frame (7) and a
    # marker frame (M) from paired observations:
    #   T_T2_7[i]: wrist pose in the torso frame (forward kinematics)
    #   T_C_M[i]:  marker pose in the camera frame (marker detection)
    # Returns [score, T_7_M] (score = weighted sum of squared residuals), or
    # None when the input lists mismatch or hold fewer than 2 samples.
    # Python 2 code (print statements); frames are PyKDL.Frame objects.
    if len(T_T2_7) != len(T_C_M):
        return None
    if len(T_T2_7) < 2:
        return None

    # Weight each sample by how directly the marker faces the camera: the
    # marker z-axis expressed in the camera frame should point towards the
    # camera (negative z). Samples above the -z_limit cutoff get zero weight.
    weights_ori = []
    weights_pos = []
    z_limit = 0.3
    for idx in range(0, len(T_C_M)):
        # Marker z-axis direction in the camera frame.
        v = T_C_M[idx] * PyKDL.Vector(0,0,1) - T_C_M[idx] * PyKDL.Vector()
        if v.z() > -z_limit:
            weights_ori.append(0.0)
            weights_pos.append(0.0)
            continue
        # v.z() is in range (-1.0, -z_limit)
        # Map to (0, 1): face-on views get weight near 1.
        weight = ((-v.z()) - z_limit)/(1.0-z_limit)
        if weight > 1.0 or weight < 0.0:
            print "error: weight==%s"%(weight)
        # Oblique views weigh more for orientation, face-on views for position.
        weights_ori.append(1.0-weight)
        weights_pos.append(weight)

    # Reference samples: the best-weighted view for each estimation stage.
    best_ori_idx = weights_ori.index( max(weights_ori) )
    best_pos_idx = weights_pos.index( max(weights_pos) )

    print "best orientation index: %s"%(best_ori_idx)
    print "best position index: %s"%(best_pos_idx)

    # Relative motions w.r.t. the orientation reference sample, for both the
    # wrist chain (7) and the marker chain (M). For the true T_7_M:
    #   T_7bo_7i * T_7_M == T_7_M * T_Mbo_Mi
    T_7bo_7i = []
    T_Mbo_Mi = []
    for idx in range(0, len(T_T2_7)):
        T_7bo_7i.append(T_T2_7[best_ori_idx].Inverse() * T_T2_7[idx])
        T_Mbo_Mi.append(T_C_M[best_ori_idx].Inverse() * T_C_M[idx])

    # Same relative motions w.r.t. the position reference sample.
    T_7bp_7i = []
    T_Mbp_Mi = []
    for idx in range(0, len(T_T2_7)):
        T_7bp_7i.append(T_T2_7[best_pos_idx].Inverse() * T_T2_7[idx])
        T_Mbp_Mi.append(T_C_M[best_pos_idx].Inverse() * T_C_M[idx])

    def estOrientation():
        # One randomly-initialized SLSQP run estimating the rotation part of
        # T_7_M (Euler ZYX angles); returns [score, Frame(rotation)].
        def calc_R(rx, ry, rz):
            # Weighted rotational residual of each sample for this candidate.
            R_7_M = PyKDL.Frame(PyKDL.Rotation.EulerZYX(rx, ry, rz))
            ret = []
            for idx in range(0,len(T_7bo_7i)):
                diff = PyKDL.diff( T_7bo_7i[idx] * R_7_M, R_7_M * T_Mbo_Mi[idx] )
                ret.append( diff.rot.Norm() * weights_ori[idx] )
            return ret

        def f_2(c):
            """ calculate the algebraic distance between each contact point and jar surface pt """
            Di = calc_R(*c)
            return Di

        def sumf_2(p):
            # Scalar objective: sum of squared residuals.
            return math.fsum(np.array(f_2(p))**2)

        # Random restart initial guess in (0, 1)^3.
        angle_estimate = random.random(), random.random(), random.random()
#        angle_2, ier = optimize.leastsq(f_2, angle_estimate, maxfev = 10000)
        # least squares with constraints
        angle_2 = optimize.fmin_slsqp(sumf_2, angle_estimate, bounds=[(-math.pi, math.pi),(-math.pi, math.pi),(-math.pi, math.pi)], iprint=0)
        score = calc_R(angle_2[0],angle_2[1],angle_2[2])
        score_v = 0.0
        for s in score:
            score_v += s*s
        return [score_v, PyKDL.Frame(PyKDL.Rotation.EulerZYX(angle_2[0],angle_2[1],angle_2[2]))]

    # Keep the best of 10 random restarts for the orientation.
    best_score = 1000000.0
    best_R_7_M = PyKDL.Frame()
    for i in range(0, 10):
        score, R_7_M = estOrientation()
        if score < best_score:
            best_score = score
            best_R_7_M = copy.deepcopy(R_7_M)

    def estPos(R_7_M_est):
        # Estimate the translation of T_7_M with the rotation held fixed at
        # R_7_M_est; returns [score, Frame(rotation, translation)].
        rot_mx = copy.deepcopy(R_7_M_est.M)
        def calc_R(px, py, pz):
            # Weighted translational residual of each sample for this candidate.
            R_7_M = PyKDL.Frame(rot_mx, PyKDL.Vector(px, py, pz))
            ret = []
            for idx in range(0,len(T_7bp_7i)):
                diff = PyKDL.diff( T_7bp_7i[idx] * R_7_M, R_7_M * T_Mbp_Mi[idx] )
                ret.append( diff.vel.Norm() * weights_pos[idx] )
            return ret

        def f_2(c):
            """ calculate the algebraic distance between each contact point and jar surface pt """
            Di = calc_R(*c)
            return Di

        # Unconstrained least squares from the origin.
        pos_estimate = 0.0, 0.0, 0.0
        pos_2, ier = optimize.leastsq(f_2, pos_estimate, maxfev = 10000)
        score = calc_R(pos_2[0],pos_2[1],pos_2[2])
        score_v = 0.0
        for s in score:
            score_v += s*s
        return [score_v, PyKDL.Frame(rot_mx, PyKDL.Vector(pos_2[0], pos_2[1], pos_2[2]))]

    # Keep the best of 10 position refinements on top of the best rotation.
    best_score = 1000000.0
    best_T_7_M = PyKDL.Frame()
    for i in range(0, 10):
        score, T_7_M = estPos(best_R_7_M)
        if score < best_score:
            best_score = score
            best_T_7_M = copy.deepcopy(T_7_M)

    return [best_score, best_T_7_M]
def meanOrientation(T, weights=None):
    """Compute a weighted mean rotation of the frames in T.

    Fits Euler ZYX angles with SLSQP, minimising the weighted rotational
    distance to every input frame.

    :param T: list of PyKDL.Frame; only the rotation part of each is used.
    :param weights: optional list of per-frame weights (same length as T);
        defaults to uniform weights.
    :return: [score, frame] -- the sum-of-squares residual and a
        PyKDL.Frame carrying the fitted mean rotation.
    """
    # Keep only the rotation part of each input frame.
    R = []
    for t in T:
        R.append(PyKDL.Frame(copy.deepcopy(t.M)))
    if weights is None:
        wg = list(np.ones(len(T)))
    else:
        wg = weights
    # BUG FIX: this was sum(weights), which raised TypeError whenever the
    # default weights=None was used; the uniform weights live in wg.
    wg_sum = sum(wg)
    def calc_R(rx, ry, rz):
        # Weighted rotational residual of every frame w.r.t. the candidate mean.
        R_mean = PyKDL.Frame(PyKDL.Rotation.EulerZYX(rx, ry, rz))
        diff = []
        for r in R:
            diff.append(PyKDL.diff(R_mean, r))
        ret = []
        for idx in range(0, len(diff)):
            rot_err = diff[idx].rot.Norm()
            ret.append(rot_err * wg[idx] / wg_sum)
        return ret
    def f_2(c):
        """Residual vector for the optimizer."""
        Di = calc_R(*c)
        return Di
    def sumf_2(p):
        # Scalar objective: sum of squared residuals.
        return math.fsum(np.array(f_2(p))**2)
    # Seed the optimizer with the first frame's Euler angles.
    angle_estimate = R[0].M.GetEulerZYX()
    # Least squares with box constraints on each angle.
    angle_2 = optimize.fmin_slsqp(sumf_2, angle_estimate, bounds=[(-math.pi, math.pi),(-math.pi, math.pi),(-math.pi, math.pi)], iprint=0)
    score = calc_R(angle_2[0], angle_2[1], angle_2[2])
    score_v = 0.0
    for s in score:
        score_v += s*s
    return [score_v, PyKDL.Frame(PyKDL.Rotation.EulerZYX(angle_2[0], angle_2[1], angle_2[2]))]
def meanPosition(T, weights=None):
    """Compute the weighted mean translation of the frames in T.

    :param T: list of PyKDL.Frame; only the position part of each is used.
    :param weights: optional list of per-frame weights (same length as T);
        defaults to uniform weights.
    :return: PyKDL.Vector holding the weighted mean position.
    """
    if weights is None:
        wg = list(np.ones(len(T)))
    else:
        wg = weights
    # BUG FIX: this was sum(weights), which raised TypeError whenever the
    # default weights=None was used; the uniform weights live in wg.
    wg_sum = sum(wg)
    mean_p = PyKDL.Vector()
    for idx in range(0, len(T)):
        mean_p += T[idx].p * wg[idx] / wg_sum
    return mean_p
if __name__ == "__main__":
    # Script: collect still, well-angled marker observations, solve for the
    # gripper->marker transform, then broadcast the resulting camera pose.
    a = []
    for arg in sys.argv:
        a.append(arg)
    rospy.init_node('head_position', anonymous=True)
    tf_listener = tf.TransformListener()
    rospy.sleep(2.0)
    print "waiting for markers..."
    # Paired observation lists fed to locateMarker().
    T_C_M = []
    T_T2_7 = []
    # Reference transforms used to detect when the arm/marker stops moving.
    T_C_M_stable = PyKDL.Frame()
    T_T2_7_stable = PyKDL.Frame()
    stable_t = 0  # consecutive near-still samples observed
    while True:
        rospy.sleep(0.1)
        try:
            pose = tf_listener.lookupTransform('camera', 'ar_marker_'+str(right_makrer_id), rospy.Time(0))
            T_C_M_current = pm.fromTf(pose)
            pose = tf_listener.lookupTransform('head_tilt_link', 'right_arm_7_link', rospy.Time(0))
            T_T2_7_current = pm.fromTf(pose)
        except:
            # TF lookups fail transiently; just retry on the next tick.
            continue
        # NOTE(review): d1 is computed but unused -- stillness is judged only
        # on the arm transform (d2).
        d1 = PyKDL.diff(T_C_M_stable, T_C_M_current)
        d2 = PyKDL.diff(T_T2_7_stable, T_T2_7_current)
        score = d2.vel.Norm() + d2.rot.Norm()
        if score > 0.002:
            # Still moving: reset the counter and the reference transforms.
            stable_t = 0
            T_C_M_stable = copy.deepcopy(T_C_M_current)
            T_T2_7_stable = copy.deepcopy(T_T2_7_current)
        else:
            stable_t += 1
        add = True
        if stable_t > 10:
            # Pose held still for >1 s; accept it only if its orientation
            # differs enough from every sample collected so far.
            for t in T_T2_7:
                d = PyKDL.diff(T_T2_7_current, t)
                if d.rot.Norm() < 0.1:
                    add = False
                    break
            if add:
                # Reject markers viewed at a steep angle (same z-axis test as
                # in locateMarker, stricter limit here).
                z_limit = 0.6
                v = T_C_M_current * PyKDL.Vector(0,0,1) - T_C_M_current * PyKDL.Vector()
                if v.z() > -z_limit:
                    print "the marker angle is too big"
                else:
                    print "added"
                    T_C_M.append(copy.deepcopy(T_C_M_current))
                    T_T2_7.append(copy.deepcopy(T_T2_7_current))
        if len(T_C_M) > 5 or rospy.is_shutdown():
            break
    # Solve for the gripper->marker transform and print it in a form that can
    # be pasted straight back into code.
    score,T_7_M = locateMarker(T_T2_7, T_C_M)
    q = T_7_M.M.GetQuaternion()
    print "PyKDL.Frame(PyKDL.Rotation.Quaternion(%s,%s,%s,%s), PyKDL.Vector(%s,%s,%s))"%(q[0], q[1], q[2], q[3], T_7_M.p.x(), T_7_M.p.y(), T_7_M.p.z())
    print "score: %s"%(score)
    print T_7_M
    # Recompute per-sample view-angle weights (same scheme as locateMarker,
    # but with z_limit = 0.6).
    weights_ori = []
    weights_pos = []
    z_limit = 0.6
    for idx in range(0, len(T_C_M)):
        v = T_C_M[idx] * PyKDL.Vector(0,0,1) - T_C_M[idx] * PyKDL.Vector()
        if v.z() > -z_limit:
            weights_ori.append(0.0)
            weights_pos.append(0.0)
            continue
        # v.z() is in range (-1.0, -z_limit)
        weight = ((-v.z()) - z_limit)/(1.0-z_limit)
        if weight > 1.0 or weight < 0.0:
            print "error: weight==%s"%(weight)
        weights_ori.append(1.0-weight)
        weights_pos.append(weight)
    # Each sample yields one torso->camera estimate; average them (weighted).
    T_T2_C = []
    for i in range(0, len(T_C_M)):
        T_T2_C.append( T_T2_7[i] * T_7_M * T_C_M[i].Inverse() )
    mean_p = meanPosition(T_T2_C, weights_pos)
    score,mean_R = meanOrientation(T_T2_C, weights_ori)
    print "mean rotation score: %s"%(score)
    br = tf.TransformBroadcaster()
    rospy.sleep(2.0)
    T_T2_C_est = PyKDL.Frame(copy.deepcopy(mean_R.M), mean_p)
    q = T_T2_C_est.M.GetQuaternion()
    print [T_T2_C_est.p.x(), T_T2_C_est.p.y(), T_T2_C_est.p.z()]
    print [q[0], q[1], q[2], q[3]]
    # Broadcast the estimated head_tilt_link -> camera transform until killed.
    while not rospy.is_shutdown():
        br.sendTransform([T_T2_C_est.p.x(), T_T2_C_est.p.y(), T_T2_C_est.p.z()], [q[0], q[1], q[2], q[3]], rospy.Time.now(), "camera", "head_tilt_link")
        rospy.sleep(0.1)
|
{"hexsha": "dbe1659fd16f8fceee1cf792845e66a0e5109f13", "size": 11301, "ext": "py", "lang": "Python", "max_stars_repo_path": "common/locate_camera.py", "max_stars_repo_name": "RCPRG-ros-pkg/control_subsystem", "max_stars_repo_head_hexsha": "fd0b384b9027b43bb8bce3716cbbf6f9b3369d63", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "common/locate_camera.py", "max_issues_repo_name": "RCPRG-ros-pkg/control_subsystem", "max_issues_repo_head_hexsha": "fd0b384b9027b43bb8bce3716cbbf6f9b3369d63", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "common/locate_camera.py", "max_forks_repo_name": "RCPRG-ros-pkg/control_subsystem", "max_forks_repo_head_hexsha": "fd0b384b9027b43bb8bce3716cbbf6f9b3369d63", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0505050505, "max_line_length": 152, "alphanum_fraction": 0.5834881869, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3199}
|
# scipy.misc.comb was removed in SciPy 1.0; scipy.special.comb is the
# drop-in replacement (same signature).
from scipy.special import comb
from math import e

# Model constants.
n = 10       # number of trials in each binomial expectation
r = 0.03     # interest rate used in the cost terms
z = 1000     # fixed cost term
w = 2376.07  # fixed cost term
R = 0.055    # insurer's required margin over cost

RawPm = open("./Raw/Pm.txt")   # per-case probabilities for men, one per line
RawPw = open("./Raw/Pw.txt")   # per-case probabilities for women, one per line
ResultC = open("./Result/ResultCommittee.txt", "w")
ResultI = open("./Result/ResultInsurer.txt", "w")
dataRow = int(input("Input the total amount data you want to calculate: "))
for k in range(0,dataRow,1):
    Pm = float(RawPm.readline())
    Pw = float(RawPw.readline())
    # Total cost C: two annuity-style terms plus the fixed costs z and w.
    C = float((25000*e**(n*r))/(r*(e**(n*r)-1))+(25000*e**(26*r))/(r*(e**(26*r)-1))+z+w)
    # Expected payout A: binomial expectations over claim counts.
    # NOTE(review): the survival exponents (14-i) and (26-j) differ from the
    # trial count n=10 -- confirm these constants against the model write-up.
    sumM = 0.0
    sumW = 0.0
    for i in range(0,n+1,1):
        M = float(comb(n, i)*(1-Pm)**(14-i)*(Pm**i)*25000*i)
        sumM += M
    # BUG FIX: the original loop iterated over j but kept using the stale
    # loop variable i (left at n by the loop above), so every term of sumW
    # was identical instead of summing over the women's claim counts.
    for j in range(0,n+1,1):
        W = float(comb(n, j)*(1-Pw)**(26-j)*(Pw**j)*25000*j)
        sumW += W
    A = sumM + sumW
    # Decision: deal when A lies between cost and cost-plus-margin, otherwise
    # exactly one side should want the contract.
    if C*(R+1.0) > A > C:
        ResultC.write("Deal\n")
        ResultI.write("Deal\n")
    if C > A:
        ResultC.write("Should not\n")
        ResultI.write("Should\n")
    if A > C*(R+1.0):
        ResultC.write("Should\n")
        ResultI.write("Should not\n")
    print("Recorded!")
ResultC.close()
ResultI.close()
RawPm.close()
RawPw.close()
print("Process finished!")
|
{"hexsha": "b965b5e02a9ba809fc3177e5d87197b82483281e", "size": 1164, "ext": "py", "lang": "Python", "max_stars_repo_path": "Previous Contests/IMMC2016-master/Q4/Method1/Q4.py", "max_stars_repo_name": "stOOrz-Mathematical-Modelling-Group/IMMC_2022_Autumn", "max_stars_repo_head_hexsha": "4430eec4940055e434d8c6183332fc55601937d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Previous Contests/IMMC2016-master/Q4/Method1/Q4.py", "max_issues_repo_name": "stOOrz-Mathematical-Modelling-Group/IMMC_2022_Autumn", "max_issues_repo_head_hexsha": "4430eec4940055e434d8c6183332fc55601937d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Previous Contests/IMMC2016-master/Q4/Method1/Q4.py", "max_forks_repo_name": "stOOrz-Mathematical-Modelling-Group/IMMC_2022_Autumn", "max_forks_repo_head_hexsha": "4430eec4940055e434d8c6183332fc55601937d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8666666667, "max_line_length": 88, "alphanum_fraction": 0.5541237113, "include": true, "reason": "from scipy", "num_tokens": 417}
|
import numpy as np
import pandas as pd
import random
# Per-sample classifier outputs (rows = samples, columns = weak classifiers)
# -- presumably values in {-1, +1}; TODO confirm against the data files.
dataset = pd.read_csv("datas.csv")
# Ground-truth labels, one column per sample.
label = pd.read_csv("labels.csv")
def choose_diff(dataset):
    """Count the rows of *dataset* whose row-sum magnitude exceeds 85.

    Prints the count (as before) and now also returns it, which is
    backward-compatible since the previous version returned None and the
    existing caller ignores the result.

    :param dataset: pandas DataFrame of numeric values.
    :return: int -- number of rows with ``abs(row sum) > 85``.
    """
    row_sums = dataset.apply(lambda x: x.sum(), axis=1).values
    count = 0
    for item in row_sums:
        if abs(item) > 85:
            count += 1
    # print() call form works under both Python 2 and 3 for a single value
    # (the original used a Python-2-only print statement). Removed the
    # unused locals (diff list, val_length) from the original.
    print(count)
    return count
# Report how many samples have a large aggregate classifier score.
choose_diff(dataset)
# Python 2 integer division: prints 1, not 1.1 (this file is Python 2).
print 11/10
# def pred_label(classifiers):
# # print "c",len(self.classifiers.values)
# if len(classifiers.values) == 0:
# print "initial classifiers"
# return
# else:
# add_labels = classifiers.apply(lambda x: x.sum(), axis=1).values
# pred_labels = []
# for item1 in add_labels:
# if item1 > 0:
# pred_labels.append(1)
# else:
# pred_labels.append(-1)
# # print "pred_labels",self.pred_labels
# return pred_labels
# def caculate_acc(pred_value,label):
# diff_labels = pred_value - label.values.T[0]
# count = 0
# for item2 in diff_labels:
# if item2 == 0:
# count+=1
# else:
# count = count
# accuracy = float(count)/len(label.values.T[0])
# return accuracy
# #83.3%
# # validate = dataset.iloc[:,[ 5,20,30,72,9,91,94,21,3,97,61,67,79,4,18,62,54,57,95,80,70,68,40,50,60,52,82,25,71,19,32,33,55,59,46,12]]
# arr = [1,58,68,81,29,40,51,92,93,57,39,6,87,67,53,90,42,27,52,54,44,45,47,76,88,63,56,66,86,14,98,80,95,2,5,41,64,72,75,83,89,70,22,18,9,60,13,85,33,24,77,84,61,4,73,26,50,65]
# validate = dataset.iloc[:,[1,58,68,81,29,40,51,92,93,57,39,6,87,67,53,90,42,27,52,54,44,45,47,76,88,63,56,66,86,14,98,80,95,2,5,41,64,72,75,83,89,70,22,18,9,60,13,85,33,24,77,84,61,4,73,26,50,65]]
# #83.42%
# # arr = [1,25,68,2,18,7,9,97,80,14,30,4,73,27,38,16,33,95,94,65,44,63,47,83,70,96,55,12,69,26,53,81,36,13,75]
#83.47%
# arr = [63,22,91,20,90,52,26,4,61,70,86,17,78,49,97,96,67,39,11,64,71,33,3,6,76,14,73,66,80,85,84,31,19,47,82,45,27,41,74,1,25,32,8,98,59,99,5,56,30,18,9]
#83.52% 67
# arr = [68,58,81,9,3,15,46,61,40,24,86,8,93,63,50,74,14,94,5,87,83,79,65,17,98,53,13,67,37,54,89,29,42,82,60,28,6,95,56,18,33,71,75,35,90,34,12,76,20,99,26,47,62,78,85,69,73,11,92,10,55,49,91,57,64,2,52]
#0.8342 61
# arr = [17,9,31,5,69,21,23,74,61,76,35,12,90,59,26,15,79,41,8,24,68,80,20,98,86,30,85,22,18,56,2,45,83,52,62,44,73,40,19,64,81,87,14,84,47,34,67,16,39,7,60,42,3,88,49,97,78,32,91,51,33]
#0.8351 41
# arr = [60,48,83,98,6,64,20,9,40,21,26,44,69,76,35,66,52,29,68,82,1,87,81,4,18,70,90,27,31,47,28,72,22,2,19,42,45,3,58,91,30]
#0.8331 55
# arr = [9,89,5,92,28,11,68,21,88,70,33,63,64,83,18,85,29,52,93,40,12,84,31,45,22,57,38,30,8,37,97,61,65,2,35,96,81,67,98,82,55,50,34,1,20,69,19,91,99,51,49,43,71,36,23]
#0.8339 43
# arr = [33,80,55,47,59,73,96,45,42,91,20,32,53,18,84,6,4,41,71,70,81,46,36,9,19,79,67,57,74,58,65,93,40,87,35,34,92,3,8,76,66,86,37]
#0.8347 33
# arr = [62,87,76,18,86,24,80,98,53,38,12,16,52,81,63,34,19,65,89,1,35,33,41,36,20,37,70,60,39,3,99,40,5]
#0.8355 39
# arr = [55,69,23,79,47,42,76,96,32,52,59,75,73,16,81,85,80,20,33,87,37,82,13,83,4,70,41,5,98,74,60,45,93,28,3,97,56,91,18]
# result =[x-1 for x in arr]
# # print result
# validate = dataset.iloc[:,result]
# # print validate
# pred_labels_ = pred_label(validate)
# acc = caculate_acc(pred_labels_,label)
# print acc,len(arr)
# classifiers = pd.DataFrame()
# # s = np.array([np.random.randint(0,98),np.random.randint(0,98)])
# # s = [0,98]
# s = random.sample(range(99),2)
# classifiers[1] = dataset.iloc[:,1]
# print "classifiers1",classifiers
# print s
# # a = dataset.iloc[:,s]
# d = np.array([1])
# x = 2
# nd = np.hstack((d,)*x)
# classifiers[s+nd] = dataset.iloc[:,s]
# # print a
# # classifiers[s+nd] = dataset.iloc[:,s]
# print "classifiers2",classifiers
# classifiers.drop(s+nd,axis=1,inplace=True)
# # sumr = s+nd
# # classifiers.pop(sumr[0])
# # classifiers.pop(sumr[1])
# print "classifiers3",classifiers
# print "env classifier",classifiers.columns.values
# #print dataset.iloc[:,[93]]#have 0 value
|
{"hexsha": "e0ebb1071740fe88aea1ec7ee5bb40ed17a556ba", "size": 3932, "ext": "py", "lang": "Python", "max_stars_repo_path": "DQN/true_acc.py", "max_stars_repo_name": "DaniellaAngel/MachineLearning", "max_stars_repo_head_hexsha": "9497278a85e0e097092e82b937e0d69fadd138f5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DQN/true_acc.py", "max_issues_repo_name": "DaniellaAngel/MachineLearning", "max_issues_repo_head_hexsha": "9497278a85e0e097092e82b937e0d69fadd138f5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DQN/true_acc.py", "max_forks_repo_name": "DaniellaAngel/MachineLearning", "max_forks_repo_head_hexsha": "9497278a85e0e097092e82b937e0d69fadd138f5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5490196078, "max_line_length": 204, "alphanum_fraction": 0.6480162767, "include": true, "reason": "import numpy", "num_tokens": 1890}
|
import early_stopping_analysis
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
# Fixed top-to-bottom order of the per-dataset subplot panels.
dataset_orders = ['mrpc', 'rte', 'cola', 'sst']
def main():
    """Build the 4-panel early-stopping figure and save it to disk."""
    raw_results = early_stopping_analysis.main()
    data = format_data(raw_results)
    plt.rcParams.update({'font.size': 15})
    fig = plt.figure(figsize=(6,24))
    # One stacked panel per dataset, in the fixed display order.
    for panel, dataset in enumerate(dataset_orders, start=1):
        axis = fig.add_subplot(4,1,panel)
        make_one_plot(data[dataset], dataset, axis)
    save_figure()
def format_data(unformatted_data):
    """Pivot {dataset: {budget: {metric: value}}} into per-metric lists.

    Returns {dataset: {metric: [value per budget, in iteration order]}} for
    the three metrics num_started, num_fully_train and percent_data.
    """
    metrics = ('num_started', 'num_fully_train', 'percent_data')
    data = {}
    for dataset, budgets in unformatted_data.items():
        per_metric = {metric: [] for metric in metrics}
        for budget in budgets:
            for metric in metrics:
                per_metric[metric].append(budgets[budget][metric])
        data[dataset] = per_metric
    return data
def make_one_plot(data, dataset, ax1):
    """Draw one dataset's panel: experiment counts (left axis) and percent of
    data seen before early stopping (right axis) versus budget 1..30.
    """
    # Left axis: number of experiments started / fully trained per budget.
    line1, = ax1.plot(range(1,31), data['num_started'], marker='s', fillstyle='none', color='#1f77b4')
    line2, = ax1.plot(range(1,31), data['num_fully_train'], marker='o', fillstyle='none', color='#ff7f0e')
    # Right axis (twin): fraction of data trained on before early stopping.
    ax2 = ax1.twinx()
    line3, = ax2.plot(range(1,31), data['percent_data'], marker='x', fillstyle='none', color='#2ca02c')
    align_y_axes = True
    if align_y_axes:
        # to set the y-axes to have the same number of ticks, so we can use a grid
        #ax1.set_yticks(np.linspace(ax1.get_ybound()[0], ax1.get_ybound()[1], 6))
        #ax2.set_yticks(np.linspace(ax2.get_ybound()[0], ax2.get_ybound()[1], 6))
        # Right-axis ticks: evenly spaced over [0, 1] with one extra negative
        # tick so both axes end up with the same tick count.
        ax2_y_ticks = np.linspace(0, 1, len(ax1.get_yticks())-1).tolist()
        ax2_y_ticks = [-ax2_y_ticks[1]] + ax2_y_ticks
        ax2.set_yticks(ax2_y_ticks)
        # to get the ylim: mirror the left axis's overshoot below its first
        # tick and above its second-to-last tick onto the right axis.
        lower_proportion = ax1.get_ylim()[0] / ax1.get_yticks()[0]
        ax2_lower_y_lim = ax2_y_ticks[0]*lower_proportion
        upper_dist = ax1.get_yticks()[-1] - ax1.get_yticks()[-2]
        upper_proportion = (ax1.get_ylim()[1] - ax1.get_yticks()[-2]) / upper_dist
        ax2_step_dist = ax2_y_ticks[-1] - ax2_y_ticks[-2]
        ax2_upper_y_lim = ax2.get_yticks()[-2] + ax2_step_dist * upper_proportion
        ax2.set_ylim((ax2_lower_y_lim, ax2_upper_y_lim))
        # Render the right-axis ticks as percentages.
        ax2_ylabels = [str(round(percent * 100)) + "%" for percent in ax2_y_ticks]
        #ax2_ylabels = ax2_ylabels[1,len(ax2_ylabels)]]
        ax2.set_yticklabels(ax2_ylabels)
    upper_ylim_1 = True
    if upper_ylim_1:
        # Clamp both axes so the topmost tick is the upper limit.
        ax1.set_ylim((ax1.get_ylim()[0], ax1.get_yticks()[-1]))
        ax2.set_ylim((ax2.get_ylim()[0], ax2.get_yticks()[-1]))
    else:
        ax2.set_ylim((0,1))
    # Only the bottom panel (sst) carries the shared x label.
    if dataset == 'sst':
        ax1.set_xlabel('Budget sufficient to train X models on all data')
    ax1.set_ylabel('Number of experiments')
    ax2.set_ylabel('Percent of data trained on\nbefore early stopping')
    ax1.set_title("Optimal early stopping for {}".format(get_dataset_name(dataset)))
    #ax1.set_zorder(ax2.get_zorder()+1)
    #ax1.patch.set_visible(False)
    #ax1.yaxis.grid(color='gray', linestyle='dashed')
    ax2.grid(None)
    #ax2.set_yticks(np.linspace(ax2.get_yticks()[0], ax2.get_yticks()[-1], len(ax1.get_yticks())))
    # Only the top panel (mrpc) carries the legend.
    if dataset == 'mrpc':
        ax2.legend((line1, line2, line3), ("Number started", "Number stopped early",
                                           "% of data before stopping"), loc=2)
def save_figure():
    """Write the current matplotlib figure to the hard-coded drafts folder."""
    dirname = "/home/jessedd/data/results/bert_on_stilts/plot_drafts/"
    filename = "numstarted_numfinished_percentdata.pdf"
    target = dirname + filename
    print("saving to {}".format(target))
    plt.savefig(target, bbox_inches='tight')
def get_dataset_name(dataset):
    """Map an internal dataset key to its conventional display name."""
    display_names = {
        'cola': 'CoLA',
        'mrpc': 'MRPC',
        'sst': 'SST',
        'rte': 'RTE',
    }
    return display_names[dataset]
# Script entry point: build and save the figure when run directly.
if __name__ == "__main__":
    main()
|
{"hexsha": "a1c710173bb4c4ee6f2a3805fa3bf72d7fd6ba93", "size": 4224, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/early_stopping_plot.py", "max_stars_repo_name": "dodgejesse/bert_on_stilts", "max_stars_repo_head_hexsha": "63884f37f519fd1d6eafde43ba213a25a5575a82", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/early_stopping_plot.py", "max_issues_repo_name": "dodgejesse/bert_on_stilts", "max_issues_repo_head_hexsha": "63884f37f519fd1d6eafde43ba213a25a5575a82", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/early_stopping_plot.py", "max_forks_repo_name": "dodgejesse/bert_on_stilts", "max_forks_repo_head_hexsha": "63884f37f519fd1d6eafde43ba213a25a5575a82", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6229508197, "max_line_length": 106, "alphanum_fraction": 0.631155303, "include": true, "reason": "import numpy", "num_tokens": 1169}
|
[STATEMENT]
lemma zero_lt_num [simp]: "0 < (numeral n :: _ :: {canonically_ordered_monoid_add, semiring_char_0})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (0::'a) < numeral n
[PROOF STEP]
by (metis not_gr_zero zero_neq_numeral)
|
{"llama_tokens": 104, "file": "Probabilistic_While_Bernoulli", "length": 1}
|
#version 120
// Color interpolated from the vertex stage.
// NOTE(review): precision qualifiers like highp are GLES syntax -- confirm
// the target compiler accepts them under #version 120.
varying highp vec4 color;
// Pass-through fragment shader: no lighting, no selection highlight.
void main(void)
{
  gl_FragColor = color;
}
|
{"hexsha": "f1bf1fadfa69c963f80a798e213cf8f0e2e51f97", "size": 87, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ext/libigl/external/cgal/src/CGAL_Project/demo/Polyhedron/resources/shader_no_light_no_selection.f", "max_stars_repo_name": "liminchen/OptCuts", "max_stars_repo_head_hexsha": "cb85b06ece3a6d1279863e26b5fd17a5abb0834d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 187, "max_stars_repo_stars_event_min_datetime": "2019-01-23T04:07:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T03:44:58.000Z", "max_issues_repo_path": "ext/libigl/external/cgal/src/CGAL_Project/demo/Polyhedron/resources/shader_no_light_no_selection.f", "max_issues_repo_name": "xiaoxie5002/OptCuts", "max_issues_repo_head_hexsha": "1f4168fc867f47face85fcfa3a572be98232786f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-03-22T13:27:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-18T13:23:23.000Z", "max_forks_repo_path": "ext/libigl/external/cgal/src/CGAL_Project/demo/Polyhedron/resources/shader_no_light_no_selection.f", "max_forks_repo_name": "xiaoxie5002/OptCuts", "max_forks_repo_head_hexsha": "1f4168fc867f47face85fcfa3a572be98232786f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2019-02-13T01:11:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T03:29:40.000Z", "avg_line_length": 12.4285714286, "max_line_length": 25, "alphanum_fraction": 0.6896551724, "num_tokens": 30}
|
#%%
#%load_ext autoreload
#%autoreload 2
import os
import sys
import numpy as np
import scipy
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import dash
import dash_core_components as dcc
import dash_html_components as html
# Show wide/long DataFrames in full during interactive debugging.
pd.set_option('display.max_rows', 800)
pd.set_option('display.max_columns', 800)
pd.set_option('display.expand_frame_repr', False)
# Resolve the base directory: a fixed drive on Windows, $HOME elsewhere.
if sys.platform == 'win32':
    home = 'D:\\'
else:
    home=os.path.expanduser('~')
from functools import reduce
import seaborn as sns
from copy import deepcopy
from scipy.interpolate import griddata as gd
# Make the local VisSoft checkout importable and apply the paper plot style.
sys.path.append(os.path.join(home, 'repo', 'research_current', 'VisSoft'))
plt.style.use(os.path.join(home, 'repo', 'mplstyles', 'mypaper.mplstyle'))
#plt.style.use(os.path.join(home, 'repo', 'mplstyles', 'mypresentation.mplstyle'))
#sys.path.append(os.path.join(home, 'repo', 'WaterProject', 'NVT_W_method'))
#sys.path.insert(0, os.path.join(home, "Software", "flat_histogram_analysis"))
#from utils_movie import read_framework_file, read_movie_file, write_to_point3D #, plot_molecule
from ads_vis.plotly_utils_test import plot_molecule, compute_RDF, make_cell_parameter_matrix, make_3d_histograms
from ads_vis.network_utils import make_NNN, plot_network_traces
import time
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import re
import multiprocessing
import copy
import plotly as px
# Fixed seed: the network construction below subsamples points randomly.
np.random.seed(1)
# Structure list; the "Name" column drives which PDB/movie files are read.
df_data = pd.read_excel(os.path.join(home, "repo", "WaterProject", "NVT_W_method", "waterData_combined.xlsx"), \
    skiprows=2, header=0, usecols=[0, 1, 3])
# For each structure: load its framework PDB, histogram the water-oxygen
# positions from the NVT movie, build a nearest-neighbour network over the
# low-free-energy voxels and log the network's beta index.
for name_root in df_data["Name"].values:
    name = f"{name_root}_fullCoRE"
    print(name)
    try:
        t1 = time.time()
        #frame_file_name = f"{home}/repo/WaterProject/NVT_W_method/snapshots/{name}_frame.pdb"
        frame_file_name = f"{home}/Downloads/movie_files_processed/{name_root}_frame.pdb"
        # Second line of the PDB holds the cell parameters: A B C alpha beta gamma.
        with open(frame_file_name, "r") as pdb_frame:
            properties_array = pdb_frame.readlines()[1].split()[1:]
        print(properties_array)
        [A, B, C, alpha_deg, beta_deg, gamma_deg] = [float(parameter) for parameter in properties_array]
        print([A, B, C, alpha_deg, beta_deg, gamma_deg])
        frame_properties_dict = {"A": A, "B": B, "C": C, "alpha_deg": alpha_deg, \
            "beta_deg": beta_deg, "gamma_deg": gamma_deg}
        #Properties of the frame. they need to be defined somewhere in the code.
        frame_cell_parameter_matrix = make_cell_parameter_matrix(frame_properties_dict)
        df_frame = pd.read_table(frame_file_name, delim_whitespace=True, skiprows=2,
                                 usecols=[1, 2, 4, 5, 6], names=["AtomNum", "AtomType", "x", "y", "z"])
        # NOTE(review): hard-coded spatial crop of the framework atoms --
        # confirm it is valid for every structure, not just one.
        df_frame = df_frame.query("x>3 & x<18 & y>0 & y<13")
        t2 = time.time()
        #plotting the frame molecule.
        fig = plot_molecule(name, df_frame)
        print(f"T2: {t2 - t1}")
        movie_file_name = f"{home}/Downloads/movie_files_processed/{name_root}_T298_1_918_movie_processed.txt"
        O_pos_df = pd.read_table(movie_file_name, \
            delim_whitespace=True, skiprows=1, usecols=[4,5,6], names=["Ow_x", "Ow_y", "Ow_z"]) #Read directly from a bash output.
        O_pos_array = O_pos_df[["Ow_x", "Ow_y", "Ow_z"]].values
        #3D histogram
        t3d_a = time.time()
        hist , mid_point_mesh = make_3d_histograms(O_pos_array, A, B, C)
        fig.update_layout(width=14 * 96 * 0.5, height=12 * 96 * 0.5, autosize=False)
        t3d_b = time.time()
        print(f"3D histogram time: {t3d_b-t3d_a:.3f}")
        t_net_a = time.time()
        #Create an array of low free energies and compute the graph.
        # Free energy per voxel is -ln(occupancy); flatten the mesh into rows
        # of (x, y, z, FreeEnergy).
        mesh_array = np.column_stack(( mid_point_mesh[0].flatten(), mid_point_mesh[1].flatten(),\
            mid_point_mesh[2].flatten(), -np.log(hist.flatten()) ))
        mesh_array_df = pd.DataFrame(mesh_array, columns=["x", "y", "z", "FreeEnergy"])
        energy_cutoff=10
        low_energy_mask = mesh_array_df["FreeEnergy"] < energy_cutoff
        mesh_array_df = mesh_array_df[low_energy_mask]
        #Here, we will implement the nearest neighbor network. Later, this function
        #can be transferred to a different file for network utils.
        sample_size=min(mesh_array_df.shape[0], 1000)
        NNN = make_NNN(mesh_array_df, frame_cell_parameter_matrix, size=sample_size)
        nodes , edges, node_info = NNN
        # Beta index = edges / vertices, a simple connectivity measure.
        print(f"Edges: {len(edges)}, Vertices: {node_info.shape[0]}, Beta index: {len(edges) /node_info.shape[0] : .2f}")
        with open("beta_index_vals_fullCoRE.txt", "a+") as outfile:
            outfile.write(f"{name} Edges: {len(edges)}, Vertices: {node_info.shape[0]},\
                Beta index: {len(edges) /node_info.shape[0] : .2f}\n")
        t_net_b = time.time()
        print(f"Network computation time: {t_net_b - t_net_a}")
    except Exception as e:
        # Keep the batch going: a missing or malformed file skips only that
        # structure.
        print(f"{name} {e}")
        continue
"""
trdf_a = time.time()
#RDFs
#gr, hist_bins = compute_RDF(copy.deepcopy(O_pos_array[1::50, :]), frame_properties_dict)
gr, hist_bins = compute_RDF(mesh_array_df[["x", "y", "z"]].values, frame_properties_dict, \
dr=1, sample_size=min(mesh_array_df.shape[0], 1000))
trdf_b = time.time()
print(f"RDF time: {trdf_b-trdf_a:.3f}")
##########
#Here, we also add the ability to visualize the energy map in the structure. For instance, we
#have externally computed the interaction energy of the water molecule with the framework which we will now plot
#visualize.
energy_file_name = f"{home}/repo/WaterProject/NVT_W_method/free_energy_data/{name}_DDEC-o_298_energy-comb.txt"
ener_df = pd.read_csv(energy_file_name, header=0, usecols=[1,2,3,4,6,7,9,10]) #Read directly from a bash output.
#remove duplicates and only select those without energy drifts.
ener_df["LastWarning"] = ener_df["LastWarning"].replace(np.nan, "Normal", regex=True)
ener_df["LastWarning"] = ener_df["LastWarning"].astype("str").astype("category")
ener_df = ener_df[ener_df["LastWarning"]=="Normal"]
ener_df.drop_duplicates("Position_index", keep="last", inplace=True)
#filter for low energy points.
#ener_df = ener_df.query("Energy<=0")
#visualize the energy map.
x = ener_df["x"].values
y = ener_df["y"].values
z = ener_df["z"].values
v = ener_df["Energy"].values
_ , energy_mid_point_mesh = make_3d_histograms(ener_df[["x", "y", "z"]].values, A, B, C)
X, Y, Z = energy_mid_point_mesh
V = gd((x,y,z), v, (X.flatten(),Y.flatten(),Z.flatten()), method='nearest')
#Now, we will also add a network and create the points appropriately.
ener_mesh_array = np.column_stack((X.flatten(), Y.flatten(), Z.flatten(), V ))
ener_mesh_array_df = pd.DataFrame(ener_mesh_array, columns=["x", "y", "z", "Energy"])
#energy_cutoff=-5000
low_energy_mask = (ener_mesh_array_df["Energy"] - ener_mesh_array_df["Energy"].min()) < 2500
ener_mesh_array_df = ener_mesh_array_df[low_energy_mask]
sample_size=min(ener_mesh_array_df.shape[0], 1000)
NNN = make_NNN(ener_mesh_array_df, frame_cell_parameter_matrix,
size=sample_size)
ener_nodes , ener_edges, ener_node_info = NNN
#We will now plot these on the figure. This can also be made into a function and written as a utility.
print(f"Edges: {len(ener_edges)}, Vertices: {ener_node_info.shape[0]}, Beta index: {len(ener_edges) /ener_node_info.shape[0] : .2f}")
with open("ener_beta_index_vals_fullCoRE.txt", "a+") as outfile:
outfile.write(f"{name} Edges: {len(ener_edges)}, Vertices: {ener_node_info.shape[0]},\
Beta index: {len(ener_edges) / ener_node_info.shape[0] : .2f}\n")
"""
# %%
|
{"hexsha": "5d1cf7cdcc17699d90be0dae9b769dee1de5d360", "size": 7731, "ext": "py", "lang": "Python", "max_stars_repo_path": "compute_without_app.py", "max_stars_repo_name": "architdatar/AdsVis", "max_stars_repo_head_hexsha": "464f78211901530178683fda39795d6e121ca369", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "compute_without_app.py", "max_issues_repo_name": "architdatar/AdsVis", "max_issues_repo_head_hexsha": "464f78211901530178683fda39795d6e121ca369", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compute_without_app.py", "max_forks_repo_name": "architdatar/AdsVis", "max_forks_repo_head_hexsha": "464f78211901530178683fda39795d6e121ca369", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.265625, "max_line_length": 139, "alphanum_fraction": 0.6803777002, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 2129}
|
import numpy as np
class Loss:
    """Base class for loss functions.

    Subclasses override get_loss to return a (loss, gradient) pair; this
    base implementation is a no-op stub.
    """
    def __init__(self):
        pass
    def get_loss(self, x, y):
        # Stub: subclasses compute and return (loss, dx); this returns None.
        pass
class Softmax_cross_entropy_loss(Loss):
    """Softmax activation fused with a cross-entropy loss.

    Computing loss and gradient together avoids a separate backward pass
    through the softmax.
    """
    def __init__(self):
        pass
    def get_loss(self, x, y):
        """Return (loss, dx) for scores x of shape (N, C) and integer
        labels y of shape (N,); dx has the same shape as x.
        """
        num_samples = x.shape[0]
        rows = np.arange(num_samples)
        # Shift by the per-row max so exp() cannot overflow.
        stable_logits = x - np.max(x, axis=1, keepdims=True)
        log_partition = np.log(np.sum(np.exp(stable_logits), axis=1, keepdims=True))
        log_probs = stable_logits - log_partition
        probs = np.exp(log_probs)
        # Mean negative log-likelihood of the true classes.
        loss = -log_probs[rows, y].sum() / num_samples
        # Gradient of the mean loss: softmax(x) minus one-hot targets, / N.
        dx = probs.copy()
        dx[rows, y] -= 1
        dx /= num_samples
        return loss, dx
|
{"hexsha": "bf87101046fe36df9b550d9cd1b07b726d5cae82", "size": 612, "ext": "py", "lang": "Python", "max_stars_repo_path": "FrankNN/losses.py", "max_stars_repo_name": "fpreiswerk/FrankNN", "max_stars_repo_head_hexsha": "66441195acdd6af237f1d780975440477019dbbf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FrankNN/losses.py", "max_issues_repo_name": "fpreiswerk/FrankNN", "max_issues_repo_head_hexsha": "66441195acdd6af237f1d780975440477019dbbf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FrankNN/losses.py", "max_forks_repo_name": "fpreiswerk/FrankNN", "max_forks_repo_head_hexsha": "66441195acdd6af237f1d780975440477019dbbf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1034482759, "max_line_length": 65, "alphanum_fraction": 0.5539215686, "include": true, "reason": "import numpy", "num_tokens": 170}
|
// Copyright John Maddock 2013.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt
// or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifdef _MSC_VER
# define _SCL_SECURE_NO_WARNINGS
#endif
#include <boost/multiprecision/cpp_bin_float.hpp>
#ifdef TEST_MPFR
#include <boost/multiprecision/mpfr.hpp>
#endif
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int.hpp>
#include "libs/multiprecision/test/test.hpp"
#include <iostream>
#include <iomanip>
template <class T>
T generate_random()
{
typedef int e_type;
static boost::random::mt19937 gen;
T val = gen();
T prev_val = -1;
while(val != prev_val)
{
val *= (gen.max)();
prev_val = val;
val += gen();
}
e_type e;
val = frexp(val, &e);
static boost::random::uniform_int_distribution<e_type> ui(-20, 20);
return ldexp(val, ui(gen));
}
using namespace boost::multiprecision;
// "good_type" is the trusted reference implementation we compare results
// against: a 35-digit MPFR type when available, otherwise built-in double.
#ifdef TEST_MPFR
typedef number<mpfr_float_backend<35> > good_type;
#else
typedef double good_type;
#endif
// The type under test: cpp_bin_float configured with exactly the same number
// of binary digits as good_type (so results should match bit-for-bit), with
// expression templates turned off.
typedef number<cpp_bin_float<std::numeric_limits<good_type>::digits, digit_base_2>, et_off> test_type;
// Exercises cpp_bin_float arithmetic on IEEE-754-style boundary values:
// max/min, epsilon, infinities, NaNs, overflow/underflow and signed zero.
// Fixes two copy-paste duplicated assertions in the NaN sections (see the
// inline comments) so the intended operand orderings are actually covered.
void test_special_cases()
{
   test_type max_val = (std::numeric_limits<test_type>::max)();
   test_type min_val = (std::numeric_limits<test_type>::min)();
   test_type eps = std::numeric_limits<test_type>::epsilon();
   test_type inf_val = (std::numeric_limits<test_type>::infinity)();
   test_type nan_val = (std::numeric_limits<test_type>::quiet_NaN)();
   test_type half = 0.5;
   test_type one_point_5 = 1.5;
   BOOST_CHECK((boost::math::isnormal)(max_val));
   BOOST_CHECK((boost::math::isnormal)(-max_val));
   BOOST_CHECK((boost::math::isnormal)(min_val));
   BOOST_CHECK((boost::math::isnormal)(-min_val));
   BOOST_CHECK((boost::math::isinf)(inf_val));
   BOOST_CHECK((boost::math::isinf)(-inf_val));
   BOOST_CHECK((boost::math::isnan)(nan_val));
   BOOST_CHECK((boost::math::isnan)(-nan_val));
   if(std::numeric_limits<test_type>::has_denorm)
      min_val = std::numeric_limits<test_type>::denorm_min();
   // Adding epsilon will increment 1.0:
   BOOST_CHECK(test_type(1) + eps != test_type(1));
   BOOST_CHECK(test_type(1) + eps / 2 == test_type(1));
   // But it's not the smallest value that will do that:
   test_type small = 1 + eps;
   small = ldexp(small, -std::numeric_limits<test_type>::digits);
   BOOST_CHECK(test_type(1) + small != test_type(1));
   // And if we increment 1.0 first, then an even smaller
   // addition will round up:
   test_type one_next = test_type(1) + eps;
   BOOST_CHECK(one_next + eps / 2 != one_next);
   // Overflow:
   BOOST_CHECK_EQUAL(max_val + max_val * eps, inf_val);
   BOOST_CHECK_EQUAL(-max_val - max_val * eps, -inf_val);
   BOOST_CHECK_EQUAL(max_val * 2, inf_val);
   BOOST_CHECK_EQUAL(max_val * -2, -inf_val);
   BOOST_CHECK_EQUAL(max_val / half, inf_val);
   BOOST_CHECK_EQUAL(max_val / -half, -inf_val);
   BOOST_CHECK_EQUAL(max_val / min_val, inf_val);
   BOOST_CHECK_EQUAL(max_val / -min_val, -inf_val);
   // Underflow:
   BOOST_CHECK_EQUAL(min_val * 2 - one_point_5 * min_val, 0);
   BOOST_CHECK_EQUAL(-min_val * 2 + one_point_5 * min_val, 0);
   BOOST_CHECK_EQUAL(min_val / 2, 0);
   BOOST_CHECK_EQUAL(min_val / max_val, 0);
   BOOST_CHECK_EQUAL(min_val * half, 0);
   BOOST_CHECK_EQUAL(min_val - min_val, 0);
   BOOST_CHECK_EQUAL(max_val - max_val, 0);
   BOOST_CHECK_EQUAL(-min_val + min_val, 0);
   BOOST_CHECK_EQUAL(-max_val + max_val, 0);
   // Things which should not over/underflow:
   BOOST_CHECK_EQUAL((min_val * 2) / 2, min_val);
   BOOST_CHECK_EQUAL((max_val / 2) * 2, max_val);
   BOOST_CHECK_GE((min_val * 2.0000001) / 1.9999999999999999, min_val);
   BOOST_CHECK_LE((max_val / 2.0000001) * 1.9999999999999999, max_val);
   BOOST_CHECK_EQUAL(min_val * 2 - min_val, min_val);
   BOOST_CHECK_EQUAL(max_val / 2 + max_val / 2, max_val);
   // Things involving zero:
   BOOST_CHECK_EQUAL(max_val + 0, max_val);
   BOOST_CHECK_EQUAL(max_val - 0, max_val);
   BOOST_CHECK_EQUAL(0 + max_val, max_val);
   BOOST_CHECK_EQUAL(0 - max_val, -max_val);
   BOOST_CHECK_EQUAL(max_val * 0, 0);
   BOOST_CHECK_EQUAL(0 * max_val, 0);
   BOOST_CHECK_EQUAL(max_val / 0, inf_val);
   BOOST_CHECK_EQUAL(0 / max_val, 0);
   BOOST_CHECK_EQUAL(-max_val / 0, -inf_val);
   BOOST_CHECK_EQUAL(0 / -max_val, 0);
   // Things involving infinity:
   BOOST_CHECK_EQUAL(inf_val + 2, inf_val);
   BOOST_CHECK_EQUAL(inf_val - 2, inf_val);
   BOOST_CHECK_EQUAL(inf_val + -2, inf_val);
   BOOST_CHECK_EQUAL(inf_val - -2, inf_val);
   BOOST_CHECK_EQUAL(-inf_val + 2, -inf_val);
   BOOST_CHECK_EQUAL(-inf_val - 2, -inf_val);
   BOOST_CHECK_EQUAL(-inf_val + -2, -inf_val);
   BOOST_CHECK_EQUAL(-inf_val - -2, -inf_val);
   BOOST_CHECK_EQUAL(2 + inf_val, inf_val);
   BOOST_CHECK_EQUAL(2 - inf_val, -inf_val);
   BOOST_CHECK_EQUAL(-2 + inf_val, inf_val);
   BOOST_CHECK_EQUAL(-2 - inf_val, -inf_val);
   BOOST_CHECK_EQUAL(2 + (-inf_val), -inf_val);
   BOOST_CHECK_EQUAL(2 - (-inf_val), inf_val);
   BOOST_CHECK_EQUAL(-2 + (-inf_val), -inf_val);
   BOOST_CHECK_EQUAL(-2 - (-inf_val), inf_val);
   BOOST_CHECK_EQUAL(sqrt(inf_val), inf_val);
   BOOST_CHECK(boost::math::isnan(sqrt(-inf_val)));
   BOOST_CHECK_EQUAL(inf_val + test_type(2), inf_val);
   BOOST_CHECK_EQUAL(inf_val - test_type(2), inf_val);
   BOOST_CHECK_EQUAL(inf_val + test_type(-2), inf_val);
   BOOST_CHECK_EQUAL(inf_val - test_type(-2), inf_val);
   BOOST_CHECK_EQUAL(-inf_val + test_type(2), -inf_val);
   BOOST_CHECK_EQUAL(-inf_val - test_type(2), -inf_val);
   BOOST_CHECK_EQUAL(-inf_val + test_type(-2), -inf_val);
   BOOST_CHECK_EQUAL(-inf_val - test_type(-2), -inf_val);
   BOOST_CHECK_EQUAL(test_type(2) + inf_val, inf_val);
   BOOST_CHECK_EQUAL(test_type(2) - inf_val, -inf_val);
   BOOST_CHECK_EQUAL(test_type(-2) + inf_val, inf_val);
   BOOST_CHECK_EQUAL(test_type(-2) - inf_val, -inf_val);
   BOOST_CHECK_EQUAL(test_type(2) + (-inf_val), -inf_val);
   BOOST_CHECK_EQUAL(test_type(2) - (-inf_val), inf_val);
   BOOST_CHECK_EQUAL(test_type(-2) + (-inf_val), -inf_val);
   BOOST_CHECK_EQUAL(test_type(-2) - (-inf_val), inf_val);
   BOOST_CHECK((boost::math::isnan)(inf_val - inf_val));
   BOOST_CHECK_EQUAL(inf_val * 2, inf_val);
   BOOST_CHECK_EQUAL(-inf_val * 2, -inf_val);
   BOOST_CHECK_EQUAL(inf_val * -2, -inf_val);
   BOOST_CHECK_EQUAL(-inf_val * -2, inf_val);
   BOOST_CHECK_EQUAL(inf_val * test_type(-2), -inf_val);
   BOOST_CHECK_EQUAL(-inf_val * test_type(-2), inf_val);
   BOOST_CHECK((boost::math::isnan)(inf_val * 0));
   BOOST_CHECK((boost::math::isnan)(-inf_val * 0));
   BOOST_CHECK_EQUAL(inf_val / 2, inf_val);
   BOOST_CHECK_EQUAL(-inf_val / 2, -inf_val);
   BOOST_CHECK_EQUAL(inf_val / -2, -inf_val);
   BOOST_CHECK_EQUAL(-inf_val / -2, inf_val);
   BOOST_CHECK_EQUAL(inf_val / test_type(-2), -inf_val);
   BOOST_CHECK_EQUAL(-inf_val / test_type(-2), inf_val);
   BOOST_CHECK_EQUAL(inf_val / 0, inf_val);
   BOOST_CHECK_EQUAL(-inf_val / 0, -inf_val);
   BOOST_CHECK((boost::math::isnan)(inf_val / inf_val));
   BOOST_CHECK((boost::math::isnan)(-inf_val / inf_val));
   // Things involving nan:
   BOOST_CHECK((boost::math::isnan)(nan_val + 2));
   BOOST_CHECK((boost::math::isnan)(nan_val - 2));
   BOOST_CHECK((boost::math::isnan)(nan_val + 0));
   BOOST_CHECK((boost::math::isnan)(nan_val - 0));
   BOOST_CHECK((boost::math::isnan)(nan_val + inf_val));
   BOOST_CHECK((boost::math::isnan)(nan_val - inf_val));
   BOOST_CHECK((boost::math::isnan)(nan_val + nan_val));
   BOOST_CHECK((boost::math::isnan)(nan_val - nan_val));
   BOOST_CHECK((boost::math::isnan)(2 + nan_val));
   BOOST_CHECK((boost::math::isnan)(2 - nan_val));
   // Fixed copy-paste duplicate: the pair must cover both 0 + NaN and 0 - NaN
   // (the original checked 0 - nan_val twice).
   BOOST_CHECK((boost::math::isnan)(0 + nan_val));
   BOOST_CHECK((boost::math::isnan)(0 - nan_val));
   BOOST_CHECK((boost::math::isnan)(inf_val + nan_val));
   BOOST_CHECK((boost::math::isnan)(inf_val - nan_val));
   BOOST_CHECK((boost::math::isnan)(nan_val * 2));
   BOOST_CHECK((boost::math::isnan)(nan_val / 2));
   BOOST_CHECK((boost::math::isnan)(nan_val * 0));
   BOOST_CHECK((boost::math::isnan)(nan_val / 0));
   BOOST_CHECK((boost::math::isnan)(nan_val * inf_val));
   BOOST_CHECK((boost::math::isnan)(nan_val / inf_val));
   BOOST_CHECK((boost::math::isnan)(nan_val * nan_val));
   BOOST_CHECK((boost::math::isnan)(nan_val / nan_val));
   BOOST_CHECK((boost::math::isnan)(2 * nan_val));
   BOOST_CHECK((boost::math::isnan)(2 / nan_val));
   // Fixed copy-paste duplicate: the pair must cover both 0 * NaN and 0 / NaN
   // (the original checked 0 / nan_val twice).
   BOOST_CHECK((boost::math::isnan)(0 * nan_val));
   BOOST_CHECK((boost::math::isnan)(0 / nan_val));
   BOOST_CHECK((boost::math::isnan)(inf_val * nan_val));
   BOOST_CHECK((boost::math::isnan)(inf_val / nan_val));
   // Corner cases:
   BOOST_CHECK_EQUAL((max_val * half) / half, max_val);
   BOOST_CHECK_EQUAL((max_val / 2) * 2, max_val);
   BOOST_CHECK_EQUAL((min_val / half) * half, min_val);
   BOOST_CHECK_EQUAL((min_val * 2) / 2, min_val);
   BOOST_CHECK_EQUAL(max_val + min_val, max_val);
   BOOST_CHECK_EQUAL(min_val + max_val, max_val);
   BOOST_CHECK_EQUAL(max_val - min_val, max_val);
   BOOST_CHECK_EQUAL(min_val - max_val, -max_val);
   // Signed zeros:
   BOOST_CHECK(boost::math::signbit(min_val * -min_val));
   BOOST_CHECK(boost::math::signbit(min_val * min_val) == 0);
   BOOST_CHECK(boost::math::signbit(-min_val * -min_val) == 0);
   BOOST_CHECK(boost::math::signbit(-min_val * min_val));
   BOOST_CHECK(boost::math::signbit(min_val / max_val) == 0);
   BOOST_CHECK(boost::math::signbit(min_val / -max_val));
   BOOST_CHECK(boost::math::signbit(-min_val / -max_val) == 0);
   BOOST_CHECK(boost::math::signbit(-min_val / max_val));
   BOOST_CHECK(boost::math::signbit(min_val / 2) == 0);
   BOOST_CHECK(boost::math::signbit(min_val / -2));
   BOOST_CHECK(boost::math::signbit(-min_val / -2) == 0);
   BOOST_CHECK(boost::math::signbit(-min_val / 2));
   test_type neg_zero = min_val * -min_val;
   // Arithmetic involving signed zero:
   BOOST_CHECK_EQUAL(-neg_zero, 0);
   BOOST_CHECK(!boost::math::signbit(-neg_zero));
   BOOST_CHECK_EQUAL(neg_zero + 2, 2);
   BOOST_CHECK_EQUAL(neg_zero + test_type(2), 2);
   BOOST_CHECK_EQUAL(2 + neg_zero, 2);
   BOOST_CHECK_EQUAL(test_type(2) + neg_zero, 2);
   BOOST_CHECK_EQUAL(neg_zero + -2, -2);
   BOOST_CHECK_EQUAL(neg_zero + test_type(-2), -2);
   BOOST_CHECK_EQUAL(-2 + neg_zero, -2);
   BOOST_CHECK_EQUAL(test_type(-2) + neg_zero, -2);
   BOOST_CHECK_EQUAL(neg_zero - 2, -2);
   BOOST_CHECK_EQUAL(neg_zero - test_type(2), -2);
   BOOST_CHECK_EQUAL(2 - neg_zero, 2);
   BOOST_CHECK_EQUAL(test_type(2) - neg_zero, 2);
   BOOST_CHECK_EQUAL(neg_zero - -2, 2);
   BOOST_CHECK_EQUAL(neg_zero - test_type(-2), 2);
   BOOST_CHECK_EQUAL(-2 - neg_zero, -2);
   BOOST_CHECK_EQUAL(test_type(-2) - neg_zero, -2);
   BOOST_CHECK_EQUAL(neg_zero * 2, 0);
   BOOST_CHECK_EQUAL(neg_zero * test_type(2), 0);
   BOOST_CHECK_EQUAL(2 * neg_zero, 0);
   BOOST_CHECK_EQUAL(test_type(2) * neg_zero, 0);
   BOOST_CHECK_EQUAL(neg_zero * -2, 0);
   BOOST_CHECK_EQUAL(neg_zero * test_type(-2), 0);
   BOOST_CHECK_EQUAL(-2 * neg_zero, 0);
   BOOST_CHECK_EQUAL(test_type(-2) * neg_zero, 0);
   BOOST_CHECK(boost::math::signbit(neg_zero * 2));
   BOOST_CHECK(boost::math::signbit(neg_zero * test_type(2)));
   BOOST_CHECK(boost::math::signbit(2 * neg_zero));
   BOOST_CHECK(boost::math::signbit(test_type(2) * neg_zero));
   BOOST_CHECK(!boost::math::signbit(neg_zero * -2));
   BOOST_CHECK(!boost::math::signbit(neg_zero * test_type(-2)));
   BOOST_CHECK(!boost::math::signbit(-2 * neg_zero));
   BOOST_CHECK(!boost::math::signbit(test_type(-2) * neg_zero));
   BOOST_CHECK_EQUAL(neg_zero / 2, 0);
   BOOST_CHECK_EQUAL(neg_zero / test_type(2), 0);
   BOOST_CHECK_EQUAL(2 / neg_zero, -inf_val);
   BOOST_CHECK_EQUAL(test_type(2) / neg_zero, -inf_val);
   BOOST_CHECK_EQUAL(neg_zero / -2, 0);
   BOOST_CHECK_EQUAL(neg_zero / test_type(-2), 0);
   BOOST_CHECK_EQUAL(-2 / neg_zero, inf_val);
   BOOST_CHECK_EQUAL(test_type(-2) / neg_zero, inf_val);
   BOOST_CHECK(boost::math::signbit(neg_zero / 2));
   BOOST_CHECK(boost::math::signbit(neg_zero / test_type(2)));
   BOOST_CHECK(boost::math::signbit(2 / neg_zero));
   BOOST_CHECK(boost::math::signbit(test_type(2) / neg_zero));
   BOOST_CHECK(!boost::math::signbit(neg_zero / -2));
   BOOST_CHECK(!boost::math::signbit(neg_zero / test_type(-2)));
   BOOST_CHECK(!boost::math::signbit(-2 / neg_zero));
   BOOST_CHECK(!boost::math::signbit(test_type(-2) / neg_zero));
   // The sign of zero must survive conversion to the built-in types:
   BOOST_CHECK(boost::math::signbit(neg_zero.convert_to<double>()));
   BOOST_CHECK(boost::math::signbit(neg_zero.convert_to<float>()));
   BOOST_CHECK(boost::math::signbit(neg_zero.convert_to<long double>()));
   test_type zero(0);
   BOOST_CHECK(!boost::math::signbit(zero.convert_to<double>()));
   BOOST_CHECK(!boost::math::signbit(zero.convert_to<float>()));
   BOOST_CHECK(!boost::math::signbit(zero.convert_to<long double>()));
   // Conversions to other types of special values:
   if(std::numeric_limits<float>::has_infinity)
   {
      BOOST_CHECK_EQUAL(inf_val.convert_to<float>(), std::numeric_limits<float>::infinity());
      BOOST_CHECK_EQUAL((-inf_val).convert_to<float>(), -std::numeric_limits<float>::infinity());
   }
   if(std::numeric_limits<float>::has_quiet_NaN)
   {
      BOOST_CHECK((boost::math::isnan)(nan_val.convert_to<float>()));
   }
   if(std::numeric_limits<double>::has_infinity)
   {
      BOOST_CHECK_EQUAL(inf_val.convert_to<double>(), std::numeric_limits<double>::infinity());
      BOOST_CHECK_EQUAL((-inf_val).convert_to<double>(), -std::numeric_limits<double>::infinity());
   }
   if(std::numeric_limits<double>::has_quiet_NaN)
   {
      BOOST_CHECK((boost::math::isnan)(nan_val.convert_to<double>()));
   }
   if(std::numeric_limits<long double>::has_infinity)
   {
      BOOST_CHECK_EQUAL(inf_val.convert_to<long double>(), std::numeric_limits<long double>::infinity());
      BOOST_CHECK_EQUAL((-inf_val).convert_to<long double>(), -std::numeric_limits<long double>::infinity());
   }
   if(std::numeric_limits<long double>::has_quiet_NaN)
   {
      BOOST_CHECK((boost::math::isnan)(nan_val.convert_to<long double>()));
   }
}
int main()
{
   // First cover the hand-picked corner cases, then fuzz-compare cpp_bin_float
   // arithmetic against the reference good_type on random operands.
   test_special_cases();
   unsigned error_count = 0;
   for(unsigned i = 0; i < 100000; ++i)
   {
      good_type a = generate_random<good_type>();
      good_type b = generate_random<good_type>();
      test_type ta(a);
      test_type tb(b);
      // Each operation is checked under all four sign combinations: the
      // reference computed in good_type and converted, vs. computed natively
      // in test_type. The two should agree exactly (same digit count).
      BOOST_CHECK_EQUAL(test_type(a * b), ta * tb);
      BOOST_CHECK_EQUAL(test_type(-a * b), -ta * tb);
      BOOST_CHECK_EQUAL(test_type(a * -b), ta * -tb);
      BOOST_CHECK_EQUAL(test_type(-a * -b), -ta * -tb);
      BOOST_CHECK_EQUAL(test_type(a + b), ta + tb);
      BOOST_CHECK_EQUAL(test_type(-a + b), -ta + tb);
      BOOST_CHECK_EQUAL(test_type(a + -b), ta + -tb);
      BOOST_CHECK_EQUAL(test_type(-a + -b), -ta + -tb);
      BOOST_CHECK_EQUAL(test_type(a - b), ta - tb);
      BOOST_CHECK_EQUAL(test_type(-a - b), -ta - tb);
      BOOST_CHECK_EQUAL(test_type(a - -b), ta - -tb);
      BOOST_CHECK_EQUAL(test_type(-a - -b), -ta - -tb);
      BOOST_CHECK_EQUAL(test_type(a / b), ta / tb);
      BOOST_CHECK_EQUAL(test_type(-a / b), -ta / tb);
      BOOST_CHECK_EQUAL(test_type(a / -b), ta / -tb);
      BOOST_CHECK_EQUAL(test_type(-a / -b), -ta / -tb);
      BOOST_CHECK_EQUAL(test_type(sqrt(a)), sqrt(ta));
      BOOST_CHECK_EQUAL(test_type(floor(a)), floor(ta));
      BOOST_CHECK_EQUAL(test_type(floor(-a)), floor(-ta));
      BOOST_CHECK_EQUAL(test_type(ceil(a)), ceil(ta));
      BOOST_CHECK_EQUAL(test_type(ceil(-a)), ceil(-ta));
#ifdef TEST_MPFR
      //
      // Conversions:
      //
      BOOST_CHECK_EQUAL(a.convert_to<double>(), ta.convert_to<double>())<
      BOOST_CHECK_EQUAL(a.convert_to<float>(), ta.convert_to<float>());
      BOOST_CHECK_EQUAL(b.convert_to<double>(), tb.convert_to<double>());
      BOOST_CHECK_EQUAL(b.convert_to<float>(), tb.convert_to<float>());
#else
      BOOST_CHECK_EQUAL(a, ta.convert_to<double>());
      BOOST_CHECK_EQUAL(static_cast<float>(a), ta.convert_to<float>());
      BOOST_CHECK_EQUAL(b, tb.convert_to<double>());
      BOOST_CHECK_EQUAL(static_cast<float>(b), tb.convert_to<float>());
#endif
      // Mixed float/integer arithmetic, signed and unsigned:
      static boost::random::mt19937 i_gen;
      int si = i_gen();
      BOOST_CHECK_EQUAL(test_type(a * si), ta * si);
      BOOST_CHECK_EQUAL(test_type(-a * si), -ta * si);
      BOOST_CHECK_EQUAL(test_type(-a * -si), -ta * -si);
      BOOST_CHECK_EQUAL(test_type(a * -si), ta * -si);
      unsigned ui = std::abs(si);
      BOOST_CHECK_EQUAL(test_type(a * ui), ta * ui);
      BOOST_CHECK_EQUAL(test_type(-a * ui), -ta * ui);
      // Divide:
      BOOST_CHECK_EQUAL(test_type(a / si), ta / si);
      BOOST_CHECK_EQUAL(test_type(-a / si), -ta / si);
      BOOST_CHECK_EQUAL(test_type(-a / -si), -ta / -si);
      BOOST_CHECK_EQUAL(test_type(a / -si), ta / -si);
      BOOST_CHECK_EQUAL(test_type(a / ui), ta / ui);
      BOOST_CHECK_EQUAL(test_type(-a / ui), -ta / ui);
      // Error reporting: dump the operands whenever this iteration added
      // new failures, so a failing case can be reproduced.
      if(boost::detail::test_errors() != error_count)
      {
         error_count = boost::detail::test_errors();
         std::cout << std::setprecision(std::numeric_limits<test_type>::max_digits10) << std::scientific;
         std::cout << "a (mpfr) = " << a << std::endl;
         std::cout << "a (test) = " << ta << std::endl;
         std::cout << "b (mpfr) = " << b << std::endl;
         std::cout << "b (test) = " << tb << std::endl;
         std::cout << "si = " << si << std::endl;
         std::cout << "ui = " << ui << std::endl;
      }
   }
   return boost::report_errors();
}
|
{"hexsha": "1bcb2f81fdcc42a77ae2d92cc83734aa1a1d9baf", "size": 17496, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "3rdParty/boost/1.62.0/libs/multiprecision/test/test_cpp_bin_float.cpp", "max_stars_repo_name": "sita1999/arangodb", "max_stars_repo_head_hexsha": "6a4f462fa209010cd064f99e63d85ce1d432c500", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 18.0, "max_stars_repo_stars_event_min_datetime": "2016-03-04T15:44:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T11:06:25.000Z", "max_issues_repo_path": "3rdParty/boost/1.62.0/libs/multiprecision/test/test_cpp_bin_float.cpp", "max_issues_repo_name": "lipper/arangodb", "max_issues_repo_head_hexsha": "66ea1fd4946668192e3f0d1060f0844f324ad7b8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 49.0, "max_issues_repo_issues_event_min_datetime": "2016-02-29T17:59:52.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-05T04:59:26.000Z", "max_forks_repo_path": "3rdParty/boost/1.62.0/libs/multiprecision/test/test_cpp_bin_float.cpp", "max_forks_repo_name": "lipper/arangodb", "max_forks_repo_head_hexsha": "66ea1fd4946668192e3f0d1060f0844f324ad7b8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2016-07-30T10:17:12.000Z", "max_forks_repo_forks_event_max_datetime": "2016-08-11T20:31:46.000Z", "avg_line_length": 42.5693430657, "max_line_length": 109, "alphanum_fraction": 0.6752400549, "num_tokens": 5003}
|
#!/usr/bin/env python3
from mpl_toolkits.mplot3d import Axes3D
from rasterization import Rasterizer
from transformation import multiply
from transformation import TransformGenerator
import argparse
import DirectLinearTransform
import json
import math
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import random
import sys
def main():
    """Entry point: run the point picker or the image transform pipeline."""
    cli_args = _getParsedArgs(sys.argv[1:])

    # Point-picker mode short-circuits the normal transform flow.
    if cli_args.p:
        _pointPicker(cli_args.input_image)
        sys.exit(0)

    correspondences = _readCorrespondences(cli_args.correspondences)
    transformImage(cli_args.input_image,
                   cli_args.output_image,
                   correspondences,
                   cli_args.background)
def transformImage(input_image_path, output_image_path,
                   corresponding_points, background_path):
    """Warp the input image by the homography fit to corresponding_points.

    The rasterized result (optionally composed over a background canvas)
    is written to output_image_path.
    """
    source_image = mpimg.imread(input_image_path)

    # Background is optional; None means rasterize onto a blank canvas.
    background_image = None
    if background_path is not None:
        background_image = mpimg.imread(background_path)

    homography = DirectLinearTransform.computeTransform(corresponding_points)

    rasterizer = Rasterizer(
        source_image,
        transformation_matrix = homography,
        background = background_image
    )
    matplotlib.image.imsave(output_image_path, rasterizer.rasterize())
def _pointPicker(input_image_path):
    """Show the image and print the (x, y) data coordinates of each click."""
    picked_image = mpimg.imread(input_image_path)
    figure = plt.figure()
    plt.imshow(picked_image)

    def _report_click(event):
        # Echo coordinates so the user can paste them into a JSON file.
        print(event.xdata, event.ydata)

    figure.canvas.mpl_connect('button_press_event', _report_click)
    plt.show()
def _readCorrespondences(correspondenceFilePath):
with open(correspondenceFilePath, 'r') as correspondenceFileHandler:
return json.load(correspondenceFileHandler)
def _getParsedArgs(args):
parser = argparse.ArgumentParser(
description = "CLI input to homography application"
)
parser.add_argument(
"-p",
action = "store_true",
help = "use point picker utility")
parser.add_argument(
"--input-image",
default = "./media/t2.png",
help = "input image to be transformed")
parser.add_argument(
"--output-image",
default = "output.png",
help = "output image path for saving new image")
parser.add_argument(
"--correspondences",
default = "3.json",
help = "corresponding set of points to derive transform")
parser.add_argument(
"--background",
default = None,
help = "optionally specify background image to act as canvas")
return parser.parse_args(args)
# Script entry point: run the CLI only when executed directly.
if __name__ == "__main__":
    main()
|
{"hexsha": "324811f886f195dc8925fbe83aaf23b9d5e038a5", "size": 2631, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/homography.py", "max_stars_repo_name": "Pratool/homography", "max_stars_repo_head_hexsha": "c9daeaa3364b7c658b39c225952288dd828c332e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-12T17:38:22.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-12T17:38:22.000Z", "max_issues_repo_path": "python/homography.py", "max_issues_repo_name": "Pratool/homography", "max_issues_repo_head_hexsha": "c9daeaa3364b7c658b39c225952288dd828c332e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-03-03T15:43:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-04T03:22:47.000Z", "max_forks_repo_path": "python/homography.py", "max_forks_repo_name": "Pratool/homography", "max_forks_repo_head_hexsha": "c9daeaa3364b7c658b39c225952288dd828c332e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8469387755, "max_line_length": 72, "alphanum_fraction": 0.6769289244, "include": true, "reason": "import numpy", "num_tokens": 562}
|
import numpy as np
import chess
import chess.variant
from BughouseEnv import BughouseEnv
# Mini Example for Agent: exercises the BughouseEnv API — making moves,
# saving/loading state snapshots, and enumerating legal moves.
# NOTE(review): presumably BughouseEnv(0, 0) selects board/player indices,
# and calling the env with a UCI string plays that move — confirm in BughouseEnv.
b = chess.Board()
fen= b.fen()  # NOTE(review): unused — kept for reference only
agent = BughouseEnv(0, 0)
state = agent('a2a3')
# A second env loaded from the first env's state should report the same
# legal moves; print every move whose dict value is 1 (legal).
agent2 = BughouseEnv(0, 0)
agent2.load_state(state)
moves = agent2.get_legal_moves_dict()
for key, value in moves.items():
    if value == 1:
        print(key)
# Fresh env: snapshot the starting state before making any move.
agent = BughouseEnv(0, 0)
moves = agent.get_legal_moves_dict()
start = agent.get_state() # generate a state
for key, value in moves.items():
    if value == 1:
        print(key)
state2 = agent("a2a3")  # NOTE(review): return value unused
state = agent.get_state()
# Round-trip the post-move state through a second env.
agent2 = BughouseEnv(0, 0)
agent2.load_state(state)
print("______")
moves = agent2.get_legal_moves_dict()
for key, value in moves.items():
    if value == 1:
        print(key)
game_over = agent.game_finished()  # NOTE(review): results unused below
score = agent.get_score()
print(agent.boards.boards[0])
board = chess.variant.CrazyhouseBoard()  # NOTE(review): unused
# Restoring the start snapshot should rewind the board, then replaying
# a2a3 should yield the same legal-move set as before.
agent.load_state(start)
print(agent.boards.boards[0])
state = agent("a2a3")
moves = agent.get_legal_moves_dict()
for key, value in moves.items():
    if value == 1:
        print(key)
print()
#
# from BugHouseBoard import BughouseBoards
#
# chess.BB_ALL
#
# legal_moves = {}
# for o in chess.SQUARE_NAMES:
#     for t in chess.SQUARE_NAMES:
#         if o != t:
#             legal_moves[o+t] = 0
# #promotion moves
# #placement moves
#
#
# bh_boards = BughouseBoards()
# for lm in bh_boards.generate_legal_moves(BughouseBoards.LEFT):
#     legal_moves[lm.uci()] = 1
#     print(lm.uci())
# print(bh_boards.generate_legal_moves(BughouseBoards.LEFT))
# print(bh_boards.generate_legal_moves(BughouseBoards.RIGHT))
# bh_boards.boards[0].pockets[chess.WHITE].add(chess.QUEEN)
# #bh_boards.boards[0].pockets[chess.WHITE].add(chess.PAWN)
# #bh_boards.boards[0].pockets[chess.WHITE].add(chess.PAWN)
# print(bh_boards.generate_legal_moves(BughouseBoards.LEFT))
# print(bh_boards.generate_legal_moves(BughouseBoards.RIGHT))
# print(bh_boards.get_win_status())
# for lm in bh_boards.generate_legal_moves(BughouseBoards.LEFT):
#     legal_moves[lm.uci()] = 1
#     print(lm.uci())
# bh_boards.push_uci("Q@h6",BughouseBoards.LEFT)
# print(bh_boards.boards[0])
# fen = bh_boards.board_fen()
# bh_boards2 = BughouseBoards()
# print(bh_boards2.boards[0])
# bh_boards2.set_fen(fen)
# print(bh_boards2.boards[0])
# print(bh_boards.boards[0].legal_moves)
# bh_boards2.boards[0].turn = bh_boards.boards[0].turn
# print(bh_boards2.boards[0].legal_moves)
# bs = bh_boards.to_numpy_simplified(True)
# bh_boards.boards[0].pockets[chess.WHITE].add(chess.PAWN)
# bh_boards.boards[0].pockets[chess.WHITE].add(chess.KNIGHT)
# bh_boards.boards[0].pockets[chess.WHITE].add(chess.BISHOP)
# bh_boards.boards[0].pockets[chess.WHITE].add(chess.ROOK)
# bh_boards.boards[0].pockets[chess.WHITE].add(chess.QUEEN)
# ret = bh_boards.get_pockets_numpy()
# bh_boards.boards[0].is_checkmate()
# print()
|
{"hexsha": "834eb21e8e89481a7493a3f04aff165d8f064634", "size": 2856, "ext": "py", "lang": "Python", "max_stars_repo_path": "testBHboard.py", "max_stars_repo_name": "MbProg/BughouseAlphaZero", "max_stars_repo_head_hexsha": "25d2f25417713a85b24eac3ce9a3a7f5c55ff5e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "testBHboard.py", "max_issues_repo_name": "MbProg/BughouseAlphaZero", "max_issues_repo_head_hexsha": "25d2f25417713a85b24eac3ce9a3a7f5c55ff5e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "testBHboard.py", "max_forks_repo_name": "MbProg/BughouseAlphaZero", "max_forks_repo_head_hexsha": "25d2f25417713a85b24eac3ce9a3a7f5c55ff5e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4432989691, "max_line_length": 64, "alphanum_fraction": 0.725140056, "include": true, "reason": "import numpy", "num_tokens": 899}
|
[STATEMENT]
lemma sublist_split_concat:
assumes "a \<in> set (acc @ (as@x#bs))" and "sublist ys a"
shows "(\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
proof(cases "a \<in> set (rev acc @ as @ [x])")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. a \<in> set (rev acc @ as @ [x]) \<Longrightarrow> (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
2. a \<notin> set (rev acc @ as @ [x]) \<Longrightarrow> (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
a \<in> set (rev acc @ as @ [x])
goal (2 subgoals):
1. a \<in> set (rev acc @ as @ [x]) \<Longrightarrow> (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
2. a \<notin> set (rev acc @ as @ [x]) \<Longrightarrow> (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
a \<in> set (rev acc @ as @ [x])
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
a \<in> set (rev acc @ as @ [x])
goal (1 subgoal):
1. (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
a \<in> set (rev acc @ as @ [x])
sublist ys a
goal (1 subgoal):
1. (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
(\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
goal (1 subgoal):
1. a \<notin> set (rev acc @ as @ [x]) \<Longrightarrow> (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. a \<notin> set (rev acc @ as @ [x]) \<Longrightarrow> (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
a \<notin> set (rev acc @ as @ [x])
goal (1 subgoal):
1. a \<notin> set (rev acc @ as @ [x]) \<Longrightarrow> (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
a \<notin> set (rev acc @ as @ [x])
[PROOF STEP]
have "a \<in> set bs"
[PROOF STATE]
proof (prove)
using this:
a \<notin> set (rev acc @ as @ [x])
goal (1 subgoal):
1. a \<in> set bs
[PROOF STEP]
using assms(1)
[PROOF STATE]
proof (prove)
using this:
a \<notin> set (rev acc @ as @ [x])
a \<in> set (acc @ as @ x # bs)
goal (1 subgoal):
1. a \<in> set bs
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
a \<in> set bs
goal (1 subgoal):
1. a \<notin> set (rev acc @ as @ [x]) \<Longrightarrow> (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
a \<in> set bs
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
a \<in> set bs
goal (1 subgoal):
1. (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
using assms(2) concat_all_sublist[of bs]
sublist_order.dual_order.trans[where c = ys, where b = "concat bs"]
[PROOF STATE]
proof (prove)
using this:
a \<in> set bs
sublist ys a
\<forall>x\<in>set bs. sublist x (concat bs)
\<lbrakk>sublist (concat bs) ?a; sublist ys (concat bs)\<rbrakk> \<Longrightarrow> sublist ys ?a
goal (1 subgoal):
1. (\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
(\<exists>a\<in>set (rev acc @ as @ [x]). sublist ys a) \<or> sublist ys (concat bs @ cs)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1703, "file": "Query_Optimization_IKKBZ_Optimality", "length": 17}
|
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 15:30:59 2020
@author: JANAKI
"""
import cv2
import dlib
import numpy as np
import argparse
from contextlib import contextmanager
from model import model_choose
def get_args(args=None):
    """Parse command-line options for the real-time age estimation demo.

    Args:
        args: Optional list of argument strings to parse. Defaults to None,
            in which case argparse falls back to sys.argv — preserving the
            original behavior for existing callers. Accepting an explicit
            list makes the parser testable (same pattern as passing
            sys.argv[1:] from main elsewhere in this project).

    Returns:
        argparse.Namespace with model_name, depth, width and weight_file.
    """
    parser = argparse.ArgumentParser(description="To detect faces from live webcam feed, pass them to the model, and display the face with the estimated age label.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--model_name", type=str, default="AlexNet",
                        help="Enter the model's name you want to use.")
    parser.add_argument("--depth", type=int, default=16,
                        help="Enter the depth of WideResNet.")
    parser.add_argument("--width", type=int, default=8,
                        help="Enter the width of WideResNet")
    parser.add_argument("--weight_file", type=str, default=None,
                        help="enter the path to the weight file")
    return parser.parse_args(args)
# =============================================================================
# def video():
# capture = yield_images()
# try:
# yield capture
# finally:
# capture.release()
# =============================================================================
def yield_images():
    """Generator yielding BGR frames from the default webcam (device 0).

    Fixes two defects in the original:
    - the boolean returned by VideoCapture.read() was ignored, so a failed
      or unplugged camera would yield None forever and crash downstream
      cv2 calls; we now stop iterating instead.
    - the capture device was never released; the try/finally also runs when
      the consumer abandons the generator, so the device is not leaked.
    """
    # capture video
    video_capture = cv2.VideoCapture(0)
    try:
        video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        while True:
            ret, img = video_capture.read()
            if not ret:
                # No frame available: stop rather than yielding None.
                break
            yield img
    finally:
        video_capture.release()
def draw_label(image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX, font_scale=1, thickness=2):
    """Render `label` at `point` on `image` over a filled blue banner."""
    text_w, text_h = cv2.getTextSize(label, font, font_scale, thickness)[0]
    anchor_x, anchor_y = point
    # Filled rectangle sized to the text, then white text on top of it.
    cv2.rectangle(image, (anchor_x, anchor_y - text_h),
                  (anchor_x + text_w, anchor_y), (255, 0, 0), cv2.FILLED)
    cv2.putText(image, label, point, font, font_scale, (255, 255, 255), thickness)
def main():
    """Run the webcam loop: detect faces, predict ages, draw labels.

    Press Esc (key code 27) in the display window to quit.
    """
    args = get_args()
    model_name = args.model_name
    weight_file = args.weight_file
    depth = args.depth
    width = args.width
    # Fall back to the bundled AlexNet weights when no file is given.
    if not weight_file:
        weight_file = 'alexnet.hdf5'
    # for face detection
    detector = dlib.get_frontal_face_detector()
    # load model and weights
    model = model_choose(depth, width, model_name=model_name)
    #model = buildmodel(64, 16, 8)
    model.load_weights(weight_file)
    # Model input is assumed square; take the side length from the model
    # itself so resizing below always matches the network's expected input.
    img_size = model.input.shape.as_list()[1]
    image_generator = yield_images()
    # Fractional margin added around each detected face box before cropping.
    margin = 0.8
    for img in image_generator:
        # dlib expects RGB; OpenCV delivers BGR.
        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_h, img_w, _ = np.shape(input_img)
        # detect faces using dlib detector
        detected = detector(input_img, 1)
        faces = np.empty((len(detected), img_size, img_size, 3))
        if len(detected) > 0:
            for i, d in enumerate(detected):
                x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, d.bottom() + 1, d.width(), d.height()
                # Expand the box by `margin`, clamped to the image bounds.
                xw1 = max(int(x1 - margin * w), 0)
                yw1 = max(int(y1 - margin * h), 0)
                xw2 = min(int(x2 + margin * w), img_w - 1)
                yw2 = min(int(y2 + margin * h), img_h - 1)
                cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
                faces[i, :, :, :] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))
            # predict ages and genders of the detected faces
            results = model.predict(faces)
            # Expected age = sum over the 101 age-bin probabilities times
            # the bin's age value (0..100).
            ages = np.arange(0, 101).reshape(101, 1)
            # NOTE(review): WideResNet appears to return [gender, age]
            # heads, hence results[1] — confirm against the model module.
            if (model_name == "WideResNet"):
                predicted_ages = results[1].dot(ages).flatten()
            else:
                predicted_ages = results.dot(ages).flatten()
            # draw results
            for i, d in enumerate(detected):
                label = str(int(predicted_ages[i]))
                draw_label(img, (d.left(), d.top()), label)
        cv2.imshow("Real time Age Estimation", img)
        # Esc quits the loop.
        key = cv2.waitKey(30)
        if key == 27:
            break
# Script entry point: run the demo only when executed directly.
if __name__ == '__main__':
    main()
|
{"hexsha": "e626b6b1cfa3909127c7216871bf22f08e56550b", "size": 4091, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo.py", "max_stars_repo_name": "janu6134/Real-time-Age-Estimation", "max_stars_repo_head_hexsha": "3c0233d5d1b8c505d484592c02c38f2ed8a9f93e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demo.py", "max_issues_repo_name": "janu6134/Real-time-Age-Estimation", "max_issues_repo_head_hexsha": "3c0233d5d1b8c505d484592c02c38f2ed8a9f93e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo.py", "max_forks_repo_name": "janu6134/Real-time-Age-Estimation", "max_forks_repo_head_hexsha": "3c0233d5d1b8c505d484592c02c38f2ed8a9f93e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8859649123, "max_line_length": 165, "alphanum_fraction": 0.5666096309, "include": true, "reason": "import numpy", "num_tokens": 1032}
|
import numpy as np
import pycuda.autoinit # NOQA:401
import pycuda.gpuarray as gpuarray
from cufinufft import cufinufft
import utils
def _test_type1(dtype, shape=(16, 16, 16), M=4096, tol=1e-3):
    """Run a type-1 (nonuniform -> uniform) cufinufft transform on the GPU
    and spot-check one output mode against a direct CPU evaluation."""
    cdtype = utils._complex_dtype(dtype)
    # Random nonuniform points and strengths on the host.
    pts = utils.gen_nu_pts(M, dim=len(shape)).astype(dtype)
    strengths = utils.gen_nonuniform_data(M).astype(cdtype)
    # Device copies plus an uninitialized output grid.
    pts_gpu = gpuarray.to_gpu(pts)
    strengths_gpu = gpuarray.to_gpu(strengths)
    grid_gpu = gpuarray.GPUArray(shape, dtype=cdtype)
    plan = cufinufft(1, shape, eps=tol, dtype=dtype)
    plan.set_pts(pts_gpu[0], pts_gpu[1], pts_gpu[2])
    plan.execute(strengths_gpu, grid_gpu)
    grid = grid_gpu.get()
    # Compare a single (arbitrary but fixed) mode against direct summation.
    ind = int(0.1789 * np.prod(shape))
    est = grid.ravel()[ind]
    ref = utils.direct_type1(strengths, pts, shape, ind)
    rel_err = np.abs(ref - est) / np.abs(ref)
    print('Type 1 relative error:', rel_err)
    assert rel_err < 0.01
def test_type1_32(shape=(16, 16, 16), M=4096, tol=1e-3):
    """Single-precision type-1 accuracy check."""
    return _test_type1(np.float32, shape=shape, M=M, tol=tol)
def test_type1_64(shape=(16, 16, 16), M=4096, tol=1e-3):
    """Double-precision type-1 accuracy check."""
    return _test_type1(np.float64, shape=shape, M=M, tol=tol)
def _test_type2(dtype, shape=(16, 16, 16), M=4096, tol=1e-3):
    """Run a type-2 (uniform -> nonuniform) cufinufft transform on the GPU
    and spot-check one output coefficient against a direct CPU evaluation.

    Consistency fix: generate the nonuniform points with ``dim=len(shape)``,
    exactly as ``_test_type1`` does, instead of relying on ``gen_nu_pts``'s
    default dimension happening to match the 3-d ``shape``.
    """
    complex_dtype = utils._complex_dtype(dtype)
    dim = len(shape)
    k = utils.gen_nu_pts(M, dim=dim).astype(dtype)
    fk = utils.gen_uniform_data(shape).astype(complex_dtype)
    k_gpu = gpuarray.to_gpu(k)
    fk_gpu = gpuarray.to_gpu(fk)
    # Output: one complex value per nonuniform point.
    c_gpu = gpuarray.GPUArray(shape=(M,), dtype=complex_dtype)
    plan = cufinufft(2, shape, eps=tol, dtype=dtype)
    plan.set_pts(k_gpu[0], k_gpu[1], k_gpu[2])
    plan.execute(c_gpu, fk_gpu)
    c = c_gpu.get()
    # Spot-check the middle point against direct evaluation.
    ind = M // 2
    c_est = c[ind]
    c_target = utils.direct_type2(fk, k[:, ind])
    type2_rel_err = np.abs(c_target - c_est) / np.abs(c_target)
    print('Type 2 relative error:', type2_rel_err)
    assert type2_rel_err < 0.01
def test_type2_32(shape=(16, 16, 16), M=4096, tol=1e-3):
    """Single-precision type-2 accuracy check."""
    return _test_type2(np.float32, shape=shape, M=M, tol=tol)
def test_type2_64(shape=(16, 16, 16), M=4096, tol=1e-3):
    """Double-precision type-2 accuracy check."""
    return _test_type2(np.float64, shape=shape, M=M, tol=tol)
def test_opts(shape=(8, 8, 8), M=32, tol=1e-3):
    """Exercise plan creation with non-default GPU options (sorting
    disabled, reduced max subproblem size) on a small type-1 problem."""
    dtype = np.float32
    cdtype = utils._complex_dtype(dtype)
    pts = utils.gen_nu_pts(M, dim=len(shape)).astype(dtype)
    strengths = utils.gen_nonuniform_data(M).astype(cdtype)
    pts_gpu = gpuarray.to_gpu(pts)
    strengths_gpu = gpuarray.to_gpu(strengths)
    grid_gpu = gpuarray.GPUArray(shape, dtype=cdtype)
    plan = cufinufft(1, shape, eps=tol, dtype=dtype, gpu_sort=False,
                     gpu_maxsubprobsize=10)
    plan.set_pts(pts_gpu[0], pts_gpu[1], pts_gpu[2])
    plan.execute(strengths_gpu, grid_gpu)
    grid = grid_gpu.get()
    # Same fixed-mode spot check as _test_type1.
    ind = int(0.1789 * np.prod(shape))
    est = grid.ravel()[ind]
    ref = utils.direct_type1(strengths, pts, shape, ind)
    assert np.abs(ref - est) / np.abs(ref) < 0.01
def main():
    """Run every accuracy test at both precisions."""
    for check in (test_type1_32, test_type2_32, test_type1_64, test_type2_64):
        check()


if __name__ == '__main__':
    main()
|
{"hexsha": "bc8257e8a8dcc436b0487bbdbe74d91170e5baf2", "size": 3136, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/cufinufft/tests/test_basic.py", "max_stars_repo_name": "elliottslaughter/cufinufft", "max_stars_repo_head_hexsha": "bb1453dfe9dc12159e8e346eae79ad4d71fd566f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 56, "max_stars_repo_stars_event_min_datetime": "2020-05-12T22:22:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T23:54:48.000Z", "max_issues_repo_path": "python/cufinufft/tests/test_basic.py", "max_issues_repo_name": "elliottslaughter/cufinufft", "max_issues_repo_head_hexsha": "bb1453dfe9dc12159e8e346eae79ad4d71fd566f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 108, "max_issues_repo_issues_event_min_datetime": "2020-05-13T16:59:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:30:57.000Z", "max_forks_repo_path": "python/cufinufft/tests/test_basic.py", "max_forks_repo_name": "elliottslaughter/cufinufft", "max_forks_repo_head_hexsha": "bb1453dfe9dc12159e8e346eae79ad4d71fd566f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2020-05-22T12:29:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T18:08:03.000Z", "avg_line_length": 23.7575757576, "max_line_length": 68, "alphanum_fraction": 0.6629464286, "include": true, "reason": "import numpy,import pycuda", "num_tokens": 1036}
|
import numpy as np
import h5py
import json
import sys
import csv
import illustris_python as il
def LoadMergHist(simu, subhaloID):
    """Return a subhalo's main-progenitor mapping (snapshot -> subhalo ID)
    and its merger history, loaded from the pre-built JSON merger trees."""
    if simu == 'TNG':
        tree_path = '/Raid0/zhouzb/merg_data/tng_DiskMerTree/%d.json' % subhaloID
    else:
        tree_path = '/Raid0/zhouzb/merg_data/il1_DiskMerTree/%d.json' % subhaloID
    with open(tree_path) as fh:
        tree = json.load(fh)
    main_branch = np.array(tree['Main'])
    progenitors = dict(zip(main_branch[:, 0], main_branch[:, 1]))
    return progenitors, np.array(tree['Mergers'])
def rothalo(coor, Jz):
    """Rotate particle coordinates in-place so that the halo's angular
    momentum vector ``Jz`` becomes the z axis; returns ``coor``.

    NOTE(review): Norm() normalizes its argument in-place, so ``Jz`` is
    modified by this call — confirm callers do not reuse it afterwards.
    """
    rot = RotMatrix(Norm(Jz))
    for idx, row in enumerate(coor):
        coor[idx] = np.dot(rot, row)
    return coor
#Vector Normalized
def Norm(vect):
    """Normalize *vect* to unit length in-place and return it.

    NOTE(review): a zero-length input divides by zero — callers are
    assumed to pass non-degenerate vectors.
    """
    vect /= np.linalg.norm(vect)
    return vect
#Find the rotation matrix when input vector rotated to z_axis
def RotMatrix(vect):
    """Return the (3, 3) rotation matrix that carries unit vector *vect*
    onto the z axis (a rotation about x followed by one about y)."""
    x, y, z = vect[0], vect[1], vect[2]
    yz = (z ** 2 + y ** 2) ** 0.5
    sinA, cosA = y / yz, z / yz
    # Length of vect projected after the first rotation.
    w = y * sinA + z * cosA
    xw = (x ** 2 + w ** 2) ** 0.5
    sinB, cosB = x / xw, w / xw
    return np.array([[cosB, -sinA * sinB, -cosA * sinB],
                     [0, cosA, -sinA],
                     [sinB, sinA * cosB, cosA * cosB]])
def subhaloA2(simu, haloID, snapnum):
    """Compute the maximum m=2 Fourier bar-strength parameter A2 for one
    subhalo's stellar component, or -1 when the halo is invalid or its
    particle data cannot be loaded.

    Uses star particles (wind particles excluded) within twice the
    stellar half-mass radius, rotated so the angular momentum is the z
    axis, then accumulates a2/b2 Fourier sums over particles sorted by
    projected radius. The innermost 1% of particles is discarded before
    taking the maximum.
    """
    if haloID == -1:
        return -1
    try:
        data = il.func.loadgalaxy(simu, snapnum, haloID, partType=4, fields=['Coordinates', 'Masses', 'Velocities', 'GFM_StellarFormationTime'])
        coor= data['Coordinates']
        mas = data['Masses']
        vel = data['Velocities']
        sf_time = data['GFM_StellarFormationTime']
    # NOTE(review): bare except silently converts every failure (including
    # typos) into a -1 return; the caller treats -1 as "no measurement".
    except:
        print(sys.exc_info()[0])
        print('halo %d in snap_%d load faild.'%(haloID,snapnum))
        print(' ')
        return -1
    # Stellar (type-4) half-mass radius and subhalo centre.
    half_r = (il.func.loadSubhalos(simu, snapnum, 'SubhaloHalfmassRadType'))[haloID,4]
    halo_position = il.func.loadSubhalos(simu, snapnum, 'SubhaloPos')[haloID]
    is_star = (sf_time>=0.0) # don't use wind particles
    r = coor - halo_position
    dis = ((r**2).sum(1))**0.5
    # Keep particles inside twice the stellar half-mass radius.
    inside = dis < (half_r*2)
    vel = vel[(inside & is_star)]
    mas = mas[(inside & is_star)]
    coor = r[(inside & is_star)]
    # Periodic-box wrap (box size presumably 75000 ckpc/h — TODO confirm).
    coor[coor > 37500] -= 75000
    coor[coor < -37500] += 75000
    #Calculate angular momentum in the centre-of-mass velocity frame
    V = np.sum(vel * mas[:, np.newaxis], 0) / mas.sum()
    vel = vel - V
    L = np.sum(np.cross(coor, vel * mas[:, np.newaxis]), axis=0)
    #Set halo's angular momentum Jz as z axis
    coor = rothalo(coor, L)
    #r_list is a list about the (projected, in-plane) distance between
    #stellar particles and halo centre; r_sort is the index that orders
    #particles by that distance
    r_list = ((coor[:,:2]**2).sum(1))**0.5
    r_sort = r_list.argsort()
    #a0 = Sigma(M[i]) : cumulative mass inside each projected radius
    a0 = np.zeros(len(r_sort))
    ptr = 0
    for i in r_sort:
        if ptr == 0:
            a0[ptr] = mas[i]
        else:
            a0[ptr] = mas[i] + a0[ptr-1]
        ptr += 1
    #Position angle: Theta = arctan(y/x)
    #Creat a list of a_m(R): cumulative m=2 Fourier coefficients
    a2_R = np.zeros(len(r_sort))
    b2_R = np.zeros(len(r_sort))
    list_i = 0
    for star in r_sort:
        # Skip particles exactly on the x axis to avoid division by zero.
        # NOTE(review): when a particle is skipped, list_i is not advanced,
        # so the tail entries of a2_R/b2_R stay zero while a0 still has
        # len(r_sort) entries — the index alignment drifts; verify intent.
        if coor[star, 1] == 0:
            continue
        Theta = np.arctan(coor[star,0] / coor[star,1])
        #a_i = M[numb] * cos( m * Theta[numb] ) , m=2
        a_i = mas[star] * np.cos(2*Theta)
        #b_i = M[numb] * sin( m * Theta[numb] ) , m=2
        b_i = mas[star] * np.sin(2*Theta)
        #a2_R[i] = a_i.sum(:i) : running sums of the coefficients
        if list_i > 0:
            a2_R[list_i] = a_i + a2_R[list_i - 1]
            b2_R[list_i] = b_i + b2_R[list_i - 1]
        else:
            a2_R[list_i] = a_i
            b2_R[list_i] = b_i
        list_i += 1
    #It's a list about A2 parameter, sorted by distance between halo and center
    A2_R = (a2_R ** 2 + b2_R ** 2)** 0.5 / a0
    # Drop the innermost 1% of particles before taking the maximum.
    A2_R = A2_R[int(len(A2_R) / 100):]
    return A2_R.max()
def SelectDisk(simu, snap_num):
    """Return the subhalo IDs of disk galaxies in the given snapshot
    (e.g. ``snap_num = 99`` for z=0).

    A subhalo qualifies as a disk when its circularity fraction
    (CircAbove07Frac) exceeds 0.2, its flatness M1/sqrt(M2*M3) is below
    0.7, and it holds more than 40000 stellar particles.
    """
    # Stellar (type-4) particle count per subhalo.
    stellar_counts = il.func.loadSubhalos(simu, snap_num, 'SubhaloLenType')[:, 4]
    if simu == 'TNG':
        circ_file = '/Raid0/zhouzb/TNGdata/supplementary/stellar_circs.hdf5'
    else:
        circ_file = '/Raid0/zhouzb/il1_data/stellar_circs.hdf5'
    with h5py.File(circ_file, 'r') as cir:
        snap_grp = cir['Snapshot_%d' % snap_num]
        haloID = np.array(snap_grp['SubfindID'])
        cir07frac = np.array(snap_grp['CircAbove07Frac'])
        MassTensor = np.array(snap_grp['MassTensorEigenVals'])
    # Circularity cut: fraction of stars with epsilon > 0.7 above 0.2.
    is_circ = cir07frac > 0.2
    haloID = haloID[is_circ]
    shape_eigvals = MassTensor[is_circ]
    # Flatness cut: M1 / sqrt(M2 * M3) < 0.7 marks disk-like systems.
    flatness = shape_eigvals[:, 0] / (shape_eigvals[:, 1] * shape_eigvals[:, 2]) ** 0.5
    haloID = haloID[flatness < 0.7]
    # Resolution cut: more than 40000 stellar particles.
    return haloID[stellar_counts[haloID] > 40000]
# Snapshot numbers for TNG100 and Illustris-1 (il1Snap is kept for the
# matching Illustris-1 run, though only the TNG loop executes below).
tngSnap = [67, 50, 40, 33]
il1Snap = [103, 85, 75, 68]
# For each TNG snapshot, measure the A2 bar-strength parameter of every
# disk galaxy and save the nested {snapshot: {haloID: A2}} mapping.
tngdiskA2 = {}
for snap in tngSnap:
    print('Start :TNG snap%d' % snap)
    haloA2 = {haloID: subhaloA2('TNG', haloID, snap)
              for haloID in SelectDisk('TNG', snap)}
    tngdiskA2[snap] = haloA2
np.save('/Raid0/zhouzb/BFwithZ/tngDiskA2.npy', tngdiskA2)
|
{"hexsha": "9f26575a3e69b25b847d330e461200240fc152ec", "size": 5927, "ext": "py", "lang": "Python", "max_stars_repo_path": "new/BarFractionWithRedshift.py", "max_stars_repo_name": "Max-astro/A2Project", "max_stars_repo_head_hexsha": "5d40263742133f214936b06b622d08092e694aed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "new/BarFractionWithRedshift.py", "max_issues_repo_name": "Max-astro/A2Project", "max_issues_repo_head_hexsha": "5d40263742133f214936b06b622d08092e694aed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "new/BarFractionWithRedshift.py", "max_forks_repo_name": "Max-astro/A2Project", "max_forks_repo_head_hexsha": "5d40263742133f214936b06b622d08092e694aed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3597883598, "max_line_length": 145, "alphanum_fraction": 0.5716213936, "include": true, "reason": "import numpy", "num_tokens": 1960}
|
import equity_risk_model
import numpy
import pandas
from pytest_cases import fixture
@fixture(scope="module")
def factor_model():
    """A small five-asset, three-factor risk model with fixed loadings
    and covariances, shared across the test module."""
    universe = numpy.array(["A", "B", "C", "D", "E"])
    factors = numpy.array(["foo", "bar", "baz"])

    loadings_data = numpy.array(
        [
            [0.2, 0.3, -0.1, -0.2, 0.45],
            [0.01, -0.2, -0.23, -0.01, 0.4],
            [0.1, 0.05, 0.23, 0.15, -0.1],
        ]
    )
    factor_loadings = pandas.DataFrame(
        loadings_data, index=factors, columns=universe
    )

    factor_cov = numpy.array(
        [[0.3, 0.05, 0.01], [0.05, 0.15, -0.10], [0.01, -0.10, 0.2]]
    )
    covariance_factor = pandas.DataFrame(
        factor_cov, index=factors, columns=factors
    )

    # Specific (idiosyncratic) risk is diagonal by construction.
    covariance_specific = pandas.DataFrame(
        numpy.diag([0.05, 0.04, 0.10, 0.02, 0.09]),
        index=universe,
        columns=universe,
    )

    return equity_risk_model.model.FactorRiskModel(
        universe,
        factors,
        factor_loadings,
        covariance_factor,
        covariance_specific,
    )
@fixture(scope="module")
def factor_model_with_groups():
    """Same five-asset, three-factor model as ``factor_model``, but with
    the factors partitioned into 'Alpha' and 'Beta' groups."""
    universe = numpy.array(["A", "B", "C", "D", "E"])
    factors = numpy.array(["foo", "bar", "baz"])

    factor_loadings = pandas.DataFrame(
        [
            [0.2, 0.3, -0.1, -0.2, 0.45],
            [0.01, -0.2, -0.23, -0.01, 0.4],
            [0.1, 0.05, 0.23, 0.15, -0.1],
        ],
        index=factors,
        columns=universe,
    )
    covariance_factor = pandas.DataFrame(
        [[0.3, 0.05, 0.01], [0.05, 0.15, -0.10], [0.01, -0.10, 0.2]],
        index=factors,
        columns=factors,
    )
    covariance_specific = pandas.DataFrame(
        numpy.diag([0.05, 0.04, 0.10, 0.02, 0.09]),
        index=universe,
        columns=universe,
    )
    return equity_risk_model.model.FactorRiskModel(
        universe,
        factors,
        factor_loadings,
        covariance_factor,
        covariance_specific,
        factor_group_mapping={"Alpha": ["foo", "bar"], "Beta": ["baz"]},
    )
@fixture(scope="module")
def risk_calculator(factor_model):
    """RiskCalculator built on the plain factor model."""
    calculator = equity_risk_model.risk.RiskCalculator(factor_model)
    return calculator
@fixture(scope="module")
def risk_calculator_with_factor_groups(factor_model_with_groups):
    """RiskCalculator built on the factor model that defines groups."""
    calculator = equity_risk_model.risk.RiskCalculator(factor_model_with_groups)
    return calculator
@fixture(scope="module")
def concentration_calculator(risk_calculator):
    """ConcentrationCalculator wrapping the module-level risk calculator."""
    calculator = equity_risk_model.concentration.ConcentrationCalculator(
        risk_calculator=risk_calculator
    )
    return calculator
|
{"hexsha": "924b4ac6e7eba997ae27b9ba8b5736f63d2a164c", "size": 2592, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/conftest.py", "max_stars_repo_name": "blaahhrrgg/equity-risk-model", "max_stars_repo_head_hexsha": "f94af6126e57597642dbcbf6896beeb52d879936", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-12-21T14:44:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T09:29:01.000Z", "max_issues_repo_path": "tests/conftest.py", "max_issues_repo_name": "blaahhrrgg/equity-risk-model", "max_issues_repo_head_hexsha": "f94af6126e57597642dbcbf6896beeb52d879936", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/conftest.py", "max_forks_repo_name": "blaahhrrgg/equity-risk-model", "max_forks_repo_head_hexsha": "f94af6126e57597642dbcbf6896beeb52d879936", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-12-18T04:00:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T17:02:03.000Z", "avg_line_length": 24.2242990654, "max_line_length": 74, "alphanum_fraction": 0.5775462963, "include": true, "reason": "import numpy", "num_tokens": 757}
|
[STATEMENT]
lemma sep_list_conj_Cons [simp]: "\<And>* (x#xs) = (x ** \<And>* xs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>* x # xs = (x \<and>* \<And>* xs)
[PROOF STEP]
by (simp add: sep_list_conj_def sep.foldl_absorb0)
|
{"llama_tokens": 111, "file": "Separation_Algebra_Separation_Algebra", "length": 1}
|
# bchhun, {2019-12-12}
from ReconstructOrder.workflow.reconstructBatch import reconstruct_batch
import os, glob
import tifffile as tf
import pytest
import numpy as np
from ..testMetrics import mse
def test_reconstruct_source(setup_multidim_src):
    """Run a full multi-dimensional reconstruction from the supplied
    config fixture; any exception is reported as a test failure.

    :param setup_multidim_src: fixture that returns PATH to config
    """
    try:
        reconstruct_batch(setup_multidim_src)
    except Exception as ex:
        pytest.fail("Exception thrown during reconstruction = "+str(ex))
def test_src_target_mse(setup_multidim_src, setup_multidim_target):
    """
    Compare a reconstruction generated from the config against target images.

    Bug fix: the AssertionError branch previously evaluated
    ``'img_Phase' in target`` and concatenated ``target`` into a message,
    but ``target`` is a numpy image array at that point -- the membership
    test was meaningless and the string concatenation raised TypeError.
    Both now use the target *file path* (``target_file``).

    :param setup_multidim_src: fixture that returns PATH to config
    :param setup_multidim_target: fixture that returns PATH to .tif files
    :return:
    """
    config = setup_multidim_src
    reconstruct_batch(config)
    processed_folder = os.getcwd() + '/temp/predict/src/SM_2019_0612_20x_1_BG_2019_0612_1515_1/B3-Site_1'
    processed_files = glob.glob(processed_folder+'/*.tif')
    print("PROCESSED FILES" + str(processed_files))
    print("number of proc images ="+str(len(processed_files)))
    target_folder = setup_multidim_target
    target_files = glob.glob(target_folder+'/*.tif')
    print("TARGET FILES" + str(target_files))
    print("number of target images ="+str(len(target_files)))
    # Pair prediction/target files by sorted order, matching on basename.
    p_sort = sorted(processed_files)
    s_sort = sorted(target_files)
    for idx, file in enumerate(p_sort):
        target_file = s_sort[idx]
        if os.path.basename(file) == os.path.basename(target_file):
            predict = tf.imread(file)
            target = tf.imread(target_file)
            try:
                assert mse(predict, target) <= np.finfo(np.float32).eps
            except AssertionError as ae:
                if 'img_Phase' in target_file:
                    # Known discrepancy in phase reconstruction: log it and
                    # continue instead of failing the test.
                    print(f" ==== KNOWN error in Phase Reconstruction ==== ")
                    print(f"MSE relative = {mse(predict, target)}")
                    print(f"MSE FAIL ON PREDICT = " + file)
                    print(f"MSE FAIL ON TARGET = " + target_file + "\n")
                    continue
                else:
                    print(f"MSE relative = {mse(predict, target)}")
                    print(f"MSE FAIL ON PREDICT = " + file)
                    print(f"MSE FAIL ON TARGET = " + target_file + "\n")
                    pytest.fail("Assertion Error = " + str(ae))
|
{"hexsha": "9c37e6512e4c5499dde5393eb23d76fc37cf72c8", "size": 2439, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/integration_tests/multidim_complete_pipeline_tests.py", "max_stars_repo_name": "czbiohub/reconstruct-order", "max_stars_repo_head_hexsha": "e729ae3871aea0a5ec2d42744a9448c7f0a93037", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-10-30T23:00:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-02T19:09:07.000Z", "max_issues_repo_path": "tests/integration_tests/multidim_complete_pipeline_tests.py", "max_issues_repo_name": "czbiohub/ReconstructOrder", "max_issues_repo_head_hexsha": "e729ae3871aea0a5ec2d42744a9448c7f0a93037", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-07-08T22:51:29.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-13T15:44:01.000Z", "max_forks_repo_path": "tests/integration_tests/multidim_complete_pipeline_tests.py", "max_forks_repo_name": "mehta-lab/reconstruct-order", "max_forks_repo_head_hexsha": "e729ae3871aea0a5ec2d42744a9448c7f0a93037", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-02T23:28:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-16T23:46:46.000Z", "avg_line_length": 34.8428571429, "max_line_length": 105, "alphanum_fraction": 0.6330463305, "include": true, "reason": "import numpy", "num_tokens": 571}
|
! { dg-do run }
! PR29936 Missed constraint on RECL=specifier in unformatted sequential WRITE
! Submitted by Jerry DeLisle <jvdelisle@gcc.gnu.org>
program us_recl
! The array occupies 20 bytes, deliberately larger than RECL=16 below.
real, dimension(5) :: array = 5.4321
integer :: istatus
open(unit=10, form="unformatted", access="sequential", RECL=16)
! Writing a 20-byte record must violate RECL=16 and set iostat nonzero;
! a zero status means the constraint was missed, so abort the test.
write(10, iostat=istatus) array
if (istatus == 0) STOP 1
close(10, status="delete")
end program us_recl
|
{"hexsha": "cffaa9e1e0d4b042c59d8472531636f213a42776", "size": 400, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/write_check3.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/write_check3.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/write_check3.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 33.3333333333, "max_line_length": 77, "alphanum_fraction": 0.7225, "num_tokens": 131}
|
from PIL import Image, ImageEnhance, ImageOps
import numpy as np
import random
random.seed(0)
class BasicPolicy(object):
    """Basic paired image/depth augmentation: optional Poisson noise,
    color-channel shuffling, horizontal mirroring, vertical flipping and
    random-box erasing.

    Bug fix: ``__call__`` invoked ``self.eraser``, which was never
    defined, so any ``erase_ratio`` in (0, 1] raised AttributeError the
    first time the erase branch was taken. A random-box eraser method is
    now provided.
    """
    def __init__(self, mirror_ratio = 0, flip_ratio = 0, color_change_ratio = 0, is_full_set_colors = False, add_noise_peak = 0.0, erase_ratio = -1.0):
        # Candidate RGB channel orderings; index 0 is always the identity.
        from itertools import product, permutations
        self.indices = list(product([0,1,2], repeat = 3)) if is_full_set_colors else list(permutations(range(3), 3))
        self.indices.insert(0, [0,1,2]) # R,G,B
        self.add_noise_peak = add_noise_peak
        # Per-augmentation trigger probabilities (0 disables each one).
        self.color_change_ratio = color_change_ratio
        self.mirror_ratio = mirror_ratio
        self.flip_ratio = flip_ratio
        # Erase probability; the default (-1) disables erasing entirely.
        self.erase_ratio = erase_ratio

    def eraser(self, img, boxsize_max=0.5):
        """Fill a random axis-aligned box of *img* (at most boxsize_max of
        each spatial extent) with uniform noise in [0, 1); returns a copy.

        Assumes img is (..., H, W, C), matching the axis layout used by
        the mirror/flip branches in __call__.
        """
        h, w = img.shape[-3], img.shape[-2]
        eh = random.randint(1, max(1, int(h * boxsize_max)))
        ew = random.randint(1, max(1, int(w * boxsize_max)))
        top = random.randint(0, h - eh)
        left = random.randint(0, w - ew)
        img = np.array(img, copy=True)
        img[..., top:top + eh, left:left + ew, :] = np.random.uniform(size=(eh, ew, img.shape[-1]))
        return img

    def __call__(self, img, depth):
        """Apply the configured augmentations to an (img, depth) pair."""
        # 0) Add poisson noise (e.g. choose peak value 20)
        if self.add_noise_peak > 0:
            PEAK = self.add_noise_peak
            img = np.random.poisson(np.clip(img, 0, 1) * PEAK) / PEAK
        # 1) Color change: pick a channel permutation (identity if not triggered)
        policy_idx = random.randint(0, len(self.indices) - 1)
        if random.uniform(0, 1) >= self.color_change_ratio:
            policy_idx = 0
        img = img[...,list(self.indices[policy_idx])]
        # 2) Mirror image (horizontal, axis -2); depth mirrored in lockstep
        if random.uniform(0, 1) <= self.mirror_ratio:
            img = img[...,::-1,:]
            depth = depth[...,::-1,:]
        # 3) Flip image vertically (axis -3)
        if random.uniform(0, 1) < self.flip_ratio:
            img = img[...,::-1,:,:]
            depth = depth[...,::-1,:,:]
        # 4) Erase random box (image only; depth untouched)
        if random.uniform(0, 1) < self.erase_ratio:
            img = self.eraser(img)
        return img, depth
class SubPolicy(object):
    """One AutoAugment-style sub-policy: two image operations, each applied
    with its own probability and a magnitude drawn from a 10-step range.

    Bug fix: the "posterize" range used ``.astype(np.int)``; the ``np.int``
    alias was removed in NumPy 1.24, which made every construction raise
    AttributeError. The builtin ``int`` is used instead.
    """
    def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
        # Magnitude lookup tables: index 0..9 maps to increasing strength
        # (or decreasing threshold for solarize/posterize).
        ranges = {
            "shearX": np.linspace(0, 0.3, 10),
            "shearY": np.linspace(0, 0.3, 10),
            "translateX": np.linspace(0, 150 / 331, 10),
            "translateY": np.linspace(0, 150 / 331, 10),
            "rotate": np.linspace(0, 30, 10),
            "color": np.linspace(0.0, 0.9, 10),
            # np.int was removed in NumPy >= 1.24; builtin int is equivalent here.
            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int),
            "solarize": np.linspace(256, 0, 10),
            "contrast": np.linspace(0.0, 0.9, 10),
            "sharpness": np.linspace(0.0, 0.9, 10),
            "brightness": np.linspace(0.0, 0.9, 10),
            "autocontrast": [0] * 10,
            "equalize": [0] * 10,
            "invert": [0] * 10
        }
        # Operation name -> callable(img, magnitude). Sign of shear/translate
        # and enhancement direction is randomized per call.
        func = {
            "shearX": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
                Image.BICUBIC, fillcolor=fillcolor),
            "shearY": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
                Image.BICUBIC, fillcolor=fillcolor),
            "translateX": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
                fillcolor=fillcolor),
            "translateY": lambda img, magnitude: img.transform(
                img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
                fillcolor=fillcolor),
            #"rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
            # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
            # NOTE(review): "rotate" is currently a no-op (both real
            # implementations are commented out) — confirm this is intended.
            "rotate": lambda img, magnitude: img,
            "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
            "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
            "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
            "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
            "equalize": lambda img, magnitude: ImageOps.equalize(img),
            "invert": lambda img, magnitude: ImageOps.invert(img)
        }
        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_idx1]
        self.p2 = p2
        self.operation2 = func[operation2]
        self.magnitude2 = ranges[operation2][magnitude_idx2]

    def __call__(self, img):
        """Apply operation1 then operation2, each with its own probability."""
        if random.random() < self.p1: img = self.operation1(img, self.magnitude1)
        if random.random() < self.p2: img = self.operation2(img, self.magnitude2)
        return img
|
{"hexsha": "7bb70961c1eb297aeb43fedf67f9d5cf3967b76e", "size": 5045, "ext": "py", "lang": "Python", "max_stars_repo_path": "augment.py", "max_stars_repo_name": "kimtaehyeong/msnnff", "max_stars_repo_head_hexsha": "75586be601bbdbfafcdf4038bc08f239e119b417", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "augment.py", "max_issues_repo_name": "kimtaehyeong/msnnff", "max_issues_repo_head_hexsha": "75586be601bbdbfafcdf4038bc08f239e119b417", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "augment.py", "max_forks_repo_name": "kimtaehyeong/msnnff", "max_forks_repo_head_hexsha": "75586be601bbdbfafcdf4038bc08f239e119b417", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T06:42:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:42:12.000Z", "avg_line_length": 43.1196581197, "max_line_length": 151, "alphanum_fraction": 0.5746283449, "include": true, "reason": "import numpy", "num_tokens": 1392}
|
\chapter{ Machine Learning}
% CHAPTER SETTINGS
\graphicspath{{./images/machine_learning/}}
\section{Ensemble Methods}
\subsection{Explaining bagging}
Bagging, known more formally as Bootstrapped Aggregation, trains the same algorithm on different bootstrap subsets of the training data, giving each model a different perspective on the problem.
\subsection{Explaining boosting}
Boosting trains models sequentially on the same training data, with each new model concentrating on the examples that the previous models handled poorly.
\subsection{Explaining blending}
Blending, known more formally as Stacked Generalization or Stacking, trains a variety of models whose predictions are taken as input to a new model that learns how to combine them into an overall prediction.
|
{"hexsha": "4446dfd4907c10187bbc679bf3c734559630830e", "size": 650, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "main/chapters/machine_learning.tex", "max_stars_repo_name": "romanroson/veritas", "max_stars_repo_head_hexsha": "148c1aea2369beca96bf8929af260c6634ecbe4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main/chapters/machine_learning.tex", "max_issues_repo_name": "romanroson/veritas", "max_issues_repo_head_hexsha": "148c1aea2369beca96bf8929af260c6634ecbe4c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main/chapters/machine_learning.tex", "max_forks_repo_name": "romanroson/veritas", "max_forks_repo_head_hexsha": "148c1aea2369beca96bf8929af260c6634ecbe4c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3333333333, "max_line_length": 209, "alphanum_fraction": 0.8169230769, "num_tokens": 132}
|
subroutine printPartProp (gsmObj, proj, targ, results, ncas, intel)
! ======================================================================
!
! Prints out a table of emitted particle properties for the first nnnp
! reactions.
! Basically not used in CEM03; except for debugging.
!
! Definition of spt:
! spt(1,k) = sin(theta.k)
! spt(2,k) = cos(theta.k)
! spt(3,k) = kinetic energy of particle k (GeV)
! spt(4,k) = charge of particle k
! spt(5,k) = rest mass of particle k
!
! Definition of parz:
! parz(1,k) = particle type #; 1-9 for n-pi+;
! nint(A + 999*Z) for residual nuclei.
! parz(2,k) = kinetic energy of particle k (GeV)
! parz(3,k) = theta of particle k
! parz(4,k) = phi of particle k
! parz(5,k) = index: <100 for cascade particle,
! negative for hole;
! = 100 for preq.; 200 for coalescence;
! = 1000 for thermal; 2000 for evap. from fission.
! fragments.
! parz(6,k) = electric charge of particle k
!
! CEM95 written by S. G. Mashnik
! Edited by A. J. Sierk LANL T-2 February, 1996.
! Edited by A. J. Sierk, LANL T-16, October, 2003.
! Edited by AJS, LANL T-2, December, 2011.
!
! ======================================================================
use, intrinsic:: iso_fortran_env, only: int32, int64, real64
use gsm_params, only: thousand, radianToDegree
implicit none
class(GSM), intent(inout) :: gsmObj
type(GSMProjectile), intent(in ) :: proj
type(GSMTarget), intent(in ) :: targ
type(GSMResults), intent(in ) :: results
integer(int64), intent(in ) :: ncas
integer(int64), intent(in ) :: intel
integer(int32) :: k
real(real64) :: aa, am, fi, pt, tet, tk, zz
! ======================================================================
! Table header: event counters, target/projectile summary, column names.
write (16, 100) ncas, intel
write (16, 200) targ%numBaryons, targ%numProtons, proj%kinEnergy, results%numProgeny
write (16, 300)
! One table row per progeny particle; angles converted from radians to
! degrees and energies/masses from GeV to MeV for printing.
do k = 1, results%numProgeny
tet = results%progenyBnk(k)%theta*radianToDegree
fi = results%progenyBnk(k)%phi*radianToDegree
tk = results%progenyBnk(k)%kinEnergy*thousand
am = results%progenyBnk(k)%restMass*thousand
zz = results%progenyBnk(k)%numProtons
pt = results%progenyBnk(k)%typeID
aa = results%progenyBnk(k)%numBaryons
! Residual nuclei are encoded as typeID = A + 999*Z (see header);
! recover A and report it in the type/mass columns.
if (aa > 4.d0 .or. pt > 10.d0) then
aa = results%progenyBnk(k)%typeID - 999.d0*zz
am = aa
pt = aa
endif
write (16, 400) pt, zz, tk, tet, fi, results%progenyBnk(k)%prodMech, &
& results%progenyBnk(k)%sinTheta, results%progenyBnk(k)%cosTheta, am
end do
return
! ======================================================================
100 format (/1x,'ncas =',i7,', intel =',i7)
200 format (1x,'After the cascade, ', &
& 'At = ',f4.0,', Zt =',f4.0,', Ut =',f7.2,', ktot =',i3)
300 format (2x,'Par Q',4x,'T(MeV)',1x,'theta',3x,'phi',3x, &
& 'ngen',5x,'st',6x,'ct',5x,'M(MeV)')
400 format (1x,f4.0,1x,f3.0,1x,f7.2,2x,f5.1,2x,f5.1,2x,f5.0,2x,f6.4, &
& 2x,f6.4,1x,f7.1)
! ======================================================================
end subroutine printPartProp
|
{"hexsha": "93de109ccddc214e6ea6d8f0741d543c24690ab7", "size": 3539, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/GeneralizedSpallation/printPartProp.f90", "max_stars_repo_name": "lanl/generalized-spallation-model", "max_stars_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_stars_repo_licenses": ["Intel", "Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-24T18:05:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T18:05:48.000Z", "max_issues_repo_path": "src/GeneralizedSpallation/printPartProp.f90", "max_issues_repo_name": "lanl/generalized-spallation-model", "max_issues_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_issues_repo_licenses": ["Intel", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GeneralizedSpallation/printPartProp.f90", "max_forks_repo_name": "lanl/generalized-spallation-model", "max_forks_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_forks_repo_licenses": ["Intel", "Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.130952381, "max_line_length": 88, "alphanum_fraction": 0.486012998, "num_tokens": 1069}
|
import asammdf
import pandas as pd
from scipy import io
import time
import argparse
def parse_arguments():
    """Parse the command-line arguments: input MF4 path and output .mat path."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", type=str, help="Path to MF4 file")
    parser.add_argument("--output_file", default="output.mat", type=str, help="Path to save output .mat file")
    arguments = parser.parse_args()
    return arguments
def main():
    """Convert an MF4 measurement file to a MATLAB v5 .mat file.

    Loads the MF4 given by --input_file, flattens it to a DataFrame,
    sanitizes channel names ('.' is not valid in MATLAB identifiers) and
    saves one variable per channel to --output_file, timing the run.

    Fixes: removed a no-op bare ``column_names`` expression statement and
    replaced the manual rename loop with a comprehension.
    """
    args = parse_arguments()
    load_file = args.input_file
    save_file = args.output_file
    start_time = time.time()
    mdf = asammdf.MDF(load_file)
    df = mdf.to_dataframe()
    # MATLAB variable names cannot contain '.', so replace it with '_'.
    df.columns = [str(name).replace(".", "_") for name in df.columns]
    io.savemat(save_file, {name: col.values for name, col in df.items()}, format='5')
    print("--- %s seconds ---" % (time.time() - start_time))


if __name__ == "__main__":
    main()
|
{"hexsha": "85019e6f3a5f5d60847e7b71b543f205dd94ae11", "size": 1047, "ext": "py", "lang": "Python", "max_stars_repo_path": "mdfconverter2.py", "max_stars_repo_name": "chriswernette/MFDConverter", "max_stars_repo_head_hexsha": "bffe2cb4b09dbbaa22d92f1ecd6c6df48d066f78", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-10T15:00:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-10T15:00:59.000Z", "max_issues_repo_path": "mdfconverter2.py", "max_issues_repo_name": "chriswernette/MDFConverter", "max_issues_repo_head_hexsha": "bffe2cb4b09dbbaa22d92f1ecd6c6df48d066f78", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mdfconverter2.py", "max_forks_repo_name": "chriswernette/MDFConverter", "max_forks_repo_head_hexsha": "bffe2cb4b09dbbaa22d92f1ecd6c6df48d066f78", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5526315789, "max_line_length": 110, "alphanum_fraction": 0.6571155683, "include": true, "reason": "from scipy", "num_tokens": 253}
|
[STATEMENT]
theorem aodv_loop_freedom:
assumes "wf_net_tree n"
shows "closed (pnet (\<lambda>i. paodv i \<langle>\<langle> qmsg) n) \<TTurnstile> netglobal (\<lambda>\<sigma>. \<forall>dip. irrefl ((rt_graph \<sigma> dip)\<^sup>+))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed (pnet (\<lambda>i. paodv i \<langle>\<langle> qmsg) n) \<TTurnstile> netglobal (\<lambda>\<sigma>. \<forall>dip. irrefl ((rt_graph \<sigma> dip)\<^sup>+))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
wf_net_tree n
goal (1 subgoal):
1. closed (pnet (\<lambda>i. paodv i \<langle>\<langle> qmsg) n) \<TTurnstile> netglobal (\<lambda>\<sigma>. \<forall>dip. irrefl ((rt_graph \<sigma> dip)\<^sup>+))
[PROOF STEP]
by (rule aodv_openproc_par_qmsg.netglobal_weakenE
[OF net_nhop_quality_increases inv_to_loop_freedom])
|
{"llama_tokens": 340, "file": "AODV_variants_b_fwdrreps_B_Aodv_Loop_Freedom", "length": 2}
|
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 Erik T. Everson and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
"""Module for the template control mappers."""
__all__ = ["HDFMapControlTemplate", "HDFMapControlCLTemplate"]
import h5py
import numpy as np
import os
from abc import ABC, abstractmethod
from typing import Iterable, List, Union
from warnings import warn
from .parsers import CLParse
from .types import ConType
class HDFMapControlTemplate(ABC):
    # noinspection PySingleQuotedDocstring
    '''
    Template class for all control mapping classes to inherit from.

    Any inheriting class should define :code:`__init__` as::

        def __init__(self, group: h5py.Group):
            """
            :param group: HDF5 group object
            """
            # initialize
            HDFMapControlTemplate.__init__(self, group)

            # define control type
            self.info['contype'] = ConType.motion

            # populate self.configs
            self._build_configs()

    .. note::

        * Any method that raises a :exc:`NotImplementedError` is
          intended to be overwritten by the inheriting class.
        * :code:`from bapsflib._hdf.maps.controls import ConType`
        * If a control device is structured around a
          :ibf:`command list`, then its mapping class should subclass
          :class:`~.templates.HDFMapControlCLTemplate`.
          Which is a subclass of
          :class:`~.templates.HDFMapControlTemplate`,
          but adds methods for parsing/handling a command list.
    '''

    def __init__(self, group: h5py.Group):
        """
        :param group: the control device HDF5 group object
        :raise TypeError: if `group` is not an instance of
            :class:`h5py.Group`
        """
        # condition group arg
        if isinstance(group, h5py.Group):
            self._control_group = group
        else:
            raise TypeError("arg `group` is not of type h5py.Group")

        # define _info attribute
        # ('contype' is NotImplemented until the subclass sets it)
        self._info = {
            "group name": os.path.basename(group.name),
            "group path": group.name,
            "contype": NotImplemented,
        }

        # initialize configuration dictionary
        # (filled by the subclass's _build_configs)
        self._configs = {}

    @property
    def configs(self) -> dict:
        """
        Dictionary containing all the relevant mapping information to
        translate the HDF5 data into a numpy array by
        :class:`~bapsflib._hdf.utils.hdfreadcontrols.HDFReadControls`.

        **-- Constructing** :code:`configs` **--**

        The :code:`configs` dict is a nested dictionary where the first
        level of keys represents the control device configuration names.
        Each configuration corresponds to one dataset in the HDF5
        control group and represents a grouping of state values
        associated with a probe or instrument used during an
        experiment.

        Each configuration is a dictionary consisting of a set of
        required keys (:code:`'dset paths'`, :code:`'shotnum'`, and
        :code:`'state values'`) and optional keys.  Any optional key is
        considered as meta-info for the device and is added to the
        :attr:`~bapsflib._hdf.utils.hdfreadcontrols.HDFReadControls.info`
        dictionary when the numpy array is constructed.  The required
        keys constitute the mapping for constructing the numpy array
        and are explained in the table below.

        .. csv-table:: Dictionary breakdown for
                       :code:`config = configs['config name']`
            :header: "Key", "Description"
            :widths: 20, 60

            "::

                config['dset paths']
            ", "
            Internal HDF5 path to the dataset associated with the
            control device configuration.  For example, ::

                config['dset paths'] = ('/foo/bar/Control/d1', )

            "
            "::

                config['shotnum']
            ", "
            Defines how the run shot numbers are stored in the HDF5
            file, which are mapped to the :code:`'shotnum'` field of the
            constructed numpy array.  Should look like, ::

                config['shotnum'] = {
                    'dset paths': config['dset paths'],
                    'dset field': ('Shot number',),
                    'shape': (),
                    'dtype': numpy.int32,
                }

            where :code:`'dset paths'` is the internal HDF5 path to the
            dataset, :code:`'dset field'` is the field name of the
            dataset containing shot numbers, :code:`'shape'` is the
            numpy shape of the shot number data, and :code:`'dtype'`
            is the numpy :code:`dtype` of the data.  This all defines
            the numpy :code:`dtype` of the :code:`'shotnum'` field in
            the
            :class:`~bapsflib._hdf.utils.hdfreadcontrols.HDFReadControls`
            constructed numpy array.
            "
            "::

                config['state values']
            ", "
            This is another dictionary defining :code:`'state values`.
            For example, ::

                config['state values'] = {
                    'xyz': {
                        'dset paths': config['dset paths'],
                        'dset field': ('x', 'y', 'z'),
                        'shape': (3,),
                        'dtype': numpy.float32}
                }

            will tell
            :class:`~bapsflib._hdf.utils.hdfreadcontrols.HDFReadControls`
            to construct a numpy array with a the :code:`'xyz'` field.
            This field would be a 3-element array of
            :code:`numpy.float32`, where the :code:`'x'` field of the
            HDF5 dataset is mapped to the 1st index, :code:`'y'` is
            mapped to the 2nd index, and :code:`'z'` is mapped to the
            3rd index.

            **Note:**

            * A state value field (key) can not be defined as
              :code:`'signal'` since this field is reserved for
              digitizer data constructed by
              :class:`~bapsflib._hdf.utils.hdfreaddata.HDFReadData`.
            * If state value data represents probe position data, then
              it should be given the field name (key) :code:`'xyz'`
              (like in the example above).

            "

        If a control device saves data around the concept of a
        :ibf:`command list`, then :code:`configs` has a few additional
        required keys, see table below.

        .. csv-table:: Additional required keys for
                       :code:`config = configs['config name']` when
                       the control device saves data around the concept
                       of a :ibf:`command list`.
            :header: "Key", "Description"
            :widths: 20, 60

            "::

                config['command list']
            ", "
            A tuple representing the original **command list**.
            For example, ::

                config['command list'] = ('VOLT: 20.0',
                                          'VOLT 25.0',
                                          'VOLT 30.0')

            "
            "::

                config['state values']
            ", "
            Has all the same keys as before, plus the addition of
            :code:`'command list'`, :code:`'cl str`,
            and :code:`'re pattern'`.
            For example, ::

                config['state values'] = {
                    'command': {
                        'dset paths': config['dset paths'],
                        'dset field': ('Command index',),
                        'shape': (),
                        'dtype': numpy.float32,
                        'command list': (20.0, 25.0, 30.0),
                        'cl str': ('VOLT: 20.0', 'VOLT 25.0',
                                   'VOLT 30.0'),
                        're pattern': re.compile(r'some RE pattern')}
                }

            where :code:`'re pattern'` is the compiled RE pattern used
            to parse the original **command list**, :code:`'cl str'` is
            the matched string segment of the **command list**, and
            :code:`'command list'` is the set of values that will
            populate the constructed numpy array.
            "

        .. note::

            For further details, look to :ref:`add_control_mod`.
        """
        return self._configs

    @property
    def contype(self) -> ConType:
        """control device type"""
        return self._info["contype"]

    @property
    def dataset_names(self) -> List[str]:
        """list of names of the HDF5 datasets in the control group"""
        dnames = [
            name for name in self.group if isinstance(self.group[name], h5py.Dataset)
        ]
        return dnames

    @property
    def group(self) -> h5py.Group:
        """Instance of the HDF5 Control Device group"""
        return self._control_group

    @property
    def has_command_list(self) -> bool:
        """
        :return: :code:`True` if dataset utilizes a command list
        """
        # a command-list style configuration always carries a
        # 'command list' key (see HDFMapControlCLTemplate)
        return any("command list" in config for config in self._configs.values())

    @property
    def info(self) -> dict:
        """
        Control device dictionary of meta-info.  For example, ::

            info = {
                'group name': 'Control',
                'group path': '/foo/bar/Control',
                'contype': 'motion',
            }
        """
        return self._info

    @property
    def one_config_per_dset(self) -> bool:
        """
        :code:`'True'` if each control configuration has its own dataset
        """
        # one-to-one correspondence is inferred from matching counts
        return len(self.dataset_names) == len(self._configs)

    @property
    def subgroup_names(self) -> List[str]:
        """list of names of the HDF5 sub-groups in the control group"""
        sgroup_names = [
            name for name in self.group if isinstance(self.group[name], h5py.Group)
        ]
        return sgroup_names

    @property
    def device_name(self) -> str:
        """Name of Control device"""
        return self._info["group name"]

    @abstractmethod
    def construct_dataset_name(self, *args) -> str:
        """
        Constructs the dataset name corresponding to the input
        arguments.

        :return: name of dataset
        :raise: :exc:`NotImplementedError`
        """
        raise NotImplementedError

    @abstractmethod
    def _build_configs(self):
        """
        Gathers the necessary metadata and fills :data:`configs`.

        :raise: :exc:`NotImplementedError`
        """
        raise NotImplementedError
class HDFMapControlCLTemplate(HDFMapControlTemplate):
    # noinspection PySingleQuotedDocstring
    '''
    A modified :class:`HDFMapControlTemplate` template class for
    mapping control devices that record around the concept of a
    :ibf:`command list`.

    Any inheriting class should define :code:`__init__` as::

        def __init__(self, group: h5py.Group):
            """
            :param group: HDF5 group object
            """
            # initialize
            HDFMapControlCLTemplate.__init__(self, group)

            # define control type
            self.info['contype'] = ConType.waveform

            # define known command list RE patterns
            self._default_re_patterns = (
                r'(?P<FREQ>(\bFREQ\s)(?P<VAL>(\d+\.\d*|\.\d+|\d+\b)))',
            )

            # populate self.configs
            self._build_configs()

    .. note::

        * Any method that raises a :exc:`NotImplementedError` is
          intended to be overwritten by the inheriting class.
        * :code:`from bapsflib._hdf.maps.controls import ConType`
    '''
    def __init__(self, group: h5py.Group):
        """
        :param group: the control device HDF5 group object
        """
        HDFMapControlTemplate.__init__(self, group)

        # initialize internal 'command list' regular expression (RE)
        # patterns
        self._default_re_patterns = ()
        """tuple of default RE patterns for the control device"""
    @abstractmethod
    def _default_state_values_dict(self, config_name: str) -> dict:
        """
        Returns the default :code:`'state values'` dictionary for
        configuration *config_name*.

        :param str config_name: configuration name
        :raise: :exc:`NotImplementedError`

        :Example:

            .. code-block:: python

                # define default dict
                default_dict = {
                    'command': {
                        'dset paths':
                            self._configs[config_name]['dset paths'],
                        'dset field': ('Command index', ),
                        're pattern': None,
                        'command list':
                            self._configs[config_name]['command list'],
                        'cl str':
                            self._configs[config_name]['command list'],
                        'shape': (),
                    }
                }
                default_dict['command']['dtype'] = \\
                    default_dict['command']['command list'].dtype

                # return
                return default_dict
        """
        raise NotImplementedError
    def _construct_state_values_dict(
        self, config_name: str, patterns: Union[str, Iterable[str]]
    ) -> dict:
        """
        Returns a dictionary for
        :code:`configs[config_name]['state values']` based on the
        supplied RE patterns.  An empty dict is returned if the
        construction failed.

        :param config_name: configuration name
        :param patterns: list of RE pattern strings
        """
        # -- check requirements exist before continuing ----
        # get dataset
        dset_path = self._configs[config_name]["dset paths"][0]
        dset = self.group.get(dset_path)
        # ensure 'Command index' is a field
        if "Command index" not in dset.dtype.names:
            warn(f"Dataset '{dset_path}' does NOT have 'Command index' field")
            return {}
        # ensure 'Command index' is a field of scalars
        if dset.dtype["Command index"].shape != () or not np.issubdtype(
            dset.dtype["Command index"].type, np.integer
        ):
            warn(
                f"Dataset '{dset_path}' 'Command index' field is NOT a column of integers"
            )
            return {}
        # -- apply RE patterns to 'command list' ----
        success, sv_dict = self.clparse(config_name).apply_patterns(patterns)
        # regex was unsuccessful, return empty dict to signal failure
        if not success:
            return {}
        # -- complete `sv_dict` before return ----
        # 1. 'command list' and 'cl str' are tuples from clparse
        # 2. add 'dset paths'
        # 3. add 'dset field'
        # 4. add 'shape'
        # 5. 'dtype' defined by clparse.apply_patterns
        #
        for state in sv_dict:
            # add additional keys
            sv_dict[state]["dset paths"] = self._configs[config_name]["dset paths"]
            sv_dict[state]["dset field"] = ("Command index",)
            sv_dict[state]["shape"] = ()
        # return
        return sv_dict
    def clparse(self, config_name: str) -> CLParse:
        """
        Return instance of
        :class:`~bapsflib.lapd.controls.parsers.CLParse`
        for `config_name`.

        :param str config_name: configuration name
        """
        # retrieve command list
        cl = self._configs[config_name]["command list"]
        # define clparse and return
        return CLParse(cl)
    def reset_state_values_config(self, config_name: str, apply_patterns=False):
        """
        Reset the :code:`configs[config_name]['state values']`
        dictionary.

        :param config_name: configuration name
        :param bool apply_patterns: Set :code:`False` (DEFAULT) to
            reset to :code:`_default_state_values_dict(config_name)`.
            Set :code:`True` to rebuild dict using
            :attr:`_default_re_patterns`.
        """
        if apply_patterns:
            # get sv_dict dict
            sv_dict = self._construct_state_values_dict(
                config_name, self._default_re_patterns
            )
            # fall back to the default dict if RE construction failed
            if not bool(sv_dict):
                sv_dict = self._default_state_values_dict(config_name)
        else:
            # get default dict
            sv_dict = self._default_state_values_dict(config_name)
        # reset config
        self._configs[config_name]["state values"] = sv_dict
    def set_state_values_config(
        self, config_name: str, patterns: Union[str, Iterable[str]]
    ):
        """
        Rebuild and set
        :code:`configs[config_name]['state values']` based on the
        supplied RE *patterns*.

        :param config_name: configuration name
        :param patterns: list of RE strings
        """
        # construct dict for 'state values' dict
        sv_dict = self._construct_state_values_dict(config_name, patterns)
        # update 'state values' dict
        if not bool(sv_dict):
            # do nothing since default parsing was unsuccessful
            warn("RE parsing of 'command list' was unsuccessful, doing nothing")
        else:
            self._configs[config_name]["state values"] = sv_dict
|
{"hexsha": "a7a0ad5cfd0d903da1afbafe1e56ebf92ad86adf", "size": 17703, "ext": "py", "lang": "Python", "max_stars_repo_path": "bapsflib/_hdf/maps/controls/templates.py", "max_stars_repo_name": "BaPSF/bapsflib", "max_stars_repo_head_hexsha": "999c88f813d3a7c5c244a77873850c5c5a4042b8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-07-05T21:37:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T00:41:52.000Z", "max_issues_repo_path": "bapsflib/_hdf/maps/controls/templates.py", "max_issues_repo_name": "BaPSF/bapsflib", "max_issues_repo_head_hexsha": "999c88f813d3a7c5c244a77873850c5c5a4042b8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 54, "max_issues_repo_issues_event_min_datetime": "2018-08-19T00:28:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T17:16:22.000Z", "max_forks_repo_path": "bapsflib/_hdf/maps/controls/templates.py", "max_forks_repo_name": "rocco8773/bapsflib", "max_forks_repo_head_hexsha": "999c88f813d3a7c5c244a77873850c5c5a4042b8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-08-18T00:16:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T00:06:33.000Z", "avg_line_length": 34.2417794971, "max_line_length": 90, "alphanum_fraction": 0.5534090267, "include": true, "reason": "import numpy", "num_tokens": 3887}
|
# Build Jupyter notebooks from the Literate.jl sources in `src/`,
# emitting them (plus the project environment) into `notebooks/`.
root = joinpath(@__DIR__, "..")
using Pkg; Pkg.activate(root)

src = joinpath(root, "src")
out = joinpath(root, "notebooks")

using Literate

mkpath(out)

# Ship the environment files next to the generated notebooks.
for name in ["Project.toml", "Manifest.toml"]
    cp(joinpath(root, name), joinpath(out, name), force = true)
end

# Prepend an activation/instantiation cell to every notebook.
preprocess(s) = "using Pkg; Pkg.activate(\".\"); Pkg.instantiate()\n#-\n" * s

# Support files are copied over verbatim.
for name in ["utils.jl"]
    cp(joinpath(src, name), joinpath(out, name), force = true)
end

# Convert each chapter script into a notebook.
for name in ["intro.jl", "backandforth.jl", "forward.jl", "tracing.jl", "reverse.jl"]
    Literate.notebook(joinpath(src, name), out,
                      preprocess = preprocess,
                      credit = false)
end
|
{"hexsha": "c906d803b6a19705a2a1a59337a4b4a29d35a109", "size": 648, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/notebooks.jl", "max_stars_repo_name": "dhairyagandhi96/diff-zoo", "max_stars_repo_head_hexsha": "20c7d03f2900253880d2c5fd288ed36bb04aa783", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 760, "max_stars_repo_stars_event_min_datetime": "2018-11-05T14:50:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T09:45:22.000Z", "max_issues_repo_path": "src/notebooks.jl", "max_issues_repo_name": "dhairyagandhi96/diff-zoo", "max_issues_repo_head_hexsha": "20c7d03f2900253880d2c5fd288ed36bb04aa783", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-02-26T15:26:25.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-12T00:58:22.000Z", "max_forks_repo_path": "src/notebooks.jl", "max_forks_repo_name": "dhairyagandhi96/diff-zoo", "max_forks_repo_head_hexsha": "20c7d03f2900253880d2c5fd288ed36bb04aa783", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 66, "max_forks_repo_forks_event_min_datetime": "2019-01-30T18:50:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T17:55:57.000Z", "avg_line_length": 23.1428571429, "max_line_length": 82, "alphanum_fraction": 0.6219135802, "num_tokens": 196}
|
# try modis
# Plot the NDVI dataset of a MOD13C1 HDF4 granule on a cylindrical map.
# https://e4ftl01.cr.usgs.gov/MOLT/MOD13C1.006/2000.06.09/
# http://hdfeos.org/zoo/NSIDC/MOD10C1_Day_CMG_Snow_Cover.py
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import numpy as np
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import re
from mpl_toolkits.basemap import Basemap
FILE_NAME = 'MOD13C1.A2000161.006.2015147153014.hdf'
from pyhdf.SD import SD, SDC

# Open the HDF4 file and list the datasets it contains.
hdf = SD(FILE_NAME, SDC.READ)
print(hdf.info())
datasets_dic = hdf.datasets()
for idx, sds in enumerate(datasets_dic.keys()):
    print(idx, sds)

# Read dataset.
data2D = hdf.select('CMG 0.05 Deg 16 days NDVI')
data = data2D[:, :].astype(np.float64)

# Read global attribute.
fattrs = hdf.attributes(full=1)
ga = fattrs["StructMetadata.0"]
gridmeta = ga[0]

# Read projection parameters.
# The needed information is in a global attribute called 'StructMetadata.0'.
# Use regular expressions to tease out the extents of the grid.
ul_regex = re.compile(r'''UpperLeftPointMtrs=\(
                          (?P<upper_left_x>[+-]?\d+\.\d+)
                          ,
                          (?P<upper_left_y>[+-]?\d+\.\d+)
                          \)''', re.VERBOSE)
match = ul_regex.search(gridmeta)
# BUGFIX: the deprecated `np.float` alias was removed in NumPy 1.24;
# the builtin `float` is the drop-in replacement.
# The /1e6 scales the stored values to degrees (grid is already lat/lon).
x0 = float(match.group('upper_left_x')) / 1e6
y0 = float(match.group('upper_left_y')) / 1e6

lr_regex = re.compile(r'''LowerRightMtrs=\(
                          (?P<lower_right_x>[+-]?\d+\.\d+)
                          ,
                          (?P<lower_right_y>[+-]?\d+\.\d+)
                          \)''', re.VERBOSE)
match = lr_regex.search(gridmeta)
x1 = float(match.group('lower_right_x')) / 1e6
y1 = float(match.group('lower_right_y')) / 1e6

ny, nx = data.shape
xinc = (x1 - x0) / nx
yinc = (y1 - y0) / ny

# Construct the grid.  It's already in lat/lon.
x = np.linspace(x0, x0 + xinc*nx, nx)
y = np.linspace(y0, y0 + yinc*ny, ny)
lon, lat = np.meshgrid(x, y)

# Retrieve attributes (each hdf attribute is a (value, index) pair).
attrs = data2D.attributes(full=1)
lna = attrs["long_name"]
long_name = lna[0]
aoa = attrs["add_offset"]
add_offset = aoa[0]
fva = attrs["_FillValue"]
_FillValue = fva[0]
sfa = attrs["scale_factor"]
scale_factor = sfa[0]
ua = attrs["units"]
units = ua[0]

fig = plt.figure(dpi=1000)
m = Basemap(projection='cyl', resolution='l',
            llcrnrlat=-90, urcrnrlat = 90,
            llcrnrlon=-180, urcrnrlon = 180)
#m.arcgisimage(service='World_Shaded_Relief', xpixels = 1500, verbose= True)
#m.drawcoastlines(linewidth=0.1)
#m.drawparallels(np.arange(-90., 120., 30.), labels=[1, 0, 0, 0])
#m.drawmeridians(np.arange(-180, 180., 45.), labels=[0, 0, 0, 1])

# Render the image in the projected coordinate system.
# (::2 strides halve the resolution to keep the plot tractable.)
m.pcolormesh(lon[::2, ::2], lat[::2, ::2], data[::2, ::2],
             latlon=True, cmap='YlOrRd')
basename = os.path.basename(FILE_NAME)
plt.title('{0}\n{1}'.format(basename, long_name))
fig = plt.gcf()
# plt.show()
pngfile = "{0}.py.png".format(basename)
fig.savefig(pngfile)
|
{"hexsha": "e28e664eb7704bacad9a885dda9a31c3298d6b14", "size": 2961, "ext": "py", "lang": "Python", "max_stars_repo_path": "gis/hdf_tests/modis_test.py", "max_stars_repo_name": "natelowry/data_visualization", "max_stars_repo_head_hexsha": "8d01b6ae5337ff5c7a4eda59e657a53d19af5f32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gis/hdf_tests/modis_test.py", "max_issues_repo_name": "natelowry/data_visualization", "max_issues_repo_head_hexsha": "8d01b6ae5337ff5c7a4eda59e657a53d19af5f32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gis/hdf_tests/modis_test.py", "max_forks_repo_name": "natelowry/data_visualization", "max_forks_repo_head_hexsha": "8d01b6ae5337ff5c7a4eda59e657a53d19af5f32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-08T03:20:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-08T03:20:45.000Z", "avg_line_length": 28.7475728155, "max_line_length": 78, "alphanum_fraction": 0.6403242148, "include": true, "reason": "import numpy", "num_tokens": 952}
|
(* -------------------------------------------------------------------- *)
(* ------- *) Require Import Setoid Morphisms.
From mathcomp Require Import all_ssreflect all_algebra.
From mathcomp.analysis Require Import boolp reals realseq realsum distr.
From xhl.pwhile Require Import notations inhabited pwhile psemantic passn range.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Unset SsrOldRewriteGoalsOrder.
Import GRing.Theory Num.Theory Order.Theory.
Local Open Scope ring_scope.
Local Open Scope syn_scope.
Local Open Scope sem_scope.
Local Open Scope mem_scope.
(* -------------------------------------------------------------------- *)
Section Couplings.
Context {A B : choiceType} (μ1 : Distr A) (μ2 : Distr B).
(* [iscoupling ν] states that ν is a probabilistic coupling of μ1 and
   μ2: its first and second marginals are exactly μ1 and μ2. *)
Definition iscoupling (ν : Distr (A * B)) :=
dfst ν = μ1 /\ dsnd ν = μ2.
End Couplings.
(* -------------------------------------------------------------------- *)
Section CouplingsTheory.
Context {A B C D : choiceType}.
(* Couplings are stable under pointwise-equal marginals. *)
Lemma iscoupling_eq (μ1 μ2 μ1' μ2' : Distr _) (ν : Distr (A * B)) :
μ1 =1 μ1' -> μ2 =1 μ2' -> iscoupling μ1 μ2 ν -> iscoupling μ1' μ2' ν.
Proof. by do 2! move=> /distr_eqP->. Qed.
(* Any joint distribution couples its own marginals. *)
Lemma iscoupling_prod (μ : Distr (A * B)) :
iscoupling (dfst μ) (dsnd μ) μ.
Proof. by []. Qed.
(* The null distribution couples null with null. *)
Lemma iscoupling_dnull : @iscoupling A B dnull dnull dnull.
Proof. by split; rewrite dmarginE dlet_null. Qed.
(* Dirac distributions are coupled by the Dirac on the pair. *)
Lemma iscoupling_dunit a b :
@iscoupling A B (dunit a) (dunit b) (dunit (a, b)).
Proof. by split; rewrite dmarginE dlet_unit. Qed.
(* Couplings are symmetric, via swapping the pair components. *)
Lemma iscoupling_swap (μ1 μ2 : Distr A) (ν : Distr (A * A)) :
iscoupling μ1 μ2 ν -> iscoupling μ2 μ1 (dswap ν).
Proof.
case=> <- <-; split; apply/distr_eqP => m;
by rewrite (dfst_dswap, dsnd_dswap).
Qed.
(* Couplings compose along monadic bind (dlet): a coupling of the
   sources and pointwise couplings of the continuations (on the
   support of ν) give a coupling of the bound distributions. *)
Lemma iscoupling_dlet
    (μ1 μ2 : Distr _) (ν : Distr (A * B))
    (θ1 θ2 : _ -> Distr _) (ν' : _ -> Distr (C * D)) :
  iscoupling μ1 μ2 ν
  -> (forall x, x \in dinsupp ν ->
         iscoupling (θ1 x.1) (θ2 x.2) (ν' x))
  -> iscoupling
       (\dlet_(x <- μ1) (θ1 x))
       (\dlet_(x <- μ2) (θ2 x))
       (\dlet_(x <- ν ) (ν' x)).
Proof.
move=> [eq1 eq2] hC; split; rewrite !dmargin_dlet; subst μ1 μ2.
+ by rewrite dlet_dmargin; apply/eq_in_dlet => // x /hC [<- _].
+ by rewrite dlet_dmargin; apply/eq_in_dlet => // x /hC [_ <-].
Qed.
(* Couplings pass to limits of (pointwise monotone) chains. *)
Lemma iscoupling_dlim
  (μ1 μ2 : nat -> Distr _) (ν : nat -> Distr (A * B)) :
  (forall n, iscoupling (μ1 n) (μ2 n) (ν n))
  -> (forall n m, (n <= m)%N -> ν n <=1 ν m)
  -> iscoupling (dlim μ1) (dlim μ2) (dlim ν).
Proof.
move=> hC mono; rewrite /iscoupling !dmarginE !dlet_lim //.
by split; apply/eq_dlim => n; case: (hC n).
Qed.
End CouplingsTheory.
(* -------------------------------------------------------------------- *)
Implicit Types P Q S I : rassn.
Implicit Types c : cmd.
(* -------------------------------------------------------------------- *)
(* pRHL judgment: from any pair of initial memories satisfying P,
   the output distributions of c1 and c2 can be coupled by some ν
   whose support is contained in the postcondition Q. *)
Definition prhl P c1 c2 Q :=
forall m : rmem, P m ->
exists2 ν,
iscoupling (ssem c1 m.1) (ssem c2 m.2) ν
& range Q ν.
(* -------------------------------------------------------------------- *)
(* Constructive (sigma-type) form of [prhl], obtained via choice;
   used to extract the witness coupling in later proofs. *)
Lemma prhlw P c1 c2 Q m :
prhl P c1 c2 Q -> P m ->
{ ν | iscoupling (ssem c1 m.1) (ssem c2 m.2) ν & range Q ν }.
Proof. move=> h Pm.
have: exists ν, iscoupling (ssem c1 m.1) (ssem c2 m.2) ν /\ range Q ν.
+ by case: (h _ Pm) => ν h1 h2; exists ν; split.
by case/cid=> ν [h1 h2]; exists ν.
Qed.
(* -------------------------------------------------------------------- *)
(* Programs may be replaced by semantically equal ones (under P). *)
Lemma prhl_sem P c1 c2 c'1 c'2 Q :
   (forall m, P m -> ssem c1 m.1 = ssem c'1 m.1)
-> (forall m, P m -> ssem c2 m.2 = ssem c'2 m.2)
-> prhl P c1 c2 Q
-> prhl P c'1 c'2 Q.
Proof.
move=> eq1 eq2 h m Pm; case: (h _ Pm) => [ν hC hR].
by exists ν => //; rewrite -!(eq1, eq2).
Qed.
(* -------------------------------------------------------------------- *)
(* Rule of consequence: strengthen the pre-, weaken the postcondition. *)
Lemma prhl_conseq P P' c1 c2 Q Q' :
   (forall m, P' m -> P m)
-> (forall m, Q m -> Q' m)
-> prhl P c1 c2 Q
-> prhl P' c1 c2 Q'.
Proof.
move=> hP hQ h m /hP /h [ν hC hR]; exists ν => //.
by apply/range_weaken/hQ: hR.
Qed.
(* -------------------------------------------------------------------- *)
(* Symmetry of the judgment: swap the two programs and both assertions. *)
Lemma prhl_swap P c1 c2 Q :
  prhl P c2 c1 Q <-> prhl (pswap P) c1 c2 (pswap Q).
Proof.
move: P Q c1 c2 => [:hG] P Q c1 c2; split; last first.
+ move: P Q c1 c2; abstract: hG => P Q c1 c2 h -[m1 m2] Pm.
  case: (h (m2, m1))=> //= ν [hC1 hC2] hR; exists (dswap ν) => /=.
  * by apply/iscoupling_swap.
  * by move/range_pswap: hR; apply/range_weaken; case.
+ by move=> h; apply/hG; apply/prhl_conseq: h; case.
Qed.
(* -------------------------------------------------------------------- *)
(* A one-sided implication in the postcondition yields an inequality
   of event probabilities in the two output distributions. *)
Lemma prhl_lepr P c1 c2 (E1 E2 : assn) m:
  P m
  -> prhl P c1 c2 [pred m | E1 m.1 ==> E2 m.2]
  -> \P_[ssem c1 m.1] E1 <= \P_[ssem c2 m.2] E2.
Proof.
case: m => /= m1 m2 Pm h; case/h: Pm => {h} /= ν [<- <-] h.
by rewrite !pr_dmargin le_in_pr //= => m /h /implyP.
Qed.
(* -------------------------------------------------------------------- *)
(* An equivalence in the postcondition yields equality of event
   probabilities (by antisymmetry, using prhl_lepr on both sides). *)
Lemma prhl_eqpr P c1 c2 (E1 E2 : assn) m:
  P m
  -> prhl P c1 c2 [pred m | E1 m.1 == E2 m.2]
  -> \P_[ssem c1 m.1] E1 = \P_[ssem c2 m.2] E2.
Proof.
case: m => [m1 m2] Pm h; rewrite (rwP eqP) eq_le (@prhl_lepr P) //=.
+ apply/prhl_conseq: h => // {m1 m2 Pm} -[m1 m2] /=.
  by move/eqP=> ->; apply/implyP.
apply/(prhl_lepr (P := pswap P) (m := (m2, m1))) => //.
move/prhl_swap: h; apply/prhl_conseq=> // {m1 m2 Pm} -[m1 m2] /=.
by move/eqP=> ->; apply/implyP.
Qed.
(* -------------------------------------------------------------------- *)
(* Ex falso: anything is derivable from an unsatisfiable precondition. *)
Lemma prhl_exfalso c1 c2 Q : prhl pred0 c1 c2 Q.
Proof. by []. Qed.
(* -------------------------------------------------------------------- *)
(* Two aborting programs are related by any pre/post pair (null coupling). *)
Lemma prhl_abort P c1 c2 Q : prhl P abort abort Q.
Proof.
move=> m _; exists dnull; last by apply/range_dnull.
by rewrite !ssemE; split; rewrite dmarginE dlet_null.
Qed.
(* -------------------------------------------------------------------- *)
(* Case analysis on an auxiliary assertion A over the initial memories. *)
Lemma prhl_case P A c1 c2 Q :
   prhl (P /\ A)%A c1 c2 Q
-> prhl (P /\ ~ A)%A c1 c2 Q
-> prhl P c1 c2 Q.
Proof.
move=> hA hNA m Pm; case/boolP: (A m) => [Am | NAm].
+ by apply/hA; rewrite -(rwP andP).
+ by apply/hNA; rewrite -(rwP andP).
Qed.
(* -------------------------------------------------------------------- *)
(* skip ~ skip preserves any assertion (Dirac coupling on the pair). *)
Lemma prhl_skip P : prhl P skip skip P.
Proof.
move=> m Pm; exists (dunit m); last by apply/range_dunit.
by rewrite !ssemE -!dmargin_dunit; apply/iscoupling_prod.
Qed.
(* -------------------------------------------------------------------- *)
(* One-sided assignment rule: the precondition is Q with the written
   variable substituted on the left memory (weakest-precondition style). *)
Lemma prhl_assignL {t : ihbType} (x : vars t) (e : expr t) Q :
  prhl [pred m : rmem | Q m.[~1 x <- `[{ e }] m.1]] (x <<- e) skip Q.
Proof.
move=> m /= Qmxe; exists (dunit (m.[~1 x <- `[{ e }] m.1])); last first.
+ by apply/range_dunit.
rewrite !ssemE; apply/(iscoupling_eq _ _ (iscoupling_prod _)).
+ by apply/distr_eqP; rewrite dmargin_dunit -/(mselect '1 _) mselect_mset.
+ by apply/distr_eqP; rewrite dmargin_dunit -/(mselect '2 _) mselect_mset.
Qed.
(* -------------------------------------------------------------------- *)
(* One-sided random-sampling rule: requires the sampled distribution to
   be lossless (weight 1) and Q to hold for every sampled value. *)
Lemma prhl_rndL {t : ihbType} P (x : vars t) (d : dexpr t) Q :
   P =1 [pred m : rmem
           | dweight (`[{ d }] m.1) == 1
           & `[< range [pred v | Q m.[~1 x <- v]] (`[{ d }] m.1) >]]
  -> prhl P (x <$- d) skip Q.
Proof.
move=> PE -[m1 m2] /=; rewrite {}PE => /andP[/= /eqP wgt1] /asboolP hrg.
rewrite !ssemE; set μ := `[{ d }] m1.
pose ν := \dlet_(v <- μ) dunit (m1.[x <- v], m2); exists ν.
+ apply/(iscoupling_eq _ _ (iscoupling_prod _)).
  * apply/distr_eqP; rewrite dmargin_dlet; apply/eq_in_dlet=> //=.
    by move=> v _; rewrite dmargin_dunit.
  * move=> m; rewrite dmargin_dlet -[RHS]mul1r -wgt1 -dletC.
    apply/distr_eqP: m; apply/eq_in_dlet => // v _.
    by rewrite dmargin_dunit.
+ case=> m'1 m'2 /dinsupp_dlet[v vμ]; rewrite dunit1E.
  rewrite pnatr_eq0 eqb0 negbK; case/eqP=> <- <-.
  by move/(_ v vμ): hrg => /= {vμ}; case: {ν} x v.
Qed.
(* -------------------------------------------------------------------- *)
(* Two-sided conditional rule: the guards must agree under P, then the
   two "then" branches and the two "else" branches are related. *)
Lemma prhl_if P e1 e2 c1 c'1 c2 c'2 Q :
   prhl (P /\ `[{ e1#'1 && e2#'2 }])%A c1 c2 Q
-> prhl (P /\ `[{ ~~ e1#'1 && ~~ e2#'2 }])%A c'1 c'2 Q
-> prhl (P /\ `[{ e1#'1 =b e2#'2 }])%A
     (If e1 then c1 else c'1)
     (If e2 then c2 else c'2)
     Q.
Proof.
move=> h1 h2 m /andP[/= Pm /eqP]; rewrite !ssemE => eqe.
rewrite -eqe; case: ifPn => hc.
+ by apply/h1 => /=; rewrite Pm !ssemE -eqe hc.
+ by apply/h2 => /=; rewrite Pm !ssemE -eqe hc.
Qed.
(* -------------------------------------------------------------------- *)
(* One-sided conditional rule: only the left program branches. *)
Lemma prhl_ifL P e c1 c2 c Q :
   prhl (P /\ `[{ e#'1 }])%A c1 c Q
-> prhl (P /\ `[{ ~~ e#'1 }])%A c2 c Q
-> prhl P (If e then c1 else c2) c Q.
Proof.
move=> h1 h2 m Pm; rewrite !ssemE; case: ifPn => he.
+ by apply/h1 => /=; rewrite ssemE Pm.
+ by apply/h2 => /=; rewrite ssemE Pm.
Qed.
(* -------------------------------------------------------------------- *)
(* Sequence rule: compose through an intermediate assertion R; the
   coupling is built by binding (dlet) the intermediate coupling with
   pointwise couplings extracted via prhlw on the support of ν. *)
Lemma prhl_seq R P c1 c1' c2 c2' Q :
   prhl P c1 c2 R
-> prhl R c1' c2' Q
-> prhl P (c1 ;; c1') (c2 ;; c2') Q.
Proof.
move=> h1 h2 m Pm; case: (h1 _ Pm) => ν hC hR.
pose ν' m :=
  if @idP (m \in dinsupp ν) is ReflectT Rm then
    tag (prhlw h2 (hR _ Rm))
  else dnull.
exists (\dlet_(m <- ν) ν' m); last first.
+ apply/(range_dlet hR) => m' Rm'; rewrite /ν'.
  case: {-}_ / idP; first by move=> p; case: prhlw.
  by move=> _ x /dinsuppP; rewrite dnullE.
rewrite !ssemE; apply/iscoupling_dlet => //.
move=> m' hm'; rewrite /ν'; case: {-}_ / idP => //.
by move=> p; case: prhlw.
Qed.
(* -------------------------------------------------------------------- *)
(* While rule: with invariant I forcing the two guards to agree, and the
   bodies preserving I when both guards hold, the loops are related and
   on exit I holds together with the negation of both guards.  The
   coupling of the loop semantics is built as the limit (dlim) of the
   couplings of the finite unrollings νn. *)
Lemma prhl_while I e1 e2 c1 c2 :
   (forall m : rmem, I m -> `[{ e1#'1 =b e2#'2 }] m)
-> (prhl (I /\ `[{ e1#'1 && e2#'2 }])%A c1 c2 I)
->
  prhl
    I
    (While e1 Do c1)
    (While e2 Do c2)
    (I /\ `[{ ~~ e1#'1}] /\ `[{ ~~ e2#'2 }])%A.
Proof. set J := (I /\ _)%A => hs h.
(* ν1: one coupled step of the two bodies when J holds, else identity *)
pose ν1 m := if @idP (J m) is ReflectT Rm then tag (prhlw h Rm) else dunit m.
(* νn: n coupled iterations *)
pose νn := fix νn n m {struct n} :=
  if n is n.+1 then \dlet_(m' <- νn n m) ν1 m' else dunit m.
pose νe n m := \dlet_(m' <- νn n m) if esem e1 m'.1 then dnull else dunit m'.
move=> m Im; pose ν n := νe n m.
have rg_νn: forall n, range I (νn n m).
+ elim=> [|n ih] /=; first by apply/range_dunit.
  apply/(range_dlet ih) => {Im ν ih} m Im; rewrite /ν1.
  case: {-}_ / idP; first by move=> p; case: prhlw.
  by move=> _; apply/range_dunit.
have mono_ν n : ν n <=1 ν n.+1.
+ move=> /= m'; rewrite /ν /νe dlet_dlet -/(νn _ _).
  apply/le_dlet => //= {}m' Im' m''.
  case: ifPn => [he1|hNe1]; first by apply/lef_dnull.
  rewrite dunit1E; case: eqP => /= [<-|_]; last by apply/ge0_mu.
  have /distr_eqP ->: ν1 m' =1 dunit m'.
  * rewrite /ν1; case: {-}_ / idP => // p; move: {-}p.
    by rewrite /J /= ssemE (negbTE hNe1) andbF.
  by rewrite dlet_unit (negbTE hNe1) dunit1E eqxx.
exists (dlim ν).
+ rewrite !ssemE;
    rewrite -(iffLR (distr_eqP _ _) (dlim_bump (fun _ => _ m.1)));
    rewrite -(iffLR (distr_eqP _ _) (dlim_bump (fun _ => _ m.2))).
  apply/iscoupling_dlim => [n|n k le_nk]; last first.
  * move=> m'; rewrite -[k](subnK le_nk); elim: (_ - _)%N => //.
    by move=> n' ihn'; rewrite addSn; apply/(le_trans ihn').
  rewrite !whilen_iterc !ssemE; apply/iscoupling_dlet => /=; last first.
  * move=> m' Im'; rewrite !ssemE; move/rg_νn/hs: Im'.
    rewrite !ssemE => /eqP <-; case: ifPn => _.
    - by apply/iscoupling_dnull.
    - by case: m' => a b; apply/iscoupling_dunit.
  elim: n => /= [|n ihn]; rewrite !(iterc0, itercSr) !ssemE.
  * by case: {+}m => a b; apply/iscoupling_dunit.
  apply/iscoupling_dlet => //= m' /rg_νn Im'; move/hs: (Im').
  rewrite !ssemE => /eqP eqe; rewrite -eqe /ν1.
  case: {-}_ / idP => /= [p|]; first case/and3P: {+}p.
  * by rewrite !ssemE => _ -> _; case: prhlw.
  rewrite !ssemE -eqe Im' /= andbb => /negP/negbTE => ->/=.
  by case: {+}m' => a b; apply/iscoupling_dunit.
+ apply/range_dlim => n; apply/(range_dlet (rg_νn n)) => m' Im'.
  case: ifPn => [he1|hNe1]; first by apply/range_dnull.
  apply/range_dunit=> /=; rewrite Im' /= !ssemE.
  by move: (hs _ Im'); rewrite !ssemE => /eqP <-; rewrite hNe1.
Qed.
|
{"author": "strub", "repo": "xhl", "sha": "5c4a4c0691438a2be9b650372ba95aca09ba3c56", "save_path": "github-repos/coq/strub-xhl", "path": "github-repos/coq/strub-xhl/xhl-5c4a4c0691438a2be9b650372ba95aca09ba3c56/prhl/prhl.v"}
|
!=======================================================================
! RECIPROCAL
!=======================================================================
module reciprocal_inp
  ! Reads and writes 'reciprocal.inp', the control file that carries the
  ! k-space (reciprocal-space) setup between program modules: lattice
  ! vectors, k-point mesh, atom positions/potentials and the KKR
  ! structure-factor parameters.
  ! k-space variables :
  use controls !KJ 8/06
  use struct, nphstr => nph
  use kklist,only: nkp,usesym,nkx,nky,nkz,ktype
  use strfacs,only: streta,strrmax,strgmax,init_strfacs
  implicit none
  integer icorehole ! 1 = include core hole, 0 = none (mirrored into logical 'corehole' on read)
  real*8 streimag ! additional broadening for calculation KKR structure factors ; not recommended
  character(*),parameter,private :: filename='reciprocal.inp'

contains

  subroutine reciprocal_write
    ! Write the current settings to 'reciprocal.inp'. The bulk of the file
    ! is only written for reciprocal-space runs (spacy==0).
    !KJ next file added 8/06
    integer i
    open (file=filename, unit=3, status='unknown')
    ! in which space are we?
    write(3,10) 'spacy'
    write(3,20) spacy
    if(spacy.eq.0) then
      write(3,10) 'lattice vectors (in A, in Carthesian coordinates)'
      write(3,30) a1
      write(3,30) a2
      write(3,30) a3
      write(3,10) 'Volume scaling factor (A^3); eimag; core hole'
      ! NOTE(review): fixed placeholders (-1, 0, 1) are written here while
      ! reciprocal_read expects celvin, streimag, cholestrength on this
      ! line -- confirm this write/read asymmetry is intentional.
      write(3,30) dble(-1),dble(0),dble(1)
      write(3,10) 'lattice type (P,I,F,R,B,CXY,CYZ,CXZ)'
      write(3,10) latticename
      write(3,10) '#atoms in unit cell ; position absorber ; corehole?'
      write(3,20) nats,absorber,icorehole
      write(3,10) '# k-points total/x/y/z ; ktype; use symmetry?'
      write(3,*) nkp,nkx,nky,nkz,ktype,usesym ! format line 20 limits integer to 4 positions - not enough for nkp!
      write(3,10) 'ppos'
      do i=1,nats
        write(3,30) ppos(:,i)
      enddo
      write(3,10) 'ppot'
      !KJ bugfix 5/2012: It's important not to use formatting when there are more atoms than fit on one line!!
      write(3,*) ppot
      write(3,10) 'streta,strgmax,strrmax'
      write(3,30) streta,strgmax,strrmax
    endif
    close(3)
    ! standard formats for string, integers and real numbers
10  format(a)
20  format (20i4)
30  format (9f13.5)
  end subroutine reciprocal_write

  subroutine reciprocal_read(celvin)
    ! Read 'reciprocal.inp' back in.  On any open/read error or premature
    ! EOF, control jumps to label 167 and the run falls back to real
    ! space (spacy=1).
    use struct, nphstr => nph
    integer i
    real*8,intent(out) :: celvin
    open (3,file=filename,status='unknown',err=167)
    read(3,*,end=167,err=167)
    read(3,*,end=167,err=167) spacy
    if(spacy.eq.0) then
      read(3,*) ; read(3,*) a1(:)
      read(3,*) a2(:)
      read(3,*) a3(:)
      read(3,*) ; read(3,*) celvin,streimag,cholestrength
      read(3,*) ; read(3,*) latticename
      ! first character of the lattice name selects the Bravais type
      lattice=latticename(1:1)
      read(3,*) ; read(3,*) nats,absorber,icorehole
      read(3,*) ; read(3,*) nkp,nkx,nky,nkz,ktype,usesym
      read(3,*)
      !Careful: the next statement used to be "if size(ppot).eq.0". However, on ifort size(ppot)=0 but on gfortran it =1!!
      !Hence the new instruction.
      !I wish if(allocated(ppot)) would work here; I don't understand why it doesn't.
      if(size(ppot).lt.nats) call init_struct(nats) !KJ 7-09 bugfix call this only once ; I can't seem to use "allocated(ppos)" here?
      do i=1,nats
        read(3,*) ppos(:,i)
      enddo
      read(3,*) ; read(3,*) ppot
      read(3,*) ; read(3,*) streta,strgmax,strrmax
      ! translate the integer flag into the logical flag used elsewhere
      if(icorehole.eq.1) then
        corehole=.true.
      else
        corehole=.false.
      endif
    endif
    return
    ! error/EOF handler: no usable input file -> real-space run
167 spacy=1
    return
  end subroutine reciprocal_read

  subroutine reciprocal_init
    ! Set module defaults before any input file is read.
    call init_controls
    call init_strfacs
    icorehole = 1 ! use core hole
    streimag = dble(0) ! no extra broadening for KKR struc factors
    cholestrength = dble(1) ! don't mess with core hole
  end subroutine reciprocal_init

end module reciprocal_inp
|
{"hexsha": "ce282e4e26f8d9bb54a336eaba1a39a1dd951bd7", "size": 3566, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/OPCONSAT/oca_reciprocal_inp.f90", "max_stars_repo_name": "xraypy/feff85exafs", "max_stars_repo_head_hexsha": "ec8dcb07ca8ee034d0fa7431782074f0f65357a5", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2016-01-05T21:29:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T08:59:17.000Z", "max_issues_repo_path": "src/OPCONSAT/oca_reciprocal_inp.f90", "max_issues_repo_name": "xraypy/feff85exafs", "max_issues_repo_head_hexsha": "ec8dcb07ca8ee034d0fa7431782074f0f65357a5", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2015-01-04T18:37:30.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-07T12:06:12.000Z", "max_forks_repo_path": "src/OPCONSAT/oca_reciprocal_inp.f90", "max_forks_repo_name": "xraypy/feff85exafs", "max_forks_repo_head_hexsha": "ec8dcb07ca8ee034d0fa7431782074f0f65357a5", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2016-01-05T21:29:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T13:11:01.000Z", "avg_line_length": 35.3069306931, "max_line_length": 134, "alphanum_fraction": 0.605159843, "num_tokens": 1156}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# sys.path.append('/home/dev1/opencv/lib/')
sys.path.append('/usr/local/lib/python2.7/site-packages')
# sys.path.append('/home/frappe/frappe-bench-dimela/env/lib/python2.7/site-packages')
import numpy as np
import cv2
import csv
import glob
class Searcher:
    """Query a pre-computed CSV feature index by chi-squared distance."""

    def __init__(self, indexPath):
        # store our index path (CSV rows of: imageID, feature1, feature2, ...)
        self.indexPath = indexPath

    def search(self, queryFeatures, limit=10):
        """Return up to ``limit`` (distance, imageID) pairs, closest first.

        Each index row is ``imageID,f1,f2,...``; the distance between the
        stored features and ``queryFeatures`` is chi-squared.
        """
        results = {}
        # the `with` block closes the file for us -- the original's extra
        # f.close() inside the block was redundant and has been removed
        with open(self.indexPath) as f:
            reader = csv.reader(f)
            for row in reader:
                # parse out the image ID and features, then compute the
                # chi-squared distance between the indexed features and
                # the query features; smaller distance = more similar
                features = [float(x) for x in row[1:]]
                results[row[0]] = self.chi2_distance(features, queryFeatures)
        # sort so that the smallest distances (most relevant images)
        # come first, then truncate to `limit` entries
        results = sorted([(v, k) for (k, v) in results.items()])
        return results[:limit]

    def chi2_distance(self, histA, histB, eps=1e-10):
        """Chi-squared distance between two equal-length histograms.

        ``eps`` avoids division by zero on empty bins. Vectorized with
        numpy instead of summing a Python list comprehension.
        """
        a = np.asarray(histA, dtype=float)
        b = np.asarray(histB, dtype=float)
        return 0.5 * np.sum(((a - b) ** 2) / (a + b + eps))
class ColorDescriptor:
    """Compute a region-based HSV color histogram feature vector."""

    def __init__(self, bins):
        # number of bins per channel for the 3D HSV histogram
        self.bins = bins

    def describe(self, image):
        """Return the concatenated histograms of five image regions.

        The image is split into four corner segments plus an elliptical
        center region; one normalized histogram is extracted per region.
        """
        # convert the image to the HSV color space and initialize the
        # feature vector
        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        features = []
        # grab the dimensions and compute the center of the image
        (h, w) = image.shape[:2]
        (cX, cY) = (int(w * 0.5), int(h * 0.5))
        # four rectangles: top-left, top-right, bottom-right, bottom-left
        segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h),
                    (0, cX, cY, h)]
        # elliptical mask representing the center of the image.
        # BUG FIX: '/' yields floats on Python 3 and cv2.ellipse requires
        # integer axes, so use floor division.
        (axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
        ellipMask = np.zeros(image.shape[:2], dtype="uint8")
        cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)
        # loop over the segments
        for (startX, endX, startY, endY) in segments:
            # mask for each corner, minus the elliptical center
            cornerMask = np.zeros(image.shape[:2], dtype="uint8")
            cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
            cornerMask = cv2.subtract(cornerMask, ellipMask)
            # extract a color histogram from the region and update the
            # feature vector
            hist = self.histogram(image, cornerMask)
            features.extend(hist)
        # histogram of the elliptical center region
        hist = self.histogram(image, ellipMask)
        features.extend(hist)
        return features

    def histogram(self, image, mask):
        """3D HSV histogram of the masked region, normalized and flattened."""
        hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins,
                            [0, 180, 0, 256, 0, 256])
        # NOTE(review): cv2.normalize(hist, False) is the OpenCV 2.4 calling
        # convention; OpenCV 3+ expects cv2.normalize(hist, hist).  Confirm
        # against the installed cv2 version before changing.
        hist = cv2.normalize(hist, False).flatten()
        return hist
def get_data():
    """Return the fixed filesystem locations used by the indexing scripts."""
    return {
        "dataset1": "/home/frappe/frappe-bench-dimela/sites/dimela/public/files",
        "dataset2": "/home/frappe/frappe-bench-dimela/sites/dimela/private/files",
        "index": "/home/frappe/frappe-bench-dimela/sites/dimela/dataset.csv",
        "result_path": "/home/frappe/frappe-bench-dimela/sites/dimela/private/files",
    }
def loopDir(path, type, output, cd):
    """Index every ``*.{type}`` image under ``path`` into ``output``.

    Writes one CSV line per image: ``imageID,f1,f2,...``.  Unreadable or
    corrupt images are skipped instead of aborting the whole run.
    """
    # use glob to grab the image paths and loop over them
    for imagePath in glob.glob(path + "/*." + str(type)):
        try:
            # extract the image ID (i.e. the unique filename) from the
            # image path and load the image itself
            imageID = imagePath[imagePath.rfind("/") + 1:]
            image = cv2.imread(imagePath)
            if image is None:
                # cv2.imread returns None for unreadable files
                continue
            # describe the image and write the features to file
            features = cd.describe(image)
            features = [str(f) for f in features]
            output.write("%s,%s\n" % (imageID, ",".join(features)))
        except Exception:
            # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt
            # and SystemExit; only per-image processing errors are skipped
            continue
# Build the CSV feature index for every PNG and JPG in both dataset folders.
pyimagesearch = get_data()
# initialize the color descriptor (8 hue, 12 saturation, 3 value bins)
cd = ColorDescriptor((8, 12, 3))
# open the output index file for writing
output = open(pyimagesearch["index"], "w")
# same traversal order as before: png over both datasets, then jpg
for ext in ("png", "jpg"):
    for dataset_key in ("dataset1", "dataset2"):
        loopDir(pyimagesearch[dataset_key], ext, output, cd)
# close the index file
output.close()
|
{"hexsha": "cfdd6d3b47c319dd323a634a8b7e4f5274bcfabd", "size": 5944, "ext": "py", "lang": "Python", "max_stars_repo_path": "erpbg/erpbg/pyimagesearch/update.py", "max_stars_repo_name": "InspireSoft/erpbg", "max_stars_repo_head_hexsha": "6da33242dc5b6a52e19cd6c17af2262dd33b6b41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "erpbg/erpbg/pyimagesearch/update.py", "max_issues_repo_name": "InspireSoft/erpbg", "max_issues_repo_head_hexsha": "6da33242dc5b6a52e19cd6c17af2262dd33b6b41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "erpbg/erpbg/pyimagesearch/update.py", "max_forks_repo_name": "InspireSoft/erpbg", "max_forks_repo_head_hexsha": "6da33242dc5b6a52e19cd6c17af2262dd33b6b41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4662576687, "max_line_length": 96, "alphanum_fraction": 0.6012786003, "include": true, "reason": "import numpy", "num_tokens": 1505}
|
# -*- coding:utf8 -*-
# @TIME : 2021/3/18 10:27
# @Author : SuHao
# @File : model.py
import torch.nn as nn
from utils.parse_config import *
from utils.utils import *
from itertools import chain
def creat_modules(module_defs):
    """
    Constructs module list of layer blocks from module configuration in module_defs.

    Builds the network from a parsed darknet-style .cfg and returns
    (hyperparams dict, nn.ModuleList of per-layer nn.Sequential blocks).
    """
    hyperparams = module_defs.pop(0)  # first cfg block holds the hyper-parameters
    output_filters = [int(hyperparams["channels"])]  # output channel count of the input layer
    module_list = nn.ModuleList()  # container for the created modules
    for module_i, module_def in enumerate(module_defs):
        modules = nn.Sequential()
        if module_def["type"] == "convolutional":  # conv block: conv + optional BN + activation
            bn = int(module_def["batch_normalize"])  # whether to apply batch norm
            filters = int(module_def["filters"])  # number of output channels
            kernel_size = int(module_def["size"])  # convolution kernel size
            pad = (kernel_size - 1) // 2  # "same" padding
            groups = int(module_def["groups"]) if "groups" in module_def.keys() else 1  # grouped convolution
            modules.add_module(
                f"conv_{module_i}",
                nn.Conv2d(
                    in_channels=output_filters[-1],  # input channels: output of the previous module
                    out_channels=filters,
                    kernel_size=kernel_size,
                    stride=int(module_def["stride"]),
                    padding=pad,
                    groups=groups,
                    bias=not bn,  # bias is redundant when BN follows: BN's affine
                    # shift cancels it out, so dropping it saves parameters
                ),
            )
            if bn:
                modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5))  # add BN
            if module_def["activation"] == "leaky":
                modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))  # add LeakyReLU
        elif module_def["type"] == "maxpool":  # max-pooling layer
            kernel_size = int(module_def["size"])
            stride = int(module_def["stride"])  # pooling stride
            if kernel_size == 2 and stride == 1:
                modules.add_module(f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))  # zero padding
                # nn.ZeroPad2d pads in all four directions: (left, right, top, bottom)
            maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))  # max pooling
            modules.add_module(f"maxpool_{module_i}", maxpool)
        elif module_def["type"] == "upsample":  # upsampling layer
            upsample = nn.Upsample(scale_factor=int(module_def["stride"]), mode='nearest')
            modules.add_module(f"upsample_{module_i}", upsample)
        elif module_def["type"] == "route":  # feature-map concatenation layer
            layers = [int(x) for x in module_def["layers"].split(",")]
            filters = sum([output_filters[1:][i] for i in layers])
            modules.add_module(f"route_{module_i}", EmptyLayer())  # placeholder; the concat happens in YOLOv3.forward
        elif module_def["type"] == "shortcut":  # residual addition
            filters = output_filters[1:][int(module_def["from"])]
            modules.add_module(f"shortcut_{module_i}", EmptyLayer())  # placeholder; the add happens in YOLOv3.forward
        ## dropout layer added by the author (not in stock darknet cfgs)
        elif module_def["type"] == "dropout":
            drop = nn.Dropout(p=float(module_def["probability"]))
            modules.add_module(f"dropout_{module_i}", drop)
        elif module_def["type"] == "yolo":
            anchor_idxs = [int(x) for x in module_def["mask"].split(",")]  # anchor indices; each YOLO head uses 3 anchors
            # Extract anchors
            anchors = [int(x) for x in module_def["anchors"].split(",")]
            anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in anchor_idxs]  # keep only this head's 3 anchors
            num_classes = int(module_def["classes"])
            img_size = int(hyperparams["width"])
            # Define detection layer
            yolo_layer = YOLOLayer(anchors, num_classes, img_size)
            modules.add_module(f"yolo_{module_i}", yolo_layer)
        module_list.append(modules)  # register this module block
        output_filters.append(filters)
    return hyperparams, module_list
class EmptyLayer(nn.Module):
    """No-op placeholder for 'route' and 'shortcut' layers; the actual
    concat/add wiring is resolved in YOLOv3.forward."""

    def __init__(self):
        # nothing to set up beyond the base nn.Module state
        super().__init__()
class YOLOLayer(nn.Module):
    """YOLO detection head: decodes a raw feature map into box predictions.

    Decoding uses the scaled-sigmoid scheme:
    xy = sigmoid(t)*2 - 0.5 + grid cell, wh = (sigmoid(t)*2)^2 * anchor,
    with xy finally multiplied by the stride to map back to input pixels.
    """

    def __init__(self, anchors, num_classes, img_dim):
        super(YOLOLayer, self).__init__()
        self.num_classes = num_classes
        self.no = num_classes + 5  # outputs per anchor: 4 box coords + objectness + class scores
        self.num_anchors = len(anchors)
        self.img_dim = img_dim  # network input resolution (pixels)
        self.grid = torch.zeros(1)  # TODO
        anchors = torch.tensor(list(chain(*anchors))).float().view(-1, 2)
        self.anchors = anchors
        self.stride = None  # computed on each forward pass from the feature-map size

    def forward(self, inputs):
        # stride: how many input pixels one feature-map cell covers
        stride = self.img_dim // inputs.size(2)
        self.stride = stride
        bs, _, ny, nx = inputs.shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
        inputs_view = inputs.view(bs, self.num_anchors, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
        inputs_sigmoid = inputs_view.sigmoid()
        # scaled-sigmoid box decoding: offsets lie in (-0.5, 1.5),
        # width/height scale factors lie in (0, 4)
        x = inputs_sigmoid[..., 0] * 2.0 - 0.5
        y = inputs_sigmoid[..., 1] * 2.0 - 0.5
        w = (inputs_sigmoid[..., 2] * 2.0) ** 2
        h = (inputs_sigmoid[..., 3] * 2.0) ** 2
        pred = inputs_sigmoid[..., 4:]  # objectness + class scores, left as-is
        FloatTensor = torch.cuda.FloatTensor if inputs.is_cuda else torch.FloatTensor
        self.grid_size = nx
        # integer cell offsets of the grid, shaped (1, 1, grid, grid)
        self.grid_x = FloatTensor([i for j in range(self.grid_size) for i in range(self.grid_size)])\
            .view([1, 1, self.grid_size, self.grid_size])
        self.grid_y = FloatTensor([j for j in range(self.grid_size) for i in range(self.grid_size)])\
            .view([1, 1, self.grid_size, self.grid_size])
        self.anchor_w = [self.anchors[i][0] for i in range(self.num_anchors)]
        self.anchor_h = [self.anchors[i][1] for i in range(self.num_anchors)]  # list elements are tensors
        # add grid offsets / scale by anchors, one anchor channel at a time
        X = FloatTensor()
        for i in range(self.num_anchors):
            X = torch.cat((X, torch.add(x[:, i:i + 1, :, :], self.grid_x)), 1)
        Y = FloatTensor()
        for i in range(self.num_anchors):
            Y = torch.cat((Y, torch.add(y[:, i:i + 1, :, :], self.grid_y)), 1)
        W = FloatTensor()
        for i in range(self.num_anchors):
            W = torch.cat((W, torch.mul(w[:, i:i+1, :, :], self.anchor_w[i])), 1)
        H = FloatTensor()
        for i in range(self.num_anchors):
            H = torch.cat((H, torch.mul(h[:, i:i+1, :, :], self.anchor_h[i])), 1)
        outputs = torch.cat(
            (
                torch.mul(X, self.stride).view(bs, 1, -1, 1),
                torch.mul(Y, self.stride).view(bs, 1, -1, 1),
                W.view(bs, 1, -1, 1),
                H.view(bs, 1, -1, 1),
                pred.view(bs, 1, -1, self.num_classes+1),
            ),
            -1,
        )  # concatenate the decoded quantities along the last dimension
        return outputs
class YOLOv3(nn.Module):
    '''
    YOLOv3 model built from a darknet-style .cfg file.
    '''
    def __init__(self, config_path):
        super(YOLOv3, self).__init__()
        self.module_defs = parse_model_config(config_path)  # parse the network cfg file
        # self.hyperparams is a dict of net-level settings
        # self.module_list holds the network layers/blocks (nn.Sequential objects)
        self.hyperparams, self.module_list = creat_modules(self.module_defs)
        self.yolo_layers = [layer[0] for layer in self.module_list if isinstance(layer[0], YOLOLayer)]
        # collect the YOLO heads separately; yolo-tiny has two of them
        self.img_size = int(self.hyperparams["width"])
        self.seen = 0
        self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)

    def forward(self, x):
        layer_outputs, yolo_outputs = [], []
        FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
        yolo_outputs_2 = FloatTensor()  # decoded outputs (predicted positions); currently unused
        for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
            # zip pairs each cfg block with its instantiated module
            if module_def["type"] in ["convolutional", "upsample", "maxpool", "dropout"]:
                x = module(x)
            elif module_def["type"] == "route":  # fusion layer: concatenate earlier feature maps
                x = torch.cat([layer_outputs[int(layer_i)] for layer_i in module_def["layers"].split(",")], 1)
            elif module_def["type"] == "shortcut":
                layer_i = int(module_def["from"])  # residual connection
                x = layer_outputs[-1] + layer_outputs[layer_i]
            elif module_def["type"] == "yolo":
                out = module(x)
                yolo_outputs.append(out)
            layer_outputs.append(x)
            # every module's output is kept in layer_outputs so route/shortcut
            # can access intermediate activations at any time;
            # append stores references only, so no extra tensor memory is used
        # yolo_outputs.append(yolo_outputs_2)
        # With three YOLO heads, yolo_outputs would hold 4 elements: the three
        # raw head outputs with shapes n*255*13*13, n*255*26*26 and n*255*52*52,
        # plus their decoded concatenation n*(3*13*13+3*26*26+3*52*52)*85.
        # For a 416*416 input, at most 3*13*13+3*26*26+3*52*52 objects are predicted.
        # yolo_outputs = torch.cat(yolo_outputs, 1)
        return yolo_outputs

    def load_darknet_weights(self, weights_path):
        """Parses and loads the weights stored in 'weights_path'"""
        # darknet is an open-source framework; its weight files use the .weights suffix
        # Open the weights file
        with open(weights_path, "rb") as f:
            header = np.fromfile(f, dtype=np.int32, count=5)  # First five are header values
            self.header_info = header  # Needed to write header when saving weights
            self.seen = header[3]  # number of images seen during training
            weights = np.fromfile(f, dtype=np.float32)  # The rest are weights
        # Establish cutoff for loading backbone weights
        cutoff = None
        if "darknet53.conv.74" in weights_path:
            cutoff = 75
        ptr = 0  # read position inside the flat weight array
        for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):
            if i == cutoff:
                break
            if module_def["type"] == "convolutional":
                conv_layer = module[0]
                if module_def["batch_normalize"]:
                    # Load BN bias, weights, running mean and running variance
                    bn_layer = module[1]
                    num_b = bn_layer.bias.numel()  # Number of biases
                    # Bias
                    bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)
                    bn_layer.bias.data.copy_(bn_b)
                    ptr += num_b
                    # Weight
                    bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)
                    bn_layer.weight.data.copy_(bn_w)
                    ptr += num_b
                    # Running Mean
                    bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)
                    bn_layer.running_mean.data.copy_(bn_rm)
                    ptr += num_b
                    # Running Var
                    bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)
                    bn_layer.running_var.data.copy_(bn_rv)
                    ptr += num_b
                else:
                    # Load conv. bias
                    num_b = conv_layer.bias.numel()
                    conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)
                    conv_layer.bias.data.copy_(conv_b)
                    ptr += num_b
                # Load conv. weights
                num_w = conv_layer.weight.numel()
                conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)
                conv_layer.weight.data.copy_(conv_w)
                ptr += num_w

    def save_darknet_weights(self, path, cutoff=-1):
        """
        Save the model in darknet .weights format.

        @:param path    - path of the new weights file
        @:param cutoff  - save layers between 0 and cutoff (cutoff = -1 -> all are saved)
        """
        fp = open(path, "wb")
        self.header_info[3] = self.seen
        self.header_info.tofile(fp)
        # Iterate through layers, mirroring the read order of load_darknet_weights
        for i, (module_def, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
            if module_def["type"] == "convolutional":
                conv_layer = module[0]
                # If batch norm, save bn parameters first
                if module_def["batch_normalize"]:
                    bn_layer = module[1]
                    bn_layer.bias.data.cpu().numpy().tofile(fp)
                    bn_layer.weight.data.cpu().numpy().tofile(fp)
                    bn_layer.running_mean.data.cpu().numpy().tofile(fp)
                    bn_layer.running_var.data.cpu().numpy().tofile(fp)
                # Otherwise save the conv bias
                else:
                    conv_layer.bias.data.cpu().numpy().tofile(fp)
                # Save conv weights
                conv_layer.weight.data.cpu().numpy().tofile(fp)
        fp.close()
if __name__ == "__main__":
    '''
    Smoke test for model.py: build the net, run a random input,
    export to ONNX and compare against OpenCV-DNN inference.
    '''
    from utils.utils import *
    conifg_path = "../configs/yolov3-tiny-bac.cfg"
    net = YOLOv3(conifg_path)
    # print(net)
    net.apply(weights_init_normal)
    net.eval()
    img_size = 640
    inputs = torch.rand(1, 3, img_size, img_size)*255
    outputs = net(inputs)
    print(outputs[0].size())
    # export to onnx
    save_path = "./yolo-fastest-xl.onnx"
    torch.onnx.export(net, inputs, save_path, input_names=["input"], output_names=["out0", "out1"],
                      verbose=True, opset_version=11)
    # load the onnx file back
    import onnx
    import torch
    import cv2
    onnx_name = "./yolo-fastest-xl.onnx"
    model = onnx.load(onnx_name)
    # check that the exported IR is well formed
    onnx.checker.check_model(model)
    # register an ExpLayer so OpenCV's DNN module can execute the 'Exp' op
    class ExpLayer(object):
        def __init__(self, params, blobs):
            super(ExpLayer, self).__init__()

        def getMemoryShapes(self, inputs):
            # output shapes equal input shapes (element-wise op)
            return inputs

        def forward(self, inputs):
            return [np.exp(inputs[0])]
    cv2.dnn_registerLayer('Exp', ExpLayer)
    # load the model with OpenCV DNN
    import numpy as np
    net = cv2.dnn.readNetFromONNX(onnx_name)
    # convert the torch input to a HWC uint8 image for blobFromImage
    img = inputs.numpy()
    img = img[0]
    img = img.transpose((1,2,0))
    img = img.astype('uint8')
    blob = cv2.dnn.blobFromImage(img, size=(img_size, img_size))  # img must be uint8
    net.setInput(blob)
    out = net.forward("out0")
    print(out)
    # check that OpenCV's ONNX result matches the original PyTorch output
    outputs = outputs[0].detach().numpy()
    print(outputs[0] - out[0])
|
{"hexsha": "fabde9f4e30ff0af8bd61c6d6a406ccaa9429722", "size": 14795, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/modelv5.py", "max_stars_repo_name": "qqsuhao/YOLOv3-YOLOv3-tiny-yolo-fastest-xl--pytorch", "max_stars_repo_head_hexsha": "351023a929afb2109b2233d4c089cb1d3562be52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-05-06T07:31:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T03:17:13.000Z", "max_issues_repo_path": "models/modelv5.py", "max_issues_repo_name": "duhaijun/YOLOv3-YOLOv3-tiny-yolo-fastest-xl--pytorch", "max_issues_repo_head_hexsha": "351023a929afb2109b2233d4c089cb1d3562be52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-06T02:07:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-06T03:14:31.000Z", "max_forks_repo_path": "models/modelv5.py", "max_forks_repo_name": "duhaijun/YOLOv3-YOLOv3-tiny-yolo-fastest-xl--pytorch", "max_forks_repo_head_hexsha": "351023a929afb2109b2233d4c089cb1d3562be52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-05-08T03:48:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T03:37:06.000Z", "avg_line_length": 42.7601156069, "max_line_length": 122, "alphanum_fraction": 0.5599864819, "include": true, "reason": "import numpy", "num_tokens": 4148}
|
import numpy as np
import pandas as pd
import scipy.spatial.distance as sci
import matplotlib.pyplot as plt
from scipy.stats import norm
from matplotlib.ticker import FormatStrFormatter
# Figure 6A
# Determination of Hamming distance
# Python script was used in JupyterLab

# Save figures as . . .
save = 'fig_hamming_111.png'

# Load the per-population dataframes of variant counts.
pklfile = 'mdf_FN80cysaCA2.pkl'
ot = pd.read_pickle(pklfile)
pklfile = 'mdf_FN28cysaCA2.pkl'
ot2 = pd.read_pickle(pklfile)
pklfile = 'mdf_FN28cysaCA9.pkl'
ot3 = pd.read_pickle(pklfile)

# Naive populations: variants present in the unsorted pool, with the
# selection-round columns removed.
naive28 = ot2[ot2['C28_0']>0].drop(['C28P2CA2','C28P3CA2','C28P5CA2','C28P7CA2'],axis=1)
naive80 = ot[ot['C80_0']>0].drop(['C80P2','C80P3','C80P5','C80P7'], axis=1)
ot = ot.drop(['C80_0'], axis=1)
ot2 = ot2.drop(['C28_0'], axis=1)
ot3 = ot3.drop(['C28_0'], axis=1)
# Rename the selection-round columns to the round number only.
ot = ot.rename(columns={"C80P2": "2", "C80P3": "3", "C80P5": "5", "C80P7": "7"})
ot2 = ot2.rename(columns={"C28P2CA2": "2", "C28P3CA2": "3", "C28P5CA2": "5", "C28P7CA2": "7"})
ot3 = ot3.rename(columns={"C28P2CA9": "2", "C28P3CA9": "3", "C28P5CA9": "5", "C28P7CA9": "7"})

# hamming distance of all variants in winning population: for each round
# keep the top-N variants by count and drop the other rounds' columns.
Fn80p2 = ot[ot['2']>0].nlargest(494, '2').drop(['3','5','7'], axis=1)
Fn80p3 = ot[ot['3']>0].nlargest(205, '3').drop(['2','5','7'], axis=1)
# BUG FIX: Fn80p5 previously dropped ['2','3','5'], i.e. its own sort
# column, while leaving column '7' in place; every round must drop the
# *other* three round columns (matching Fn28p5 below).
Fn80p5 = ot[ot['5']>0].nlargest(169, '5').drop(['2','3','7'], axis=1)
Fn80p7 = ot[ot['7']>0].nlargest(180, '7').drop(['2','3','5'], axis=1)
Fn28p2 = ot2[ot2['2']>0].nlargest(199, '2').drop(['3','5','7'], axis=1)
Fn28p3 = ot2[ot2['3']>0].nlargest(12, '3').drop(['2','5','7'], axis=1)
Fn28p5 = ot2[ot2['5']>0].nlargest(429, '5').drop(['2','3','7'], axis=1)
Fn28p7 = ot2[ot2['7']>0].nlargest(443, '7').drop(['2','3','5'], axis=1)
Fn28p2aCA9 = ot3[ot3['2']>0].nlargest(47, '2').drop(['3','5','7'], axis=1)
Fn28p3aCA9 = ot3[ot3['3']>0].nlargest(83, '3').drop(['2','5','7'], axis=1)
Fn28p5aCA9 = ot3[ot3['5']>0].nlargest(90, '5').drop(['2','3','7'], axis=1)
Fn28p7aCA9 = ot3[ot3['7']>0].nlargest(421, '7').drop(['2','3','5'], axis=1)

# All populations to be processed below, naive pools included.
df = [naive80, Fn80p2, Fn80p3, Fn80p5, Fn80p7, naive28, Fn28p2, Fn28p3, Fn28p5, Fn28p7, \
      Fn28p2aCA9, Fn28p3aCA9, Fn28p5aCA9, Fn28p7aCA9]
# For every population: record the length of each loop sequence (BC/DE/FG).
for i in df:
    i['BC_l'] = 0
    i['DE_l'] = 0
    i['FG_l'] = 0
    for j in range(len(i)):
        # NOTE(review): chained indexing (i['col'].iloc[j] = ...) hits
        # pandas' SettingWithCopy path; i.loc[...] would be the safe idiom.
        # Confirm before refactoring.
        i['BC_l'].iloc[j] = len(i['BC'].iloc[j])
        i['DE_l'].iloc[j] = len(i['DE'].iloc[j])
        i['FG_l'].iloc[j] = len(i['FG'].iloc[j])
    # NOTE(review): this rebinds the loop variable only -- the filtered
    # frame is discarded at the end of each iteration, so the frames in
    # `df` are NOT length-filtered here.  The 'len' == 23 filter later is
    # presumably what restricts the sequences; verify intent.
    i = i[(i['BC_l'] < 12) & (i['DE_l'] < 8) & (i['FG_l'] < 10)]
# Pad each loop with 'X' placeholders so every variant aligns to a fixed
# 23-residue string: BC -> 9, DE -> 6, FG -> 8 positions.
X = 'X'
BC_max, DE_max, FG_max = 9, 6, 8
for i in df:
    # initialize the adjusted-sequence columns with a placeholder
    i['BC_adj'] = 'A'
    i['DE_adj'] = 'A'
    i['FG_adj'] = 'A'
    i['AA_adj'] = 'A'
    i['AA_adj_arr'] = 'A'
    for j in range(len(i)):
        # BC loop: insert X's mid-loop (after position 4, resp. 3).
        # NOTE(review): lengths outside the handled cases keep the 'A'
        # placeholder -- presumably filtered out by the earlier length
        # bounds; confirm.
        if i['BC_l'].iloc[j] == BC_max:
            i['BC_adj'].iloc[j] = i['BC'].iloc[j]
        elif i['BC_l'].iloc[j] == BC_max-1:
            i['BC_adj'].iloc[j] = i['BC'].iloc[j][0:4]+X+i['BC'].iloc[j][4:]
        elif i['BC_l'].iloc[j] == BC_max-2:
            i['BC_adj'].iloc[j] = i['BC'].iloc[j][0:3]+2*X+i['BC'].iloc[j][3:]
        # DE loop: pad after the first two residues.
        if i['DE_l'].iloc[j] == DE_max:
            i['DE_adj'].iloc[j] = i['DE'].iloc[j]
        elif i['DE_l'].iloc[j] == DE_max-2:
            i['DE_adj'].iloc[j] = i['DE'].iloc[j][0:2]+2*X + i['DE'].iloc[j][2:]
        elif i['DE_l'].iloc[j] == DE_max-3:
            i['DE_adj'].iloc[j] = i['DE'].iloc[j][0:2]+3*X + i['DE'].iloc[j][-1]
        # FG loop: pad before the final residues.
        if i['FG_l'].iloc[j] == FG_max:
            i['FG_adj'].iloc[j] = i['FG'].iloc[j]
        elif i['FG_l'].iloc[j] == FG_max-1:
            i['FG_adj'].iloc[j] = i['FG'].iloc[j][0:5]+X+i['FG'].iloc[j][-2:]
        elif i['FG_l'].iloc[j] == FG_max-2:
            i['FG_adj'].iloc[j] = i['FG'].iloc[j][0:5]+2*X+i['FG'].iloc[j][-1:]
    # Concatenate the aligned loops into one string per variant.
    i['AA_adj'] = i['BC_adj']+i['DE_adj']+i['FG_adj']
    for j in range(len(i)):
        # store the aligned sequence as a list of characters for
        # scipy's element-wise hamming computation
        i['AA_adj_arr'].iloc[j] = list(i['AA_adj'].iloc[j])
    i['len'] = 0
    for j in range(len(i)):
        i['len'].iloc[j] = len(i['AA_adj'].iloc[j])
# Pairwise Hamming distances within each population over the 23 aligned
# positions; only fully aligned (len == 23) variants are compared.
for i in df:
    i = i[i['len'] == 23 ]
    hamming = None
    # full matrix pre-filled with NaN; only the strict upper triangle is
    # computed so each unordered pair is counted once
    hamming = np.empty((len(i),len(i)))
    hamming[:] = np.nan
    for j in range(0,len(i)):
        for k in range(j+1,len(i)):
            # scipy.spatial.distance.hamming returns the *fraction* of
            # differing positions; scale by 23 to get a residue count
            hamming[j,k] = sci.hamming(i['AA_adj_arr'].iloc[j], i['AA_adj_arr'].iloc[k]) * 23
    hamming = hamming.flatten()
    hamming = hamming[~np.isnan(hamming)]
    # Store the distances under a population-specific name; the population
    # is recognized by DataFrame.equals against the (possibly len-filtered)
    # original frames.
    if i.equals(naive80) == True:
        hamN80 = hamming
    elif i.equals(Fn80p2[Fn80p2['len'] == 23]) == True:
        hamFn80p2 = hamming
    elif i.equals(Fn80p3[Fn80p3['len'] == 23]) == True:
        hamFn80p3 = hamming
    elif i.equals(Fn80p5) == True:
        hamFn80p5 = hamming
    elif i.equals(Fn80p7) == True:
        hamFn80p7 = hamming
    elif i.equals(naive28) == True:
        hamN28 = hamming
    elif i.equals(Fn28p2) == True:
        hamFn28p2 = hamming
    elif i.equals(Fn28p3) == True:
        hamFn28p3 = hamming
    elif i.equals(Fn28p5) == True:
        hamFn28p5 = hamming
    elif i.equals(Fn28p7) == True:
        hamFn28p7 = hamming
    elif i.equals(Fn28p2aCA9) == True:
        hamFn28p2aCA9 = hamming
    elif i.equals(Fn28p3aCA9) == True:
        hamFn28p3aCA9 = hamming
    elif i.equals(Fn28p5aCA9) == True:
        hamFn28p5aCA9 = hamming
    elif i.equals(Fn28p7aCA9[Fn28p7aCA9['len'] == 23]) == True:
        hamFn28p7aCA9 = hamming
    else:
        print('naming is wrong: fix')
    # release the per-population matrix before the next iteration
    hamming = None
# Generation of best fit line using probability density function:
# fit a normal distribution to each population's (rounded) Hamming
# distances and evaluate its PDF on the integer distance grid 0..23.
# BUG FIX: the grid previously read ...,18,20,20,21,... -- 19 was missing
# and 20 duplicated, which distorted every plotted curve at that point.
bins = list(range(24))
# FN80 library: naive pool then rounds 2/3/5/7
mu, sigma = norm.fit(np.rint(hamN80))
best_fit_line = norm.pdf(bins, mu, sigma)
mu1, sigma1 = norm.fit(np.rint(hamFn80p2))
best_fit_line1 = norm.pdf(bins, mu1, sigma1)
mu2, sigma2 = norm.fit(np.rint(hamFn80p3))
best_fit_line2 = norm.pdf(bins, mu2, sigma2)
mu3, sigma3 = norm.fit(np.rint(hamFn80p5))
best_fit_line3 = norm.pdf(bins, mu3, sigma3)
mu4, sigma4 = norm.fit(np.rint(hamFn80p7))
best_fit_line4 = norm.pdf(bins, mu4, sigma4)
# FN28 library vs CA2
mu5, sigma5 = norm.fit(np.rint(hamN28))
best_fit_line5 = norm.pdf(bins, mu5, sigma5)
mu6, sigma6 = norm.fit(np.rint(hamFn28p2))
best_fit_line6 = norm.pdf(bins, mu6, sigma6)
mu7, sigma7 = norm.fit(np.rint(hamFn28p3))
best_fit_line7 = norm.pdf(bins, mu7, sigma7)
mu8, sigma8 = norm.fit(np.rint(hamFn28p5))
best_fit_line8 = norm.pdf(bins, mu8, sigma8)
mu9, sigma9 = norm.fit(np.rint(hamFn28p7))
best_fit_line9 = norm.pdf(bins, mu9, sigma9)
# FN28 library vs CA9
mu10, sigma10 = norm.fit(np.rint(hamFn28p2aCA9))
best_fit_line10 = norm.pdf(bins, mu10, sigma10)
mu11, sigma11 = norm.fit(np.rint(hamFn28p3aCA9))
best_fit_line11 = norm.pdf(bins, mu11, sigma11)
mu12, sigma12 = norm.fit(np.rint(hamFn28p5aCA9))
best_fit_line12 = norm.pdf(bins, mu12, sigma12)
mu13, sigma13 = norm.fit(np.rint(hamFn28p7aCA9))
best_fit_line13 = norm.pdf(bins, mu13, sigma13)
# Generation of Figure
fig, ax = plt.subplots(figsize=(5,5), dpi=150)
# (curve, colour, line width, line style) -- listed in the original plot
# order so layering and colours are unchanged.
curve_specs = [
    (best_fit_line5,  "black",   2, "solid"),   # naive28
    (best_fit_line10, "#1f77b4", 4, "solid"),   # Fn28p2aCA9
    (best_fit_line11, "#ff7f0e", 4, "solid"),
    (best_fit_line12, "#2ca02c", 4, "solid"),
    (best_fit_line13, "#d62728", 4, "solid"),
    (best_fit_line,   "black",   2, "dashed"),  # naive80
    (best_fit_line1,  "#1f77b4", 2, "dashed"),
    (best_fit_line2,  "#ff7f0e", 2, "dashed"),
    (best_fit_line3,  "#2ca02c", 2, "dashed"),
    (best_fit_line4,  "#d62728", 2, "dashed"),
    (best_fit_line6,  "#1f77b4", 2, "solid"),   # Fn28p2aCA2
    (best_fit_line7,  "#ff7f0e", 2, "solid"),
    (best_fit_line8,  "#2ca02c", 2, "solid"),
    (best_fit_line9,  "#d62728", 2, "solid"),
]
for curve, colour, width, style in curve_specs:
    plt.plot(bins, curve, color=colour, linewidth=width, linestyle=style)
plt.xlim([0, 23])
plt.ylim([0, 0.2])
plt.xticks(fontsize=14)
plt.yticks([0,0.05,0.1,0.15,0.2],fontsize=14)
plt.xlabel('Hamming Distance', fontsize=16)
plt.ylabel('Normalized Frequency', fontsize=16)
ax.set_aspect(1.0/ax.get_data_ratio(), adjustable='box')
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
#plt.legend()
plt.show()
fig.savefig(save, bbox_inches = 'tight')
|
{"hexsha": "7c3183085b6b35647faee3797e7f8512216195d6", "size": 8174, "ext": "py", "lang": "Python", "max_stars_repo_path": "Deep Sequence Analysis/Hamming.py", "max_stars_repo_name": "HackelLab-UMN/PriSM-Inhibition-and-Seq-Analysis", "max_stars_repo_head_hexsha": "0d863d02e3da8ead4fdf8f6cf42ae1f64b0431b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Deep Sequence Analysis/Hamming.py", "max_issues_repo_name": "HackelLab-UMN/PriSM-Inhibition-and-Seq-Analysis", "max_issues_repo_head_hexsha": "0d863d02e3da8ead4fdf8f6cf42ae1f64b0431b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Deep Sequence Analysis/Hamming.py", "max_forks_repo_name": "HackelLab-UMN/PriSM-Inhibition-and-Seq-Analysis", "max_forks_repo_head_hexsha": "0d863d02e3da8ead4fdf8f6cf42ae1f64b0431b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1545454545, "max_line_length": 94, "alphanum_fraction": 0.6146317592, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 3188}
|
#include <boost/lexical_cast.hpp>
#include <pcl/common/common.h>
#include "global.h"
#include "rosinterface.h"
// Entry point: parses command-line options (target object, scenario,
// sensor crop limits, optional custom model path, sensor type) and hands
// them to the ROS interface that runs the pose-estimation pipeline.
int
main(int argc, char** argv)
{
    // ******************************** Command line parser for arguments *************************************
    // The OpenCV 2.x and 3.x CommandLineParser constructors take different
    // key-string formats, hence the preprocessor branch.
    cv::CommandLineParser parser(argc, argv,
#if CV_MAJOR_VERSION == 3
                                 "{ h help       | false | print this message  }"
                                 "{ o object     | brick  | Object to get pose [brick(default); drill ; yellow]}"
                                 "{ sc scenario  | 0     | Scenario   table -> 0 ; nottable -> 1   }"
                                 "{ l limits     | -0.5,0.5,-0.5,0.3,0.6,1.7 | X, Y and Z limits from camera(xmin,xmax,ymin,ymax,zmin,zmax)(no spaces!) }"
                                 "{ p path       | empty | Path to new object model (pointcloud (.pcd, .ply) format)}"
                                 "{ s sensor     | astra | Sensor to choose [kinect, astra, euclid]}"
#else
                                 "{ h | help       | false | print this message  }"
                                 "{ o | object     | brick  | Object to get pose [brick(default); drill ; yellow]}"
                                 "{ sc | scenario  | 0     | Scenario   table -> 0 ; nottable -> 1   }"
                                 "{ l | limits     | -0.5,0.5,-0.5,0.3,0.6,1.7 | X, Y and Z limits from camera(xmin,xmax,ymin,ymax,zmin,zmax)(no spaces!) }"
                                 "{ p | path       | empty | Path to new object model (pointcloud (.pcd, .ply) format)}"
                                 "{ s | sensor     | astra | Sensor to choose [kinect, astra, euclid]}"
#endif
                                 );
    // ******************************** Get Values of Different Arguments *************************************
    //print help
    bool helpReq = parser.get<bool>("h");
    if ( helpReq )
    {
#if CV_MAJOR_VERSION == 3
        parser.printMessage();
#else
        parser.printParams();
#endif
        //parser.printMessage();
        return 0;
    }
    //to get object name to estimate and scenario
    std::string objName = parser.get<std::string>("o");
    int scenario = parser.get<int>("sc");
    // to get limits of the vieweing space of sensor
    // Limits arrive as a single comma-separated string; split it and
    // convert each field to float (order: xmin,xmax,ymin,ymax,zmin,zmax).
    std::string limits = parser.get<std::string> ("l");
    std::vector<std::string> fieldString;
    boost::split( fieldString, limits, boost::is_any_of( "," ) );
    //std::cout << boost::lexical_cast<float>( fieldString.at(0)) << " , " << fieldString.at(5) << std::endl;
    std::vector<float> limitsD;
    for(int i = 0; i < fieldString.size(); ++i){
        limitsD.push_back(boost::lexical_cast<float>( fieldString.at(i)));
    }
    //to get object path and check its validity
    std::string path = parser.get<std::string>("p");
    //Check path given as new object model is valid
    if(path != "empty"){
        boost::filesystem::path pathToObj(path);
        if( !boost::filesystem::exists(pathToObj)){
            std::cout << "Path to new object doesn't exists!";
            return 0;
        }
        // Only point-cloud formats are accepted as custom object models.
        if ( (pathToObj.extension() != ".pcd") && (pathToObj.extension() != ".ply")){
            std::cout << "Only .pcd and .ply extension files possible" << std::endl;
            return 0;
        }
    }
    // to get Sensor model type
    // Exactly one of the three sensor flags is set; anything else aborts.
    std::string sensor = parser.get<std::string>("s");
    bool kinect(false), astra(false), euclid(false);
    if (sensor == "kinect"){
        kinect = true;
    }
    else if (sensor == "astra"){
        astra = true;
    }
    else if (sensor == "euclid"){
        euclid = true;
    }
    else{
        std::cerr << "Invalid Sensor Type" ;
        return 0;
    }
    // ********************************** Connection to ROS Interface and start Pose Estimation ********************************************************
    RosInterface rosInterface( euclid, kinect, astra);
    rosInterface.startRosInterface( argc, argv, objName, scenario, limitsD, path);
    return 0;
}
|
{"hexsha": "bdb6f824eb9fc495dec858511d1202d4a3787b22", "size": 4151, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "DetectAndLocalize/src/main.cpp", "max_stars_repo_name": "gopi231091/Object-Pose-Estimation", "max_stars_repo_head_hexsha": "11726fd008447fed3947c893d959b5acb9fd339e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2019-02-19T18:55:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-10T22:20:18.000Z", "max_issues_repo_path": "DetectAndLocalize/src/main.cpp", "max_issues_repo_name": "gopi231091/Object-Pose-Estimation", "max_issues_repo_head_hexsha": "11726fd008447fed3947c893d959b5acb9fd339e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DetectAndLocalize/src/main.cpp", "max_forks_repo_name": "gopi231091/Object-Pose-Estimation", "max_forks_repo_head_hexsha": "11726fd008447fed3947c893d959b5acb9fd339e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-09-06T01:52:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-03T09:40:11.000Z", "avg_line_length": 40.6960784314, "max_line_length": 162, "alphanum_fraction": 0.4794025536, "num_tokens": 970}
|
import cv2
import numpy as np
import pandas as pd
import time
class Stitcher():
    """Feature-based image registration / stitching evaluator.

    Attributes:
        stitch_mode:  0 for translational mode, 1 for homography mode.
        feature:      0 for SIFT, 1 for SURF, 2 for ORB.
        search_ratio: Lowe's ratio-test threshold (0.75 is commonly used).
        offset_match: 0 to estimate the offset by mode (most frequent
                      displacement), 1 to estimate it via RANSAC homography.
    """

    def __init__(self, stitch_mode=0, feature=0, search_ratio=0.75, offset_match=0):
        self.stitch_mode = stitch_mode    # "0" for translational mode and "1" for homography mode
        self.feature = feature            # "0" for "sift" and "1" for "surf" and "2" for "orb"
        self.search_ratio = search_ratio  # "0.75" is commonly used
        self.offset_match = offset_match  # "0" for "mode" and "1" for "ransac"

    def detectAndDescribe(self, image):
        """Detect keypoints in ``image`` and compute their descriptors.

        Args:
            image: input image (ndarray) to analyse.

        Returns:
            (kps, features): ``kps`` is an (N, 2) float32 array of keypoint
            (x, y) coordinates; ``features`` is the descriptor matrix
            (``None`` when the detector finds nothing).

        Raises:
            ValueError: if ``self.feature`` is not 0, 1 or 2.  Previously an
            unknown value fell through and raised UnboundLocalError below.
        """
        if self.feature == 0:    # SIFT
            descriptor = cv2.xfeatures2d.SIFT_create()
        elif self.feature == 1:  # SURF
            descriptor = cv2.xfeatures2d.SURF_create()
        elif self.feature == 2:  # ORB
            descriptor = cv2.ORB_create()
        else:
            raise ValueError("feature must be 0 (sift), 1 (surf) or 2 (orb)")
        # Detect keypoints and compute their descriptors in a single pass.
        kps, features = descriptor.detectAndCompute(image, None)
        # Convert cv2.KeyPoint objects into a plain NumPy coordinate array.
        kps = np.float32([kp.pt for kp in kps])
        return (kps, features)

    def getOffsetByMode(self, kpsA, kpsB, matches):
        """Estimate the [dx, dy] offset as the most frequent displacement.

        Args:
            kpsA, kpsB: (N, 2) keypoint coordinate arrays of the two images.
            matches: list of (trainIdx, queryIdx) index pairs into kpsB/kpsA.

        Returns:
            [dx, dy]: modal integer offset (row, col order -- coordinates are
            swapped below); [0, 0] when there are no matches or the modal
            offset is supported by fewer than 5 pairs.
        """
        if len(matches) == 0:
            return [0, 0]
        dxList = []
        dyList = []
        for trainIdx, queryIdx in matches:
            # Swap (x, y) -> (row, col) so dx is the vertical displacement.
            ptA = (kpsA[queryIdx][1], kpsA[queryIdx][0])
            ptB = (kpsB[trainIdx][1], kpsB[trainIdx][0])
            dxList.append(int(round(ptA[0] - ptB[0])))
            dyList.append(int(round(ptA[1] - ptB[1])))
        if len(dxList) == 0:
            dxList.append(0)
            dyList.append(0)
        # Get the mode offset in [dxList, dyList], thanks to clovermini.
        zip_list = list(zip(dxList, dyList))
        zip_dict = dict((a, zip_list.count(a)) for a in zip_list)
        zip_dict_sorted = dict(sorted(zip_dict.items(), key=lambda x: x[1], reverse=True))
        dx = list(zip_dict_sorted)[0][0]
        dy = list(zip_dict_sorted)[0][1]
        num = zip_dict_sorted[list(zip_dict_sorted)[0]]
        # Require at least 5 supporting matches; otherwise fall back to (0, 0).
        if num < 5:
            dx = 0
            dy = 0
        return [dx, dy]

    def getHomography(self, kpsA, kpsB, matches):
        """Estimate the homography mapping keypoints of image A onto image B.

        Args:
            kpsA, kpsB: (N, 2) keypoint coordinate arrays of the two images.
            matches: list of (trainIdx, queryIdx) index pairs.

        Returns:
            3x3 homography matrix, or a 3x3 zero matrix when there are too
            few correspondences (< 4) or RANSAC fails.
        """
        # A homography needs at least 4 correspondences; check before doing work.
        # BUGFIX: np.int was removed in NumPy 1.24 -- use the builtin int dtype.
        if len(matches) < 4 or kpsA.shape[0] < 4 or kpsB.shape[0] < 4:
            return np.zeros((3, 3), dtype=int)
        ptsA = np.float32([kpsA[i] for (_, i) in matches])
        ptsB = np.float32([kpsB[i] for (i, _) in matches])
        H, mask = cv2.findHomography(ptsA, ptsB, cv2.RANSAC)
        if H is None:
            return np.zeros((3, 3), dtype=int)
        return H

    def evaluateByFeatureSearch(self, images, groundTrue):
        """Register two images and compare the predicted offset to ground truth.

        Args:
            images: [imageA, imageB] pair of ndarrays.
            groundTrue: ground-truth [drow, dcol] offset.

        Returns:
            (stitch_status, distance): ``stitch_status`` is True when the
            predicted offset equals the ground truth exactly; ``distance``
            is the Euclidean distance between prediction and ground truth
            (a large sentinel when feature extraction fails on either image).
        """
        stitch_status = False
        (imageA, imageB) = images
        # get the feature points
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)
        if featuresA is None or featuresB is None:
            # Sentinel distance: feature extraction failed on an image.
            return stitch_status, 1000000000
        print("    The feature num of imageA is {}".format(featuresA.shape[0]))
        print("    The feature num of imageB is {}".format(featuresB.shape[0]))
        # match the feature points
        matches = []
        if self.feature == 0 or self.feature == 1:  # SIFT / SURF (float descriptors)
            matcher = cv2.DescriptorMatcher_create("BruteForce")
            # KNN match (k=2) between the descriptors of images A and B.
            raw_matches = matcher.knnMatch(featuresA, featuresB, 2)
            for m in raw_matches:
                # Lowe's ratio test: keep the pair only when the best match is
                # sufficiently closer than the second best.
                if len(m) == 2 and m[0].distance < m[1].distance * self.search_ratio:
                    # Store the indices of the two points in featuresA / featuresB.
                    matches.append((m[0].trainIdx, m[0].queryIdx))
        elif self.feature == 2:  # ORB (binary descriptors -> Hamming distance)
            matcher = cv2.DescriptorMatcher_create("BruteForce-Hamming")
            raw_matches = matcher.match(featuresA, featuresB)
            for m in raw_matches:
                matches.append((m.trainIdx, m.queryIdx))
        print("    The match num of two images is {}".format(len(matches)))
        distance = 0
        if self.stitch_mode == 0 and self.offset_match == 0:
            prd_offset = self.getOffsetByMode(kpsA, kpsB, matches)
            distance = np.linalg.norm(np.array(prd_offset) - np.array(groundTrue))
            print("      prd_offset={}, gt_offset={}, The matching result is {}".format(prd_offset, groundTrue, (np.array(prd_offset) == np.array(groundTrue)).all()))
            if (np.array(prd_offset) == np.array(groundTrue)).all():
                stitch_status = True
        elif self.stitch_mode == 0 and self.offset_match == 1:
            # Read the translation from the homography's translation column.
            H = self.getHomography(kpsA, kpsB, matches)
            prd_offset = np.array([int(round(-H[1, 2])), int(round(-H[0, 2]))])
            distance = np.linalg.norm(np.array(prd_offset) - np.array(groundTrue))
            print("      prd_offset={}, gt_offset={}, The matching result is {}".format(prd_offset, groundTrue, (np.array(prd_offset) == np.array(groundTrue)).all()))
            if (np.array(prd_offset) == np.array(groundTrue)).all():
                stitch_status = True
        elif self.stitch_mode == 1 and self.offset_match == 1:
            # Homography stitching mode: H is computed but no evaluation is
            # performed for this mode in the visible code -- TODO confirm.
            H = self.getHomography(kpsA, kpsB, matches)
        else:
            print("Input error, No such matching algorithm")
        return stitch_status, distance
if __name__ == '__main__':
    # Batch evaluation: for every image pair listed in the CSV, predict the
    # translational offset and compare it against the ground-truth offset.
    #
    # BUGFIX: `input_address` was missing its trailing path separator (the
    # concatenation below produced ...images_notFixed<name>\...), and
    # `input_csv` was referenced but never defined (NameError at runtime).
    # Both values are restored following the commented-out configuration
    # that accompanied this script -- confirm they match the on-disk
    # zirconSEMCL dataset layout.
    input_address = ".\\datasets\\zirconSEMCL\\fromCSV\\images_notFixed\\"
    input_csv = ".\\datasets\\zirconSEMCL\\fromCSV\\val_notFixed.csv"
    stitch_mode = 0      # "0" for translational mode and "1" for homography mode
    feature = 2          # "0" for "sift" and "1" for "surf" and "2" for "orb"
    search_ratio = 0.75  # "0.75" is commonly used
    offset_match = 1     # "0" for "mode" and "1" for "ransac"

    stitcher = Stitcher(stitch_mode=stitch_mode, feature=feature, search_ratio=search_ratio, offset_match=offset_match)
    csv_file = pd.read_csv(input_csv)
    distance_loss = 0
    correct_num = 0
    time_start = time.time()
    false_image = []  # names of pairs whose predicted offset was wrong
    for idx in range(len(csv_file)):
        image_name = csv_file.iloc[idx, 1]
        print("the {} th images, Analysising {}".format(idx, image_name))
        local_start_time = time.time()
        imageA = cv2.imread(input_address + image_name + "\\" + image_name + "_A.jpg")
        imageB = cv2.imread(input_address + image_name + "\\" + image_name + "_B.jpg")
        drow = csv_file.iloc[idx, 2]
        dcol = csv_file.iloc[idx, 3]
        stitch_status, distance = stitcher.evaluateByFeatureSearch([imageA, imageB], [drow, dcol])
        distance_loss = distance_loss + distance
        if stitch_status:
            correct_num = correct_num + 1
        else:
            false_image.append(image_name)
        local_end_time = time.time()
        print('The duration time cost is {} s'.format(local_end_time - local_start_time))
        print('Now, the number of false match is {}'.format(len(false_image)))
    time_end = time.time()
    print('The duration time cost is {} s, and the average time cost is {}'.format(time_end - time_start, (time_end - time_start) / len(csv_file)))
    print('The average accuracy is {} %, and the average distance loss is {}'. format(correct_num / len(csv_file) * 100, distance_loss / len(csv_file)))
    # print("False Images: {}".format(false_image))
    # Persist the names of failed pairs for later inspection; `with` ensures
    # the handle is closed even on error (original left the file unmanaged).
    with open('tt.txt', 'w') as f:
        f.write(str(false_image))
|
{"hexsha": "d824da9c8a2577bf4eaf28597d7b6d3eab2a50f0", "size": 10430, "ext": "py", "lang": "Python", "max_stars_repo_path": "trad_stitch.py", "max_stars_repo_name": "MATony/DeepStitch", "max_stars_repo_head_hexsha": "650429daa17964d08a0e0e5e4be1f749bdaac847", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-04T07:52:05.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-04T07:52:05.000Z", "max_issues_repo_path": "trad_stitch.py", "max_issues_repo_name": "Keep-Passion/DeepStitch", "max_issues_repo_head_hexsha": "650429daa17964d08a0e0e5e4be1f749bdaac847", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trad_stitch.py", "max_forks_repo_name": "Keep-Passion/DeepStitch", "max_forks_repo_head_hexsha": "650429daa17964d08a0e0e5e4be1f749bdaac847", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.9043062201, "max_line_length": 161, "alphanum_fraction": 0.5935762224, "include": true, "reason": "import numpy", "num_tokens": 2911}
|
import gym
import numpy as np
from stable_baselines.common.policies import MlpPolicy as common_MlpPolicy
from stable_baselines.ddpg.policies import MlpPolicy as DDPG_MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.ddpg.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec
from stable_baselines import PPO1, PPO2, DDPG
import multiprocessing as mp
#defining the variables
def ppo1_nmileg_pool(stiffness_value):
    """Train one RL agent on the TSNMILeg environment for a given tendon
    stiffness variant and save the resulting model.

    Args:
        stiffness_value: index of the stiffness variant; selects the gym
            environment 'TSNMILeg{stiffness_value}-v1' and the log directory.

    Returns:
        None.  The trained model is written under ./logs/.../model.
    """
    method = "PPO1"
    experiment_ID = "experiment_4_pool_A/mc_1/"
    total_timesteps = 500000
    stiffness_value_str = "stiffness_{}".format(stiffness_value)
    log_dir = "./logs/{}/{}/{}/".format(experiment_ID, method, stiffness_value_str)

    # Build the environment for this stiffness variant.
    env = gym.make('TSNMILeg{}-v1'.format(stiffness_value))
    #env = gym.wrappers.Monitor(env, "./tmp/gym-results", video_callable=False, force=True)

    # Instantiate the model for the selected algorithm.
    if method == "PPO1":
        model = PPO1(common_MlpPolicy, env, verbose=1, tensorboard_log=log_dir)
    elif method == "PPO2":
        env = DummyVecEnv([lambda: env])
        model = PPO2(common_MlpPolicy, env, verbose=1, tensorboard_log=log_dir)
    elif method == "DDPG":
        env = DummyVecEnv([lambda: env])
        n_actions = env.action_space.shape[-1]
        # Ornstein-Uhlenbeck exploration noise; no parameter-space noise.
        action_noise = OrnsteinUhlenbeckActionNoise(
            mean=np.zeros(n_actions),
            sigma=float(0.5) * 5 * np.ones(n_actions))
        model = DDPG(DDPG_MlpPolicy, env, verbose=1, param_noise=None,
                     action_noise=action_noise, tensorboard_log=log_dir)
    else:
        raise ValueError("Invalid RL mode")

    # Train, then persist the model next to its tensorboard logs.
    model.learn(total_timesteps=total_timesteps)
    model.save(log_dir+"/model")
    return None
# Entry point: train all stiffness variants in parallel, one worker per core.
# BUGFIX: the pool was previously created at module import time. Under the
# 'spawn' start method (default on Windows and macOS) each worker re-imports
# this module, which would recursively create pools. Guarding the pool
# creation behind __main__ is the documented multiprocessing requirement
# ("Safe importing of main module").
if __name__ == "__main__":
    pool = mp.Pool(mp.cpu_count())
    stiffness_versions = 9
    pool.map_async(ppo1_nmileg_pool, list(range(stiffness_versions)))
    pool.close()
    pool.join()
    #import pdb; pdb.set_trace()
|
{"hexsha": "d4cb2bc38e4c038b1e0054623b3f664a28490879", "size": 2010, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiment_4_pool_train.py", "max_stars_repo_name": "marjanin/tendon_stiffness", "max_stars_repo_head_hexsha": "b1dc379b09bbf9c044410a6bc51afbee0cba2e05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-20T02:04:46.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-20T02:04:46.000Z", "max_issues_repo_path": "experiment_4_pool_train.py", "max_issues_repo_name": "marjanin/tendon_stiffness", "max_issues_repo_head_hexsha": "b1dc379b09bbf9c044410a6bc51afbee0cba2e05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiment_4_pool_train.py", "max_forks_repo_name": "marjanin/tendon_stiffness", "max_forks_repo_head_hexsha": "b1dc379b09bbf9c044410a6bc51afbee0cba2e05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-11T11:41:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-11T11:41:39.000Z", "avg_line_length": 38.6538461538, "max_line_length": 123, "alphanum_fraction": 0.7810945274, "include": true, "reason": "import numpy", "num_tokens": 561}
|
import os, sys
import numpy as np
import imageio
import json
import random
import time
import torch
import math
import shutil
import pathlib
from tqdm import tqdm, trange
import matplotlib.pyplot as plt
import argparse
import glob
import torch.nn.functional as F
import torchvision
import yaml
#from torch.utils.tensorboard import SummaryWriter
# Import Helper Classes
from estimator_helpers import Estimator
from agent_helpers import Agent
from quad_plot import System
from quad_helpers import vec_to_rot_matrix
from mpc_utils import extra_config_parser, Renderer
from pose_estimate import rot_psi, rot_theta, rot_phi, trans_t
from nerf import (CfgNode, get_embedding_function,
load_blender_data, load_llff_data, models)
# Global debug switch: when True, simulate() skips the MPC loop and takes
# the (currently disabled) offline debugging branch instead.
DEBUG = False
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# presumably toggles the NeRF-based state filter -- not read anywhere in
# the visible portion of this file; TODO confirm against the rest of it.
nerf_filter = True
####################### MAIN LOOP ##########################################
def simulate(planner_cfg, agent_cfg, filter_cfg, extra_cfg, model_coarse, model_fine, cfg, encode_position_fn, encode_direction_fn):
    '''We've assumed that by calling this function, the NeRF model has already been created (i.e. create_nerf has been called) such that
    such that calling render() returns a valid RGB, etc tensor.

    How trajectory planning works:

    A good initialization for the sequence of poses is returned by running A*. This is only run once! A trajectory loss is computed, consisting of a collision loss
    (querying densities from the NeRF from x,y,z points) and a trust region loss. The outputs are a sequence of future rollout poses (where the planner wants the agent to be)
    and a control action(s) to update the agent. This algorithm is run MPC style, with the intent that A* yields a good initialization for the trajectory, and subsequent optimizations can just be done by
    performing gradient descent on the trajectory loss whilst having good performance.

    How state estimation works:

    Given an image, gradient descent is performed on the NeRF reconstruction loss, optimizing on the estimated pose in SE(3). The exponential map was used to create SE(3) from se(3) in R6 such that
    the transformation is differentiable. Two sampling schemes exist: (1) random sampling of pixels from the full image H x W, or (2) random sampling from a mask around features detected by ORB/SIFT on the
    observed image (termed interest region sampling by iNeRF).

    How the whole pipeline works:

    The objective is to path plan from pose P0 at time t = 0 to PT at time t = T. At time t, the agent runs the trajectory planning algorithm, yielding a control action(s) and future desired poses P{t+1:T}.
    The agent takes the control action and also receives an image corresponding to the "real" pose at time t + 1. The state estimator uses P{t+1} as the anchor of the tangential plane and returns P_hat_{t+1} = P @ P{t+1},
    where P in SE(3) are the parameters optimized by the state estimator. P_hat_{t+1} is passed to the trajectory planner as the pose estimate.

    Args:
    '''
    # Desired start and goal states (18-dim vectors built by the caller).
    start_state = planner_cfg['start_state']
    end_state = planner_cfg['end_state']

    # Rendering arguments forwarded to the NeRF renderer wrapper.
    render_kwargs = {
        'embed_fn': encode_position_fn,
        'embeddirs_fn': encode_direction_fn,
        'chunksize': 1500000,
        'model': model_fine
    }

    if DEBUG == False:
        exp_name = planner_cfg['exp_name']
        # NOTE(review): the loop below appears to run 100 Monte-Carlo trials
        # (output files are suffixed with the trial index i); the exact
        # placement of the trailing `return` relative to this loop should be
        # confirmed against the original formatting.
        for i in range(100):
            renderer = Renderer(render_kwargs)

            # Create (or confirm clearing of) the per-experiment output tree.
            basefolder = "paths" / pathlib.Path(planner_cfg['exp_name'])
            if basefolder.exists():
                print(basefolder, "already exists!")
                if input("Clear it before continuing? [y/N]:").lower() == "y":
                    shutil.rmtree(basefolder)
            basefolder.mkdir()
            (basefolder / "train_poses").mkdir()
            (basefolder / "train_graph").mkdir()
            (basefolder / "execute_poses").mkdir()
            (basefolder / "execute_graph").mkdir()
            print("created", basefolder)

            # Initialize the trajectory with A*, then refine it by gradient
            # descent on the trajectory loss (see docstring).
            traj = System(renderer, start_state, end_state, planner_cfg)
            traj.basefolder = basefolder
            then = time.time()
            traj.a_star_init()
            now = time.time()
            print('A* takes', now - then)
            traj.learn_init()
            print('Initial Path takes', time.time() - now)

            # NOTE: `filter` shadows the builtin of the same name.
            agent = Agent(start_state, agent_cfg)
            filter = Estimator(filter_cfg, agent, start_state)
            #inerf_dynamics = Estimator(filter_cfg, agent, start_state, filter=False)

            true_states = start_state.cpu().detach().numpy()
            #steps = traj.get_actions().shape[0]

            '''
            agent_file = './paths/agent_data.json'
            with open(agent_file,"r") as f:
                meta = json.load(f)

            true_states = meta["true_states"]
            true_states = np.array(true_states)
            true_states = true_states[1:]
            '''

            '''
            action_file = './paths/estimator_data.json'
            with open(action_file,"r") as f:
                data_estimator = json.load(f)

            actions = torch.tensor(data_estimator['actions'])
            '''

            #assert len(true_states) == len(actions)
            #steps = actions.shape[0]

            ###FOR EXPERIMENTS, TAKE THE FIRST 5 STEPS
            steps = 10

            # Process noise added to the simulated dynamics at each step.
            noise_std = extra_cfg['mpc_noise_std']
            noise_mean = extra_cfg['mpc_noise_mean']

            for iter in trange(steps):
                # For the first steps - 5 iterations re-plan via MPC; for the
                # final 5, replay the remaining planned actions directly.
                if iter < steps - 5:
                    action = traj.get_next_action().clone().detach()
                else:
                    action = traj.get_actions()[iter - steps + 5, :]
                #print(traj.get_actions().shape)
                #action = actions[iter]

                noise = np.random.normal(noise_mean, noise_std)

                # Step the "real" (simulated) dynamics and observe an image.
                true_pose, true_state, gt_img = agent.step(action, noise=noise)
                true_states = np.vstack((true_states, true_state))

                #true_pose, true_state, gt_img = agent.state2image(torch.tensor(true_states[iter]))

                #plt.figure()
                #plt.imsave('paths/true/'+ f'{iter}_gt_img.png', gt_img)
                #plt.close()
                #action = torch.tensor(actions[iter])

                #measured_state = estimator.optimize(start_state, sig, gt_img, true_pose)
                #measured_states.append(measured_state.cpu().detach().numpy().tolist())

                torch.cuda.empty_cache()

                # State estimation from the observed image (see docstring).
                then = time.time()
                state_est = filter.estimate_state(gt_img, true_pose, action,
                model_coarse=model_coarse, model_fine=model_fine, cfg=cfg, encode_position_fn=encode_position_fn,
                encode_direction_fn=encode_direction_fn)
                now = time.time()
                print('Estimator takes', now-then)

                #state_est_inerf_dyn = inerf_dynamics.estimate_state(gt_img, true_pose, action,
                #model_coarse=model_coarse, model_fine=model_fine, cfg=cfg, encode_position_fn=encode_position_fn,
                #encode_direction_fn=encode_direction_fn)

                # Feed the estimate back to the planner and re-optimize.
                then = time.time()
                if iter < steps - 5:
                    traj.update_state(state_est)
                    traj.learn_update(iter)
                now = time.time()
                print('Update planner', now - then)
                #plot_trajectory(traj.get_full_states(), true_states)

            # Persist per-trial estimator and agent logs, then reset the sim.
            filter.save_data(f'paths/{exp_name}/filter_data_{i}.json')
            #inerf_dynamics.save_data(f'paths/{exp_name}/inerf_dyn_data_{i}.json')
            agent.save_data(f'paths/{exp_name}/agent_data_{i}.json')

            agent.command_sim_reset()
            time.sleep(0.1)
        return

    else:
        ####################################### DEBUGING ENVIRONMENT ####################################################3
        '''
        to8b = lambda x : (255*np.clip(x,0,1)).astype(np.uint8)

        renderer = Renderer(hwf, K, chunk, render_kwargs_train)

        #Read in poses
        data_path = './paths/stonehenge_every_grad_step/'
        gt_data_path = data_path + 'agent_data.json'
        est_data_path = data_path + 'estimator_data.json'
        gt_img_path = data_path + 'true/'

        with open(gt_data_path,"r") as f:
            meta = json.load(f)

        true_states = meta["true_states"]
        true_states = np.array(true_states)

        with open(est_data_path,"r") as f:
            data_estimator1 = json.load(f)

        pixel_losses1 = []
        dyn_losses1 = []
        rot_errors1 = np.empty((0, 3))
        trans_errors1 = np.empty((0, 3))
        state_estimates1 = data_estimator1['state_estimates']
        covariances1 = data_estimator1['covariance']
        states1 = np.empty((0, 18))
        predicted_states1 = data_estimator1['predicted_states']
        actions1 = data_estimator1['actions']

        for iter, val in enumerate(data_estimator1["iterations"]):
            pixel_losses1 += data_estimator1['pixel_losses'][f'{iter}']
            dyn_losses1 += data_estimator1['dyn_losses'][f'{iter}']
            rot_errors1 = np.vstack((rot_errors1, np.array(data_estimator1['rot_errors'][f'{iter}'])))
            trans_errors1 = np.vstack((trans_errors1, np.array(data_estimator1['trans_errors'][f'{iter}'])))
            states1 = np.vstack((states1, np.array(data_estimator1['states'][f'{iter}'])))

        for it, state_est in enumerate(state_estimates1):
            testsavedir = data_path + f'gif_step{it}'
            os.makedirs(testsavedir, exist_ok=True)

            obs_img = imageio.imread(gt_img_path + f'{it}_gt_img.png')
            obs_img = obs_img[..., :3]
            obs_img = (np.array(obs_img) / 255.).astype(np.float32)

            imgs = []
            with torch.no_grad():
                for iter, state in enumerate(data_estimator1['states'][f'{it}']):
                    if iter < 60:
                        print('Iteration', iter, 'Image Number', it)
                        state = torch.tensor(state)
                        pose = state2pose(state)
                        sim_pose = convert_blender_to_sim_pose(pose.cpu().detach().numpy())
                        rgb = renderer.get_img_from_pose(torch.tensor(sim_pose))
                        rgb = rgb.cpu().detach().numpy()
                        rgb8 = to8b(rgb)
                        ref = to8b(obs_img)
                        print(rgb.shape)
                        print(ref.shape)
                        filename = os.path.join(testsavedir, str(iter)+'.png')
                        #dst = cv2.addWeighted(rgb8, 0.7, ref, 0.3, 0)
                        #imageio.imwrite(filename, dst)
                        #imgs.append(dst)
                        imageio.imwrite(filename, rgb8)
                        imgs.append(rgb8)
            #imageio.mimwrite(os.path.join(testsavedir, 'video.gif'), imgs, fps=8) #quality = 8 for mp4 format
        '''
        pass
    return
####################### END OF MAIN LOOP ##########################################
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--config", type=str, required=True, help="Path to (.yml) config file."
)
parser.add_argument(
"--load-checkpoint",
type=str,
default="",
help="Path to load saved checkpoint from.",
)
parser = extra_config_parser(parser)
configargs = parser.parse_args()
# Read config file.
cfg = None
with open(configargs.config, "r") as f:
cfg_dict = yaml.load(f, Loader=yaml.FullLoader)
cfg = CfgNode(cfg_dict)
# # (Optional:) enable this to track autograd issues when debugging
# torch.autograd.set_detect_anomaly(True)
# If a pre-cached dataset is available, skip the dataloader.
USE_CACHED_DATASET = False
train_paths, validation_paths = None, None
images, poses, render_poses, hwf, i_split = None, None, None, None, None
H, W, focal, i_train, i_val, i_test = None, None, None, None, None, None
#TODO: Implement CACHED DATASET!
'''
if hasattr(cfg.dataset, "cachedir") and os.path.exists(cfg.dataset.cachedir):
train_paths = glob.glob(os.path.join(cfg.dataset.cachedir, "train", "*.data"))
validation_paths = glob.glob(
os.path.join(cfg.dataset.cachedir, "val", "*.data")
)
USE_CACHED_DATASET = True
else:
'''
# Load dataset
images, poses, render_poses, hwf = None, None, None, None
if cfg.dataset.type.lower() == "blender":
images, poses, render_poses, hwf, i_split = load_blender_data(
cfg.dataset.basedir,
half_res=cfg.dataset.half_res,
testskip=cfg.dataset.testskip,
)
i_train, i_val, i_test = i_split
H, W, focal = hwf
H, W = int(H), int(W)
hwf = [H, W, focal]
if cfg.nerf.train.white_background:
images = images[..., :3] * images[..., -1:] + (1.0 - images[..., -1:])
elif cfg.dataset.type.lower() == "llff":
images, poses, bds, render_poses, i_test = load_llff_data(
cfg.dataset.basedir, factor=cfg.dataset.downsample_factor
)
hwf = poses[0, :3, -1]
poses = poses[:, :3, :4]
if not isinstance(i_test, list):
i_test = [i_test]
if cfg.dataset.llffhold > 0:
i_test = np.arange(images.shape[0])[:: cfg.dataset.llffhold]
i_val = i_test
i_train = np.array(
[
i
for i in np.arange(images.shape[0])
if (i not in i_test and i not in i_val)
]
)
H, W, focal = hwf
H, W = int(H), int(W)
hwf = [H, W, focal]
images = torch.from_numpy(images)
poses = torch.from_numpy(poses)
# Seed experiment for repeatability
seed = cfg.experiment.randomseed
np.random.seed(seed)
torch.manual_seed(seed)
# Device on which to run.
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
encode_position_fn = get_embedding_function(
num_encoding_functions=cfg.models.coarse.num_encoding_fn_xyz,
include_input=cfg.models.coarse.include_input_xyz,
log_sampling=cfg.models.coarse.log_sampling_xyz,
)
encode_direction_fn = None
if cfg.models.coarse.use_viewdirs:
encode_direction_fn = get_embedding_function(
num_encoding_functions=cfg.models.coarse.num_encoding_fn_dir,
include_input=cfg.models.coarse.include_input_dir,
log_sampling=cfg.models.coarse.log_sampling_dir,
)
# Initialize a coarse-resolution model.
model_coarse = getattr(models, cfg.models.coarse.type)(
num_encoding_fn_xyz=cfg.models.coarse.num_encoding_fn_xyz,
num_encoding_fn_dir=cfg.models.coarse.num_encoding_fn_dir,
include_input_xyz=cfg.models.coarse.include_input_xyz,
include_input_dir=cfg.models.coarse.include_input_dir,
use_viewdirs=cfg.models.coarse.use_viewdirs,
)
model_coarse.to(device)
# If a fine-resolution model is specified, initialize it.
model_fine = None
if hasattr(cfg.models, "fine"):
model_fine = getattr(models, cfg.models.fine.type)(
num_encoding_fn_xyz=cfg.models.fine.num_encoding_fn_xyz,
num_encoding_fn_dir=cfg.models.fine.num_encoding_fn_dir,
include_input_xyz=cfg.models.fine.include_input_xyz,
include_input_dir=cfg.models.fine.include_input_dir,
use_viewdirs=cfg.models.fine.use_viewdirs,
)
model_fine.to(device)
'''
# Initialize optimizer.
trainable_parameters = list(model_coarse.parameters())
if model_fine is not None:
trainable_parameters += list(model_fine.parameters())
optimizer = getattr(torch.optim, cfg.optimizer.type)(
trainable_parameters, lr=cfg.optimizer.lr
)
'''
# Load an existing checkpoint, if a path is specified.
if os.path.exists(configargs.load_checkpoint):
checkpoint = torch.load(configargs.load_checkpoint)
model_coarse.load_state_dict(checkpoint["model_coarse_state_dict"])
if checkpoint["model_fine_state_dict"]:
model_fine.load_state_dict(checkpoint["model_fine_state_dict"])
start_iter = checkpoint["iter"]
torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.cuda.empty_cache()
### PLANNER CONFIG DETAILS
# renderer = get_nerf('configs/stonehenge.txt')
# stonehenge - simple
#start_pos = torch.tensor([-0.9,-0.9, 0.])
#start_pos = torch.tensor([0.9,-0.2, 0.2])
#start_pos = torch.tensor([-0.31,-0.9, 0.])
#end_pos = torch.tensor([-0.2,0.55, 0.3])
#end_pos = torch.tensor([-0.3,0.5, 0.4])
#end_pos = torch.tensor([-0.55, 0.6, 0.4])
# start_pos = torch.tensor([-1, 0, 0.2])
# end_pos = torch.tensor([ 1, 0, 0.5])
#playground
#start_pos = torch.tensor([0.7,-0.2, 0.4])
#end_pos = torch.tensor([-0.35,0.55, 0.4])
#stonehenge
#start_pos = [0.39, -0.67, 0.2]
#end_pos = [-0.4, 0.55, 0.16]
#CHURCH
#start_pos = torch.tensor([-1.1,-0.8, 0.6])
#end_pos = torch.tensor([-1.64,-0.73, 0.59])
#Violin
#start_pos = torch.tensor([-0.9,-0.9, 0.2])
#end_pos = torch.tensor([0.4,0.75, 0.15])
#Kings Hall
#start_pos = torch.tensor([-0.12,-0.65, -0.24])
#end_pos = torch.tensor([-.1,0.33, -0.25])
#start_R = vec_to_rot_matrix( torch.tensor([0.0,0.0, .3]))
#end_R = vec_to_rot_matrix(torch.tensor([0.,0.0, 0.]))
start_pos = torch.tensor(cfg_dict['start_pos']).float()
end_pos = torch.tensor(cfg_dict['end_pos']).float()
start_R = vec_to_rot_matrix( torch.tensor(cfg_dict['start_R']))
end_R = vec_to_rot_matrix(torch.tensor(cfg_dict['end_R']))
### ASSUME ZERO INITIAL RATES
init_rates = torch.zeros(3)
start_state = torch.cat( [start_pos, init_rates, start_R.reshape(-1), init_rates], dim=0 )
end_state = torch.cat( [end_pos, init_rates, end_R.reshape(-1), init_rates], dim=0 )
### PLANNER CONFIGS
planner_cfg = {"T_final": cfg_dict['T_final'],
"steps": cfg_dict['steps'],
"lr": cfg_dict['planner_lr'],
"epochs_init": cfg_dict['epochs_init'],
"fade_out_epoch": cfg_dict['fade_out_epoch'],
"fade_out_sharpness": cfg_dict['fade_out_sharpness'],
"epochs_update": cfg_dict['epochs_update'],
'start_state': start_state.to(device),
'end_state': end_state.to(device),
'exp_name': cfg.experiment.id
}
### AGENT CONFIGS
agent_cfg = {'dt': planner_cfg["T_final"]/planner_cfg["steps"],
'mass': cfg_dict['mass'],
'g': cfg_dict['g'],
'I': torch.tensor(cfg_dict['I']).float().to(device),
'path': cfg_dict['path'],
'half_res': cfg.dataset.half_res,
'white_bg': cfg.nerf.train.white_background}
### FILTER CONFIGS
filter_cfg = {
'dil_iter': cfg_dict['dil_iter'],
'batch_size': cfg_dict['batch_size'],
'kernel_size': cfg_dict['kernel_size'],
'lrate': cfg_dict['lrate_relative_pose_estimation'],
'sampling_strategy': cfg_dict['sampling_strategy'],
'reject_thresh': cfg_dict['reject_thresh'],
'N_iter': cfg_dict['N_iter'],
'sig0': torch.tensor(cfg_dict['sig0']).float().to(device),
'Q': torch.tensor(cfg_dict['Q']).float().to(device),
'R': torch.tensor(cfg_dict['R']).float().to(device),
'H': H,
'W': W,
'focal': focal
}
### EXTRA CONFIGS
extra_cfg = {
'mpc_noise_std': np.array([float(i) for i in cfg_dict['mpc_noise_std']]),
'mpc_noise_mean': np.array([float(i) for i in cfg_dict['mpc_noise_mean']])
}
simulate(planner_cfg, agent_cfg, filter_cfg, extra_cfg, model_coarse, model_fine, cfg, encode_position_fn, encode_direction_fn)
|
{"hexsha": "0c11b4f56b548054401887288aed44d663ff5744", "size": 20261, "ext": "py", "lang": "Python", "max_stars_repo_path": "simulate.py", "max_stars_repo_name": "chengine/nerf-pytorch-MPC", "max_stars_repo_head_hexsha": "1844b4f70ce3680a923784816605831abb74bd4a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-07T07:59:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T09:01:12.000Z", "max_issues_repo_path": "simulate.py", "max_issues_repo_name": "chengine/nerf-pytorch-MPC", "max_issues_repo_head_hexsha": "1844b4f70ce3680a923784816605831abb74bd4a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simulate.py", "max_forks_repo_name": "chengine/nerf-pytorch-MPC", "max_forks_repo_head_hexsha": "1844b4f70ce3680a923784816605831abb74bd4a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9625246548, "max_line_length": 221, "alphanum_fraction": 0.6028823849, "include": true, "reason": "import numpy", "num_tokens": 4829}
|
"""
Utilities for matplotlib plotting.
"""
from builtins import range
import matplotlib.pyplot as mpl
from . import dictionary, funcargparse
from ..dataproc import waveforms
import numpy as np
class IRecurrentPlot(object):
    """
    Recurrent plot.
    Can be used to plot multiple similar datasets in the same plot.
    First plotting call creates figure and axes (calling :meth:`plot_prepare` method);
    all consecutive calls only change the data (calling :meth:`plot_next` method).
    This way the figure is preserved between the plotting calls, which decreases resource consumption and minimizes matplotlib memory leaks.
    Args:
        fig (matplotlib.figure.Figure): If not ``None``, the figure to use for plotting.
        auto_clear (bool): If ``True``, clear plot (empty data) before each subsequent plotting.
        auto_relim (bool): If ``True``, rescale plot after each plotting.
        auto_layout (bool): If ``True``, call `tight_layout` after each plotting.
    """
    def __init__(self, fig=None, auto_clear=True, auto_relim=True, auto_layout=True):
        object.__init__(self)
        self.fig=fig
        # Becomes True once plot_prepare() has run; guards against re-creating axes.
        self.prepared=False
        self.auto_relim=auto_relim
        self.auto_clear=auto_clear
        self.auto_layout=auto_layout
        # Name -> line-artist storage (project-specific nested Dictionary).
        self.lines=dictionary.Dictionary()
    def __setitem__(self, name, line):
        """Store a line artist under `name`; a one-element list (as returned by ``Axes.plot``) is unwrapped first."""
        if isinstance(line,list) and len(line)==1:
            line=line[0]
        self.lines[name]=line
    def __getitem__(self, name):
        """Return the line artist stored under `name`."""
        return self.lines[name]
    def plot_prepare(self, *args, **vargs):
        """
        Prepare plot.
        Abstract method, has to be overloaded in subclasses.
        Called once before the first plotting happens.
        """
        raise NotImplementedError("IRecurrentPlot.plot_prepare")
    def plot_clear(self):
        """Clear the plotted data (empties every stored line; non-line artists without ``set_data`` are skipped)."""
        for p in self.lines.iternodes():
            try:
                p.set_data([],[])
            except AttributeError:
                # Stored artist has no set_data (e.g. text); leave it untouched.
                pass
        return self
    def plot_next(self, *args, **kwargs):
        """
        Plot data.
        Abstract method, has to be overloaded in subclasses.
        Called every time the data is updated.
        """
        raise NotImplementedError("IRecurrentPlot.plot_next")
    def setup_figure(self):
        """
        Create a figure if it hasn't been created already.
        """
        if self.fig is None:
            self.fig=mpl.figure()
    def setup_prepare(self, *args, **kwargs):
        """
        Prepare the plot if it hasn't been prepared already.
        """
        if not self.prepared:
            self.setup_figure()
            self.plot_prepare(*args,**kwargs)
            self.prepared=True
    def plot(self, *args, **kwargs):
        """
        Plot the data.
        The supplied arguments are redirected to the overloaded methods :meth:`plot_prepare` and :meth:`plot_next`.
        """
        self.setup_prepare(*args,**kwargs)
        if self.auto_clear:
            self.plot_clear()
        self.plot_next(*args,**kwargs)
        if self.auto_relim:
            # Recompute data limits and rescale every axes to the new data.
            for plt in self.fig.axes:
                plt.relim()
                plt.autoscale_view()
        if self.auto_layout:
            self.fig.tight_layout()
        return self
    def savefig(self, path, *args, **kwargs):
        """
        Save the figure to the location defined by `path`.
        Arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.
        """
        if self.fig is not None:
            self.fig.savefig(path,*args,**kwargs)
        return self
    def close(self):
        """Clear and close the figure (releases matplotlib resources); the plotter can be reused afterwards."""
        if self.fig is not None:
            # Clear each axes, then the figure, then close it to free memory.
            for plt in self.fig.axes:
                plt.cla()
            self.fig.clf()
            mpl.close(self.fig)
            self.fig=None
            self.prepared=False
class GenericPlotter(IRecurrentPlot):
    """
    Generic multi-axes plotter.
    Args:
        axes_num (int): Number of axes in the figure (all are aligned vertically).
        plots_num: Number of plot lines;
            can be either an integer (all plots have the same number of lines) or an integer list of length `axes_num`.
        axes_names ([str]): Names of axes for referencing in plotting (ordered integers by default).
        log_x/log_y: Use log scale for x/y axes;
            can be either a single bool (all plots have the same scale) or list of a bool list of length `axes_num`.
        xlabel/ylabel: Labels for for x/y axes;
            can be either a single string (all plots have the same axes labels) or a string list with length `axes_num`.
        legend ([str]): Plot legends
        xscale/yscale: Scales for x/y axes (supplied data is multiplied by these scales before plotting);
            can be either a single float (all axes have the same scale)
            or list of floats with length `axes_num` (all plots in the same axes have the same scale)
            or list of lists of floats (specifies scale for each line in each plot separately)
        fig (matplotlib.figure.Figure): If not ``None``, the figure to use for plotting.
    """
    def __init__(self, axes_num=1, plots_num=1, axes_names=None, log_x=False, log_y=False, xlabel="", ylabel="", legend=None, xscale=1., yscale=1., fig=None):
        IRecurrentPlot.__init__(self,fig=fig)
        self.axes_num=axes_num
        self.axes_names=axes_names or list(range(axes_num))
        # as_sequence broadcasts a scalar argument into a per-axes list of length axes_num.
        self.plots_num=funcargparse.as_sequence(plots_num,axes_num)
        #plot_names=funcargparse.as_sequence(plot_names,axes_num)
        #self.plot_names=[funcargparse.as_sequence(pns,pn) if pn else range(pn) for pn,xs in zip(self.plots_num,plot_names)]
        self.plot_names=[list(range(pn)) for pn in self.plots_num]
        self.log_x=funcargparse.as_sequence(log_x,axes_num)
        self.log_y=funcargparse.as_sequence(log_y,axes_num)
        self.xlabel=funcargparse.as_sequence(xlabel,axes_num)
        self.ylabel=funcargparse.as_sequence(ylabel,axes_num)
        xscale=funcargparse.as_sequence(xscale,axes_num)
        yscale=funcargparse.as_sequence(yscale,axes_num)
        # Expand scales once more per-line, yielding [axes][line] nested lists.
        self.xscale=[funcargparse.as_sequence(xs,pn) for pn,xs in zip(self.plots_num,xscale)]
        self.yscale=[funcargparse.as_sequence(ys,pn) for pn,ys in zip(self.plots_num,yscale)]
        #legend=funcargparse.as_sequence(legend,axes_num)
        #self.legend=[funcargparse.as_sequence(l,pn,allowed_type="builtin;nostring") for pn,l in zip(self.plots_num,legend)]
        self.legend=funcargparse.as_sequence(legend,axes_num)
    def plot_prepare(self, *args, **kwargs):
        """
        Prepare the plot if it hasn't been prepared already.
        Creates one vertically stacked subplot per axes, one empty line per plot name,
        and applies the log-scale/label settings supplied to the constructor.
        """
        for a in range(self.axes_num):
            an=self.axes_names[a]
            ax=self.fig.add_subplot(self.axes_num,1,a+1)
            for pn in self.plot_names[a]:
                # Empty line; data is filled in later by plot_next.
                self[an,pn]=ax.plot([],[])
            if self.log_x[a]:
                ax.set_xscale("log")
            if self.log_y[a]:
                ax.set_yscale("log")
            ax.set_xlabel(self.xlabel[a])
            ax.set_ylabel(self.ylabel[a])
            ax.grid(which="both")
    def plot_next(self, data, legend=None):
        """
        Plot data.
        Data is a list of lists ``[axes_num][plot_num]`` of 1D or 2D 2-columns array.
        1D arrays are plotted against their index; 2D arrays use column 0 as x and column 1 as y.
        `legend` (optional) overrides the constructor-supplied legend for this call.
        """
        data=funcargparse.as_sequence(data,self.axes_num,allowed_type="builtin;nostring")
        legend=funcargparse.as_sequence(legend,self.axes_num,allowed_type="builtin;nostring") if (legend is not None) else self.legend
        for a in range(self.axes_num):
            an=self.axes_names[a]
            ad=funcargparse.as_sequence(data[a],self.plots_num[a],allowed_type="builtin;nostring")
            l=legend[a]
            for p,pn in enumerate(self.plot_names[a]):
                d=np.asarray(ad[p])
                xs,ys=self.xscale[a][p],self.yscale[a][p]
                if np.ndim(d)==1:
                    # 1D data: plot against the (scaled) sample index.
                    self[an,pn].set_data(np.arange(len(d))*xs,d*ys)
                else:
                    self[an,pn].set_data(d[:,0]*xs,d[:,1]*ys)
            if l is not None:
                self.fig.axes[a].legend(l)
            else:
                # No legend requested: create an empty one and hide it.
                self.fig.axes[a].legend([]).set_visible(False)
def add_all_subplots(fig, r, c=None, *args, **vargs):
    """
    Add an ``r``-by-``c`` grid of subplots to `fig` and return the newly added axes.
    If `c` is omitted, `r` is interpreted as a two-digit code ``rc``
    (e.g. ``23`` means 2 rows and 3 columns).
    Extra arguments are forwarded to :meth:`matplotlib.figure.Figure.add_subplot`.
    """
    if c is None:
        r, c = divmod(r, 10)
    total = r * c
    for idx in range(total):
        fig.add_subplot(r, c, idx + 1, *args, **vargs)
    return fig.axes[-total:]
def iterlabels(obj, include=["axes","ticks","title"]):
    """
    Iterate over text labels in `obj`.
    Args:
        obj: can be a single ``Axes`` or a ``Figure`` (in which case iteration goes over all contained axes).
        include ([str]): determines which kind of labels are iterated over. Can contain ``"axes"`` (axes label), ``"ticks"`` (axes tick labels) and ``"title"`` (plot title).
    """
    if isinstance(obj,mpl.Figure):
        # Recurse into every axes of the figure.
        for axes in obj.axes:
            for lab in iterlabels(axes, include=include):
                yield lab
        return
    if "axes" in include:
        for axis in (obj.xaxis, obj.yaxis):
            yield axis.label
    if "ticks" in include:
        for lab in obj.get_xticklabels():
            yield lab
        for lab in obj.get_yticklabels():
            yield lab
    if "title" in include:
        yield obj.title
def plot_func(func, rng, *args, **kwargs):
    """
    Plot a callable function over a given range.
    `rng` is a tuple `(start, stop, points_number)` forwarded to :func:`numpy.linspace` to generate plot points.
    The rest of the arguments is the same as in :func:`matplotlib.pyplot.plot`.
    """
    sample_x = np.linspace(*rng)
    sample_y = func(sample_x)
    mpl.plot(sample_x, sample_y, *args, **kwargs)
def plot_columns(data, x_column, y_column, *args, **kwargs):
    """
    Plot two data columns of `data` against each other.
    Column extraction is delegated to :mod:`waveforms`; extra arguments
    go to :func:`matplotlib.pyplot.plot`.
    """
    x_data = waveforms.get_x_column(data, x_column)
    y_data = waveforms.get_y_column(data, y_column)
    mpl.plot(x_data, y_data, *args, **kwargs)
|
{"hexsha": "f8cf0778600a75af6be6fc78424367c9c716235e", "size": 10058, "ext": "py", "lang": "Python", "max_stars_repo_path": "pylablib/core/utils/plotting.py", "max_stars_repo_name": "AlexShkarin/pyLabLib-v0", "max_stars_repo_head_hexsha": "1c3c59d4bcbea4a16eee916033972ee13a7d1af6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-03-06T08:31:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T15:02:06.000Z", "max_issues_repo_path": "pylablib/core/utils/plotting.py", "max_issues_repo_name": "AlexShkarin/pyLabLib-v0", "max_issues_repo_head_hexsha": "1c3c59d4bcbea4a16eee916033972ee13a7d1af6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-09-10T17:25:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-07T20:49:22.000Z", "max_forks_repo_path": "pylablib/core/utils/plotting.py", "max_forks_repo_name": "AlexShkarin/pyLabLib-v0", "max_forks_repo_head_hexsha": "1c3c59d4bcbea4a16eee916033972ee13a7d1af6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-08-16T09:02:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T10:58:53.000Z", "avg_line_length": 39.4431372549, "max_line_length": 173, "alphanum_fraction": 0.6089679857, "include": true, "reason": "import numpy", "num_tokens": 2328}
|
import numpy as np
import glob
import shutil
import os
import cv2
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
# Input dataset locations (VITON-HD training split on the shared server).
clothes_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/cloth'
clothes_mask_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/cloth-mask'
image_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/image'
image_parse_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/image-parse'
# Output root for the grayscaled ("blackened") copy of the dataset; recreated by main().
result_dir = '/home/ssai1/yjcho/blackened_datasets'
def load_one_image(image_path):
    """Read an image file and return it as an RGB numpy array."""
    rgb_img = Image.open(image_path).convert('RGB')
    return np.array(rgb_img)
def load_one_image_parse(image_parse_path):
    """Read a human-parse label image and return it as a numpy array.

    The image is deliberately NOT converted to RGB so that the raw
    parse label values are preserved.
    """
    parse_img = Image.open(image_parse_path)
    return np.array(parse_img)
def get_parse_clothes(img_parse):
    """Return a float32 mask that is 1.0 wherever the parse marks an upper garment.

    Parse labels 5, 6 and 7 are treated as upper-clothes classes; every
    other label maps to 0.0.

    Args:
        img_parse: numpy array of integer parse labels.
    """
    upper_labels = (5, 6, 7)
    return np.isin(img_parse, upper_labels).astype(np.float32)
def parse2mask(parse):
    """Binarize an upper-clothes parse in place and return it.

    Every strictly positive entry of `parse` is set to 1.0, turning a
    (possibly weighted) parse into a {0, 1} mask.

    Bug fixed: the original chained assignment
    ``upper_mask = parse[np.where(parse > 0.0)] = 1.0`` bound the scalar
    1.0 to ``upper_mask`` and the function returned None, so the mask was
    never returned to the caller. The in-place mutation of `parse` is kept
    for backward compatibility; the array is now also returned.

    Args:
        parse: numpy float array (modified in place).

    Returns:
        The same array, now containing only 0.0 and 1.0.
    """
    parse[parse > 0.0] = 1.0
    return parse
def clothes_darkenizer(img, mask):
    """Return a 3-channel grayscale version of the masked clothing region of `img`.

    Pixels where `mask` == 0 are zeroed out first, so only the clothing
    survives; the result is converted to grayscale via PIL and stacked back
    into three identical channels so it can replace RGB pixels.

    NOTE(review): this function writes three debug images
    ('np_clothes.jpg', 'PIL_clothes.jpg', 'gray_PIL.jpg') into the current
    working directory on EVERY call — presumably leftover debugging;
    consider removing before production use.
    """
    # print("mask", mask.shape)
    np_clothes = np.copy(img)
    # print(type(np_clothes), np_clothes.shape)
    np_clothes[np.where(mask == 0.0)] = 0.0 # only clothes will survive
    Image.fromarray(np.uint8(np_clothes)).save('np_clothes.jpg')
    PIL_clothes = Image.fromarray(np.uint8(np_clothes)).convert('RGB')
    PIL_clothes.save('PIL_clothes.jpg')
    PIL_gray_clothes = ImageOps.grayscale(PIL_clothes)
    PIL_gray_clothes.save('gray_PIL.jpg')
    np_gray_clothes = np.array(PIL_gray_clothes)
    # stack three times
    np_gray_clothes = np.stack([np_gray_clothes,np_gray_clothes,np_gray_clothes], axis=-1)
    return np_gray_clothes
def merge_images(img1, img2, img2_mask):
    """Overlay `img2` onto `img1` wherever `img2_mask` is non-zero.

    Args:
        img1: main (background) image array.
        img2: sub (overlay) image array, same shape as `img1`.
        img2_mask: array of the same shape; non-zero entries select
            pixels taken from `img2`.

    Returns:
        A new array; neither input is modified.
    """
    merged = np.copy(img1)
    overlay = img2_mask != 0
    merged[overlay] = img2[overlay]
    return merged
def main():
    """Rebuild the blackened dataset: grayscale the clothing region of every
    person image and every cloth product image, copying parses/masks through
    unchanged into `result_dir`.
    """
    # Start from a clean output tree (delete any previous run, then recreate).
    shutil.rmtree(result_dir) if os.path.exists(result_dir) else None
    os.mkdir(result_dir) if not os.path.exists(result_dir) else None
    result_cloth_dir = os.path.join(result_dir, 'cloth')
    result_cloth_mask_dir = os.path.join(result_dir, 'cloth-mask')
    result_image_dir = os.path.join(result_dir, 'image')
    result_image_parse_dir = os.path.join(result_dir, 'image-parse')
    os.mkdir(result_cloth_dir)
    os.mkdir(result_cloth_mask_dir)
    os.mkdir(result_image_dir)
    os.mkdir(result_image_parse_dir)
    # human image processing
    for img_path in glob.glob(os.path.join(image_dir, '*.jpg')):
        # Parse files share the basename with the photo but use .png.
        img_parse_path = os.path.join(image_parse_dir, os.path.basename(img_path)).replace('.jpg', '.png')
        img = load_one_image(img_path)
        img_parse = load_one_image_parse(img_parse_path)
        # Upper-clothes mask from the human parse; grayscale only that region.
        parse_upper = get_parse_clothes(img_parse)
        np_gray_clothes = clothes_darkenizer(img, parse_upper)
        result_img = merge_images(img, np_gray_clothes, parse_upper)
        PIL_result_img = Image.fromarray(result_img)
        PIL_result_img.save(os.path.join(result_image_dir, os.path.basename(img_path)))
        # Parse is copied through unchanged.
        Image.fromarray(img_parse).save(os.path.join(result_image_parse_dir, os.path.basename(img_parse_path)))
        # plt.imshow(np.array(result_img))
        # plt.show()
    # clothes image processing
    for clothes_path in glob.glob(os.path.join(clothes_dir, '*.jpg')):
        # Cloth masks share the exact basename (both .jpg).
        clothes_mask_path = os.path.join(clothes_mask_dir, os.path.basename(clothes_path))
        clothes = load_one_image(clothes_path)
        clothes_mask = load_one_image(clothes_mask_path)
        np_gray_clothes = clothes_darkenizer(clothes, clothes_mask)
        result_img = merge_images(clothes, np_gray_clothes, clothes_mask)
        PIL_result_img = Image.fromarray(result_img)
        PIL_result_img.save(os.path.join(result_cloth_dir, os.path.basename(clothes_path)))
        Image.fromarray(clothes_mask).save(os.path.join(result_cloth_mask_dir, os.path.basename(clothes_mask_path)))
        # plt.imshow(np.array(result_img))
        # plt.show()
# Script entry point: regenerate the blackened dataset from scratch.
if __name__ == '__main__':
    main()
|
{"hexsha": "86c60ddf6f3d06b937150afb93d5a8fd13d31287", "size": 4645, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scripts/clothes_blackenizer.py", "max_stars_repo_name": "choyoungjung/xray-align-AR", "max_stars_repo_head_hexsha": "18847c01008fe5a53bbdea5915a1a4e84e7c7f22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Scripts/clothes_blackenizer.py", "max_issues_repo_name": "choyoungjung/xray-align-AR", "max_issues_repo_head_hexsha": "18847c01008fe5a53bbdea5915a1a4e84e7c7f22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scripts/clothes_blackenizer.py", "max_forks_repo_name": "choyoungjung/xray-align-AR", "max_forks_repo_head_hexsha": "18847c01008fe5a53bbdea5915a1a4e84e7c7f22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6594202899, "max_line_length": 117, "alphanum_fraction": 0.6846071044, "include": true, "reason": "import numpy", "num_tokens": 1188}
|
[STATEMENT]
lemma lookup_combine [simp]:
"lookup (combine f t1 t2) k = combine_options f (lookup t1 k) (lookup t2 k)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lookup (RBT.combine f t1 t2) k = combine_options f (lookup t1 k) (lookup t2 k)
[PROOF STEP]
by (simp add: combine_altdef)
|
{"llama_tokens": 119, "file": null, "length": 1}
|
import pandas as pd
import numpy as np
from sklearn import preprocessing
import pygeohash as pgh
from copy import deepcopy
from sklearn.decomposition import PCA
import pygeohash as pgh
class Feature_Engineering:
    """Feature-engineering helpers for the San Francisco crime dataset.

    Each ``add_*``/transform method derives new columns from a pandas
    DataFrame; when ``add_feature`` is True the new column names are also
    appended to ``self.features``, which accumulates the model's feature list.
    Most methods mutate the passed DataFrame in place; ``onehot`` and
    ``odds_base_target`` return new frames instead.
    """
    def __init__(self):
        # Running list of engineered feature column names.
        self.features = []
    def extract_dt_time(self, data):
        """Split the datetime column `Dates` into Hour/Year/Month/Minute columns (in place)."""
        data['Hour'] = data.Dates.dt.hour
        data['Year'] = data.Dates.dt.year
        data['Month'] = data.Dates.dt.month
        # NOTE(review): minutes are shifted by -30 — presumably to center them
        # around the half hour; confirm against downstream usage.
        data['Minute'] = data.Dates.dt.minute - 30
    def onehot(self, data, columns, add_feature = True):
        """One-hot encode each column in `columns`, prefixing dummies with the
        column name, and return the augmented DataFrame (the original columns are kept)."""
        for col in columns:
            onehot = pd.get_dummies(data[col])
            onehot.columns = [col + '_' + str(x) for x in onehot.columns]
            data = pd.concat([data, onehot], axis = 1)
            if(add_feature):
                self.features += onehot.columns.tolist()
        return data
    def add_feature(self, columns):
        """Manually append column names to the accumulated feature list."""
        self.features += columns
    def add_seasons(self, data, add_feature = True):
        """Add four binary season-indicator columns derived from `Month` (in place)."""
        data['Summer'] = data['Month'].apply(lambda x: 1 if x in [6, 7, 8] else 0)
        data['Winter'] = data['Month'].apply(lambda x: 1 if x in [12, 1, 2] else 0)
        data['Autumn'] = data['Month'].apply(lambda x: 1 if x in [9, 10, 11] else 0)
        data['Spring'] = data['Month'].apply(lambda x: 1 if x in [3, 4, 5] else 0)
        if(add_feature):
            self.features += ['Summer', 'Winter', 'Autumn', 'Spring']
    def add_crossing(self, data, add_feature = True):
        """Add a binary `crossing` column: 1 when the address contains '/' (a street intersection)."""
        data['crossing'] = data['Address'].apply(lambda x: 1 if (x.find('/') != -1) else 0)
        if(add_feature):
            self.features += ['crossing']
    def Hour_bins(self, data, nbins = 4, add_feature = True):
        """Bucket `Hour` into 2, 3 or 4 binary time-of-day indicator columns (in place).

        Note the bin boundaries differ between the nbins variants (they were
        chosen per-experiment, not derived from a single scheme).
        """
        if(nbins == 2):
            data['morning'] = data['Hour'].apply(lambda x: 1 if x in [1, 2, 3, 4, 5, 6,7, 8, 9, 10, 11] else 0)
            data['night'] = data['Hour'].apply(lambda x: 1 if x in [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 0] else 0)
            if(add_feature):
                self.features += ['morning', 'night']
        elif(nbins == 3):
            data['night'] = data['Hour'].apply(lambda x: 1 if x in [1, 2, 3, 4, 5, 6,7] else 0)
            data['morning'] = data['Hour'].apply(lambda x: 1 if x in [8, 9, 10, 11] else 0)
            data['evening'] = data['Hour'].apply(lambda x: 1 if x in [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 0] else 0)
            if(add_feature):
                self.features += ['morning', 'night', 'evening']
        elif(nbins == 4):
            data['morning'] = data['Hour'].apply(lambda x: 1 if x in [7, 8, 9, 10, 11] else 0)
            data['evening'] = data['Hour'].apply(lambda x: 1 if x in [17, 18, 19, 20, 21, 22] else 0)
            data['night'] = data['Hour'].apply(lambda x: 1 if x in [23, 0, 1, 2, 3, 4, 5, 6] else 0)
            data['afternoon'] = data['Hour'].apply(lambda x: 1 if x in [12, 13, 14, 15, 16] else 0)
            if(add_feature):
                self.features += ['morning', 'night', 'evening', 'afternoon']
    def geohashing(self, train, test, precision = 3, pivot_col = 'Resolution', add_feature = True, is_onehot = True):
        """Add a `geohashes` column (pygeohash encoding of X/Y at `precision`) to both frames.

        NOTE(review): `pivot_col`, `add_feature` and `is_onehot` are currently
        unused — the one-hot alignment logic below is commented out.
        """
        train['geohashes'] = train.apply(lambda x: pgh.encode(x.X, x.Y, precision = precision), axis = 1)
        test['geohashes'] = test.apply(lambda x: pgh.encode(x.X, x.Y, precision = precision), axis = 1)
        # if(add_feature):
        #     self.features += geo1.unique().tolist()
        # for i in np.asarray(geo1.unique()):
        #     flag = 0
        #     for j in np.asarray(geo2.unique()):
        #         if(i == j):
        #             flag = 1
        #     if(flag == 0):
        #         test[i] = test[pivot_col].apply(lambda x: 0)
        #         if(add_feature):
        #             self.features += [j]
        # for i in np.asarray(geo2.unique()):
        #     flag = 0
        #     for j in np.asarray(geo1.unique()):
        #         if(i == j):
        #             flag = 1
        #     if(flag == 0):
        #         train[i] = train[pivot_col].apply(lambda x: 0)
        #         if(add_feature):
        #             self.features += [j]
        return train, test
    def X_Y_rot(self, data, add_feature = True):
        """Standardize X/Y and add rotated coordinate features (45/30/60 degrees) plus the radial distance (in place)."""
        data[['new_X', 'new_Y']] = data[['X', 'Y']]
        sc = preprocessing.StandardScaler()
        data[['new_X', 'new_Y']] = sc.fit_transform(data[['X', 'Y']])
        # 0.707 ~ cos(45°) = sin(45°); 1.732/2 ~ cos(30°) = sin(60°).
        data["rot45_X"], data["rot45_Y"] = .707 * data["new_Y"] + .707 * data["new_X"], .707 * data["new_Y"] - .707 * data["new_X"]
        data["rot30_X"], data["rot30_Y"] = (1.732/2) * data["new_X"] + (1./2) * data["new_Y"], (1.732/2) * data["new_Y"] - (1./2) * data["new_X"]
        data["rot60_X"], data["rot60_Y"] = (1./2) * data["new_X"] + (1.732/2) * data["new_Y"], (1./2)* data["new_Y"] - (1.732/2) * data["new_X"]
        data["radial_r"] = np.sqrt( np.power(data["new_Y"], 2) + np.power(data["new_X"], 2))
        if(add_feature):
            self.features += ['rot60_X', 'rot60_Y', 'rot30_X', 'rot30_Y', 'rot45_X', 'rot45_Y', 'radial_r']
    def odds_base_target(self, train, test, base, target, col_name_prefix, add_base_odds = False, add_feature = True): #odds of target given base
        """Add per-category log-odds features of `target` conditioned on `base`.

        For every category of `base`, computes log-odds of each `target` class
        (falling back to the marginal target log-odds when a cell has fewer
        than MIN_CAT_COUNTS samples), then broadcasts those vectors as columns
        named ``<col_name_prefix>_odds<i>``. Test-set categories unseen in
        training get the marginal log-odds. Returns the augmented
        (train, test) frames.
        """
        bas_sort = sorted(train[base].unique())
        tar_sort = sorted(train[target].unique())
        tar_counts = train.groupby([target]).size()
        bas_tar_counts = train.groupby([base, target]).size()
        bas_counts = train.groupby([base]).size()
        logodds = {}
        logoddsPA = {}
        # Minimum cell count before a conditional odds estimate replaces the marginal.
        MIN_CAT_COUNTS = 2
        # Marginal (prior) log-odds of each target class.
        tar_logodds = np.log(tar_counts / len(train)) - np.log(1.0 - tar_counts / float(len(train)))
        for bas in bas_sort:
            PA = bas_counts[bas] / float(len(train))
            logoddsPA[bas] = np.log(PA) - np.log(1.- PA)
            logodds[bas] = deepcopy(tar_logodds)
            for tar in bas_tar_counts[bas].keys():
                if (bas_tar_counts[bas][tar] > MIN_CAT_COUNTS) and bas_tar_counts[bas][tar] < bas_counts[bas]:
                    PA = bas_tar_counts[bas][tar] / float(bas_counts[bas])
                    logodds[bas][tar_sort.index(tar)] = np.log(PA) - np.log(1.0 - PA)
            logodds[bas] = pd.Series(logodds[bas])
            logodds[bas].index = range(len(tar_sort))
        # Broadcast the per-category odds vector into one column per target class.
        bas_features = train[base].apply(lambda x: logodds[x])
        bas_features.columns = [col_name_prefix + "_odds" + str(x) for x in range(len(bas_features.columns))]
        train = pd.concat([train, bas_features], axis = 1)
        if(add_base_odds):
            train[base + '_odds'] = train[base].apply(lambda x: logoddsPA[x])
        if(add_feature):
            self.features += bas_features.columns.tolist()
        new_bas_sort = sorted(test[base].unique())
        new_bas_counts = test.groupby(base).size()
        only_new = set(new_bas_sort + bas_sort) - set(bas_sort)
        only_old = set(new_bas_sort + bas_sort) - set(new_bas_sort)
        in_both = set(new_bas_sort).intersection(bas_sort)
        # Unseen test categories fall back to the marginal target log-odds.
        for bas in only_new:
            PA = new_bas_counts[bas] / float(len(test) + len(train))
            logoddsPA[bas] = np.log(PA) - np.log(1.- PA)
            logodds[bas] = deepcopy(tar_logodds)
            logodds[bas].index = range(len(tar_sort))
        # Categories in both splits: refresh the base prior with combined counts.
        for bas in in_both:
            PA = (bas_counts[bas] + new_bas_counts[bas]) / float(len(test) + len(train))
            logoddsPA[bas] = np.log(PA) - np.log(1.- PA)
        bas_features_te = test[base].apply(lambda x: logodds[x])
        bas_features_te.columns = [col_name_prefix + "_odds" + str(x) for x in range(len(bas_features_te.columns))]
        test = pd.concat([test, bas_features_te], axis = 1)
        if(add_base_odds):
            test[base + '_odds'] = test[base].apply(lambda x: logoddsPA[x])
        return train, test
    def bc_wc_oc(self, data, add_feature = False):
        """Map each crime `Category` to 'b' (blue-collar), 'w' (white-collar) or 'o' (other) in a new `bc_wc_oc` column.

        NOTE(review): the sanity check below reads ``data.train.bc_wc_oc`` —
        for a plain DataFrame this raises AttributeError; it presumably should
        be ``data.bc_wc_oc.unique()``. Left unchanged here; fix before use.
        """
        white_crime = ["FRAUD", "FORGERY/COUNTERFEITING", "BAD CHECKS" , "EXTORTION", "EMBEZZLEMENT", "SUSPICIOUS OCC", "BRIBERY", "GAMBLING"]
        blue_crime = ["VANDALISM", "LARCENY/THEFT", "STOLEN PROPERTY", "ROBBERY", "DRIVING UNDER THE INFLUENCE", "DISORDERLY CONDUCT", "LIQUOR LAWS", "VEHICLE THEFT", "ASSAULT", "KIDNAPPING", "TRESPASS", "ARSON", "RECOVERED VEHICLE", "SEX OFFENSES FORCIBLE","WEAPON LAWS", "DRUG/NARCOTIC", "FAMILY OFFENSES", "BURGLARY"]
        other_crime = ["MISSING PERSON", "RUNAWAY", 'PROSTITUTION', "DRUNKENNESS", "SUICIDE", "LOITERING", "OTHER OFFENSES", "NON-CRIMINAL", "WARRANTS", "SECONDARY CODES"]
        data['bc_wc_oc'] = data['Category'].apply(lambda x: 'b' if x in blue_crime else ('w' if x in white_crime else ('o' if x in other_crime else 'error')))
        if 'error' in data.train.bc_wc_oc.unique():
            print('all categories not found')
        if(add_feature):
            self.features += ['bc_wc_oc']
    def apply_PCA(self, data, n_components):
        """Fit PCA on `data` and return the transformed array with `n_components` components."""
        pca = PCA(n_components = n_components)
        pca.fit(data)
        new_data = pca.transform(data)
        return new_data
    def add_cols(self, data, col1, col2, col_type, new_col_name):
        """Create `new_col_name` as the element-wise sum/concatenation of `col1` and `col2` cast to `col_type` (in place)."""
        data[new_col_name] = data[col1].astype(col_type) + data[col2].astype(col_type)
    def correlated_columns(self, data, lim):
        """Return a boolean keep-mask over columns, dropping the later column of
        every pair whose absolute correlation reaches `lim`."""
        corr = data.corr()
        columns = np.full((corr.shape[0],), True, dtype=bool)
        for i in range(corr.shape[0]):
            for j in range(i+1, corr.shape[0]):
                if corr.iloc[i,j] >= lim or corr.iloc[i,j] <= -lim:
                    if columns[j]:
                        columns[j] = False
        return columns
|
{"hexsha": "414331123a6a2b06de2c660296f998af8dd0a995", "size": 9490, "ext": "py", "lang": "Python", "max_stars_repo_path": "Feature_Engineering.py", "max_stars_repo_name": "tanishq1g/San_Francisco_Crime_Classification__kaggle", "max_stars_repo_head_hexsha": "68f46c9b647c366a5bfe0701959bac5faab3cf47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Feature_Engineering.py", "max_issues_repo_name": "tanishq1g/San_Francisco_Crime_Classification__kaggle", "max_issues_repo_head_hexsha": "68f46c9b647c366a5bfe0701959bac5faab3cf47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Feature_Engineering.py", "max_forks_repo_name": "tanishq1g/San_Francisco_Crime_Classification__kaggle", "max_forks_repo_head_hexsha": "68f46c9b647c366a5bfe0701959bac5faab3cf47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.7222222222, "max_line_length": 321, "alphanum_fraction": 0.5640674394, "include": true, "reason": "import numpy", "num_tokens": 2861}
|
# -*- coding: utf-8 -*-
"""
analyze and plot results of experiments
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb
import yaml
# Load the per-experiment hyperparameter-search result tables (semicolon-separated CSVs).
#E2: How large can I make my output domain without loosing skill?
E2_results = pd.read_csv('param_optimization/E2_results_t2m_34_t2m.csv',sep =';')
#E1:
E1_results = pd.read_csv('param_optimization/E1_results_t2m_34_t2m.csv',sep =';')
#E1 label smoothing
E1_smooth_results = pd.read_csv('param_optimization/E1_label_smoothing_results_t2m_34_t2m_14_10_2021.csv',sep =';')
#E1 refined
E1_ref_results= pd.read_csv('param_optimization/E1_refined_results_t2m_34_t2m_ls0.4.csv',sep =';')
# Merge in the label-smoothing-0.4 runs from the earlier sweep so the refined table is complete.
E1_ref_add = pd.read_csv('param_optimization/E1_label_smoothing_results_t2m_34_t2m_14_10_2021.csv',sep =';')
E1_ref_add = E1_ref_add.where(E1_ref_add.label_smoothing == 0.4).dropna()
E1_ref_results = pd.concat([E1_ref_results, E1_ref_add])
E1_ref_results.reset_index(inplace = True)
E1_ref_results_06 = pd.read_csv('param_optimization/E1_refined_results_t2m_34_t2m_0.6.csv',sep =';')
E1_ref_results_06 = pd.concat([E1_ref_results_06, E1_ref_add])
E1_ref_results_06.reset_index(inplace = True)
#E4
E4_results = pd.read_csv('param_optimization/E4_results_t2m_34_t2m_all.csv',sep =';')
#E3 (results were produced fold-wise in two batches; concatenate them)
E3_results01 = pd.read_csv('param_optimization/E3_results_t2m_34_t2m_folds_0_1.csv',sep =';')
E3_results25 = pd.read_csv('param_optimization/E3_results_t2m_34_t2m_folds_2_5.csv',sep =';')
E3_results= pd.concat([E3_results01, E3_results25])
E3_results.reset_index(inplace = True)
#%%
###############################################################################
###################################E2
#E2 sharpness of forecasts: min/max predicted probabilities vs training epochs,
# one line style per extreme, colored by the radial-basis-function radius.
E2_sharpness = E2_results[['fold_no','radius_basis_func','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
# Melt min/max into a single 'probability' column so seaborn can style by it.
E2_sharpness = E2_sharpness.melt(id_vars = ['fold_no','radius_basis_func','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2'],
                           value_vars = ['min_percentage', 'max_percentage'], var_name = 'minmax', value_name = 'probability')
plt.figure()
sb.lineplot(x="epochs", y="probability",hue="radius_basis_func", style="minmax", data=E2_sharpness)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)#ncol = 2)
plt.xticks([5,10])
plt.tight_layout()
plt.savefig('plots/E2_sharpness.png')
#%%
#E2 achieved RPSS: melt the two validation years into one 'RPSS' column and
# plot against epochs with a zero-skill reference line.
results_epoch_end = E2_results[['fold_no','radius_basis_func','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
results_epoch_end = results_epoch_end.melt(id_vars = ['fold_no','radius_basis_func','epochs', 'accuracy', 'loss', 'min_percentage', 'max_percentage'],
                                 value_vars = ['RPSS_year1', 'RPSS_year2'], var_name = 'year', value_name = 'RPSS')
plt.figure()
sb.lineplot( data = results_epoch_end, x = 'epochs', y = 'RPSS', hue = 'radius_basis_func')#, style = 'year')
plt.xticks([5,10])
# Zero line marks the skill of the climatological reference forecast.
plt.hlines(y = 0, xmin= results_epoch_end.epochs.min(), xmax = results_epoch_end.epochs.max(), color = 'black')
plt.tight_layout()
plt.savefig('plots/E2_RPSS.png')
#%%
#E2 history
def extract_history(results):
    """Parse the Keras training histories stored as YAML strings in `results`.

    Rows are assumed to come in consecutive pairs belonging to the same run
    (two training stages); their histories are concatenated. For each pair a
    tidy DataFrame is built with columns accuracy/loss/dataset
    ('train'/'validation')/epochs/radius/fold, and all pairs are concatenated.

    Fixes over the original:
    - ``yaml.safe_load`` instead of ``yaml.load`` (the no-Loader form is
      deprecated and raises TypeError on PyYAML >= 6; safe_load is equivalent
      for these plain scalar/list histories).
    - Each YAML string is parsed once instead of once per metric (4x fewer parses).
    - The epoch axis is derived from the history length instead of being
      hard-coded to 10 entries, so runs with other epoch counts don't crash.

    Args:
        results: DataFrame with columns `history` (YAML dict strings),
            `radius_basis_func` and `fold_no`. Assumes a default RangeIndex
            (rows are accessed by positional label).

    Returns:
        pd.DataFrame: concatenated tidy history with the original pair index
        preserved in an `index` column (via reset_index).
    """
    df_list = []
    for i in range(0, results.shape[0], 2):
        # Parse both stages of this run once.
        hist_a = yaml.safe_load(results.history[i])
        hist_b = yaml.safe_load(results.history[i + 1])
        accuracy = hist_a['accuracy'] + hist_b['accuracy']
        val_accuracy = hist_a['val_accuracy'] + hist_b['val_accuracy']
        loss = hist_a['loss'] + hist_b['loss']
        val_loss = hist_a['val_loss'] + hist_b['val_loss']
        # 1-based epoch numbers spanning both concatenated stages.
        epochs = np.arange(1, len(accuracy) + 1)
        df_tr = pd.DataFrame(accuracy, columns = ['accuracy'])
        df_tr['loss'] = loss
        df_tr['dataset'] = 'train'
        df_tr['epochs'] = epochs
        df_val = pd.DataFrame(val_accuracy, columns = ['accuracy'])
        df_val['loss'] = val_loss
        df_val['dataset'] = 'validation'
        df_val['epochs'] = epochs
        df = pd.concat([df_tr, df_val])
        df['radius'] = results.radius_basis_func[i]
        df['fold'] = results.fold_no[i]
        df_list.append(df)
    history = pd.concat(df_list)
    history.reset_index(inplace = True)
    return history
E2_history = extract_history(E2_results)
#%%
#E2 train and val accuracy and loss from the model history
# Accuracy and loss curves per radius, train vs. validation.
plt.figure()
sb.lineplot(x = 'epochs', y = 'accuracy', hue = 'radius', style = 'dataset', data = E2_history)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)
plt.tight_layout()
plt.savefig('plots/E2_accuracy.png')
plt.figure()
sb.lineplot(x = 'epochs', y = 'loss', hue = 'radius', style = 'dataset', data = E2_history)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)
plt.tight_layout()
plt.savefig('plots/E2_loss.png')
#%%
###############################################################################
##########################E1
#E1 sharpness
# Same plotting sequence as the E2 section, for E1_results, colored by
# input_lats instead of radius.  NOTE(review): assumes E1_results is defined
# earlier in the file -- confirm.
E1_sharpness = E1_results[['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
E1_sharpness = E1_sharpness.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2'],
               value_vars = ['min_percentage', 'max_percentage'], var_name = 'minmax', value_name = 'probability')
plt.figure()
sb.lineplot(x="epochs", y="probability",hue="input_lats", style="minmax", data=E1_sharpness)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)#ncol = 2)
plt.xticks([5,10])
plt.tight_layout()
plt.savefig('plots/E1_sharpness.png')
#%%
#E1 achieved RPSS
results_epoch_end_E1 = E1_results[['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
results_epoch_end_E1 = results_epoch_end_E1.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'min_percentage', 'max_percentage'],
                   value_vars = ['RPSS_year1', 'RPSS_year2'], var_name = 'year', value_name = 'RPSS')
plt.figure()
sb.lineplot( data = results_epoch_end_E1, x = 'epochs', y = 'RPSS', hue = 'input_lats')#, style = 'year')
plt.xticks([5,10])
# Reference line at RPSS = 0.
plt.hlines(y = 0, xmin= results_epoch_end_E1.epochs.min(), xmax = results_epoch_end_E1.epochs.max(), color = 'black')
plt.tight_layout()
plt.savefig('plots/E1_RPSS.png')
#%%
#E1 history
def extract_history_E1(results):
    """Rebuild per-epoch train/validation curves for the E1 experiment.

    Each fold is stored as two consecutive rows whose ``history`` column is
    a YAML-serialized Keras ``History.history`` dict; the rows are
    concatenated into one continuous run.

    Args:
        results: DataFrame with columns ``history``, ``radius_basis_func``,
            ``input_lats``, ``input_lons`` and ``fold_no``.  Rows of one
            fold must be adjacent and the row count a multiple of 2.

    Returns:
        Long-format DataFrame with columns ``accuracy``, ``loss``,
        ``dataset``, ``epochs``, ``radius``, ``input_lats``, ``input_lons``
        and ``fold``.
    """
    rows_per_fold = 2
    df_list = []
    for i in range(0, results.shape[0], rows_per_fold):
        # Parse each serialized history once (the original parsed every
        # string four times); yaml.load without a Loader is deprecated.
        parts = [yaml.safe_load(results['history'].iloc[i + j])
                 for j in range(rows_per_fold)]
        curves = {key: [v for part in parts for v in part[key]]
                  for key in ('accuracy', 'val_accuracy', 'loss', 'val_loss')}
        # Derive the epoch axis from the data instead of hard-coding 1..10.
        epochs = np.arange(1, len(curves['accuracy']) + 1)
        df_tr = pd.DataFrame({'accuracy': curves['accuracy'],
                              'loss': curves['loss']})
        df_tr['dataset'] = 'train'
        df_tr['epochs'] = epochs
        df_val = pd.DataFrame({'accuracy': curves['val_accuracy'],
                               'loss': curves['val_loss']})
        df_val['dataset'] = 'validation'
        df_val['epochs'] = epochs
        df = pd.concat([df_tr, df_val])
        # Positional access (.iloc) also works after filtering/reindexing.
        df['radius'] = results['radius_basis_func'].iloc[i]
        df['input_lats'] = results['input_lats'].iloc[i]
        df['input_lons'] = results['input_lons'].iloc[i]
        df['fold'] = results['fold_no'].iloc[i]
        df_list.append(df)
    history = pd.concat(df_list)
    history.reset_index(inplace=True)
    return history
E1_history = extract_history_E1(E1_results)
# Accuracy and loss curves per input_lats, train vs. validation.
plt.figure()
sb.lineplot(x = 'epochs', y = 'accuracy', hue = 'input_lats', style = 'dataset', data = E1_history)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)
plt.tight_layout()
plt.savefig('plots/E1_accuracy.png')
plt.figure()
sb.lineplot(x = 'epochs', y = 'loss', hue = 'input_lats', style = 'dataset', data = E1_history)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)
plt.tight_layout()
plt.savefig('plots/E1_loss.png')
#%%
###############################################################################
##########################E1 label smoothing
#E1 sharpness
# Same plotting sequence for the label-smoothing variant of E1; hue is the
# label_smoothing factor.  NOTE(review): assumes E1_smooth_results is defined
# earlier in the file -- confirm.
E1_sharpness = E1_smooth_results[['fold_no','radius_basis_func','input_lats','input_lons','label_smoothing','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
E1_sharpness = E1_sharpness.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons','label_smoothing','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2'],
               value_vars = ['min_percentage', 'max_percentage'], var_name = 'minmax', value_name = 'probability')
plt.figure()
sb.lineplot(x="epochs", y="probability",hue="label_smoothing", style="minmax", data=E1_sharpness)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)#ncol = 2)
plt.xticks([5,10,15,20])
plt.tight_layout()
plt.savefig('plots/E1_smooth_sharpness.png')
#%%
#E1 achieved RPSS
results_epoch_end_E1 = E1_smooth_results[['fold_no','radius_basis_func','input_lats','input_lons','label_smoothing','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
results_epoch_end_E1 = results_epoch_end_E1.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons','label_smoothing','epochs', 'accuracy', 'loss', 'min_percentage', 'max_percentage'],
                   value_vars = ['RPSS_year1', 'RPSS_year2'], var_name = 'year', value_name = 'RPSS')
plt.figure()
sb.lineplot( data = results_epoch_end_E1, x = 'epochs', y = 'RPSS', hue = 'label_smoothing')
plt.xticks([5,10,15,20])
# Reference line at RPSS = 0.
plt.hlines(y = 0, xmin= results_epoch_end_E1.epochs.min(), xmax = results_epoch_end_E1.epochs.max(), color = 'black')
plt.tight_layout()
plt.savefig('plots/E1_smooth_RPSS.png')
#%%
#E1 history
def extract_history_E1(results):
    """Rebuild per-epoch curves for the label-smoothing E1 experiment.

    Redefines the earlier ``extract_history_E1``: here each fold spans four
    consecutive rows (20 epochs total) and the ``label_smoothing`` column is
    carried through.

    Args:
        results: DataFrame with columns ``history``, ``radius_basis_func``,
            ``input_lats``, ``input_lons``, ``label_smoothing`` and
            ``fold_no``.  Rows of one fold must be adjacent and the row
            count a multiple of 4.

    Returns:
        Long-format DataFrame of accuracy/loss per epoch and dataset split,
        annotated with the fold's hyper-parameters.
    """
    rows_per_fold = 4
    df_list = []
    for i in range(0, results.shape[0], rows_per_fold):
        # Parse each serialized history once (the original parsed every
        # string four times); yaml.load without a Loader is deprecated.
        parts = [yaml.safe_load(results['history'].iloc[i + j])
                 for j in range(rows_per_fold)]
        curves = {key: [v for part in parts for v in part[key]]
                  for key in ('accuracy', 'val_accuracy', 'loss', 'val_loss')}
        # Derive the epoch axis from the data instead of hard-coding 1..20.
        epochs = np.arange(1, len(curves['accuracy']) + 1)
        df_tr = pd.DataFrame({'accuracy': curves['accuracy'],
                              'loss': curves['loss']})
        df_tr['dataset'] = 'train'
        df_tr['epochs'] = epochs
        df_val = pd.DataFrame({'accuracy': curves['val_accuracy'],
                               'loss': curves['val_loss']})
        df_val['dataset'] = 'validation'
        df_val['epochs'] = epochs
        df = pd.concat([df_tr, df_val])
        # Positional access (.iloc) also works after filtering/reindexing.
        df['radius'] = results['radius_basis_func'].iloc[i]
        df['input_lats'] = results['input_lats'].iloc[i]
        df['input_lons'] = results['input_lons'].iloc[i]
        df['label_smoothing'] = results['label_smoothing'].iloc[i]
        df['fold'] = results['fold_no'].iloc[i]
        df_list.append(df)
    history = pd.concat(df_list)
    history.reset_index(inplace=True)
    return history
E1_history = extract_history_E1(E1_smooth_results)
#%%
# Accuracy and loss curves per label_smoothing factor, train vs. validation.
plt.figure()
sb.lineplot(x = 'epochs', y = 'accuracy', hue = 'label_smoothing', style = 'dataset', data = E1_history)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)
plt.tight_layout()
plt.savefig('plots/E1_smooth_accuracy.png')
plt.figure()
sb.lineplot(x = 'epochs', y = 'loss', hue = 'label_smoothing', style = 'dataset', data = E1_history)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)
plt.tight_layout()
plt.savefig('plots/E1_smooth_loss.png')
#%%
###############################################################################
##########################E1 refined
#E1 sharpness
# Same plotting sequence for the refined E1 run; hue switches to input_lons.
# NOTE(review): assumes E1_ref_results is defined earlier in the file.
E1_sharpness = E1_ref_results[['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
E1_sharpness = E1_sharpness.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2'],
               value_vars = ['min_percentage', 'max_percentage'], var_name = 'minmax', value_name = 'probability')
plt.figure()
sb.lineplot(x="epochs", y="probability",hue="input_lons", style="minmax", data=E1_sharpness)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)#ncol = 2)
plt.xticks([5,10,15,20])
plt.tight_layout()
plt.savefig('plots/E1_ref_sharpness.png')
#%%
#E1 achieved RPSS
results_epoch_end_E1 = E1_ref_results[['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
results_epoch_end_E1 = results_epoch_end_E1.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'min_percentage', 'max_percentage'],
                   value_vars = ['RPSS_year1', 'RPSS_year2'], var_name = 'year', value_name = 'RPSS')
plt.figure()
sb.lineplot( data = results_epoch_end_E1, x = 'epochs', y = 'RPSS', hue = 'input_lons')
plt.xticks([5,10,15,20])
# Reference line at RPSS = 0.
plt.hlines(y = 0, xmin= results_epoch_end_E1.epochs.min(), xmax = results_epoch_end_E1.epochs.max(), color = 'black')
plt.tight_layout()
plt.savefig('plots/E1_ref_RPSS.png')
#%%
#E1 history
def extract_history_E1(results):
    """Rebuild per-epoch curves for the refined E1 experiment.

    Identical in structure to the preceding definition (and shadows it):
    four consecutive rows per fold, 20 epochs, hyper-parameters carried
    through including ``label_smoothing``.

    Args:
        results: DataFrame with columns ``history``, ``radius_basis_func``,
            ``input_lats``, ``input_lons``, ``label_smoothing`` and
            ``fold_no``; row count a multiple of 4.

    Returns:
        Long-format DataFrame of accuracy/loss per epoch and dataset split.
    """
    rows_per_fold = 4
    df_list = []
    for i in range(0, results.shape[0], rows_per_fold):
        # Parse each serialized history once (the original parsed every
        # string four times); yaml.load without a Loader is deprecated.
        parts = [yaml.safe_load(results['history'].iloc[i + j])
                 for j in range(rows_per_fold)]
        curves = {key: [v for part in parts for v in part[key]]
                  for key in ('accuracy', 'val_accuracy', 'loss', 'val_loss')}
        # Derive the epoch axis from the data instead of hard-coding 1..20.
        epochs = np.arange(1, len(curves['accuracy']) + 1)
        df_tr = pd.DataFrame({'accuracy': curves['accuracy'],
                              'loss': curves['loss']})
        df_tr['dataset'] = 'train'
        df_tr['epochs'] = epochs
        df_val = pd.DataFrame({'accuracy': curves['val_accuracy'],
                               'loss': curves['val_loss']})
        df_val['dataset'] = 'validation'
        df_val['epochs'] = epochs
        df = pd.concat([df_tr, df_val])
        # Positional access (.iloc) also works after filtering/reindexing.
        df['radius'] = results['radius_basis_func'].iloc[i]
        df['input_lats'] = results['input_lats'].iloc[i]
        df['input_lons'] = results['input_lons'].iloc[i]
        df['label_smoothing'] = results['label_smoothing'].iloc[i]
        df['fold'] = results['fold_no'].iloc[i]
        df_list.append(df)
    history = pd.concat(df_list)
    history.reset_index(inplace=True)
    return history
E1_history = extract_history_E1(E1_ref_results)
plt.figure()
sb.lineplot(x = 'epochs', y = 'accuracy', hue = 'input_lons', style = 'dataset', data = E1_history)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)
plt.tight_layout()
plt.savefig('plots/E1_ref_accuracy.png')
plt.figure()
sb.lineplot(x = 'epochs', y = 'loss', hue = 'input_lons', style = 'dataset', data = E1_history)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)
plt.tight_layout()
plt.savefig('plots/E1_ref_loss.png')
#%%
###############################################################################
##########################E1 refined 0.6
#E1 sharpness
# Same plotting sequence for the refined-0.6 variant.
# NOTE(review): assumes E1_ref_results_06 is defined earlier in the file.
E1_sharpness = E1_ref_results_06[['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
E1_sharpness = E1_sharpness.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2'],
               value_vars = ['min_percentage', 'max_percentage'], var_name = 'minmax', value_name = 'probability')
plt.figure()
sb.lineplot(x="epochs", y="probability",hue="input_lons", style="minmax", data=E1_sharpness)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)#ncol = 2)
plt.xticks([5,10,15,20])
plt.tight_layout()
plt.savefig('plots/E1_ref_sharpness_06.png')
#%%
#E1 achieved RPSS
results_epoch_end_E1 = E1_ref_results_06[['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
results_epoch_end_E1 = results_epoch_end_E1.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'min_percentage', 'max_percentage'],
                   value_vars = ['RPSS_year1', 'RPSS_year2'], var_name = 'year', value_name = 'RPSS')
plt.figure()
sb.lineplot( data = results_epoch_end_E1, x = 'epochs', y = 'RPSS', hue = 'input_lons')#, style = 'year')
plt.xticks([5,10,15,20])
# Reference line at RPSS = 0.
plt.hlines(y = 0, xmin= results_epoch_end_E1.epochs.min(), xmax = results_epoch_end_E1.epochs.max(), color = 'black')
plt.tight_layout()
plt.savefig('plots/E1_ref_RPSS_06.png')
#%%
#E1 history
def extract_history_E1(results):
    """Rebuild per-epoch curves for the refined-0.6 E1 experiment.

    Identical in structure to the preceding definition (and shadows it):
    four consecutive rows per fold, 20 epochs.

    Args:
        results: DataFrame with columns ``history``, ``radius_basis_func``,
            ``input_lats``, ``input_lons``, ``label_smoothing`` and
            ``fold_no``; row count a multiple of 4.

    Returns:
        Long-format DataFrame of accuracy/loss per epoch and dataset split.
    """
    rows_per_fold = 4
    df_list = []
    for i in range(0, results.shape[0], rows_per_fold):
        # Parse each serialized history once (the original parsed every
        # string four times); yaml.load without a Loader is deprecated.
        parts = [yaml.safe_load(results['history'].iloc[i + j])
                 for j in range(rows_per_fold)]
        curves = {key: [v for part in parts for v in part[key]]
                  for key in ('accuracy', 'val_accuracy', 'loss', 'val_loss')}
        # Derive the epoch axis from the data instead of hard-coding 1..20.
        epochs = np.arange(1, len(curves['accuracy']) + 1)
        df_tr = pd.DataFrame({'accuracy': curves['accuracy'],
                              'loss': curves['loss']})
        df_tr['dataset'] = 'train'
        df_tr['epochs'] = epochs
        df_val = pd.DataFrame({'accuracy': curves['val_accuracy'],
                               'loss': curves['val_loss']})
        df_val['dataset'] = 'validation'
        df_val['epochs'] = epochs
        df = pd.concat([df_tr, df_val])
        # Positional access (.iloc) also works after filtering/reindexing.
        df['radius'] = results['radius_basis_func'].iloc[i]
        df['input_lats'] = results['input_lats'].iloc[i]
        df['input_lons'] = results['input_lons'].iloc[i]
        df['label_smoothing'] = results['label_smoothing'].iloc[i]
        df['fold'] = results['fold_no'].iloc[i]
        df_list.append(df)
    history = pd.concat(df_list)
    history.reset_index(inplace=True)
    return history
E1_history = extract_history_E1(E1_ref_results_06)
plt.figure()
sb.lineplot(x = 'epochs', y = 'accuracy', hue = 'input_lons', style = 'dataset', data = E1_history)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)
plt.tight_layout()
plt.savefig('plots/E1_ref_accuracy_06.png')
plt.figure()
sb.lineplot(x = 'epochs', y = 'loss', hue = 'input_lons', style = 'dataset', data = E1_history)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)
plt.tight_layout()
plt.savefig('plots/E1_ref_loss_06.png')
#%%
#%%
###############################################################################
##########################E4
#E4 sharpness
# Same plotting sequence for experiment E4; hue is the feature set used.
# NOTE(review): assumes E4_results is defined earlier in the file.
E4_sharpness = E4_results[['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage','features']]
E4_sharpness = E4_sharpness.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'features'],
               value_vars = ['min_percentage', 'max_percentage'], var_name = 'minmax', value_name = 'probability')
plt.figure(figsize= (6,5))
sb.lineplot(x="epochs", y="probability",hue="features", style="minmax", data=E4_sharpness)
plt.legend(bbox_to_anchor=(1, -0.1),borderaxespad=0)#ncol = 2)
plt.xticks([5,10,15,20])
plt.tight_layout()
plt.savefig('plots/E4_sharpness.png')
#%%
#E4 achieved RPSS
results_epoch_end_E4 = E4_results[['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage','features']]
results_epoch_end_E4 = results_epoch_end_E4.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons','epochs', 'accuracy', 'loss', 'min_percentage', 'max_percentage','features'],
                   value_vars = ['RPSS_year1', 'RPSS_year2'], var_name = 'year', value_name = 'RPSS')
plt.figure()
sb.lineplot( data = results_epoch_end_E4, x = 'epochs', y = 'RPSS', hue = 'features')
plt.xticks([5,10,15,20])
plt.legend(bbox_to_anchor=(1, -0.2),borderaxespad=0)
# Reference line at RPSS = 0.
plt.hlines(y = 0, xmin= results_epoch_end_E4.epochs.min(), xmax = results_epoch_end_E4.epochs.max(), color = 'black')
plt.tight_layout()
plt.savefig('plots/E4_RPSS.png')
#%%
#E1 history
def extract_history_E1(results):
    """Rebuild per-epoch curves for experiment E4 (shadows earlier defs).

    Four consecutive rows per fold (20 epochs); additionally carries the
    ``features`` column describing the predictor set used.

    Args:
        results: DataFrame with columns ``history``, ``radius_basis_func``,
            ``input_lats``, ``input_lons``, ``label_smoothing``, ``fold_no``
            and ``features``; row count a multiple of 4.

    Returns:
        Long-format DataFrame of accuracy/loss per epoch and dataset split,
        annotated with the fold's hyper-parameters and feature set.
    """
    rows_per_fold = 4
    df_list = []
    for i in range(0, results.shape[0], rows_per_fold):
        # Parse each serialized history once (the original parsed every
        # string four times); yaml.load without a Loader is deprecated.
        parts = [yaml.safe_load(results['history'].iloc[i + j])
                 for j in range(rows_per_fold)]
        curves = {key: [v for part in parts for v in part[key]]
                  for key in ('accuracy', 'val_accuracy', 'loss', 'val_loss')}
        # Derive the epoch axis from the data instead of hard-coding 1..20.
        epochs = np.arange(1, len(curves['accuracy']) + 1)
        df_tr = pd.DataFrame({'accuracy': curves['accuracy'],
                              'loss': curves['loss']})
        df_tr['dataset'] = 'train'
        df_tr['epochs'] = epochs
        df_val = pd.DataFrame({'accuracy': curves['val_accuracy'],
                               'loss': curves['val_loss']})
        df_val['dataset'] = 'validation'
        df_val['epochs'] = epochs
        df = pd.concat([df_tr, df_val])
        # Positional access (.iloc) also works after filtering/reindexing.
        df['radius'] = results['radius_basis_func'].iloc[i]
        df['input_lats'] = results['input_lats'].iloc[i]
        df['input_lons'] = results['input_lons'].iloc[i]
        df['label_smoothing'] = results['label_smoothing'].iloc[i]
        df['fold'] = results['fold_no'].iloc[i]
        df['features'] = results['features'].iloc[i]
        df_list.append(df)
    history = pd.concat(df_list)
    history.reset_index(inplace=True)
    return history
E4_history = extract_history_E1(E4_results)
plt.figure(figsize = (6,5))
sb.lineplot(x = 'epochs', y = 'accuracy', hue = 'features', style = 'dataset', data = E4_history)
plt.legend(bbox_to_anchor=(1, -0.2),borderaxespad=0)#
plt.tight_layout()
plt.savefig('plots/E4_accuracy.png')
plt.figure(figsize = (6,5))
sb.lineplot(x = 'epochs', y = 'loss', hue = 'features', style = 'dataset', data = E4_history)
plt.legend(bbox_to_anchor=(1, -0.2),borderaxespad=0)#
plt.tight_layout()
plt.savefig('plots/E4_loss.png')
#%%
###############################################################################
##########################E3
#E3 sharpness
# Same plotting sequence for experiment E3 (adds output grid dimensions).
# NOTE(review): assumes E3_results is defined earlier in the file.
E3_sharpness = E3_results[['fold_no','radius_basis_func','input_lats','input_lons', 'output_lons', 'output_lats','label_smoothing','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
E3_sharpness = E3_sharpness.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons', 'output_lons', 'output_lats','label_smoothing','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2'],
               value_vars = ['min_percentage', 'max_percentage'], var_name = 'minmax', value_name = 'probability')
plt.figure()
sb.lineplot(x="epochs", y="probability",hue="output_lons", style="minmax", data=E3_sharpness)
plt.legend(bbox_to_anchor=(1.01, 1),borderaxespad=0)#ncol = 2)
plt.xticks([5,10,15,20])
plt.tight_layout()
plt.savefig('plots/E3_sharpness.png')
#%%
#E1 achieved RPSS
results_epoch_end_E3 = E3_results[['fold_no','radius_basis_func','input_lats','input_lons', 'output_lons', 'output_lats','label_smoothing','epochs', 'accuracy', 'loss', 'RPSS_year1', 'RPSS_year2', 'min_percentage', 'max_percentage']]
results_epoch_end_E3 = results_epoch_end_E3.melt(id_vars = ['fold_no','radius_basis_func','input_lats','input_lons', 'output_lons', 'output_lats','label_smoothing','epochs', 'accuracy', 'loss', 'min_percentage', 'max_percentage'],
                   value_vars = ['RPSS_year1', 'RPSS_year2'], var_name = 'year', value_name = 'RPSS')
plt.figure()
sb.lineplot( data = results_epoch_end_E3, x = 'epochs', y = 'RPSS', hue = 'input_lons')
plt.xticks([5,10,15,20])
# Reference line at RPSS = 0.
plt.hlines(y = 0, xmin= results_epoch_end_E3.epochs.min(), xmax = results_epoch_end_E3.epochs.max(), color = 'black')
plt.tight_layout()
plt.savefig('plots/E3_RPSS.png')
#%%
# Box plots of RPSS per epoch, faceted by radius and label smoothing.
E3 = results_epoch_end_E3
plt.figure()
g = sb.catplot( data = E3, x = 'epochs', y = 'RPSS', hue = 'output_lons', row = 'radius_basis_func', col = 'label_smoothing', kind = 'box', legend = False)
#plt.hlines(y = 0, xmin= E3_av.epochs.min(), xmax = E3_av.epochs.max(), color = 'black')
# Dashed zero line on every facet of the grid.
g.map(plt.axhline, y=0, ls='--', c='black')
plt.tight_layout()
plt.legend(loc = 'lower right')
plt.savefig('plots/E3_boxplot.png')
|
{"hexsha": "399d3c177bc52f4dc44c7e77e584978ea091393b", "size": 27089, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/param_optimization/plot_parameter_optimization.py", "max_stars_repo_name": "steidani/s2s-ai-challenge-kit-eth-ubern", "max_stars_repo_head_hexsha": "41fca2c6380d5aecfb7b322f005a74ab533368ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/param_optimization/plot_parameter_optimization.py", "max_issues_repo_name": "steidani/s2s-ai-challenge-kit-eth-ubern", "max_issues_repo_head_hexsha": "41fca2c6380d5aecfb7b322f005a74ab533368ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/param_optimization/plot_parameter_optimization.py", "max_forks_repo_name": "steidani/s2s-ai-challenge-kit-eth-ubern", "max_forks_repo_head_hexsha": "41fca2c6380d5aecfb7b322f005a74ab533368ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.3248031496, "max_line_length": 233, "alphanum_fraction": 0.6497840452, "include": true, "reason": "import numpy", "num_tokens": 7664}
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Annealed Flow Transport (AFT) Monte Carlo algorithm.
For more detail see:
Arbel, Matthews and Doucet. 2021. Annealed Flow Transport Monte Carlo.
International Conference on Machine Learning.
"""
import time
from typing import NamedTuple, Tuple
from absl import logging
from annealed_flow_transport import flow_transport
from annealed_flow_transport import markov_kernel
import annealed_flow_transport.aft_types as tp
import jax
import jax.numpy as jnp
import numpy as np
import optax
# Short local aliases for JAX arrays and the callable/container types
# declared in annealed_flow_transport.aft_types.
Array = jnp.ndarray
UpdateFn = tp.UpdateFn
OptState = tp.OptState
FlowParams = tp.FlowParams
FlowApply = tp.FlowApply
LogDensityNoStep = tp.LogDensityNoStep
InitialSampler = tp.InitialSampler
RandomKey = tp.RandomKey
SamplesTuple = tp.SamplesTuple
FreeEnergyAndGrad = tp.FreeEnergyAndGrad
MarkovKernelApply = tp.MarkovKernelApply
FreeEnergyEval = tp.FreeEnergyEval
VfesTuple = tp.VfesTuple
LogDensityByStep = tp.LogDensityByStep
AcceptanceTuple = tp.AcceptanceTuple
LogWeightsTuple = tp.LogWeightsTuple
AlgoResultsTuple = tp.AlgoResultsTuple
def get_initial_samples_log_weight_tuples(
    initial_sampler: InitialSampler, key: RandomKey,
    config) -> Tuple[SamplesTuple, LogWeightsTuple]:
  """Draw initial train/validation/test particles with uniform log weights."""
  batch_sizes = (config.estimation_batch_size,
                 config.estimation_batch_size,
                 config.batch_size)
  subkeys = jax.random.split(key, 3)
  samples = []
  for subkey, num_particles in zip(subkeys, batch_sizes):
    samples.append(initial_sampler(subkey, num_particles, config.sample_shape))
  samples_tuple = SamplesTuple(*samples)
  # Uniform normalized weights: each of the num particles gets log(1/num).
  log_weights = [-jnp.log(num) * jnp.ones(num) for num in batch_sizes]
  log_weights_tuple = LogWeightsTuple(*log_weights)
  return samples_tuple, log_weights_tuple
def update_tuples(
    samples_tuple: SamplesTuple, log_weights_tuple: LogWeightsTuple,
    key: RandomKey, flow_apply: FlowApply, flow_params: FlowParams,
    markov_kernel_apply: MarkovKernelApply, log_density: LogDensityByStep,
    step: int, config) -> Tuple[SamplesTuple, LogWeightsTuple, AcceptanceTuple]:
  """Move train/validation/test particles through one flow+MCMC update.

  Returns the updated samples/log-weights tuples plus the acceptance
  diagnostics of the test particles (the last entry of each tuple).
  """
  subkeys = jax.random.split(key, 3)
  updates = [
      flow_transport.update_samples_log_weights(
          flow_apply=flow_apply,
          markov_kernel_apply=markov_kernel_apply,
          flow_params=flow_params,
          samples=samples,
          log_weights=log_weights,
          key=subkey,
          log_density=log_density,
          step=step,
          config=config)
      for samples, log_weights, subkey in zip(samples_tuple,
                                              log_weights_tuple,
                                              subkeys)
  ]
  new_samples, new_log_weights, acceptance_tuples = zip(*updates)
  return (SamplesTuple(*new_samples), LogWeightsTuple(*new_log_weights),
          acceptance_tuples[-1])
class OptimizationLoopState(NamedTuple):
  """State threaded through the flow-optimization while loop.

  Tracks the optimizer/flow state alongside the best parameters seen so
  far (selected by validation variational free energy).
  """
  opt_state: OptState  # Optimizer state (moments etc.).
  flow_params: FlowParams  # Flow parameters currently being optimized.
  inner_step: int  # Index of the current optimization iteration.
  opt_vfes: VfesTuple  # Per-iteration logs of train/validation free energies.
  best_params: FlowParams  # Parameters with the lowest validation VFE so far.
  best_validation_vfe: Array  # Lowest validation VFE observed so far.
  best_index: int  # Iteration at which best_validation_vfe was recorded.
def flow_estimate_step(loop_state: OptimizationLoopState,
                       free_energy_and_grad: FreeEnergyAndGrad,
                       train_samples: Array, train_log_weights: Array,
                       outer_step: int, validation_samples: Array,
                       validation_log_weights: Array,
                       free_energy_eval: FreeEnergyEval,
                       opt_update: UpdateFn) -> OptimizationLoopState:
  """A single step of the flow estimation loop.

  Evaluates the variational free energy (VFE) on train and validation
  particles, keeps the best-so-far parameters by validation VFE, logs both
  VFEs and applies one optimizer update.

  Args:
    loop_state: current state of the optimization loop.
    free_energy_and_grad: function giving free energy estimate and gradient.
    train_samples: Array of shape (batch,)+sample_shape.
    train_log_weights: Array of shape (batch,).
    validation_samples: Array of validation particles.
    validation_log_weights: Array of validation log weights.
    outer_step: int giving current outer step of the overall algorithm.
    free_energy_eval: function giving free energy estimate only.
    opt_update: function applying one optimizer update.

  Returns:
    The loop state advanced by one optimization step.
  """
  # Evaluate the flow on train and validation particles.
  train_vfe, flow_grads = free_energy_and_grad(loop_state.flow_params,
                                               train_samples,
                                               train_log_weights,
                                               outer_step)
  validation_vfe = free_energy_eval(loop_state.flow_params,
                                    validation_samples,
                                    validation_log_weights,
                                    outer_step)
  # Update the best parameters, best validation vfe and index
  # if the measured validation vfe is better.  lax.cond (rather than
  # jnp.where) is used because flow_params is an arbitrary pytree.
  validation_vfe_is_better = validation_vfe < loop_state.best_validation_vfe
  new_best_params = jax.lax.cond(validation_vfe_is_better,
                                 lambda _: loop_state.flow_params,
                                 lambda _: loop_state.best_params,
                                 operand=None)
  new_best_validation_vfe = jnp.where(validation_vfe_is_better,
                                      validation_vfe,
                                      loop_state.best_validation_vfe)
  new_best_index = jnp.where(validation_vfe_is_better,
                             loop_state.inner_step,
                             loop_state.best_index)
  # Update the logs of train and validation vfes.
  # jax.ops.index_update was deprecated and has since been removed from JAX;
  # the .at[...].set(...) property is the supported out-of-place equivalent.
  new_train_vfes = loop_state.opt_vfes.train_vfes.at[
      loop_state.inner_step].set(train_vfe)
  new_validation_vfes = loop_state.opt_vfes.validation_vfes.at[
      loop_state.inner_step].set(validation_vfe)
  new_opt_vfes = VfesTuple(train_vfes=new_train_vfes,
                           validation_vfes=new_validation_vfes)
  # Apply gradients ready for next round of flow evaluations in the next step.
  updates, new_opt_state = opt_update(flow_grads,
                                      loop_state.opt_state)
  new_flow_params = optax.apply_updates(loop_state.flow_params,
                                        updates)
  new_inner_step = loop_state.inner_step + 1
  # Pack everything into the next loop state.
  return OptimizationLoopState(new_opt_state, new_flow_params,
                               new_inner_step, new_opt_vfes,
                               new_best_params,
                               new_best_validation_vfe,
                               new_best_index)
def flow_estimation_should_continue(loop_state: OptimizationLoopState,
                                    opt_iters: int,
                                    stopping_criterion: str) -> bool:
  """Based on stopping criterion control termination of flow estimation."""
  step = loop_state.inner_step
  if stopping_criterion == 'time':
    # Fixed budget: run exactly opt_iters iterations.
    return step < opt_iters
  if stopping_criterion == 'greedy_time':
    # Continue only while the previous step improved the validation VFE and
    # the iteration budget is not exhausted.
    return jnp.logical_and(loop_state.best_index == step - 1,
                           step < opt_iters)
  raise NotImplementedError
def optimize_free_energy(
    opt_update: UpdateFn, opt_init_state: OptState,
    flow_init_params: FlowParams, free_energy_and_grad: FreeEnergyAndGrad,
    free_energy_eval: FreeEnergyEval, train_samples: Array,
    train_log_weights: Array, validation_samples: Array,
    validation_log_weights: Array, outer_step: int, opt_iters: int,
    stopping_criterion: str) -> Tuple[FlowParams, VfesTuple]:
  """Optimize an estimate of the free energy.

  Args:
    opt_update: function that updates the state of flow based on gradients etc.
    opt_init_state: initial state variables of the optimizer.
    flow_init_params: initial parameters of the flow.
    free_energy_and_grad: function giving estimate of free energy and gradient.
    free_energy_eval: function giving estimate of free energy only.
    train_samples: Array of shape (batch,)+sample_shape
    train_log_weights: Array of shape (batch,)
    validation_samples: Array of shape (batch,)
    validation_log_weights: Array of shape (batch,)
    outer_step: int giving current outer step of algorithm.
    opt_iters: number of flow estimation iters.
    stopping_criterion: One of 'time' or 'greedy_time'.
  Returns:
    flow_params: flow parameters achieving the best validation free energy.
    opt_vfes: tuple of train/validation free energy logs.
  """
  empty_vfes = VfesTuple(jnp.zeros(opt_iters), jnp.zeros(opt_iters))
  # Seed the "best so far" slot with the initial parameters and an infinite
  # validation VFE so the first measurement always replaces it; best_index of
  # -1 keeps the greedy stopping criterion satisfied on the first iteration.
  init_loop_state = OptimizationLoopState(opt_init_state, flow_init_params, 0,
                                          empty_vfes, flow_init_params,
                                          jnp.inf, -1)
  step_fn = lambda state: flow_estimate_step(
      state, free_energy_and_grad, train_samples, train_log_weights,
      outer_step, validation_samples, validation_log_weights,
      free_energy_eval, opt_update)
  continue_fn = lambda state: flow_estimation_should_continue(
      state, opt_iters, stopping_criterion)
  final_loop_state = jax.lax.while_loop(continue_fn, step_fn, init_loop_state)
  return final_loop_state.best_params, final_loop_state.opt_vfes
def inner_loop(
    key: RandomKey, free_energy_and_grad: FreeEnergyAndGrad,
    free_energy_eval: FreeEnergyEval, opt_update: UpdateFn,
    opt_init_state: OptState, flow_init_params: FlowParams,
    flow_apply: FlowApply, markov_kernel_apply: MarkovKernelApply,
    samples_tuple: SamplesTuple, log_weights_tuple: LogWeightsTuple,
    log_density: LogDensityByStep, step: int, config
) -> Tuple[SamplesTuple, LogWeightsTuple, VfesTuple, Array, AcceptanceTuple]:
  """Inner loop of the algorithm.

  Optimizes the flow's free energy on the train/validation splits, computes
  the log-normalizer increment on the test split, then transports and
  reweights all three splits (with Markov-kernel moves) via update_tuples.

  Args:
    key: A JAX random key.
    free_energy_and_grad: function giving estimate of free energy and gradient.
    free_energy_eval: function giving estimate of free energy only.
    opt_update: function that updates the state of flow based on gradients etc.
    opt_init_state: initial state variables of the optimizer.
    flow_init_params: initial parameters of the flow.
    flow_apply: function that applies the flow.
    markov_kernel_apply: functional that applies the Markov transition kernel.
    samples_tuple: Tuple containing train/validation/test samples.
    log_weights_tuple: Tuple containing train/validation/test log_weights.
    log_density: function returning the log_density of a sample at given step.
    step: int giving current step of algorithm.
    config: experiment configuration.
  Returns:
    samples_tuple: train/validation/test samples after the inner loop.
    log_weights_tuple: train/validation/test log weights after the inner loop.
    vfes_tuple: train/validation free energy estimates from flow optimization.
    log_normalizer_increment: Scalar log of normalizing constant increment.
    test_acceptance_tuple: Markov-kernel acceptance rates on the test split.
  """
  flow_params, vfes_tuple = optimize_free_energy(
      opt_update=opt_update,
      opt_init_state=opt_init_state,
      flow_init_params=flow_init_params,
      free_energy_and_grad=free_energy_and_grad,
      free_energy_eval=free_energy_eval,
      train_samples=samples_tuple.train_samples,
      train_log_weights=log_weights_tuple.train_log_weights,
      validation_samples=samples_tuple.validation_samples,
      validation_log_weights=log_weights_tuple.validation_log_weights,
      outer_step=step,
      opt_iters=config.optimization_config.free_energy_iters,
      stopping_criterion=config.stopping_criterion)
  # Increment is evaluated on the held-out test split with the newly
  # optimized flow parameters.
  log_normalizer_increment = flow_transport.get_log_normalizer_increment(
      samples_tuple.test_samples, log_weights_tuple.test_log_weights,
      flow_apply, flow_params, log_density, step)
  samples_tuple, log_weights_tuple, test_acceptance_tuple = update_tuples(
      samples_tuple=samples_tuple,
      log_weights_tuple=log_weights_tuple,
      key=key,
      flow_apply=flow_apply,
      flow_params=flow_params,
      markov_kernel_apply=markov_kernel_apply,
      log_density=log_density,
      step=step,
      config=config)
  return samples_tuple, log_weights_tuple, vfes_tuple, log_normalizer_increment, test_acceptance_tuple
def outer_loop_aft(opt_update: UpdateFn,
                   opt_init_state: OptState,
                   flow_init_params: FlowParams,
                   flow_apply: FlowApply,
                   initial_log_density: LogDensityNoStep,
                   final_log_density: LogDensityNoStep,
                   initial_sampler: InitialSampler,
                   key: RandomKey,
                   config,
                   log_step_output) -> AlgoResultsTuple:
  """The outer loop for Annealed Flow Transport Monte Carlo.

  Anneals from the initial to the final density over config.num_temps
  temperatures, running the (jitted) inner loop once per temperature and
  accumulating the log-normalizer estimate.

  Args:
    opt_update: A Optax optimizer update function.
    opt_init_state: Optax initial state.
    flow_init_params: Initial parameters for the flow.
    flow_apply: Function that evaluates flow on parameters and samples.
    initial_log_density: The log density of the starting distribution.
    final_log_density: The log density of the target distribution.
    initial_sampler: A function that produces the initial samples.
    key: A Jax random key.
    config: A ConfigDict containing the configuration.
    log_step_output: Function to log step output or None.
  Returns:
    An AlgoResults tuple containing a summary of the results.
  """
  num_temps = config.num_temps
  # Geometric interpolation between the initial and final log densities.
  density_by_step = flow_transport.GeometricAnnealingSchedule(
      initial_log_density, final_log_density, num_temps)
  markov_kernel_by_step = markov_kernel.MarkovTransitionKernel(
      config.mcmc_config, density_by_step, num_temps)
  def free_energy_short(flow_params: FlowParams,
                        samples: Array,
                        log_weights: Array,
                        step: int) -> Array:
    # Free energy of transporting the weighted samples through the flow at
    # annealing step `step`.
    return flow_transport.transport_free_energy_estimator(
        samples, log_weights, flow_apply, flow_params, density_by_step, step)
  free_energy_eval = jax.jit(free_energy_short)
  free_energy_and_grad = jax.value_and_grad(free_energy_short)
  key, subkey = jax.random.split(key)
  samples_tuple, log_weights_tuple = get_initial_samples_log_weight_tuples(
      initial_sampler, subkey, config)
  def short_inner_loop(rng_key: RandomKey,
                       loc_samples_tuple: SamplesTuple,
                       loc_log_weights_tuple: LogWeightsTuple,
                       loc_step: int):
    # Closure over the static configuration so the jitted function depends
    # only on the per-step state.
    return inner_loop(key=rng_key,
                      free_energy_and_grad=free_energy_and_grad,
                      free_energy_eval=free_energy_eval,
                      opt_update=opt_update,
                      opt_init_state=opt_init_state,
                      flow_init_params=flow_init_params,
                      flow_apply=flow_apply,
                      markov_kernel_apply=markov_kernel_by_step,
                      samples_tuple=loc_samples_tuple,
                      log_weights_tuple=loc_log_weights_tuple,
                      log_density=density_by_step,
                      step=loc_step,
                      config=config)
  logging.info('Jitting step...')
  inner_loop_jit = jax.jit(short_inner_loop)
  opt_iters = config.optimization_config.free_energy_iters
  if log_step_output is not None:
    zero_vfe_tuple = VfesTuple(train_vfes=jnp.zeros(opt_iters),
                               validation_vfes=jnp.zeros(opt_iters))
    # NOTE(review): this call passes seven positional arguments (ending with
    # config.write_samples) while the per-step call below passes six --
    # confirm log_step_output accepts both signatures.
    log_step_output(samples_tuple, log_weights_tuple, zero_vfe_tuple, 0., 1.,
                    1., config.write_samples)
  # First call triggers compilation; run it once outside the timed loop so
  # the reported per-step time excludes jit compilation.
  logging.info('Performing initial step redundantly for accurate timing...')
  initial_start_time = time.time()
  inner_loop_jit(key, samples_tuple, log_weights_tuple, 1)
  initial_finish_time = time.time()
  initial_time_diff = initial_finish_time - initial_start_time
  logging.info('Initial step time / seconds %f: ', initial_time_diff)
  logging.info('Launching training...')
  log_normalizer_estimate = 0.
  start_time = time.time()
  for step in range(1, num_temps):
    subkey, key = jax.random.split(key)
    samples_tuple, log_weights_tuple, vfes_tuple, log_normalizer_increment, test_acceptance = inner_loop_jit(
        subkey, samples_tuple, log_weights_tuple, step)
    acceptance_nuts = float(np.asarray(test_acceptance[0]))
    acceptance_hmc = float(np.asarray(test_acceptance[1]))
    log_normalizer_estimate += log_normalizer_increment
    if step % config.report_step == 0:
      beta = density_by_step.get_beta(step)
      logging.info(
          'Step %05d: beta %f Acceptance rate NUTS %f Acceptance rate HMC %f',
          step, beta, acceptance_nuts, acceptance_hmc
      )
      # Per-step output is only logged on reporting steps.
      if log_step_output is not None:
        log_step_output(samples_tuple, log_weights_tuple,
                        vfes_tuple, log_normalizer_increment, acceptance_nuts,
                        acceptance_hmc)
  finish_time = time.time()
  delta_time = finish_time - start_time
  logging.info('Delta time / seconds %f: ', delta_time)
  logging.info('Log normalizer estimate %f: ', log_normalizer_estimate)
  results = AlgoResultsTuple(
      test_samples=samples_tuple.test_samples,
      test_log_weights=log_weights_tuple.test_log_weights,
      log_normalizer_estimate=log_normalizer_estimate,
      delta_time=delta_time,
      initial_time_diff=initial_time_diff)
  return results
|
{"hexsha": "16439363659ef65c20ef0ad8b76cf2419139fac4", "size": 17967, "ext": "py", "lang": "Python", "max_stars_repo_path": "annealed_flow_transport/aft.py", "max_stars_repo_name": "LaudateCorpus1/annealed_flow_transport", "max_stars_repo_head_hexsha": "28f348bb41e3acec5bc925355063d476f2e2aea2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2021-08-13T14:00:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T12:44:20.000Z", "max_issues_repo_path": "annealed_flow_transport/aft.py", "max_issues_repo_name": "deepmind/annealed_flow_transport", "max_issues_repo_head_hexsha": "28f348bb41e3acec5bc925355063d476f2e2aea2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-10-05T16:19:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-05T16:19:25.000Z", "max_forks_repo_path": "annealed_flow_transport/aft.py", "max_forks_repo_name": "LaudateCorpus1/annealed_flow_transport", "max_forks_repo_head_hexsha": "28f348bb41e3acec5bc925355063d476f2e2aea2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-10-05T16:14:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-03T15:17:36.000Z", "avg_line_length": 43.8219512195, "max_line_length": 109, "alphanum_fraction": 0.6966104525, "include": true, "reason": "import numpy,import jax", "num_tokens": 3747}
|
{-# LANGUAGE FlexibleContexts #-}
module Evaluator.Numerical where
import LispTypes
import Environment
import Evaluator.Operators
import Data.Complex
import Data.Ratio
import Data.Foldable
import Data.Fixed
import Numeric
import Control.Monad.Except
-- |Dispatch table of numeric primitives: maps each Scheme procedure name to
-- its Haskell implementation.  Commented-out entries (hyperbolic functions,
-- angle, make-rectangular) are not implemented yet.
numericalPrimitives :: [(String, [LispVal] -> ThrowsError LispVal)]
numericalPrimitives =
    -- Binary Numerical operations
    [("+", numAdd),
    ("-", numSub),
    ("*", numMul),
    ("/", numDivide),
    ("modulo", numMod),
    ("quotient", numericBinop quot),
    ("remainder", numericBinop rem),
    ("abs", numAbs),
    ("ceiling", numCeil),
    ("floor", numFloor),
    ("round", numRound),
    ("truncate", numTruncate),
    -- Conversion
    ("number->string", numToString),
    -- Numerical Boolean operators
    ("=", numBoolBinop (==)),
    ("<", numBoolBinop (<)),
    (">", numBoolBinop (>)),
    ("/=", numBoolBinop (/=)),
    (">=", numBoolBinop (>=)),
    ("<=", numBoolBinop (<=)),
    -- Scientific functions
    ("sqrt", sciSqrt),
    ("acos", sciAcos),
    ("asin", sciAsin),
    ("atan", sciAtan),
    ("cos", sciCos),
    ("sin", sciSin),
    ("tan", sciTan),
    -- ("acosh", sciAcosh),
    -- ("asinh", sciAsinh),
    -- ("atanh", sciAtanh),
    -- ("cosh", sciCosh),
    -- ("sinh", sciSinh),
    -- ("tanh", sciTanh),
    ("exp", sciExp),
    ("expt", sciExpt),
    ("log", sciLog),
    -- Complex numbers operations
    -- ("angle", cAngle),
    ("real-part", cRealPart),
    ("imag-part", cImagPart),
    ("magnitude", cMagnitude),
    ("make-polar", cMakePolar),
    -- ("make-rectangular", cMakeRectangular),
    -- Type testing functions
    ("number?", unaryOp numberp),
    ("integer?", unaryOp integerp),
    ("float?", unaryOp floatp),
    ("ratio?", unaryOp ratiop),
    ("complex?", unaryOp complexp)]
-- |Type-testing predicates: each wraps the result of the check in a
-- LispVal 'Bool'.
numberp, integerp, floatp, ratiop, complexp :: LispVal -> LispVal
-- A value counts as a number if it uses any of the four numeric
-- representations.
numberp v = case v of
    Number _ -> Bool True
    Float _ -> Bool True
    Ratio _ -> Bool True
    Complex _ -> Bool True
    _ -> Bool False
integerp v = case v of
    Number _ -> Bool True
    _ -> Bool False
floatp v = case v of
    Float _ -> Bool True
    _ -> Bool False
ratiop v = case v of
    Ratio _ -> Bool True
    _ -> Bool False
complexp v = case v of
    Complex _ -> Bool True
    _ -> Bool False
-- |Absolute value
-- Defined for every numeric variant; for complex numbers Prelude 'abs'
-- yields the magnitude (as a complex value with zero imaginary part).
numAbs :: [LispVal] -> ThrowsError LispVal
numAbs [Number x] = return $ Number $ abs x
numAbs [Float x] = return $ Float $ abs x
numAbs [Complex x] = return $ Complex $ abs x -- Calculates the magnitude
numAbs [Ratio x] = return $ Ratio $ abs x
numAbs [x] = throwError $ TypeMismatch "number" x
numAbs l = throwError $ NumArgs 1 l
-- |Ceiling, floor, round and truncate
-- Each returns an integer 'Number'.  Integers pass through unchanged and
-- rationals use the exact 'RealFrac' instance of 'Rational'; previously
-- both were round-tripped through a defaulted 'Double', losing precision
-- for magnitudes above 2^53.  'numRound' follows Haskell's 'round', which
-- rounds halves to even.  Complex numbers are accepted only when their
-- imaginary part is zero.
numCeil, numFloor, numRound, numTruncate :: [LispVal] -> ThrowsError LispVal
numCeil [Number x] = return $ Number x
numCeil [Ratio x] = return $ Number $ ceiling x
numCeil [Float x] = return $ Number $ ceiling x
numCeil [Complex x] =
    if imagPart x == 0 then return $ Number $ ceiling $ realPart x
    else throwError $ TypeMismatch "integer or float" $ Complex x
numCeil [x] = throwError $ TypeMismatch "integer or float" x
numCeil l = throwError $ NumArgs 1 l
numFloor [Number x] = return $ Number x
numFloor [Ratio x] = return $ Number $ floor x
numFloor [Float x] = return $ Number $ floor x
numFloor [Complex x] =
    if imagPart x == 0 then return $ Number $ floor $ realPart x
    else throwError $ TypeMismatch "integer or float" $ Complex x
numFloor [x] = throwError $ TypeMismatch "integer or float" x
numFloor l = throwError $ NumArgs 1 l
numRound [Number x] = return $ Number x
numRound [Ratio x] = return $ Number $ round x
numRound [Float x] = return $ Number $ round x
numRound [Complex x] =
    if imagPart x == 0 then return $ Number $ round $ realPart x
    else throwError $ TypeMismatch "integer or float" $ Complex x
numRound [x] = throwError $ TypeMismatch "integer or float" x
numRound l = throwError $ NumArgs 1 l
numTruncate [Number x] = return $ Number x
numTruncate [Ratio x] = return $ Number $ truncate x
numTruncate [Float x] = return $ Number $ truncate x
numTruncate [Complex x] =
    if imagPart x == 0 then return $ Number $ truncate $ realPart x
    else throwError $ TypeMismatch "integer or float" $ Complex x
numTruncate [x] = throwError $ TypeMismatch "integer or float" x
numTruncate l = throwError $ NumArgs 1 l
-- |foldl1M is like foldlM but has no base case
-- Partial: the empty-list clause aborts via 'error'; every call site in
-- this module pattern-matches the empty (and where relevant the singleton)
-- argument list before reaching it.
foldl1M :: Monad m => (a -> a -> m a) -> [a] -> m a
foldl1M f (x : xs) = foldlM f x xs
foldl1M _ _ = error "unexpected error in foldl1M"
-- | Sum numbers
-- (+) with no arguments returns the additive identity 0.  Each adjacent
-- pair is first promoted to a common numeric type by 'numCast'.
numAdd :: [LispVal] -> ThrowsError LispVal
numAdd [] = return $ Number 0
numAdd l = foldl1M (\ x y -> numCast [x, y] >>= go) l
    where
        go (List [Number x, Number y]) = return $ Number $ x + y
        go (List [Float x, Float y]) = return $ Float $ x + y
        go (List [Ratio x, Ratio y]) = return $ Ratio $ x + y
        go (List [Complex x, Complex y]) = return $ Complex $ x + y
        go _ = throwError $ Default "unexpected error in (+)"
-- | Subtract numbers
-- A single argument is negated (unary minus); two or more arguments are
-- folded leftward after pairwise promotion via 'numCast'.
numSub :: [LispVal] -> ThrowsError LispVal
numSub [] = throwError $ NumArgs 1 []
numSub [Number x] = return $ Number $ -1 * x
numSub [Float x] = return $ Float $ -1 * x
numSub [Ratio x] = return $ Ratio $ -1 * x
numSub [Complex x] = return $ Complex $ -1 * x
numSub l = foldl1M (\ x y -> numCast [x, y] >>= go) l
    where
        go (List [Number x, Number y]) = return $ Number $ x - y
        go (List [Float x, Float y]) = return $ Float $ x - y
        go (List [Ratio x, Ratio y]) = return $ Ratio $ x - y
        go (List [Complex x, Complex y]) = return $ Complex $ x - y
        go _ = throwError $ Default "unexpected error in (-)"
-- | Multiply numbers
-- (*) with no arguments returns the multiplicative identity 1.
numMul :: [LispVal] -> ThrowsError LispVal
numMul [] = return $ Number 1
numMul l = foldl1M (\ x y -> numCast [x, y] >>= go) l
    where
        go (List [Number x, Number y]) = return $ Number $ x * y
        go (List [Float x, Float y]) = return $ Float $ x * y
        go (List [Ratio x, Ratio y]) = return $ Ratio $ x * y
        go (List [Complex x, Complex y]) = return $ Complex $ x * y
        go _ = throwError $ Default "unexpected error in (*)"
-- |Divide two numbers
-- A single argument x yields its reciprocal 1/x (exact Ratio for integers).
-- Integer pairs use integer division only when it is exact; otherwise an
-- exact Rational is produced.  Division by zero is checked per pair.
numDivide :: [LispVal] -> ThrowsError LispVal
numDivide [] = throwError $ NumArgs 1 []
numDivide [Number 0] = throwError DivideByZero
numDivide [Ratio 0] = throwError DivideByZero
numDivide [Number x] = return $ Ratio $ 1 / fromInteger x
numDivide [Float x] = return $ Float $ 1.0 / x
numDivide [Ratio x] = return $ Ratio $ 1 / x
numDivide [Complex x] = return $ Complex $ 1 / x
numDivide l = foldl1M (\ x y -> numCast [x, y] >>= go) l
    where
        go (List [Number x, Number y])
            | y == 0 = throwError DivideByZero
            | mod x y == 0 = return $ Number $ div x y -- Integer division
            | otherwise = return $ Ratio $ fromInteger x / fromInteger y
        go (List [Float x, Float y])
            | y == 0 = throwError DivideByZero
            | otherwise = return $ Float $ x / y
        go (List [Ratio x, Ratio y])
            | y == 0 = throwError DivideByZero
            | otherwise = return $ Ratio $ x / y
        go (List [Complex x, Complex y])
            | y == 0 = throwError DivideByZero
            | otherwise = return $ Complex $ x / y
        go _ = throwError $ Default "unexpected error in (/)"
-- |Numerical modulus
-- Uses 'mod'' from Data.Fixed, so the result takes the sign of the divisor
-- as with Scheme's modulo.  Modulo needs at least two arguments: the old
-- clauses returned (Number 1) for no arguments (a copy-paste of the (*)
-- identity) and the argument itself for a single argument; both are now
-- reported as arity errors.
numMod :: [LispVal] -> ThrowsError LispVal
numMod [] = throwError $ NumArgs 2 []
numMod l@[_] = throwError $ NumArgs 2 l
numMod l = foldl1M (\ x y -> numCast [x, y] >>= go) l
    where
        go (List [Number a, Number b]) = return $ Number $ mod' a b
        go (List [Float a, Float b]) = return $ Float $ mod' a b
        go (List [Ratio a, Ratio b]) = return $ Ratio $ mod' a b
        -- #TODO implement modulus for complex numbers
        go (List [Complex _, Complex _]) = throwError $ Default "modulus is not yet implemented for complex numbers"
        go _ = throwError $ Default "unexpected error in (modulus)"
-- | Boolean comparison operator
-- Chains the comparison across all arguments, as Scheme's variadic
-- (= a b c ...) does: the result is true only if every adjacent pair
-- satisfies the operator.  Previously three or more arguments crashed with
-- a non-exhaustive pattern match instead of being compared.
numBoolBinop :: (LispVal -> LispVal -> Bool) -> [LispVal] -> ThrowsError LispVal
numBoolBinop _ [] = throwError $ Default "need at least two arguments"
numBoolBinop _ [_] = throwError $ Default "need at least two arguments"
numBoolBinop op l = do
        pairResults <- mapM pairCheck (zip l (tail l))
        return $ Bool $ and pairResults
    where
        -- Promote each adjacent pair to a common numeric type, then compare.
        pairCheck (x, y) = numCast [x, y] >>= go
        go (List [a@(Number _), b@(Number _)]) = return $ a `op` b
        go (List [a@(Float _), b@(Float _)]) = return $ a `op` b
        go (List [a@(Ratio _), b@(Ratio _)]) = return $ a `op` b
        go (List [a@(Complex _), b@(Complex _)]) = return $ a `op` b
        go _ = throwError $ Default "unexpected error in boolean operation"
-- |Accept two numbers and cast one as the appropriate type
-- Returns List [a', b'] with both operands promoted to the more general of
-- the two representations; per the clauses below the promotion order is
-- Number -> Ratio -> Float -> Complex (note Ratio+Float yields Float, which
-- is lossy for rationals that Double cannot represent exactly).
numCast :: [LispVal] -> ThrowsError LispVal
-- Same type, just return the two numbers
numCast [a@(Number _), b@(Number _)] = return $ List [a,b]
numCast [a@(Float _), b@(Float _)] = return $ List [a,b]
numCast [a@(Ratio _), b@(Ratio _)] = return $ List [a,b]
numCast [a@(Complex _), b@(Complex _)] = return $ List [a,b]
-- First number is an integer
numCast [Number a, b@(Float _)] = return $ List [Float $ fromInteger a, b]
numCast [Number a, b@(Ratio _)] = return $ List [Ratio $ fromInteger a, b]
numCast [Number a, b@(Complex _)] = return $ List [Complex $ fromInteger a, b]
-- First number is a float
numCast [a@(Float _), Number b] = return $ List [a, Float $ fromInteger b]
numCast [a@(Float _), Ratio b] = return $ List [a, Float $ fromRational b]
numCast [Float a, b@(Complex _)] = return $ List [Complex $ a :+ 0, b]
-- First number is a rational
numCast [a@(Ratio _), Number b] = return $ List [a, Ratio $ fromInteger b]
numCast [Ratio a, b@(Float _)] = return $ List [Float $ fromRational a, b]
numCast [Ratio a, b@(Complex _)] =
    return $ List [Complex $ fromInteger (numerator a) / fromInteger (denominator a), b]
-- First number is a complex
numCast [a@(Complex _), Number b] = return $ List [a, Complex $ fromInteger b]
numCast [a@(Complex _), Float b] = return $ List [a, Complex $ b :+ 0]
numCast [a@(Complex _), Ratio b] =
    return $ List [a, Complex $ fromInteger (numerator b) / fromInteger (denominator b)]
-- Error cases: blame the first non-numeric operand
numCast [a, b] = case a of
    Number _ -> throwError $ TypeMismatch "number" b
    Float _ -> throwError $ TypeMismatch "number" b
    Ratio _ -> throwError $ TypeMismatch "number" b
    Complex _ -> throwError $ TypeMismatch "number" b
    _ -> throwError $ TypeMismatch "number" a
numCast _ = throwError $ Default "unknown error in numCast"
-- |Take a primitive Haskell Integer function and wrap it
-- with code to unpack an argument list, apply the function to it
-- and return a numeric value
numericBinop :: (Integer -> Integer -> Integer) -> [LispVal] -> ThrowsError LispVal
-- Fewer than two arguments is an arity error.  The empty list previously
-- slipped through to 'foldl1' and crashed with an uncatchable exception.
numericBinop _ [] = throwError $ NumArgs 2 []
numericBinop _ val@[_] = throwError $ NumArgs 2 val
-- Fold the operator leftway if there are enough args
numericBinop op params = Number . foldl1 op <$> mapM unpackNum params
-- |Convert a Number to a String
-- Uses Haskell's 'show' for all four numeric representations.
numToString :: [LispVal] -> ThrowsError LispVal
numToString [Number x] = return $ String $ show x
numToString [Float x] = return $ String $ show x
numToString [Ratio x] = return $ String $ show x
numToString [Complex x] = return $ String $ show x
numToString [x] = throwError $ TypeMismatch "number" x
-- This implementation takes exactly one argument; the reported arity was
-- previously 2 (a copy-paste slip).
numToString badArgList = throwError $ NumArgs 1 badArgList
-- | Trigonometric functions
-- Real arguments (Number/Float/Ratio) are evaluated in Double and wrapped
-- in 'Float'; complex arguments use the Data.Complex instances.
sciCos, sciSin, sciTan, sciAcos, sciAsin, sciAtan :: [LispVal] -> ThrowsError LispVal
-- | Cosine of a number
sciCos [Number x] = return $ Float $ cos $ fromInteger x
sciCos [Float x] = return $ Float $ cos x
sciCos [Ratio x] = return $ Float $ cos $ fromRational x
sciCos [Complex x] = return $ Complex $ cos x
sciCos [notnum] = throwError $ TypeMismatch "number" notnum
sciCos badArgList = throwError $ NumArgs 1 badArgList
-- | Sine of a number
sciSin [Number x] = return $ Float $ sin $ fromInteger x
sciSin [Float x] = return $ Float $ sin x
sciSin [Ratio x] = return $ Float $ sin $ fromRational x
sciSin [Complex x] = return $ Complex $ sin x
sciSin [notnum] = throwError $ TypeMismatch "number" notnum
sciSin badArgList = throwError $ NumArgs 1 badArgList
-- | Tangent of a number
sciTan [Number x] = return $ Float $ tan $ fromInteger x
sciTan [Float x] = return $ Float $ tan x
sciTan [Ratio x] = return $ Float $ tan $ fromRational x
sciTan [Complex x] = return $ Complex $ tan x
sciTan [notnum] = throwError $ TypeMismatch "number" notnum
sciTan badArgList = throwError $ NumArgs 1 badArgList
-- | Arccosine of a number
sciAcos [Number x] = return $ Float $ acos $ fromInteger x
sciAcos [Float x] = return $ Float $ acos x
sciAcos [Ratio x] = return $ Float $ acos $ fromRational x
sciAcos [Complex x] = return $ Complex $ acos x
sciAcos [notnum] = throwError $ TypeMismatch "number" notnum
sciAcos badArgList = throwError $ NumArgs 1 badArgList
-- | Arcsine of a number
sciAsin [Number x] = return $ Float $ asin $ fromInteger x
sciAsin [Float x] = return $ Float $ asin x
sciAsin [Ratio x] = return $ Float $ asin $ fromRational x
sciAsin [Complex x] = return $ Complex $ asin x
sciAsin [notnum] = throwError $ TypeMismatch "number" notnum
sciAsin badArgList = throwError $ NumArgs 1 badArgList
-- | Arctangent of a number
sciAtan [Number x] = return $ Float $ atan $ fromInteger x
sciAtan [Float x] = return $ Float $ atan x
sciAtan [Ratio x] = return $ Float $ atan $ fromRational x
sciAtan [Complex x] = return $ Complex $ atan x
sciAtan [notnum] = throwError $ TypeMismatch "number" notnum
sciAtan badArgList = throwError $ NumArgs 1 badArgList
-- #TODO Implement hyperbolic functions
-- Ask teacher
-- | Hyperbolic functions
-- sciAcosh, sciAsinh, sciAtanh, sciCosh, sciSinh, sciTanh :: [LispVal] -> ThrowsError LispVal
-- Misc. Scientific primitives
sciSqrt, sciExp, sciExpt, sciLog :: [LispVal] -> ThrowsError LispVal
-- | Square root of a number
-- Real arguments use the real-valued Double sqrt, so a negative real gives
-- NaN; pass a Complex argument to get a complex result.
sciSqrt [Number x] = return $ Float $ sqrt $ fromInteger x
sciSqrt [Float x] = return $ Float $ sqrt x
sciSqrt [Ratio x] = return $ Float $ sqrt $ fromRational x
sciSqrt [Complex x] = return $ Complex $ sqrt x
sciSqrt [notnum] = throwError $ TypeMismatch "number" notnum
sciSqrt badArgList = throwError $ NumArgs 1 badArgList
-- | Return e to the power of x
sciExp [Number x] = return $ Float $ exp $ fromInteger x
sciExp [Float x] = return $ Float $ exp x
sciExp [Ratio x] = return $ Float $ exp $ fromRational x
sciExp [Complex x] = return $ Complex $ exp x
sciExp [notnum] = throwError $ TypeMismatch "number" notnum
sciExp badArgList = throwError $ NumArgs 1 badArgList
-- | Return x to the power of y
-- Integer bases with non-negative integer exponents are computed exactly
-- with (^); previously they went through Double (**) and 'round', which is
-- inexact for large results.  Negative integer exponents fall back to
-- floating point.
sciExpt [x, y] = numCast [x, y] >>= go
    where
        go (List [Number x, Number y])
            | y >= 0 = return $ Number $ x ^ y
            | otherwise = return $ Float $ fromInteger x ** fromInteger y
        go (List [Float x, Float y]) = return $ Float $ x ** y
        go (List [Ratio x, Ratio y]) = return $ Float $ (fromRational x) ** (fromRational y)
        go (List [Complex x, Complex y]) = return $ Complex $ x ** y
        -- The previous message here said "(-)", copied from numSub.
        go _ = throwError $ Default "unexpected error in (expt)"
sciExpt badArgList = throwError $ NumArgs 2 badArgList
-- | Return the natural logarithm of x
-- Real arguments are evaluated in Double (log 0 is -Infinity and log of a
-- negative real is NaN, per IEEE semantics); complex arguments use the
-- complex logarithm.
sciLog [Number x] = return $ Float $ log $ fromInteger x
sciLog [Float x] = return $ Float $ log x
sciLog [Ratio x] = return $ Float $ log $ fromRational x
sciLog [Complex x] = return $ Complex $ log x
sciLog [notnum] = throwError $ TypeMismatch "number" notnum
sciLog badArgList = throwError $ NumArgs 1 badArgList
-- #TODO implement phase (angle)
-- Ask teacher how to convert phase formats from haskell to guile scheme
-- | Complex number functions
cRealPart, cImagPart, cMakePolar, cMagnitude :: [LispVal] -> ThrowsError LispVal
-- | Real part of a complex number
-- Real arguments are their own real part; note Float/Ratio results are
-- returned as 'Float'.
cRealPart [Number x] = return $ Number $ fromInteger x
cRealPart [Float x] = return $ Float x
cRealPart [Ratio x] = return $ Float $ fromRational x
cRealPart [Complex x] = return $ Float $ realPart x
cRealPart [notnum] = throwError $ TypeMismatch "number" notnum
cRealPart badArgList = throwError $ NumArgs 1 badArgList
-- | Imaginary part of a complex number
-- Every real argument has imaginary part 0 (returned as an integer).
cImagPart [Number x] = return $ Number 0
cImagPart [Float x] = return $ Number 0
cImagPart [Ratio x] = return $ Number 0
cImagPart [Complex x] = return $ Float $ imagPart x
cImagPart [notnum] = throwError $ TypeMismatch "number" notnum
cImagPart badArgList = throwError $ NumArgs 1 badArgList
-- | Form a complex number from polar components of magnitude and phase.
-- Degenerate cases collapse to simpler types: zero magnitude gives the
-- integer 0, and zero phase returns the magnitude itself.  Both components
-- must be real; complex components are a type error.
cMakePolar [mag, p] = numCast [mag, p] >>= go
    where
        go (List [Number mag, Number p])
            | mag == 0 = return $ Number 0
            | p == 0 = return $ Number mag
            | otherwise = return $ Complex $ mkPolar (fromInteger mag) (fromInteger p)
        go (List [Float mag, Float p])
            | mag == 0 = return $ Number 0
            | p == 0 = return $ Float mag
            | otherwise = return $ Complex $ mkPolar mag p
        go (List [Ratio mag, Ratio p])
            | mag == 0 = return $ Number 0
            | p == 0 = return $ Float (fromRational mag)
            | otherwise = return $ Complex $ mkPolar (fromRational mag) (fromRational p)
        go val@(List [Complex mag, Complex p]) = throwError $ TypeMismatch "real" val
        go _ = throwError $ Default "unexpected error in make-polar"
cMakePolar badArgList = throwError $ NumArgs 2 badArgList
-- | Return the magnitude (length) of a complex number
-- The magnitude of a real number is the number itself (no absolute value
-- is taken for reals by this implementation).
cMagnitude [Number x] = return $ Number $ fromInteger x
cMagnitude [Float x] = return $ Float x
cMagnitude [Ratio x] = return $ Float $ fromRational x
cMagnitude [Complex x] = return $ Float $ magnitude x
cMagnitude [notnum] = throwError $ TypeMismatch "number" notnum
cMagnitude badArgList = throwError $ NumArgs 1 badArgList
|
{"hexsha": "ef5df37af45cac2ec6c15705a3b5064dc87df88b", "size": 18263, "ext": "hs", "lang": "Haskell", "max_stars_repo_path": "src/Evaluator/Numerical.hs", "max_stars_repo_name": "zfnmxt/yasih", "max_stars_repo_head_hexsha": "f9afe967439e5ffd70437e94d62491fe5060d2ef", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Evaluator/Numerical.hs", "max_issues_repo_name": "zfnmxt/yasih", "max_issues_repo_head_hexsha": "f9afe967439e5ffd70437e94d62491fe5060d2ef", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Evaluator/Numerical.hs", "max_forks_repo_name": "zfnmxt/yasih", "max_forks_repo_head_hexsha": "f9afe967439e5ffd70437e94d62491fe5060d2ef", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.2054455446, "max_line_length": 116, "alphanum_fraction": 0.6370804359, "num_tokens": 5401}
|
[STATEMENT]
lemma parts_insert_subset_impl:
"\<lbrakk>x \<in> parts (insert a G); x \<in> parts G \<Longrightarrow> x \<in> synth (parts H); a \<in> synth (parts H)\<rbrakk>
\<Longrightarrow> x \<in> synth (parts H)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x \<in> parts (insert a G); x \<in> parts G \<Longrightarrow> x \<in> synth (parts H); a \<in> synth (parts H)\<rbrakk> \<Longrightarrow> x \<in> synth (parts H)
[PROOF STEP]
using Fake_parts_sing in_parts_UnE insert_is_Un
parts_idem parts_synth subsetCE sup.absorb2 synth_idem synth_increasing
[PROOF STATE]
proof (prove)
using this:
?X \<in> synth (analz ?H) \<Longrightarrow> parts {?X} \<subseteq> synth (analz ?H) \<union> parts ?H
\<lbrakk>?c \<in> parts (?G \<union> ?H); ?c \<in> parts ?G \<Longrightarrow> ?P; ?c \<in> parts ?H \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
insert ?a ?A = {?a} \<union> ?A
parts (parts ?H) = parts ?H
parts (synth ?H) = parts ?H \<union> synth ?H
\<lbrakk>?A \<subseteq> ?B; ?c \<notin> ?A \<Longrightarrow> ?P; ?c \<in> ?B \<Longrightarrow> ?P\<rbrakk> \<Longrightarrow> ?P
?a \<le> ?b \<Longrightarrow> sup ?a ?b = ?b
synth (synth ?H) = synth ?H
?H \<subseteq> synth ?H
goal (1 subgoal):
1. \<lbrakk>x \<in> parts (insert a G); x \<in> parts G \<Longrightarrow> x \<in> synth (parts H); a \<in> synth (parts H)\<rbrakk> \<Longrightarrow> x \<in> synth (parts H)
[PROOF STEP]
by (metis (no_types, lifting) analz_parts)
|
{"llama_tokens": 556, "file": "IsaNet_infrastructure_Message", "length": 2}
|
################################################################################
# HACKATHON PARTICIPANTS -- DO NOT EDIT THIS FILE #
################################################################################
import sys
import time
import pickle
import numpy
import pathlib

# Presence of the testing CSV decides whether the testing split is scored.
testing_data = pathlib.Path("./csvs/testing.csv")
USE_TESTING_DATA = testing_data.exists()

################################################################################
# source scripts - need this to prep data and have access to the predict
# methods.  exec() (rather than import) keeps the harness's historical
# behavior: the sourced scripts run in, and define names in, this module's
# global namespace.
for _script in ("prepare_mortality_data.py", "prepare_fss_data.py",
                "mortality_model.py", "fss_model.py"):
    exec(pathlib.Path(_script).read_text())

evaluation_file = open("./output/evaluation.txt", "a")


def _log_timing(task, seconds):
    """Append one 'task | seconds' line to the evaluation log."""
    evaluation_file.write(task + " | " + str(seconds) + "\n")


def _load_model(path):
    """Unpickle a trained model, closing the file handle deterministically."""
    with open(path, "rb") as handle:
        return pickle.load(handle)


################################################################################
# import the trained models
trained_mortality_model = _load_model("./output/trained_mortality_model.pickle")
trained_fss_model = _load_model("./output/trained_fss_model.pickle")


def _score_split(split, training):
    """Prepare one data split and write its mortality and FSS predictions.

    split: "training" or "testing"; used in log lines and output file names.
    training: forwarded to the prepare_* functions' ``training`` flag.
    """
    # Data preparation (timed separately per model, matching the log format
    # of the original harness).
    tic = time.time()
    m_data = prepare_mortality_data(training=training)
    _log_timing("seconds elapsed to prepare mortality " + split + " data",
                time.time() - tic)

    tic = time.time()
    f_data = prepare_fss_data(training=training)
    _log_timing("seconds elapsed to prepare fss " + split + " data",
                time.time() - tic)

    # Mortality Prediction (timing includes writing the predictions file).
    tic = time.time()
    predicted_mortality = predict_mortality(trained_mortality_model, m_data)
    numpy.savetxt("./output/predicted_mortality_" + split + ".dat",
                  predicted_mortality, fmt="%s")
    _log_timing("seconds elapsed to predict mortality on " + split + " data",
                time.time() - tic)

    # FSS Prediction
    tic = time.time()
    predicted_fss = predict_fss(trained_fss_model, f_data)
    numpy.savetxt("./output/predicted_fss_" + split + ".dat",
                  predicted_fss, fmt="%s")
    _log_timing("seconds elapsed to predict fss on " + split + " data",
                time.time() - tic)


_score_split("training", training=True)
if USE_TESTING_DATA:
    _score_split("testing", training=False)

################################################################################
evaluation_file.close()
################################################################################
#                                 End of File                                  #
################################################################################
|
{"hexsha": "7aeb4893043b914a77557dafb47c3bd734b42b2e", "size": 3833, "ext": "py", "lang": "Python", "max_stars_repo_path": "testing.py", "max_stars_repo_name": "niwarei/hptbi-hackathon", "max_stars_repo_head_hexsha": "81e1d32ed27b2d79d8bbc40651d79081cee71d02", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-28T18:03:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-28T18:03:46.000Z", "max_issues_repo_path": "testing.py", "max_issues_repo_name": "niwarei/hptbi-hackathon", "max_issues_repo_head_hexsha": "81e1d32ed27b2d79d8bbc40651d79081cee71d02", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-07-17T23:25:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-19T14:05:11.000Z", "max_forks_repo_path": "testing.py", "max_forks_repo_name": "niwarei/hptbi-hackathon", "max_forks_repo_head_hexsha": "81e1d32ed27b2d79d8bbc40651d79081cee71d02", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-07-08T06:27:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-01T00:38:52.000Z", "avg_line_length": 37.213592233, "max_line_length": 94, "alphanum_fraction": 0.557004957, "include": true, "reason": "import numpy", "num_tokens": 763}
|
#include <config.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_vector.h>
/* Compile all the inline matrix functions */
#define COMPILE_INLINE_STATIC
#include "build.h"
#include <gsl/gsl_matrix.h>
|
{"hexsha": "fe595fa189f61d40e911309e35a30b21f12325a4", "size": 201, "ext": "c", "lang": "C", "max_stars_repo_path": "gsl-an/matrix/matrix.c", "max_stars_repo_name": "juandesant/astrometry.net", "max_stars_repo_head_hexsha": "47849f0443b890c4a875360f881d2e60d1cba630", "max_stars_repo_licenses": ["Net-SNMP", "Xnet"], "max_stars_count": 460.0, "max_stars_repo_stars_event_min_datetime": "2015-01-06T13:20:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T00:37:55.000Z", "max_issues_repo_path": "gsl-an/matrix/matrix.c", "max_issues_repo_name": "juandesant/astrometry.net", "max_issues_repo_head_hexsha": "47849f0443b890c4a875360f881d2e60d1cba630", "max_issues_repo_licenses": ["Net-SNMP", "Xnet"], "max_issues_count": 208.0, "max_issues_repo_issues_event_min_datetime": "2015-01-08T20:26:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T15:21:34.000Z", "max_forks_repo_path": "gsl-an/matrix/matrix.c", "max_forks_repo_name": "juandesant/astrometry.net", "max_forks_repo_head_hexsha": "47849f0443b890c4a875360f881d2e60d1cba630", "max_forks_repo_licenses": ["Net-SNMP", "Xnet"], "max_forks_count": 173.0, "max_forks_repo_forks_event_min_datetime": "2015-01-08T18:01:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T07:27:04.000Z", "avg_line_length": 18.2727272727, "max_line_length": 45, "alphanum_fraction": 0.7462686567, "num_tokens": 51}
|
import numpy as np
import cupy as cp
import pickle
from cupy.sparse import coo_matrix
from cupy.sparse import csr_matrix
class model_saver:
    """Snapshot, restore, and (de)serialize the parameters of a model.

    Host-side (NumPy) copies of each layer's weights and biases are kept so
    the wrapped model can be rolled back later. Works for both 'Sparse' and
    'Full' layer types and for models computing on 'CPU' or 'GPU' (CuPy
    device arrays are copied to host memory via ``.get()``).

    Assumes the model exposes ``_layer_type``, ``_comp_type``, ``_depth`` and
    a ``_layers`` list whose entries carry ``_weights`` and ``_biases``.
    """

    def __init__(self, model):
        self._model = model
        if self._model._layer_type == 'Sparse':
            if self._model._comp_type == 'GPU':
                # .get() copies CuPy device arrays back to host (NumPy).
                self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
            if self._model._comp_type == 'CPU':
                self._model_arrays = [(i._weights.copy(), np.array(i._biases)) for i in self._model._layers]
        if self._model._layer_type == 'Full':
            if self._model._comp_type == 'GPU':
                self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
            if self._model._comp_type == 'CPU':
                self._model_arrays = [(np.array(i._weights), np.array(i._biases)) for i in self._model._layers]
        # Filled by store_sparse_parameters() / load_sparse_parameters().
        self._sparse_parameters = None

    def store_model(self):
        ''' Stores the current state of the model. '''
        if self._model._layer_type == 'Sparse':
            # Fixed: was `self.model._comp_type` (missing underscore), which
            # raised AttributeError for every sparse model.
            if self._model._comp_type == 'GPU':
                # Fixed: biases were passed to np.array(); CuPy arrays do not
                # convert implicitly, so copy them to host with .get() as in
                # __init__.
                self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
            if self._model._comp_type == 'CPU':
                self._model_arrays = [(i._weights.copy(), np.array(i._biases)) for i in self._model._layers]
        if self._model._layer_type == 'Full':
            if self._model._comp_type == 'GPU':
                self._model_arrays = [(i._weights.get(), i._biases.get()) for i in self._model._layers]
            if self._model._comp_type == 'CPU':
                self._model_arrays = [(np.array(i._weights), np.array(i._biases)) for i in self._model._layers]
        return

    def restore_model(self):
        ''' Restores the weights stored in the model saver. '''
        if self._model._layer_type == 'Sparse':
            if self._model._comp_type == 'CPU':
                for i in range(self._model._depth):
                    self._model._layers[i]._weights = self._model_arrays[i][0].copy()
                    self._model._layers[i]._biases = np.array(self._model_arrays[i][1])
            if self._model._comp_type == 'GPU':
                for i in range(self._model._depth):
                    # Host snapshot goes back to the device as a sparse matrix.
                    self._model._layers[i]._weights = cp.sparse.csr_matrix(self._model_arrays[i][0])
                    self._model._layers[i]._biases = cp.array(self._model_arrays[i][1])
        if self._model._layer_type == 'Full':
            if self._model._comp_type == 'GPU':
                for i in range(self._model._depth):
                    self._model._layers[i]._weights = cp.array(self._model_arrays[i][0])
                    self._model._layers[i]._biases = cp.array(self._model_arrays[i][1])
            if self._model._comp_type == 'CPU':
                for i in range(self._model._depth):
                    self._model._layers[i]._weights = np.array(self._model_arrays[i][0])
                    self._model._layers[i]._biases = np.array(self._model_arrays[i][1])
        return

    def pickle_model(self, filename):
        ''' Stores the model in a pickle file. '''
        # `with` guarantees the file handle is closed (it leaked before).
        with open(filename, 'wb') as fh:
            pickle.dump(self._model, fh)
        print('Model pickled')
        return

    def load_model(self, filename):
        ''' Loads the model from a pickle file. '''
        with open(filename, 'rb') as fh:
            filelist = pickle.load(fh)
        if self._model._layer_type == 'Sparse':
            self._model_arrays = [(i[0].copy(), np.array(i[1])) for i in filelist]
        if self._model._layer_type == 'Full':
            for i in range(self._model._depth):
                # Fixed: the model keeps its layers in `_layers` (as used by
                # every other method here); `.layers` raised AttributeError.
                self._model._layers[i]._weights = filelist._layers[i]._weights
                self._model._layers[i]._biases = filelist._layers[i]._biases
        # Do a check that the layer type matches the weight datatype

    def load_sparse_parameters(self, filename):
        ''' Loads sparse parameters into the loader class, and into the model.
        (I can't think of a real use for loading the parameters into the loader, and model seperately)'''
        with open(filename, 'rb') as fh:
            parameters = pickle.load(fh)
        for i in range(len(parameters)):
            # Put the training masks in the layer objects, TODO turn this into a [0,1] mask
            self._model._sparse_training_mask = None #parameters[i]
            # Put the individual weights in the weight matrices (COO triplets).
            for j in range(parameters[i].nnz):
                self._model._layers[i]._weights[parameters[i].row[j]][parameters[i].col[j]] = parameters[i].data[j]
        print('Inserted weights from ', filename, ' into the weight matrices')
        return

    def store_sparse_parameters(self):
        ''' This returns the parameters that can be stored in memory in the notebook, use pickle_sparse_parameters after this'''
        # Keep only the masked (trainable) weights, as COO sparse matrices.
        parameters = []
        for i in self._model._layers:
            these_parameters = np.multiply(i._weights, i._sparse_training_mask)
            # Sparsify these_parameters
            these_parameters = csr_matrix(these_parameters, dtype = np.float32)
            these_parameters = these_parameters.tocoo()
            parameters.append((these_parameters, i._biases))
        self._sparse_parameters = parameters
        return

    def pickle_sparse_parameters(self, filename):
        ''' Stores the sparse parameters in a pickle file. '''
        # `is None` instead of `== None`: identity test, and avoids elementwise
        # comparison surprises with array-like contents.
        if self._sparse_parameters is None:
            print('No parameters stored')
            return
        with open(filename, 'wb') as fh:
            pickle.dump(self._sparse_parameters, fh)
        print('Model pickled')
        return

    def restore_sparse_parameters(self):
        ''' Need a more specific name than sparse parameters. this will take some learning, drop the weights in'''
        if self._sparse_parameters is None:
            print('No parameters stored')
            return
        for i in range(len(self._sparse_parameters)):
            # Put the training masks in the layer objects, TODO turn this into a [0,1] mask
            #self._model._sparse_training_mask = None #parameters[i]
            # Put the individual weights in the weight matrices
            for j in range(self._sparse_parameters[i][0].nnz):
                self._model._layers[i]._weights[int(self._sparse_parameters[i][0].row[j])][int(self._sparse_parameters[i][0].col[j])] = self._sparse_parameters[i][0].data[j]
            # Replace full arrays, test
            #self._model._layers[i]._weights = np.multiply(self._model._layers[i]._weights, (self._sparse_parameters[i][0] == 0))
            #self._model._layers[i]._weights = self._model._layers[i]._weights + self._sparse_parameters[i][0]
            self._model._layers[i]._biases = self._sparse_parameters[i][1]
        print('Sparse parameters restored')
        return
|
{"hexsha": "f95d06c9d5a778f184f2dd6282437329a72f1caf", "size": 7043, "ext": "py", "lang": "Python", "max_stars_repo_path": "sparana/saver.py", "max_stars_repo_name": "jngannon/SpaRaNa", "max_stars_repo_head_hexsha": "35d8853ab842681469db08ef92b4f914e81922a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sparana/saver.py", "max_issues_repo_name": "jngannon/SpaRaNa", "max_issues_repo_head_hexsha": "35d8853ab842681469db08ef92b4f914e81922a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sparana/saver.py", "max_forks_repo_name": "jngannon/SpaRaNa", "max_forks_repo_head_hexsha": "35d8853ab842681469db08ef92b4f914e81922a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.7867647059, "max_line_length": 173, "alphanum_fraction": 0.6038619906, "include": true, "reason": "import numpy,import cupy,from cupy", "num_tokens": 1578}
|
# =============================================================================
# Authors: PAR Government
# Organization: DARPA
#
# Copyright (c) 2016 PAR Government
# All rights reserved.
# ==============================================================================
import numpy as np
from maskgen.algorithms.optical_flow import OpticalFlow, FrameAnalyzer
from maskgen.image_wrap import openImageFile
from tests.test_support import TestSupport
class TestOpticalFlow(TestSupport):
    """Exercise OpticalFlow frame interpolation between two reference frames."""

    def test_mask_withsame_size(self):
        """Interpolated frames equal f1 at t=0 and f2 at t=1, and sit closer
        to f1 for small t and closer to f2 for large t."""
        analyzer = FrameAnalyzer(199.87494824016565, 233.18743961352658, 33.31249137336093)
        f1 = openImageFile(self.locateFile('tests/algorithms/f1.png')).image_array
        f2 = openImageFile(self.locateFile('tests/algorithms/f2.png')).image_array
        analyzer.updateFlow(f1, f2, 'forward')
        flow_manager = OpticalFlow(f1, f2, analyzer.back_flow, analyzer.jump_flow)
        frame = flow_manager.setTime(0.0)
        self.assertEqual(0, np.sum(abs(frame - f1)))
        frame = flow_manager.setTime(0.1)
        self.assertTrue( np.sum(abs(frame - f1)) < np.sum(abs(frame - f2)))
        # Fixed: Python-2-only `print x` statement was a SyntaxError on
        # Python 3; the single-argument call form behaves the same on both.
        print(np.sum(abs(frame - f2)))
        frame = flow_manager.setTime(0.9)
        self.assertTrue(np.sum(abs(frame - f1)) > np.sum(abs(frame - f2)))
        frame = flow_manager.setTime(1.0)
        self.assertEqual(0, np.sum(abs(frame - f2)))
|
{"hexsha": "54633a9b629135fbe4e2edc5587e4f290c0aa97d", "size": 1377, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/algorithms/tests_optical_flow.py", "max_stars_repo_name": "j-h-m/Media-Journaling-Tool", "max_stars_repo_head_hexsha": "4ab6961e2768dc002c9bbad182f83188631f01bd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/algorithms/tests_optical_flow.py", "max_issues_repo_name": "j-h-m/Media-Journaling-Tool", "max_issues_repo_head_hexsha": "4ab6961e2768dc002c9bbad182f83188631f01bd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/algorithms/tests_optical_flow.py", "max_forks_repo_name": "j-h-m/Media-Journaling-Tool", "max_forks_repo_head_hexsha": "4ab6961e2768dc002c9bbad182f83188631f01bd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4193548387, "max_line_length": 91, "alphanum_fraction": 0.6194625999, "include": true, "reason": "import numpy", "num_tokens": 322}
|
import pygame
from Play.caracters import Human,Goblin
from Play.environment import Nature
import numpy as np
import random
from Utility import is_member ,Direction,full_file
# --- Module-level game setup ---------------------------------------------
pygame.init()
clock = pygame.time.Clock()
# The world/background; also owns the drawing surface (e.win) used below.
e = Nature()
#e.play_sound()
# g holds every goblin; they spawn at random positions on the right side
# of the screen and immediately start walking left at a random speed.
g = []
number_of_enemy = 10
for n in range(number_of_enemy):
    random_position_x = random.randint(400,1000)
    random_position_y = random.randint(0,80)
    enemy = Goblin( e, position_x=random_position_x,position_y = random_position_y, walk_direction='left' )
    enemy.create()
    # Negative speed == moving left (see Direction usage elsewhere).
    random_speed = random.randint(-5,-1 )
    enemy.walk( random_speed )
    g.append(enemy)
# The player-controlled hero.
h = Human(e,position_x=200, position_y = 60)
h.create()
def check_collide(hero, enemy):
    """Detect collisions between *hero* and each goblin in *enemy*.

    A body collision (x and y distance both < 5) means the hero is hit
    ('Hero' is injured); otherwise a landing collision (hero within 10 of a
    goblin's head and x distance < 5) means the goblin is hit ('Enemy').

    Fixed: the body previously mixed the `hero` parameter with the module
    global `h`, so the parameter was partly ignored; it now uses `hero`
    consistently (identical behavior at the existing call site, where h is
    passed in).

    Returns a dict:
        state          -- True if any collision occurred
        injure         -- 'Hero', 'Enemy', or None
        enemy_collide  -- list of the goblins involved in the collision
    """
    collide_distance_x = []
    collide_distance_y = []
    distance_jump_collide = []
    for j in enemy:
        # Horizontal gap between the facing edges of the two sprites.
        if j.position_x < hero.position_x:
            collide_distance_x.append(abs(j.position_x + j.width - hero.position_x))
        else:
            collide_distance_x.append(abs(hero.position_x + hero.width - j.position_x))
        # Vertical gap between sprite origins (body collision test).
        collide_distance_y.append(abs(j.position_y - hero.position_y))
        # Gap between the goblin's head and the hero (stomp/jump test).
        distance_jump_collide.append(abs(j.position_y + j.high - hero.position_y))
    x_distance = np.array(collide_distance_x)
    y_distance = np.array(collide_distance_y)
    distance_jump_collide = np.array(distance_jump_collide)
    # Boolean mask per goblin for each collision kind.
    index_enemy_collide = (y_distance < 5) & (x_distance < 5)
    index_jump_collide = (distance_jump_collide < 10) & (x_distance < 5)
    jump_collide = True in index_jump_collide
    Enemy_collide = True in index_enemy_collide
    enemy_collide = []
    if Enemy_collide:
        # Body collision wins over a simultaneous jump collision.
        collide = Enemy_collide
        index = np.where(index_enemy_collide == True)
        enemy_collide = list(np.array(enemy)[index])
        injure = 'Hero'
    elif jump_collide:
        collide = jump_collide
        index = np.where( index_jump_collide == True )
        enemy_collide = list( np.array( enemy )[index] )
        injure = 'Enemy'
    else:
        collide = False
        injure = None
    collide_state = {'state': collide, 'injure': injure ,'enemy_collide': enemy_collide}
    return collide_state
def live_bar(screen, health):
    """Draw the hero's health bar: a green background with a red overlay
    that grows as *health* (0..100) drops."""
    bar_left, bar_top = 100, 36 - 20
    bar_width, bar_height = 80, 20
    pygame.draw.rect(screen, (0, 255, 0), (bar_left, bar_top, bar_width, bar_height))
    red_width = 80 - 79 * health / 100
    pygame.draw.rect(screen, (255, 0, 0), (bar_left, bar_top, red_width, bar_height))
def enemy_action(enemy: Goblin, hero, injure):
    # React to a collision: when the hero is injured, the goblin takes one of
    # four random actions; when the goblin is injured, it takes damage, plays
    # a sound, and is knocked back.
    # NOTE(review): `hero.health = -enemy.power` and `enemy.health = -hero.power`
    # only make sense if `health` is a property whose setter *accumulates* the
    # assigned value (otherwise `enemy.health == 0` below could never trigger)
    # — confirm against the caracters module.
    if injure == 'Hero':
        # cause randomly the enemy action
        # generate random number between 1- 4 (4 == do nothing)
        random_number = random.randint(1, 4)
        # escape - jump away in the direction opposite the goblin's walk
        if random_number == 1:
            enemy.jump(40, 5* - Direction[enemy.walk_direction].value )
        elif random_number == 2:
            # stand and strike: the hero loses `enemy.power` health
            enemy.stop()
            enemy.attack()
            hero.health = -enemy.power
        elif random_number == 3:
            enemy.stop()
        pass
    elif injure == 'Enemy':
        enemy.health = -hero.power
        # Pick the death or hurt sound depending on remaining health.
        if enemy.health == 0:
            voice = enemy.sound_dead
        else:
            voice = enemy.sound_hooch
        enemy.play_sound(voice)
        # Knock-back uses the module-level `direction` global — TODO confirm
        # this is intentional rather than the goblin's own walk direction.
        enemy.jump( 40, 5 * direction )
def control_character(character):
    """Translate the currently pressed keys into actions on *character*.

    Arrow keys walk/stop/jump, space attacks; a dead character ignores all
    input. Several keys held at once all fire, in the order listed below.
    """
    pressed = pygame.key.get_pressed()
    if not character.live:
        return
    bindings = (
        (pygame.K_LEFT, lambda: character.walk(-5)),
        (pygame.K_RIGHT, lambda: character.walk(5)),
        (pygame.K_DOWN, lambda: character.stop()),
        (pygame.K_UP, lambda: character.jump(50, 5)),
        (pygame.K_SPACE, lambda: character.attack()),
    )
    for key, action in bindings:
        if pressed[key]:
            action()
def redrawWindow():
    # Render one frame. Reads module globals: e (world), h (hero), g (goblins),
    # font/font_game_over, score, sp (seconds remaining), enemy_alive.
    e.draw()
    e.move_background()
    live_bar(e.win, h.health)
    # HUD text surfaces.
    text_score = font.render('Score: ' + str(score), 1, (0,0,0))
    health_score = font.render('Health: ' + str(h.health), 1, (0,0,0))
    # Clamp the countdown so it never displays below zero.
    time_left = max(round(sp),0)
    time_left_font = font.render('Time:' + str(time_left), 1, (0,0,0))
    # End-of-game banners: death/timeout beats victory if both hold.
    if (not h.live) or time_left == 0:
        game_over = font_game_over.render('GAME OVER', 1, (10, 128, 147))
        e.win.blit(game_over, (e.width/4, e.high/3))
    elif enemy_alive == 0:
        game_over = font_game_over.render('YOU WIN!', 1, (12, 250, 147))
        e.win.blit(game_over, (e.width/4, e.high/3))
    e.win.blit( text_score, (e.width - 150, 10) )
    e.win.blit( health_score, (20, 10) )
    e.win.blit( time_left_font, (200, 10) )
    h.draw()
    [r.draw() for r in g]
    pygame.display.update()
# --- Main game loop state ------------------------------------------------
sp = 60          # seconds remaining on the countdown clock
score = 0
speed = 100      # NOTE(review): appears unused below — confirm before removing
direction =-1    # shared goblin walk direction: -1 left, +1 right
# create font object
font = pygame.font.SysFont("comicsansms", 22 )
font_game_over = pygame.font.SysFont("comicsansms",40 )
font_game_over.set_bold( 15 )
# Per-goblin walking speed magnitude (sign comes from `direction`).
random_speed = [random.randint( 1, 4 ) for i in range(number_of_enemy)]
run = True
while run:
    pygame.time.delay(50)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    control_character(h)
    # Bounce the goblin pack off the screen edges.
    if g[-1].position_x <=0:
        direction = 1
    elif g[0].position_x>=600:
        direction = -1
    collide = check_collide(h, g)
    if h.live:
        if collide['state']:
            enemy_action( collide['enemy_collide'][0], h, collide['injure'] )
            if collide['injure'] == 'Enemy':
                score += 20
            # NOTE(review): `not collide['enemy_collide'][0]` tests the
            # truthiness of a Goblin object (likely always truthy, so no
            # goblin walks this frame); presumably it was meant to exclude
            # only the colliding goblin — confirm intent. The comprehension
            # variable `e` shadows the Nature instance only inside the
            # comprehension (Python 3 scoping).
            [e.walk( random_speed[i] * direction ) for i, e in enumerate( g, 0 ) if not collide['enemy_collide'][0] ]
        else:
            [e.walk(random_speed[i]*direction) for i, e in enumerate(g,0)]
    if not h.live:
        # all the goblins celebrate
        [r.jump(30) for r in g if 0 < r.position_x < e.width]
    enemy_alive = np.sum([e.live for e in g])
    # ~60 iterations per displayed second of countdown.
    sp -= 1/60
    redrawWindow()
    clock.tick(100)
pygame.quit()
|
{"hexsha": "3a857c4ca47360b4043d659c85492597bbe81d0b", "size": 6177, "ext": "py", "lang": "Python", "max_stars_repo_path": "game_scene.py", "max_stars_repo_name": "orenber/Goblins-War", "max_stars_repo_head_hexsha": "3978095b8c2fd661501daf9bacc272c5e8c5daa1", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-19T07:16:32.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-19T07:16:32.000Z", "max_issues_repo_path": "game_scene.py", "max_issues_repo_name": "orenber/Goblins-War", "max_issues_repo_head_hexsha": "3978095b8c2fd661501daf9bacc272c5e8c5daa1", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "game_scene.py", "max_forks_repo_name": "orenber/Goblins-War", "max_forks_repo_head_hexsha": "3978095b8c2fd661501daf9bacc272c5e8c5daa1", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9854368932, "max_line_length": 117, "alphanum_fraction": 0.6185850737, "include": true, "reason": "import numpy", "num_tokens": 1657}
|
[STATEMENT]
lemma map_cond_spmf_fst: "map_spmf f (cond_spmf_fst p x) = cond_spmf_fst (map_spmf (apsnd f) p) x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_spmf f (cond_spmf_fst p x) = cond_spmf_fst (map_spmf (apsnd f) p) x
[PROOF STEP]
by(auto simp add: cond_spmf_fst_def spmf.map_comp intro!: map_spmf_cong arg_cong2[where f="cond_spmf"])
|
{"llama_tokens": 181, "file": "Constructive_Cryptography_CM_More_CC", "length": 1}
|
Require Import Helix.MSigmaHCOL.MemSetoid.
Require Import Helix.LLVMGen.Correctness_Prelude.
Require Import Helix.LLVMGen.Correctness_Invariants.
Require Import Helix.LLVMGen.Correctness_NExpr.
Require Import Helix.LLVMGen.Correctness_MExpr.
Require Import Helix.LLVMGen.IdLemmas.
Require Import Helix.LLVMGen.StateCounters.
Require Import Helix.LLVMGen.VariableBinding.
Require Import Helix.LLVMGen.BidBound.
Require Import Helix.LLVMGen.LidBound.
Require Import Helix.LLVMGen.StateCounters.
Require Import Helix.LLVMGen.Context.
Require Import Helix.LLVMGen.Correctness_While.
Require Import Helix.LLVMGen.Correctness_AExpr.
From Vellvm Require Import Utils.Commutation.
Require Import Paco.paco.
From ITree Require Import ITree Eq.Eq HeterogeneousRelations.
Set Implicit Arguments.
Set Strict Implicit.
Opaque dropVars.
Opaque newLocalVar.
Opaque resolve_PVar.
Opaque incBlockNamed.
Opaque incVoid.
Opaque incLocal.
Opaque genWhileLoop.
Import Memory.NM.
Import ListNotations.
Import MonadNotation.
Local Open Scope monad_scope.
Local Open Scope nat_scope.
Set Implicit Arguments.
Set Strict Implicit.
Opaque dropVars.
Opaque newLocalVar.
Opaque resolve_PVar.
Opaque incBlockNamed.
Opaque incVoid.
Opaque incLocal.
Opaque genWhileLoop.
Import Memory.NM.
Import ListNotations.
Import MonadNotation.
Local Open Scope monad_scope.
Local Open Scope nat_scope.
Section pure.
  (* Push [interp_Mem]/[interp_fail] through [Ret] in one step. *)
  Ltac interp_MF_ret := setoid_rewrite interp_Mem_ret; setoid_rewrite interp_fail_ret; cbn.
  (* Push [interp_Mem]/[interp_fail] through [bind] in one step. *)
  Ltac interp_MF_bind := setoid_rewrite interp_Mem_bind; setoid_rewrite interp_fail_bind; cbn.
  (* Interpreting a [throw] through [interp_Mem] then [interp_fail] always
     yields the failure value [Ret None]: the exception event is handled by
     [handle_failure] and the computation never resumes. *)
  Lemma interp_fail_throw :
    forall T s mH, interp_fail handle_failure (interp_Mem (T := T) (throw s) mH) ≈ Ret None.
  Proof.
    intros. setoid_rewrite interp_Mem_vis_eqit.
    unfold pure_state in *; cbn in *.
    rewrite !interp_fail_bind.
    rewrite !interp_fail_vis.
    cbn in *.
    rewrite Eq.bind_bind, !bind_ret_l. reflexivity.
  Qed.
  (* Prove the "fails" (left) disjunct of a classic goal. *)
  Ltac fail_f := left;
                 solve [ rewrite bind_ret_l; reflexivity | unfold Dfail, Sfail; rewrite interp_fail_throw; reflexivity ].
  (* Prove the "returns" (right) disjunct of a classic goal. *)
  Ltac ret_f := right; setoid_rewrite interp_Mem_ret; setoid_rewrite interp_fail_ret; cbn; eexists; reflexivity.
  (* Case-split an induction hypothesis of classic shape into the failure
     case [name : ... ≈ Ret None] or the success case [name : ... ≈ Ret _]. *)
  Ltac break_classic name :=
    match goal with
    | [ H : forall (σ : evalContext) (memH : memoryH),
          interp_fail handle_failure (interp_Mem (denoteNExpr σ ?nexp1) memH) ≈ Ret None \/ _ |-
        ITree.bind (interp_fail _ (interp_Mem (denoteNExpr _ ?nexp1) _)) _ ≈ _ \/ _] =>
      edestruct H as [name | (? & name)]
    end.
  Ltac fail_or_ret := solve [fail_f | ret_f].
  (* Discharge a binary-operator case: split both sub-expressions with
     [break_classic] and close each resulting branch. *)
  Ltac fr_crush He1 He2 :=
    cbn* in *; interp_MF_bind;
    break_classic He1; setoid_rewrite He1;
    [ fail_f | setoid_rewrite bind_ret_l; interp_MF_bind;
               break_classic He2;
               setoid_rewrite He2; [fail_f | setoid_rewrite bind_ret_l; try break_match; fail_or_ret ]].
  (* Denoting an NExpr under [interp_Mem]/[interp_fail] is "classical":
     it either fails ([Ret None]) or returns a value without touching the
     Helix memory (same [memH] in the result). *)
  Lemma genNExpr_correct_pure_classic :
    forall (* Helix bits *) (nexp: NExpr) (σ: evalContext) (memH: memoryH),
      eutt eq (interp_fail handle_failure (interp_Mem (denoteNExpr σ nexp) memH)) (Ret None) \/
      exists t, eutt eq (interp_fail handle_failure (interp_Mem (denoteNExpr σ nexp) memH)) (Ret (Some (memH, t))).
  Proof.
    intros nexp; induction nexp; intros *.
    (* Variables/constants are immediate; binary operators via [fr_crush]. *)
    all : try solve [unfold denoteNExpr; fail_or_ret |
                     unfold denoteNExpr in *; cbn* in *; break_match; simp; fail_or_ret |
                     fr_crush He1 He2].
  Qed.
  (* Same classicality statement as for NExpr, but for memory expressions:
     denotation either fails or returns while leaving [memH] unchanged. *)
  Lemma genMExpr_correct_pure_classic :
    forall (* Helix bits *) (mexp: MExpr) (σ: evalContext) (memH: memoryH),
      eutt eq (interp_fail handle_failure (interp_Mem (denoteMExpr σ mexp) memH)) (Ret None) \/
      exists t, eutt eq (interp_fail handle_failure (interp_Mem (denoteMExpr σ mexp) memH)) (Ret (Some (memH, t))).
  Proof.
    intros mexp; induction mexp; intros *; unfold denoteMExpr, denotePExpr in *; cbn* in *.
    (* Pointer case: the environment lookup may fail, then the memory lookup
       event is handled either as a failure or as a pure read. *)
    - break_match; simp; try_abs.
      interp_MF_bind. rewrite interp_fail_throw.
      setoid_rewrite bind_ret_l. left. reflexivity.
      setoid_rewrite bind_ret_l.
      interp_MF_bind.
      unfold interp_Mem.
      setoid_rewrite interp_state_trigger.
      cbn. break_match.
      cbn. setoid_rewrite interp_fail_vis. cbn.
      setoid_rewrite bind_ret_l. left. setoid_rewrite bind_ret_l. reflexivity.
      cbn. setoid_rewrite interp_fail_ret. cbn.
      setoid_rewrite bind_ret_l.
      setoid_rewrite interp_state_ret. setoid_rewrite interp_fail_ret. right.
      eexists. cbn. reflexivity.
    (* Constant case: trivially returns. *)
    - interp_MF_ret. right. eexists. reflexivity.
  Qed.
  (* Classicality for arithmetic expressions: each AExpr either fails or
     returns purely, by induction using the NExpr/MExpr classic lemmas. *)
  Lemma genAExpr_correct_helix_pure_classic :
    forall (aexp: AExpr) (σ: evalContext) (memH: memoryH),
      eutt eq (interp_fail handle_failure (interp_Mem (denoteAExpr σ aexp) memH)) (Ret None) \/
      exists t, eutt eq (interp_fail handle_failure (interp_Mem (denoteAExpr σ aexp) memH)) (Ret (Some (memH, t))).
  Proof.
    intros aexp; induction aexp; intros *.
    - (* Variable case *)
      (* Reducing the compilation *)
      (* The variable maps to an integer in the IRState *)
      unfold denoteAExpr in *; cbn* in *; break_match; simp; interp_MF_bind.
      left. rewrite interp_fail_throw, bind_ret_l. reflexivity.
      interp_MF_ret.
      setoid_rewrite bind_ret_l.
      break_match; simp; fail_or_ret.
    - (* Constant *)
      cbn* in *; ret_f.
    - (* ANth m n: lookup to m[n] *)
      (* Split the index (NExpr) then the array (MExpr); the bounds check
         introduces the extra failure branches below. *)
      cbn* in *.
      edestruct genNExpr_correct_pure_classic with (nexp := n) as [? | (? & ?)];
        interp_MF_bind; setoid_rewrite H.
      setoid_rewrite bind_ret_l.
      left; reflexivity.
      setoid_rewrite bind_ret_l.
      edestruct genMExpr_correct_pure_classic with (mexp := m) as [? | (? & ?)];
        interp_MF_bind; setoid_rewrite H0.
      setoid_rewrite bind_ret_l. left; reflexivity.
      setoid_rewrite bind_ret_l. destruct x0.
      break_match; simp; break_match; simp.
      left. interp_MF_bind. rewrite interp_fail_throw. rewrite bind_ret_l. reflexivity.
      left. interp_MF_bind. rewrite interp_fail_throw. rewrite bind_ret_l. reflexivity.
      left. cbn. rewrite bind_ret_l. rewrite interp_fail_throw. reflexivity.
      right. setoid_rewrite bind_ret_l. interp_MF_ret. eexists; reflexivity.
    - (* AAbs *)
      cbn* in *; simp.
      edestruct IHaexp; eauto.
      interp_MF_bind.
      setoid_rewrite H.
      setoid_rewrite bind_ret_l. left; reflexivity.
      destruct H. interp_MF_bind. setoid_rewrite H.
      setoid_rewrite bind_ret_l. interp_MF_ret.
      right. eexists. reflexivity.
      (* - (* APlus *) *)
      (* The six remaining binary-operator cases share the same script:
         split each operand classically, failure in either operand
         propagates, two successes return. *)
    - cbn* in *; simp;
        edestruct IHaexp1 as [He1 | (? & He1)]; interp_MF_bind; setoid_rewrite He1;
          setoid_rewrite bind_ret_l; [left; reflexivity |
      edestruct IHaexp2 as [He2 | (? & He2)]; interp_MF_bind; setoid_rewrite He2; setoid_rewrite bind_ret_l;
      [left; reflexivity |
       right; interp_MF_ret; eexists; reflexivity]].
    - cbn* in *; simp;
        edestruct IHaexp1 as [He1 | (? & He1)]; interp_MF_bind; setoid_rewrite He1;
          setoid_rewrite bind_ret_l; [left; reflexivity |
      edestruct IHaexp2 as [He2 | (? & He2)]; interp_MF_bind; setoid_rewrite He2; setoid_rewrite bind_ret_l;
      [left; reflexivity |
       right; interp_MF_ret; eexists; reflexivity]].
    - cbn* in *; simp;
        edestruct IHaexp1 as [He1 | (? & He1)]; interp_MF_bind; setoid_rewrite He1;
          setoid_rewrite bind_ret_l; [left; reflexivity |
      edestruct IHaexp2 as [He2 | (? & He2)]; interp_MF_bind; setoid_rewrite He2; setoid_rewrite bind_ret_l;
      [left; reflexivity |
       right; interp_MF_ret; eexists; reflexivity]].
    - cbn* in *; simp;
        edestruct IHaexp1 as [He1 | (? & He1)]; interp_MF_bind; setoid_rewrite He1;
          setoid_rewrite bind_ret_l; [left; reflexivity |
      edestruct IHaexp2 as [He2 | (? & He2)]; interp_MF_bind; setoid_rewrite He2; setoid_rewrite bind_ret_l;
      [left; reflexivity |
       right; interp_MF_ret; eexists; reflexivity]].
    - cbn* in *; simp;
        edestruct IHaexp1 as [He1 | (? & He1)]; interp_MF_bind; setoid_rewrite He1;
          setoid_rewrite bind_ret_l; [left; reflexivity |
      edestruct IHaexp2 as [He2 | (? & He2)]; interp_MF_bind; setoid_rewrite He2; setoid_rewrite bind_ret_l;
      [left; reflexivity |
       right; interp_MF_ret; eexists; reflexivity]].
    - cbn* in *; simp;
        edestruct IHaexp1 as [He1 | (? & He1)]; interp_MF_bind; setoid_rewrite He1;
          setoid_rewrite bind_ret_l; [left; reflexivity |
      edestruct IHaexp2 as [He2 | (? & He2)]; interp_MF_bind; setoid_rewrite He2; setoid_rewrite bind_ret_l;
      [left; reflexivity |
       right; interp_MF_ret; eexists; reflexivity]].
  Qed. (* TODO: crush-ey Ltac.. *)
  (* Under a [no_failure] hypothesis, the failure disjunct of the classic
     lemma is ruled out: an MExpr denotation must return purely. *)
  Lemma genMExpr_correct_pure :
    forall (* Helix bits *) (mexp: MExpr) (σ: evalContext) (memH: memoryH),
      no_failure (interp_helix (E := E_cfg) (denoteMExpr σ mexp) memH) -> (* Source semantics defined *)
      exists t, eutt eq (interp_fail handle_failure (interp_Mem (denoteMExpr σ mexp) memH)) (Ret (Some (memH, t))).
  Proof.
    intros * NOFAIL.
    destruct mexp as [[vid] | mblock]; cbn* in *; simp.
    unfold denoteMExpr, denotePExpr in *; cbn* in *.
    (* Environment lookup cannot fail by NOFAIL. *)
    simp; try_abs. subst.
    setoid_rewrite bind_ret_l.
    setoid_rewrite bind_ret_l in NOFAIL.
    (* The constant case is dispatched immediately. *)
    2 : { interp_MF_ret. eexists. reflexivity. }
    interp_MF_bind.
    unfold interp_Mem.
    setoid_rewrite interp_state_trigger.
    cbn* in *.
    (* Unfold NOFAIL down to the memory-lookup event to contradict the
       failing branch of the handler. *)
    assert (NOFAIL' := NOFAIL).
    setoid_rewrite interp_helix_bind in NOFAIL.
    unfold interp_helix, interp_Mem in NOFAIL.
    apply no_failure_bind_prefix in NOFAIL.
    unfold Exception.throw in *.
    unfold interp_helix in *.
    unfold interp_fail in NOFAIL.
    setoid_rewrite interp_state_vis in NOFAIL.
    unfold pure_state in *; cbn in *.
    setoid_rewrite interp_fail_bind in NOFAIL.
    cbn* in *. simp; try_abs. exfalso.
    setoid_rewrite interp_fail_vis in NOFAIL.
    cbn in *.
    rewrite Eq.bind_bind, !bind_ret_l in NOFAIL.
    rewrite translate_ret in NOFAIL. red in NOFAIL. red in NOFAIL.
    apply eutt_Ret in NOFAIL.
    apply NOFAIL; auto.
    (* Successful lookup: conclude with a pure return. *)
    clear NOFAIL.
    setoid_rewrite interp_fail_ret. setoid_rewrite bind_ret_l.
    eexists.
    setoid_rewrite interp_state_ret.
    setoid_rewrite interp_fail_ret.
    cbn. reflexivity.
  Qed.
Lemma genNExpr_correct_pure :
forall (* Helix bits *) (nexp: NExpr) (σ: evalContext) (memH: memoryH),
no_failure (interp_helix (E := E_cfg) (denoteNExpr σ nexp) memH) -> (* Source semantics defined *)
exists t, eutt eq (interp_fail handle_failure (interp_Mem (denoteNExpr σ nexp) memH)) (Ret (Some (memH, t))).
Proof.
intros nexp; induction nexp; intros * NOFAIL.
- (* Variable case *)
(* Reducing the successful compilation *)
simp.
(* The variable maps to an integer in the IRState *)
unfold denoteNExpr in *; cbn* in *; simp; try_abs.
interp_MF_ret. eexists. reflexivity.
- (* Const *)
unfold denoteNExpr in *; cbn in *; simp; try_abs.
interp_MF_ret. eexists. reflexivity.
- (* NDiv *)
cbn* in *; simp; try_abs.
interp_MF_bind.
clean_goal.
edestruct IHnexp1.
assert (NOFAIL' := NOFAIL).
apply no_failure_helix_bind_prefix in NOFAIL'; eauto.
setoid_rewrite H.
setoid_rewrite bind_ret_l.
edestruct IHnexp2.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
eapply no_failure_bind_prefix in NOFAIL; eauto.
interp_MF_bind.
setoid_rewrite H0.
setoid_rewrite bind_ret_l.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H0 in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
destruct (MInt64asNT.NTypeEqDec x0 MInt64asNT.NTypeZero ).
try_abs.
interp_MF_ret. eexists. reflexivity.
- (* NMod *)
cbn* in *; simp; try_abs.
interp_MF_bind.
clean_goal.
edestruct IHnexp1.
assert (NOFAIL' := NOFAIL).
apply no_failure_helix_bind_prefix in NOFAIL'; eauto.
setoid_rewrite H.
setoid_rewrite bind_ret_l.
edestruct IHnexp2.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
eapply no_failure_bind_prefix in NOFAIL; eauto.
interp_MF_bind.
setoid_rewrite H0.
setoid_rewrite bind_ret_l.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H0 in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
destruct (MInt64asNT.NTypeEqDec x0 MInt64asNT.NTypeZero ).
try_abs.
interp_MF_ret. eexists. reflexivity.
- (* NAdd *)
cbn* in *; simp; try_abs.
interp_MF_bind.
clean_goal.
edestruct IHnexp1.
assert (NOFAIL' := NOFAIL).
apply no_failure_helix_bind_prefix in NOFAIL'; eauto.
setoid_rewrite H.
setoid_rewrite bind_ret_l.
edestruct IHnexp2.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
eapply no_failure_bind_prefix in NOFAIL; eauto.
interp_MF_bind.
setoid_rewrite H0.
setoid_rewrite bind_ret_l.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H0 in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
destruct (MInt64asNT.NTypeEqDec x0 MInt64asNT.NTypeZero ).
try_abs.
interp_MF_ret. eexists. reflexivity.
interp_MF_ret. eexists. reflexivity.
- (* NMinus *)
cbn* in *; simp; try_abs.
interp_MF_bind.
clean_goal.
edestruct IHnexp1.
assert (NOFAIL' := NOFAIL).
apply no_failure_helix_bind_prefix in NOFAIL'; eauto.
setoid_rewrite H.
setoid_rewrite bind_ret_l.
edestruct IHnexp2.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
eapply no_failure_bind_prefix in NOFAIL; eauto.
interp_MF_bind.
setoid_rewrite H0.
setoid_rewrite bind_ret_l.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H0 in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
destruct (MInt64asNT.NTypeEqDec x0 MInt64asNT.NTypeZero ).
try_abs.
interp_MF_ret. eexists. reflexivity.
interp_MF_ret. eexists. reflexivity.
- (* NMult *)
cbn* in *; simp; try_abs.
interp_MF_bind.
clean_goal.
edestruct IHnexp1.
assert (NOFAIL' := NOFAIL).
apply no_failure_helix_bind_prefix in NOFAIL'; eauto.
setoid_rewrite H.
setoid_rewrite bind_ret_l.
edestruct IHnexp2.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
eapply no_failure_bind_prefix in NOFAIL; eauto.
interp_MF_bind.
setoid_rewrite H0.
setoid_rewrite bind_ret_l.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H0 in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
destruct (MInt64asNT.NTypeEqDec x0 MInt64asNT.NTypeZero ).
try_abs.
interp_MF_ret. eexists. reflexivity.
interp_MF_ret. eexists. reflexivity.
- (* NMin *)
cbn* in *; simp; try_abs.
interp_MF_bind.
clean_goal.
edestruct IHnexp1.
assert (NOFAIL' := NOFAIL).
apply no_failure_helix_bind_prefix in NOFAIL'; eauto.
setoid_rewrite H.
setoid_rewrite bind_ret_l.
edestruct IHnexp2.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
eapply no_failure_bind_prefix in NOFAIL; eauto.
interp_MF_bind.
setoid_rewrite H0.
setoid_rewrite bind_ret_l.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H0 in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
destruct (MInt64asNT.NTypeEqDec x0 MInt64asNT.NTypeZero ).
try_abs.
interp_MF_ret. eexists. reflexivity.
interp_MF_ret. eexists. reflexivity.
- (* NMax *)
cbn* in *; simp; try_abs.
interp_MF_bind.
clean_goal.
edestruct IHnexp1.
assert (NOFAIL' := NOFAIL).
apply no_failure_helix_bind_prefix in NOFAIL'; eauto.
setoid_rewrite H.
setoid_rewrite bind_ret_l.
edestruct IHnexp2.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
eapply no_failure_bind_prefix in NOFAIL; eauto.
interp_MF_bind.
setoid_rewrite H0.
setoid_rewrite bind_ret_l.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
rewrite interp_helix_bind in NOFAIL.
unfold interp_helix at 1 in NOFAIL.
setoid_rewrite H0 in NOFAIL.
rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
destruct (MInt64asNT.NTypeEqDec x0 MInt64asNT.NTypeZero ).
try_abs.
interp_MF_ret. eexists. reflexivity.
interp_MF_ret. eexists. reflexivity.
Qed.
(* Purity of the Helix semantics of arithmetic expressions:
   if the source semantics of [aexp] cannot fail ([no_failure]), then
   interpreting [denoteAExpr σ aexp] in the failure monad returns
   [Some (memH, t)] for some value [t] — in particular the Helix
   memory [memH] is left unchanged.
   Proof by induction on [aexp]; every binary-operator case follows the
   same script: discharge both sub-expressions via IHaexp1/IHaexp2,
   rewrite the interpreted binds down to [Ret]s in the goal and in
   NOFAIL, then conclude by reflexivity. *)
Lemma genAExpr_correct_helix_pure :
  forall (aexp: AExpr) (σ: evalContext) (memH: memoryH),
    no_failure (interp_helix (E := E_cfg) (denoteAExpr σ aexp) memH) -> (* Source semantics defined *)
    exists t, eutt eq (interp_fail handle_failure (interp_Mem (denoteAExpr σ aexp) memH)) (Ret (Some (memH, t))).
Proof.
  intros aexp; induction aexp; intros * NOFAIL.
  - (* Variable case *)
    (* Reducing the compilation *)
    (* The variable maps to an integer in the IRState *)
    unfold denoteAExpr in *; cbn* in *.
    simp; try_abs.
    setoid_rewrite bind_ret_l.
    break_inner_match_goal; try_abs.
    interp_MF_ret. eexists. reflexivity.
  - (* Constant *)
    cbn* in *; simp.
    interp_MF_ret.
    eexists. reflexivity.
  - (* ANth m n: lookup to m[n] *)
    (* The index and the memory sub-expressions are pure by the
       corresponding NExpr/MExpr purity lemmas. *)
    cbn* in *; simp.
    edestruct genNExpr_correct_pure. apply no_failure_helix_bind_prefix in NOFAIL; eauto.
    interp_MF_bind. setoid_rewrite H.
    setoid_rewrite bind_ret_l.
    edestruct genMExpr_correct_pure.
    setoid_rewrite interp_helix_bind in NOFAIL.
    unfold interp_helix at 1 in NOFAIL.
    setoid_rewrite H in NOFAIL.
    rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
    apply no_failure_helix_bind_prefix in NOFAIL.
    eauto.
    interp_MF_bind. setoid_rewrite H0.
    setoid_rewrite bind_ret_l. destruct x0.
    setoid_rewrite interp_helix_bind in NOFAIL.
    unfold interp_helix at 1 in NOFAIL.
    setoid_rewrite H in NOFAIL.
    rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
    setoid_rewrite interp_helix_bind in NOFAIL.
    unfold interp_helix at 1 in NOFAIL.
    setoid_rewrite H0 in NOFAIL.
    rewrite translate_ret in NOFAIL. rewrite bind_ret_l in NOFAIL.
    simp; try_abs. setoid_rewrite bind_ret_l.
    interp_MF_ret. eexists. reflexivity.
  - (* AAbs *)
    cbn* in *; simp.
    edestruct IHaexp; eauto.
    interp_MF_bind.
    setoid_rewrite H.
    setoid_rewrite bind_ret_l.
    interp_MF_ret.
    eexists. reflexivity.
  - (* APlus *)
    cbn* in *; simp...
    interp_MF_bind.
    edestruct IHaexp1; eauto.
    setoid_rewrite H.
    setoid_rewrite bind_ret_l.
    eapply no_failure_helix_bind_continuation in NOFAIL; [| ]. eapply eutt_translate_gen in H.
    2 : { unfold interp_helix. rewrite H; eauto; econstructor. rewrite translate_ret. reflexivity. }
    edestruct IHaexp2. eauto.
    apply no_failure_helix_bind_prefix in NOFAIL. eauto.
    interp_MF_bind ; setoid_rewrite H0;
    setoid_rewrite bind_ret_l.
    eexists; interp_MF_ret; cbn; reflexivity.
  - (* AMinus — same script as APlus *)
    cbn* in *; simp...
    interp_MF_bind.
    edestruct IHaexp1; eauto.
    setoid_rewrite H.
    setoid_rewrite bind_ret_l.
    eapply no_failure_helix_bind_continuation in NOFAIL; [| ]. eapply eutt_translate_gen in H.
    2 : { unfold interp_helix. rewrite H; eauto; econstructor. rewrite translate_ret. reflexivity. }
    edestruct IHaexp2. eauto.
    apply no_failure_helix_bind_prefix in NOFAIL. eauto.
    interp_MF_bind ; setoid_rewrite H0;
    setoid_rewrite bind_ret_l.
    eexists; interp_MF_ret; cbn; reflexivity .
  - (* AMult — same script as APlus *)
    cbn* in *; simp...
    interp_MF_bind.
    edestruct IHaexp1; eauto.
    setoid_rewrite H.
    setoid_rewrite bind_ret_l.
    eapply no_failure_helix_bind_continuation in NOFAIL; [| ]. eapply eutt_translate_gen in H.
    2 : { unfold interp_helix. rewrite H; eauto; econstructor. rewrite translate_ret. reflexivity. }
    edestruct IHaexp2. eauto.
    apply no_failure_helix_bind_prefix in NOFAIL. eauto.
    interp_MF_bind ; setoid_rewrite H0;
    setoid_rewrite bind_ret_l.
    eexists; interp_MF_ret; cbn; reflexivity .
  - (* AMin — same script as APlus *)
    cbn* in *; simp...
    interp_MF_bind.
    edestruct IHaexp1; eauto.
    setoid_rewrite H.
    setoid_rewrite bind_ret_l.
    eapply no_failure_helix_bind_continuation in NOFAIL; [| ]. eapply eutt_translate_gen in H.
    2 : { unfold interp_helix. rewrite H; eauto; econstructor. rewrite translate_ret. reflexivity. }
    edestruct IHaexp2. eauto.
    apply no_failure_helix_bind_prefix in NOFAIL. eauto.
    interp_MF_bind ; setoid_rewrite H0;
    setoid_rewrite bind_ret_l.
    eexists; interp_MF_ret; cbn; reflexivity .
  - (* AMax — same script as APlus *)
    cbn* in *; simp...
    interp_MF_bind.
    edestruct IHaexp1; eauto.
    setoid_rewrite H.
    setoid_rewrite bind_ret_l.
    eapply no_failure_helix_bind_continuation in NOFAIL; [| ]. eapply eutt_translate_gen in H.
    2 : { unfold interp_helix. rewrite H; eauto; econstructor. rewrite translate_ret. reflexivity. }
    edestruct IHaexp2. eauto.
    apply no_failure_helix_bind_prefix in NOFAIL. eauto.
    interp_MF_bind ; setoid_rewrite H0;
    setoid_rewrite bind_ret_l.
    eexists; interp_MF_ret; cbn; reflexivity .
  - (* AZless — same script as APlus *)
    cbn* in *; simp...
    interp_MF_bind.
    edestruct IHaexp1; eauto.
    setoid_rewrite H.
    setoid_rewrite bind_ret_l.
    eapply no_failure_helix_bind_continuation in NOFAIL; [| ]. eapply eutt_translate_gen in H.
    2 : { unfold interp_helix. rewrite H; eauto; econstructor. rewrite translate_ret. reflexivity. }
    edestruct IHaexp2. eauto.
    apply no_failure_helix_bind_prefix in NOFAIL. eauto.
    interp_MF_bind ; setoid_rewrite H0;
    setoid_rewrite bind_ret_l.
    eexists; interp_MF_ret; cbn; reflexivity .
    (* Discharge remaining evars / goals left by eauto. *)
    Unshelve.
    all : try eauto.
    all : intros * [].
Qed.
End pure.
|
{"author": "vzaliva", "repo": "helix", "sha": "5d0a71df99722d2011c36156f12b04875df7e1cb", "save_path": "github-repos/coq/vzaliva-helix", "path": "github-repos/coq/vzaliva-helix/helix-5d0a71df99722d2011c36156f12b04875df7e1cb/coq/LLVMGen/Pure.v"}
|
[STATEMENT]
lemma mult_ceiling_le_Ints:
assumes "0 \<le> a" "a \<in> Ints"
shows "(of_int \<lceil>a * b\<rceil> :: 'a :: linordered_idom) \<le> of_int(\<lceil>a\<rceil> * \<lceil>b\<rceil>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. of_int \<lceil>a * b\<rceil> \<le> of_int (\<lceil>a\<rceil> * \<lceil>b\<rceil>)
[PROOF STEP]
by (metis Ints_cases assms ceiling_le_iff ceiling_of_int le_of_int_ceiling mult_left_mono of_int_le_iff of_int_mult)
|
{"llama_tokens": 210, "file": null, "length": 1}
|
#!/usr/bin/env python
import os
import numpy as np
import time
import copy
import sys
import argparse
# Unit-conversion constants (not referenced below in this script —
# presumably kept for parity with sibling tools; confirm before removing).
ang_2_bohr = 1.0/0.52917721067
hart_2_ev = 27.21138602
import cp2k_spm_tools.cp2k_grid_orbitals as cgo
from cp2k_spm_tools import common, cube
from mpi4py import MPI
# Every rank runs this whole script; work is shared by broadcasting each
# rank's orbital grids in turn (see the main loop further down).
comm = MPI.COMM_WORLD
mpi_rank = comm.Get_rank()
mpi_size = comm.Get_size()
# Command-line interface: input files for the SCF calculation, the
# directory of Bader basin cubes, and the evaluation-grid parameters.
parser = argparse.ArgumentParser(
    description='Runs bond order analysis based on Bader basins.')

parser.add_argument(
    '--cp2k_input_file',
    metavar='FILENAME',
    required=True,
    help='CP2K input of the SCF calculation.')
parser.add_argument(
    '--basis_set_file',
    metavar='FILENAME',
    required=True,
    help='File containing the used basis sets.')
parser.add_argument(
    '--xyz_file',
    metavar='FILENAME',
    required=True,
    help='.xyz file containing the geometry.')
parser.add_argument(
    '--wfn_file',
    metavar='FILENAME',
    required=True,
    help='cp2k restart file containing the wavefunction.')
### -----------------------------------------------------------
parser.add_argument(
    '--output_file',
    metavar='FILENAME',
    required=True,
    help='Output file containing the bond orders.')
parser.add_argument(
    '--bader_basins_dir',
    metavar='DIR',
    required=True,
    help='directory containing the Bader basin .cube files.')
### -----------------------------------------------------------
# Grid parameters: --dx must match the spatial step of the basin cubes
# (checked when the basins are loaded).
parser.add_argument(
    '--dx',
    type=float,
    metavar='DX',
    default=0.2,
    help='Spatial step for the grid (angstroms).')
parser.add_argument(
    '--eval_cutoff',
    type=float,
    metavar='D',
    default=14.0,
    help=("Size of the region around the atom where each"
          " orbital is evaluated (only used for 'G' region).")
)
parser.add_argument(
    '--eval_region',
    type=str,
    nargs=6,
    metavar='X',
    required=False,
    default = ['G', 'G', 'G', 'G', 'G', 'G'],
    help=common.eval_region_description
)
### -----------------------------------------------------------
time0 = time.time()

### ------------------------------------------------------
### Parse args for only one rank to suppress duplicate stdio
### ------------------------------------------------------
args = None
args_success = False
try:
    if mpi_rank == 0:
        args = parser.parse_args()
        args_success = True
finally:
    # argparse calls sys.exit on bad input (SystemExit is not caught by
    # `except Exception`), so the broadcast lives in `finally` to keep the
    # other ranks from deadlocking on a collective rank 0 never reaches.
    args_success = comm.bcast(args_success, root=0)

if not args_success:
    print(mpi_rank, "exiting")
    exit(0)

# Share the parsed namespace with all ranks.
args = comm.bcast(args, root=0)
### ------------------------------------------------------
### Load the Bader basins
### ------------------------------------------------------

bader_atoms = []  # 0-based atom indices parsed from the BvAt file names
bader_masks = []  # boolean grids: True where a voxel belongs to the atom's basin

for f in sorted(os.listdir(args.bader_basins_dir)):
    if f.startswith("BvAt"):
        # File names look like "BvAt0001.cube" -> atom index 0.
        num = int(f.split(".")[0][4:]) - 1
        bader_atoms.append(num)
        c = cube.Cube()
        c.read_cube_file(args.bader_basins_dir+"/"+f)
        # The basin grid must share the spatial step of the orbital
        # evaluation below (dv presumably in the same units as --dx —
        # TODO confirm).
        if np.abs(c.dv[0, 0] - args.dx) > 1e-3:
            print("ERROR: Basin cube dx doesn't match specified dx!")
            print(c.dv[0, 0], args.dx)
            exit(0)
        bader_masks.append(c.data > 1e-10)

print("R%d/%d: loaded Bader basins, time: %.2fs"%(mpi_rank, mpi_size, (time.time() - time0)))
sys.stdout.flush()
time1 = time.time()
### ------------------------------------------------------
### Evaluate orbitals on the real-space grid
### ------------------------------------------------------

mol_grid_orb = cgo.Cp2kGridOrbitals(mpi_rank, mpi_size, comm, single_precision=False)
mol_grid_orb.read_cp2k_input(args.cp2k_input_file)
mol_grid_orb.read_xyz(args.xyz_file)
mol_grid_orb.center_atoms_to_cell()
mol_grid_orb.read_basis_functions(args.basis_set_file)
# n_occ=None: load all occupied orbitals; virtual orbitals are not needed
# for bond orders.
mol_grid_orb.load_restart_wfn_file(args.wfn_file, n_occ=None, n_virt=0)

print("R%d/%d: loaded eval files, time: %.2fs"%(mpi_rank, mpi_size, (time.time() - time1)))
sys.stdout.flush()
time1 = time.time()

eval_reg = common.parse_eval_region_input(args.eval_region, mol_grid_orb.ase_atoms, mol_grid_orb.cell)

# Evaluate each rank's share of the molecular orbitals on the real-space
# grid; --dx must equal the basin-cube step (checked above).
mol_grid_orb.calc_morbs_in_region(args.dx,
                                  x_eval_region = eval_reg[0],
                                  y_eval_region = eval_reg[1],
                                  z_eval_region = eval_reg[2],
                                  reserve_extrap = 0.0,
                                  eval_cutoff = args.eval_cutoff)

print("R%d/%d: evaluated grids, time: %.2fs"%(mpi_rank, mpi_size, (time.time() - time1)))
sys.stdout.flush()
time1 = time.time()
### ------------------------------------------------------
### Calculate Bond orders
### ------------------------------------------------------

bond_order_matrix = np.zeros((len(bader_atoms), len(bader_atoms)))

# Number of orbitals held by each rank, per spin channel.
n_orb_per_rank = []
for i_spin in range(mol_grid_orb.nspin):
    n_orb_per_rank.append(comm.allgather(len(mol_grid_orb.morb_energies[i_spin])))

cell_n = mol_grid_orb.eval_cell_n
vol_elem = np.prod(mol_grid_orb.dv)  # volume of one grid voxel

# The basin masks are applied element-wise to the orbital grids, so the
# two grids must have identical shapes.
if any(cell_n != bader_masks[0].shape):
    print("Error: Basin and evaluation size mismatch.")
    exit(0)
# Round-robin over ranks: each rank in turn broadcasts its orbital grids,
# and every rank accumulates the bond-order contributions between the
# received orbitals and its own.
for i_rank in range(mpi_size):

    if mpi_rank == i_rank:
        print("R%d/%d: distributing grids and evaluating products..."%(mpi_rank, mpi_size))
        sys.stdout.flush()
        time1 = time.time()

    for i_spin in range(mol_grid_orb.nspin):

        bcast_buffer = np.empty(np.prod(cell_n)*n_orb_per_rank[i_spin][i_rank])
        if mpi_rank == i_rank:
            bcast_buffer = mol_grid_orb.morb_grids[i_spin].flatten()

        # Broadcast the current rank grids to all
        comm.Bcast([bcast_buffer, MPI.DOUBLE], root=i_rank)

        received_grids = np.reshape(bcast_buffer,
            (n_orb_per_rank[i_spin][i_rank], cell_n[0], cell_n[1], cell_n[2])
        )

        # Reference (slow, per-orbital) implementation, kept for clarity:
        # for i_mo in range(received_grids.shape[0]):
        #
        #     i_grid = received_grids[i_mo]
        #
        #     for j_mo in range(mol_grid_orb.morb_grids[i_spin].shape[0]):
        #
        #         j_grid = mol_grid_orb.morb_grids[i_spin][j_mo]
        #
        #         for at_a in range(len(bader_atoms)):
        #             for at_b in range(at_a):
        #
        #                 i_grid_a = i_grid*bader_masks[at_a]
        #                 i_grid_b = i_grid*bader_masks[at_b]
        #
        #                 scalar_a = np.dot(i_grid_a.flatten(), j_grid.flatten())*vol_elem
        #                 scalar_b = np.dot(i_grid_b.flatten(), j_grid.flatten())*vol_elem
        #
        #                 bond_order_matrix[at_a, at_b] += 4*scalar_a*scalar_b
        #                 bond_order_matrix[at_b, at_a] += 4*scalar_a*scalar_b

        n_i = received_grids.shape[0]
        n_j = mol_grid_orb.morb_grids[i_spin].shape[0]
        for at_a in range(len(bader_atoms)):
            for at_b in range(at_a):
                # Restrict each orbital to the basin voxels of atoms a/b,
                # then form all pairwise overlaps with one einsum per basin.
                i_grid_a = received_grids[:, bader_masks[at_a]].reshape(n_i, -1)
                i_grid_b = received_grids[:, bader_masks[at_b]].reshape(n_i, -1)
                j_grid_a = mol_grid_orb.morb_grids[i_spin][:, bader_masks[at_a]].reshape(n_j, -1)
                j_grid_b = mol_grid_orb.morb_grids[i_spin][:, bader_masks[at_b]].reshape(n_j, -1)
                bo = np.sum(np.einsum("ij,kj", i_grid_a, j_grid_a) * np.einsum("ij,kj", i_grid_b, j_grid_b))
                # vol_elem**2: one factor per overlap integral in the product.
                bond_order_matrix[at_a, at_b] += 4 * bo * vol_elem**2
                bond_order_matrix[at_b, at_a] += 4 * bo * vol_elem**2

    if mpi_rank == i_rank:
        print("R%d/%d: ... time: %.2fs"%((mpi_rank, mpi_size, time.time()-time1)))
        sys.stdout.flush()
# collect all contributions
final_bond_order_mat = np.zeros((len(bader_atoms), len(bader_atoms)))
comm.Reduce(bond_order_matrix, final_bond_order_mat, op=MPI.SUM)

# Only the root rank writes the result.
if mpi_rank == 0:
    header = ""
    for b_at in bader_atoms:
        header += "%10d" % b_at
    # Drop three leading characters so the column labels line up after
    # np.savetxt's "# " comment prefix — TODO confirm alignment.
    header = header[3:]
    np.savetxt(args.output_file, final_bond_order_mat, fmt="%9.6f", header=header)

print("R%d/%d finished, total time: %.2fs"%(mpi_rank, mpi_size, (time.time() - time0)))
|
{"hexsha": "67dd99e24a32b56051bbb6cc4a2b6b276e28e468", "size": 7954, "ext": "py", "lang": "Python", "max_stars_repo_path": "bader_bond_order.py", "max_stars_repo_name": "eimrek/cp2k-spm-tools", "max_stars_repo_head_hexsha": "94b158e7e93bc4cb76e88d59d31347fafdda5e64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-10-11T15:24:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T16:05:24.000Z", "max_issues_repo_path": "bader_bond_order.py", "max_issues_repo_name": "eimrek/cp2k-spm-tools", "max_issues_repo_head_hexsha": "94b158e7e93bc4cb76e88d59d31347fafdda5e64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bader_bond_order.py", "max_forks_repo_name": "eimrek/cp2k-spm-tools", "max_forks_repo_head_hexsha": "94b158e7e93bc4cb76e88d59d31347fafdda5e64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-09-27T06:09:18.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-23T15:21:11.000Z", "avg_line_length": 31.9437751004, "max_line_length": 108, "alphanum_fraction": 0.5817198894, "include": true, "reason": "import numpy", "num_tokens": 2000}
|
% test_all_fbp
% Regression driver: runs every FBP-related demo/test script listed below,
% in sequence, via run_mfile_local.
% todo:
%	cuboid_im test
%	cuboid_proj test
% Cell array of script names / "<name> test" commands to execute.
list = {
'cbct_back test'
'ct_geom test'
'image_geom test'
'sino_geom test'
'cylinder_proj test'
'df_example1'
'ellipse_im test'
'ellipse_sino test'
'ellipsoid_proj test'
'ellipsoid_im test'
'fbp_fan_arc_example'
'fbp_fan_arc_point'
'fbp_fan_flat_example'
'fbp_ramp test'
'fbp2_sino_filter test'
'fbp2_example'
'feldkamp_example'
'jaszczak1 test'
'ir_radon_zwart_powell test'
'rebin_helix test' % helix_example
'rect_im test'
'rect_sino test'
%'sphere_proj test'
};
% Command-syntax call im('nan-fail') — presumably configures the im()
% display utility to error out on NaN images; confirm against im.m.
im nan-fail
run_mfile_local(list)
|
{"author": "JeffFessler", "repo": "mirt", "sha": "b7f36cc46916821e8bc8502301b1554ebc7efe1d", "save_path": "github-repos/MATLAB/JeffFessler-mirt", "path": "github-repos/MATLAB/JeffFessler-mirt/mirt-b7f36cc46916821e8bc8502301b1554ebc7efe1d/fbp/test_all_fbp.m"}
|
# -*- coding: utf-8 -*-
from FGJumperMaster import FGJumperMaster
from ADBHelper import ADBHelper
from FGVisonUtil import FGVisionUtil as vutil
import cv2
import numpy as np
import time
import datetime
# Grab an initial screenshot so the device resolution can be inspected.
img = ADBHelper.getScreenShotByADB()
vutil.printImgInfo(img)

# NOTE(review): `adb` is constructed but never used below — confirm.
adb = ADBHelper(1080, 1920)

# Resizable preview window for the detection overlay.
cv2.namedWindow('image', flags= cv2.WINDOW_NORMAL | cv2.WINDOW_FREERATIO)
keyPressed = -1
def distance2time(distance):
    """Convert a jump distance (in pixels) into a screen-press duration (ms).

    The ADB press duration must be an integer, so the scaled value is
    truncated toward zero.
    """
    ms_per_pixel = 1.53
    return int(ms_per_pixel * distance)
def saveSampleImg(jmaster, img, tag=True):
    """Persist a screenshot plus its annotated debug view for later review.

    Correctly recognized frames (tag=True) go under samples/right[_log],
    manually corrected ones under samples/wrong[_log].
    """
    img_name = f"{datetime.datetime.now():%Y-%m-%d-%H-%M-%S.png}"
    subdir = "right" if tag else "wrong"
    cv2.imwrite("./samples/" + subdir + "/" + img_name, img)
    cv2.imwrite("./samples/" + subdir + "_log/" + img_name, jmaster.visualization_detail())
# Manual-annotation state machine shared with the mouse callback:
#   markflag 0 -> waiting for the chess click,
#            1 -> waiting for the box click,
#            2 -> both marked, waiting for the button release.
markflag = 0
chessPtr = (0, 0)
boxPtr = (0, 0)
isMarked = False

'''
手动标注
'''
def markChessAndBoxByHand(event,x,y,flags,param):
    """Mouse callback for manual annotation.

    First left click records the chess-piece position, the second records
    the target-box center; releasing the button after the second click
    finalizes the pair (isMarked=True) so the main loop can use the
    manually measured distance.
    """
    global markflag
    global chessPtr
    global boxPtr
    global isMarked
    global subImg
    if event == cv2.EVENT_LBUTTONDOWN:
        if markflag == 0 and isMarked == False:
            # First click: record the chess (player piece) position.
            chessPtr = (x, y)
            print("已标注 chess坐标 {}, {}".format(chessPtr[0], chessPtr[1]))
            cv2.circle(subImg,(x,y), int(5), (0, 0, 255), -1)
            markflag = 1
        elif markflag == 1 and isMarked == False:
            # Second click: record the target-box center.
            boxPtr = (x, y)
            print("已标注 box中心坐标 {}, {}".format(boxPtr[0], boxPtr[1]))
            cv2.circle(subImg,(x,y), int(5), (255, 0, 0), -1)
            markflag = 2
    elif event == cv2.EVENT_LBUTTONUP and markflag == 2:
        # Button released after both marks: annotation complete.
        isMarked = True
        markflag = 0
    # Refresh the preview with the drawn markers.
    cv2.imshow("image", subImg)
# 设置鼠标事件回调
cv2.setMouseCallback('image',markChessAndBoxByHand)
subImg = None
while True:
img = ADBHelper.getScreenShotByADB()
subImg = img[300:1720, :]
try:
jmaster = FGJumperMaster(subImg)
# 预览算法效果与过程
subImg = jmaster.visualization()
cv2.imshow("image",jmaster.visualization())
except IndexError:
cv2.imshow("image", subImg)
keyPressed = cv2.waitKey(0)
if keyPressed == ord("e"):
print("游戏结束")
break
elif keyPressed == ord("y"):
if isMarked:
saveSampleImg(jmaster, img, tag=False)
isMarked = False
markflag = 0
distance = vutil.cal_distance(chessPtr, boxPtr)
print("手动distance %.2f"%distance)
delay = distance2time(distance)
rc = ADBHelper.pressOnScreen((500, 500), delay=delay)
if rc:
print("成功点击 并延时 1s")
time.sleep(0.5 + delay / 1000)
continue
# 识别正确,确认点击
delay = distance2time(jmaster.distance)
rc = ADBHelper.pressOnScreen((500, 500), delay=delay)
saveSampleImg(jmaster, img)
if rc:
print("成功点击 并延时 1s")
time.sleep(0.5 + delay / 1000)
elif keyPressed == ord("n"):
# 保存失败样例及日志
saveSampleImg(jmaster, img, tag=False)
isMarked = False
markflag = 0
cv2.destroyAllWindows()
|
{"hexsha": "f5024910f093fcb1131aa5215c2e079461a6080c", "size": 3315, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "isoundy000/FGJumperMaster", "max_stars_repo_head_hexsha": "10063f167fbba7d9e16375965f7320a3966169f6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "isoundy000/FGJumperMaster", "max_issues_repo_head_hexsha": "10063f167fbba7d9e16375965f7320a3966169f6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "isoundy000/FGJumperMaster", "max_forks_repo_head_hexsha": "10063f167fbba7d9e16375965f7320a3966169f6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-06-23T12:13:01.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-23T12:13:01.000Z", "avg_line_length": 25.6976744186, "max_line_length": 84, "alphanum_fraction": 0.5891402715, "include": true, "reason": "import numpy", "num_tokens": 1008}
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestLars(hu.HypothesisTestCase):
    """Checks the Lars operator against a NumPy reference implementation."""

    @given(offset=st.floats(min_value=0, max_value=100), **hu.gcs_cpu_only)
    def test_lars(self, offset, dc, gc):
        # Random 4-D weight and gradient tensors of matching shape.
        shape = (6, 7, 8, 9)
        weights = np.random.rand(*shape).astype(np.float32)
        grads = np.random.rand(*shape).astype(np.float32)

        def expected_rescale(w, g):
            # LARS trust ratio: 1 / (||g|| / ||w|| + offset).
            return [1. / (np.linalg.norm(g) / np.linalg.norm(w) + offset)]

        lars_op = core.CreateOperator(
            "Lars",
            ["X", "dX"],
            ["rescale_factor"],
            offset=offset
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=lars_op,
            inputs=[weights, grads],
            reference=expected_rescale
        )
|
{"hexsha": "7fc13aa05801cbe30a34a72fa46f11358f55c60f", "size": 1667, "ext": "py", "lang": "Python", "max_stars_repo_path": "caffe2/python/operator_test/lars_test.py", "max_stars_repo_name": "nrsatish/caffe2", "max_stars_repo_head_hexsha": "a8e7515f33c196e7999277bca2b13aefea8e2573", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "caffe2/python/operator_test/lars_test.py", "max_issues_repo_name": "nrsatish/caffe2", "max_issues_repo_head_hexsha": "a8e7515f33c196e7999277bca2b13aefea8e2573", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "caffe2/python/operator_test/lars_test.py", "max_forks_repo_name": "nrsatish/caffe2", "max_forks_repo_head_hexsha": "a8e7515f33c196e7999277bca2b13aefea8e2573", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.34, "max_line_length": 78, "alphanum_fraction": 0.6448710258, "include": true, "reason": "import numpy", "num_tokens": 398}
|
#!/usr/bin/env python3
from sympy import *
from mpmath import *
from matplotlib.pyplot import *
#init_printing() # make things prettier when we print stuff for debugging.
# ************************************************************************** #
# Self-Inductance L of copper coil with massive aluminium cylinder inserted #
# ************************************************************************** #
# All values are in standard SI units unless otherwise noted.
# ---------------------------------------------------------#
# Init, Define Variables and Constants #
# ---------------------------------------------------------#
mu0 = 4*pi*1e-7        # vacuum permeability
#sigma = 37.7e6         # conductivity of aluminium (de.wikipedia.org)
# NOTE(review): 23.75e6 matches neither the aluminium (37.7e6) nor the
# copper (58.1e6) literature value cited here — presumably an effective
# or fitted conductivity; confirm.
sigma = 23.75e6        # de.wikipedia.org/wiki/Kupfer: 58.1e6
r = 0                  # radial position of measurement probe. Centered on axis
dsp = 98e-3            # diameter of coil
rsp = dsp / 2          # radius of coil
r0 = 45e-3             # radius of alu cylinder
B0 = 6.9e-2            # adjust this as needed for scaling
N0 = 574               # number of turns of copper coil
l = 500e-3             # length of copper coil
npts = 1e3             # number of frequency samples (float; cast where an int is required)
fmin =1                # lowest frequency in Hz
fmax =250              # highest frequency in Hz
# -----------------------------------------------------#
# Create a list for convenient printing of vars to #
# file, add LaTeX where necessary. #
# -----------------------------------------------------#
# One pre-formatted LaTeX/siunitx table row per physical parameter; these
# strings are dumped verbatim into the listings file at the end of the
# script.
params = [
        ' ' + '$\mu_0' + '$ & $' + '\SI{' + str(mu0) + r'}{\newton\per\ampere\squared}' + r'$\\' + "\n",
        ' ' + '$\sigma' + '$ & $' + '\SI{' + str(sigma) + r'}{\ampere\per\volt\per\meter}' + r'$\\' + "\n",
        ' ' + '$d_{Sp}' + '$ & $' + '\SI{' + str(dsp) + r'}{\meter}' + r'$\\' + "\n",
        ' ' + '$r_{Sp}' + '$ & $' + '\SI{' + str(rsp) + r'}{\meter}' + r'$\\' + "\n",
        ' ' + '$r' + '$ & $' + '\SI{' + str(r) + r'}{\meter}' + r'$\\' + "\n",
        ' ' + '$r_{0}' + '$ & $' + '\SI{' + str(r0) + r'}{\meter}' + r'$\\' + "\n",
        ' ' + '$B_0' + '$ & $' + '\SI{' + str(B0) + r'}{\tesla}' + r'$\\' + "\n",
        ' ' + '$l' + '$ & $' + '\SI{' + str(l) + r'}{\meter}' + r'$\\' + "\n",
        ' ' + '$NPTS' + '$ & $' + r'\num{' + str(npts) + '}' + r'$\\' + "\n",
        ' ' + '$N_0' + '$ & $' + r'\num{' + str(N0) + '}' + r'$\\' + "\n",
        ' ' + '$f_{min}' + '$ & $' + '\SI{' + str(fmin) + r'}{\hertz}' + r'$\\' + "\n",
        ' ' + '$f_{max}' + '$ & $' + '\SI{' + str(fmax) + r'}{\hertz}' + r'$\\' + "\n",
        ]
# Matplotlib font dictionaries and shared plot settings.
font = {
        'family' : 'serif',
        'color'  : 'black',
        'weight' : 'normal',
        'size'   : 9,
        }
titlefont = {
        'family' : 'serif',
        'color'  : 'black',
        'weight' : 'normal',
        'size'   : 10,
        }
plot_color_fit          = 'blue'
# NOTE(review): plot_color_measurements is unused in this script — confirm.
plot_color_measurements = 'black'
plot_linewidth          = 1
plot_scale_x            = 'log'
plot_label_x            = 'Frequenz (Hz)'
plot_label_y            = 'Selbstinduktion L (mH)'
plot_title              = "Selbstinduktionskoeffizient, Spule mit Vollzylinder"
# ---------------------------------------------------------#
# Functions                                                 #
#                                                           #
# See formula 22 on p.12 of script for experiment.          #
#                                                           #
# NOTE: We use frequency f instead of angular frequency     #
# omega since that is what we actually set on the function  #
# generator.                                                #
# ---------------------------------------------------------#
# Complex wave number of the skin-effect solution,
# k(f) = sqrt(pi*f*mu0*sigma) * (1 - j), built from mpmath complex numbers.
k = lambda f: sqrt((2*np.pi*f*mu0*sigma)/2)*(mpc(1,-1))
# Geometric inductance contribution of the air gap between the cylinder
# surface (r0) and the coil winding (rsp).
LRand = (mu0 * 2 * pi * r0 * (rsp - r0) * N0**2) / l
# Self-inductance L(f): Bessel-function radial profile inside the
# conducting cylinder (real part) plus the air-gap term LRand.
L = lambda f:(
        (mu0*2*pi*r0*N0**2) / l
        * re(besselj(1,k(f)*r0) / (k(f)
        * besselj(0,k(f)*r0)))
        + LRand
    )
# ---------------------------------------------------------#
# Generate points for frequency axis                        #
# ---------------------------------------------------------#
# np.linspace requires an integer sample count; npts is the float 1e3,
# which raises a TypeError on NumPy >= 1.18 — cast explicitly.
n = np.linspace(1, npts, int(npts))
# Vectorize mpmath's exp so it maps element-wise over the ndarray.
expufunc = np.frompyfunc(exp, 1, 1)
# Exponentially spaced frequency axis.
# NOTE(review): log(fmax-fmin) was presumably meant to be log(fmax/fmin)
# for a true fmin..fmax logarithmic sweep — kept as-is to preserve output.
frequency_vector = fmin*expufunc(n*log(fmax-fmin)/npts)
# ---------------------------------------------------------#
# Numerically evaluate function                             #
# ---------------------------------------------------------#
# mpmath functions are scalar-only, so vectorize L over the axis.
L_ufunc = np.frompyfunc(L,1,1)
L_num   = L_ufunc(frequency_vector)
L_num   = 1e3 * L_num               # convert H -> mH for the plot axis
# ---------------------------------------------------------#
# Plot the Things                                           #
# ---------------------------------------------------------#
matplotlib.pyplot.rc('text', usetex=True)   # render labels with LaTeX
matplotlib.pyplot.rc('font', family='serif')

figwidth = 8.27 # in inches (A4 paper width)
fig  = figure(1,figsize=(figwidth,figwidth*0.36))
axes = fig.add_subplot(111)
axes.plot(frequency_vector,L_num,linewidth=plot_linewidth,color=plot_color_fit)
axes.set_xscale(plot_scale_x)
axes.set_xlim([fmin*0.9,fmax*1.1])
axes.set_xlabel(plot_label_x,fontdict=font)
axes.set_ylabel(plot_label_y,fontdict=font)
axes.set_title(plot_title,fontdict=titlefont)
axes.tick_params(labelsize=9)
fig.subplots_adjust(bottom=0.15,left=0.125,right=0.925,top=0.90)

# Export both a PGF (for LaTeX inclusion) and a PDF version.
fig.savefig('plots-pgf/massive--alu--L.pgf')
fig.savefig('plots-pdf/massive--alu--L.pdf')
# ---------------------------------------------------------#
# Save listing to file                                      #
# ---------------------------------------------------------#
# LaTeX table skeleton; the parameter rows from `params` go in between.
table_opening = r"""
{%
    \begin{center}
    \captionof{table}{%
        Parameterwerte f\"ur  Fit-Funktion  in  Abbildung  \ref{fig:alu:freq:L}
    }
    \label{tab:fitparams:alu:L}
    \sisetup{%
        %math-rm=\mathtt,
        scientific-notation=engineering,
        table-format = +3.2e+2,
        round-precision = 3,
        round-mode = figures,
    }
    \begin{tabular}{lr}
        \toprule
"""
table_closing = r"""
        \bottomrule
    \end{tabular}
    \end{center}
}

"""

# Use a context manager so the file handle is closed even if a write
# fails (the original open()/close() pair leaked the handle on error).
with open('listings/massive--alu--L.tex', 'w') as dumpfile:
    dumpfile.write(table_opening)
    dumpfile.writelines(params)   # one pre-formatted LaTeX row per entry
    dumpfile.write(table_closing)
|
{"hexsha": "140a38a718eb14897a2b99914562ea869ceb6170", "size": 6912, "ext": "py", "lang": "Python", "max_stars_repo_path": "versuche/skineffect/python/vollzylinder_L.py", "max_stars_repo_name": "alpenwasser/laborjournal", "max_stars_repo_head_hexsha": "1676414fda402c360e713d29ddc79edd0873adb0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "versuche/skineffect/python/vollzylinder_L.py", "max_issues_repo_name": "alpenwasser/laborjournal", "max_issues_repo_head_hexsha": "1676414fda402c360e713d29ddc79edd0873adb0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 90, "max_issues_repo_issues_event_min_datetime": "2015-10-18T19:23:24.000Z", "max_issues_repo_issues_event_max_datetime": "2015-11-11T16:06:10.000Z", "max_forks_repo_path": "versuche/skineffect/python/vollzylinder_L.py", "max_forks_repo_name": "alpenwasser/laborjournal", "max_forks_repo_head_hexsha": "1676414fda402c360e713d29ddc79edd0873adb0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.3892215569, "max_line_length": 120, "alphanum_fraction": 0.3844039352, "include": true, "reason": "from sympy,from mpmath", "num_tokens": 1770}
|
# An implementation from "TF_PetroWU"
import scipy.io as scio
import skimage
import numpy as np
import math
from PIL import Image
import os
from skimage import transform, io as skio
# Per-channel RGB means (0-255 scale) subtracted from network inputs.
mean_rgb = [122.675, 116.669, 104.008]
# Data-augmentation parameter grids (not referenced in the visible code —
# presumably used by augmentation routines elsewhere; confirm).
scales = [0.6, 0.8, 1.2, 1.5]
rorations = [-45, -22, 22, 45]  # NOTE(review): name looks like a typo for "rotations"; renaming could break external users
gammas = [.05, 0.8, 1.2, 1.5]   # NOTE(review): .05 looks like a typo for 0.5 — confirm
def gen_data(name):
    """Build the PortraitFCN (RGB) and PortraitFCN+ (RGB + aligned x/y/mask
    channels) input tensors for image `name` and save them as .mat files.

    Reads:  data/images_tracker.00047.mat (reference facial landmarks),
            data/images_tracker/<name>.mat, data/images_data/<name>.jpg,
            data/meanmask.png
    Writes: portraitFCN_data/<name>.mat   ({'img': H x W x 3})
            portraitFCN+_data/<name>.mat  ({'img': H x W x 6})

    NOTE(review): the cropped warped channels are 800x600, so this assumes
    the source images are 800x600 — confirm against the dataset.
    """
    reftracker = scio.loadmat('data/images_tracker.00047.mat')['tracker']
    desttracker = scio.loadmat('data/images_tracker/'+name+'.mat')['tracker']
    # Mean landmark position of the reference face.
    refpos = np.floor(np.mean(reftracker, 0))
    # np.int / np.float were removed in NumPy >= 1.24; use the builtins.
    xxc, yyc = np.meshgrid(np.arange(1, 1801, dtype=int), np.arange(1, 2001, dtype=int))
    # normalize x and y channels relative to the reference face center
    xxc = (xxc - 600 - refpos[0]) * 1.0 / 600
    yyc = (yyc - 600 - refpos[1]) * 1.0 / 600
    maskimg = Image.open('data/meanmask.png')
    maskc = np.array(maskimg, dtype=float)
    maskc = np.pad(maskc, (600, 600), 'minimum')
    # warp is an inverse transform, and so src and dst must be reversed here
    tform = transform.estimate_transform('affine', desttracker + 600, reftracker + 600)

    img_data = skio.imread('data/images_data/'+name+'.jpg')

    warpedxx = transform.warp(xxc, tform, output_shape=xxc.shape)
    warpedyy = transform.warp(yyc, tform, output_shape=xxc.shape)
    warpedmask = transform.warp(maskc, tform, output_shape=xxc.shape)
    # The warped channels are 2-D; the original trailing `, :` index raised
    # IndexError. Crop the 600-pixel padding back off.
    warpedxx = warpedxx[600:1400, 600:1200]
    warpedyy = warpedyy[600:1400, 600:1200]
    warpedmask = warpedmask[600:1400, 600:1200]

    img_h, img_w, _ = img_data.shape
    # Mean-subtracted, [0,1]-scaled image with channels flipped RGB -> BGR.
    # (Fixed: `img_data[2]` selected a *row*, not a channel, and `mat` only
    # needs 3 channels — the 6-channel allocation also broke the
    # `mat_plus[:, :, 0:3] = mat` assignment below.)
    mat = np.zeros((img_h, img_w, 3), dtype=float)
    mat[:, :, 0] = (img_data[:, :, 2] * 1.0 - 104.008) / 255
    mat[:, :, 1] = (img_data[:, :, 1] * 1.0 - 116.669) / 255
    mat[:, :, 2] = (img_data[:, :, 0] * 1.0 - 122.675) / 255
    scio.savemat('portraitFCN_data/' + name + '.mat', {'img': mat})

    mat_plus = np.zeros((img_h, img_w, 6), dtype=float)
    mat_plus[:, :, 0:3] = mat
    mat_plus[:, :, 3] = warpedxx
    mat_plus[:, :, 4] = warpedyy
    mat_plus[:, :, 5] = warpedmask
    # The original built mat_plus but never saved it; persist it for the
    # PortraitFCN+ variant.
    scio.savemat('portraitFCN+_data/' + name + '.mat', {'img': mat_plus})
def gamma_trans(mat, gamma):
    """Gamma-correct an RGB image (0-255) and return it mean-subtracted
    in BGR channel order.

    Both the image and the per-channel RGB means are mapped through
    ``x -> (x / 255) ** gamma`` so the mean subtraction happens in the
    gamma-corrected domain.
    """
    # np.pow does not exist -- np.power is the elementwise power function.
    # mean_rgb is a plain list, so convert it before dividing.
    gamma_mean = np.power(np.asarray(mean_rgb) / 255.0, gamma)
    tmp_mat = np.power(mat / 255.0, gamma)
    gamma_mat = np.zeros(mat.shape, dtype=float)
    # gamma_mean is 1-D (one value per channel); the original indexed it
    # as if it were 3-D.  Channel order is reversed here (RGB -> BGR).
    gamma_mat[:, :, 0] = tmp_mat[:, :, 2] - gamma_mean[2]
    gamma_mat[:, :, 1] = tmp_mat[:, :, 1] - gamma_mean[1]
    gamma_mat[:, :, 2] = tmp_mat[:, :, 0] - gamma_mean[0]
    return gamma_mat
def crop_all():
    """Prepare the output directories for batch preprocessing.

    NOTE(review): ``files`` is unused in the visible portion of this
    function and the body appears truncated here -- presumably the full
    source iterates ``files`` and calls ``gen_data`` per image; confirm.
    NOTE(review): directories are created under ``data/`` while
    ``gen_data`` saves to ``portraitFCN_data/`` without the ``data/``
    prefix -- verify the intended working directory.
    """
    # Cropped source images to be processed.
    files = os.listdir('data/images_data_crop')
    if not os.path.exists('data/portraitFCN_data'):
        os.mkdir('data/portraitFCN_data')
    if not os.path.exists('data/portraitFCN+_data'):
        os.mkdir('data/portraitFCN+_data')
|
{"hexsha": "65239ed706bc958de3244a09435cde24d3730fe6", "size": 2638, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess.py", "max_stars_repo_name": "BigBugX/TensorFlow_MobileNetV2_PortraitMatting", "max_stars_repo_head_hexsha": "2f299900fd50bb32806cd05a725f42e6cc0cd91d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-01-04T07:44:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-07T02:20:06.000Z", "max_issues_repo_path": "preprocess.py", "max_issues_repo_name": "BigBugX/TensorFlow_MobileNetV2_PortraitMatting", "max_issues_repo_head_hexsha": "2f299900fd50bb32806cd05a725f42e6cc0cd91d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-12-08T16:02:55.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-15T05:57:18.000Z", "max_forks_repo_path": "preprocess.py", "max_forks_repo_name": "BigBugX/TensorFlow_MobileNetV2_PortraitMatting", "max_forks_repo_head_hexsha": "2f299900fd50bb32806cd05a725f42e6cc0cd91d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-09-26T16:18:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-26T06:48:44.000Z", "avg_line_length": 40.5846153846, "max_line_length": 94, "alphanum_fraction": 0.6304018196, "include": true, "reason": "import numpy,import scipy", "num_tokens": 902}
|
C This File is Automatically generated by ALOHA
C The process calculated in this file is:
C P(1,2)*P(2,1) - P(-1,1)*P(-1,2)*Metric(1,2)
C
      SUBROUTINE MP_VVS4L2P0_1(P2, S3, COUP, M1, W1, P1, COEFF)
C     Auto-generated ALOHA routine (quadruple-precision variant) for the
C     VVS4 Lorentz structure  P(1,2)*P(2,1) - P(-1,1)*P(-1,2)*Metric(1,2).
C     Fills the loop-coefficient array for the off-shell vector leg 1.
C     Inputs:
C       P2   - four-momentum of the incoming vector (complex components)
C       S3   - scalar wavefunction; S3(1:4) carry its momentum, S3(5)
C              the scalar amplitude
C       COUP - coupling constant
C       M1, W1 - mass and width of the off-shell leg (declared but not
C              referenced in the polynomial part below)
C     Outputs:
C       P1    - off-shell momentum, P1 = P2 + P(S3) (momentum sum)
C       COEFF - polynomial coefficients, indexed as
C               COEFF(spin index, coefficient index, Lorentz index)
      IMPLICIT NONE
      COMPLEX*32 CI
      PARAMETER (CI=(0Q0,1Q0))
      COMPLEX*32 TMP2
      COMPLEX*32 S3(*)
      REAL*16 M1
      INCLUDE 'coef_specs.inc'
      COMPLEX*32 COEFF(MAXLWFSIZE,0:VERTEXMAXCOEFS-1,MAXLWFSIZE)
      COMPLEX*32 P2(0:3)
      REAL*16 W1
      COMPLEX*32 P1(0:3)
      COMPLEX*32 COUP
C     Off-shell momentum by conservation: sum of P2 and the scalar's
C     momentum stored in S3(1:4).
      P1(0) = +P2(0)+S3(1)
      P1(1) = +P2(1)+S3(2)
      P1(2) = +P2(2)+S3(3)
      P1(3) = +P2(3)+S3(4)
C     TMP2 = Minkowski product P1.P2 (+,-,-,- metric).
      TMP2 = (P1(0)*P2(0)-P1(1)*P2(1)-P1(2)*P2(2)-P1(3)*P2(3))
C     Coefficients for Lorentz index component 1.
      COEFF(1,0,1)= COUP*S3(5)*(-CI*(TMP2)+CI*(P1(0)*P2(0)))
      COEFF(2,0,1)= COUP*CI * P1(0)*P2(1)*S3(5)
      COEFF(3,0,1)= COUP*CI * P1(0)*P2(2)*S3(5)
      COEFF(4,0,1)= COUP*CI * P1(0)*P2(3)*S3(5)
      COEFF(1,1,1)= 0Q0
      COEFF(2,1,1)= COUP*CI * P2(1)*S3(5)
      COEFF(3,1,1)= COUP*CI * P2(2)*S3(5)
      COEFF(4,1,1)= COUP*CI * P2(3)*S3(5)
      COEFF(1,2,1)= COUP*S3(5)*(+CI*(P2(1)+P1(1)))
      COEFF(2,2,1)= COUP*CI * P1(0)*S3(5)
      COEFF(3,2,1)= 0Q0
      COEFF(4,2,1)= 0Q0
      COEFF(1,3,1)= COUP*S3(5)*(+CI*(P2(2)+P1(2)))
      COEFF(2,3,1)= 0Q0
      COEFF(3,3,1)= COUP*CI * P1(0)*S3(5)
      COEFF(4,3,1)= 0Q0
      COEFF(1,4,1)= COUP*S3(5)*(+CI*(P2(3)+P1(3)))
      COEFF(2,4,1)= 0Q0
      COEFF(3,4,1)= 0Q0
      COEFF(4,4,1)= COUP*CI * P1(0)*S3(5)
      COEFF(1,5,1)= 0Q0
      COEFF(2,5,1)= 0Q0
      COEFF(3,5,1)= 0Q0
      COEFF(4,5,1)= 0Q0
      COEFF(1,6,1)= 0Q0
      COEFF(2,6,1)= COUP*CI * S3(5)
      COEFF(3,6,1)= 0Q0
      COEFF(4,6,1)= 0Q0
      COEFF(1,7,1)= COUP*CI * S3(5)
      COEFF(2,7,1)= 0Q0
      COEFF(3,7,1)= 0Q0
      COEFF(4,7,1)= 0Q0
      COEFF(1,8,1)= 0Q0
      COEFF(2,8,1)= 0Q0
      COEFF(3,8,1)= COUP*CI * S3(5)
      COEFF(4,8,1)= 0Q0
      COEFF(1,9,1)= 0Q0
      COEFF(2,9,1)= 0Q0
      COEFF(3,9,1)= 0Q0
      COEFF(4,9,1)= 0Q0
      COEFF(1,10,1)= COUP*CI * S3(5)
      COEFF(2,10,1)= 0Q0
      COEFF(3,10,1)= 0Q0
      COEFF(4,10,1)= 0Q0
      COEFF(1,11,1)= 0Q0
      COEFF(2,11,1)= 0Q0
      COEFF(3,11,1)= 0Q0
      COEFF(4,11,1)= COUP*CI * S3(5)
      COEFF(1,12,1)= 0Q0
      COEFF(2,12,1)= 0Q0
      COEFF(3,12,1)= 0Q0
      COEFF(4,12,1)= 0Q0
      COEFF(1,13,1)= 0Q0
      COEFF(2,13,1)= 0Q0
      COEFF(3,13,1)= 0Q0
      COEFF(4,13,1)= 0Q0
      COEFF(1,14,1)= COUP*CI * S3(5)
      COEFF(2,14,1)= 0Q0
      COEFF(3,14,1)= 0Q0
      COEFF(4,14,1)= 0Q0
C     Coefficients for Lorentz index component 2.
      COEFF(1,0,2)= COUP*-CI * P1(1)*P2(0)*S3(5)
      COEFF(2,0,2)= COUP*-S3(5)*(+CI*(P1(1)*P2(1)+TMP2))
      COEFF(3,0,2)= COUP*-CI * P1(1)*P2(2)*S3(5)
      COEFF(4,0,2)= COUP*-CI * P1(1)*P2(3)*S3(5)
      COEFF(1,1,2)= COUP*-CI * P1(1)*S3(5)
      COEFF(2,1,2)= COUP*-S3(5)*(+CI*(P2(0)+P1(0)))
      COEFF(3,1,2)= 0Q0
      COEFF(4,1,2)= 0Q0
      COEFF(1,2,2)= COUP*-CI * P2(0)*S3(5)
      COEFF(2,2,2)= 0Q0
      COEFF(3,2,2)= COUP*-CI * P2(2)*S3(5)
      COEFF(4,2,2)= COUP*-CI * P2(3)*S3(5)
      COEFF(1,3,2)= 0Q0
      COEFF(2,3,2)= COUP*S3(5)*(+CI*(P2(2)+P1(2)))
      COEFF(3,3,2)= COUP*-CI * P1(1)*S3(5)
      COEFF(4,3,2)= 0Q0
      COEFF(1,4,2)= 0Q0
      COEFF(2,4,2)= COUP*S3(5)*(+CI*(P2(3)+P1(3)))
      COEFF(3,4,2)= 0Q0
      COEFF(4,4,2)= COUP*-CI * P1(1)*S3(5)
      COEFF(1,5,2)= 0Q0
      COEFF(2,5,2)= COUP*-CI * S3(5)
      COEFF(3,5,2)= 0Q0
      COEFF(4,5,2)= 0Q0
      COEFF(1,6,2)= COUP*-CI * S3(5)
      COEFF(2,6,2)= 0Q0
      COEFF(3,6,2)= 0Q0
      COEFF(4,6,2)= 0Q0
      COEFF(1,7,2)= 0Q0
      COEFF(2,7,2)= 0Q0
      COEFF(3,7,2)= 0Q0
      COEFF(4,7,2)= 0Q0
      COEFF(1,8,2)= 0Q0
      COEFF(2,8,2)= 0Q0
      COEFF(3,8,2)= 0Q0
      COEFF(4,8,2)= 0Q0
      COEFF(1,9,2)= 0Q0
      COEFF(2,9,2)= 0Q0
      COEFF(3,9,2)= COUP*-CI * S3(5)
      COEFF(4,9,2)= 0Q0
      COEFF(1,10,2)= 0Q0
      COEFF(2,10,2)= COUP*CI * S3(5)
      COEFF(3,10,2)= 0Q0
      COEFF(4,10,2)= 0Q0
      COEFF(1,11,2)= 0Q0
      COEFF(2,11,2)= 0Q0
      COEFF(3,11,2)= 0Q0
      COEFF(4,11,2)= 0Q0
      COEFF(1,12,2)= 0Q0
      COEFF(2,12,2)= 0Q0
      COEFF(3,12,2)= 0Q0
      COEFF(4,12,2)= COUP*-CI * S3(5)
      COEFF(1,13,2)= 0Q0
      COEFF(2,13,2)= 0Q0
      COEFF(3,13,2)= 0Q0
      COEFF(4,13,2)= 0Q0
      COEFF(1,14,2)= 0Q0
      COEFF(2,14,2)= COUP*CI * S3(5)
      COEFF(3,14,2)= 0Q0
      COEFF(4,14,2)= 0Q0
C     Coefficients for Lorentz index component 3.
      COEFF(1,0,3)= COUP*-CI * P1(2)*P2(0)*S3(5)
      COEFF(2,0,3)= COUP*-CI * P1(2)*P2(1)*S3(5)
      COEFF(3,0,3)= COUP*-S3(5)*(+CI*(P1(2)*P2(2)+TMP2))
      COEFF(4,0,3)= COUP*-CI * P1(2)*P2(3)*S3(5)
      COEFF(1,1,3)= COUP*-CI * P1(2)*S3(5)
      COEFF(2,1,3)= 0Q0
      COEFF(3,1,3)= COUP*-S3(5)*(+CI*(P2(0)+P1(0)))
      COEFF(4,1,3)= 0Q0
      COEFF(1,2,3)= 0Q0
      COEFF(2,2,3)= COUP*-CI * P1(2)*S3(5)
      COEFF(3,2,3)= COUP*S3(5)*(+CI*(P2(1)+P1(1)))
      COEFF(4,2,3)= 0Q0
      COEFF(1,3,3)= COUP*-CI * P2(0)*S3(5)
      COEFF(2,3,3)= COUP*-CI * P2(1)*S3(5)
      COEFF(3,3,3)= 0Q0
      COEFF(4,3,3)= COUP*-CI * P2(3)*S3(5)
      COEFF(1,4,3)= 0Q0
      COEFF(2,4,3)= 0Q0
      COEFF(3,4,3)= COUP*S3(5)*(+CI*(P2(3)+P1(3)))
      COEFF(4,4,3)= COUP*-CI * P1(2)*S3(5)
      COEFF(1,5,3)= 0Q0
      COEFF(2,5,3)= 0Q0
      COEFF(3,5,3)= COUP*-CI * S3(5)
      COEFF(4,5,3)= 0Q0
      COEFF(1,6,3)= 0Q0
      COEFF(2,6,3)= 0Q0
      COEFF(3,6,3)= 0Q0
      COEFF(4,6,3)= 0Q0
      COEFF(1,7,3)= 0Q0
      COEFF(2,7,3)= 0Q0
      COEFF(3,7,3)= COUP*CI * S3(5)
      COEFF(4,7,3)= 0Q0
      COEFF(1,8,3)= COUP*-CI * S3(5)
      COEFF(2,8,3)= 0Q0
      COEFF(3,8,3)= 0Q0
      COEFF(4,8,3)= 0Q0
      COEFF(1,9,3)= 0Q0
      COEFF(2,9,3)= COUP*-CI * S3(5)
      COEFF(3,9,3)= 0Q0
      COEFF(4,9,3)= 0Q0
      COEFF(1,10,3)= 0Q0
      COEFF(2,10,3)= 0Q0
      COEFF(3,10,3)= 0Q0
      COEFF(4,10,3)= 0Q0
      COEFF(1,11,3)= 0Q0
      COEFF(2,11,3)= 0Q0
      COEFF(3,11,3)= 0Q0
      COEFF(4,11,3)= 0Q0
      COEFF(1,12,3)= 0Q0
      COEFF(2,12,3)= 0Q0
      COEFF(3,12,3)= 0Q0
      COEFF(4,12,3)= 0Q0
      COEFF(1,13,3)= 0Q0
      COEFF(2,13,3)= 0Q0
      COEFF(3,13,3)= 0Q0
      COEFF(4,13,3)= COUP*-CI * S3(5)
      COEFF(1,14,3)= 0Q0
      COEFF(2,14,3)= 0Q0
      COEFF(3,14,3)= COUP*CI * S3(5)
      COEFF(4,14,3)= 0Q0
C     Coefficients for Lorentz index component 4.
      COEFF(1,0,4)= COUP*-CI * P1(3)*P2(0)*S3(5)
      COEFF(2,0,4)= COUP*-CI * P1(3)*P2(1)*S3(5)
      COEFF(3,0,4)= COUP*-CI * P1(3)*P2(2)*S3(5)
      COEFF(4,0,4)= COUP*-S3(5)*(+CI*(P1(3)*P2(3)+TMP2))
      COEFF(1,1,4)= COUP*-CI * P1(3)*S3(5)
      COEFF(2,1,4)= 0Q0
      COEFF(3,1,4)= 0Q0
      COEFF(4,1,4)= COUP*-S3(5)*(+CI*(P2(0)+P1(0)))
      COEFF(1,2,4)= 0Q0
      COEFF(2,2,4)= COUP*-CI * P1(3)*S3(5)
      COEFF(3,2,4)= 0Q0
      COEFF(4,2,4)= COUP*S3(5)*(+CI*(P2(1)+P1(1)))
      COEFF(1,3,4)= 0Q0
      COEFF(2,3,4)= 0Q0
      COEFF(3,3,4)= COUP*-CI * P1(3)*S3(5)
      COEFF(4,3,4)= COUP*S3(5)*(+CI*(P2(2)+P1(2)))
      COEFF(1,4,4)= COUP*-CI * P2(0)*S3(5)
      COEFF(2,4,4)= COUP*-CI * P2(1)*S3(5)
      COEFF(3,4,4)= COUP*-CI * P2(2)*S3(5)
      COEFF(4,4,4)= 0Q0
      COEFF(1,5,4)= 0Q0
      COEFF(2,5,4)= 0Q0
      COEFF(3,5,4)= 0Q0
      COEFF(4,5,4)= COUP*-CI * S3(5)
      COEFF(1,6,4)= 0Q0
      COEFF(2,6,4)= 0Q0
      COEFF(3,6,4)= 0Q0
      COEFF(4,6,4)= 0Q0
      COEFF(1,7,4)= 0Q0
      COEFF(2,7,4)= 0Q0
      COEFF(3,7,4)= 0Q0
      COEFF(4,7,4)= COUP*CI * S3(5)
      COEFF(1,8,4)= 0Q0
      COEFF(2,8,4)= 0Q0
      COEFF(3,8,4)= 0Q0
      COEFF(4,8,4)= 0Q0
      COEFF(1,9,4)= 0Q0
      COEFF(2,9,4)= 0Q0
      COEFF(3,9,4)= 0Q0
      COEFF(4,9,4)= 0Q0
      COEFF(1,10,4)= 0Q0
      COEFF(2,10,4)= 0Q0
      COEFF(3,10,4)= 0Q0
      COEFF(4,10,4)= COUP*CI * S3(5)
      COEFF(1,11,4)= COUP*-CI * S3(5)
      COEFF(2,11,4)= 0Q0
      COEFF(3,11,4)= 0Q0
      COEFF(4,11,4)= 0Q0
      COEFF(1,12,4)= 0Q0
      COEFF(2,12,4)= COUP*-CI * S3(5)
      COEFF(3,12,4)= 0Q0
      COEFF(4,12,4)= 0Q0
      COEFF(1,13,4)= 0Q0
      COEFF(2,13,4)= 0Q0
      COEFF(3,13,4)= COUP*-CI * S3(5)
      COEFF(4,13,4)= 0Q0
      COEFF(1,14,4)= 0Q0
      COEFF(2,14,4)= 0Q0
      COEFF(3,14,4)= 0Q0
      COEFF(4,14,4)= 0Q0
      END
|
{"hexsha": "fa6e37c10d9666fa72843457c4e570adb8456e84", "size": 8072, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "examples/First_Project/mg_processes/signal1/Source/DHELAS/MP_VVS4L2P0_1.f", "max_stars_repo_name": "JaySandesara/madminer", "max_stars_repo_head_hexsha": "c5fcb9fbbd5d70f7a07114e4ea6afc4e3c4518fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/First_Project/mg_processes/signal1/Source/DHELAS/MP_VVS4L2P0_1.f", "max_issues_repo_name": "JaySandesara/madminer", "max_issues_repo_head_hexsha": "c5fcb9fbbd5d70f7a07114e4ea6afc4e3c4518fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/First_Project/mg_processes/signal1/Source/DHELAS/MP_VVS4L2P0_1.f", "max_forks_repo_name": "JaySandesara/madminer", "max_forks_repo_head_hexsha": "c5fcb9fbbd5d70f7a07114e4ea6afc4e3c4518fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3458646617, "max_line_length": 64, "alphanum_fraction": 0.4741080278, "num_tokens": 4736}
|
[STATEMENT]
lemma veval'_closed:
assumes "\<Gamma> \<turnstile>\<^sub>v t \<down> v" "closed_except t (fmdom \<Gamma>)" "closed_venv \<Gamma>"
assumes "wellformed t" "wellformed_venv \<Gamma>"
shows "vclosed v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vclosed v
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile>\<^sub>v t \<down> v
closed_except t (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed t
wellformed_venv \<Gamma>
goal (1 subgoal):
1. vclosed v
[PROOF STEP]
proof induction
[PROOF STATE]
proof (state)
goal (6 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t cs \<Gamma>' u u' env uu_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>'); \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uu_, rhs); \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
6. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
case (comb \<Gamma> t cs \<Gamma>' u u' env pat rhs val)
[PROOF STATE]
proof (state)
this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (6 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t cs \<Gamma>' u u' env uu_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>'); \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uu_, rhs); \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
6. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
hence "vclosed (Vabs cs \<Gamma>')"
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (1 subgoal):
1. vclosed (Vabs cs \<Gamma>')
[PROOF STEP]
by (auto simp: closed_except_def)
[PROOF STATE]
proof (state)
this:
vclosed (Vabs cs \<Gamma>')
goal (6 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t cs \<Gamma>' u u' env uu_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>'); \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uu_, rhs); \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
6. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
have "(pat, rhs) \<in> set cs" "vmatch (mk_pat pat) u' = Some env"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (pat, rhs) \<in> set cs &&& vmatch (mk_pat pat) u' = Some env
[PROOF STEP]
by (rule vfind_match_elem; fact)+
[PROOF STATE]
proof (state)
this:
(pat, rhs) \<in> set cs
vmatch (mk_pat pat) u' = Some env
goal (6 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t cs \<Gamma>' u u' env uu_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>'); \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uu_, rhs); \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
6. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
hence "fmdom env = patvars (mk_pat pat)"
[PROOF STATE]
proof (prove)
using this:
(pat, rhs) \<in> set cs
vmatch (mk_pat pat) u' = Some env
goal (1 subgoal):
1. fmdom env = patvars (mk_pat pat)
[PROOF STEP]
by (simp add: vmatch_dom)
[PROOF STATE]
proof (state)
this:
fmdom env = patvars (mk_pat pat)
goal (6 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t cs \<Gamma>' u u' env uu_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>'); \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uu_, rhs); \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
6. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
have "vwellformed (Vabs cs \<Gamma>')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vwellformed (Vabs cs \<Gamma>')
[PROOF STEP]
apply (rule veval'_wellformed)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. ?\<Gamma> \<turnstile>\<^sub>v ?t \<down> Vabs cs \<Gamma>'
2. pre_strong_term_class.wellformed ?t
3. wellformed_venv ?\<Gamma>
[PROOF STEP]
using comb
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (3 subgoals):
1. ?\<Gamma> \<turnstile>\<^sub>v ?t \<down> Vabs cs \<Gamma>'
2. pre_strong_term_class.wellformed ?t
3. wellformed_venv ?\<Gamma>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vwellformed (Vabs cs \<Gamma>')
goal (6 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t cs \<Gamma>' u u' env uu_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>'); \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uu_, rhs); \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
6. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
hence "linear pat"
[PROOF STATE]
proof (prove)
using this:
vwellformed (Vabs cs \<Gamma>')
goal (1 subgoal):
1. linear pat
[PROOF STEP]
using \<open>(pat, rhs) \<in> set cs\<close>
[PROOF STATE]
proof (prove)
using this:
vwellformed (Vabs cs \<Gamma>')
(pat, rhs) \<in> set cs
goal (1 subgoal):
1. linear pat
[PROOF STEP]
by (auto simp: list_all_iff)
[PROOF STATE]
proof (state)
this:
linear pat
goal (6 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t cs \<Gamma>' u u' env uu_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>'); \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uu_, rhs); \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
6. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
hence "fmdom env = frees pat"
[PROOF STATE]
proof (prove)
using this:
linear pat
goal (1 subgoal):
1. fmdom env = frees pat
[PROOF STEP]
unfolding \<open>fmdom env = _\<close>
[PROOF STATE]
proof (prove)
using this:
linear pat
goal (1 subgoal):
1. patvars (mk_pat pat) = frees pat
[PROOF STEP]
by (simp add: mk_pat_frees)
[PROOF STATE]
proof (state)
this:
fmdom env = frees pat
goal (6 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t cs \<Gamma>' u u' env uu_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>'); \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uu_, rhs); \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
6. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vclosed val
[PROOF STEP]
proof (rule comb)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env))
2. closed_venv (\<Gamma>' ++\<^sub>f env)
3. pre_strong_term_class.wellformed rhs
4. wellformed_venv (\<Gamma>' ++\<^sub>f env)
[PROOF STEP]
show "wellformed rhs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pre_strong_term_class.wellformed rhs
[PROOF STEP]
using \<open>(pat, rhs) \<in> set cs\<close> \<open>vwellformed (Vabs cs \<Gamma>')\<close>
[PROOF STATE]
proof (prove)
using this:
(pat, rhs) \<in> set cs
vwellformed (Vabs cs \<Gamma>')
goal (1 subgoal):
1. pre_strong_term_class.wellformed rhs
[PROOF STEP]
by (auto simp: list_all_iff)
[PROOF STATE]
proof (state)
this:
pre_strong_term_class.wellformed rhs
goal (3 subgoals):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env))
2. closed_venv (\<Gamma>' ++\<^sub>f env)
3. wellformed_venv (\<Gamma>' ++\<^sub>f env)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env))
2. closed_venv (\<Gamma>' ++\<^sub>f env)
3. wellformed_venv (\<Gamma>' ++\<^sub>f env)
[PROOF STEP]
show "closed_venv (\<Gamma>' ++\<^sub>f env)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed_venv (\<Gamma>' ++\<^sub>f env)
[PROOF STEP]
apply rule
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. closed_venv \<Gamma>'
2. closed_venv env
[PROOF STEP]
using \<open>vclosed (Vabs cs \<Gamma>')\<close>
[PROOF STATE]
proof (prove)
using this:
vclosed (Vabs cs \<Gamma>')
goal (2 subgoals):
1. closed_venv \<Gamma>'
2. closed_venv env
[PROOF STEP]
apply auto[]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed_venv env
[PROOF STEP]
apply (rule vclosed.vmatch_env)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. vmatch ?pat6 ?v6 = Some env
2. vclosed ?v6
[PROOF STEP]
apply (rule vfind_match_elem)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. vfind_match ?cs9 ?v6 = Some (env, ?pat9, ?rhs9)
2. vclosed ?v6
[PROOF STEP]
using comb
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (2 subgoals):
1. vfind_match ?cs9 ?v6 = Some (env, ?pat9, ?rhs9)
2. vclosed ?v6
[PROOF STEP]
by (auto simp: closed_except_def)
[PROOF STATE]
proof (state)
this:
closed_venv (\<Gamma>' ++\<^sub>f env)
goal (2 subgoals):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env))
2. wellformed_venv (\<Gamma>' ++\<^sub>f env)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env))
2. wellformed_venv (\<Gamma>' ++\<^sub>f env)
[PROOF STEP]
show "closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env))
[PROOF STEP]
using \<open>vclosed (Vabs cs \<Gamma>')\<close> \<open>fmdom env = frees pat\<close> \<open>(pat, rhs) \<in> set cs\<close>
[PROOF STATE]
proof (prove)
using this:
vclosed (Vabs cs \<Gamma>')
fmdom env = frees pat
(pat, rhs) \<in> set cs
goal (1 subgoal):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env))
[PROOF STEP]
by (auto simp: list_all_iff)
[PROOF STATE]
proof (state)
this:
closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env))
goal (1 subgoal):
1. wellformed_venv (\<Gamma>' ++\<^sub>f env)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. wellformed_venv (\<Gamma>' ++\<^sub>f env)
[PROOF STEP]
show "wellformed_venv (\<Gamma>' ++\<^sub>f env)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wellformed_venv (\<Gamma>' ++\<^sub>f env)
[PROOF STEP]
apply rule
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. wellformed_venv \<Gamma>'
2. wellformed_venv env
[PROOF STEP]
using \<open>vwellformed (Vabs cs \<Gamma>')\<close>
[PROOF STATE]
proof (prove)
using this:
vwellformed (Vabs cs \<Gamma>')
goal (2 subgoals):
1. wellformed_venv \<Gamma>'
2. wellformed_venv env
[PROOF STEP]
apply auto[]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wellformed_venv env
[PROOF STEP]
apply (rule vwellformed.vmatch_env)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. vmatch ?pat6 ?v6 = Some env
2. vwellformed ?v6
[PROOF STEP]
apply (rule vfind_match_elem)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. vfind_match ?cs9 ?v6 = Some (env, ?pat9, ?rhs9)
2. vwellformed ?v6
[PROOF STEP]
apply fact
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vwellformed u'
[PROOF STEP]
apply (rule veval'_wellformed)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. ?\<Gamma>12 \<turnstile>\<^sub>v ?t12 \<down> u'
2. pre_strong_term_class.wellformed ?t12
3. wellformed_venv ?\<Gamma>12
[PROOF STEP]
using comb
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vabs cs \<Gamma>'
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (3 subgoals):
1. ?\<Gamma>12 \<turnstile>\<^sub>v ?t12 \<down> u'
2. pre_strong_term_class.wellformed ?t12
3. wellformed_venv ?\<Gamma>12
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
wellformed_venv (\<Gamma>' ++\<^sub>f env)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
vclosed val
goal (5 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
case (rec_comb \<Gamma> t css name \<Gamma>' cs u u' env pat rhs val)
[PROOF STATE]
proof (state)
this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'
fmlookup css name = Some cs
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (5 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
have "(pat, rhs) \<in> set cs" "vmatch (mk_pat pat) u' = Some env"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (pat, rhs) \<in> set cs &&& vmatch (mk_pat pat) u' = Some env
[PROOF STEP]
by (rule vfind_match_elem; fact)+
[PROOF STATE]
proof (state)
this:
(pat, rhs) \<in> set cs
vmatch (mk_pat pat) u' = Some env
goal (5 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
hence "fmdom env = patvars (mk_pat pat)"
[PROOF STATE]
proof (prove)
using this:
(pat, rhs) \<in> set cs
vmatch (mk_pat pat) u' = Some env
goal (1 subgoal):
1. fmdom env = patvars (mk_pat pat)
[PROOF STEP]
by (simp add: vmatch_dom)
[PROOF STATE]
proof (state)
this:
fmdom env = patvars (mk_pat pat)
goal (5 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
have "vwellformed (Vrecabs css name \<Gamma>')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vwellformed (Vrecabs css name \<Gamma>')
[PROOF STEP]
apply (rule veval'_wellformed)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. ?\<Gamma> \<turnstile>\<^sub>v ?t \<down> Vrecabs css name \<Gamma>'
2. pre_strong_term_class.wellformed ?t
3. wellformed_venv ?\<Gamma>
[PROOF STEP]
using rec_comb
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'
fmlookup css name = Some cs
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (3 subgoals):
1. ?\<Gamma> \<turnstile>\<^sub>v ?t \<down> Vrecabs css name \<Gamma>'
2. pre_strong_term_class.wellformed ?t
3. wellformed_venv ?\<Gamma>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vwellformed (Vrecabs css name \<Gamma>')
goal (5 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
hence "wellformed_clauses cs"
[PROOF STATE]
proof (prove)
using this:
vwellformed (Vrecabs css name \<Gamma>')
goal (1 subgoal):
1. wellformed_clauses cs
[PROOF STEP]
using rec_comb
[PROOF STATE]
proof (prove)
using this:
vwellformed (Vrecabs css name \<Gamma>')
\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'
fmlookup css name = Some cs
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (1 subgoal):
1. wellformed_clauses cs
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
wellformed_clauses cs
goal (5 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
hence "linear pat"
[PROOF STATE]
proof (prove)
using this:
wellformed_clauses cs
goal (1 subgoal):
1. linear pat
[PROOF STEP]
using \<open>(pat, rhs) \<in> set cs\<close>
[PROOF STATE]
proof (prove)
using this:
wellformed_clauses cs
(pat, rhs) \<in> set cs
goal (1 subgoal):
1. linear pat
[PROOF STEP]
by (auto simp: list_all_iff)
[PROOF STATE]
proof (state)
this:
linear pat
goal (5 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
hence "fmdom env = frees pat"
[PROOF STATE]
proof (prove)
using this:
linear pat
goal (1 subgoal):
1. fmdom env = frees pat
[PROOF STEP]
unfolding \<open>fmdom env = _\<close>
[PROOF STATE]
proof (prove)
using this:
linear pat
goal (1 subgoal):
1. patvars (mk_pat pat) = frees pat
[PROOF STEP]
by (simp add: mk_pat_frees)
[PROOF STATE]
proof (state)
this:
fmdom env = frees pat
goal (5 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>\<Gamma> t css name \<Gamma>' cs u u' env uv_ rhs val. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'; \<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>'); fmlookup css name = Some cs; \<Gamma> \<turnstile>\<^sub>v u \<down> u'; \<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'; vfind_match cs u' = Some (env, uv_, rhs); \<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val; \<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val; closed_except (t $\<^sub>s u) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (t $\<^sub>s u); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
5. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vclosed val
[PROOF STEP]
proof (rule rec_comb)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env))
2. closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
3. pre_strong_term_class.wellformed rhs
4. wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
show "closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
proof (intro fmpred_add)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. closed_venv \<Gamma>'
2. closed_venv (mk_rec_env css \<Gamma>')
3. closed_venv env
[PROOF STEP]
show "closed_venv \<Gamma>'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed_venv \<Gamma>'
[PROOF STEP]
using rec_comb
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'
fmlookup css name = Some cs
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (1 subgoal):
1. closed_venv \<Gamma>'
[PROOF STEP]
by (auto simp: closed_except_def)
[PROOF STATE]
proof (state)
this:
closed_venv \<Gamma>'
goal (2 subgoals):
1. closed_venv (mk_rec_env css \<Gamma>')
2. closed_venv env
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. closed_venv (mk_rec_env css \<Gamma>')
2. closed_venv env
[PROOF STEP]
show "closed_venv env"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed_venv env
[PROOF STEP]
using rec_comb
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'
fmlookup css name = Some cs
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (1 subgoal):
1. closed_venv env
[PROOF STEP]
by (auto simp: closed_except_def dest: vfind_match_elem intro: vclosed.vmatch_env)
[PROOF STATE]
proof (state)
this:
closed_venv env
goal (1 subgoal):
1. closed_venv (mk_rec_env css \<Gamma>')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. closed_venv (mk_rec_env css \<Gamma>')
[PROOF STEP]
show "closed_venv (mk_rec_env css \<Gamma>')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed_venv (mk_rec_env css \<Gamma>')
[PROOF STEP]
unfolding mk_rec_env_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed_venv (fmmap_keys (\<lambda>name cs. Vrecabs css name \<Gamma>') css)
[PROOF STEP]
using rec_comb
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'
fmlookup css name = Some cs
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (1 subgoal):
1. closed_venv (fmmap_keys (\<lambda>name cs. Vrecabs css name \<Gamma>') css)
[PROOF STEP]
by (auto simp: closed_except_def intro: fmdomI)
[PROOF STATE]
proof (state)
this:
closed_venv (mk_rec_env css \<Gamma>')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
goal (3 subgoals):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env))
2. pre_strong_term_class.wellformed rhs
3. wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env))
2. pre_strong_term_class.wellformed rhs
3. wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
have "vclosed (Vrecabs css name \<Gamma>')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vclosed (Vrecabs css name \<Gamma>')
[PROOF STEP]
using mk_rec_env_def
[PROOF STATE]
proof (prove)
using this:
mk_rec_env ?css ?\<Gamma>' = fmmap_keys (\<lambda>name cs. Vrecabs ?css name ?\<Gamma>') ?css
goal (1 subgoal):
1. vclosed (Vrecabs css name \<Gamma>')
[PROOF STEP]
using rec_comb
[PROOF STATE]
proof (prove)
using this:
mk_rec_env ?css ?\<Gamma>' = fmmap_keys (\<lambda>name cs. Vrecabs ?css name ?\<Gamma>') ?css
\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'
fmlookup css name = Some cs
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (1 subgoal):
1. vclosed (Vrecabs css name \<Gamma>')
[PROOF STEP]
by (auto simp: closed_except_def intro: fmdom'I)
[PROOF STATE]
proof (state)
this:
vclosed (Vrecabs css name \<Gamma>')
goal (3 subgoals):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env))
2. pre_strong_term_class.wellformed rhs
3. wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
hence "closed_except rhs (fmdom \<Gamma>' |\<union>| frees pat)"
[PROOF STATE]
proof (prove)
using this:
vclosed (Vrecabs css name \<Gamma>')
goal (1 subgoal):
1. closed_except rhs (fmdom \<Gamma>' |\<union>| frees pat)
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed_venv \<Gamma>' \<and> fmpred (\<lambda>_. list_all (\<lambda>(pat, t). closed_except t (fmdom \<Gamma>' |\<union>| frees pat))) css \<and> name |\<in>| fmdom css \<Longrightarrow> closed_except rhs (fmdom \<Gamma>' |\<union>| frees pat)
[PROOF STEP]
apply (elim conjE)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>closed_venv \<Gamma>'; fmpred (\<lambda>_. list_all (\<lambda>(pat, t). closed_except t (fmdom \<Gamma>' |\<union>| frees pat))) css; name |\<in>| fmdom css\<rbrakk> \<Longrightarrow> closed_except rhs (fmdom \<Gamma>' |\<union>| frees pat)
[PROOF STEP]
apply (drule fmpredD[where m = css])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>closed_venv \<Gamma>'; name |\<in>| fmdom css\<rbrakk> \<Longrightarrow> fmlookup css ?x3 = Some ?y3
2. \<lbrakk>closed_venv \<Gamma>'; name |\<in>| fmdom css; list_all (\<lambda>(pat, t). closed_except t (fmdom \<Gamma>' |\<union>| frees pat)) ?y3\<rbrakk> \<Longrightarrow> closed_except rhs (fmdom \<Gamma>' |\<union>| frees pat)
[PROOF STEP]
apply (rule rec_comb)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>closed_venv \<Gamma>'; name |\<in>| fmdom css; list_all (\<lambda>(pat, t). closed_except t (fmdom \<Gamma>' |\<union>| frees pat)) cs\<rbrakk> \<Longrightarrow> closed_except rhs (fmdom \<Gamma>' |\<union>| frees pat)
[PROOF STEP]
using \<open>(pat, rhs) \<in> set cs\<close>
[PROOF STATE]
proof (prove)
using this:
(pat, rhs) \<in> set cs
goal (1 subgoal):
1. \<lbrakk>closed_venv \<Gamma>'; name |\<in>| fmdom css; list_all (\<lambda>(pat, t). closed_except t (fmdom \<Gamma>' |\<union>| frees pat)) cs\<rbrakk> \<Longrightarrow> closed_except rhs (fmdom \<Gamma>' |\<union>| frees pat)
[PROOF STEP]
unfolding list_all_iff
[PROOF STATE]
proof (prove)
using this:
(pat, rhs) \<in> set cs
goal (1 subgoal):
1. \<lbrakk>closed_venv \<Gamma>'; name |\<in>| fmdom css; \<forall>(pat, t)\<in>set cs. closed_except t (fmdom \<Gamma>' |\<union>| frees pat)\<rbrakk> \<Longrightarrow> closed_except rhs (fmdom \<Gamma>' |\<union>| frees pat)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
closed_except rhs (fmdom \<Gamma>' |\<union>| frees pat)
goal (3 subgoals):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env))
2. pre_strong_term_class.wellformed rhs
3. wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
thus "closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env))"
[PROOF STATE]
proof (prove)
using this:
closed_except rhs (fmdom \<Gamma>' |\<union>| frees pat)
goal (1 subgoal):
1. closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env))
[PROOF STEP]
unfolding closed_except_def
[PROOF STATE]
proof (prove)
using this:
frees rhs |\<subseteq>| fmdom \<Gamma>' |\<union>| frees pat
goal (1 subgoal):
1. frees rhs |\<subseteq>| fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
using \<open>fmdom env = frees pat\<close>
[PROOF STATE]
proof (prove)
using this:
frees rhs |\<subseteq>| fmdom \<Gamma>' |\<union>| frees pat
fmdom env = frees pat
goal (1 subgoal):
1. frees rhs |\<subseteq>| fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env))
goal (2 subgoals):
1. pre_strong_term_class.wellformed rhs
2. wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. pre_strong_term_class.wellformed rhs
2. wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
show "wellformed rhs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pre_strong_term_class.wellformed rhs
[PROOF STEP]
using \<open>wellformed_clauses cs\<close> \<open>(pat, rhs) \<in> set cs\<close>
[PROOF STATE]
proof (prove)
using this:
wellformed_clauses cs
(pat, rhs) \<in> set cs
goal (1 subgoal):
1. pre_strong_term_class.wellformed rhs
[PROOF STEP]
by (auto simp: list_all_iff)
[PROOF STATE]
proof (state)
this:
pre_strong_term_class.wellformed rhs
goal (1 subgoal):
1. wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
show "wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
[PROOF STEP]
proof (intro fmpred_add)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. wellformed_venv \<Gamma>'
2. wellformed_venv (mk_rec_env css \<Gamma>')
3. wellformed_venv env
[PROOF STEP]
show "wellformed_venv \<Gamma>'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wellformed_venv \<Gamma>'
[PROOF STEP]
using \<open>vwellformed (Vrecabs css name \<Gamma>')\<close>
[PROOF STATE]
proof (prove)
using this:
vwellformed (Vrecabs css name \<Gamma>')
goal (1 subgoal):
1. wellformed_venv \<Gamma>'
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
wellformed_venv \<Gamma>'
goal (2 subgoals):
1. wellformed_venv (mk_rec_env css \<Gamma>')
2. wellformed_venv env
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. wellformed_venv (mk_rec_env css \<Gamma>')
2. wellformed_venv env
[PROOF STEP]
show "wellformed_venv env"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wellformed_venv env
[PROOF STEP]
using rec_comb
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile>\<^sub>v t \<down> Vrecabs css name \<Gamma>'
fmlookup css name = Some cs
\<Gamma> \<turnstile>\<^sub>v u \<down> u'
vfind_match cs u' = Some (env, pat, rhs)
\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env \<turnstile>\<^sub>v rhs \<down> val
\<lbrakk>closed_except t (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed t; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vrecabs css name \<Gamma>')
\<lbrakk>closed_except u (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed u; wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed u'
\<lbrakk>closed_except rhs (fmdom (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)); closed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env); pre_strong_term_class.wellformed rhs; wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)\<rbrakk> \<Longrightarrow> vclosed val
closed_except (t $\<^sub>s u) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (t $\<^sub>s u)
wellformed_venv \<Gamma>
goal (1 subgoal):
1. wellformed_venv env
[PROOF STEP]
by (auto dest: vfind_match_elem intro: veval'_wellformed vwellformed.vmatch_env)
[PROOF STATE]
proof (state)
this:
wellformed_venv env
goal (1 subgoal):
1. wellformed_venv (mk_rec_env css \<Gamma>')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. wellformed_venv (mk_rec_env css \<Gamma>')
[PROOF STEP]
show "wellformed_venv (mk_rec_env css \<Gamma>')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wellformed_venv (mk_rec_env css \<Gamma>')
[PROOF STEP]
unfolding mk_rec_env_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wellformed_venv (fmmap_keys (\<lambda>name cs. Vrecabs css name \<Gamma>') css)
[PROOF STEP]
using \<open>vwellformed (Vrecabs css name \<Gamma>')\<close>
[PROOF STATE]
proof (prove)
using this:
vwellformed (Vrecabs css name \<Gamma>')
goal (1 subgoal):
1. wellformed_venv (fmmap_keys (\<lambda>name cs. Vrecabs css name \<Gamma>') css)
[PROOF STEP]
by (auto intro: fmdomI)
[PROOF STATE]
proof (state)
this:
wellformed_venv (mk_rec_env css \<Gamma>')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
wellformed_venv (\<Gamma>' ++\<^sub>f mk_rec_env css \<Gamma>' ++\<^sub>f env)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
vclosed val
goal (4 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
case (constr name \<Gamma> ts us)
[PROOF STATE]
proof (state)
this:
name |\<in>| C
list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us
closed_except (name $$ ts) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (name $$ ts)
wellformed_venv \<Gamma>
goal (4 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
have "list_all vclosed us"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. list_all vclosed us
[PROOF STEP]
using \<open>list_all2 _ _ _\<close> \<open>closed_except (_ $$ _) _\<close> \<open>wellformed (_ $$ _)\<close>
[PROOF STATE]
proof (prove)
using this:
list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us
closed_except (name $$ ts) (fmdom \<Gamma>)
pre_strong_term_class.wellformed (name $$ ts)
goal (1 subgoal):
1. list_all vclosed us
[PROOF STEP]
proof (induction ts us rule: list.rel_induct)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>closed_except (name $$ []) (fmdom \<Gamma>); pre_strong_term_class.wellformed (name $$ [])\<rbrakk> \<Longrightarrow> list_all vclosed []
2. \<And>a21 a22 b21 b22. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v a21 \<down> b21 \<and> (closed_except a21 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed a21 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed b21); \<lbrakk>closed_except (name $$ a22) (fmdom \<Gamma>); pre_strong_term_class.wellformed (name $$ a22)\<rbrakk> \<Longrightarrow> list_all vclosed b22; closed_except (name $$ (a21 # a22)) (fmdom \<Gamma>); pre_strong_term_class.wellformed (name $$ (a21 # a22))\<rbrakk> \<Longrightarrow> list_all vclosed (b21 # b22)
[PROOF STEP]
case (Cons v vs u us)
[PROOF STATE]
proof (state)
this:
\<Gamma> \<turnstile>\<^sub>v v \<down> u \<and> (closed_except v (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed v \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed u)
\<lbrakk>closed_except (name $$ vs) (fmdom \<Gamma>); pre_strong_term_class.wellformed (name $$ vs)\<rbrakk> \<Longrightarrow> list_all vclosed us
closed_except (name $$ (v # vs)) (fmdom \<Gamma>)
pre_strong_term_class.wellformed (name $$ (v # vs))
goal (2 subgoals):
1. \<lbrakk>closed_except (name $$ []) (fmdom \<Gamma>); pre_strong_term_class.wellformed (name $$ [])\<rbrakk> \<Longrightarrow> list_all vclosed []
2. \<And>a21 a22 b21 b22. \<lbrakk>\<Gamma> \<turnstile>\<^sub>v a21 \<down> b21 \<and> (closed_except a21 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed a21 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed b21); \<lbrakk>closed_except (name $$ a22) (fmdom \<Gamma>); pre_strong_term_class.wellformed (name $$ a22)\<rbrakk> \<Longrightarrow> list_all vclosed b22; closed_except (name $$ (a21 # a22)) (fmdom \<Gamma>); pre_strong_term_class.wellformed (name $$ (a21 # a22))\<rbrakk> \<Longrightarrow> list_all vclosed (b21 # b22)
[PROOF STEP]
with constr
[PROOF STATE]
proof (chain)
picking this:
name |\<in>| C
list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us__
closed_except (name $$ ts) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (name $$ ts)
wellformed_venv \<Gamma>
\<Gamma> \<turnstile>\<^sub>v v \<down> u \<and> (closed_except v (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed v \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed u)
\<lbrakk>closed_except (name $$ vs) (fmdom \<Gamma>); pre_strong_term_class.wellformed (name $$ vs)\<rbrakk> \<Longrightarrow> list_all vclosed us
closed_except (name $$ (v # vs)) (fmdom \<Gamma>)
pre_strong_term_class.wellformed (name $$ (v # vs))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
name |\<in>| C
list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us__
closed_except (name $$ ts) (fmdom \<Gamma>)
closed_venv \<Gamma>
pre_strong_term_class.wellformed (name $$ ts)
wellformed_venv \<Gamma>
\<Gamma> \<turnstile>\<^sub>v v \<down> u \<and> (closed_except v (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed v \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed u)
\<lbrakk>closed_except (name $$ vs) (fmdom \<Gamma>); pre_strong_term_class.wellformed (name $$ vs)\<rbrakk> \<Longrightarrow> list_all vclosed us
closed_except (name $$ (v # vs)) (fmdom \<Gamma>)
pre_strong_term_class.wellformed (name $$ (v # vs))
goal (1 subgoal):
1. list_all vclosed (u # us)
[PROOF STEP]
unfolding closed.list_comb wellformed.list_comb
[PROOF STATE]
proof (prove)
using this:
name |\<in>| C
list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us__
closed_except (const name) (fmdom \<Gamma>) \<and> list_all (\<lambda>t. closed_except t (fmdom \<Gamma>)) ts
closed_venv \<Gamma>
pre_strong_term_class.wellformed (const name) \<and> list_all pre_strong_term_class.wellformed ts
wellformed_venv \<Gamma>
\<Gamma> \<turnstile>\<^sub>v v \<down> u \<and> (closed_except v (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed v \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed u)
\<lbrakk>closed_except (const name) (fmdom \<Gamma>) \<and> list_all (\<lambda>t. closed_except t (fmdom \<Gamma>)) vs; pre_strong_term_class.wellformed (const name) \<and> list_all pre_strong_term_class.wellformed vs\<rbrakk> \<Longrightarrow> list_all vclosed us
closed_except (const name) (fmdom \<Gamma>) \<and> list_all (\<lambda>t. closed_except t (fmdom \<Gamma>)) (v # vs)
pre_strong_term_class.wellformed (const name) \<and> list_all pre_strong_term_class.wellformed (v # vs)
goal (1 subgoal):
1. list_all vclosed (u # us)
[PROOF STEP]
by (auto simp: Sterm.closed_except_simps)
[PROOF STATE]
proof (state)
this:
list_all vclosed (u # us)
goal (1 subgoal):
1. \<lbrakk>closed_except (name $$ []) (fmdom \<Gamma>); pre_strong_term_class.wellformed (name $$ [])\<rbrakk> \<Longrightarrow> list_all vclosed []
[PROOF STEP]
qed simp
[PROOF STATE]
proof (state)
this:
list_all vclosed us
goal (4 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
4. \<And>name \<Gamma> ts us. \<lbrakk>name |\<in>| C; list_all2 (\<lambda>x1 x2. \<Gamma> \<turnstile>\<^sub>v x1 \<down> x2 \<and> (closed_except x1 (fmdom \<Gamma>) \<longrightarrow> closed_venv \<Gamma> \<longrightarrow> pre_strong_term_class.wellformed x1 \<longrightarrow> wellformed_venv \<Gamma> \<longrightarrow> vclosed x2)) ts us; closed_except (name $$ ts) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (name $$ ts); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vconstr name us)
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
list_all vclosed us
goal (1 subgoal):
1. vclosed (Vconstr name us)
[PROOF STEP]
by (simp add: list_all_iff)
[PROOF STATE]
proof (state)
this:
vclosed (Vconstr name us)
goal (3 subgoals):
1. \<And>name \<Gamma> val. \<lbrakk>name |\<notin>| C; fmlookup \<Gamma> name = Some val; closed_except (Sconst name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sconst name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
2. \<And>\<Gamma> name val. \<lbrakk>fmlookup \<Gamma> name = Some val; closed_except (Svar name) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Svar name); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed val
3. \<And>\<Gamma> cs. \<lbrakk>closed_except (Sabs cs) (fmdom \<Gamma>); closed_venv \<Gamma>; pre_strong_term_class.wellformed (Sabs cs); wellformed_venv \<Gamma>\<rbrakk> \<Longrightarrow> vclosed (Vabs cs \<Gamma>)
[PROOF STEP]
qed (auto simp: Sterm.closed_except_simps)
|
{"llama_tokens": 37209, "file": "CakeML_Codegen_Rewriting_Big_Step_Value_ML", "length": 137}
|
import cv2
import numpy as np
# Open the default camera (device 0) and warn early if it is unavailable;
# `not cap.isOpened()` is the idiomatic truthiness test (never `is False`).
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Capture_Error!")
def nothing(x):
    """No-op trackbar callback (cv2.createTrackbar requires a callable)."""
    return None
# One tuning window per ball color, each with H/S/V trackbars (0-255) that
# drive the *lower* HSV bound used by the capture loop; the upper bounds
# stay hard-coded there. The loops replace nine copy-pasted call triples.
for _window in ("Blue", "Red", "Yellow"):
    cv2.namedWindow(_window)
    for _channel in ("H", "S", "V"):
        cv2.createTrackbar(_channel, _window, 0, 255, nothing)
while True:
    ret, frame = cap.read()
    if not ret:
        # Grab failed (camera unplugged / stream ended): stop instead of
        # crashing inside cv2.resize on a None frame.
        break
    # Halve the frame to keep per-frame processing cheap.
    frame = cv2.resize(frame, (frame.shape[1] // 2, frame.shape[0] // 2))
    # BGR -> HSV: thresholding by hue is far more robust than raw BGR.
    # NOTE(review): the original also ran a Gaussian-blur/Otsu/
    # connectedComponentsWithStats pipeline whose result was never used;
    # that dead per-frame work has been removed.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Blue mask: trackbars give the lower HSV bound (reference values
    # ~H100 S110 V60); upper hue fixed at 120.
    lower_blue = np.array([cv2.getTrackbarPos("H", "Blue"),
                           cv2.getTrackbarPos("S", "Blue"),
                           cv2.getTrackbarPos("V", "Blue")])
    mask_blue = cv2.inRange(hsv, lower_blue, np.array([120, 255, 255]))

    # Red mask: red hue wraps around 0, so combine two bands — [0..H] with
    # trackbar-set S/V floors (reference ~S100 V60), plus a fixed
    # high-hue band [156..255].
    lower_red = np.array([0,
                          cv2.getTrackbarPos("S", "Red"),
                          cv2.getTrackbarPos("V", "Red")])
    upper_red = np.array([cv2.getTrackbarPos("H", "Red"), 255, 255])
    mask_red = (cv2.inRange(hsv, lower_red, upper_red)
                + cv2.inRange(hsv, np.array([156, 128, 81]),
                              np.array([255, 255, 255])))

    # Yellow mask: lower bound from trackbars (reference ~H18 S90 V110);
    # upper hue fixed at 30.
    lower_yellow = np.array([cv2.getTrackbarPos("H", "Yellow"),
                             cv2.getTrackbarPos("S", "Yellow"),
                             cv2.getTrackbarPos("V", "Yellow")])
    mask_yellow = cv2.inRange(hsv, lower_yellow, np.array([30, 255, 255]))

    cv2.imshow("Blue", mask_blue)
    cv2.imshow("Red", mask_red)
    cv2.imshow("Yellow", mask_yellow)
    # Nothing is drawn on the frame, so show it directly (the original
    # made an unused copy).
    cv2.imshow("image", frame)

    if cv2.waitKey(1) == ord('q'):
        break
# Release the camera and close every OpenCV window on exit.
# Bug fix: the API is cv2.destroyAllWindows() (plural); the original
# cv2.destroyAllWindow() raised AttributeError at shutdown.
cap.release()
cv2.destroyAllWindows()
|
{"hexsha": "a46cc13fef0564ba5cba752f43d7258a013621c1", "size": 2314, "ext": "py", "lang": "Python", "max_stars_repo_path": "color.py", "max_stars_repo_name": "tiger0421/GetValOfThereshold3ColoredBall", "max_stars_repo_head_hexsha": "a8a0c0e4d0f54ac5ac34502e5de87916ce6901a8", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "color.py", "max_issues_repo_name": "tiger0421/GetValOfThereshold3ColoredBall", "max_issues_repo_head_hexsha": "a8a0c0e4d0f54ac5ac34502e5de87916ce6901a8", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "color.py", "max_forks_repo_name": "tiger0421/GetValOfThereshold3ColoredBall", "max_forks_repo_head_hexsha": "a8a0c0e4d0f54ac5ac34502e5de87916ce6901a8", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2195121951, "max_line_length": 75, "alphanum_fraction": 0.691011236, "include": true, "reason": "import numpy", "num_tokens": 795}
|
// Copyright (C) 2001-2003
// William E. Kempf
// Copyright (C) 2007-8 Anthony Williams
// (C) Copyright 2011-2012 Vicente J. Botet Escriba
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <boost/thread/detail/config.hpp>
#include <boost/thread/thread_only.hpp>
#if defined BOOST_THREAD_USES_DATETIME
#include <boost/thread/xtime.hpp>
#endif
#include <boost/thread/condition_variable.hpp>
#include <boost/thread/future.hpp>
#include <boost/thread/locks.hpp>
#include <boost/thread/once.hpp>
#include <boost/thread/tss.hpp>
#ifdef __GLIBC__
#include <sys/sysinfo.h>
#elif defined(__APPLE__) || defined(__FreeBSD__)
#include <sys/sysctl.h>
#include <sys/types.h>
#elif defined BOOST_HAS_UNISTD_H
#include <unistd.h>
#endif
#if defined(__VXWORKS__)
#include <vxCpuLib.h>
#endif
#include <string.h> // memcmp.
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <boost/lexical_cast.hpp>
#include <fstream>
#include <set>
#include <string>
#include <vector>
namespace boost
{
namespace detail
{
// Tear down a thread's bookkeeping record.
thread_data_base::~thread_data_base()
{
  // Unlock each registered mutex and wake its condition variable —
  // presumably pairs registered via notify_all_at_thread_exit (see the
  // virtual of that name below); TODO confirm against the header.
  for (notify_list_t::iterator i = notify.begin(), e = notify.end(); i != e;
       ++i)
  {
    i->second->unlock();
    i->first->notify_all();
  }
  //#ifndef BOOST_NO_EXCEPTIONS
  // Wake any deferred async states still owned by this thread.
  for (async_states_t::iterator i = async_states_.begin(),
                                e = async_states_.end();
       i != e; ++i)
  {
    (*i)->notify_deferred();
  }
  //#endif
}
// Singly-linked list node holding one at-thread-exit callback; the list is
// drained (each func invoked, then func and node deleted) in tls_destructor
// when the owning thread terminates.
struct thread_exit_callback_node
{
  boost::detail::thread_exit_function_base* func; // owned; deleted after the call
  thread_exit_callback_node* next;                // next node, or 0 at list end
  thread_exit_callback_node(boost::detail::thread_exit_function_base* func_,
                            thread_exit_callback_node* next_) :
      func(func_),
      next(next_)
  {
  }
};
namespace
{
#ifdef BOOST_THREAD_PROVIDES_ONCE_CXX11
boost::once_flag current_thread_tls_init_flag;
#else
boost::once_flag current_thread_tls_init_flag = BOOST_ONCE_INIT;
#endif
pthread_key_t current_thread_tls_key;
extern "C" {
static void tls_destructor(void* data)
{
// boost::detail::thread_data_base*
// thread_info=static_cast<boost::detail::thread_data_base*>(data);
boost::detail::thread_data_ptr thread_info =
static_cast<boost::detail::thread_data_base*>(data)->shared_from_this();
if (thread_info)
{
while (!thread_info->tss_data.empty() ||
thread_info->thread_exit_callbacks)
{
while (thread_info->thread_exit_callbacks)
{
detail::thread_exit_callback_node* const current_node =
thread_info->thread_exit_callbacks;
thread_info->thread_exit_callbacks = current_node->next;
if (current_node->func)
{
(*current_node->func)();
delete current_node->func;
}
delete current_node;
}
while (!thread_info->tss_data.empty())
{
std::map<void const*, detail::tss_data_node>::iterator current =
thread_info->tss_data.begin();
if (current->second.func && (current->second.value != 0))
{
(*current->second.func)(current->second.value);
}
thread_info->tss_data.erase(current);
}
}
thread_info->self.reset();
}
}
}
#if defined BOOST_THREAD_PATCH
struct delete_current_thread_tls_key_on_dlclose_t
{
delete_current_thread_tls_key_on_dlclose_t()
{
}
~delete_current_thread_tls_key_on_dlclose_t()
{
const boost::once_flag uninitialized = BOOST_ONCE_INIT;
if (memcmp(¤t_thread_tls_init_flag, &uninitialized,
sizeof(boost::once_flag)))
{
void* data = pthread_getspecific(current_thread_tls_key);
if (data)
tls_destructor(data);
pthread_key_delete(current_thread_tls_key);
}
}
};
delete_current_thread_tls_key_on_dlclose_t
delete_current_thread_tls_key_on_dlclose;
#endif
void create_current_thread_tls_key()
{
BOOST_VERIFY(!pthread_key_create(¤t_thread_tls_key, &tls_destructor));
}
} // namespace
// Fetch the calling thread's thread_data_base pointer from TLS (may be 0
// for threads that never touched boost thread state).
boost::detail::thread_data_base* get_current_thread_data()
{
  // Ensure the TLS key exists before reading it.
  boost::call_once(current_thread_tls_init_flag,
                   &create_current_thread_tls_key);
  void* const raw = pthread_getspecific(current_thread_tls_key);
  return static_cast<boost::detail::thread_data_base*>(raw);
}
// Store new_data as the calling thread's thread_data_base in TLS.
void set_current_thread_data(detail::thread_data_base* new_data)
{
  // Ensure the TLS key exists before writing it.
  boost::call_once(current_thread_tls_init_flag,
                   &create_current_thread_tls_key);
  int const rc = pthread_setspecific(current_thread_tls_key, new_data);
  BOOST_VERIFY(!rc);
}
} // namespace detail
namespace
{
extern "C" {
// Entry point handed to pthread_create; param is the thread_data_base*.
// Runs the thread's function, then performs end-of-thread cleanup and
// wakes any joiners.
static void* thread_proxy(void* param)
{
  // boost::detail::thread_data_ptr thread_info =
  //     static_cast<boost::detail::thread_data_base*>(param)->self;
  // Take shared ownership, then drop the self-reference that kept the
  // record alive across the pthread_create handoff.
  boost::detail::thread_data_ptr thread_info =
      static_cast<boost::detail::thread_data_base*>(param)
          ->shared_from_this();
  thread_info->self.reset();
  detail::set_current_thread_data(thread_info.get());
#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
  BOOST_TRY
  {
#endif
    thread_info->run();
#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
  }
  BOOST_CATCH(thread_interrupted const&)
  {
    // A pending interruption escaping run() simply ends the thread.
  }
// Removed as it stops the debugger identifying the cause of the exception
// Unhandled exceptions still cause the application to terminate
//    BOOST_CATCH(...)
//    {
//      throw;
//
//      std::terminate();
//    }
  BOOST_CATCH_END
#endif
  // Run TSS/at-exit cleanup while the thread still exists, then clear TLS.
  detail::tls_destructor(thread_info.get());
  detail::set_current_thread_data(0);
  // Publish completion under the data mutex and wake joiners.
  boost::lock_guard<boost::mutex> lock(thread_info->data_mutex);
  thread_info->done = true;
  thread_info->done_condition.notify_all();
  return 0;
}
}
} // namespace
namespace detail
{
// thread_data record for a thread NOT started by boost::thread (e.g. the
// main thread or a native pthread) that nevertheless uses boost thread
// facilities such as TSS; created lazily by make_external_thread_data().
struct externally_launched_thread : detail::thread_data_base
{
  externally_launched_thread()
  {
#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
    // boost has no control point in this thread, so it cannot be
    // interrupted.
    interrupt_enabled = false;
#endif
  }
  ~externally_launched_thread()
  {
    // Such threads are expected never to register at-exit notifications
    // or async states; assert that and clear before the base dtor runs.
    BOOST_ASSERT(notify.empty());
    notify.clear();
    //#ifndef BOOST_NO_EXCEPTIONS
    BOOST_ASSERT(async_states_.empty());
    async_states_.clear();
    //#endif
  }
  void run()
  {
    // Nothing to run: the thread's code executes outside boost's control.
  }
  void notify_all_at_thread_exit(condition_variable*, mutex*)
  {
    // Ignored: no boost-managed exit point exists for this thread.
  }

private:
  // Non-copyable (declared, not defined).
  externally_launched_thread(externally_launched_thread&);
  void operator=(externally_launched_thread&);
};
// Create and register a thread_data record for the current (non-boost)
// thread, storing it in TLS; returns the new record.
thread_data_base* make_external_thread_data()
{
  thread_data_base* const record =
      detail::heap_new<externally_launched_thread>();
  // The record keeps itself alive via its own 'self' shared_ptr; that
  // reference is released by tls_destructor when the thread exits.
  record->self.reset(record);
  set_current_thread_data(record);
  return record;
}
// Return the current thread's data record, creating one on first use for
// threads that were not started through boost::thread.
thread_data_base* get_or_make_current_thread_data()
{
  thread_data_base* const existing = get_current_thread_data();
  if (existing)
  {
    return existing;
  }
  return make_external_thread_data();
}
} // namespace detail
// Default-construct a thread object representing no thread of execution
// (thread_info stays null, so joinable() reports false).
thread::thread() BOOST_NOEXCEPT
{
}
// Launch the underlying pthread with default attributes. On failure the
// self-reference is cleared again and false is returned.
bool thread::start_thread_noexcept()
{
  // Keep the data record alive until thread_proxy takes ownership.
  thread_info->self = thread_info;
  int const rc = pthread_create(&thread_info->thread_handle, 0,
                                &thread_proxy, thread_info.get());
  if (rc == 0)
  {
    return true;
  }
  thread_info->self.reset();
  return false;
}
// Launch the underlying pthread with caller-supplied attributes. Returns
// false (after clearing the self-reference) on any pthread error. If the
// attributes request a detached thread, boost-side ownership is dropped
// immediately.
bool thread::start_thread_noexcept(const attributes& attr)
{
  thread_info->self = thread_info; // keep the record alive for thread_proxy
  const attributes::native_handle_type* h = attr.native_handle();
  int res = pthread_create(&thread_info->thread_handle, h, &thread_proxy,
                           thread_info.get());
  if (res != 0)
  {
    thread_info->self.reset();
    return false;
  }
  int detached_state;
  res = pthread_attr_getdetachstate(h, &detached_state);
  if (res != 0)
  {
    thread_info->self.reset();
    return false;
  }
  if (PTHREAD_CREATE_DETACHED == detached_state)
  {
    // pthreads already created the thread detached, so no pthread_detach
    // call is made here — only boost-side bookkeeping is updated.
    detail::thread_data_ptr local_thread_info;
    thread_info.swap(local_thread_info);
    if (local_thread_info)
    {
      // lock_guard<mutex> lock(local_thread_info->data_mutex);
      if (!local_thread_info->join_started)
      {
        // BOOST_VERIFY(!pthread_detach(local_thread_info->thread_handle));
        local_thread_info->join_started = true;
        local_thread_info->joined = true;
      }
    }
  }
  return true;
}
// Accessor for the shared thread-data record (null when not joinable).
// BOOST_PREVENT_MACRO_SUBSTITUTION shields the name from macro expansion.
detail::thread_data_ptr thread::get_thread_info
    BOOST_PREVENT_MACRO_SUBSTITUTION() const
{
  return thread_info;
}
// Block until the thread finishes, then reap it. The first caller to
// arrive after completion performs the pthread_join; concurrent joiners
// wait on done_condition until the reaper publishes 'joined'.
// Returns false only when the object is not joinable.
bool thread::join_noexcept()
{
  detail::thread_data_ptr const local_thread_info = (get_thread_info)();
  if (local_thread_info)
  {
    bool do_join = false;
    {
      unique_lock<mutex> lock(local_thread_info->data_mutex);
      // Wait for thread_proxy to flag completion.
      while (!local_thread_info->done)
      {
        local_thread_info->done_condition.wait(lock);
      }
      do_join = !local_thread_info->join_started;
      if (do_join)
      {
        local_thread_info->join_started = true; // we become the reaper
      }
      else
      {
        // Another joiner is reaping; wait until it finishes.
        while (!local_thread_info->joined)
        {
          local_thread_info->done_condition.wait(lock);
        }
      }
    }
    if (do_join)
    {
      // pthread_join is performed outside the data mutex so other
      // waiters are not blocked; afterwards publish 'joined' and wake
      // them.
      void* result = 0;
      BOOST_VERIFY(
          !pthread_join(local_thread_info->thread_handle, &result));
      lock_guard<mutex> lock(local_thread_info->data_mutex);
      local_thread_info->joined = true;
      local_thread_info->done_condition.notify_all();
    }
    if (thread_info == local_thread_info)
    {
      thread_info.reset(); // no longer joinable
    }
    return true;
  }
  else
  {
    return false; // not joinable
  }
}
// Timed join: wait until 'timeout' for the thread to finish. Returns
// false only when the object is not joinable; otherwise returns true and
// sets 'res' to whether the join completed before the deadline. The
// reaping protocol mirrors join_noexcept().
bool thread::do_try_join_until_noexcept(
    detail::internal_platform_timepoint const& timeout, bool& res)
{
  detail::thread_data_ptr const local_thread_info = (get_thread_info)();
  if (local_thread_info)
  {
    bool do_join = false;
    {
      unique_lock<mutex> lock(local_thread_info->data_mutex);
      // Wait for completion, but only until the deadline.
      while (!local_thread_info->done)
      {
        if (!local_thread_info->done_condition.do_wait_until(lock,
                                                             timeout))
          break; // timeout occurred
      }
      if (!local_thread_info->done)
      {
        // Deadline hit before the thread finished.
        res = false;
        return true;
      }
      do_join = !local_thread_info->join_started;
      if (do_join)
      {
        local_thread_info->join_started = true; // we become the reaper
      }
      else
      {
        // Another joiner reaps; wait (untimed) for it to finish.
        while (!local_thread_info->joined)
        {
          local_thread_info->done_condition.wait(lock);
        }
      }
    }
    if (do_join)
    {
      // Reap outside the data mutex, then publish 'joined'.
      void* result = 0;
      BOOST_VERIFY(
          !pthread_join(local_thread_info->thread_handle, &result));
      lock_guard<mutex> lock(local_thread_info->data_mutex);
      local_thread_info->joined = true;
      local_thread_info->done_condition.notify_all();
    }
    if (thread_info == local_thread_info)
    {
      thread_info.reset(); // no longer joinable
    }
    res = true;
    return true;
  }
  else
  {
    return false; // not joinable
  }
}
// A thread object is joinable while it still owns a thread-data record.
bool thread::joinable() const BOOST_NOEXCEPT
{
  detail::thread_data_ptr const data = (get_thread_info)();
  return data ? true : false;
}
void thread::detach()
{
detail::thread_data_ptr local_thread_info;
thread_info.swap(local_thread_info);
if (local_thread_info)
{
lock_guard<mutex> lock(local_thread_info->data_mutex);
if (!local_thread_info->join_started)
{
BOOST_VERIFY(!pthread_detach(local_thread_info->thread_handle));
local_thread_info->join_started = true;
local_thread_info->joined = true;
}
}
}
namespace this_thread
{
namespace no_interruption_point
{
namespace hidden
{
// Sleep for the platform duration 'ts' WITHOUT creating an interruption
// point; zero or negative durations return immediately.
void BOOST_THREAD_DECL sleep_for_internal(const detail::platform_duration& ts)
{
  if (ts > detail::platform_duration::zero())
  {
    // Use pthread_delay_np or nanosleep whenever possible here in the
    // no_interruption_point namespace because they do not provide an
    // interruption point.
#if defined(BOOST_HAS_PTHREAD_DELAY_NP)
#if defined(__IBMCPP__) || defined(_AIX)
    // IBM's prototype takes a non-const timespec*, hence the const_cast.
    BOOST_VERIFY(!pthread_delay_np(const_cast<timespec*>(&ts.getTs())));
#else
    BOOST_VERIFY(!pthread_delay_np(&ts.getTs()));
#endif
#elif defined(BOOST_HAS_NANOSLEEP)
    nanosleep(&ts.getTs(), 0);
#else
    // This should never be reached due to BOOST_THREAD_SLEEP_FOR_IS_STEADY
#endif
  }
}
} // namespace hidden
} // namespace no_interruption_point
// Hint the scheduler to run another thread. Falls through the available
// primitives; the last resort is a zero-length timed wait on a private
// condition variable, which returns immediately but passes through the
// scheduler.
void yield() BOOST_NOEXCEPT
{
#if defined(BOOST_HAS_SCHED_YIELD)
  BOOST_VERIFY(!sched_yield());
#elif defined(BOOST_HAS_PTHREAD_YIELD)
  BOOST_VERIFY(!pthread_yield());
//# elif defined BOOST_THREAD_USES_DATETIME
//        ::boost::xtime xt;
//        xtime_get(&xt, TIME_UTC_);
//        sleep(xt);
//        sleep_for(chrono::milliseconds(0));
#else
  mutex mx;
  unique_lock<mutex> lock(mx);
  condition_variable cond;
  cond.do_wait_until(lock, detail::internal_platform_clock::now());
#endif
}
} // namespace this_thread
// Number of logical processors, or 0 when it cannot be determined; each
// branch uses whatever facility the target platform exposes.
unsigned thread::hardware_concurrency() BOOST_NOEXCEPT
{
#if defined(PTW32_VERSION) || defined(__hpux)
  return pthread_num_processors_np();
#elif defined(__APPLE__) || defined(__FreeBSD__)
  int count;
  size_t size = sizeof(count);
  // sysctlbyname returns non-zero on failure; report 0 in that case.
  return sysctlbyname("hw.ncpu", &count, &size, NULL, 0) ? 0 : count;
#elif defined(BOOST_HAS_UNISTD_H) && defined(_SC_NPROCESSORS_ONLN)
  int const count = sysconf(_SC_NPROCESSORS_ONLN);
  return (count > 0) ? count : 0;
#elif defined(__VXWORKS__)
  cpuset_t set = ::vxCpuEnabledGet();
#ifdef __DCC__
  // Diab compiler has no __builtin_popcount: count set bits by clearing
  // the lowest set bit each pass (Kernighan's method).
  int i;
  for (i = 0; set; ++i)
  {
    set &= set - 1;
  }
  return (i);
#else
  return (__builtin_popcount(set));
#endif
#elif defined(__GLIBC__)
  return get_nprocs();
#else
  return 0;
#endif
}
// Number of physical (non-hyperthreaded) cores. On Linux this parses
// /proc/cpuinfo and counts distinct (physical id, core id) pairs; any
// parsing problem falls back to hardware_concurrency().
unsigned thread::physical_concurrency() BOOST_NOEXCEPT
{
#ifdef __linux__
  try
  {
    using namespace std;

    ifstream proc_cpuinfo("/proc/cpuinfo");

    const string physical_id("physical id"), core_id("core id");

    typedef std::pair<unsigned, unsigned>
        core_entry; // [physical ID, core id]

    std::set<core_entry> cores;

    core_entry current_core_entry;

    string line;
    while (getline(proc_cpuinfo, line))
    {
      if (line.empty())
        continue;

      vector<string> key_val(2);
      boost::split(key_val, line, boost::is_any_of(":"));

      // Unexpected line shape: stop parsing and fall back.
      if (key_val.size() != 2)
        return hardware_concurrency();

      string key = key_val[0];
      string value = key_val[1];
      boost::trim(key);
      boost::trim(value);

      if (key == physical_id)
      {
        current_core_entry.first = boost::lexical_cast<unsigned>(value);
        continue;
      }

      if (key == core_id)
      {
        // Assumes "core id" follows "physical id" within each processor
        // stanza (standard /proc/cpuinfo layout); the set deduplicates
        // hyperthread siblings sharing the same pair.
        current_core_entry.second =
            boost::lexical_cast<unsigned>(value);
        cores.insert(current_core_entry);
        continue;
      }
    }
    // Fall back to hardware_concurrency() in case
    // /proc/cpuinfo is formatted differently than we expect.
    return cores.size() != 0 ? cores.size() : hardware_concurrency();
  }
  catch (...)
  {
    // Stream failure or lexical_cast error: best-effort fallback.
    return hardware_concurrency();
  }
#elif defined(__APPLE__)
  int count;
  size_t size = sizeof(count);
  return sysctlbyname("hw.physicalcpu", &count, &size, NULL, 0) ? 0 : count;
#else
  return hardware_concurrency();
#endif
}
#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
// Request interruption of this thread: sets the interrupt flag and, if
// the thread is currently blocked on an interruptible condition variable
// (current_cond non-null), broadcasts it so the waiter wakes up and can
// observe the request.
void thread::interrupt()
{
    detail::thread_data_ptr const local_thread_info = (get_thread_info)();
    if (local_thread_info)
    {
        lock_guard<mutex> lk(local_thread_info->data_mutex);
        local_thread_info->interrupt_requested = true;
        if (local_thread_info->current_cond)
        {
            // cond_mutex guards current_cond while we broadcast it.
            boost::pthread::pthread_mutex_scoped_lock internal_lock(
                local_thread_info->cond_mutex);
            BOOST_VERIFY(
                !pthread_cond_broadcast(local_thread_info->current_cond));
        }
    }
}
// True when an interruption has been requested for this thread and not
// yet consumed.  A thread object without thread data (not-a-thread,
// detached, or joined) reports false.
bool thread::interruption_requested() const BOOST_NOEXCEPT
{
    detail::thread_data_ptr const info = (get_thread_info)();
    if (!info)
    {
        return false;
    }
    lock_guard<mutex> guard(info->data_mutex);
    return info->interrupt_requested;
}
#endif
// Underlying pthread_t of this thread.  When the thread object does not
// own a running thread, a value-initialized pthread_t is returned.
thread::native_handle_type thread::native_handle()
{
    detail::thread_data_ptr const info = (get_thread_info)();
    if (!info)
    {
        return pthread_t();
    }
    lock_guard<mutex> guard(info->data_mutex);
    return info->thread_handle;
}
#if defined BOOST_THREAD_PROVIDES_INTERRUPTIONS
namespace this_thread
{
    // Throw boost::thread_interrupted if interruption is enabled for the
    // calling thread and a request is pending; the pending flag is
    // cleared before throwing.  No-op when exceptions are disabled.
    void interruption_point()
    {
#ifndef BOOST_NO_EXCEPTIONS
        boost::detail::thread_data_base* const thread_info =
            detail::get_current_thread_data();
        // NOTE(review): interrupt_enabled is read before taking
        // data_mutex — presumably only ever written by this thread, so
        // unsynchronized reads are safe here; confirm.
        if (thread_info && thread_info->interrupt_enabled)
        {
            lock_guard<mutex> lg(thread_info->data_mutex);
            if (thread_info->interrupt_requested)
            {
                thread_info->interrupt_requested = false;
                throw thread_interrupted();
            }
        }
#endif
    }
    // True when interruption is currently enabled for the calling thread.
    bool interruption_enabled() BOOST_NOEXCEPT
    {
        boost::detail::thread_data_base* const thread_info =
            detail::get_current_thread_data();
        return thread_info && thread_info->interrupt_enabled;
    }
    // True when an interruption request is pending for the calling thread.
    bool interruption_requested() BOOST_NOEXCEPT
    {
        boost::detail::thread_data_base* const thread_info =
            detail::get_current_thread_data();
        if (!thread_info)
        {
            return false;
        }
        else
        {
            lock_guard<mutex> lg(thread_info->data_mutex);
            return thread_info->interrupt_requested;
        }
    }
    // RAII guard: disables interruption for the calling thread, remembering
    // the previous state so the destructor can restore it.
    disable_interruption::disable_interruption() BOOST_NOEXCEPT
        : interruption_was_enabled(interruption_enabled())
    {
        if (interruption_was_enabled)
        {
            detail::get_current_thread_data()->interrupt_enabled = false;
        }
    }
    disable_interruption::~disable_interruption() BOOST_NOEXCEPT
    {
        // Thread data may be gone during shutdown, hence the null check.
        if (detail::get_current_thread_data())
        {
            detail::get_current_thread_data()->interrupt_enabled =
                interruption_was_enabled;
        }
    }
    // RAII guard: temporarily re-enables interruption inside a
    // disable_interruption scope (only if it was enabled originally).
    restore_interruption::restore_interruption(disable_interruption& d)
        BOOST_NOEXCEPT
    {
        if (d.interruption_was_enabled)
        {
            detail::get_current_thread_data()->interrupt_enabled = true;
        }
    }
    restore_interruption::~restore_interruption() BOOST_NOEXCEPT
    {
        // Re-disable; the enclosing disable_interruption restores the
        // original state when it is destroyed.
        if (detail::get_current_thread_data())
        {
            detail::get_current_thread_data()->interrupt_enabled = false;
        }
    }
} // namespace this_thread
#endif
namespace detail
{
    // Register `func` to run when the current thread finishes.  Nodes are
    // pushed onto a singly-linked list, so callbacks execute in reverse
    // order of registration.
    void add_thread_exit_function(thread_exit_function_base* func)
    {
        detail::thread_data_base* const current_thread_data(
            get_or_make_current_thread_data());
        thread_exit_callback_node* const new_node =
            heap_new<thread_exit_callback_node>(
                func, current_thread_data->thread_exit_callbacks);
        current_thread_data->thread_exit_callbacks = new_node;
    }
    // Return the thread-specific-storage node for `key`, or 0 when the
    // calling thread has no thread data or no entry for `key`.
    tss_data_node* find_tss_data(void const* key)
    {
        detail::thread_data_base* const current_thread_data(
            get_current_thread_data());
        if (current_thread_data)
        {
            std::map<void const*, tss_data_node>::iterator current_node =
                current_thread_data->tss_data.find(key);
            if (current_node != current_thread_data->tss_data.end())
            {
                // Fixed: source was corrupted to "¤t_node->second"
                // (an HTML-entity mangling of "&current_node").
                return &current_node->second;
            }
        }
        return 0;
    }
    // Value stored under `key` for the calling thread, or 0 when absent.
    void* get_tss_data(void const* key)
    {
        if (tss_data_node* const current_node = find_tss_data(key))
        {
            return current_node->value;
        }
        return 0;
    }
    // Insert a fresh TSS entry for `key`; callers ensure no entry exists.
    void add_new_tss_node(void const* key,
                          boost::shared_ptr<tss_cleanup_function> func,
                          void* tss_data)
    {
        detail::thread_data_base* const current_thread_data(
            get_or_make_current_thread_data());
        current_thread_data->tss_data.insert(
            std::make_pair(key, tss_data_node(func, tss_data)));
    }
    // Remove the TSS entry for `key` (no-op when absent).
    void erase_tss_node(void const* key)
    {
        detail::thread_data_base* const current_thread_data(
            get_current_thread_data());
        if (current_thread_data)
        {
            current_thread_data->tss_data.erase(key);
        }
    }
    // Replace the TSS value for `key`.  When `cleanup_existing` is set and
    // the old value is non-null, the old cleanup function is invoked on it
    // first.  A null `func` together with a null `tss_data` erases the
    // node; otherwise the node is updated or created.
    void set_tss_data(void const* key, boost::shared_ptr<tss_cleanup_function> func,
                      void* tss_data, bool cleanup_existing)
    {
        if (tss_data_node* const current_node = find_tss_data(key))
        {
            if (cleanup_existing && current_node->func &&
                (current_node->value != 0))
            {
                (*current_node->func)(current_node->value);
            }
            if (func || (tss_data != 0))
            {
                current_node->func = func;
                current_node->value = tss_data;
            }
            else
            {
                erase_tss_node(key);
            }
        }
        else if (func || (tss_data != 0))
        {
            add_new_tss_node(key, func, tss_data);
        }
    }
} // namespace detail
// Arrange for `cond` to be notified when the current thread exits.  The
// lock's mutex ownership is transferred (lk.release()) to the thread
// data, which unlocks it and notifies at exit time.
BOOST_THREAD_DECL void notify_all_at_thread_exit(condition_variable& cond,
                                                 unique_lock<mutex> lk)
{
    detail::thread_data_base* const current_thread_data(
        detail::get_current_thread_data());
    if (current_thread_data)
    {
        current_thread_data->notify_all_at_thread_exit(&cond, lk.release());
    }
}
//#ifndef BOOST_NO_EXCEPTIONS
namespace detail
{
    // Register the shared state `as` to be made ready when the current
    // thread exits.  NOTE(review): presumably backs the *_at_thread_exit
    // future machinery — confirm against thread_data_base.
    void BOOST_THREAD_DECL
    make_ready_at_thread_exit(shared_ptr<shared_state_base> as)
    {
        detail::thread_data_base* const current_thread_data(
            detail::get_current_thread_data());
        if (current_thread_data)
        {
            current_thread_data->make_ready_at_thread_exit(as);
        }
    }
} // namespace detail
//#endif
} // namespace boost
|
{"hexsha": "4ba36920d89a0c11961d75cab63020ead7c0e580", "size": 22640, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/boost/1.69.0-r0/boost_1_69_0/libs/thread/src/pthread/thread.cpp", "max_stars_repo_name": "sotaoverride/backup", "max_stars_repo_head_hexsha": "ca53a10b72295387ef4948a9289cb78ab70bc449", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/boost/1.69.0-r0/boost_1_69_0/libs/thread/src/pthread/thread.cpp", "max_issues_repo_name": "sotaoverride/backup", "max_issues_repo_head_hexsha": "ca53a10b72295387ef4948a9289cb78ab70bc449", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/src/debug/boost/1.69.0-r0/boost_1_69_0/libs/thread/src/pthread/thread.cpp", "max_forks_repo_name": "sotaoverride/backup", "max_forks_repo_head_hexsha": "ca53a10b72295387ef4948a9289cb78ab70bc449", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0167064439, "max_line_length": 83, "alphanum_fraction": 0.6304770318, "num_tokens": 4992}
|
import os
import shutil
import time
import numpy as np
import matplotlib.pyplot as plt
import SimpleITK as sitk
input_path = None  # Root of the ANHIR data tree to walk; must be set before run().
output_path = None  # NOTE(review): never read below — output paths come from `to_replace`; confirm intent.
original = "ANHIR_Data"  # Folder name in input paths that gets swapped out; replace if your tree differs.
to_replace = "ANHIR_MHA"  # Folder name substituted into output paths; replace if your tree differs.
def run():
    """Walk ``input_path``, converting .jpg/.png images to .mha and copying .csv files.

    Output locations mirror the input tree with the ``original`` path
    component replaced by ``to_replace``.  Directories are created on
    demand.  Progress is reported on stdout.
    """
    for root, dirs, files in os.walk(input_path):
        out_root = root.replace(original, to_replace)
        for file in files:
            lower = file.lower()
            # Match on the extension only; the original substring test
            # (".jpg" in name) also matched names like "x.jpg.bak.png"
            # and str.replace would rewrite ".jpg" anywhere in the name.
            if lower.endswith((".jpg", ".png")):
                input_file_path = os.path.join(root, file)
                base, _ext = os.path.splitext(file)
                output_file_path = os.path.join(out_root, base + ".mha")
                os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
                print("Current input path:", input_file_path)
                print("Current output path: ", output_file_path)
                b_t = time.time()
                image_jpg = sitk.ReadImage(input_file_path)
                e_t = time.time()
                print("JPG loading time: ", e_t - b_t)
                sitk.WriteImage(image_jpg, output_file_path)
                print("Done")
                print()
            elif lower.endswith(".csv"):
                # Landmark CSVs are copied through unchanged.
                input_file_path = os.path.join(root, file)
                output_file_path = os.path.join(out_root, file)
                print("Current input path:", input_file_path)
                print("Current output path: ", output_file_path)
                os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
                shutil.copy(input_file_path, output_file_path)
                print("Done")
                print()
# Script entry point: walk the ANHIR tree and convert/copy everything.
if __name__ == "__main__":
    run()
|
{"hexsha": "8f8addd4bfbd69d9b01c1c2218f8c9e25be7ecc8", "size": 2111, "ext": "py", "lang": "Python", "max_stars_repo_path": "parse_to_mha.py", "max_stars_repo_name": "lNefarin/DeepHistReg", "max_stars_repo_head_hexsha": "563dd606899b58e9d220133938d25fd293da15d0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-04-03T11:19:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-05T13:16:52.000Z", "max_issues_repo_path": "parse_to_mha.py", "max_issues_repo_name": "lNefarin/DeepHistReg", "max_issues_repo_head_hexsha": "563dd606899b58e9d220133938d25fd293da15d0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-10-14T08:03:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-30T03:06:58.000Z", "max_forks_repo_path": "parse_to_mha.py", "max_forks_repo_name": "MWod/DeepHistReg", "max_forks_repo_head_hexsha": "563dd606899b58e9d220133938d25fd293da15d0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-04-16T01:53:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-15T23:45:50.000Z", "avg_line_length": 39.0925925926, "max_line_length": 117, "alphanum_fraction": 0.5878730459, "include": true, "reason": "import numpy", "num_tokens": 441}
|
import game.world8.level7 -- hide
namespace mynat -- hide
/-
# Advanced Addition World
## Level 8: `eq_zero_of_add_right_eq_self`
The lemma you're about to prove will be useful when we want to prove that $\leq$ is antisymmetric.
There are some wrong paths that you can take with this one.
-/
/- Lemma
If $a$ and $b$ are natural numbers such that
$$ a + b = a, $$
then $b = 0$.
-/
-- Strategy: reduce the goal `b = 0` to `a + b = a + 0` via left
-- cancellation, then rewrite both sides to `a` using `h` and `add_zero`.
lemma eq_zero_of_add_right_eq_self {a b : mynat} : a + b = a → b = 0 :=
begin [nat_num_game]
  intro h, -- h : a + b = a
  apply add_left_cancel a, -- goal becomes a + b = a + 0
  rw h, -- goal: a = a + 0
  rw add_zero,
  refl,
end
end mynat -- hide
|
{"author": "ImperialCollegeLondon", "repo": "natural_number_game", "sha": "f29b6c2884299fc63fdfc81ae5d7daaa3219f9fd", "save_path": "github-repos/lean/ImperialCollegeLondon-natural_number_game", "path": "github-repos/lean/ImperialCollegeLondon-natural_number_game/natural_number_game-f29b6c2884299fc63fdfc81ae5d7daaa3219f9fd/src/game/world8/level8.lean"}
|
import torch
import torch.nn as nn
from torch.autograd import Function, Variable
import numpy as np
class GRL(Function):
    """Gradient Reversal Layer (Ganin & Lempitsky, DANN).

    Identity in the forward pass; multiplies the incoming gradient by
    ``-beta`` in the backward pass.

    Ported to the static-method ``torch.autograd.Function`` API: the
    legacy instance-based form (``GRL(beta)(x)``) was removed in modern
    PyTorch and raises a RuntimeError there.
    """

    @staticmethod
    def forward(ctx, x, beta=1):
        # Remember the scale factor for the backward pass.
        ctx.beta = beta
        # view_as(x) is an identity op that yields a fresh autograd node.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reverse and scale the gradient; beta itself gets no gradient.
        return grad_output * (-ctx.beta), None


def grad_reverse(x, beta=1):
    """Apply gradient reversal to ``x`` (identity forward, -beta*grad backward)."""
    return GRL.apply(x, beta)
class domain_img_cls(nn.Module):
    """Pixel-wise domain classifier head over backbone feature maps.

    Two 1x1 convolutions with a ReLU in between, a sigmoid output, and a
    gradient-reversal layer in front; the output is flattened to 1-D.
    """

    def __init__(self, net):
        super(domain_img_cls, self).__init__()
        # ResNet-101 feature maps carry 1024 channels, VGG-style ones 512.
        in_channels = 1024 if net == "res101" else 512
        self.conv_1 = nn.Conv2d(in_channels=in_channels, out_channels=512, kernel_size=1, padding=0, stride=1)
        self.relu = nn.ReLU()
        self.conv_2 = nn.Conv2d(in_channels=512, out_channels=1, kernel_size=1, padding=0, stride=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, beta=1):
        # Reverse gradients so the backbone is trained adversarially.
        out = grad_reverse(x, beta)
        out = self.relu(self.conv_1(out))
        out = self.sigmoid(self.conv_2(out))
        return out.view(-1)
class domain_inst_cls(nn.Module):
    """Instance-level domain classifier over ROI feature vectors.

    Three fully-connected layers (ReLU + dropout after the first two),
    sigmoid output, gradient reversal in front; output flattened to 1-D.
    """

    def __init__(self, net):
        super(domain_inst_cls, self).__init__()
        # ResNet-101 ROI features are 2048-d, VGG-style ones 4096-d.
        in_channels = 2048 if net == "res101" else 4096
        self.fc_1 = nn.Linear(in_channels, 1024)
        self.fc_2 = nn.Linear(1024, 1024)
        self.fc_3 = nn.Linear(1024, 1)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, beta=1):
        out = grad_reverse(x, beta)
        for fc in (self.fc_1, self.fc_2):
            out = self.dropout(self.relu(fc(out)))
        out = self.sigmoid(self.fc_3(out))
        return out.view(-1)
def domain_loss(logits, labels):
    """Binary cross-entropy domain-classification loss.

    Args:
        logits: 1-D tensor of sigmoid outputs in [0, 1].
        labels: 0 for source-domain samples, anything else for target.

    Returns:
        Scalar BCE loss against an all-zeros (source) or all-ones
        (target) label vector.
    """
    # Build the label tensor with the logits' own device and dtype
    # instead of hard-coding .cuda(), so the loss also runs on CPU.
    if labels == 0:
        target = torch.zeros_like(logits)
    else:
        target = torch.ones_like(logits)
    loss = nn.BCELoss()
    return loss(logits, target)
def consistency_loss(source_logits, target_logits):
    """Mean absolute deviation of target logits from the mean source logit.

    Encourages the image-level (source) and instance-level (target)
    domain predictions to agree.

    Args:
        source_logits: 1-D tensor of image-level domain predictions.
        target_logits: 1-D tensor of instance-level domain predictions.

    Returns:
        Scalar L1 loss between ``mean(source_logits)`` (broadcast) and
        ``target_logits``.
    """
    target = torch.zeros_like(target_logits)
    # Broadcast the mean source prediction; created with ones_like so it
    # lives on the logits' device instead of hard-coded .cuda().
    mean_source = source_logits.mean() * torch.ones_like(target_logits)
    loss = nn.L1Loss()
    return loss(mean_source - target_logits, target)
|
{"hexsha": "b3b46277ccbce2d7bfeb0527c1e863d502da71e5", "size": 2404, "ext": "py", "lang": "Python", "max_stars_repo_path": "domain_adapt.py", "max_stars_repo_name": "Flsahkong/seeDiffDA", "max_stars_repo_head_hexsha": "8c5219b1eb0edb69f24cff03dbbd1a66bdd6cc42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 62, "max_stars_repo_stars_event_min_datetime": "2018-10-27T02:44:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T12:58:52.000Z", "max_issues_repo_path": "domain_adapt.py", "max_issues_repo_name": "Flsahkong/seeDiffDA", "max_issues_repo_head_hexsha": "8c5219b1eb0edb69f24cff03dbbd1a66bdd6cc42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2018-11-16T11:22:40.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-07T06:08:10.000Z", "max_forks_repo_path": "domain_adapt.py", "max_forks_repo_name": "Flsahkong/seeDiffDA", "max_forks_repo_head_hexsha": "8c5219b1eb0edb69f24cff03dbbd1a66bdd6cc42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2018-10-27T02:44:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-12T08:49:17.000Z", "avg_line_length": 25.0416666667, "max_line_length": 103, "alphanum_fraction": 0.6888519135, "include": true, "reason": "import numpy", "num_tokens": 697}
|
import datetime
import networkx as nx
import numpy as np
import bisect, pickle
import random, argparse
import community
def sample_discrete(dist):
    """Draw one key from ``dist``, a mapping of value -> probability.

    Builds the cumulative distribution in insertion order and locates a
    uniform draw in it with binary search.  Probabilities are assumed to
    sum to (at least) the range of the draw.
    """
    index_to_value = {}
    cumulative = []
    running = 0
    for idx, (value, weight) in enumerate(dist.items()):
        index_to_value[idx] = value
        running += weight
        cumulative.append(running)
    draw = random.random()
    # bisect finds the first cumulative bound strictly above the draw.
    return index_to_value[bisect.bisect(cumulative, draw)]
def get_parameters(G, method="sbm"):
    """Fit block-model parameters to graph ``G``.

    Communities come from Louvain modularity maximisation
    (``community.best_partition``).  Returns a tuple ``(t, M, g)`` where
    ``M`` maps sorted community pairs to edge counts, ``g`` maps each
    community to its node list, and ``t`` maps nodes to sampling weights:
    degree-proportional when ``method != "sbm"``, uniform within each
    community otherwise.
    """
    part = community.best_partition(G)
    # Count edges between (unordered) community pairs.
    M = {}
    for e in G.edges():
        r = part[e[0]]
        s = part[e[1]]
        el = tuple(sorted([r, s]))
        M[el] = M.get(el, 0) + 1
    # Invert the partition: community id -> list of member nodes.
    g = {}
    for k, v in part.items():
        g[v] = g.get(v, []) + [k]
    k = G.degree()
    # Total degree per community (normaliser for the degree-corrected case).
    K = {}
    for c in g:
        K[c] = sum([k[i] for i in g[c]])
    if method != "sbm":
        # Degree-corrected: weight each node by degree / community degree.
        t = dict(k)
        for e in t:
            if t[e] != 0:
                t[e] = float(t[e])/K[part[e]]
    else:
        # Plain SBM: uniform weight within each community.
        t = part.copy()
        for c in g:
            node_list = g[c]
            prob = 1./len(node_list)
            for n in node_list:
                t[n] = prob
    return (t, M, g)
def generate_from_parameters(t, w, g):
    """Sample one graph from fitted block-model parameters.

    ``t`` maps nodes to within-community sampling weights, ``w`` maps
    community pairs to expected edge counts, and ``g`` maps communities
    to node lists (as produced by ``get_parameters``).
    """
    graph = nx.Graph()
    for block in g:
        graph.add_nodes_from(g[block])
    # Draw the realised number of edges for each community pair from a
    # Poisson centred on the fitted count.
    edge_counts = {pair: np.random.poisson(mean) for pair, mean in w.items()}
    # Attach each edge to endpoints sampled within the two communities.
    new_edges = []
    for pair, count in edge_counts.items():
        r, s = pair[0], pair[1]
        for _ in range(count):
            u = sample_discrete({node: t[node] for node in g[r]})
            v = sample_discrete({node: t[node] for node in g[s]})
            new_edges.append((u, v))
    graph.add_edges_from(new_edges)
    return graph
def generate(G, method, repeat=1):
    """Fit block-model parameters to ``G`` once, then sample ``repeat`` graphs."""
    params = get_parameters(G, method)
    return [generate_from_parameters(*params) for _ in range(repeat)]
|
{"hexsha": "46b9d86650b3f54484402afcbf772342398fcbac", "size": 1910, "ext": "py", "lang": "Python", "max_stars_repo_path": "GraphGenerator/models/sbm.py", "max_stars_repo_name": "xiangsheng1325/GraphGenerator", "max_stars_repo_head_hexsha": "0164c7c1ba14fface015425a619053585f471ef3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-03-12T08:33:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T06:31:29.000Z", "max_issues_repo_path": "GraphGenerator/models/sbm.py", "max_issues_repo_name": "xiangsheng1325/GraphGenerator", "max_issues_repo_head_hexsha": "0164c7c1ba14fface015425a619053585f471ef3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GraphGenerator/models/sbm.py", "max_forks_repo_name": "xiangsheng1325/GraphGenerator", "max_forks_repo_head_hexsha": "0164c7c1ba14fface015425a619053585f471ef3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-12T15:53:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-12T15:53:40.000Z", "avg_line_length": 21.2222222222, "max_line_length": 71, "alphanum_fraction": 0.5015706806, "include": true, "reason": "import numpy,import networkx", "num_tokens": 562}
|
#include <ros/ros.h>
#include <actionlib/client/simple_action_client.h>
#include <actionlib/client/terminal_state.h>
#include <actionlib_tutorials/AveragingAction.h>
#include <robot_arm_aansturing/positionAction.h>
#include <boost/thread.hpp>
// Pump ROS callbacks on a dedicated thread so main() can block on the
// action client without starving subscriptions/timers.
void spinThread()
{
    ros::spin();
}
// Test client for the "low_interface" action server: sends one joint-
// position goal and reports whether it finished within the timeout.
int main (int argc, char **argv)
{
    ros::init(argc, argv, "test_low_interface");
    actionlib::SimpleActionClient<robot_arm_aansturing::positionAction> ac("low_interface");
    boost::thread spin_thread(&spinThread);
    ROS_INFO("Waiting for action server to start.");
    ac.waitForServer();
    ROS_INFO("Action server started, sending goal.");
    // send a goal to the action
    robot_arm_aansturing::positionGoal goal;
    goal.time = 10000;  // NOTE(review): presumably milliseconds — confirm against the server.
    goal.angles = {0,-30,100,0,1,0};  // target joint angles, one per joint
    //goal.time = 4000;
    // goal.angles = {45,45,0,0,0,0};
    //4000
    //{0,-30,100,0,1,0};
    ac.sendGoal(goal);
    //wait for the action to return
    //bool finished_before_timeout = ac.waitForResult(ros::Duration(30.0));
    bool finished_before_timeout = ac.waitForResult(ros::Duration(2.0));
    if (finished_before_timeout)
    {
        actionlib::SimpleClientGoalState state = ac.getState();
        ROS_INFO("Action finished: %s",state.toString().c_str());
    }
    else
    {
        // Timed out: cancel the outstanding goal before shutting down.
        ROS_INFO("Action did not finish before the time out.");
        //ac.cancelAllGoals();
        ac.cancelGoal();
    }
    // shutdown the node and join the thread back before exiting
    ros::shutdown();
    spin_thread.join();
    //exit
    return 0;
}
|
{"hexsha": "ca622d07475e0e30ce110bbccb3ec6c7f03503b9", "size": 1459, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "application/src/robot_arm_aansturing/src/Client.cpp", "max_stars_repo_name": "nvg-ict/robot_simulation", "max_stars_repo_head_hexsha": "0d98b21f2c6805a3061c82ef984272baa3343a77", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "application/src/robot_arm_aansturing/src/Client.cpp", "max_issues_repo_name": "nvg-ict/robot_simulation", "max_issues_repo_head_hexsha": "0d98b21f2c6805a3061c82ef984272baa3343a77", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "application/src/robot_arm_aansturing/src/Client.cpp", "max_forks_repo_name": "nvg-ict/robot_simulation", "max_forks_repo_head_hexsha": "0d98b21f2c6805a3061c82ef984272baa3343a77", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5964912281, "max_line_length": 90, "alphanum_fraction": 0.7011651816, "num_tokens": 399}
|
#pragma once
#include <iostream>
#include <memory>
#include <unordered_set>
#include <unordered_map>
#include <boost/serialization/set.hpp>
#include <sdm/types.hpp>
#include <sdm/tools.hpp>
#include <sdm/public/boost_serializable.hpp>
namespace sdm
{
    /**
     * @class GraphNode
     *
     * @brief Node of a graph.
     *
     * GraphNode lets the user transition directly between nodes: each
     * node keeps a map from edge values to its successor nodes.
     * Successors are held as weak_ptr, so a node does not keep its
     * successors alive by itself.
     *
     * @tparam TNode the type of the data contained in each node
     * @tparam TEdge the type of the edges between two nodes
     *
     */
    template <typename TNode, typename TEdge>
    class GraphNode : public std::enable_shared_from_this<GraphNode<TNode, TEdge>>,
                      public BoostSerializable<GraphNode<TNode, TEdge>>
    {
    public:
        /**
         * @brief Default constructor object
         *
         */
        GraphNode();
        /**
         * @brief Construct a node holding the given data.
         *
         * @param data the value stored in the node
         */
        GraphNode(const TNode &data);
        /**
         * @fn ~GraphNode()
         * @brief Destructor of GraphNode.
         *
         */
        virtual ~GraphNode();
        /**
         * @brief Get the value of the current node
         *
         * @return a copy of the stored value
         */
        TNode getData() const;
        // NOTE(review): rvalue-reference return from a const member looks
        // suspicious (cannot move from const data_) — confirm intent in
        // graph_node.tpp before relying on it.
        TNode &&data() const;
        void setData(const TNode &data);
        /**
         * @brief Get the number of successors.
         */
        number getNumSuccessors() const;
        /**
         * @brief Get the successor following a given edge
         *
         * @param edge a specific edge
         * @return the address of the successor's node
         */
        std::shared_ptr<GraphNode> getSuccessor(const TEdge &edge) const;
        /**
         * @brief Add a successor node.
         *
         * @param edge_value the edge leading to the successor
         * @param node_value the successor node
         */
        void addSuccessor(const TEdge &edge_value, const std::shared_ptr<GraphNode> &node_value);
        std::string str() const;
        std::shared_ptr<GraphNode> getptr();
        template <class Archive>
        void serialize(Archive &archive, const unsigned int);
        friend std::ostream &operator<<(std::ostream &os, GraphNode &graph)
        {
            os << graph.str();
            return os;
        }
    public:
        /** @brief data of the current node */
        TNode data_;
        /** @brief The map from edge value to successor (non-owning weak_ptr) */
        std::unordered_map<TEdge, std::weak_ptr<GraphNode>> successors;
    };
} // namespace sdm
#include <sdm/utils/struct/graph_node.tpp>
|
{"hexsha": "c59a10aef715141358dc2704ee4fa5c4a7226f9f", "size": 2768, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/sdm/utils/struct/graph_node.hpp", "max_stars_repo_name": "SDMStudio/sdms", "max_stars_repo_head_hexsha": "43a86973081ffd86c091aed69b332f0087f59361", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sdm/utils/struct/graph_node.hpp", "max_issues_repo_name": "SDMStudio/sdms", "max_issues_repo_head_hexsha": "43a86973081ffd86c091aed69b332f0087f59361", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sdm/utils/struct/graph_node.hpp", "max_forks_repo_name": "SDMStudio/sdms", "max_forks_repo_head_hexsha": "43a86973081ffd86c091aed69b332f0087f59361", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6296296296, "max_line_length": 97, "alphanum_fraction": 0.5625, "num_tokens": 625}
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Waterwall section model test
main equations:
* Heat is given by fire-side boiler model
* Calculate pressure change due to friction and gravity
* Calculate slag layer wall temperature
* Consider a layer of metal and a layer of slag
Created on Thu Aug 24 2020 by Boiler Team (J. Ma, M. Zamarripa)
"""
import pytest
# Import Pyomo libraries
import pyomo.environ as pyo
from pyomo.network import Arc
# Import IDAES core
from idaes.core import FlowsheetBlock
from idaes.core.util.model_statistics import degrees_of_freedom
# Import Unit Model Modules
from idaes.models.properties import iapws95
from idaes.models_extra.power_generation.unit_models.waterwall_section import (
WaterwallSection,
)
from idaes.core.solvers import get_solver
# -----------------------------------------------------------------------------
# Get default solver for testing
solver = get_solver()
# -----------------------------------------------------------------------------
@pytest.fixture(scope="module")
def model():
    """Build a steady-state flowsheet of 10 WaterwallSection units chained by Arcs."""
    m = pyo.ConcreteModel()
    m.fs = FlowsheetBlock(default={"dynamic": False})
    m.fs.prop_water = iapws95.Iapws95ParameterBlock()
    n_waterwalls = 10  # number of stacked waterwall zones
    m.fs.ww_zones = pyo.RangeSet(n_waterwalls)
    m.fs.Waterwalls = WaterwallSection(
        m.fs.ww_zones,
        default={
            "dynamic": False,
            "has_holdup": False,
            "property_package": m.fs.prop_water,
            "has_heat_transfer": True,
            "has_pressure_change": True,
        },
    )

    def arc_rule(b, i):
        # Connect the outlet of zone i to the inlet of zone i+1.
        return {
            "source": m.fs.Waterwalls[i].outlet,
            "destination": m.fs.Waterwalls[i + 1].inlet,
        }

    m.arc = Arc(pyo.RangeSet(n_waterwalls - 1), rule=arc_rule)
    # Pyomo expands arcs writing constraints outlet unit i = inlet unit i+1
    pyo.TransformationFactory("network.expand_arcs").apply_to(m)
    return m
@pytest.mark.unit
def test_basic_build(model):
    """Build the waterwall flowsheet and check configuration and degrees of freedom."""
    assert degrees_of_freedom(model) == 103
    # Check unit config arguments
    assert len(model.fs.Waterwalls[1].config) == 10
    assert model.fs.Waterwalls[1].config.has_heat_transfer
    assert model.fs.Waterwalls[1].config.has_pressure_change
    assert model.fs.Waterwalls[1].config.property_package is model.fs.prop_water
# @pytest.mark.integration
# def test_units(model):
# assert_units_consistent(model)
@pytest.mark.skipif(not iapws95.iapws95_available(), reason="IAPWS not available")
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize_waterwall(model):
    """Fix geometry, heat duties, and inlet state, then initialize zones in sequence."""
    # fix inputs
    # geometry shared by all 10 waterwall sections
    for i in model.fs.ww_zones:
        model.fs.Waterwalls[i].tube_diameter.fix(0.047)
        model.fs.Waterwalls[i].tube_thickness.fix(0.00350)
        model.fs.Waterwalls[i].fin_thickness.fix(0.00455)
        model.fs.Waterwalls[i].slag_thickness[:].fix(0.001)
        model.fs.Waterwalls[i].fin_length.fix(0.0115)
        model.fs.Waterwalls[i].number_tubes.fix(610)
        model.fs.Waterwalls[i].fcorrection_dp.fix(1.2)
    # water wall section height (must be equal to the fire side model zones)
    model.fs.Waterwalls[1].height.fix(6.150)
    model.fs.Waterwalls[2].height.fix(3.150)
    model.fs.Waterwalls[3].height.fix(1.5)
    model.fs.Waterwalls[4].height.fix(1.450)
    model.fs.Waterwalls[5].height.fix(1.350)
    model.fs.Waterwalls[6].height.fix(1.250)
    model.fs.Waterwalls[7].height.fix(1.150)
    model.fs.Waterwalls[8].height.fix(1.350)
    model.fs.Waterwalls[9].height.fix(3.250)
    model.fs.Waterwalls[10].height.fix(3.450)
    # water wall section projected area (m2)
    model.fs.Waterwalls[1].projected_area.fix(320.0)
    model.fs.Waterwalls[2].projected_area.fix(150.3)
    model.fs.Waterwalls[3].projected_area.fix(70.8)
    model.fs.Waterwalls[4].projected_area.fix(70.0)
    model.fs.Waterwalls[5].projected_area.fix(58.6)
    model.fs.Waterwalls[6].projected_area.fix(58.6)
    model.fs.Waterwalls[7].projected_area.fix(50.1)
    model.fs.Waterwalls[8].projected_area.fix(65.6)
    model.fs.Waterwalls[9].projected_area.fix(145.6)
    model.fs.Waterwalls[10].projected_area.fix(165.5)
    # Heat loss to waterwall Q in W (would come from a fire-side model)
    model.fs.Waterwalls[1].heat_fireside[:].fix(2.3e7)
    model.fs.Waterwalls[2].heat_fireside[:].fix(1.5e7)
    model.fs.Waterwalls[3].heat_fireside[:].fix(6.9e6)
    model.fs.Waterwalls[4].heat_fireside[:].fix(1.2e7)
    model.fs.Waterwalls[5].heat_fireside[:].fix(1.2e7)
    model.fs.Waterwalls[6].heat_fireside[:].fix(1.2e7)
    model.fs.Waterwalls[7].heat_fireside[:].fix(1.1e7)
    model.fs.Waterwalls[8].heat_fireside[:].fix(9.9e6)
    model.fs.Waterwalls[9].heat_fireside[:].fix(2.2e7)
    model.fs.Waterwalls[10].heat_fireside[:].fix(1.9e7)
    optarg = {"tol": 1e-7, "linear_solver": "ma27", "max_iter": 40}
    solver.options = optarg
    # Set inlet and operating conditions, and some initial conditions.
    model.fs.Waterwalls[1].inlet.flow_mol[0].fix(150055.0)  # mol/s
    model.fs.Waterwalls[1].inlet.enth_mol[0].fix(31000.0)  # J/mol
    model.fs.Waterwalls[1].inlet.pressure[0].fix(1.750e7)  # Pa
    model.fs.Waterwalls[1].initialize(
        state_args={
            "flow_mol": model.fs.Waterwalls[1].inlet.flow_mol[0].value,
            "pressure": model.fs.Waterwalls[1].inlet.pressure[0].value,
            "enth_mol": model.fs.Waterwalls[1].inlet.enth_mol[0].value,
        },
        optarg=optarg,
    )
    # Initialize each downstream zone from its upstream neighbor's outlet.
    for i in range(2, 11):
        model.fs.Waterwalls[i].initialize(
            state_args={
                "flow_mol": model.fs.Waterwalls[i - 1].outlet.flow_mol[0].value,
                "pressure": model.fs.Waterwalls[i - 1].outlet.pressure[0].value,
                "enth_mol": model.fs.Waterwalls[i - 1].outlet.enth_mol[0].value,
            },
            optarg=optarg,
        )
    assert degrees_of_freedom(model) == 0  # only Waterwalls[1] input is fixed
@pytest.mark.skipif(not iapws95.iapws95_available(), reason="IAPWS not available")
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_waterwall(model):
    """Solve the initialized flowsheet and check energy/mass balances and outlet state."""
    results = solver.solve(model, tee=True)
    # test energy balance: total heat duty equals the enthalpy-flow change
    heat_duty = sum(
        pyo.value(model.fs.Waterwalls[i].heat_duty[0]) for i in range(1, 11)
    )
    Fhin = (
        model.fs.Waterwalls[1].control_volume.properties_in[0].flow_mol
        * model.fs.Waterwalls[1].control_volume.properties_in[0].enth_mol
    )
    Fhout = (
        model.fs.Waterwalls[10].control_volume.properties_out[0].flow_mol
        * model.fs.Waterwalls[10].control_volume.properties_out[0].enth_mol
    )
    assert pytest.approx(heat_duty, abs=1e-3) == pyo.value(Fhout - Fhin)
    # outlet vapor fraction and flow of the last zone
    assert pytest.approx(0.08353, abs=1e-3) == pyo.value(
        model.fs.Waterwalls[10].control_volume.properties_out[0].vapor_frac
    )
    assert pytest.approx(150055.0, abs=1e-3) == pyo.value(
        model.fs.Waterwalls[10].control_volume.properties_out[0].flow_mol
    )
    # test mass conservation across the whole chain
    assert pytest.approx(0, abs=1e-3) == pyo.value(
        model.fs.Waterwalls[10].control_volume.properties_out[0].flow_mol
        - model.fs.Waterwalls[1].control_volume.properties_in[0].flow_mol
    )
    assert pytest.approx(31951.65106, abs=1e-3) == pyo.value(
        model.fs.Waterwalls[10].control_volume.properties_out[0].enth_mol
    )
    assert degrees_of_freedom(model) == 0
    # Check for optimal solution
    assert pyo.check_optimal_termination(results)
|
{"hexsha": "ed3f8e20dc90bb630e99062f375acb4faeb543af", "size": 8288, "ext": "py", "lang": "Python", "max_stars_repo_path": "idaes/models_extra/power_generation/unit_models/tests/test_waterwall.py", "max_stars_repo_name": "OOAmusat/idaes-pse", "max_stars_repo_head_hexsha": "ae7d3bb8e372bc32822dcdcb75e9fd96b78da539", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "idaes/models_extra/power_generation/unit_models/tests/test_waterwall.py", "max_issues_repo_name": "OOAmusat/idaes-pse", "max_issues_repo_head_hexsha": "ae7d3bb8e372bc32822dcdcb75e9fd96b78da539", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "idaes/models_extra/power_generation/unit_models/tests/test_waterwall.py", "max_forks_repo_name": "OOAmusat/idaes-pse", "max_forks_repo_head_hexsha": "ae7d3bb8e372bc32822dcdcb75e9fd96b78da539", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-17T11:08:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T11:08:43.000Z", "avg_line_length": 38.3703703704, "max_line_length": 82, "alphanum_fraction": 0.6684362934, "include": true, "reason": "import pyomo,from pyomo", "num_tokens": 2314}
|
      subroutine cal_parm_read
      !! ~ ~ ~ PURPOSE ~ ~ ~
      !! Read the calibration parameter file (in_chg%cal_parms) and fill the
      !! cal_parms array with user-defined parameter change records.  If the
      !! file does not exist or is named "null", cal_parms is allocated empty
      !! and db_mx%cal_parms is left at 0.

      use input_file_module
      use maximum_data_module
      use calibration_data_module

      implicit none

      integer, dimension (:), allocatable :: elem_cnt  !         |declared but not used in this routine
      character (len=80) :: titldum                    !         |title of file
      character (len=80) :: header                     !         |header of file
      integer :: eof                                   !         |end of file flag (iostat result)
      integer :: imax                                  !         |determine max number for array (imax) and total number in file
      integer :: mchg_par                              !         |number of parameter change records declared in the file
      logical :: i_exist                               !         |check to determine if file exists
      integer :: i                                     !none     |counter

      imax = 0
      mchg_par = 0

      !!read parameter change values for calibration
      inquire (file=in_chg%cal_parms, exist=i_exist)
      if (.not. i_exist .or. in_chg%cal_parms == "null") then
        allocate (cal_parms(0:0))
      else
        !! the outer do/exit pair acts as a block: any premature end of
        !! file (iostat < 0) exits past the remaining reads
        !! NOTE(review): if eof is hit before the allocate below, cal_parms
        !! is never allocated on this path - confirm callers tolerate this
        do
          open (107,file=in_chg%cal_parms)
          read (107,*,iostat=eof) titldum
          if (eof < 0) exit
          read (107,*,iostat=eof) mchg_par
          if (eof < 0) exit
          allocate (cal_parms(mchg_par))
          read (107,*,iostat=eof) header
          if (eof < 0) exit
          !! one record per line, read directly into the derived type
          do i = 1, mchg_par
            read (107,*,iostat=eof) cal_parms(i)
            if (eof < 0) exit
          end do
          exit
        end do
      end if

      !! publish the record count for the rest of the model
      db_mx%cal_parms = mchg_par

      return
      end subroutine cal_parm_read
|
{"hexsha": "18984e5733eaee0604c08568dec3175e61d00478", "size": 1834, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/cal_parm_read.f90", "max_stars_repo_name": "mikiec84/delphi", "max_stars_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-03-03T11:57:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T21:19:54.000Z", "max_issues_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/cal_parm_read.f90", "max_issues_repo_name": "mikiec84/delphi", "max_issues_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 385, "max_issues_repo_issues_event_min_datetime": "2018-02-21T16:52:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T07:44:56.000Z", "max_forks_repo_path": "tests/data/program_analysis/multifile_multimod/mfmm_02/cal_parm_read.f90", "max_forks_repo_name": "mikiec84/delphi", "max_forks_repo_head_hexsha": "2e517f21e76e334c7dfb14325d25879ddf26d10d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2018-03-20T01:08:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-29T01:04:49.000Z", "avg_line_length": 34.6037735849, "max_line_length": 132, "alphanum_fraction": 0.4629225736, "num_tokens": 425}
|
import argparse
import random
import numpy as np
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import Dataset, DataLoader
import dataLoader as loader
import preprocessing as pproc
import models
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def argparser():
    """Build and parse command-line arguments for QA-matching training.

    Returns:
        argparse.Namespace with fields: filename, clean_drop, epochs,
        batch_size, learning_rate, hidden_size, n_layers, dropout_p.
    """
    p = argparse.ArgumentParser()
    p.add_argument('--filename',
                   default='Posts.xml')
    # BUGFIX: was `default=False` with no type/action, so any value given on
    # the command line (including the string "False") arrived as a *string*,
    # which is truthy.  `store_true` keeps the default False and turns the
    # option into a proper boolean switch.
    p.add_argument('--clean_drop',
                   action='store_true',
                   help='Drop if either title or body column is NaN')
    p.add_argument('--epochs',
                   type=int,
                   default=7,
                   help='Number of epochs to train. Default=7')
    p.add_argument('--batch_size',
                   type=int,
                   default=2,
                   help='Mini batch size for gradient descent. Default=2')
    p.add_argument('--learning_rate',
                   type=float,
                   default=.001,
                   help='Learning rate. Default=.001')
    p.add_argument('--hidden_size',
                   type=int,
                   default=64,
                   help='Hidden size of LSTM. Default=64')
    p.add_argument('--n_layers',
                   type=int,
                   default=1,
                   help='Number of layers. Default=1')
    p.add_argument('--dropout_p',
                   type=float,
                   default=.1,
                   help='Dropout ratio. Default=.1')
    config = p.parse_args()
    return config
class QuestionAnswerDataset(Dataset):
    """(question, answer, label) triples for question-answer matching.

    Only rows with posttypeid == 1 are kept.  Positive pairs get label 1;
    when ``negative_sampling`` is enabled, shuffled (question, answer)
    mismatches are appended with label 0.

    If the data that comes out of the pytorch dataset is unpadded (if samples
    are of different lengths), then pytorch dataloader returns a python list
    instead of pytorch tensor with samples truncated to minimum length of the
    sample in the batch — hence every sample is padded to ``maxlen`` here.
    """

    def __init__(self, input, tokenizer, maxlen=32, negative_sampling=True):
        # posttypeid == 1 selects question posts.
        self.input = input[input.posttypeid==1]
        self.tokenizer = tokenizer
        self.maxlen = maxlen
        self.questions = np.array([self.indexesFromSentences(sentences) for sentences in self.input.title])
        self.answers = np.array([self.indexesFromSentences(sentences) for sentences in self.input.body])
        self.labels = np.ones(len(self.questions))
        if negative_sampling:
            # Mismatched (question, answer) pairs serve as negative examples.
            self.n_questions, self.n_answers = self.negativeSampling(questions=self.questions,
                                                                     answers=self.answers)
            self.n_labels = np.zeros(len(self.n_questions))
            self.questions = np.concatenate((self.questions, self.n_questions))
            self.answers = np.concatenate((self.answers, self.n_answers))
            self.labels = np.concatenate((self.labels, self.n_labels))
        # Non-zero entries are real tokens (index 0 is padding).
        self.questions_len = np.array([np.count_nonzero(q) for q in self.questions])
        self.answers_len = np.array([np.count_nonzero(a) for a in self.answers])
        self.labels = self.labels.reshape(-1, 1)

    def __getitem__(self, idx):
        # |self.questions|, |self.answers| = (n_samples, maxlen)
        # |self.questions_len|, |self.answers_len| = (n_samples, 1)
        # |self.labels| = (n_samples, 1)
        return self.questions[idx], self.answers[idx], self.questions_len[idx], self.answers_len[idx], self.labels[idx]

    def __len__(self):
        return len(self.questions)

    def indexesFromSentences(self, sentences):
        """Convert (possibly multi-line) text to a padded vector of word indexes."""
        indexes = []
        for sentence in sentences.splitlines():
            # BUGFIX: was `tokenizer.normalizeString(...)`, silently using the
            # module-level global instead of the tokenizer this instance was
            # constructed with.
            sentence = self.tokenizer.normalizeString(sentence)
            indexes += [self.tokenizer.word2index[word] for word in sentence.split(' ')]
        padded_indexes = self.padSequences(indexes)  # padding
        return padded_indexes

    def padSequences(self, indexes):
        """Right-pad with 0 (or truncate) `indexes` to exactly `self.maxlen`."""
        padded = np.zeros((self.maxlen,), dtype=np.int64)
        if len(indexes) > self.maxlen:
            padded = indexes[:self.maxlen]
        else:
            padded[:len(indexes)] = indexes
        return padded

    def negativeSampling(self, questions, answers):
        """Return independently shuffled copies of `questions` and `answers`."""
        indexes = list(range(len(questions)))
        random.shuffle(indexes)
        negative_questions = [questions[i] for i in indexes]
        random.shuffle(indexes)
        negative_answers = [answers[i] for i in indexes]
        return np.array(negative_questions), np.array(negative_answers)
def sort_by_len(sequences, sequence_length):
    """Reorder a batch so its length tensor is in descending order.

    Returns the permuted `sequences` and the sorted lengths (as required
    by packed-sequence RNN utilities).
    """
    sorted_lengths, order = torch.sort(sequence_length, dim=0, descending=True)
    return sequences[order], sorted_lengths
def train_model(epoch):
    """Run one training epoch over the module-level `train_loader`.

    Uses the module-level `model`, `criterion`, `optimizer` and `device`;
    prints the epoch's mean loss and mean per-batch accuracy.
    """
    model.train()
    total_loss, total_acc = 0, 0
    for batch_idx, batch in enumerate(train_loader, 0):
        optimizer.zero_grad()
        questions, answers, q_lens, a_lens, targets = batch
        # |questions|, |answers| = (batch_size, maxlen)
        # |q_lens|, |a_lens| = (batch_size)
        # |targets| = (batch_size, 1)
        questions = questions.to(device)
        answers = answers.to(device)
        targets = targets.to(device=device, dtype=torch.float32)
        # Packed-sequence utilities need lengths in descending order.
        questions, q_lens = sort_by_len(questions, q_lens)
        answers, a_lens = sort_by_len(answers, a_lens)
        # Forward pass and loss; |predictions| = (batch_size, 1).
        predictions = model(questions, answers, q_lens, a_lens)
        batch_loss = criterion(predictions, targets)
        total_loss += batch_loss.item()
        # Per-batch accuracy of the rounded sigmoid output.
        total_acc += (torch.round(predictions) == targets).sum().item() / len(questions)
        batch_loss.backward()
        optimizer.step()
    print('====> Train Epoch: {} Average loss: {:.4f}\tAverage accuracy: {:.2f}%'.format(
        epoch, total_loss / len(train_loader), 100*total_acc/len(train_loader)))
def test_model():
    """Evaluate the module-level `model` on `test_loader` (no gradients).

    Prints average loss and accuracy over the test batches.

    NOTE(review): this function reads the *global* `epoch` set by the
    training loop in ``__main__`` — it only works when called from there;
    consider passing `epoch` explicitly.
    """
    model.eval()
    losses, accs = 0, 0
    with torch.no_grad():
        for i, data in enumerate(test_loader, 0):
            qus, ans, qus_len, ans_len, labels = data
            qus, ans, labels = qus.to(device), ans.to(device), labels.to(device=device, dtype=torch.float32)
            # sort by sequence length in descending order
            qus, qus_len = sort_by_len(qus, qus_len)
            ans, ans_len = sort_by_len(ans, ans_len)
            # get loss
            output = model(qus, ans, qus_len, ans_len)
            loss = criterion(output, labels)
            losses += loss.item()
            # get accuracy (rounded output vs. binary labels)
            acc = (torch.round(output) == labels).sum().item()/len(qus)
            accs += acc
    print('====> Test Epoch: {} Average loss: {:.4f}\tAverage accuracy: {:.2f}%\n'.format(
        epoch, losses / len(test_loader), 100*accs/len(test_loader)))
if __name__=='__main__':
    config = argparser()
    # data load: parse the StackExchange XML dump into a dataframe
    data = loader.to_dataframe('data/'+config.filename)
    # preprocessing: clean text, build tokenizer, embeddings and tf-idf
    # NOTE(review): `word_emb_matirx` is a typo for `word_emb_matrix`
    # (kept here; it is only a local name)
    data, word_emb_matirx, tfidf_matrix, tokenizer = pproc.preprocessing(input = data,
                                                                         clean_drop = config.clean_drop)
    # |data| = (n_pairs, n_columns) = (91,517, 5)
    # |word_emb_matrix| = (tokenizer.n_words, 100)
    # |tfidf_matrix| = (tokenizer.n_words, 1)
    # build dataset & data loader (90/10 train/test split)
    train, test = train_test_split(data, test_size=0.1)
    qa_train = QuestionAnswerDataset(train, tokenizer, negative_sampling=True)
    train_loader = DataLoader(dataset=qa_train, batch_size=config.batch_size, shuffle=True, num_workers=4)
    qa_test = QuestionAnswerDataset(test, tokenizer, negative_sampling=True)
    # single batch covering the entire test set
    test_loader = DataLoader(dataset=qa_test, batch_size=len(qa_test), shuffle=False, num_workers=4)
    print('Total batches - train: {}, test: {}'.format(len(train_loader), len(test_loader)))
    # build model
    model = models.Model(input_size = tokenizer.n_words,
                         embedding_size = 100,
                         hidden_size = config.hidden_size,
                         n_layers = config.n_layers,
                         dropout_p = config.dropout_p,
                         word_embedding_matrix = word_emb_matirx,
                         tfidf_matrix = tfidf_matrix
                         ).to(device)
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)
    print(model)
    # train: evaluate after every epoch (test_model reads the global `epoch`)
    for epoch in range(1, config.epochs+1):
        train_model(epoch)
        test_model()
    # save model weights
    torch.save(model.state_dict(), 'model.pth')
|
{"hexsha": "6d6b8b2e7bca83fcbee0eb3377c226f88daeec48", "size": 8838, "ext": "py", "lang": "Python", "max_stars_repo_path": "question-answer-matching/train.py", "max_stars_repo_name": "lijian10086/nlp-tutorial", "max_stars_repo_head_hexsha": "4b3773b13d975e7ca812dec6b9409e43dac44534", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1433, "max_stars_repo_stars_event_min_datetime": "2018-12-14T06:20:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:12:50.000Z", "max_issues_repo_path": "question-answer-matching/train.py", "max_issues_repo_name": "itsshaikaslam/nlp-tutorial-1", "max_issues_repo_head_hexsha": "6e4c74e103f4cdc5e0559d987ae6e41c40e17a5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2019-04-03T08:30:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-11T11:41:05.000Z", "max_forks_repo_path": "question-answer-matching/train.py", "max_forks_repo_name": "itsshaikaslam/nlp-tutorial-1", "max_forks_repo_head_hexsha": "6e4c74e103f4cdc5e0559d987ae6e41c40e17a5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 306, "max_forks_repo_forks_event_min_datetime": "2018-12-20T09:41:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T05:07:14.000Z", "avg_line_length": 39.28, "max_line_length": 119, "alphanum_fraction": 0.5870106359, "include": true, "reason": "import numpy", "num_tokens": 1977}
|
(* begin hide *)
From Coq Require Import
Arith
Lia.
(* Fake dependency due to [eutt_iter'']. To remove once the lemma is moved to the itree library *)
From Vellvm Require Import
Utils.Tactics
Utils.PropT.
From ITree Require Import
ITree
Eq.Eqit.
Set Implicit Arguments.
Set Strict Implicit.
Local Open Scope nat_scope.
Import ITreeNotations.
Local Open Scope itree.
(* end hide *)
(** * tfor: a bounded loop iterator for itrees *)
Section TFor.
(* The combinator [tfor body from to x0] simply sequences the computation:
x1 <- body from x0;;
x2 <- body (S from) x1;;
...
body (to - 1) x_{to-from-1}
i.e. it informally corresponds to:
acc <- x0;
for i = from, i < to, i++ do
acc <- body i acc
return acc
*)
  (* [tfor body from to x0] is implemented with [ITree.iter] over the pair
     (current index, accumulator): the iterator stops ([inr]) as soon as the
     index reaches [to], and otherwise runs [body] on the accumulator and
     steps the index ([inl]).  The upper bound [to] is excluded. *)
  Definition tfor {E X} (body : nat -> X -> itree E X) (from to : nat) : X -> itree E X :=
    fun x => ITree.iter (fun '(p,x) =>
                        if beq_nat p to
                        then Ret (inr x)
                        else
                          y <- (body p x);; Ret (inl (S p,y))
                      ) (from,x).
(* [tfor] excludes the upper bound, hence [tfor body k k] does nothing *)
  Lemma tfor_0: forall {E A} k (body : nat -> A -> itree E A) a0,
      tfor body k k a0 ≈ Ret a0.
  Proof.
    intros; unfold tfor; cbn.
    unfold iter, CategoryKleisli.Iter_Kleisli, Basics.iter, MonadIter_itree.
    (* one unfolding of the iterator: the exit test [k =? k] succeeds
       immediately, so the loop returns the initial accumulator *)
    rewrite unfold_iter, Nat.eqb_refl, bind_ret_l.
    reflexivity.
  Qed.
(* One step unrolling of the combinator *)
  Lemma tfor_unroll: forall {E A} i j (body : nat -> A -> itree E A) a0,
      i < j ->
      tfor body i j a0 ≈
      a <- body i a0;; tfor body (S i) j a.
  Proof.
    intros; unfold tfor; cbn.
    unfold iter, CategoryKleisli.Iter_Kleisli, Basics.iter, MonadIter_itree.
    (* unfold one iteration; [i < j] rules out the exit branch *)
    rewrite unfold_iter at 1.
    pose proof Nat.eqb_neq i j as [_ EQ].
    rewrite EQ; try lia.
    rewrite bind_bind.
    (* strip the [Tau] guard introduced by the iterator *)
    apply eutt_eq_bind; intros ?; rewrite bind_ret_l, tau_eutt.
    reflexivity.
  Qed.
(* We can always split a [tfor] into two sequential ones *)
  Lemma tfor_split: forall {E A} (body : nat -> A -> itree E A) i j k a0,
      i <= j ->
      j <= k ->
      tfor body i k a0 ≈
      a <- tfor body i j a0;; tfor body j k a.
  Proof.
    intros * LE1 LE2.
    (* induction on the length [j - i] of the first segment, generalizing
       over the starting accumulator and lower bound *)
    remember (j - i) as p; revert a0 i LE1 Heqp.
    induction p as [| p IH]; intros ? ? LE1 EQ.
    - (* empty first segment: [i = j], the prefix is a [Ret] *)
      replace i with j by lia.
      rewrite tfor_0, bind_ret_l.
      reflexivity.
    - (* non-empty: unroll one step on both sides and recurse *)
      rewrite tfor_unroll; [| lia].
      rewrite tfor_unroll; [| lia].
      rewrite bind_bind.
      apply eutt_eq_bind; intros ?.
      eapply IH; lia.
  Qed.
(* We can substitute bodies under a [tfor] assuming that they are equivalent at all points.
TODO: various stronger induction principles can be provided:
- obviously restricting the range of indexes to the one iterated over
- using a provided invariant constraining the accumulators.
*)
  Lemma eutt_tfor: forall {E A} (body body' : nat -> A -> itree E A) i j a0,
      (forall k i, body i k ≈ body' i k) ->
      (tfor body i j a0) ≈ (tfor body' i j a0).
  Proof.
    intros.
    unfold tfor, iter, CategoryKleisli.Iter_Kleisli, Basics.iter, MonadIter_itree.
    (* reduce to pointwise equivalence of the iterated bodies *)
    eapply KTreeFacts.eutt_iter.
    intros [].
    (* case split on the exit test; only the continue branch uses [H] *)
    break_match_goal.
    reflexivity.
    cbn.
    rewrite H.
    reflexivity.
  Qed.
(* If the body does not depend on the index, we can re-index freely the
bounds we iterate over *)
  Lemma tfor_ss : forall {E A} i j (body : nat -> A -> itree E A) a0,
      (forall x i j, body i x ≈ body j x) ->
      i <= j ->
      tfor body (S i) (S j) a0 ≈ tfor body i j a0.
  Proof.
    intros; unfold tfor; cbn.
    unfold iter, CategoryKleisli.Iter_Kleisli, Basics.iter, MonadIter_itree.
    (* relate the two loops via the invariant "left index = S right index,
       same accumulator", both as pre- and post-relation *)
    apply eutt_iter'' with (RI1:=fun '(a,x) '(b, y) => a = S b /\ x = y) (RI2:=fun '(a,x) '(b, y) => a = S b /\ x = y); auto.
    intros [j1 acc1] [j2 acc2] H1.
    destruct H1. subst.
    cbn.
    pose proof (Nat.eq_dec j2 j) as [EQ | NEQ].
    - (* both sides exit simultaneously *)
      subst. rewrite Nat.eqb_refl.
      apply eutt_Ret.
      constructor; auto.
    - (* both sides continue; the bodies agree by index-irrelevance [H] *)
      apply Nat.eqb_neq in NEQ.
      rewrite NEQ.
      eapply eutt_clo_bind.
      rewrite H.
      reflexivity.
      intros; subst.
      apply eutt_Ret.
      constructor; auto.
  Qed.
  (* Dependent variant of [tfor_ss]: the left body at index [S i] must agree
     with the right body at index [i]. *)
  Lemma tfor_ss_dep : forall {E A} i j (body body' : nat -> A -> itree E A) a0,
      (forall x i, body' (S i) x ≈ body i x) ->
      i <= j ->
      tfor body' (S i) (S j) a0 ≈ tfor body i j a0.
  Proof.
    intros; unfold tfor; cbn.
    unfold iter, CategoryKleisli.Iter_Kleisli, Basics.iter, MonadIter_itree.
    (* same shifted-index invariant as in [tfor_ss] *)
    eapply eutt_iter'' with
        (RI1:=fun '(a,x) '(b, y) => a = S b /\ x = y) (RI2:=fun '(a,x) '(b, y) => a = S b /\ x = y); auto.
    intros [j1 acc1] [j2 acc2] H1.
    destruct H1. subst.
    cbn.
    destruct (Nat.eq_dec j2 j) as [EQ | NEQ].
    - (* simultaneous exit *)
      subst. cbn. rewrite Nat.eqb_refl.
      apply eutt_Ret.
      constructor; auto.
    - (* continue: the shifted bodies agree by hypothesis [H] *)
      apply Nat.eqb_neq in NEQ.
      rewrite NEQ.
      eapply eutt_clo_bind.
      rewrite H.
      reflexivity.
      intros; subst.
      apply eutt_Ret.
      constructor; auto.
  Qed.
(* If the body does not depend on the index, we can unroll to the left
while chipping at the upper bound *)
  Lemma tfor_unroll_down: forall {E A} i j (body : nat -> A -> itree E A) a0,
      i < S j ->
      (forall x i j, body i x ≈ body j x) ->
      tfor body i (S j) a0 ≈
      a <- body i a0;; tfor body i j a.
  Proof.
    (* induction on the upper bound [j]; the body is index-irrelevant
       ([H0]), which lets us re-index with [tfor_ss] in the step case *)
    intros E A i j. revert E A i.
    induction j; intros E A i body a0 H H0.
    - (* j = 0: after one unrolled step both remainders are empty loops *)
      rewrite tfor_unroll; auto.
      eapply eutt_clo_bind; [reflexivity|].
      intros u1 u2 H1.
      subst.
      assert (i = 0) by lia; subst.
      repeat rewrite tfor_0.
      reflexivity.
    - rewrite tfor_unroll; auto.
      eapply eutt_clo_bind; [reflexivity|].
      intros u1 u2 H1.
      subst.
      assert (i <= S j) by lia.
      apply le_lt_or_eq in H1.
      destruct H1.
      + (* i < S j: apply the IH, then shift both bounds back with tfor_ss *)
        rewrite IHj; [|lia|auto].
        rewrite tfor_unroll; [|lia].
        eapply eutt_clo_bind.
        erewrite H0; reflexivity.
        intros; subst.
        do 2 (rewrite tfor_ss; auto); [|lia].
        reflexivity.
      + (* i = S j: both remainders are empty loops *)
        subst.
        repeat rewrite tfor_0.
        reflexivity.
  Qed.
End TFor.
|
{"author": "vellvm", "repo": "vellvm", "sha": "c9b7d6a283c4954b25bf7bcb1b1e54b92b62d699", "save_path": "github-repos/coq/vellvm-vellvm", "path": "github-repos/coq/vellvm-vellvm/vellvm-c9b7d6a283c4954b25bf7bcb1b1e54b92b62d699/src/coq/Utils/TFor.v"}
|
from numpy.random import seed
seed(8) #1
import tensorflow
tensorflow.random.set_seed(7)
# tensorflow.random.set_random_seed(7)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model ,load_model
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
from keras.applications.vgg16 import VGG16
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import models
from tensorflow.python.keras import layers
from tensorflow.keras import optimizers
from os import listdir
from multiprocessing import Process, Queue, Value, Manager
from ctypes import c_char_p
import socket
import pickle
import util
BASE_PATH = 'coronet_org_data/four_classes'
# data_list = listdir('/content/covid-19/four_classes/train')
data_list = listdir(BASE_PATH + '/train2')
# Delete some classes that may interfere
print(len(data_list))
DATASET_PATH = BASE_PATH + '/train2'
VALIDATION_PATH = BASE_PATH + '/val'
test_dir = BASE_PATH + '/test'
IMAGE_SIZE = (150, 150)
NUM_CLASSES = len(data_list)
BATCH_SIZE = 10 # try reducing batch size or freeze more layers if your GPU runs out of memory
NUM_EPOCHS = 10
LEARNING_RATE = 0.0001
TRAIN_BATCHES = 112
NUM_ITERS = 89600
MAX_WORKERS = 1
port = 17000
TCP_IP = '127.0.0.1'
gradients_q = None
scaler_q = None
ack_q = None
manager = None
global_var_vals = None
global_var_scalers = None
done_flag = None
grad_shape = [[3, 3, 3, 32], [32], [32], [3, 3, 32, 64], [64], [64], [3, 3, 64, 1], [1, 1, 64, 128], [128], [128], [3, 3, 128, 1], [1, 1, 128, 128], [128], [128], [1, 1, 64, 128], [128], [128], [3, 3, 128, 1], [1, 1, 128, 256], [256], [256], [3, 3, 256, 1], [1, 1, 256, 256], [256], [256], [1, 1, 128, 256], [256], [256], [3, 3, 256, 1], [1, 1, 256, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [1, 1, 256, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 728], [728], [728], [3, 3, 728, 1], [1, 1, 728, 1024], [1024], [1024], [1, 1, 728, 1024], [1024], [1024], [3, 3, 1024, 1], [1, 1, 1024, 1536], [1536], [1536], [3, 3, 1536, 1], [1, 1, 1536, 2048], [2048], [2048], [51200, 256], [256], [256, 4], [4]]
# Train datagen here is a preprocessor
# train_datagen = ImageDataGenerator(rescale=1. / 255,
# rotation_range=50,
# featurewise_center=True,
# featurewise_std_normalization=True,
# width_shift_range=0.2,
# height_shift_range=0.2,
# shear_range=0.25,
# zoom_range=0.1,
# zca_whitening=True,
# channel_shift_range=20,
# horizontal_flip=True,
# vertical_flip=True,
# validation_split=0.2,
# fill_mode='constant')
#
# # For multiclass use categorical n for binary use binary
# train_batches = train_datagen.flow_from_directory(DATASET_PATH,
# target_size=IMAGE_SIZE,
# shuffle=True,
# batch_size=BATCH_SIZE,
# subset="training",
# seed=42,
# class_mode="categorical"
# # For multiclass use categorical n for binary use binary
# )
#
# valid_batches = train_datagen.flow_from_directory(DATASET_PATH,
# target_size=IMAGE_SIZE,
# shuffle=True,
# batch_size=BATCH_SIZE,
# subset="validation",
# seed=42,
# class_mode="categorical"
# # For multiclass use categorical n for binary use binary
#
# )
def safe_recv(size, server_socket):
    """Receive exactly `size` bytes from `server_socket`.

    A single `recv` may deliver fewer bytes than requested, so this loops
    until the full payload has been accumulated.

    Args:
        size: exact number of bytes to read.
        server_socket: a connected socket-like object exposing `recv`.

    Returns:
        bytes: the `size` bytes received.

    Raises:
        ConnectionError: if the peer closes the connection before `size`
            bytes arrive.  (BUGFIX: the old code swallowed every error with
            a bare `except` and, because `recv` returns b'' on a closed
            connection, would spin in an infinite loop instead of failing.)
    """
    data = bytearray()
    while len(data) < size:
        chunk = server_socket.recv(size - len(data))
        if not chunk:
            # Empty chunk => peer closed the socket mid-message.
            raise ConnectionError(
                "socket closed before receiving %d bytes (got %d)" % (size, len(data)))
        data.extend(chunk)
    return bytes(data)
# def handleWorker(port, gradients_q, done_flag, global_var_vals, ack_q, n):
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# print("Connecting to port : ", port)
# s.bind((TCP_IP, port))
# s.listen(1)
# conn, addr = s.accept()
# print('Connection address:', addr)
# k = 0
# while 1:
# size = safe_recv(17, conn)
# size = pickle.loads(size)
# data = safe_recv(size, conn)
# # print("Received size: ", size)
# local_worker_gradients = pickle.loads(data)
# # print(local_worker_gradients)
# gradients_q.put(local_worker_gradients)
# while (done_flag.value == 0):
# pass
# size = len(global_var_vals.value)
# size = pickle.dumps(size, pickle.HIGHEST_PROTOCOL)
# conn.sendall(size)
# print("Send size: "+str(len(size)))
# conn.sendall(global_var_vals.value)
# ack_q.put(1)
# k = k + 1
# # print("Worker: ", k)
# if (k == (n + 1)):
# print("Working: Breaking from loop")
# break
# conn.close()
# s.close()
def handleWorker(port, gradients_q, scaler_q, done_flag, global_var_vals, global_var_scalers, ack_q, n):
    """Serve one worker connection for (n + 1) synchronization rounds.

    Protocol per round (all payloads pickled):
      1. receive a 17-byte pickled length, then that many bytes of
         ternarized gradients -> push onto `gradients_q`;
      2. receive a 15-byte pickled length, then the gradient scaler
         -> push onto `scaler_q`;
      3. busy-wait until the trainer raises `done_flag`, then send back
         the pickled length and bytes of the fresh weights
         (`global_var_vals.value`);
      4. put an ack on `ack_q` so the trainer can reset `done_flag`.

    NOTE(review): the 17/15 byte prefixes must match what the worker side
    sends for its pickled size integers — confirm against the worker code.
    NOTE(review): `global_var_scalers` is accepted but unused here (the
    scaler broadcast below is commented out).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("Connecting to port : ", port)
    s.bind((TCP_IP, port))
    s.listen(1)
    # Blocks until exactly one worker connects to this port.
    conn, addr = s.accept()
    print('Connection address:', addr)
    k = 0
    while 1:
        # size = safe_recv(17, conn)
        # size = pickle.loads(size)
        # data = safe_recv(size, conn)
        # print("Received size: ", size)
        # local_worker_gradients = pickle.loads(data)
        # print("Local worker gradient")
        # print(local_worker_gradients)
        # --- step 1: ternarized gradients ---
        size_2 = safe_recv(17, conn)
        size_2 = pickle.loads(size_2)
        data_2 = safe_recv(size_2, conn)
        local_worker_ternarized_gradients_2 = pickle.loads(data_2)
        # --- step 2: gradient scaler ---
        scaler_size = safe_recv(15, conn)
        scaler_size = pickle.loads(scaler_size)
        scaler_data = safe_recv(scaler_size, conn)
        scaler_data = pickle.loads(scaler_data)
        # gradients_q.put(local_worker_gradients)
        gradients_q.put(local_worker_ternarized_gradients_2)
        scaler_q.put(scaler_data)
        # --- step 3: spin until the trainer has published new weights ---
        while (done_flag.value == 0):
            pass
        size = len(global_var_vals.value)
        size = pickle.dumps(size, pickle.HIGHEST_PROTOCOL)
        conn.sendall(size)
        conn.sendall(global_var_vals.value)
        # size_var_scalers = len(global_var_scalers.value)
        # size_var_scalers = pickle.dumps(size_var_scalers, pickle.HIGHEST_PROTOCOL)
        # print(len(size_var_scalers))
        # conn.sendall(size_var_scalers)
        # conn.sendall(global_var_scalers.value)
        # --- step 4: acknowledge so the trainer can reset done_flag ---
        ack_q.put(1)
        k = k + 1
        # print("Worker: ", k)
        if (k == (n + 1)):
            print("Working: Breaking from loop")
            break
    conn.close()
    s.close()
# global gradients_q
# global global_var_vals
# global ack_q
# global done_flag
# port = int(sys.argv[1])
# MAX_WORKERS = int(sys.argv[2])
# port = 17000
# MAX_WORKERS = 1
from tensorflow.keras.applications import Xception
def train():
    """Parameter-server training loop.

    Builds an Xception-based 4-class classifier, then for every batch of
    every epoch:
      1. collects one set of ternarized gradients plus their mean scaler
         from each worker (pushed into `gradients_q`/`scaler_q` by
         `handleWorker`),
      2. de-quantizes them via `util.get_full_gradient_from_ternarized`
         and applies them with Adam,
      3. publishes the updated weights through `global_var_vals`, raises
         `done_flag`, and waits for one ack per worker on `ack_q` before
         resetting the flag.

    Relies on the module-level queues/flags created in ``__main__`` and on
    the constants LEARNING_RATE, NUM_EPOCHS, TRAIN_BATCHES, MAX_WORKERS
    and `grad_shape`.  (Dead commented-out local-training code removed.)
    """
    # Transfer-learned backbone; all layers remain trainable.
    conv_base = Xception(weights='imagenet',
                         include_top=False,
                         input_shape=(150, 150, 3))
    conv_base.trainable = True
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(4, activation='softmax'))
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
    for epoch in range(NUM_EPOCHS):
        print("###############################################")
        for tb in range(TRAIN_BATCHES):
            # 1-2. gather and apply one quantized gradient set per worker
            for w in range(MAX_WORKERS):
                tern_grads = gradients_q.get()
                mean_scaler = scaler_q.get()
                recv_grads = util.get_full_gradient_from_ternarized(mean_scaler, tern_grads, grad_shape)
                optimizer.apply_gradients(zip(recv_grads, model.trainable_weights))
            # 3. serialize the updated weights for broadcast to the workers
            weight_list = []
            for w in model.trainable_weights:
                weight_list.append(w.numpy())
            global_var_vals.value = pickle.dumps(weight_list, pickle.HIGHEST_PROTOCOL)
            done_flag.value = 1
            # wait for every worker to acknowledge receipt of the weights
            for i in range(MAX_WORKERS):
                val = ack_q.get()
            done_flag.value = 0
if __name__ == "__main__":
    # Shared queues between the trainer and the per-worker handler processes.
    gradients_q = Queue()
    ack_q = Queue()
    scaler_q = Queue()
    manager = Manager()
    # Pickled weight/scaler blobs shared with the handler processes.
    global_var_vals = manager.Value(c_char_p, "")
    global_var_scalers = manager.Value(c_char_p, "")
    # 1 while fresh weights are available for broadcast, else 0.
    done_flag = manager.Value('i', 0)
    # n = int(FLAGS.max_steps / MAX_WORKERS)
    # print("Each worker does ", n, " iterations")
    process_list = []
    # One handler process per worker, each on its own port (port+1, port+2, ...).
    for i in range(MAX_WORKERS):
        process_port = port + i + 1
        # p = Process(target=handleWorker, args=(process_port, gradients_q, done_flag, global_var_vals, ack_q, NUM_ITERS))
        p = Process(target=handleWorker, args=(
            process_port, gradients_q, scaler_q, done_flag, global_var_vals, global_var_scalers, ack_q, NUM_ITERS))
        p.start()
        process_list.append(p)
    # Run the parameter-server loop in this (main) process.
    train()
# import matplotlib.pyplot as plt
#
#
# def plot_acc_loss(result, epochs):
# acc = result.history['acc']
# loss = result.history['loss']
# val_acc = result.history['val_acc']
# val_loss = result.history['val_loss']
# plt.figure(figsize=(15, 5))
# plt.subplot(121)
# plt.plot(range(1, epochs), acc[1:], label='Train_acc')
# plt.plot(range(1, epochs), val_acc[1:], label='Val_acc')
# plt.title('Accuracy over ' + str(epochs) + ' Epochs', size=15)
# plt.legend()
# plt.grid(True)
# plt.subplot(122)
# plt.plot(range(1, epochs), loss[1:], label='Train_loss')
# plt.plot(range(1, epochs), val_loss[1:], label='Val_loss')
# plt.title('Loss over ' + str(epochs) + ' Epochs', size=15)
# plt.legend()
# plt.grid(True)
# plt.show()
#
#
# # plot_acc_loss(result, 80)
#
# # %%
#
# # Save the trained model and copy to drive
#
# model.save('4-class-Covid19-Mod-Xception.h5')
# # !cp /content/"4-class-Covid19-Mod-Xception.h5" /content/drive/"My Drive"/"Colab Notebooks"
#
#
# # %% md
#
# # ** Evaluate
# # using
# # evaluate
# # Generator **
#
# # %%
#
# # Create evaluate data generator from test set
# # Dont forget shuffle false
#
# test_datagen = ImageDataGenerator(rescale=1. / 255)
# # test_dir = '/content/COVID-19 Radiography Database'
# eval_generator = test_datagen.flow_from_directory(test_dir, target_size=IMAGE_SIZE, batch_size=1,
# shuffle=False, seed=42, class_mode="categorical")
# eval_generator.reset()
#
# # %%
#
# # Evalute the trained model on evaluate generator
# eval_generator.reset()
# x = model.evaluate_generator(eval_generator,
# steps=np.ceil(len(eval_generator)),
# use_multiprocessing=False,
# verbose=1,
# workers=1,
# )
#
# print('Test loss:', x[0])
# print('Test accuracy:', x[1])
#
# # Poor test accuracy due to the small dataset size
#
# # %% md
#
# # ** Create
# # DataGen
# # on
# # single
# # folder /
# #
# #
# # class and predict ! **
#
# # %%
#
#
# IMAGE_SIZE = (150, 150)
# test_datagen = ImageDataGenerator(rescale=1. / 255)
# test_dir = 'data/COVID-19 Radiography Database'
# pred_generator = test_datagen.flow_from_directory(
# test_dir, target_size=IMAGE_SIZE,
# batch_size=1,
# shuffle=False,
#
# seed=42,
#
# class_mode="categorical")
# pred_generator.reset()
#
# count = [0, 0, 0, 0]
#
# files = pred_generator.filenames
#
# for i in range(len(files)):
# x, y = pred_generator.next()
# img = x
# predict = model.predict(img)
#
# p = np.argmax(predict, axis=-1)
# print(str(p[0]) + " " + files[pred_generator.batch_index - 1])
# # print(predict)
# # p=model.predict_classes(img)
# count[p[0]] += 1
#
# # print(str(p[0])+" "+files[i])
# print(count)
#
# # %% md
#
# ### **`Predict Results using predict generator and evaluate the accuracy and Confusion matrix `**
#
# # %%
#
# from sklearn.metrics import confusion_matrix
# from sklearn.metrics import plot_confusion_matrix
# from sklearn.metrics import classification_report
#
# filenames = eval_generator.filenames
# nb_samples = len(filenames)
# eval_generator.reset()
# predict = model.predict_generator(eval_generator, steps=np.ceil(len(eval_generator)))
# pp = predict
# predict = np.argmax(predict, axis=-1)
# classes = eval_generator.classes[eval_generator.index_array]
# acc = sum(predict == classes) / len(predict)
# names = ["covid", "normal", "pneumonia_bac", "pneumonia_vir"]
# # print(confusion_matrix(classes,predict))
#
# font = {
# 'family': 'Times New Roman',
# 'size': 12
# }
# plt.rc('font', **font)
# cm = confusion_matrix(classes, predict)
# print(cm)
# print(classification_report(classes, predict))
# plt.imshow(cm, cmap=plt.cm.Blues)
# plt.xlabel('Predicted labels \nAccuracy: {:0.2f}'.format(acc * 100))
# plt.ylabel("True labels")
# plt.xticks(classes, [])
# plt.yticks(classes, [])
# plt.title('Confusion matrix ')
# plt.colorbar()
# plt.show()
# %% md
# ** Test
# Single
# image **
# %%
# import cv2
# from skimage import transform
#
# img_r = cv2.imread('/content/test/x.jpg')
#
# img1 = np.array(img_r).astype('float32') / 255
# img2 = transform.resize(img1, (150, 150, 3))
#
# img = np.expand_dims(img2, axis=0)
#
# r = model.predict(img)
#
# names = dict((v, k) for k, v in labels.items())
# index = np.argmax(r)
# name = names.get(index, "Unknown")
#
# p = round(r.max() * 100, 3) # to find maximum score
#
# scores = r
# print(scores)
#
# font = {
# 'family': 'Times New Roman',
# 'size': 9,
#
# }
# plt.rc('font', **font)
#
# # plt.title(name +" ("+ str(p)+")")
# plt.title(names[0] + " " + str(round(scores[0][0] * 100, 1)) + "%" + "\n" + names[1] + " " + str(
# round(scores[0][1] * 100, 1)) + "%" + "\n" + names[2] + " " + str(round(scores[0][2] * 100, 1)) + "%" + "\n" +
# names[3] + " " + str(round(scores[0][3] * 100, 1)) + "%")
#
# plt.imshow(img2)
#
# # %% md
#
# # ** Test
# # Whole
# # Folder **
#
# # %%
#
# import cv2
# from skimage import transform
#
# count = [0, 0, 0, 0]
# folder_name = "/content/drive/My Drive/Datasets/covid-19/covidnew/covid"
# files = os.listdir(folder_name)
# for i in range(len(files)):
# img_r = cv2.imread(folder_name + "/" + files[i])
#
# img = np.array(img_r).astype('float32') / 255
#
# img = transform.resize(img, (150, 150, 3))
# img = np.expand_dims(img, axis=0)
#
# predict = model.predict(img)
# p = np.argmax(predict, axis=-1)
# # p=model.predict_classes(img)
# count[p[0]] += 1
# print(str(p[0]) + " " + files[i])
#
# print()
#
# print(count)
|
{"hexsha": "6fa86d571f6303c40d5073cb60a17142b966384d", "size": 21245, "ext": "py", "lang": "Python", "max_stars_repo_path": "coronet/main2_lps_gradient_quantization.py", "max_stars_repo_name": "sabuj7177/CovidProject", "max_stars_repo_head_hexsha": "b4b7bcfa5ace165520507f489dc74da7b695e2f0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "coronet/main2_lps_gradient_quantization.py", "max_issues_repo_name": "sabuj7177/CovidProject", "max_issues_repo_head_hexsha": "b4b7bcfa5ace165520507f489dc74da7b695e2f0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "coronet/main2_lps_gradient_quantization.py", "max_forks_repo_name": "sabuj7177/CovidProject", "max_forks_repo_head_hexsha": "b4b7bcfa5ace165520507f489dc74da7b695e2f0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5862646566, "max_line_length": 1877, "alphanum_fraction": 0.5649329254, "include": true, "reason": "import numpy,from numpy", "num_tokens": 5801}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Dict, Iterable, List, Optional, Sized, Tuple, Union
import torch
from numpy import ndarray
from torch import Tensor
from combustion.util import check_dimension, check_dimension_match, check_is_array, check_is_tensor, check_ndim_match
from .convert import to_8bit
# Sentinel value marking padded (invalid) box rows in batched box targets.
PAD_VALUE: float = -1
try:
    import cv2
except ImportError:
    # BUG FIX: the original placeholder defined ``__getattr__``/``__setattr__``
    # as *instance* methods, which never fire for class-level attribute access
    # such as ``cv2.rectangle`` (and ``__setattr__`` was missing its value
    # parameter). Raising from a metaclass makes class attribute lookup fail
    # with the intended ImportError.
    class _MissingCV2Meta(type):
        def __getattr__(cls, attr):
            raise ImportError(
                "Bounding box visualization requires cv2. "
                "Please install combustion with 'vision' extras using "
                "pip install combustion [vision]"
            )

    class cv2(metaclass=_MissingCV2Meta):
        r"""Placeholder that raises :class:`ImportError` on any attribute access."""
# Default draw colors: red for box outlines, white for label text.
CORRECT_BOX_COLOR = BOX_COLOR = (255, 0, 0)
TEXT_COLOR = (255, 255, 255)
def visualize_bbox(
    img: Union[Tensor, ndarray],
    bbox: Union[Tensor, ndarray],
    classes: Optional[Union[Tensor, ndarray]] = None,
    scores: Optional[Union[Tensor, ndarray]] = None,
    class_names: Optional[Dict[int, str]] = None,
    box_color: Tuple[int, int, int] = (255, 0, 0),
    text_color: Tuple[int, int, int] = (255, 255, 255),
    label_alpha: float = 0.4,
    thickness: int = 2,
    pad_value: float = -1,
) -> Tensor:
    r"""Adds bounding box visualization to an input array
    Args:
        img (Tensor or numpy.ndarray):
            Background image
        bbox (Tensor or numpy.ndarray):
            Anchor boxes to draw
        classes (Tensor or numpy.ndarray, optional):
            Class labels associated with each anchor box
        scores (Tensor or numpy.ndarray, optional):
            Class scores associated with each anchor box
        class_names (dict, optional):
            Dictionary mapping integer class labels to string names.
            If ``label`` is supplied but ``class_names`` is not, integer
            class labels will be used.
        box_color (tuple of ints, optional):
            A 3-tuple giving the RGB color value to use for anchor boxes.
        text_color (tuple of ints, optional):
            A 3-tuple giving the RGB color value to use for labels.
        label_alpha (float, optional):
            Alpha to apply to the colored background for class labels.
            NOTE(review): this parameter is currently unused by the body;
            label backgrounds are drawn fully opaque -- confirm before relying on it.
        thickness (int, optional):
            Specifies the thickness of anchor boxes.
        pad_value (float, optional):
            The padding value used when batching boxes and labels
    Returns:
        :class:`torch.Tensor` or :class:`numpy.ndarray` (depending on what was given for `img`)
        with the output image.
    Shape:
        * ``img`` - :math:`(B, C, H, W)` or :math:`(C, H, W)` or :math:`(H, W)`
        * ``bbox`` - :math:`(B, N, 4)` or :math:`(N, 4)`
        * ``classes`` - :math:`(B, N, 1)` or :math:`(N, 1)`
        * ``scores`` - :math:`(B, N, S)` or :math:`(N, S)`
        * Output - same as ``img``
    """
    # type check
    check_is_array(img, "img")
    check_is_array(bbox, "bbox")
    classes is None or check_is_array(classes, "classes")
    scores is None or check_is_array(scores, "scores")
    # ndim check
    classes is None or check_ndim_match(bbox, classes, "bbox", "classes")
    scores is None or check_ndim_match(bbox, scores, "bbox", "scores")
    # more ndim checks, ensure if one input is batched then all inputs are batched
    boxes_batched = bbox.ndim == 3
    img_batched = img.ndim == 4
    if img_batched != boxes_batched:
        raise ValueError(f"Expected bbox.ndim == 3 when img.ndim == 4, found {bbox.shape}, {img.shape}")
    if boxes_batched:
        if classes is not None and classes.ndim != 3:
            raise ValueError(f"Expected classes.ndim == 3, found {classes.ndim}")
        if scores is not None and scores.ndim != 3:
            raise ValueError(f"Expected scores.ndim == 3, found {scores.ndim}")
    batched = img_batched
    # individual dimension checks
    check_dimension(bbox, dim=-1, size=4, name="bbox")
    classes is None or check_dimension(classes, dim=-1, size=1, name="classes")
    classes is None or check_dimension_match(bbox, classes, -2, "bbox", "classes")
    scores is None or check_dimension_match(bbox, scores, -2, "bbox", "scores")
    # NOTE(review): img_shape is computed but never used below
    img_shape = img.shape[-2:]
    # convert to cpu tensor
    img, bbox = (torch.as_tensor(x).cpu() for x in (img, bbox))
    classes, scores = (torch.as_tensor(x).cpu() if x is not None else None for x in (classes, scores))
    # add a channel dimension to img if not present
    if img.ndim == 2:
        img = img.view(1, *img.shape)
    # add a batch dimension if not present
    img = img.view(1, *img.shape) if not batched else img
    bbox = bbox.view(1, *bbox.shape) if not batched else bbox
    if classes is not None:
        classes = classes.view(1, *classes.shape) if not batched else classes
    if scores is not None:
        scores = scores.view(1, *scores.shape) if not batched else scores
    # convert image to 8-bit and convert to channels_last (cv2 expects HWC uint8)
    img_was_float = img.is_floating_point()
    img = to_8bit(img.clone(), per_channel=False, same_on_batch=True)
    img = img.permute(0, 2, 3, 1).contiguous()
    # convert img to color if grayscale input
    if img.shape[-1] == 1:
        img = img.repeat(1, 1, 1, 3)
    # get box indices that arent padding (a padded row has all coords == pad_value)
    valid_indices = (bbox == pad_value).all(dim=-1).logical_not_()
    # iterate over each batch, building bbox overlay
    result = []
    batch_size = bbox.shape[0]
    for batch_idx in range(batch_size):
        # if this fails with cryptic cv errors, ensure that img is contiguous
        result_i = img[batch_idx].numpy()
        # extract valid boxes for this batch
        valid_indices_i = valid_indices[batch_idx]
        bbox_i = bbox[batch_idx][valid_indices_i]
        scores_i = scores[batch_idx][valid_indices_i] if scores is not None else None
        classes_i = classes[batch_idx][valid_indices_i] if classes is not None else None
        # loop over each box and draw the annotation onto result_i
        for box_idx, coords in enumerate(bbox_i):
            # boxes are interpreted as (x_min, y_min, x_max, y_max) in pixel units
            x_min, y_min, x_max, y_max = [int(c) for c in coords]
            # draw the bounding box
            cv2.rectangle(  # type: ignore
                result_i,
                (x_min, y_min),
                (x_max, y_max),
                box_color,
                thickness,
            )
            # add class labels to bounding box text if present
            text = ""
            if classes_i is not None:
                cls = int(classes_i[box_idx].item())
                # use class integer -> str name if mapping is given, otherwise use class integer
                if class_names is not None:
                    text += class_names.get(cls, f"Class {cls}")
                else:
                    text += f"Class {cls}"
            # add score labels to bounding box text if present
            if scores_i is not None:
                if classes_i is not None:
                    text += " - "
                # add the first score
                text += f"{scores_i[box_idx, 0].item():0.3f}"
                # if multiple scores are present, add those
                num_scores = scores_i.shape[-1]
                for score_idx in range(1, num_scores):
                    text += f" | {scores_i[box_idx, score_idx].item():0.3f}"
            # tag bounding box with class name / integer id on a filled background
            ((text_width, text_height), _) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)  # type: ignore
            cv2.rectangle(  # type: ignore
                result_i, (x_min, y_min - int(1.3 * text_height)), (x_min + text_width, y_min), box_color, -1
            )  # type: ignore
            cv2.putText(  # type: ignore
                result_i,
                text,
                (x_min, y_min - int(0.3 * text_height)),
                cv2.FONT_HERSHEY_SIMPLEX,  # type: ignore
                0.35,
                text_color,
                lineType=cv2.LINE_AA,  # type: ignore
            )
        # permute back to channels first and add to result list
        result_i = torch.from_numpy(result_i).permute(-1, 0, 1)
        result.append(result_i)
    if len(result) > 1:
        result = torch.stack(result, dim=0)
    else:
        result = result[0]
    # ensure we include a batch dim if one was present in inputs
    if batched and batch_size == 1:
        result = result.view(1, *result.shape)
    # restore floating point range [0, 1] if the input was floating point
    if img_was_float:
        result = result.float().div_(255)
    return result
def split_box_target(target: Tensor, split_label: Union[bool, Iterable[int]] = False) -> Tuple[Tensor, ...]:
    r"""Separate a bounding box target tensor into coordinate and label views.
    .. note::
        The returned tensors are views of ``target``, not copies.
    Args:
        target (:class:`torch.Tensor`):
            The target to split.
        split_label (bool or iterable of ints):
            When ``True``, additionally split the label tensor into size-1
            pieces along the last dimension. When an iterable of ints, use
            each int as a split size along the last dimension.
    Shape:
        * ``target`` - :math:`(*, N, 4 + C)` where :math:`N` is the number of boxes and :math:`C` is the
          number of labels associated with each box.
        * Output - :math:`(*, N, 4)` and :math:`(*, N, C)`
    """
    check_is_tensor(target, "target")
    bbox = target[..., :4]
    label = target[..., 4:]
    # fast path: no further decomposition of the label tensor requested
    if isinstance(split_label, bool):
        if not split_label:
            return bbox, label
        sizes = [1] * label.shape[-1]
    else:
        sizes = split_label
    pieces = []
    start = 0
    for width in sizes:
        pieces.append(label[..., start:start + width])
        start = start + width
    assert not isinstance(sizes, Sized) or len(pieces) == len(sizes)
    return (bbox, *pieces)
def split_bbox_scores_class(target: Tensor, split_scores: Union[bool, Iterable[int]] = False) -> Tuple[Tensor, ...]:
    r"""Decompose a predicted box tensor into coordinates, score(s), and class id.
    The last dimension of ``target`` is expected to be ordered as ``bbox``,
    ``scores``, ``class``. Multiple score assignments per box are supported.
    .. note::
        The returned tensors are views of ``target``, not copies.
    Args:
        target (:class:`torch.Tensor`):
            The target to split.
        split_scores (bool or iterable of ints):
            When ``True``, additionally split the scores tensor into size-1
            pieces along the last dimension. When an iterable of ints, use
            each int as a split size along the last dimension.
    Shape:
        * ``target`` - :math:`(*, N, 4 + S + 1)` where :math:`N` is the number of boxes and :math:`S` is the
          number of scores associated with each box.
        * Output - :math:`(*, N, 4)`, :math:`(*, N, S)`, and :math:`(*, N, 1)`
    """
    check_is_tensor(target, "target")
    bbox = target[..., :4]
    scores = target[..., 4:-1]
    cls = target[..., -1:]
    # fast path: no further decomposition of the scores tensor requested
    if isinstance(split_scores, bool) and not split_scores:
        return bbox, scores, cls
    sizes = [1] * scores.shape[-1] if isinstance(split_scores, bool) else split_scores
    pieces = []
    start = 0
    for width in sizes:
        pieces.append(scores[..., start:start + width])
        start = start + width
    assert not isinstance(sizes, Sized) or len(pieces) == len(sizes)
    return (bbox, *pieces, cls)
def combine_box_target(bbox: Tensor, label: Tensor, *extra_labels) -> Tensor:
    r"""Concatenate box coordinates with one or more label tensors.
    Args:
        bbox (:class:`torch.Tensor`):
            Coordinates of the bounding box.
        label (:class:`torch.Tensor`):
            Label associated with each bounding box.
        *extra_labels (:class:`torch.Tensor`):
            Additional label tensors appended after ``label``.
    Shape:
        * ``bbox`` - :math:`(*, N, 4)`
        * ``label`` - :math:`(*, N, 1)`
        * Output - :math:`(*, N, 4 + 1)`
    """
    # validate inputs before concatenating
    check_is_tensor(bbox, "bbox")
    check_is_tensor(label, "label")
    if bbox.shape[-1] != 4:
        raise ValueError(f"Expected bbox.shape[-1] == 4, found shape {bbox.shape}")
    if bbox.shape[:-1] != label.shape[:-1]:
        raise ValueError(f"Expected bbox.shape[:-1] == label.shape[:-1], found shapes {bbox.shape}, {label.shape}")
    pieces = (bbox, label) + tuple(extra_labels)
    return torch.cat(pieces, dim=-1)
def combine_bbox_scores_class(bbox: Tensor, cls: Tensor, scores: Tensor, *extra_scores) -> Tensor:
    r"""Concatenate box coordinates, scores, and class ids into a single tensor.
    The combined tensor is ordered along the last dimension as ``bbox``,
    ``scores``, ``cls``.
    Args:
        bbox (:class:`torch.Tensor`):
            Coordinates of the bounding box.
        cls (:class:`torch.Tensor`):
            Class associated with each bounding box
        scores (:class:`torch.Tensor`):
            Probability associated with each bounding box.
        *extra_scores (:class:`torch.Tensor`):
            Additional scores to combine
    Shape:
        * ``bbox`` - :math:`(*, N, 4)`
        * ``scores`` - :math:`(*, N, S)`
        * ``cls`` - :math:`(*, N, 1)`
        * Output - :math:`(*, N, 4 + S + 1)`
    """
    # validate inputs before concatenating
    check_is_tensor(bbox, "bbox")
    check_is_tensor(scores, "scores")
    check_is_tensor(cls, "cls")
    if bbox.shape[-1] != 4:
        raise ValueError(f"Expected bbox.shape[-1] == 4, found shape {bbox.shape}")
    pieces = (bbox, scores) + tuple(extra_scores) + (cls,)
    return torch.cat(pieces, dim=-1)
def batch_box_target(target: List[Tensor], pad_value: float = -1) -> Tensor:
    r"""Combine multiple distinct bounding box targets into a single batched target.
    Args:
        target (list of :class:`torch.Tensor`):
            List of bounding box targets to combine
        pad_value (float):
            Padding value to use when creating the batch
    Raises:
        ValueError: If ``target`` is empty.
    Shape:
        * ``target`` - :math:`(*, N_i, 4 + C)` where :math:`N_i` is the number of boxes and :math:`C` is the
          number of labels associated with each box.
        * Output - :math:`(B, N, 4 + C)`
    """
    # ROBUSTNESS: the original raised an opaque IndexError on an empty list
    if not len(target):
        raise ValueError("Expected a non-empty list of targets")
    # the largest per-element box count determines the padded N dimension
    max_boxes = max(elem.shape[-2] for elem in target)
    # add a batch dim to any unbatched element
    target = [x.unsqueeze(0) if x.ndim < 3 else x for x in target]
    # total output batch size is the sum of the elements' batch sizes
    batch_size = sum(x.shape[0] for x in target)
    # create a padded output tensor of the correct shape and fill it
    output_shape = (batch_size, max_boxes, target[0].shape[-1])
    batch = torch.full(output_shape, pad_value, device=target[0].device, dtype=target[0].dtype)
    start = 0
    for elem in target:
        end = start + elem.shape[0]
        batch[start:end, : elem.shape[-2], :] = elem
        start = end
    return batch
def unbatch_box_target(target: Tensor, pad_value: float = -1) -> List[Tensor]:
    r"""Splits a padded batch of bounding box target tensors into a list of unpadded target tensors
    Args:
        target (:class:`torch.Tensor`):
            Batch of bounding box targets to split
        pad_value (float):
            Value used for padding when creating the batch
    Shape:
        * ``target`` - :math:`(B, N, 4 + C)` where :math:`N` is the number of boxes and :math:`C` is the
          number of labels associated with each box.
        * Output - list of :math:`(N_i, 4 + C)` tensors
    """
    check_is_tensor(target, "target")
    # a padded row has every element equal to pad_value
    padding_indices = (target == pad_value).all(dim=-1)
    non_padded_coords = (~padding_indices).nonzero(as_tuple=True)
    flat_result = target[non_padded_coords]
    # per-batch-element counts of real (non-padded) boxes
    # NOTE(review): a batch element consisting entirely of padding contributes no
    # count here, so the output may have fewer entries than the batch size --
    # confirm whether callers rely on a fixed-length result.
    split_size = non_padded_coords[0].unique(return_counts=True)[1]
    # BUG FIX: torch.split returns a tuple; convert so the return value matches
    # the List[Tensor] annotation.
    return list(torch.split(flat_result, split_size.tolist(), dim=0))
def flatten_box_target(target: Tensor, pad_value: float = -1) -> Tensor:
    r"""Flattens a batch of bounding box targets into one tensor, dropping padded rows.
    Args:
        target (:class:`torch.Tensor`):
            Batch of bounding box targets to flatten
        pad_value (float):
            Value used for padding when creating the batch
    Shape:
        * ``target`` - :math:`(B, N, 4 + C)` where :math:`N` is the number of boxes and :math:`C` is the
          number of labels associated with each box.
        * Output - :math:`(N_{tot}, 4 + C)`
    """
    check_is_tensor(target, "target")
    # a padded row has every element equal to pad_value
    is_padding = (target == pad_value).all(dim=-1)
    keep_coords = is_padding.logical_not().nonzero(as_tuple=True)
    return target[keep_coords]
def append_bbox_label(old_label: Tensor, new_label: Tensor) -> Tensor:
    r"""Adds a new label element to an existing bounding box target.
    The new label will be concatenated to the end of the last dimension in
    ``old_label``.
    Args:
        old_label (:class:`torch.Tensor`):
            The existing bounding box label
        new_label (:class:`torch.Tensor`):
            The label entry to add to ``old_label``
    Shape:
        * ``old_label`` - :math:`(*, N, C_0)`
        * ``new_label`` - :math:`(*, N, C_1)`
        * Output - :math:`(*, N, C_0 + C_1)`
    """
    check_is_tensor(old_label, "old_label")
    check_is_tensor(new_label, "new_label")
    check_ndim_match(old_label, new_label, "old_label", "new_label")
    if old_label.shape[:-1] != new_label.shape[:-1]:
        # BUG FIX: the original message was a plain (non-f) string, so the shape
        # placeholders were never interpolated into the error text.
        raise ValueError(
            f"expected old_label.shape[:-1] == new_label.shape[:-1], found {old_label.shape}, {new_label.shape}"
        )
    return torch.cat([old_label, new_label], dim=-1)
def filter_bbox_classes(
    target: Tensor, keep_classes: Iterable[int], pad_value: float = -1, return_inverse: bool = False
) -> Tensor:
    r"""Filters bounding boxes based on class, replacing bounding boxes that do not meet the criteria
    with padding. Integer class ids should be the last column in ``target``.
    Args:
        target (:class:`torch.Tensor`):
            Bounding boxes to filter
        keep_classes (iterable of ints):
            Integer id of the classes to keep
        pad_value (float):
            Value used to indicate padding in both input and output tensors
        return_inverse (bool):
            If ``True``, invert the filter: pad boxes whose class IS in
            ``keep_classes`` and keep the remaining boxes.
    Shape:
        * ``target`` - :math:`(*, N, C)`
        * Output - same as ``target``
    """
    check_is_tensor(target, "target")
    if not isinstance(keep_classes, Iterable):
        raise TypeError(f"Expected iterable for keep_classes, found {type(keep_classes)}")
    # ROBUSTNESS: materialize so generators pass the emptiness check correctly
    keep_classes = list(keep_classes)
    if not keep_classes:
        raise ValueError(f"Expected non-empty iterable for keep classes, found {keep_classes}")
    # boolean mask of box locations whose class id is in keep_classes
    locations_to_keep = torch.zeros_like(target[..., -1]).bool()
    for keep_cls in keep_classes:
        if not isinstance(keep_cls, (float, int)):
            raise TypeError(f"Expected int or float for keep_classes elements, found {type(keep_cls)}")
        locations_for_cls = torch.as_tensor(target[..., -1] == keep_cls)
        locations_to_keep.logical_or_(locations_for_cls)
    if return_inverse:
        locations_to_keep.logical_not_()
    target = target.clone()
    # BUG FIX: pad filtered-out boxes with pad_value rather than a hard-coded -1
    target[~locations_to_keep] = pad_value
    return target
|
{"hexsha": "a622ccf340973a90fd9ecdb054ffb03774cbe32a", "size": 20027, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/combustion/vision/bbox.py", "max_stars_repo_name": "TidalPaladin/combustion", "max_stars_repo_head_hexsha": "69b9a2b9baf90b81ed9098b4f0391f5c15efaee7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-07-09T22:18:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T03:47:19.000Z", "max_issues_repo_path": "src/combustion/vision/bbox.py", "max_issues_repo_name": "TidalPaladin/combustion", "max_issues_repo_head_hexsha": "69b9a2b9baf90b81ed9098b4f0391f5c15efaee7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2020-06-12T21:48:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-05T18:41:50.000Z", "max_forks_repo_path": "src/combustion/vision/bbox.py", "max_forks_repo_name": "TidalPaladin/combustion", "max_forks_repo_head_hexsha": "69b9a2b9baf90b81ed9098b4f0391f5c15efaee7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-15T20:06:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-15T20:06:16.000Z", "avg_line_length": 36.4127272727, "max_line_length": 117, "alphanum_fraction": 0.6125730264, "include": true, "reason": "from numpy", "num_tokens": 4956}
|
import numpy as np

# Integer wavelength grid: one sample per unit from 0 through 12000 inclusive.
WAVELENGTH = np.arange(12001)
# Lower and upper bounds of the usable wavelength window.
WMIN = 3825
WMAX = 9200
# Model spectrum file (presumably an M5 active-dwarf template -- confirm with usage).
MDSPEC = 'm5.active.ha.na.k.fits'
# Six evenly spaced airmass values from 1.05 to 1.2 inclusive.
AMS = np.linspace(1.05, 1.2, num=6, dtype=float)
|
{"hexsha": "49a29bd7084089ee20d8cda6e567a40e8737fb6e", "size": 158, "ext": "py", "lang": "Python", "max_stars_repo_path": "config.py", "max_stars_repo_name": "RileyWClarke/flarubin", "max_stars_repo_head_hexsha": "eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "config.py", "max_issues_repo_name": "RileyWClarke/flarubin", "max_issues_repo_head_hexsha": "eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "config.py", "max_forks_repo_name": "RileyWClarke/flarubin", "max_forks_repo_head_hexsha": "eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3333333333, "max_line_length": 47, "alphanum_fraction": 0.7025316456, "include": true, "reason": "import numpy", "num_tokens": 68}
|
import sys
import numpy as np
import os
import time
import math
from PIL import Image
import cv2
from datetime import datetime
from pynq import Xlnk
from pynq import Overlay
import pynq
import struct
from multiprocessing import Process, Pipe, Queue, Event, Manager
from IoU import Average_IoU
# Directory containing the input .jpg images (and ground_truth.txt).
IMG_DIR = '../sample1000/'
# Two anchor (w, h) prior pairs: anchor[0:2] and anchor[2:4] scale the decoded
# exp(w)/exp(h) box regressions (see compute_bounding_box).
anchor = [1.4940052559648322, 2.3598481287086823, 4.0113013115312155, 5.760873975661669]
# Dequantization multipliers for the raw int16 network outputs; entries 0-4 and
# 5-9 correspond to the two anchor branches -- TODO confirm against the HLS model.
bbox_m = [52., 48., 28., 30., 124., 47., 52., 23., 23., 125.]
qm = 131072.0  # quantization denominator (2**17)
w = 40  # detector output grid width (boxes are normalized by w)
h = 20  # detector output grid height (boxes are normalized by h)
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), elementwise for array inputs."""
    return np.reciprocal(1.0 + np.exp(-x))
def compute_bounding_box(batches, result_queue, output_queue):
    # Consumer process: pulls raw quantized accelerator outputs from
    # ``output_queue`` and appends decoded pixel-space [x1, x2, y1, y2] boxes
    # to ``result_queue`` (a Manager list shared across processes). One
    # (4, 16) output block plus its (start_index, image_names) batch record is
    # consumed per batch in ``batches``.
    bbox = np.zeros((4,4),dtype=np.float32)
    for i in range(len(batches)):
        # busy-wait until the inference loop has produced another output block
        while output_queue.empty():
            continue
        bbox_temp = output_queue.get()
        bbox_origin = bbox_temp[0]  # (4, 16) raw int box outputs, one row per image
        batch = bbox_temp[1]        # (start_index, image_names) for this batch
        for b in range(4):
            # column 4 selects which anchor branch's dequantization constants
            # and priors to use -- presumably an anchor/confidence indicator;
            # TODO confirm against the accelerator model.
            if(bbox_origin[b,4]>0):
                xs = bbox_origin[b][0]*bbox_m[5]/qm
                ys = bbox_origin[b][1]*bbox_m[6]/qm
                ws = bbox_origin[b][2]*bbox_m[7]/qm
                hs = bbox_origin[b][3]*bbox_m[8]/qm
                ws_inb = np.exp(ws)*anchor[2]
                hs_inb = np.exp(hs)*anchor[3]
            else:
                xs = bbox_origin[b][0]*bbox_m[0]/qm
                ys = bbox_origin[b][1]*bbox_m[1]/qm
                ws = bbox_origin[b][2]*bbox_m[2]/qm
                hs = bbox_origin[b][3]*bbox_m[3]/qm
                ws_inb = np.exp(ws)*anchor[0]
                hs_inb = np.exp(hs)*anchor[1]
            # columns 5 and 6 hold the grid cell offsets added to the decoded center
            xs_inb = sigmoid(xs) + bbox_origin[b][5]
            ys_inb = sigmoid(ys) + bbox_origin[b][6]
            # normalize center/size by the w x h output grid
            bcx = xs_inb/w
            bcy = ys_inb/h
            bw = ws_inb/w
            bh = hs_inb/h
            # convert center/size to corner coordinates in [0, 1]
            bbox[b][0] = bcx - bw/2.0
            bbox[b][1] = bcy - bh/2.0
            bbox[b][2] = bcx + bw/2.0
            bbox[b][3] = bcy + bh/2.0
            # scale to a 640x360 frame and clamp to valid pixel bounds
            x1 = int(round(bbox[b][0] * 640))
            y1 = int(round(bbox[b][1] * 360))
            x2 = int(round(bbox[b][2] * 640))
            y2 = int(round(bbox[b][3] * 360))
            x1 = np.clip(x1,1,640)
            y1 = np.clip(y1,1,360)
            x2 = np.clip(x2,1,360)
            y2 = np.clip(y2,1,360)
            print(batch[0]+b, batch[1][b], [x1, x2, y1, y2])
            result_queue.append([batch[0]+b, [x1, x2, y1, y2]])
def resort_result(result_queue):
    """Return the payloads of (index, payload) pairs, ordered by integer index."""
    ordered = sorted(result_queue, key=lambda item: int(item[0]))
    return [payload for _, payload in ordered]
# Get image name list
def get_image_names():
    """List the .jpg filenames in IMG_DIR, sorted numerically by basename."""
    jpg_names = [name for name in os.listdir(IMG_DIR) if name.endswith('.jpg')]
    return sorted(jpg_names, key=lambda name: int(name[:-4]))
# Number of images processed per accelerator invocation.
BATCH_SIZE = 4
def get_image_batch():
    """Group the image names into (start_index, names) batches of BATCH_SIZE."""
    names = get_image_names()
    return [(start, names[start:start + BATCH_SIZE])
            for start in range(0, len(names), BATCH_SIZE)]
def stitch(batches, image_queue, pid, num_process):
    """Preprocessing worker: load and resize one batch of images at a time.

    Each worker handles the batches whose index satisfies
    ``i % num_process == pid`` so the work is split evenly across workers.
    A loaded batch is pushed onto ``image_queue`` as ``(pixels, batch)`` where
    ``pixels`` is a (4, 160, 320, 4) uint8 RGBA array and ``batch`` is the
    original ``(start_index, image_names)`` record.
    """
    for i in range(len(batches)):
        if i % num_process != pid:
            continue  # this batch belongs to another worker
        # busy-wait while the bounded queue is full
        while image_queue.full():
            continue
        # BUG FIX: the original body referenced undefined names ``batch`` and
        # ``image_`` (NameError at runtime); index the current batch's file
        # names instead, and handle a short final batch gracefully.
        names = batches[i][1]
        image = np.zeros((4, 160, 320, 4), np.uint8)
        for slot, name in enumerate(names):
            image[slot] = np.array(Image.open(IMG_DIR + name).convert('RGBA').resize((320, 160)))
        image_queue.put((image, batches[i]))
# --- Hardware setup: allocate DMA-visible buffers and load the accelerator ---
xlnk = Xlnk()
xlnk.xlnk_reset()
# physically contiguous (CMA) buffers shared with the FPGA accelerator
img = xlnk.cma_array(shape=[4,160,320,4], dtype=np.uint8)  # input batch of 4 RGBA frames
fm = xlnk.cma_array(shape=(628115*32), dtype=np.uint8)  # scratch buffer for feature maps
weight = xlnk.cma_array(shape=(220672), dtype=np.int16)  # network weights
biasm = xlnk.cma_array(shape=(432*16), dtype=np.int16)  # biases; tail 4*16 entries receive box outputs
bbox = np.empty(64, dtype=np.int16)  # host-side copy of the raw box outputs
print("Allocating memory done")
parameter = np.fromfile('weight/SkyNet.bin', dtype=np.int16)
np.copyto(weight, parameter[0:220672])
# only the first 428*16 entries are loaded from file; the remainder is the
# accelerator's output region (read back below as biasm[428*16:])
np.copyto(biasm[0:428*16], parameter[220672:])
print("Parameters loading done")
overlay = Overlay('SkyNet.bit')
print("Bitstream loaded")
SkyNet = overlay.SkyNet
# register the physical address of each buffer with the accelerator's AXI registers
SkyNet.write(0x10, img.physical_address)
SkyNet.write(0x1c, fm.physical_address)
SkyNet.write(0x28, weight.physical_address)
SkyNet.write(0x34, biasm.physical_address)
rails = pynq.get_rails()
recorder = pynq.DataRecorder(rails['power1'].power)  # samples board power for energy accounting
# --- Pipeline setup: two loader workers feed the FPGA; one worker decodes boxes ---
batches = get_image_batch()
image_queue = Queue(1000)  # preprocessed image batches awaiting inference
output_queue = Queue(200)  # raw accelerator outputs awaiting decoding
result_queue = Manager().list()  # manager-backed list so results can be shared across processes
num_p = 2  # number of image-loading worker processes
p1 = Process(target=stitch, args=(batches, image_queue, 0, num_p))
p2 = Process(target=stitch, args=(batches, image_queue, 1, num_p))
p3 = Process(target=compute_bounding_box, args=(batches, result_queue, output_queue))
print("Start...")
start = time.time()
p1.start()  # launch the worker processes
p2.start()
p3.start()
# --- Main inference loop, run while sampling power every 50 ms ---
with recorder.record(0.05):
    for batch in batches:
        # busy-wait for the next preprocessed batch from the loader workers
        while image_queue.empty():
            continue
        img_ = image_queue.get()
        np.copyto(img, img_[0])
        SkyNet.write(0x00, 1)  # kick off the accelerator
        isready = SkyNet.read(0x00)
        while( isready == 1 ):
            # poll until the accelerator clears its start/busy bit
            isready = SkyNet.read(0x00)
        # raw box outputs are written into the tail of the bias buffer
        np.copyto(bbox, biasm[428*16:])
        output_queue.put([bbox.reshape(4,16), img_[1]])
p1.join()  # wait for all worker processes to finish before reporting
p2.join()
p3.join()
end = time.time()
total_time = end - start
total_energy = recorder.frame["power1_power"].mean()*total_time  # power x time = energy
print("Detection finished\n")
print('Total time: ' + str(total_time) + ' s')
print('Total energy: ' + str(total_energy) + ' J')
# reorder per-image results by image index and dump them for IoU scoring
result = resort_result(result_queue)
result_txt = open('predict.txt','w+')
for i in range(len(result)):
    result_txt.write(str(i).zfill(3)+'.jpg '+str(result[i])+'\n')
result_txt.close()
IoU = Average_IoU(IMG_DIR+'ground_truth.txt', 'predict.txt')
|
{"hexsha": "52cad1dfe95b83914ea95f4fed2a4c3946a33277", "size": 6081, "ext": "py", "lang": "Python", "max_stars_repo_path": "Deploy/run_multiprocess.py", "max_stars_repo_name": "guoyudejin/SkrSkr", "max_stars_repo_head_hexsha": "433aac617b549fcf387c8196c292e211eadffa71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Deploy/run_multiprocess.py", "max_issues_repo_name": "guoyudejin/SkrSkr", "max_issues_repo_head_hexsha": "433aac617b549fcf387c8196c292e211eadffa71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Deploy/run_multiprocess.py", "max_forks_repo_name": "guoyudejin/SkrSkr", "max_forks_repo_head_hexsha": "433aac617b549fcf387c8196c292e211eadffa71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2295081967, "max_line_length": 96, "alphanum_fraction": 0.6096036836, "include": true, "reason": "import numpy", "num_tokens": 1877}
|
{-# OPTIONS --without-K --rewriting #-}

open import HoTT

module Reflective where

-- A reflective subuniverse: a predicate P on types together with a
-- reflector R and a unit map η into the reflection.
record ReflectiveSubuniverse {ℓ} : Type (lsucc ℓ) where
  field
    P : Type ℓ → Type ℓ            -- membership predicate for the subuniverse
    R : Type ℓ → Type ℓ            -- reflector on types
    η : (A : Type ℓ) → A → R A     -- unit of the reflection
    -- replete : (A B : Type ℓ) → P A → A ≃ B → P B
{"hexsha": "536bb4c0703ef50be4afc2f2f6efddda5d2533ce", "size": 301, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "theorems/stash/modalities/Reflective.agda", "max_stars_repo_name": "timjb/HoTT-Agda", "max_stars_repo_head_hexsha": "66f800adef943afdf08c17b8ecfba67340fead5e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 294, "max_stars_repo_stars_event_min_datetime": "2015-01-09T16:23:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T13:54:45.000Z", "max_issues_repo_path": "theorems/stash/modalities/Reflective.agda", "max_issues_repo_name": "timjb/HoTT-Agda", "max_issues_repo_head_hexsha": "66f800adef943afdf08c17b8ecfba67340fead5e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2015-03-05T20:09:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-03T19:15:25.000Z", "max_forks_repo_path": "theorems/stash/modalities/Reflective.agda", "max_forks_repo_name": "timjb/HoTT-Agda", "max_forks_repo_head_hexsha": "66f800adef943afdf08c17b8ecfba67340fead5e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 50, "max_forks_repo_forks_event_min_datetime": "2015-01-10T01:48:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T03:03:25.000Z", "avg_line_length": 17.7058823529, "max_line_length": 57, "alphanum_fraction": 0.5282392027, "num_tokens": 107}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.