| text (string, lengths 0–1.25M) | meta (string, lengths 47–1.89k) |
|---|---|
import unittest
import numpy as np
from pydrake.autodiffutils import (InitializeAutoDiff, AutoDiffXd,
ExtractGradient)
from .normalization_derivatives import calc_normalization_derivatives
class TestNormalizationDerivatives(unittest.TestCase):
def test_normalization_derivatives(self):
q = np.array([1, 2, 3, 4], dtype=float)
q *= 0.99 / np.linalg.norm(q)
D = calc_normalization_derivatives(q)
q_ad = InitializeAutoDiff(q)
q_bar_ad = q_ad / np.linalg.norm(q_ad)
D_ad = ExtractGradient(q_bar_ad)
self.assertTrue(np.allclose(D, D_ad))
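# Analytic cross-check (a sketch, assuming calc_normalization_derivatives returns the
# Jacobian of q -> q / ||q||): that Jacobian is D = (I * ||q||^2 - q q^T) / ||q||^3,
# so the autodiff comparison above can also be reproduced without Drake.
def normalization_jacobian_reference(q):
    n = np.linalg.norm(q)
    return (np.eye(len(q)) * n ** 2 - np.outer(q, q)) / n ** 3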
|
{"hexsha": "6f44b9010e5ed69f76152799ed9bf74fae2ef77f", "size": 635, "ext": "py", "lang": "Python", "max_stars_repo_path": "qsim/test_normalization_derivatives.py", "max_stars_repo_name": "pangtao22/quasistatic_simulator", "max_stars_repo_head_hexsha": "7c6f99cc7237dd922f6eb0b54c580303e86b5223", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-07-15T03:58:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T17:26:16.000Z", "max_issues_repo_path": "qsim/test_normalization_derivatives.py", "max_issues_repo_name": "pangtao22/quasistatic_simulator", "max_issues_repo_head_hexsha": "7c6f99cc7237dd922f6eb0b54c580303e86b5223", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-08-16T22:27:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-07T18:06:07.000Z", "max_forks_repo_path": "qsim/test_normalization_derivatives.py", "max_forks_repo_name": "pangtao22/quasistatic_simulator", "max_forks_repo_head_hexsha": "7c6f99cc7237dd922f6eb0b54c580303e86b5223", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8636363636, "max_line_length": 69, "alphanum_fraction": 0.6787401575, "include": true, "reason": "import numpy", "num_tokens": 148}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm.relay.ir_pass import free_vars, free_type_vars, gradient
from tvm.relay import create_executor
from tvm.relay.prelude import Prelude
from tvm.relay.testing import add_nat_definitions, make_nat_expr
import numpy as np
def rand(dtype='float32', *shape):
return tvm.nd.array(np.random.rand(*shape).astype(dtype))
def test_id():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x)
back_func = relay.ir_pass.infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
ex = create_executor()
x = rand(dtype, *shape)
forward, (grad,) = ex.evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.asnumpy(), x.asnumpy())
tvm.testing.assert_allclose(grad.asnumpy(), np.ones_like(x.asnumpy()))
def test_add():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x + x)
back_func = relay.ir_pass.infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
ex = create_executor()
x = rand(dtype, *shape)
forward, (grad,) = ex.evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.asnumpy(), 2 * x.asnumpy())
tvm.testing.assert_allclose(grad.asnumpy(), 2 * np.ones_like(x.asnumpy()))
def test_temp_add():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = x + x
func = relay.Function([x], y + y)
back_func = relay.ir_pass.infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
ex = create_executor()
x = rand(dtype, *shape)
forward, (grad,) = ex.evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.asnumpy(), 4 * x.asnumpy())
tvm.testing.assert_allclose(grad.asnumpy(), 4 * np.ones_like(x.asnumpy()))
def test_sub():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x - x)
back_func = relay.ir_pass.infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
ex = create_executor()
x = rand(dtype, *shape)
forward, (grad,) = ex.evaluate(back_func)(x)
tvm.testing.assert_allclose(forward.asnumpy(), np.zeros_like(x.asnumpy()))
tvm.testing.assert_allclose(grad.asnumpy(), np.zeros_like(x.asnumpy()))
def test_broadcast_add():
shape1 = (3, 4, 1)
shape2 = (1, 5)
dtype = 'float32'
x_nd = rand(dtype, *shape1)
y_nd = rand(dtype, *shape2)
x_np = x_nd.asnumpy()
y_np = y_nd.asnumpy()
expected_forward = x_np + y_np
t1 = relay.TensorType(shape1, dtype)
t2 = relay.TensorType(shape2, dtype)
x = relay.var("x", t1)
y = relay.var("y", t2)
func = relay.Function([x, y], x + y)
full_func = relay.ir_pass.infer_type(gradient(func))
assert full_func.checked_type == relay.FuncType([t1, t2],
relay.TupleType([relay.TensorType(expected_forward.shape, dtype),
relay.TupleType([t1, t2])]))
ex = create_executor()
forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd)
tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
tvm.testing.assert_allclose(grad_x.asnumpy(),
np.ones_like(expected_forward).sum(axis=2, keepdims=True))
tvm.testing.assert_allclose(grad_y.asnumpy(),
np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
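# Reference sketch (plain numpy, not TVM API): for a broadcast add, the gradient with
# respect to each input is an all-ones tensor of the output shape, reduce-summed over
# the axes that input was broadcast along -- exactly what the two asserts above encode
# for shapes (3, 4, 1) and (1, 5).
def broadcast_add_grads_reference(x_np, y_np):
    ones = np.ones_like(x_np + y_np)
    grad_x = ones.sum(axis=2, keepdims=True)                       # x was broadcast along its last axis
    grad_y = ones.sum(axis=(0, 1), keepdims=True).squeeze(axis=0)  # y was broadcast along the leading axes
    return grad_x, grad_y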
def test_broadcast_subtract():
shape1 = (3, 4, 1)
shape2 = (1, 5)
dtype = 'float32'
x_nd = rand(dtype, *shape1)
y_nd = rand(dtype, *shape2)
x_np = x_nd.asnumpy()
y_np = y_nd.asnumpy()
expected_forward = x_np - y_np
t1 = relay.TensorType(shape1, dtype)
t2 = relay.TensorType(shape2, dtype)
x = relay.var("x", t1)
y = relay.var("y", t2)
func = relay.Function([x, y], x - y)
full_func = relay.ir_pass.infer_type(gradient(func))
assert full_func.checked_type == relay.FuncType([t1, t2],
relay.TupleType([relay.TensorType(expected_forward.shape, dtype),
relay.TupleType([t1, t2])]))
ex = create_executor()
forward, (grad_x, grad_y) = ex.evaluate(full_func)(x_nd, y_nd)
tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
tvm.testing.assert_allclose(grad_x.asnumpy(),
np.ones_like(expected_forward).sum(axis=2, keepdims=True))
tvm.testing.assert_allclose(grad_y.asnumpy(),
-np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0))
def test_tuple():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
y = relay.var("y", t)
z = relay.var("z", t)
tup = relay.Var("tup")
func = relay.Function([x, y, z], relay.Let(tup, relay.Tuple([x, y, z]),
relay.TupleGetItem(tup, 0) +
relay.TupleGetItem(tup, 1) -
relay.TupleGetItem(tup, 2)))
back_func = relay.ir_pass.infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t, t, t], relay.TupleType([t, relay.TupleType([t, t, t])]))
x_nd = rand(dtype, *shape)
y_nd = rand(dtype, *shape)
z_nd = rand(dtype, *shape)
x_np = x_nd.asnumpy()
y_np = y_nd.asnumpy()
z_np = z_nd.asnumpy()
expected_forward = x_np + y_np - z_np
ex = create_executor()
forward, (grad_x, grad_y, grad_z) = ex.evaluate(back_func)(x_nd, y_nd, z_nd)
tvm.testing.assert_allclose(forward.asnumpy(), expected_forward)
tvm.testing.assert_allclose(grad_x.asnumpy(), np.ones_like(grad_x.asnumpy()))
tvm.testing.assert_allclose(grad_y.asnumpy(), np.ones_like(grad_y.asnumpy()))
tvm.testing.assert_allclose(grad_z.asnumpy(), -1 * np.ones_like(grad_z.asnumpy()))
def test_pow():
mod = relay.Module()
p = Prelude(mod)
add_nat_definitions(p)
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
double = relay.Function([x], x + x)
i = relay.var("i", t)
func = relay.Function([i], p.nat_iterate(double, make_nat_expr(p, 3))(i))
back_func = relay.ir_pass.infer_type(gradient(func, mod=mod), mod=mod)
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
i_nd = rand(dtype, *shape)
ex = create_executor(mod=mod)
forward, (grad_i,) = ex.evaluate(back_func)(i_nd)
tvm.testing.assert_allclose(forward.asnumpy(), 8 * i_nd.asnumpy())
tvm.testing.assert_allclose(grad_i.asnumpy(), 8 * np.ones_like(grad_i.asnumpy()))
def test_ref():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
r = relay.Var("r")
u = relay.Var("u")
body = relay.RefRead(r)
body = relay.Let(u, relay.RefWrite(r, relay.RefRead(r) + relay.RefRead(r)), body)
body = relay.Let(r, relay.RefCreate(x), body)
func = relay.Function([x], body)
back_func = relay.ir_pass.infer_type(gradient(func))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x_nd = rand(dtype, *shape)
ex = create_executor()
forward, (grad_x,) = ex.evaluate(back_func)(x_nd)
tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
def test_square_second_order():
shape = (10, 10)
dtype = 'float32'
t = relay.TensorType(shape, dtype)
x = relay.var("x", t)
func = relay.Function([x], x * x)
back_func = relay.ir_pass.infer_type(gradient(func))
y = relay.var("y", t)
back_func_adjusted = relay.Function([y], relay.TupleGetItem(relay.TupleGetItem(back_func(y), 1), 0))
back_func_adjusted = relay.ir_pass.infer_type(back_func_adjusted)
back_back_func = relay.ir_pass.infer_type(gradient(back_func_adjusted))
assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
x_nd = rand(dtype, *shape)
ex = create_executor()
forward, (grad_x,) = ex.evaluate(back_back_func)(x_nd)
tvm.testing.assert_allclose(forward.asnumpy(), 2 * x_nd.asnumpy())
tvm.testing.assert_allclose(grad_x.asnumpy(), 2 * np.ones_like(grad_x.asnumpy()))
if __name__ == "__main__":
test_id()
test_add()
test_temp_add()
test_sub()
test_broadcast_add()
test_broadcast_subtract()
test_tuple()
test_pow()
test_ref()
test_square_second_order()
|
{"hexsha": "d99bee58b99bbe350ee8d7e516301f4750753537", "size": 9932, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/python/relay/test_pass_gradient.py", "max_stars_repo_name": "ttyang1018/tvm", "max_stars_repo_head_hexsha": "ade26cacd0767cf14dc053ac4d7778859f83a32c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-11-20T03:43:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-25T02:32:17.000Z", "max_issues_repo_path": "tests/python/relay/test_pass_gradient.py", "max_issues_repo_name": "ttyang1018/tvm", "max_issues_repo_head_hexsha": "ade26cacd0767cf14dc053ac4d7778859f83a32c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-06-27T08:05:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-09T18:59:11.000Z", "max_forks_repo_path": "tests/python/relay/test_pass_gradient.py", "max_forks_repo_name": "ttyang1018/tvm", "max_forks_repo_head_hexsha": "ade26cacd0767cf14dc053ac4d7778859f83a32c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-03-18T10:14:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-01T12:09:20.000Z", "avg_line_length": 40.5387755102, "max_line_length": 117, "alphanum_fraction": 0.6415626259, "include": true, "reason": "import numpy", "num_tokens": 2681}
|
import arviz as az
import numpy as np
import os
import pystan
import matplotlib.pyplot as plt
from lib.stan_utils import compile_model, get_pickle_filename, get_model_code
from lib.drug_classes import DRUG_CLASSES
from prepare_data import get_formatted_data, add_rank_column, aggregate_treatment_arms, get_variability_effect_sizes
# set path to stan model files
dir_name = os.path.dirname(os.path.abspath(__file__))
parent_dir_name = os.path.dirname(dir_name)
stan_model_path = os.path.join(dir_name, 'stan_models')
def get_data_dict(df, effect_statistic):
return {
'N': len(df.study_id.unique()),
'Y': df.groupby(['study_id']).agg({effect_statistic: 'first'}).reset_index()[effect_statistic].values,
'Y_meas': df.groupby(['study_id']).agg({effect_statistic: 'first'}).reset_index()[effect_statistic].values,
'X_meas': df.groupby(['study_id']).agg({'lnRR': 'first'}).reset_index()['lnRR'].values,
'SD_Y': np.sqrt(df.groupby(['study_id']).agg(
{f'var_{effect_statistic}': 'first'}).reset_index()[f'var_{effect_statistic}'].values),
'SD_X': np.sqrt(df.groupby(['study_id']).agg(
{'var_lnRR': 'first'}).reset_index()['var_lnRR'].values),
'run_estimation': 1
}
def get_subgroup_models():
df = get_formatted_data()
# drug class subgroup analysis
model_res_dict = {}
for drug_class in DRUG_CLASSES:
study_ids = df.query(f'drug_class == "{drug_class}"').study_id.unique()
df_sub = df[(df.study_id.isin(study_ids)) & (df.drug_class.isin([drug_class, 'placebo']))].copy()
placebo_controlled_study_ids = set(df_sub.query('is_active == 1')['study_id']) \
.intersection(df_sub.query('is_active == 0')['study_id'])
df_sub = df_sub[df_sub.study_id.isin(placebo_controlled_study_ids)]
for column in ['study_id', 'scale', 'drug_class']:
df_sub = add_rank_column(df_sub, column)
df_sub = aggregate_treatment_arms(df_sub)
df_sub = get_variability_effect_sizes(df_sub)
model = 'remr'
stan_model = compile_model(
os.path.join(stan_model_path, f'{model}.stan'),
model_name=model
)
data_dict = get_data_dict(df_sub, 'lnVR')
fit = stan_model.sampling(
data=data_dict,
iter=4000,
warmup=1000,
chains=3,
control={'adapt_delta': 0.99},
check_hmc_diagnostics=True,
seed=1
)
pystan.check_hmc_diagnostics(fit)
data = az.from_pystan(
posterior=fit,
posterior_predictive=['Y_pred'],
observed_data=['Y_meas', 'X_meas'],
log_likelihood='log_lik',
)
model_res_dict[drug_class] = data
return model_res_dict
def plot_model_comparison_CIs(model_res_dict):
fig, ax = plt.subplots(nrows=1)
datasets = [
az.convert_to_dataset(
{drug_class: np.exp(model_res_dict[drug_class].posterior.mu.values)}
) for drug_class in DRUG_CLASSES
]
_ = az.plot_forest(
datasets,
combined=True,
credible_interval=0.95,
quartiles=True,
colors='black',
var_names=DRUG_CLASSES,
model_names=['', '', '', ''],
ax=ax
)
ax.set_title('95% HDI $e^\\mu$')
plt.tight_layout()
plt.savefig(os.path.join(parent_dir_name, f'output/hdi_drug_class_comparison.tiff'), format='tiff', dpi=500,
bbox_inches="tight")
return plt
|
{"hexsha": "a14eedae83a38337c5f84825e067d50ff4ad9f4f", "size": 3532, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/subgroup_analysis.py", "max_stars_repo_name": "volkale/advr", "max_stars_repo_head_hexsha": "f817ce31c50a5bb976eb29bffe9832e2aeb6f7c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/subgroup_analysis.py", "max_issues_repo_name": "volkale/advr", "max_issues_repo_head_hexsha": "f817ce31c50a5bb976eb29bffe9832e2aeb6f7c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/subgroup_analysis.py", "max_forks_repo_name": "volkale/advr", "max_forks_repo_head_hexsha": "f817ce31c50a5bb976eb29bffe9832e2aeb6f7c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-25T08:25:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-18T18:13:01.000Z", "avg_line_length": 34.6274509804, "max_line_length": 116, "alphanum_fraction": 0.6370328426, "include": true, "reason": "import numpy", "num_tokens": 871}
|
# This solution was built using Python 2.7
# Author: Tomas F. Venegas Bernal tf.venegas10@uniandes.edu.co
import random
import numpy as np
# This is a simple class that builds the Board that will be shown to the user.
class Board:
def __init__(self, height, width):
self.matrix = np.chararray((height, width))
self.matrix[:] = "."
    # This method converts the matrix into its console representation
def toString(self):
sttr = " "
        for i in xrange(len(self.matrix[0])):
sttr += " " + str(i) + " "
if i < 10:
sttr += " "
sttr += "\n\n"
for i in xrange(len(self.matrix)):
sttr += str(i) + " |"
for j in range(len(self.matrix[0])):
sttr += " " + self.matrix[i][j] + " "
sttr += "|\n"
return sttr
# This function is in charge of building the game: the initial board with the specified dimensions
# and the mines placed randomly on it.
def buildGame(height, width, nMines):
# real board - with hidden values
realBoard = np.zeros((height, width))
    # shown board - with the characters displayed to the user
shownBoard = Board(height, width)
tempMines = []
i, j = 0, 0
    # First, randomly build the set of nMines mine coordinates
while len(tempMines) != nMines:
i = random.randint(0, height - 1)
j = random.randint(0, width - 1)
if (i, j) not in tempMines:
tempMines.append((i, j))
    # Now we go through the set of mine coordinates: each mine's box will be set to -1
    # and 1 is added to every adjacent box.
    # We do not check whether an adjacent box is itself a mine, because all mine boxes
    # are reset to -1 at the end (to optimize).
for (i, j) in tempMines:
# we will set this at the end realBoard[i][j]=-1
# modify lower boxes
if i + 1 < height:
realBoard[i + 1][j] += 1
if j + 1 < width:
realBoard[i + 1][j + 1] += 1
if j - 1 >= 0:
realBoard[i + 1][j - 1] += 1
# modify upper boxes
if i - 1 >= 0:
realBoard[i - 1][j] += 1
if j + 1 < width:
realBoard[i - 1][j + 1] += 1
if j - 1 >= 0:
realBoard[i - 1][j - 1] += 1
# modify the right box
if j + 1 < width:
realBoard[i][j + 1] += 1
# modify the left box
if j - 1 >= 0:
realBoard[i][j - 1] += 1
for (i, j) in tempMines:
realBoard[i][j] = -1
return realBoard, shownBoard, tempMines
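# Equivalent neighbour-count sketch (illustration only, not used by the game): the nested
# "if" checks above amount to adding, for each of the eight neighbour offsets, a shifted
# copy of the 0/1 mine mask onto the count board.
def countNeighbours(mineMask):
    h, w = mineMask.shape
    counts = np.zeros((h, w))
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di == 0 and dj == 0:
                continue
            counts[max(-di, 0):h - max(di, 0), max(-dj, 0):w - max(dj, 0)] += \
                mineMask[max(di, 0):h + min(di, 0), max(dj, 0):w + min(dj, 0)]
    return counts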
# This function defines the procedure to follow when the user decides to uncover cell (i,j)
# (it's a recursive function)
def uncover(i, j, realBoard, shownBoard, tempMines):
if i < 0 or j < 0 or i >= len(realBoard) or j >= len(realBoard[0]) or shownBoard.matrix[i][j] not in [".", "P"]:
return False
if realBoard[i][j] == -1:
for (l, c) in tempMines:
shownBoard.matrix[l][c] = "*"
return True
if realBoard[i][j] > 0:
shownBoard.matrix[i][j] = str(realBoard[i][j])
return False
else:
shownBoard.matrix[i][j] = "-"
# recursive calls
uncover(i + 1, j, realBoard, shownBoard, tempMines)
uncover(i + 1, j - 1, realBoard, shownBoard, tempMines)
uncover(i + 1, j + 1, realBoard, shownBoard, tempMines)
uncover(i - 1, j, realBoard, shownBoard, tempMines)
uncover(i - 1, j - 1, realBoard, shownBoard, tempMines)
uncover(i - 1, j + 1, realBoard, shownBoard, tempMines)
uncover(i, j + 1, realBoard, shownBoard, tempMines)
uncover(i, j - 1, realBoard, shownBoard, tempMines)
return False
# This function performs one of the two possible actions (mark or uncover)
def doAction(line, column, action, realBoard, shownBoard, missingMines, setMines, tempMines, nMines):
#Mark case
if action == "M":
if shownBoard.matrix[line][column] == "P":
shownBoard.matrix[line][column] = "."
if realBoard[line][column] == -1:
missingMines += 1
setMines -= 1
elif shownBoard.matrix[line][column] == ".":
shownBoard.matrix[line][column] = "P"
if realBoard[line][column] == -1:
missingMines -= 1
setMines += 1
if missingMines == 0 and setMines == nMines:
return (True, True, missingMines, setMines)
#Uncover case
else:
return (uncover(line, column, realBoard, shownBoard, tempMines), False, missingMines, setMines)
return (False, False, missingMines, setMines)
if __name__ == '__main__':
# variable to make the game re-playable
again = True
while (again):
# Initialize with impossible values
height, width, nMines = -1, -1, -1
# loop to read the initial game values
while height <= 0 or width <= 0 or nMines <= 0 or nMines > height * width:
print("We are going to play Minesweeper.")
# Request initial input
print(
"Please enter the board s height, width, and number of mines you want separated by spaces (ex:'10 20 10')")
# read input
line = raw_input()
tempArr = line.split(" ")
# constants instantiation
try:
height, width, nMines = int(tempArr[0]), int(tempArr[1]), int(tempArr[2])
except:
pass
        # game created
realBoard, shownBoard, tempMines = buildGame(height, width, nMines)
        # Initialize the game-state variables for this round
# boolean for when to stop
gameOver = False
# boolean for when the player was victorious
victory = False
# number of missing mines to be found by the player (to win the game)
# a mine is considered found when the player places a flag on it.
missingMines = nMines
# number of flags set by the user
setMines = 0
while not gameOver:
# print board
print(shownBoard.toString())
line, column, action = -1, -1, "O"
# variable to evaluate if the user made a mistake on the input
entries = 0
# read input loop
while line < 0 or line >= height or column < 0 or column >= width or action not in ["U", "M"]:
if entries > 0:
print(
"You entered an INCORRECT value, this game is not that sophisticated please follow the guideline.")
entries += 1
print(
"Please enter the cell you want to modify separated by spaces in the form <line> <column> <action>")
print("The possible actions are: M : mark or un-mark a cell, U : uncover a cell marked as '.' ")
inp = raw_input()
try:
tempArr = inp.split(" ")
line, column, action = int(tempArr[0]), int(tempArr[1]), tempArr[2]
except:
pass
gameOver, victory, missingMines, setMines = doAction(line, column, action, realBoard, shownBoard,
missingMines, setMines, tempMines, nMines)
if victory:
print(shownBoard.toString())
print("Congratulations! You have won :)")
else:
print(shownBoard.toString())
print("Sorry! You have lost. Better chance next time.")
print("Do you want to play again?")
print("Mark 'YES' or 'yes' or 'y' or 'Y' for YES.")
print("Other input for NO")
inp = raw_input()
try:
if inp not in ["yes", "YES", "Y", "y"]:
again = False
except:
again = False
|
{"hexsha": "6534e0a1ec1c980ecdeb1ef9cd4ce1220f48ae03", "size": 7932, "ext": "py", "lang": "Python", "max_stars_repo_path": "Minesweeper.py", "max_stars_repo_name": "tf-venegas10/Minesweeper", "max_stars_repo_head_hexsha": "dfb41284f5bb2469d101c4b05abe5d6897bf69f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Minesweeper.py", "max_issues_repo_name": "tf-venegas10/Minesweeper", "max_issues_repo_head_hexsha": "dfb41284f5bb2469d101c4b05abe5d6897bf69f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Minesweeper.py", "max_forks_repo_name": "tf-venegas10/Minesweeper", "max_forks_repo_head_hexsha": "dfb41284f5bb2469d101c4b05abe5d6897bf69f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7714285714, "max_line_length": 123, "alphanum_fraction": 0.54702471, "include": true, "reason": "import numpy", "num_tokens": 2081}
|
using GeometricAlgebra
using DataStructures
using MacroTools: postwalk, prewalk, @capture
import BenchmarkTools
import Grassmann
include("printing.jl")
include("utils.jl")
function run_benchmark(name, expr::Expr, level)
indent_level = level * 2
result = @eval @localbenchmark $expr
replprint(string(minimum(result)), newline=1, prefix=name * ": "; indent_level)
end
function run_benchmark(name, group, level)
indent_level = level * 2
replprint(name; indent_level, newline=1, bold=true, color=color_levels[level])
for (k, v) ∈ group
run_benchmark(k, v, level + 1)
end
end
function run_benchmarks(suite)
replprint("Benchmarking geometric algebras", newline=1, color=:yellow)
ga_suite = prepare_suite!(deepcopy(suite))
grassmann_suite = prepare_suite!(deepcopy(suite); grassmann=true)
run_benchmark("GeometricAlgebra", ga_suite, 1)
run_benchmark("Grassmann", grassmann_suite, 1)
end
function prepare_suite!(suite; grassmann=false)
for (k, v) ∈ suite
if v isa Expr
if grassmann
suite[k] = postwalk(suite[k]) do x
if is_blade_symbol(x)
Symbol(replace(string(x), r"^v" => "g"))
else
x
end
end
end
else
prepare_suite!(v; grassmann)
end
end
suite
end
is_blade_symbol(x) = x isa Symbol && startswith(string(x), r"v\d+")
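# Illustration only (not used below): the Grassmann variant of each suite is produced by
# renaming basis blade symbols, e.g. :(5v1 * 5v2) becomes :(5g1 * 5g2).
to_grassmann_symbol(x) = is_blade_symbol(x) ? Symbol(replace(string(x), r"^v" => "g")) : x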
make_suite(base) = make_suite!(OrderedDict(), base)
function make_suite!(suite, base)
res = DefaultOrderedDict(() -> OrderedDict())
for (k, v) ∈ base
if v isa Vector{Expr}
res[k] = OrderedDict(string.(v) .=> v)
else
res[k] = make_suite(v)
end
end
res
end
color_levels = [:red, :cyan]
@basis "+++"
Grassmann.@basis "+++" G g
suite_base = DefaultOrderedDict(() -> OrderedDict())
suite_base["Geometric product"] = [
:(5v1 * 5v2),
]
suite_base["Addition"] = [
:(5v1 + 5v2),
]
suite_base["Mixed"]= [
:((5v1 + 3v3 + 1v12) * 5v2),
]
suite = make_suite(suite_base)
run_benchmarks(suite)
|
{"hexsha": "faa76f1b0c10718dfcf6849b923cedcd18137440", "size": 2149, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "benchmarks/benchmarks.jl", "max_stars_repo_name": "serenity4/ConformalGeometry.jl", "max_stars_repo_head_hexsha": "70ec88954d0d0acfb9d480e17a45593305333c04", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-05-07T14:33:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T21:07:36.000Z", "max_issues_repo_path": "benchmarks/benchmarks.jl", "max_issues_repo_name": "serenity4/GeometricAlgebra.jl", "max_issues_repo_head_hexsha": "4489ec9e3e37541f891f29c3ee69d9295d5cdeac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmarks/benchmarks.jl", "max_forks_repo_name": "serenity4/GeometricAlgebra.jl", "max_forks_repo_head_hexsha": "4489ec9e3e37541f891f29c3ee69d9295d5cdeac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4204545455, "max_line_length": 83, "alphanum_fraction": 0.6105165193, "num_tokens": 584}
|
{-# OPTIONS --no-universe-polymorphism #-}
open import Data.Product hiding (map)
open import Relation.Binary.Core
open import Function
open import Data.List
open import Data.Unit using (⊤)
open import Data.Empty
open import Equivalence
module BagEquality where
infixr 5 _⊕_
data _⊕_ (A B : Set) : Set where
inj₁ : A → A ⊕ B
inj₂ : B → A ⊕ B
uninhabited : {A : Set} → (A → ⊥) → A ↔ ⊥
uninhabited pf = record {to = pf;
from = ⊥-elim;
from-to = λ a → ⊥-elim (pf a);
to-from = λ false → ⊥-elim false}
Any : {A : Set} → (A → Set) → List A → Set
Any P [] = ⊥
Any P (x ∷ xs) = P x ⊕ Any P xs
infix 6 _∈_
_∈_ : {A : Set} → A → List A → Set
z ∈ xs = Any (λ y → z ≡ y) xs
infixr 4 _≈_
_≈_ : {A : Set} → List A → List A → Set
xs ≈ ys = ∀ z → z ∈ xs ↔ z ∈ ys
infix 10 _>>=_
_>>=_ : {A B : Set} → List A → (A → List B) → List B
xs >>= f = concat (map f xs)
⊕-left-identity : {A : Set} → ⊥ ⊕ A ↔ A
⊕-left-identity {A} = record { to = to' ;
from = inj₂ ;
from-to = from-to' ;
to-from = λ a → refl } where
to' : ⊥ ⊕ A → A
to' (inj₁ b) = ⊥-elim b
to' (inj₂ a) = a
from-to' : (s : ⊥ ⊕ A) → inj₂ (to' s) ≡ s
from-to' (inj₁ b) = ⊥-elim b
from-to' (inj₂ a) = refl
⊕-assoc : {A B C : Set} → A ⊕ (B ⊕ C) ↔ (A ⊕ B) ⊕ C
⊕-assoc {A} {B} {C} = record {to = to' ;
from = from' ;
from-to = from-to' ;
to-from = to-from' } where
to' : A ⊕ (B ⊕ C) → (A ⊕ B) ⊕ C
to' (inj₁ a) = inj₁ (inj₁ a)
to' (inj₂ (inj₁ b)) = inj₁ (inj₂ b)
to' (inj₂ (inj₂ c)) = inj₂ c
from' : (A ⊕ B) ⊕ C → A ⊕ (B ⊕ C)
from' (inj₁ (inj₁ a)) = inj₁ a
from' (inj₁ (inj₂ b)) = inj₂ (inj₁ b)
from' (inj₂ c) = inj₂ (inj₂ c)
from-to' : (x : A ⊕ (B ⊕ C)) → from' (to' x) ≡ x
from-to' (inj₁ _) = refl
from-to' (inj₂ (inj₁ _)) = refl
from-to' (inj₂ (inj₂ _)) = refl
to-from' : (y : (A ⊕ B) ⊕ C) → to' (from' y) ≡ y
to-from' (inj₁ (inj₁ _)) = refl
to-from' (inj₁ (inj₂ _)) = refl
to-from' (inj₂ _) = refl
⊕-comm : {A B : Set} → A ⊕ B ↔ B ⊕ A
⊕-comm {A} {B} = record {to = to' ;
from = from' ;
from-to = from-to' ;
to-from = to-from' } where
to' : A ⊕ B → B ⊕ A
to' (inj₁ a) = inj₂ a
to' (inj₂ b) = inj₁ b
from' : B ⊕ A → A ⊕ B
from' (inj₁ b) = inj₂ b
from' (inj₂ a) = inj₁ a
from-to' : (x : A ⊕ B) → from' (to' x) ≡ x
from-to' (inj₁ _) = refl
from-to' (inj₂ _) = refl
to-from' : (x : B ⊕ A) → to' (from' x) ≡ x
to-from' (inj₁ _) = refl
to-from' (inj₂ _) = refl
⊕-cong : {A₁ A₂ B₁ B₂ : Set} → A₁ ↔ A₂ → B₁ ↔ B₂ → A₁ ⊕ B₁ ↔ A₂ ⊕ B₂
⊕-cong {A₁} {A₂} {B₁} {B₂} A↔ B↔ = record { to = to';
from = from';
from-to = from-to';
to-from = to-from' } where
to' : A₁ ⊕ B₁ → A₂ ⊕ B₂
to' (inj₁ a) = inj₁ (_↔_.to A↔ a)
to' (inj₂ b) = inj₂ (_↔_.to B↔ b)
from' : A₂ ⊕ B₂ → A₁ ⊕ B₁
from' (inj₁ a) = inj₁ (_↔_.from A↔ a)
from' (inj₂ b) = inj₂ (_↔_.from B↔ b)
from-to' : (x : A₁ ⊕ B₁) → from' (to' x) ≡ x
from-to' (inj₁ a) = _↔_.from-to A↔ a under inj₁
from-to' (inj₂ b) = _↔_.from-to B↔ b under inj₂
to-from' : (x : A₂ ⊕ B₂) → to' (from' x) ≡ x
to-from' (inj₁ a) = _↔_.to-from A↔ a under inj₁
to-from' (inj₂ b) = _↔_.to-from B↔ b under inj₂
×-cong : {A₁ A₂ B₁ B₂ : Set} → A₁ ↔ A₂ → B₁ ↔ B₂ → (A₁ × B₁) ↔ (A₂ × B₂)
×-cong {A₁} {A₂} {B₁} {B₂} A↔ B↔ = record { to = to';
from = from';
from-to = from-to';
to-from = to-from' } where
to' : A₁ × B₁ → A₂ × B₂
to' (a , b) = (_↔_.to A↔ a , _↔_.to B↔ b)
from' : A₂ × B₂ → A₁ × B₁
from' (a , b) = (_↔_.from A↔ a , _↔_.from B↔ b)
pairEq : {A B : Set} {x y : A} {u v : B} → x ≡ y → u ≡ v → (x , u) ≡ (y , v)
pairEq refl refl = refl
from-to' : (x : A₁ × B₁) → from' (to' x) ≡ x
from-to' (a , b) = pairEq (_↔_.from-to A↔ a) (_↔_.from-to B↔ b)
to-from' : (y : A₂ × B₂) → to' (from' y) ≡ y
to-from' (a , b) = pairEq (_↔_.to-from A↔ a) (_↔_.to-from B↔ b)
Any-++ : {A : Set} (P : A → Set) (xs ys : List A) → Any P (xs ++ ys) ↔ Any P xs ⊕ Any P ys
Any-++ P [] ys = Any P ys ↔⟨ ↔sym ⊕-left-identity ⟩
⊥ ⊕ Any P ys □↔
Any-++ P (x ∷ xs) ys = P x ⊕ Any P (xs ++ ys) ↔⟨ ⊕-cong (P x □↔) (Any-++ P xs ys) ⟩
P x ⊕ (Any P xs ⊕ Any P ys) ↔⟨ ⊕-assoc ⟩
(P x ⊕ Any P xs) ⊕ Any P ys □↔
-- ++-assoc : {A : Set} (xs ys zs : List A) → ((xs ++ ys) ++ zs) ≡ (xs ++ (ys ++ zs))
++-comm : {A : Set} (xs ys : List A) → xs ++ ys ≈ ys ++ xs
++-comm xs ys z = z ∈ (xs ++ ys) ↔⟨ Any-++ (z ≡_ ) xs ys ⟩
z ∈ xs ⊕ z ∈ ys ↔⟨ ⊕-comm ⟩
z ∈ ys ⊕ z ∈ xs ↔⟨ ↔sym (Any-++ (z ≡_ ) ys xs ) ⟩
z ∈ (ys ++ xs) □↔
Any-concat : {A : Set} (P : A → Set) → (xss : List (List A)) → Any P (concat xss) ↔ Any (Any P) xss
Any-concat P [] = ⊥ □↔
Any-concat P (xs ∷ xss) = Any P (xs ++ concat xss) ↔⟨ Any-++ P xs (concat xss) ⟩
Any P xs ⊕ Any P (concat xss) ↔⟨ ⊕-cong (Any P xs □↔) (Any-concat P xss) ⟩
Any P xs ⊕ Any (Any P) xss □↔
Any-map : {A B : Set} (P : B → Set) → (f : A → B) → (xs : List A) → Any P (map f xs) ↔ Any (P ∘ f) xs
Any-map P f [] = ⊥ □↔
Any-map P f (x ∷ xs) = P (f x) ⊕ Any P (map f xs) ↔⟨ ⊕-cong (P (f x) □↔) (Any-map P f xs) ⟩
(P ∘ f) x ⊕ Any (P ∘ f) xs □↔
Any->>= : {A B : Set} (P : B → Set) → (xs : List A) → (f : A → List B) → Any P (xs >>= f) ↔ Any (Any P ∘ f) xs
Any->>= P xs f = Any P (concat (map f xs)) ↔⟨ Any-concat P (map f xs) ⟩
Any (Any P) (map f xs) ↔⟨ Any-map (Any P) f xs ⟩
Any (Any P ∘ f) xs □↔
Any-∈ : {A : Set} {P : A → Set} {xs : List A} → Any P xs ↔ (∃ λ z → P z × z ∈ xs)
Any-∈ {A} {P} {[]} = ⊥ ↔⟨ (↔sym ∘ uninhabited) (⊥-elim ∘ proj₂ ∘ proj₂) ⟩
(∃ λ z → P z × z ∈ []) □↔
Any-∈ {A} {P} {x ∷ xs} = record {to = to' ;
from = from' ;
from-to = from-to' ;
to-from = to-from' } where
to' : P x ⊕ Any P xs → ∃ (λ z → P z × z ∈ (x ∷ xs))
to' (inj₁ px) = x , (px , (inj₁ refl))
to' (inj₂ any) = (proj₁ recEx) , ( (proj₁ ∘ proj₂) recEx , (inj₂ (proj₂ (proj₂ recEx))) ) where
recEx = _↔_.to Any-∈ any
from' : ∃ (λ z → P z × z ∈ (x ∷ xs)) → P x ⊕ Any P xs
from' ex = from'' (proj₁ ex) (proj₁ (proj₂ ex)) (proj₂ (proj₂ ex)) where
from'' : (z : A) → P z → z ∈ (x ∷ xs) → P x ⊕ Any P xs
from'' .x px (inj₁ refl) = inj₁ px
from'' z pz (inj₂ z∈xs) = inj₂ recAny where
recAny = _↔_.from (Any-∈ {P = P}) (z , (pz , z∈xs))
from-to' : (pf : P x ⊕ Any P xs) → from' (to' pf) ≡ pf
from-to' (inj₁ px) = refl
from-to' (inj₂ any) = _↔_.from-to Any-∈ any under inj₂
to-from' : (ex : ∃ (λ z → P z × z ∈ (x ∷ xs))) → to' (from' ex) ≡ ex
to-from' ex = to-from'' (proj₁ ex) (proj₁ (proj₂ ex)) (proj₂ (proj₂ ex)) where
to-from'' : (z : A) → (pz : P z) → (z∈xxs : z ∈ (x ∷ xs)) →
to' (from' ( z , ( pz , z∈xxs ))) ≡ ( z , ( pz , z∈xxs ))
to-from'' .x px (inj₁ refl) = refl
to-from'' z pz (inj₂ z∈xs) = _↔_.to-from (Any-∈ {P = P}) (z , ( pz , z∈xs ) )
under (λ ex → ((proj₁ ex) , ( (proj₁ ∘ proj₂) ex , (inj₂ (proj₂ (proj₂ ex))))))
∃-cong : {A : Set} {P Q : A → Set} → (∀ x → P x ↔ Q x) → (∃ λ x → P x) ↔ (∃ λ x → Q x)
∃-cong p = record {to = λ ex → proj₁ ex , _↔_.to (p (proj₁ ex)) (proj₂ ex);
from = λ ex → proj₁ ex , _↔_.from (p (proj₁ ex)) (proj₂ ex);
from-to = λ ex → _↔_.from-to (p (proj₁ ex)) (proj₂ ex) under λ pf → (proj₁ ex) , pf;
to-from = λ ex → _↔_.to-from (p (proj₁ ex)) (proj₂ ex) under λ pf → (proj₁ ex) , pf }
Any-cong : {A : Set} (P Q : A → Set) → (xs ys : List A) → (∀ x → P x ↔ Q x) → xs ≈ ys → Any P xs ↔ Any Q ys
Any-cong P Q xs ys p eq = Any P xs ↔⟨ Any-∈ ⟩
(∃ λ z → P z × z ∈ xs) ↔⟨ ∃-cong (λ x → ×-cong (p x) (eq x)) ⟩
(∃ λ z → Q z × z ∈ ys) ↔⟨ ↔sym Any-∈ ⟩
Any Q ys □↔
All-cong : {A : Set} {P Q : A → Set} → {xs ys : List A} → (∀ x → P x ↔ Q x) → xs ≈ ys →
(∀ x → x ∈ xs → P x) ⇔ (∀ y → y ∈ ys → Q y)
All-cong {P} {Q} {xs} {ys} p eq = record {to = λ P∀x y y∈ys → _↔_.to (p y) (P∀x y (_↔_.from (eq y) y∈ys));
from = λ Q∀y x x∈xs → _↔_.from (p x) (Q∀y x (_↔_.to (eq x) x∈xs))}
|
{"hexsha": "33b53bfd9c67ca6d5dd50499a1915729c52c1e81", "size": 12091, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "BagEquality.agda", "max_stars_repo_name": "NAMEhzj/Divide-and-Conquer-in-Agda", "max_stars_repo_head_hexsha": "99bd3a5e772563153d78f61c1bbca48d7809ff48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BagEquality.agda", "max_issues_repo_name": "NAMEhzj/Divide-and-Conquer-in-Agda", "max_issues_repo_head_hexsha": "99bd3a5e772563153d78f61c1bbca48d7809ff48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BagEquality.agda", "max_forks_repo_name": "NAMEhzj/Divide-and-Conquer-in-Agda", "max_forks_repo_head_hexsha": "99bd3a5e772563153d78f61c1bbca48d7809ff48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.1701244813, "max_line_length": 131, "alphanum_fraction": 0.3003887189, "num_tokens": 3931}
|
"""Lekhnitskii solutions to homogeneous anisotropic plates with loaded and unloaded holes
Notes
-----
This module uses the following acronyms
* CLPT: Classical Laminated Plate Theory
References
----------
.. [1] Esp, B. (2007). *Stress distribution and strength prediction of composite
laminates with multiple holes* (PhD thesis). Retrieved from
https://rc.library.uta.edu/uta-ir/bitstream/handle/10106/767/umi-uta-1969.pdf?sequence=1&isAllowed=y
.. [2] Lekhnitskii, S., Tsai, S., & Cheron, T. (1987). *Anisotropic plates* (2nd ed.).
New York: Gordon and Breach science.
.. [3] Garbo, S. and Ogonowski, J. (1981) *Effect of variances and manufacturing
tolerances on the design strength and life of mechanically fastened
composite joints* (Vol. 1,2,3). AFWAL-TR-81-3041.
.. [4] Waszczak, J.P. and Cruse T.A. (1973) *A synthesis procedure for mechanically
fastened joints in advanced composite materials* (Vol. II). AFML-TR-73-145.
"""
import logging
import abc
from collections.abc import Callable
from typing import Any
import numpy as np
import numpy.testing as nptest
from nptyping import NDArray
logger = logging.getLogger(__name__)
def rotate_stress(stresses: NDArray[3, np.float], angle: float = 0.) -> NDArray[3, np.float]:
r"""Rotates 2D stress components by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Parameters
----------
stresses : ndarray
array of [:math: `\sigma_x, \sigma_y, \tau_{xy}`] in-plane stresses
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
ndarray
2D array of [:math: `\sigma_x', \sigma_y', \tau_{xy}'`] rotated stresses
"""
c = np.cos(angle)
s = np.sin(angle)
rotation_matrix = np.array([
[c**2, s**2, 2*s*c],
[s**2, c**2, -2*s*c],
[-s*c, s*c, c**2-s**2]
])
stresses = rotation_matrix @ stresses.T
return stresses.T
def rotate_strain(strains: NDArray[3, np.float], angle: float = 0.) -> NDArray[3, float]:
r"""Rotates 2D strain components by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Parameters
----------
strains : ndarray
2D nx3 array of [:math: `\epsilon_x, \epsilon_y, \epsilon_{xy}`] in-plane strains
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
ndarray
        2D nx3 array of [:math: `\epsilon_x', \epsilon_y', \epsilon_{xy}'`] rotated strains
"""
c = np.cos(angle)
s = np.sin(angle)
rotation_matrix = np.array([
[c**2, s**2, s*c],
[s**2, c**2, -s*c],
[-2*s*c, 2*s*c, c**2 - s**2]
])
strains = rotation_matrix @ strains.T
return strains.T
def rotate_material_matrix(a_inv: NDArray[(3, 3), np.float], angle: float = 0.) -> NDArray[(3, 3), float]:
r"""Rotates the material compliance matrix by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Notes
-----
This function implements Eq. 9.6 [1]_
Parameters
----------
a_inv : ndarray
2D (3, 3) inverse CLPT A-matrix
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
ndarray
2D (3, 3) rotated compliance matrix
"""
c = np.cos(angle)
s = np.sin(angle)
a11 = a_inv[0, 0]
a12 = a_inv[0, 1]
a16 = a_inv[0, 2]
a22 = a_inv[1, 1]
a26 = a_inv[1, 2]
a66 = a_inv[2, 2]
a11p = a11*c**4 + (2*a12 + a66)*s**2*c**2 + a22*s**4 + (a16*c**2 + a26*s**2)*np.sin(2*angle)
a22p = a11*s**4 + (2*a12 + a66)*s**2*c**2 + a22*c**4 - (a16*s**2 + a26*c**2)*np.sin(2*angle)
a12p = a12 + (a11 + a22 - 2*a12 - a66)*s**2*c**2 + 0.5*(a26 - a16)*np.sin(2*angle)*np.cos(2*angle)
a66p = a66 + 4*(a11 + a22 - 2*a12 - a66)*s**2*c**2 + 2*(a26 - a16)*np.sin(2*angle)*np.cos(2*angle)
a16p = ((a22*s**2 - a11*c**2 + 0.5*(2*a12 + a66)*np.cos(2*angle))*np.sin(2*angle)
+ a16*c**2*(c**2 - 3*s**2) + a26*s**2*(3*c**2 - s**2))
a26p = ((a22*c**2 - a11*s**2 - 0.5*(2*a12 + a66)*np.cos(2*angle))*np.sin(2*angle)
+ a16*s**2*(3*c**2 - s**2) + a26*c**2*(c**2 - 3*s**2))
# test invariants (Eq. 9.7 [2]_)
nptest.assert_almost_equal(a11p + a22p + 2*a12p, a11 + a22 + 2*a12, decimal=4)
nptest.assert_almost_equal(a66p - 4*a12p, a66 - 4*a12, decimal=4)
return np.array([[a11p, a12p, a16p], [a12p, a22p, a26p], [a16p, a26p, a66p]])
def rotate_complex_parameters(mu1: complex, mu2: complex, angle: float = 0.) -> tuple[complex, complex]:
r"""Rotates the complex parameters by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Notes
-----
Implements Eq. 10.8 [2]_
Parameters
----------
mu1 : complex
first complex parameter
mu2 : complex
second complex parameter
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
mu1p, mu2p : complex
first and second transformed complex parameters
"""
c = np.cos(angle)
s = np.sin(angle)
mu1p = (mu1*c - s)/(c + mu1*s)
mu2p = (mu2*c - s)/(c + mu2*s)
return mu1p, mu2p
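# Quick consistency sketch for the rotation helpers above (illustration only, not part of
# the published solution): rotating in-plane stresses by an angle and then by its negative
# should recover the original components.
def _rotation_round_trip_ok(stresses: NDArray[(Any, 3), float], angle: float) -> bool:
    return np.allclose(rotate_stress(rotate_stress(stresses, angle), -angle), stresses)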
class Hole(abc.ABC):
"""Abstract parent class for defining a hole in an anisotropic infinite plate
This class defines shared methods and attributes for anisotropic elasticity solutions of plates with circular
holes.
This is an abstract class, do not instantiate this class.
Notes
-----
The following assumptions apply for plates in a state of generalized plane stress.
#. The plates are homogeneous and a plane of elastic symmetry which is parallel to their middle plane
exists at every point.
#. Applied forces act within planes that are parallel and symmetric to the middle plane of the plates,
and have negligible variation through the thickness.
#. Plate deformations are small.
Parameters
----------
diameter : float
hole diameter
thickness : float
laminate thickness
a_inv : array_like
2D (3, 3) inverse of CLPT A-matrix
Attributes
----------
r : float
the hole radius
a : ndarray
(3, 3) inverse a-matrix of the laminate
h : float
thickness of the laminate
    mu1 : complex
        first complex root (with positive imaginary part) of the characteristic equation
    mu2 : complex
        second complex root (with positive imaginary part) of the characteristic equation
    mu1_bar : complex
        remaining root of the conjugate pair containing mu1
    mu2_bar : complex
        remaining root of the conjugate pair containing mu2
"""
MAPPING_PRECISION = 0.0000001
def __init__(self, diameter: float, thickness: float, a_inv: NDArray[(3, 3), float]) -> None:
self.r = diameter/2.
self.a = np.array(a_inv, dtype=float)
self.h = thickness
self.mu1, self.mu2, self.mu1_bar, self.mu2_bar = self.roots()
def roots(self) -> tuple[complex, complex, complex, complex]:
r""" Finds the roots to the characteristic equation
Notes
-----
This method implements Eq. A.2 [1]_ or Eq. 7.4 [2]_
        .. math:: a_{11}\mu^4-2a_{16}\mu^3+(2a_{12}+a_{66})\mu^2-2a_{26}\mu+a_{22}=0
Raises
------
ValueError
If roots cannot be found
"""
a11 = self.a[0, 0]
a12 = self.a[0, 1]
a16 = self.a[0, 2]
a22 = self.a[1, 1]
a26 = self.a[1, 2]
a66 = self.a[2, 2]
roots = np.roots([a11, -2 * a16, (2 * a12 + a66), -2 * a26, a22])
if np.imag(roots[0]) >= 0.0:
mu2 = roots[0]
mu2_bar = roots[1]
elif np.imag(roots[1]) >= 0.0:
mu2 = roots[1]
mu2_bar = roots[0]
else:
raise ValueError("mu1 cannot be solved")
if np.imag(roots[2]) >= 0.0:
mu1 = roots[2]
mu1_bar = roots[3]
elif np.imag(roots[3]) >= 0.0:
mu1 = roots[3]
mu1_bar = roots[2]
else:
raise ValueError("mu2 cannot be solved")
return mu1, mu2, mu1_bar, mu2_bar
def xi_1(self, z1s: NDArray[Any, complex]) -> tuple[NDArray[Any, complex], NDArray[Any, int]]:
r"""Calculates the first mapping parameters
Notes
-----
This method implements Eq. A.4 & Eq. A.5, [1]_ or Eq. 37.4 [2]_
.. math:: \xi_1=\frac{z_1\pm\sqrt{z_1^2-a^2-\mu_1^2b^2}}{a-i\mu_1b}
Parameters
----------
z1s : ndarray
1D array of first parameters from the complex plane :math: `z_1=x+\mu_1y`
Returns
-------
xi_1s : ndarray
1D array of the first mapping parameters
sign_1s : ndarray
1D array of signs producing positive mapping parameters
"""
mu1 = self.mu1
a = self.r
b = self.r
xi_1s = np.zeros(len(z1s), dtype=complex)
sign_1s = np.zeros(len(z1s), dtype=int)
xi_1_pos = (z1s + np.sqrt(z1s * z1s - a * a - mu1 * mu1 * b * b)) / (a - 1j * mu1 * b)
xi_1_neg = (z1s - np.sqrt(z1s * z1s - a * a - mu1 * mu1 * b * b)) / (a - 1j * mu1 * b)
pos_indices = np.where(np.abs(xi_1_pos) >= (1. - self.MAPPING_PRECISION))[0]
neg_indices = np.where(np.abs(xi_1_neg) >= (1. - self.MAPPING_PRECISION))[0]
xi_1s[pos_indices] = xi_1_pos[pos_indices]
xi_1s[neg_indices] = xi_1_neg[neg_indices]
# high level check that all indices were mapped
if not (pos_indices.size + neg_indices.size) == xi_1s.size:
bad_indices = np.where(xi_1s == 0)[0]
logger.warning(f"xi_1 unsolvable\n Failed Indices: {bad_indices}")
sign_1s[pos_indices] = 1
sign_1s[neg_indices] = -1
return xi_1s, sign_1s
def xi_2(self, z2s: NDArray[Any, complex]) -> tuple[NDArray[Any, complex], NDArray[Any, int]]:
r""" Calculates the first mapping parameters
Notes
-----
This method implements Eq. A.4 & Eq. A.5, [1]_ or Eq. 37.4 [2]_
.. math:: \xi_2=\frac{z_2\pm\sqrt{z_2^2-a^2-\mu_2^2b^2}}{a-i\mu_2b}
Parameters
----------
z2s : ndarray
            1D array of second parameters from the complex plane :math: `z_2=x+\mu_2y`
Returns
-------
xi_2s : ndarray
            1D array of the second mapping parameters
sign_2s : ndarray
1D array of signs producing positive mapping parameters
"""
mu2 = self.mu2
a = self.r
b = self.r
xi_2s = np.zeros(len(z2s), dtype=complex)
sign_2s = np.zeros(len(z2s), dtype=int)
xi_2_pos = (z2s + np.sqrt(z2s * z2s - a * a - mu2 * mu2 * b * b)) / (a - 1j * mu2 * b)
xi_2_neg = (z2s - np.sqrt(z2s * z2s - a * a - mu2 * mu2 * b * b)) / (a - 1j * mu2 * b)
pos_indices = np.where(np.abs(xi_2_pos) >= (1. - self.MAPPING_PRECISION))[0]
neg_indices = np.where(np.abs(xi_2_neg) >= (1. - self.MAPPING_PRECISION))[0]
xi_2s[pos_indices] = xi_2_pos[pos_indices]
xi_2s[neg_indices] = xi_2_neg[neg_indices]
# high level check that all indices were mapped
if not (pos_indices.size + neg_indices.size) == xi_2s.size:
bad_indices = np.where(xi_2s == 0)[0]
logger.warning(f"xi_2 unsolvable\n Failed Indices: {bad_indices}")
sign_2s[pos_indices] = 1
sign_2s[neg_indices] = -1
return xi_2s, sign_2s
@abc.abstractmethod
def phi_1(self, z1: NDArray[Any, complex]) -> NDArray[Any, complex]:
raise NotImplementedError("You must implement this function.")
@abc.abstractmethod
def phi_2(self, z2: NDArray[Any, complex]) -> NDArray[Any, complex]:
raise NotImplementedError("You must implement this function.")
@abc.abstractmethod
def phi_1_prime(self, z1: NDArray[Any, complex]) -> NDArray[Any, complex]:
raise NotImplementedError("You must implement this function.")
@abc.abstractmethod
def phi_2_prime(self, z2: NDArray[Any, complex]) -> NDArray[Any, complex]:
raise NotImplementedError("You must implement this function.")
def stress(self, x: NDArray[Any, float], y: NDArray[Any, float]) -> NDArray[(Any, 3), float]:
r""" Calculates the stress at (x, y) points in the plate
Notes
-----
This method implements Eq. 8.2 [2]_
.. math:: \sigma_x=2Re[\mu_1^2\Phi_1'(z_1)+\mu_2^2\Phi_2'(z_2)]
.. math:: \sigma_y=2Re[\Phi_1'(z_1)+\Phi_2'(z_2)]
        .. math:: \tau_{xy}=-2Re[\mu_1\Phi_1'(z_1)+\mu_2\Phi_2'(z_2)]
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[sx0, sy0, sxy0], [sx1, sy1, sxy1], ... , [sxn, syn, sxyn]]
(n, 3) in-plane stress components in the cartesian coordinate system
"""
mu1 = self.mu1
mu2 = self.mu2
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
z1 = x + mu1 * y
z2 = x + mu2 * y
phi_1_prime = self.phi_1_prime(z1)
phi_2_prime = self.phi_2_prime(z2)
sx = 2.0 * np.real(mu1 * mu1 * phi_1_prime + mu2 * mu2 * phi_2_prime)
sy = 2.0 * np.real(phi_1_prime + phi_2_prime)
sxy = -2.0 * np.real(mu1 * phi_1_prime + mu2 * phi_2_prime)
return np.array([sx, sy, sxy]).T
def displacement(self, x: NDArray[Any, float], y: NDArray[Any, float]) -> NDArray[(Any, 2), float]:
r""" Calculates the displacement at (x, y) points in the plate
Notes
-----
This method implements Eq. 8.3 [2]_
.. math:: u=2Re[p_1\Phi_1(z_1)+p_2\Phi_2(z_2)]
.. math:: v=2Re[q_1\Phi_1(z_1)+q_2\Phi_2(z_2)]
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[u0, v0], [u1, v1], ... , [un, vn]]
(n, 2) in-plane displacement components in the cartesian coordinate system
"""
a11 = self.a[0, 0]
a12 = self.a[0, 1]
a16 = self.a[0, 2]
a22 = self.a[1, 1]
a26 = self.a[1, 2]
mu1 = self.mu1
mu2 = self.mu2
p1 = a11*mu1**2 + a12 - a16*mu1
p2 = a11*mu2**2 + a12 - a16*mu2
q1 = a12*mu1 + a22/mu1 - a26
q2 = a12*mu2 + a22/mu2 - a26
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
z1 = x + mu1 * y
z2 = x + mu2 * y
phi_1 = self.phi_1(z1)
phi_2 = self.phi_2(z2)
u = 2.0 * np.real(p1 * phi_1 + p2 * phi_2)
v = 2.0 * np.real(q1 * phi_1 + q2 * phi_2)
return np.array([u, v]).T
class UnloadedHole(Hole):
r"""Class for defining an unloaded hole in an infinite anisotropic homogeneous plate
This class represents an infinite anisotropic plate with a unfilled circular hole loaded at infinity with
forces in the x, y and xy (shear) directions.
Parameters
----------
loads: array_like
1D array [Nx, Ny, Nxy] force / unit length
diameter: float
hole diameter
thickness: float
laminate thickness
a_inv: array_like
2D array (3, 3) inverse CLPT A-matrix
Attributes
----------
applied_stress : (1, 3) ndarray
[:math:`\sigma_x^*, \sigma_y^*, \tau_{xy}^*`] stresses applied at infinity
"""
def __init__(self, loads: NDArray[3, float], diameter: float, thickness: float,
a_inv: NDArray[(3, 3), float]) -> None:
super().__init__(diameter, thickness, a_inv)
self.applied_stress = np.array(loads, dtype=float) / self.h
def alpha(self) -> complex:
r"""Calculates the alpha loading term for three components of applied stress at infinity
Three components of stress are [:math:`\sigma_{x}^*, \sigma_{y}^*, \tau_{xy}^*`]
Notes
-----
This method implements Eq. A.7 [1]_ which is a combination of Eq. 38.12 & Eq. 38.18 [2]_
.. math:: \alpha_1=\frac{r}{2}(\tau_{xy}^*i-\sigma_{y}^*)
Returns
-------
complex
first fourier series term for applied stress at infinity
"""
sy = self.applied_stress[1]
sxy = self.applied_stress[2]
r = self.r
return 1j * sxy * r / 2 - sy * r / 2
def beta(self) -> complex:
r"""Calculates the beta loading term for three components of applied stress at infinity
Three components of stress are [:math:`\sigma_x^*, \sigma_y^*, \tau_{xy}^*`]
Notes
-----
This method implements Eq. A.7 [1]_ which is a combination of Eq. 38.12 & Eq. 38.18 [2]_
.. math:: \beta_1=\frac{r}{2}(\tau_{xy}^*-\sigma_x^*i)
Returns
-------
complex
first fourier series term for applied stresses at infinity
"""
sx = self.applied_stress[0]
sxy = self.applied_stress[2]
r = self.r
return sxy * r / 2 - 1j * sx * r / 2
def phi_1(self, z1: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates the first stress function
Notes
-----
This method implements Eq. A.6 [1]_
.. math:: C_1=\frac{\beta_1-\mu_2\alpha_1}{\mu_1-\mu_2}
.. math:: \Phi_1=\frac{C_1}{\xi_1}
Parameters
----------
z1 : ndarray
1D complex array first mapping parameter
Returns
-------
ndarray
1D complex array
"""
mu1 = self.mu1
mu2 = self.mu2
alpha = self.alpha()
beta = self.beta()
xi_1, sign_1 = self.xi_1(z1)
C1 = (beta - mu2 * alpha) / (mu1 - mu2)
return C1 / xi_1
def phi_2(self, z2: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates the second stress function
Notes
-----
This method implements Eq. A.6 [1]_
.. math:: C_2=-\frac{\beta_1-\mu_1\alpha_1}{\mu_1-\mu_2}
.. math:: \Phi_2=\frac{C_2}{\xi_2}
Parameters
----------
z2 : ndarray
1D complex array second mapping parameter
Returns
-------
ndarray
1D complex array
"""
mu1 = self.mu1
mu2 = self.mu2
alpha = self.alpha()
beta = self.beta()
xi_2, sign_2 = self.xi_2(z2)
C2 = -(beta - mu1 * alpha) / (mu1 - mu2)
return C2 / xi_2
def phi_1_prime(self, z1: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates derivative of the first stress function
Notes
-----
This method implements Eq. A.8 [1]_
.. math:: C_1=\frac{\beta_1-\mu_2\alpha_1}{\mu_1-\mu_2}
        .. math:: \eta_1=\pm\sqrt{z_1^2-a^2-\mu_1^2b^2}
.. math:: \kappa_1=\frac{1}{a-i\mu_1b}
.. math:: \Phi_1'=-\frac{C_1}{\xi_1^2}(1+\frac{z_1}{\eta_1})\kappa_1
Parameters
----------
z1 : ndarray
1D complex array first mapping parameter
Returns
-------
ndarray
1D complex array
"""
a = self.r
b = self.r
mu1 = self.mu1
mu2 = self.mu2
alpha = self.alpha()
beta = self.beta()
xi_1, sign_1 = self.xi_1(z1)
C1 = (beta - mu2 * alpha) / (mu1 - mu2)
eta1 = sign_1 * np.sqrt(z1 * z1 - a * a - mu1 * mu1 * b * b)
kappa1 = 1 / (a - 1j * mu1 * b)
return -C1 / (xi_1 ** 2) * (1 + z1 / eta1) * kappa1
def phi_2_prime(self, z2: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates derivative of the second stress function
Notes
-----
This method implements Eq. A.8 [1]_
.. math:: C_2=-\frac{\beta_1-\mu_1\alpha_1}{\mu_1-\mu_2}
        .. math:: \eta_2=\pm\sqrt{z_2^2-a^2-\mu_2^2b^2}
.. math:: \kappa_2=\frac{1}{a-i\mu_2b}
.. math:: \Phi_2'=-\frac{C_2}{\xi_2^2}(1+\frac{z_2}{\eta_2})\kappa_2
Parameters
----------
z2 : ndarray
1D complex array second mapping parameter
Returns
-------
ndarray
1D complex array
"""
a = self.r
b = self.r
mu1 = self.mu1
mu2 = self.mu2
alpha = self.alpha()
beta = self.beta()
xi_2, sign_2 = self.xi_2(z2)
C2 = -(beta - mu1 * alpha) / (mu1 - mu2)
eta2 = sign_2 * np.sqrt(z2 * z2 - a * a - mu2 * mu2 * b * b)
kappa2 = 1 / (a - 1j * mu2 * b)
return -C2 / (xi_2 ** 2) * (1 + z2 / eta2) * kappa2
def stress(self, x: NDArray[Any, float], y: NDArray[Any, float]) -> NDArray[(Any, 3), float]:
r""" Calculates the stress at (x, y) points in the plate
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[sx0, sy0, sxy0], [sx1, sy1, sxy1], ... , [sxn, syn, sxyn]]
(n, 3) in-plane stress components in the cartesian coordinate system
"""
sx, sy, sxy = super().stress(x, y).T
sx_app = self.applied_stress[0]
sy_app = self.applied_stress[1]
sxy_app = self.applied_stress[2]
return np.array([sx + sx_app, sy + sy_app, sxy + sxy_app]).T
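# Usage sketch for the class above (the laminate stiffnesses, loads and geometry below are
# illustrative assumptions, not values taken from the references):
def _unloaded_hole_example() -> NDArray[(Any, 3), float]:
    a_matrix = np.array([[1.2e6, 4.0e4, 0.],
                         [4.0e4, 6.0e5, 0.],
                         [0., 0., 1.0e5]])  # hypothetical orthotropic CLPT A-matrix (force/length)
    hole = UnloadedHole(loads=[100., 50., 0.], diameter=0.25, thickness=0.1,
                        a_inv=np.linalg.inv(a_matrix))
    # sample the stress field along the x-axis, starting just outside the hole edge
    x = np.linspace(hole.r * 1.01, hole.r * 3, 10)
    y = np.zeros_like(x)
    return hole.stress(x, y)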
def _remove_bad_displacments(displacement_func:
Callable[[object, NDArray[Any, float], NDArray[Any, float]], NDArray[(Any, 2), float]]):
""" removes displacements that are 180 degrees behind bearing load direction"""
def inner(self, x: NDArray[Any, float], y: NDArray[Any, float]) -> NDArray[(Any, 2), float]:
# call displacement function
displacements = displacement_func(self, x, y)
# check if any points are 180 degrees behind bearing load
r, angles = self._cartesian_to_polar(x, y)
bad_angle = np.pi if self.theta == 0 else -1*(np.pi - self.theta)
# if so, replace those results with np.nan
displacements[np.isclose(angles, bad_angle)] = np.nan
return displacements
return inner
class LoadedHole(Hole):
"""Class for defining a loaded hole in an infinite anisotropic homogeneous plate
A cosine bearing load distribution is assumed to apply to the inside of the hole.
Notes
-----
Bearing distribution as shown below Ref. [4]_
.. image:: ../img/cosine_distribution.png
:height: 400px
Parameters
----------
load : float
bearing force
diameter : float
hole diameter
thickness : float
plate thickness
a_inv : array_like
2D array (3, 3) inverse CLPT A-matrix
theta : float, optional
bearing angle counter clock-wise from positive x-axis (radians)
Attributes
----------
p : float
bearing force
theta : float
bearing angle counter clock-wise from positive x-axis (radians)
A : float
real part of equilibrium constant for first stress function
A_bar : float
imaginary part of equilibrium constant for first stress function
B : float
real part of equilibrium constant for second stress function
B_bar : float
imaginary part of equilibrium constant for second stress function
"""
FOURIER_TERMS = 45 # number of fourier series terms [3]_
def __init__(self, load: float, diameter: float, thickness: float,
a_inv: NDArray[(3, 3), float], theta: float = 0.) -> None:
a_inv = rotate_material_matrix(a_inv, angle=theta)
super().__init__(diameter, thickness, a_inv)
self.p = load
self.theta = theta
self.A, self.A_bar, self.B, self.B_bar = self.equilibrium_constants()
def alpha(self) -> NDArray[FOURIER_TERMS, complex]:
r"""Fourier series coefficients modified for use in stress function equations
Notes
-----
Exact solution:
        .. math:: \frac{P}{2\pi}\int_{-\pi/2}^{\pi/2} \cos^2 \theta \left( \cos m\theta - i \sin m\theta \right) \,d\theta
        .. math:: = \frac{-2 P \sin(\pi m/2)}{\pi m(m^2-4)}
Modifications to the Fourier series coefficients are developed in Eq. 37.2 [2]_
Returns
-------
ndarray
"""
h = self.h
p = self.p
N = self.FOURIER_TERMS
m = np.arange(3, N + 1)
# modification from Eq. 37.2 [2]_
mod = -1/(h*np.pi)
alpha = np.zeros(N)
alpha[:2] = [p*4/(6*np.pi)*mod, p/8*mod]
alpha[2:] = -2*p*np.sin(np.pi*m/2)/(np.pi*m*(m**2 - 4))*mod
# (in Ref. 2 Eq. 37.2, alpha is associated with the y-direction. Can someone explain?)
return alpha
def beta(self) -> NDArray[FOURIER_TERMS, complex]:
r"""Fourier series coefficients modified for use in stress function equations
Notes
-----
Exact solution:
        .. math:: \frac{-P}{2\pi}\int_{-\pi/2}^{\pi/2}\cos\theta\sin\theta\left(\cos m\theta - i \sin m\theta\right)\,d\theta
        .. math:: = -\frac{i P \sin(\pi m/2)}{\pi (m^2-4)}
Modifications to the Fourier series coefficients are developed in Eq. 37.2 [2]_
Returns
-------
complex ndarray
"""
h = self.h
p = self.p
N = self.FOURIER_TERMS
m = np.arange(1, N + 1)
# modification from Eq. 37.2 [2]_
mod = 4 / (np.pi*m**2*h)
beta = np.zeros(N, dtype=complex)
beta[:2] = [-p*1j/(3*np.pi)*mod[0], -1j*p/8*mod[1]]
beta[2:] = 1j*p*np.sin(np.pi*m[2:]/2)/(np.pi*(m[2:]**2 - 4))*mod[2:]
# (in Ref. 2 Eq. 37.2, beta is associated with the x-direction. Can someone explain?)
return beta
def equilibrium_constants(self) -> tuple[float, float, float, float]:
"""Solve for constants of equilibrium
When the plate has loads applied that are not in equilibrium, the unbalanced loads are reacted at infinity.
This function solves for the constant terms in the stress functions that account for these reactions.
Notes
-----
This method implements Eq. 37.5 [2]_. Complex terms have been expanded and resolved for
A, A_bar, B and B_bar (setting Py equal to zero).
Returns
-------
[A, A_bar, B, B_bar] : tuple
real and imaginary parts of constants A and B
"""
R1, R2 = np.real(self.mu1), np.imag(self.mu1)
R3, R4 = np.real(self.mu2), np.imag(self.mu2)
p = self.p
h = self.h
a11 = self.a[0, 0]
a12 = self.a[0, 1]
a22 = self.a[1, 1]
a16 = self.a[0, 2]
pi = np.pi
mu_mat = np.array([[0., 1, 0., 1.],
[R2, R1, R4, R3],
[2*R1*R2, (R1**2 - R2**2), 2*R3*R4, (R3**2 - R4**2)],
[R2/(R1**2 + R2**2), -R1/(R1**2 + R2**2), R4/(R3**2 + R4**2), -R3/(R3**2 + R4**2)]])
load_vec = p/(4.*pi*h) * np.array([0.,
1.,
a16/a11,
a12/a22])
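# The line below solves the 4x4 linear system mu_mat @ [A1, A2, B1, B2] == load_vec by
# explicit inversion; np.linalg.solve(mu_mat, load_vec) would be a numerically preferable
# equivalent (a suggestion, not part of the original source).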
A1, A2, B1, B2 = np.dot(np.linalg.inv(mu_mat), load_vec)
return A1, A2, B1, B2
def phi_1(self, z1: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates the first stress function
Notes
-----
This method implements [Eq. 37.3, Ref. 2]
.. math:: C_m=\frac{\beta_m-\mu_2\alpha_m}{\mu_1-\mu_2}
.. math:: \Phi_1=A\ln{\xi_1}+\sum_{m=1}^{\infty}\frac{C_m}{\xi_1^m}
Parameters
----------
z1 : ndarray
1D complex array first mapping parameter
Returns
-------
ndarray
1D complex array
"""
mu1 = self.mu1
mu2 = self.mu2
A = self.A + 1j * self.A_bar
N = self.FOURIER_TERMS
xi_1, sign_1 = self.xi_1(z1)
m = np.arange(1, N + 1)
alpha = self.alpha()
beta = self.beta()
# return results for each point in xi_1
return np.array([(A*np.log(xi_1[i]) + np.sum((beta - mu2 * alpha) / (mu1 - mu2) / xi_1[i] ** m))
for i in range(len(xi_1))])
def phi_2(self, z2: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates the second stress function
Notes
-----
This method implements [Eq. 37.3, Ref. 2]
.. math:: C_m=\frac{\beta_m-\mu_1\alpha_m}{\mu_1-\mu_2}
.. math:: \Phi_2=B\ln{\xi_2}-\sum_{m=1}^{\infty}\frac{m C_m}{\xi_2^m}
Parameters
----------
z2 : ndarray
1D complex array second mapping parameter
Returns
-------
ndarray
1D complex array
"""
mu1 = self.mu1
mu2 = self.mu2
B = self.B + 1j * self.B_bar
N = self.FOURIER_TERMS
xi_2, sign_2 = self.xi_2(z2)
m = np.arange(1, N + 1)
alpha = self.alpha()
beta = self.beta()
# return results for each point in xi_2
return np.array([(B*np.log(xi_2[i]) - np.sum((beta - mu1 * alpha) / (mu1 - mu2) / xi_2[i] ** m))
for i in range(len(xi_2))])
def phi_1_prime(self, z1: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates derivative of the first stress function
Notes
-----
This method implements [Eq. 37.6, Ref. 2]
.. math:: C_m=\frac{\beta_m-\mu_2\alpha_m}{\mu_1-\mu_2}
.. math:: \eta_1=\pm\sqrt{z_1^2-a^2-\mu_1^2b^2}
.. math:: \Phi_1'=-\frac{1}{\eta_1}(A-\sum_{m=1}^{\infty}\frac{m C_m}{\xi_1^m})
Parameters
----------
z1 : ndarray
1D complex array first mapping parameter
Returns
-------
ndarray
1D complex array
"""
mu1 = self.mu1
mu2 = self.mu2
a = self.r
b = self.r
A = self.A + 1j * self.A_bar
N = self.FOURIER_TERMS
xi_1, sign_1 = self.xi_1(z1)
eta_1 = sign_1 * np.sqrt(z1 * z1 - a * a - b * b * mu1 * mu1)
m = np.arange(1, N + 1)
alpha = self.alpha()
beta = self.beta()
# return results for each point in xi_1
return np.array([1 / eta_1[i] * (A - np.sum(m * (beta - mu2 * alpha) / (mu1 - mu2) / xi_1[i] ** m))
for i in range(len(xi_1))])
def phi_2_prime(self, z2: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates derivative of the second stress function
Notes
-----
This method implements [Eq. 37.6, Ref. 2]
.. math:: C_m=\frac{\beta_m-\mu_1\alpha_m}{\mu_1-\mu_2}
.. math:: \eta_2=\pm\sqrt{z_2^2-a^2-\mu_2^2b^2}
.. math:: \Phi_2'=-\frac{1}{\eta_2}(B+\sum_{m=1}^{\infty}\frac{m C_m}{\xi_2^m})
Parameters
----------
z2 : ndarray
1D complex array second mapping parameter
Returns
-------
ndarray
1D complex array
"""
mu1 = self.mu1
mu2 = self.mu2
a = self.r
b = self.r
B = self.B + 1j * self.B_bar
N = self.FOURIER_TERMS
xi_2, sign_2 = self.xi_2(z2)
eta_2 = sign_2 * np.sqrt(z2 * z2 - a * a - b * b * mu2 * mu2)
m = np.arange(1, N + 1)
alpha = self.alpha()
beta = self.beta()
# return results for each point in xi_2
return np.array([1 / eta_2[i] * (B + np.sum(m * (beta - mu1 * alpha) / (mu1 - mu2) / xi_2[i] ** m))
for i in range(len(xi_2))])
def _cartesian_to_polar(self, x: NDArray[Any, float], y: NDArray[Any, float])\
-> tuple[NDArray[Any, float], NDArray[Any, float]]:
"""(Private method) Converts cartesian points to polar coordinates
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
radii : ndarray
radius of each point
angles : ndarray
angle of each point
"""
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
r = np.sqrt(x**2 + y**2)
# calculate angles and fix signs
angles = np.arccos(np.array([1, 0]).dot(np.array([x, y])) / r)
where_vals = np.nonzero(y)[0]
angles[where_vals] = angles[where_vals] * np.sign(y[where_vals])
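# Aside: the lines above compute arccos(x/r) and then restore the sign from y;
# np.arctan2(y, x) would give the same signed angle directly (up to the r = 0 edge case).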
return r, angles
def _rotate_points(self, x: NDArray[Any, float], y: NDArray[Any, float])\
-> tuple[NDArray[Any, float], NDArray[Any, float]]:
"""(Private method) Rotates points to account for bearing angle
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
x' : ndarray
new x points
y' : ndarray
new y points
"""
# rotation back to original coordinates
rotation = -self.theta
# convert points to polar coordinates
r, angles = self._cartesian_to_polar(x, y)
# rotate coordinates by negative theta
angles += rotation
# convert back to cartesian
x = r * np.cos(angles)
y = r * np.sin(angles)
return x, y
def stress(self, x: NDArray[Any, float], y: NDArray[Any, float]) -> NDArray[(Any, 3), float]:
r""" Calculates the stress at (x, y) points in the plate
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[sx0, sy0, sxy0], [sx1, sy1, sxy1], ... , [sxn, syn, sxyn]]
(n, 3) in-plane stress components in the cartesian coordinate system
"""
# rotate points to account for bearing angle
x, y = self._rotate_points(x, y)
# calculate stresses and rotate back
stresses = super().stress(x, y)
return rotate_stress(stresses, angle=-self.theta)
@_remove_bad_displacments
def displacement(self, x: NDArray[Any, float], y: NDArray[Any, float]) -> NDArray[(Any, 2), float]:
r""" Calculates the displacement at (x, y) points in the plate
Notes
-----
This method implements Eq. 8.3 [2]_
.. math:: u=2Re[p_1\Phi_1(z_1)+p_2\Phi_2(z_2)]
.. math:: v=2Re[q_1\Phi_1(z_1)+q_2\Phi_2(z_2)]
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[u0, v0], [u1, v1], ... , [un, vn]]
(n, 2) in-plane displacement components in the cartesian coordinate system
"""
# rotate points to account for bearing angle
x, y = self._rotate_points(x, y)
return super().displacement(x, y)
|
{"hexsha": "86d1f038438f52414722894dd3c43ec5092b044d", "size": 36019, "ext": "py", "lang": "Python", "max_stars_repo_path": "bjsfm/lekhnitskii.py", "max_stars_repo_name": "BenjaminETaylor/bjsfm", "max_stars_repo_head_hexsha": "a952183f5acca8139a1dd8ab2191c8dd3dc14710", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-08-10T01:52:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T19:23:41.000Z", "max_issues_repo_path": "bjsfm/lekhnitskii.py", "max_issues_repo_name": "BenjaminETaylor/bjsfm", "max_issues_repo_head_hexsha": "a952183f5acca8139a1dd8ab2191c8dd3dc14710", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-10-10T20:38:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-22T02:39:16.000Z", "max_forks_repo_path": "bjsfm/lekhnitskii.py", "max_forks_repo_name": "BenjaminETaylor/bjsfm", "max_forks_repo_head_hexsha": "a952183f5acca8139a1dd8ab2191c8dd3dc14710", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-04T17:17:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-14T13:45:45.000Z", "avg_line_length": 31.212305026, "max_line_length": 127, "alphanum_fraction": 0.5519309253, "include": true, "reason": "import numpy", "num_tokens": 10831}
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
# dictionary used to index the loaded numpy arrays;
# each column in those arrays corresponds to one class
c={1:0,2:1,4:2,5:3,6:4,8:5,13:6}
# creating a directory for storing the averaged results
os.mkdir("avg_sub")
# load the w numpy array, which contains the indices of objects in y
# (the CNN predictions) that are classified correctly
w=np.load("w.npy")
# loading t numpy array which contains indices of objects in w which
# have prediction probability greater than or equal to 0.8
t=np.load("t.npy")
# loading the dmdts
x=np.load("../X_test.npy")
# `one` holds the positions (within t) of objects belonging to class 1;
# similarly for the rest of the classes
one=np.where(t[1]==c[1])
two=np.where(t[1]==c[2])
four=np.where(t[1]==c[4])
five=np.where(t[1]==c[5])
six=np.where(t[1]==c[6])
eight=np.where(t[1]==c[8])
thirteen=np.where(t[1]==c[13])
(l,o,m,n)=x.shape
# creating empty lists
x_1=[]
x_2=[]
x_4=[]
x_5=[]
x_6=[]
x_8=[]
x_13=[]
# concatenating the objects belonging to a particular class
# into a list for each of the classes
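# Reading of the chained indexing used below: one[0][i] picks the i-th object of the
# class within t, t[0][...] maps that back to a position in w, and w[0][...] maps it to
# the corresponding row of the test set x (stated here for clarity; follows from the
# comments above).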
for i in range(len(one[0])):
x_1=x_1+[x[w[0][t[0][one[0][i]]]]]
for i in range(len(two[0])):
x_2=x_2+[x[w[0][t[0][two[0][i]]]]]
for i in range(len(four[0])):
x_4=x_4+[x[w[0][t[0][four[0][i]]]]]
for i in range(len(five[0])):
x_5=x_5+[x[w[0][t[0][five[0][i]]]]]
for i in range(len(six[0])):
x_6=x_6+[x[w[0][t[0][six[0][i]]]]]
for i in range(len(eight[0])):
x_8=x_8+[x[w[0][t[0][eight[0][i]]]]]
for i in range(len(thirteen[0])):
x_13=x_13+[x[w[0][t[0][thirteen[0][i]]]]]
# converting the concatenated lists into a numpy array
x_1=np.array(x_1)
x_2=np.array(x_2)
x_4=np.array(x_4)
x_5=np.array(x_5)
x_6=np.array(x_6)
x_8=np.array(x_8)
x_13=np.array(x_13)
# saving the numpy arrays
np.save("avg_sub/x_1",x_1)
np.save("avg_sub/x_2",x_2)
np.save("avg_sub/x_4",x_4)
np.save("avg_sub/x_5",x_5)
np.save("avg_sub/x_6",x_6)
np.save("avg_sub/x_8",x_8)
np.save("avg_sub/x_13",x_13)
# computing average dmdt for each of the classes
if x_1.size!=0: # only average if the CNN classified at least one object into this class
one_sum=np.zeros((23,24))
for j in range(x_1.shape[2]):
for k in range(x_1.shape[3]):
for i in range(x_1.shape[0]):
one_sum[j,k]=one_sum[j,k]+x_1[i,0,j,k]
one_avg=one_sum/x_1.shape[0]
np.save("avg_sub/1_mean",one_avg)
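# Vectorized aside (not original code): the nested loops above amount to
# one_avg = x_1[:, 0].mean(axis=0); the same holds for the per-class blocks below.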
if x_2.size!=0:
two_sum=np.zeros((23,24))
for j in range(x_2.shape[2]):
for k in range(x_2.shape[3]):
for i in range(x_2.shape[0]):
two_sum[j,k]=two_sum[j,k]+x_2[i,0,j,k]
two_avg=two_sum/x_2.shape[0]
np.save("avg_sub/2_mean",two_avg)
if x_4.size!=0:
four_sum=np.zeros((23,24))
for j in range(x_4.shape[2]):
for k in range(x_4.shape[3]):
for i in range(x_4.shape[0]):
four_sum[j,k]=four_sum[j,k]+x_4[i,0,j,k]
four_avg=four_sum/x_4.shape[0]
np.save("avg_sub/4_mean",four_avg)
if x_5.size!=0:
five_sum=np.zeros((23,24))
for j in range(x_5.shape[2]):
for k in range(x_5.shape[3]):
for i in range(x_5.shape[0]):
five_sum[j,k]=five_sum[j,k]+x_5[i,0,j,k]
five_avg=five_sum/x_5.shape[0]
np.save("avg_sub/5_mean",five_avg)
if x_6.size!=0:
six_sum=np.zeros((23,24))
for j in range(x_6.shape[2]):
for k in range(x_6.shape[3]):
for i in range(x_6.shape[0]):
six_sum[j,k]=six_sum[j,k]+x_6[i,0,j,k]
six_avg=six_sum/x_6.shape[0]
np.save("avg_sub/6_mean",six_avg)
if x_8.size!=0:
eight_sum=np.zeros((23,24))
for j in range(x_8.shape[2]):
for k in range(x_8.shape[3]):
for i in range(x_8.shape[0]):
eight_sum[j,k]=eight_sum[j,k]+x_8[i,0,j,k]
eight_avg=eight_sum/x_8.shape[0]
np.save("avg_sub/8_mean",eight_avg)
if x_13.size!=0:
thirteen_sum=np.zeros((23,24))
for j in range(x_13.shape[2]):
for k in range(x_13.shape[3]):
for i in range(x_13.shape[0]):
thirteen_sum[j,k]=thirteen_sum[j,k]+x_13[i,0,j,k]
thirteen_avg=thirteen_sum/x_13.shape[0]
np.save("avg_sub/13_mean",thirteen_avg)
# loading m_a numpy array which contains information regarding
# the changes in probabilities encountered by blanking out individual pixels
m=np.load("m_a.npy")
# creating numpy arrays for getting the average change in probabilities for
# each of the classes
m_1=np.zeros((23,24))
m_2=np.zeros((23,24))
m_4=np.zeros((23,24))
m_5=np.zeros((23,24))
m_6=np.zeros((23,24))
m_8=np.zeros((23,24))
m_13=np.zeros((23,24))
# summing the changes in probabilities for each of the classes
for i in range(len(one[0])):
m_1=m_1+m[w[0][t[0][one[0][i]]]]
for i in range(len(two[0])):
m_2=m_2+m[w[0][t[0][two[0][i]]]]
for i in range(len(four[0])):
m_4=m_4+m[w[0][t[0][four[0][i]]]]
for i in range(len(five[0])):
m_5=m_5+m[w[0][t[0][five[0][i]]]]
for i in range(len(six[0])):
m_6=m_6+m[w[0][t[0][six[0][i]]]]
for i in range(len(eight[0])):
m_8=m_8+m[w[0][t[0][eight[0][i]]]]
for i in range(len(thirteen[0])):
m_13=m_13+m[w[0][t[0][thirteen[0][i]]]]
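# Vectorized aside (not original code): each per-class sum above equals
# m[idx].sum(axis=0), where idx collects the mapped indices w[0][t[0][...]] for that class.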
# computing the average of changes in probabilities for each of the classes
if len(one[0])!=0: # only average if the CNN classified at least one object into this class
m_1_avg=m_1/len(one[0])
np.save("avg_sub/m_1_mean",m_1_avg)
if len(two[0])!=0:
m_2_avg=m_2/len(two[0])
np.save("avg_sub/m_2_mean",m_2_avg)
if len(four[0])!=0:
m_4_avg=m_4/len(four[0])
np.save("avg_sub/m_4_mean",m_4_avg)
if len(five[0])!=0:
m_5_avg=m_5/len(five[0])
np.save("avg_sub/m_5_mean",m_5_avg)
if len(six[0])!=0:
m_6_avg=m_6/len(six[0])
np.save("avg_sub/m_6_mean",m_6_avg)
if len(eight[0])!=0:
m_8_avg=m_8/len(eight[0])
np.save("avg_sub/m_8_mean",m_8_avg)
if len(thirteen[0])!=0:
m_13_avg=m_13/len(thirteen[0])
np.save("avg_sub/m_13_mean",m_13_avg)
# define helper functions for translucent/transparent colormaps, intended for
# overlaying the heatmaps of averaged probability changes on the averaged dmdts
# for each of the classes
def transparent_cmap(cmap, N=255):
"Copy colormap and set alpha values"
mycmap = cmap
mycmap._init()
mycmap._lut[:,-1] = np.linspace(0, 0.7, N+4)
return mycmap
def inv_transparent_cmap(cmap, N=255):
"Copy colormap and set alpha values"
mycmap = cmap
mycmap._init()
mycmap._lut[:,-1] = np.linspace(0.7, 0, N+4)
return mycmap
# assigning the colormap for heatmaps
mycmap = transparent_cmap(plt.cm.Greens)
inv_mycmap = inv_transparent_cmap(plt.cm.Greens_r)
# defining figure grid
(wh,h,w)=x[99].shape
u,v=np.mgrid[0:h,0:w]
dmints = [-8,-5,-3,-2.5,-2,-1.5,-1,-0.5,-0.3,-0.2,-0.1,0,0.1,0.2,0.3,0.5,1,1.5,2,2.5,3,5,8]
dtints = [1.0/145,2.0/145,3.0/145,4.0/145,1.0/25,2.0/25,3.0/25,1.5,2.5,3.5,4.5,5.5,7,10,20,30,60,90,120,240,600,960,2000,4000]
xloc=np.arange(24)
yloc=np.arange(23)
yloc=yloc[::-1]
for i in range(len(dtints)):
dtints[i]=round(dtints[i],3)
xloc=xloc[1::2]
yloc=yloc[0::2]
dmints=dmints[0::2]
dtints=dtints[1::2]
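# The slicing above keeps every other tick position and label, presumably so the dm/dt
# bin labels do not crowd each other on the plots (an interpretation, not stated in the source).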
##dmdt bins with size according to its original scale
##xloc=(23.0/4000)*np.array(dtints)
##for i in range(len(dtints)):
## dtints[i]=round(dtints[i],3)
##yloc=(-1*(22.0/16)*np.array(dmints))+11
mul=2 #multiplier for xlabels spacing
# plotting, for each class, the averaged dmdt side by side with the heatmap of the
# averaged changes in probabilities
if len(one[0])!=0:
fig,ax=plt.subplots(1,2)
im1=ax[0].imshow(one_avg.reshape((23,24)))
im2=ax[1].imshow(m_1_avg)
divider1 = make_axes_locatable(ax[0])
cax1 = divider1.append_axes("right", size="5%", pad=0.1)
divider2 = make_axes_locatable(ax[1])
cax2 = divider2.append_axes("right", size="5%", pad=0.1)
##cb=ax.contourf(v,u,m_1_avg,15,cmap=inv_mycmap)
##plt.colorbar(cb)
plt.xticks(xloc,dtints,rotation=90)
plt.yticks(yloc,dmints)
ax[0].set_xticks(xloc)
ax[0].set_xticklabels(dtints,rotation=90)
ax[1].set_xticks(xloc)
ax[1].set_xticklabels(dtints,rotation=90)
ax[0].set_yticks(yloc)
ax[0].set_yticklabels(dmints)
ax[1].set_yticks(yloc)
ax[1].set_yticklabels(dmints)
ax[0].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[1].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[0].set_title("Averaged dmdt for class1 ("+str(x_1.shape[0])+")"+"\nMin: "+str(round(one_avg.min(),5))+" Violet"+"\n Max: "+str(round(one_avg.max(),5))+" Yellow")
ax[1].set_title("Averaged Change\n in Probabilities\n Min: "+str(round(m_1_avg.min(),5))+" Violet"+"\n Max: "+str(round(m_1_avg.max(),5))+" Yellow")
fig.colorbar(im1,cax=cax1)
fig.colorbar(im2,cax=cax2)
plt.tight_layout()
#plt.ticks.set_xspacing(0.0005*mul)
plt.savefig("avg_sub/1_mean_heatmap.png")
plt.close()
if len(two[0])!=0:
fig,ax=plt.subplots(1,2)
im1=ax[0].imshow(two_avg.reshape((23,24)))
im2=ax[1].imshow(m_2_avg)
divider1 = make_axes_locatable(ax[0])
cax1 = divider1.append_axes("right", size="5%", pad=0.1)
divider2 = make_axes_locatable(ax[1])
cax2 = divider2.append_axes("right", size="5%", pad=0.1)
##cb=ax.contourf(v,u,m_1_avg,15,cmap=inv_mycmap)
##plt.colorbar(cb)
ax[0].set_xticks(xloc)
ax[0].set_xticklabels(dtints,rotation=90)
ax[1].set_xticks(xloc)
ax[1].set_xticklabels(dtints,rotation=90)
ax[0].set_yticks(yloc)
ax[0].set_yticklabels(dmints)
ax[1].set_yticks(yloc)
ax[1].set_yticklabels(dmints)
ax[0].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[1].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[0].set_title("Averaged dmdt for class2 ("+str(x_2.shape[0])+")"+"\nMin: "+str(round(two_avg.min(),5))+" Violet"+"\n Max: "+str(round(two_avg.max(),5))+" Yellow")
ax[1].set_title("Averaged Change\n in Probabilities\n Min: "+str(round(m_2_avg.min(),5))+" Violet"+"\n Max: "+str(round(m_2_avg.max(),5))+" Yellow")
fig.colorbar(im1,cax=cax1)
fig.colorbar(im2,cax=cax2)
plt.tight_layout()
#plt.ticks.set_xspacing(0.0005*mul)
plt.savefig("avg_sub/2_mean_heatmap.png")
plt.close()
if len(four[0])!=0:
fig,ax=plt.subplots(1,2)
im1=ax[0].imshow(four_avg.reshape((23,24)))
im2=ax[1].imshow(m_4_avg)
divider1 = make_axes_locatable(ax[0])
cax1 = divider1.append_axes("right", size="5%", pad=0.1)
divider2 = make_axes_locatable(ax[1])
cax2 = divider2.append_axes("right", size="5%", pad=0.1)
##cb=ax.contourf(v,u,m_1_avg,15,cmap=inv_mycmap)
##plt.colorbar(cb)
ax[0].set_xticks(xloc)
ax[0].set_xticklabels(dtints,rotation=90)
ax[1].set_xticks(xloc)
ax[1].set_xticklabels(dtints,rotation=90)
ax[0].set_yticks(yloc)
ax[0].set_yticklabels(dmints)
ax[1].set_yticks(yloc)
ax[1].set_yticklabels(dmints)
ax[0].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[1].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[0].set_title("Averaged dmdt for class4 ("+str(x_4.shape[0])+")"+"\nMin: "+str(round(four_avg.min(),5))+" Violet"+"\n Max: "+str(round(four_avg.max(),5))+" Yellow")
ax[1].set_title("Averaged Change\n in Probabilities\n Min: "+str(round(m_4_avg.min(),5))+" Violet"+"\n Max: "+str(round(m_4_avg.max(),5))+" Yellow")
fig.colorbar(im1,cax=cax1)
fig.colorbar(im2,cax=cax2)
plt.tight_layout()
#plt.ticks.set_xspacing(0.0005*mul)
plt.savefig("avg_sub/4_mean_heatmap.png")
plt.close()
if len(five[0])!=0:
fig,ax=plt.subplots(1,2)
im1=ax[0].imshow(five_avg.reshape((23,24)))
im2=ax[1].imshow(m_5_avg)
divider1 = make_axes_locatable(ax[0])
cax1 = divider1.append_axes("right", size="5%", pad=0.1)
divider2 = make_axes_locatable(ax[1])
cax2 = divider2.append_axes("right", size="5%", pad=0.1)
##cb=ax.contourf(v,u,m_1_avg,15,cmap=inv_mycmap)
##plt.colorbar(cb)
ax[0].set_xticks(xloc)
ax[0].set_xticklabels(dtints,rotation=90)
ax[1].set_xticks(xloc)
ax[1].set_xticklabels(dtints,rotation=90)
ax[0].set_yticks(yloc)
ax[0].set_yticklabels(dmints)
ax[1].set_yticks(yloc)
ax[1].set_yticklabels(dmints)
ax[0].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[1].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[0].set_title("Averaged dmdt for class5 ("+str(x_5.shape[0])+")"+"\nMin: "+str(round(five_avg.min(),5))+" Violet"+"\n Max: "+str(round(five_avg.max(),5))+" Yellow")
ax[1].set_title("Averaged Change\n in Probabilities\n Min: "+str(round(m_5_avg.min(),5))+" Violet"+"\n Max: "+str(round(m_5_avg.max(),5))+" Yellow")
fig.colorbar(im1,cax=cax1)
fig.colorbar(im2,cax=cax2)
plt.tight_layout()
#plt.ticks.set_xspacing(0.0005*mul)
plt.savefig("avg_sub/5_mean_heatmap.png")
plt.close()
if len(six[0])!=0:
fig,ax=plt.subplots(1,2)
im1=ax[0].imshow(six_avg.reshape((23,24)))
im2=ax[1].imshow(m_6_avg)
divider1 = make_axes_locatable(ax[0])
cax1 = divider1.append_axes("right", size="5%", pad=0.1)
divider2 = make_axes_locatable(ax[1])
cax2 = divider2.append_axes("right", size="5%", pad=0.1)
##cb=ax.contourf(v,u,m_1_avg,15,cmap=inv_mycmap)
##plt.colorbar(cb)
ax[0].set_xticks(xloc)
ax[0].set_xticklabels(dtints,rotation=90)
ax[1].set_xticks(xloc)
ax[1].set_xticklabels(dtints,rotation=90)
ax[0].set_yticks(yloc)
ax[0].set_yticklabels(dmints)
ax[1].set_yticks(yloc)
ax[1].set_yticklabels(dmints)
ax[0].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[1].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[0].set_title("Averaged dmdt for class6 ("+str(x_6.shape[0])+")"+"\nMin: "+str(round(six_avg.min(),5))+" Violet"+"\n Max: "+str(round(six_avg.max(),5))+" Yellow")
ax[1].set_title("Averaged Change\n in Probabilities\n Min: "+str(round(m_6_avg.min(),5))+" Violet"+"\n Max: "+str(round(m_6_avg.max(),5))+" Yellow")
fig.colorbar(im1,cax=cax1)
fig.colorbar(im2,cax=cax2)
plt.tight_layout()
#plt.ticks.set_xspacing(0.0005*mul)
plt.savefig("avg_sub/6_mean_heatmap.png")
plt.close()
if len(eight[0])!=0:
fig,ax=plt.subplots(1,2)
im1=ax[0].imshow(eight_avg.reshape((23,24)))
im2=ax[1].imshow(m_8_avg)
divider1 = make_axes_locatable(ax[0])
cax1 = divider1.append_axes("right", size="5%", pad=0.1)
divider2 = make_axes_locatable(ax[1])
cax2 = divider2.append_axes("right", size="5%", pad=0.1)
##cb=ax.contourf(v,u,m_1_avg,15,cmap=inv_mycmap)
##plt.colorbar(cb)
ax[0].set_xticks(xloc)
ax[0].set_xticklabels(dtints,rotation=90)
ax[1].set_xticks(xloc)
ax[1].set_xticklabels(dtints,rotation=90)
ax[0].set_yticks(yloc)
ax[0].set_yticklabels(dmints)
ax[1].set_yticks(yloc)
ax[1].set_yticklabels(dmints)
ax[0].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[1].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[0].set_title("Averaged dmdt for class8 ("+str(x_8.shape[0])+")"+"\nMin: "+str(round(eight_avg.min(),5))+" Violet"+"\n Max: "+str(round(eight_avg.max(),5))+" Yellow")
ax[1].set_title("Averaged Change\n in Probabilities\n Min: "+str(round(m_8_avg.min(),5))+" Violet"+"\n Max: "+str(round(m_8_avg.max(),5))+" Yellow")
fig.colorbar(im1,cax=cax1)
fig.colorbar(im2,cax=cax2)
plt.tight_layout()
#plt.ticks.set_xspacing(0.0005*mul)
plt.savefig("avg_sub/8_mean_heatmap.png")
plt.close()
if len(thirteen[0])!=0:
fig,ax=plt.subplots(1,2)
im1=ax[0].imshow(thirteen_avg.reshape((23,24)))
im2=ax[1].imshow(m_13_avg)
divider1 = make_axes_locatable(ax[0])
cax1 = divider1.append_axes("right", size="5%", pad=0.1)
divider2 = make_axes_locatable(ax[1])
cax2 = divider2.append_axes("right", size="5%", pad=0.1)
##cb=ax.contourf(v,u,m_1_avg,15,cmap=inv_mycmap)
##plt.colorbar(cb)
ax[0].set_xticks(xloc)
ax[0].set_xticklabels(dtints,rotation=90)
ax[1].set_xticks(xloc)
ax[1].set_xticklabels(dtints,rotation=90)
ax[0].set_yticks(yloc)
ax[0].set_yticklabels(dmints)
ax[1].set_yticks(yloc)
ax[1].set_yticklabels(dmints)
ax[0].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[1].set(xlabel="dt(days)",ylabel="dm(mag)")
ax[0].set_title("Averaged dmdt for class13 ("+str(x_13.shape[0])+")"+"\nMin: "+str(round(thirteen_avg.min(),5))+" Violet"+"\n Max: "+str(round(thirteen_avg.max(),5))+" Yellow")
ax[1].set_title("Averaged Change\n in Probabilities\n Min: "+str(round(m_13_avg.min(),5))+" Violet"+"\n Max: "+str(round(m_13_avg.max(),5))+" Yellow")
fig.colorbar(im1,cax=cax1)
fig.colorbar(im2,cax=cax2)
plt.tight_layout()
#plt.ticks.set_xspacing(0.0005*mul)
plt.savefig("avg_sub/13_mean_heatmap.png")
plt.close()
|
{"hexsha": "0ea65948174ebd6bbe21e49c3c8d668c0492ac0e", "size": 16784, "ext": "py", "lang": "Python", "max_stars_repo_path": "periodic variable classification/code/experiments/cnn/blanking_experiments/average_blanking_exp_subplots.py", "max_stars_repo_name": "MeetGandhi/MeetGandhi-Post-hoc-Explainability-of-Deep-Learning-Models-using-Conditional-Adversarial-Networks", "max_stars_repo_head_hexsha": "089226dce6d318247111ea60c2cc15c247b430d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "periodic variable classification/code/experiments/cnn/blanking_experiments/average_blanking_exp_subplots.py", "max_issues_repo_name": "MeetGandhi/MeetGandhi-Post-hoc-Explainability-of-Deep-Learning-Models-using-Conditional-Adversarial-Networks", "max_issues_repo_head_hexsha": "089226dce6d318247111ea60c2cc15c247b430d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "periodic variable classification/code/experiments/cnn/blanking_experiments/average_blanking_exp_subplots.py", "max_forks_repo_name": "MeetGandhi/MeetGandhi-Post-hoc-Explainability-of-Deep-Learning-Models-using-Conditional-Adversarial-Networks", "max_forks_repo_head_hexsha": "089226dce6d318247111ea60c2cc15c247b430d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1129707113, "max_line_length": 180, "alphanum_fraction": 0.6490705434, "include": true, "reason": "import numpy", "num_tokens": 6015}
|
{-# OPTIONS --without-K --safe #-}
module Categories.Category.Monoidal.Construction.Minus2 where
-- Any -2-Category is Monoidal. Of course, One is Monoidal, but
-- we don't need to shrink to do this; it can be done directly.
-- The assumptions in the construction of a -2-Category are all
-- needed to make things work properly.
open import Data.Product using (proj₁; proj₂)
open import Categories.Minus2-Category
open import Categories.Category.Monoidal
import Categories.Morphism as M
-- Doing it manually is just as easy as going through Cartesian here
-2-Monoidal : ∀ {o ℓ e} → (C : -2-Category {o} {ℓ} {e}) → Monoidal (-2-Category.cat C)
-2-Monoidal C = record
{ ⊗ = record
{ F₀ = proj₁
; F₁ = proj₁
; identity = Hom-Conn
; homomorphism = Hom-Conn
; F-resp-≈ = λ _ → Hom-Conn
}
; unit = proj₁ Obj-Contr
; unitorˡ = λ {X} → proj₂ Obj-Contr X
; unitorʳ = M.≅.refl cat
; associator = M.≅.refl cat
; unitorˡ-commute-from = Hom-Conn
; unitorˡ-commute-to = Hom-Conn
; unitorʳ-commute-from = Hom-Conn
; unitorʳ-commute-to = Hom-Conn
; assoc-commute-from = Hom-Conn
; assoc-commute-to = Hom-Conn
; triangle = Hom-Conn
; pentagon = Hom-Conn
}
where
open -2-Category C
|
{"hexsha": "5c8d655828961c270eeeb76b4d5291097b569bf0", "size": 1228, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/Categories/Category/Monoidal/Construction/Minus2.agda", "max_stars_repo_name": "Trebor-Huang/agda-categories", "max_stars_repo_head_hexsha": "d9e4f578b126313058d105c61707d8c8ae987fa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 279, "max_stars_repo_stars_event_min_datetime": "2019-06-01T14:36:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T00:40:14.000Z", "max_issues_repo_path": "src/Categories/Category/Monoidal/Construction/Minus2.agda", "max_issues_repo_name": "Code-distancing/agda-categories", "max_issues_repo_head_hexsha": "d9e4f578b126313058d105c61707d8c8ae987fa8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 236, "max_issues_repo_issues_event_min_datetime": "2019-06-01T14:53:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T14:31:43.000Z", "max_forks_repo_path": "src/Categories/Category/Monoidal/Construction/Minus2.agda", "max_forks_repo_name": "Code-distancing/agda-categories", "max_forks_repo_head_hexsha": "d9e4f578b126313058d105c61707d8c8ae987fa8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 64, "max_forks_repo_forks_event_min_datetime": "2019-06-02T16:58:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T02:00:59.000Z", "avg_line_length": 29.9512195122, "max_line_length": 86, "alphanum_fraction": 0.6701954397, "num_tokens": 406}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request, redirect,Response
from flask_socketio import SocketIO, emit,send
import flask_socketio
from threading import Lock
import torch
from torch import nn
import os
import itertools
import glob
import math
import sys
import numpy as np
from pydub import AudioSegment as am
from VAD import denoiser_VAD
import timeit
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
learning_rate = 0.001
# Define Global Variable
LIVE = 1
SAMPLE_RATE = 16000
SAMPLE_CHANNELS = 1
SAMPLE_WIDTH = 2
BATCH_SIZE = 1
# Frame size to use for the labelling.
FRAME_SIZE_MS = 30
# Calculate frame size in data points.
FRAME_SIZE = int(SAMPLE_RATE * (FRAME_SIZE_MS / 1000.0))
FRAMES = 20
FEATURES = 12
import pyaudio
import sounddevice as sd
import torch
OBJ_CUDA = torch.cuda.is_available()
if OBJ_CUDA:
print('CUDA has been enabled.')
else:
print('CUDA has been disabled.')
import torch.nn as nn
from torch.nn import Linear, RNN, LSTM, GRU
import torch.nn.functional as F
from torch.nn.functional import softmax, relu
from torch.autograd import Variable
import VAD
RESULT = ""
import time
import logging
start = 0
import threading
import time
def query_devices(device, kind):
try:
caps = sd.query_devices(device, kind=kind)
except ValueError:
sys.exit(1)
return caps
time_count = 0
class myThread(threading.Thread):
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
sample_rate = 16_000
caps = query_devices(None, "input")
channels_in = min(caps['max_input_channels'], 2)
self.stream_in = sd.InputStream(
device=None,
samplerate=sample_rate,
channels=channels_in)
self.frame = np.array([0]*30)
def run(self):
while True:
# timeit.timeit('test(self.stream_in,self.name)', 'from __main__ import test','from myTread import self.stream_in,self.name',number = 1000)
print(timeit.timeit(lambda:test(self.frame,self.name), number=1))
# timeit.timeit(lambda:test(self.frame,self.name), number=9)
# time_taken = ((time.time()-start)/9)*1000
# if time_taken > 30:
# print(alert!)
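# Interpretation of the loop above (not original code): each iteration times a single
# denoiser_VAD call on the current frame; the commented-out lines suggest comparing that
# latency against the 30 ms frame budget (FRAME_SIZE_MS) and alerting when it is exceeded.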
def test(frame,name):
VAD_RESULT = denoiser_VAD(frame)
start = time.time()
thread= []
for i in range (0,1):
thread.append(myThread(i, "Thread-%s"%i, i))
thread[i].start()
|
{"hexsha": "a0d762488c013ddba2b2f63483df46797db059ac", "size": 2631, "ext": "py", "lang": "Python", "max_stars_repo_path": "etc/individual modules/Voice_Activity_Detection/performance_test/whole process/app.py", "max_stars_repo_name": "yuzhouhe2000/video-conference-enhancer", "max_stars_repo_head_hexsha": "46aa130c0b7f02db5055c8d15877c8287c2276c7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-07-15T12:02:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T09:15:49.000Z", "max_issues_repo_path": "etc/individual modules/Voice_Activity_Detection/performance_test/whole process/app.py", "max_issues_repo_name": "yuzhouhe2000/video-conference-enhancer", "max_issues_repo_head_hexsha": "46aa130c0b7f02db5055c8d15877c8287c2276c7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-28T09:58:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-04T15:07:17.000Z", "max_forks_repo_path": "etc/individual modules/Voice_Activity_Detection/performance_test/whole process/app.py", "max_forks_repo_name": "yuzhouhe2000/video-conference-enhancer", "max_forks_repo_head_hexsha": "46aa130c0b7f02db5055c8d15877c8287c2276c7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-28T09:48:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-22T06:51:23.000Z", "avg_line_length": 21.7438016529, "max_line_length": 151, "alphanum_fraction": 0.6807297605, "include": true, "reason": "import numpy", "num_tokens": 671}
|
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2019 Erik T. Everson and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
"""
Module for the NIXYZ motion control mapper
`~bapsflib._hdf.maps.controls.nixyz.HDFMapControlNIXYZ`.
"""
__all__ = ["HDFMapControlNIXYZ"]
import astropy.units as u
import h5py
import numpy as np
from warnings import warn
from bapsflib.utils import _bytes_to_str
from bapsflib.utils.exceptions import HDFMappingError
from .templates import HDFMapControlTemplate
from .types import ConType
class HDFMapControlNIXYZ(HDFMapControlTemplate):
"""
Mapping module for control device 'NI_XYZ'.
Simple group structure looks like:
.. code-block:: none
+-- NI_XYZ
| +-- <motion list name 1>
| | +--
.
.
.
| +-- <motion list name N>
| | +--
| +-- Run time list
"""
def __init__(self, group: h5py.Group):
"""
:param group: the HDF5 control device group
"""
HDFMapControlTemplate.__init__(self, group)
# define control type
self._info["contype"] = ConType.motion
# populate self.configs
self._build_configs()
def _build_configs(self):
"""Build the :attr:`configs` dictionary"""
# Assumptions:
# 1. only one NI_XYZ drive was ever built, so there will always
# be only one configuration
# - naming configuration 'config01'
# 2. there's only one dataset ever created 'Run time list'
# 3. there can be multiple motion lists defined
# - each sub-group is a configuration for a different
# motion list
# - the name of the sub-group is the name of the motion list
#
# initialize configuration
cname = "config01"
self.configs[cname] = {}
# check there are existing motion lists
if len(self.subgroup_names) == 0:
warn(f"{self.info['group path']}: no defining motion list groups exist")
# get dataset
try:
dset = self.group[self.construct_dataset_name()]
except KeyError:
why = f"Dataset '{self.construct_dataset_name()}' not found"
raise HDFMappingError(self.info["group path"], why=why)
# ---- define general config values ----
self.configs[cname].update(
{
"Note": "The 'r', 'theta', and 'phi' fields in the "
"NI_XYZ data set are suppose to represent "
"spherical coordinates of the probe tip with "
"respect to the pivot point of the probe drive, "
"but the current calculation and population of the"
"fields is inaccurate. For user reference, the "
"distance between the probe drive pivot point and"
"LaPD axis is (Lpp =) 58.771 cm.",
"Lpp": 58.771 * u.cm,
}
)
# ---- define motion list values ----
self.configs[cname]["motion lists"] = {}
# get sub-group names (i.e. ml names)
_ml_names = []
for name in self.group:
if isinstance(self.group[name], h5py.Group):
_ml_names.append(name)
# a motion list group must have the attributes
# Nx, Ny, Nz, dx, dy, dz, x0, y0, z0
names_to_remove = []
for name in _ml_names:
if all(
attr not in self.group[name].attrs
for attr in ("Nx", "Ny", "Nz", "dx", "dy", "dz", "x0", "y0", "z0")
):
names_to_remove.append(name)
if bool(names_to_remove):
for name in names_to_remove:
_ml_names.remove(name)
# warn if no motion lists exist
if not bool(_ml_names):
why = "NI_XYZ has no identifiable motion lists"
warn(why)
# gather ML config values
pairs = [
("Nx", "Nx"),
("Ny", "Ny"),
("Nz", "Nz"),
("dx", "dx"),
("dy", "dy"),
("dz", "dz"),
("fan_XYZ", "fan_XYZ"),
("max_ydrive_steps", "max_ydrive_steps"),
("min_ydrive_steps", "min_ydrive_steps"),
("max_zdrive_steps", "max_zdrive_steps"),
("min_zdrive_steps", "min_zdrive_steps"),
("x0", "x0"),
("y0", "y0"),
("z0", "z0"),
("port", "z_port"),
]
for name in _ml_names:
# initialize ML dictionary
self.configs[cname]["motion lists"][name] = {}
# add ML values
for pair in pairs:
try:
# get attribute value
val = self.group[name].attrs[pair[1]]
# condition value
if np.issubdtype(type(val), np.bytes_):
# - val is a np.bytes_ string
val = _bytes_to_str(val)
if pair[1] == "fan_XYZ":
# convert to boolean
if val == "TRUE":
val = True
else:
val = False
# assign val to configs
self.configs[cname]["motion lists"][name][pair[0]] = val
except KeyError:
self.configs[cname]["motion lists"][name][pair[0]] = None
why = (
f"Motion List attribute '{pair[1]}' not found for "
f"ML group '{name}'"
)
warn(why)
# ---- define 'dset paths' ----
self.configs[cname]["dset paths"] = (dset.name,)
# ---- define 'shotnum' ----
# check dset for 'Shot number' field
if "Shot number" not in dset.dtype.names:
why = f"Dataset '{dset.name}' is missing 'Shot number' field"
raise HDFMappingError(self.info["group path"], why=why)
# initialize
self.configs[cname]["shotnum"] = {
"dset paths": self.configs[cname]["dset paths"],
"dset field": ("Shot number",),
"shape": dset.dtype["Shot number"].shape,
"dtype": np.int32,
}
# ---- define 'state values' ----
self._configs[cname]["state values"] = {
"xyz": {
"dset paths": self._configs[cname]["dset paths"],
"dset field": ("x", "y", "z"),
"shape": (3,),
"dtype": np.float64,
},
}
# check dset for 'x', 'y' and 'z' fields
fx = "x" not in dset.dtype.names
fy = "y" not in dset.dtype.names
fz = "z" not in dset.dtype.names
if fx and fy and fz:
why = f"Dataset '{dset.name}' missing fields 'x', 'y' and 'z'"
raise HDFMappingError(self.info["group path"], why=why)
elif fx or fy or fz:
mlist = [("x", fx), ("y", fy), ("z", fz)]
missf = ", ".join([val for val, bol in mlist if bol])
why = f" Dataset '{dset.name}' missing field '{missf}'"
warn(why)
def construct_dataset_name(self, *args) -> str:
"""
Constructs name of dataset containing control state value data.
"""
return "Run time list"
|
{"hexsha": "1733eceb33f31b8807ab32c01cbe6c93b604d9dc", "size": 7733, "ext": "py", "lang": "Python", "max_stars_repo_path": "bapsflib/_hdf/maps/controls/nixyz.py", "max_stars_repo_name": "BaPSF/bapsflib", "max_stars_repo_head_hexsha": "999c88f813d3a7c5c244a77873850c5c5a4042b8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-07-05T21:37:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T00:41:52.000Z", "max_issues_repo_path": "bapsflib/_hdf/maps/controls/nixyz.py", "max_issues_repo_name": "BaPSF/bapsflib", "max_issues_repo_head_hexsha": "999c88f813d3a7c5c244a77873850c5c5a4042b8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 54, "max_issues_repo_issues_event_min_datetime": "2018-08-19T00:28:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T17:16:22.000Z", "max_forks_repo_path": "bapsflib/_hdf/maps/controls/nixyz.py", "max_forks_repo_name": "rocco8773/bapsflib", "max_forks_repo_head_hexsha": "999c88f813d3a7c5c244a77873850c5c5a4042b8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-08-18T00:16:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T00:06:33.000Z", "avg_line_length": 34.3688888889, "max_line_length": 84, "alphanum_fraction": 0.5001939739, "include": true, "reason": "import numpy,import astropy", "num_tokens": 1821}
|
import matplotlib.pyplot as plt
import numpy as np
model, explain, faith = [],[],[]
model.extend([0,0.07])
explain.extend([0,0.073])
for i in range(10):
model.append(0.073)
explain.append(0.073+(i+1)*0.001)
faith.append(np.corrcoef(model, explain)[0, 1])
print("model ", model)
print("explain", explain)
print()
print(faith)
plt.scatter(model, explain)
plt.show()
# the correlation is nearly 1. Even though the explainer is pointing at features with very small impact, which the model is not using
# but, is that undesirable behaviour? Th explainer is 'forced' to find 10 features, but it does only give them a tiny importance.
|
{"hexsha": "ceeff486cb2060eadbae6cd72c4acb03becfabbe", "size": 643, "ext": "py", "lang": "Python", "max_stars_repo_path": "faithfulness test.py", "max_stars_repo_name": "marnixm/lime_experiments", "max_stars_repo_head_hexsha": "0b6b2acddff3f55c022c7a5eb28a150ab9e4c846", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "faithfulness test.py", "max_issues_repo_name": "marnixm/lime_experiments", "max_issues_repo_head_hexsha": "0b6b2acddff3f55c022c7a5eb28a150ab9e4c846", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "faithfulness test.py", "max_forks_repo_name": "marnixm/lime_experiments", "max_forks_repo_head_hexsha": "0b6b2acddff3f55c022c7a5eb28a150ab9e4c846", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.619047619, "max_line_length": 133, "alphanum_fraction": 0.7200622084, "include": true, "reason": "import numpy", "num_tokens": 173}
|
##########################################################
#
#
#This IFMR comes from Raithel et al. 2017
#https://arxiv.org/pdf/1712.00021.pdf
#
#
#########################################################
import numpy as np
class IFMR(object):
"""
The IFMR base class. The IFMR is a combination of the
WD IFMR from
`Kalirai et al. (2008) <https://ui.adsabs.harvard.edu/abs/2008ApJ...676..594K/abstract>`_
and the NS/BH IFMR from
`Raithel et al. (2018) <https://ui.adsabs.harvard.edu/abs/2018ApJ...856...35R/abstract>`_.
See Lam et al. (submitted) for more details.
"""
def __init__(self):
pass
def BH_mass_core_low(self, MZAMS):
"""
Eqn (1)
Paper: 15 < MZAMS < 40
Extended here to: 15 < MZAMS < 42.22
"""
return -2.024 + 0.4130*MZAMS
def BH_mass_all_low(self, MZAMS):
"""
Eqn (2)
Paper: 15 < MZAMS < 40
Extended here to: 15 < MZAMS < 42.22
"""
return 16.28 + 0.00694 * (MZAMS - 21.872) - 0.05973 * (MZAMS - 21.872)**2 + 0.003112 * (MZAMS - 21.872)**3
def BH_mass_high(self, MZAMS):
"""
Eqn (3)
Paper: 45 < MZAMS < 120
Extended here to: 42.22 < MZAMS < 120
"""
return 5.795 + 1.007 * 10**9 * MZAMS**-4.926
def BH_mass_low(self, MZAMS, f_ej):
"""
Eqn (4)
Paper: 15 < MZAMS < 40
Extended here to: 15 < MZAMS < 42.22
"""
return f_ej * self.BH_mass_core_low(MZAMS) + (1 - f_ej) * self.BH_mass_all_low(MZAMS)
def NS_mass(self, MZAMS):
"""
Paper: 9 < MZAMS < 120
Simplify to just return one value
"""
return 1.6 * np.ones(len(MZAMS))
def WD_mass(self, MZAMS):
"""
From Kalirai+07
1.16 < MZAMS < 6.5
FIXME: need to extend these ranges...
"""
return 0.109*MZAMS + 0.394
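# Worked example of the linear relation above (illustrative only): an initial mass of
# 5 M_sun gives 0.109*5 + 0.394 = 0.939 M_sun for the white dwarf remnant.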
def generate_death_mass(self, mass_array):
"""
The top-level function that assigns the remnant type
and mass based on the stellar initial mass.
Parameters
----------
mass_array: array of floats
Array of initial stellar masses. Units are
M_sun.
Notes
------
The output typecode tells what compact object formed:
* WD: typecode = 101
* NS: typecode = 102
* BH: typecode = 103
A typecode of value -1 means you're outside the range of
validity for applying the ifmr formula.
A remnant mass of -99 means you're outside the range of
validity for applying the ifmr formula.
Range of validity: 0.5 < MZAMS < 120
Returns
-------
output_array: 2-element array
output_array[0] contains the remnant mass, and
output_array[1] contains the typecode
"""
#output_array[0] holds the remnant mass
#output_array[1] holds the remnant type
output_array = np.zeros((2, len(mass_array)))
#Random array to get probabilities for what type of object will form
random_array = np.random.randint(1, 1001, size = len(mass_array))
codes = {'WD': 101, 'NS': 102, 'BH': 103}
"""
The id_arrays are to separate all the different formation regimes
"""
id_array0 = np.where((mass_array < 0.5) | (mass_array >= 120))
output_array[0][id_array0] = -99  # broadcasts over the selection; outside range of validity
output_array[1][id_array0] = -1
id_array1 = np.where((mass_array >= 0.5) & (mass_array < 9))
output_array[0][id_array1] = self.WD_mass(mass_array[id_array1])
output_array[1][id_array1]= codes['WD']
id_array2 = np.where((mass_array >= 9) & (mass_array < 15))
output_array[0][id_array2] = self.NS_mass(mass_array[id_array2])
output_array[1][id_array2] = codes['NS']
id_array3_BH = np.where((mass_array >= 15) & (mass_array < 17.8) & (random_array > 679))
output_array[0][id_array3_BH] = self.BH_mass_low(mass_array[id_array3_BH], 0.9)
output_array[1][id_array3_BH] = codes['BH']
id_array3_NS = np.where((mass_array >= 15) & (mass_array < 17.8) & (random_array <= 679))
output_array[0][id_array3_NS] = self.NS_mass(mass_array[id_array3_NS])
output_array[1][id_array3_NS] = codes['NS']
id_array4_BH = np.where((mass_array >= 17.8) & (mass_array < 18.5) & (random_array > 833))
output_array[0][id_array4_BH]= self.BH_mass_low(mass_array[id_array4_BH], 0.9)
output_array[1][id_array4_BH] = codes['BH']
id_array4_NS = np.where((mass_array >= 17.8) & (mass_array < 18.5) & (random_array <= 833))
output_array[0][id_array4_NS] = self.NS_mass(mass_array[id_array4_NS])
output_array[1][id_array4_NS] = codes['NS']
id_array5_BH = np.where((mass_array >= 18.5) & (mass_array < 21.7) & (random_array > 500))
output_array[0][id_array5_BH] = self.BH_mass_low(mass_array[id_array5_BH], 0.9)
output_array[1][id_array5_BH] = codes['BH']
id_array5_NS = np.where((mass_array >= 18.5) & (mass_array < 21.7) & (random_array <= 500))
output_array[0][id_array5_NS] = self.NS_mass(mass_array[id_array5_NS])
output_array[1][id_array5_NS] = codes['NS']
id_array6 = np.where((mass_array >= 21.7) & (mass_array < 25.2))
output_array[0][id_array6] = self.BH_mass_low(mass_array[id_array6], 0.9)
output_array[1][id_array6]= codes['BH']
id_array7_BH = np.where((mass_array >= 25.2) & (mass_array < 27.5) & (random_array > 652))
output_array[0][id_array7_BH] = self.BH_mass_low(mass_array[id_array7_BH], 0.9)
output_array[1][id_array7_BH] = codes['BH']
id_array7_NS = np.where((mass_array >= 25.2) & (mass_array < 27.5) & (random_array <= 652))
output_array[0][id_array7_NS] = self.NS_mass(mass_array[id_array7_NS])
output_array[1][id_array7_NS] = codes['NS']
id_array8 = np.where((mass_array >= 27.5) & (mass_array < 42.22))
output_array[0][id_array8] = self.BH_mass_low(mass_array[id_array8], 0.9)
output_array[1][id_array8] = codes['BH']
id_array9 = np.where((mass_array >= 42.22) & (mass_array < 60))
output_array[0][id_array9] = self.BH_mass_high(mass_array[id_array9])
output_array[1][id_array9] = codes['BH']
id_array10_BH = np.where((mass_array >= 60) & (mass_array < 120) & (random_array > 400))
output_array[0][id_array10_BH] = self.BH_mass_high(mass_array[id_array10_BH])
output_array[1][id_array10_BH] = codes['BH']
id_array10_NS = np.where((mass_array >= 60) & (mass_array < 120) & (random_array <= 400))
output_array[0][id_array10_NS] = self.NS_mass(mass_array[id_array10_NS])
output_array[1][id_array10_NS] = codes['NS']
return(output_array)
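# Illustrative usage sketch (masses below are hypothetical, not from the original source):
#     ifmr = IFMR()
#     remnant_mass, typecode = ifmr.generate_death_mass(np.array([1.0, 10.0, 30.0, 80.0]))
#     # typecode: 101 = WD, 102 = NS, 103 = BH, -1 = outside the range of validity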
|
{"hexsha": "90265cd9d440643ae2ee89f166aed949469744e0", "size": 8989, "ext": "py", "lang": "Python", "max_stars_repo_path": "popstar/ifmr.py", "max_stars_repo_name": "samrose30/PyPopStar", "max_stars_repo_head_hexsha": "de32db0662c61dbb1141d3acedb7cc2be06bb1dd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "popstar/ifmr.py", "max_issues_repo_name": "samrose30/PyPopStar", "max_issues_repo_head_hexsha": "de32db0662c61dbb1141d3acedb7cc2be06bb1dd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "popstar/ifmr.py", "max_forks_repo_name": "samrose30/PyPopStar", "max_forks_repo_head_hexsha": "de32db0662c61dbb1141d3acedb7cc2be06bb1dd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.8532608696, "max_line_length": 114, "alphanum_fraction": 0.4542218267, "include": true, "reason": "import numpy", "num_tokens": 2187}
|
#!/usr/bin/env python3
# MHD linear modes convergence plots
import os,sys
import numpy as np
import matplotlib.pyplot as plt
import pyHARM
from pyHARM.parameters import parse_parthenon_dat
RES = [int(x) for x in sys.argv[1].split(",")]
BASE = "../../"
LONG = sys.argv[2]
SHORT = sys.argv[3]
NVAR = 8
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4
k1 = 2.*np.pi
k2 = 2.*np.pi
k3 = 2.*np.pi
var0 = np.zeros(NVAR)
# Background
var0[0] = 1.
var0[1] = 1.
# Magnetic field
var0[5] = 1.
var0[6] = 0.
var0[7] = 0.
L1 = []
# EIGENMODES
dvar = np.zeros(NVAR)
if "entropy" in SHORT:
dvar[0] = 1.
if "slow" in SHORT:
dvar[0] = 0.556500332363
dvar[1] = 0.742000443151
dvar[2] = -0.282334999306
dvar[3] = 0.0367010491491
dvar[4] = 0.0367010491491
dvar[5] = -0.195509141461
dvar[6] = 0.0977545707307
dvar[7] = 0.0977545707307
if "alfven" in SHORT:
dvar[3] = -0.339683110243
dvar[4] = 0.339683110243
dvar[6] = 0.620173672946
dvar[7] = -0.620173672946
if "fast" in SHORT:
dvar[0] = 0.481846076323
dvar[1] = 0.642461435098
dvar[2] = -0.0832240462505
dvar[3] = -0.224080007379
dvar[4] = -0.224080007379
dvar[5] = 0.406380545676
dvar[6] = -0.203190272838
dvar[7] = -0.203190272838
dvar *= amp
# USE DUMPS IN FOLDERS OF GIVEN FORMAT
for m, res in enumerate(RES):
params = parse_parthenon_dat(BASE+"pars/mhdmodes.par")
params['n1'] = params['n1tot'] = params['nx1'] = res
params['n2'] = params['n2tot'] = params['nx2'] = res
params['n3'] = params['n3tot'] = params['nx3'] = res
dump = pyHARM.load_dump("mhd_3d_{}_end_{}.phdf".format(res, SHORT), params=params)
X1 = dump['x']
X2 = dump['y']
X3 = dump['z']
dvar_code = []
dvar_code.append(dump['RHO'] - var0[0])
dvar_code.append(dump['UU'] - var0[1])
dvar_code.append(dump['U1'] - var0[2])
dvar_code.append(dump['U2'] - var0[3])
dvar_code.append(dump['U3'] - var0[4])
dvar_code.append(dump['B1'] - var0[5])
dvar_code.append(dump['B2'] - var0[6])
dvar_code.append(dump['B3'] - var0[7])
dvar_sol = []
L1.append([])
for k in range(NVAR):
dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1 + k2*X2 + k3*X3))
L1[m].append(np.mean(np.fabs(dvar_code[k] - dvar_sol[k])))
# MEASURE CONVERGENCE
L1 = np.array(L1)
powerfits = [0.,]*NVAR
for k in range(NVAR):
if abs(dvar[k]) != 0.:
powerfits[k] = np.polyfit(np.log(RES), np.log(L1[:,k]), 1)[0]
print("Power fit var {}: {}".format(k, powerfits[k]))
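# For the expected second-order convergence the fitted slope should be close to -2,
# which is what the N^-2 reference line in the plot below illustrates.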
# MAKE PLOTS
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(1,1,1)
for k in range(NVAR):
if abs(dvar[k]) != 0.:
ax.plot(RES, L1[:,k], marker='s', label=VARS[k])
norm = L1[0,0]*RES[0]*RES[0]
if norm < 1e-4:
norm = L1[0,3]*RES[0]*RES[0]
xmin = RES[0]/2.
xmax = RES[-1]*2.
ax.plot([xmin, xmax], norm*np.asarray([xmin, xmax])**-2., color='k', linestyle='--', label='N^-2')
plt.xscale('log', base=2); plt.yscale('log')
plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
plt.xlabel('N'); plt.ylabel('L1')
#plt.title("MHD mode test convergence, {}".format(LONG))
plt.legend(loc=1)
plt.savefig("convergence_modes_{}.png".format(SHORT))
|
{"hexsha": "3b59dea05acf5d2c0585c062db2add8d11001533", "size": 3223, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/convergence/plot_convergence_modes.py", "max_stars_repo_name": "vedantdhruv96/kharma", "max_stars_repo_head_hexsha": "1159aa53d060087e1723166ceb922bd634c14a97", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-07-16T02:14:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-25T22:51:45.000Z", "max_issues_repo_path": "tests/convergence/plot_convergence_modes.py", "max_issues_repo_name": "vedantdhruv96/kharma", "max_issues_repo_head_hexsha": "1159aa53d060087e1723166ceb922bd634c14a97", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2020-05-04T15:49:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T22:42:39.000Z", "max_forks_repo_path": "tests/convergence/plot_convergence_modes.py", "max_forks_repo_name": "vedantdhruv96/kharma", "max_forks_repo_head_hexsha": "1159aa53d060087e1723166ceb922bd634c14a97", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-12-01T23:25:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:03:48.000Z", "avg_line_length": 26.4180327869, "max_line_length": 98, "alphanum_fraction": 0.5982004344, "include": true, "reason": "import numpy", "num_tokens": 1303}
|
//////////////////////////////////////////////////////////////////
//
// FreeLing - Open Source Language Analyzers
//
// Copyright (C) 2004 TALP Research Center
// Universitat Politecnica de Catalunya
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// contact: Lluis Padro (padro@lsi.upc.es)
// TALP Research Center
// despatx C6.212 - Campus Nord UPC
// 08034 Barcelona. SPAIN
//
////////////////////////////////////////////////////////////////
//------------------------------------------------------------------//
// adapted sample_analyzer.cc from FreeLing,
// - output format is conll
// some changes:
// - proper nouns get pos=nc (common noun)
// -> easier for parser, but morph column
// contains np=typeOfNp, so the original tag can
// later be restored
//
//------------------------------------------------------------------//
#include <sstream>
#include <iostream>
#include <map>
#include <list>
#include <boost/algorithm/string.hpp>
/// headers to call freeling library
#include "analyzer.h"
/// config file/options handler for this particular sample application
#include "config.h"
/// functions to print results depending on configuration options
#include "output_squoia.h"
// Semaphores and stuff to handle children count in server mode
#ifdef WIN32
#define getpid() GetCurrentProcessId()
#define pid_t DWORD
#else
#include <sys/wait.h>
#include <semaphore.h>
sem_t semaf;
#endif
// client/server communication
#include "socket.h"
// server performance statistics
#include "stats.h"
// Client/server socket
socket_CS *sock;
bool ServerMode;
using namespace std;
//////// Auxiliary functions for server mode //////////
#ifndef WIN32
//---- Capture signal informing that a child ended ---
void child_ended(int n) {
int status;
wait(&status);
sem_post(&semaf);
}
//---- Capture signal to shut server down cleanly
void terminate (int param) {
wcerr<<L"SERVER.DISPATCHER: Signal received. Stopping"<<endl;
exit(0);
}
#endif
//---- Initialize server socket, signals, etc.
void InitServer(config *cfg) {
pid_t myPID=getpid();
char host[256];
if (gethostname(host,256)!=0)
strcpy(host, "localhost");
wcerr<<endl;
wcerr<<L"Launched squoia analyzer server "<<myPID<<L" at port "<<cfg->Port<<endl;
wcerr<<endl;
wcerr<<L"You can now analyze text with the following command:"<<endl;
wcerr<<L" - From this computer: "<<endl;;
wcerr<<L" analyzer_client "<<cfg->Port<<L" <input.txt >output.txt"<<endl;
wcerr<<L" analyzer_client localhost:"<<cfg->Port<<L" <input.txt >output.txt"<<endl;
wcerr<<L" - From any other computer: "<<endl;
wcerr<<L" analyzer_client "<<util::string2wstring(host)<<L":"<<cfg->Port<<L" <input.txt >output.txt"<<endl;
wcerr<<endl;
wcerr<<L"Stop the server with: "<<endl;
wcerr<<L" kill -15 "<<myPID<<endl;
wcerr<<endl;
// open sockets to listen for clients
sock = new socket_CS(cfg->Port,cfg->QueueSize);
#ifndef WIN32
// Capture terminating signals, to exit cleanly.
signal(SIGTERM,terminate);
signal(SIGQUIT,terminate);
// Be signaled when children finish, to keep count of active workers.
signal(SIGCHLD,child_ended);
// Init worker count semaphore
sem_init(&semaf,0,cfg->MaxWorkers);
#endif
}
//---- Wait for a client and fork a worker to attend its requests
int WaitClient() {
int pid=0;
#ifndef WIN32
wcerr<<L"SERVER.DISPATCHER: Waiting for a free worker slot"<<endl;
sem_wait(&semaf);
#endif
wcerr<<L"SERVER.DISPATCHER: Waiting connections"<<endl;
sock->wait_client();
// If we are a Linux server, fork a worker.
// On windows, only serve one client at a time.
#ifndef WIN32
pid = fork();
if (pid < 0) wcerr<<L"ERROR on fork"<<endl;
if (pid!=0) {
// we are the parent. Close client socket and wait for next client
sock->set_parent();
wcerr<<L"SERVER.DISPATCHER: Connection established. Forked worker "<<pid<<"."<<endl;
}
else {
// we are the child. Close request socket and prepare to get data from client.
sock->set_child();
}
#endif
return pid;
}
//---- Send ACK to the client, informing that we expect more
//---- data to be able to send back an analysis.
void SendACK () {
sock->write_message("FL-SERVER-READY");
}
//---- Handle server statistics commands (RESET_STATS / PRINT_STATS)
bool CheckStatsCommands(const wstring &text, ServerStats *stats) {
bool b=false;
if (text==L"RESET_STATS") {
stats->ResetStats();
SendACK();
b=true;
}
else if (text==L"PRINT_STATS") {
sock->write_message(util::wstring2string(stats->GetStats()));
b=true;
}
return b;
}
//---- Clean up and end worker when client finishes.
void CloseWorker(ServerStats *stats) {
wcerr<<L"SERVER.WORKER: client ended. Closing connection."<<endl;
delete stats;
sock->close_connection();
exit(0);
}
/////// Functions to wrap I/O mode (server socket vs stdin/stdout) ////////
//---- Read a line from input channel
int ReadLine(wstring &text) {
int n=0;
if (ServerMode) {
string s;
n = sock->read_message(s);
text = util::string2wstring(s);
}
else
if (getline(wcin,text)) n=1;
return n;
}
//---- Output a string to output channel
void OutputString(const wstring &s) {
if (ServerMode)
sock->write_message(util::wstring2string(s));
else
wcout<<s;
}
//---- Output a list of tokens to output channel
void OutputTokens(const list<word> &av) {
list<word>::const_iterator w;
for (w=av.begin(); w!=av.end(); w++)
OutputString(w->get_form()+L"\n");
}
//---- Output analysis result to output channel
void OutputSentences(output &out, list<sentence> &ls, analyzer &anlz, const document &doc=document()) {
if (ServerMode) {
if (ls.empty()) {
SendACK();
return;
}
wostringstream sout;
out.PrintResults(sout,ls,anlz,doc);
//PrintMorfo(sout,ls,anlz);
sock->write_message(util::wstring2string(sout.str()));
}
else
out.PrintResults(wcout,ls,anlz,doc);
}
//---- Process input line when coreference resolution is requested
void ProcessLineCoref(analyzer &anlz, const wstring &text, list<word> &av,
list<sentence> &ls,
paragraph &par, document &doc) {
if (text==L"") { // new paragraph.
// flush buffer
anlz.SplitSentences(av,ls,true);
// add sentences to current paragraph
par.insert(par.end(), ls.begin(), ls.end());
// Add paragraph to document
if (not par.empty()) doc.push_back(par);
// prepare for next paragraph
av.clear(); ls.clear(); par.clear();
}
else {
// tokenize input line
anlz.TokenizeText(text,av);
// accumulate list of words in splitter buffer, returning a list of sentences.
anlz.SplitSentences(av,ls,false);
// add sentences to current paragraph
par.insert(par.end(), ls.begin(), ls.end());
// clear temporary lists;
av.clear(); ls.clear();
}
}
//---- Once the document is finished, flush buffers and solve coreferences
void PostProcessCoreference(analyzer &anlz, output &out, const list<word> &av, list<sentence> &ls,
paragraph &par, document &doc, ServerStats *stats) {
// flush splitter buffer
anlz.SplitSentences(av,ls,true);
// add sentences to paragraph
par.insert(par.end(), ls.begin(), ls.end());
// add paragraph to document.
doc.push_back(par);
// Whole document has been read, solve coreferences.
anlz.SolveCoreferences(doc);
// output results in requested format
for (document::iterator par=doc.begin(); par!=doc.end(); par++) {
OutputSentences(out,*par,anlz,doc);
if (ServerMode) stats->UpdateStats(ls);
}
}
//---- Process an input line when InputFormat=TOKEN
void ProcessLineToken(analyzer &anlz, const wstring &text, unsigned long &totlen, list<word> &av, list<sentence> &ls) {
// get next word
word w (text);
w.set_span (totlen, totlen + text.size ());
totlen += text.size () + 1;
av.push_back (w);
// check for splitting after some words have been accumulated,
if (av.size () > 10) {
anlz.AnalyzeTokens(av,ls,false);
av.clear (); // clear list of words for next use
}
}
//---- Process an input line when InputFormat>=SPLITTED
void ProcessLineSplitted(analyzer &anlz, config *cfg, const wstring &text, unsigned long &totlen, sentence &av, list<sentence> &ls) {
wstring form, lemma, tag, sn, spr;
double prob;
if (text != L"") { // got a word line
wistringstream sin;
sin.str (text);
// get word form
sin >> form;
// build new word
word w (form);
w.set_span (totlen, totlen + form.size ());
totlen += text.size () + 1;
// process word line, according to input format.
// add all analysis in line to the word.
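// Illustrative (hypothetical) input lines for each format, matching the fields
// parsed below:
//   MORFO : form (lemma tag prob)+         e.g. "cantaba cantar VMII1S0 0.5 cantar VMII3S0 0.5"
//   SENSES: form (lemma tag prob senses)+  e.g. "gato gato NCMS000 1.0 02121620-n:0.8/02121808-n:0.2"
//   TAGGED: form lemma tag                 e.g. "gato gato NCMS000"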
w.clear ();
if (cfg->InputFormat == MORFO) {
while (sin >> lemma >> tag >> spr) {
analysis an (lemma, tag);
prob = util::wstring2double (spr);
an.set_prob (prob);
w.add_analysis (an);
}
}
else if (cfg->InputFormat == SENSES) {
while (sin >> lemma >> tag >> spr >> sn) {
analysis an (lemma, tag);
prob = util::wstring2double (spr);
an.set_prob (prob);
list<wstring> lpair=util::wstring2list (sn,L"/");
list<pair<wstring,double> > lsen;
for (list<wstring>::iterator i=lpair.begin(); i!=lpair.end(); i++) {
size_t p=i->find(L":");
lsen.push_back(make_pair(i->substr(0,p),util::wstring2double(i->substr(p))));
}
an.set_senses(lsen);
w.add_analysis (an);
}
}
else if (cfg->InputFormat == TAGGED) {
sin >> lemma >> tag;
analysis an (lemma, tag);
an.set_prob (1.0);
w.add_analysis (an);
}
// append new word to sentence
av.push_back (w);
// no complete sentences so far.
ls.clear();
}
else { // blank line, sentence end.
totlen += 2;
ls.push_back(av);
anlz.AnalyzeSentences(ls);
av.clear (); // clear list of words for next use
}
}
void FlushBuffers(analyzer &anlz, config *cfg, const list<word> &av,
sentence &sent, list<sentence> &ls, output &out, ServerStats *stats) {
if (ServerMode) wcerr << L"SERVER.WORKER: client ended. Flushing buffers." <<endl;
if (cfg->InputFormat == PLAIN or cfg->InputFormat == TOKEN) {
// flush splitter buffer
if (cfg->OutputFormat == TOKEN) {
OutputTokens(av);
}
else if (cfg->OutputFormat >= SPLITTED) {
anlz.AnalyzeTokens(av,ls,true);
OutputSentences(out,ls,anlz);
}
}
else { // cfg->InputFormat >= SPLITTED.
if (!sent.empty()) {
// if a blank line after last sentence was missing, the sentence is
// still in the splitter buffer.
ls.push_back(sent);
anlz.AnalyzeSentences(ls);
OutputSentences(out,ls,anlz);
if (ServerMode) stats->UpdateStats(ls);
}
}
}
//---------------------------------------------
// Main program
//---------------------------------------------
int main (int argc, char **argv) {
// read configuration file and command-line options,
// and create appropriate analyzers
config *cfg = new config(argc,argv);
ServerMode = cfg->Server;
// If server mode is activated, make sure a port was specified, and vice versa.
if (ServerMode and cfg->Port==0) {
wcerr <<L"Error - Server mode requires the use of option '--port' to specify a port number."<<endl;
exit (1);
}
else if (not ServerMode and cfg->Port>0) {
wcerr <<L"Error - Ignoring unexpected server port number. Use '--server' option to activate server mode."<<endl;
cfg->Port=0;
}
output out(cfg);
analyzer anlz(cfg);
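// Disable date and quantity detection in the shared config and build a second
// analyzer (anlz2); it is the analyzer passed to OutputSentences when printing
// results in the main loop below.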
cfg->MACO_DatesDetection = false;
cfg->MACO_QuantitiesDetection = false;
analyzer anlz2(cfg);
if (ServerMode) {
wcerr<<L"SERVER: Squoia analyzers loaded."<<endl;
InitServer(cfg);
}
ServerStats *stats=NULL;
bool stop=false; /// The server version will never stop.
while (not stop) { /// The standalone version will stop after one iteration.
if (ServerMode) {
int n=WaitClient(); // Wait for a client and fork a worker to attend it.
if (n!=0) continue; // If we are the dispatcher, go to wait for a new client.
stats = new ServerStats(); // If we are the worker, get ready.
}
// --- Begin text analysis
unsigned long offs=0;
wstring text; list<word> av; sentence sent;
list<sentence> ls; paragraph par; document doc;
// if language identification requested, do not enter analysis loop,
// just identify language for each line.
if (cfg->OutputFormat == IDENT) {
while (ReadLine(text)) {
// call the analyzer to identify language
OutputString (anlz.IdentifyLanguage(text)+L"\n");
}
}
else {
// --- Main loop: read and process all input lines up to EOF ---
while (ReadLine(text)) {
// if we get a stats-related command, process it and wait for next line
if (ServerMode and CheckStatsCommands(text,stats)) continue;
// if coreference is requested, assume plain text input and accumulate
// sentences and paragraphs until the document is complete
if (cfg->COREF_CoreferenceResolution) {
ProcessLineCoref(anlz,text,av,ls,par,doc);
if (ServerMode) SendACK();
}
// No coreferences required
else {
bool outputrequired=false;
switch (cfg->InputFormat) {
case PLAIN: // input is plain text
if (cfg->OutputFormat == TOKEN) {
// only tokenized output is requested
anlz.TokenizeText(text,av);
OutputTokens(av);
}
else {
/*bool CRFformat=false;
if (cfg->OutputFormat == CRFMORF) {
CRFformat=true;
cfg->OutputFormat=MORFO;
}*/
// splitter (and maybe more) requested
anlz.AnalyzeText(text,ls);
/*if (CRFformat) {
cfg->OutputFormat=CRFMORF;
}*/
outputrequired = true;
}
break;
case TOKEN: // Input is tokenized.
ProcessLineToken(anlz,text,offs,av,ls);
outputrequired = true;
break;
default: // Input is (at least) tokenized and split.
ProcessLineSplitted(anlz,cfg,text,offs,sent,ls);
outputrequired = true;
break;
}
// Output results if needed.
if (outputrequired) {
if (not ls.empty()) {
OutputSentences(out,ls, anlz2);
if (ServerMode) stats->UpdateStats(ls);
}
else
if (ServerMode) SendACK();
}
}
} // --- end while(readline)
// Document has been read. Perform appropriate post-processing
if (cfg->COREF_CoreferenceResolution)
// If we wanted coreference, now it's time, since we have the whole document.
PostProcessCoreference(anlz,out,av,ls,par,doc,stats);
else
// no coreferences, just flush buffers and process remaining sentences
FlushBuffers(anlz,cfg,av,sent,ls,out,stats);
}
// if we are a forked server attending a client, and the client is done, we exit.
if (ServerMode) CloseWorker(stats);
// if not server version, stop when document is processed
else stop=true;
}
// clean up and exit
delete cfg;
}
|
{"hexsha": "46a5b7f6dadf1cd85d33ec557a173d6c8da8ff45", "size": 16334, "ext": "cc", "lang": "C++", "max_stars_repo_path": "FreeLingModules/deprecated/squoia_server_analyzer.cc", "max_stars_repo_name": "ariosquoia/squoia", "max_stars_repo_head_hexsha": "3f3c3c253bdb2d891889e0427790e6c972870f08", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9.0, "max_stars_repo_stars_event_min_datetime": "2016-04-27T16:48:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-17T21:55:55.000Z", "max_issues_repo_path": "FreeLingModules/deprecated/squoia_server_analyzer.cc", "max_issues_repo_name": "ariosquoia/squoia", "max_issues_repo_head_hexsha": "3f3c3c253bdb2d891889e0427790e6c972870f08", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FreeLingModules/deprecated/squoia_server_analyzer.cc", "max_forks_repo_name": "ariosquoia/squoia", "max_forks_repo_head_hexsha": "3f3c3c253bdb2d891889e0427790e6c972870f08", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2016-03-29T22:26:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-17T21:56:21.000Z", "avg_line_length": 30.8188679245, "max_line_length": 133, "alphanum_fraction": 0.6090363659, "num_tokens": 4191}
|
struct AggressivenessBeliefMDP <: MDP{AggressivenessBelief, MLAction}
up::AggressivenessUpdater
end
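# Belief-space transition (sketch of what the method below does): sample a state
# from the current belief, simulate one POMDP step to obtain an observation,
# re-simulate `nb_sims` (possibly noise-perturbed) particles under action `a`,
# and reweight them against that observation; the returned reward is the
# particle-average one-step reward.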
function generate_sr(p::AggressivenessBeliefMDP, b_old::AggressivenessBelief, a::MLAction, rng::AbstractRNG)
up = p.up
pomdp = get(up.problem)
s = rand(rng, b_old)
sp = generate_s(pomdp, s, a, rng)
o = generate_o(pomdp, s, a, sp)
b_new = AggressivenessBelief(CorrelatedIDMMOBIL(
get(up.problem).dmodel.behaviors), o,
Vector{Vector{Float64}}(length(o.cars)),
Vector{Vector{Float64}}(length(o.cars)))
rsum = 0.0
particles = Vector{MLState}(up.nb_sims)
stds = max.(agg_stds(b_old), 0.01)
for i in 1:up.nb_sims
if rand(up.rng) < up.p_resample_noise
s = rand(up.rng, b_old, up.resample_noise_factor*stds)
else
s = rand(up.rng, b_old)
end
particles[i], r = generate_sr(get(up.problem), s, a, up.rng)
rsum += r
end
cweights_from_particles!(b_new, get(up.problem), o, particles, up.params)
for i in 1:length(o.cars)
if isempty(b_new.cweights[i])
# println("car $i has empty weights")
b_new.particles[i] = rand(up.rng, up.nb_sims)
b_new.cweights[i] = 1.0:1.0:up.nb_sims
end
end
return b_new, rsum/up.nb_sims
end
# actions(p::AggressivenessBeliefMDP) = actions(get(p.up.problem))
actions(p::AggressivenessBeliefMDP, b::AggressivenessBelief) = actions(get(p.up.problem), b.physical)
discount(p::AggressivenessBeliefMDP) = discount(get(p.up.problem))
struct ABMDPSolver <: Solver
solver
updater
end
function solve(sol::ABMDPSolver, pomdp)
up = deepcopy(sol.updater)
set_problem!(up, pomdp)
return solve(sol.solver, AggressivenessBeliefMDP(up))
end
struct BehaviorBeliefMDP{G} <: MDP{BehaviorParticleBelief{G}, MLAction}
up::BehaviorParticleUpdater
end
BehaviorBeliefMDP(up) = BehaviorBeliefMDP{typeof(get(up.problem).dmodel.behaviors)}(up)
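# Same belief-space step as above, but particles are full IDM/MOBIL behavior
# parameter vectors and resampling goes through lv_resample (presumably
# low-variance resampling) instead of explicit noise injection.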
function generate_sr(p::BehaviorBeliefMDP, b_old::BehaviorParticleBelief, a::MLAction, rng::AbstractRNG)
up = p.up
pomdp = get(up.problem)
s = rand(rng, b_old)
sp = generate_s(pomdp, s, a, rng)
o = generate_o(pomdp, s, a, sp)
b_new::BehaviorParticleBelief=BehaviorParticleBelief(get(up.problem).dmodel.behaviors, o,
Vector{Vector{IDMMOBILBehavior}}(length(o.cars)),
Vector{Vector{Float64}}(length(o.cars)))
rsum = 0.0
particles = Vector{MLState}(up.nb_sims)
samples = lv_resample(b_old, up)
for i in 1:up.nb_sims
particles[i], r = generate_sr(get(up.problem), samples[i], a, up.rng)
rsum += r
end
cweights_from_particles!(b_new, get(up.problem), o, particles, up.params)
for i in 1:length(o.cars)
if isempty(b_new.cweights[i])
b_new.particles[i] = [rand(up.rng, b_new.gen) for i in 1:up.nb_sims]
b_new.cweights[i] = collect(1.0:convert(Float64, up.nb_sims))./up.nb_sims
end
end
return b_new, rsum/up.nb_sims
end
actions(p::BehaviorBeliefMDP, b::BehaviorParticleBelief) = actions(get(p.up.problem), b.physical)
discount(p::BehaviorBeliefMDP) = discount(get(p.up.problem))
struct BBMDPSolver <: Solver
solver
updater
end
function solve(sol::BBMDPSolver, pomdp)
up = deepcopy(sol.updater)
set_problem!(up, pomdp)
return solve(sol.solver, BehaviorBeliefMDP(up))
end
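# Usage sketch (assumes `updater` is an already-configured BehaviorParticleUpdater
# and `mdp_solver` is any generate_sr-based MDP solver, e.g. MCTS.DPWSolver):
#   policy = solve(BBMDPSolver(mdp_solver, updater), pomdp)
# `solve` deep-copies the updater, binds it to `pomdp`, and plans directly on the
# wrapped BehaviorBeliefMDP.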
|
{"hexsha": "26cf50b76e4d4581bd6ea4125b487afabca2da73", "size": 3539, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/aggressiveness_belief_mdp.jl", "max_stars_repo_name": "zsunberg/Multilane.jl", "max_stars_repo_head_hexsha": "2f19dd2a60a0786e6bbcf6a150a173d35be068a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2016-09-06T09:24:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-15T07:57:24.000Z", "max_issues_repo_path": "src/aggressiveness_belief_mdp.jl", "max_issues_repo_name": "zsunberg/Multilane.jl", "max_issues_repo_head_hexsha": "2f19dd2a60a0786e6bbcf6a150a173d35be068a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2016-04-22T05:21:07.000Z", "max_issues_repo_issues_event_max_datetime": "2016-06-05T00:10:51.000Z", "max_forks_repo_path": "src/aggressiveness_belief_mdp.jl", "max_forks_repo_name": "sisl/Multilane.jl", "max_forks_repo_head_hexsha": "2f19dd2a60a0786e6bbcf6a150a173d35be068a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2017-01-20T10:51:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-28T04:36:51.000Z", "avg_line_length": 32.1727272727, "max_line_length": 108, "alphanum_fraction": 0.6425543939, "num_tokens": 1039}
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import os,sys
import numpy as np
import json
sys.path.append('/opt/render')
from tools import mysql
from tools import bash
from colorama import *
init(autoreset=True)
from tools.decorators import *
#################################################################################
class NetVidSegClass(object):
"""
NetVidSeg - Net for Video Segmentations
General class for extracting features with neural networks
"""
def __init__(self):
import NetVidSeg
# self.allParam = allParam
return
# @decor_function_call
# def extract(self):
# return
#
# @decor_function_call
# def load_model(self):
# return
#
# @decor_function_call
# def load_images(self):
# return
@decor_function_call
def save_to_db(self, segments, allParam, test=False):
from collections import Counter
# initialize the network
# images_predicted = neural.ssd()
# images_predicted = vgg.keras_vgg16.predict(segments)
# vgg.distances(segments, allParam, images_predicted)
# classSum=[]
#
# for index,segParam in enumerate(segments):
# print (segParam)
#
# if segParam['duration']>allParam['audio_bpm']*1.1:
# # start recognition if the segment is not too short
#
#
#
# id_segm = segParam['id']
#
# recParam={
# 'id_file':str(segParam['id_file']),
# 'id_segm':str(id_segm),
# 'status':'0',
# 'img':str(segParam['path']+'.jpg'),
# 'type':'googlenet',
# 'class':str(predicted_class),
# #'metadata':str(labels[predicted_class]).replace("'","\\'")
# # 'metadata':'{"name":"'+str(labels[predicted_class]).replace("'","\\'")+'"}'
# }
#
# classSum.append(recParam['class'])
#
#
# #print (Fore.YELLOW + '> ' + str(recParam['type']) + ': ' + Fore.CYAN + str(predicted_class)+' --> ' +str(labels[predicted_class]))
#
# if not test:
# # mysql.delRecogn(id_segm)
# # mysql.insRecogn(recParam)
# pass
#
# else:
# print(Fore.YELLOW + 'skip ' + segParam['duration'])
return
# --------------------------------------------------------------------
if __name__ == '__main__':
print '---------------------------------'
obj = NetVidSegClass()
obj.detect_object_on_image()
|
{"hexsha": "b89331c197cea079a1417c1dbbb8058482bcf74d", "size": 2567, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "bashgu/NetVidSeg", "max_stars_repo_head_hexsha": "93d5549713497f77806f1589032270ff42c61327", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-17T01:46:17.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-17T01:46:17.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "bashgu/NetVidSeg", "max_issues_repo_head_hexsha": "93d5549713497f77806f1589032270ff42c61327", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "bashgu/NetVidSeg", "max_forks_repo_head_hexsha": "93d5549713497f77806f1589032270ff42c61327", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.7685185185, "max_line_length": 145, "alphanum_fraction": 0.5091546552, "include": true, "reason": "import numpy", "num_tokens": 655}
|
(* seplog (c) AIST 2005-2013. R. Affeldt, N. Marti, et al. GNU GPLv3. *)
(* seplog (c) AIST 2014-2018. R. Affeldt et al. GNU GPLv3. *)
Require Import Div2 Even.
From mathcomp Require Import ssreflect ssrfun ssrbool eqtype ssrnat seq choice.
From mathcomp Require Import tuple.
Require Import ssrZ ZArith_ext String_ext seq_ext.
Require Import seq_ext.
Require Import machine_int.
Import MachineInt.
Require Import C_types C_types_fp C_value C_expr C_expr_equiv C_expr_ground C_seplog C_tactics.
Require Import rfc5246.
Import RFC5932.
Require Import POLAR_library_functions POLAR_library_functions_triple.
Require Import POLAR_ssl_ctxt POLAR_parse_client_hello POLAR_parse_client_hello_header.
Close Scope select_scope.
Local Open Scope nat_scope.
Local Open Scope string_scope.
Local Open Scope zarith_ext_scope.
Local Open Scope seq_ext_scope.
Local Open Scope machine_int_scope.
Local Open Scope C_assert_scope.
Local Open Scope C_expr_scope.
Local Open Scope C_cmd_scope.
Local Open Scope C_value_scope.
Local Open Scope C_types_scope.
(** * Verification of the ClientHello Parsing Program (4/4) *)
Section POLAR_parse_client_hello_triple.
Variable SI : seq (int 8).
Lemma POLAR_parse_client_hello_triple4 (BU RB : seq (int 8)) (CI : seq (int 32))
(HCI : ciphers_seq CI)
(cipher0 length0 : int 32)
(bu rb id : (:* (ityp: uchar)).-phy)
(ses : (:* (g .-typ: ssl_session)).-phy)
(ciphers : (:* (ityp: sint)).-phy)
(vssl : int ptr_len)
(md5s sha1s : 5.-tuple (int ptr_len)) :
let init_ssl_var := `! \b __ssl \= [ phy<=ptr _ vssl ]c in
let init_ciphers := [ ciphers ]c |--> map phy<=si32 CI in
let final_bu := Final_bu SI bu in
let final_ses := Final_ses SI CI ses id in
let final_rb := Final_rb SI RB rb in
let final_id := Final_id SI id in
let final_ssl_context := Ssl_context (zext 24 S74.server_hello)
(zext 24 (SI `_ maj_ver))
(zext 24 (if (u2Z (SI `_ min_req) <=? u2Z (S621.TLSv11_min))%Z then
SI `_ min_req else S621.TLSv11_min))
(zext 24 (SI `_ maj_req)) (zext 24 (SI `_ min_req ))
ses bu `( 0 )s_32 md5s sha1s ciphers rb in
forall BU1 : seq (int 8), BU1 |{ 8, 5) = SI |{ 0, 5) -> size BU1 = size BU ->
forall in_left0, in_left0 = `( 5 )_32 ->
let the_n := Z<=s ((zext 24 BU1 `_ 11 `<< 8) `|` zext 24 BU1 `_ 12) in
let the_n_plus5 := (5 + the_n)%Z in
(45 <= the_n)%Z -> (the_n <= 512)%Z ->
forall in_left2, in_left2 = `( the_n_plus5 )_32 ->
forall BU2 : seq (int 8),
let Hbu := [ bu ]c |---> map phy<=ui8 BU2 in
BU2 |{ 8 + 5, '| the_n |) = SI |{ 5, '| the_n |) ->
BU2 |{ 8, 5) = BU1 |{ 8, 5) ->
size BU2 = size BU1 ->
'| the_n_plus5 | <= size SI ->
let Hbuf := `! \b __buf \= [ bu ]c \+ [ 13 ]sc in
let Hn0 := `! \b __n0 \= [ in_left2 ]pc in
let Hn_old := `! \b __n_old \= [ the_n ]sc in
let Hn := `! \b __n \= __n0 \- [ 5 ]sc in
BU1 `_ 8 `& `( 128 )_8 = `( 0 )_8 /\ BU1 `_ 8 = S621.handshake ->
BU2 `_ 13 = S74.client_hello ->
BU2 `_ 17 = S621.SSLv30_maj ->
let Hbuf5 := `! \b __buf5 \= [ BU2 `_ 18 ]pc in
let minver_exp : exp sigma _ :=
[ BU2 `_ 18 ]pc \<= [ SSL_MINOR_VERSION_2 ]c \?
[ BU2 `_ 18 ]pc \: [ SSL_MINOR_VERSION_2 ]c in
let minver_u := si32<=phy ((phyint) (ground_exp minver_exp erefl)) in
let reqmin_sslcontext := Ssl_context (zext 24 S74.client_hello)
(si32<=phy ((phyint) SSL_MAJOR_VERSION_3))
minver_u (zext 24 BU2 `_ 17) (zext 24 BU2 `_ 18)
ses bu in_left2 md5s sha1s ciphers rb in
BU1 `_ 9 = S621.SSLv30_maj ->
BU2 `_ 14 = zero8 ->
let Hm := `! \b __n \= [ 4 ]sc \+
((int) ([ BU2 `_ 15 ]pc : exp _ (ityp: uchar)) \<< [ 8 ]sc \|
(int) ([ BU2 `_ 16 ]pc : exp _ (ityp: uchar))) in
let Hsess_len := `! \b __sess_len \= (int) ([ BU2 `_ 51 ]pc : exp _ (ityp: uchar)) in
let Hsess_len_bound := `! \~b \b __sess_len \< [ 0 ]sc \|| __sess_len \> [ 32 ]sc in
let Hssl_session_0 := `! \b __ssl_session_0 \= [ ses ]c in
(Z<=nat csuites.+1 + Z<=u BU2 `_ 51 < the_n_plus5)%Z ->
let Hses_length := ses |lV~> mk_ssl_session cipher0 (zext 24 BU2 `_ 51) (ptr<=phy id) in
let Hssl_session_0_length :=
`! \b __ssl_session_0_length \= (int) ([ BU2 `_ 51 ]pc : exp _ (ityp: uchar)) in
let Hit := `! \b __it \= [ id ]c in
(Z<=u BU2 `_ 51 <= 32)%Z ->
nat<=u BU2 `_ 51 <= 32 ->
let Shigh : exp _ (ityp: uchar):= [ BU2 `_ (52 + nat<=u BU2 `_ 51) ]pc in
let Slow : exp _ (ityp: uchar) := [ BU2 `_ (53 + nat<=u BU2 `_ 51) ]pc in
let Hciph_len := `! \b __ciph_len \= ((int) Shigh \<< [ 8 ]sc \| (int) Slow) in
let Hciph_len_bound := `! \~b
\b __ciph_len \< [ 2 ]sc \|| __ciph_len \> [ 256 ]sc \|| __ciph_len \% 1 \!= [ 0 ]sc in
let ciph_len_exp := (int) Shigh \<< [ 8 ]sc \| (int) Slow in
let ciph_len_value := ground_exp ciph_len_exp erefl in
let ciph_len_value_Z := Z<=s (si32<=phy ciph_len_value) in
let ciph_len_value_nat := '| ciph_len_value_Z | in
(Z<=nat compmeth + Z<=u BU2 `_ 51 + ciph_len_value_Z < the_n_plus5)%Z ->
(2 <= ciph_len_value_Z <= 256)%Z ->
1 < ciph_len_value_nat <= 256 ->
let comp_len_exp : exp sigma _ := (int)
([ BU2 `_ (54 + nat<=u BU2 `_ 51 + ciph_len_value_nat) ]pc : exp sigma (ityp: uchar)) in
let comp_len_value := ground_exp comp_len_exp Logic.eq_refl in
let Hcomp_len := `! \b __comp_len \= [ comp_len_value ]c in
let Hextensions := `! \~b
\b [ Z<=nat (compmeth.+1) ]sc \+ __sess_len \+ __ciph_len \+ __comp_len \!=
[ 5 ]sc \+ __n_old in
let Hciph_len_even := sepex k', !!((Z<=nat k' * 2 < 2 ^^ 30)%Z) **
`! \b __ciph_len \= [ Z<=nat k' * 2 ]sc in
let inv_outer := reqmin_sslcontext ** Hbuf ** Hbu **
init_ciphers ** Hciph_len ** Hciph_len_even **
Hsess_len ** Hses_length **
(`! \b __goto_have_cipher \= [ 1 ]sc **
(sepex i, (!!(i < size CI)) **
(sepex k, (!!(k < 128)) **
`! \b [ Z<=nat k * 2 ]sc \< __ciph_len **
`! \b __ssl_ciphers_i \= [ CI `32_ i ]pc **
!!( BU2 `_ (54 + nat<=u (BU2 `_ 51) + 2 * k) = `( 0 )_ 8 ) **
`! \b (int) __ssl_ciphers_i \= (int) ([ BU2 `_ (54 + nat<=u BU2 `_ 51 + k * 2 + 1) ]pc : exp _ (ityp: uchar))))) \\//
`! \b __goto_have_cipher \= [ 0 ]sc **
((`! \b __ssl_ciphers_i \!= [ 0 ]sc **
(sepex i, (!!(i < size CI)) **
`! \b __i \= [ Z<=nat i ]sc **
`! \b __ssl_ciphers_i \= [ CI `32_ i ]pc)) \\//
`! \b __ssl_ciphers_i \= [ 0 ]sc) in
{{ Hcomp_len ** Hit ** Hssl_session_0_length ** Hssl_session_0 ** Hm **
Hbuf5 ** Hn ** Hn_old ** Hn0 ** success ** init_ssl_var ** final_rb **
final_id ** Hsess_len_bound ** Hciph_len_bound **
`! \~b \b __comp_len \< [ 1 ]sc \|| __comp_len \> [ 16 ]sc ** Hextensions ** inv_outer }}
ssl_parse_client_hello5
{{ error \\//
success ** final_bu ** final_ses ** final_rb ** final_id **
final_ssl_context ** !!(PolarSSLClientHellop SI) ** init_ciphers }}.
Proof.
move=> init_ssl_var init_ciphers final_bu final_ses final_rb
final_id final_ssl_context BU1 BU1SI sz_BU1 in_left0 in_left0_5
the_n the_n_plus5 HN1 HN0 in_left2 in_left2_n5 BU2 Hbu BU2SI
BU2BU1 sz_BU2 HSI_new Hbuf Hn0 Hn_old Hn BU1_8 BU2_13 BU2_17
Hbuf5 minver_exp minver_u reqmin_sslcontext BU1_9 BU2_14 Hm Hsess_len
Hsess_len_bound Hssl_session_0 csuites_max Hses_length
Hssl_session_0_length Hit BU_51 BU1_51 Shigh Slow Hciph_len
Hciph_len_bound ciph_len_exp ciph_len_value ciph_len_value_Z
ciph_len_value_nat compmeth_max Hciph_len_bound_Z
Hciph_len_bound_nat comp_len_exp comp_len_value
Hcomp_len Hextensions Hciph_len_even inv_outer.
unfold ssl_parse_client_hello5.
(** If \b __goto_have_cipher \!= [ 0 ]sc Then *)
(** Else
_ret <- [ POLARSSL_ERR_SSL_NO_CIPHER_CHOSEN ]c; Return *)
idtac "71) ifte".
Hoare_ifte_bang Hgoto_have_cipher0; last by apply POLAR_ret_err.
(** __ssl_session_0 &-> _cipher *<- __ssl_ciphers_i; *)
idtac "72) mutation".
unfold inv_outer.
Hoare_L_or 0 (* NB: does not work with 1 *); last first.
apply hoare_stren with FF; last by apply hoare_L_F.
rewrite /Hgoto_have_cipher0 [in X in _ ** X ===> _]bneq_neg_eq.
set H1 := `! \b __goto_have_cipher \= _.
set H2 := `! \~b \b __goto_have_cipher \= _.
by Ent_L_contradict (H1 :: H2 :: nil).
Hoare_L_ex O i.
Hoare_L_ex O k.
set ssl_cipher_i := BU2 `_ (54 + nat<=u BU2 `_ 51 + k * 2 + 1).
set Hssl_cipher_i := `! \b __ssl_ciphers_i \= [ CI `32_ i ]pc.
pose Hses_cipher := ses |lV~> mk_ssl_session (CI `32_ i) (zext 24 (BU2 `_ 51)) (ptr<=phy id).
Hoare_seq_replace1 Hses_length Hses_cipher.
Hoare_L_dup (Hssl_cipher_i :: Hssl_session_0 :: nil).
Hoare_frame (Hssl_cipher_i :: Hssl_session_0 :: Hses_length :: nil) (Hses_cipher :: nil).
unfold Hses_length, Hses_cipher.
apply hoare_mutation_fldp_subst_ityp with (str := _ssl_ciphers_i) (e := [ CI `32_ i ]pc) (Hstr := erefl).
- by apply monotony_L.
- rewrite /=.
Hoare_L_contract_bbang Hssl_cipher_i.
apply hoare_mutation_fldp_subst_ptr with (str := _ssl_session_0) (e'' := [ ses ]c) (Hstr := erefl).
+ by apply monotony_L.
+ rewrite /=.
Hoare_L_contract_bbang Hssl_session_0.
eapply hoare_weak; last first.
have He2 : @vars _ sigma (ityp: sint) [ CI `32_ i ]pc = nil by done.
apply hoare_mutation_fldp_local_forward_ground_lV with (val := mkSintLog CI `32_ i) (He2 := He2).
by rewrite /phylog_conv /= ge_cst_e.
rewrite /= -Eqdep.Eq_rect_eq.eq_rect_eq; by apply ent_id.
(** __ssl &-> _in_left *<- [ 0 ]sc; *)
idtac "73) mutation".
pose Hssl_in_left3 := Ssl_context (zext 24 S74.client_hello)
(si32<=phy ((phyint) SSL_MAJOR_VERSION_3)) minver_u (zext 24 (BU2 `_ 17))
(zext 24 (BU2 `_ 18)) ses bu `( 0 )s_32 md5s sha1s ciphers rb.
Hoare_seq_replace1 reqmin_sslcontext Hssl_in_left3.
Hoare_frame (reqmin_sslcontext :: nil) (Hssl_in_left3 :: nil).
eapply hoare_weak; last first.
have He2 : @vars _ sigma (ityp: sint) [ 0 ]sc = nil by done.
apply hoare_mutation_fldp_local_forward_ground_le with (val := mkSintLog `( 0 )s_32) (He2 := He2).
by rewrite /phylog_conv /= ge_cst_e.
rewrite set_in_left_ssl_ctxt; by apply ent_id.
(** _ssl_state <-* __ssl &-> _state; *)
idtac "74) lookup".
pose H_ssl_state := `! \b __ssl_state \= [ Z<=u S74.client_hello ]sc.
Hoare_seq_ext H_ssl_state.
Hoare_frame (Hssl_in_left3 :: nil) (Hssl_in_left3 :: H_ssl_state :: nil).
apply hoare_lookup_fldp_stren.
apply ent_R_lookup_fldp with (pv := [ u2Z S74.client_hello ]s ).
- rewrite get_state_ssl_ctxt /phylog_conv /=.
apply/eqP/mkPhy_irrelevance => /=.
congr (i8<=i32).
apply/u2Z_inj.
by rewrite (u2Z_zext 24) 2!u2ZE Z2sE.
- clear.
Ent_R_subst_con_distr.
rewrite [in X in _ ===> X ** _]wp_assign_mapsto_le.
apply monotony_L2.
Ent_R_subst_apply.
by Ent_monotony0.
(** __ssl &-> _state *<- __ssl_state \+ [ 1 ]sc; *)
pose Hssl_server := Ssl_context (zext 24 S74.server_hello)
(si32<=phy ((phyint) SSL_MAJOR_VERSION_3)) minver_u (zext 24 (BU2 `_ 17))
(zext 24 (BU2 `_ 18)) ses bu `( 0 )s_32 md5s sha1s ciphers rb.
Hoare_seq_replace1 Hssl_in_left3 Hssl_server.
idtac "75) mutation".
Hoare_L_dup (H_ssl_state :: nil).
Hoare_frame (H_ssl_state :: Hssl_in_left3 :: nil) (Hssl_server :: nil).
apply hoare_mutation_fldp_subst_ityp with (str := _ssl_state) (e := [ Z<=u S74.client_hello ]sc) (Hstr := erefl).
- by apply monotony_L.
- rewrite /=.
Hoare_L_contract_bbang H_ssl_state.
eapply hoare_weak; last first.
have He2 : @vars _ sigma (ityp: uchar) [S74.server_hello]pc = nil by done.
apply hoare_mutation_fldp_local_forward_ground_le with (val := mkSintLog (zext 24 S74.server_hello)) (He2 := He2).
rewrite /phylog_conv /= -(ground_exp_sem (store0 sigma)) sequiv_add_e_sc_pos; last 3 first.
by apply min_u2Z.
by vm_compute.
by rewrite u2ZE.
rewrite (ground_exp_sem (store0 sigma)) ge_cst_e /=.
apply/eqP/mkPhy_irrelevance => /=.
congr (i8<=i32).
apply u2Z_inj.
by rewrite (u2Z_zext 24) 3!u2ZE Z2sE.
rewrite set_state_ssl_ctxt /=; by apply ent_id.
apply hoare_R_or_2. (* success branch *)
(** _ret <- [ 0 ]sc; Return *)
match goal with
|- {{ ?P }} _ ; _ {{ _ }} => apply hoare_seq with P
end.
Hoare_frame (success :: nil) (success :: nil).
clear.
apply hoare_assign_stren.
Ent_R_subst_apply.
rewrite bbang_eq_exx; by apply ent_bang_contract.
(** Return *)
unfold Return.
eapply hoare_stren; last by apply hoare_hoare0, hoare0_skip.
Ent_decompose (17 (* success *) :: nil) (0 (* success*) :: nil); first by apply ent_id.
have the_n_n_SI : '| the_n | = n SI.
rewrite /n /n'; congr ('| _ |).
rewrite /the_n /multi_int.bSum_c /=.
rewrite (_ : BU1 `_ 11 = SI `_ record_sz); last by rewrite /nth' (nth_slices _ _ _ BU1SI).
rewrite (_ : BU1 `_ 12 = SI `_ record_sz.+1); last by rewrite /nth' (nth_slices _ _ _ BU1SI).
rewrite (_ : zext 24 (SI `_ record_sz) `<< 8 = zext 16 (SI `_ record_sz) `|| `( 0 )_ 8); last first.
rewrite concat_shl.
apply u2Z_inj.
rewrite u2Z_castC.
congr (Z<=u (_ `<< 8)).
apply u2Z_inj.
by rewrite (u2Z_zext 24) (u2Z_zext 8) (u2Z_zext 16).
rewrite s2Z_u2Z_pos'; last first.
rewrite u2Z_or (u2Z_zext 16).
split.
apply addZ_ge0; last by apply min_u2Z.
apply Z.mul_nonneg_nonneg => //; by apply min_u2Z.
apply (@ltZ_trans (2 ^^ 8 * 2 ^^ 8 + 2 ^^ 8)%Z) => //.
apply ltZ_add; last by apply max_u2Z.
apply ltZ_pmul2r => //; exact: max_u2Z.
by rewrite u2Z_or (u2Z_zext 16) 2!u2ZE.
Ent_decompose (26 (* Hbu *) :: nil) (0 (* final_bu *) :: nil).
unfold Hbu, final_bu, Final_bu.
Ent_R_ex BU2.
rewrite -[X in X ===> _] coneP.
apply monotony; last by apply ent_id.
apply ent_R_sbang.
by rewrite -the_n_n_SI /u2nat -(addnC '|the_n|) addnC slice_app BU2SI BU2BU1 BU1SI -slice_app.
have SI_sid_BU_51 : SI `_ sid = BU2 `_ 51.
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //=.
- apply leq_ltn_trans with (5 + '| 44 |) => //.
rewrite ltn_add2l.
apply/ltP.
apply Zabs_nat_lt.
clear -HN1; lia.
- rewrite leqnn andbC /=.
apply O_lt_Zabs_nat; lia.
have SI_compmeth_BU_helper :
4 < compmeth + nat<=u (BU2 `_ 51) + ciph_len_value_nat < 5 + '| the_n |.
apply/andP; split; first by done.
apply/ltP; apply Nat2Z.inj_lt.
rewrite 2!inj_plus [in X in (_ < X)%Z]inj_plus /u2nat Z_of_nat_Zabs_nat; last by apply min_u2Z.
rewrite Z_of_nat_Zabs_nat; last by clear -HN1; lia.
rewrite -/the_n_plus5 /ciph_len_value_nat Z_of_nat_Zabs_nat; last by lia.
exact: compmeth_max.
Ent_L_dup (Hciph_len :: nil).
Ent_L_dup (Hsess_len :: nil).
Ent_decompose (1 (* k < 128 *) :: 2 (* k * 2 < ciph_len *) :: 3 (* Hssl_cipher_i *) :: 4 (* BU2 `_ (54 + nat<=u BU2 `_ 51 + 2 * k) = `( 0 )_8 *) :: 5 (* !b b[(int) __ssl_ciphers_i \= (int) ([ ssl_cipher_i ]8c)]*) :: 27 (* Hciph_len *) :: 30 (* Hsess_len *) :: 32 (* Hses_cipher *) :: nil) (0 (* final_ses*) :: nil).
apply ent_L_sbang_con => Hk.
set k_ciph := `! \b _ \< __ciph_len.
Ent_L_stren_by ( !!(Z<=nat k * 2 < ciph_len_value_Z)%Z ) (Hciph_len :: k_ciph :: nil).
unfold k_ciph, Hciph_len.
Ent_LR_rewrite_eq_e O (* ciph_len *).
Ent_R_subst_apply; Ent_L_subst_apply.
rewrite -/ciph_len_exp.
Bbang2sbang.
apply ent_sbang_sbang.
move/Zlt_gb. move/(_ erefl erefl).
have k2_bound : (- 2 ^^ 31 <= Z<=nat k * 2 < 2 ^^ 31)%Z.
move/ltP : Hk; move/Nat2Z.inj_lt.
rewrite (_ : Z<=nat 128 = 128%Z) // => Hk.
simpl expZ; lia.
rewrite s2Z_ge_s_cst_e; last by exact k2_bound.
apply; first by lia.
rewrite -/ciph_len_value_Z [_ ^^ _]/=; lia.
apply ent_L_sbang_con => k2.
Ent_L_contract_bbang 0 (* k_ciph *); clear k_ciph.
Ent_L_contract_bbang 4 (* Hciph_len *).
unfold final_ses, Final_ses.
Ent_R_ex i.
Ent_R_ex k.
Ent_R_ex (CI `32_ i).
have Htmp : CI `32_ i = CI `32_ i := erefl. Ent_R_remove_sbang 1 Htmp; clear Htmp.
unfold Hssl_cipher_i.
Ent_LR_rewrite_eq_e 0 (* ssl_ciphers_i *).
Ent_L_subst_apply.
apply ent_L_sbang_con => Hp0.
Ent_L_subst_apply.
Ent_L_subst_apply.
Ent_LR_subst_inde.
Ent_R_subst_con_distr.
Ent_R_subst_apply.
Ent_LR_subst_inde.
rewrite /sess_len Z_of_nat_Zabs_nat; last by apply min_u2Z.
rewrite SI_sid_BU_51 /Hses_cipher (_ : zext 24 (BU2 `_ 51) = `( Z<=u (BU2 `_ 51) )_32 ); last first.
apply u2Z_inj.
rewrite (u2Z_zext 24) Z2uK //=.
split; first by apply min_u2Z.
apply (@ltZ_trans (2 ^^ 8)%Z) => //; exact: max_u2Z.
Ent_L_conA. (* TODO: check *)
apply monotony_R.
Bbang2sbang.
rewrite /ssl_cipher_i gb_eq_e ge_cast_sint_cst_8c ge_cast_sint_cst_sint.
apply ent_L_sbang_con.
move/eqP/phy_of_si32_inj => CI_i_BU.
have SI_compmeth_BU :
SI `_ (compmeth + nat<=u (BU2 `_ 51) + 2 * k + 1) = BU2 `_ (54 + nat<=u BU2 `_ 51 + k * 2 + 1).
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //.
- by rewrite -addnA subnKC mulnC.
- apply/andP; split; first by done.
eapply leq_ltn_trans; last by apply SI_compmeth_BU_helper.
rewrite -addnA leq_add2l -ltnS addn1 ltnS mulnC /ciph_len_value_nat.
rewrite Z_of_nat_lt Z_of_nat_Zabs_nat; last by lia.
rewrite inj_mult; exact/ltZP.
- rewrite leqnn andbC /=.
apply O_lt_Zabs_nat; lia.
rewrite SI_compmeth_BU CI_i_BU.
have SI_compmeth_BU2 : SI `_ (compmeth + nat<=u (BU2 `_ 51) + 2 * k) =
BU2 `_ (54 + nat<=u BU2 `_ 51 + 2 * k).
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //.
- apply/andP; split; first by done.
eapply leq_ltn_trans; last by apply SI_compmeth_BU_helper.
rewrite leq_add // /ciph_len_value_nat Z_of_nat_le Z_of_nat_Zabs_nat; last by lia.
rewrite mulnC inj_mult; exact/leZP/ltZW.
- rewrite leqnn andbC /=.
apply O_lt_Zabs_nat; lia.
rewrite SI_compmeth_BU2 Hp0 zext_Z2u // -zext_concat.
by apply ent_L_bbang, ent_R_sbang.
Ent_decompose (13 (* final_rb *) :: nil) (0 :: nil); first by apply ent_id.
Ent_decompose (13 (* final_id *) :: nil) (0 :: nil); first by apply ent_id.
have nat_the_n : 0 < '| the_n |.
clear -HN1.
rewrite (_ : 0 = '| 0 |) //.
apply/ltP; apply Zabs2Nat.inj_lt; lia.
have SI_min_req_BU_18 : SI `_ min_req = BU2 `_ 18.
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //.
- apply (@leq_ltn_trans (5 + '| 44 |)) => //.
rewrite ltn_add2l.
apply/ltP; apply Zabs2Nat.inj_lt; lia.
- apply/andP; by rewrite leqnn.
have SI_maj_req : SI `_ maj_req = BU2 `_ 17.
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //.
- apply (@leq_ltn_trans (5 + '| 44 |)) => //.
rewrite ltn_add2l.
apply/ltP/Zabs2Nat.inj_lt; lia.
- apply/andP; by rewrite leqnn.
have SI_maj_ver_BU : SI `_ maj_ver = BU1 `_ 9.
by rewrite /nth' (nth_slices _ _ _ (esym BU1SI)).
have SI_min_ver_BU : SI `_ min_ver = BU1 `_ 10.
by rewrite /nth' (nth_slices _ _ _ (esym BU1SI)).
Ent_decompose (17 (* Hssl_server *) :: nil) (0 (* final_ssl_context *):: nil).
unfold Hssl_server, final_ssl_context.
have -> : si32<=phy ((phyint) SSL_MAJOR_VERSION_3) = zext 24 (SI `_ maj_ver).
by rewrite SI_maj_ver_BU BU1_9 /safe_cast_phy /si32_of_phy /= i8_of_i32Ko.
set ca := zext _ (if _ then _ else _).
have -> : minver_u = ca.
rewrite /minver_u /minver_exp /ca SI_min_req_BU_18.
by rewrite si32_of_phy_safe_cast_phy_uchar i8_of_phy_ifte !phy_of_ui8K Z2uK.
rewrite SI_maj_req -SI_min_req_BU_18; by apply ent_id.
unfold PolarSSLClientHellop.
have -> : SI `_ 0 = S621.handshake.
have -> : SI `_ 0 = BU1 `_ 8 by rewrite /nth' (nth_slices _ _ _ (esym BU1SI)).
by case: BU1_8 => _ ->.
rewrite -sbang_con.
Ent_R_flat; apply ent_R_sbang_con => //.
have -> : SI `_ ('| S74.Handshake_hd + 1 |) = BU2 `_ 13.
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //.
apply/andP; split; first by done.
by rewrite leqnn.
rewrite BU2_13 // -sbang_con.
Ent_R_flat; apply ent_R_sbang_con => //.
have SI_handshake_sz : SI `_ handshake_sz = BU2 `_ 14.
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //.
- apply/andP; split; first by done.
rewrite (_ : 5 = '| Z<=nat 5 |) // -plusE -Zabs_nat_Zplus //; last by lia.
rewrite (_ : handshake_sz = '| 6 |) //.
apply/ltP; apply Zabs_nat_lt => //; lia.
- by rewrite leqnn andbC.
have -> : SI `_ maj_ver = S621.SSLv30_maj by rewrite SI_maj_ver_BU BU1_9.
rewrite -sbang_con.
Ent_R_flat; apply ent_R_sbang_con => //.
have -> : SI `_ maj_req = S621.SSLv30_maj by rewrite SI_maj_req BU2_17.
rewrite -sbang_con.
Ent_R_flat; apply ent_R_sbang_con => //.
rewrite -sbang_con.
have -> : !!( S621.length_maxp (n' SI) ) <==> emp.
rewrite /n' /S621.length_maxp /= /S41.bytes2nat /= /MachineIntByte_m.bytes2nat /= /multi_int.bSum_c /=.
have -> : ('| (u2Zc (SI `_ record_sz) * 256 + u2Zc (SI `_ record_sz.+1)) | = '| the_n |)%Z.
by rewrite the_n_n_SI /n /= /n' /= /multi_int.bSum_c.
have -> : '| the_n | <= 2 ^ 14.
rewrite (_ : 2 ^ 14 = '| (2 ^^ 14) |) //.
apply/leP; apply Zabs2Nat.inj_le => //.
lia.
simpl expZ; lia.
by apply sbang_emp.
have SI_Shandshake_sz : SI `_ handshake_sz.+1 = BU2 `_ 15.
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //.
- apply/andP; split; first by done.
rewrite (_ : 5 = '| (Z<=nat 5) |) //.
rewrite -plusE -Zabs_nat_Zplus //.
rewrite (_ : handshake_sz.+1 = '| 7 |) //.
apply/ltP; apply Zabs_nat_lt; lia.
lia.
- by rewrite leqnn andbC.
have SI_SShandshake_sz : SI `_ handshake_sz.+2 = BU2 `_ 16.
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //.
- apply/andP; split; first by done.
rewrite (_ : 5 = '| Z<=nat 5 |) //.
rewrite -plusE -Zabs_nat_Zplus //.
rewrite (_ : handshake_sz.+2 = '| 8 |) //.
apply/ltP; apply Zabs_nat_lt; lia.
lia.
- by rewrite leqnn andbC.
have SI_csuites_BU : SI `_ (csuites + nat<=u (BU2 `_ 51)) = BU2 `_ (52 + nat<=u BU2 `_ 51).
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //.
- rewrite (_ : csuites = 44) //.
apply/andP; split; first by done.
apply/ltP; apply Nat2Z.inj_lt.
rewrite 2!inj_plus.
rewrite {1}/u2nat Z_of_nat_Zabs_nat; last by apply min_u2Z.
rewrite Z_of_nat_Zabs_nat; last by lia.
apply: ltZ_trans; last by apply csuites_max.
rewrite (_ : Z_of_nat 44 = 44%Z) // (_ : Z<=nat csuites.+1 = 45%Z) //.
rewrite -/(BU2 `_ 51); lia.
- rewrite leqnn andbC /=.
apply O_lt_Zabs_nat; lia.
have SI_Scsuites_BU : SI `_ (csuites.+1 + nat<=u BU2 `_ 51) = BU2 `_ (53 + nat<=u BU2 `_ 51).
rewrite /nth' (nth_slices _ _ _ (esym BU2SI)) //.
- rewrite (_ : csuites = 44) //.
apply/andP; split; first by done.
apply/ltP; apply Nat2Z.inj_lt.
rewrite 2!inj_plus {1}/u2nat Z_of_nat_Zabs_nat; last by apply min_u2Z.
rewrite Z_of_nat_Zabs_nat; last by lia.
apply: leZ_ltZ_trans; last by apply csuites_max.
exact: leZZ.
- rewrite leqnn andbC /=.
apply O_lt_Zabs_nat; lia.
have SI_ciph_len_value_Z : Z<=u (SI `_ (csuites + nat<=u (BU2 `_ 51)) `||
SI `_ (csuites.+1 + nat<=u (BU2 `_ 51))) = ciph_len_value_Z.
unfold ciph_len_value_Z, ciph_len_value, ciph_len_exp, Shigh, Slow.
rewrite SI_csuites_BU SI_Scsuites_BU si32_of_phy_gb_or_e ge_cast_sint_cst_8c.
rewrite [in X in _ = Z<=s ( _ `|` X) ] phy_of_si32K sint_shl_e_to_i32_ge s2Z_u2Z_pos; last first.
apply/leZP.
rewrite le0_or //; last by rewrite (s2Z_zext 24) //; apply/leZP/min_u2Z.
apply/leZP.
rewrite zext_concat concatA (@s2Z_castA 16 8 8).
by apply le0concat.
by rewrite (@u2Z_or 24) u2Z_concat (u2Z_zext 16).
have SI_compmeth_BU : SI `_ (compmeth + nat<=u (BU2 `_ 51) + ciph_len_value_nat) =
BU2 `_ (54 + nat<=u BU2 `_ 51 + ciph_len_value_nat).
rewrite /nth' (@nth_slices _ (8 + 5) '| the_n | SI BU2 5 '| the_n |) //.
rewrite leqnn andbC /=.
apply O_lt_Zabs_nat; lia.
have SI_comp_len_value_Z :
Z<=u (SI `_ (compmeth + nat<=u (BU2 `_ 51) + ciph_len_value_nat)) = Z<=s (si32<=phy comp_len_value).
rewrite /comp_len_value /comp_len_exp SI_compmeth_BU ge_cast_sint_cst_8c.
rewrite phy_of_si32K s2Z_u2Z_pos; last by rewrite (s2Z_zext 24) //; apply min_u2Z.
by rewrite (u2Z_zext 24).
pose comp_len_value_Z := Z<=s (si32<=phy comp_len_value).
pose Hcomp_len_value_Z := !!(1 <= comp_len_value_Z <= 16)%Z.
set Hcomp_len2 := `! \~b \b __comp_len \< [ 1 ]sc \|| __comp_len \> [ 16 ]sc.
Ent_L_stren_by Hcomp_len_value_Z (Hcomp_len :: Hcomp_len2 :: nil).
rewrite /Hcomp_len.
Ent_LR_rewrite_eq_e 0.
Ent_L_subst_apply; Ent_R_subst_apply.
rewrite -bbang_bneg_or -CgeqNlt sequiv_ge_sym sequiv_gt_sym -CgeqNlt sequiv_ge_sym.
Bbang2sbang.
rewrite sbang_con.
apply ent_sbang_sbang.
case.
move/Zle_gb. move/(_ erefl erefl).
rewrite i32_ge_s_cst_e Z2sK // -/comp_len_value_Z => ?.
move/Zle_gb. move/(_ erefl erefl).
by rewrite i32_ge_s_cst_e Z2sK.
rewrite /Hcomp_len_value_Z {Hcomp_len_value_Z}.
apply ent_L_sbang_con => Hcomp_len_value_Z.
pose Hextensions' :=
!!(Z<=nat compmeth.+1 + Z<=u BU2 `_ 51 + ciph_len_value_Z + Z<=s (si32<=phy (comp_len_value)) =
the_n_plus5)%Z.
Ent_L_stren_by Hextensions' (Hsess_len :: Hn_old :: Hciph_len :: Hcomp_len :: Hextensions :: nil).
unfold Hcomp_len, Hciph_len, Hsess_len, Hn0, Hn_old, Hn, Hextensions.
Ent_LR_rewrite_eq_e 0 (* comp_len *).
Ent_R_subst_apply; do 4 Ent_L_subst_apply.
rewrite -/Hextensions'.
Ent_LR_rewrite_eq_e 0 (* n_old *).
Ent_R_subst_apply; do 3 Ent_L_subst_apply.
rewrite -/Hextensions'.
Ent_LR_rewrite_eq_e 0 (* ciph_len *).
do 3 Ent_LR_subst_apply.
rewrite -/Hextensions'.
Ent_LR_rewrite_eq_e 0 (* sess_len *).
do 2 Ent_LR_subst_apply.
rewrite -/Shigh -/Slow -/ciph_len_exp bneg_neq_eq.
Bbang2sbang.
apply ent_sbang_sbang.
rewrite gb_eq_e.
move/eqP.
rewrite -[in X in _ = X -> _](ground_exp_sem (store0 sigma)).
rewrite -> (@sequiv_add_e_sc_pos _ sigma 5 the_n); last 3 first.
done.
lia.
simpl expZ; lia.
rewrite -/the_n_plus5.
rewrite [in X in _ = X -> _](ground_exp_sem (store0 sigma)).
set lhs := ground_exp _ _.
set rhs := ground_exp _ _.
move=> H.
have {H} : Z<=s (si32<=phy lhs) = Z<=s (si32<=phy rhs) by rewrite H.
rewrite /lhs /rhs {lhs rhs} si32_of_phy_gb_add_e si32_of_phy_gb_add_e.
rewrite si32_of_phy_gb_add_e ge_cast_sint_cst_8c (phy_of_si32K (zext 24 BU2`_51)).
rewrite i32_ge_s_cst_e i32_ge_s_cst_e Z2sK; last by unfold the_n_plus5; simpl expZ; lia.
move=> <-.
rewrite -/ciph_len_value /comp_len_value ge_cst_e -/comp_len_value (_ : Z<=nat _ = 47%Z) //.
move: (min_u2Z (BU2 `_ 51)) (max_u2Z (BU2 `_ 51)) => BU51max.
rewrite s2Z_add; last first.
rewrite -/comp_len_value_Z s2Z_add; last first.
rewrite -/ciph_len_value_Z s2Z_add; rewrite Z2sK // (s2Z_zext 24) //; simpl expZ; lia.
rewrite -/ciph_len_value_Z s2Z_add; rewrite Z2sK // (s2Z_zext 24) //; simpl expZ; lia.
rewrite s2Z_add; last first.
rewrite -/ciph_len_value_Z s2Z_add; rewrite Z2sK // (s2Z_zext 24) //; simpl expZ; lia.
rewrite -/ciph_len_value_Z s2Z_add; last by rewrite Z2sK // (s2Z_zext 24) //; simpl expZ; lia.
by rewrite Z2sK // (s2Z_zext 24).
apply ent_L_sbang_con => Hextensions''.
clear Hextensions'.
Ent_L_contract_bbang 16 (* Hextensions *); clear Hextensions.
rewrite coneP.
Ent_L_dup (Hn0 :: Hn :: Hm :: nil).
rewrite -sbang_con.
Ent_decompose (13 (* Hn0 *) :: 10 (* Hn *) :: 7 (* Hm *) :: nil) (0 (* length_maxp *) :: nil).
unfold Hn0, Hn, Hm.
Ent_LR_rewrite_eq_e 0 (* n0 *).
do 3 Ent_LR_subst_apply.
Ent_LR_rewrite_eq_e 0 (* n *).
Ent_R_subst_apply; Ent_L_subst_apply.
rewrite -the_n_n_SI in_left2_n5 /the_n_plus5 (_ : `( 5 + the_n )_32 = Z2s 32 (5 + the_n)); last first.
apply s2Z_inj.
rewrite s2Z_u2Z_pos'; last first.
split; first by apply min_u2Z.
rewrite Z2uK; last by simpl expZ; lia.
simpl expZ; lia.
rewrite Z2uK; last by simpl expZ; lia.
rewrite Z2sK //; by simpl expZ; lia.
Bbang2sbang.
rewrite -(ground_bexp_sem (store0 sigma)).
rewrite (@sequiv_sub_e_sc _ sigma); last 3 first.
simpl expZ; lia.
done.
simpl expZ; lia.
rewrite (_ : 5 + the_n - 5 = the_n)%Z; last by lia.
rewrite (ground_bexp_sem (store0 sigma)) gb_eq_e.
apply ent_sbang_sbang.
move/eqP.
set lhs := [ _ ]ge.
set rhs := [ _ ]ge.
move=> abs; have {abs} : si32<=phy lhs = si32<=phy rhs. by rewrite abs.
rewrite /lhs /rhs {lhs rhs} i32_ge_s_cst_e.
set lhs := Z2s _ _.
set rhs := si32<=phy _.
move=> abs; have {abs} : s2Z lhs = s2Z rhs. by rewrite abs.
rewrite Z2sK; last by simpl expZ; lia.
move=> K; rewrite K; rewrite /rhs /lhs.
rewrite si32_of_phy_gb_add_e s2Z_add; last first.
rewrite s2Z_ge_s_cst_e // si32_of_phy_gb_or_e sint_shl_e_to_i32_ge -SI_Shandshake_sz -SI_SShandshake_sz.
have Htmp2 : (0 <= Z<=u SI `_ handshake_sz.+1 * 2 ^^ 8 +
Z<=u SI `_ handshake_sz.+2 < 2 ^^ predn 32)%Z.
split.
apply/addZ_ge0; last by apply min_u2Z.
apply/mulZ_ge0 => //; by apply min_u2Z.
apply (@ltZ_trans (2 ^^ 8 * 2 ^^ 8 + 2 ^^ 8)%Z) => //.
apply ltZ_add; last by apply max_u2Z.
by apply ltZ_pmul2r => //; apply max_u2Z.
rewrite s2Z_u2Z_pos'; last first.
rewrite ge_cast_sint_cst_8c phy_of_si32K (u2Z_or (zext 16 SI `_ handshake_sz.+1)) u2Z_zext; exact Htmp2.
rewrite ge_cast_sint_cst_8c phy_of_si32K (u2Z_or (zext 16 SI `_ handshake_sz.+1)) u2Z_zext.
split.
apply (@leZ_trans Z0) => //.
apply addZ_ge0 => //.
apply addZ_ge0; last by apply min_u2Z.
apply mulZ_ge0 => //; by apply min_u2Z.
apply (@ltZ_trans (4 + (2 ^^ 8 * 2 ^^ 8 + 2 ^^ 8))%Z) => //.
apply leZ_lt_add => //.
apply ltZ_add; last exact: max_u2Z.
by apply ltZ_pmul2r => //; apply max_u2Z.
rewrite s2Z_ge_s_cst_e // si32_of_phy_gb_or_e sint_shl_e_to_i32_ge.
set tmp2 := Z<=s _.
have Htmp2 : (0 <= tmp2)%Z.
apply/leZP.
apply le0_or.
rewrite zext_concat.
apply/leZP.
rewrite concatA (@s2Z_castA 16 8 8).
by apply le0concat.
rewrite ge_cast_sint_cst_8c phy_of_si32K (s2Z_zext 24) //.
exact/leZP/min_u2Z.
rewrite Zabs2Nat.inj_add; last 2 first.
done.
exact Htmp2.
rewrite (_ : Pos.to_nat 4 = '| 4 |) // plusE.
apply leq_add; first by apply leqnn.
unfold m.
apply/leP/Zabs2Nat.inj_le.
rewrite multi_int.bSum_c_Sum; by apply multi_int.min_lSum.
exact Htmp2.
rewrite /m' /= /multi_int.bSum_c /= -!u2ZE SI_handshake_sz BU2_14 /tmp2 Z2uK //=.
apply Zeq_le.
rewrite s2Z_u2Z_pos; last by rewrite -/tmp2; exact Htmp2.
by rewrite ge_cast_sint_cst_8c phy_of_si32K (@u2Z_or 24) SI_Shandshake_sz (u2Z_zext 16) SI_SShandshake_sz.
rewrite -sbang_con.
have -> : !!( (Z<=nat (sess_len SI) <= tls_max S7412.SessionID)%Z ) <==> emp.
rewrite /sess_len SI_sid_BU_51 Z_of_nat_Zabs_nat; last by apply min_u2Z.
by apply sbang_emp.
Ent_R_flat.
rewrite -sbang_con.
have -> : !!( (tls_min S7412.cipher_suites_type <= Z<=nat (ciph_len SI) <=
tls_max S7412.cipher_suites_type)%Z) <==> emp.
(* csuites = 44 *)
rewrite /ciph_len /sess_len SI_sid_BU_51 Z_of_nat_Zabs_nat; last by apply min_u2Z.
rewrite SI_ciph_len_value_Z.
apply sbang_emp.
simpl (tls_min S7412.cipher_suites_type); simpl (tls_max S7412.cipher_suites_type); lia.
Ent_R_flat.
rewrite -sbang_con.
Ent_decompose (18 (* Hciph_len *) :: 19 (* Hciph_len_even *) :: nil) (0 (* even (ciph_len SI) *) :: nil).
unfold Hciph_len, Hciph_len_even.
Ent_L_ex k'.
Ent_LR_rewrite_eq_e 1 (* ciph_len *) . (* CHECK: performs an undesirable simpl *)
do 2 Ent_L_subst_apply.
apply ent_L_sbang_con2 => Hk'.
Ent_R_subst_apply.
Bbang2sbang.
apply ent_sbang_sbang.
rewrite gb_eq_e.
move/eqP => X.
rewrite /ciph_len /sess_len SI_sid_BU_51 SI_csuites_BU SI_Scsuites_BU.
set lhs := ground_exp (_ \| _) _ in X.
suff : ~~ odd (nat<=u (si32<=phy lhs)).
rewrite /lhs si32_of_phy_gb_or_e sint_shl_e_to_i32_ge /u2nat ge_cast_sint_cst_8c.
by rewrite phy_of_si32K (@u2Z_or 24) u2Z_concat (u2Z_zext 16).
rewrite -X i32_ge_s_cst_e /u2nat -s2Z_u2Z_pos; last first.
rewrite Z2sK; [lia | simpl expZ; lia].
rewrite Z2sK; last by simpl expZ; lia.
by rewrite Zabs_nat_mult muln2 odd_double.
have comp_len_SI : comp_len SI = nat<=s (si32<=phy comp_len_value).
rewrite /s2nat -SI_comp_len_value_Z /comp_len /sess_len SI_sid_BU_51.
rewrite /ciph_len /ciph_len_value_nat.
by rewrite -SI_ciph_len_value_Z /sess_len SI_sid_BU_51.
rewrite -sbang_con -conA.
apply ent_R_sbang_con.
rewrite comp_len_SI Z_of_nat_Zabs_nat; rewrite -/comp_len_value_Z; lia.
rewrite -sbang_con -[in X in _ ===> X] conA.
apply ent_R_sbang_con.
move: HSI_new; clear -the_n_n_SI HN1.
rewrite /the_n_plus5 Zabs2Nat.inj_add //; last by lia.
by rewrite the_n_n_SI addnC.
Ent_decompose (7 (* Hm *) :: 9 (* Hn*) :: 11 (* Hn0 *) :: nil) (0 :: nil).
unfold Hm, Hn, Hn0, S7412.client_extensions_present, m, sess_len, ciph_len, comp_len, sess_len.
rewrite SI_sid_BU_51 {2}/u2nat SI_ciph_len_value_Z.
rewrite /ciph_len /sess_len SI_sid_BU_51 {4}/u2nat SI_ciph_len_value_Z.
fold ciph_len_value_nat.
rewrite {2}/u2nat SI_comp_len_value_Z -/comp_len_value_Z /S7412.ClientHello_sz.
rewrite [fixed_sz S7412.cipher_suites_type]/= [fixed_sz S7412.compression_methods_type]/=.
rewrite /S7412.Hello_sz [fixed_sz S621.ProtocolVersion]/=.
rewrite [fixed_sz S7412.Random]/= [fixed_sz S7412.SessionID]/=.
rewrite {1}/u2nat Z_of_nat_Zabs_nat; last by apply min_u2Z.
rewrite /multi_int.bSum_c [foldl _ _ _]/= SI_handshake_sz SI_Shandshake_sz SI_SShandshake_sz.
rewrite -!u2ZE BU2_14 Z2uK // mul0Z add0Z.
set lock := !!( _ ).
Ent_LR_rewrite_eq_e 0.
do 3 Ent_LR_subst_apply.
rewrite -/lock.
Ent_LR_rewrite_eq_e 1.
do 2 Ent_LR_subst_apply.
rewrite -/lock in_left2_n5 /lock.
Bbang2sbang.
apply ent_sbang_sbang.
rewrite gb_eq_e.
move/eqP.
rewrite (_ : [_ \- _]ge = [ Z2u 32 the_n ]p); last first.
rewrite -(ground_exp_sem (store0 sigma)).
rewrite (_ : [ `( the_n_plus5 )_32 ]pc = [ the_n_plus5 ]sc :> exp _ (ityp: sint)); last first.
congr ([ _ ]pc).
rewrite Z2s_Z2u_k //.
simpl expZ; unfold the_n_plus5; lia.
rewrite sequiv_sub_e_sc; last 3 first.
simpl expZ; unfold the_n_plus5; lia.
done.
simpl expZ; unfold the_n_plus5; lia.
rewrite /the_n_plus5 (_ : 5 + the_n - 5 = the_n)%Z; last by ring.
rewrite (ground_exp_sem (store0 sigma)).
apply si32_of_phy_inj.
rewrite i32_ge_s_cst_e phy_of_si32K Z2s_Z2u_k //.
simpl expZ; unfold the_n_plus5; lia.
set lhs := [ _ ]ge.
set rhs := [ `( the_n )_32 ]p.
move=> abs; have {abs} : si32<=phy lhs = si32<=phy rhs by rewrite abs.
rewrite {}/lhs {}/rhs (phy_of_si32K (`( the_n )_32)) si32_of_phy_gb_add_e.
set lhs := _ `+ _.
move=> abs; have {abs} : u2Z lhs = the_n.
rewrite abs Z2uK //.
simpl expZ; lia.
rewrite {}/lhs.
set tmp := [ _ \| _ ]ge.
have tmptmp : u2Z (si32<=phy tmp) = (Z<=u BU2`_15 * 256 + Z<=u BU2`_16)%Z.
rewrite {} /tmp si32_of_phy_gb_or_e sint_shl_e_to_i32_ge.
by rewrite ge_cast_sint_cst_8c phy_of_si32K (@u2Z_or 24) (u2Z_zext 16).
rewrite [in X in _ -> _ = X]Z_of_nat_Zabs_nat; last first.
apply addZ_ge0; last exact: min_u2Z.
apply mulZ_ge0 => //; exact: min_u2Z.
rewrite -tmptmp i32_ge_s_cst_e.
rewrite u2Z_add; last first.
rewrite Z2s_Z2u_k // Z2uK // tmptmp.
apply (@ltZ_trans (4 + 2 ^^ 16 + 2 ^^ 8)%Z) => //.
rewrite -addZA ltZ_add2l.
apply ltZ_add; last exact: max_u2Z.
rewrite (_ : 2 ^^ 16 = 2 ^^ 8 * 2 ^^ 8)%Z //.
apply ltZ_pmul2r => //; exact/max_u2Z.
rewrite Z2s_Z2u_k // Z2uK //.
move=> Htmptmp.
rewrite /the_n_plus5 -Htmptmp (_ : Z<=nat _ = 47%Z) // in Hextensions''.
rewrite (_ : Z<=nat 1 = 1%Z) // (_ : Z<=nat 2 = 2%Z) // Z_of_nat_Zabs_nat; last by lia.
rewrite -/comp_len_value_Z in Hextensions''.
clear -Hextensions'' Hciph_len_bound_Z.
rewrite (_ : Z<=nat ciph_len_value_nat = ciph_len_value_Z); last first.
rewrite /ciph_len_value_nat Z_of_nat_Zabs_nat //; lia.
lia.
Ent_L_contract_bbang 0.
apply ent_L_sbang_con => i_CI.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 0.
Ent_L_contract_bbang 1.
Ent_L_contract_bbang 1.
by apply ent_id.
Qed.
End POLAR_parse_client_hello_triple.
|
{"author": "affeldt-aist", "repo": "seplog", "sha": "b08516d34f5dedd0aafbe77d8ef270fa838e8f85", "save_path": "github-repos/coq/affeldt-aist-seplog", "path": "github-repos/coq/affeldt-aist-seplog/seplog-b08516d34f5dedd0aafbe77d8ef270fa838e8f85/seplogC/POLAR_parse_client_hello_triple4.v"}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 15:02:51 2020
@author: parsotak
"""
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
#Define input file
infile = '/wx/storage/halpea17/wx272/20170909_erai.nc'
#Read in the file
f = Dataset(infile, 'r')
#read in mslp data (3D : time, lat, lon.)
mslp = f['PRMSL_GDS0_SFC'][:]
#slice out the first time step (2-D array: lat, lon)
mslp00 = mslp[0, :, :]
print(mslp00.shape)
#Read in latitude and longitude and print their shapes
lats = f['g0_lat_1'][:]
lons = f['g0_lon_2'][:]
print(lats.shape)
print(lons.shape)
#Convert the 1-D lat/lon arrays to 2-D grids for plotting on a map
lon2d, lat2d = np.meshgrid(lons, lats)
#Define a figure
fig = plt.figure(figsize = (12,8))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
#Define basemap
m = Basemap(llcrnrlon = 240., llcrnrlat = 5., urcrnrlon = 350., urcrnrlat = 55., resolution = 'l', projection = 'merc', ax = ax)
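#Project the 2-D lon/lat grids (degrees) onto the map projection's x/y coordinates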
xi, yi = m(lon2d, lat2d)
m.drawcoastlines()
m.drawstates()
m.drawcountries()
m.drawparallels(np.arange(-80., 81, 10.), labels = [1, 0, 0, 0], fontsize = 12)
m.drawmeridians(np.arange(0., 359., 20.), labels = [0, 0 ,0, 1], fontsize = 14)
#Define range of MSLP values to be printed
clevs = np.arange(960, 1041, 4)
#Add contour tick marks
cticks = np.arange(960, 1041, 8)
#Define MSLP values as a color filler
mslpfill = m.contourf(xi, yi, mslp00, clevs, cmap = 'rainbow')
#Add a color bar
cbar = plt.colorbar(mslpfill, orientation = 'horizontal', pad = 0.05, shrink = 0.75, ax = ax, ticks = cticks)
#increase size of labels
cbar.ax.tick_params(labelsize = 14)
cbar.set_label('MSLP (mb)', fontsize = 14)
mslplines = m.contour(xi, yi, mslp00, clevs, colors = 'k')
mslplab = plt.clabel(mslplines, clevs, inline = True, fontsize = 12, fmt = '%1.0f')
# add a title
ax.set_title('Test plot of MSLP (mb) from netCDF file')
plt.show()
|
{"hexsha": "be8dcdd5e6052a8c94cb931f9827b3e7c2fdc5e3", "size": 1874, "ext": "py", "lang": "Python", "max_stars_repo_path": "example_grib_2_10_20.py", "max_stars_repo_name": "Kyl67899/python-labs", "max_stars_repo_head_hexsha": "aafc6fc94837ee43c9ef2e1b103d86f80dfc9814", "max_stars_repo_licenses": ["FSFAP"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example_grib_2_10_20.py", "max_issues_repo_name": "Kyl67899/python-labs", "max_issues_repo_head_hexsha": "aafc6fc94837ee43c9ef2e1b103d86f80dfc9814", "max_issues_repo_licenses": ["FSFAP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example_grib_2_10_20.py", "max_forks_repo_name": "Kyl67899/python-labs", "max_forks_repo_head_hexsha": "aafc6fc94837ee43c9ef2e1b103d86f80dfc9814", "max_forks_repo_licenses": ["FSFAP"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.1224489796, "max_line_length": 128, "alphanum_fraction": 0.6686232657, "include": true, "reason": "import numpy", "num_tokens": 685}
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the (private) TabuSearch python interface."""
import time
import unittest
from concurrent.futures import ThreadPoolExecutor, wait
import dimod
import tabu
import numpy as np
try:
perf_counter = time.perf_counter
except AttributeError: # pragma: no cover
# python 2
perf_counter = time.time
class RunTimeAssertionMixin(object):
class assertRuntimeWithin(object):
def __init__(self, low, high):
"""Min/max runtime in milliseconds."""
self.limits = (low, high)
def __enter__(self):
self.tick = perf_counter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.dt = (perf_counter() - self.tick) * 1000.0
self.test()
def test(self):
low, high = self.limits
if low is not None and self.dt < low:
raise AssertionError("Min runtime unreached: %g ms < %g ms" % (self.dt, low))
if high is not None and self.dt > high:
raise AssertionError("Max runtime exceeded: %g ms > %g ms" % (self.dt, high))
class assertMinRuntime(assertRuntimeWithin):
def __init__(self, t):
"""Min runtime in milliseconds."""
self.limits = (t, None)
class assertMaxRuntime(assertRuntimeWithin):
def __init__(self, t):
"""Max runtime in milliseconds."""
self.limits = (None, t)
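# Usage sketch (added as a reading aid; not part of the original test module; the
# test body below is hypothetical):
#
#   class MyTimedTest(unittest.TestCase, RunTimeAssertionMixin):
#       def test_quick_operation(self):
#           with self.assertMaxRuntime(100):   # must finish within 100 ms
#               sum(range(10 ** 4))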
class TestTabuSearch(unittest.TestCase, RunTimeAssertionMixin):
def test_trivial(self):
qubo = [[1.0]]
init = [1]
tenure = len(init) - 1
timeout = 1
restarts = 100
search = tabu.TabuSearch(qubo, init, tenure, timeout, restarts)
solution = list(search.bestSolution())
energy = search.bestEnergy()
self.assertEqual(solution, [0])
self.assertEqual(energy, 0.0)
def test_correctness(self):
qubo = [[-1.2, 1.1], [1.1, -1.2]]
init = [1, 1]
tenure = len(init) - 1
timeout = 20
restarts = 100
search = tabu.TabuSearch(qubo, init, tenure, timeout, restarts)
solution = list(search.bestSolution())
energy = search.bestEnergy()
self.assertEqual(solution, [0, 1])
self.assertEqual(energy, -1.2)
def test_concurrency(self):
def search(timeout, restarts=int(1e6)):
return tabu.TabuSearch([[1.0]], [1], 0, timeout, restarts).bestEnergy()
with ThreadPoolExecutor(max_workers=3) as executor:
# ~ 500 ms (but be gracious on slow CI VMs)
with self.assertRuntimeWithin(400, 1600):
wait([executor.submit(search, timeout=500) for _ in range(3)])
# ~ 1000 ms (but be gracious on slow CI VMs)
with self.assertRuntimeWithin(900, 2100):
wait([executor.submit(search, timeout=500) for _ in range(4)])
def test_float(self):
n = 20
init = [1] * n
tenure = len(init) - 1
timeout = 20
restarts = 100
bqm = dimod.generators.random.uniform(n, 'BINARY', low=-100, high=100, seed=123)
Q, _ = tabu.TabuSampler._bqm_to_tabu_qubo(bqm)
search = tabu.TabuSearch(Q, init, tenure, timeout, restarts)
self.assertAlmostEqual(search.bestEnergy(), -1465.9867898)
bqm = dimod.generators.random.uniform(n, 'BINARY', low=-1, high=1, seed=123)
Q, _ = tabu.TabuSampler._bqm_to_tabu_qubo(bqm)
search = tabu.TabuSearch(Q, init, tenure, timeout, restarts)
self.assertAlmostEqual(search.bestEnergy(), -14.65986790)
def test_exceptions(self):
qubo = [[-1.2, 1.1], [1.1, -1.2]]
timeout = 10
restarts = 100
# Wrong length for init_solution
with self.assertRaises(RuntimeError):
init = [1, 1, 1]
tenure = len(init) - 1
search = tabu.TabuSearch(qubo, init, tenure, timeout, restarts)
# Tenure out of bounds
with self.assertRaises(RuntimeError):
init = [1, 1]
tenure = 3
search = tabu.TabuSearch(qubo, init, tenure, timeout, restarts)
|
{"hexsha": "b305cde76e4f814cd51622fd34eaf9ee146ba063", "size": 4739, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_search.py", "max_stars_repo_name": "zeta1999/dwave-tabu", "max_stars_repo_head_hexsha": "2a6393c4e7444c0c60d9718840160c83eadb7c31", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_search.py", "max_issues_repo_name": "zeta1999/dwave-tabu", "max_issues_repo_head_hexsha": "2a6393c4e7444c0c60d9718840160c83eadb7c31", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_search.py", "max_forks_repo_name": "zeta1999/dwave-tabu", "max_forks_repo_head_hexsha": "2a6393c4e7444c0c60d9718840160c83eadb7c31", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-07T13:59:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-07T13:59:22.000Z", "avg_line_length": 31.8053691275, "max_line_length": 93, "alphanum_fraction": 0.611521418, "include": true, "reason": "import numpy", "num_tokens": 1206}
|
[STATEMENT]
lemma SMP_subterms_subset: "subterms\<^sub>s\<^sub>e\<^sub>t M \<subseteq> SMP M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subterms\<^sub>s\<^sub>e\<^sub>t M \<subseteq> SMP M
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<sqsubseteq>\<^sub>s\<^sub>e\<^sub>t M \<Longrightarrow> x \<in> SMP M
[PROOF STEP]
fix t
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<sqsubseteq>\<^sub>s\<^sub>e\<^sub>t M \<Longrightarrow> x \<in> SMP M
[PROOF STEP]
assume "t \<in> subterms\<^sub>s\<^sub>e\<^sub>t M"
[PROOF STATE]
proof (state)
this:
t \<sqsubseteq>\<^sub>s\<^sub>e\<^sub>t M
goal (1 subgoal):
1. \<And>x. x \<sqsubseteq>\<^sub>s\<^sub>e\<^sub>t M \<Longrightarrow> x \<in> SMP M
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t \<sqsubseteq>\<^sub>s\<^sub>e\<^sub>t M
[PROOF STEP]
obtain m where "m \<in> M" "t \<sqsubseteq> m"
[PROOF STATE]
proof (prove)
using this:
t \<sqsubseteq>\<^sub>s\<^sub>e\<^sub>t M
goal (1 subgoal):
1. (\<And>m. \<lbrakk>m \<in> M; t \<sqsubseteq> m\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
m \<in> M
t \<sqsubseteq> m
goal (1 subgoal):
1. \<And>x. x \<sqsubseteq>\<^sub>s\<^sub>e\<^sub>t M \<Longrightarrow> x \<in> SMP M
[PROOF STEP]
thus "t \<in> SMP M"
[PROOF STATE]
proof (prove)
using this:
m \<in> M
t \<sqsubseteq> m
goal (1 subgoal):
1. t \<in> SMP M
[PROOF STEP]
using SMP_I[of _ _ Var]
[PROOF STATE]
proof (prove)
using this:
m \<in> M
t \<sqsubseteq> m
\<lbrakk>?s \<in> ?M; wt\<^sub>s\<^sub>u\<^sub>b\<^sub>s\<^sub>t Var; ?t \<sqsubseteq> ?s \<cdot> Var; \<And>v. wf\<^sub>t\<^sub>r\<^sub>m (Var v)\<rbrakk> \<Longrightarrow> ?t \<in> SMP ?M
goal (1 subgoal):
1. t \<in> SMP M
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t \<in> SMP M
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 851, "file": "Stateful_Protocol_Composition_and_Typing_Typed_Model", "length": 10}
|
import numpy as np
from sensors.msg import RawMeasurement
from sensors.msg import ProcessedMeasurement
def C_to_F(cel):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    return cel * 9/5 + 32
def F_to_C(far):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius."""
    return (far - 32) * 5/9
def process_measurement(measurement):
proc_measurement = ProcessedMeasurement()
proc_measurement.day = measurement.day
proc_measurement.avg_temp = F_to_C(measurement.avg_temp)
proc_measurement.min_temp = F_to_C(measurement.min_temp)
proc_measurement.max_temp = F_to_C(measurement.max_temp)
    # Flag days where the max-min temperature spread exceeds 15 degrees Celsius
    proc_measurement.big_difference = (
proc_measurement.max_temp - proc_measurement.min_temp
) > 15
return proc_measurement
def prepare_to_record(measurement):
prep_measurement = ProcessedMeasurement()
prep_measurement.day = measurement.day
prep_measurement.avg_temp = C_to_F(measurement.avg_temp)
prep_measurement.min_temp = C_to_F(measurement.min_temp)
prep_measurement.max_temp = C_to_F(measurement.max_temp)
return prep_measurement
|
{"hexsha": "df696beadf4d1bf544cf98ce2028276aad85f84c", "size": 987, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sensors/scripts/utils/analysis.py", "max_stars_repo_name": "IvanovicM/ros", "max_stars_repo_head_hexsha": "ef9f4b0661459f3911f5a937af74d18ac7170173", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-10-23T15:45:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-07T09:05:34.000Z", "max_issues_repo_path": "src/sensors/scripts/utils/analysis.py", "max_issues_repo_name": "IvanovicM/ros", "max_issues_repo_head_hexsha": "ef9f4b0661459f3911f5a937af74d18ac7170173", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sensors/scripts/utils/analysis.py", "max_forks_repo_name": "IvanovicM/ros", "max_forks_repo_head_hexsha": "ef9f4b0661459f3911f5a937af74d18ac7170173", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9090909091, "max_line_length": 61, "alphanum_fraction": 0.7689969605, "include": true, "reason": "import numpy", "num_tokens": 223}
|
section \<open>Cones\<close>
text \<open>We define notions such as cone and polyhedral cone, and prove some basic facts about them.\<close>
theory Cone
imports
Basis_Extension
Missing_VS_Connect
Integral_Bounded_Vectors
begin
context gram_schmidt
begin
definition "nonneg_lincomb c Vs b = (lincomb c Vs = b \<and> c ` Vs \<subseteq> {x. x \<ge> 0})"
definition "nonneg_lincomb_list c Vs b = (lincomb_list c Vs = b \<and> (\<forall> i < length Vs. c i \<ge> 0))"
definition finite_cone :: "'a vec set \<Rightarrow> 'a vec set" where
"finite_cone Vs = ({ b. \<exists> c. nonneg_lincomb c (if finite Vs then Vs else {}) b})"
definition cone :: "'a vec set \<Rightarrow> 'a vec set" where
"cone Vs = ({ x. \<exists> Ws. finite Ws \<and> Ws \<subseteq> Vs \<and> x \<in> finite_cone Ws})"
definition cone_list :: "'a vec list \<Rightarrow> 'a vec set" where
"cone_list Vs = {b. \<exists>c. nonneg_lincomb_list c Vs b}"
lemma finite_cone_iff_cone_list: assumes Vs: "Vs \<subseteq> carrier_vec n"
and id: "Vs = set Vsl"
shows "finite_cone Vs = cone_list Vsl"
proof -
have fin: "finite Vs" unfolding id by auto
from Vs id have Vsl: "set Vsl \<subseteq> carrier_vec n" by auto
{
fix c b
assume b: "lincomb c Vs = b" and c: "c ` Vs \<subseteq> {x. x \<ge> 0}"
from lincomb_as_lincomb_list[OF Vsl, of c]
have b: "lincomb_list (\<lambda>i. if \<exists>j<i. Vsl ! i = Vsl ! j then 0 else c (Vsl ! i)) Vsl = b"
unfolding b[symmetric] id by simp
have "\<exists> c. nonneg_lincomb_list c Vsl b"
unfolding nonneg_lincomb_list_def
apply (intro exI conjI, rule b)
by (insert c, auto simp: set_conv_nth id)
}
moreover
{
fix c b
assume b: "lincomb_list c Vsl = b" and c: "(\<forall> i < length Vsl. c i \<ge> 0)"
have "nonneg_lincomb (mk_coeff Vsl c) Vs b"
unfolding b[symmetric] nonneg_lincomb_def
apply (subst lincomb_list_as_lincomb[OF Vsl])
by (insert c, auto simp: id mk_coeff_def intro!: sum_list_nonneg)
hence "\<exists> c. nonneg_lincomb c Vs b" by blast
}
ultimately show ?thesis unfolding finite_cone_def cone_list_def
nonneg_lincomb_def nonneg_lincomb_list_def using fin by auto
qed
lemma cone_alt_def: assumes Vs: "Vs \<subseteq> carrier_vec n"
shows "cone Vs = ({ x. \<exists> Ws. set Ws \<subseteq> Vs \<and> x \<in> cone_list Ws})"
unfolding cone_def
proof (intro Collect_cong iffI)
fix x
assume "\<exists>Ws. finite Ws \<and> Ws \<subseteq> Vs \<and> x \<in> finite_cone Ws"
then obtain Ws where *: "finite Ws" "Ws \<subseteq> Vs" "x \<in> finite_cone Ws" by auto
from finite_list[OF *(1)] obtain Wsl where id: "Ws = set Wsl" by auto
from finite_cone_iff_cone_list[OF _ this] *(2-3) Vs
have "x \<in> cone_list Wsl" by auto
with *(2) id show "\<exists>Wsl. set Wsl \<subseteq> Vs \<and> x \<in> cone_list Wsl" by blast
next
fix x
assume "\<exists>Wsl. set Wsl \<subseteq> Vs \<and> x \<in> cone_list Wsl"
then obtain Wsl where "set Wsl \<subseteq> Vs" "x \<in> cone_list Wsl" by auto
thus "\<exists>Ws. finite Ws \<and> Ws \<subseteq> Vs \<and> x \<in> finite_cone Ws" using Vs
by (intro exI[of _ "set Wsl"], subst finite_cone_iff_cone_list, auto)
qed
lemma cone_mono: "Vs \<subseteq> Ws \<Longrightarrow> cone Vs \<subseteq> cone Ws"
unfolding cone_def by blast
lemma finite_cone_mono: assumes fin: "finite Ws"
and Ws: "Ws \<subseteq> carrier_vec n"
and sub: "Vs \<subseteq> Ws"
shows "finite_cone Vs \<subseteq> finite_cone Ws"
proof
fix b
assume "b \<in> finite_cone Vs"
then obtain c where b: "b = lincomb c Vs" and c: "c ` Vs \<subseteq> {x. x \<ge> 0}"
unfolding finite_cone_def nonneg_lincomb_def using finite_subset[OF sub fin] by auto
define d where "d = (\<lambda> v. if v \<in> Vs then c v else 0)"
from c have d: "d ` Ws \<subseteq> {x. x \<ge> 0}" unfolding d_def by auto
have "lincomb d Ws = lincomb d (Ws - Vs) + lincomb d Vs"
by (rule lincomb_vec_diff_add[OF Ws sub fin], auto)
also have "lincomb d Vs = lincomb c Vs"
by (rule lincomb_cong, insert Ws sub, auto simp: d_def)
also have "lincomb d (Ws - Vs) = 0\<^sub>v n"
by (rule lincomb_zero, insert Ws sub, auto simp: d_def)
also have "0\<^sub>v n + lincomb c Vs = lincomb c Vs" using Ws sub by auto
also have "\<dots> = b" unfolding b by simp
finally
have "b = lincomb d Ws" by auto
then show "b \<in> finite_cone Ws" using d fin
unfolding finite_cone_def nonneg_lincomb_def by auto
qed
lemma finite_cone_carrier: "A \<subseteq> carrier_vec n \<Longrightarrow> finite_cone A \<subseteq> carrier_vec n"
unfolding finite_cone_def nonneg_lincomb_def by auto
lemma cone_carrier: "A \<subseteq> carrier_vec n \<Longrightarrow> cone A \<subseteq> carrier_vec n"
using finite_cone_carrier unfolding cone_def by blast
lemma cone_iff_finite_cone: assumes A: "A \<subseteq> carrier_vec n"
and fin: "finite A"
shows "cone A = finite_cone A"
proof
show "finite_cone A \<subseteq> cone A" unfolding cone_def using fin by auto
show "cone A \<subseteq> finite_cone A" unfolding cone_def using fin finite_cone_mono[OF fin A] by auto
qed
lemma set_in_finite_cone:
assumes Vs: "Vs \<subseteq> carrier_vec n"
and fin: "finite Vs"
shows "Vs \<subseteq> finite_cone Vs"
proof
fix x
assume x: "x \<in> Vs"
show "x \<in> finite_cone Vs" unfolding finite_cone_def
proof
let ?c = "\<lambda> y. if x = y then 1 else 0 :: 'a"
have Vsx: "Vs - {x} \<subseteq> carrier_vec n" using Vs by auto
have "lincomb ?c Vs = x + lincomb ?c (Vs - {x})"
using lincomb_del2 x Vs fin by auto
also have "lincomb ?c (Vs - {x}) = 0\<^sub>v n" using lincomb_zero Vsx by auto
also have "x + 0\<^sub>v n = x " using M.r_zero Vs x by auto
finally have "lincomb ?c Vs = x" by auto
moreover have "?c ` Vs \<subseteq> {z. z \<ge> 0}" by auto
ultimately show "\<exists>c. nonneg_lincomb c (if finite Vs then Vs else {}) x"
unfolding nonneg_lincomb_def
using fin by auto
qed
qed
lemma set_in_cone:
assumes Vs: "Vs \<subseteq> carrier_vec n"
shows "Vs \<subseteq> cone Vs"
proof
fix x
assume x: "x \<in> Vs"
show "x \<in> cone Vs" unfolding cone_def
proof (intro CollectI exI)
have "x \<in> carrier_vec n" using Vs x by auto
then have "x \<in> finite_cone {x}" using set_in_finite_cone by auto
then show "finite {x} \<and> {x} \<subseteq> Vs \<and> x \<in> finite_cone {x}" using x by auto
qed
qed
lemma zero_in_finite_cone:
assumes Vs: "Vs \<subseteq> carrier_vec n"
shows "0\<^sub>v n \<in> finite_cone Vs"
proof -
let ?Vs = "(if finite Vs then Vs else {})"
have "lincomb (\<lambda> x. 0 :: 'a) ?Vs = 0\<^sub>v n" using lincomb_zero Vs by auto
moreover have "(\<lambda> x. 0 :: 'a) ` ?Vs \<subseteq> {y. y \<ge> 0}" by auto
ultimately show ?thesis unfolding finite_cone_def nonneg_lincomb_def by blast
qed
lemma lincomb_in_finite_cone:
assumes "x = lincomb l W"
and "finite W"
and "\<forall>i \<in> W . l i \<ge> 0"
and "W \<subseteq> carrier_vec n"
shows "x \<in> finite_cone W"
using cone_iff_finite_cone assms unfolding finite_cone_def nonneg_lincomb_def by auto
lemma lincomb_in_cone:
assumes "x = lincomb l W"
and "finite W"
and "\<forall>i \<in> W . l i \<ge> 0"
and "W \<subseteq> carrier_vec n"
shows "x \<in> cone W"
using cone_iff_finite_cone assms unfolding finite_cone_def nonneg_lincomb_def by auto
lemma zero_in_cone: "0\<^sub>v n \<in> cone Vs"
proof -
have "finite {}" by auto
moreover have "{} \<subseteq> cone Vs" by auto
moreover have "0\<^sub>v n \<in> finite_cone {}" using zero_in_finite_cone by auto
ultimately show ?thesis unfolding cone_def by blast
qed
lemma cone_smult:
assumes a: "a \<ge> 0"
and Vs: "Vs \<subseteq> carrier_vec n"
and x: "x \<in> cone Vs"
shows "a \<cdot>\<^sub>v x \<in> cone Vs"
proof -
from x Vs obtain Ws c where Ws: "Ws \<subseteq> Vs" and fin: "finite Ws" and
"nonneg_lincomb c Ws x"
unfolding cone_def finite_cone_def by auto
then have "nonneg_lincomb (\<lambda> w. a * c w) Ws (a \<cdot>\<^sub>v x)"
unfolding nonneg_lincomb_def using a lincomb_distrib Vs by auto
then show ?thesis using Ws fin unfolding cone_def finite_cone_def by auto
qed
lemma finite_cone_empty[simp]: "finite_cone {} = {0\<^sub>v n}"
by (auto simp: finite_cone_def nonneg_lincomb_def)
lemma cone_empty[simp]: "cone {} = {0\<^sub>v n}"
unfolding cone_def by simp
lemma cone_elem_sum:
assumes Vs: "Vs \<subseteq> carrier_vec n"
and x: "x \<in> cone Vs"
and y: "y \<in> cone Vs"
shows "x + y \<in> cone Vs"
proof -
obtain Xs where Xs: "Xs \<subseteq> Vs" and fin_Xs: "finite Xs"
and Xs_cone: "x \<in> finite_cone Xs"
using Vs x unfolding cone_def by auto
obtain Ys where Ys: "Ys \<subseteq> Vs" and fin_Ys: "finite Ys"
and Ys_cone: "y \<in> finite_cone Ys"
using Vs y unfolding cone_def
by auto
have "x \<in> finite_cone (Xs \<union> Ys)" and "y \<in> finite_cone (Xs \<union> Ys)"
using finite_cone_mono fin_Xs fin_Ys Xs Ys Vs Xs_cone Ys_cone
by (blast, blast)
then obtain cx cy where "nonneg_lincomb cx (Xs \<union> Ys) x"
and "nonneg_lincomb cy (Xs \<union> Ys) y"
unfolding finite_cone_def using fin_Xs fin_Ys by auto
hence "nonneg_lincomb (\<lambda> v. cx v + cy v) (Xs \<union> Ys) (x + y)"
unfolding nonneg_lincomb_def
using lincomb_sum[of "Xs \<union> Ys" cx cy] fin_Xs fin_Ys Xs Ys Vs
by fastforce
hence "x + y \<in> finite_cone (Xs \<union> Ys)"
unfolding finite_cone_def using fin_Xs fin_Ys by auto
thus ?thesis unfolding cone_def using fin_Xs fin_Ys Xs Ys by auto
qed
lemma cone_cone:
assumes Vs: "Vs \<subseteq> carrier_vec n"
shows "cone (cone Vs) = cone Vs"
proof
show "cone Vs \<subseteq> cone (cone Vs)"
by (rule set_in_cone[OF cone_carrier[OF Vs]])
next
show "cone (cone Vs) \<subseteq> cone Vs"
proof
fix x
assume x: "x \<in> cone (cone Vs)"
then obtain Ws c where Ws: "set Ws \<subseteq> cone Vs"
and c: "nonneg_lincomb_list c Ws x"
using cone_alt_def Vs cone_carrier unfolding cone_list_def by auto
have "set Ws \<subseteq> cone Vs \<Longrightarrow> nonneg_lincomb_list c Ws x \<Longrightarrow> x \<in> cone Vs"
proof (induction Ws arbitrary: x c)
case Nil
hence "x = 0\<^sub>v n" unfolding nonneg_lincomb_list_def by auto
thus "x \<in> cone Vs" using zero_in_cone by auto
next
case (Cons a Ws)
have "a \<in> cone Vs" using Cons.prems(1) by auto
moreover have "c 0 \<ge> 0"
using Cons.prems(2) unfolding nonneg_lincomb_list_def by fastforce
ultimately have "c 0 \<cdot>\<^sub>v a \<in> cone Vs" using cone_smult Vs by auto
moreover have "lincomb_list (c \<circ> Suc) Ws \<in> cone Vs"
using Cons unfolding nonneg_lincomb_list_def by fastforce
moreover have "x = c 0 \<cdot>\<^sub>v a + lincomb_list (c \<circ> Suc) Ws"
using Cons.prems(2) unfolding nonneg_lincomb_list_def
by auto
ultimately show "x \<in> cone Vs" using cone_elem_sum Vs by auto
qed
thus "x \<in> cone Vs" using Ws c by auto
qed
qed
lemma cone_smult_basis:
assumes Vs: "Vs \<subseteq> carrier_vec n"
and l: "l ` Vs \<subseteq> {x. x > 0}"
shows "cone {l v \<cdot>\<^sub>v v | v . v \<in> Vs} = cone Vs"
proof
have "{l v \<cdot>\<^sub>v v |v. v \<in> Vs} \<subseteq> cone Vs"
proof
fix x
assume "x \<in> {l v \<cdot>\<^sub>v v | v. v \<in> Vs}"
then obtain v where "v \<in> Vs" and "x = l v \<cdot>\<^sub>v v" by auto
thus "x \<in> cone Vs" using
set_in_cone[OF Vs] cone_smult[OF _ Vs, of "l v" v] l by fastforce
qed
thus "cone {l v \<cdot>\<^sub>v v | v. v \<in> Vs} \<subseteq> cone Vs"
using cone_mono cone_cone[OF Vs] by blast
next
have lVs: "{l v \<cdot>\<^sub>v v | v. v \<in> Vs} \<subseteq> carrier_vec n" using Vs by auto
have "Vs \<subseteq> cone {l v \<cdot>\<^sub>v v | v. v \<in> Vs}"
proof
fix v assume v: "v \<in> Vs"
hence "l v \<cdot>\<^sub>v v \<in> cone {l v \<cdot>\<^sub>v v | v. v \<in> Vs}" using set_in_cone[OF lVs] by auto
moreover have "1 / l v > 0" using l v by auto
ultimately have "(1 / l v) \<cdot>\<^sub>v (l v \<cdot>\<^sub>v v) \<in> cone {l v \<cdot>\<^sub>v v | v. v \<in> Vs}"
using cone_smult[OF _ lVs] by auto
also have "(1 / l v) \<cdot>\<^sub>v (l v \<cdot>\<^sub>v v) = v" using l v
by(auto simp add: smult_smult_assoc)
finally show "v \<in> cone {l v \<cdot>\<^sub>v v | v. v \<in> Vs}" by auto
qed
thus "cone Vs \<subseteq> cone {l v \<cdot>\<^sub>v v | v. v \<in> Vs}"
using cone_mono cone_cone[OF lVs] by blast
qed
lemma cone_add_cone:
assumes C: "C \<subseteq> carrier_vec n"
shows "cone C + cone C = cone C"
proof
note CC = cone_carrier[OF C]
have "cone C = cone C + {0\<^sub>v n}" by (subst add_0_right_vecset[OF CC], simp)
also have "\<dots> \<subseteq> cone C + cone C"
by (rule set_plus_mono2, insert zero_in_cone, auto)
finally show "cone C \<subseteq> cone C + cone C" by auto
from cone_elem_sum[OF C]
show "cone C + cone C \<subseteq> cone C"
by (auto elim!: set_plus_elim)
qed
lemma orthogonal_cone:
assumes X: "X \<subseteq> carrier_vec n"
and W: "W \<subseteq> carrier_vec n"
and finX: "finite X"
and spanLW: "span (set Ls \<union> W) = carrier_vec n"
and ortho: "\<And> w x. w \<in> W \<Longrightarrow> x \<in> set Ls \<Longrightarrow> w \<bullet> x = 0"
and WWs: "W = set Ws"
and spanL: "span (set Ls) = span X"
and LX: "set Ls \<subseteq> X"
and lin_Ls_Bs: "lin_indpt_list (Ls @ Bs)"
and len_Ls_Bs: "length (Ls @ Bs) = n"
shows "cone (X \<union> set Bs) \<inter> {x \<in> carrier_vec n. \<forall>w\<in>W. w \<bullet> x = 0} = cone X"
"\<And> x. \<forall>w\<in>W. w \<bullet> x = 0 \<Longrightarrow> Z \<subseteq> X \<Longrightarrow> B \<subseteq> set Bs \<Longrightarrow> x = lincomb c (Z \<union> B)
\<Longrightarrow> x = lincomb c (Z - B)"
proof -
from WWs have finW: "finite W" by auto
define Y where "Y = X \<union> set Bs"
from lin_Ls_Bs[unfolded lin_indpt_list_def] have
Ls: "set Ls \<subseteq> carrier_vec n" and
Bs: "set Bs \<subseteq> carrier_vec n" and
distLsBs: "distinct (Ls @ Bs)" and
lin: "lin_indpt (set (Ls @ Bs))" by auto
have LW: "set Ls \<inter> W = {}"
proof (rule ccontr)
assume "\<not> ?thesis"
then obtain x where xX: "x \<in> set Ls" and xW: "x \<in> W" by auto
from ortho[OF xW xX] have "x \<bullet> x = 0" by auto
hence "sq_norm x = 0" by (auto simp: sq_norm_vec_as_cscalar_prod)
with vs_zero_lin_dep[OF _ lin] xX Ls Bs show False by auto
qed
have Y: "Y \<subseteq> carrier_vec n" using X Bs unfolding Y_def by auto
have CLB: "carrier_vec n = span (set (Ls @ Bs))"
using lin_Ls_Bs len_Ls_Bs lin_indpt_list_length_eq_n by blast
also have "\<dots> \<subseteq> span Y"
by (rule span_is_monotone, insert LX, auto simp: Y_def)
finally have span: "span Y = carrier_vec n" using Y by auto
have finY: "finite Y" using finX finW unfolding Y_def by auto
{
fix x Z B d
assume xX: "\<forall>w\<in>W. w \<bullet> x = 0" and ZX: "Z \<subseteq> X" and B: "B \<subseteq> set Bs" and
xd: "x = lincomb d (Z \<union> B)"
from ZX B X Bs have ZB: "Z \<union> B \<subseteq> carrier_vec n" by auto
with xd have x: "x \<in> carrier_vec n" by auto
from xX W have w0: "w \<in> W \<Longrightarrow> w \<bullet> x = 0" for w by auto
from finite_in_span[OF _ _ x[folded spanLW]] Ls X W finW finX
obtain c where xc: "x = lincomb c (set Ls \<union> W)" by auto
have "x = lincomb c (set Ls \<union> W)" unfolding xc by auto
also have "\<dots> = lincomb c (set Ls) + lincomb c W"
by (rule lincomb_union, insert X LX W LW finW, auto)
finally have xsum: "x = lincomb c (set Ls) + lincomb c W" .
{
fix w
assume wW: "w \<in> W"
with W have w: "w \<in> carrier_vec n" by auto
from w0[OF wW, unfolded xsum]
have "0 = w \<bullet> (lincomb c (set Ls) + lincomb c W)" by simp
also have "\<dots> = w \<bullet> lincomb c (set Ls) + w \<bullet> lincomb c W"
by (rule scalar_prod_add_distrib[OF w], insert Ls W, auto)
also have "w \<bullet> lincomb c (set Ls) = 0" using ortho[OF wW]
by (subst lincomb_scalar_prod_right[OF Ls w], auto)
finally have "w \<bullet> lincomb c W = 0" by simp
}
hence "lincomb c W \<bullet> lincomb c W = 0" using W
by (subst lincomb_scalar_prod_left, auto)
hence "sq_norm (lincomb c W) = 0"
by (auto simp: sq_norm_vec_as_cscalar_prod)
hence 0: "lincomb c W = 0\<^sub>v n" using lincomb_closed[OF W, of c] by simp
have xc: "x = lincomb c (set Ls)" unfolding xsum 0 using Ls by auto
hence xL: "x \<in> span (set Ls)" by auto
let ?X = "Z - B"
have "lincomb d ?X \<in> span X" using finite_subset[OF _ finX, of ?X] X ZX by auto
from finite_in_span[OF finite_set Ls this[folded spanL]]
obtain e where ed: "lincomb e (set Ls) = lincomb d ?X" by auto
from B finite_subset[OF B] have finB: "finite B" by auto
from B Bs have BC: "B \<subseteq> carrier_vec n" by auto
define f where "f =
(\<lambda> x. if x \<in> set Bs then if x \<in> B then d x else 0 else if x \<in> set Ls then e x else undefined)"
have "x = lincomb d (?X \<union> B)" unfolding xd by auto
also have "\<dots> = lincomb d ?X + lincomb d B"
by (rule lincomb_union[OF _ _ _ finite_subset[OF _ finX]], insert ZX X finB B Bs, auto)
finally have xd: "x = lincomb d ?X + lincomb d B" .
also have "\<dots> = lincomb e (set Ls) + lincomb d B" unfolding ed by auto
also have "lincomb e (set Ls) = lincomb f (set Ls)"
by (rule lincomb_cong[OF _ Ls], insert distLsBs, auto simp: f_def)
also have "lincomb d B = lincomb f B"
by (rule lincomb_cong[OF _ BC], insert B, auto simp: f_def)
also have "lincomb f B = lincomb f (B \<union> (set Bs - B))"
by (subst lincomb_clean, insert finB Bs B, auto simp: f_def)
also have "B \<union> (set Bs - B) = set Bs" using B by auto
finally have "x = lincomb f (set Ls) + lincomb f (set Bs)" by auto
also have "lincomb f (set Ls) + lincomb f (set Bs) = lincomb f (set (Ls @ Bs))"
by (subst lincomb_union[symmetric], insert Ls distLsBs Bs, auto)
finally have "x = lincomb f (set (Ls @ Bs))" .
hence f: "f \<in> set (Ls @ Bs) \<rightarrow>\<^sub>E UNIV \<and> lincomb f (set (Ls @ Bs)) = x"
by (auto simp: f_def split: if_splits)
from finite_in_span[OF finite_set Ls xL] obtain g where
xg: "x = lincomb g (set Ls)" by auto
define h where "h = (\<lambda> x. if x \<in> set Bs then 0 else if x \<in> set Ls then g x else undefined)"
have "x = lincomb h (set Ls)" unfolding xg
by (rule lincomb_cong[OF _ Ls], insert distLsBs, auto simp: h_def)
also have "\<dots> = lincomb h (set Ls) + 0\<^sub>v n" using Ls by auto
also have "0\<^sub>v n = lincomb h (set Bs)"
by (rule lincomb_zero[symmetric, OF Bs], auto simp: h_def)
also have "lincomb h (set Ls) + lincomb h (set Bs) = lincomb h (set (Ls @ Bs))"
by (subst lincomb_union[symmetric], insert Ls Bs distLsBs, auto)
finally have "x = lincomb h (set (Ls @ Bs))" .
hence h: "h \<in> set (Ls @ Bs) \<rightarrow>\<^sub>E UNIV \<and> lincomb h (set (Ls @ Bs)) = x"
by (auto simp: h_def split: if_splits)
have basis: "basis (set (Ls @ Bs))" using lin_Ls_Bs[unfolded lin_indpt_list_def] len_Ls_Bs
using CLB basis_def by blast
from Ls Bs have "set (Ls @ Bs) \<subseteq> carrier_vec n" by auto
from basis[unfolded basis_criterion[OF finite_set this], rule_format, OF x] f h
have fh: "f = h" by auto
hence "\<And> x. x \<in> set Bs \<Longrightarrow> f x = 0" unfolding h_def by auto
hence "\<And> x. x \<in> B \<Longrightarrow> d x = 0" unfolding f_def using B by force
thus "x = lincomb d ?X" unfolding xd
by (subst (2) lincomb_zero, insert BC ZB X, auto intro!: M.r_zero)
} note main = this
have "cone Y \<inter> {x \<in> carrier_vec n. \<forall>w\<in>W. w \<bullet> x = 0} = cone X" (is "?I = _")
proof
{
fix x
assume xX: "x \<in> cone X"
with cone_carrier[OF X] have x: "x \<in> carrier_vec n" by auto
have "X \<subseteq> Y" unfolding Y_def by auto
from cone_mono[OF this] xX have xY: "x \<in> cone Y" by auto
from cone_iff_finite_cone[OF X finX] xX have "x \<in> finite_cone X" by auto
from this[unfolded finite_cone_def nonneg_lincomb_def] finX obtain c
where "x = lincomb c X" by auto
with finX X have "x \<in> span X" by auto
with spanL have "x \<in> span (set Ls)" by auto
from finite_in_span[OF _ Ls this] obtain c where
xc: "x = lincomb c (set Ls)" by auto
{
fix w
assume wW: "w \<in> W"
hence w: "w \<in> carrier_vec n" using W by auto
have "w \<bullet> x = 0" unfolding xc using ortho[OF wW]
by (subst lincomb_scalar_prod_right[OF Ls w], auto)
}
with xY x have "x \<in> ?I" by blast
}
thus "cone X \<subseteq> ?I" by blast
{
fix x
let ?X = "X - set Bs"
assume "x \<in> ?I"
with cone_carrier[OF Y] cone_iff_finite_cone[OF Y finY]
have xY: "x \<in> finite_cone Y" and x: "x \<in> carrier_vec n"
and w0: "\<And> w. w \<in> W \<Longrightarrow> w \<bullet> x = 0" by auto
from xY[unfolded finite_cone_def nonneg_lincomb_def] finY obtain d
where xd: "x = lincomb d Y" and nonneg: "d ` Y \<subseteq> Collect ((\<le>) 0)" by auto
from main[OF _ _ _ xd[unfolded Y_def]] w0
have "x = lincomb d ?X" by auto
hence "nonneg_lincomb d ?X x" unfolding nonneg_lincomb_def
using nonneg[unfolded Y_def] by auto
hence "x \<in> finite_cone ?X" using finX
unfolding finite_cone_def by auto
hence "x \<in> cone X" using finite_subset[OF _ finX, of ?X] unfolding cone_def by blast
}
then show "?I \<subseteq> cone X" by auto
qed
thus "cone (X \<union> set Bs) \<inter> {x \<in> carrier_vec n. \<forall>w\<in>W. w \<bullet> x = 0} = cone X" unfolding Y_def .
qed
definition "polyhedral_cone (A :: 'a mat) = { x . x \<in> carrier_vec n \<and> A *\<^sub>v x \<le> 0\<^sub>v (dim_row A)}"
lemma polyhedral_cone_carrier: assumes "A \<in> carrier_mat nr n"
shows "polyhedral_cone A \<subseteq> carrier_vec n"
using assms unfolding polyhedral_cone_def by auto
lemma cone_in_polyhedral_cone:
assumes CA: "C \<subseteq> polyhedral_cone A"
and A: "A \<in> carrier_mat nr n"
shows "cone C \<subseteq> polyhedral_cone A"
proof
interpret nr: gram_schmidt nr "TYPE ('a)".
from polyhedral_cone_carrier[OF A] assms(1)
have C: "C \<subseteq> carrier_vec n" by auto
fix x
assume x: "x \<in> cone C"
then have xn: "x \<in> carrier_vec n"
using cone_carrier[OF C] by auto
from x[unfolded cone_alt_def[OF C] cone_list_def nonneg_lincomb_list_def]
obtain ll Ds where l0: "lincomb_list ll Ds = x" and l1: "\<forall>i<length Ds. 0 \<le> ll i"
and DsC: "set Ds \<subseteq> C"
by auto
from DsC C have Ds: "set Ds \<subseteq> carrier_vec n" by auto
have "A *\<^sub>v x = A *\<^sub>v (lincomb_list ll Ds)" using l0 by auto
also have "\<dots> = nr.lincomb_list ll (map (\<lambda> d. A *\<^sub>v d) Ds)"
proof -
have one: " \<forall>w\<in>set Ds. dim_vec w = n" using DsC C by auto
have two: "\<forall>w\<in>set (map ((*\<^sub>v) A) Ds). dim_vec w = nr" using A DsC C by auto
show "A *\<^sub>v lincomb_list ll Ds = nr.lincomb_list ll (map ((*\<^sub>v) A) Ds)"
unfolding lincomb_list_as_mat_mult[OF one] nr.lincomb_list_as_mat_mult[OF two] length_map
proof (subst assoc_mult_mat_vec[symmetric, OF A], force+, rule arg_cong[of _ _ "\<lambda> x. x *\<^sub>v _"])
show "A * mat_of_cols n Ds = mat_of_cols nr (map ((*\<^sub>v) A) Ds)"
unfolding mat_of_cols_def
by (intro eq_matI, insert A Ds[unfolded set_conv_nth],
(force intro!: arg_cong[of _ _ "\<lambda> x. row A _ \<bullet> x"])+)
qed
qed
also have "\<dots> \<le> 0\<^sub>v nr"
proof (intro lesseq_vecI[of _ nr])
have *: "set (map ((*\<^sub>v) A) Ds) \<subseteq> carrier_vec nr" using A Ds by auto
show Carr: "nr.lincomb_list ll (map ((*\<^sub>v) A) Ds) \<in> carrier_vec nr"
by (intro nr.lincomb_list_carrier[OF *])
fix i
assume i: "i < nr"
from CA[unfolded polyhedral_cone_def] A
have l2: "x \<in> C \<Longrightarrow> A *\<^sub>v x \<le> 0\<^sub>v nr" for x by auto
show "nr.lincomb_list ll (map ((*\<^sub>v) A) Ds) $ i \<le> 0\<^sub>v nr $ i"
unfolding subst nr.lincomb_list_index[OF i *] length_map index_zero_vec(1)[OF i]
proof (intro sum_nonpos mult_nonneg_nonpos)
fix j
assume "j \<in> {0..<length Ds}"
hence j: "j < length Ds" by auto
from j show "0 \<le> ll j" using l1 by auto
from j have "Ds ! j \<in> C" using DsC by auto
from l2[OF this] have l2: "A *\<^sub>v Ds ! j \<le> 0\<^sub>v nr" by auto
from lesseq_vecD[OF _ this i] i have "(A *\<^sub>v Ds ! j) $ i \<le> 0" by auto
thus "map ((*\<^sub>v) A) Ds ! j $ i \<le> 0" using j i by auto
qed
qed auto
finally show "x \<in> polyhedral_cone A"
unfolding polyhedral_cone_def using A xn by auto
qed
lemma bounded_cone_is_zero:
assumes Ccarr: "C \<subseteq> carrier_vec n" and bnd: "cone C \<subseteq> Bounded_vec bnd"
shows "cone C = {0\<^sub>v n}"
proof(rule ccontr)
assume "\<not> ?thesis"
then obtain v where vC: "v \<in> cone C" and vnz: "v \<noteq> 0\<^sub>v n"
using zero_in_cone assms by auto
have vcarr: "v \<in> carrier_vec n" using vC Ccarr cone_carrier by blast
from vnz vcarr obtain i where i_le_n: "i < dim_vec v" and vinz: "v $ i \<noteq> 0" by force
define M where "M = (1 / (v $ i) * (bnd + 1))"
have abs_ge_bnd: "abs (M * (v $ i)) > bnd" unfolding M_def by (simp add: vinz)
have aMvC: "(abs M) \<cdot>\<^sub>v v \<in> cone C" using cone_smult[OF _ Ccarr vC] abs_ge_bnd by simp
have "\<not>(abs (abs M * (v $ i)) \<le> bnd)" using abs_ge_bnd by simp
hence "(abs M) \<cdot>\<^sub>v v \<notin> Bounded_vec bnd" unfolding Bounded_vec_def using i_le_n aMvC by auto
thus False using aMvC bnd by auto
qed
end
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Linear_Inequalities/Cone.thy"}
|
import math
import numpy as np
"""
This function calculates the roots of the quadratic inequality for the Rh reuse factor.
Parameters:
lx - list of input sizes of the lstms. The size of this list is equal to the number of layers.
lh - list of input sizes of the hidden layers. The size of this list is equal to the number of layers.
lt_sigma - the latency of the sigmoid/tanh functions.
lt_tail - the latency of the tail.
dsp_total - the total number of dsps
This returns the roots of the quadratic inequality.
"""
def reuse_factor(lx, lh, lt_sigma, lt_tail, dsp_total):
a = dsp_total - 4 * sum(lh)
b = dsp_total * (lt_sigma + lt_tail) - 4 * np.dot(lx, lh) - 4 * np.dot(lh, lh) - 4 * (lt_sigma + lt_tail) * sum(lh)
c = - 4 * (lt_sigma + lt_tail) * np.dot(lh, lh)
# print(a)
# print(b)
# print(c)
r_1 = (-b + math.sqrt(b**2 - 4*a*c)) / (2*a)
r_2 = (-b - math.sqrt(b**2 - 4*a*c)) / (2*a)
return r_1, r_2
print("ZYNQ")
print(reuse_factor([1,9],[9,9], 3,8,220))
print("lstm_ae_small exmaple")
print(reuse_factor([1,9],[9,9], 3,8,900))
print("\n")
print("KU115")
print("mnist 1/2 layers examples")
print(reuse_factor([28],[32], 3,8,5520))
print(reuse_factor([28,16],[16,16], 3,8,5520))
print("\n")
print("U250")
print("lstm_ae exmaple")
print(reuse_factor([1,32,8,8],[32,8,8,32], 3,8,12200))
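# Reading aid (added; not part of the original script, and the helper name below is new):
# reuse_factor returns the two roots of a*r**2 + b*r + c = 0, so substituting a root back
# into the quadratic should give a residual that is numerically close to zero.
def _quadratic_residual(r, lx, lh, lt_sigma, lt_tail, dsp_total):
    a = dsp_total - 4 * sum(lh)
    b = (dsp_total * (lt_sigma + lt_tail) - 4 * np.dot(lx, lh)
         - 4 * np.dot(lh, lh) - 4 * (lt_sigma + lt_tail) * sum(lh))
    c = -4 * (lt_sigma + lt_tail) * np.dot(lh, lh)
    return a * r ** 2 + b * r + c
r_1, r_2 = reuse_factor([1, 9], [9, 9], 3, 8, 220)
print("residuals for the ZYNQ roots:",
      _quadratic_residual(r_1, [1, 9], [9, 9], 3, 8, 220),
      _quadratic_residual(r_2, [1, 9], [9, 9], 3, 8, 220))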
|
{"hexsha": "8fbadd797079a59bb7c16b5b056dc3c3435e7089", "size": 1390, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/reuse_factors_examples.py", "max_stars_repo_name": "walkieq/LSTM-HLS", "max_stars_repo_head_hexsha": "f90bc769153e667eb8a30c7c4147bd53620f02bb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2021-06-17T18:25:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T09:30:50.000Z", "max_issues_repo_path": "tools/reuse_factors_examples.py", "max_issues_repo_name": "vamsikrishnabodaballa/RNN_HLS", "max_issues_repo_head_hexsha": "892b5315c27953af7dc387f4df5475962178201a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-10-30T17:48:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-08T21:03:40.000Z", "max_forks_repo_path": "tools/reuse_factors_examples.py", "max_forks_repo_name": "vamsikrishnabodaballa/RNN_HLS", "max_forks_repo_head_hexsha": "892b5315c27953af7dc387f4df5475962178201a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-06-17T18:25:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T11:05:54.000Z", "avg_line_length": 28.3673469388, "max_line_length": 119, "alphanum_fraction": 0.6273381295, "include": true, "reason": "import numpy", "num_tokens": 465}
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The base image interface.
"""
import numpy as np
from scipy import ndimage
# Local imports
from .image import Image
from ..transforms.affines import to_matrix_vector
from ..reference.coordinate_system import CoordinateSystem
from ..reference.coordinate_map import (AffineTransform,
product as cmap_product)
################################################################################
# class `AffineImage`
################################################################################
class AffineImage(Image):
""" The affine image for nipy.
This object is a subclass of Image that
assumes the first 3 coordinates
are spatial.
**Attributes**
    :metadata: dictionary
        Optional, user-defined, dictionary used to carry around
        extra information about the data as it goes through
        transformations. The Image class does not guarantee consistency
        of this information as the data is modified.
:_data:
Private pointer to the data.
**Properties**
:affine: 4x4 ndarray
Affine mapping from voxel axes to world coordinates
(world coordinates are always forced to be 'x', 'y', 'z').
:spatial_coordmap: AffineTransform
Coordinate map describing the spatial coordinates
(always forced to be 'x', 'y', 'z') and the coordinate
axes with names axis_names[:3].
:coordmap: AffineTransform
Coordinate map describing the relationship between
all coordinates and axis_names.
**Notes**
The data is stored in an undefined way: prescalings might need to
be applied to it before using it, or the data might be loaded on
demand. The best practice to access the data is not to access the
_data attribute, but to use the `get_data` method.
"""
#---------------------------------------------------------------------------
# Attributes, BaseImage interface
#---------------------------------------------------------------------------
# The name of the reference coordinate system
coord_sys = ''
# User defined meta data
metadata = dict()
# The data (ndarray)
_data = None
# XXX: Need an attribute to determine in a clever way the
    # interpolation order/method
def __init__(self, data, affine, coord_sys, metadata=None):
""" Creates a new nipy image with an affine mapping.
Parameters
----------
data : ndarray
ndarray representing the data.
affine : 4x4 ndarray
affine transformation to the reference coordinate system
        coord_sys : string
name of the reference coordinate system.
"""
affine = np.asarray(affine)
if affine.shape != (4,4):
raise ValueError('Affine image takes 4x4 affine as input')
function_domain = CoordinateSystem(['axis%d' % i for i in range(3)],
name=coord_sys)
function_range = CoordinateSystem(['x','y','z'], name='world')
spatial_coordmap = AffineTransform(function_domain, function_range,
affine)
nonspatial_names = ['axis%d' % i for i in range(3, data.ndim)]
if nonspatial_names:
nonspatial_coordmap = AffineTransform.from_start_step(nonspatial_names, nonspatial_names, [0]*(data.ndim-3), [1]*(data.ndim-3))
full_coordmap = cmap_product(spatial_coordmap, nonspatial_coordmap)
else:
full_coordmap = spatial_coordmap
self._spatial_coordmap = spatial_coordmap
self.coord_sys = coord_sys
Image.__init__(self, data, full_coordmap)
if metadata is not None:
self.metadata = metadata
def _get_spatial_coordmap(self):
"""
Returns 3 dimensional AffineTransform, which is the same
as self.coordmap if self.ndim == 3.
"""
return self._spatial_coordmap
spatial_coordmap = property(_get_spatial_coordmap)
def _get_affine(self):
"""
Returns the affine of the spatial coordmap which will
always be a 4x4 matrix.
"""
return self._spatial_coordmap.affine
affine = property(_get_affine)
def get_data(self):
# XXX What's wrong with __array__? Wouldn't that be closer to numpy?
""" Return data as a numpy array.
"""
return np.asarray(self._data)
def resampled_to_affine(self, affine_transform, world_to_world=None,
interpolation_order=3,
shape=None):
""" Resample the image to be an affine image.
Parameters
----------
affine_transform : AffineTransform
Affine of the new grid.
XXX In the original proposal, it said something about "if only 3x3 it is assumed
to be a rotation", but this wouldn't work the way the code was written becuase
it was written as if affine was the affine of an AffineImage. So, if you input
a "rotation matrix" that is assuming you have voxels of size 1....
This rotation can now be expressed with the world_to_world argument.
world_to_world: 4x4 ndarray, optional
A matrix representing a mapping from the target's (affine_transform) "world"
to self's "world". Defaults to np.identity(4)
interpolation_order : int, optional
            Order of the spline interpolation. If 0, nearest-neighbour
interpolation is performed.
shape: tuple
Shape of the resulting image. Defaults to self.shape.
Returns
-------
resampled_image : nipy AffineImage
New nipy image with the data resampled in the given
affine.
Notes
-----
The coordinate system of the resampled_image is the world
of affine_transform. Therefore, if world_to_world=np.identity(4),
the coordinate system is not changed: the
returned image points to the same world space.
"""
shape = shape or self.shape
shape = shape[:3]
if world_to_world is None:
world_to_world = np.identity(4)
world_to_world_transform = AffineTransform(affine_transform.function_range,
self.spatial_coordmap.function_range,
world_to_world)
# Delayed import to avoid circular imports
from ...algorithms.resample import resample
if self.ndim == 3:
im = resample(self, affine_transform, world_to_world_transform,
shape, order=interpolation_order)
return AffineImage(np.array(im), affine_transform.affine,
affine_transform.function_domain.name)
# XXX this below wasn't included in the original AffineImage proposal
# and it would fail for an AffineImage with ndim == 4.
# I don't know if it should be included as a special case in the AffineImage,
# but then we should at least raise an exception saying that these resample_* methods
# only work for AffineImage's with ndim==3.
#
# This is part of the reason nipy.core.image.Image does not have
# resample_* methods...
elif self.ndim == 4:
result = np.empty(shape + (self.shape[3],))
data = self.get_data()
for i in range(self.shape[3]):
tmp_affine_im = AffineImage(data[...,i], self.affine,
self.axis_names[:-1])
tmp_im = tmp_affine_im.resampled_to_affine(affine_transform,
world_to_world,
interpolation_order,
shape)
result[...,i] = np.array(tmp_im)
return AffineImage(result, affine_transform.affine,
affine_transform.function_domain.name)
else:
raise ValueError('resampling only defined for 3d and 4d AffineImage')
def resampled_to_img(self, target_image, world_to_world=None, interpolation_order=3):
""" Resample the image to be on the same grid than the target image.
Parameters
----------
target_image : AffineImage
Nipy image onto the grid of which the data will be
resampled.
XXX In the proposal, target_image was assumed to be a matrix if it had no attribute "affine". It now has to have a spatial_coordmap attribute.
world_to_world: 4x4 ndarray, optional
A matrix representing a mapping from the target's "world"
to self's "world". Defaults to np.identity(4)
interpolation_order : int, optional
            Order of the spline interpolation. If 0, nearest-neighbour
interpolation is performed.
Returns
-------
resampled_image : nipy_image
New nipy image with the data resampled.
Notes
-----
The coordinate system of the resampled_image is the world
of target_image. Therefore, if world_to_world=np.identity(4),
the coordinate system is not changed: the
returned image points to the same world space.
XXX Since you've enforced the outputs always to be 'x','y','z' -- EVERY image is embedded in the same coordinate system (i.e. 'x','y','z'), but images can have different coordinate axes. The term "embedding" that was here in the proposal refers to something in the range of a function, not its domain. By adding a world_to_world transformation, i.e. a rotation or something, we
now change the coordinate system of the resampled_image
"""
return self.resampled_to_affine(target_image.spatial_coordmap,
world_to_world,
interpolation_order,
target_image.shape)
def values_in_world(self, x, y, z, interpolation_order=3):
""" Return the values of the data at the world-space positions given by
x, y, z
Parameters
----------
x : number or ndarray
            x positions in world space, in other words millimeters
y : number or ndarray
            y positions in world space, in other words millimeters.
The shape of y should match the shape of x
z : number or ndarray
            z positions in world space, in other words millimeters.
The shape of z should match the shape of x
interpolation_order : int, optional
            Order of the spline interpolation. If 0, nearest-neighbour
interpolation is performed.
Returns
-------
values : number or ndarray
Data values interpolated at the given world position.
This is a number or an ndarray, depending on the shape of
the input coordinate.
"""
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
shape = x.shape
if not ((x.shape == y.shape) and (x.shape == z.shape)):
raise ValueError('x, y and z shapes should be equal')
x = x.ravel()
y = y.ravel()
z = z.ravel()
xyz = np.c_[x, y, z]
world_to_voxel = self.spatial_coordmap.inverse()
ijk = world_to_voxel(xyz)
values = ndimage.map_coordinates(self.get_data(), ijk.T,
order=interpolation_order)
values = np.reshape(values, shape)
return values
#---------------------------------------------------------------------------
# AffineImage interface
#---------------------------------------------------------------------------
def xyz_ordered(self):
""" Returns an image with the affine diagonal and positive
in its coordinate system.
"""
A, b = to_matrix_vector(self.affine)
if not np.all((np.abs(A) > 0.001).sum(axis=0) == 1):
            # CoordSystemError is not defined or imported in this module, so raise ValueError instead
            raise ValueError(
                'Cannot reorder the axis: the image affine contains rotations'
                )
axis_numbers = list(np.argmax(np.abs(A), axis=1))
axis_names = [self.spatial_coordmap.function_domain.coord_names[a] for a in axis_numbers]
reordered_coordmap = self.spatial_coordmap.reordered_domain(axis_names)
data = self.get_data()
transposed_data = np.transpose(data, axis_numbers + range(3, self.ndim))
return AffineImage(transposed_data, reordered_coordmap.affine,
reordered_coordmap.function_domain.name)
#---------------------------------------------------------------------------
# Private methods
#---------------------------------------------------------------------------
def __repr__(self):
options = np.get_printoptions()
np.set_printoptions(precision=6, threshold=64, edgeitems=2)
representation = \
'AffineImage(\n data=%s,\n affine=%s,\n coord_sys=%s)' % (
'\n '.join(repr(self._data).split('\n')),
'\n '.join(repr(self.affine).split('\n')),
repr(self.coord_sys))
np.set_printoptions(**options)
return representation
def __copy__(self):
""" Copy the Image and the arrays and metadata it contains.
"""
return self.__class__(data=self.get_data().copy(),
affine=self.affine.copy(),
coord_sys=self.coord_sys,
metadata=self.metadata.copy())
def __deepcopy__(self, option):
""" Copy the Image and the arrays and metadata it contains.
"""
import copy
return self.__class__(data=self.get_data().copy(),
affine=self.affine.copy(),
coord_sys=self.coord_sys,
metadata=copy.deepcopy(self.metadata))
def __eq__(self, other):
return ( isinstance(other, self.__class__)
and np.all(self.get_data() == other.get_data())
and np.all(self.affine == other.affine)
and (self.coord_sys == other.coord_sys))
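#-------------------------------------------------------------------------------
# Usage sketch (added as a reading aid; not part of the original module; the
# coordinate-system name 'scanner' and the array shape are purely illustrative):
#
#   >>> import numpy as np
#   >>> from nipy.core.image.affine_image import AffineImage
#   >>> img = AffineImage(np.random.rand(10, 10, 10), np.eye(4), 'scanner')
#   >>> # With an identity affine, world coordinates coincide with voxel indices,
#   >>> # so this interpolates the data at voxel (2, 3, 4):
#   >>> img.values_in_world(2., 3., 4.).shape
#   (1,)
#-------------------------------------------------------------------------------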
|
{"hexsha": "96e358ba5b0d984090a93647829bb204053bc846", "size": 15070, "ext": "py", "lang": "Python", "max_stars_repo_path": "nipy/core/image/affine_image.py", "max_stars_repo_name": "neurospin/nipy", "max_stars_repo_head_hexsha": "cc54600a0dca1e003ad393bc05c46f91eef30a68", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-03-08T15:01:06.000Z", "max_stars_repo_stars_event_max_datetime": "2016-03-08T15:01:06.000Z", "max_issues_repo_path": "nipy/core/image/affine_image.py", "max_issues_repo_name": "neurospin/nipy", "max_issues_repo_head_hexsha": "cc54600a0dca1e003ad393bc05c46f91eef30a68", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nipy/core/image/affine_image.py", "max_forks_repo_name": "neurospin/nipy", "max_forks_repo_head_hexsha": "cc54600a0dca1e003ad393bc05c46f91eef30a68", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9734748011, "max_line_length": 377, "alphanum_fraction": 0.5556735236, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2971}
|
# Copyright (c) 2018, Curious AI Ltd. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Functions to load data from folders and augment it"""
import itertools
import logging
import os.path
from PIL import Image
import numpy as np
from torch.utils.data.sampler import Sampler
import torch
LOG = logging.getLogger('main')
NO_LABEL = -1
class data_prefetcher():
    """Wrap a data loader and asynchronously prefetch the next batch to the GPU on a side CUDA stream."""
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
# self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1, 3, 1, 1)
# self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1, 3, 1, 1)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
if len(self.next_input) == 2:
input_1, input_2 = self.next_input
input_1 = input_1.cuda(non_blocking=True)
input_2 = input_2.cuda(non_blocking=True)
self.next_input = input_1.float(), input_2.float()
else:
self.next_input = self.next_input.cuda(non_blocking=True).float()
self.next_target = self.next_target.cuda(non_blocking=True)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target
class RandomTranslateWithReflect:
"""Translate image randomly
Translate vertically and horizontally by n pixels where
    n is an integer drawn uniformly and independently for each axis
    from [-max_translation, max_translation].
Fill the uncovered blank area with reflect padding.
"""
def __init__(self, max_translation):
self.max_translation = max_translation
def __call__(self, old_image):
xtranslation, ytranslation = np.random.randint(-self.max_translation,
self.max_translation + 1,
size=2)
xpad, ypad = abs(xtranslation), abs(ytranslation)
xsize, ysize = old_image.size
flipped_lr = old_image.transpose(Image.FLIP_LEFT_RIGHT)
flipped_tb = old_image.transpose(Image.FLIP_TOP_BOTTOM)
flipped_both = old_image.transpose(Image.ROTATE_180)
new_image = Image.new("RGB", (xsize + 2 * xpad, ysize + 2 * ypad))
new_image.paste(old_image, (xpad, ypad))
new_image.paste(flipped_lr, (xpad + xsize - 1, ypad))
new_image.paste(flipped_lr, (xpad - xsize + 1, ypad))
new_image.paste(flipped_tb, (xpad, ypad + ysize - 1))
new_image.paste(flipped_tb, (xpad, ypad - ysize + 1))
new_image.paste(flipped_both, (xpad - xsize + 1, ypad - ysize + 1))
new_image.paste(flipped_both, (xpad + xsize - 1, ypad - ysize + 1))
new_image.paste(flipped_both, (xpad - xsize + 1, ypad + ysize - 1))
new_image.paste(flipped_both, (xpad + xsize - 1, ypad + ysize - 1))
new_image = new_image.crop((xpad - xtranslation,
ypad - ytranslation,
xpad + xsize - xtranslation,
ypad + ysize - ytranslation))
return new_image
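# Usage sketch (added as a reading aid; not part of the original module; the image
# size and translation bound below are illustrative):
#
#   from PIL import Image
#   img = Image.new("RGB", (32, 32))
#   translate = RandomTranslateWithReflect(max_translation=4)
#   shifted = translate(img)   # same size as img, uncovered areas filled by reflection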
class TransformTwice:
def __init__(self, transform):
self.transform = transform
def __call__(self, inp):
out1 = self.transform(inp)
out2 = self.transform(inp)
return out1, out2
def relabel_dataset(dataset, labels):
    """Attach the labels given in `labels` to matching images, mark the rest as NO_LABEL,
    and return the lists of labeled and unlabeled indices."""
unlabeled_idxs = []
# t_idxs = np.empty(len(labels), dtype=int)
# t0 = 0
# t1 = 1
# t2 = 2
# t3 = 3
for idx in range(len(dataset.imgs)):
path, _ = dataset.imgs[idx]
filename = os.path.basename(path)
if filename in labels:
label_idx = dataset.class_to_idx[labels[filename]]
dataset.imgs[idx] = path, label_idx
# if label_idx == 0:
# t_idxs[t0] = idx
# t0 = t0 + 4
# elif label_idx == 1:
# t_idxs[t1] = idx
# t1 = t1 + 4
# elif label_idx == 2:
# t_idxs[t2] = idx
# t2 = t2 + 4
# elif label_idx == 3:
# t_idxs[t3] = idx
# t3 = t3 + 4
del labels[filename]
else:
dataset.imgs[idx] = path, NO_LABEL
unlabeled_idxs.append(idx)
if len(labels) != 0:
message = "List of unlabeled contains {} unknown files: {}, ..."
some_missing = ', '.join(list(labels.keys()))
raise LookupError(message.format(len(labels), some_missing))
# assert len(t_idxs) == len(dataset.imgs) - len(unlabeled_idxs)
labeled_idxs = sorted(set(range(len(dataset.imgs))) - set(unlabeled_idxs))
# labeled_idxs = t_idxs.tolist()
return labeled_idxs, unlabeled_idxs
class TwoStreamBatchSampler(Sampler):
"""Iterate two sets of indices
An 'epoch' is one iteration through the primary indices.
During the epoch, the secondary indices are iterated through
as many times as needed.
"""
def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):
self.primary_indices = primary_indices
self.secondary_indices = secondary_indices
self.secondary_batch_size = secondary_batch_size
self.primary_batch_size = batch_size - secondary_batch_size
assert len(self.primary_indices) >= self.primary_batch_size > 0
assert len(self.secondary_indices) >= self.secondary_batch_size > 0
def __iter__(self):
primary_iter = iterate_once(self.primary_indices)
secondary_iter = iterate_eternally(self.secondary_indices)
return (
# list(primary_batch) + (np.random.permutation(secondary_batch)).tolist()
primary_batch + secondary_batch
for (primary_batch, secondary_batch)
in zip(grouper(primary_iter, self.primary_batch_size),
grouper(secondary_iter, self.secondary_batch_size))
)
def __len__(self):
return len(self.primary_indices) // self.primary_batch_size
def iterate_once(iterable):
return np.random.permutation(iterable)
def iterate_eternally(indices):
def infinite_shuffles():
while True:
yield np.random.permutation(indices)
# yield indices
return itertools.chain.from_iterable(infinite_shuffles())
def grouper(iterable, n):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3) --> ABC DEF (leftover items that do not fill a chunk are dropped)
args = [iter(iterable)] * n
return zip(*args)
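# Usage sketch (added as a reading aid; not part of the original module; the index
# ranges below are hypothetical, and which stream carries labeled vs. unlabeled
# indices is decided by the caller):
if __name__ == '__main__':
    primary = list(range(20))        # e.g. indices iterated once per epoch
    secondary = list(range(20, 30))  # e.g. indices recycled as often as needed
    sampler = TwoStreamBatchSampler(primary, secondary,
                                    batch_size=6, secondary_batch_size=2)
    for batch in sampler:
        print(batch)  # a tuple of 4 primary indices followed by 2 secondary indices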
|
{"hexsha": "f11dbac353b5cda43a1097feb2726b11f781ea2d", "size": 7325, "ext": "py", "lang": "Python", "max_stars_repo_path": "MT-CNV/mean_teacher/data.py", "max_stars_repo_name": "Wangzheaos/DARD-Net", "max_stars_repo_head_hexsha": "4b0dc7e87c82c7f6f5892c257fd397d7217fd7f1", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MT-CNV/mean_teacher/data.py", "max_issues_repo_name": "Wangzheaos/DARD-Net", "max_issues_repo_head_hexsha": "4b0dc7e87c82c7f6f5892c257fd397d7217fd7f1", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MT-CNV/mean_teacher/data.py", "max_forks_repo_name": "Wangzheaos/DARD-Net", "max_forks_repo_head_hexsha": "4b0dc7e87c82c7f6f5892c257fd397d7217fd7f1", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3896713615, "max_line_length": 99, "alphanum_fraction": 0.6128327645, "include": true, "reason": "import numpy", "num_tokens": 1769}
|
[STATEMENT]
lemma i0_less[simp]: "(0::enat) < n \<longleftrightarrow> n \<noteq> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (0 < n) = (n \<noteq> 0)
[PROOF STEP]
by (rule zero_less_iff_neq_zero)
|
{"llama_tokens": 95, "file": null, "length": 1}
|
[STATEMENT]
lemma mreq_end2: "applied_rule_rev C x b = applied_rule_rev C x c \<Longrightarrow>
applied_rule_rev C x (a#b) = applied_rule_rev C x (a#c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. applied_rule_rev C x b = applied_rule_rev C x c \<Longrightarrow> applied_rule_rev C x (a # b) = applied_rule_rev C x (a # c)
[PROOF STEP]
apply (case_tac "applied_rule_rev C x b = None")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>applied_rule_rev C x b = applied_rule_rev C x c; applied_rule_rev C x b = \<bottom>\<rbrakk> \<Longrightarrow> applied_rule_rev C x (a # b) = applied_rule_rev C x (a # c)
2. \<lbrakk>applied_rule_rev C x b = applied_rule_rev C x c; applied_rule_rev C x b \<noteq> \<bottom>\<rbrakk> \<Longrightarrow> applied_rule_rev C x (a # b) = applied_rule_rev C x (a # c)
[PROOF STEP]
apply (auto intro: mreq_end mreq_endNone)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 382, "file": "UPF_Normalisation", "length": 3}
|
# 1 "./libxc_master.F90"
# 1 "<built-in>"
# 1 "<command-line>"
# 1 "./libxc_master.F90"
!! Copyright (C) 2003-2006 M. Marques, A. Castro, A. Rubio, G. Bertsch
!!
!! This program is free software; you can redistribute it and/or modify
!! it under the terms of the GNU Lesser General Public License as published by
!! the Free Software Foundation; either version 2, or (at your option)
!! any later version.
!!
!! This program is distributed in the hope that it will be useful,
!! but WITHOUT ANY WARRANTY; without even the implied warranty of
!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
!! GNU Lesser General Public License for more details.
!!
!! You should have received a copy of the GNU Lesser General Public License
!! along with this program; if not, write to the Free Software
!! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
!! 02110-1301, USA.
!!
!! $Id: libxc.f90 3550 2007-11-19 14:32:49Z marques $
# 27 "./libxc_master.F90"
!-------------------------------------------------------------------
module xc_f90_types_m
integer, public, parameter :: xc_f90_kind = selected_real_kind(14)
type xc_f90_pointer_t
private
integer, pointer :: buffer
end type xc_f90_pointer_t
end module xc_f90_types_m
!-------------------------------------------------------------------
module xc_f90_lib_m
use xc_f90_types_m
use libxc_funcs_m
implicit none
public
! Families of xc functionals
integer, parameter :: &
XC_FAMILY_UNKNOWN = -1, &
XC_FAMILY_NONE = 0, &
XC_FAMILY_LDA = 1, &
XC_FAMILY_GGA = 2, &
XC_FAMILY_MGGA = 4, &
XC_FAMILY_LCA = 8, &
XC_FAMILY_OEP = 16, &
XC_FAMILY_HYB_GGA = 32, &
XC_FAMILY_HYB_MGGA = 64
integer, parameter :: &
XC_UNPOLARIZED = 1, & ! Spin unpolarized
XC_POLARIZED = 2 ! Spin polarized
integer, parameter :: &
XC_NON_RELATIVISTIC = 0, & ! Functional includes or not relativistic
XC_RELATIVISTIC = 1 ! corrections. Only available in some functionals.
! Kinds
integer, parameter :: &
XC_EXCHANGE = 0, &
XC_CORRELATION = 1, &
XC_EXCHANGE_CORRELATION = 2, &
XC_KINETIC = 3
integer, parameter :: &
XC_FLAGS_HAVE_EXC = 1, &
XC_FLAGS_HAVE_VXC = 2, &
XC_FLAGS_HAVE_FXC = 4, &
XC_FLAGS_HAVE_KXC = 8, &
XC_FLAGS_HAVE_LXC = 16, &
XC_FLAGS_1D = 32, &
XC_FLAGS_2D = 64, &
XC_FLAGS_3D = 128, &
XC_FLAGS_STABLE = 512, &
XC_FLAGS_DEVELOPMENT = 1024
  ! These are old names kept for compatibility; they should disappear soon
integer, parameter :: XC_GGA_XC_LB = 160
integer, parameter :: XC_GGA_K_ABSR1 = 506
integer, parameter :: XC_GGA_K_ABSR2 = 507
!----------------------------------------------------------------
interface
subroutine xc_f90_version(major, minor)
integer, intent(out) :: major, minor
end subroutine xc_f90_version
end interface
!----------------------------------------------------------------
interface
integer function xc_f90_info_number(info)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: info
end function xc_f90_info_number
integer function xc_f90_info_kind(info)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: info
end function xc_f90_info_kind
subroutine xc_f90_info_name(info, s)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: info
character(len=*), intent(out) :: s
end subroutine xc_f90_info_name
integer function xc_f90_info_family(info)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: info
end function xc_f90_info_family
integer function xc_f90_info_flags(info)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: info
end function xc_f90_info_flags
subroutine xc_f90_info_refs(info, number, str, s)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: info
integer, intent(inout) :: number ! number of the reference. Must be 0 in the first call
type(xc_f90_pointer_t), intent(inout) :: str ! this will hold a (char **) pointer
character(len=*), intent(out) :: s ! the string that is output
end subroutine xc_f90_info_refs
subroutine xc_f90_functional_get_name(func_number, func_string)
integer, intent(in) :: func_number
character(len=256), intent(out) :: func_string
end subroutine xc_f90_functional_get_name
integer function xc_f90_functional_get_number(func_string)
character(len=*), intent(in) :: func_string
end function xc_f90_functional_get_number
integer function xc_f90_family_from_id(id)
use xc_f90_types_m
integer, intent(in) :: id
end function xc_f90_family_from_id
end interface
!----------------------------------------------------------------
interface
subroutine xc_f90_func_init(p, info, functional, nspin)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(out) :: p
type(xc_f90_pointer_t), intent(out) :: info
integer, intent(in) :: functional
integer, intent(in) :: nspin
end subroutine xc_f90_func_init
subroutine xc_f90_func_end(p)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(inout) :: p
end subroutine xc_f90_func_end
end interface
! LDAs
!----------------------------------------------------------------
interface
subroutine xc_f90_lda(p, np, rho, zk, vrho, fxc, kxc)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho ! rho(nspin) the density
real(xc_f90_kind), intent(out) :: zk ! the energy per unit particle
real(xc_f90_kind), intent(out) :: vrho ! v(nspin) the potential
real(xc_f90_kind), intent(out) :: fxc ! v(nspin,nspin) the xc kernel
real(xc_f90_kind), intent(out) :: kxc ! v(nspin,nspin,nspin) the derivative of xc kernel
end subroutine xc_f90_lda
subroutine xc_f90_lda_exc(p, np, rho, zk)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho ! rho(nspin) the density
real(xc_f90_kind), intent(out) :: zk ! the energy per unit particle
end subroutine xc_f90_lda_exc
subroutine xc_f90_lda_exc_vxc(p, np, rho, e, v)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho ! rho(nspin) the density
real(xc_f90_kind), intent(out) :: e ! the energy per unit particle
real(xc_f90_kind), intent(out) :: v ! v(nspin) the potential
end subroutine xc_f90_lda_exc_vxc
subroutine xc_f90_lda_vxc(p, np, rho, v)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho ! rho(nspin) the density
real(xc_f90_kind), intent(out) :: v ! v(nspin) the potential
end subroutine xc_f90_lda_vxc
subroutine xc_f90_lda_fxc(p, np, rho, fxc)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho ! rho(nspin) the density
real(xc_f90_kind), intent(out) :: fxc ! v(nspin,nspin) the xc kernel
end subroutine xc_f90_lda_fxc
subroutine xc_f90_lda_kxc(p, np, rho, kxc)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho ! rho(nspin) the density
real(xc_f90_kind), intent(out) :: kxc
end subroutine xc_f90_lda_kxc
end interface
interface
subroutine xc_f90_lda_x_1d_set_par(p, interaction, bb)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(inout) :: p
integer, intent(in) :: interaction
real(xc_f90_kind), intent(in) :: bb
end subroutine xc_f90_lda_x_1d_set_par
subroutine xc_f90_lda_c_xalpha_set_par(p, alpha)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(inout) :: p
real(xc_f90_kind), intent(in) :: alpha
end subroutine xc_f90_lda_c_xalpha_set_par
subroutine xc_f90_lda_x_set_par(p, alpha, relativistic, omega)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(inout) :: p
real(xc_f90_kind), intent(in) :: alpha ! of Xalpha, set to 4/3 to obtain standard LDA
integer, intent(in) :: relativistic
real(xc_f90_kind), intent(in) :: omega
end subroutine xc_f90_lda_x_set_par
subroutine xc_f90_lda_c_1d_csc_set_par(p, interaction, bb)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(inout) :: p
integer, intent(in) :: interaction
real(xc_f90_kind), intent(in) :: bb
end subroutine xc_f90_lda_c_1d_csc_set_par
subroutine xc_f90_lda_c_2d_prm_set_par(p, N)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(inout) :: p
real(xc_f90_kind), intent(in) :: N
end subroutine xc_f90_lda_c_2d_prm_set_par
end interface
! GGAs
!----------------------------------------------------------------
interface
subroutine xc_f90_gga(p, np, rho, sigma, zk, vrho, vsigma, &
v2rho2, v2rhosigma, v2sigma2, v3rho3, v3rho2sigma, v3rhosigma2, v3sigma3)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(out) :: zk
real(xc_f90_kind), intent(out) :: vrho
real(xc_f90_kind), intent(out) :: vsigma
real(xc_f90_kind), intent(out) :: v2rho2
real(xc_f90_kind), intent(out) :: v2rhosigma
real(xc_f90_kind), intent(out) :: v2sigma2
real(xc_f90_kind), intent(out) :: v3rho3
real(xc_f90_kind), intent(out) :: v3rho2sigma
real(xc_f90_kind), intent(out) :: v3rhosigma2
real(xc_f90_kind), intent(out) :: v3sigma3
end subroutine xc_f90_gga
subroutine xc_f90_gga_exc(p, np, rho, sigma, zk)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(out) :: zk
end subroutine xc_f90_gga_exc
subroutine xc_f90_gga_exc_vxc(p, np, rho, sigma, zk, vrho, vsigma)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(out) :: zk
real(xc_f90_kind), intent(out) :: vrho
real(xc_f90_kind), intent(out) :: vsigma
end subroutine xc_f90_gga_exc_vxc
subroutine xc_f90_gga_vxc(p, np, rho, sigma, vrho, vsigma)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(out) :: vrho
real(xc_f90_kind), intent(out) :: vsigma
end subroutine xc_f90_gga_vxc
subroutine xc_f90_gga_fxc(p, np, rho, sigma, v2rho2, v2rhosigma, v2sigma2)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(out) :: v2rho2
real(xc_f90_kind), intent(out) :: v2rhosigma
real(xc_f90_kind), intent(out) :: v2sigma2
end subroutine xc_f90_gga_fxc
subroutine xc_f90_gga_kxc(p, np, rho, sigma, v3rho3, v3rho2sigma, v3rhosigma2, v3sigma3)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(out) :: v3rho3
real(xc_f90_kind), intent(out) :: v3rho2sigma
real(xc_f90_kind), intent(out) :: v3rhosigma2
real(xc_f90_kind), intent(out) :: v3sigma3
end subroutine xc_f90_gga_kxc
end interface
!----------------------------------------------------------------
interface
subroutine xc_f90_gga_lb_set_par(p, modified, threshold, ip, qtot)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: modified ! should we use the modified version
    real(xc_f90_kind), intent(in) :: threshold ! if so, the threshold to use the asymptotic version
real(xc_f90_kind), intent(in) :: ip ! ionization potential
real(xc_f90_kind), intent(in) :: qtot ! total charge
end subroutine xc_f90_gga_lb_set_par
end interface
!----------------------------------------------------------------
interface
subroutine xc_f90_gga_lb_modified(p, np, rho, grho, r, dedd)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho ! rho(nspin) the density
real(xc_f90_kind), intent(in) :: grho ! grho(3,nspin) the gradient of the density
real(xc_f90_kind), intent(in) :: r ! distance from center of finite system
real(xc_f90_kind), intent(out) :: dedd
end subroutine xc_f90_gga_lb_modified
end interface
!----------------------------------------------------------------
interface
subroutine xc_f90_gga_x_wpbeh_set_par(p, omega)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
real(xc_f90_kind), intent(in) :: omega ! range separation
end subroutine xc_f90_gga_x_wpbeh_set_par
end interface
!----------------------------------------------------------------
interface
subroutine xc_f90_gga_x_hjs_set_par(p, omega)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
real(xc_f90_kind), intent(in) :: omega ! range separation
end subroutine xc_f90_gga_x_hjs_set_par
end interface
!----------------------------------------------------------------
interface
subroutine xc_f90_gga_ak13_get_asymptotic(homo, asymp)
use xc_f90_types_m
real(xc_f90_kind), intent(in) :: homo
real(xc_f90_kind), intent(out) :: asymp
end subroutine xc_f90_gga_ak13_get_asymptotic
end interface
!----------------------------------------------------------------
interface
subroutine xc_f90_hyb_exx_coef(p, coef)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
real(xc_f90_kind), intent(out) :: coef
end subroutine xc_f90_hyb_exx_coef
subroutine xc_f90_hyb_cam_coef(p, omega, alpha, beta)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
real(xc_f90_kind), intent(out) :: omega, alpha, beta
end subroutine xc_f90_hyb_cam_coef
end interface
!----------------------------------------------------------------
interface
subroutine xc_f90_hyb_gga_xc_hse_set_par(p, beta, omega)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
real(xc_f90_kind), intent(in) :: beta ! mixing
real(xc_f90_kind), intent(in) :: omega ! range separation
end subroutine xc_f90_hyb_gga_xc_hse_set_par
subroutine xc_f90_hyb_gga_xc_pbeh_set_par(p, alpha)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
real(xc_f90_kind), intent(in) :: alpha ! mixing
end subroutine xc_f90_hyb_gga_xc_pbeh_set_par
end interface
! the meta-GGAs
!----------------------------------------------------------------
interface
subroutine xc_f90_mgga(p, np, rho, sigma, lapl, tau, zk, vrho, vsigma, vlapl, vtau, &
v2rho2, v2sigma2, v2lapl2, v2tau2, v2rhosigma, v2rholapl, v2rhotau, &
v2sigmalapl, v2sigmatau, v2lapltau)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(in) :: lapl
real(xc_f90_kind), intent(in) :: tau
real(xc_f90_kind), intent(out) :: zk
real(xc_f90_kind), intent(out) :: vrho
real(xc_f90_kind), intent(out) :: vsigma
real(xc_f90_kind), intent(out) :: vlapl
real(xc_f90_kind), intent(out) :: vtau
real(xc_f90_kind), intent(out) :: v2rho2
real(xc_f90_kind), intent(out) :: v2sigma2
real(xc_f90_kind), intent(out) :: v2lapl2
real(xc_f90_kind), intent(out) :: v2tau2
real(xc_f90_kind), intent(out) :: v2rhosigma
real(xc_f90_kind), intent(out) :: v2rholapl
real(xc_f90_kind), intent(out) :: v2rhotau
real(xc_f90_kind), intent(out) :: v2sigmalapl
real(xc_f90_kind), intent(out) :: v2sigmatau
real(xc_f90_kind), intent(out) :: v2lapltau
end subroutine xc_f90_mgga
subroutine xc_f90_mgga_exc(p, np, rho, sigma, lapl, tau, zk)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(in) :: lapl
real(xc_f90_kind), intent(in) :: tau
real(xc_f90_kind), intent(out) :: zk
end subroutine xc_f90_mgga_exc
subroutine xc_f90_mgga_exc_vxc(p, np, rho, sigma, lapl, tau, zk, vrho, vsigma, vlapl, vtau)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(in) :: lapl
real(xc_f90_kind), intent(in) :: tau
real(xc_f90_kind), intent(out) :: zk
real(xc_f90_kind), intent(out) :: vrho
real(xc_f90_kind), intent(out) :: vsigma
real(xc_f90_kind), intent(out) :: vlapl
real(xc_f90_kind), intent(out) :: vtau
end subroutine xc_f90_mgga_exc_vxc
subroutine xc_f90_mgga_vxc(p, np, rho, sigma, lapl, tau, vrho, vsigma, vlapl, vtau)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(in) :: lapl
real(xc_f90_kind), intent(in) :: tau
real(xc_f90_kind), intent(out) :: vrho
real(xc_f90_kind), intent(out) :: vsigma
real(xc_f90_kind), intent(out) :: vlapl
real(xc_f90_kind), intent(out) :: vtau
end subroutine xc_f90_mgga_vxc
subroutine xc_f90_mgga_fxc(p, np, rho, sigma, lapl, tau, &
v2rho2, v2sigma2, v2lapl2, v2tau2, v2rhosigma, v2rholapl, v2rhotau, &
v2sigmalapl, v2sigmatau, v2lapltau)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(in) :: p
integer, intent(in) :: np
real(xc_f90_kind), intent(in) :: rho
real(xc_f90_kind), intent(in) :: sigma
real(xc_f90_kind), intent(in) :: lapl
real(xc_f90_kind), intent(in) :: tau
real(xc_f90_kind), intent(out) :: v2rho2
real(xc_f90_kind), intent(out) :: v2sigma2
real(xc_f90_kind), intent(out) :: v2lapl2
real(xc_f90_kind), intent(out) :: v2tau2
real(xc_f90_kind), intent(out) :: v2rhosigma
real(xc_f90_kind), intent(out) :: v2rholapl
real(xc_f90_kind), intent(out) :: v2rhotau
real(xc_f90_kind), intent(out) :: v2sigmalapl
real(xc_f90_kind), intent(out) :: v2sigmatau
real(xc_f90_kind), intent(out) :: v2lapltau
end subroutine xc_f90_mgga_fxc
end interface
interface
subroutine xc_f90_mgga_x_tb09_set_par(p, cc)
use xc_f90_types_m
type(xc_f90_pointer_t), intent(inout) :: p
real(xc_f90_kind), intent(in) :: cc
end subroutine xc_f90_mgga_x_tb09_set_par
end interface
end module xc_f90_lib_m
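!----------------------------------------------------------------
! Minimal usage sketch (illustrative only): initialise a functional, evaluate it
! for a single density value and release it again. The functional id XC_LDA_X is
! assumed to come from libxc_funcs_m, and the density value is made up.
!
!   program lda_example
!     use xc_f90_types_m
!     use xc_f90_lib_m
!     implicit none
!     type(xc_f90_pointer_t) :: p, info
!     real(xc_f90_kind) :: rho(1), zk(1)
!     rho(1) = 0.3_xc_f90_kind
!     call xc_f90_func_init(p, info, XC_LDA_X, XC_UNPOLARIZED)
!     call xc_f90_lda_exc(p, 1, rho(1), zk(1))
!     call xc_f90_func_end(p)
!   end program lda_example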
!! Local Variables:
!! mode: f90
!! coding: utf-8
!! End:
|
{"hexsha": "30cefb7279f6fe574ec2a20f301d91056dd4148b", "size": 19638, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "libxc-2.2.0/src/libxc.f90", "max_stars_repo_name": "rdietric/lsms", "max_stars_repo_head_hexsha": "8d0d5f01186abf9a1cc54db3f97f9934b422cf92", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2018-04-03T15:35:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T03:19:23.000Z", "max_issues_repo_path": "libxc-2.2.0/src/libxc.f90", "max_issues_repo_name": "rdietric/lsms", "max_issues_repo_head_hexsha": "8d0d5f01186abf9a1cc54db3f97f9934b422cf92", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-07-30T13:59:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:43:35.000Z", "max_forks_repo_path": "libxc-2.2.0/src/libxc.f90", "max_forks_repo_name": "rdietric/lsms", "max_forks_repo_head_hexsha": "8d0d5f01186abf9a1cc54db3f97f9934b422cf92", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-06-30T00:30:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T09:14:29.000Z", "avg_line_length": 36.6380597015, "max_line_length": 100, "alphanum_fraction": 0.6444138914, "num_tokens": 5969}
|
# © 2021 Nokia
#
# Licensed under the BSD 3 Clause license
# SPDX-License-Identifier: BSD-3-Clause
import subprocess
import numpy as np
def gpuCount():
query = [
'nvidia-smi',
'--list-gpus'
]
res = subprocess.run(query, stdout=subprocess.PIPE).stdout.decode('ascii')[0:-1]
# Count GPUs
res = res.split("\n")
return(len(res))
def gpuQuery(gpu_id = 0):
query = [
'nvidia-smi',
'--query-gpu=gpu_bus_id,temperature.gpu,utilization.gpu,memory.used,clocks.current.sm',
'--format=csv,noheader,nounits',
f'--id={gpu_id}'
]
res = subprocess.run(query, stdout=subprocess.PIPE).stdout.decode('ascii')[0:-1]
# Create dict
res = res.split(", ")
return({
"bus_id" : res[0],
"temp" : res[1],
"util" : res[2],
"mem" : res[3],
"clk" : res[4]
})
def gpuAssign(overwrite = None, verbose = True):
gpu_count = gpuCount()
gpu_mems = np.array([int(gpuQuery(gpu_id)['mem']) for gpu_id in range(gpu_count)])
gpu_temps = np.array([int(gpuQuery(gpu_id)['temp']) for gpu_id in range(gpu_count)])
if overwrite is not None:
gpu_id = overwrite
else:
if(np.all(np.less(gpu_mems, 2000))):
if(np.all(np.less(gpu_temps, 40))):
gpu_id = np.random.randint(gpu_count)
else:
gpu_id = np.argmin(gpu_temps)
else:
gpu_id = np.argmin(gpu_mems)
if verbose:
if overwrite is not None:
print(f'Assigned task to gpu_id: {gpu_id} (OVERWRITE!)')
else:
print(f'Assigned task to gpu_id: {gpu_id}')
return(gpu_id)
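if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): pick a GPU and expose just that
    # one to downstream frameworks via CUDA_VISIBLE_DEVICES. This assumes
    # nvidia-smi is available on the host; the snippet is not part of the
    # original module interface.
    import os
    chosen = gpuAssign(verbose=True)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(chosen)
    print(gpuQuery(chosen))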
|
{"hexsha": "d1278720a9b2cf7634d0e16c7e1abcc4240843e4", "size": 1695, "ext": "py", "lang": "Python", "max_stars_repo_path": "common/utils/gpu.py", "max_stars_repo_name": "nokia/integratedimputation", "max_stars_repo_head_hexsha": "ca72bda54cb66e99d79ff0b174cf8f99ccb554ba", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-13T13:05:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T10:06:58.000Z", "max_issues_repo_path": "common/utils/gpu.py", "max_issues_repo_name": "nokia/integratedimputation", "max_issues_repo_head_hexsha": "ca72bda54cb66e99d79ff0b174cf8f99ccb554ba", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "common/utils/gpu.py", "max_forks_repo_name": "nokia/integratedimputation", "max_forks_repo_head_hexsha": "ca72bda54cb66e99d79ff0b174cf8f99ccb554ba", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.484375, "max_line_length": 95, "alphanum_fraction": 0.5646017699, "include": true, "reason": "import numpy", "num_tokens": 467}
|
"""
varN(B::LogBinner[, lvl])
Calculates the variance/N of a given level in the Binning Analysis.
"""
function varN(B::LogBinner, lvl::Integer = _reliable_level(B))
n = B.count[lvl]
var(B, lvl) / n
end
"""
var(B::LogBinner[, lvl])
Calculates the variance of a given level in the Binning Analysis.
"""
function var(B::LogBinner) end
function var(
B::LogBinner{T,N},
lvl::Integer = _reliable_level(B)
) where {N, T <: Real}
n = B.count[lvl]
X = B.x_sum[lvl]
X2 = B.x2_sum[lvl]
# lvl = 1 <=> original values
# correct variance:
# (∑ xᵢ^2) / (N-1) - (∑ xᵢ)(∑ xᵢ) / (N(N-1))
X2 / (n - 1) - X^2 / (n*(n - 1))
end
function var(
B::LogBinner{T,N},
lvl::Integer = _reliable_level(B)
) where {N, T <: Complex}
n = B.count[lvl]
X = B.x_sum[lvl]
X2 = B.x2_sum[lvl]
# lvl = 1 <=> original values
(real(X2) + imag(X2)) / (n - 1) - (real(X)^2 + imag(X)^2) / (n*(n - 1))
end
function var(
B::LogBinner{<: AbstractArray{T, D}, N},
lvl::Integer = _reliable_level(B)
) where {N, D, T <: Real}
n = B.count[lvl]
X = B.x_sum[lvl]
X2 = B.x2_sum[lvl]
@. X2 / (n - 1) - X^2 / (n*(n - 1))
end
function var(
B::LogBinner{<: AbstractArray{T, D}, N},
lvl::Integer = _reliable_level(B)
) where {N, D, T <: Complex}
n = B.count[lvl]
X = B.x_sum[lvl]
X2 = B.x2_sum[lvl]
@. (real(X2) + imag(X2)) / (n - 1) - (real(X)^2 + imag(X)^2) / (n*(n - 1))
end
"""
all_vars(B::LogBinner)
Calculates the variance for each level of the Binning Analysis.
"""
function all_vars(B::LogBinner{T,N}) where {T,N}
[var(B, lvl) for lvl in 1:N if B.count[lvl] > 1]
end
"""
all_varNs(B::LogBinner)
Calculates the variance/N for each level of the Binning Analysis.
"""
function all_varNs(B::LogBinner{T,N}) where {T,N}
[varN(B, lvl) for lvl in 1:N if B.count[lvl] > 1]
end
################################################################################
# NOTE works for all
"""
mean(B::LogBinner[, lvl])
Calculates the mean for a given level in the Binning Analysis.
"""
function mean(B::LogBinner, lvl::Integer = 1)
B.x_sum[lvl] / B.count[lvl]
end
# NOTE works for all
"""
all_means(B::LogBinner)
Calculates the mean for each level of the `LogBinner`.
"""
function all_means(B::LogBinner{T,N}) where {T,N}
[mean(B, lvl) for lvl in 1:N if B.count[lvl] > 1]
end
################################################################################
"""
tau(B::LogBinner[, lvl])
Calculates the autocorrelation time tau.
"""
function tau(B::LogBinner{T,N}, lvl::Integer = _reliable_level(B)) where {N , T <: Number}
var_0 = varN(B, 1)
var_l = varN(B, lvl)
0.5 * (var_l / var_0 - 1)
end
function tau(B::LogBinner{T,N}, lvl::Integer = _reliable_level(B)) where {N , T <: AbstractArray}
var_0 = varN(B, 1)
var_l = varN(B, lvl)
@. 0.5 * (var_l / var_0 - 1)
end
"""
all_taus(B::LogBinner)
Calculates the autocorrelation time tau for each level of the `LogBinner`.
"""
function all_taus(B::LogBinner{T,N}) where {T,N}
[tau(B, lvl) for lvl in 1:N if B.count[lvl] > 1]
end
################################################################################
# Heuristic for selecting the level with the (presumably) most reliable
# standard error estimate:
# Take the highest lvl with at least 32 bins.
# (Chose 32 based on https://doi.org/10.1119/1.3247985)
function _reliable_level(B::LogBinner{T,N})::Int64 where {T,N}
isempty(B) && (return 1) # results in NaN in std_error
i = findlast(x -> x >= 32, B.count)
something(i, 1)
end
"""
std_error(B::LogBinner[, lvl])
Calculates the standard error of the mean.
"""
function std_error(B::LogBinner) end
function std_error(B::LogBinner{T,N}, lvl::Integer=_reliable_level(B)) where {N, T <: Number}
sqrt(varN(B, lvl))
end
function std_error(B::LogBinner{T,N}, lvl::Integer=_reliable_level(B)) where {N, T <: AbstractArray}
sqrt.(varN(B, lvl))
end
"""
all_std_errors(B::LogBinner)
Calculates the standard error for each level of the Binning Analysis.
"""
function all_std_errors(B::LogBinner) end
all_std_errors(B::LogBinner{T,N}) where {N, T <: Number} = sqrt.(all_varNs(B))
all_std_errors(B::LogBinner{T,N}) where {N, T <: AbstractArray} = (x -> sqrt.(x)).(all_varNs(B))
"""
convergence(B::LogBinner, lvl)
Computes the difference between the variance of this lvl and the last,
normalized to the last lvl. If this value tends to 0, the Binning Analysis has
converged.
"""
function convergence(B::LogBinner) end
function convergence(B::LogBinner{T,N}, lvl::Integer=_reliable_level(B)) where {N, T <: Number}
abs((varN(B, lvl+1) - varN(B, lvl)) / varN(B, lvl))
end
function convergence(B::LogBinner{T,N}, lvl::Integer=_reliable_level(B)) where {N, T <: AbstractArray}
mean(abs.((varN(B, lvl+1) .- varN(B, lvl)) ./ varN(B, lvl)))
end
"""
has_converged(B::LogBinner, lvl[, threshhold = 0.05])
Returns true if the Binning Analysis has converged for a given lvl.
"""
function has_converged(B::LogBinner, lvl::Integer=_reliable_level(B), threshhold::Float64 = 0.05)
convergence(B, lvl) <= threshhold
end
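# Minimal usage sketch (illustrative only; it assumes the package's standard
# `LogBinner()` constructor and `push!` interface, which are defined elsewhere,
# and uses made-up data):
#
#     B = LogBinner()
#     for x in randn(10_000)
#         push!(B, x)
#     end
#     mean(B), std_error(B), tau(B)   # mean, binning error estimate, autocorrelation time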
|
{"hexsha": "05b20af320f553d5a0d6594928488da6d487df5a", "size": 5231, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/log/statistics.jl", "max_stars_repo_name": "UnofficialJuliaMirror/BinningAnalysis.jl-b7192094-8e58-5052-a244-180a858778ee", "max_stars_repo_head_hexsha": "9eb6cf6ae6e623d76a778cc72311fb058cc75196", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/log/statistics.jl", "max_issues_repo_name": "UnofficialJuliaMirror/BinningAnalysis.jl-b7192094-8e58-5052-a244-180a858778ee", "max_issues_repo_head_hexsha": "9eb6cf6ae6e623d76a778cc72311fb058cc75196", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/log/statistics.jl", "max_forks_repo_name": "UnofficialJuliaMirror/BinningAnalysis.jl-b7192094-8e58-5052-a244-180a858778ee", "max_forks_repo_head_hexsha": "9eb6cf6ae6e623d76a778cc72311fb058cc75196", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7914691943, "max_line_length": 102, "alphanum_fraction": 0.5964442745, "num_tokens": 1657}
|
module Sessions
using DataFrames, NaturalSort, DataStructures, HDF5
export PROCESSED_DATA_DIR, REGIONS, REGION_LABELS, Session, ClusterMetadata, sessionnames, readsession, patient, siteregion
const RAW_DATA_DIR = joinpath(dirname(@__FILE__), "..", "raw")
const PROCESSED_DATA_DIR = joinpath(dirname(@__FILE__), "..", "processed")
const REGIONS = OrderedDict(
:H => r"^..H$",
:EC => r"^.EC",
:A => r"^.A$",
:PHG => r"^.(?:PHG|PHC|PG.?)$",
:MTL => r"^.(?:.H|EC|A|PHG|PHC|PG.?)$",
:All => r""
)
const REGION_LABELS = OrderedDict(
:H => "Hippocampus",
:EC => "Entorhinal Cortex",
:A => "Amygdala",
:PHG => "Parahippocampal Cortex",
:MTL => "Medial Temporal Lobe",
:All => "All"
)
type Session
folder::String
trials::DataFrame
spikes::Vector{Vector{Vector{Float64}}}
unit_type::Vector{Vector{Symbol}}
sites::Vector{String}
stimuli::Vector{String}
end
sessionnames() = sort!([x[1:rsearch(x, '.')-1] for x in readdir(RAW_DATA_DIR) if endswith(x, ".h5")], lt=natural)
splitcols(x) = [x[:, i] for i = 1:size(x, 2)]
function readsession(sname)::Session
h5open(joinpath(RAW_DATA_DIR, "$sname.h5")) do f
ftrials = f["trials"]
stim = read(ftrials, "stimuli")
nstim = maximum(stim)
trials = DataFrame(correct=read(ftrials, "correct") .== 1,
isi=read(ftrials, "isi"),
probe1=read(ftrials, "probe1"),
probe2=read(ftrials, "probe2"),
stimuli=splitcols(stim),
onsets=splitcols(read(ftrials, "onsets")),
offsets=splitcols(read(ftrials, "offsets")),
maint=read(ftrials, "maint"),
probes=read(ftrials, "probes"))
close(ftrials)
fchannels = f["channels"]
sites = read(f, "sites")
spikes = Vector{Vector{Vector{Float64}}}(length(sites))
unit_type = Vector{Vector{Symbol}}(length(sites))
for i = 1:length(sites)
st = "ch$(i)_spike_times"
if exists(fchannels, st)
spikes[i] = read(fchannels, st)
unit_type[i] = map(Symbol, read(fchannels, "ch$(i)_unit_types"))
end
end
# Not included in data files for reasons of confidentiality
stimuli = ["Stimulus $i" for i in 1:nstim]
Session(sname, trials, spikes, unit_type, sites, stimuli)
end
end
immutable ClusterMetadata
session::String
site::String
channel::Int
cluster::Int
unit_type::Symbol
end
ClusterMetadata(s::Session, ch::Int, cluster::Int) =
ClusterMetadata(s.folder, s.sites[ch], ch, cluster, s.unit_type[ch][cluster])
Base.:(==)(x::ClusterMetadata, y::ClusterMetadata) =
x.cluster == y.cluster && x.channel == y.channel && x.session == y.session
Base.hash(x::ClusterMetadata) = hash(x.session, hash(x.site, hash(x.channel, hash(x.cluster))))
Base.isless(x::ClusterMetadata, y::ClusterMetadata) =
x.session != y.session ? natural(x.session, y.session) : x.channel != y.channel ? isless(x.channel, y.channel) :
isless(x.cluster, y.cluster)
Base.show(io::IO, m::ClusterMetadata) =
print(io, m.session, ' ', m.site, " ch", m.channel, " clu", m.cluster, ' ', m.unit_type)
function siteregion(m::ClusterMetadata)
for (k, v) in REGIONS
(k == :All || k == :MTL) && continue
ismatch(v, m.site) && return k
end
ismatch(REGIONS[:MTL], m.site) && return :MTL
:All
end
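# Minimal usage sketch (illustrative only; it assumes the raw HDF5 session files
# are present in RAW_DATA_DIR and that channel 1 of the first session contains a
# sorted cluster):
#
#     s = readsession(first(sessionnames()))
#     meta = ClusterMetadata(s, 1, 1)   # channel 1, cluster 1
#     siteregion(meta)                  # => :H, :EC, :A, :PHG, :MTL or :All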
end
|
{"hexsha": "88f8523031e449051baab996f4969161f720cab0", "size": 3540, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Sessions.jl", "max_stars_repo_name": "simonster/Persistent-Single-Neuron-Activity-during-Working-Memory-in-the-Human-Medial-Temporal-Lobe", "max_stars_repo_head_hexsha": "4b46fef287b3d849695ec93d69c1f82075bf7ed8", "max_stars_repo_licenses": ["MIT", "Unlicense"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2017-03-29T01:37:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-16T21:17:05.000Z", "max_issues_repo_path": "src/Sessions.jl", "max_issues_repo_name": "simonster/Persistent-Single-Neuron-Activity-during-Working-Memory-in-the-Human-Medial-Temporal-Lobe", "max_issues_repo_head_hexsha": "4b46fef287b3d849695ec93d69c1f82075bf7ed8", "max_issues_repo_licenses": ["MIT", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Sessions.jl", "max_forks_repo_name": "simonster/Persistent-Single-Neuron-Activity-during-Working-Memory-in-the-Human-Medial-Temporal-Lobe", "max_forks_repo_head_hexsha": "4b46fef287b3d849695ec93d69c1f82075bf7ed8", "max_forks_repo_licenses": ["MIT", "Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-02T14:52:41.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-02T14:52:41.000Z", "avg_line_length": 35.4, "max_line_length": 123, "alphanum_fraction": 0.590960452, "num_tokens": 1002}
|
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from horsetailmatching import UncertainParameter, HorsetailMatching
from horsetailmatching import UniformParameter, IntervalParameter
from horsetailmatching.demoproblems import TP2
def main():
u1 = IntervalParameter(lower_bound=-1, upper_bound=1)
u2 = UniformParameter(lower_bound=-1, upper_bound=1)
def my_target(h):
return 1
theHM = HorsetailMatching(TP2, [u1, u2],
ftarget=(my_target, my_target),
samples_prob=500, samples_int=50)
print(theHM.evalMetric([2,3]))
upper, lower, CDFs = theHM.getHorsetail()
for CDF in CDFs:
plt.plot(CDF[0], CDF[1], 'grey', lw=0.5)
plt.plot(upper[0], upper[1], 'b')
plt.plot(lower[0], lower[1], 'b', label='Initial Horsetail Plot')
solution = minimize(theHM.evalMetric, x0=[1,1], method='Nelder-Mead')
print(solution)
upper, lower, CDFs = theHM.getHorsetail()
for CDF in CDFs:
plt.plot(CDF[0], CDF[1], 'grey', lw=0.5)
plt.plot(upper[0], upper[1], 'r')
plt.plot(lower[0], lower[1], 'r', label='Optimum Horsetail Plot')
plt.plot([theHM.ftarget[0](y) for y in lower[1]], lower[1], 'k--',
label='Target')
plt.xlim([-3, 15])
plt.legend(loc='upper left')
plt.show()
if __name__ == "__main__":
main()
|
{"hexsha": "dca083cb9d6ec48804deec1ec7a114f49c649012", "size": 1378, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/mixed_uncertainties.py", "max_stars_repo_name": "lwcook/horsetail-matching", "max_stars_repo_head_hexsha": "f3d5f8d01249debbca978f412ce4eae017458119", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-05-17T17:07:08.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-29T12:42:36.000Z", "max_issues_repo_path": "examples/mixed_uncertainties.py", "max_issues_repo_name": "lwcook/horsetail-matching", "max_issues_repo_head_hexsha": "f3d5f8d01249debbca978f412ce4eae017458119", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/mixed_uncertainties.py", "max_forks_repo_name": "lwcook/horsetail-matching", "max_forks_repo_head_hexsha": "f3d5f8d01249debbca978f412ce4eae017458119", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6222222222, "max_line_length": 73, "alphanum_fraction": 0.6349782293, "include": true, "reason": "from scipy", "num_tokens": 410}
|
import pydevd
pydevd.settrace('localhost', port=51234, stdoutToServer=True, stderrToServer=True)
import os
import sys
import plyvel
import numpy
import matplotlib.pyplot as plt
import numpy as np
import leveldb
# First compile the Datum protobuf so that we can load it using protobuf
# This will create datum_pb2.py
os.system('protoc -I={0} --python_out={1} {0}datum.proto'.format("./", "./"))
sys.path.append('/home/karn_s/caffe/python')
import caffe
from caffe.proto import caffe_pb2
import datum_pb2
LMDB_PATH = "TIWaffer_Features_new1/features/"
def vis_square(data, padsize=1, padval=0):
data -= data.min()
data /= data.max()
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
plt.imshow(data)
def vis_db(db_dir):
h = leveldb.LevelDB(db_dir)
datum = caffe_pb2.Datum()
for key_val , ser_str in h.RangeIter():
datum.ParseFromString(ser_str)
rows = datum.height;
cols = datum.width;
img_pre = np.fromstring(datum.data, dtype=np.float64)
img = img_pre.reshape(rows, cols)
print "\nKey val: ", key_val
print "Image: ", img
if __name__ == '__main__':
vis_db(LMDB_PATH)
db = plyvel.DB(LMDB_PATH)
visualize = True
datum = datum_pb2.Datum()
for key, value in db:
datum.ParseFromString(value)
# Read the datum.data
rows = datum.height
cols = datum.width
img_pre = np.fromstring(datum.data, dtype=np.float64)
img = img_pre.reshape(rows, cols)
img_data = numpy.array(bytearray(datum.float_data))\
.reshape(datum.channels, datum.height, datum.width)
if visualize:
plt.imshow(img_data.transpose([1,2,0]))
plt.show()
#filters = np.array(bytearray(datum.data)).reshape(datum.channels, datum.height, datum.width)
#vis_square(filters.transpose(0, 2, 3, 1))
print key
|
{"hexsha": "c6c6928b89f50b1d25ae9956a3d6b9dd91b20a72", "size": 2343, "ext": "py", "lang": "Python", "max_stars_repo_path": "distanceMetricLearning/leveldbToNP.py", "max_stars_repo_name": "KareemYousrii/2015-DL-TIWafer", "max_stars_repo_head_hexsha": "5aa7d2ecfcfd3da95811a0a49c855ba1bcb3f034", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "distanceMetricLearning/leveldbToNP.py", "max_issues_repo_name": "KareemYousrii/2015-DL-TIWafer", "max_issues_repo_head_hexsha": "5aa7d2ecfcfd3da95811a0a49c855ba1bcb3f034", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "distanceMetricLearning/leveldbToNP.py", "max_forks_repo_name": "KareemYousrii/2015-DL-TIWafer", "max_forks_repo_head_hexsha": "5aa7d2ecfcfd3da95811a0a49c855ba1bcb3f034", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8289473684, "max_line_length": 105, "alphanum_fraction": 0.6436192915, "include": true, "reason": "import numpy", "num_tokens": 644}
|
"""
File name: extracted_features_gridsearch.py
Author: Esra Zihni
Date created: 29.04.2019
"""
import numpy as np
import sys
import os
import yaml
import pickle
import pandas as pd
import pandas.core.indexes
sys.modules['pandas.indexes'] = pandas.core.indexes
import json
import time
import keras
import tensorflow as tf
from keras.models import load_model,Sequential, Model
from keras.layers import Dense, Dropout, Input, concatenate
from keras.callbacks import EarlyStopping
from keras.backend.tensorflow_backend import set_session
from sklearn.model_selection import ParameterGrid
from sklearn.metrics import roc_auc_score
from helper import dataset, model
from imaging_predictive_models import imaging_dataset
from clinical_predictive_models import clinical_dataset, MLP
from multimodal_prediction_helper import multimodal_dataset
from plotting_helper import plot_evolution
#### ENVIRONMENT AND SESSION SET UP ####################################################################
# set the environment variable
os.environ["KERAS_BACKEND"] = "tensorflow"
# Silence INFO logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
# create a configuration protocol
config = tf.ConfigProto()
# set the allow_growth option to true in the protocol
config.gpu_options.allow_growth = True
# define GPU to use
config.gpu_options.visible_device_list = "0,1"
# start a sesstion that uses the configuration protocol
set_session(tf.Session(config=config))
#### READ CONFIGURATION FILE ##########
def join(loader,node):
seq = loader.construct_sequence(node)
return ''.join(str(i) for i in seq)
yaml.add_constructor('!join',join)
cfg = yaml.load(open('config.yml', 'r'))
#### ASSIGN PATHS AND VARIABLES #########################################################################
dataset_name = cfg['dataset name']
data_path = 'data/'
clin_feat_splits_path = data_path+ cfg['clinical dataset']['feature splits path']
img_feat_splits_path = data_path + cfg['imaging dataset']['feature splits path']
num_splits = cfg['number of runs']
model_name = cfg['model name']
def_params = cfg['hyperparameters'][model_name]
tuning_params = cfg['tuning parameters'][model_name]
performance_scores = cfg['final performance measures']
save_models = cfg['save options']['models path']
save_params = cfg['save options']['params path']
save_scores = cfg['save options']['scores path']
save_figures = cfg['save options']['figures path']
##### GET TRAINING,VALIDATION AND TEST DATA ##############################################################
data = multimodal_dataset(dataset_name)
data.load_feature_sets(img_feat_splits_path, clin_feat_splits_path)
#feature_sets = data.combine_features(combining_method = 'concat_and_normalize')
##### TRAIN AND SAVE MODELS #################################################################################
for i in range(num_splits):
#### ASSIGN TRAINING, TEST AND VALIDATION SETS FOR CURRENT SPLIT ##########################################
current_split_num = i+1
img_X_tr = data.img_sets[i]['train_data']
img_X_val = data.img_sets[i]['val_data']
img_X_te = data.img_sets[i]['test_data']
clin_X_tr = data.clin_sets[i]['train_data']
clin_X_val = data.clin_sets[i]['val_data']
clin_X_te = data.clin_sets[i]['test_data']
y_tr = data.img_sets[i]['train_labels']
y_val = data.img_sets[i]['val_labels']
y_te = data.img_sets[i]['test_labels']
if def_params['out_activation'] == 'softmax':
y_tr = pd.get_dummies(y_tr)
y_val = pd.get_dummies(y_val)
y_te = pd.get_dummies(y_te)
model_path = save_models + '/best_model_on_outer_training_set_split_'+str(current_split_num)+'.h5'
#params_path = save_params + '/best_parameters_run_'+str(current_split_num)+'.json'
tune_params_path = save_params + '/best_tuning_parameters_split_'+str(current_split_num)+'.json'
if os.path.isfile(model_path):
pass
else:
if not os.path.exists(save_models):
os.makedirs(save_models)
#### START GRID SEARCH #####################################################################################
start = time.time()
best_AUC = 0.5
i = 0
for tune in ParameterGrid(tuning_params):
img_input = Input(shape= (img_X_tr.shape[1],), name='image_input')
clin_input = Input(shape= (clin_X_tr.shape[1],), name='clinical_input')
dense1 = Dense(tune['num_neurons_embedding'][0], kernel_initializer = def_params['weight_init'], activation = def_params['hidden_activation'],
kernel_regularizer= keras.regularizers.l2(tune['l2_ratio']))(clin_input)
dense2 = Dense(tune['num_neurons_embedding'][1], kernel_initializer = def_params['weight_init'], activation = def_params['hidden_activation'],
kernel_regularizer= keras.regularizers.l2(tune['l2_ratio']))(img_input)
x = concatenate([dense1, dense2])
x = Dense(tune['num_neurons_final'], kernel_initializer = def_params['weight_init'], activation = def_params['hidden_activation'],
kernel_regularizer= keras.regularizers.l2(tune['l2_ratio']))(x)
x= Dropout(tune['dropout_rate'])(x)
if def_params['out_activation'] == 'softmax':
output = Dense(2,kernel_initializer = def_params['weight_init'],activation= def_params['out_activation'],
kernel_regularizer= keras.regularizers.l2(tune['l2_ratio']))(x)
else:
output = Dense(1,kernel_initializer = def_params['weight_init'],activation= def_params['out_activation'],
kernel_regularizer= keras.regularizers.l2(tune['l2_ratio']))(x)
optimizer = keras.optimizers.Adam(lr = tune['learning_rate'])
model = Model(inputs=[img_input, clin_input], outputs=[output])
model.compile(loss=def_params['loss_func'], optimizer = optimizer)
e_stop = EarlyStopping(monitor = 'val_loss', min_delta = def_params['min_delta'], patience = def_params['iter_patience'], mode='auto')
callbacks = [e_stop]
history = model.fit({'image_input' : img_X_tr,'clinical_input' : clin_X_tr}, y_tr, callbacks = callbacks,validation_data= ([img_X_val, clin_X_val],y_val),
epochs=def_params['epochs'], batch_size= tune['batch_size'], verbose=0)
probs_val = model.predict([img_X_val,clin_X_val],batch_size = 8)
score_val = roc_auc_score(y_val, probs_val)
i +=1
if i%10 == 0:
print(i)
if score_val > best_AUC:
best_AUC = score_val
best_params = tune
loss_tr = history.history['loss']
loss_val = history.history['val_loss']
model.save(save_models + '/best_model_on_inner_training_set_split_'+str(current_split_num)+'.h5')
keras.backend.clear_session()
best_model = load_model(save_models + '/best_model_on_inner_training_set_split_'+str(current_split_num)+'.h5')
probs_tr = best_model.predict([img_X_tr,clin_X_tr],batch_size = 8)
probs_val = best_model.predict([img_X_val,clin_X_val],batch_size = 8)
probs_te = best_model.predict([img_X_te,clin_X_te],batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_val = roc_auc_score(y_val, probs_val)
score_te = roc_auc_score(y_te, probs_te)
# Save tuning parameters that resulted in the best model:
if not os.path.exists(save_params):
os.makedirs(save_params)
json.dump(best_params,open(tune_params_path,'w'))
# Save loss and auc scores calculated at each epoch during training:
if not os.path.exists(save_scores):
os.makedirs(save_scores)
np.savetxt(save_scores+'/inner_loop_loss_over_epochs_split_'+str(current_split_num)+'.csv', [loss_tr,loss_val], delimiter=",")
np.savetxt(save_scores+ "/inner_loop_auc_scores_split_"+str(current_split_num)+".csv", [score_tr, score_val, score_te], delimiter=",")
end = time.time()
print('Training time for split %s: %i minutes.'%(str(current_split_num),np.floor(((end-start)%3600)/60)))
|
{"hexsha": "994c12e38f2a4f62bbd27c7c76592e6520b353ab", "size": 7656, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/extracted_features_gridsearch.py", "max_stars_repo_name": "prediction2020/multimodal-classification", "max_stars_repo_head_hexsha": "0805d5b48b640e89ab942c5e44be22e2315a8079", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-28T12:03:58.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-01T11:55:37.000Z", "max_issues_repo_path": "code/extracted_features_gridsearch.py", "max_issues_repo_name": "Alabenba/multimodal-classification-1", "max_issues_repo_head_hexsha": "15dad8385b2e7986e09e72ba62d6303be63b68f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/extracted_features_gridsearch.py", "max_forks_repo_name": "Alabenba/multimodal-classification-1", "max_forks_repo_head_hexsha": "15dad8385b2e7986e09e72ba62d6303be63b68f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-17T13:35:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-21T05:42:18.000Z", "avg_line_length": 38.8629441624, "max_line_length": 158, "alphanum_fraction": 0.7088557994, "include": true, "reason": "import numpy", "num_tokens": 1828}
|
"""
Author: michealowen
Last edited: 2019.9.20,Friday
DBSCAN clustering, generating the data set with sklearn
"""
#encoding=UTF-8
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cluster import DBSCAN
from queue import Queue
from time import time
from scipy.spatial import KDTree
def genData():
'''
    Generate the data set with sklearn
'''
x1,y1 = datasets.make_moons(n_samples=1000,shuffle=True,noise=0.05,random_state=None)
x2,y2 = datasets.make_blobs(n_samples=1000,n_features=2,centers=[[2,2]],cluster_std=[[0.1]],random_state=5)
x = np.concatenate((x1,x2))
y = np.concatenate((y1,y2))
return x,y
def show(cluster,data):
'''
    Plot the data and clusters with matplotlib
'''
color = np.array(['r','c','m','b','y','k','g'])
for i in range(len(cluster)):
plt.scatter(data[cluster[i]][:,0],data[cluster[i]][:,1],s=8,c=color[i])
for i in range(len(data)):
flag = False
for j in range(len(cluster)):
if i in cluster[j]:
flag = True
break
if flag == False:
plt.plot(data[i,0],data[i,1],'ok')
plt.show()
def Eucl_dis(x,y):
'''
    Compute the Euclidean distance between two points
'''
return np.sqrt(np.sum((x-y)**2))
def getDistMatrix(data):
'''
    Compute the pairwise distance matrix
    Args:
        data: point coordinates
    Returns:
        M: distance matrix
'''
length = len(data)
M = np.array([[ 0 for i in range(length)] for i in range(length)])
M = M/1.0
for i in range(length):
for j in range(i+1,length):
M[i,j] = Eucl_dis(data[i],data[j])
M[j,i] = M[i,j]
return M
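# Illustrative vectorized alternative (not used by the functions below): the same
# Euclidean distance matrix can be computed without the explicit double loop via
# scipy.spatial.distance.cdist. The helper name getDistMatrixFast is made up for
# this sketch; scipy is already a dependency since KDTree comes from scipy.spatial.
def getDistMatrixFast(data):
    from scipy.spatial.distance import cdist
    return cdist(data, data, metric='euclidean')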
def myDBSCAN(data,Eps,minPts):
'''
    DBSCAN algorithm
    Args:
        Eps: neighborhood radius
        minPts: minimum number of points required in an Eps-neighborhood
'''
M = getDistMatrix(data)
m = len(M)
core = np.array([],dtype=int)
border = {}
noise = np.array([],dtype=int)
    # 1. Label each point (core / border / noise)
for i in range(m):
num=0
for j in range(m):
if i == j:
continue
elif M[i,j] <= Eps:
num+=1
if num >= minPts:
            # this point is a core point
core = np.append(core,i)
for j in range(m):
if i != j:
if M[i,j] <= Eps and j not in core:
                        # add j as a border point of core point i
if i not in border.keys():
border[i] = np.array([j])
else:
if j in noise:
noise = np.delete(noise,np.argwhere(noise==j))
border[i] = np.append(border[i],j)
else:
            # if this point is not yet known to be a border point, tentatively mark it as noise;
            # a noise point can later be promoted back to a border point if a core point reaches it
noise = np.append(noise,i)
    # 2. Merge density-reachable core points via BFS
    # cluster maps cluster index -> member point indices
    cluster={}
    clusterNum=0
    # visited stores which core points have already been processed
    visited = np.array([False for i in range(len(core))])
    # pointQ is the BFS queue
    pointQ = Queue()
while np.all(visited==True) == False:
for i in range(len(core)):
if visited[i] == False:
                visited[i] = True
pointQ.put(core[i])
cluster[clusterNum]=np.array([core[i]],dtype=int)
break
while pointQ.empty() == False:
currentCore=pointQ.get()
#visited[np.argwhere(core==currentCore)]=True
for i in range(len(core)):
if core[i] != currentCore and visited[i] == False:
if M[currentCore,core[i]] <= Eps:
                        # enqueue the newly reached core point
                        pointQ.put(core[i])
                        visited[i]=True
                        # add it to the current cluster
                        cluster[clusterNum] = np.append(cluster[clusterNum],core[i])
clusterNum+=1
    # 3. Merge border points into the clusters of their core points
for it in border.items():
for i in range(clusterNum):
if it[0] in cluster[i]:
cluster[i] = np.append(cluster[i],it[1])
return cluster
def myKDDBSCAN(data,Eps,minPts):
'''
    DBSCAN algorithm (accelerated with a KD-tree)
    Args:
        Eps: neighborhood radius
        minPts: minimum number of points required in an Eps-neighborhood
'''
kd = KDTree(data)
#M = getDistMatrix(data)
#m = len(M)
core = np.array([],dtype=int)
border = {}
noise = np.array([],dtype=int)
    # 1. Label each point (core / border / noise)
for i in range(len(data)):
N = kd.query_ball_point(data[i],r=Eps)
if len(N) >= minPts:
            # this point is a core point
            core = np.append(core,i)
            # record its neighbors as border points
for j in N:
if i not in border.keys():
border[i] = np.array([j])
else:
if j in noise:
noise = np.delete(noise,np.argwhere(noise==j))
border[i] = np.append(border[i],j)
else:
            # if this point is not yet known to be a border point, tentatively mark it as noise;
            # a noise point can later be promoted back to a border point if a core point reaches it
noise = np.append(noise,i)
    # 2. Merge density-reachable core points via BFS
    # cluster maps cluster index -> member point indices
    cluster={}
    clusterNum=0
    # visited stores which core points have already been processed
    visited = np.array([False for i in range(len(core))])
    # pointQ is the BFS queue
    pointQ = Queue()
while np.all(visited==True) == False:
for i in range(len(core)):
if visited[i] == False:
                visited[i] = True
pointQ.put(core[i])
cluster[clusterNum]=np.array([core[i]],dtype=int)
break
while pointQ.empty() == False:
currentCore=pointQ.get()
visited[np.argwhere(core==currentCore)]=True
N = kd.query_ball_point(data[int(currentCore)],r=Eps)
for i in N:
                # if this neighbor is an unvisited core point, enqueue it
index = np.argwhere(core==i)
#print(index)
if visited[index] == False:
pointQ.put(i)
visited[index]=True
cluster[clusterNum] = np.append(cluster[clusterNum],core[index])
clusterNum+=1
print(clusterNum)
    # 3. Merge border points into the clusters of their core points
for it in border.items():
for i in range(clusterNum):
if it[0] in cluster[i]:
cluster[i] = np.append(cluster[i],it[1])
return cluster
if __name__ == '__main__':
x,y=genData()
t = time()
cluster=myDBSCAN(x,0.1,10)
print(time()-t)
    # 1000 data points: the plain DBSCAN takes about 24 seconds
show(cluster,x)
t = time()
cluster=myKDDBSCAN(x,0.1,10)
print(time()-t)
    # 1000 data points: the KD-tree DBSCAN takes about 12 seconds
show(cluster,x)
t = time()
y=DBSCAN(0.1,10,'euclidean').fit_predict(x)
print(time()-t)
    # 1000 data points: sklearn's DBSCAN takes about 0.03 seconds
plt.scatter(x[:, 0], x[:, 1], c=y)
plt.show()
|
{"hexsha": "6d7a021de2377ab8a53d3346c043e79afacab500", "size": 6468, "ext": "py", "lang": "Python", "max_stars_repo_path": "cluster/density/DBSCAN.py", "max_stars_repo_name": "michealowen/MachingLearning", "max_stars_repo_head_hexsha": "9dcc908f2d3e468390e5abb7f051b449b0ecb455", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-11T07:02:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-17T10:40:02.000Z", "max_issues_repo_path": "cluster/density/DBSCAN.py", "max_issues_repo_name": "michealowen/MachingLearning", "max_issues_repo_head_hexsha": "9dcc908f2d3e468390e5abb7f051b449b0ecb455", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cluster/density/DBSCAN.py", "max_forks_repo_name": "michealowen/MachingLearning", "max_forks_repo_head_hexsha": "9dcc908f2d3e468390e5abb7f051b449b0ecb455", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1217391304, "max_line_length": 111, "alphanum_fraction": 0.501546073, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1927}
|
/*
* Copyright (C) 2019 LEIDOS.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
#include <ros/ros.h>
#include <tuple>
#include <algorithm>
#include <assert.h>
#include <carma_wm/CARMAWorldModel.h>
#include <lanelet2_routing/RoutingGraph.h>
#include <lanelet2_traffic_rules/TrafficRulesFactory.h>
#include <lanelet2_core/Attribute.h>
#include <lanelet2_core/geometry/LineString.h>
#include <lanelet2_core/primitives/Traits.h>
#include <Eigen/Core>
#include <Eigen/LU>
#include <cmath>
#include <lanelet2_core/geometry/Polygon.h>
#include <boost/geometry.hpp>
#include <boost/geometry/geometries/polygon.hpp>
#include <carma_wm/Geometry.h>
#include <queue>
namespace carma_wm
{
std::pair<TrackPos, TrackPos> CARMAWorldModel::routeTrackPos(const lanelet::ConstArea& area) const
{
// Check if the route was loaded yet
if (!route_)
{
throw std::invalid_argument("Route has not yet been loaded");
}
lanelet::ConstLineStrings3d outer_bound = area.outerBound();
if (outer_bound.empty())
{
throw std::invalid_argument("Provided area outer bound is invalid as it contains no points");
}
TrackPos minPos(0, 0);
TrackPos maxPos(0, 0);
bool first = true;
for (lanelet::ConstLineString3d sub_bound3d : outer_bound)
{
auto sub_bound = lanelet::utils::to2D(sub_bound3d);
for (lanelet::ConstPoint2d point : sub_bound)
{
TrackPos tp = routeTrackPos(point);
if (first)
{
minPos = maxPos = tp;
first = false;
}
else if (tp.downtrack < minPos.downtrack)
{
minPos.downtrack = tp.downtrack;
minPos.crosstrack = tp.crosstrack;
}
else if (tp.downtrack > maxPos.downtrack)
{
maxPos.downtrack = tp.downtrack;
maxPos.crosstrack = tp.crosstrack;
}
}
}
return std::make_pair(minPos, maxPos);
}
TrackPos CARMAWorldModel::routeTrackPos(const lanelet::ConstLanelet& lanelet) const
{
// Check if the route was loaded yet
if (!route_)
{
throw std::invalid_argument("Route has not yet been loaded");
}
lanelet::ConstLineString2d centerline = lanelet::utils::to2D(lanelet.centerline());
if (centerline.empty())
{
throw std::invalid_argument("Provided lanelet has invalid centerline containing no points");
}
auto front = centerline.front();
return routeTrackPos(front);
}
TrackPos CARMAWorldModel::routeTrackPos(const lanelet::BasicPoint2d& point) const
{
// Check if the route was loaded yet
if (!route_)
{
throw std::invalid_argument("Route has not yet been loaded");
}
// Find the nearest continuous shortest path centerline segment using fast map nearest search
lanelet::Points3d near_points =
shortest_path_filtered_centerline_view_->pointLayer.nearest(point, 1); // Find the nearest points
// Match point with linestring using fast map lookup
auto lineString_1 =
lanelet::utils::to2D(shortest_path_filtered_centerline_view_->lineStringLayer.findUsages(near_points[0])
.front()); // Only need the first index due to the nature of the route centerline
if (lineString_1.size() == 0)
{
throw std::invalid_argument("Invalid route loaded. Shortest path does not have proper references");
}
// New approach
// 1. Find nearest point
// 2. Find linestring associated with nearest point
// 3. If the nearest point is the first point on the linestring then we need to check the downtrack value
// 4. -- If downtrack is negative then iterate over route segments to find the preceding segment
// 5. -- If downtrack is positive then we are on the correct segment
// 6. If the nearest point is the last point on the linestring then we need to check the downtrack value
// 7. -- If downtrack is less than the segment length then we are on the correct segment
// 8. -- If downtrack is greater than the segment length then iterate over route segments to find the succeeding segment
// 9. With the correct segment identified, compute the segment downtrack distance
// 10. Accumulate previous segment distances if needed.
// Find best route segment
lanelet::Id bestRouteSegId;
TrackPos tp(0, 0);
// Check for end cases
auto indexes = shortest_path_distance_map_.getIndexFromId(near_points[0].id());
size_t ls_i = indexes.first;
size_t p_i = indexes.second;
if (near_points[0].id() == lineString_1.front().id())
{ // Nearest point is at the start of a line string
// Get start point of cur segment and add 1
auto next_point = lineString_1[1];
TrackPos tp_next = geometry::trackPos(point, lineString_1.front().basicPoint(), next_point.basicPoint());
if (tp_next.downtrack >= 0 || ls_i == 0)
{
bestRouteSegId = lineString_1.id();
tp = tp_next;
// If downtrack is positive then we are on the correct segment
}
else
{
// If downtrack is negative then iterate over route segments to find the preceding segment
const size_t prev_ls_i = ls_i - 1;
auto prev_centerline = lanelet::utils::to2D(shortest_path_centerlines_[prev_ls_i]); // Get prev centerline
tp = geometry::trackPos(point, prev_centerline[prev_centerline.size() - 2].basicPoint(),
prev_centerline[prev_centerline.size() - 1].basicPoint());
tp.downtrack += shortest_path_distance_map_.distanceToPointAlongElement(prev_ls_i, prev_centerline.size() - 2);
bestRouteSegId = prev_centerline.id();
}
}
else if (near_points[0].id() == lineString_1.back().id())
{ // Nearest point is the end of a line string
// Get end point of cur segment and subtract 1
auto prev_prev_point = lineString_1[lineString_1.size() - 2];
auto prev_point = lineString_1[lineString_1.size() - 1];
TrackPos tp_prev = geometry::trackPos(point, prev_prev_point.basicPoint(), prev_point.basicPoint());
double last_seg_length =
shortest_path_distance_map_.distanceBetween(ls_i, lineString_1.size() - 2, lineString_1.size() - 1);
if (tp_prev.downtrack < last_seg_length || ls_i == shortest_path_centerlines_.size() - 1)
{
// If downtrack is less than the segment length then we are on the correct segment
bestRouteSegId = lineString_1.id();
tp = tp_prev;
tp.downtrack += shortest_path_distance_map_.distanceToPointAlongElement(ls_i, lineString_1.size() - 2);
}
else
{
// If downtrack is greater than the segment length then we need to find the succeeding segment
auto next_centerline = lanelet::utils::to2D(shortest_path_centerlines_[ls_i + 1]); // Get next centerline
tp = geometry::trackPos(point, next_centerline[0].basicPoint(), next_centerline[1].basicPoint());
bestRouteSegId = next_centerline.id();
}
}
else
{ // The nearest point is in the middle of a line string
// Grab the two bounding points on the line string and call matchSegment using a 3 element segment
// There is a guarantee from the earlier if statements that near_points[0] will always be located at an index within
// the exclusive range (0,lineString_1.size() - 1) so no need for range checks
lanelet::BasicLineString2d subSegment = lanelet::BasicLineString2d(
{ lineString_1[p_i - 1].basicPoint(), lineString_1[p_i].basicPoint(), lineString_1[p_i + 1].basicPoint() });
tp = std::get<0>(geometry::matchSegment(point, subSegment)); // Get track pos along centerline
tp.downtrack += shortest_path_distance_map_.distanceToPointAlongElement(ls_i, p_i - 1);
bestRouteSegId = lineString_1.id();
}
// Accumulate distance
auto bestRouteSegIndex = shortest_path_distance_map_.getIndexFromId(bestRouteSegId);
tp.downtrack += shortest_path_distance_map_.distanceToElement(bestRouteSegIndex.first);
return tp;
}
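// Helper pairing a lanelet with its starting downtrack distance so lanelets can be ordered
// along the route in a priority queue.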
class LaneletDowntrackPair
{
public:
lanelet::ConstLanelet lanelet_;
double downtrack_ = 0;
LaneletDowntrackPair(lanelet::ConstLanelet lanelet, double downtrack) : lanelet_(lanelet), downtrack_(downtrack) {}
bool operator<(const LaneletDowntrackPair& pair) const
{
return this->downtrack_ < pair.downtrack_;
}
bool operator>(const LaneletDowntrackPair& pair) const
{
return this->downtrack_ > pair.downtrack_;
}
};
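// Returns all lanelets whose centerline downtrack range overlaps [start, end], sorted by
// increasing starting downtrack.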
std::vector<lanelet::ConstLanelet> CARMAWorldModel::getLaneletsBetween(double start, double end, bool shortest_path_only) const
{
// Check if the route was loaded yet
if (!route_)
{
throw std::invalid_argument("Route has not yet been loaded");
}
if (start >= end)
{
throw std::invalid_argument("Start distance is greater than or equal to end distance");
}
std::vector<lanelet::ConstLanelet> output;
std::priority_queue<LaneletDowntrackPair, std::vector<LaneletDowntrackPair>, std::greater<LaneletDowntrackPair>> prioritized_lanelets;
auto lanelet_map = route_->laneletMap();
for (lanelet::ConstLanelet lanelet : lanelet_map->laneletLayer)
{
if (shortest_path_only && !shortest_path_view_->laneletLayer.exists(lanelet.id())) {
continue; // Continue if we are only evaluating the shortest path and this lanelet is not part of it
}
lanelet::ConstLineString2d centerline = lanelet::utils::to2D(lanelet.centerline());
auto front = centerline.front();
auto back = centerline.back();
TrackPos min = routeTrackPos(front);
TrackPos max = routeTrackPos(back);
if (std::max(min.downtrack, start) > std::min(max.downtrack, end))
{ // Check for 1d intersection
// No intersection so continue
continue;
}
// Intersection has occurred so add lanelet to list
LaneletDowntrackPair pair(lanelet, min.downtrack);
prioritized_lanelets.push(pair);
}
output.reserve(prioritized_lanelets.size());
while(!prioritized_lanelets.empty()) {
auto pair = prioritized_lanelets.top();
prioritized_lanelets.pop();
output.push_back(pair.lanelet_);
}
return output;
}
lanelet::LaneletMapConstPtr CARMAWorldModel::getMap() const
{
return std::static_pointer_cast<lanelet::LaneletMap const>(semantic_map_); // Cast pointer to const variant
}
LaneletRouteConstPtr CARMAWorldModel::getRoute() const
{
return std::static_pointer_cast<const lanelet::routing::Route>(route_); // Cast pointer to const variant
}
void CARMAWorldModel::setMap(lanelet::LaneletMapPtr map)
{
semantic_map_ = map;
// Build routing graph from map
TrafficRulesConstPtr traffic_rules = *(getTrafficRules(lanelet::Participants::Vehicle));
lanelet::routing::RoutingGraphUPtr map_graph = lanelet::routing::RoutingGraph::build(*semantic_map_, *traffic_rules);
map_routing_graph_ = std::move(map_graph);
}
lanelet::LaneletMapPtr CARMAWorldModel::getMutableMap() const
{
return semantic_map_;
}
void CARMAWorldModel::setRoute(LaneletRoutePtr route)
{
route_ = route;
lanelet::ConstLanelets path_lanelets(route_->shortestPath().begin(), route_->shortestPath().end());
shortest_path_view_ = lanelet::utils::createConstMap(path_lanelets, {});
computeDowntrackReferenceLine();
}
lanelet::LineString3d CARMAWorldModel::copyConstructLineString(const lanelet::ConstLineString3d& line) const
{
std::vector<lanelet::Point3d> copied_points;
copied_points.reserve(line.size());
for (auto basic_p : line.basicLineString())
{
copied_points.push_back(lanelet::Point3d(lanelet::utils::getId(), basic_p));
}
return lanelet::LineString3d(lanelet::utils::getId(), copied_points);
}
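// Builds the continuous centerline segments of the route's shortest path and the distance map
// used for fast downtrack lookups. Segments are broken wherever the shortest path requires a
// lane change.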
void CARMAWorldModel::computeDowntrackReferenceLine()
{
IndexedDistanceMap distance_map;
lanelet::routing::LaneletPath shortest_path = route_->shortestPath();
// Build shortest path routing graph
TrafficRulesConstPtr traffic_rules = *(getTrafficRules(lanelet::Participants::Vehicle));
lanelet::routing::RoutingGraphUPtr shortest_path_graph =
lanelet::routing::RoutingGraph::build(*shortest_path_view_, *traffic_rules);
std::vector<lanelet::LineString3d> lineStrings; // List of continuous line strings representing segments of the route
// reference line
bool first = true;
size_t next_index = 0;
// Iterate over each lanelet in the shortest path. This loop works by looking one lanelet ahead to detect lane changes
for (lanelet::ConstLanelet ll : shortest_path)
{
next_index++;
if (first)
{ // For the first lanelet store its centerline and length
lineStrings.push_back(copyConstructLineString(ll.centerline()));
first = false;
}
if (next_index < shortest_path.size())
{ // Check for remaining lanelets
auto nextLanelet = shortest_path[next_index];
lanelet::LineString3d nextCenterline = copyConstructLineString(nextLanelet.centerline());
size_t connectionCount = shortest_path_graph->possiblePaths(ll, (uint32_t)2, false).size();
if (connectionCount == 1)
{ // Get list of connected lanelets without lanechanges. On the shortest path this should only return 1 or 0
// No lane change
// Append distance to current centerline
lineStrings.back().insert(lineStrings.back().end(), nextCenterline.begin(), nextCenterline.end());
}
else if (connectionCount == 0)
{
// Lane change required
// Break the point chain when a lanechange occurs
if (lineStrings.back().size() == 0) continue; // We don't need to create another empty linestring if we already have one
// (this occurs when the route changes lanes multiple times in sequence)
lanelet::LineString3d empty_linestring;
empty_linestring.setId(lanelet::utils::getId());
distance_map.pushBack(lanelet::utils::to2D(lineStrings.back()));
lineStrings.push_back(empty_linestring);
}
else
{
assert(false); // It should not be possible to reach this point. Doing so demonstrates a bug
}
}
}
// Copy values to member variables
while (lineStrings.back().size() == 0) lineStrings.pop_back(); // Clear empty linestrings that were never used at the end
shortest_path_centerlines_ = lineStrings;
shortest_path_distance_map_ = distance_map;
// Add length of final sections
if (shortest_path_centerlines_.size() > shortest_path_distance_map_.size())
{
shortest_path_distance_map_.pushBack(lanelet::utils::to2D(lineStrings.back())); // Record length of last continuous
// segment
}
shortest_path_filtered_centerline_view_ = lanelet::utils::createMap(shortest_path_centerlines_);
}
LaneletRoutingGraphConstPtr CARMAWorldModel::getMapRoutingGraph() const
{
return std::static_pointer_cast<const lanelet::routing::RoutingGraph>(map_routing_graph_); // Cast pointer to const
// variant
}
lanelet::Optional<TrafficRulesConstPtr> CARMAWorldModel::getTrafficRules(const std::string& participant) const
{
lanelet::Optional<TrafficRulesConstPtr> optional_ptr;
// Create carma traffic rules object
try
{
lanelet::traffic_rules::TrafficRulesUPtr traffic_rules = lanelet::traffic_rules::TrafficRulesFactory::create(
lanelet::traffic_rules::CarmaUSTrafficRules::Location, participant);
auto carma_traffic_rules = std::make_shared<lanelet::traffic_rules::CarmaUSTrafficRules>();
carma_traffic_rules = std::static_pointer_cast<lanelet::traffic_rules::CarmaUSTrafficRules>(
lanelet::traffic_rules::TrafficRulesPtr(std::move(traffic_rules)));
carma_traffic_rules->setConfigSpeedLimit(config_speed_limit_);
optional_ptr = std::static_pointer_cast<const lanelet::traffic_rules::CarmaUSTrafficRules>(
carma_traffic_rules);
}
catch (const lanelet::InvalidInputError& e)
{
return optional_ptr;
}
return optional_ptr;
}
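// Converts an external object into a roadway obstacle by matching it to the lanelet it
// intersects and computing track positions for the object and each of its predictions.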
lanelet::Optional<cav_msgs::RoadwayObstacle>
CARMAWorldModel::toRoadwayObstacle(const cav_msgs::ExternalObject& object) const
{
if (!semantic_map_ || semantic_map_->laneletLayer.size() == 0)
{
throw std::invalid_argument("Map is not set or does not contain lanelets");
}
lanelet::BasicPoint2d object_center(object.pose.pose.position.x, object.pose.pose.position.y);
auto nearestLaneletBoost = getIntersectingLanelet(object);
if (!nearestLaneletBoost)
return boost::none;
lanelet::Lanelet nearestLanelet = nearestLaneletBoost.get();
cav_msgs::RoadwayObstacle obs;
obs.object = object;
obs.connected_vehicle_type.type =
cav_msgs::ConnectedVehicleType::NOT_CONNECTED; // TODO No clear way to determine automation state at this time
obs.lanelet_id = nearestLanelet.id();
carma_wm::TrackPos obj_track_pos = geometry::trackPos(nearestLanelet, object_center);
obs.down_track = obj_track_pos.downtrack;
obs.cross_track = obj_track_pos.crosstrack;
for (auto prediction : object.predictions)
{
// Compute prediction polygon
lanelet::BasicPolygon2d prediction_polygon = geometry::objectToMapPolygon(prediction.predicted_position, object.size);
lanelet::BasicPoint2d prediction_center(prediction.predicted_position.position.x,
prediction.predicted_position.position.y);
auto predNearestLanelet = semantic_map_->laneletLayer.nearest(prediction_center, 1)[0];
carma_wm::TrackPos pred_track_pos = geometry::trackPos(predNearestLanelet, prediction_center);
obs.predicted_lanelet_ids.emplace_back(predNearestLanelet.id());
obs.predicted_cross_tracks.emplace_back(pred_track_pos.crosstrack);
obs.predicted_down_tracks.emplace_back(pred_track_pos.downtrack);
// Since the predictions have their lanelet ids matched based on the nearest bounding box search rather than
// checking for intersection, the id confidence will be set to 90% of the position confidence
obs.predicted_lanelet_id_confidences.emplace_back(0.9 * prediction.predicted_position_confidence);
obs.predicted_cross_track_confidences.emplace_back(0.9 * prediction.predicted_position_confidence);
obs.predicted_down_track_confidences.emplace_back(0.9 * prediction.predicted_position_confidence);
}
return obs;
}
void CARMAWorldModel::setRoadwayObjects(const std::vector<cav_msgs::RoadwayObstacle>& rw_objs)
{
roadway_objects_ = rw_objs;
}
std::vector<cav_msgs::RoadwayObstacle> CARMAWorldModel::getRoadwayObjects() const
{
return roadway_objects_;
}
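// Returns the registered roadway objects located in the lane containing the given lanelet
// (restricted to the requested lane section), including objects intersecting from adjacent
// lanelets while lane-changing.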
std::vector<cav_msgs::RoadwayObstacle> CARMAWorldModel::getInLaneObjects(const lanelet::ConstLanelet& lanelet, const LaneSection& section) const
{
// Get all lanelets on current lane section
std::vector<lanelet::ConstLanelet> lane = getLane(lanelet, section);
// Check if any roadway object is registered
if (roadway_objects_.size() == 0)
{
return std::vector<cav_msgs::RoadwayObstacle>{};
}
// Initialize useful variables
std::vector<cav_msgs::RoadwayObstacle> lane_objects, roadway_objects_copy = roadway_objects_;
/*
* Get all in lane objects
* For each lane, we check if each object is on it by matching lanelet_id
* Complexity N*K, where N: num of lanelets, K: num of objects
*/
int curr_idx;
std::queue <int> obj_idxs_queue;
// Create an index queue for roadway objects to quickly pop the idx once the associated
// lanelet is found. This reduces the number of objects to check as we move to new lanelets
for (size_t i = 0; i < roadway_objects_copy.size(); i++)
{
obj_idxs_queue.push((int)i);
}
// Check each lanelet
for (auto llt: lane)
{
int checked_queue_items = 0, to_check = obj_idxs_queue.size();
// Check each object
while (checked_queue_items < to_check)
{
curr_idx = obj_idxs_queue.front();
obj_idxs_queue.pop();
checked_queue_items++;
// Check if the object is in the lanelet
auto curr_obj = roadway_objects_copy[curr_idx];
if (curr_obj.lanelet_id == llt.id())
{
// found intersecting lanelet for this object
lane_objects.push_back(curr_obj);
}
// Handle the case where an object might be lane-changing, so check adjacent ids.
// This is a bit faster than checking intersection alone since && is evaluated left-to-right
else if (((map_routing_graph_->left(llt) && curr_obj.lanelet_id == map_routing_graph_->left(llt).get().id()) ||
(map_routing_graph_->right(llt) && curr_obj.lanelet_id == map_routing_graph_->right(llt).get().id())) &&
boost::geometry::intersects(llt.polygon2d().basicPolygon(), geometry::objectToMapPolygon(curr_obj.object.pose.pose, curr_obj.object.size)))
{
// found intersecting lanelet for this object
lane_objects.push_back(curr_obj);
}
else
{
// Did not find a suitable lanelet, so the object will be checked again for the next lanelet
obj_idxs_queue.push(curr_idx);
}
}
}
return lane_objects;
}
lanelet::Optional<lanelet::Lanelet> CARMAWorldModel::getIntersectingLanelet (const cav_msgs::ExternalObject& object) const
{
// Check if the map is loaded yet
if (!semantic_map_ || semantic_map_->laneletLayer.size() == 0)
{
throw std::invalid_argument("Map is not set or does not contain lanelets");
}
lanelet::BasicPoint2d object_center(object.pose.pose.position.x, object.pose.pose.position.y);
lanelet::BasicPolygon2d object_polygon = geometry::objectToMapPolygon(object.pose.pose, object.size);
auto nearestLanelet = semantic_map_->laneletLayer.nearest(
object_center, 1)[0]; // Since the map contains lanelets there should always be at least 1 element
// Check if the object is inside or intersecting this lanelet
// If no intersection then the object can be considered off the road and does not need to be processed
if (!boost::geometry::intersects(nearestLanelet.polygon2d().basicPolygon(), object_polygon))
{
return boost::none;
}
return nearestLanelet;
}
lanelet::Optional<double> CARMAWorldModel::distToNearestObjInLane(const lanelet::BasicPoint2d& object_center) const
{
// Check if the map is loaded yet
if (!semantic_map_ || semantic_map_->laneletLayer.size() == 0)
{
throw std::invalid_argument("Map is not set or does not contain lanelets");
}
// return empty if there is no object nearby
if (roadway_objects_.size() == 0)
return boost::none;
// Get the lanelet of this point
auto curr_lanelet = semantic_map_->laneletLayer.nearest(object_center, 1)[0];
// Check if this point is actually within this lanelet; otherwise, it wouldn't be "in-lane"
if (!boost::geometry::within(object_center, curr_lanelet.polygon2d().basicPolygon()))
throw std::invalid_argument("Given point is not within any lanelet");
std::vector<cav_msgs::RoadwayObstacle> lane_objects = getInLaneObjects(curr_lanelet);
// return empty if there is no object in the lane
if (lane_objects.size() == 0)
return boost::none;
// Record the closest distance out of all polygons, 4 points each
double min_dist = INFINITY;
for (auto obj: roadway_objects_)
{
lanelet::BasicPolygon2d object_polygon =
geometry::objectToMapPolygon(obj.object.pose.pose, obj.object.size);
// Point to closest edge on polygon distance by boost library
double curr_dist = lanelet::geometry::distance(object_center, object_polygon);
if (min_dist > curr_dist)
min_dist = curr_dist;
}
// Return the closest distance out of all polygons
return min_dist;
}
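// Finds the in-lane object closest in downtrack to the given point and returns its track
// position relative to that point together with the obstacle itself.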
lanelet::Optional<std::tuple<TrackPos,cav_msgs::RoadwayObstacle>> CARMAWorldModel::getNearestObjInLane(const lanelet::BasicPoint2d& object_center, const LaneSection& section) const
{
// Check if the map is loaded yet
if (!semantic_map_ || semantic_map_->laneletLayer.size() == 0)
{
throw std::invalid_argument("Map is not set or does not contain lanelets");
}
// return empty if there is no object nearby
if (roadway_objects_.size() == 0)
return boost::none;
// Get the lanelet of this point
auto curr_lanelet = semantic_map_->laneletLayer.nearest(object_center, 1)[0];
// Check if this point is actually within this lanelet; otherwise, it wouldn't be "in-lane"
if (!boost::geometry::within(object_center, curr_lanelet.polygon2d().basicPolygon()))
throw std::invalid_argument("Given point is not within any lanelet");
// Get objects that are in the lane
std::vector<cav_msgs::RoadwayObstacle> lane_objects = getInLaneObjects(curr_lanelet, section);
// return empty if there is no object in the lane
if (lane_objects.size() == 0)
return boost::none;
// Get the lane that is including this lanelet
std::vector<lanelet::ConstLanelet> lane_section = getLane(curr_lanelet, section);
std::vector<double> object_downtracks, object_crosstracks;
std::vector<int> object_idxs;
std::queue<int> obj_idxs_queue;
double base_downtrack = 0;
double input_obj_downtrack = 0;
int curr_idx = 0;
// Create an index queue for in-lane objects to quickly pop the idx once the associated
// lanelet is found. This reduces the number of objects to check as we move to new lanelets
for (size_t i = 0; i < lane_objects.size(); i++)
{
obj_idxs_queue.push((int)i);
}
// For each lanelet, check if each object is inside it. If so, calculate its total downtrack
for (auto llt: lane_section)
{
int checked_queue_items = 0, to_check = obj_idxs_queue.size();
// Check each remaining object
while (checked_queue_items < to_check)
{
curr_idx = obj_idxs_queue.front();
obj_idxs_queue.pop();
checked_queue_items++;
// if the object is on it, store its total downtrack distance
if (lane_objects[curr_idx].lanelet_id == llt.id())
{
object_downtracks.push_back(base_downtrack + lane_objects[curr_idx].down_track);
object_crosstracks.push_back(lane_objects[curr_idx].cross_track);
object_idxs.push_back(curr_idx);
}
// if it's not on it, try adjacent lanelets because the object could be lane changing
else if ((map_routing_graph_->left(llt) && lane_objects[curr_idx].lanelet_id == map_routing_graph_->left(llt).get().id()) ||
(map_routing_graph_->right(llt) && lane_objects[curr_idx].lanelet_id == map_routing_graph_->right(llt).get().id()))
{
// no need to check intersection as the objects are guaranteed to be intersecting this lane
lanelet::BasicPoint2d obj_center(lane_objects[curr_idx].object.pose.pose.position.x, lane_objects[curr_idx].object.pose.pose.position.y);
TrackPos new_tp = geometry::trackPos(llt, obj_center);
object_downtracks.push_back(base_downtrack + new_tp.downtrack);
object_crosstracks.push_back(lane_objects[curr_idx].cross_track);
object_idxs.push_back(curr_idx);
}
else
{
// The object is not in this lanelet if the above conditions are not met
obj_idxs_queue.push(curr_idx);
}
}
// try to update object_center's downtrack
if (curr_lanelet.id() == llt.id())
input_obj_downtrack = base_downtrack + geometry::trackPos(llt, object_center).downtrack;
// Add this lanelet's centerline length (front to back) to the accumulated downtrack
base_downtrack += geometry::trackPos(llt.centerline().back().basicPoint2d(),
llt.centerline().front().basicPoint2d(), llt.centerline().back().basicPoint2d()).downtrack;
}
// compare with input's downtrack and return the min_dist
size_t min_idx = 0;
double min_dist = INFINITY;
for (size_t idx = 0 ; idx < object_downtracks.size(); idx ++)
{
if (min_dist > std::fabs(object_downtracks[idx] - input_obj_downtrack))
{
min_dist = std::fabs(object_downtracks[idx] - input_obj_downtrack);
min_idx = idx;
}
}
// The returned downtrack is negative if the nearest object lies behind object_center along the lane;
// the returned crosstrack is positive if it lies to the left of object_center relative to the lanelet centerline
return std::tuple<TrackPos, cav_msgs::RoadwayObstacle>
(TrackPos(object_downtracks[min_idx] - input_obj_downtrack, object_crosstracks[min_idx] - geometry::trackPos(curr_lanelet, object_center).crosstrack),
lane_objects[object_idxs[min_idx]]);
}
lanelet::Optional<std::tuple<TrackPos,cav_msgs::RoadwayObstacle>> CARMAWorldModel::nearestObjectAheadInLane(const lanelet::BasicPoint2d& object_center) const
{
return getNearestObjInLane(object_center, LANE_AHEAD);
}
lanelet::Optional<std::tuple<TrackPos,cav_msgs::RoadwayObstacle>> CARMAWorldModel::nearestObjectBehindInLane(const lanelet::BasicPoint2d& object_center) const
{
return getNearestObjInLane(object_center, LANE_BEHIND);
}
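// Collects the lanelets forming the lane that contains the given lanelet, following
// predecessors and/or successors (without lane changes) according to the requested section.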
std::vector<lanelet::ConstLanelet> CARMAWorldModel::getLane(const lanelet::ConstLanelet& lanelet, const LaneSection& section) const
{
// Check if the map is loaded yet
if (!semantic_map_ || semantic_map_->laneletLayer.size() == 0)
{
throw std::invalid_argument("Map is not set or does not contain lanelets");
}
// Check if the lanelet is in map
if (semantic_map_->laneletLayer.find(lanelet.id()) == semantic_map_->laneletLayer.end())
{
throw std::invalid_argument("Lanelet is not on the map");
}
// Check if the lane section input is correct
if (section != LANE_FULL && section != LANE_BEHIND && section != LANE_AHEAD)
{
throw std::invalid_argument("Undefined lane section is requested");
}
std::vector<lanelet::ConstLanelet> following_lane = {lanelet};
std::stack<lanelet::ConstLanelet> prev_lane_helper;
std::vector<lanelet::ConstLanelet> prev_lane;
std::vector<lanelet::ConstLanelet> connecting_lanelet = map_routing_graph_->following(lanelet, false);
// If only interested in following lanelets, as is most often the case
while (connecting_lanelet.size() != 0)
{
following_lane.push_back(connecting_lanelet[0]);
connecting_lanelet = map_routing_graph_->following(connecting_lanelet[0], false);
}
if (section == LANE_AHEAD)
return following_lane;
// if interested in lanelets behind
connecting_lanelet = map_routing_graph_->previous(lanelet, false);
while (connecting_lanelet.size() != 0)
{
prev_lane_helper.push(connecting_lanelet[0]);
connecting_lanelet = map_routing_graph_->previous(connecting_lanelet[0], false);
}
// gather all lanelets with correct start order
while (prev_lane_helper.size() != 0)
{
prev_lane.push_back(prev_lane_helper.top());
prev_lane_helper.pop();
}
// if only interested in lane behind
if (section == LANE_BEHIND)
{
prev_lane.push_back(lanelet);
return prev_lane;
}
// if interested in full lane
prev_lane.insert(prev_lane.end(), following_lane.begin(), following_lane.end());
return prev_lane;
}
std::vector<lanelet::Lanelet> CARMAWorldModel::getLaneletsFromPoint(const lanelet::BasicPoint2d& point, const unsigned int n) const
{
// Check if the map is loaded yet
if (!semantic_map_ || semantic_map_->laneletLayer.size() == 0)
{
throw std::invalid_argument("Map is not set or does not contain lanelets");
}
std::vector<lanelet::Lanelet> possible_lanelets;
auto nearestLanelets = lanelet::geometry::findNearest(semantic_map_->laneletLayer,point,n);
if (nearestLanelets.size() == 0) return {};
int id = 0; // closest ones are in the back
// loop through until the point is no longer geometrically in the lanelet
while (boost::geometry::within(point, nearestLanelets[id].second.polygon2d()))
{
possible_lanelets.push_back(nearestLanelets[id].second);
id++;
if (id >= nearestLanelets.size()) break;
}
return possible_lanelets;
}
void CARMAWorldModel::setConfigSpeedLimit(double config_lim)
{
config_speed_limit_ = config_lim;
}
} // namespace carma_wm
|
{"hexsha": "e0f6fa957c1c3b25cbd1bdd4f4cb9c4709a89a7e", "size": 32173, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "carma_wm/src/CARMAWorldModel.cpp", "max_stars_repo_name": "harderthan/carma-platform", "max_stars_repo_head_hexsha": "29921896a761a866db9cfee473f02a481d8bb9c9", "max_stars_repo_licenses": ["Apache-2.0", "CC-BY-4.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "carma_wm/src/CARMAWorldModel.cpp", "max_issues_repo_name": "harderthan/carma-platform", "max_issues_repo_head_hexsha": "29921896a761a866db9cfee473f02a481d8bb9c9", "max_issues_repo_licenses": ["Apache-2.0", "CC-BY-4.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "carma_wm/src/CARMAWorldModel.cpp", "max_forks_repo_name": "harderthan/carma-platform", "max_forks_repo_head_hexsha": "29921896a761a866db9cfee473f02a481d8bb9c9", "max_forks_repo_licenses": ["Apache-2.0", "CC-BY-4.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7160048135, "max_line_length": 180, "alphanum_fraction": 0.717527119, "num_tokens": 8097}
|
\documentclass[letterpaper]{article}
\usepackage{common/ohpc-doc}
\setcounter{secnumdepth}{5}
\setcounter{tocdepth}{5}
% Include git variables
\input{vc.tex}
% Define Base OS and other local macros
\newcommand{\baseOS}{CentOS8.4}
\newcommand{\OSRepo}{CentOS\_8.4}
\newcommand{\OSTree}{CentOS\_8}
\newcommand{\OSTag}{el8}
\newcommand{\baseos}{centos8.4}
\newcommand{\baseosshort}{centos8}
\newcommand{\provisioner}{xCAT}
\newcommand{\provheader}{xCAT (stateful)}
\newcommand{\rms}{SLURM}
\newcommand{\rmsshort}{slurm}
\newcommand{\arch}{x86\_64}
\newcommand{\installimage}{install}
%%% WARNING: Hack below. The version should be read from ohpc-doc.sty, but the
%%% perl parsing script does not read that file. This works for one release, but
%%% needs a proper fix.
\newcommand{\VERLONG}{2.0}
% Define package manager commands
\newcommand{\pkgmgr}{yum}
\newcommand{\addrepo}{wget -P /etc/yum.repos.d}
\newcommand{\chrootaddrepo}{wget -P \$CHROOT/etc/yum.repos.d}
\newcommand{\clean}{yum clean expire-cache}
\newcommand{\chrootclean}{yum --installroot=\$CHROOT clean expire-cache}
\newcommand{\install}{yum -y install}
\newcommand{\chrootinstall}{psh compute yum -y install}
\newcommand{\groupinstall}{yum -y groupinstall}
\newcommand{\groupchrootinstall}{psh compute yum -y groupinstall}
\newcommand{\remove}{yum -y remove}
\newcommand{\upgrade}{yum -y upgrade}
\newcommand{\chrootupgrade}{yum -y --installroot=\$CHROOT upgrade}
\newcommand{\tftppkg}{syslinux-tftpboot}
\newcommand{\beegfsrepo}{https://www.beegfs.io/release/beegfs\_7.2.1/dists/beegfs-rhel8.repo}
% boolean for os-specific formatting
\toggletrue{isCentOS}
\toggletrue{isCentOS_ww_slurm_x86}
\toggletrue{isSLURM}
\toggletrue{isx86}
\toggletrue{isxCAT}
\toggletrue{isxCATstateful}
\toggletrue{isCentOS_x86}
\begin{document}
\graphicspath{{common/figures/}}
\thispagestyle{empty}
% Title Page
\input{common/title}
% Disclaimer
\input{common/legal}
\newpage
\tableofcontents
\newpage
% Introduction --------------------------------------------------
\section{Introduction} \label{sec:introduction}
\input{common/install_header}
\input{common/intro} \\
\input{common/base_edition/edition}
\input{common/audience}
\input{common/requirements}
\input{common/inputs}
% Base Operating System --------------------------------------------
\vspace*{0.2cm}
\section{Install Base Operating System (BOS)}
\input{common/bos}
%\clearpage
% begin_ohpc_run
% ohpc_validation_newline
% ohpc_validation_comment Disable firewall
\begin{lstlisting}[language=bash,keywords={}]
[sms](*\#*) systemctl disable firewalld
[sms](*\#*) systemctl stop firewalld
\end{lstlisting}
% end_ohpc_run
% ------------------------------------------------------------------
\section{Install \xCAT{} and Provision Nodes with BOS} \label{sec:provision_compute_bos}
\input{common/xcat_stateful_compute_bos_intro}
\subsection{Enable \xCAT{} repository for local use} \label{sec:enable_xcat}
\input{common/enable_xcat_repo}
\noindent \xCAT{} has a number of installation dependencies that are housed in
separate public repositories for various distributions. To enable them for
local use, issue the following:
% begin_ohpc_run
\begin{lstlisting}[language=bash,keywords={},basicstyle=\fontencoding{T1}\fontsize{8.0}{10}\ttfamily,literate={ARCH}{\arch{}}1 {-}{-}1]
[sms](*\#*) (*\install*) centos-release-stream
[sms](*\#*) (*\addrepo*) https://xcat.org/files/xcat/repos/yum/xcat-dep/rh8/ARCH/xcat-dep.repo
\end{lstlisting}
% end_ohpc_run
\subsection{Add provisioning services on {\em master} node} \label{sec:add_provisioning}
\input{common/install_provisioning_xcat_intro_stateful}
%\input{common/enable_pxe}
\vspace*{-0.15cm}
\subsection{Complete basic \xCAT{} setup for {\em master} node} \label{sec:setup_xcat}
\input{common/xcat_setup}
\subsection{Define {\em compute} image for provisioning}
\input{common/xcat_init_os_images_centos}
\clearpage
\subsection{Add compute nodes into \xCAT{} database} \label{sec:xcat_add_nodes}
\input{common/add_xcat_hosts_intro}
%\vspace*{-0.25cm}
\subsection{Boot compute nodes} \label{sec:boot_computes}
\input{common/reset_computes_xcat}
\section{Install \OHPC{} Components} \label{sec:basic_install}
\input{common/install_ohpc_components_intro}
\subsection{Enable \OHPC{} repository for local use} \label{sec:enable_repo}
\input{common/enable_local_ohpc_repo}
% begin_ohpc_run
% ohpc_validation_newline
% ohpc_validation_comment Verify OpenHPC repository has been enabled before proceeding
% ohpc_validation_newline
% ohpc_command yum repolist | grep -q OpenHPC
% ohpc_command if [ $? -ne 0 ];then
% ohpc_command echo "Error: OpenHPC repository must be enabled locally"
% ohpc_command exit 1
% ohpc_command fi
% end_ohpc_run
In addition to the \OHPC{} and \xCAT{} package repositories, the {\em master} host also
requires access to the standard base OS distro repositories in order to resolve
necessary dependencies. For \baseOS{}, the requirements are to have access to
both the base OS and EPEL repositories for which mirrors are freely available online:
\begin{itemize*}
\item CentOS-8 - Base 8.3.2011
(e.g. \href{http://mirror.centos.org/centos-8/8/BaseOS/x86\_64/os}
{\color{blue}{http://mirror.centos.org/centos-8/8/BaseOS/x86\_64/os}} )
\item EPEL 8 (e.g. \href{http://download.fedoraproject.org/pub/epel/8/Everything/x86\_64}
{\color{blue}{http://download.fedoraproject.org/pub/epel/8/Everything/x86\_64}} )
\end{itemize*}
\noindent The public EPEL repository is enabled by installing the
\texttt{epel-release} package. Note that this requires the CentOS Extras
repository, which is shipped with CentOS and is enabled by default.
% begin_ohpc_run
\begin{lstlisting}[language=bash,keywords={},basicstyle=\fontencoding{T1}\fontsize{8.0}{10}\ttfamily,literate={ARCH}{\arch{}}1 {-}{-}1]
[sms](*\#*) (*\install*) epel-release
\end{lstlisting}
% end_ohpc_run
Now \OHPC{} packages can be installed. To add the base package on the SMS,
issue the following:
% begin_ohpc_run
\begin{lstlisting}[language=bash,keywords={},basicstyle=\fontencoding{T1}\fontsize{8.0}{10}\ttfamily,literate={ARCH}{\arch{}}1 {-}{-}1]
[sms](*\#*) (*\install*) ohpc-base
\end{lstlisting}
% end_ohpc_run
\input{common/automation}
\subsection{Setup time synchronization service on {\em master} node} \label{sec:add_ntp}
\input{common/time}
\subsection{Add resource management services on {\em master} node} \label{sec:add_rm}
\input{common/install_slurm}
\subsection{Optionally add \InfiniBand{} support services on {\em master} node} \label{sec:add_ofed}
\input{common/ibsupport_sms_centos}
\vspace*{0.3cm}
\subsection{Optionally add \OmniPath{} support services on {\em master} node} \label{sec:add_opa}
\input{common/opasupport_sms_centos}
%\vspace*{0.5cm}
\clearpage
\subsubsection{Add \OHPC{} components to {\em compute} nodes} \label{sec:add_components}
\input{common/add_to_compute_stateful_xcat_intro}
%\newpage
% begin_ohpc_run
% ohpc_validation_comment Add OpenHPC components to compute instance
\begin{lstlisting}[language=bash,literate={-}{-}1,keywords={},upquote=true]
# Add Slurm client support meta-package
[sms](*\#*) (*\chrootinstall*) ohpc-slurm-client
# Add Network Time Protocol (NTP) support
[sms](*\#*) (*\chrootinstall*) ntp
# Add kernel drivers
[sms](*\#*) (*\chrootinstall*) kernel
# Include modules user environment
[sms](*\#*) (*\chrootinstall*) --enablerepo=powertools lmod-ohpc
\end{lstlisting}
% end_ohpc_run
% ohpc_comment_header Optionally add InfiniBand support services in compute node image \ref{sec:add_components}
% ohpc_command if [[ ${enable_ib} -eq 1 ]];then
% ohpc_indent 5
\begin{lstlisting}[language=bash,literate={-}{-}1,keywords={},upquote=true]
# Optionally add IB support and enable
[sms](*\#*) (*\groupchrootinstall*) "InfiniBand Support"
\end{lstlisting}
% ohpc_indent 0
% ohpc_command fi
% end_ohpc_run
\vspace*{-0.25cm}
\subsubsection{Customize system configuration} \label{sec:master_customization}
\input{common/xcat_stateful_customize_centos}
% Additional commands when additional computes are requested
% begin_ohpc_run
% ohpc_validation_newline
% ohpc_validation_comment Update basic slurm configuration if additional computes defined
% ohpc_validation_comment This is performed on the SMS, nodes will pick it up config file is copied there later
% ohpc_command if [ ${num_computes} -gt 4 ];then
% ohpc_command perl -pi -e "s/^NodeName=(\S+)/NodeName=${compute_prefix}[1-${num_computes}]/" /etc/slurm/slurm.conf
% ohpc_command perl -pi -e "s/^PartitionName=normal Nodes=(\S+)/PartitionName=normal Nodes=${compute_prefix}[1-${num_computes}]/" /etc/slurm/slurm.conf
% ohpc_command fi
% end_ohpc_run
%\clearpage
\subsubsection{Additional Customization ({\em optional})} \label{sec:addl_customizations}
\input{common/compute_customizations_intro}
\paragraph{Increase locked memory limits}
\input{common/memlimits_stateful}
\paragraph{Enable ssh control via resource manager}
\input{common/slurm_pam_stateful}
\paragraph{Add \Lustre{} client} \label{sec:lustre_client}
\input{common/lustre-client}
\input{common/lustre-client-centos-stateful}
\input{common/lustre-client-post-stateful}
\paragraph{Add \Nagios{} monitoring} \label{sec:add_nagios}
\input{common/nagios_stateful}
\vspace*{0.4cm}
\paragraph{Add \clustershell{}}
\input{common/clustershell}
\paragraph{Add \genders{}}
\input{common/genders}
\paragraph{Add Magpie}
\input{common/magpie}
\paragraph{Add \conman{}} \label{sec:add_conman}
\input{common/conman}
\paragraph{Add \nhc{}} \label{sec:add_nhc}
\input{common/nhc}
\input{common/nhc_slurm}
%\subsubsection{Identify files for synchronization} \label{sec:file_import}
%\input{common/import_xcat_files}
%\input{common/import_xcat_files_slurm}
%%%\subsubsection{Optional kernel arguments} \label{sec:optional_kargs}
%%%\input{common/conman_post}
\section{Install \OHPC{} Development Components}
\input{common/dev_intro.tex}
%\vspace*{-0.15cm}
%\clearpage
\subsection{Development Tools} \label{sec:install_dev_tools}
\input{common/dev_tools}
\vspace*{-0.15cm}
\subsection{Compilers} \label{sec:install_compilers}
\input{common/compilers}
%\clearpage
\subsection{MPI Stacks} \label{sec:mpi}
\input{common/mpi}
\subsection{Performance Tools} \label{sec:install_perf_tools}
\input{common/perf_tools}
\subsection{Setup default development environment}
\input{common/default_dev}
%\vspace*{0.2cm}
\subsection{3rd Party Libraries and Tools} \label{sec:3rdparty}
\input{common/third_party_libs_intro}
\input{common/third_party_libs}
\input{common/third_party_mpi_libs_x86}
\vspace*{.6cm}
\subsection{Optional Development Tool Builds} \label{sec:3rdparty_intel}
\input{common/oneapi_enabled_builds}
\section{Resource Manager Startup} \label{sec:rms_startup}
\input{common/slurm_startup_stateful}
\section{Run a Test Job} \label{sec:test_job}
\input{common/xcat_slurm_test_job}
\clearpage
\appendix
{\bf \LARGE \centerline{Appendices}} \vspace*{0.2cm}
\addcontentsline{toc}{section}{Appendices}
\renewcommand{\thesubsection}{\Alph{subsection}}
\input{common/automation_appendix}
\input{common/upgrade_stateful}
\input{common/test_suite}
\input{common/customization_appendix_centos}
\input{manifest}
\input{common/signature}
\end{document}
|
{"hexsha": "21cfca89cd5cb661964125cc18b1876a7feb5325", "size": 11277, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/recipes/install/rocky8/x86_64/xcat_stateful/slurm/steps.tex", "max_stars_repo_name": "viniciusferrao/ohpc", "max_stars_repo_head_hexsha": "edae86737aeeeee1a9d0c1e6a1ac7139d5fce971", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/recipes/install/rocky8/x86_64/xcat_stateful/slurm/steps.tex", "max_issues_repo_name": "viniciusferrao/ohpc", "max_issues_repo_head_hexsha": "edae86737aeeeee1a9d0c1e6a1ac7139d5fce971", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/recipes/install/rocky8/x86_64/xcat_stateful/slurm/steps.tex", "max_forks_repo_name": "viniciusferrao/ohpc", "max_forks_repo_head_hexsha": "edae86737aeeeee1a9d0c1e6a1ac7139d5fce971", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.22, "max_line_length": 154, "alphanum_fraction": 0.7573822825, "num_tokens": 3313}
|
import os
os.chdir('seqFISH_AllenVISp/')
import numpy as np
import pandas as pd
import pickle
import matplotlib
matplotlib.use('qt5agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
#from matplotlib import cm
import scipy.stats as st
with open ('data/SpaGE_pkl/seqFISH_Cortex.pkl', 'rb') as f:
datadict = pickle.load(f)
seqFISH_data = datadict['seqFISH_data']
seqFISH_meta= datadict['seqFISH_meta']
del datadict
with open ('data/SpaGE_pkl/Allen_VISp.pkl', 'rb') as f:
datadict = pickle.load(f)
RNA_data = datadict['RNA_data']
del datadict
Gene_Order = np.intersect1d(seqFISH_data.columns,RNA_data.columns)
### SpaGE
SpaGE_imputed = pd.read_csv('Results/SpaGE_LeaveOneOut.csv',header=0,index_col=0,sep=',')
SpaGE_imputed = SpaGE_imputed.loc[:,Gene_Order]
SpaGE_seqCorr = pd.Series(index = Gene_Order)
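# Per-gene Spearman correlation between measured seqFISH expression and SpaGE-imputed values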
for i in Gene_Order:
SpaGE_seqCorr[i] = st.spearmanr(seqFISH_data[i],SpaGE_imputed[i])[0]
SpaGE_seqCorr[np.isnan(SpaGE_seqCorr)] = 0
os.chdir('STARmap_AllenVISp/')
with open ('data/SpaGE_pkl/Starmap.pkl', 'rb') as f:
datadict = pickle.load(f)
coords = datadict['coords']
Starmap_data = datadict['Starmap_data']
del datadict
Gene_Order = np.intersect1d(Starmap_data.columns,RNA_data.columns)
### SpaGE
SpaGE_imputed = pd.read_csv('Results/SpaGE_LeaveOneOut_cutoff.csv',header=0,index_col=0,sep=',')
SpaGE_imputed = SpaGE_imputed.loc[:,Gene_Order]
SpaGE_starCorr = pd.Series(index = Gene_Order)
for i in Gene_Order:
SpaGE_starCorr[i] = st.spearmanr(Starmap_data[i],SpaGE_imputed[i])[0]
def Compare_Correlations(X,Y):
fig, ax = plt.subplots(figsize=(4.5, 4.5))
ax.scatter(X, Y, s=1)
ax.axvline(linestyle='--',color='gray')
ax.axhline(linestyle='--',color='gray')
plt.gca().set_ylim([-0.5,1])
lims = [np.min([ax.get_xlim(), ax.get_ylim()]),
np.max([ax.get_xlim(), ax.get_ylim()])]
ax.plot(lims, lims, 'k-')
ax.set_aspect('equal')
ax.set_xlim(lims)
ax.set_ylim(lims)
plt.xticks(size=8)
plt.yticks(size=8)
plt.show()
Starmap_seq_genes = np.intersect1d(Starmap_data.columns,seqFISH_data.columns)
Compare_Correlations(SpaGE_starCorr[Starmap_seq_genes],SpaGE_seqCorr[Starmap_seq_genes])
plt.xlabel('Spearman Correlation STARmap',size=12)
plt.ylabel('Spearman Correlation seqFISH',size=12)
plt.show()
fig, ax = plt.subplots(figsize=(3.7, 4.5))
ax.boxplot([SpaGE_starCorr[Starmap_seq_genes],SpaGE_seqCorr[Starmap_seq_genes]],widths=0.5)
y = SpaGE_starCorr[Starmap_seq_genes]
x = np.random.normal(1, 0.05, len(y))
plt.plot(x, y, 'g.', markersize=1, alpha=0.2)
y = SpaGE_seqCorr[Starmap_seq_genes]
x = np.random.normal(2, 0.05, len(y))
plt.plot(x, y, 'g.', markersize=1, alpha=0.2)
plt.xticks((1,2),('STARmap','seqFISH'),size=12)
plt.yticks(size=8)
plt.gca().set_ylim([-0.4,0.8])
plt.ylabel('Spearman Correlation',size=12)
#ax.set_aspect(aspect=3)
_,p_val = st.wilcoxon(SpaGE_starCorr[Starmap_seq_genes],SpaGE_seqCorr[Starmap_seq_genes])
plt.text(2,np.max(plt.gca().get_ylim()),'%1.2e'%p_val,color='black',size=8)
plt.show()
os.chdir('osmFISH_AllenVISp/')
with open ('data/SpaGE_pkl/osmFISH_Cortex.pkl', 'rb') as f:
datadict = pickle.load(f)
osmFISH_data = datadict['osmFISH_data']
del datadict
Gene_Order = osmFISH_data.columns
### SpaGE
SpaGE_imputed = pd.read_csv('Results/SpaGE_LeaveOneOut_cutoff.csv',header=0,index_col=0,sep=',')
SpaGE_imputed = SpaGE_imputed.loc[:,Gene_Order]
SpaGE_osmCorr = pd.Series(index = Gene_Order)
for i in Gene_Order:
SpaGE_osmCorr[i] = st.spearmanr(osmFISH_data[i],SpaGE_imputed[i])[0]
def Compare_Correlations(X,Y):
fig, ax = plt.subplots(figsize=(4.5, 4.5))
ax.scatter(X, Y, s=25)
ax.axvline(linestyle='--',color='gray')
ax.axhline(linestyle='--',color='gray')
plt.gca().set_ylim([-0.5,1])
lims = [np.min([ax.get_xlim(), ax.get_ylim()]),
np.max([ax.get_xlim(), ax.get_ylim()])]
ax.plot(lims, lims, 'k-')
ax.set_aspect('equal')
ax.set_xlim(lims)
ax.set_ylim(lims)
plt.xticks(size=8)
plt.yticks(size=8)
plt.show()
osm_seq_genes = np.intersect1d(osmFISH_data.columns,seqFISH_data.columns)
Compare_Correlations(SpaGE_osmCorr[osm_seq_genes],SpaGE_seqCorr[osm_seq_genes])
plt.xlabel('Spearman Correlation osmFISH',size=12)
plt.ylabel('Spearman Correlation seqFISH',size=12)
plt.show()
fig, ax = plt.subplots(figsize=(3.7, 4.5))
ax.boxplot([SpaGE_osmCorr[osm_seq_genes],SpaGE_seqCorr[osm_seq_genes]],widths=0.5)
y = SpaGE_osmCorr[osm_seq_genes]
x = np.random.normal(1, 0.05, len(y))
plt.plot(x, y, 'g.', alpha=0.2)
y = SpaGE_seqCorr[osm_seq_genes]
x = np.random.normal(2, 0.05, len(y))
plt.plot(x, y, 'g.', alpha=0.2)
plt.xticks((1,2),('osmFISH','seqFISH'),size=12)
plt.yticks(size=8)
plt.gca().set_ylim([-0.5,1])
plt.ylabel('Spearman Correlation',size=12)
#ax.set_aspect(aspect=3)
_,p_val = st.wilcoxon(SpaGE_osmCorr[osm_seq_genes],SpaGE_seqCorr[osm_seq_genes])
plt.text(2,np.max(plt.gca().get_ylim()),'%1.2e'%p_val,color='black',size=8)
plt.show()
|
{"hexsha": "76d7f42aadff293229047298824135aa97f40b4b", "size": 5040, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmark/seqFISH_AllenVISp/Performance_evaluation.py", "max_stars_repo_name": "tabdelaal/SpaGE", "max_stars_repo_head_hexsha": "7533cbf2275c3049561e8a17b9f7866e0e324743", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-11-15T05:56:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:59:58.000Z", "max_issues_repo_path": "benchmark/seqFISH_AllenVISp/Performance_evaluation.py", "max_issues_repo_name": "tabdelaal/SpaGE", "max_issues_repo_head_hexsha": "7533cbf2275c3049561e8a17b9f7866e0e324743", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmark/seqFISH_AllenVISp/Performance_evaluation.py", "max_forks_repo_name": "tabdelaal/SpaGE", "max_forks_repo_head_hexsha": "7533cbf2275c3049561e8a17b9f7866e0e324743", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-21T09:45:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-21T09:45:32.000Z", "avg_line_length": 31.3043478261, "max_line_length": 96, "alphanum_fraction": 0.7172619048, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1759}
|
[STATEMENT]
lemma wf\<^sub>s\<^sub>s\<^sub>t_prefix[dest]: "wf'\<^sub>s\<^sub>s\<^sub>t V (S@S') \<Longrightarrow> wf'\<^sub>s\<^sub>s\<^sub>t V S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wf'\<^sub>s\<^sub>s\<^sub>t V (S @ S') \<Longrightarrow> wf'\<^sub>s\<^sub>s\<^sub>t V S
[PROOF STEP]
by (induct S rule: wf'\<^sub>s\<^sub>s\<^sub>t.induct) auto
|
{"llama_tokens": 169, "file": "Stateful_Protocol_Composition_and_Typing_Stateful_Strands", "length": 1}
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import time
EXP_ID = '2021-09-10 16:48:03.161207'
# Load odometry data
odom_addr = f'train/odometry/odometry_log_{EXP_ID}.csv'
odom_data_df = pd.read_csv(odom_addr, delimiter=',')
# Set up odometry as an image
map_res = 10
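# Offsets below shift all odometry coordinates to be non-negative before scaling to pixels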
min_x = np.abs(odom_data_df['x'].min())
min_y = np.abs(odom_data_df['y'].min())
# Load video data
video_addr = f'train/videos/kairos_minerl_{EXP_ID}.mp4'
cap = cv2.VideoCapture(video_addr)
frame_counter = 1
# Setup video recorder
out = cv2.VideoWriter(f'tmp/odom_{EXP_ID}.mp4',cv2.VideoWriter_fourcc(*'DIVX'), 15, (1024, 512))
# Read until video is completed
while(cap.isOpened()):
# Capture frame-by-frame
ret, frame = cap.read()
if ret == True:
# Build odometry image for this frame
# Convert odometry to pixels
x = (map_res*(odom_data_df['x'].iloc[:frame_counter]+min_x)).astype(int).values
y = (map_res*(odom_data_df['y'].iloc[:frame_counter]+min_y)).astype(int).values
# Setup odometry image with maximum x or y dimension
max_coord = max(x.max(), y.max())
odom_img = np.zeros((max_coord+1, max_coord+1, 3), np.uint8)
# Substitute coordinates as white pixels
odom_img[x, y] = 255
# Add circle to current robot position
x_pos = y[-1]
y_pos = x[-1]
odom_img = cv2.circle(odom_img, (x_pos, y_pos), 5, (0,255,0), -1)
# Make sure image always has the same size
odom_img = cv2.resize(odom_img, (512, 512), interpolation=cv2.INTER_LINEAR)
# Add text with odometry info
font = cv2.FONT_HERSHEY_SIMPLEX
thickness = 1
font_scale = 0.5
text_color = (255, 255, 255)
odom_curr_x = odom_data_df['x'].iloc[frame_counter]
odom_curr_y = odom_data_df['y'].iloc[frame_counter]
odom_curr_heading = odom_data_df['heading'].iloc[frame_counter]
odom_img = cv2.putText(odom_img, f'x: {odom_curr_x:.2f} m', (10, 20), font,
font_scale, text_color, thickness, cv2.LINE_AA)
odom_img = cv2.putText(odom_img, f'y: {odom_curr_y:.2f} m', (10, 40), font,
font_scale, text_color, thickness, cv2.LINE_AA)
odom_img = cv2.putText(odom_img, f'heading: {odom_curr_heading:.2f} deg', (10, 60), font,
font_scale, text_color, thickness, cv2.LINE_AA)
# Concatenate with frame and display
frame_display = np.concatenate((frame, odom_img), axis=1)
# Write to disk
out.write(frame_display)
# # Display image
# cv2.imshow('frame', frame_display)
# if cv2.waitKey(1) == ord('q'):
# break
# Next frame
frame_counter += 1
# Break the loop
else:
break
# Release video writer and capture
out.release()
cap.release()
|
{"hexsha": "9ca691fe1ba68201ccb13bdc8d57f7ee2b07a3c5", "size": 2879, "ext": "py", "lang": "Python", "max_stars_repo_path": "kairos_minerl/src/kairos_minerl/viz_odometry.py", "max_stars_repo_name": "viniciusguigo/kairos_minerl_basalt", "max_stars_repo_head_hexsha": "8f76e1d293dbcf62653ed3f7f326bd090a0af6f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2021-12-07T09:52:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T20:08:44.000Z", "max_issues_repo_path": "kairos_minerl/src/kairos_minerl/viz_odometry.py", "max_issues_repo_name": "viniciusguigo/kairos_minerl_basalt", "max_issues_repo_head_hexsha": "8f76e1d293dbcf62653ed3f7f326bd090a0af6f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kairos_minerl/src/kairos_minerl/viz_odometry.py", "max_forks_repo_name": "viniciusguigo/kairos_minerl_basalt", "max_forks_repo_head_hexsha": "8f76e1d293dbcf62653ed3f7f326bd090a0af6f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-11T18:29:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T18:46:42.000Z", "avg_line_length": 32.7159090909, "max_line_length": 98, "alphanum_fraction": 0.6269538034, "include": true, "reason": "import numpy", "num_tokens": 812}
|
[STATEMENT]
lemma vlrestriction_VLambda: "(\<lambda>a\<in>\<^sub>\<circ>A. f a) \<restriction>\<^sup>l\<^sub>\<circ> B = (\<lambda>a\<in>\<^sub>\<circ>A \<inter>\<^sub>\<circ> B. f a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. VLambda A f \<restriction>\<^sup>l\<^sub>\<circ> B = VLambda (A \<inter>\<^sub>\<circ> B) f
[PROOF STEP]
by auto
|
{"llama_tokens": 136, "file": "CZH_Foundations_czh_sets_CZH_Sets_BRelations", "length": 1}
|
import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs
import numpy as np
import pickle
import os,json
from samples import read_clips
from laplace_temporal_net import create_r3d
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
gpus=[0]
video_json_path = 'three_samples.json'
data_root = '/home/pr606/Pictures/ucf_images'
checkpoint_path = '/home/pr606/YUAN/history/tf-R2plus1D/saved_models/model.ckpt'
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
_SAMPLE_VIDEO_FRAMES = 32
_IMAGE_SIZE = 224
NUM_CLASS = 101
with open(video_json_path) as f1:
annotation_json = json.load(f1)
data_json = annotation_json["database"]
label_list = annotation_json["labels"]
video_name = 'v_GolfSwing_g04_c03'
video_length = data_json['v_GolfSwing_g04_c03']['annotations']['n_frames']
labe = data_json['v_GolfSwing_g04_c03']['annotations']['label']
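# Split the video into non-overlapping clips of _SAMPLE_VIDEO_FRAMES frames, one clip per batch entry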
batchsize_gpu_0 = video_length // _SAMPLE_VIDEO_FRAMES
frame_indexs = range(video_length)
video_path = os.path.join(data_root, labe, video_name)
assignments = [batchsize_gpu_0]
batch_size = 0
for n in assignments:
batch_size += n
assert batch_size >= 1
pathes = [video_path]*batchsize_gpu_0
one_batch = read_clips.get_batch(pathes,[frame_indexs[i*_SAMPLE_VIDEO_FRAMES:(i+1)*_SAMPLE_VIDEO_FRAMES] for i in range(batchsize_gpu_0)])
data = tf.placeholder(shape=(batch_size, _SAMPLE_VIDEO_FRAMES, _IMAGE_SIZE, _IMAGE_SIZE, 3), dtype=tf.float32,name='clips')
label = tf.placeholder(shape=(batch_size, NUM_CLASS), dtype=tf.int32,name='labels')
data_list = tf.split(data,assignments,axis=0,name='sep_0_for_%d_gpus'%len(gpus))
label_list = tf.split(label,assignments,axis=0,name='sep_1_for_%d_gpus'%len(gpus))
variable_map = {}
regular_map = {}
extra_map = {}
results = []
for i in gpus:
with tf.device("/gpu:%d" % i):
with tf.variable_scope('parall_model',reuse=tf.AUTO_REUSE):
result = create_r3d(
data=data_list[i],
model_depth=18,
num_labels=NUM_CLASS,
num_input_channels=1,
is_decomposed=False,
no_bias=1,
is_test=1,
spatial_bn_mom=0.9,
final_spatial_kernel=14,
final_temporal_kernel=4,
)
if i == 0:
# Note: key of variable_map should be specific enough to match the unique variable
for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
print(var.name, var.shape)
if 'L101' in var.name:
extra_map[var.name[13:].replace(":0", '')] = var
continue
elif 'conv1_middle/kernel' in var.name or 'conv1/kernel' in var.name:
extra_map[var.name[13:].replace(":0", '')] = var
continue
elif 'conv1_middle_spatbn_relu' in var.name:
extra_map[var.name[13:].replace(":0", '')] = var
continue
elif 'Lp_conv_' in var.name or 'global_step' in var.name:
continue
elif 'gaussian_filter' in var.name:
continue
elif 'kernel:0' in var.name:
regular_map[var.name[13:].replace(":0", '')] = var
continue
else:
variable_map[var.name[13:].replace(":0", '')] = var
labels = tf.stop_gradient(label_list[i],
name='disallow_grad_labels%d' % i)
top1_prediction = tf.nn.in_top_k(result, tf.argmax(labels, 1), 1)
top5_prediction = tf.nn.in_top_k(result, tf.argmax(labels, 1), 5)
else:
labels = tf.stop_gradient(label_list[i], name='disallow_grad_labels%d' % i)
top1_prediction = tf.concat([top1_prediction, tf.nn.in_top_k(result, tf.argmax(labels, 1), 1)],
axis=0)
top5_prediction = tf.concat([top5_prediction, tf.nn.in_top_k(result, tf.argmax(labels, 1), 5)],
axis=0)
pass
tf.get_variable_scope().reuse_variables()
results.append(result)
# vs.get_variable_scope().reuse_variables()
acc_top1 = tf.reduce_mean(tf.cast(top1_prediction, tf.float32))
acc_top5 = tf.reduce_mean(tf.cast(top5_prediction, tf.float32))
rgb_resaver = tf.train.Saver(var_list=dict(variable_map, **regular_map, **extra_map), reshape=True)
nodes = tf.get_collection('source_node')
features = []
for var in nodes:
# parall_model/final_avg/AvgPool3D:0
# features.append(var)
if '_com2_GPA' in var.name:
com2_gpa = var
continue
# parall_model_1/final_avg/AvgPool3D:0
# features.append(var)
if '_com4_GPA' in var.name:
com4_gpa = var
continue
# assert len(features) != 0
final_results = results[0] if len(results) == 1 else tf.concat(results, axis=0, name='merge_result_across_gpus')
# final_features = features[0] if len(features) == 1 else tf.concat(features, axis=0, name='merge_features_across_gpus')
global_init = tf.global_variables_initializer()
config = tf.ConfigProto(allow_soft_placement=True)
# config.gpu_options.allow_growth=True
config.gpu_options.per_process_gpu_memory_fraction = 0.85
with tf.Session(config=config) as sess:
sess.run(global_init)
feed_dict = {}
if os.path.exists(checkpoint_path+'.index'):
rgb_resaver.restore(sess, checkpoint_path)
print('restoring checkpoint from {} '.format(checkpoint_path))
tf.logging.info('RGB checkpoint restored')
else:
assert False, 'cannot find any checkpoint files'
all_activations = {'com2_gpa': None, 'com4_gpa': None, 'mapping': None}
feed_dict[data] = one_batch #[:, :, :, :, np.newaxis]
feat2,feat4 = sess.run([com2_gpa,com4_gpa], feed_dict=feed_dict)
all_activations['com2_gpa'] = feat2
all_activations['com4_gpa'] = feat4
all_activations['mapping'] = video_path
print(all_activations['com2_gpa'].shape)
print(all_activations['com4_gpa'].shape)
with open(video_name+'.pickle', 'wb') as handle:
pickle.dump(all_activations, handle)
|
{"hexsha": "6a17d806ac31c4144395144b5f8e1b1ef7c3944d", "size": 6815, "ext": "py", "lang": "Python", "max_stars_repo_path": "samples/fetch_gpa_feat.py", "max_stars_repo_name": "shenqiang-Yuan/GPA", "max_stars_repo_head_hexsha": "ad8bb4540ef4126c817c5fe007dad93a5a7ddc2a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "samples/fetch_gpa_feat.py", "max_issues_repo_name": "shenqiang-Yuan/GPA", "max_issues_repo_head_hexsha": "ad8bb4540ef4126c817c5fe007dad93a5a7ddc2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "samples/fetch_gpa_feat.py", "max_forks_repo_name": "shenqiang-Yuan/GPA", "max_forks_repo_head_hexsha": "ad8bb4540ef4126c817c5fe007dad93a5a7ddc2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5424836601, "max_line_length": 142, "alphanum_fraction": 0.587674248, "include": true, "reason": "import numpy", "num_tokens": 1585}
|
/**
* Copyright 2015 Christian Dreher (dreher@charlydelta.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <exception>
#include <iostream>
#include <time.h>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/lexical_cast.hpp>
#include <nddlgen/controllers/ObjectFactory.h>
#include <nddlgen.h>
#include <nddlgen/exceptions/FileAlreadyExistsException.hpp>
// CLI argument helpers
void processArguments(int argc, char* argv[]);
std::string getOptionValue(int argc, char* argv[], std::string longOptionName);
bool isOptionSet(int argc, char* argv[], std::string longOptionName, std::string shortOptionName);
// Output helpers
void printUsage(std::string calledName);
void printHelp(std::string calledName);
void printUsageOrHelp(std::string calledName, bool help);
void printLicense();
void print(std::string);
void printNewLine();
void printNewLine(std::string);
// Coloring text helpers
std::string color(std::string text, int colorCode);
std::string red(std::string text);
std::string green(std::string text);
std::string yellow(std::string text);
std::string blue(std::string text);
// Version of nddlgen-cli
std::string _nddlgenCliVersion = "2.0.0";
// Supported nddlgen-core version (major)
std::string _supportedNddlgenCoreVersion = "2";
// CLI arguments
bool _help = false;
bool _coreVersion = false;
bool _version = false;
std::string _inputSdfFile = "";
std::string _inputIsdFile = "";
std::string _outputPath = "";
bool _verbose = false;
bool _forceOverwrite = false;
int main(int argc, char* argv[])
{
std::string calledName(argv[0]);
// Check if program was called with arguments.
if (argc == 1)
{
printUsage(calledName);
return EXIT_FAILURE;
}
// Process arguments and initialize variables.
processArguments(argc, argv);
// Check if help was called.
if (_help)
{
printHelp(calledName);
return EXIT_SUCCESS;
}
// Check if version of core was requested.
if (_coreVersion)
{
std::cout << nddlgen::VERSION << std::endl;
return EXIT_SUCCESS;
}
	// Check if the nddlgen-cli version was requested.
if (_version)
{
std::cout << _nddlgenCliVersion << std::endl;
return EXIT_SUCCESS;
}
	// Check if mandatory parameters were set
if (_inputSdfFile == "" || _inputIsdFile == "")
{
printUsage(calledName);
return EXIT_FAILURE;
}
// Print header if verbose
printNewLine(yellow("nddlgen-cli v" + _nddlgenCliVersion + " using nddlgen-core v"
+ nddlgen::VERSION));
// Print license header if verbose
printLicense();
// Begin time measure
clock_t time = clock();
// Create nddlgen controller configuration
nddlgen::ControllerConfigPtr cc(new nddlgen::ControllerConfig());
nddlgen::controllers::ObjectFactoryPtr modelFactory(new nddlgen::controllers::ObjectFactory());
// Initialize controller config (setting adapter name and version, input files, output path)
cc->setAdapter("nddlgen-cli v" + _nddlgenCliVersion);
cc->setSdfInputFile(_inputSdfFile);
cc->setIsdInputFile(_inputIsdFile);
cc->setOutputFilesPath(_outputPath);
cc->setObjectFactory(modelFactory);
	// Throw warning if the installed nddlgen-core version differs too much from the supported version
if (!boost::starts_with(nddlgen::VERSION, _supportedNddlgenCoreVersion + "."))
{
printNewLine(yellow("Warning. This version of nddlgen-cli was intended for nddlgen-core v"
+ _supportedNddlgenCoreVersion + ".x"));
printNewLine(yellow("If you experience problems, try one of the supported versions of nddlgen-core."));
printNewLine();
}
// Create nddlgen controller, passing config
nddlgen::Controller* c = new nddlgen::Controller(cc);
// Run the workflow. All outputs except for warnings and exceptions work only if verbose.
// Catch any exceptions and print them (even if not verbose).
try
{
printNewLine("Processing files\t\t\t\t" + yellow(cc->getSdfInputFileName()) + ", ");
printNewLine("\t\t\t\t\t\t" + yellow(cc->getIsdInputFileName()));
printNewLine();
print("Parsing SDF file...\t\t\t\t");
c->parseSdfInputFile();
printNewLine(green("[OK]"));
print("Parsing ISD file...\t\t\t\t");
c->parseIsdInputFile();
printNewLine(green("[OK]"));
print("Building domain description...\t\t\t");
c->buildDomainDescription();
printNewLine(green("[OK]"));
print("Generating NDDL model file...\t\t\t");
try
{
c->writeNddlModelFile(_forceOverwrite);
}
catch (const nddlgen::exceptions::FileAlreadyExistsException& e)
{
std::string errorMessage(e.what());
std::string overwrite;
printNewLine(yellow("[WARN]"));
printNewLine();
if (_verbose)
{
std::cout << yellow("Warning. " + errorMessage) << std::endl << "Overwrite existing file? [y/n] ";
}
else
{
std::cout << "Warning. " << errorMessage << std::endl << "Overwrite existing file? [y/n] ";
}
std::cin >> overwrite;
printNewLine();
print("Overwriting existing NDDL model file...\t\t");
if (overwrite == "Y" || overwrite == "y")
{
c->writeNddlModelFile(true);
}
else
{
throw std::runtime_error("Aborted due to user request.");
}
}
printNewLine(green("[OK]"));
print("Generating NDDL initial state file...\t\t");
try
{
c->writeNddlInitialStateFile(_forceOverwrite);
}
catch (const nddlgen::exceptions::FileAlreadyExistsException& e)
{
std::string errorMessage(e.what());
std::string overwrite;
printNewLine(yellow("[WARN]"));
printNewLine();
if (_verbose)
{
std::cout << yellow("Warning. " + errorMessage) << std::endl << "Overwrite existing file? [y/n] ";
}
else
{
std::cout << "Warning. " << errorMessage << std::endl << "Overwrite existing file? [y/n] ";
}
std::cin >> overwrite;
printNewLine();
print("Overwriting existing NDDL initial state file...\t");
if (overwrite == "Y" || overwrite == "y")
{
c->writeNddlInitialStateFile(true);
}
else
{
throw std::runtime_error("Aborted due to user request.");
}
}
printNewLine(green("[OK]"));
// Stop time measure
time = clock() - time;
std::string executionTime = boost::lexical_cast<std::string>((((float) time) / CLOCKS_PER_SEC) * 1000);
printNewLine();
printNewLine(green("NDDL files successfully generated."));
printNewLine();
printNewLine("Saved files in path \t\t\t\t" + yellow(cc->getOutputFilesPath()));
printNewLine("Domain models saved as \t\t\t\t" + yellow(cc->getOutputModelFileName()));
printNewLine("Domain initial state saved as \t\t\t" + yellow(cc->getOutputInitialStateFileName()));
printNewLine("Generating NDDL files took " + executionTime + "ms");
boost::checked_delete(c);
return EXIT_SUCCESS;
}
catch (const std::exception& e)
{
std::string errorMsg = e.what();
printNewLine(red("[FAIL]"));
if (_verbose)
{
std::cout << std::flush;
std::cerr << std::endl << red("Error. " + errorMsg) << std::endl;
}
else
{
std::cerr << "Error. " << errorMsg << std::endl;
}
boost::checked_delete(c);
return EXIT_FAILURE;
}
}
void processArguments(int argc, char* argv[])
{
_help = isOptionSet(argc, argv, "help", "h");
_coreVersion = isOptionSet(argc, argv, "core-version", "c");
_version = isOptionSet(argc, argv, "version", "v");
_verbose = isOptionSet(argc, argv, "verbose", "x");
_forceOverwrite = isOptionSet(argc, argv, "force-overwrite", "f");
_inputSdfFile = getOptionValue(argc, argv, "in-sdf");
_inputIsdFile = getOptionValue(argc, argv, "in-isd");
_outputPath = getOptionValue(argc, argv, "out-path");
}
std::string getOptionValue(int argc, char* argv[], std::string longOptionName)
{
for (int i = 1; i < argc; i++)
{
std::string arg = argv[i];
if (arg == "--" + longOptionName)
{
if (i + 1 == argc)
{
return "";
}
return argv[i + 1];
}
}
return "";
}
bool isOptionSet(int argc, char* argv[], std::string longOptionName, std::string shortOptionName)
{
for (int i = 1; i < argc; i++)
{
std::string arg = argv[i];
// If option is explicitly set (e.g. "-x -y -z")
if (arg == "--" + longOptionName || arg == "-" + shortOptionName)
{
return true;
}
// If option is implicitly set (e.g. "-xyz")
else if (!boost::starts_with(arg, "--") && boost::starts_with(arg, "-")
&& boost::contains(arg, shortOptionName))
{
return true;
}
}
return false;
}
void printUsage(std::string calledName)
{
printUsageOrHelp(calledName, false);
}
void printHelp(std::string calledName)
{
printUsageOrHelp(calledName, true);
}
void printUsageOrHelp(std::string calledName, bool help)
{
std::cout << "nddlgen-cli v" << _nddlgenCliVersion << " using nddlgen-core v"
<< nddlgen::VERSION << std::endl;
std::cout << "nddlgen is a program suite to generate .nddl files out of Gazebo's .sdf"
<< std::endl << std::endl;
std::cout << "Usage:" << std::endl;
std::cout << " " << calledName << " [options] --in-sdf <sdf_file> --in-isd <isd_file> [--out-path <output_path>]"
<< std::endl << std::endl;
if (help)
{
std::cout << "Options:" << std::endl;
std::cout << " --help, -h Print help" << std::endl;
std::cout << " --core-version, -c Version of nddlgen-core" << std::endl;
std::cout << " --version, -v Version of nddlgen-cli" << std::endl;
std::cout << " --verbose, -x Verbose output" << std::endl;
std::cout << " --force-overwrite, -f Force overwrite if files exist" << std::endl << std::endl;
std::cout << "Arguments:" << std::endl;
std::cout << " --in-sdf Input SDF file (relative or absolute). Mandatory." << std::endl;
std::cout << " --in-isd Input ISD file (relative or absolute). Mandatory." << std::endl;
std::cout << " --out-path Output path (relative or absolute). If not set, the path of --in-sdf is used"
<< std::endl << std::endl;
}
else
{
std::cout << "For more help, type:" << std::endl;
std::cout << " " << calledName << " -h" << std::endl << std::endl;
}
std::cout << "nddlgen Homepage:" << std::endl;
std::cout << " <" << nddlgen::utilities::Meta::NDDLGEN_PROJECT_HOMEPAGE << ">" << std::endl;
std::cout << "Author:" << std::endl;
std::cout << " Christian Dreher <" << nddlgen::utilities::Meta::AUTHOR_CHR_DREHER_EMAIL << ">" << std::endl;
std::cout << "Support:" << std::endl;
std::cout << " <" << nddlgen::utilities::Meta::NDDLGEN_SUPPORT_EMAIL << ">" << std::endl;
}
void printLicense()
{
if (_verbose)
{
std::cout << std::endl;
std::cout << blue(" Copyright 2016 Christian Dreher (dreher@charlydelta.org)") << std::endl;
std::cout << std::endl;
std::cout << blue(" This software is distributed on an \"AS IS\" BASIS, WITHOUT") << std::endl;
std::cout << blue(" WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.") << std::endl;
std::cout << std::endl;
}
}
void print(std::string text)
{
if (_verbose)
{
std::cout << text;
}
}
void printNewLine()
{
if (_verbose)
{
printNewLine("");
}
}
void printNewLine(std::string text)
{
if (_verbose)
{
std::cout << text << std::endl;
}
}
std::string color(std::string text, int colorCode)
{
return "\033[1;3" + boost::lexical_cast<std::string>(colorCode) + "m" + text + "\033[0m";
}
std::string red(std::string text)
{
return color(text, 1);
}
std::string green(std::string text)
{
return color(text, 2);
}
std::string yellow(std::string text)
{
return color(text, 3);
}
std::string blue(std::string text)
{
return color(text, 4);
}
|
{"hexsha": "648ac5077e3428d4520c74a82540ffdc37a39c43", "size": 11872, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Main.cpp", "max_stars_repo_name": "crgdreher/nddlgen-cli", "max_stars_repo_head_hexsha": "3efbde7ee95bd51f76ba3a067725bc0f5f3d010c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Main.cpp", "max_issues_repo_name": "crgdreher/nddlgen-cli", "max_issues_repo_head_hexsha": "3efbde7ee95bd51f76ba3a067725bc0f5f3d010c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Main.cpp", "max_forks_repo_name": "crgdreher/nddlgen-cli", "max_forks_repo_head_hexsha": "3efbde7ee95bd51f76ba3a067725bc0f5f3d010c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3237250554, "max_line_length": 115, "alphanum_fraction": 0.6628200809, "num_tokens": 3369}
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
u"""
Fast nearest neighbor search in Python using a kd-tree
author Atsushi Sakai
usage: see the test code below
license: MIT
"""
import numpy as np
import scipy.spatial
class NNS:
def __init__(self, data):
# store kd-tree
self.tree = scipy.spatial.cKDTree(data)
def search(self, inp):
u"""
        Search nearest neighbors.
        inp: query data; a single point, or multiple points stored as columns
"""
if len(inp.shape) >= 2: # multi input
index = []
dist = []
for i in inp.T:
idist, iindex = self.tree.query(i)
index.append(iindex)
dist.append(idist)
return index, dist
else:
dist, index = self.tree.query(inp)
return index, dist
def search_in_distance(self, inp, r):
u"""
        Find the indices of all points within distance r of inp.
"""
index = self.tree.query_ball_point(inp, r)
return index
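def demo_radius_search():
    # Illustrative sketch, not part of the original module: search_in_distance
    # is not exercised by the test functions below, so this shows its use.
    # The query point and the radius of 0.05 are arbitrary choices.
    data2d = np.random.random(10000).reshape(5000, 2)
    nns = NNS(data2d)
    query = np.random.random(2)
    index = nns.search_in_distance(query, 0.05)
    print("%d points within radius 0.05 of %s" % (len(index), query))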
def test_2d():
import matplotlib.pyplot as plt
data2d = np.random.random(10000).reshape(5000, 2)
print(data2d)
# input2d = np.random.random(2).reshape(2, 1)
input2d = np.random.random(2)
print(input2d)
nns1 = NNS(data2d)
index, dist = nns1.search(input2d)
print(index, dist)
# data2d =
plt.plot(data2d[:, 0], data2d[:, 1], ".r")
plt.plot(input2d[0], input2d[1], "xk")
plt.plot(data2d[index, 0], data2d[index, 1], "xb")
plt.show()
def test_3d():
# 3d
data3d = np.random.random(15000).reshape(5000, 3)
print(data3d)
# input2d = np.random.random(2).reshape(2, 1)
input3d = np.random.random(3)
print(input3d)
nns2 = NNS(data3d)
index, dist = nns2.search(input3d)
print(index, dist)
def test():
data2d = np.random.random(10000).reshape(5000, 2)
print(data2d)
# input2d = np.random.random(2).reshape(2, 1)
input2d = np.random.random(6).reshape(2, 3)
print(input2d)
nns = NNS(data2d)
index, dist = nns.search(input2d)
print(index, dist)
if __name__ == '__main__':
test()
|
{"hexsha": "fa979d99d943c91e162afdc3a7ed34e0d14ca7ae", "size": 2121, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyfastnns.py", "max_stars_repo_name": "AtsushiSakai/pyfastnns", "max_stars_repo_head_hexsha": "d3baabba8d2639c6b065bcfcf756adc93b4e8326", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-03-05T07:19:02.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-19T06:39:01.000Z", "max_issues_repo_path": "pyfastnns.py", "max_issues_repo_name": "AtsushiSakai/pyfastnns", "max_issues_repo_head_hexsha": "d3baabba8d2639c6b065bcfcf756adc93b4e8326", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyfastnns.py", "max_forks_repo_name": "AtsushiSakai/pyfastnns", "max_forks_repo_head_hexsha": "d3baabba8d2639c6b065bcfcf756adc93b4e8326", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.0094339623, "max_line_length": 54, "alphanum_fraction": 0.5733144743, "include": true, "reason": "import numpy,import scipy", "num_tokens": 629}
|
import numpy as np
from numba import jit
from ..constants import Constants as c
from .universal_propagate import propagateUniversal
__all__ = [
"addLightTime",
"addStellarAberration"
]
MU = c.MU
C = c.C
@jit(["Tuple((f8[:,:], f8[:]))(f8[:,:], f8[:], f8[:,:], f8, f8, i8, f8)"], nopython=True, cache=True)
def addLightTime(orbits, t0, observer_positions, lt_tol=1e-10, mu=MU, max_iter=1000, tol=1e-15):
"""
    When generating an ephemeris, orbits need to be propagated backwards to the time
    at which the observed light was emitted or reflected from the object towards the observer.
    Light time corrections must be applied to orbits expressed in an inertial frame (i.e., orbits
    must be barycentric).
Parameters
----------
orbits : `~numpy.ndarray` (N, 6)
Barycentric orbits in cartesian elements to correct for light time delay.
t0 : `~numpy.ndarray` (N)
Epoch at which orbits are defined.
observer_positions : `numpy.ndarray` (N, 3)
Location of the observer in barycentric cartesian elements at the time of observation.
lt_tol : float, optional
        Iterate the light travel time solution until successive estimates agree to within this value (units of days).
mu : float, optional
Gravitational parameter (GM) of the attracting body in units of
AU**3 / d**2.
max_iter : int, optional
Maximum number of iterations over which to converge for propagation.
tol : float, optional
        Numerical tolerance to which to compute the universal anomaly during propagation using the Newton-Raphson
method.
Returns
-------
corrected_orbits : `~numpy.ndarray` (N, 6)
Orbits adjusted for light travel time.
lt : `~numpy.ndarray` (N)
Light time correction (t0 - corrected_t0).
"""
corrected_orbits = np.zeros((len(orbits), 6))
lts = np.zeros(len(orbits))
num_orbits = len(orbits)
for i in range(num_orbits):
# Set up running variables
orbit_i = orbits[i:i+1, :]
observer_position_i = observer_positions[i:i+1, :]
t0_i = t0[i:i+1]
dlt = 1e30
lt_i = 1e30
while dlt > lt_tol:
# Calculate topocentric distance
rho = np.linalg.norm(orbit_i[:, :3] - observer_position_i)
# Calculate initial guess of light time
lt = rho / C
# Calculate difference between previous light time correction
# and current guess
dlt = np.abs(lt - lt_i)
# Propagate backwards to new epoch
orbit = propagateUniversal(orbits[i:i+1, :], t0[i:i+1], t0[i:i+1] - lt, mu=mu, max_iter=max_iter, tol=tol)
# Update running variables
t0_i = orbit[:, 1]
orbit_i = orbit[:, 2:]
lt_i = lt
corrected_orbits[i, :] = orbit[0, 2:]
lts[i] = lt
return corrected_orbits, lts
@jit(["f8[:,:](f8[:,:], f8[:,:])"], nopython=True, cache=True)
def addStellarAberration(orbits, observer_states):
"""
The motion of the observer in an inertial frame will cause an object
to appear in a different location than its true geometric location. This
aberration is typically applied after light time corrections have been added.
The velocity of the input orbits are unmodified only the position
vector is modified with stellar aberration.
Parameters
----------
orbits : `~numpy.ndarray` (N, 6)
Orbits in barycentric cartesian elements.
observer_states : `~numpy.ndarray` (N, 6)
Observer states in barycentric cartesian elements.
Returns
-------
rho_aberrated : `~numpy.ndarray` (N, 3)
The topocentric position vector for each orbit with
added stellar aberration.
"""
topo_states = orbits - observer_states
rho_aberrated = topo_states[:, :3].copy()
for i in range(len(orbits)):
v_obs = observer_states[i, 3:]
beta = v_obs / C
gamma_inv = np.sqrt(1 - np.linalg.norm(beta)**2)
delta = np.linalg.norm(topo_states[i, :3])
rho = topo_states[i, :3] / delta
rho_aberrated[i, :] = (gamma_inv * rho + beta + np.dot(rho, beta) * beta / (1 + gamma_inv)) / (1 + np.dot(rho, beta))
rho_aberrated[i, :] *= delta
return rho_aberrated
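if __name__ == "__main__":
    # Minimal usage sketch, illustrative only; run as
    # `python -m thor.orbits.aberrations` so the relative imports above resolve.
    # The orbit, epoch and observer state below are arbitrary made-up values in
    # AU and AU/day, not real data.
    orbits = np.array([[1.0, 0.1, 0.0, 0.0, 0.017, 0.0]])
    t0 = np.array([59000.0])
    observer_states = np.array([[0.9, 0.0, 0.0, 0.0, 0.017, 0.0]])
    corrected_orbits, lts = addLightTime(orbits, t0, observer_states[:, :3])
    rho_aberrated = addStellarAberration(corrected_orbits, observer_states)
    print("light travel time (days):", lts)
    print("aberrated topocentric position (AU):", rho_aberrated)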
|
{"hexsha": "0636601bfcc2b9dabe271b7798b5b2c06b755633", "size": 4256, "ext": "py", "lang": "Python", "max_stars_repo_path": "thor/orbits/aberrations.py", "max_stars_repo_name": "KatKiker/thor", "max_stars_repo_head_hexsha": "ffc8ab3fbaa8af046f531e8111907a891998d14b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thor/orbits/aberrations.py", "max_issues_repo_name": "KatKiker/thor", "max_issues_repo_head_hexsha": "ffc8ab3fbaa8af046f531e8111907a891998d14b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thor/orbits/aberrations.py", "max_forks_repo_name": "KatKiker/thor", "max_forks_repo_head_hexsha": "ffc8ab3fbaa8af046f531e8111907a891998d14b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-29T15:20:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-29T15:20:34.000Z", "avg_line_length": 35.173553719, "max_line_length": 125, "alphanum_fraction": 0.6299342105, "include": true, "reason": "import numpy,from numba", "num_tokens": 1109}
|
using Knet
# sample usage:
# m128 = MLP([3644,128,1])
# mlprun(data; model=m128, epochs=20)
# define a model type with weights and optimization params so we can
# use the Adam optimizer which works faster than SGD:
type MLP
weights
oparams
function MLP(sizes; optimizer=Adam, winit=0.1, atype=Array{Float32})
m = new(Any[],Any[])
for i=2:length(sizes)
w = convert(atype,winit*randn(sizes[i],sizes[i-1]))
b = convert(atype,zeros(sizes[i],1))
push!(m.weights, w)
push!(m.oparams, optimizer(w))
push!(m.weights, b)
push!(m.oparams, optimizer(b))
end
return m
end
end
# y = mlppred(model.weights, x)
function mlppred(w,x)
for i=1:2:length(w)-2
x = max(0, w[i]*x .+ w[i+1])
end
return w[end-1]*x .+ w[end]
end
# this is the logistic loss
mlploss(w,x,y) = mean(log(1 .+ exp(-y .* mlppred(w,x))))
mlpgrad = grad(mlploss)
# training loop, does one pass over data, modifies mlp in place
function train!(m::MLP, data)
for (x,y) in data
dw = mlpgrad(m.weights, x, y)
for i in 1:length(m.weights)
(m.weights[i],m.oparams[i]) = update!(m.weights[i], dw[i], m.oparams[i])
end
end
end
# returns logistic loss for model on data
function test(m::MLP, data)
sumloss = numloss = 0
for (x,y) in data
sumloss += mlploss(m.weights, x, y)
numloss += 1
end
sumloss / numloss
end
# returns classification accuracy for model on data
function acc(m::MLP, data)
sumloss = numloss = 0
for (x,y) in data
z = mlppred(m.weights,x)
sumloss += mean((z .* y) .> 0)
numloss += 1
end
sumloss / numloss
end
# sample use script
function mlprun(data; epochs=10, sizes=[3644,128,1], model=MLP(sizes; atype = typeof(data[1][1][1])))
msg(e) = println((e,map(d->acc(model,d),data)...,map(d->test(model,d),data)...)); msg(0)
for epoch = 1:epochs
train!(model, data[1])
msg(epoch)
end
return model
end
# missing in Knet:
Base.mean(a::KnetArray) = sum(a)/length(a)
Base.mean(a::AutoGrad.Rec) = sum(a)/length(a)
# to convert models:
function cpu2gpu(m::MLP)
g = deepcopy(m)
for i=1:length(g.weights)
g.weights[i] = KnetArray(g.weights[i])
g.oparams[i].fstm = KnetArray(g.oparams[i].fstm)
g.oparams[i].scndm = KnetArray(g.oparams[i].scndm)
end
return g
end
|
{"hexsha": "bfc2c854f5151e76927d0327db8a52a4e98b2298", "size": 2455, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "mlp.jl", "max_stars_repo_name": "JuliaTagBot/melseg", "max_stars_repo_head_hexsha": "8f63c00516624be8625244fc64dcbfb26d5d5851", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-01-03T15:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2017-01-03T15:34:42.000Z", "max_issues_repo_path": "mlp.jl", "max_issues_repo_name": "JuliaTagBot/melseg", "max_issues_repo_head_hexsha": "8f63c00516624be8625244fc64dcbfb26d5d5851", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlp.jl", "max_forks_repo_name": "JuliaTagBot/melseg", "max_forks_repo_head_hexsha": "8f63c00516624be8625244fc64dcbfb26d5d5851", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:20:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:20:10.000Z", "avg_line_length": 24.068627451, "max_line_length": 101, "alphanum_fraction": 0.5918533605, "num_tokens": 787}
|
import argparse
import numpy as np
def quantize(data, pred, error_bound):
    # Linear-scaling quantization of a single value against its prediction:
    # returns (quant_index_shifted, value), where a shifted index of 0 marks
    # the value as unpredictable and the caller stores it verbatim.
    radius = 32768
    diff = data - pred
    quant_index = int(abs(diff) / error_bound) + 1
    if quant_index < radius * 2:
        # Round the raw index down to an even step; half of it becomes the
        # offset from the radius in the shifted (stored) index.
        quant_index = quant_index >> 1
        half_index = quant_index
        quant_index = quant_index << 1
        if diff < 0:
            quant_index = -quant_index
            quant_index_shifted = radius - half_index
        else:
            quant_index_shifted = radius + half_index
        decompressed_data = pred + quant_index * error_bound
        if abs(decompressed_data - data) > error_bound:
            # Reconstruction misses the error bound: treat as unpredictable.
            return 0, data
        else:
            return quant_index_shifted, decompressed_data
    else:
        # Index out of range: the value cannot be quantized within the bound.
        return 0, data
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', type=str)
parser.add_argument('--error', '-e', type=float, default=1e-2)
parser.add_argument('--recon', '-r', type=str, default=None)
parser.add_argument('--decomp', '-d', type=str, default=None)
parser.add_argument('--quant', '-q', type=str, default=None)
parser.add_argument('--unpred', '-u', type=str, default=None)
parser.add_argument('--global_max', '-gma', type=float, default=None)
parser.add_argument('--global_min', '-gmi', type=float, default=None)
args = parser.parse_args()
orig=np.fromfile(args.input,dtype=np.float32)
recon=np.fromfile(args.recon,dtype=np.float32)
eb=args.error*(np.max(orig)-np.min(orig))
qs=[]
us=[]
for i in range(orig.size):
o=orig[i]
r=recon[i]
    if args.global_max is not None:
        r = min(r, args.global_max)
    if args.global_min is not None:
        r = max(r, args.global_min)
quant,decomp=quantize(o,r,eb)
qs.append(quant)
if quant==0:
us.append(decomp)
quants=np.array(qs,dtype=np.int32)
unpreds=np.array(us,dtype=np.float32)
if args.quant is not None:
    quants.tofile(args.quant)
if args.unpred is not None:
    unpreds.tofile(args.unpred)
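# Illustrative inverse of quantize(), not part of the original script: the
# encoder above reconstructs pred + quant_index * error_bound with
# quant_index = 2 * (quant_index_shifted - radius), so the value can be
# rebuilt from the shifted index; an index of 0 falls back to the stored
# unpredictable value. The function and argument names are hypothetical.
def dequantize(quant_index_shifted, pred, error_bound, unpred_value, radius=32768):
    if quant_index_shifted == 0:
        return unpred_value
    return pred + 2 * (quant_index_shifted - radius) * error_bound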
|
{"hexsha": "fc0880df6f1e2ecce1b350f11b3fbf397fb28950", "size": 2263, "ext": "py", "lang": "Python", "max_stars_repo_path": "quantize.py", "max_stars_repo_name": "Meso272/PyTorch-VAE", "max_stars_repo_head_hexsha": "08c44bdcb30ba8795a7c0da5597af80c8c42e9f0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "quantize.py", "max_issues_repo_name": "Meso272/PyTorch-VAE", "max_issues_repo_head_hexsha": "08c44bdcb30ba8795a7c0da5597af80c8c42e9f0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "quantize.py", "max_forks_repo_name": "Meso272/PyTorch-VAE", "max_forks_repo_head_hexsha": "08c44bdcb30ba8795a7c0da5597af80c8c42e9f0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-11T23:22:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T23:22:41.000Z", "avg_line_length": 26.9404761905, "max_line_length": 66, "alphanum_fraction": 0.6005302696, "include": true, "reason": "import numpy", "num_tokens": 564}
|
[STATEMENT]
lemma constant_function_eq':
assumes "a \<in> carrier R"
assumes "b \<notin> carrier R"
shows "\<cc>\<^bsub>a\<^esub> b = undefined"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. constant_function (carrier R) a b = undefined
[PROOF STEP]
by (simp add: constant_function_closed assms(1) assms(2) function_ring_not_car)
|
{"llama_tokens": 127, "file": "Padic_Ints_Function_Ring", "length": 1}
|
# load the dataset
import numpy as np
dataset = 'australian'
dataset = np.genfromtxt("%s.csv" % (dataset), delimiter=",")
X = dataset[:, :-1]
y = dataset[:, -1].astype(int)
# define the classifiers
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
clfs = {
'gnb': GaussianNB(),
'knn': KNeighborsClassifier(),
'cart': DecisionTreeClassifier(random_state=42),
}
# cross-validation
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.base import clone
from sklearn.metrics import accuracy_score
n_splits = 5
n_repeats = 2
rskf = RepeatedStratifiedKFold(
n_splits=n_splits, n_repeats=n_repeats, random_state=42)
scores = np.zeros((len(clfs), n_splits * n_repeats))
for fold_id, (train, test) in enumerate(rskf.split(X, y)):
for clf_id, clf_name in enumerate(clfs):
clf = clone(clfs[clf_name])
clf.fit(X[train], y[train])
y_pred = clf.predict(X[test])
scores[clf_id, fold_id] = accuracy_score(y[test], y_pred)
mean = np.mean(scores, axis=1)
std = np.std(scores, axis=1)
for clf_id, clf_name in enumerate(clfs):
print("%s: %.3f (%.2f)" % (clf_name, mean[clf_id], std[clf_id]))
print("\n")
# save the results
np.save('results', scores)
# load the results
import numpy as np
scores = np.load('results.npy')
print("Folds:\n", scores)
print("\n")
# paired test
# t-statistic and p-value
from scipy.stats import ttest_ind
alfa = .05
t_statistic = np.zeros((len(clfs), len(clfs)))
p_value = np.zeros((len(clfs), len(clfs)))
for i in range(len(clfs)):
for j in range(len(clfs)):
t_statistic[i, j], p_value[i, j] = ttest_ind(scores[i], scores[j])
print("t-statistic:\n", t_statistic, "\n\np-value:\n", p_value)
# print("\n")
# print("p-value:\n", p_value)
print("\n")
# print using tabulate
from tabulate import tabulate
headers = ["GNB", "KNN", "CART"]
names_column = np.array([["GNB"], ["KNN"], ["CART"]])
t_statistic_table = np.concatenate((names_column, t_statistic), axis=1)
t_statistic_table = tabulate(t_statistic_table, headers, floatfmt=".2f")
p_value_table = np.concatenate((names_column, p_value), axis=1)
p_value_table = tabulate(p_value_table, headers, floatfmt=".2f")
print("t-statistic:\n", t_statistic_table, "\n\np-value:\n", p_value_table)
# print()
print("\n")
# advantage
advantage = np.zeros((len(clfs), len(clfs)))
advantage[t_statistic > 0] = 1
advantage_table = tabulate(np.concatenate(
(names_column, advantage), axis=1), headers)
print("Advantage:\n", advantage_table)
print("\n")
# statistical significance
significance = np.zeros((len(clfs), len(clfs)))
significance[p_value < alfa] = 1
significance_table = tabulate(np.concatenate(
(names_column, significance), axis=1), headers)
print("Statistical significance (alpha = 0.05):\n", significance_table)
# statistically significantly better
print("\n")
stat_better = significance * advantage
stat_better_table = tabulate(np.concatenate(
(names_column, stat_better), axis=1), headers)
print("Statistically significantly better:\n", stat_better_table)
|
{"hexsha": "3a04c28800d08731619b6a9e8d1d97f298fcc5b9", "size": 3150, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/kod4.py", "max_stars_repo_name": "metsi/metsi.github.io", "max_stars_repo_head_hexsha": "4a195236d73ea90b46be1662c45ab473c893af29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/kod4.py", "max_issues_repo_name": "metsi/metsi.github.io", "max_issues_repo_head_hexsha": "4a195236d73ea90b46be1662c45ab473c893af29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/kod4.py", "max_forks_repo_name": "metsi/metsi.github.io", "max_forks_repo_head_hexsha": "4a195236d73ea90b46be1662c45ab473c893af29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7169811321, "max_line_length": 75, "alphanum_fraction": 0.7158730159, "include": true, "reason": "import numpy,from scipy", "num_tokens": 904}
|
/*
* This file is part of the Sequoia MSO Solver.
*
* Copyright 2012 Alexander Langer, Theoretical Computer Science,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author Alexander Langer
*/
#include <assert.h>
#include "atomar_game_factory.h"
#include <boost/scoped_ptr.hpp>
namespace sequoia {
template <typename TFormula, typename TIndex, typename TReturnGame>
void LeafGameFactory::visit_impl(const TFormula* f) {
assert(f != NULL);
DEBUG({
tab_prefix(_level) << "Building for Formula: "
<< f->toString() << std::endl;
});
TReturnGame* returngame = new TReturnGame(f);
DEBUG(returngame->level(_level));
typename TFormula::subformula_iterator s, send;
for (boost::tie(s, send) = f->subformulas(); s != send; ++s) {
const Formula* subf = *s;
boost::scoped_ptr<const Assignment_f> alpha(_alpha->clone());
const TIndex *index = prepare_index(f, subf, alpha);
DEBUG(tab_prefix(_level) << TOSTRING(index) << std::endl);
LeafGameFactory factory(alpha.get());
DEBUG(factory.level(_level+1));
subf->accept(&factory);
const MCGame_f* sub = factory.get();
DEBUG(tab_prefix(_level) << TOSTRING(index) << " --> " << sub->get()->toString());
const MCGame_f* tmpgame = returngame->add_subgame(index, sub);
if (tmpgame != NULL) {
_game = tmpgame;
return;
}
}
_game = returngame->minimize();
}
} // namespace
|
{"hexsha": "a1ae32591427d51ac3cfc805d328c18ce0e8eea9", "size": 2020, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/leaf_game_factory.hpp", "max_stars_repo_name": "sequoia-mso/sequoia-core", "max_stars_repo_head_hexsha": "d2a6a461ffbe38dc8abb005b2c8f1f3bc1dc2bbe", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2016-11-12T17:55:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-28T20:23:27.000Z", "max_issues_repo_path": "src/leaf_game_factory.hpp", "max_issues_repo_name": "sequoia-mso/sequoia-core", "max_issues_repo_head_hexsha": "d2a6a461ffbe38dc8abb005b2c8f1f3bc1dc2bbe", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/leaf_game_factory.hpp", "max_forks_repo_name": "sequoia-mso/sequoia-core", "max_forks_repo_head_hexsha": "d2a6a461ffbe38dc8abb005b2c8f1f3bc1dc2bbe", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2015-05-12T13:51:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-07T22:01:28.000Z", "avg_line_length": 34.2372881356, "max_line_length": 87, "alphanum_fraction": 0.6594059406, "num_tokens": 493}
|
#!/usr/bin/env python
#
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Sergey Lisitsyn
from numpy import *
from numpy.random import randn
# generate some overlapping training vectors
num_vectors=100
vec_distance=1
traindat=concatenate((randn(2,num_vectors)-vec_distance,
randn(2,num_vectors)+vec_distance), axis=1)
label_traindat=concatenate((-ones(num_vectors), ones(num_vectors)));
parameter_list = [[traindat,label_traindat]]
def modelselection_random_search_liblinear (traindat=traindat, label_traindat=label_traindat):
from shogun import machine_evaluation
from shogun import ContingencyTableEvaluation, ACCURACY
from shogun import splitting_strategy
from shogun import RandomSearchModelSelection
from shogun import ModelSelectionParameters, R_EXP
from shogun import ParameterCombination
from shogun import BinaryLabels
import shogun as sg
# build parameter tree to select C1 and C2
param_tree_root=ModelSelectionParameters()
c1=ModelSelectionParameters("C1");
param_tree_root.append_child(c1)
c1.build_values(-2.0, 2.0, R_EXP);
c2=ModelSelectionParameters("C2");
param_tree_root.append_child(c2);
c2.build_values(-2.0, 2.0, R_EXP);
# training data
features=sg.features(traindat)
labels=BinaryLabels(label_traindat)
# classifier
classifier=sg.machine("LibLinear", liblinear_solver_type="L2R_L2LOSS_SVC")
    # print all parameters available for model selection
    # Don't worry if yours is not included, but write to the mailing list
#classifier.print_modsel_params()
# splitting strategy for cross-validation
splitting_strategy = splitting_strategy(
"StratifiedCrossValidationSplitting", labels=labels, num_subsets=10)
# evaluation method
evaluation_criterium=ContingencyTableEvaluation(ACCURACY)
# cross-validation instance
cross_validation = machine_evaluation(
"CrossValidation", machine=classifier, features=features,
labels=labels, splitting_strategy=splitting_strategy,
evaluation_criterion=evaluation_criterium, autolock=False)
# model selection instance
model_selection=RandomSearchModelSelection(cross_validation, param_tree_root, 0.5)
# perform model selection with selected methods
#print "performing model selection of"
#param_tree_root.print_tree()
best_parameters=model_selection.select_model()
# print best parameters
#print "best parameters:"
#best_parameters.print_tree()
# apply them and print result
best_parameters.apply_to_machine(classifier)
result=cross_validation.evaluate()
#result.print_result()
if __name__=='__main__':
print('ModelSelectionRandomSearchLibLinear')
modelselection_random_search_liblinear(*parameter_list[0])
|
{"hexsha": "e0c311e74457a139b8e119abaae4c8964d777752", "size": 2821, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/undocumented/python/modelselection_random_search_liblinear.py", "max_stars_repo_name": "shiyi001/shogun", "max_stars_repo_head_hexsha": "287f02d11d5914ded2d410ab9c6f38712e11ca2b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-17T21:19:20.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-17T21:19:20.000Z", "max_issues_repo_path": "examples/undocumented/python/modelselection_random_search_liblinear.py", "max_issues_repo_name": "shiyi001/shogun", "max_issues_repo_head_hexsha": "287f02d11d5914ded2d410ab9c6f38712e11ca2b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/undocumented/python/modelselection_random_search_liblinear.py", "max_forks_repo_name": "shiyi001/shogun", "max_forks_repo_head_hexsha": "287f02d11d5914ded2d410ab9c6f38712e11ca2b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9879518072, "max_line_length": 94, "alphanum_fraction": 0.7699397377, "include": true, "reason": "from numpy", "num_tokens": 656}
|
using Fn
using MacroTools
using Test
@testset "Fn.jl" begin
@testset "no _ creates 0-argument function" begin
@test @capture(Fn.fn(:(exp(5 * √π))), function () exp(5 * √π) end)
end
@testset "_ creates single argument function" begin
@test @capture(Fn.fn(:(5 + _)), function (x_) 5 + x_ end)
end
@testset "numbered underscores introduce multiple arguments" begin
@test @capture(Fn.fn(:(sin(_1) * cos(_2))), function (x_, y_) sin(x_) * cos(y_) end)
end
@testset "numbered arguments are ordered by number, not by occurence" begin
@test @capture(Fn.fn(:(sin(_2) * cos(_1))), function (x_, y_) sin(y_) * cos(x_) end)
end
@testset "missing numbers introduce unused extra arguments" begin
expr = Fn.fn(:(sin(_4) * cos(_1)))
@test @capture(expr, function (w_, x_, y_, z_) sin(z_) * cos(w_) end)
end
@testset "leading zeros are not accepted" begin
# They would make everything more complicated
expr = Fn.fn(:(sin(_01) * cos(_02)))
@test @capture(expr, function () sin(x_) * cos(y_) end)
# Putting _01 and _02 into the @capture expression confuses MacroTools
@test x == :_01 && y == :_02
end
@testset "mixing _ and _n raises an error" begin
@test_throws ErrorException Fn.fn(:(_ + _1))
end
@testset "@fn does not replace _ in nested @fn" begin
fn = Fn.fn(:(map(@fn(_ + 3), _)))
matched = @capture(fn, function (x_) map(@fn(y_ + 3), x_) end)
@test matched
@test y === :_
end
@testset "@fn escapes function body" begin
h(x) = x^2
f = @fn(h(_))
@test f(5) == 25
end
end
|
{"hexsha": "75fb2bd64bb6242217876115bc38623db6bff6ac", "size": 1692, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "cqql/Fn.jl", "max_stars_repo_head_hexsha": "b6784f1b5b8a9592e56decaea3d68d8a0f942944", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "cqql/Fn.jl", "max_issues_repo_head_hexsha": "b6784f1b5b8a9592e56decaea3d68d8a0f942944", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "cqql/Fn.jl", "max_forks_repo_head_hexsha": "b6784f1b5b8a9592e56decaea3d68d8a0f942944", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3333333333, "max_line_length": 92, "alphanum_fraction": 0.5862884161, "num_tokens": 520}
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['mathtext.fontset'] = 'cm'
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
cm = plt.get_cmap('Set1')
deficits_nice = ['Gait speed', 'Dom grip str', 'Ndom grip str', 'ADL score','IADL score', '5 Chair rises','Leg raise','Full tandem', 'Self-rated health', 'Eyesight', 'Hearing', 'Walking ability', 'Dias BP', 'Sys BP', 'Pulse', 'Trig','C-RP','HDL','LDL','Gluc','IGF-1','Hgb','Fibr','Ferr', 'Total chol', r'WBC', 'MCH', 'HgbA1c', 'Vit-D']
deficits = ['gait speed', 'grip dom', 'grip ndom', 'FI ADL', 'FI IADL', 'chair','leg raise', 'full tandem', 'srh', 'eye',
'hear', 'func',
'dias', 'sys', 'pulse', 'trig','crp','hdl','ldl','glucose','igf1','hgb','fib','fer', 'chol', 'wbc', 'mch', 'hba1c', 'vitd']
medications = ['BP med', 'anticoagulent med', 'chol med', 'hip/knee treat', 'lung/asthma med']
background = ['longill', 'limitact', 'effort', 'smkevr', 'smknow','height', 'bmi', 'mobility', 'country',
'alcohol', 'jointrep', 'fractures', 'sex', 'ethnicity'] + medications
medications = []
background_nice = ['Longterm ill', 'Illness limits', 'Everythings effort', 'Ever smoke', 'Smoke now', 'Height', 'BMI','Mobility', 'Country', 'Alcohol', 'Joint replace', 'Fractures', 'Sex', 'Ethnicity', 'Blood pres med', 'Blood thin med', 'Chol med', 'Hipknee med', 'Lung med']
data = pd.read_csv('../Data/ELSA_cleaned.csv')
for n in range(len(deficits)):
    data = data.rename(columns={deficits[n]: deficits_nice[n]})
    deficits[n] = deficits_nice[n]
deficits_nice = deficits
for n in range(len(background)):
data = data.rename(columns = {background[n] : background_nice[n]})
background = background_nice
count = 0
total = 0
for label, group in data.groupby('id'):
total += 1
if group['death age'].unique()[0] > 0:
updated = group['death age'].values
updated[:-1] = -1
count += 1
        data.loc[data['id'] == label, 'death age'] = updated
times = np.arange(-0.5, 20.5, 1)
data['delta_t'] = (data['age'] - data.groupby('id')['age'].transform('first')).astype(int)
data['delta_death_age'] = (data['death age'] - data.groupby('id')['age'].transform('first')).astype(int)
data[data < -100] = np.nan
data.loc[data['delta_death_age'] < 0, 'delta_death_age'] = np.nan
missing = data.drop(columns='delta_t')[deficits].notna().groupby(data.delta_t, sort=False).sum().reset_index()
missing_background = data.drop(columns='delta_t')[background + medications].notna().groupby(data.delta_t, sort=False).sum().reset_index()
deaths = data.drop(columns='delta_t')['delta_death_age'].notna().groupby(data.delta_t, sort=False).sum().reset_index()
non_nurse = [0, 3, 4, 8, 9, 10, 11]
nurse = [1, 2, 5, 6, 7, 12, 13, 14,15,16,17,18,19,20,21,22,23,24,25,26,27,28]
(1 - data[np.array(deficits)].notna().mean()).to_csv('../Analysis_Data/ELSA_missing_percent.csv')
missing = missing.sort_values('delta_t')
missing[deficits] = (missing[deficits]).astype(int)
missing.set_index('delta_t', inplace=True)
total_count = pd.DataFrame()
total_count['delta_t'] = missing_background['delta_t']
total_count['Total individuals'] = missing_background['Sex']
total_count.sort_values('delta_t', inplace=True)
total_count = total_count.astype(int)
total_count.set_index('delta_t', inplace=True)
missing_background = missing_background.sort_values('delta_t')
missing_background[background + medications] = (missing_background[background + medications]).astype(int)
missing_background.set_index('delta_t', inplace=True)
death_count = pd.DataFrame()
death_count['delta_t'] = deaths['delta_t']
death_count['Deaths'] = deaths['delta_death_age']
death_count.sort_values('delta_t', inplace=True)
death_count = death_count.astype(int)
death_count.set_index('delta_t', inplace=True)
fig,ax = plt.subplots(4,1, figsize=(15,10),gridspec_kw={'height_ratios': [1, 0.73076923076, 1.0/26, 1.0/26]})
norm = mpl.colors.LogNorm(1, 25290)
import seaborn as sns
sns.set_style("white")
sns.heatmap(missing.transpose(), annot=True, fmt="d",cbar=False,ax=ax[0],cmap="Purples", yticklabels=deficits_nice,xticklabels=23*[''],vmin=1)
sns.heatmap(missing_background.transpose(), annot=True, fmt="d",cbar=False,ax=ax[1],cmap="Greens",yticklabels=background_nice,vmin=1)
g = sns.heatmap(death_count.transpose(), annot=True, fmt="d",cbar=False,ax=ax[2],cmap="Oranges",vmin=1)
g.set_yticklabels(g.get_yticklabels(), rotation=0)
g = sns.heatmap(total_count.transpose(), annot=True, fmt="d",cbar=False,ax=ax[3],cmap="Oranges",vmin=1)
g.set_yticklabels(g.get_yticklabels(), rotation=0)
ax[0].set_xlabel('')
ax[1].set_xlabel('')
ax[2].set_xlabel('')
ax[3].set_xlabel('Followup from baseline (years)', fontsize = 15)
ax[0].set_title('Number of individuals with measurements', fontsize = 15)
ax[0].set_xticks([],[])
ax[1].set_xticks([],[])
ax[2].set_xticks([],[])
for _, spine in ax[0].spines.items():
if _ != 'bottom':
spine.set_visible(True)
for _, spine in ax[1].spines.items():
if _ != 'top' and _ != 'bottom':
spine.set_visible(True)
for _, spine in ax[2].spines.items():
if _ != 'top' and _ != 'bottom':
spine.set_visible(True)
for _, spine in ax[3].spines.items():
if _ != 'top':
spine.set_visible(True)
ax[0].text(-0.13, 0.5, 'Health variables', horizontalalignment='center', verticalalignment='center',transform=ax[0].transAxes,fontsize = 20, zorder=1000000, rotation = 90, color = "#88419d", weight='bold')
ax[1].text(-0.13, 0.5, 'Background', horizontalalignment='center', verticalalignment='center',transform=ax[1].transAxes,fontsize = 20, zorder=1000000, rotation = 90, color="#238b45", weight='bold')
plt.tight_layout()
plt.subplots_adjust(hspace=0)
plt.subplots_adjust(left=0.15)
plt.savefig('../Plots/missing_values_ELSA.pdf')
|
{"hexsha": "e3ac5175a6226bbcc5b1b766fa48af6c07c5a9ca", "size": 6013, "ext": "py", "lang": "Python", "max_stars_repo_path": "Plotting_code/plot_ELSA_missing.py", "max_stars_repo_name": "Spencerfar/djin-aging", "max_stars_repo_head_hexsha": "f6513226e879e6061996d819b4de0e2873860fbc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-08-24T08:33:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T23:50:33.000Z", "max_issues_repo_path": "Plotting_code/plot_ELSA_missing.py", "max_issues_repo_name": "Spencerfar/djin-aging", "max_issues_repo_head_hexsha": "f6513226e879e6061996d819b4de0e2873860fbc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Plotting_code/plot_ELSA_missing.py", "max_forks_repo_name": "Spencerfar/djin-aging", "max_forks_repo_head_hexsha": "f6513226e879e6061996d819b4de0e2873860fbc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-24T08:34:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-24T08:34:30.000Z", "avg_line_length": 39.821192053, "max_line_length": 335, "alphanum_fraction": 0.6753700316, "include": true, "reason": "import numpy", "num_tokens": 1839}
|
import numpy as np
@np.vectorize
def vectorized_get(dictionary, key):
"""
Helper vectorized function to get keys
from a dictionary.
"""
return dictionary.get(key, -1)
def is_iter(something):
"""
Helper vectorized function to test
if something is an iterable other
than a str.
"""
try:
iter(something)
        return not isinstance(something, str)
except TypeError:
return False
@np.vectorize
def is_number(something):
"""
Helper vectorized function to test
if a value is or represents a number.
"""
try:
int(something)
return True
except ValueError:
return False
def get_features(geojson):
"""
Helper function to extract features
from dictionary. If it doesn't find
it, raise a value error with a more
informative error message.
"""
try:
features = geojson["features"]
except KeyError:
raise KeyError(f"{geojson} is an invalid GeoJSON. Not a feature collection")
return features
@np.vectorize
def from_iso88591_to_utf8(string):
"""
Convert weird characters to UTF-8.
    For example, "mÃ©dio" should become "médio".
"""
try:
return bytes(string, "iso-8859-1").decode("utf-8")
except UnicodeDecodeError:
return string
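if __name__ == "__main__":
    # Minimal sanity check, illustrative only and not part of the package:
    # build the typical mojibake form of "médio" and confirm the repair
    # recovers it. The variable name is hypothetical.
    garbled = bytes("médio", "utf-8").decode("iso-8859-1")  # yields "mÃ©dio"
    print(from_iso88591_to_utf8(garbled))  # expected output: médio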
|
{"hexsha": "1171bd9deec381480e4c6fafcbc2ec974f7a239d", "size": 1332, "ext": "py", "lang": "Python", "max_stars_repo_path": "mapsbr/helpers/utils.py", "max_stars_repo_name": "phelipetls/mapsbr", "max_stars_repo_head_hexsha": "36e2637c612d333a327199fd0687dbba09e964ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mapsbr/helpers/utils.py", "max_issues_repo_name": "phelipetls/mapsbr", "max_issues_repo_head_hexsha": "36e2637c612d333a327199fd0687dbba09e964ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mapsbr/helpers/utils.py", "max_forks_repo_name": "phelipetls/mapsbr", "max_forks_repo_head_hexsha": "36e2637c612d333a327199fd0687dbba09e964ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1428571429, "max_line_length": 84, "alphanum_fraction": 0.6388888889, "include": true, "reason": "import numpy", "num_tokens": 315}
|
#!/usr/bin/env python
# vim: fdm=indent
'''
author: Fabio Zanini
date: 05/08/13
content: Correct the allele frequencies comparing read types and write to file.
'''
# Modules
import subprocess as sp
import argparse
from operator import itemgetter
import numpy as np
from hivwholeseq.sequencing.samples import load_sequencing_run, SampleSeq
from hivwholeseq.utils.miseq import alpha
from hivwholeseq.sequencing.filenames import get_allele_counts_filename, get_coverage_filename, \
get_allele_frequencies_filename
from hivwholeseq.sequencing.adapter_info import load_adapter_table
from hivwholeseq.utils.one_site_statistics import filter_nus, plot_SFS_folded
from hivwholeseq.cluster.fork_cluster import fork_filter_allele_frequencies as fork_self
# Functions
def write_frequency_files(data_folder, adaID, fragment, nu_filtered, VERBOSE=0):
'''Write the corrected allele frequencies to file'''
if VERBOSE:
print 'Storing allele frequencies to file:', adaID, fragment
nu_filtered.dump(get_allele_frequencies_filename(data_folder, adaID, fragment))
# Script
if __name__ == '__main__':
# Input arguments
parser = argparse.ArgumentParser(description='Study minor allele frequency')
parser.add_argument('--run', required=True,
help='Seq run to analyze (e.g. Tue28)')
parser.add_argument('--adaIDs', nargs='*',
help='Adapter IDs to analyze (e.g. TS2)')
parser.add_argument('--fragments', nargs='*',
help='Fragment to map (e.g. F1 F6)')
parser.add_argument('--verbose', type=int, default=0,
help='Verbosity level [0-3]')
parser.add_argument('--submit', action='store_true',
help='Execute the script in parallel on the cluster')
parser.add_argument('--no-summary', action='store_false', dest='summary',
help='Do not save results in a summary file')
args = parser.parse_args()
seq_run = args.run
adaIDs = args.adaIDs
fragments = args.fragments
VERBOSE = args.verbose
submit = args.submit
summary = args.summary
# Specify the dataset
dataset = MiSeq_runs[seq_run]
data_folder = dataset['folder']
# If the script is called with no adaID, iterate over all
if not adaIDs:
adaIDs = load_adapter_table(data_folder)['ID']
if VERBOSE >= 3:
print 'adaIDs', adaIDs
# Iterate over all requested samples
for adaID in adaIDs:
# If the script is called with no fragment, iterate over all
samplename = dataset['samples'][dataset['adapters'].index(adaID)]
if fragments is None:
fragments_sample = [fr[:2] for fr in samples[samplename]['fragments']]
else:
fragments_sample = fragments
if VERBOSE >= 3:
print 'adaID:', adaID+', fragments:', fragments_sample
for fragment in fragments_sample:
# Submit to the cluster self if requested
if submit:
fork_self(data_folder, adaID, fragment, VERBOSE=VERBOSE,
summary=summary)
continue
# Get coverage and counts
counts = np.load(get_allele_counts_filename(data_folder, adaID, fragment))
if len(counts.shape) == 2:
import warnings
warnings.warn('Counts not divided by read type: will normalize instead of filter!')
nu_filtered = 1.0 * counts / counts.sum(axis=0)
else:
# Filter the minor frequencies by comparing the read types
nu_filtered = filter_nus(counts)
# Write output
write_frequency_files(data_folder, adaID, fragment, nu_filtered,
VERBOSE=VERBOSE)
if summary:
import matplotlib.pyplot as plt
was_interactive = plt.isinteractive()
plt.ioff()
plot_SFS_folded(data_folder, adaID, fragment, nu_filtered,
VERBOSE=VERBOSE, savefig=True)
plt.interactive(was_interactive)
|
{"hexsha": "2f37e663a317d2d653f74ae96ded391cbd9bca32", "size": 4179, "ext": "py", "lang": "Python", "max_stars_repo_path": "hivwholeseq/sequencing/filter_allele_frequencies.py", "max_stars_repo_name": "neherlab/hivwholeseq", "max_stars_repo_head_hexsha": "978ce4060362e4973f92b122ed5340a5314d7844", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-09-13T12:15:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-03T01:28:56.000Z", "max_issues_repo_path": "hivwholeseq/sequencing/filter_allele_frequencies.py", "max_issues_repo_name": "iosonofabio/hivwholeseq", "max_issues_repo_head_hexsha": "d504c63b446c3a0308aad6d6e484ea1666bbe6df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hivwholeseq/sequencing/filter_allele_frequencies.py", "max_forks_repo_name": "iosonofabio/hivwholeseq", "max_forks_repo_head_hexsha": "d504c63b446c3a0308aad6d6e484ea1666bbe6df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-01-17T03:43:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-25T07:00:11.000Z", "avg_line_length": 36.982300885, "max_line_length": 99, "alphanum_fraction": 0.6389088299, "include": true, "reason": "import numpy", "num_tokens": 899}
|
# This file was generated by the Julia Swagger Code Generator
# Do not modify this file directly. Modify the swagger specification instead.
mutable struct AccountSasParameters <: SwaggerModel
signedServices::Any # spec type: Union{ Nothing, String } # spec name: signedServices
signedResourceTypes::Any # spec type: Union{ Nothing, String } # spec name: signedResourceTypes
signedPermission::Any # spec type: Union{ Nothing, String } # spec name: signedPermission
signedIp::Any # spec type: Union{ Nothing, String } # spec name: signedIp
signedProtocol::Any # spec type: Union{ Nothing, String } # spec name: signedProtocol
signedStart::Any # spec type: Union{ Nothing, DateTime } # spec name: signedStart
signedExpiry::Any # spec type: Union{ Nothing, DateTime } # spec name: signedExpiry
keyToSign::Any # spec type: Union{ Nothing, String } # spec name: keyToSign
function AccountSasParameters(;signedServices=nothing, signedResourceTypes=nothing, signedPermission=nothing, signedIp=nothing, signedProtocol=nothing, signedStart=nothing, signedExpiry=nothing, keyToSign=nothing)
o = new()
validate_property(AccountSasParameters, Symbol("signedServices"), signedServices)
setfield!(o, Symbol("signedServices"), signedServices)
validate_property(AccountSasParameters, Symbol("signedResourceTypes"), signedResourceTypes)
setfield!(o, Symbol("signedResourceTypes"), signedResourceTypes)
validate_property(AccountSasParameters, Symbol("signedPermission"), signedPermission)
setfield!(o, Symbol("signedPermission"), signedPermission)
validate_property(AccountSasParameters, Symbol("signedIp"), signedIp)
setfield!(o, Symbol("signedIp"), signedIp)
validate_property(AccountSasParameters, Symbol("signedProtocol"), signedProtocol)
setfield!(o, Symbol("signedProtocol"), signedProtocol)
validate_property(AccountSasParameters, Symbol("signedStart"), signedStart)
setfield!(o, Symbol("signedStart"), signedStart)
validate_property(AccountSasParameters, Symbol("signedExpiry"), signedExpiry)
setfield!(o, Symbol("signedExpiry"), signedExpiry)
validate_property(AccountSasParameters, Symbol("keyToSign"), keyToSign)
setfield!(o, Symbol("keyToSign"), keyToSign)
o
end
end # type AccountSasParameters
const _property_map_AccountSasParameters = Dict{Symbol,Symbol}(Symbol("signedServices")=>Symbol("signedServices"), Symbol("signedResourceTypes")=>Symbol("signedResourceTypes"), Symbol("signedPermission")=>Symbol("signedPermission"), Symbol("signedIp")=>Symbol("signedIp"), Symbol("signedProtocol")=>Symbol("signedProtocol"), Symbol("signedStart")=>Symbol("signedStart"), Symbol("signedExpiry")=>Symbol("signedExpiry"), Symbol("keyToSign")=>Symbol("keyToSign"))
const _property_types_AccountSasParameters = Dict{Symbol,String}(Symbol("signedServices")=>"String", Symbol("signedResourceTypes")=>"String", Symbol("signedPermission")=>"String", Symbol("signedIp")=>"String", Symbol("signedProtocol")=>"String", Symbol("signedStart")=>"DateTime", Symbol("signedExpiry")=>"DateTime", Symbol("keyToSign")=>"String")
Base.propertynames(::Type{ AccountSasParameters }) = collect(keys(_property_map_AccountSasParameters))
Swagger.property_type(::Type{ AccountSasParameters }, name::Symbol) = Union{Nothing,eval(Base.Meta.parse(_property_types_AccountSasParameters[name]))}
Swagger.field_name(::Type{ AccountSasParameters }, property_name::Symbol) = _property_map_AccountSasParameters[property_name]
const _allowed_AccountSasParameters_signedServices = ["b", "q", "t", "f"]
const _allowed_AccountSasParameters_signedResourceTypes = ["s", "c", "o"]
const _allowed_AccountSasParameters_signedPermission = ["r", "d", "w", "l", "a", "c", "u", "p"]
const _allowed_AccountSasParameters_signedProtocol = ["https,http", "https"]
function check_required(o::AccountSasParameters)
(getproperty(o, Symbol("signedExpiry")) === nothing) && (return false)
true
end
function validate_property(::Type{ AccountSasParameters }, name::Symbol, val)
if name === Symbol("signedServices")
Swagger.validate_param(name, "AccountSasParameters", :enum, val, _allowed_AccountSasParameters_signedServices)
end
if name === Symbol("signedResourceTypes")
Swagger.validate_param(name, "AccountSasParameters", :enum, val, _allowed_AccountSasParameters_signedResourceTypes)
end
if name === Symbol("signedPermission")
Swagger.validate_param(name, "AccountSasParameters", :enum, val, _allowed_AccountSasParameters_signedPermission)
end
if name === Symbol("signedProtocol")
Swagger.validate_param(name, "AccountSasParameters", :enum, val, _allowed_AccountSasParameters_signedProtocol)
end
end
|
{"hexsha": "91bb4d90d497e89b11748be1396cb19fad10ea00", "size": 4798, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Storage/StorageManagementClient/model_AccountSasParameters.jl", "max_stars_repo_name": "JuliaComputing/Azure.jl", "max_stars_repo_head_hexsha": "0e2b55e7602352d86bdf3579e547a74a9b5f44f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2019-12-18T16:23:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T07:39:13.000Z", "max_issues_repo_path": "src/Storage/StorageManagementClient/model_AccountSasParameters.jl", "max_issues_repo_name": "JuliaComputing/Azure.jl", "max_issues_repo_head_hexsha": "0e2b55e7602352d86bdf3579e547a74a9b5f44f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-05-08T19:57:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-11T11:20:41.000Z", "max_forks_repo_path": "src/Storage/StorageManagementClient/model_AccountSasParameters.jl", "max_forks_repo_name": "JuliaComputing/Azure.jl", "max_forks_repo_head_hexsha": "0e2b55e7602352d86bdf3579e547a74a9b5f44f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-05-07T10:26:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-11T13:04:47.000Z", "avg_line_length": 68.5428571429, "max_line_length": 460, "alphanum_fraction": 0.751146311, "num_tokens": 1115}
|
from collections import Counter
import numpy as np
import sklearn
from imblearn.base import BaseSampler
class GlobalCS(BaseSampler):
"""
    Global CS is an algorithm that equalizes the number of samples in each class. It duplicates samples equally
    within each class until every class reaches the majority class size.
"""
def __init__(self, shuffle: bool = True):
super().__init__()
self._sampling_type = 'over-sampling'
self.shuffle = shuffle
self.quantities, self.max_quantity, self.X, self.y = [None] * 4
def _fit_resample(self, X, y):
"""
:param X:
two dimensional numpy array (number of samples x number of features) with float numbers
:param y:
one dimensional numpy array with labels for rows in X
:return:
            Resampled X (rows = max class quantity * number of unique classes) and y (one label per resampled row), both as numpy arrays
"""
        assert len(X.shape) == 2, 'X should have 2 dimensions'
assert X.shape[0] == y.shape[0], 'Number of labels must be equal to number of samples'
self.quantities = Counter(y)
self.max_quantity = int(np.max(list(self.quantities.values())))
self.X = X
self.y = y
result_X, result_y = list(), list()
for class_name, class_quantity in self.quantities.items():
temp_X, temp_y = self._equal_oversample(self.X, self.y, class_name)
result_X.extend(temp_X)
result_y.extend(temp_y)
if self.shuffle:
result_X, result_y = sklearn.utils.shuffle(result_X, result_y)
return np.array(result_X), np.array(result_y)
def _equal_oversample(self, X, y, class_name):
indices_in_class = [i for i, class_label in enumerate(y) if class_label == class_name]
desired_quantity = self.max_quantity - len(indices_in_class)
oversampled_X, oversampled_y = list(X[indices_in_class]), list(y[indices_in_class])
for i in range(desired_quantity):
sample_index_to_duplicate: int = i % self.quantities[class_name]
sample_id: int = indices_in_class[sample_index_to_duplicate]
oversampled_X.append(X[sample_id])
oversampled_y.append(y[sample_id])
return oversampled_X, oversampled_y
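# Hypothetical usage sketch (not part of the original module; the demo data is
# invented): imblearn's BaseSampler exposes fit_resample(), which delegates to
# the _fit_resample() implementation above.
if __name__ == '__main__':
    X_demo = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8], [0.9, 1.0]])
    y_demo = np.array([0, 0, 0, 1, 1])
    X_res, y_res = GlobalCS().fit_resample(X_demo, y_demo)
    # Both classes now match the majority class size of 3
    print(Counter(y_res))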
|
{"hexsha": "8991e0b22c105cedfbdcffdda36902103e6b31aa", "size": 2296, "ext": "py", "lang": "Python", "max_stars_repo_path": "multi_imbalance/resampling/global_cs.py", "max_stars_repo_name": "NaIwo/multi-imbalance", "max_stars_repo_head_hexsha": "237c5842b27a58edfdfb88073faa0021eb243348", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 63, "max_stars_repo_stars_event_min_datetime": "2019-08-12T09:12:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T10:39:37.000Z", "max_issues_repo_path": "multi_imbalance/resampling/global_cs.py", "max_issues_repo_name": "NaIwo/multi-imbalance", "max_issues_repo_head_hexsha": "237c5842b27a58edfdfb88073faa0021eb243348", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2019-08-15T20:24:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T11:54:05.000Z", "max_forks_repo_path": "multi_imbalance/resampling/global_cs.py", "max_forks_repo_name": "NaIwo/multi-imbalance", "max_forks_repo_head_hexsha": "237c5842b27a58edfdfb88073faa0021eb243348", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-12-19T22:39:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-09T02:24:53.000Z", "avg_line_length": 36.4444444444, "max_line_length": 111, "alphanum_fraction": 0.6524390244, "include": true, "reason": "import numpy", "num_tokens": 522}
|
import numpy
import theano
from theano import scalar, gof
from theano.tests.unittest_tools import SkipTest, assert_allclose
from theano.tensor.tests import test_elemwise
from .config import mode_with_gpu, test_ctx_name
from .test_basic_ops import rand_gpuarray
from ..elemwise import (GpuElemwise, GpuDimShuffle,
GpuCAReduceCuda, GpuCAReduceCPY)
from ..type import GpuArrayType, get_context
from pygpu import ndgpuarray as gpuarray
# This is actually a test for GpuElemwise
class test_gpu_Broadcast(test_elemwise.test_Broadcast):
op = GpuElemwise
type = GpuArrayType
cop = GpuElemwise
ctype = GpuArrayType
# The order is important
linkers = [gof.PerformLinker, gof.CLinker]
def setUp(self):
if get_context(test_ctx_name).kind != 'cuda':
self.linkers = [gof.PerformLinker]
def rand_val(self, shp):
return rand_gpuarray(*shp, **dict(cls=gpuarray))
def rand_cval(self, shp):
return rand_gpuarray(*shp, **dict(cls=gpuarray))
def test_c(self):
if get_context(test_ctx_name).kind != 'cuda':
raise SkipTest("Cuda specific tests")
super(test_gpu_Broadcast, self).test_c()
def test_c_inplace(self):
if get_context(test_ctx_name).kind != 'cuda':
raise SkipTest("Cuda specific tests")
super(test_gpu_Broadcast, self).test_c_inplace()
def test_elemwise_pow():
# Test that GpuElemwise(pow) can compile with any combination of integer
# or float input dtype.
if get_context(test_ctx_name).kind != 'cuda':
raise SkipTest("Cuda specific tests")
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64"]
for dtype_base in dtypes:
for dtype_exp in dtypes:
# Compile a gpu function with the specified dtypes
base = theano.tensor.vector(dtype=dtype_base)
exp = theano.tensor.vector(dtype=dtype_exp)
output = base ** exp
f = theano.function([base, exp], output)
# Call the function to make sure the output is valid
base_val = numpy.random.randint(0, 5, size=10).astype(dtype_base)
exp_val = numpy.random.randint(0, 3, size=10).astype(dtype_exp)
out = f(base_val, exp_val)
expected_out = base_val ** exp_val
assert_allclose(out, expected_out)
class test_GpuDimShuffle(test_elemwise.test_DimShuffle):
op = GpuDimShuffle
class test_GpuCAReduceCPY(test_elemwise.test_CAReduce):
dtypes = ["float32"]
bin_dtypes = ["uint8", "int8"]
op = GpuCAReduceCPY
reds = [scalar.add, scalar.mul]
pre_scalar_op = None
def test_perform(self):
for dtype in self.dtypes + self.bin_dtypes:
for op in self.reds:
self.with_linker(gof.PerformLinker(), op, dtype=dtype,
pre_scalar_op=self.pre_scalar_op)
def test_perform_nan(self):
for dtype in self.dtypes:
if not dtype.startswith('float'):
continue
for op in self.reds:
self.with_linker(gof.PerformLinker(), op, dtype=dtype,
test_nan=True,
pre_scalar_op=self.pre_scalar_op)
def test_c(self):
for dtype in self.dtypes + self.bin_dtypes:
for op in self.reds:
self.with_linker(gof.CLinker(), op, dtype=dtype,
pre_scalar_op=self.pre_scalar_op)
def test_c_nan(self):
for dtype in self.dtypes:
if not dtype.startswith('float'):
continue
for op in self.reds:
self.with_linker(gof.CLinker(), op, dtype=dtype,
test_nan=True,
pre_scalar_op=self.pre_scalar_op)
def test_infer_shape(self):
for dtype in self.dtypes:
super(test_GpuCAReduceCPY, self).test_infer_shape(dtype)
class test_GpuCAReduceCuda(test_GpuCAReduceCPY):
dtypes = ["float32", "int64"]
bin_dtypes = ["uint8", "int8"]
cases = [((5, 6), None),
((5, 6), (0, 1)),
((5, 6), (0, )),
((5, 6), (1, )),
((5, 6), (-1, )),
((5, 6), (-2, )),
# ((5, 6), ()), #reduce on no axis(copy) isn't implemented
# ((2, 3, 4, 5), (0, 1, 3)), mask 1101 isn't implemented
# ((2, 3, 4, 5), (-2, -3)), mask 0110 isn't implemented
((5, 0), None),
((5, 0), (0, )),
((5, 0), (1, )),
# ((5, 0), ()), reduce on no axis isn't implemented
# ((), None), reduce on no axis isn't implemented
# ((), ()) reduce on no axis isn't implemented
# Test all GPU cases implemented
((1, 0), (1,)),
((0, 1), (1,)),
((0, 0), (1,)),
((0, 0, 0), (1, 2)),
((0, 0, 0, 0), (1, 2, 3)),
((2, 1), (1,)),
((1, 2), (1,)),
((100, 3, 1300), [1]),
((0,), [0]), ((5,), [0]),
             ((0, 0), [0, 1]), ((1, 0), [0, 1]), ((5, 4), [0, 1]), ((33, 31), [0, 1]), ((5, 4), [1]), ((5, 4), [0]), # need something bigger than 32 for some opt test.
((5, 4, 3), [0]), ((5, 4, 3), [1]), ((5, 4, 3), [0, 1]), ((5, 4, 3), [2]), ((5, 4, 3), [1, 2]), ((5, 4, 3), [0, 1, 2]),
((0, 0, 0, 0), [0, 1, 2, 3]),
((5, 4, 3, 20), [2, 3]), ((5, 4, 3, 2), [0, 1, 2, 3]), ((5, 4, 3, 2), [0, 2, 3]), ((5, 4, 3, 2), [1, 2, 3]),
             # test shape bigger than 4096 on each dimension to make sure that we work correctly when we don't have enough thread/block in each dimension
((4100, 3), [0]), ((3, 4101), [0]), # 10
((1024, 33), [0]), ((33, 1024), [0]), # 10
((1025, 33), [0]), ((33, 1025), [0]), # 10
((4100, 3), [1]), ((3, 4101), [1]), # 01
((1024, 33), [1]), ((33, 1024), [1]), # 01
((1025, 33), [1]), ((33, 1025), [1]), # 01
((4100, 3), [0, 1]), ((3, 4101), [0, 1]), # 11
((1024, 33), [0, 1]), ((33, 1024), [0, 1]), # 01
((1025, 33), [0, 1]), ((33, 1025), [0, 1]), # 01
((4100, 4, 3), [0]), ((5, 4100, 3), [0]), ((5, 4, 4100), [0]), ((3, 65536, 1), [0]), # 100
((4100, 4, 3), [1]), ((5, 4100, 3), [1]), ((5, 4, 4100), [1]), # 010
((4100, 4, 3), [2]), ((5, 4100, 3), [2]), ((5, 4, 4100), [2]), # 001
((4100, 4, 3), [0, 1]), ((5, 4100, 3), [0, 1]), ((5, 4, 4100), [0, 1]), # 110
((4100, 4, 3), [1, 2]), ((5, 4100, 3), [1, 2]), ((5, 4, 4100), [1, 2]), # 011
# ((4100,4,3),[0,2]),((5,4100,3),[0,2]),((5,4,4100),[0,2]),#101 ##not implemented
((4100, 4, 3), [0, 1, 2]), ((5, 4100, 3), [0, 1, 2]), ((5, 4, 4100), [0, 1, 2]), # 111
((65, 4, 3), [0, 1, 2]), ((5, 65, 3), [0, 1, 2]), ((5, 4, 65), [0, 1, 2]), # 111
((4100, 4, 3, 2), [2, 3]), ((4, 4100, 3, 2), [2, 3]), ((4, 3, 4100, 2), [2, 3]), ((4, 3, 2, 4100), [2, 3]), # 0011
((4100, 4, 3, 2), [1, 3]), ((4, 4100, 3, 2), [1, 3]), ((4, 3, 4100, 2), [1, 3]), ((4, 3, 2, 4100), [1, 3]), # 0101
((4100, 4, 3, 2), [0, 2, 3]), ((4, 4100, 3, 2), [0, 2, 3]), ((4, 3, 4100, 2), [0, 2, 3]), # ((4,3,2,4100),[0,2,3]),#1011
((4100, 4, 3, 2), [1, 2, 3]), ((4, 4100, 3, 2), [1, 2, 3]), ((4, 3, 4100, 2), [1, 2, 3]), ((4, 3, 2, 4100), [1, 2, 3]), # 0111
((65, 4, 3, 2), [1, 2, 3]), ((4, 65, 3, 2), [1, 2, 3]), ((4, 3, 65, 2), [1, 2, 3]), ((4, 3, 2, 65), [1, 2, 3]), # 0111
((4100, 2, 3, 4), [0, 1, 2, 3]), ((2, 4100, 3, 4), [0, 1, 2, 3]), ((2, 3, 4100, 4), [0, 1, 2, 3]), ((2, 3, 4, 4100), [0, 1, 2, 3]), ((128, 1, 2, 3), [0, 1, 2, 3]), # 1111
# test pattern implemented by reshape
# Skip them as this test the op directly, not the optimization with reshape
# ((4100,4,3,2),[0]),((4,4100,3,2),[0]),((4,3,4100,2),[0]),((4,3,2,4100),[0]),#1000
# ((4100,4,3,2),[1]),((4,4100,3,2),[1]),((4,3,4100,2),[1]),((4,3,2,4100),[1]),#0100
# ((4100,4,3,2),[2]),((4,4100,3,2),[2]),((4,3,4100,2),[2]),((4,3,2,4100),[2]),#0010
# ((4100,4,3,2),[3]),((4,4100,3,2),[3]),((4,3,4100,2),[3]),((4,3,2,4100),[3]),#0001
# ((1100,2,3,4,5),[0,1,2,3,4]),((2,1100,3,4,5),[0,1,2,3,4]),((2,3,1100,4,5),[0,1,2,3,4]),((2,3,4,1100,5),[0,1,2,3,4]),((2,3,4,5,1100),[0,1,2,3,4]),#11111
# ((5,4,3,10,11),[1,2]),
]
op = GpuCAReduceCuda
reds = [scalar.add, scalar.mul,
scalar.maximum, scalar.minimum]
pre_scalar_op = None
def test_perform(self):
return
def test_perform_nan(self):
return
def setUp(self):
super(test_GpuCAReduceCuda, self).setUp()
if get_context(test_ctx_name).kind != 'cuda':
raise SkipTest("Cuda specific tests")
class T_gpureduce_dtype(test_elemwise.T_reduce_dtype):
mode = mode_with_gpu.excluding('local_cut_useless_reduce')
op = GpuCAReduceCuda
# Currently we don't support reduction on 0 axis
axes = [None, 0, 1, 1, [0], [1], [0, 1]]
# We don't support complex dtype
dtypes = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64']
def setUp(self):
if get_context(test_ctx_name).kind != 'cuda':
raise SkipTest("Cuda specific tests")
def speed_reduce10():
import numpy
import theano
data = numpy.random.rand(1000, 1000).astype("float32")
m = theano.tensor.fmatrix()
f = theano.function([m], [m.sum(axis=0), m.T.sum(axis=0)],
mode=mode_with_gpu)
f(data)
|
{"hexsha": "1c9a2b1fb969c88a2e7c12fae35a4f466207db68", "size": 9912, "ext": "py", "lang": "Python", "max_stars_repo_path": "theano/sandbox/gpuarray/tests/test_elemwise.py", "max_stars_repo_name": "oplatek/Theano", "max_stars_repo_head_hexsha": "09605e7cae876e15c5502c4edaba6a9644c50c11", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "theano/sandbox/gpuarray/tests/test_elemwise.py", "max_issues_repo_name": "oplatek/Theano", "max_issues_repo_head_hexsha": "09605e7cae876e15c5502c4edaba6a9644c50c11", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "theano/sandbox/gpuarray/tests/test_elemwise.py", "max_forks_repo_name": "oplatek/Theano", "max_forks_repo_head_hexsha": "09605e7cae876e15c5502c4edaba6a9644c50c11", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.1787234043, "max_line_length": 184, "alphanum_fraction": 0.4868845843, "include": true, "reason": "import numpy,import theano,from theano", "num_tokens": 3672}
|
import csv
import nltk
import pandas as pd
import numpy as np
import spacy
from nltk.tokenize import word_tokenize
import string
def load_book(book_path, lower=False):
'''
Reads in a novel from a .txt file, and returns it in (optionally
lowercased) string form
Parameters
----------
book_path : string (required)
path to txt file containing full text of book to be analysed
lower : bool (optional)
If True, the returned string will be lowercased;
If False, the returned string will retain its original case formatting.
Returns
-------
book : string
book in string form
'''
with open(book_path) as f:
book = f.read()
if lower:
book = book.lower()
return book
def load_characters(charaters_path):
'''
Reads in a .csv file of character names
Parameters
----------
charaters_path : string (required)
path to csv file containing full list of characters to be examined.
Each character should take up one line of the file. If the character is
referred to by multiple names, nicknames or sub-names within their
full name, these should be split by commas, eg:
Harry, Potter
Lord, Voldemort, You-Know-Who
Giant Squid
Returns
-------
characters : list
list of tuples naming characters in text
'''
with open(charaters_path) as f:
reader = csv.reader(f)
characters = [tuple(name.lower()+' ' for name in row) for row in reader]
return characters
def remove_punctuation(input_string):
'''
Removes all punctuation from an input string
Parameters
----------
input_string : string (required)
input string
Returns
-------
clean_string : string
clean string
'''
return input_string.translate(str.maketrans('', '', string.punctuation+'’'))
def extract_character_names(book):
'''
Automatically extracts lists of plausible character names from a book
Parameters
----------
book : string (required)
book in string form (with original upper/lowercasing intact)
Returns
-------
characters : list
        list of plausible character names
'''
nlp = spacy.load('en')
stopwords = nltk.corpus.stopwords.words('english')
words = [remove_punctuation(w) for w in book.split()]
unique_words = list(set(words))
characters = [word.text for word in nlp(' '.join(unique_words)) if word.pos_ == 'PROPN']
characters = [c for c in characters if len(c) > 2]
characters = [c for c in characters if c.istitle()]
characters = [c for c in characters if not (c[-1] == 's' and c[:-1] in characters)]
characters = list(set([c.title() for c in [c.lower() for c in characters]]) - set(stopwords))
return [tuple([c + ' ']) for c in set(characters)]
def get_sentence_sequences(book):
'''
Splits a book into its constituent sentences
Parameters
----------
book : string (required)
book in string form
Returns
-------
sentences : list
list of strings, where each string is a sentence in the novel as
interpreted by NLTK's tokenize() function.
'''
detector = nltk.data.load('tokenizers/punkt/english.pickle')
sentences = detector.tokenize(book)
return sentences
def get_word_sequences(book, n=50):
'''
Takes a book and splits it into its constituent words, returning a list of
substrings which comprise the book, whose lengths are determined by a set
number of words (default = 50).
Parameters
----------
book : string (required)
book in string form
n : int (optional)
number of words to be contained in each returned sequence (default = 50)
Returns
-------
sequences : list
list of strings, where each string is a list of n words as interpreted
by NLTK's word_tokenize() function.
'''
book_words = word_tokenize(book)
return [' '.join(book_words[i: i+n]) for i in range(0, len(book_words), n)]
def get_character_sequences(book, n=200):
'''
Takes a book and splits it into a list of substrings of length n
(default = 200).
Parameters
----------
book : string (required)
book in string form
n : int (optional)
number of characters to be contained in each returned sequence
(default = 200)
Returns
-------
sequences : list
list of strings comprising the book, where each string is of length n.
'''
return [''.join(book[i: i+n]) for i in range(0, len(book), n)]
def find_connections(sequences, characters):
'''
Takes a novel and its character list and counts instances of each character
in each sequence.
Parameters
----------
sequences : list (required)
list of substrings representing the novel to be analysed
characters : list (required)
        list of character names (as tuples)
Returns
-------
df : pandas.DataFrame
columns = character names
indexes = sequences
values = counts of instances of character name in sequence
'''
if any(len(names) > 1 for names in characters):
df = pd.DataFrame({str(character):
{sequence: sum([sequence.count(name) for name in character])
for sequence in sequences}
for character in characters})
else:
characters = [c[0] for c in characters]
df = pd.DataFrame([[sequence.count(character)
for character in characters]
for sequence in sequences],
index=sequences,
columns=characters)
return df
def calculate_cooccurence(df):
'''
Uses the dot product to calculate the number of times two characters appear
in the same sequences. This is the core of the bookworm graph.
Parameters
----------
df : pandas.DataFrame (required)
columns = character names
indexes = sequences
values = counts of instances of character name in sequence
Returns
-------
cooccurence : pandas.DataFrame
columns = character names
indexes = character names
values = counts of character name cooccurences in all sequences
'''
characters = df.columns.values
cooccurence = df.values.T.dot(df.values)
np.fill_diagonal(cooccurence, 0)
cooccurence = pd.DataFrame(cooccurence, columns=characters, index=characters)
return cooccurence
def get_interaction_df(cooccurence, threshold=0):
'''
    Produces a dataframe of interactions between characters using the
cooccurence matrix of those characters. The return format is directly
analysable by networkx in the construction of a graph of characters.
Parameters
----------
cooccurence : pandas.DataFrame (required)
columns = character names
indexes = character names
values = counts of character name cooccurences in all sequences
threshold : int (optional)
The minimum character interaction strength needed to be included in the
returned interaction_df
Returns
-------
interaction_df : pandas.DataFrame
        DataFrame enumerating the strength of interactions between characters.
source = character one
target = character two
value = strength of interaction between character one and character two
'''
rows, columns = np.where(np.triu(cooccurence.values, 1) > threshold)
return pd.DataFrame(np.column_stack([cooccurence.index[rows],
cooccurence.columns[columns],
cooccurence.values[rows, columns]]),
columns=['source', 'target', 'value'])
def bookworm(book_path, charaters_path=None, threshold=2):
'''
Wraps the full bookworm analysis from the raw .txt file's path, to
production of the complete interaction dataframe. The returned dataframe is
directly analysable by networkx using:
nx.from_pandas_dataframe(interaction_df,
source='source',
target='target')
Parameters
----------
book_path : string (required)
path to txt file containing full text of book to be analysed
charaters_path : string (optional)
path to csv file containing full list of characters to be examined
Returns
-------
interaction_df : pandas.DataFrame
        DataFrame enumerating the strength of interactions between characters.
source = character one
target = character two
value = strength of interaction between character one and character two
'''
book = load_book(book_path)
sequences = get_sentence_sequences(book)
if charaters_path is None:
characters = extract_character_names(book)
else:
characters = load_characters(charaters_path)
df = find_connections(sequences, characters)
cooccurence = calculate_cooccurence(df)
return get_interaction_df(cooccurence, threshold)
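# Hypothetical usage sketch (not part of the original module; the file path is
# invented): builds the interaction dataframe for a book and loads it into a
# networkx graph with the call quoted in the bookworm() docstring (newer
# networkx releases name it from_pandas_edgelist).
if __name__ == '__main__':
    import networkx as nx
    interactions = bookworm('path/to/book.txt', threshold=2)
    G = nx.from_pandas_dataframe(interactions, source='source', target='target')
    print(G.number_of_nodes(), 'characters,', G.number_of_edges(), 'interactions')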
|
{"hexsha": "4330fe6a853c314c59d81009c80518c0e93ffaf9", "size": 9222, "ext": "py", "lang": "Python", "max_stars_repo_path": "bookworm/build_network.py", "max_stars_repo_name": "harrisonpim/bookworm", "max_stars_repo_head_hexsha": "d5fffe9630079236a64708f767186aa0748de4cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 78, "max_stars_repo_stars_event_min_datetime": "2017-08-29T08:24:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T14:14:26.000Z", "max_issues_repo_path": "bookworm/build_network.py", "max_issues_repo_name": "harrisonpim/bookworm", "max_issues_repo_head_hexsha": "d5fffe9630079236a64708f767186aa0748de4cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-09-25T13:39:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-15T03:11:38.000Z", "max_forks_repo_path": "bookworm/build_network.py", "max_forks_repo_name": "harrisonpim/bookworm", "max_forks_repo_head_hexsha": "d5fffe9630079236a64708f767186aa0748de4cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2017-12-01T18:58:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-07T09:10:35.000Z", "avg_line_length": 30.9463087248, "max_line_length": 97, "alphanum_fraction": 0.6348948167, "include": true, "reason": "import numpy", "num_tokens": 1948}
|
module mod_output
use mod_error
use mod_graph
implicit none
save
private
! public procedures
public :: write_input_graph, &
write_graph_paths
! private variables
integer, parameter :: fout_numb = 700
character(*), parameter :: fout_name = "graph.out"
contains
!!! Public !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine write_input_graph()
character(*), parameter :: my_name = "write_input_graph"
integer :: i
integer :: sz
character(8) :: i_str
character(120) :: frmt
integer :: err_n
character(120) :: err_msg
open(unit=fout_numb,file=fout_name,status="replace",action="write", &
iostat=err_n,iomsg=err_msg)
if (err_n /= 0) call error(my_name,err_msg)
sz = size(graph_conn,1)
write(i_str,'(I8)') sz
i_str = adjustl(i_str)
frmt = "("//trim(i_str)//"(X,L1))"
write(fout_numb,*) "Graph connections"
write(fout_numb,*)
do i = 1, sz
write(fout_numb,frmt) graph_conn(i,:)
end do
write(fout_numb,*)
write(fout_numb,*) "Start vertex: ", start_vert
write(fout_numb,*) "End vertex: ", end_vert
write(fout_numb,*)
write(fout_numb,*) "Paths:"
close(unit=fout_numb,iostat=err_n,iomsg=err_msg)
if (err_n /= 0) call error(my_name,err_msg)
end subroutine write_input_graph
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
subroutine write_graph_paths(flag_total_paths)
logical, intent(in) :: flag_total_paths
character(*), parameter :: my_name = "write_graph_paths"
integer :: i
integer :: j
integer :: err_n
character(120) :: err_msg
open(unit=fout_numb,file=fout_name,status="unknown",action="write", &
position="append",iostat=err_n,iomsg=err_msg)
if (err_n /= 0) call error(my_name,err_msg)
do i = 1, size(graph_paths)
do j = 1, graph_paths(i)%sz
write(fout_numb,'(1X,I4)',advance="no") graph_paths(i)%node(j)
end do
write(fout_numb,*)
end do
if (flag_total_paths) then
write(fout_numb,*)
write(fout_numb,*) "Total paths found: ", paths_found
write(fout_numb,*) "Dead paths found: ", dead_paths
end if
close(unit=fout_numb,iostat=err_n,iomsg=err_msg)
if (err_n /= 0) call error(my_name,err_msg)
end subroutine write_graph_paths
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
end module mod_output
|
{"hexsha": "167dc95c954a68bdc0753cbbe70e39db7fb1e867", "size": 2363, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/mod_output.f90", "max_stars_repo_name": "marberti/pathgen", "max_stars_repo_head_hexsha": "9ba8df5c1577492d49ea8423516f477bb8dfbfb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mod_output.f90", "max_issues_repo_name": "marberti/pathgen", "max_issues_repo_head_hexsha": "9ba8df5c1577492d49ea8423516f477bb8dfbfb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mod_output.f90", "max_forks_repo_name": "marberti/pathgen", "max_forks_repo_head_hexsha": "9ba8df5c1577492d49ea8423516f477bb8dfbfb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4086021505, "max_line_length": 79, "alphanum_fraction": 0.6182818451, "num_tokens": 686}
|
#!/usr/bin/env python
# coding: utf-8
# # Problem 3 - Purchasing Paint
# In[98]:
from tqdm import tqdm
import numpy as np
import os
# Sometimes an assertion fails. Just shuffle the seed again when that happens. #ostrich
rng = np.random.default_rng(42838858382)
# In[99]:
class Case:
def __init__(self, name, K, D, costs):
self.name = name
self.K = K
self.D = D
self.costs = costs
def validate(self):
N = len(self.costs)
seen_colours = set()
assert 1 <= N <= 2000
assert 1 <= self.K <= 2000
assert 1 <= self.D <= N
for d, c in self.costs:
assert 0 <= c <= 1000000000
assert 1 <= d <= self.D
seen_colours.add(d)
assert seen_colours == set(range(1, self.D+1))
def save(self, path, index):
output_file = os.path.join(path, f"{len(self.costs)}-{self.K}-{self.D}-{self.name}-{index}.in")
with open(output_file, "w") as f:
f.write(f"{len(self.costs)} {self.K} {self.D}\n")
for a, b in self.costs:
f.write(f"{a} {b}\n")
return output_file
def __str__(self):
return f"[Case {len(self.costs)}-{self.K}-{self.D}-{self.name}: {self.costs}]"
# In[100]:
def min_case():
K, D = 1, 1
return Case("min", K, D, [(1, 10**9)])
def max_case():
N, K, D = 2000, 1, 2000
return Case("max", K, D, [(i, 1000000000) for i in range(1, 2001)])
def uniform_distribute(N, D):
res = [(N + i) // D for i in range(D)]
assert sum(res) == N
return np.array(res).astype(int)
def random_distribute(N, D):
if N == 1:
return np.array([1] + [0] * (D-1))
res = set()
while len(res) < D-1:
res.add(rng.integers(1, N))
parts = np.concatenate([np.array([0]), np.sort(list(res)), np.array([N])])
return np.diff(parts).astype(int)
# In[101]:
def make_case(N, D, K, total, random_paints, random_weights, random_costs):
print(f"Make {N} {D} {K} {total}")
if random_paints:
paints = random_distribute(N, D)
else:
paints = uniform_distribute(N, D)
if random_weights:
weights = random_distribute(total, D)
else:
weights = uniform_distribute(total, D)
res = []
for i, (p, w) in enumerate(zip(paints, weights)):
if random_costs:
vals = random_distribute(w, p)
else:
vals = uniform_distribute(w, p)
vals = np.min(np.array([np.repeat(1000000000, p), vals]), axis=0)
for v in vals:
res.append((i+1, v))
return Case(f"{int(random_paints)}{int(random_weights)}{int(random_costs)}", K, D, res)
# In[102]:
str(make_case(10, 1, 10, 5000000000, False, False, False))
# # Dataplan
#
# ## Subtask 1 - K = 1
# ## Subtask 2 - K = 2
# ## Subtask 3 - N <= 18
# ## Subtask 4 - Total cost <= 2e3, Kth best is always take best
# - Generate a bunch of small cases and pray some of them end up here
# - Make N large enough and uniformly distribute paint costs
# ## Subtask 5 - No further constraints
# - Uniformly distribute paint types
# - Choose for paint costs either uniform or random
# - Should be good enough lol
# - Worst case - consider worst possible option
# In[107]:
def gen_cases_for(N, K, total):
if K <= N:
yield make_case(N, 1, K, total, True, True, True)
yield make_case(N, 1, K, total, False, False, False)
if K * 8 <= N*N:
yield make_case(N, 2, K, total, True, False, True)
yield make_case(N, 2, K, total, True, True, True)
yield make_case(N, 2, K, total, False, False, False)
yield make_case(N, 10, K, total, True, True, True)
yield make_case(N, 10, K, total, False, False, True)
yield make_case(N, 10, K, total, False, True, True)
if N >= 100:
yield make_case(N, 100, K, total, True, True, True)
yield make_case(N, 100, K, total, False, True, True)
yield make_case(N, N//2, K, total, False, False, False)
yield make_case(N, N//2, K, total, False, False, True)
yield make_case(N, N//2, K, total, False, True, False)
yield make_case(N, N//2, K, total, False, True, True)
yield make_case(N, N, K, total, False, False, False)
SUBTASKS = [
(2000, 1, 1000 * 1000000000),
(2000, 2, 1000 * 1000000000),
(18, 18, 2000),
(18, 2000, 2000),
(300, 300, 150 * 1000000000),
(2000, 2000, 1000 * 1000000000)
]
def gen_all_cases():
# Min, max
yield min_case()
yield max_case()
for n, k, tot in SUBTASKS:
for c in gen_cases_for(n, k, tot):
yield c
# Special subtask 4 stuff
yield make_case(2000, 2, 2000, 2000, False, False, False)
yield make_case(2000, 10, 2000, 2000, False, False, False)
yield make_case(2000, 100, 2000, 2000, False, False, False)
yield make_case(2000, 1000, 2000, 2000, False, False, False)
# In[108]:
ALL_CASES = list(tqdm(gen_all_cases()))
# In[109]:
# Validate
for c in ALL_CASES:
try:
c.validate()
except AssertionError:
print(c)
raise
print("Validation passed!")
# In[83]:
# Output
print(os.getcwd())
OUTPUT_DIR = "."
for i, c in enumerate(ALL_CASES):
c.save(OUTPUT_DIR, i)
# In[113]:
str(make_case(100, 2, 2000, 2000, True, True, True))
# In[ ]:
|
{"hexsha": "96b463f5749d3ae6cd36250207a5aa5bc57d2908", "size": 5355, "ext": "py", "lang": "Python", "max_stars_repo_path": "paint/data/generator.py", "max_stars_repo_name": "acio-olympiad/ACIO2022Contest2", "max_stars_repo_head_hexsha": "53c7ced66ae916b118cfab8c72e4531dd08defd2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paint/data/generator.py", "max_issues_repo_name": "acio-olympiad/ACIO2022Contest2", "max_issues_repo_head_hexsha": "53c7ced66ae916b118cfab8c72e4531dd08defd2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paint/data/generator.py", "max_forks_repo_name": "acio-olympiad/ACIO2022Contest2", "max_forks_repo_head_hexsha": "53c7ced66ae916b118cfab8c72e4531dd08defd2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4520547945, "max_line_length": 103, "alphanum_fraction": 0.5798319328, "include": true, "reason": "import numpy", "num_tokens": 1699}
|
import math
from dataclasses import dataclass
from typing import Dict
import numpy as np
import jax.numpy as jnp
from jax import random, ops
import nltk
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from data_collator import DataCollatorForTextInfilling, SentenceTokenize, DataCollatorForSentencePermutation
from transformers import AutoTokenizer
example = {"text": " My dog is cute. It loves to play in the park. There are many parks in SF."}
sent_tok = SentenceTokenize()
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
permuate_sent = DataCollatorForSentencePermutation(tokenizer)
example = sent_tok(example)
print(example['text'])
out = permuate_sent(tokenizer(example['text'], add_special_tokens=False))
example['text'] = tokenizer.decode(out['input_ids'])
print(example['text'])
masking = DataCollatorForTextInfilling(tokenizer)
out = masking(out)
example['text'] = tokenizer.decode(out['input_ids'][0])
print(example['text'])
|
{"hexsha": "50c154d549f4bde8d2749167f7a9d2e0871006ce", "size": 981, "ext": "py", "lang": "Python", "max_stars_repo_path": "sentence_permutation.py", "max_stars_repo_name": "patrickvonplaten/rotobart", "max_stars_repo_head_hexsha": "eb1482677889b6de3f1708621ad8d2da263afadb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sentence_permutation.py", "max_issues_repo_name": "patrickvonplaten/rotobart", "max_issues_repo_head_hexsha": "eb1482677889b6de3f1708621ad8d2da263afadb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sentence_permutation.py", "max_forks_repo_name": "patrickvonplaten/rotobart", "max_forks_repo_head_hexsha": "eb1482677889b6de3f1708621ad8d2da263afadb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8275862069, "max_line_length": 108, "alphanum_fraction": 0.8063200815, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 227}
|
#! /opt/anaconda3/envs/align/bin/python
# -*- coding: utf-8 -*-
# Copyright 2021 The align-experiment Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for analysis."""
import numpy as np
import scipy as sp
import scipy.stats  # ensure sp.stats is available for spearmanr below
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from os.path import dirname, abspath
import sys
d = dirname(abspath(__file__))
sys.path.append(
d
)
sys.path.append(
dirname(d)
)
from utils import get_95_ci
def calc_dist(x1, y1, x2, y2):
"""Calculate distance between two points."""
sqr = (x1 - x2)**2 + (y1 - y2)**2
return np.sqrt(sqr)
def pairwise_distance(systemA, systemB):
"""
Calculate pairwise distances between points in two systems.
Args:
- systemA and systemB: nxd
"""
n = systemA.shape[0]
B_transpose = np.transpose(systemB)
inner = -2 * np.matmul(systemA, B_transpose)
A_squares = np.sum(
np.square(systemA), axis=-1
)
A_squares = np.transpose(np.tile(A_squares, (n, 1)))
B_squares = np.transpose(
np.sum(np.square(systemB), axis=-1)
)
B_squares = np.tile(B_squares, (n, 1))
pairwise_distances = np.sqrt(
np.abs(
inner + A_squares + B_squares
)
)
return pairwise_distances
def alignment_correlation(systemA, systemB):
"""Assumes systems are in the same space."""
def f(x, y):
return np.sqrt(np.sum((x-y)**2, axis=1))
# Index of upper triangular matrices
idx_upper = np.triu_indices(systemA.shape[0], 1)
# Pairwise distance matrix between system A and system B
pairwise_both = pairwise_distance(systemA, systemB)
# Take upper diagonal of corresponding sim matrices for A->B
vec_A = f(systemA[idx_upper[0]], systemA[idx_upper[1]])
vec_B = f(systemB[idx_upper[0]], systemB[idx_upper[1]])
# Spearman correlation
r_s = sp.stats.spearmanr(vec_A, vec_B)[0]
return r_s
def plot_block_means_2conditions(df, col_name="correct", unit="block",
fn=None,
title=None):
"""Plot means of input metric by unit time and alignment condition.
Arguments:
-df: dataframe containing data for plot
-col_name: name of column to be plotted on y-axis
-unit: name of column to be plotted on x-axis
"""
color_dict = {
"aligned": "#2ab7ca",
"misaligned": "#fe4a49"
}
yaxis_dict = {
"correct": "Proportion correct trials",
"dist_from_correct": "Distance from correct response",
}
fig, ax = plt.subplots(1, 1, figsize=(6, 7), sharey=True)
plot_df = df.loc[df["trial_type"] == "trial-mainTrial"]
by_pid = plot_df.copy()
plot_df = plot_df[["pid", "align_condition", unit, col_name]].groupby(
["align_condition", unit]
).agg(
{col_name: ["mean", "std"],
"pid": ["nunique"]}
)
plot_df.reset_index(inplace=True)
plot_df.columns = [
"align_condition", unit, "mean_correct", "std_correct", "n_pid"
]
plot_df = get_95_ci(plot_df)
# Plot confidence intervals
for a_c in list(set(plot_df["align_condition"])):
ac_df = plot_df.loc[plot_df["align_condition"] == a_c]
ax.fill_between(ac_df[unit],
ac_df["upperci"], ac_df["lowerci"],
color=color_dict[a_c], alpha=0.3)
# Plot mean lines
for a_c in list(set(plot_df["align_condition"])):
ac_df = plot_df.loc[plot_df["align_condition"] == a_c]
ax.scatter(ac_df[unit],
ac_df["mean_correct"],
color=color_dict[a_c],
marker="x")
ax.plot(ac_df[unit],
ac_df["mean_correct"],
color=color_dict[a_c])
ax.set_xlabel(unit.capitalize())
ax.set_ylabel(yaxis_dict[col_name])
ax.set_xticks([x for x in range(len(list(set(plot_df[unit]))))],
[x for x in range(1, 6)])
if col_name == "correct":
ax.set_ylim(-0.05, 1.05)
chance = 1/6
if col_name == "dist_from_correct":
# Calculate chance performance
a = np.array(
[[0, 0], [0, 0.6], [0, 1], [0.4, 1], [1, 1], [1, 0.6]])
pw = pairwise_distance(a, a)
chance = np.mean(pw)
ax.set_xticks(
[
x for x
in range(
int(
df.loc[df["trial_type"] == "trial-mainTrial", unit].max()
) + 1
)
]
)
ax.set_xticklabels(
[
x + 1 for x
in range(
int(
df.loc[df["trial_type"] == "trial-mainTrial", unit].max()
) + 1
)
]
)
ax.hlines(chance, 0, 4, color="lightgrey", linestyles="dashed")
patchList = []
for key in color_dict:
data_key = mpatches.Patch(color=color_dict[key], label=key)
patchList.append(data_key)
# plt.ylabel("Proportion correct trials")
plt.legend(handles=patchList)
if title is not None:
plt.title(title)
if fn is not None:
plt.savefig(fn)
plt.show()
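# Hypothetical usage sketch (not part of the original module; the two point
# sets are invented): checks that pairwise_distance() agrees with calc_dist()
# and reports the alignment correlation between two similar 2-D systems.
if __name__ == "__main__":
    demo_rng = np.random.default_rng(0)
    sys_a = demo_rng.random((6, 2))
    sys_b = sys_a + 0.05 * demo_rng.random((6, 2))  # slightly perturbed copy
    dists = pairwise_distance(sys_a, sys_b)
    print(dists[0, 1], calc_dist(*sys_a[0], *sys_b[1]))  # the two values agree
    print("spearman r:", alignment_correlation(sys_a, sys_b))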
|
{"hexsha": "c134f0ac889bbcca03c8fa629b1fd2015e4e019e", "size": 5847, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/processing/process_utils.py", "max_stars_repo_name": "kaarinaaho/learning_alignment", "max_stars_repo_head_hexsha": "605f730834b7ef2d57b01eb519a10bf60c93f7bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/processing/process_utils.py", "max_issues_repo_name": "kaarinaaho/learning_alignment", "max_issues_repo_head_hexsha": "605f730834b7ef2d57b01eb519a10bf60c93f7bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/processing/process_utils.py", "max_forks_repo_name": "kaarinaaho/learning_alignment", "max_forks_repo_head_hexsha": "605f730834b7ef2d57b01eb519a10bf60c93f7bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6617647059, "max_line_length": 80, "alphanum_fraction": 0.5756798358, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1464}
|
\section{Analysis of soft matter scattering}
The use of neutron and X-ray scattering experiments for the study of soft matter is well developed, with early research into the structure of phospholipid monolayers by reflectometry methods being conducted in the late 1970s by Albrecht \emph{et al.}\autocite{albrecht_polymorphism_1978}
Meanwhile, the work of Kratky and Porod,\autocite{kratky_diffuse_1949} who used small angle X-ray scattering\footnote{Generally abbreviated to SAS, with SAXS indicating the use of X-rays and SANS neutrons} for the study of colloidal systems, was published in 1949.
Since these early works, instrumentation developments have enabled more challenging experiments to be conducted, such as time-resolved studies \autocite{jensen_monitoring_2014} and the study of floating phospholipid bilayers.\autocite{rondelli_reflectivity_2012}
However, the analysis of soft matter scattering has changed little since these early works, still typically involving the use of very coarse models.
These include the shape-based modelling common in SAS \sidecite[][see Section~\ref{sec:sasanal}]{hassan_small_20032} and reflectometry analysis.\sidecite[][see Section~\ref{sec:sasanal}]{campbell_structure_20183,lu_analysis_1996}
More sophisticated model refinements have been developed, such as the use of Monte-Carlo sampling,\autocite{pedersen_monte_2002} differential evolution optimisation,\sidecite[abbreviated to DE]{wormington_characterization_19992} and Bayesian inference.\autocite{nelson_refnx_2019,larsen_analysis_2018}
However, there has been little change in the definition of the models that underpin the analysis processes.
Recently, there have been movements towards the use of atomistic modelling techniques\footnote{Such as molecular dynamics (MD).} to augment, and assist, the analysis of soft matter scattering measurements, in a multi-modal approach.\autocite{scoppola_combining_2018}
Much of the work relating to the use of atomistic simulation for the analysis of SAS measurements has been focused on the study of protein molecules in solution.\footnote{The historical context of this is discussed briefly in Chapter~\ref{smallangle}.}
This has allowed for a more profound understanding of aspects of biology such as the conformational states available to protein molecules in solution.\autocite{bowerman_determining_2017}
The uptake of atomistic simulation for the analysis of SAS from systems such as micelles has been slower, in part due to the more complex conformational landscape available to these systems under standard conditions.
However, the work of Hargreaves \emph{et al.} paired atomistic simulation with total scattering measurements\footnote{In the form of Empirical Potential Structure Refinement.} to resolve the structure of a simple short-tail surfactant micelle.\autocite{hargreaves_atomistic_2011}
Furthermore, the work of Ivanovi\'{c} \emph{et al.} used scattering experiments to refine the output of MD simulations of micelles of a pre-defined size.\autocite{ivanovic_temperature-dependent_2018}
Both of these examples required significant computational resources; in the former case, the computational time taken was quoted as 200 days, while the latter required the running of multiple simulations at different micelle sizes in order to determine the appropriate simulation.
The use of atomistic simulation for the analysis of reflectometry measurements of soft matter systems began with the work of Miller \emph{et al.} and Anderson and Wilson,\autocite{miller_monte_2003,anderson_molecular_2004} where atomistic simulations\footnote{Monte Carlo and MD respectively.} were used to study polymer self-assembly at the oil-water interface.
These simulation trajectories were then compared with experimental neutron reflectometry\footnote{Abbreviated to NR.} measurements.
Dabkowska \emph{et al.} also used atomistic simulation and NR measurements to study the structure of a surfactant monolayer at the air-water interface, providing the first example of a direct comparison between experimental reflectometry data and that determined from simulation.\autocite{dabkowska_modulation_2014}
To date, only one work has used coarse-grained MD simulation to aid in the analysis of NR: that of Koutsioubas.\autocite{koutsioubas_combined_2016}
This work made use of the MARTINI coarse-grained potential model to simulate a phospholipid bilayer and was compared with experimental NR measurements.
|
{"hexsha": "32f447900e6636e9c079048b45e641ad91e49165", "size": 4458, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "reports/chapters/introduction/scattering.tex", "max_stars_repo_name": "arm61/thesis", "max_stars_repo_head_hexsha": "4c76e837b1041472a5522427de0069a5a28d40c9", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-06-04T20:53:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-01T06:25:20.000Z", "max_issues_repo_path": "reports/chapters/introduction/scattering.tex", "max_issues_repo_name": "arm61/thesis", "max_issues_repo_head_hexsha": "4c76e837b1041472a5522427de0069a5a28d40c9", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-06-04T17:11:33.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-04T17:11:33.000Z", "max_forks_repo_path": "reports/chapters/introduction/scattering.tex", "max_forks_repo_name": "arm61/thesis", "max_forks_repo_head_hexsha": "4c76e837b1041472a5522427de0069a5a28d40c9", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 185.75, "max_line_length": 362, "alphanum_fraction": 0.8355764917, "num_tokens": 1012}
|
{-# OPTIONS --sized-types #-}
module SList.Order {A : Set}(_≤_ : A → A → Set) where
open import List.Sorted _≤_
open import Size
open import SList
data _*≤_ : {ι : Size} → A → SList A {ι} → Set where
genx : {ι : Size}{b : A}
→ (_*≤_) {↑ ι} b snil
gecx : {ι : Size}{b x : A}{xs : SList A {ι}}
→ b ≤ x
→ b *≤ xs
→ b *≤ (x ∙ xs)
data _≤*_ : {ι : Size} → SList A {ι} → A → Set where
lenx : {ι : Size}{t : A}
→ (_≤*_) {↑ ι} snil t
lecx : {ι : Size}{x t : A}{xs : SList A {ι}}
→ x ≤ t
→ xs ≤* t
→ (x ∙ xs) ≤* t
|
{"hexsha": "f1aab2fd78ddcd60d189b7079ad13e2aa3dac2f9", "size": 678, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "agda/SList/Order.agda", "max_stars_repo_name": "bgbianchi/sorting", "max_stars_repo_head_hexsha": "b8d428bccbdd1b13613e8f6ead6c81a8f9298399", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2015-05-21T12:50:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-24T22:11:15.000Z", "max_issues_repo_path": "agda/SList/Order.agda", "max_issues_repo_name": "bgbianchi/sorting", "max_issues_repo_head_hexsha": "b8d428bccbdd1b13613e8f6ead6c81a8f9298399", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "agda/SList/Order.agda", "max_forks_repo_name": "bgbianchi/sorting", "max_forks_repo_head_hexsha": "b8d428bccbdd1b13613e8f6ead6c81a8f9298399", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.12, "max_line_length": 53, "alphanum_fraction": 0.3716814159, "num_tokens": 253}
|
% $Id$
%
% ObitSingle dish and OTF/GBT Tables class definitions
%
%\def\section #1.#2.{\medskip\leftline{\bf #1. #2.}\smallskip}
%\def\bfitem #1#2{{{\bf (#1)}}{\it #2}}
\def\bfi #1#2{{\bf #1:}{ #2}\par}
\def\extname #1#2{ {\bf 1} Extension Name {\it #2}\smallskip}
%\def\tabname #1#2{\bfitem 1{#1 Table Name #2}}
\def\keyword {\leftline{\bf Keywords:}}
%\def\table {\leftline{\bf Table Definition:}}
\documentclass[11pt]{article}
\usepackage{PennArrayDoc}
\begin{document}
\setcounter{section}{0}
% Title page
\vskip 5cm
\centerline{\ttlfont Single Dish Tables}
\vskip 1cm
\centerline{\ttlfont Obit: }
\centerline{\ttlfont Merx mollis mortibus nuper}
\vskip 3cm
\centerline{\secfont version: 1.1.1 \today}
\vskip 1cm
\centerline{\secfont W. D. Cotton}
\clearpage
% Table Of Contents
\tableofcontents
\cleardoublepage
\section {Introduction}
This document is intended to define the contents and meaning of the
various tables developed for single dish radio astronomical data,
especially ``On--the--fly'' (OTF) data from the Penn Array bolometer
camera on the GBT.
This software works inside the Obit software package.
Usage documentation uses the doxygen system and is in separate
documents.
\section{Obit Tables}
This document uses LaTeX macros which are translated by a Perl
script (bin/PennArrayTables.pl) into the C source code defining the
classes used to access these tables.
The tables defined in this document are both those needed for the OTF
data structure and for reading the GBT archive data files.
\subsection{LaTeX Macros}
Tables used in Obit are defined in this document by using LaTeX
macros to formally define each table.
These macros are listed below; an illustrative example follows the list.
\begin{itemize}
\item tabletitle\{Title of table, e.g. ``OTFArrayGeom''\}
\item tablename\{Name of table, e.g. ``OTFArrayGeom''\}
\item tableintro\{Short description of class\}
\item tableover\{Overview of usage of class\}
\item tablekey[\{name\}\{type code\}\{ software name\} \{default value\}
\{(range of indices)\} \{description\}]\\
Defines Table keyword.
\item tablecol[\{name\}\{units\}\{type code\} \{(dimensionality)\} \{software
name\} \{description\}]\\
Defines Table column..
\end{itemize}
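As an illustration, a table definition written with these macros might contain
entries such as the following (the keyword and column names here are invented
purely for this example):
\begin{verbatim}
\tablekey[{"EXAMPLE "}{D}{Example}{0.0}{}
{An illustrative double precision keyword.}
]
\tablecol[{"TIME    "}{"DAYS"}{D}{(1)}{Time}
{An illustrative time column, one value per row.}
]
\end{verbatim}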
\section{ObitOTF data}
The ObitOTF data structure class and related classes are for storing
``On--The--Fly'' single dish radio astronomy data.
The data structure is patterned after the AIPS single dish data format
but, due to AIPS table naming restrictions, only a FITS binary table
implementation is currently supported.
An ObitOTF data file can be thought of as a relational database.
The sky brightness measurements are kept in the OTFScanData
table together with some auxiliary information such as the nominal sky
pointing position, time etc.
Other auxiliary information, together with calibration and editing information, is
kept in other tables (see below).
\subsection{ObitOTF tables}
The basic calibration strategy is to manipulate tables which tell how
to transform the data from ``raw'' data as tabulated to ``calibrated''
data.
There are two basic calibration tables, with the same internal
structure.
The OTFSoln table is a differential calibration relative to a
potential prior calibration.
A particular calibration operation determines a OTFSoln table.
An OTFCal table is a cumulative table which is obtained from a
(possible) prior calibration corrected by an OTFSoln table.
The tables in an ObitOTF data file include:
\begin{itemize}
\item OTFScanData\\
Raw sky brightness data and auxiliary information.
\item OTFArrayGeom\\
Table giving the geometric offsets of a feed/detector array from the
pointing axis of the telescope.
\item OTFTarget\\
Table of sources or targets.
These are referred to in the OTFScanData table as an index into this table.
\item OTFIndex\\
Scan table [optional] giving start and stop times and row numbers in
the OTFScanData table as well as targets etc.
This index is used to improve data access times.
\item OTFFlag\\
Table describing ``flagged'' data - data to be ignored.
\item OTFCal\\
Cumulative calibration table.
This table gives multiplicative and additive corrections to the raw
sky brightness measurements in the OTFScanData table as well as
corrections to the nominal telescope pointing direction.
\item OTFSoln\\
Differential calibration (``Solution'') table.
\end{itemize}
\subsection{ObitOTF Data Structure}
Data in the OTFScanData table are stored in table records with a row
corresponding to the data obtained in a given integration.
The row contains a number of descriptive columns giving time,
celestial pointing etc. followed by a column containing a regular data
array.
The data in the OTFScanData table are all stored as floats to increase
access performance.
In general, the data array is multidimensional, with
different quantities along different axes (feed/detector, frequency,
polarization).
The dimensionality, types and axis values are given in the header of
the OTFScanData table as the TDIMn (dimensionality), mCTYPn (axis
type), mCDLTn (increment in axis values between pixels), mCRPXn (axis
reference pixel), mCROTn (axis rotation angle) and mCRVLn (coordinate
on axis at reference pixel) keywords where m is the axis number and n
is the data column in the table.
(This is the standard FITS convention for conveying this information.)
The following illustration is for the Penn Array with one Stokes
(total power), 64 detectors (FEED) and 1 frequency; the data column is
number 9.
\begin{verbatim}
TDIM9 = '(1,64,1)' / size of the multidimensional array
TZERO1 = 2.452814500000E+06 / Offset of Date from JD
1CTYP9 = 'STOKES ' / Stokes axis
2CTYP9 = 'FEED ' / Feed/detector axis
3CTYP9 = 'FREQ ' / Frequency axis
1CDLT9 = 1.000000E+00 / Stokes ``increment''
2CDLT9 = 1.000000E+00 / Feed ``increment''
3CDLT9 = 1.000000E+00 / Frequency increment (Hz)
1CRPX9 = 1.000000E+00 / Stokes reference pixel
2CRPX9 = 1.000000E+00 / Feed reference pixel
3CRPX9 = 1.000000E+00 / Frequency reference pixel
1CROT9 = 0.000000E+00 / Stokes ``rotation'' (no meaning)
2CROT9 = 0.000000E+00 / Feed ``rotation'' (no meaning)
3CROT9 = 0.000000E+00 / Frequency ``rotation'' (no meaning)
1CRVL9 = 1.000000000000E+00 / Stokes 'I'
2CRVL9 = 1.000000000000E+00 / ``Feed'' 1
3CRVL9 = 9.000000000000E+10 / Frequency in Hz
\end{verbatim}
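The following sketch shows how such a table might be inspected from
Python using astropy.io.fits; this is an illustration only (Obit itself
accesses these tables through its own I/O classes), and the file name,
the use of the table name as EXTNAME and the data column number 9 are
assumptions taken from the example above.
\begin{verbatim}
from astropy.io import fits

with fits.open("myOTF.fits") as hdul:
    tab = hdul["OTFScanData"]        # assumes EXTNAME matches the table name
    print(tab.header["TDIM9"])       # e.g. '(1,64,1)' as in the example above
    row = tab.data[0]
    print(row["TIME"], row["RA"], row["DEC"])
    data = row["DATA"]               # multidimensional column, shaped per TDIMn
    print(data.shape)
\end{verbatim}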
\section{ObitSD software}
The high level view of the Obit system is included in file
OBITdoc.ps.
The class documentation for the software for processing ObitOTF data
is derived from the source code using doxygen and is available
in html format starting at doc/doxygen/html/index.html.
\section{Building ObitSD}
The ObitSD package is an add-on to the basic Obit package, which should
be installed first.
See OBITdoc.ps for details.
ObitSD comes with a configure script to construct the Makefiles to build
ObitSD.
Note: there are a number of third party packages as well as basic Obit
that should be installed first.
The basic installation is thus:
\begin{verbatim}
% setenv OBIT /where/ever/you/installed/Obit
% gtar xzvf ObitSD1.0.tgz
% cd ObitSD
% ./configure
% make
\end{verbatim}
For ``/where/ever/you/installed/Obit'' substitute the actual path of
the Obit base directory.
Alternatively, use the ``--with-obit=DIR'' option with configure.
In addition to the packages used by basic Obit, ObitSD uses the GSL
(GNU Scientific Library) package.
The location of GSL can be specified with the ``--with-gsl=DIR''
configure option if configure cannot find it.
When using ObitSD from python, set the PYTHONPATH environment
variable to ``ObitSD/python:Obit/python'', substituting the base
directories of the ObitSD and Obit packages for ObitSD and Obit.
\clearpage
%%%%%%%%%%%%%%% ObitTableOTFArrayGeom Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableOTFArrayGeom}]
ObitTableOTFArrayGeom Class
\tabletitle{OTFArrayGeom}
% table name
\tablename{OTFArrayGeom}
\tableintro[
{This class contains tabular data and allows access.
"OTFArrayGeom" contains information about the locations and characteristics
of detectors in the camera, the location of the telescope and time
related information.}
]
\tableover{
In memory tables are stored in a fashion similar to how they are
stored on disk - in large blocks in memory rather than structures.
Due to the word alignment requirements of some machines, they are
stored by order of the decreasing element size:
double, float, long, int, short, char rather than the logical order.
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"TELEX "}{D}{TeleX}{0.0}{}
{Telescope X coordinate. (meters, earth center)}
]
\tablekey[{"TELEY "}{D}{TeleY}{0.0}{}
{Telescope Y coordinate. (meters, earth center)}
]
\tablekey[{"TELEZ "}{D}{TeleZ}{0.0}{}
{Telescope Z coordinate. (meters, earth center)}
]
\tablekey[{"RDATE "}{A}{RefDate}{"YYYYMMDD"}{}
{Reference date as "YYYYMMDD"}
]
\tablekey[{"DEGPDY "}{D}{DegDay}{360.0}{}
{Earth rotation rate (deg/IAT day)}
]
\tablekey[{"POLARX "}{E}{PolarX}{0.0}{}
{Polar position X (meters) on ref. date}
]
\tablekey[{"POLARY "}{E}{PolarY}{0.0}{}
{Polar position Y (meters) on ref. date}
]
\tablekey[{"GSTIA0 "}{D}{GSTiat0}{0.0}{}
{GST at time=0 (degrees) on the reference date}
]
\tablekey[{"UT1UTC "}{E}{ut1Utc}{}{}
{UT1-UTC (time sec.) }
]
\tablekey[{"DATUTC "}{E}{dataUtc}{}{}
{data time-UTC (time sec.)}
]
\tablekey[{"IATUTC "}{E}{iatUtc}{}{}
{IAT - UTC (sec).}
]
\tablekey[{"TIMSYS"}{A}{TimeSys}{"UTC"}{}
{Time system, 'IAT' or 'UTC'}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"DETECTOR"}{" " }{J}{(1)}{detector}
{Detector number}
]
\tablecol[{"AZ\_OFF"}{"DEGREE " }{E}{(1)}{azOff}
{``Azimuth'' offset from nominal pointing, this is formally ``cross
elevation'' in GBT-speak and is the offset on the sky in the direction
of azimuth.
}
]
\tablecol[{"EL\_OFF"}{"DEGREE " }{E}{(1)}{elOff}
{Elevation offset from nominal pointing}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{W. D. Cotton}{14/03/2003}{Revision 1: Initial definition}]
\end{history}
%
\clearpage
%%%%%%%%%%%%%%% ObitTableOTFCal Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableOTFCal}]
ObitTableOTFCal Class
\tabletitle{OTFCal}
% table name
\tablename{OTFCal}
\tableintro[
{This class contains tabular data and allows access.
"OTFCal" contains calibration information for OTF data.
Calibrated data for each detector are:
$${\rm cal\_data}\ = \ {\rm mult} ({\rm raw\_data}\ -\ {\rm cal}\ -\
{\rm add}\ -\ {\rm poly})$$
where cal is the calibration noise value for ``Cal on'' data and 0 for
``Cal off'' data and poly is evaluated in the direction of the detector.}
]
\tableover{
In memory tables are stored in a fashion similar to how they are
stored on disk - in large blocks in memory rather than structures.
Due to the word alignment requirements of some machines, they are
stored by order of the decreasing element size:
double, float, long, int, short, char rather than the logical order.
The details of the storage in the buffer are kept in the ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"NO\_DETEC"}{J}{numDet}{1}{()}
{Number of detectors}
]
\tablekey[{"NO\_POLY"}{J}{numPoly}{1}{()}
{Number of polynomial coefficients describing atmospheric emission.
0 => no polynomial model.}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"TIME "}{"DAYS " }{E}{(1)}{Time}
{The center time.}
]
\tablecol[{"TIME\_INT"}{"DAYS " }{E}{(1)}{TimeI}
{The integration time.}
]
\tablecol[{"TARGET"}{" " }{J}{(1)}{Target}
{Celestial target, as index in target table.}
]
\tablecol[{"DELTA\_AZ"}{"DEGREE " }{E}{(1)}{dAz}
{Correction to the ``Azimuth'' for all detectors.
This is formally ``cross elevation'' in GBT-speak and is the offset on
the sky in the direction of azimuth.}
]
\tablecol[{"DELTA\_El"}{"DEGREE " }{E}{(1)}{dEl}
{Correction to the Elevation for all detectors.}
]
\tablecol[{"CAL "}{" " }{E}{(numDet)}{cal}
{Cal value in units of raw data per detector, to be subtracted from
``Cal on'' data.}
]
\tablecol[{"ADD "}{" " }{E}{(numDet)}{add}
{Additive (subtractive actually) term per detector}
]
\tablecol[{"MULT "}{" " }{E}{(numDet)}{mult}
{Multiplicative term per detector}
]
\tablecol[{"WEIGHT "}{" " }{E}{(numDet)}{wt}
{ Weight value per detector}
]
\tablecol[{"POLY "}{" " }{E}{(numPoly)}{poly}
{Polynomial atmosphere model, expansion in RA, dec about pointing position}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{W. D. Cotton}{03/04/2003}{Revision 1: Initial definition}]
\end{history}
%
\clearpage
%%%%%%%%%%%%%%% ObitTableOTFModel Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableOTFModel}]
ObitTableOTFModel Class
\tabletitle{CLEAN Components Table}
% table name
\tablename{OTFModel}
\tableintro[
{This class contains tabular data and allows access.
"OTFModel" table contains image model components which may be derived
via a CLEAN or other fitting process.
An OTFModel is the front end to a persistent disk resident structure.
Only FITS data are supported.
This class is derived from the ObitTable class. }
]
\tableover{
In memory tables are stored in a fashion similar to how they are
stored on disk - in large blocks in memory rather than structures.
Due to the word alignment requirements of some machines, they are
stored by order of the decreasing element size:
double, float, long, int, short, char rather than the logical order.
The details of the storage in the buffer are kept in the
ObitTableDesc.
A number of model types are supported as described with their
parameters in the following:
\begin{itemize}
\item Point\\
A Point model has a position and a flux but no extent on the sky.
This model is indicated by the absence of the Type column or a value
of 0.
No additional parameters are needed.
\item Gaussian on Sky\\
This is a Gaussian shaped model.
This model is indicated by a value in the Type column of 1.
The extra model parameters are:
\begin{enumerate}
\item Major axis size in asec.
\item Minor axis size in asec.
\item position angle on sky in deg.
\end{enumerate}
\item Convolved Gaussian\\
This is a Gaussian shaped model.
This model is indicated by a value in the Type column of 2.
The extra model parameters are:
\begin{enumerate}
\item Major axis size in asec.
\item Minor axis size in asec.
\item position angle on sky in deg.
\end{enumerate}
\item Uniform optically thin sphere\\
This corresponds to a uniformly filled sphere model which is optically
thin.
This model is indicated by a value in the Type column of 3.
The extra model parameters are:
\begin{enumerate}
\item Radius in asec.
\end{enumerate}
\end{itemize}
}
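The handling of the optional Type column described above can be
illustrated with the following sketch (astropy.io.fits; the file name
and the use of the table name as EXTNAME are assumptions), which lists
the components and treats an absent Type column as point models.
\begin{verbatim}
from astropy.io import fits

with fits.open("myModel.fits") as hdul:
    tab = hdul["OTFModel"].data
    has_type = "TYPE" in tab.columns.names   # TYPE column may be absent
    for row in tab:
        ctype = row["TYPE"] if has_type else 0   # 0 (or absent) => point
        print(row["X"], row["Y"], row["FLUX"], ctype)
\end{verbatim}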
% Table keyword description
\begin{keywords}
\tablekey[{"NO\_PARM"}{J}{numParm}{0}{()}
{The number of model parameters}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"X "}{"Degree " }{E}{(1)}{X}
{``X'' position of component centroid as offset from reference position}
]
\tablecol[{"Y "}{"Degree " }{E}{(1)}{Y}
{``Y'' position of component centroid as offset from reference position}
]
\tablecol[{"FLUX "}{"Jansky " }{E}{(1)}{Flux}
{Flux density of component}
]
\tablecol[{"TYPE "}{" " }{J}{(1)}{Type}
{Component type: 0 (or not present)=point, 1=Gaussian on sky,
2=convolved Gaussian, 3=uniform optically thin sphere}
]
\tablecol[{"PARMS "}{" " }{E}{(numParm)}{Parms}
{Model components as needed by model}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{W. D. Cotton}{17/12/2003}{Revision 1: Initial definition}]
\end{history}
%
%
\clearpage
%%%%%%%%%%%%%%% ObitTableSkyModel Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableSkyModel}]
ObitTableSkyModel Class
\tabletitle{SkyModel}
% table name
\tablename{SkyModel}
\tableintro[
{This class contains tabular data and allows access.
"SkyModel" contains a sky brightness model in terms of discrete components.}
]
\tableover{
In memory tables are stored in a fashion similar to how they are
stored on disk - in large blocks in memory rather than structures.
Due to the word alignment requirements of some machines, they are
stored by order of the decreasing element size:
double, float, long, int, short, char rather than the logical order.
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"RA "}{E}{RA}{0.0}{}
{Tangent point RA (deg)}
]
\tablekey[{"DEC "}{E}{Dec}{0.0}{}
{Tangent point Dec (deg)}
]
\tablekey[{"PROJ "}{A}{Proj}{"-SIN"}{}
{Projection code '-SIN', '-ARC', '-TAN'}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"RA\_OFF "}{"DEGREE " }{E}{(1)}{RAOff}
{Right ascension offset from tangent point}
]
\tablecol[{"DEC\_OFF "}{"DEGREE " }{E}{(1)}{DecOff}
{Declination offset from tangent point}
]
\tablecol[{"FLUX "}{"JY " }{E}{(1)}{Flux}
{Flux density}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{W. D. Cotton}{14/03/2003}{Revision 1: Initial definition}]
\end{history}
%
\clearpage
%%%%%%%%%%%%%%% ObitTableOTFSoln Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableOTFSoln}]
ObitTableOTFSoln Class
\tabletitle{OTFSoln}
% table name
\tablename{OTFSoln}
\tableintro[
{This class contains tabular data and allows access.
"OTFSoln" contains calibration solution information for OTF data.
Calibrated data for each detector are:
$${\rm cal\_data}\ = \ {\rm mult} ({\rm raw\_data}\ -\ {\rm cal}\ -\
{\rm add}\ -\ {\rm poly})$$
where cal is the calibration noise value for ``Cal on'' data and 0 for
``Cal off'' data and poly is evaluated in the direction of the detector.
OTFSoln tables may be applied either to an OTFCal table or directly to
the data in a self-calibration mode.}
]
\tableover{
In memory tables are stored in a fashion similar to how they are
stored on disk - in large blocks in memory rather than structures.
Due to the word alignment requirements of some machines, they are
stored by order of the decreasing element size:
double, float, long, int, short, char rather than the logical order.
The details of the storage in the buffer are kept in the ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"NO\_DETEC"}{J}{numDet}{1}{()}
{Number of detectors}
]
\tablekey[{"NO\_POLY"}{J}{numPoly}{1}{()}
{Number of polynomial coefficients describing atmospheric emission.
0 => no polynomial model.}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"TIME "}{"DAYS " }{E}{(1)}{Time}
{The center time.}
]
\tablecol[{"TIME\_INT"}{"DAYS " }{E}{(1)}{TimeI}
{The integration time.}
]
\tablecol[{"TARGET"}{" " }{J}{(1)}{Target}
{Celestial target, as index in target table.}
]
\tablecol[{"DELTA\_AZ"}{"DEGREE " }{E}{(1)}{dAz}
{Correction to the ``Azimuth'' for all detectors.
This is formally ``cross elevation'' in GBT-speak and is the offset on
the sky in the direction of azimuth.}
]
\tablecol[{"DELTA\_EL"}{"DEGREE " }{E}{(1)}{dEl}
{Correction to the El for all detectors.}
]
\tablecol[{"CAL "}{" " }{E}{(numDet)}{cal}
{Cal value in units of raw data per detector, to be subtracted from
``Cal on'' data.}
]
\tablecol[{"ADD "}{" " }{E}{(numDet)}{add}
{Additive (subtractive actually) term per detector}
]
\tablecol[{"MULT "}{" " }{E}{(numDet)}{mult}
{Multiplicative term per detector}
]
\tablecol[{"WEIGHT "}{" " }{E}{(numDet)}{wt}
{ Weight value per detector}
]
\tablecol[{"POLY "}{" " }{E}{(numPoly)}{poly}
{Polynomial atmosphere model, expansion in RA, dec about pointing position}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{W. D. Cotton}{05/04/2003}{Revision 1: Initial definition}]
\end{history}
%
%
\clearpage
%%%%%%%%%%%%%%% ScanData Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableOTFScanData}]
ObitTableOTFScanData Class
\tabletitle{OTFScan data}
% table name
\tablename{OTFScanData}
\tableintro[
{This class contains tabular data and allows access.
An OTFScanData table has ``on the fly'' mode observational data from the
bolometer array.}
]
\tableover{
In memory tables are stored in a fashion similar to how they are
stored on disk - in large blocks in memory rather than structures.
Due to the word alignment requirements of some machines, they are
stored by order of the decreasing element size:
double, float, long, int, short, char rather than the logical order.
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"NO\_DETEC"}{J}{numDet}{}{()}
{The number of detectors.}
]
\tablekey[{"ORIGIN"}{A}{origin}{}{}
{Originator of file}
]
\tablekey[{"OBJECT"}{A}{object}{}{}
{Name of object}
]
\tablekey[{"TELESCOP"}{A}{teles}{}{}
{Telescope used}
]
\tablekey[{"DATE-OBS"}{A}{obsdat}{}{}
{Date (yyyy-mm-dd) of observation}
]
\tablekey[{"EPOCH"}{E}{epoch}{}{}
{Celestial coordinate equinox}
]
\tablekey[{"BUNIT"}{A}{bunit}{}{}
{Data units}
]
\tablekey[{"OBSRA"}{D}{obsra}{}{}
{Observed Right Ascension in deg.}
]
\tablekey[{"OBSDEC"}{D}{obsdec}{}{}
{Observed declination in deg.}
]
\tablekey[{"BEAMSIZE"}{E}{beamSize}{0.00111}{}
{Gaussian FWHM of telescope beam size.}
]
\tablekey[{"DIAMETER"}{E}{diameter}{100.0}{}
{Diameter of telescope in meters.}
]
\tablekey[{"OTFTYPE"}{A}{OTFType}{"Unknown"}{}
{Type of data: ``DCR'': GBT DCR, ``SP'': GBT Spectral processor,
``CCB'':CalTech Continuum Backend, ``PAR'':Penn Array Receiver}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"TIME "}{"DAYS " }{E}{(1)}{Time}
{The center time.}
]
\tablecol[{"TIME\_INT"}{"DAYS " }{E}{(1)}{TimeI}
{The integration time.}
]
\tablecol[{"TARGET"}{" " }{E}{(1)}{Target}
{Celestial target, as index in target table.}
]
\tablecol[{"Scan"}{" " }{E}{(1)}{Scan}
{Observing scan index.}
]
\tablecol[{"RA "}{"DEGREE " }{E}{(1)}{RA}
{Nominal RA of array center}
]
\tablecol[{"DEC "}{"DEGREE" }{E}{(1)}{Dec}
{Nominal Dec of array center}
]
\tablecol[{"ROTATE "}{" " }{E}{(1)}{rotate}
{Rotation of array on sky (parallactic angle)}
]
\tablecol[{"CAL "}{" " }{E}{(1)}{cal}
{if > 0 then the cal source is on.}
]
\tablecol[{"DATA "}{" " }{E}{(numDet)}{data}
{Detector sample data per detector}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{W. D. Cotton}{14/03/2003}{Revision 1: Initial definition}]
\modhistory[{W. D. Cotton}{14/09/2003}{Added diameter, change name to
OTFScanData}]
\end{history}
%
\clearpage
%%%%%%%%%%%%%%% ObitTableOTFTarget Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableOTFTarget}]
ObitTableOTFTarget Class
\tabletitle{Target table for OTF data documentation}
% table name
\tablename{OTFTarget}
\tableintro[
{This class contains tabular data and allows access.
OTFTarget contains information about astronomical sources.
An ObitTableOTFTarget is the front end to a persistent disk resident structure.
Only FITS cataloged data are supported.
This class is derived from the ObitTable class. }
]
\tableover{
In memory tables are stored in a fashion similar to how they are
stored on disk - in large blocks in memory rather than structures.
Due to the word alignment requirements of some machines, they are
stored by order of the decreasing element size:
double, float, long, int, short, char rather than the logical order.
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"VELTYP "}{A}{velType}{}{}
{Velocity type,}
]
\tablekey[{"VELDEF "}{A}{velDef}{}{}
{Velocity definition 'RADIO' or 'OPTICAL'}
]
\tablekey[{"FREQID "}{J}{FreqID}{0}{}
{The Frequency ID for which the source parameters are relevant.}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"ID. NO. "}{" " }{J}{(1)}{TargID}
{Target ID}
]
\tablecol[{"TARGET "}{" " }{A}{(16)}{Target}
{Target name }
]
\tablecol[{"QUAL "}{" " }{J}{(1)}{Qual}
{Target qualifier}
]
\tablecol[{"CALCODE "}{" " }{A}{(4)}{CalCode}
{Calibrator code}
]
\tablecol[{"IFLUX "}{"JY " }{E}{(1)}{IFlux}
{Total Stokes I flux density}
]
\tablecol[{"QFLUX "}{"JY " }{E}{(1)}{QFlux}
{Total Stokes Q flux density}
]
\tablecol[{"UFLUX "}{"JY " }{E}{(1)}{UFlux}
{Total Stokes U flux density}
]
\tablecol[{"VFLUX "}{"JY " }{E}{(1)}{VFlux}
{Total Stokes V flux density}
]
\tablecol[{"FREQOFF "}{"HZ " }{D}{(1)}{FreqOff}
{Frequency offset (Hz) from nominal}
]
\tablecol[{"BANDWIDTH"}{"HZ " }{D}{(1)}{Bandwidth}
{Bandwidth}
]
\tablecol[{"RAEPO "}{"DEGREES " }{D}{(1)}{RAMean}
{Right ascension at mean EPOCH (actually equinox) }
]
\tablecol[{"DECEPO "}{"DEGREES " }{D}{(1)}{DecMean}
{Declination at mean EPOCH (actually equinox) }
]
\tablecol[{"EPOCH "}{"YEARS " }{D}{(1)}{Epoch}
{Mean Epoch (really equinox) for position in yr. since year 0.0}
]
\tablecol[{"RAAPP "}{"DEGREES " }{D}{(1)}{RAApp}
{Apparent Right ascension }
]
\tablecol[{"DECAPP "}{"DEGREES " }{D}{(1)}{DecApp}
{Apparent Declination}
]
\tablecol[{"LSRVEL "}{"M/SEC " }{D}{(1)}{LSRVel}
{LSR velocity per IF }
]
\tablecol[{"RESTFREQ"}{"HZ " }{D}{(1)}{RestFreq}
{Line rest frequency per IF }
]
\tablecol[{"PMRA "}{"DEG/DAY " }{D}{(1)}{PMRa}
{Proper motion (deg/day) in RA}
]
\tablecol[{"PMDEC "}{"DEG/DAY " }{D}{(1)}{PMDec}
{Proper motion (deg/day) in declination}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{W. D. Cotton}{10/07/2003}{Revision 1: Initial version}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% ObitTableOTFIndex Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableOTFIndex}]
ObitTableOTFIndex Class
\tabletitle{Index table for OTF data}
% table name
\tablename{OTFIndex}
\tableintro[
{This class contains tabular data and allows access.
ObitTableOTFIndex contains an index for an OTF data file giving the times,
target and datum range for a sequence of scans.
A scan is a set of observations in the same mode and on the same target.
An ObitTableOTFIndex is the front end to a persistent disk resident structure.
This class is derived from the ObitTable class. }
]
\tableover{
In memory tables are stored in a fashion similar to how they are
stored on disk - in large blocks in memory rather than structures.
Due to the word alignment requirements of some machines, they are
stored by order of the decreasing element size:
double, float, long, int, short, char rather than the logical order.
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
No Keywords in table.
%\begin{keywords}
%\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"SCAN\_ID "}{" " }{J}{(1)}{ScanID}
{Scan ID number}
]
\tablecol[{"TIME "}{"DAYS " }{E}{(1)}{Time}
{The center time of the scan.}
]
\tablecol[{"TIME\_INTERVAL "}{"DAYS " }{E}{(1)}{TimeI}
{Duration of scan}
]
\tablecol[{"TARGET\_ID "}{" " }{J}{(1)}{TargetID}
{Target ID as defined in the OTFTarget table}
]
\tablecol[{"START\_REC "}{" "}{J}{(1)}{StartRec}
{First record number (1-rel) in scan}
]
\tablecol[{"END\_REC "}{" " }{J}{(1)}{EndRec}
{Last record number (1-rel) in scan}
]
\end{columns}
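As an illustration of how the index avoids scanning the whole data
table, the following sketch (astropy.io.fits, with the assumptions that
the EXTNAMEs match the table names and that the column names are as
defined above) extracts the records of a single scan; Obit itself does
this through its own access classes.
\begin{verbatim}
from astropy.io import fits

with fits.open("myOTF.fits") as hdul:
    index = hdul["OTFIndex"].data
    scan  = index[0]                      # first scan in the index
    first = scan["START_REC"] - 1         # record numbers are 1-relative
    last  = scan["END_REC"]
    records = hdul["OTFScanData"].data[first:last]
    print(scan["TARGET_ID"], len(records))
\end{verbatim}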
%
% Table modification history
\begin{history}
\modhistory[{W. D. Cotton}{08/06/2003}{Revision 1: Initial version}]
\end{history}
%
\clearpage
%%%%%%%%%%%%%%% ObitTableOTFFlag Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableOTFFlag}]
ObitTableOTFFlag Class
\tabletitle{Flag table for OTF data documentation}
% table name
\tablename{OTFFlag}
\tableintro[
{This class contains tabular data and allows access.
ObitTableOTFFlag contains descriptions of data to be ignored.
An ObitTableOTFFlag is the front end to a persistent disk resident structure.
This class is derived from the ObitTable class. }
]
\tableover{
In memory tables are stored in a fashion similar to how they are
stored on disk - in large blocks in memory rather than structures.
Due to the word alignment requirements of some machines, they are
stored by order of the decreasing element size:
double, float, long, int, short, char rather than the logical order.
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
No Keywords in table.
%\begin{keywords}
%\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"TARGET "}{" " }{J}{(1)}{TargetID}
{Target ID as defined in the OTFTarget table}
]
\tablecol[{"FEED "}{" " }{J}{(1)}{Feed}
{Feed number to flag, 0=$>$all}
]
\tablecol[{"TIME RANGE "}{"DAYS " }{E}{(2)}{TimeRange}
{Start and end time of data to be flagged }
]
\tablecol[{"FREQ "}{" " }{J}{(2)}{chans}
{First and last frequency channel numbers to flag}
]
\tablecol[{"PFLAGS "}{" " }{X}{(4)}{pFlags}
{Polarization flags, same order as in data, T=$>$flagged}
]
\tablecol[{"REASON "}{" " }{A}{(24)}{reason}
{Reason for flagging}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{W. D. Cotton}{08/06/2003}{Revision 1: Initial}]
\end{history}
%
% ++++++++++++++++++++++++++++++ GBT tables +++++++++++++++++++++++++++++++++++++=\
\clearpage
%%%%%%%%%%%%%%% GBT Antenna file %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% ObitTableGBTBEAM\_OFFSETS Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTBEAM\_OFFSETS}]
ObitTableGBTBEAM\_OFFSETS Class
\tabletitle{Template ObitTableGBT document}
% table name
\tablename{BEAM\_OFFSETS}
\tableintro[
{Table in GBT archive/Antenna file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
}
% Table keyword description
%
% Table column description
\begin{columns}
\tablecol[{"NAME "}{" " }{A}{(32)}{Name}
{}
]
\tablecol[{"BEAMXELOFFSET"}{"DEGREE " }{D}{(1)}{xeloff}
{}
]
\tablecol[{"BEAMELOFFSET"}{"DEGREE " }{D}{(1)}{eloff}
{}
]
\tablecol[{"SRFEED1 "}{" " }{J}{(1)}{srfeed1}
{}
]
\tablecol[{"SRFEED2 "}{" " }{J}{(1)}{srfeed2}
{}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% ObitTableGBTANTPOSGR Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTANTPOSGR}]
ObitTableGBTANTPOSGR Class
\tabletitle{Template ObitTableGBT document}
% table name
\tablename{ANTPOSGR}
\tableintro[
{Table in GBT archive/Antenna file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class.
Secondary Focus receivers.}
]
\tableover{
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
%
% Table column description
\begin{columns}
\tablecol[{"DMJD "}{"DAY " }{D}{(1)}{dmjd}
{}
]
\tablecol[{"RAJ2000 "}{"DEGREE " }{D}{(1)}{raj2000}
{}
]
\tablecol[{"DECJ2000"}{"DEGREE " }{D}{(1)}{decj2000}
{}
]
\tablecol[{"MNT\_AZ "}{"DEGREE " }{D}{(1)}{mntAaz}
{}
]
\tablecol[{"MNT\_EL "}{"DEGREE " }{D}{(1)}{mntEl}
{}
]
\tablecol[{"REFRACT "}{"DEGREE " }{D}{(1)}{refract}
{}
]
\tablecol[{"MAJOR "}{"DEGREE " }{D}{(1)}{major}
{}
]
\tablecol[{"MINOR "}{"DEGREE " }{D}{(1)}{minor}
{}
]
\tablecol[{"SR\_XP"}{"MM " }{D}{(1)}{srXp}
{}
]
\tablecol[{"SR\_YP"}{"MM " }{D}{(1)}{srYp}
{}
]
\tablecol[{"SR\_ZP "}{"MM " }{D}{(1)}{srZp}
{}
]
\tablecol[{"SR\_XT "}{"DEGREE " }{D}{(1)}{srXt}
{}
]
\tablecol[{"SR\_YT "}{"DEGREE " }{D}{(1)}{srYt}
{}
]
\tablecol[{"SR\_ZT "}{"DEGREE " }{D}{(1)}{srZt}
{}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% ObitTableGBTANTPOSPF Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTANTPOSPF}]
ObitTableGBTANTPOSPF Class
\tabletitle{Template ObitTableGBT document}
% table name
\tablename{ANTPOSPF}
\tableintro[
{Table in GBT archive/Antenna file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class.
Prime Focus receivers.}
]
\tableover{
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
%
% Table column description
\begin{columns}
\tablecol[{"DMJD "}{"DAY " }{D}{(1)}{dmjd}
{}
]
\tablecol[{"RAJ2000 "}{"DEGREE " }{D}{(1)}{raj2000}
{}
]
\tablecol[{"DECJ2000"}{"DEGREE " }{D}{(1)}{decj2000}
{}
]
\tablecol[{"MNT\_AZ "}{"DEGREE " }{D}{(1)}{mntAaz}
{}
]
\tablecol[{"MNT\_EL "}{"DEGREE " }{D}{(1)}{mntEl}
{}
]
\tablecol[{"REFRACT "}{"DEGREE " }{D}{(1)}{refract}
{}
]
\tablecol[{"MAJOR "}{"DEGREE " }{D}{(1)}{major}
{}
]
\tablecol[{"MINOR "}{"DEGREE " }{D}{(1)}{minor}
{}
]
\tablecol[{"PF\_FOCUS"}{"MM " }{D}{(1)}{pfFocus}
{Prime focus focus.}
]
\tablecol[{"PF\_ROTATION"}{"DEGREE " }{D}{(1)}{pfRotation}
{Prime focus rotation}
]
\tablecol[{"PF\_X "}{"MM " }{D}{(1)}{pfX}
{}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% GBT Quadrant detector file %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% ObitTableGBTQUADDETECTOR Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTQUADDETECTOR}]
ObitTableGBTQUADDETECTOR Class
\tabletitle{ObitTableGBTQUADDETECTOR document}
% table name
\tablename{QuadrantDetectorData}
\tableintro[
{Table in GBT archive/Quadrant detector file.
File in directory QuadrantDetector-QuadrantDetector-QuadrantDetectorData.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The quadrant detector measures motions of the GBT Feed arm resulting
in pointing errors.
}
% Table keyword description
%
% Table column description
\begin{columns}
\tablecol[{"DMJD "}{"DAY " }{D}{(1)}{dmjd}
{Modified Julian Date of time sample taken.}
]
\tablecol[{"ch1Voltage"}{"VOLT " }{E}{(1)}{ch1Voltage}
{Channel 1 raw voltage}
]
\tablecol[{"ch3Voltage"}{"VOLT " }{E}{(1)}{ch3Voltage}
{Channel 3 raw voltage}
]
\tablecol[{"ch4Voltage"}{"VOLT " }{E}{(1)}{ch4Voltage}
{Channel 4 raw voltage}
]
\tablecol[{"ch5Voltage"}{"VOLT " }{E}{(1)}{ch5Voltage}
{Channel 5 raw voltage}
]
\tablecol[{"X_Axis"}{" " }{E}{(1)}{X_Axis}
{Calculated feed-arm motion in the X Axis, in arcseconds}
]
\tablecol[{"Z_Axis"}{" " }{E}{(1)}{Z_Axis}
{Calculated feed-arm motion in the Z Axis, in arcseconds}
]
\tablecol[{"T1 "}{"DAY " }{D}{(1)}{T1}
{Sample time-stamp for channel 1 data, as MJD}
]
\tablecol[{"T3 "}{"DAY " }{D}{(1)}{T3}
{Sample time-stamp for channel 3 data, as MJD}
]
\tablecol[{"T4 "}{"DAY " }{D}{(1)}{T4}
{Sample time-stamp for channel 4 data, as MJD}
]
\tablecol[{"T5 "}{"DAY " }{D}{(1)}{T5}
{Sample time-stamp for channel 5 data, as MJD}
]
\tablecol[{"MedianClockOffset"}{"SECOND " }{D}{(1)}{MedianClockOffset}
{Median estimate of GDAQ clock offset}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% GBT DCR file %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% ObitTableGBTDCRSTATE Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTDCRSTATE}]
ObitTableGBTDCRSTATE Class
\tabletitle{ObitTableGBTDCRState document}
% table name
\tablename{STATE}
\tableintro[
{Table in GBT archive/DCR file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"MASTER "}{A}{master}{"DCR"}{}
{Switching signals master
}
]
\tablekey[{"SCAN "}{J}{scan}{}{}
{Scan number
}
]
\tablekey[{"UTDATE "}{J}{utdate}{}{}
{MJD of start time
}
]
\tablekey[{"UTCSTART "}{D}{utcstart}{}{}
{Start time
}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"BLANKTIM"}{"SECOND " }{D}{(1)}{blanktim}
{}
]
\tablecol[{"PHASETIM"}{"SECOND " }{D}{(1)}{phasetim}
{}
]
\tablecol[{"SIGREF"}{"" }{B}{(1)}{sigref}
{}
]
\tablecol[{"CAL"}{"" }{B}{(1)}{cal}
{}
]
\tablecol[{"SWSIG1 "}{"" }{B}{(1)}{swsig1}
{}
]
\tablecol[{"SWSIG2 "}{"" }{B}{(1)}{swsig2}
{}
]
\tablecol[{"SWSIG3 "}{"" }{B}{(1)}{swsig3}
{}
]
\tablecol[{"SWSIG4"}{"" }{B}{(1)}{swsig4}
{}
]
\tablecol[{"SWSIG5"}{"" }{B}{(1)}{swsig5}
{}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% ObitTableGBTDCRRECEIVER Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTDCRRECEIVER}]
ObitTableGBTDCRRECEIVER Class
\tabletitle{ObitTableGBTDCRRECEIVER document}
% table name
\tablename{RECEIVER}
\tableintro[
{Table in GBT archive/DCR file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
%
% Table keyword description
\begin{keywords}
\tablekey[{"SCAN "}{J}{scan}{}{}
{Scan number
}
]
\tablekey[{"UTDATE "}{J}{utdate}{}{}
{MJD of start time
}
]
\tablekey[{"UTCSTART "}{D}{utcstart}{}{}
{Start time
}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"CHANNELID"}{"" }{I}{(1)}{channelid}
{}
]
\tablecol[{"TESTDATA"}{"" }{B}{(1)}{testdata}
{}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% ObitTableGBTDCRDATA Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTDCRDATA}]
ObitTableGBTDCRDATA Class
\tabletitle{ObitTableGBTDCRDATA document}
% table name
\tablename{DATA }
\tableintro[
{Table in GBT archive/DCR file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"SCAN "}{J}{scan}{}{}
{Scan number
}
]
\tablekey[{"UTDATE "}{J}{utdate}{}{}
{MJD of start time
}
]
\tablekey[{"UTCSTART "}{D}{utcstart}{}{}
{Start time
}
]
\tablekey[{"BACKEND"}{A}{backend}{"DCR"}{}
{Which backend
}
]
\tablekey[{"CTYPE1"}{A}{ctype1}{"STATE"}{}
{First data axis is State
}
]
\tablekey[{"CTYPE2"}{A}{ctype2}{"RECEIVER"}{}
{Second data axis is Receiver
}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"IFFLAG "}{"CODE " }{I}{(1)}{ifflag}
{}
]
\tablecol[{"SUBSCAN "}{"CODE " }{J}{(1)}{subscan}
{}
]
\tablecol[{"TIMETAG "}{"DMJD" }{D}{(1)}{timetag}
{}
]
\tablecol[{"DATA "}{"COUNTS " }{J}{(2,2))}{data}
{}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% GBT CCB file %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% ObitTableGBTCCBSTATE Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTCCBSTATE}]
ObitTableGBTCCBSTATE Class
\tabletitle{ObitTableGBTCCBState document}
% table name
\tablename{STATE}
\tableintro[
{Table in GBT archive/CCB file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
A binary table extension called CCBSTATE records the physical
definitions of the phases of the data. There are a number of rows
equal to the number of phases NPHASES returned for each CCB input port
for each integration. Columns PHIA and PHIB record the values of the
phase switches A \& B at each phase state. Valid values are 0 or 1.
Data type of each entry in the column is an unsigned byte. The
ordering of rows in the CCBSTATE table corresponds to ordering of the
phase columns in the DATA table. The number of rows in the CCBSTATE
table is equal to the number of phases in the phase switch cycle
(which is in turn equal to 2 to the power of the number of active
phase switches hence 1,2, or 4). The number of phases in the
phase switch cycle is referred to as NPHASES elsewhere in this
document.\\
Comments:
\begin{itemize}
\item NPHASES is equal to $2^{\rm NACTPSW}$ where NACTPSW is the number of active phase switches.
Since NACTPSW has valid values of 0, 1, and 2, NPHASES can be 1, 2, or 4.
%
\item This table is analogous to GBT Backends' STATE tables but differs due to
the different implementations of CALs (individual integrations are Cal On or Cal off,
rather than having sub-integrations or ``phases'' be Cal On or Cal Off as for other
GBT backends) and SIGREF (next bullet point) for this backend.
%
\item The REF state corresponds to ``PHIA XOR PHIB''. A SIG state is ``NOT(PHIA XOR PHIB)''.
With two phase switches active there will be two physically distinct rows of the
CCBSTATE table that correspond to SIG and two that correspond to REF.
\end{itemize}
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
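As a concrete illustration of the SIG/REF convention described above,
the following sketch (plain Python, illustrative values only) labels
each row of a four phase cycle from its PHIA and PHIB values.
\begin{verbatim}
# PHIA/PHIB values for a 4-phase cycle (two active phase switches).
phia = [0, 0, 1, 1]
phib = [0, 1, 0, 1]

for a, b in zip(phia, phib):
    # REF corresponds to (PHIA XOR PHIB); SIG is NOT(PHIA XOR PHIB).
    state = "REF" if (a ^ b) else "SIG"
    print(a, b, state)
\end{verbatim}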
% Table keyword description
\begin{keywords}
\tablekey[{"NPHASES"}{J}{nphases}{}{}
{Number of phases in the phase switch cycle
}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"PHIA"}{"state " }{J}{(1)}{phia}
{Value of phase switch A}
]
\tablecol[{"PHIB"}{"state " }{J}{(1)}{phib}
{Value of phase switch B}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% ObitTableGBTCCBPORT Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTCCBPORT}]
ObitTableGBTCCBPORT Class
\tabletitle{ObitTableGBTCCBPORT document}
% table name
\tablename{PORT}
\tableintro[
{Table in GBT archive/CCB file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
A standard (SPN/004) PORT binary table extension is recorded in order
to allow the CCB inputs to be cross-indexed with the physical
descriptions provided in the IF manager IF table.
There are two columns: BANK (a character), and PORT (a non-zero integer).
A given value of PORT uniquely identifies a physical input to the CCB,
and may be used to index physical descriptions (frequency, feed, polarization
,etc) in the IF manager IF table.
It also uniquely defines a row in the PORT table.
The BANK column is retained for compliance with the GBT FITS standard
and is set to a fiducial value of 'A'.
Additionally there is a SLAVE column indicating which daughter card a
given input port is associated with.
Data are unsigned 8-bit integers with valid values 0,1,2,3.
The order of the rows of the PORT table corresponds to the ordering of
PORT columns in the DATA table. The number of rows NPORTS of the PORT
table is equal to the number of input ports selected as active
in the manager for the given scan.
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
%
% Table keyword description
%
% Table column description
\begin{columns}
\tablecol[{"BANK"}{" " }{A}{(1)}{bank}
{Always has value of ``A'' but included for compliance with GBT standards.}
]
\tablecol[{"PORT"}{" " }{I}{(1)}{port}
{Identifier for a physical input to the CCB}
]
\tablecol[{"SLAVE"}{" " }{I}{(1)}{slave}
{Which daughter card a given input port is associated with.}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% ObitTableGBTCCBDATA Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTCCBDATA}]
ObitTableGBTCCBDATA Class
\tabletitle{ObitTableGBTCCBDATA document}
% table name
\tablename{DATA }
\tableintro[
{Table in GBT archive/CCB file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The DATA binary table extension contains raw accumulated total power
integrations for each phase of each CCB input port that was used for a
given scan. The first (DMJD) column of the data array contains the
MJD of the integration start. The DATA column is a multidimensional
column with dimensions (NPORTS,NPHASES). Each datum is recorded as a
32 bit two's complement integer; subsequent transformation to unsigned
values is facilitated by recording a TZERO keyword with a value of
$2^{31}$. The order of the PORT and PHASE sub-columns should correspond to
the order of the rows in the PORT and CCBSTATE tables
respectively. The number of phases NPHASES is determined by the number
of active switches and is 1, 2, or 4. The number of ports NPORTS is
equal to the number of ports selected in the manager as active for the
given scan.\\
A second multi-dimensional OVRFLOW column, of the same dimensions as
the DATA column, comprises LOGICAL data with 'T' indicating
integrations that overflowed and 'F' indicating integrations that did
not. The value at subcolumn M row N in the OVRFLOW column denotes the
overflow status of the integration datum at subcolumn M row N of the
DATA column. One multi-dimensional LOGICAL column contains the four
SLAVEOK flags. Two LOGICAL columns contain CAL A and CAL B ON flags
indicating whether, for a given integration, a given cal was on or
not; the ``integration usable'' flag is a separate SHORT-INT column,
indicating whether each integration is usable based on the cal diode
rise and fall time flags applied by the CCB.\\
Comments\\
\begin{itemize}
\item Integration data are returned by the CCB as unsigned 32 bit integers. Conversion from signed
32 bit two's complements values, to unsigned 32 bit values, may require use of double precision
on the data processing end.
\item The ``integration usable'' column is short int, not logical, in order to more closely
line up the columns with machine byte boundaries, for better performance.
\item The SLAVEOK flags can be associated with individual columns of data
(i.e., input ports) using the information in the SLAVE column of the PORT table.
\item The cabling-dependent mapping of the CCB's ``Cal A'' and ``Cal B'' to a physical Cal diode
(nominally Left and Right, or perhaps, tags in the calibration FITS file database) is
presently unspecified.
\end{itemize}
The details of the storage in the buffer are kept in the
ObitTableDesc.
The ordering of ``PORTS'' is given in the IF table (Freq, feed, poln).
The CCBSTATE ordering is given in the CCBSTATE table; the working
assumption is that PHIA = sig/ref (the ref state swaps the two feeds)
and PHIB = cal, with 1 = on.
}
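The conversion from the recorded two's complement integers to unsigned
counts described above can be done as in the following sketch (NumPy,
illustrative values; note that astropy.io.fits normally applies the
TZERO offset automatically when reading the column).
\begin{verbatim}
import numpy as np

# Raw DATA values as signed 32-bit integers from the FITS table (sketch).
raw = np.array([-2147483648, 0, 123456789], dtype=np.int32)

# Apply TZERO = 2**31 in 64-bit arithmetic to recover the unsigned counts.
counts = raw.astype(np.int64) + 2**31
print(counts)   # [0, 2147483648, 2270940437]
\end{verbatim}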
% Table keyword description
% None
%
% Table column description
\begin{columns}
\tablecol[{"DMJD "}{"d " }{D}{(1)}{dmjd}
{MJD of the integration start.}
]
\tablecol[{"SLAVEOK"}{"bool " }{L}{(4)}{slaveok}
{Is each daughter card in an OK state?}
]
\tablecol[{"USABLE "}{"status" }{I}{(1)}{usable}
{}
]
\tablecol[{"CALA "}{"status" }{L}{(1)}{cala}
{Is Cal A ``on''?}
]
\tablecol[{"CALB "}{"status" }{L}{(1)}{calb}
{Is Cal B ``on''?}
]
\tablecol[{"OVRFLOW "}{"bool " }{L}{(16,4))}{ovrflow}
{ DATA overflowed? - same order as DATA}
]
\tablecol[{"DATA "}{"ulong " }{J}{(16,4))}{data}
{unsigned (PORT,CCBSTATE)}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% GBT Penn Array Camera file %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% ObitTableGBTPARDATA Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTPARDATA}]
ObitTableGBTPARDATA Class
\tabletitle{ObitTableGBTPARDATA document}
% table name
\tablename{DATA }
\tableintro[
{Table in GBT archive/Penn Array Camera file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The DATA binary table extension contains raw bolometer data.
The ``TimeStamp'' column contains the modified Julian day number
in spite of any indications from the name and units.\\
The main file HDU contains the following keywords:
\begin{itemize}
\item ``DATE-OBS''\\
String in form yyyy-mm-dd giving UTC start date
\item ``INSTRUME''\\
String 'PennArrayReceiver'
\item ``UTCSTART''\\
String: UTC start time in seconds since midnight
\item ``UTDSTART''\\
String: UTC start time in MJD, ``unknown'' = unspecified
\item ``UTCEND''\\
String: UTC of exposure end, ``unknown'' = unspecified
\item ``SCANNUM''\\
Integer: scan number(?).
\item ``PROJID''\\
String: Project ID
\end{itemize}
}
% Table keyword description
\begin{keywords}
\tablekey[{"CFGVALID"}{L}{cfgvalid}{TRUE}{}
{If true, configuration has not changed during scan}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"TimeStamp"}{"Second" }{D}{(1)}{TimeStamp}
{Time stamp; nominally Unix seconds but actually the modified Julian day number (see overview).}
]
\tablecol[{"daccounts"}{"Count" }{D}{(72)}{daccounts}
{daccounts}
]
\tablecol[{"saecounts"}{"Count" }{D}{(72)}{saecounts}
{saecounts}
]
\tablecol[{"DigInput"}{"bool" }{B}{(6)}{DigInput}
{DigInput}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% GBT Penn Array Camera file %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% ObitTableGBTPARDATA2 Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTPARDATA2}]
ObitTableGBTPARDATA Class
\tabletitle{ObitTableGBTPARDATA2 document}
% table name
\tablename{DATA }
\tableintro[
{Table in GBT archive/Penn Array Camera file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The DATA binary table extension contains raw bolometer data.
The ``TimeStamp'' column contains the number of seconds since the
beginning of Unix Time (1 Jan 1970).\\
The main file HDU contains the following keywords:
\begin{itemize}
\item ``DATE-OBS''\\
String in form yyyy-mm-dd giving UTC start date
\item ``INSTRUME''\\
String 'PennArrayReceiver'
\item ``UTCSTART''\\
String: UTC start time in seconds since midnight
\item ``UTDSTART''\\
String: UTC start time in MJD, ``unknown'' = unspecified
\item ``UTCEND''\\
String: UTC of exposure end, ``unknown'' = unspecified
\item ``SCANNUM''\\
Integer: scan number(?).
\item ``PROJID''\\
String: Project ID
\end{itemize}
}
% Table keyword description
\begin{keywords}
\tablekey[{"CFGVALID"}{L}{cfgvalid}{TRUE}{}
{If true, configuration has not changed during scan}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"DMJD"}{"Day" }{D}{(1)}{DMJD}
{MJD.}
]
\tablecol[{"daccounts"}{"Count" }{E}{(72)}{daccounts}
{daccounts}
]
\tablecol[{"saecounts"}{"Count" }{E}{(72)}{saecounts}
{saecounts}
]
\tablecol[{"DigInput"}{"bool" }{B}{(6)}{DigInput}
{DigInput}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\modhistory[{A. N. Author}{99/99/9999}{Revision 2: GBT changed format}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% ObitTableGBTPARSENSOR Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTPARSENSOR}]
ObitTableGBTPARSENSOR Class
\tabletitle{ObitTableGBTPARSensor document}
% table name
\tablename{Sensor}
\tableintro[
{Table in GBT archive/PAR file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
A binary table extension called PARSENSOR gives the row and column
numbers of each sensor.
NB this does not appear to correspond to sky position.
Eight of the sensors are ``dark'', i.e. don't see the sky.
}
% Table keyword description
% none
%
% Table column description
\begin{columns}
\tablecol[{"Row"}{" " }{J}{(1)}{row}
{Row number (0-rel) of corresponding sensor, row number=index in data arrays}
]
\tablecol[{"Col"}{" " }{J}{(1)}{col}
{Column number (0-rel) of corresponding sensor, row number=index in data arrays}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% GBT SpectralProcessor file %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% ObitTableGBTSPSTATE Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTSPSTATE}]
ObitTableGBTSPSTATE Class
\tabletitle{ObitTableGBTSPSTATE document}
% table name
\tablename{STATE}
\tableintro[
{Table in GBT archive/SpectralProcessor file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"FORMATID"}{A}{formatid}{"GBSDD007"}{}
{SDD\_FORMAT\_ID
}
]
\tablekey[{"SCAN"}{J}{scan}{}{}
{Scan number
}
]
\tablekey[{"SUBSCAN"}{J}{subscan}{}{}
{Scan record number
}
]
\tablekey[{"UTDATE"}{J}{utdate}{}{}
{MJD of start time
}
]
\tablekey[{"UTCSTART"}{D}{utcstart}{}{}
{UTC start time seconds.
}
]
\tablekey[{"UTCSTOP"}{D}{utcstop}{}{}
{Stop time seconds.
}
]
\tablekey[{"RCVRS"}{J}{rcvrs}{}{}
{Each item is indexed by RCVRS.
}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"BLANKTIM"}{"SECOND " }{E}{(2)}{blanktim}
{}
]
\tablecol[{"PHASETIM"}{"SECOND " }{E}{(2)}{phasetim}
{}
]
\tablecol[{"SIGREF"}{"" }{B}{(2)}{sigref}
{}
]
\tablecol[{"CAL"}{"" }{B}{(2)}{cal}
{}
]
\tablecol[{"FFTS "}{"" }{J}{(2)}{ffts}
{}
]
\tablecol[{"DELETED "}{"" }{J}{(2)}{deleted}
{}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% ObitTableGBTSPRECEIVER Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTSPRECEIVER}]
ObitTableGBTSPRECEIVER Class
\tabletitle{ObitTableGBTSPRECEIVER document}
% table name
\tablename{RECEIVER}
\tableintro[
{Table in GBT archive/SpectralProcessor file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
%
% Table keyword description
\begin{keywords}
\tablekey[{"SCAN "}{J}{scan}{}{}
{Scan number
}
]
\tablekey[{"UTDATE "}{J}{utdate}{}{}
{MJD of start time
}
]
\tablekey[{"UTCSTART "}{D}{utcstart}{}{}
{Start time
}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"RCVRID"}{" "}{J}{(1)}{rcvrid}
{}
]
\tablecol[{"TAPER"}{" "}{A}{(8)}{taper}
{}
]
\tablecol[{"OBSFREQ"}{"HZ"}{D}{(1)}{obsfreq}
{}
]
\tablecol[{"IFF"}{"HZ"}{D}{(1)}{iff}
{}
]
\tablecol[{"FREQRES"}{"HZ"}{D}{(1)}{freqres}
{}
]
\tablecol[{"BANDWD"}{"HZ"}{E}{(1)}{bandwd}
{}
]
\tablecol[{"TCAL"}{"DEGREE"}{E}{(1)}{tcal}
{}
]
\tablecol[{"TPLEVEL"}{" "}{E}{(1)}{tplevel}
{}
]
\tablecol[{"FASTTIM"}{"SECOND"}{E}{(1)}{fasttim}
{}
]
\tablecol[{"SLOWTIM"}{"SECOND"}{E}{(1)}{slowtim}
{}
]
\tablecol[{"CLIP"}{"SECOND"}{E}{(1)}{clip}
{}
]
\tablecol[{"THRESH"}{"SECOND"}{E}{(1)}{thresh}
{}
]
\tablecol[{"SYNTHL"}{"CODE"}{B}{(1)}{synthl}
{}
]
\tablecol[{"OVERL"}{"CODE"}{B}{(1)}{overl}
{}
]
\tablecol[{"IMODF"}{"CODE"}{B}{(1)}{imodf}
{}
]
\tablecol[{"IFSYNTH"}{"CODE"}{B}{(1)}{ifsynth}
{}
]
\tablecol[{"TAPEROFF"}{"CODE"}{B}{(1)}{taperoff}
{}
]
\tablecol[{"RFIEXC"}{"CODE"}{B}{(1)}{rfiexc}
{}
]
\tablecol[{"CLKSRC"}{"CODE"}{B}{(1)}{clksrc}
{}
]
\tablecol[{"IFLO"}{"CODE"}{B}{(1)}{iflo}
{}
]
\tablecol[{"IFSIDE"}{"CODE"}{B}{(1)}{ifside}
{}
]
\tablecol[{"RFSIDE"}{"CODE"}{B}{(1)}{rfside}
{}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% ObitTableGBTSPDATA Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTSPDATA}]
ObitTableGBTSPDATA Class
\tabletitle{Template ObitTableGBTSPDATA document}
% table name
\tablename{DATA}
\tableintro[
{Table in GBT archive/SpectralProcessor file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"OBJECT "}{A}{object}{}{}
{Source name
}
]
\tablekey[{"SCAN "}{J}{scan}{}{}
{Scan number
}
]
\tablekey[{"UTDATE "}{J}{utdate}{}{}
{MJD of start time
}
]
\tablekey[{"UTCSTART "}{D}{utcstart}{}{}
{Start time in seconds.
}
]
\tablekey[{"UTCSTOP "}{D}{utcstop}{}{}
{Stop time in seconds.
}
]
\tablekey[{"INTTIME "}{D}{inttime}{}{}
{Integration time in seconds.
}
]
\tablekey[{"BACKEND"}{A}{backend}{"DCR"}{}
{Which backend
}
]
\tablekey[{"CTYPE1"}{A}{ctype1}{"STATE"}{}
{First data axis is State
}
]
\tablekey[{"CTYPE2"}{A}{ctype2}{"RECEIVER"}{}
{Second data axis is Receiver
}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"SUBSCAN "}{"CODE " }{J}{(1)}{subscan}
{}
]
\tablecol[{"UTDATE "}{"DAY" }{J}{(1)}{utdate}
{}
]
\tablecol[{"UTCSTART "}{"SECOND" }{D}{(1)}{utcstart}
{}
]
\tablecol[{"PSRPER "}{" " }{D}{(1)}{psrper}
{Pulsar period.}
]
\tablecol[{"DATA "}{"COUNT " }{E}{(1024,2,2))}{data}
{Data}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\clearpage
%%%%%%%%%%%%%%% GBT IF file %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%% ObitTableGBTIF Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableGBTIF}]
ObitTableGBTIF Class
\tabletitle{ObitTableGBTIF document}
% table name
\tablename{IF }
\tableintro[
{Table in GBT archive/IF file.
This class contains tabular data and allows access.
This class is derived from the ObitTable class. }
]
\tableover{
The details of the storage in the buffer are kept in the
ObitTableDesc.
IF controller information about frequency and polarization setup.
Sky Frequency Formula:
$$ sky = SFF\_SIDEBAND*IF + SFF\_MULTIPLIER*LO1 + SFF\_OFFSET $$
Signed Sum of the LOs:
$$ sum = -(SFF\_MULTIPLIER*LO1 + SFF\_OFFSET)/SFF\_SIDEBAND $$
}
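A minimal sketch evaluating the Sky Frequency Formula above for one
row of the table (Python, purely illustrative coefficient values).
\begin{verbatim}
# sky = SFF_SIDEBAND*IF + SFF_MULTIPLIER*LO1 + SFF_OFFSET
sff_sideband   = -1.0      # lower sideband (illustrative)
sff_multiplier = 1.0
sff_offset     = 9.0e9     # Hz (illustrative)
lo1            = 8.0e10    # Hz, first LO frequency
if_freq        = 1.0e9     # Hz, IF frequency

sky = sff_sideband * if_freq + sff_multiplier * lo1 + sff_offset
print(sky)                 # 8.8e10 Hz
\end{verbatim}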
% Table keyword description
%
% Table column description
\begin{columns}
\tablecol[{"BACKEND"}{" " }{A}{(32)}{backend}
{Name of the terminating backend.}
]
\tablecol[{"BANK "}{" " }{A}{(2)}{bank}
{Name of the backend's set of inputs.}
]
\tablecol[{"PORT "}{" " }{J}{(1)}{port}
{Index of the backend's input.}
]
\tablecol[{"RECEIVER"}{" " }{A}{(32)}{receiver}
{Name of the receiver of origin.}
]
\tablecol[{"FEED"}{" " }{J}{(1)}{feed}
{Index of receiver RF entry point (0 indicates none).}
]
\tablecol[{"SRFEED1"}{" " }{J}{(1)}{srfeed1}
{Index of first FEED of a sig/ref pair.}
]
\tablecol[{"SRFEED2"}{" " }{J}{(1)}{srfeed2}
{Index of second FEED of a sig/ref pair.}
]
\tablecol[{"RECEPTOR"}{" " }{A}{(8)}{receptor}
{Name of the receiver's detector.}
]
\tablecol[{"LO\_CIRCUIT"}{" " }{A}{(32)}{loCircuit}
{Circuit producing the tracking frequency.}
]
\tablecol[{"LO\_COMPONENT"}{" " }{A}{(32)}{loComponent}
{component producing the tracking frequency.}
]
\tablecol[{"SIDEBAND"}{" " }{A}{(2)}{sideband}
{Resulting sideband: upper or lower.}
]
\tablecol[{"POLARIZE"}{" " }{A}{(2)}{polarize}
{Resulting polarization ('X', 'Y', 'R', 'L').}
]
\tablecol[{"CENTER\_IF"}{"HZ" }{E}{(1)}{CenterIF}
{Approximate physical center frequency.}
]
\tablecol[{"CENTER\_SKY"}{"HZ" }{E}{(1)}{CenterSky}
{Approximate center frequency on the sky.}
]
\tablecol[{"BANDWDTH"}{"HZ" }{E}{(1)}{bandwdth}
{Approximate resulting bandwidth.
BANDWDTH of 0 denotes the bandpass is outside the optimal range.
}
]
\tablecol[{"HIGH\_CAL"}{" " }{J}{(1)}{highCal}
{Indicates a high powered calibrator was used.}
]
\tablecol[{"TEST\_TONE\_IF"}{"HZ" }{E}{(1)}{testToneIF}
{Approximate physical test tone frequency, if any.}
]
\tablecol[{"TEST\_TONE\_SKY"}{"HZ" }{E}{(1)}{YestToneSky}
{Approximate test tone frequency on the sky, if any.}
]
\tablecol[{"TEST\_TONE\_CIRCUIT"}{" " }{A}{(32)}{TestToneCircuit}
{Circuit producing the test tone, if any.}
]
\tablecol[{"TEST\_TONE\_COMPONENT"}{" " }{A}{(32)}{testToneComponent}
{Component producing the test tone, if any.}
]
\tablecol[{"SFF\_MULTIPLIER"}{" " }{D}{(1)}{sffMultiplier}
{Sky Frequency Formula multiplier coefficient.}
]
\tablecol[{"SFF\_SIDEBAND"}{" " }{D}{(1)}{sffSideband}
{Sky Frequency Formula sideband coefficient.}
]
\tablecol[{"SFF\_OFFSET"}{" " }{D}{(1)}{sffOffset}
{Sky Frequency Formula offset coefficient.}
]
\tablecol[{"TRANSFORM\_COUNT"}{" " }{J}{(1)}{transformCount}
{Number of transforms.}
]
\tablecol[{"TRANSFORMS"}{" " }{A}{(4096)}{transforms}
{Matrix of transform descriptions (frequencies in MHz).}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from GBT}]
\end{history}
\end{document}
\clearpage
%%%%%%%%%%%%%%% ObitTableXX Class %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
\ClassName[{ObitTableXX}]
ObitTableXX Class
\tabletitle{Template ObitTable document}
% table name
\tablename{XX}
\tableintro[
{This class contains tabular data and allows access.
"AIPS XX" contains highly secret information.
An ObitTableXX is the front end to a persistent disk resident structure.
Both FITS and AIPS cataloged data are supported.
This class is derived from the ObitTable class. }
]
\tableover{
In memory tables are stored in a fashion similar to how they are
stored on disk - in large blocks in memory rather than structures.
Due to the word alignment requirements of some machines, they are
stored by order of the decreasing element size:
double, float, long, int, short, char rather than the logical order.
The details of the storage in the buffer are kept in the
ObitTableDesc.
}
% Table keyword description
\begin{keywords}
\tablekey[{"REVISION"}{J}{revision}{1}{}
{Revision number of the table definition.
}
]
\tablekey[{"NO\_SECRET"}{J}{numPol}{}{(1,2)}
{The number of secrets.
}
]
\end{keywords}
%
% Table column description
\begin{columns}
\tablecol[{"TIME "}{"DAYS " }{D}{(1)}{Time}
{The center time of the secret.}
]
\tablecol[{""}{"" }{}{()}{}
{}
]
\end{columns}
%
% Table modification history
\begin{history}
\modhistory[{A. N. Author}{99/99/9999}{Revision 1: Copied from AIPS}]
\end{history}
\end{document}
\clearpage
%
|
{"hexsha": "735e5888d8ce5b1d0cb2f5da1a6d12e534b7530d", "size": 65928, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ObitSystem/ObitSD/doc/ObitSD.tex", "max_stars_repo_name": "sarrvesh/Obit", "max_stars_repo_head_hexsha": "e4ce6029e9beb2a8c0316ee81ea710b66b2b7986", "max_stars_repo_licenses": ["Linux-OpenIB"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-08-26T06:53:08.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-20T01:08:59.000Z", "max_issues_repo_path": "ObitSystem/ObitSD/doc/ObitSD.tex", "max_issues_repo_name": "sarrvesh/Obit", "max_issues_repo_head_hexsha": "e4ce6029e9beb2a8c0316ee81ea710b66b2b7986", "max_issues_repo_licenses": ["Linux-OpenIB"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ObitSystem/ObitSD/doc/ObitSD.tex", "max_forks_repo_name": "sarrvesh/Obit", "max_forks_repo_head_hexsha": "e4ce6029e9beb2a8c0316ee81ea710b66b2b7986", "max_forks_repo_licenses": ["Linux-OpenIB"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2017-08-29T15:12:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:16:08.000Z", "avg_line_length": 28.8398950131, "max_line_length": 102, "alphanum_fraction": 0.6757978401, "num_tokens": 20235}
|
import mujoco as mj
import numpy as np
from mujoco.glfw import glfw
from mujoco_base import MuJoCoBase
# Define states for the finite state machine
FSM_HOLD = 0
FSM_SWING1 = 1
FSM_SWING2 = 2
FSM_STOP = 3
class LegSwing(MuJoCoBase):
def __init__(self, xml_path):
super().__init__(xml_path)
self.simend = 5.0
self.fsm_state = FSM_HOLD
# Define durations of each state
self.t_hold = 0.5
self.t_swing1 = 1.0
self.t_swing2 = 1.0
# Define setpoints
self.q_init = np.array([[-1.0], [0.0]])
self.q_mid = np.array([[0.5], [-2.0]])
self.q_end = np.array([[1.0], [0.0]])
# Define setpoint times
self.t_init = self.t_hold
self.t_mid = self.t_hold + self.t_swing1
self.t_end = self.t_hold + self.t_swing1 + self.t_swing2
# Get trajectories
self.a_swing1 = self.generate_trajectory(
self.t_init, self.t_mid, self.q_init, self.q_mid)
self.a_swing2 = self.generate_trajectory(
self.t_mid, self.t_end, self.q_mid, self.q_end)
def reset(self):
# Set initial angle of pendulum
self.data.qpos[0] = -1
# Set camera configuration
self.cam.azimuth = 89.608063
self.cam.elevation = -11.588379
self.cam.distance = 5.0
self.cam.lookat = np.array([0.0, 0.0, 1.5])
self.fsm_state = FSM_HOLD
mj.set_mjcb_control(self.controller)
def controller(self, model, data):
"""
This function implements a PD controller for tracking
the reference motion.
"""
time = data.time
# Check for state change
if self.fsm_state == FSM_HOLD and time >= self.t_hold:
self.fsm_state = FSM_SWING1
elif self.fsm_state == FSM_SWING1 and time >= self.t_mid:
self.fsm_state = FSM_SWING2
elif self.fsm_state == FSM_SWING2 and time >= self.t_end:
self.fsm_state = FSM_STOP
# Get reference joint position & velocity
if self.fsm_state == FSM_HOLD:
q_ref = self.q_init
dq_ref = np.zeros((2, 1))
elif self.fsm_state == FSM_SWING1:
q_ref = self.a_swing1[0] + self.a_swing1[1]*time + \
self.a_swing1[2]*(time**2) + self.a_swing1[3]*(time**3)
dq_ref = self.a_swing1[1] + 2 * self.a_swing1[2] * \
time + 3 * self.a_swing1[3]*(time**2)
elif self.fsm_state == FSM_SWING2:
q_ref = self.a_swing2[0] + self.a_swing2[1]*time + \
self.a_swing2[2]*(time**2) + self.a_swing2[3]*(time**3)
dq_ref = self.a_swing2[1] + 2 * self.a_swing2[2] * \
time + 3 * self.a_swing2[3]*(time**2)
elif self.fsm_state == FSM_STOP:
q_ref = self.q_end
dq_ref = np.zeros((2, 1))
# Define PD gains
kp = 500
kv = 50
# Compute PD control
data.ctrl = kp * (q_ref[:, 0] - data.qpos) + \
kv * (dq_ref[:, 0] - data.qvel)
def simulate(self):
while not glfw.window_should_close(self.window):
simstart = self.data.time
while (self.data.time - simstart < 1.0/60.0):
# Step simulation environment
mj.mj_step(self.model, self.data)
if self.data.time >= self.simend:
break
# get framebuffer viewport
viewport_width, viewport_height = glfw.get_framebuffer_size(
self.window)
viewport = mj.MjrRect(0, 0, viewport_width, viewport_height)
# Update scene and render
mj.mjv_updateScene(self.model, self.data, self.opt, None, self.cam,
mj.mjtCatBit.mjCAT_ALL.value, self.scene)
mj.mjr_render(viewport, self.scene, self.context)
# swap OpenGL buffers (blocking call due to v-sync)
glfw.swap_buffers(self.window)
# process pending GUI events, call GLFW callbacks
glfw.poll_events()
glfw.terminate()
def generate_trajectory(self, t0, tf, q0, qf):
"""
Generates a trajectory
q(t) = a0 + a1t + a2t^2 + a3t^3
which satisfies the boundary condition
q(t0) = q0, q(tf) = qf, dq(t0) = 0, dq(tf) = 0
"""
tf_t0_3 = (tf - t0)**3
a0 = qf*(t0**2)*(3*tf-t0) + q0*(tf**2)*(tf-3*t0)
a0 = a0 / tf_t0_3
a1 = 6 * t0 * tf * (q0 - qf)
a1 = a1 / tf_t0_3
a2 = 3 * (t0 + tf) * (qf - q0)
a2 = a2 / tf_t0_3
a3 = 2 * (q0 - qf)
a3 = a3 / tf_t0_3
return a0, a1, a2, a3
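# Added sanity check (not part of the original example): verify that the cubic
# coefficients returned by generate_trajectory satisfy the boundary conditions
# stated in its docstring, q(t0) = q0, q(tf) = qf, dq(t0) = dq(tf) = 0. The
# unbound method is called with self=None, which is safe because
# generate_trajectory never uses self; the setpoints below are illustrative.
def _check_trajectory_boundary_conditions():
    t0, tf = 0.5, 1.5
    q0 = np.array([[-1.0], [0.0]])
    qf = np.array([[0.5], [-2.0]])
    a0, a1, a2, a3 = LegSwing.generate_trajectory(None, t0, tf, q0, qf)
    q = lambda t: a0 + a1*t + a2*(t**2) + a3*(t**3)
    dq = lambda t: a1 + 2*a2*t + 3*a3*(t**2)
    assert np.allclose(q(t0), q0) and np.allclose(q(tf), qf)
    assert np.allclose(dq(t0), 0.0) and np.allclose(dq(tf), 0.0)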
def main():
xml_path = "./xml/doublependulum_leg.xml"
sim = LegSwing(xml_path)
sim.reset()
sim.simulate()
if __name__ == "__main__":
main()
|
{"hexsha": "cb9bd05abed9219f6269694466b643224693d646", "size": 4828, "ext": "py", "lang": "Python", "max_stars_repo_path": "example_leg_swing.py", "max_stars_repo_name": "BolunDai0216/PyMuJoCoBase", "max_stars_repo_head_hexsha": "3d9250feacd6129e44d99342663616aaf06c5d43", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-03-23T03:15:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T06:25:42.000Z", "max_issues_repo_path": "example_leg_swing.py", "max_issues_repo_name": "BolunDai0216/PyMuJoCoBase", "max_issues_repo_head_hexsha": "3d9250feacd6129e44d99342663616aaf06c5d43", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example_leg_swing.py", "max_forks_repo_name": "BolunDai0216/PyMuJoCoBase", "max_forks_repo_head_hexsha": "3d9250feacd6129e44d99342663616aaf06c5d43", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7515923567, "max_line_length": 79, "alphanum_fraction": 0.5536454018, "include": true, "reason": "import numpy", "num_tokens": 1411}
|
import numpy as np
import pygame
from src.pgassets import pgObject
class pgSlider(pgObject):
def __init__(self, pos, size):
pgObject.__init__(self, pos, size)
self.slider_rect = pygame.Rect(pos, (size[0] * 0.8, 4))
self.slider_rect.center = self.rect.center
self.slider_button = self.slider_rect.center
self.slider_button_radius = 8
def draw(self, screen):
pygame.draw.rect(screen, (0, 0, 0), self.slider_rect)
pygame.draw.rect(screen, self.color, self.rect, 2)
pygame.draw.circle(screen, (255, 50, 50), self.slider_button, self.slider_button_radius)
    def collidepoint(self, pos):
        # True when pos falls within the draggable slider button.
        distance = np.hypot(pos[0] - self.slider_button[0], pos[1] - self.slider_button[1])
        return distance <= self.slider_button_radius
def update_slider(self, pos):
if self.slider_rect.left <= pos <= self.slider_rect.right:
self.slider_button = (pos, self.slider_button[1])
def get_value(self):
value = self.slider_button[0] - self.slider_rect.left
return round(2*value/self.slider_rect.width, 1)
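# Added usage sketch (not part of the original file): one way to drive pgSlider
# from a pygame event loop. It assumes pgObject supplies the .rect and .color
# attributes that draw() above already relies on; the window size and slider
# geometry below are illustrative values only.
def _demo_slider():
    pygame.init()
    screen = pygame.display.set_mode((400, 200))
    slider = pgSlider((50, 80), (300, 40))
    dragging = False
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.MOUSEBUTTONDOWN and slider.collidepoint(event.pos):
                dragging = True
            elif event.type == pygame.MOUSEBUTTONUP:
                dragging = False
            elif event.type == pygame.MOUSEMOTION and dragging:
                slider.update_slider(event.pos[0])
        screen.fill((255, 255, 255))
        slider.draw(screen)
        pygame.display.flip()
    pygame.quit()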
|
{"hexsha": "d8f21b8da3c790ee3e72d31ddcbc78ba5837a507", "size": 1135, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pgassets/common/pgSlider.py", "max_stars_repo_name": "Blackdevil132/machineLearning", "max_stars_repo_head_hexsha": "de048bb1473994052f8ed1afb11a15b7833b506d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-04T07:28:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-04T07:28:19.000Z", "max_issues_repo_path": "src/pgassets/common/pgSlider.py", "max_issues_repo_name": "Blackdevil132/machineLearning", "max_issues_repo_head_hexsha": "de048bb1473994052f8ed1afb11a15b7833b506d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-04-29T09:20:11.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-29T09:23:22.000Z", "max_forks_repo_path": "src/pgassets/common/pgSlider.py", "max_forks_repo_name": "Blackdevil132/machineLearning", "max_forks_repo_head_hexsha": "de048bb1473994052f8ed1afb11a15b7833b506d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.46875, "max_line_length": 96, "alphanum_fraction": 0.6599118943, "include": true, "reason": "import numpy", "num_tokens": 284}
|
from functools import partial
import jax
import jax.numpy as jnp
from e3nn_jax import IrrepsData, index_add
from e3nn_jax.util import prod
from jax import lax
@partial(jax.jit, static_argnums=(1, 2, 3, 4))
def lowpass_filter(input, scale, strides, transposed=False, steps=(1, 1, 1)):
r"""Lowpass filter for 3D field.
Args:
input: [..., x, y, z]
scale (float): typically 2.0
strides (int): typically 1 or 2
transposed (bool): if True, dilate the input instead of stride
steps (tuple): physical dimensions of the voxel grid
Known issues:
stride=2 transposed=True
64 -------> 32 --------------> 63
"""
if isinstance(strides, int):
strides = (strides,) * 3
with jax.ensure_compile_time_eval():
sigma = 0.5 * (scale ** 2 - 1)**0.5
size = int(1 + 2 * 2.5 * sigma)
if size % 2 == 0:
size += 1
r = jnp.linspace(-1, 1, size)
x = r * steps[0] / min(steps)
x = x[jnp.abs(x) <= 1]
y = r * steps[1] / min(steps)
y = y[jnp.abs(y) <= 1]
z = r * steps[2] / min(steps)
z = z[jnp.abs(z) <= 1]
lattice = jnp.stack(jnp.meshgrid(x, y, z, indexing='ij'), axis=-1) # [x, y, z, R^3]
lattice = (size // 2) * lattice
kernel = jnp.exp(-jnp.sum(lattice**2, axis=-1) / (2 * sigma**2))
kernel = kernel / jnp.sum(kernel)
if transposed:
kernel = kernel * strides[0] * strides[1] * strides[2]
kernel = kernel[None, None] # [1, 1, x, y, z]
if scale <= 1:
assert strides == (1,) * 3
return input
pad = (kernel.shape[-3] // 2, kernel.shape[-2] // 2, kernel.shape[-1] // 2)
output = input
output = output.reshape(-1, 1, *output.shape[-3:])
output = lax.conv_general_dilated(
lhs=output,
rhs=kernel,
lhs_dilation=strides if transposed else (1, 1, 1),
window_strides=(1, 1, 1) if transposed else strides,
padding=((pad[0], pad[0]), (pad[1], pad[1]), (pad[2], pad[2])),
dimension_numbers=('NCXYZ', 'IOXYZ', 'NCXYZ')
)
output = output.reshape(*input.shape[:-3], *output.shape[-3:])
return output
@jax.jit
def interpolate_bilinear(input, x, y, z):
r"""interpolate voxels in coordinate (x, y, z).
Args:
input: [..., x, y, z]
x: x coordinate
y: y coordinate
z: z coordinate
"""
# based on http://stackoverflow.com/a/12729229
x_lo = jnp.floor(x).astype(int)
x_hi = x_lo + 1
y_lo = jnp.floor(y).astype(int)
y_hi = y_lo + 1
z_lo = jnp.floor(z).astype(int)
z_hi = z_lo + 1
nx, ny, nz = input.shape[-3:]
def xclip(x):
return jnp.clip(x, 0, nx - 1)
def yclip(y):
return jnp.clip(y, 0, ny - 1)
def zclip(z):
return jnp.clip(z, 0, nz - 1)
Ia = input[..., xclip(x_lo), yclip(y_lo), zclip(z_lo)]
Ib = input[..., xclip(x_hi), yclip(y_lo), zclip(z_lo)]
Ic = input[..., xclip(x_lo), yclip(y_hi), zclip(z_lo)]
Id = input[..., xclip(x_hi), yclip(y_hi), zclip(z_lo)]
Ie = input[..., xclip(x_lo), yclip(y_lo), zclip(z_hi)]
If = input[..., xclip(x_hi), yclip(y_lo), zclip(z_hi)]
Ig = input[..., xclip(x_lo), yclip(y_hi), zclip(z_hi)]
Ih = input[..., xclip(x_hi), yclip(y_hi), zclip(z_hi)]
wa = (x_hi - x) * (y_hi - y) * (z_hi - z)
wb = (x - x_lo) * (y_hi - y) * (z_hi - z)
wc = (x_hi - x) * (y - y_lo) * (z_hi - z)
wd = (x - x_lo) * (y - y_lo) * (z_hi - z)
we = (x_hi - x) * (y_hi - y) * (z - z_lo)
wf = (x - x_lo) * (y_hi - y) * (z - z_lo)
wg = (x_hi - x) * (y - y_lo) * (z - z_lo)
wh = (x - x_lo) * (y - y_lo) * (z - z_lo)
return wa * Ia + wb * Ib + wc * Ic + wd * Id + we * Ie + wf * If + wg * Ig + wh * Ih
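# Added illustration (not part of the original module): a minimal sanity check of
# the trilinear weights above. At the centre of a 2x2x2 block the interpolated
# value is the mean of the eight corners; at an exact voxel coordinate it is that
# voxel's value. The toy array below is purely illustrative.
def _interpolate_bilinear_sanity_check():
    voxels = jnp.arange(8.0).reshape(2, 2, 2)
    assert jnp.allclose(interpolate_bilinear(voxels, 0.5, 0.5, 0.5), voxels.mean())
    assert jnp.allclose(interpolate_bilinear(voxels, 0.0, 0.0, 0.0), voxels[0, 0, 0])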
@partial(jax.jit, static_argnums=(1,))
def zoom(input, resize_rate):
r"""Rescale the input by a factor of `resize_rate`.
Args:
input: [..., x, y, z]
resize_rate (float): typically 2.0 or 0.5
Returns:
resize_rate times larger field
"""
nx, ny, nz = input.shape[-3:]
if isinstance(resize_rate, (float, int)):
resize_rate = (resize_rate,) * 3
def f(n_src, n_dst):
a = n_src / n_dst * jnp.arange(n_dst)
delta = 0.5 * (n_src / n_dst - 1)
return delta + a
xi = f(nx, round(nx * resize_rate[0]))
yi = f(ny, round(ny * resize_rate[1]))
zi = f(nz, round(nz * resize_rate[2]))
xg, yg, zg = jnp.meshgrid(xi, yi, zi, indexing='ij')
output = jax.vmap(interpolate_bilinear, (None, 0, 0, 0), -1)(input, xg.flatten(), yg.flatten(), zg.flatten())
output = output.reshape(*input.shape[:-3], len(xi), len(yi), len(zi))
return output
def _index_max_norm(input, strides):
norms = jnp.sum(input**2, axis=-1)
shape = input.shape[:-1]
assert len(shape) == len(strides)
idxs = jnp.arange(prod(shape)).reshape(shape)
def g(a, b):
an, ai = a
bn, bi = b
which = an >= bn
return (jnp.where(which, an, bn), jnp.where(which, ai, bi))
_, idxs = lax.reduce_window(
(norms, idxs),
(-jnp.inf, -1),
g,
window_dimensions=strides,
window_strides=strides,
padding=((0, 0),) * len(strides),
)
return idxs
@jax.custom_vjp
def norm_maxpool(input, strides):
idxs = _index_max_norm(input, strides)
return input.reshape(prod(input.shape[:-1]), input.shape[-1])[idxs]
def norm_maxpool_fwd(input, strides):
idxs = _index_max_norm(input, strides)
output = input.reshape(prod(input.shape[:-1]), input.shape[-1])[idxs]
return output, (idxs, input.shape)
def norm_maxpool_bwd(residuals, grad):
idxs, shape = residuals
idxs = idxs.flatten()
grad = index_add(idxs, grad.reshape(idxs.shape[0], -1), prod(shape[:-1]))
grad = grad.reshape(shape)
return (grad, None)
norm_maxpool.defvjp(norm_maxpool_fwd, norm_maxpool_bwd)
def maxpool(input: IrrepsData, strides) -> IrrepsData:
r"""
Args:
input: IrrepsData of shape [x, y, z]
strides: tuple of ints
Returns:
IrrepsData
"""
assert isinstance(input, IrrepsData)
assert len(input.shape) == len(strides)
list = [
None if x is None else jax.vmap(lambda x: norm_maxpool(x, strides), -2, -2)(x)
for x in input.list
]
shape = tuple(a // s for a, s in zip(input.shape, strides))
return IrrepsData.from_list(input.irreps, list, shape)
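# Added usage sketch (not part of the original module), using only the functions
# defined above: upsample a toy scalar field with zoom, then blur and downsample
# it back to its original resolution with lowpass_filter at stride 2. Shapes and
# values below are illustrative only.
def _voxel_pooling_demo():
    field = jnp.zeros((8, 8, 8)).at[4, 4, 4].set(1.0)
    up = zoom(field, 2.0)               # trilinear upsampling -> (16, 16, 16)
    down = lowpass_filter(up, 2.0, 2)   # Gaussian blur + stride 2 -> (8, 8, 8)
    return up.shape, down.shape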
|
{"hexsha": "77c3311ba025cb0ae67c1a2970b4c6c9f8480f95", "size": 6488, "ext": "py", "lang": "Python", "max_stars_repo_path": "e3nn_jax/experimental/voxel_pooling.py", "max_stars_repo_name": "yilunliao/e3nn-jax", "max_stars_repo_head_hexsha": "dfe472eb3dcc58abb07ae91eedc39f6fa6926bc8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "e3nn_jax/experimental/voxel_pooling.py", "max_issues_repo_name": "yilunliao/e3nn-jax", "max_issues_repo_head_hexsha": "dfe472eb3dcc58abb07ae91eedc39f6fa6926bc8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "e3nn_jax/experimental/voxel_pooling.py", "max_forks_repo_name": "yilunliao/e3nn-jax", "max_forks_repo_head_hexsha": "dfe472eb3dcc58abb07ae91eedc39f6fa6926bc8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9642857143, "max_line_length": 113, "alphanum_fraction": 0.5573366215, "include": true, "reason": "import jax,from jax", "num_tokens": 2105}
|
import packages.mdp.gridworld as gw
import numpy as np
def test_mapcreation_booleanWalls():
walls = np.array([[0, 1, 0], [0, 0, 0], [0, 0, 1]])
walls = walls.astype('bool')
world = gw.Gridworld(walls)
desired = np.array([[0, np.nan, 1], [2, 3, 4], [5, 6, np.nan]])
assert np.allclose(world.map, desired, equal_nan=True)
def test_mapcreation_integerWalls():
walls = [1, 8]
dims = [3, 3]
world = gw.Gridworld(walls, dims)
desired = np.array([[0, np.nan, 1], [2, 3, 4], [5, 6, np.nan]])
assert np.allclose(world.map, desired, equal_nan=True)
def test_mapcreation_noWalls():
dims = [2, 3]
world = gw.Gridworld(shape=dims)
desired = np.array([[0, 1, 2], [3, 4, 5]])
assert np.array_equal(world.map, desired)
|
{"hexsha": "fbde81ddcbafc76549391e85dad56c1db85f9514", "size": 763, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_gridworld.py", "max_stars_repo_name": "bastianalt/correlation_priors_for_rl", "max_stars_repo_head_hexsha": "9a98f345ac10e9767d854cd7a9681057a50a9737", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_gridworld.py", "max_issues_repo_name": "bastianalt/correlation_priors_for_rl", "max_issues_repo_head_hexsha": "9a98f345ac10e9767d854cd7a9681057a50a9737", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_gridworld.py", "max_forks_repo_name": "bastianalt/correlation_priors_for_rl", "max_forks_repo_head_hexsha": "9a98f345ac10e9767d854cd7a9681057a50a9737", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7916666667, "max_line_length": 67, "alphanum_fraction": 0.6173001311, "include": true, "reason": "import numpy", "num_tokens": 260}
|
import torch
import numpy as np
import pytest
import deepspeed
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
from deepspeed.ops.op_builder import CPUAdagradBuilder
if not deepspeed.ops.__compatible_ops__[CPUAdagradBuilder.NAME]:
    pytest.skip("cpu-adagrad is not compatible", allow_module_level=True)
def check_equal(first, second, atol=1e-2, verbose=False):
x = first.detach().numpy()
y = second.detach().numpy()
if verbose:
print("x = {}".format(x.flatten()))
print("y = {}".format(y.flatten()))
print('-' * 80)
np.testing.assert_allclose(x, y, err_msg="param-update mismatch!", atol=atol)
@pytest.mark.parametrize('model_size',
[
(64),
(22),
(55),
(127),
(1024),
(1048576),
(30000000),
]) # yapf: disable
def test_cpu_adagrad_opt(model_size):
device = 'cpu'
rng_state = torch.get_rng_state()
param = torch.nn.Parameter(torch.randn(model_size, device=device))
torch.set_rng_state(rng_state)
param1 = torch.nn.Parameter(torch.randn(model_size, device=device))
torch.set_rng_state(rng_state)
optimizer = DeepSpeedCPUAdagrad([param])
optimizer1 = torch.optim.Adagrad([param1])
for i in range(10):
rng_state = torch.get_rng_state()
param.grad = torch.randn(model_size, device=device)
torch.set_rng_state(rng_state)
param1.grad = torch.randn(model_size, device=device)
optimizer.step()
optimizer1.step()
check_equal(param, param1, atol=1e-2, verbose=True)
@pytest.mark.parametrize('model_size,vocabulary_size,dim',
[
(16 * 2, 16 * 4, 16),
(16 * 32, 16 * 256, 16),
(16 * 256, 16 * 16384, 16),
]) # yapf: disable
def test_cpu_adagrad_opt_sparse_embedding(model_size, vocabulary_size, dim):
device = 'cpu'
rng_state = torch.get_rng_state()
def gen_sparse_grad(vocabulary_size, dim, num_indices, dtype, device):
i = torch.randint(vocabulary_size,
size=(1,
num_indices),
dtype=torch.int64,
device=device)
v = torch.randn(num_indices, dim, dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, (vocabulary_size, dim), device=device)
t = t.coalesce()
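        # Expand each sparse row index of the (vocabulary_size, dim) gradient into
        # dim consecutive element indices, so the same gradient can be re-expressed
        # below as a flattened (vocabulary_size * dim,) sparse vector.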
new_i = (t.indices().view(-1,
1).repeat(1,
dim) * dim +
torch.tensor(range(dim))).flatten().unsqueeze(0)
new_v = t.values().flatten()
new_t = torch.sparse_coo_tensor(new_i,
new_v,
(vocabulary_size * dim,
),
device=device)
new_t = new_t.coalesce()
new_t.requires_grad = False
return new_t
voc_size = vocabulary_size
dim = dim
num_indices = int(model_size // dim)
dtype = torch.float32
param = torch.nn.Parameter(torch.randn((voc_size * dim,
),
dtype=dtype,
device=device),
requires_grad=True)
torch.set_rng_state(rng_state)
param1 = torch.nn.Parameter(torch.randn((voc_size * dim,
),
dtype=dtype,
device=device),
requires_grad=True)
torch.set_rng_state(rng_state)
optimizer = DeepSpeedCPUAdagrad([param])
optimizer1 = torch.optim.Adagrad([param1])
for i in range(10):
torch.set_rng_state(rng_state)
param.grad = gen_sparse_grad(voc_size,
dim,
num_indices,
dtype=dtype,
device=device)
torch.set_rng_state(rng_state)
param1.grad = gen_sparse_grad(voc_size,
dim,
num_indices,
dtype=dtype,
device=device)
optimizer.step()
optimizer1.step()
check_equal(param, param1, atol=1e-2, verbose=True)
|
{"hexsha": "b8a025fe02a8145c1f4295728e8b1d4453c89275", "size": 4861, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/test_cpu_adagrad.py", "max_stars_repo_name": "Seong-yeop/DeepSpeed", "max_stars_repo_head_hexsha": "76f2b5e51d8cf68d1966dceaf1a562a6f02d73fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6728, "max_stars_repo_stars_event_min_datetime": "2020-02-07T23:53:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T20:02:53.000Z", "max_issues_repo_path": "tests/unit/test_cpu_adagrad.py", "max_issues_repo_name": "Seong-yeop/DeepSpeed", "max_issues_repo_head_hexsha": "76f2b5e51d8cf68d1966dceaf1a562a6f02d73fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1104, "max_issues_repo_issues_event_min_datetime": "2020-02-08T00:26:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:33:56.000Z", "max_forks_repo_path": "tests/unit/test_cpu_adagrad.py", "max_forks_repo_name": "Seong-yeop/DeepSpeed", "max_forks_repo_head_hexsha": "76f2b5e51d8cf68d1966dceaf1a562a6f02d73fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 801, "max_forks_repo_forks_event_min_datetime": "2020-02-10T15:33:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T16:32:33.000Z", "avg_line_length": 38.5793650794, "max_line_length": 82, "alphanum_fraction": 0.4710964822, "include": true, "reason": "import numpy", "num_tokens": 913}
|
# libraries
import argparse
import logging
from google.cloud import storage
import joblib
import numpy as np
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
def run(argv=None):
# download the enhanced features
bucket_name = 'enhanced-features'
source_blob_name = 'enhanced_features.csv'
destination_file_name = 'enhanced_features.csv'
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
# load the features
df = pd.read_csv('enhanced_features.csv')
# collect the max index for each of the labels
pitch_types = ['FT','FS','CH','FF','SL','CU','FC','SI','KC','EP','KN','FO']
labels = []
for i in range(0, len(df)):
values = df.iloc[[i]][pitch_types].values.tolist()[0]
max_index = values.index(max(values))
labels.append(max_index)
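    # Added note (not in the original script): the loop above is equivalent to the
    # vectorised form labels = df[pitch_types].values.argmax(axis=1).tolist(),
    # which picks the same first-maximum column index per row without iloc calls.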
# drop the initial pitchtype columns
df = df.drop(pitch_types, axis=1)
# define data and labels
y = np.asarray(labels)
X = df.values
# train the RF classifier
clf = RandomForestClassifier()
cv = KFold(10)
clf = GridSearchCV(clf, {'n_estimators': [10, 100, 1000]}, n_jobs=-1, cv=cv)
clf.fit(X, y)
# save the model to disk
'''
joblib.dump(clf, 'model.joblib')
'''
with open('model.pkl', 'wb') as model_file:
pickle.dump(clf, model_file)
# push the model to cloud storage
bucket_name = 'rf-model'
destination_blob_name = 'model.pkl'
source_file_name = 'model.pkl'
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.ERROR)
run()
|
{"hexsha": "3fb3372f93717899061ede3b655307d8c458bf35", "size": 1953, "ext": "py", "lang": "Python", "max_stars_repo_path": "pitch-predictor/answers/components/trainRF/train_rf.py", "max_stars_repo_name": "data-describe/awesome-data-science-models", "max_stars_repo_head_hexsha": "aa9b3aa8137a30b47fa044c7b4db46568c0d6316", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-11-11T18:28:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T20:32:04.000Z", "max_issues_repo_path": "pitch-predictor/answers/components/trainRF/train_rf.py", "max_issues_repo_name": "data-describe/awesome-data-science-models", "max_issues_repo_head_hexsha": "aa9b3aa8137a30b47fa044c7b4db46568c0d6316", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2020-09-13T16:52:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-25T04:36:05.000Z", "max_forks_repo_path": "pitch-predictor/answers/components/trainRF/train_rf.py", "max_forks_repo_name": "data-describe/awesome-data-science-models", "max_forks_repo_head_hexsha": "aa9b3aa8137a30b47fa044c7b4db46568c0d6316", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-02-09T18:27:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-07T21:22:38.000Z", "avg_line_length": 26.3918918919, "max_line_length": 80, "alphanum_fraction": 0.6850998464, "include": true, "reason": "import numpy", "num_tokens": 473}
|
import multiprocessing as mp
import random
import numpy as np
import nltk
from nltk.corpus import stopwords
import csv
import gzip
import pandas as pd
from nltk.corpus import wordnet as wn
from nltk.util import ngrams
from flashtext import KeywordProcessor
from seqeval.metrics import accuracy_score, classification_report, f1_score, precision_score, recall_score
import os
class Annotator:
def __init__(self, train=None, test=None, tag2idx=None, args=None):
self.train = train
self.test = test
self.tag2idx = tag2idx
def reset(self, seed):
pass
def __call__(self, obs=None):
[expert_tag, ref_tag] = obs
idx = expert_tag.item()
return (idx, None)
class Reference:
def __init__(self, train=None, test=None, tag2idx=None, args=None):
self.train = train
self.test = test
self.tag2idx = tag2idx
def reset(self, seed):
pass
def __call__(self, obs=None):
[expert_tag, ref_tag] = obs
idx = ref_tag.item()
return (idx, None)
|
{"hexsha": "1871ac557871c8a015232888b9113bbab6a3ad0f", "size": 1059, "ext": "py", "lang": "Python", "max_stars_repo_path": "leaqi/src/oracles.py", "max_stars_repo_name": "xkianteb/leaqi", "max_stars_repo_head_hexsha": "924435590e74421ed16488429056f26747c99421", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-05-25T16:50:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T08:00:04.000Z", "max_issues_repo_path": "leaqi/src/oracles.py", "max_issues_repo_name": "chenyangh/leaqi", "max_issues_repo_head_hexsha": "924435590e74421ed16488429056f26747c99421", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "leaqi/src/oracles.py", "max_forks_repo_name": "chenyangh/leaqi", "max_forks_repo_head_hexsha": "924435590e74421ed16488429056f26747c99421", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-05-28T18:25:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-25T10:29:41.000Z", "avg_line_length": 25.2142857143, "max_line_length": 106, "alphanum_fraction": 0.6770538244, "include": true, "reason": "import numpy", "num_tokens": 268}
|
import pandas as pd
import numpy as np
import math
from tensorflow.keras.models import load_model
from agents.agent import Agent
from agents.meter import Meter
from agents.prediction_market_adapter import NUM_PREDICTIONS, ACCOUNT_0
class LstmMultiAgent(Agent):
"""Agent that uses multivariate LSTM with private and public data.
Attributes:
model: a pretrained LSTM model
predictions_count: counter for keeping track of the number of betting
rounds participated in
my_history: past individual energy consumption (as a list)
meter: Meter for fetching latest household energy consumption
"""
NUM_HISTORIC_DATA = 144
def __init__(self, model_file_name, household_name, normalise_values,
account=ACCOUNT_0, logging=True, **kwargs):
super(LstmMultiAgent, self).__init__(account, logging, **kwargs)
self.predictions_count = 0
self.model = load_model(model_file_name)
data_file_name = './data/household_' + household_name + '_history.pkl'
self.my_history = list(pd.read_pickle(data_file_name).consumption)
self.meter = Meter(household_name)
self.agg_mean = normalise_values[0]
self.agg_std_dev = normalise_values[1]
self.mean = normalise_values[2]
self.std_dev = normalise_values[3]
self.num_history_added = 0
self.log('LstmMultiAgent for household {0}'.format(household_name))
def predict_for_tomorrow(self):
agg_history = np.array(self.aggregate_history[-LstmMultiAgent.NUM_HISTORIC_DATA:])
agg_history = (agg_history-self.agg_mean)/self.agg_std_dev # normalise input data
# offset to make sure that you take an exact 3 days starting from start of day
offset = self.num_history_added % NUM_PREDICTIONS
if offset > 0:
history = np.array(self.my_history[-LstmMultiAgent.NUM_HISTORIC_DATA-offset:-offset])
else:
history = np.array(self.my_history[-LstmMultiAgent.NUM_HISTORIC_DATA:])
history = (history-self.mean)/self.std_dev
# batch data into format that model requires: 3D array of (?, 144, 2)
tuple = []
for j in range(LstmMultiAgent.NUM_HISTORIC_DATA):
tuple.append([agg_history[j], history[j]])
data = [tuple]
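        # Added note (not in the original class): an equivalent vectorised batch is
        # np.stack([agg_history, history], axis=-1)[None], giving the same
        # (1, NUM_HISTORIC_DATA, 2) array the LSTM expects without a Python loop.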
predictions = []
predictions_ = self.model.predict(data)
for j in range(NUM_PREDICTIONS):
predictions.append(predictions_[0][j] * self.agg_std_dev + self.agg_mean)
self.predictions_count += NUM_PREDICTIONS
return list(map(int, predictions))
def update_per_period(self):
self.my_history.append(self.meter.get_latest_consumption())
self.num_history_added += 1
|
{"hexsha": "d3c5696945946171234c8e3fba9de0d67a5dfbca", "size": 2759, "ext": "py", "lang": "Python", "max_stars_repo_path": "agent/agents/lstm_multi_agent.py", "max_stars_repo_name": "rampopat/charje", "max_stars_repo_head_hexsha": "3af178bd72800e339c45637356440780c3b0563a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-22T02:04:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-22T02:04:40.000Z", "max_issues_repo_path": "agent/agents/lstm_multi_agent.py", "max_issues_repo_name": "rampopat/charje", "max_issues_repo_head_hexsha": "3af178bd72800e339c45637356440780c3b0563a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "agent/agents/lstm_multi_agent.py", "max_forks_repo_name": "rampopat/charje", "max_forks_repo_head_hexsha": "3af178bd72800e339c45637356440780c3b0563a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9855072464, "max_line_length": 97, "alphanum_fraction": 0.6901051105, "include": true, "reason": "import numpy", "num_tokens": 630}
|
The Peoples Vanguard of Davis (http://davisvanguard.org/) is a political blog that was launched in July 2006. It is dedicated to exposing what it calls the dark underbelly of the Peoples Republic of Davis by writing about things that arent completely reported in the Davis Enterprise mainstream press. Its updated daily with news, opinions, and commentary focusing on politics in Davis and environs, with occasional forays onto the state and national stages. It also features Yolo Judicial Watch, a section devoted to the activities of the judiciary in Yolo County and the surrounding areas.
The blog is written by David Greenwald (no relation to former councilmember Sue Greenwald), who uses the pen name Doug Paul Davis (DPD) online. He remained anonymous for the first several months after the blog launched, using only his pen name. He finally identified himself as the author in a January 2007 Sacramento Bee article.
The initial focus of the blog was to comment on the activities of the Davis Police Department (the other DPD). Greenwald has written about why hes deliberately http://davisvanguard.org/index.php?optioncom_content&viewarticle&id428:commentarywhenfairandbalancedislessaccurate&catid56:davisenterprise&Itemid82 biased, saying it gives more freedom than if there were an expectation of unbiased writing. Users/TomGarberson Critics find the bias excessive in that it sometimes leads to the presentation of only those facts supporting the authors viewpoint. The omission of not only the opposing viewpoint (which is fine) but also facts that dont fit the authors opinion is intellectually dishonest. Users/CovertProfessor Others think that whereas the point of view being presented is clear, that facts that support other points of view are presented.
On March 2, 2009, the Vanguard moved from the blogger server to its own server and a new web address, davisvanguard.org. It added new features such as the Community Blogs that allow select members of the public to post their own comments. They also had a radio show on KDRT 95.7 FM (archives http://peoplesvanguard.com/ here). Registration is required to post comments. Pseudonyms are allowed. The comments are moderated.
Greenwald states that he started the blog in July 2006 in response to the City Council disbanding the Human Relations Commission and the treatment of Halema Buzayan. He finally made the plunge in late July to create a place where people can get news not being reported in the Enterprise. Its intended for the progressive left, highlighting whats happening in Davis among the elected and unelected government. The site took off and gets thousands of weekly hits and breaks news stories ahead of the mainstream press. Some stories are indeed covered in far more detail than the mainstream press; recent examples include the budget situation and the city council election.
Excellent, that was a good article, and Im sure it will enlighten a few mature davisites as to what the wiki is. Ive been pondering ways to get Davisites aware of the wiki, this does a great job albeit unintentionally. Users/StevenDaubert
Steven, I dont see it as unintentional on my part. Part of the reason I wanted to do the interview with Philip was to honor his great achievement and expose an audience to the Wiki, who have in many cases only heard of it. The big role that the Vanguard tries to play is an educational role in this community. Users/DavidGreenwald
Touche! Users/StevenDaubert
20070913 11:49:18 nbsp Im unimpressed about your truancy series. I fail to find an issue that is noteworthy inside of them. If kids chose to cut class enough, the police will always get involved. Having known Pam Mari for the vast majority of my life, both personally and when she was an administrator at Holmes / DHS. Thru this knowledge of her, I can safely say she has the kids well being at heart, ahead of covering her ass or the districts.
long rant short: Whats the problem? I can tell you first hand how easy it was to walk off DHS campus as a student, at any time, despite the fact its an open campus only at lunch. Users/StevenDaubert
20070913 13:18:45 nbsp Start with the issue that she implemented this policy without authority from the school board and as a result the policy has been terminated. Start with the issue that no one in the community who had any sort of direct accountability to the voters had any knowledge of what was going on. Do you really fail to understand the importance of this? Even Officer Pytel acknowledged that this was a problem. Policy decisions need to be made primarily by office holders who are elected not unelected administrators. As I suggest in todays article, the community was uninformed and did not weigh in on this, as a result this has been a huge mess with mass confusion. Users/DavidGreenwald
20070914 13:10:15 nbsp Whats so wrong with Pam Mari making calls like this? She lived in Davis for a long time, and had plenty of time not only being an administrator but teaching classes as well... Kids choose to be truant, no one forces them to cut class. Sure they are putting themselves behind in work, but at the time you dont notice the consequences of truancy. Kids need to to at least make it thru high school... Whats so atrocious about the policy in the first place? Users/StevenDaubert
Whats wrong with Pam Mari taking the lead on this? She didnt inform the school board and they are the ones that set district policy. Im not necessarily opposed the policy in the first place, but the process was completely messed up. Users/DavidGreenwald
20070914 13:34:20 nbsp hehehehe, being a relatively new high school alum, its interesting noticing how my attitude shifts. When I was going to DHS having a friend with a car that you pile into and is one of the most awesome things. Now when I go around DHS at lunch time I notice the general tomfollery, the lack of seatbelts, etc that gets people concerned. XI have quite a bit to say about the student interviews, but I dont have the timeX. When I started reading I thought what I would have done in each situation. I would have avoided all the tickets. Users/StevenDaubert
enjoy:
Here is what you say to the officer if he hasnt Flashed red and blue lights, or given you a direct lawful order: Officer, this consensual encounter is over.
tada, officer has to issue a citation, Officer cant issue driving for under a year citation as the primary issue on the citation.
I knew this when I went to DHS a couple years ago... Having to dodge the Traffic cops on the BMWs, etc.
For the female student to not say the above opens herself up legally for the citation, and thats all our friend Jeff Resig cares about, the citation will fly in court due to said students actions, and thats all the system cares about, the end.
However, If she though she was a victim of profiling, and had just realized it was foolish to admit to committing a crime to a officer, she could have gotten his badge number, and demanded to speak to a watch commander. This is a request the officer has to take seriously, and the watch commander is the one responsible for the interpretation of the law. Female student explains to watch commander that she felt it violated the law that the officer was engaging her solely to figure out if she was driving under a year, which is a violation, but its supposed to be only a secondary check, and it isnt reason to pull a car over in the first place.
For Soren the same thing applies, Ive had an officer pull up behind me while Ive been parked on the street near South D Safeway, I made the mistake of trying to deal with it hesitantly, and wound up searched and the whole nine, despite the fact that I never explicitly gave permission.
Know what you can and cant do in police encounters, cause quick wits will save you tons of hassle.
Did Drew and Mohamed have a note from the teacher, and the appropriate note to be off campus? If so tell the officer to sod off via asking for a watch commander and explaining your situation. Officers are just the foot soldiers who blindly enforce policy, whenever a conflict arises they will ask the watch commander how to act, if its okay to kick in this door cause I know there is an occupant who is ignoring me? or is this a bad stop? etc. However if they lacked the appropriate set of passes and notes, and the officer detained them and took them back to DHS, and then threw them a curve ball to see how they take it, thats to be expected. Its just another day in the life for said officer, and thats the routine. Dont expect cops to be nice to you when they suspect you of doing something.
It doesnt default to the officers to make sure the students understand when they can and cant speak. I have zero sympathy for any of the students mentioned, simply for the fact that all you have to do is sign up for the citizens police academy, happens twice a year, and DPD tells you how they work on a day to day basis. Thats more than enough information to be armed with, and what makes it best is that it comes STRAIGHT FROM THE HORSES MOUTH.
If you make it understood your watching whats going on, and are going to kick and scream in all the right ways, most officers simply dont want to deal with the hassle... Im sorry if this seems discombobulated, or fragmented, I just banged this out freewrite style >_< Im sure its all just one big run on sentence.
20080208 18:41:48 nbsp Response to a deleted comment about why some names were just last names and others full names. When I started the blog there were no labels, that feature was added later on. The first few labels I did, I just put peoples last names. Then after I wrote about Sue Greenwald, I obviously could not just call her Greenwald, so then I put everyones label as first and last name as added. But I never went back and changed the old ones and it would take too long to do it now. So thats it is the way it is. Users/DavidGreenwald
Its Official: Greenwalds Vanguard of Davis is a blog now minus the People, for good or ill. And what in general, neologistically, is a blog? A bad combo of a blurb and a bog. Users/SolidSender
20081117 13:01:07 nbsp Feher has (from what Ive personally observed) had a fair and steady hand behind the the uniform of DPD, and I for one think that he has done a bit to help the much maligned public imagine of DPD. Its a shame to see off duty activities potentially jeopardize a budding career. One should consider the implications of their actions, but when your inebriated... Users/StevenDaubert
20081118 08:48:38 nbsp Re Officer Fehers arrest in Sacramento: Im all for giving people second chances, but what jumps out at me here is that he misrepresented himself and lied to the arresting officers. First, by trying to get away with saying he worked for YONET (thats a special task force, btw) and then for saying he worked for BNE. And he didnt come clean. It was later that Sac PD discovered that Davis PD was his employer.
Seems to me that reinstatement should be contingent upon some sort of alcohol treatment program. There are signs of addiction here.
And btw: He hasnt learned anything working as a police officer? When you are under arrest, keep your mouth shut! After giving hundreds and hundreds of Miranda warnings, he doesnt know this yet??!!
Jebus. Users/CurlyGirl26
20081118 10:18:40 nbsp Is it true that the Davis Enterprise didnt cover this story (Officer Fehers arrest)? If so, doesnt this seem odd to anyone? The Enterprise reports all sorts mundane little stories all the time. Why not this? And dont tell me its because they werent aware of it. Because we all know that is bogus. Users/CurlyGirl26
20081118 17:28:11 nbsp I was led to believe that the Enterprise knew about this story before I did and declined to run it. If that is inaccurate, i apologize. Users/DavidGreenwald
20090604 07:51:05 nbsp Major new story today: Covell Village campaign failed to report hundreds of thousands in campaign contributions during their 2005 campaign according to paperwork filed in 2008 http://davisvanguard.org/index.php?optioncom_content&viewarticle&id2830:vanguardreportcovellvillagecampaignfailedtodisclosehundredsofthousandsincampaignexpenditures&catid53:landuseopenspace&Itemid86 Failure To Disclose . This may sound like old news, but Covell is launching a new campaign and these changes were not filed until April 2008, two and a half years AFTER the campaign and following FPPC complaints about underreporting of campaign expendituresUsers/DavidGreenwald
20091121 11:55:07 nbsp Really appreciating the coverage of UCD students protesting the tuition increases. Keep up the good work! Users/ZachStednick
20091209 00:27:47 nbsp I cant help but notice the websites down as of this moment. Any reason why? Users/ARWENNHOLD
Its not unusual for websites to be down from time to time... Its back up now. Users/CovertProfessor
20100214 17:45:26 nbsp Its interesting reading about the history of this Peoples blogsite; in reality, this blogsite is for selected people of Davis, namely, those who dont blog about Davids pal and wifes political crony Bill Ritter.
Case in point, as of yesterday, I was officially banned from blogging on the Vanguard. Unlike many others, I blogged with my real name and real email address, because I believe in being honest about my views regarding political issues in our great town. I comanaged (on a volunteer basis) the No on P campaign last fall against the proposed Wildhorse Ranch development, which we won with an astounding 75% of the vote. Davids pal, Bill Ritter, was the paid campaign manager by Parlin Development, and he did as a remarkable favor by running a horrible campaign (any of you remember all those goofy Yes on P mailers, if not, they are documented on this site.)
Any ways, yesterday David posted a story on the Vanguard about the CHA (Choices for Healthy Aging) group and their efforts behind supporting a seniorsonly development at the old Covell Village site. I had blogged in response to a previous blogger that this sounded a lot like a Really Grey campaign, similar to Bill Ritters Really Green campaign for Measure P, and to watch out for all the mailers coming our way with pictures of happy healthy seniors having picnics, lawn bowling, going on hay rides, etc, and perhaps the developers should hire Ritter. I NEVER used any profanity or any derogatory terms against Bill Ritter. David edited this (and all of my subsequent blogs), and sent me an email basically admonishing me of no more personal attacks against Bill Ritter, or I would be banned, to which I told him basically to shove it, and I still believe in the First Amendment when it comes to political opinion sites. I have now been banned; if I chose to use my real name and real email, which so few of his bloggers do, I am not able to post comments on the Vanguard. Are there any other banned bloggers out there? or do I have the honor of being the first.
This is what the Peoples Vanguard is all about, it is a selective site, which appears to filter the opinions, no different than the fair and balanced FOX News! Users/GregSokolov
Thanks for posting about this, its great to have various points of view on various sites. Would you say the First Amendment applies just as well to political letters to the editor of a newspaper not being printed at the choice of the editor though? Is David in a similar position on the Vanguard? I dont expect private media like a newspaper or the Vanguard and certainly not FOX News to be fair and balanced. I dont really want them to be either, I think its reasonable to get information from multiple sources (like you posting here on the wiki) and form my own opinions. For more information about the explicit biases of the Vanguard, see http://davisvanguard.org/index.php?optioncom_content&viewarticle&id428:commentarywhenfairandbalancedislessaccurate&catid56:davisenterprise&Itemid82 Commentary: When Fair and Balanced is Less Accurate Users/NickSchmalenberger
20100214 19:48:32 nbsp Hi Nick, thanks for your insightful reply and historical reference to prior Vanguard opinion about fair and balanced (very interesting to say the least!) My main point regarding free speech and the Peoples Vanguard is simply to highlight the utter hypocrisy of David Greenwald (and his lack of credibility) when it comes to peripheral growth issues. In the Wildhorse Ranch election, he was clearly on the side of more development, and now with Covell Village Senior City, he is incensed at the whole really grey campaign being waged by developerbacked CHA group; and why the radical divergence in his views (literally only three months post Wildhorse Ranch)? Bill Ritter. I blog about it, using my full real name and email, and use no profanity, derogatory language and I get axed it. Why? Because perhaps I struck a sensitive nerve in his political body and he deems it personal attack; eerily similar to the way OReilly gets all defensive and cuts people off (watch his latest interview with Jon Stewart as a great example). David apparently has no backbone when it comes to hardcore analysis of his ever shifting views on growth issues, and has resorted to banning folks. Since I assume my views are safe at Davis Wiki, I will continue to bring up these issues to the Davis citizenry. Users/GregSokolov
I agree that David Greenwalds blog takes definite stands on particular issues but surely you dont think that one cant be against a particular development while for another? I say this as someone who voted no on P and who isnt crazy about this new Senior City proposal, either. But they are different proposals with different pros and cons; I can see supporting one and not the other. As for his banning of you, its hard to judge that without knowing the exact content of what you wrote. Users/CovertProfessor
20100214 21:29:19 nbsp I strongly disagree about the public nature of the Enterprise editorial page. I think the Enterprise and most papers are biased, although the Enterprise tries to hide it more than the Winters Express for example. I think that the Express has a more interesting editorial page because of its bias. Users/NickSchmalenberger
20100214 22:55:21 nbsp To CovertProfessor:
I blogged essentially that if Covell Village II was to be defeated (as both I and David Greenwald agree upon it should), perhaps Bill Ritter should be hired to run their campaign, and do as awful a job as he did for Yes on P. I then followed that comment with additional comment asking David to explain his differences only three months later in opposing Covell Village Senior City, in the face of his recent support for Wildhorse Ranch. THATS IT!!! That is what he construed as personal attacks; I guess when he called Bob Dunning vile and depraved for his humorous comments on paid college students during the Measure P, that was not personal; its only when it is directed towards his political crony Bill Ritter, that it becomes personal, By the way, I have heard rumors that David and Cecilia Greenwald still owe Bill Ritter about $30,000 from her failed City Council bid in 2008; makes one wonder if David is paying him back by erasing all negative comments about him on his blog site...some free speech! Its all bought and paid for, like any other news site! Users/GregSokolov
20100516 06:21:44 nbsp Made my first visit to the Vanguard today and read it for several hours. The vanguard is:
Extremely biased: Greenwald presents his opinions completely without acknowledgment to dissenting opinions of any kind, let alone going so far as including a quote to allow one of his targets to defend themselves. Each article is written from a position of absolute metaphysical certainty. (a prime example, 42yearold homeless being stomped on by the DA, but his domestic burglary charge remains comfortably unexplained)
Extremely poor: Ive seen some shoddy editing standards in my day but this is ridiculous.
Confusing and uninformative: The average article refers to other past events without explanation or crosslinking of any kind.
Interesting: Does a good job bringing issues to light for further research from less biased sources.
Sensationalist: Guilt is commutatively applied to broad swaths of individuals or organizations in public positions based on the testimony of interviewees with hazy or missing credentials but never once leveled at any person not of public standing in any way however brief or insignificant.
I appreciate GregSokolovs position that the Vanguard is similar to FOX news. The techniques used to mislead readers in the Vanguard are very similar to those used by They, our most auspicious indoctrination organization. Users/khabok
20100517 05:06:53 nbsp I obviously havent read this page in awhile. I dont want to get into Sokolovs post, needless to say, he presents a onesided view of the reason he was banned. In fact, he doesnt state, and apparently doesnt know why he was banned. Weve been trying to crack down on posts that lead to titfortat fights, I asked him to tone it down, he responded by telling me not to contact him anymore and telling me that I was being put on spam block. At that point, I had little choice but to ban him, since he was unwilling to cooperate in our efforts to change the tone of posting.
He posts this: I have heard rumors that David and Cecilia Greenwald still owe Bill Ritter about $30,000 from her failed City Council bid in 2008. This is just untrue. We have not paid Bill Ritter nor do we owe Bill Ritter anything. I suggest that should be taken down as unfounded gossip.
Khabok: Thanks for your post. I agree with you on the editing issues, we have tried to address them, but unfortunately it is still largely one person produce a mountain load of content. Biased? Certainly. Although your example has an explanation, the domestic burglary charge was from over 20 years ago, Im not sure what you wanted there, the information was not readily available on it and probably not all that relevant to a case of vandalism. I agree I could probably do better on background information, crosslinking would be helpful, Ill try to do better with that. I have no idea what youre trying to say on your last point. Feel free to contact me if you have some specific suggestions. Users/DavidGreenwald
20100518 06:19:26 nbsp ah, this sort of thing is much easier when you can address people directly.
Weve been trying to crack down on posts does it really matter what you choose to follow that statement with?
I have heard rumors that David and Cecilia Greenwald still owe Bill Ritter about $30,000 from her failed City Council bid in 2008. he says hes heard rumors to that effect. Do you have definitive information that hes heard no such rumors?
he was unwilling to cooperate in our efforts to change the tone of posting. If his posts have a tasteless tone let his readership speak for itself, thats how democratic information functions. Personally, I believe most people in this town are very crude and pointlessly antagonistic, but if thats the truth should we take the role of cultural exemplars and try to hide it?
I agree with you on the editing issues good. Id love to respect the Vanguard, I was quite excited when I found it. Hey, Ill be happy to edit it for you if you like, just dont be surprised to get back a lot of notes like explain this idea or what does the opposition say about this.
Im not sure what you wanted there to not have to WONDER whos place he broke into, what he took, why you wont tell me and whether youre asking me to feel sorry for some sort of really awful scumbag. I cant begin to tell you the experiences Ive had with street kids (sorry, homeless people) in this area and Id love to know what the DA knows about him.
I have no idea what youre trying to say on your last point that its really easy to jump down the throat of an organization without talking about why they do what they do. Your treatment of the Buzayan incident, for instance. Among the long list of people whove tried to make me upset about the Buzayan familys situation, youve done the least to explain that idea to me, yet you reference that arrest offhand in other articles the way one might mention the Holocaust: as a token worstpossiblething to end debate before it starts.
oh by the way, your article Are Recent Shootings A Sign of Things To Come or an Aberration from last October. First of all, thanks for actually covering all those incidents. Second, have a free anonymous tip, the problem was coke traffic but it should be over now. Users/khabok
2010-05-18 09:15:08 khabok:
We made a decision that the tone of the comment section was too antagonistic and that it was driving away people who otherwise might participate, so we have made a concerted effort to change the tone. Some people naturally disagree with our editorial/moderation decisions; usually we can talk it through. This was the only case to date where that has not worked.
As to the rumor, there is no doubt in my mind that he's heard it, but my understanding of wiki policies is that this is not a place to post unsubstantiated rumors. It is completely false and there is zero basis to it. We never borrowed from nor paid money to the individual in question.
Finally, it was a 1989 burglary case, 21 years ago; the information was not readily available and not especially pertinent to the case. Why is the guy an awful scumbag, because he has bipolar disorder? That's really the important information: why is the DA's office trying to use three strikes on a guy that their own psychiatrists say is mentally ill and was when he committed a relatively minor crime, actually two relatively minor crimes?
I've written extensively on the Buzayan case; do a search and you can find the background information. Maybe at some point we can do backlinks, but that's not practical right now. Users/DavidGreenwald
2010-05-18 12:36:57 Since when are personal comments of any kind part of the operation of a typical wiki? If that opinion or belief exists, that is informational. If you assert it is untrue, that is ALSO informational. If morality in this area isn't interesting to you, how about this: deleting his post makes you look really guilty.
If you think I was implying that the mentally ill man in question is in actual fact a scumbag, you're once again completely missing the point. Incomplete information is just one more way to be evasive, and for anyone reading critically you've severely undermined your credibility because of it. Also poor for your credibility is making your point in the form of a question. This isn't Jeopardy. When you ask "why is the DA's office trying to" etcetera, I personally start formulating possible answers, of which there are many. Not everything about a person is a matter of public record. Care to address real possibilities in your next article?
I know you covered the background of the Buzayan case. I know where your search bar is, and I like to know what I'm talking about before I start. Users/khabok
Actually, most wikis have plenty of personal opinion. It wasn't until Wikipedia appeared that the concept of NPOV was connected to wikis. Alas, since it has since become one of the most well-known wikis, the two things are connected in people's minds. A wiki is a good community platform to have an open, public discussion about allegations and gather facts from multiple people. With tactful presentation, even very serious allegations can be presented and either laid to rest or supported by fact and witnesses. A good example is the currently evolving Con Artists entry: talk of a local scam has slowly accrued multiple sources and confirmation, including a photo. As the information is gathered, it can be refactored into a concise and clear whole, presenting the many aspects that a multitude of people have contributed. Cooperative editing, when handled well, is an amazingly powerful and insightful method of presenting issues. Users/JabberWokky Evan JabberWokky Edwards
I would concur wholeheartedly with the previous statement. Daubert
Thanks to JabberWokky for pointing out the facts on this one. I love community interaction as well. So much so that I like to see all of it intact, without so much as a "ur mom" deleted. Users/khabok
2010-06-03 08:48:54 I haven't come back to these posts in a long time, too busy watching David Greenwald's hypocrisy on the whole Ruth Asmundson/Vergis political affair; where was his outrage toward Asmundson last July when she let Parlin Developers essentially pick an election date for the Measure P ballot? Is it because his political puppeteer Bill Ritter was hired by Parlin to help influence the City Council?
Thanks, Khabok, for your independent analysis of the People's Vanguard; I wish that more people (you have not been censured by David yet) continue to do the same.
In his feeble explanation to you of why I was banned, he failed to mention a statement in his email to me which prompted my reply and my statement to him that I would spam all of his future replies.
He basically said "Measure P is over; you won, get over it," to which I replied that, although the election had passed, the key principles and players on the Yes on P side (i.e. Ritter) remain alive and need to be questioned in subsequent issues (i.e. how is it that only five months after Measure P, the outrage over the Covell Village redux project is suddenly all about bad development and unnecessary growth, when Wildhorse Ranch was so necessary?).
The rumors about the $30K they owe Ritter came to me from some very well-informed people in local politics... Whether it is a large amount of money or something else, the relationship between Ritter and the Greenwalds clearly influences his opinions and those of others whom he decides to characterize as negative on his People's blog site. Users/GregSokolov
2010-06-03 12:43:46 To Khabok:
I am glad you brought up the whole Buzayan case again. Ask David Greenwald how much Mr. Buzayan (the father) has donated to the Vanguard since that case???
Also, Mr. Buzayan lives in South Davis, near the new proposed development at Willowbank. SURPRISE, SURPRISE: David Greenwald was outraged at the City Council for approving the Willowbank project and wrote many blog posts about it.
What, I ask, is the difference between Willowbank (a much smaller project away from major roadways or schools) and Wildhorse Ranch (a larger project which would lead to increased traffic on Covell Blvd near Harper Junior High)??? The answer: NO Bill Ritter was hired by the Willowbank developers, and Buzayan lives in South Davis and not Wildhorse. Users/GregSokolov
2011-10-31 04:45:34 Love what you have done with the place! The web site is great... although it has gotten so big and bloated over the last few years!
Like... I kinda dug it when you were writing under a pseudonym and just using Blogger. It was just cool back then.
You're now like a Wal-Mart! How does that make you feel? It's like... you just put up a big Wal-Mart in your precious city!
Haha! Users/AshleyHamidi
The new Vanguard is now (12/14/2013) almost entirely black and even more bloated, after for some reason it went totally blank. I liked it better when it was on Blogger, unpretentiously exposing the dark underbelly of Davis politics. Users/BrianKenyon
|
{"hexsha": "cf6db6bbea706d55673a7747af120a5f47d45a64", "size": 31439, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/The_People%27s_Vanguard_of_Davis.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/The_People%27s_Vanguard_of_Davis.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/The_People%27s_Vanguard_of_Davis.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 184.9352941176, "max_line_length": 1328, "alphanum_fraction": 0.7981806037, "num_tokens": 6988}
|
function t = isascii(c)
%ISASCII True for ASCII characters.
%
% For a string C, ISASCII(C) is 1 for any ASCII character code between 0
% and 127, inclusive, and 0 otherwise.
%
% See also: ISALNUM, ISALPHA, ISDIGIT, ISLOWER, ISPRTCHR, ISUPPER,
% ISXDIGIT.
% Author: Peter J. Acklam
% Time-stamp: 2002-03-03 13:51:07 +0100
% E-mail: pjacklam@online.no
% URL: http://home.online.no/~pjacklam
error(nargchk(1, 1, nargin));
t = ischar(c) & ( 0 <= c ) & ( c <= 127 );
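% Illustrative examples (not part of the original file); the test is applied
% elementwise to char inputs:
%    isascii('abc')      % returns [1 1 1]
%    isascii(char(200))  % returns 0 (code above 127)
%    isascii(65)         % returns 0, since the input is numeric rather than char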
|
{"author": "CovertLab", "repo": "WholeCell", "sha": "6cdee6b355aa0f5ff2953b1ab356eea049108e07", "save_path": "github-repos/MATLAB/CovertLab-WholeCell", "path": "github-repos/MATLAB/CovertLab-WholeCell/WholeCell-6cdee6b355aa0f5ff2953b1ab356eea049108e07/lib/util/matutil/isascii.m"}
|
! ###################################################################
! Copyright (c) 2013-2019, Marc De Graef Research Group/Carnegie Mellon University
! All rights reserved.
!
! Redistribution and use in source and binary forms, with or without modification, are
! permitted provided that the following conditions are met:
!
! - Redistributions of source code must retain the above copyright notice, this list
! of conditions and the following disclaimer.
! - Redistributions in binary form must reproduce the above copyright notice, this
! list of conditions and the following disclaimer in the documentation and/or
! other materials provided with the distribution.
! - Neither the names of Marc De Graef, Carnegie Mellon University nor the names
! of its contributors may be used to endorse or promote products derived from
! this software without specific prior written permission.
!
! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
! USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
! ###################################################################
!--------------------------------------------------------------------------
! EMsoft:EMmkxtal.f90
!--------------------------------------------------------------------------
!
! PROGRAM: mkxtal
!
!> @author Marc De Graef, Carnegie Mellon University
!
!> @brief create a crystal structure file (very simple program)
!
!> @date 11/23/13 MDG 1.0 original
!> @date 08/10/14 MDG 1.1 added GenerateSymmetry call
!> @date 03/30/15 MDG 2.0 modified output to HDF format
!> @date 10/07/15 MDG 2.1 minor cleanup in preparation for release 3.0
!> @date 09/06/16 MDG 3.0 added Wyckoff position entry as an option
!> @date 04/20/18 MDG 4.0 start incorporation of 2D and 3D quasi-crystal structures
!--------------------------------------------------------------------------
program EMmkxtal
use local
use typedefs
use HDFsupport
use io
use crystal
use symmetry
use files
IMPLICIT NONE
type(unitcell), pointer :: cell
character(fnlen) :: progname, progdesc, fname, source
integer(kind=irg) :: numarg, i
integer(kind=irg) :: iargc !< external function for command line
character(fnlen) :: arg !< to be read from the command line
logical :: useWyckoff
progname = 'EMmkxtal.f90'
progdesc = 'Create an HDF crystal structure file and place it in the XtalFolder'
call EMsoft(progname, progdesc)
useWyckoff = .FALSE.
numarg = iargc()
if (numarg.gt.0) then ! there is at least one argument
do i=1,numarg
call getarg(i,arg)
! mess = 'Found the following argument: '//trim(arg); call Message("(/A/)")
! does the argument start with a '-' character?
if (arg(1:1).eq.'-') then
if (trim(arg).eq.'-h') then
call Message(' Program should be called as follows: ', frm = "(/A)")
call Message(' '//trim(progname)//' [-h] [-w] ', frm = "(A)")
call Message(' To produce this message, type '//trim(progname)//' -h', frm = "(A)")
call Message(' To use Wyckoff positions to enter atom coordinates, use -w option', frm = "(A)")
end if
if (trim(arg).eq.'-w') then
! with this option the GetAsymPosWyckoff routine will ask the user for Wyckoff positions instead of regular coordinate strings
useWyckoff = .TRUE.
end if
end if
end do
end if
allocate(cell)
cell%SYM_SGset=0
call GetLatParm(cell)
call GetSpaceGroup(cell)
call GenerateSymmetry(cell,.TRUE.)
if (useWyckoff) then
call GetAsymPosWyckoff(cell)
else
call GetAsymPos(cell)
end if
call ReadValue('Enter output file name (*.xtal) ', fname)
call ReadValue('Enter the source for this data [max. 512 characters] ', source)
cell%source = trim(source)
cell%fname = fname
write (*,*) 'Source = '//trim(cell%source)
call SaveDataHDF(cell)
end program EMmkxtal
|
{"hexsha": "5f2c5477b39a13b6b4491e3b048eac4687c85d86", "size": 4630, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Source/Utilities/EMmkxtal.f90", "max_stars_repo_name": "ZachClayburn/EMsoft", "max_stars_repo_head_hexsha": "5852e630fd0ce6c9538d7c0b7b1653dda28d0f1f", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Source/Utilities/EMmkxtal.f90", "max_issues_repo_name": "ZachClayburn/EMsoft", "max_issues_repo_head_hexsha": "5852e630fd0ce6c9538d7c0b7b1653dda28d0f1f", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Source/Utilities/EMmkxtal.f90", "max_forks_repo_name": "ZachClayburn/EMsoft", "max_forks_repo_head_hexsha": "5852e630fd0ce6c9538d7c0b7b1653dda28d0f1f", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9734513274, "max_line_length": 125, "alphanum_fraction": 0.6408207343, "num_tokens": 1107}
|
/* vim:set ts=3 sw=3 sts=3 et: */
/**
* Copyright © 2008-2013 Last.fm Limited
*
* This file is part of libmoost.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef MOOST_IO_DETAIL_HELPER_WIN32_HPP__
#define MOOST_IO_DETAIL_HELPER_WIN32_HPP__
#include <boost/asio.hpp>
#include <windows.h>
#include <strsafe.h>
namespace moost { namespace io { namespace detail {
class helper
{
public:
typedef HANDLE native_io_t;
typedef boost::asio::windows::basic_stream_handle<> async_stream_t;
typedef DWORD error_t;
static native_io_t duplicate(native_io_t in)
{
native_io_t duped;
if (!DuplicateHandle(GetCurrentProcess(), in, GetCurrentProcess(), &duped, 0, FALSE, DUPLICATE_SAME_ACCESS))
{
throw std::runtime_error("failed to duplicate handle");
}
return duped;
}
static bool close(native_io_t in)
{
return CloseHandle(in) == TRUE;
}
static void create_pipe(native_io_t& read_end, native_io_t& write_end)
{
/*
* For some strange reason, anonymous pipes, even though they're using named pipes under
* the hood according to the documentation, do not support asynchronous i/o.
* So I'm resorting to creating a named pipe with a unique name here.
*/
unsigned int instance = 0;
const unsigned int max_instances = 100;
std::basic_string<TCHAR> pipename;
native_io_t pipe_read, pipe_write;
while (instance < max_instances)
{
std::basic_stringstream<TCHAR> ss;
ss << TEXT("\\\\.\\pipe\\") << GetCurrentProcessId() << TEXT("\\iohelper") << instance;
pipename = ss.str();
pipe_write = CreateNamedPipe(pipename.c_str(), PIPE_ACCESS_OUTBOUND | FILE_FLAG_FIRST_PIPE_INSTANCE,
PIPE_TYPE_BYTE | PIPE_WAIT, 1, 64, 64, 0, NULL);
if (pipe_write != INVALID_HANDLE_VALUE)
{
break;
}
++instance;
}
if (pipe_write == INVALID_HANDLE_VALUE)
{
throw std::runtime_error("failed to create named pipe");
}
try
{
pipe_read = CreateFile(pipename.c_str(), GENERIC_READ, 0, NULL, OPEN_EXISTING, FILE_FLAG_NO_BUFFERING | FILE_FLAG_OVERLAPPED, NULL);
if (pipe_read == INVALID_HANDLE_VALUE)
{
throw std::runtime_error("failed to open named pipe");
}
}
catch (...)
{
CloseHandle(pipe_write);
throw;
}
read_end = pipe_read;
write_end = pipe_write;
}
static bool write(native_io_t io, const void *data, size_t length, size_t *written)
{
DWORD wr;
bool rv = ::WriteFile(io, data, length, &wr, NULL) == TRUE;
if (written)
{
*written = rv ? static_cast<size_t>(wr) : 0;
}
return rv;
}
static error_t error()
{
return GetLastError();
}
};
} } }
#endif
|
{"hexsha": "ad3bd424b05bb0771c3ccb8d77e43202e90971af", "size": 3966, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/moost/io/detail/helper_win32.hpp", "max_stars_repo_name": "lastfm/libmoost", "max_stars_repo_head_hexsha": "895db7cc5468626f520971648741488c373c5cff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 37.0, "max_stars_repo_stars_event_min_datetime": "2015-02-22T17:15:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T02:24:41.000Z", "max_issues_repo_path": "include/moost/io/detail/helper_win32.hpp", "max_issues_repo_name": "lastfm/libmoost", "max_issues_repo_head_hexsha": "895db7cc5468626f520971648741488c373c5cff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/moost/io/detail/helper_win32.hpp", "max_forks_repo_name": "lastfm/libmoost", "max_forks_repo_head_hexsha": "895db7cc5468626f520971648741488c373c5cff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11.0, "max_forks_repo_forks_event_min_datetime": "2015-02-12T04:35:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T12:46:32.000Z", "avg_line_length": 28.5323741007, "max_line_length": 141, "alphanum_fraction": 0.6520423601, "num_tokens": 917}
|
import os
import pickle
import jieba
import operator
import statistics
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from datetime import datetime
from collections import Counter
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from PIL import Image
font_path = '../tools/msjh.ttc'
font = font_manager.FontProperties(fname='../tools/msjh.ttc',
weight='bold',
style='normal', size=16)
# jieba.set_dictionary('../jieba_data/dict.txt.big')
# jieba.load_userdict('../jieba_data/userdict.txt')
stopwords = []
with open('../jieba_data/stopwords.txt', 'r', encoding='UTF-8') as file:
for each in file.readlines():
stopwords.append(each.strip())
stopwords.append(' ')
# with open('crawler/data/new_talk.pkl', 'rb') as f:
# data = pickle.load(f)
# data = data[::-1]
# contents = [news['content'] for news in data]
# date_list = [news['date'] for news in data]
# all_date = sorted(list(set(date_list)))
# aall_date = [date[5:] for date in all_date][::-1]
# date_index = [date_list.index(each_date) for each_date in all_date]
# date_index.append(len(date_list)-1)
# number_of_news = [date_index[i+1] - date_index[i]-1 for i in range(len(date_index)-1)]
# number_of_terms = [sum([sum(data[ni]['cutted_dict'].values()) for ni in range(date_index[i], date_index[i+1])]) for i in range(len(date_index)-1)]
def remove_punctuation(content_string, user_pc=False):
if(user_pc):
punctuation = user_pc
else:
punctuation=list("!@#$%^&*()_+=-[]`~'\"|/\\abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ,.;{}\r\xa0\u3000、,。「」!?;:<>")
for p in punctuation:
content_string = content_string.replace(p, " ")
return(content_string)
def remove_stopwords_from_dict(word_dict, stopwords):
    for w in stopwords:
        word_dict.pop(w, None)  # drop the stopword if present, ignore it otherwise
    return word_dict
def remove_stopwords_from_list(word_list, stopwords):
    for w in stopwords:
        if w in word_list:  # list.remove raises ValueError when the word is absent
            word_list.remove(w)
    return word_list
def lcut_to_dict(lcut):
'''count list of cut words, and transform into dict'''
word_dict = dict(Counter(lcut)) #count every word repetition
# word_dict.pop(' ')
return(remove_stopwords_from_dict(word_dict, stopwords))
def sort_dict_by_values(d):
'''sorted the dict by the number of occurrences'''
return(sorted(d.items(), key=lambda x: x[1], reverse=True)) #desc
def news_containing_keyword(keyword, news_list):
'''fiter the contents contend with the keyword;
e.g. list all the news contain IN-WEN_TSAI'''
return list(filter(lambda news: keyword in news, news_list))
def data_containing_keyword(keyword, data):
return list(filter(lambda news: keyword in news['cutted_dict'].keys(), data))
def news_containing_keywords(keywords, news_list):
news = news_list
for keyword in keywords:
news = news_containing_keyword(keyword, news)
return news
def get_coshow(contents):
'''create a dictionary with key equals wcw, value equals the occurance'''
coshow_dict = {}
cat_content = ' '.join(contents)
clean_content = remove_punctuation(cat_content)
cut_content = jieba.lcut(clean_content) # return words cut in list
cut_content = list(filter(lambda x: x!=' ', cut_content)) # filter ' '
for i in range(len(cut_content)-1):
wcw = cut_content[i] + cut_content[i+1]
# print(wcw)
try:
coshow_dict[wcw] = coshow_dict[wcw] + 1 #exist, then add 1
except:
coshow_dict[wcw] = 1 #no exist, then set 1
sdbv = sort_dict_by_values(coshow_dict)
return sdbv
def get_cutted_dict(list_of_news):
cat = ' '.join(list_of_news)
cat = remove_punctuation(cat)
cutted = jieba.lcut(cat)
return lcut_to_dict(cutted)
def first_n_words(cutted_dict, n, word_len=2, to = 1000):
'''filter the word len from 2~1000'''
sdbv = sort_dict_by_values(cutted_dict)
return list(filter(lambda x: len(x[0])>=word_len and len(x[0])<=to, sdbv))[:n]
def get_wordcloud_of_keywords(keywords, list_of_news, image_path=False):
'''generate the image with mask_colors'''
if type(keywords) == str:
keywords = [keywords]
if image_path:
coloring = np.array(Image.open(os.path.join(image_path)))
color_func = ImageColorGenerator(coloring) # scratch the color from origin pic
wc = WordCloud(max_font_size=30,
background_color="white",
mask=coloring,
color_func=color_func,
font_path=font_path,
width=1000, height=1000,
max_words=10000)
else:
wc = WordCloud(max_font_size=30,
background_color="white",
colormap='Set2',
font_path=font_path,
width=1000, height=300,
max_words=1000)
# only include the news with keyword
keyword_news = news_containing_keywords(keywords, list_of_news)
keyword_dict = get_cutted_dict(keyword_news)
print(len(keyword_dict))
im = wc.generate_from_frequencies(keyword_dict)
return im
def merge_one_day_news_dict(one_day_dict, count='wt', divide = 1, inverse = False):
'''Append a new parameter: inverse, to calculate the idf of a word'''
all_words = set([word for each_dict in one_day_dict for word in each_dict])
one_day_wf = {}
for word in all_words:
one_day_wf[word] = 0 #initilize every word
for news in one_day_dict: #find word in each new, if none return 0
if count == 'wt':
one_day_wf[word] += news.get(word, 0)/divide #cal. word reptition
elif count == 'occur' and inverse == False:
one_day_wf[word] += bool(news.get(word, 0))/divide #cal. word occurrence
elif count == 'occur' and inverse == True:
one_day_wf[word] += bool(news.get(word, 0))
if inverse == True:
try:
one_day_wf[word] = np.log(divide/one_day_wf[word])
except:
one_day_wf[word] = 0.0
return one_day_wf
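# Illustrative sketch (not part of the original module): building per-day tf and
# idf tables with merge_one_day_news_dict. It assumes the commented-out globals
# above (data, date_index, number_of_terms, number_of_news, all_date) have been
# loaded, so it is left commented out as well.
# tf_per_day = [merge_one_day_news_dict(
#                   [data[n]['cutted_dict'] for n in range(date_index[i], date_index[i + 1])],
#                   count='wt', divide=number_of_terms[i])
#               for i in range(len(all_date))]
# idf_per_day = [merge_one_day_news_dict(
#                    [data[n]['cutted_dict'] for n in range(date_index[i], date_index[i + 1])],
#                    count='occur', divide=number_of_news[i], inverse=True)
#                for i in range(len(all_date))]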
def plot_line_of_word(word, date_from='2018-06-07', date_to='2019-01-22'):
from_index = df.columns.get_loc(date_from)
to_index = df.columns.get_loc(date_to)+1
date_length = to_index-from_index
date_int = date_length//25
font = font_manager.FontProperties(fname='msjh.ttc',
weight='bold',
style='normal', size=16)
plt.plot(aall_date[from_index:to_index], df.loc[word][date_from:date_to], '-o', label=word)
plt.legend(prop=font)
plt.xticks(list(range(0, date_length, date_int)), [aall_date[from_index:to_index][i] for i in range(0, date_length, date_int)])
def plot_tfdf_of_word(word, df_tf, df_occur, date_from='2018-06-07', date_to='2019-01-22'):
from_index = df_tf.columns.get_loc(date_from)
to_index = df_tf.columns.get_loc(date_to)+1
date_length = to_index-from_index
date_int = date_length//25
font = font_manager.FontProperties(fname='msjh.ttc',
weight='bold',
style='normal', size=16)
plt.plot(aall_date[from_index:to_index],
df_tf.loc[word][date_from:date_to]*df_occur.loc[word][date_from:date_to], '-o', label=word)
plt.legend(prop=font)
plt.xticks(list(range(0, date_length, date_int)), [aall_date[from_index:to_index][i] for i in range(0, date_length, date_int)])
def get_tfdf(word, df_tf, df_occur):
tfdf = df_occur.loc[word] * df_tf.loc[word]
return tfdf
def get_high_tfdf_date(word, df_tf, df_occur):
tfdf = get_tfdf(word, df_tf, df_occur)
m = statistics.mean(tfdf)
s = statistics.stdev(tfdf)
tfdf_bool = [x > m+s for x in tfdf]
tfdf_date = {all_date[x[0]]: tfdf[x[0]] for x in list(filter(lambda e: e[1], enumerate(tfdf_bool)))}
return tfdf_date
def keyword_with_event(keyword):
news_containing_key = news_containing_keyword(keyword, contents)
key_dict = get_cutted_dict(news_containing_key)
key_term = first_n_words(key_dict, 300)
return list(filter(lambda x: x in hot4, [x[0] for x in key_term]))
def draw_event(event, i, df_tf, df_occur, all_date):
event_date = get_high_tfdf_date(event, df_tf, df_occur)
date_index = [all_date.index(x) for x in event_date.keys()]
plt.scatter(date_index, [i for x in date_index], s=[x*100000 for x in list(event_date.values())])
def draw_by_list(tf_list, i):
plt.scatter(aall_date, [i for x in aall_date], s=[x*10 for x in tf_list])
|
{"hexsha": "3e529828b07347ebe33d57409b34a62103117a24", "size": 8757, "ext": "py", "lang": "Python", "max_stars_repo_path": "final_demo/modules.py", "max_stars_repo_name": "A2Zntu/HW0_Political_News_Analysis", "max_stars_repo_head_hexsha": "898c593ee2ed514a0052edd672a44ec3c68bb9fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-26T14:53:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-26T14:53:19.000Z", "max_issues_repo_path": "final_demo/modules.py", "max_issues_repo_name": "A2Zntu/HW0_Political_News_Analysis", "max_issues_repo_head_hexsha": "898c593ee2ed514a0052edd672a44ec3c68bb9fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "final_demo/modules.py", "max_forks_repo_name": "A2Zntu/HW0_Political_News_Analysis", "max_forks_repo_head_hexsha": "898c593ee2ed514a0052edd672a44ec3c68bb9fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9863013699, "max_line_length": 148, "alphanum_fraction": 0.6529633436, "include": true, "reason": "import numpy", "num_tokens": 2241}
|
import os
import sys
from pathlib import Path
_package_path = Path(__file__).parent.absolute()
_package_search_path = _package_path.parent
sys.path.append(str(_package_search_path))
import json
import numpy as np
import h5py
from data.vsum_tool import generate_summary, evaluate_summary
# example usage: python compute_fscores.py <results_dir> TVSum avg
results_dir = Path(sys.argv[1])
dataset = sys.argv[2]
eval_method = sys.argv[3]
print("results_dir:", results_dir)
print("dataset:", dataset)
print("eval_method:", eval_method)
# dataset prefix: {SumMe | TVSum}_
DS_PREFIX_LEN = 6
def epochFromFileName(fileName):
# file name format: {SumMe | TVSum}_{epoch}.json
try:
return int(fileName[DS_PREFIX_LEN:-5])
except:
return -1
results = os.listdir(results_dir)
results.sort(key=epochFromFileName)
HOME_PATH = _package_path / "../data"
DATASET_PATH = HOME_PATH / dataset / f"eccv16_dataset_{dataset.lower()}_google_pool5.h5"
# for each epoch, read the results' file and compute the f_score
f_score_epochs = []
for epoch in results:
print(epoch)
if epochFromFileName(epoch) < 0:
print(" Invalid epoch!")
continue
all_user_summary, all_summaries = [], []
with open(results_dir / epoch) as f:
epoch_results = json.loads(f.read())
with h5py.File(DATASET_PATH, "r") as hdf:
video_names = list(epoch_results.keys())
for video_name in video_names:
scores = np.asarray(epoch_results[video_name])
data = hdf[video_name]
user_summary = np.array(data["user_summary"])
change_points = np.array(data["change_points"])
n_frames = np.array(data["n_frames"])
picks = np.array(data["picks"])
summary, _, _, _ = generate_summary(scores, change_points, n_frames, picks)
all_user_summary.append(user_summary)
all_summaries.append(summary)
all_f_scores = []
# compare the resulting summary with the ground truth one, for each video
for video_index in range(len(all_summaries)):
summary = all_summaries[video_index]
user_summary = all_user_summary[video_index]
f_score, _, _ = evaluate_summary(summary, user_summary, eval_method)
all_f_scores.append(f_score)
f_score_epochs.append(np.mean(all_f_scores))
print(" f_score: ", np.mean(all_f_scores))
with open(results_dir / "f_scores.txt", "w") as outfile:
json.dump(f_score_epochs, outfile)
|
{"hexsha": "56529ea8c314e61a4bacc7fb46b55c04c44a2764", "size": 2543, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluation/compute_fscores.py", "max_stars_repo_name": "jongwookyi/AC-SUM-GAN", "max_stars_repo_head_hexsha": "2408d231f316be1265cda2abf231a21a2a1f5399", "max_stars_repo_licenses": ["FSFAP"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "evaluation/compute_fscores.py", "max_issues_repo_name": "jongwookyi/AC-SUM-GAN", "max_issues_repo_head_hexsha": "2408d231f316be1265cda2abf231a21a2a1f5399", "max_issues_repo_licenses": ["FSFAP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evaluation/compute_fscores.py", "max_forks_repo_name": "jongwookyi/AC-SUM-GAN", "max_forks_repo_head_hexsha": "2408d231f316be1265cda2abf231a21a2a1f5399", "max_forks_repo_licenses": ["FSFAP"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6385542169, "max_line_length": 91, "alphanum_fraction": 0.6795123869, "include": true, "reason": "import numpy", "num_tokens": 583}
|
[STATEMENT]
lemma const_res_subseq_prop_1:
assumes "s \<in> closed_seqs Zp"
shows "(\<forall>m.(const_res_subseq k s) m k = (const_res k s) )"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>m. const_res_subseq k s m k = const_res k s
[PROOF STEP]
using const_res_subseq_prop_0[of s] const_res_def[of k s]
[PROOF STATE]
proof (prove)
using this:
s \<in> carrier Zp\<^bsup>\<omega>\<^esup> \<Longrightarrow> \<exists>l. const_res_subseq ?k s = filtered_seq s (kth_res_equals ?k l) \<and> is_subseq_of s (const_res_subseq ?k s) \<and> (\<forall>m. const_res_subseq ?k s m ?k = l)
const_res k s = (THE n. \<forall>m. const_res_subseq k s m k = n)
goal (1 subgoal):
1. \<forall>m. const_res_subseq k s m k = const_res k s
[PROOF STEP]
by (smt assms const_res_subseq_def const_res_def the_equality)
|
{"llama_tokens": 349, "file": "Padic_Ints_Zp_Compact", "length": 2}
|
import numpy as np
class Method:
def __call__(self, x):
raise NotImplementedError()
def disable(self, index, x):
raise NotImplementedError
def __repr__(self):
raise NotImplementedError
class Max(Method):
def __call__(self, x):
return x.argmax()
def disable(self, index, x):
x[index] = -np.inf
def __repr__(self):
return 'Max'
class Top(Method):
def __init__(self, n=5):
self.n = n
def __call__(self, x):
return np.random.choice(np.argpartition(x, -self.n, axis=None)[-self.n:])
def disable(self, index, x):
x[index] = -np.inf
def __repr__(self):
return 'Top' + str(self.n)
class PowerProb(Method):
def __init__(self, power=4):
self.power = power
def __call__(self, x):
raveled = np.power(np.ravel(x), self.power)
return np.random.choice(np.arange(x.size), p=(raveled / np.sum(raveled)))
def disable(self, index, x):
x[index] = 0.0
def __repr__(self):
return 'PowerProb' + str(self.power)
class RandomInference(Method):
def __call__(self, x):
return np.random.choice(np.arange(x.size))
def disable(self, index, x):
pass
def __repr__(self):
return 'RandomInference'
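if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): select the best
    # index from a 1-D score array, then mask it so the next call skips it.
    scores = np.random.rand(16)
    method = Max()
    best = method(scores)          # index of the current maximum
    method.disable(best, scores)   # set that entry to -inf
    second_best = method(scores)   # next-best index after disabling the first
    print(method, best, second_best)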
|
{"hexsha": "8e6112ccc4d5760092e699a7f8dd640290964132", "size": 1298, "ext": "py", "lang": "Python", "max_stars_repo_path": "griffig/infer/selection.py", "max_stars_repo_name": "pantor/griffig", "max_stars_repo_head_hexsha": "0b10ef5d69902b14a4d648a809a51933a8f5fe8a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2021-07-22T10:22:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T15:10:15.000Z", "max_issues_repo_path": "griffig/infer/selection.py", "max_issues_repo_name": "pantor/griffig", "max_issues_repo_head_hexsha": "0b10ef5d69902b14a4d648a809a51933a8f5fe8a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-03T17:20:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-03T17:20:23.000Z", "max_forks_repo_path": "griffig/infer/selection.py", "max_forks_repo_name": "pantor/griffig", "max_forks_repo_head_hexsha": "0b10ef5d69902b14a4d648a809a51933a8f5fe8a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-10-19T02:49:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-24T18:53:55.000Z", "avg_line_length": 20.28125, "max_line_length": 81, "alphanum_fraction": 0.5978428351, "include": true, "reason": "import numpy", "num_tokens": 331}
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
import pickle
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from keras.layers import Dropout
from sklearn.preprocessing import MinMaxScaler
from keras import losses
# grid search to find best hyperparameter
# this takes extremely long (not even 25% done in 48 hours)
all_dat = pd.read_csv('../curated.csv', index_col=0)
# sift out pwrs
all_dat = all_dat.loc[all_dat['reactor_type'] == 'PWR']
# only get assemblies with enrichment bigger than 1.5 and bunrup higher than 10,000
all_dat = all_dat.loc[(all_dat['init_enr'] > 1.5) & (all_dat['bu'] > 10000)]
x = all_dat[['init_enr', 'bu']].values
y = all_dat.iloc[:, 5:].values
print('x shape', np.shape(x))
print('y shape', np.shape(y))
xscaler = MinMaxScaler().fit(x)
yscaler = MinMaxScaler().fit(y)
xscale = xscaler.transform(x)
yscale = yscaler.transform(y)
# 80-20 train/test split; validation folds come from KFold inside run_model
x_train, x_test, y_train, y_test = train_test_split(xscale, yscale, test_size=0.2)
iso_list = list(all_dat.iloc[:, 5:])
test_dict = {'xscaler': xscaler,
'yscaler': yscaler,
'x': x_test,
'y': y_test,
'iso_list': iso_list}
pickle.dump(test_dict, open('test_set.pkl', 'wb'))
def run_model(hidden_layers_=3,
node_per_hidden_layer_=4,
dropout_rate_=0,
output_activation_='linear',
epochs_=500,
batch_size_=50,
hidden_layer_activation_='relu',
return_model=False,
loss_function_='mse'
):
cvscores = []
kfold = KFold(n_splits=3)
param_dict = {'hidden_layers':hidden_layers_,
'node_per_hidden_layer': node_per_hidden_layer_,
'dropout_rate': dropout_rate_,
'output_activation': output_activation_,
'epochs': epochs_,
'batch_size': batch_size_}
print('Running:\n', param_dict)
    for train, val in kfold.split(x_train):  # split the training set only; the test set stays held out
model = Sequential()
model.add(Dense(len(x_train[0]), input_dim=len(x_train[0]), kernel_initializer='normal', activation='relu'))
for i in range(hidden_layers_):
model.add(Dense(node_per_hidden_layer_,
activation=hidden_layer_activation_))
if dropout_rate_ != 0:
model.add(Dropout(dropout_rate_))
model.add(Dense(len(y_train[0]), activation=output_activation_))
model.compile(loss=loss_function_, optimizer='adam')
model.fit(x_train[train], y_train[train],
epochs=epochs_, batch_size=batch_size_)
scores = model.evaluate(x_train[val], y_train[val])
        cvscores.append(scores * 100)  # evaluate returns the scalar loss here (no extra metrics were compiled)
print('%.2f%% (+/- %.2f%%)' %(np.mean(cvscores), np.std(cvscores)))
if return_model:
# so the model that eventually gets stored / used
# is only trained on 2/3 of data.
model_dict = {'model': model,
'xscaler': xscaler,
'yscaler': yscaler,
'iso_list': list(all_dat.iloc[:, 5:])
}
f = open('ann_model.pkl', 'wb')
pickle.dump(model_dict, f)
f.close()
return model_dict
    return param_dict, np.mean(cvscores)  # mean cross-validation loss, hashable for use as a dict key below
def hyperparameter_search(hidden_layers_list=range(1,5),
node_per_hidden_list=[4, 8, 16, 32],
dropout_rate_list=[0, 0.2, 0.5],
output_activation_list=['linear', 'softmax', 'sigmoid'],
epochs_list=[500],
batch_size_list=[50],
hidden_layer_activation_list=['relu'],
loss_function='mse'):
score_model_dict = {}
for _hidden_layers in hidden_layers_list:
for _node_per_hidden_layer in node_per_hidden_list:
for _dropout_rate in dropout_rate_list:
for _output_activation in output_activation_list:
for _epochs in epochs_list:
for _batch_size in batch_size_list:
for _hidden_layer_activation in hidden_layer_activation_list:
param_dict, score = run_model(hidden_layers_=_hidden_layers,
node_per_hidden_layer_= _node_per_hidden_layer,
dropout_rate_=_dropout_rate,
output_activation_=_output_activation,
epochs_=_epochs,
batch_size_=_batch_size,
hidden_layer_activation_=_hidden_layer_activation,
loss_function_=loss_function)
score_model_dict[score] = param_dict
f = open('ann.pkl', 'wb')
pickle.dump(score_model_dict, f)
f.close()
min_key = min(score_model_dict.keys())
best_param_dict = score_model_dict[min_key]
print('Best performing hyperparameter set:\n', best_param_dict)
return best_param_dict
best_param_dict = hyperparameter_search(hidden_layers_list=[2],
node_per_hidden_list=[128],
dropout_rate_list=[0],
output_activation_list=['linear'],
epochs_list=[500],
batch_size_list=[50],
hidden_layer_activation_list=['relu'],
loss_function=losses.mean_absolute_percentage_error)
model_dict = run_model(hidden_layers_=best_param_dict['hidden_layers'],
node_per_hidden_layer_=best_param_dict['node_per_hidden_layer'],
dropout_rate_=best_param_dict['dropout_rate'],
output_activation_=best_param_dict['output_activation'],
epochs_=best_param_dict['epochs'],
batch_size_=best_param_dict['batch_size'],
return_model=True)
print('Finished! The final file is ann_model.pkl')
|
{"hexsha": "7ff1c1bbe7f9c7e18674f2e9a439193de6c243a8", "size": 6602, "ext": "py", "lang": "Python", "max_stars_repo_path": "script/hyperparameter_search.py", "max_stars_repo_name": "jbae11/depletion_rom", "max_stars_repo_head_hexsha": "06d68465e81be13de5b6d6c8c8030a8ae9315b1b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-11T12:45:35.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-11T12:45:35.000Z", "max_issues_repo_path": "script/hyperparameter_search.py", "max_issues_repo_name": "jbae11/depletion_rom", "max_issues_repo_head_hexsha": "06d68465e81be13de5b6d6c8c8030a8ae9315b1b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2019-02-10T18:30:40.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-06T19:02:04.000Z", "max_forks_repo_path": "script/hyperparameter_search.py", "max_forks_repo_name": "gwenchee/depletion_rom", "max_forks_repo_head_hexsha": "050782d8fc79f211e1593dced36fb0ee22f8364e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-01-18T20:16:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T04:23:38.000Z", "avg_line_length": 45.2191780822, "max_line_length": 116, "alphanum_fraction": 0.5569524387, "include": true, "reason": "import numpy", "num_tokens": 1290}
|
import struct
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
import pyaudio
plt.rcParams['toolbar'] = 'None'
class LiveViewer(object):
def __init__(self, approx_fps, border_color):
self.pause_time = 1 / approx_fps
self.fig = plt.figure()
plt.tight_layout(pad=0)
self.fig.patch.set_facecolor(border_color)
self.fig.canvas.mpl_connect('close_event', self.handle_close)
self.fig.canvas.mpl_connect('key_press_event', self.toggle_fullscreen)
def display(self, image_generator):
im = plt.imshow(np.zeros((1, 1, 3)), animated=True)
def update(frame, *_):
im.set_array(frame)
return im,
ani = FuncAnimation(self.fig, func=update, frames=image_generator, interval=self.pause_time, blit=True)
plt.show()
@staticmethod
def toggle_fullscreen(event=None):
if event is None or event.key == "escape":
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
@staticmethod
def handle_close(event):
plt.close("all")
class AudioRecorder(object):
def __init__(self, feature_extractor, device_idx=None, frames_per_buffer=2048):
self.feature_extractor = feature_extractor
self.device_idx = device_idx
self.frames_per_buffer = frames_per_buffer
self.format = pyaudio.paInt16
self.channels = None
self.sampling_rate = None
self.stream = None
self.feature_dim = 41 # TODO get this properly somehow
self.audio_controller = pyaudio.PyAudio()
self.setup_device()
def setup_device(self):
global_info = self.audio_controller.get_host_api_info_by_index(0)
self.device_idx = int(global_info.get("defaultInputDevice")) if self.device_idx is None else self.device_idx
device_info = self.audio_controller.get_device_info_by_host_api_device_index(0, self.device_idx)
self.sampling_rate = int(device_info.get("defaultSampleRate"))
self.channels = int(device_info.get("maxInputChannels"))
def print_input_device_info(self):
info = self.audio_controller.get_host_api_info_by_index(0)
numdevices = info.get('deviceCount')
print("-------Available audio input devices-------")
for i in range (numdevices):
if self.audio_controller.get_device_info_by_host_api_device_index(0,i).get('maxInputChannels') > 0:
device_name = self.audio_controller.get_device_info_by_host_api_device_index(0,i).get('name')
print(f"Input Device id {i} - {device_name}")
print("-------------------------------------------")
def __enter__(self):
self.stream = self.audio_controller.open(input_device_index=self.device_idx,
format=self.format,
channels=self.channels,
rate=self.sampling_rate,
input=True,
frames_per_buffer=self.frames_per_buffer)
return self
def __exit__(self, *args, **kwargs):
if self.stream is not None:
self.stream.stop_stream()
self.stream.close()
self.audio_controller.terminate()
def __iter__(self):
if self.stream is None or self.audio_controller is None:
return
else:
for i in range(self.frames_per_buffer):
data = self.stream.read(self.frames_per_buffer, exception_on_overflow=False)
                fmt = "<h"  # paInt16 samples are signed 16-bit little-endian
data = np.array(list(struct.iter_unpack(fmt, data)))
yield self.feature_extractor(data)
if __name__ == "__main__":
from synethesia.network.feature_creators import logfbank_features
with AudioRecorder(feature_extractor=logfbank_features) as rec:
for audio in rec:
print(audio)
|
{"hexsha": "1f49796f67a87277d01e1a137aa78e3ff9112d4c", "size": 4047, "ext": "py", "lang": "Python", "max_stars_repo_path": "synethesia/network/io/live_viewer.py", "max_stars_repo_name": "RunOrVeith/SyNEThesia", "max_stars_repo_head_hexsha": "0ef5de759b4bf74cb318fc5e6e9be64520b8faf5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2018-03-26T08:47:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T07:57:59.000Z", "max_issues_repo_path": "synethesia/network/io/live_viewer.py", "max_issues_repo_name": "RunOrVeith/SyNEThesia", "max_issues_repo_head_hexsha": "0ef5de759b4bf74cb318fc5e6e9be64520b8faf5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synethesia/network/io/live_viewer.py", "max_forks_repo_name": "RunOrVeith/SyNEThesia", "max_forks_repo_head_hexsha": "0ef5de759b4bf74cb318fc5e6e9be64520b8faf5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-07T21:19:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-31T17:22:40.000Z", "avg_line_length": 38.9134615385, "max_line_length": 116, "alphanum_fraction": 0.6239189523, "include": true, "reason": "import numpy", "num_tokens": 802}
|
import os
import cv2
from pathlib import Path
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms, datasets
import torchvision.transforms.functional as tf
from albumentations import (Compose, RandomCrop, Resize, HorizontalFlip, ShiftScaleRotate, RandomResizedCrop, RandomBrightnessContrast, ElasticTransform, IAAAffine, IAAPerspective, OneOf)
class ValidationData(Dataset):
def __init__(self, root_dir=os.path.join(Path(__file__).parents[0], "data\\val"), transform=None, image_size=(512, 512)):
# Initialize Directory Tree from current working directory if no directory is provided
self.root_dir = root_dir
# Get Image/Mask Path
self.img_dir = os.path.join(self.root_dir, 'images')
self.mask_dir = os.path.join(self.root_dir, 'masks')
# Get List of Images/Masks in Carvana Directory
self.img_list = os.listdir(self.img_dir)
self.mask_list = os.listdir(self.mask_dir)
# Get Number of Images/Masks
self.num_img = len(self.img_list)
self.num_mask = len(self.mask_list)
# Define Transform Image Dimensions
self.image_height = image_size[1]
self.image_width = image_size[0]
# Define Custom Augmentation Transform
self.transform = transform
# Define Default Aug Transformation
self.album_transform = Compose([
HorizontalFlip(),
ShiftScaleRotate(
shift_limit=0.0625,
scale_limit=0.2,
rotate_limit=45,
p=0.2),
OneOf([
ElasticTransform(p=.2),
IAAPerspective(p=.35),
], p=.35)
])
def __len__(self):
"""
Define the length of the dataset.
"""
# Check if number of image/masks are equal
if self.num_img == self.num_mask:
return self.num_img
else:
raise Exception("Number of Images & GT Masks is NOT equal")
def __getitem__(self, item):
"""
Get the image/mask at index "item"
:return:
"""
# Define full image/mask path for extracting data
img_path = os.path.join(self.img_dir, self.img_list[item])
mask_path = os.path.join(self.mask_dir, self.mask_list[item])
# Use OpenCV to read Image/Masks from given paths
img = cv2.imread(img_path)
msk = cv2.imread(mask_path, 0)
if self.transform is not None:
# augment = self.album_transform(image=image, mask=mask)
augment = self.transform(image=img, mask=msk)
img, msk = augment['image'], augment['mask']
# Convert & Resize Image & Mask
img, msk = Image.fromarray(img), Image.fromarray(msk)
image_resized = tf.resize(img=img, size=[self.image_height, self.image_width])
mask_resized = tf.resize(img=msk, size=[self.image_height, self.image_width])
# Normalize Image but NOT mask (implicit in applied transforms)
image_ten = tf.to_tensor(image_resized).float()
mask_ten = tf.pil_to_tensor(mask_resized).float()
return image_ten, mask_ten
if __name__ == '__main__':
c_ds = ValidationData()
dl = DataLoader(c_ds, batch_size=1, shuffle=True)
for image, mask in dl:
print(f"Image Shape = {image.shape}, type = {type(image)}, min = {torch.min(image)} max = {torch.max(image)}")
print(f"Mask Shape = {mask.shape}, type = {type(mask)}, min = {torch.min(mask)} max = {torch.max(mask)}")
break
|
{"hexsha": "d2e4278a34917a669221a32f1061736a62744c1f", "size": 3639, "ext": "py", "lang": "Python", "max_stars_repo_path": "RGB_Segmentation/data/Carvana_Dataset/ValDS.py", "max_stars_repo_name": "JonnyD1117/RGB-D-Plant-Segmentation", "max_stars_repo_head_hexsha": "b98eb0f32c27205abc9801eca4b2ad3f61ad80d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-13T02:44:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-13T02:44:04.000Z", "max_issues_repo_path": "RGB_Segmentation/data/Carvana_Dataset/ValDS.py", "max_issues_repo_name": "JonnyD1117/RGB-D-Plant-Segmentation", "max_issues_repo_head_hexsha": "b98eb0f32c27205abc9801eca4b2ad3f61ad80d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-27T10:49:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-27T10:49:29.000Z", "max_forks_repo_path": "RGB_Segmentation/data/Carvana_Dataset/ValDS.py", "max_forks_repo_name": "JonnyD1117/RGB-D-Plant-Segmentation", "max_forks_repo_head_hexsha": "b98eb0f32c27205abc9801eca4b2ad3f61ad80d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-14T02:15:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-14T02:15:23.000Z", "avg_line_length": 36.0297029703, "max_line_length": 187, "alphanum_fraction": 0.6336905743, "include": true, "reason": "import numpy", "num_tokens": 833}
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
from __future__ import division
import os
import numpy as np
import argparse
from glob import glob
from pose_evaluation_utils import *
import sys
parser = argparse.ArgumentParser()
parser.add_argument("--gtruth_dir", type=str,
help='Path to the directory with ground-truth trajectories')
parser.add_argument("--pred_dir", type=str,
help="Path to the directory with predicted trajectories")
args = parser.parse_args()
def main():
path = os.listdir(args.pred_dir)
seq=['09','10']
for SEQ in seq:
print('evaluate'+' '+SEQ+':')
for p in path:
up_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
pred_files = glob(up_path +'/'+args.pred_dir+ p +'/'+ SEQ + '/*.txt')
            pred_files.sort()
ate_all = []
for i in range(len(pred_files)):
gtruth_file = args.gtruth_dir +SEQ+'/'+ os.path.basename(pred_files[i])
if not os.path.exists(gtruth_file):
continue
ate = compute_ate(gtruth_file, pred_files[i])
                if ate is False:
continue
ate_all.append(ate)
ate_all = np.array(ate_all)
print('result of :%s'%p)
print("ATE mean: %.4f, std: %.4f" % (np.mean(ate_all), np.std(ate_all)))
main()
|
{"hexsha": "184ce84f652a9e02c5db0e134297d16f17b8c077", "size": 1413, "ext": "py", "lang": "Python", "max_stars_repo_path": "kitti_eval/eval_pose_all.py", "max_stars_repo_name": "xuyufan936831611/vo_imu", "max_stars_repo_head_hexsha": "8a5753384b4a5c08dc83edf718d76a2ac308a298", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kitti_eval/eval_pose_all.py", "max_issues_repo_name": "xuyufan936831611/vo_imu", "max_issues_repo_head_hexsha": "8a5753384b4a5c08dc83edf718d76a2ac308a298", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kitti_eval/eval_pose_all.py", "max_forks_repo_name": "xuyufan936831611/vo_imu", "max_forks_repo_head_hexsha": "8a5753384b4a5c08dc83edf718d76a2ac308a298", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.325, "max_line_length": 87, "alphanum_fraction": 0.5916489738, "include": true, "reason": "import numpy", "num_tokens": 326}
|
// Copyright Carl Philipp Reh 2009 - 2016.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef FCPPT_CONTAINER_GRID_POS_ITERATOR_DECL_HPP_INCLUDED
#define FCPPT_CONTAINER_GRID_POS_ITERATOR_DECL_HPP_INCLUDED
#include <fcppt/container/grid/min.hpp>
#include <fcppt/container/grid/pos.hpp>
#include <fcppt/container/grid/pos_iterator_fwd.hpp>
#include <fcppt/container/grid/size_type.hpp>
#include <fcppt/container/grid/sup.hpp>
#include <fcppt/container/grid/detail/pos_iterator_base.hpp>
#include <fcppt/preprocessor/disable_gcc_warning.hpp>
#include <fcppt/preprocessor/pop_warning.hpp>
#include <fcppt/preprocessor/push_warning.hpp>
#include <fcppt/config/external_begin.hpp>
#include <boost/iterator/iterator_facade.hpp>
#include <fcppt/config/external_end.hpp>
namespace fcppt
{
namespace container
{
namespace grid
{
FCPPT_PP_PUSH_WARNING
FCPPT_PP_DISABLE_GCC_WARNING(-Weffc++)
/**
\brief An iterator over grid position
\ingroup fcpptcontainergrid
*/
template<
typename SizeType,
fcppt::container::grid::size_type Size
>
class pos_iterator final
:
public
fcppt::container::grid::detail::pos_iterator_base<
SizeType,
Size
>
{
typedef
fcppt::container::grid::detail::pos_iterator_base<
SizeType,
Size
>
base_type;
public:
typedef
typename
base_type::value_type
value_type;
typedef
value_type
pos;
typedef
typename
pos::value_type
size_type;
typedef
typename
base_type::reference
reference;
typedef
typename
base_type::pointer
pointer;
typedef
typename
base_type::iterator_category
iterator_category;
typedef
typename
base_type::difference_type
difference_type;
typedef
fcppt::container::grid::min<
size_type,
Size
>
min;
typedef
fcppt::container::grid::sup<
size_type,
Size
>
sup;
pos_iterator(
pos,
min,
sup
);
private:
friend class boost::iterator_core_access;
void
increment();
reference
dereference() const;
bool
equal(
pos_iterator const &
) const;
pos current_;
min min_;
sup sup_;
};
FCPPT_PP_POP_WARNING
}
}
}
#endif
|
{"hexsha": "3dcc752f29617ed49f0b814406a3235893816c9b", "size": 2171, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/fcppt/container/grid/pos_iterator_decl.hpp", "max_stars_repo_name": "vinzenz/fcppt", "max_stars_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/fcppt/container/grid/pos_iterator_decl.hpp", "max_issues_repo_name": "vinzenz/fcppt", "max_issues_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/fcppt/container/grid/pos_iterator_decl.hpp", "max_forks_repo_name": "vinzenz/fcppt", "max_forks_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.618705036, "max_line_length": 61, "alphanum_fraction": 0.758636573, "num_tokens": 573}
|
"""
This module evaluates the results from multiple runs and generates plots.
"""
import csv
import os
from itertools import product
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import cycler
from data import read_compas
class Evaluation(object):
# Convert abbreviations to full words for plot labels
optimizer_labels = {'lagrange': 'Lagr.',
'iplb': 'iplb',
'projected': 'proj.',
'baseline': 'bl'}
dataset_labels = {'adult': 'Adult',
'bank': 'Bank',
'synthetic': 'Synthetic',
'compas': 'COMPAS',
'sqf': 'SQF',
'german': 'German'}
def __init__(self, filepath, fontsize=12):
"""
Initialize an evaluation instance.
Args:
filepath: The csv file containing the results
fontsize: Fontsize used in plot titles and labels
"""
self.filepath = filepath
self.fontsize = fontsize
self.r = pd.read_csv(filepath)
# We will rely on the values being sorted by the constraints
self.r.sort_values(by='constraint', inplace=True)
# Reset the indices after the sorting
self.r.reset_index(drop=True, inplace=True)
# Get the parameters for which results are present
self.params = {
'datasets': get_distinct_values(self.r, 'dataset'),
'optimizers': get_distinct_values(self.r, 'optimizer'),
'fairness': get_distinct_values(self.r, 'fairness'),
'approximations': get_distinct_values(self.r, 'approximation'),
'constraints': get_distinct_values(self.r, 'constraint'),
'batchsizes': get_distinct_values(self.r, 'batchsize'),
'bits': list(set([tuple(a)
for a in self.r[['nbits', 'nintbits']].values])),
'max_sample_size': get_distinct_values(self.r, 'max_sample_size'),
}
def print_value_summary(self):
"""Print the parameters for which results are present."""
pprint(self.params)
def plot_accuracy(self,
datasets=None,
fairness='ppercent',
batchsize=64,
nbits=0,
nintbits=0,
show=['valid_accuracy'],
save_fig=False,
ppercent_on_x=True,
per_plot_size=3):
"""Compare accuracies from optimization techniques across datasets."""
if datasets is None:
datasets = self.params['datasets']
n_data = len(datasets)
optimizers = self.params['optimizers']
optimizers.remove('baseline')
n_optimizers = len(optimizers)
n_approx = len(self.params['approximations'])
fig, axs = plt.subplots(1, n_data,
figsize=(per_plot_size * n_data, per_plot_size),
sharex=(not ppercent_on_x))
# Number of colors for lines is number of optimizers
# Get color cycle list with linestyles for two different approximations
cls = plt.rcParams['axes.prop_cycle'].by_key()['color'][:n_optimizers]
if n_approx == 1:
linestyle = {self.params['approximations'][0]: '-'}
elif n_approx == 2:
cls = [cl for cls in list(zip(cls, cls)) for cl in cls]
linestyle = {'none': '-', 'secureml': '--'}
else:
raise ValueError('Only support 1 or 2 different approximations.')
# Loop over datasets for different plots
for j, data in enumerate(datasets):
if n_data == 1:
curax = axs
else:
curax = axs[j]
lines = []
leglabels = []
select = self.r.loc[
(self.r['dataset'] == data) &
(self.r['fairness'] == fairness) &
(self.r['optimizer'] == 'baseline')]
accs = []
constraints = get_distinct_values(select, 'constraint')
for c in constraints:
acc = select.loc[select['constraint'] == c, show].values
if acc.size > 0:
accs.append(acc[0])
else:
accs.append(0 if len(show) == 1 else [0]*len(show))
if ppercent_on_x:
x_vals = self.get_ppercent(select)
else:
x_vals = constraints
if ppercent_on_x:
td, = curax.plot(x_vals, accs, 'r', label='baseline')
else:
td, = curax.semilogx(x_vals, accs, 'r', label='baseline')
lines.append(td)
leglabels.append(self.optimizer_labels['baseline'])
# Loop over optimizers for different colors
curax.set_prop_cycle(cycler('color', cls))
for k, opt in enumerate(optimizers):
# Loop over approximations for different line styles
for approx in self.params['approximations']:
select = self.r.loc[
(self.r['approximation'] == approx) &
(self.r['dataset'] == data) &
(self.r['fairness'] == fairness) &
(self.r['batchsize'] == batchsize) &
(self.r['optimizer'] == opt) &
(self.r['nbits'] == nbits) &
(self.r['nintbits'] == nintbits)]
accs = []
constraints = get_distinct_values(select, 'constraint')
for c in constraints:
acc = select.loc[select['constraint'] == c, show].values
if acc.size > 0:
accs.append(acc[0])
else:
accs.append(0 if len(show) == 1 else [0]*len(show))
if ppercent_on_x:
x_vals = self.get_ppercent(select)
else:
x_vals = constraints
if nbits != 0:
bitlabel = (str(int(nbits)) + ' bits')
else:
bitlabel = 'float'
pltlabel = opt + ', ' + approx + ', ' + bitlabel
if ppercent_on_x:
td, = curax.plot(x_vals, accs, label=pltlabel,
linestyle=linestyle[approx])
else:
td, = curax.semilogx(x_vals, accs, label=pltlabel,
linestyle=linestyle[approx])
lines.append(td)
leglabels.append(self.optimizer_labels[opt] + ', ' + approx)
try:
baseline_acc = select['baseline_accuracy'].values[0]
curax.axhline(baseline_acc, color='k', linestyle='--',
label='baseline')
except KeyError:
pass
curax.grid(axis='y')
curax.set_title(self.dataset_labels[data])
if n_data == 1:
            axs.set_ylabel('accuracy', fontsize=self.fontsize)
else:
axs[n_data-1].set_ylabel('accuracy', fontsize=self.fontsize)
fig.legend(lines, leglabels, loc='lower center', bbox_to_anchor=(0.5, -0.1), ncol=3)
fig.tight_layout()
fig.show()
if save_fig:
from datetime import datetime
timestamp = datetime.now().strftime("%Y-%m-$d_%H-%M-%S")
figname = "accuracy_comparison_" + timestamp + ".pdf"
fig.savefig(figname, bbox_inches='tight')
def plot_frac_in_positive(self,
datasets=None,
compas_index=1,
batchsize=64,
fairness='ppercent',
optimizer='lagrange',
save_fig=False,
ppercent_on_x=True,
per_plot_size=3):
if datasets is None:
datasets = self.params['datasets']
n_data = len(datasets)
n_approximations = len(self.params['approximations'])
n_bits = len(self.params['bits'])
approximations = self.params['approximations']
bits = self.params['bits']
show = ('valid_z0_in_y1_test', 'valid_z1_in_y1_test')
# Draw lines for the positive fractions in both demographic groups
labels = ['z=0', 'z=1']
linestyle = ['-', '--']
n_lines = n_approximations * n_bits
cls = plt.rcParams['axes.prop_cycle'].by_key()['color'][:n_lines+1]
cls = [cl for cls in list(zip(cls, cls)) for cl in cls]
# We need to load the compas dataset again to compute fractions
if 'compas' in datasets:
_, Xte, _, yte, _, Zte = read_compas()
fig, axs = plt.subplots(1, n_data,
figsize=(per_plot_size * n_data, per_plot_size),
sharex=(not ppercent_on_x))
# Loop through datasets for different plots
for j, data in enumerate(datasets):
if n_data == 1:
curax = axs
else:
curax = axs[j]
curax.set_prop_cycle(cycler('color', cls))
lines = []
leglabels = []
select = self.r.loc[
(self.r['dataset'] == data) &
(self.r['fairness'] == fairness) &
(self.r['optimizer'] == 'baseline')]
accs = []
constraints = get_distinct_values(select, 'constraint')
if data == 'compas':
accs = self.get_frac_compas(select, compas_index,
Xte, Zte)
else:
for c in constraints:
acc = select.loc[select['constraint'] == c, show].values
if acc.size > 0:
accs.append(acc[0])
else:
accs.append([0] * len(show))
accs = np.array(accs)
for acc, label, ls in zip(accs.T, labels, linestyle):
pltlabel = 'baseline ' + label
if ppercent_on_x:
x_vals = self.get_ppercent(select)
td, = curax.plot(x_vals, acc, label=pltlabel, linestyle=ls)
else:
x_vals = constraints
td, = curax.semilogx(x_vals, acc, label=pltlabel,
linestyle=ls)
lines.append(td)
leglabels.append('baseline')
for approx, (nbits, nintbits) in list(product(approximations, bits)):
select = self.r.loc[
(self.r['approximation'] == approx) &
(self.r['dataset'] == data) &
(self.r['fairness'] == fairness) &
(self.r['batchsize'] == batchsize) &
(self.r['optimizer'] == optimizer) &
(self.r['nbits'] == nbits) &
(self.r['nintbits'] == nintbits)]
accs = []
constraints = get_distinct_values(select, 'constraint')
if data == 'compas':
accs = self.get_frac_compas(select, compas_index,
Xte, Zte)
else:
for c in constraints:
acc = select.loc[select['constraint'] == c, show].values
if acc.size > 0:
accs.append(acc[0])
else:
accs.append([0] * len(show))
if ppercent_on_x:
x_vals = self.get_ppercent(select)
else:
x_vals = constraints
accs = np.array(accs)
for acc, label, ls in zip(accs.T, labels, linestyle):
if nbits != 0:
bitlabel = (str(int(nbits)) + ' bits')
else:
bitlabel = 'float'
pltlabel = approx + ', ' + bitlabel + ', ' + label
if ppercent_on_x:
td, = curax.plot(x_vals, acc, label=pltlabel,
linestyle=ls)
else:
td, = curax.semilogx(x_vals, acc, label=pltlabel,
linestyle=ls)
lines.append(td)
leglabels.append(pltlabel)
curax.grid(axis='y')
curax.set_title(self.dataset_labels[data], fontsize=self.fontsize)
curax.set_xlabel('constraint', fontsize=self.fontsize)
if n_data == 1:
axs.set_xlabel('constraint', fontsize=self.fontsize)
else:
pass
fig.legend(lines, leglabels, loc='lower center',
bbox_to_anchor=(0.5, -0.1), ncol=2)
fig.tight_layout()
fig.show()
if save_fig:
from datetime import datetime
timestamp = datetime.now().strftime("%Y-%m-$d_%H-%M-%S")
figname = "ppercent_fracs_" + timestamp + ".pdf"
fig.savefig(figname, bbox_inches='tight')
def get_ppercent(self, select):
"""For a number of constraitns get the corresponding p percent values"""
y1z0 = select['valid_z0_in_y1_test'].values
y1z1 = select['valid_z1_in_y1_test'].values
frac1 = np.divide(y1z0, y1z1, out=np.full_like(y1z0, np.inf),
where=(y1z1 != 0))
frac2 = np.divide(y1z1, y1z0, out=np.full_like(y1z1, np.inf),
where=(y1z0 != 0))
return np.minimum(frac1, frac2) * 100.
def get_frac_compas(self, select, index, Xte, Zte):
accs = []
for c in select['constraint']:
weight = select.loc[select['constraint'] == c, 'valid_weights'].values
try:
failed = False
if weight.size > 0:
weight = weight[0]
else:
failed = True
try:
isnan = np.isnan(weight)
except:
isnan = False
if not isnan:
if not isinstance(weight, np.ndarray):
weight_str = weight.replace('\n', '')\
.replace('[', '')\
.replace(']', '')\
.split()
weight = [float(i) for i in weight_str]
weight = np.array(weight, dtype=float)
I0 = (Zte[:, index].ravel() <= 0)
I1 = (Zte[:, index].ravel() > 0)
X0, X1 = Xte[I0, :], Xte[I1, :]
n0, n1 = X0.shape[0], X1.shape[0]
z0iny1 = np.sum(np.round(X0 @ weight) == 1) / n0
                    z1iny1 = np.sum(np.round(X1 @ weight) == 1) / n1
else:
failed = True
except:
failed = True
if not failed:
accs.append([z0iny1, z1iny1])
else:
accs.append([np.nan, np.nan])
return accs
def save_csv(self, name, table):
with open(name, 'w') as f:
fw = csv.writer(f, delimiter=',')
for row in table:
fw.writerow(row)
def export_tables(self,
basedir='../doc/tables/ppercent/',
fairness='ppercent',
compas_index=1,
batchsize=64):
_, Xte, _, yte, _, Zte = read_compas()
writeout = ['constraint',
'valid_accuracy',
'valid_train_accuracy',
'valid_z0_in_y1_train',
'valid_z1_in_y1_train',
'valid_z0_in_y1_test',
'valid_z1_in_y1_test',
'ppercent']
compas_writeout = ['constraint',
'valid_accuracy',
'valid_train_accuracy',
'valid_constraint_satisfied',
'valid_max_constraint_value',
'valid_z0_in_y1_test',
'valid_z1_in_y1_test',
'ppercent']
if not os.path.exists(basedir):
os.makedirs(basedir)
# write out all other data
settings = product(self.params['datasets'],
self.params['optimizers'],
self.params['bits'],
self.params['approximations'])
for data, opt, (nbits, nintbits), approx in list(settings):
select = self.r.loc[
(self.r['approximation'] == approx) &
(self.r['dataset'] == data) &
(self.r['fairness'] == fairness) &
(self.r['batchsize'] == batchsize) &
(self.r['optimizer'] == opt) &
(self.r['nbits'] == nbits) &
(self.r['nintbits'] == nintbits)]
name = basedir + "{}-{}-{}-{}.csv".format(data, opt, approx,
int(nbits))
if data != 'compas':
select['ppercent'] = self.get_ppercent(select)
select[writeout].to_csv(name, index=False)
else:
ziny1 = self.get_frac_compas(select, compas_index, Xte, Zte)
ziny1 = np.array(ziny1, dtype=float)
new_select = select.copy(deep=True)
new_select['valid_z0_in_y1_test'] = ziny1[:, 0]
new_select['valid_z1_in_y1_test'] = ziny1[:, 1]
new_select['ppercent'] = self.get_ppercent(new_select)
new_select = new_select[compas_writeout]
new_select.to_csv(name, index=False)
name = basedir + "{}-baseline.csv".format(data)
baseline_acc = select['baseline_accuracy'].values[0]
self.save_csv(name, [["baseline"]] + [[baseline_acc]])
# write only ppercent baseline here
for data in self.params['datasets']:
select = self.r.loc[
(self.r['dataset'] == data) &
(self.r['fairness'] == fairness) &
(self.r['optimizer'] == 'baseline')]
select['ppercent'] = self.get_ppercent(select)
name = basedir + "{}-ppercent-bl.csv".format(data)
select[writeout].to_csv(name, index=False)
def get_distinct_values(df, key):
"""Get the distinct values that are present in a given column."""
return sorted(list(df[key].value_counts().index.values))
if __name__ == '__main__':
evaluation = Evaluation('../results/final.csv')
evaluation.plot_accuracy(ppercent_on_x=True)
evaluation.plot_frac_in_positive(ppercent_on_x=True)
# evaluation.export_tables(basedir='../tmp/')
|
{"hexsha": "b2958b0a35cb2f64879e5aeba35a7be9e5a42cc9", "size": 19394, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/src/evaluation.py", "max_stars_repo_name": "nikikilbertus/blind-justice", "max_stars_repo_head_hexsha": "2344609e55a2af20396ec042627ffed368e01e56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-06-11T21:12:34.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-20T18:49:16.000Z", "max_issues_repo_path": "python/src/evaluation.py", "max_issues_repo_name": "nikikilbertus/blind-justice", "max_issues_repo_head_hexsha": "2344609e55a2af20396ec042627ffed368e01e56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-17T14:28:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-17T14:28:11.000Z", "max_forks_repo_path": "python/src/evaluation.py", "max_forks_repo_name": "nikikilbertus/blind-justice", "max_forks_repo_head_hexsha": "2344609e55a2af20396ec042627ffed368e01e56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-06-06T08:46:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T15:39:26.000Z", "avg_line_length": 42.6241758242, "max_line_length": 92, "alphanum_fraction": 0.4778797566, "include": true, "reason": "import numpy", "num_tokens": 4084}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))
# Copyright (c) 2016-2021, Cabral, Juan; Luczywo, Nadia
# Copyright (c) 2022, QuatroPe
# All rights reserved.
# =============================================================================
# DOCS
# =============================================================================
"""Functionalities to add an value when an array has a zero.
In addition to the main functionality, an MCDA agnostic function is offered
to add value to zero on an array along an arbitrary axis.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from ..core import SKCMatrixAndWeightTransformerABC
from ..utils import doc_inherit
# =============================================================================
# FUNCTIONS
# =============================================================================
def add_value_to_zero(arr, value, axis=None):
r"""Add value if the axis has a value 0.
.. math::
\overline{X}_{ij} = X_{ij} + value
Parameters
----------
arr: :py:class:`numpy.ndarray` like.
A array with values
value: number
Number to add if the axis has a 0.
axis : :py:class:`int` optional
Axis along which to operate. By default, flattened input is used.
Returns
-------
:py:class:`numpy.ndarray`
array with all values >= value.
Examples
--------
.. code-block:: pycon
        >>> from skcriteria.preprocessing.increment import add_value_to_zero
# no zero
>>> mtx = [[1, 2], [3, 4]]
        >>> add_value_to_zero(mtx, value=0.5)
array([[1, 2],
[3, 4]])
# with zero
>>> mtx = [[0, 1], [2,3]]
        >>> add_value_to_zero(mtx, value=0.5)
array([[ 0.5, 1.5],
[ 2.5, 3.5]])
"""
arr = np.asarray(arr)
zeros = np.any(arr == 0, axis=axis, keepdims=True)
increment = zeros * value
return arr + increment
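# Illustrative sketch of the ``axis`` argument (this example is not part of the upstream
# docstring): with axis=0 only the columns that contain a zero are shifted.
#   >>> add_value_to_zero([[0, 1], [2, 3]], value=0.5, axis=0)
#   array([[0.5, 1. ],
#          [2.5, 3. ]])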
class AddValueToZero(SKCMatrixAndWeightTransformerABC):
r"""Add value if the matrix/weight whe has a value 0.
.. math::
\overline{X}_{ij} = X_{ij} + value
"""
_skcriteria_parameters = ["target", "value"]
def __init__(self, target, value=1.0):
super().__init__(target=target)
self._eps = float(value)
@property
def value(self):
"""Value to add to the matrix/weight when a zero is found."""
return self._eps
@doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
def _transform_weights(self, weights):
return add_value_to_zero(weights, value=self.value, axis=None)
@doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
def _transform_matrix(self, matrix):
return add_value_to_zero(matrix, value=self.value, axis=0)
|
{"hexsha": "d3410a8d49cb4643fe3aff9c3851c453a0fb4f06", "size": 2962, "ext": "py", "lang": "Python", "max_stars_repo_path": "skcriteria/preprocessing/increment.py", "max_stars_repo_name": "leliel12/scikitcriteria", "max_stars_repo_head_hexsha": "f13a75b5a39cd2d3db30a37b69e61a2814a5cea4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-12-01T02:52:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T11:12:52.000Z", "max_issues_repo_path": "skcriteria/preprocessing/increment.py", "max_issues_repo_name": "leliel12/scikitcriteria", "max_issues_repo_head_hexsha": "f13a75b5a39cd2d3db30a37b69e61a2814a5cea4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2021-11-06T21:31:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T22:43:38.000Z", "max_forks_repo_path": "skcriteria/preprocessing/increment.py", "max_forks_repo_name": "leliel12/scikitcriteria", "max_forks_repo_head_hexsha": "f13a75b5a39cd2d3db30a37b69e61a2814a5cea4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-12-24T04:33:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T15:03:41.000Z", "avg_line_length": 27.4259259259, "max_line_length": 79, "alphanum_fraction": 0.5178933153, "include": true, "reason": "import numpy", "num_tokens": 694}
|
# imports
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
from datetime import datetime
import urllib.request # the module we'll need
import shutil
# 1. DOWNLOAD DATA from the web
url = 'http://cdn.knmi.nl/knmi/map/page/seismologie/all_induced.csv' # the url where the data is located (look it up!)
file_name = 'nd_eqs.txt' # name of the file to save the data into
with urllib.request.urlopen(url) as response, open(file_name, 'wb') as out_file: # not super important you understand this line
shutil.copyfileobj(response, out_file) # pull down and save the data
# 2. LOAD THE DATA downloaded previously
data = np.genfromtxt('nd_eqs.txt', delimiter = ',', skip_header=1)
# we will pick out four columns: date (col 1), lat (col 4), lon (col 5), magnitude (col 7)
date = data[:,0] # extract all the rows, first column
lat = data[:,3]/180.*np.pi # same, fourth column (converted from degrees to radians)
lon = data[:,4]/180.*np.pi # etc
mag = data[:,6]
# convert lat-lon to approximate x-y
r = 6371 # radius of earth (km)
lat_avg = np.mean(lat)
x = r*lon*np.cos(lat_avg)
y = r*lat
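# (Sanity check of this equirectangular approximation, illustrative numbers only: near
# lat ~53 deg N, one degree of longitude spans roughly 6371*(pi/180)*cos(53 deg) ~ 67 km.)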
# 3. FILTER THE DATA using the 'where' function and conditions to find the data indices we're interested in
# (while we're at it, get rid of the events smaller than M 1)
inds = np.where((435 < x) & (x < 475) & (5900 < y) & (y < 5950) & (mag > 1.0)) # & = 'and' but when comparing arrays
# 'slice' the arrays to keep only the values satisfying the conditions above
date = date[inds]
x = x[inds]
y = y[inds]
mag = mag[inds]
# 4. CONVERT THE DATES from float -> integer -> string
t0 = datetime.strptime('19910101', '%Y%m%d') # reference time 1 Jan 1991
times = [] # an empty list for storing each date as it is calculated
for each_date in date:
# first, convert from float -> integer -> string (converting straight to a string leaves an awkward decimal point)
str_date = str(int(each_date))
# interpret each datestring
t = datetime.strptime(str_date, '%Y%m%d')
# take the difference between the datestring and the reference time
dt = t - t0
# find the total seconds and convert the date to decimal years
times.append(dt.total_seconds()/(3600*24*365.25)+1991)
# 5. CONVERT THE MAGNITUDES to a size between 1 and 100 (largest event is a 100 point circle)
# first, rescale magnitudes to range between 0 and 1
s = (mag - np.min(mag))/(np.max(mag) - np.min(mag))
# second, rescale to range between 1 and 100 (this will be the size of the marker)
s = s*(100-2)+1
# 6. PLOTTING COMMANDS
f,ax = plt.subplots(1,1)
f.set_size_inches(10,10)
# use the imread function to read an image file 'im' is a MxNx3 array of image data (each pixel has three components, corresponding to an RGB color)
im = plt.imread('groningen_reservoir.png')
implot = ax.imshow(im, extent=[430, 475, 5902, 5950]) # plot the image and 'stretch' it to the given x and y limits
# then, let's plot the earthquakes over the top
coolwarm = matplotlib.cm.get_cmap('coolwarm_r') # import a new colormap - this one is 'coolwarm', reversed it with '_r'
CS = ax.scatter(x,y,s, c= times, cmap=coolwarm)
ax.set_aspect('equal', adjustable='box')
plt.colorbar(CS, ax = ax)
# save the figure to a file
plt.savefig('scatter_plot.png', dpi = 300) # dpi is the resolution, 300 is good for many applications
|
{"hexsha": "47e79c42c249a91117d8f070255794830d60cabf", "size": 3568, "ext": "py", "lang": "Python", "max_stars_repo_path": "2_visualisation/scatter_plot.py", "max_stars_repo_name": "ddempsey/python_for_geoscientists", "max_stars_repo_head_hexsha": "ac3e3e9951b530ecd5f0ed3128083edd4f55b2c0", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2017-10-31T01:56:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T18:21:05.000Z", "max_issues_repo_path": "2_visualisation/scatter_plot.py", "max_issues_repo_name": "mtoqeerpk/python_for_geoscientists", "max_issues_repo_head_hexsha": "428e2eaeb869f8478a3517d01a5fdff6de30e7d2", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2_visualisation/scatter_plot.py", "max_forks_repo_name": "mtoqeerpk/python_for_geoscientists", "max_forks_repo_head_hexsha": "428e2eaeb869f8478a3517d01a5fdff6de30e7d2", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-05-02T11:35:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T13:23:42.000Z", "avg_line_length": 47.5733333333, "max_line_length": 149, "alphanum_fraction": 0.6569506726, "include": true, "reason": "import numpy", "num_tokens": 972}
|
import unittest
import numpy as np
from coremltools.models import datatypes, MLModel
from coremltools.models.neural_network import NeuralNetworkBuilder
class BasicNumericCorrectnessTest(unittest.TestCase):
def test_undefined_shape_single_output(self):
W = np.ones((3,3))
input_features = [('data', datatypes.Array(3))]
output_features = [('probs', None)]
builder = NeuralNetworkBuilder(input_features, output_features)
builder.add_inner_product(name = 'ip1',
W = W,
b = None,
input_channels = 3,
output_channels = 3,
has_bias = False,
input_name = 'data',
output_name = 'probs')
mlmodel = MLModel(builder.spec)
data = np.ones((3,))
data_dict = {'data': data}
probs = mlmodel.predict(data_dict)['probs']
self.assertTrue(np.allclose(probs, np.ones(3) * 3))
|
{"hexsha": "56968be924239642c19de139f36c77d9f7112c97", "size": 1097, "ext": "py", "lang": "Python", "max_stars_repo_path": "coremltools/test/test_nn_builder.py", "max_stars_repo_name": "Vijayrajsinh/Core-ML", "max_stars_repo_head_hexsha": "b103f513cfd42cdf5b60f6261448d1ce667f590b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "coremltools/test/test_nn_builder.py", "max_issues_repo_name": "Vijayrajsinh/Core-ML", "max_issues_repo_head_hexsha": "b103f513cfd42cdf5b60f6261448d1ce667f590b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "coremltools/test/test_nn_builder.py", "max_forks_repo_name": "Vijayrajsinh/Core-ML", "max_forks_repo_head_hexsha": "b103f513cfd42cdf5b60f6261448d1ce667f590b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.1923076923, "max_line_length": 71, "alphanum_fraction": 0.5278030994, "include": true, "reason": "import numpy", "num_tokens": 216}
|
import os
import csv
import cv2
import numpy as np # for np.array() np.append()
from datetime import datetime # print timestamps
# for loss history visualization image
import matplotlib; matplotlib.use('agg')
import matplotlib.pyplot as plt
from scipy import ndimage # to convert to RBG due to np.imread reading in BGR
from random import shuffle
import sklearn
# list of common functions
# from commonFunctions_vxx import get_log_pathSampleData
# from commonFunctions_vxx import get_lines_logfile
# from commonFunctions_vxx import get_info_from_lines
# from commonFunctions_vxx import get_info_from_logfile
# from commonFunctions_vxx import flip_horizontally
# from commonFunctions_vxx import visualize_loss_history
# from commonFunctions_vxx import RGB2YUV
# from commonFunctions_vxx import print_info
# from commonFunctions_vxx import BGR2YUV
# from commonFunctions_vxx import
# History
# v01 : Start
# v02 : add nb_images to read parameter
# v03 : from scipy import ndimage, due to cv2.imread will get images in BGR format, while drive.py uses RGB. In the video above one way you could keep the same image formatting is to do "image = ndimage.imread(current_path)"
# v04 : use left + right images to augment data + measurements extrapolations
# v05 : add Generator Function + modify all other functions whenever necessary to use generator function ...
# v06 : Re-start from v04 aas fit_generator and need to add generator obsolete.
# Latest Keras.Model.fit integrates a generator in itself.
# ie v06 : visualize loss history
# v07 : For nvidia model, convert RGB to YUV
# v08 : add print_info() to print debug/progress info
# v09 : Try to avoid list to numpy conversion, taking few minutes, start with numpy image array straight from start
# But failed. Need to use and adapt to Generator
# v10 : adapt to generator_02.py
# v11 : move generator() to commonFunctionFile + parameters
# v12 : Add functionality to load different data collections
# v13 : Correct image format read. imread() reads BGR, remove nd.image (supposed
# to convert to RGB but did not use right parameter.
# And anyway drive.py needs to be modified to get YUV images to enter the model.
# Add BGR2YUV for drive_vxx.py
# v14 : While writing writeup_report.md, found errors in left/right angles calculation --> corrected.
driving_log_file = 'driving_log.csv'
STEER_CORRECTION_FACTOR = 0.2 # to tune up for left and right images/measurements
# Set our batch size for fit generator
batch_len= 6
path_to_replace='/home/workspace/CarND-Behavioral-Cloning-P3/simulationData/recording/'
# Select right sample data folder whether in GPU mode or not
# check if ./data/driving_log.csv exists otherwise select
# simulationData/001_1stTrackSampleDrivingData/
def get_log_pathSampleData() :
if os.path.exists("./data/" + driving_log_file) :
return("./data/")
else :
return("./simulationData/001_1stTrackSampleDrivingData/")
def get_lines_logfile(path) :
l = []
with open (path + driving_log_file ) as csv_file :
reader = csv.reader(csv_file)
for line in reader :
# Need to change path of image center, left, right
# to be able to read them regardless of path later on
for i in range(3) :
if path_to_replace in line[i] :
line[i] = line[i].replace(path_to_replace,path)
else :
line[i] = path + line[i].strip()
l.append(line)
    # return without 1st line (title row)
return l[1:]
def get_info_from_lines(l,leftright_steer_corr,nb_images=None) :
imgs = []
meas = []
# Now since v12, path information is directly in the line/sample fields
# for both image center, left, right.
# log_path = get_log_pathSampleData()
for line in l[:nb_images] :
#image = cv2.imread(log_path + line[0].strip())
for i in range(3) : # center image, left , right images
#image = ndimage.imread(line[i].strip())
image = cv2.imread(line[i].strip())
imgs.append(image)
measurement = float(line[3]) # center image
meas.append(measurement)
meas_left = measurement + leftright_steer_corr # left image
#print(meas_left)
meas.append(meas_left)
meas_right = measurement - leftright_steer_corr # right image
#print(meas_right)
meas.append(meas_right)
return imgs,meas
def get_info_from_logfile(leftright_steer_correction,path,nb_images=None) :
lines = get_lines_logfile(path)
return get_info_from_lines(lines,leftright_steer_correction,nb_images)
def flip_horizontally(img,meas):
aug_img, aug_meas = [],[]
for i,m in zip(img,meas) :
aug_img.append(cv2.flip(i,1))
aug_meas.append(m*(-1.0))
return aug_img,aug_meas
def visualize_loss_history(history) :
### plot the training and validation loss for each epoch
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
# plt.show()
plt.savefig('lossHistory.png')
def RGB2YUV(im):
yuv = []
for i in im :
yuv.append(cv2.cvtColor(i, cv2.COLOR_RGB2YUV))
return yuv
def BGR2YUV(im):
yuv = []
for i in im :
yuv.append(cv2.cvtColor(i, cv2.COLOR_BGR2YUV))
return yuv
def print_info(info):
now = datetime.now()
infotime = now.strftime("%H:%M:%S")
# can not use f-string due to GPU python version v3.5.2
print('{}. Time : {}'.format(info,infotime))
def generator(samples, batch_size=batch_len):
num_samples = len(samples)
# print('num_samples : {}'.format(num_samples))
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
# correction : should go only until min(num_samples,offset+batch_size)
batch_samples = samples[offset: min(num_samples,offset+batch_size)]
# here will get both center, left, right images + their measurements.
# if batch_size = 32 --> 32*3 = 96 images ....
images, angles = get_info_from_lines(batch_samples,STEER_CORRECTION_FACTOR,nb_images=None)
# data augmentation flip horizontally image + inverse measurements
augm_images, augm_measurements = flip_horizontally(images,angles)
images.extend(augm_images)
angles.extend(augm_measurements)
# Nvidia : need to convert images in YUV ...
# images = RGB2YUV(images)
# cv2.imread() reads in BGR need to convert to YUV.
images = BGR2YUV(images)
# trim image to only see section with road --> Done in the model
# NORMALISATION --> done in the model
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
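# Minimal wiring sketch (illustrative; `model` and the `train_samples` list of csv lines from
# get_lines_logfile() are assumed to be defined in the training script):
#   train_generator = generator(train_samples, batch_size=batch_len)
#   model.fit(train_generator,
#             steps_per_epoch=int(np.ceil(len(train_samples) / batch_len)),
#             epochs=5)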
|
{"hexsha": "848d408ebe7d89625baf89d47b03ff7ca1de3dd1", "size": 7168, "ext": "py", "lang": "Python", "max_stars_repo_path": "commonFunctions_v14.py", "max_stars_repo_name": "remichartier/014_selfDrivingCarND_BehavioralCloningProject", "max_stars_repo_head_hexsha": "1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-23T08:28:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-23T08:28:54.000Z", "max_issues_repo_path": "commonFunctions_v14.py", "max_issues_repo_name": "remichartier/014_selfDrivingCarND_BehavioralCloningProject", "max_issues_repo_head_hexsha": "1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "commonFunctions_v14.py", "max_forks_repo_name": "remichartier/014_selfDrivingCarND_BehavioralCloningProject", "max_forks_repo_head_hexsha": "1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4971751412, "max_line_length": 224, "alphanum_fraction": 0.6815011161, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1708}
|
from phylo._core.phylogenytree import *
import networkx as nx
import pytest as pt
def dummy_graph():
G = nx.DiGraph()
G.add_node('0', label = 'sabbia')
G.add_node('1', label = 'pollo,boh')
G.add_node('2', label = 'sBin')
G.add_edge('0', '1')
G.add_edge('0', '2')
return G
def dummy_tree():
return PhylogenyTree(dummy_graph())
class TestInit:
def test_standard_init(self):
tree = dummy_tree()
def test_init_with_nonstring_label_type(self):
G = nx.DiGraph()
G.add_node('0', label = 1337)
G = PhylogenyTree(G)
assert G.as_digraph().nodes['0']['label'] == str(1337)
def test_init_with_empty_label(self):
G = nx.DiGraph()
G.add_node('0', label = '')
with pt.raises(ValueError):
tree = PhylogenyTree(G)
def test_init_with_non_tree(self):
not_a_tree = dummy_graph()
not_a_tree.add_edge('2', '0')
with pt.raises(NotATreeError):
tree = PhylogenyTree(not_a_tree)
def test_init_with_bad_node(self):
integer_node = nx.DiGraph()
integer_node.add_node(0)
with pt.raises(TypeError):
tree = PhylogenyTree(integer_node)
def test_init_with_bad_graph_attribute(self):
G = dummy_graph()
G.graph['edge'] = 'this graph attribute is not allowed!'
with pt.raises(ValueError):
tree = PhylogenyTree(G)
class TestAccessors:
def test_graph_accessor(self):
G = dummy_graph()
T = dummy_tree()
assert list(T.as_digraph().nodes(data = True)) == list(G.nodes(data = True))
assert list(T.as_digraph().edges(data = True)) == list(G.edges(data = True))
class TestImmutability:
def test_immutability_after_init(self):
G = dummy_graph()
T = PhylogenyTree(G)
G.add_edge('2', '0')
assert ('2', '0') not in T.as_digraph().edges
def test_immutability_after_graph_access(self):
T = dummy_tree()
G = T.as_digraph()
G.add_edge('2', '0')
assert ('2', '0') not in T.as_digraph().edges
class TestSerialization:
def test_serialization_roundtrip(self):
T = dummy_tree()
T_as_string = T.to_dotstring()
T_after_roundtrip = PhylogenyTree.from_dotstring(T_as_string)
assert list(T.as_digraph().graph) == list(T_after_roundtrip.as_digraph().graph)
assert list(T.as_digraph().nodes(data = True)) == list(T_after_roundtrip.as_digraph().nodes(data = True))
assert list(T.as_digraph().edges(data = True)) == list(T_after_roundtrip.as_digraph().edges(data = True))
class TestDrawing:
def test_rendering_to_png_and_pdf(self):
T = dummy_tree()
T.draw_to_file('sabbia.png')
T.draw_to_file('pollo.pdf')
|
{"hexsha": "6457e6de343a068d85fc9162f0b439bb24b9ed25", "size": 2838, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/core/test_phylogenytree.py", "max_stars_repo_name": "plastic-phy/plastic", "max_stars_repo_head_hexsha": "101dcfd2c4e32be432d46d89ab151f48e34d6458", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-23T07:50:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-23T07:50:47.000Z", "max_issues_repo_path": "tests/core/test_phylogenytree.py", "max_issues_repo_name": "plastic-phy/plastic", "max_issues_repo_head_hexsha": "101dcfd2c4e32be432d46d89ab151f48e34d6458", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-23T08:59:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-25T09:13:43.000Z", "max_forks_repo_path": "tests/core/test_phylogenytree.py", "max_forks_repo_name": "plastic-phy/plastic", "max_forks_repo_head_hexsha": "101dcfd2c4e32be432d46d89ab151f48e34d6458", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6666666667, "max_line_length": 113, "alphanum_fraction": 0.6152219873, "include": true, "reason": "import networkx", "num_tokens": 714}
|
from copy import copy
import numpy as np
class IndicatorBase(object):
def __init__(self, marketevent):
self.instrument = None
self.preload_bar_list = []
self.instrument = marketevent.instrument
self.iteral_buffer = marketevent.feed.iteral_buffer
self.bar_list = copy(marketevent.bar.data)
self.bar_list2 = copy(self.bar_list)
self.preload_bar_list = marketevent.feed.preload_bar_list
def get_preload(self, minperiod, index, ohlc='close'):
"""将preload插入到bar_dict前,然后根据当前时间点动态获取固定长度的数据"""
preload_limit = self.preload_bar_list[:minperiod]
self.bar_list = copy(self.bar_list2)
for i in preload_limit:
self.bar_list.insert(0, i) # load to bar_list
data = [i[ohlc] for i in self.bar_list][-minperiod + index:]
return np.array(data)
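# Usage sketch (illustrative; assumes a concrete indicator subclass, and `index` is a
# hypothetical bar offset supplied by the strategy):
#   closes = self.get_preload(minperiod=10, index=index, ohlc='close')
#   sma10 = closes.mean()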
|
{"hexsha": "d4ae48090d025328df9db7655269ca599daed187", "size": 859, "ext": "py", "lang": "Python", "max_stars_repo_path": "OnePy/indicators/indicatorbase.py", "max_stars_repo_name": "sibuzu/OnePy", "max_stars_repo_head_hexsha": "464fca1c68a10f90ad128da3bfb03f05d2fc24bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OnePy/indicators/indicatorbase.py", "max_issues_repo_name": "sibuzu/OnePy", "max_issues_repo_head_hexsha": "464fca1c68a10f90ad128da3bfb03f05d2fc24bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OnePy/indicators/indicatorbase.py", "max_forks_repo_name": "sibuzu/OnePy", "max_forks_repo_head_hexsha": "464fca1c68a10f90ad128da3bfb03f05d2fc24bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8148148148, "max_line_length": 68, "alphanum_fraction": 0.6728754366, "include": true, "reason": "import numpy", "num_tokens": 220}
|
#####################################################
## Read bag from file ##
#####################################################
import pyrealsense2 as rs
import numpy as np
import cv2
import argparse
import os
import shutil
from mmcv import ProgressBar
def parse_args():
# Create object for parsing command-line options
parser = argparse.ArgumentParser(description="Read recorded bag file and display depth stream in jet colormap.\
Remember to change the stream resolution, fps and format to match the recorded.")
# Add argument which takes path to a bag file as an input
parser.add_argument("input", type=str, help="Path to the bag file. Set to None to get from Camera")
parser.add_argument("--depth-resolution", choices=['low','vga', 'xga'], default='xga',
help="Depth resolution: low(320x240), vga(640x480), xga (1024x768).\
If data is loaded from a bag-file, it must be consistent with recording condition.")
parser.add_argument("--rgb-resolution", choices=['vga','hd', 'fhd'], default='hd',
help="RGB resolution: vga(960x540), hd(1280x720), fhd(1920x1080).\
If data is loaded from a bag-file, it must be consistent with recording condition.")
parser.add_argument("--color-mode", choices=['rgb8','bgr8'], default='rgb8',
help="The color mode when collecting data.\
If data is loaded from a bag-file, it must be consistent with recording condition.")
parser.add_argument("--align", choices=['to_depth','to_color'], default='to_color',
help="Align Depth and RGB images together: to_depth(RGB is align to Depth image), and vice versa.\
If data is loaded from a bag-file, it must be consistent with recording condition.")
parser.add_argument("--threshold-distance", type=float, default=None,
help="clipping distance (meter) for visualization. Objects farther than this distance is ignored.")
parser.add_argument("--num_frames", type=int, default=None,
help="Number of frames to stream. Will stream all data if None.")
parser.add_argument("--outdir", type=str, default=None, help="Output directory")
parser.add_argument("--show", action='store_true', help='show the video')
# Parse the command line arguments to an object
args = parser.parse_args()
return args
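# Example invocation (illustrative; the bag file and output folder are placeholders):
#   python convert_bagfile.py recording.bag --depth-resolution xga --rgb-resolution hd \
#       --color-mode rgb8 --align to_color --num_frames 300 --outdir ./extracted --show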
def setup_pipeline(args):
# Check if the given file have bag extension
assert os.path.isfile(args.input), f'file {args.input} does not exit'
assert os.path.splitext(args.input)[1] == ".bag", "Only .bag files are accepted"
# Create pipeline and config
pipeline = rs.pipeline()
config = rs.config()
    # Tell config that we will use a recorded device from file to be used by the pipeline through playback.
rs.config.enable_device_from_file(config, args.input)
# Configure the pipeline to stream the depth & color
if args.depth_resolution=='vga':
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
elif args.depth_resolution=='xga':
config.enable_stream(rs.stream.depth, 1024, 768, rs.format.z16, 30)
else:
config.enable_stream(rs.stream.depth, 320, 240, rs.format.z16, 30)
color_mode= rs.format.rgb8 if args.color_mode=='rgb8' else rs.format.bgr8
if args.rgb_resolution=='hd':
config.enable_stream(rs.stream.color, 1280, 720, color_mode, 30)
elif args.rgb_resolution=='fhd':
config.enable_stream(rs.stream.color, 1920, 1080, color_mode, 30)
else:
config.enable_stream(rs.stream.color, 960, 540, color_mode, 30)
# Start streaming from file
try:
profile = pipeline.start(config)
except ValueError:
print("The config does not match with bag-file recorded conditiion.\
Please check your depth-resolution,rgb-resolution or color mode,etc")
clipping_distance=None
if args.threshold_distance:
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: " , depth_scale)
        clipping_distance = args.threshold_distance / depth_scale
# Create an align object
align_to = rs.stream.depth if args.align=='to_depth' else rs.stream.color
align = rs.align(align_to)
return pipeline, align, clipping_distance
if __name__ == "__main__":
args = parse_args()
# Setup camera
pipeline, align, clipping_distance = setup_pipeline(args)
if args.outdir:
if os.path.exists(args.outdir):
shutil.rmtree(args.outdir)
os.makedirs(args.outdir,exist_ok=True)
os.makedirs(os.path.join(args.outdir,'Depth'),exist_ok=True)
os.makedirs(os.path.join(args.outdir,'RGB'),exist_ok=True)
# Streaming loop
frame_i=0
# progress = ProgressBar(task_num=args.num_frames if args.num_frames else 1e3)
progress = ProgressBar(task_num=1e1)
while True:
frames = pipeline.wait_for_frames()
# Align the depth frame and color frame
aligned_frames = align.process(frames)
depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
if not depth_frame or not color_frame:
continue
# Convert images to numpy arrays
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
if args.color_mode=='rgb8':
color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
if args.outdir:
filename_depth=os.path.join(args.outdir,f'Depth/{frame_i}.png')
filename_rgb=os.path.join(args.outdir,f'RGB/{frame_i}.png')
cv2.imwrite(filename_depth,depth_image)
cv2.imwrite(filename_rgb,color_image)
# import pdb; pdb.set_trace()
# check_depth=cv2.imread(filename_depth,cv2.IMREAD_UNCHANGED)
# print((depth_image-check_depth).abs().sum())
if args.show:
# Remove background - Set pixels further than clipping_distance to grey
grey_color = 153
depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
mask = (depth_image_3d <0)
if clipping_distance:
mask = mask | (depth_image_3d > clipping_distance)
bg_removed = np.where(mask, grey_color, color_image)
# Render images
depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
images = np.hstack((bg_removed, depth_colormap))
cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Align Example', images)
key = cv2.waitKey(1000)
# Press esc or 'q' to close the image window
if key & 0xFF == ord('q') or key == 27:
cv2.destroyAllWindows()
break
frame_i+=1
if args.num_frames and frame_i > args.num_frames:
break
if frame_i % 1e1 == 0:
# Reset progress bar
progress.completed=0
progress.file.flush()
progress.update()
|
{"hexsha": "28524cd2aade959f02baa7c193badd93a4b9ad15", "size": 7544, "ext": "py", "lang": "Python", "max_stars_repo_path": "gradslam/utils/convert_bagfile.py", "max_stars_repo_name": "chuong98/gradslam", "max_stars_repo_head_hexsha": "4f744c54605980aa6e5a5ef3e3b6a04db0fe6597", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gradslam/utils/convert_bagfile.py", "max_issues_repo_name": "chuong98/gradslam", "max_issues_repo_head_hexsha": "4f744c54605980aa6e5a5ef3e3b6a04db0fe6597", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gradslam/utils/convert_bagfile.py", "max_forks_repo_name": "chuong98/gradslam", "max_forks_repo_head_hexsha": "4f744c54605980aa6e5a5ef3e3b6a04db0fe6597", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.8571428571, "max_line_length": 136, "alphanum_fraction": 0.6369300106, "include": true, "reason": "import numpy", "num_tokens": 1668}
|
using SimpleCTF
using Test
@testset "SimpleCTF.jl" begin
# Write your tests here.
@test isapprox(SimpleCTF.wavelength_from_voltage(200), 2.5079, atol=1e-4)
@test isapprox(SimpleCTF.wavelength_from_voltage(300), 1.9687, atol=1e-4)
end
|
{"hexsha": "6dcb7e7c7b4240ddffc22e0786cd0c21fd2c37a2", "size": 248, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "alberttxu/SimpleCTF.jl", "max_stars_repo_head_hexsha": "e5052ef99274ffd09952c240fd86598f07a5ce54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-20T17:45:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T07:04:41.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "alberttxu/SimpleCTF.jl", "max_issues_repo_head_hexsha": "e5052ef99274ffd09952c240fd86598f07a5ce54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "alberttxu/SimpleCTF.jl", "max_forks_repo_head_hexsha": "e5052ef99274ffd09952c240fd86598f07a5ce54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8, "max_line_length": 77, "alphanum_fraction": 0.7459677419, "num_tokens": 86}
|
[STATEMENT]
lemma fls_X_conv_shift_1: "fls_X = fls_shift (-1) 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fls_X = fls_shift (- 1) 1
[PROOF STEP]
by (intro fls_eqI) simp
|
{"llama_tokens": 91, "file": null, "length": 1}
|
#encoding=utf-8
import argparse
from distutils.dir_util import copy_tree
import os
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
parser = argparse.ArgumentParser(description="Easy trainable text generation RNN model with TensorFlow",
epilog="Copyright NikiTricky, 2021")
dst = parser.add_argument_group('Dataset preferences')
dst.add_argument('--dataset_path', required=True, help="The dataset filepath.", type=lambda x: is_valid_file(parser, x))
dst.add_argument('--dataset_cutoff', help="If set, the dataset will be trimmed to the specified length.", type=int)
dst.add_argument('--buffer_size', default=10000, help="Buffer size to shuffle the dataset.", type=int)
mpf = parser.add_argument_group('Model/Traning preferences')
mpf.add_argument('--epochs', required=True, help="Number of epochs to run the model.", type=int)
mpf.add_argument('--batch_size', required=True, help="Batch size for the model.", type=int)
mpf.add_argument('--seq_size', required=True, help="Sequence size/length to train the model on.", type=int)
mpf.add_argument('--embedding_dim', required=True, help="Model's embedding dimension.", type=int)
mpf.add_argument('--rnn_units', required=True, help="Model's RNN units.", type=int)
ckpt = parser.add_argument_group('Checkpoints')
ckpt.add_argument('--checkpoint_path', help="Path to store checkpoints in, if left empty, no checkpoints will be stored.")
ckpt.add_argument('--checkpoint_prefix', default="ckpt", help="Prefix to store checkpoint files with. Default is 'ckpt'.")
ckpt.add_argument('--epochs_per_checkpoint', default=10, help="Number of epochs between stored checkpoints. Default is 10.", type=int)
lg = parser.add_argument_group('Text logging')
lg.add_argument('--epochs_per_text_log', default=0, help="Epochs per text log. If --use_wandb is enabled, the logs are sent to wandb. If the value is 0, no text will be logged. Requires --text_log_prompt. Default is 0.", type=int)
lg.add_argument('--text_log_prompt', help="Text prompt for logs.")
lg.add_argument('--text_log_prompt_len', default=100, help="Text log generated text length. Default is 100.", type=int)
lg.add_argument('--text_out_prompt', help="Text output prompt. Runs at the end of training. Requires --text_out_prompt_len and --text_out_file.")
lg.add_argument('--text_out_prompt_len', default=0, help="Text length for output text.", type=int)
lg.add_argument('--text_out_file', help="The file to store the output text.")
exp = parser.add_argument_group('Exporting')
exp.add_argument('--export_model_filepath', required=True, help="Folder to export the model.")
#exp.add_argument('--export_keras_filepath', help="Save the model to a .h5 format or folder.")
exp.add_argument('--export_loss_file', help="Save the losses to a newline-separated file.")
exp.add_argument('--export_text_logs', help="Save the text logs to a text file. Requires --text_log_prompt.")
wndb = parser.add_argument_group('Weights and Biases fuctions')
wndb.add_argument('--use_wandb', action='store_const', default=False, const=True, help="Uses wandb. Logs the model loss and starting conditions. Requires you to be logged in to wandb, plus --wandb_project_name and --wandb_entity. Don't include if --in_sweep is enabled.")
wndb.add_argument('--in_sweep', action='store_const', default=False, const=True, help="If in wandb sweep. Removes some functions to help sweeps.")
wndb.add_argument('--wandb_project_name', help="Wandb project name to log the model training.")
wndb.add_argument('--wandb_entity', help="Entity to use when logging model.")
parser.add_argument('--use_gpu_mem_growth', action='store_const', default=False, const=True, help="Enable GPU memory growth; useful if you are getting GPU memory errors. Works only if you have a GPU.")
args = parser.parse_args()
if args.epochs_per_text_log != 0 and (args.text_log_prompt is None):
parser.error("--epochs_per_text_log requires --text_log_prompt.")
if args.text_out_prompt_len != 0 and (args.text_out_prompt is None or args.text_out_file is None):
parser.error("--text_out_prompt_len requires --text_out_prompt and --text_out_file.")
if args.use_wandb and (args.wandb_project_name is None or args.wandb_entity is None):
parser.error("--use_wandb requires --wandb_project_name and --wandb_entity.")
if args.export_loss_file and (args.epochs_per_text_log == 0):
parser.error("--export_loss_file requires --epochs_per_text_log.")
if args.export_text_logs and (args.text_log_prompt is None):
parser.error("--export_text_logs requires --text_log_prompt.")
import tensorflow as tf
if args.use_gpu_mem_growth:
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)
import numpy as np
import os
import time
if args.use_wandb or args.in_sweep:
    import wandb
# Read, then decode for py2 compat.
text = open(args.dataset_path, 'rb').read().decode(encoding='utf-8')
if args.dataset_cutoff:
text = text[:args.dataset_cutoff]
# length of text is the number of characters in it
print(f'Length of text: {len(text)} characters')
# The unique characters in the file
vocab = sorted(set(text))
print(f'{len(vocab)} unique characters')
ids_from_chars = tf.keras.layers.StringLookup(
vocabulary=list(vocab), mask_token=None)
chars_from_ids = tf.keras.layers.StringLookup(
vocabulary=ids_from_chars.get_vocabulary(), invert=True, mask_token=None)
def text_from_ids(ids):
return tf.strings.reduce_join(chars_from_ids(ids), axis=-1)
all_ids = ids_from_chars(tf.strings.unicode_split(text, 'UTF-8'))
ids_dataset = tf.data.Dataset.from_tensor_slices(all_ids)
def split_input_target(sequence):
input_text = sequence[:-1]
target_text = sequence[1:]
return input_text, target_text
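# Illustrative example: conceptually, for the character sequence "Tensor" the pair produced is
# ("Tenso", "ensor"), i.e. the target is the input shifted one position to the right.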
# Length of the vocabulary in chars
vocab_size = len(vocab)
class MyModel(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, rnn_units):
super().__init__(self)
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(rnn_units,
return_sequences=True,
return_state=True)
self.dense = tf.keras.layers.Dense(vocab_size)
def call(self, inputs, states=None, return_state=False, training=False):
x = inputs
x = self.embedding(x, training=training)
if states is None:
states = self.gru.get_initial_state(x)
x, states = self.gru(x, initial_state=states, training=training)
x = self.dense(x, training=training)
if return_state:
return x, states
else:
return x
if args.checkpoint_path:
# Name of the checkpoint files
checkpoint_prefix = os.path.join(args.checkpoint_path, args.checkpoint_prefix + "_{epoch}")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_prefix,
save_weights_only=True)
class CustomTraining(MyModel):
@tf.function
def train_step(self, inputs, model):
inputs, labels = inputs
with tf.GradientTape() as tape:
predictions = self(inputs, training=True)
loss = self.loss(labels, predictions)
grads = tape.gradient(loss, model.trainable_variables)
self.optimizer.apply_gradients(zip(grads, model.trainable_variables))
return {'loss': loss}
class OneStep(tf.keras.Model):
def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0):
super().__init__()
self.temperature = temperature
self.model = model
self.chars_from_ids = chars_from_ids
self.ids_from_chars = ids_from_chars
# Create a mask to prevent "[UNK]" from being generated.
skip_ids = self.ids_from_chars(['[UNK]'])[:, None]
sparse_mask = tf.SparseTensor(
# Put a -inf at each bad index.
values=[-float('inf')]*len(skip_ids),
indices=skip_ids,
# Match the shape to the vocabulary
dense_shape=[len(ids_from_chars.get_vocabulary())])
self.prediction_mask = tf.sparse.to_dense(sparse_mask)
@tf.function
def generate_one_step(self, inputs, states=None):
# Convert strings to token IDs.
input_chars = tf.strings.unicode_split(inputs, 'UTF-8')
input_ids = self.ids_from_chars(input_chars).to_tensor()
# Run the model
# predicted_logits.shape is [batch, char, next_char_logits]
predicted_logits, states = self.model(inputs=input_ids, states=states,
return_state=True)
# Only use the last prediction.
predicted_logits = predicted_logits[:, -1, :]
predicted_logits = predicted_logits/self.temperature
# Apply the prediction mask: prevent "[UNK]" from being generated.
predicted_logits = predicted_logits + self.prediction_mask
# Sample the output logits to generate token IDs.
predicted_ids = tf.random.categorical(predicted_logits, num_samples=1)
predicted_ids = tf.squeeze(predicted_ids, axis=-1)
# Convert from token ids to characters
predicted_chars = self.chars_from_ids(predicted_ids)
# Return the characters and model state.
return predicted_chars, states
def gen_text(model, prompt, length):
one_step_model = OneStep(model, chars_from_ids, ids_from_chars)
next_char = tf.constant([prompt])
states = None
result = [next_char]
for n in range(length):
next_char, states = one_step_model.generate_one_step(next_char, states=states)
result.append(next_char)
return tf.strings.join(result)[0].numpy().decode('utf-8')
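# Reload-and-sample sketch (illustrative; the path stands in for --export_model_filepath and
# the prompt/length are arbitrary):
#   reloaded = tf.saved_model.load('./exported_model')
#   states = None
#   next_char = tf.constant(['The '])
#   pieces = [next_char]
#   for _ in range(100):
#       next_char, states = reloaded.generate_one_step(next_char, states=states)
#       pieces.append(next_char)
#   print(tf.strings.join(pieces)[0].numpy().decode('utf-8'))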
from tqdm import tqdm
def create_dataset(seq_length, BATCH_SIZE):
global args
examples_per_epoch = len(text)//(seq_length+1)
sequences = ids_dataset.batch(seq_length+1, drop_remainder=True)
dataset = sequences.map(split_input_target)
# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = args.buffer_size
dataset = (
dataset
.shuffle(BUFFER_SIZE)
.batch(BATCH_SIZE, drop_remainder=True)
.prefetch(tf.data.experimental.AUTOTUNE))
return dataset
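# Instantiate the model and compile it with Adam and sparse categorical cross-entropy on logits.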
def create_model(embedding_dim, rnn_units):
model = CustomTraining(
vocab_size=len(ids_from_chars.get_vocabulary()),
embedding_dim=embedding_dim,
rnn_units=rnn_units)
model.compile(optimizer = tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
return model
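# Custom training loop: runs the epochs manually, tracks the mean loss, saves checkpoints, logs to Weights & Biases, and periodically generates sample text.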
def train(EPOCHS, BATCH_SIZE, seq_length, rnn_units, embedding_dim):
global args, checkpoint_prefix
dataset = create_dataset(seq_length, BATCH_SIZE)
model = create_model(embedding_dim, rnn_units)
mean = tf.metrics.Mean()
dataset = list(dataset)
ma = []
if args.use_wandb:
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity)
wandb.config.epochs = EPOCHS
wandb.config.batch_size = BATCH_SIZE
wandb.config.seq_size = seq_length
wandb.config.rnn_units = rnn_units
wandb.config.embedding_dim = embedding_dim
srt = time.time()
text = ""
losses = []
text_logs = []
for epoch in range(0, EPOCHS):
print(f'Epoch {epoch+1}/{EPOCHS}')
start = time.time()
mean.reset_states()
for (batch_n, (inp, target)) in tqdm(enumerate(dataset), total=len(dataset)):
logs = model.train_step([inp, target], model)
mean.update_state(logs['loss'])
losses.append(mean.result().numpy())
if args.checkpoint_path:
if (epoch + 1) % args.epochs_per_checkpoint == 0:
model.save_weights(checkpoint_prefix.format(epoch=epoch))
if args.epochs_per_text_log != 0:
if (epoch + 1) % args.epochs_per_text_log == 0:
text = gen_text(model, args.text_log_prompt, args.text_log_prompt_len)
text_logs.append(text)
print(f"Sample for epoch {epoch+1}:\\n{text}")
t = time.time() - start
print(f"Loss: {mean.result().numpy():.4f}")
if args.use_wandb or args.in_sweep:
wandb.log({"loss": mean.result().numpy(), "time_per_epoch": t, "gen_text": wandb.Html(text)})
print(f'Time taken for 1 epoch {t:.2f} sec')
ma.append(t)
if len(ma) > 3:
ma.pop(0)
eta = (sum(ma)/len(ma))*EPOCHS-(time.time()-srt)
print(f"ETA: {float(eta)}s ({(eta/60):.2f}m, {(eta/3600):.2f}h)")
if args.checkpoint_path:
model.save_weights(checkpoint_prefix.format(epoch=epoch))
one_step_model = OneStep(model, chars_from_ids, ids_from_chars)
tf.saved_model.save(one_step_model, args.export_model_filepath)
if args.use_wandb or args.in_sweep:
#wandb.save('/model/*', base_path="/model")
wandb.finish()
if args.export_loss_file:
with open(args.export_loss_file, 'w') as f:
for loss in losses:
f.write(f'{loss}\n')
if args.export_text_logs:
with open(args.export_text_logs, 'w') as f:
for text in text_logs:
f.write(f'{text}\n\n')
return model
# EPOCHS, BATCH_SIZE, seq_length, rnn_units, embedding_dim
model = train(args.epochs, args.batch_size, args.seq_size, args.rnn_units, args.embedding_dim)
if args.text_out_file:
with open(args.text_out_file, 'w+') as f:
f.write(gen_text(model, args.text_out_prompt, args.text_out_prompt_len))
|
{"hexsha": "15cee439caedbe704ac7a68f38db94b80987d70d", "size": 13740, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "artificialis-ai/rnn-training", "max_stars_repo_head_hexsha": "0e38be1fce7e7c76e8653306fff1dced8565f48e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-29T07:50:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T07:50:16.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "artificialis-ai/rnn-training", "max_issues_repo_head_hexsha": "0e38be1fce7e7c76e8653306fff1dced8565f48e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "artificialis-ai/rnn-training", "max_forks_repo_head_hexsha": "0e38be1fce7e7c76e8653306fff1dced8565f48e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5386996904, "max_line_length": 270, "alphanum_fraction": 0.7018922853, "include": true, "reason": "import numpy", "num_tokens": 3210}
|
"""
Point collocation method, or regression-based polynomial chaos expansion, builds
upon the idea of fitting a polynomial chaos expansion to a set of generated
samples and evaluations. The experiment can be done as follows:
- Select a :ref:`distributions`::
>>> distribution = chaospy.Iid(chaospy.Normal(0, 1), 2)
- Generate :ref:`orthogonality`::
>>> orthogonal_expansion = chaospy.orth_ttr(2, distribution)
>>> orthogonal_expansion
polynomial([1.0, q1, q0, q1**2-1.0, q0*q1, q0**2-1.0])
- Generate samples using :ref:`sampling` (or alternative abscissas from
:ref:`quadrature`)::
>>> samples = distribution.sample(
... 2*len(orthogonal_expansion), rule="hammersley")
>>> samples[:, :4]
array([[ 0.67448975, -1.15034938, 0.31863936, -0.31863936],
[-1.42607687, -1.02007623, -0.73631592, -0.50240222]])
- A function evaluated using the nodes generated in the second step::
>>> def model_solver(param):
... return [param[0]*param[1], param[0]*numpy.e**-param[1]+1]
>>> solves = numpy.array([model_solver(sample) for sample in samples.T])
>>> solves[:4].round(8)
array([[-0.96187423, 3.80745414],
[ 1.17344406, -2.19038608],
[-0.23461924, 1.66539168],
[ 0.16008512, 0.47338898]])
- Bring it all together using `~chaospy.regression.fit_regression`::
>>> approx_model = chaospy.fit_regression(
... orthogonal_expansion, samples, solves)
>>> approx_model.round(2)
polynomial([q0*q1, 0.11*q1**2-1.44*q0*q1+0.05*q0**2-0.09*q1+1.22*q0+0.94])
In this example, the number of collocation points is selected to be twice the
number of unknown coefficients :math:`N+1`. Changing this is obviously
possible. When the number of collocation points equals the number of unknowns,
the polynomial approximation becomes an interpolation method and overlaps with
Lagrange polynomials. If the number of samples is fewer than the number of
unknowns, classical least squares cannot be used. Instead it is possible to use
methods for doing estimation with too few samples.
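As a minimal sketch of that last case (assuming ``scikit-learn`` is installed;
the estimator and its settings below are only illustrative), a regularised
regression model can be passed to `~chaospy.regression.fit_regression` through
its ``model`` argument. Note that ``fit_intercept`` must be disabled, since the
constant term is already part of the expansion::
>>> from sklearn.linear_model import Lasso  # doctest: +SKIP
>>> approx_model = chaospy.fit_regression(
...     orthogonal_expansion, samples, solves,
...     model=Lasso(alpha=0.1, fit_intercept=False))  # doctest: +SKIP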
"""
import logging
import numpy
from scipy import linalg
import numpoly
import chaospy
def fit_regression(
polynomials,
abscissas,
evals,
model=None,
retall=False,
):
"""
Fit a polynomial chaos expansion using linear regression.
Args:
polynomials (numpoly.ndpoly):
Polynomial expansion with ``polynomials.shape == (M,)`` and
`polynomials.dim=D`.
abscissas (numpy.ndarray):
Collocation nodes with ``abscissas.shape == (D, K)``.
evals (numpy.ndarray):
Model evaluations with ``len(evals) == K``.
model (Optional[sklearn.base.BaseEstimator]):
By default regression is done using the classical least-squares
method. However, if provided, an `sklearn` regression model can be
used instead.
retall (bool):
If True, return the Fourier coefficients in addition to R.
If ``retall == 2``, also return the matrix of polynomial evaluations.
Returns:
(chaospy.ndpoly, numpy.ndarray):
Fitted polynomial with ``R.shape=evals.shape[1:]`` and ``R.dim=D``.
The Fourier coefficients in the estimation.
Examples:
>>> x, y = chaospy.variable(2)
>>> polynomials = chaospy.polynomial([1, x, y])
>>> abscissas = [[-1,-1,1,1], [-1,1,-1,1]]
>>> evals = [0,1,1,2]
>>> chaospy.fit_regression(polynomials, abscissas, evals).round(14)
polynomial(0.5*q1+0.5*q0+1.0)
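With ``retall=True`` the Fourier coefficients of the fit are returned as
well (a small check of the same fit; the expected values follow from the
least-squares solution shown above):
>>> _, coefficients = chaospy.fit_regression(
...     polynomials, abscissas, evals, retall=True)
>>> bool(numpy.allclose(coefficients, [1.0, 0.5, 0.5]))
True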
"""
logger = logging.getLogger(__name__)
abscissas = numpy.asarray(abscissas)
if len(abscissas.shape) == 1:
abscissas = abscissas.reshape(1, *abscissas.shape)
evals = numpy.array(evals)
poly_evals = polynomials(*abscissas).T
shape = evals.shape[1:]
if shape:
evals = evals.reshape(evals.shape[0], int(numpy.prod(evals.shape[1:])))
if model is None:
uhat = linalg.lstsq(poly_evals, evals)[0]
else:
try:
from sklearn.base import BaseEstimator
except ImportError:
raise ValueError(
"arg model != None requires that scikit-learn is installed")
if not isinstance(model, BaseEstimator):
raise ValueError("model not recognized; "
"Optional[sklearn.base.BaseEstimator] expected")
if hasattr(model, "fit_intercept"):
assert not model.fit_intercept, (
"model %s must have fit_intercept=False" % model.__class__.__name__)
uhat = model.fit(poly_evals, evals).coef_.T
if shape:
evals = evals.reshape(evals.shape[0], *shape)
approx_model = numpoly.sum((polynomials*uhat.T), -1)
approx_model = approx_model.reshape(shape)
if retall == 1:
return approx_model, uhat
if retall == 2:
return approx_model, uhat, poly_evals
return approx_model
|
{"hexsha": "94fa1a73f1b145ed7131d08922c5dd14dafa7c5f", "size": 4917, "ext": "py", "lang": "Python", "max_stars_repo_path": "chaospy/regression.py", "max_stars_repo_name": "lblonk/chaospy", "max_stars_repo_head_hexsha": "1759a4307c6134b74ce63ff44973195f1e185f94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-28T07:53:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-28T07:53:32.000Z", "max_issues_repo_path": "chaospy/regression.py", "max_issues_repo_name": "lblonk/chaospy", "max_issues_repo_head_hexsha": "1759a4307c6134b74ce63ff44973195f1e185f94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chaospy/regression.py", "max_forks_repo_name": "lblonk/chaospy", "max_forks_repo_head_hexsha": "1759a4307c6134b74ce63ff44973195f1e185f94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6304347826, "max_line_length": 84, "alphanum_fraction": 0.639617653, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1323}
|