text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import numpy as np
from pytest import fixture
import pyvista
from pyvista import examples
pyvista.OFF_SCREEN = True
@fixture(scope='session')
def set_mpl():
    """Force matplotlib onto the non-interactive Agg backend so no windows pop up."""
    try:
        import matplotlib
    except Exception:
        # matplotlib is optional for this suite; nothing to configure.
        return
    matplotlib.use('agg', force=True)
@fixture()
def cube():
    """Simple pyvista Cube dataset."""
    return pyvista.Cube()


@fixture()
def airplane():
    """Airplane example dataset."""
    return examples.load_airplane()


@fixture()
def rectilinear():
    """Rectilinear-grid example dataset."""
    return examples.load_rectilinear()


@fixture()
def sphere():
    """Sphere example dataset."""
    return examples.load_sphere()


@fixture()
def uniform():
    """Uniform-grid example dataset."""
    return examples.load_uniform()


@fixture()
def ant():
    """Ant example dataset."""
    return examples.load_ant()


@fixture()
def globe():
    """Globe example dataset."""
    return examples.load_globe()


@fixture()
def hexbeam():
    """Hexbeam example dataset."""
    return examples.load_hexbeam()
@fixture()
def struct_grid():
    """StructuredGrid over a regular float32 lattice spanning [-10, 10) with step 2."""
    axis = np.arange(-10, 10, 2, dtype=np.float32)
    x, y, z = np.meshgrid(axis, axis, axis)
    return pyvista.StructuredGrid(x, y, z)
@fixture()
def plane():
    """Simple pyvista Plane dataset."""
    return pyvista.Plane()


@fixture()
def spline():
    """Spline example dataset."""
    return examples.load_spline()


@fixture()
def tri_cylinder():
    """Triangulated cylinder"""
    return pyvista.Cylinder().triangulate()
@fixture()
def datasets():
    """One example dataset of each core pyvista type."""
    loaders = (
        examples.load_uniform,      # UniformGrid
        examples.load_rectilinear,  # RectilinearGrid
        examples.load_hexbeam,      # UnstructuredGrid
        examples.load_airplane,     # PolyData
        examples.load_structured,   # StructuredGrid
    )
    return [load() for load in loaders]
|
{"hexsha": "be29edfedecbea76344a05b0b003821dbeea01b1", "size": 1601, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/conftest.py", "max_stars_repo_name": "eino/pyvista", "max_stars_repo_head_hexsha": "b9c4e67d43491958f70b04cd2664965b938910ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-21T04:47:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T04:47:27.000Z", "max_issues_repo_path": "tests/conftest.py", "max_issues_repo_name": "eino/pyvista", "max_issues_repo_head_hexsha": "b9c4e67d43491958f70b04cd2664965b938910ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2022-03-25T03:33:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T03:37:05.000Z", "max_forks_repo_path": "tests/conftest.py", "max_forks_repo_name": "eino/pyvista", "max_forks_repo_head_hexsha": "b9c4e67d43491958f70b04cd2664965b938910ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.6770833333, "max_line_length": 55, "alphanum_fraction": 0.6502186134, "include": true, "reason": "import numpy", "num_tokens": 387}
|
"""
Evaluate using simple graph convolution networks.
"""
from shutil import which
import numpy as np
from matplotlib import pyplot as plt
import pdb
import scipy as sp
from scipy.sparse.csgraph import laplacian
from scipy.sparse.linalg import eigsh
from scipy.sparse.linalg.eigen.arpack.arpack import eigs
from sklearn.metrics import confusion_matrix
from sklearn.cluster import KMeans
from munkres import Munkres
from tqdm.auto import tqdm
def convolution_step(features, uedge, vedge, num_conv=1):
    """Smooth ``features`` over an unweighted graph given by edge lists.

    Builds the symmetrized adjacency matrix from ``(uedge, vedge)``, forms
    the propagator ``I - L/2`` from the normalized graph Laplacian ``L``,
    and applies it ``num_conv`` times to ``features``.
    """
    num_nodes = features.shape[0]
    unit_weights = np.ones(len(uedge), dtype=np.float64)
    adjacency = sp.sparse.csr_matrix(
        (unit_weights, (uedge, vedge)), shape=(num_nodes, num_nodes))
    adjacency = adjacency + adjacency.T
    lap = laplacian(adjacency, normed=True)
    propagator = sp.sparse.eye(num_nodes) - 0.5 * lap
    smoothed = features
    for _ in range(num_conv):
        smoothed = propagator.dot(smoothed)
    return smoothed
def convolution_step_weighted(features, uedge, vedge, wedge, num_conv=1, beta=1.0):
    """Smooth ``features`` over a weighted graph.

    Edge weights ``wedge`` (dissimilarities) are mapped to similarities with
    ``exp(-beta * w / std(w))``; the propagator ``I - L/lambda_max`` is scaled
    by the largest Laplacian eigenvalue so its spectrum lies in [-1, 1].
    """
    num_nodes = features.shape[0]
    similarities = np.exp(-1 * beta * wedge / (wedge.std() + 1e-6))
    adjacency = sp.sparse.csr_matrix(
        (similarities, (uedge, vedge)), shape=(num_nodes, num_nodes))
    adjacency = adjacency + adjacency.T
    lap = laplacian(adjacency, normed=True)
    top_eigval, _ = eigsh(lap, k=1, which='LM')
    propagator = sp.sparse.eye(num_nodes) - (1 / np.max(top_eigval)) * lap
    smoothed = features
    for _ in range(num_conv):
        smoothed = propagator.dot(smoothed)
    return smoothed
def update_pred_labels_matching(pred_labels, gt_labels):
    """Relabel predicted cluster ids to best agree with the ground-truth classes.

    Runs the Hungarian algorithm (munkres) on the confusion matrix between
    ground truth and predictions, restricted to labelled samples
    (``gt_labels != 0``), and returns the remapped prediction array.
    """
    # Only labelled samples take part in the matching (label 0 = unlabelled).
    indfilter = gt_labels != 0
    pred, gt = pred_labels[indfilter], gt_labels[indfilter]
    number_labels_pred = np.max(np.unique(pred_labels))
    number_labels_gt = len(np.unique(gt))  # currently unused
    # Rows/columns of C follow the sorted set of ground-truth labels.
    C = confusion_matrix(gt, pred, labels=np.unique(np.sort(gt)))
    matching = Munkres()
    # Maximize agreement by minimizing the negated (transposed) confusion matrix;
    # rows of C.T index predictions, columns index ground-truth classes.
    indexes = matching.compute((-1*(C.T)))
    map_arr = np.zeros(number_labels_pred+1, dtype=np.int64)
    for row, col in indexes:
        # col+1 converts the matched ground-truth index back to a 1-based label.
        map_arr[row] = col+1
    # NOTE(review): the ``pred_labels-1`` shift implies predicted labels are
    # 1-based here, while KMeans in get_cluster_score produces 0-based labels —
    # confirm the intended indexing convention.
    return map_arr[pred_labels-1]
def cluster_OA_with_matching(pred_labels, gt_labels):
    """Overall accuracy after optimally matching predicted clusters to classes.

    The number of classes should be the same.
    """
    matched = update_pred_labels_matching(pred_labels, gt_labels)
    labelled = gt_labels > 0
    return np.mean(matched[labelled] == gt_labels[labelled])
def get_cluster_score(data, uedge, vedge, labels, wedge=None, beta=1.0):
    """Score KMeans clusterings of graph-convolved features over 200 steps.

    Each step propagates the features once with the graph propagator, projects
    them onto their top singular vectors, clusters with KMeans, and scores the
    clustering against ``labels`` (label 0 is treated as unlabelled).

    Parameters
    ----------
    data : ndarray of shape (n_nodes, n_features)
    uedge, vedge : edge endpoint index arrays
    labels : ground-truth labels, assumed to be 1..K with 0 = unlabelled
    wedge : optional edge dissimilarities in [0, 1]; ``None`` means unweighted
    beta : kept for interface compatibility; unused in the current weighting

    Returns
    -------
    (score1, score2) : per-step mean accuracy, and its running maximum
    """
    sx, sy = data.shape
    features = np.array(data.reshape((sx, sy)), copy=True)
    n_clusters = np.max(labels)
    max_score = 0.0
    score1 = []
    score2 = []
    if wedge is None:
        # Unweighted graph: fixed propagator I - L/2.
        wedge = np.ones(len(uedge), dtype=np.float64)
        graph = sp.sparse.csr_matrix((wedge, (uedge, vedge)), shape=(sx, sx))
        graph = graph + graph.transpose()
        L = laplacian(graph, normed=True)
        adj_matrix = sp.sparse.eye(sx) - 0.5*L
    else:
        # (was `elif wedge is not None:` — always true at this point)
        # Edge weights are dissimilarities; convert to similarities.
        # wedge_sim = np.exp(-1*beta*wedge/(wedge.std()+1e-6))+1e-6
        wedge_sim = 1-wedge
        graph = sp.sparse.csr_matrix((wedge_sim, (uedge, vedge)), shape=(sx, sx))
        graph = graph + graph.transpose()
        L = laplacian(graph, normed=True)
        # Scale by the largest eigenvalue so the propagator's spectrum stays in [-1, 1].
        eigval, _ = eigsh(L, k=1, which='LM')
        adj_matrix = sp.sparse.eye(sx) - (1/np.max(eigval))*L
    for _ in tqdm(range(200), desc='Spectral Eval', leave=False):
        features = adj_matrix.dot(features)
        score_tmp = []
        # Cluster in the subspace spanned by the top-n_clusters singular vectors.
        u, s, v = sp.sparse.linalg.svds(features, k=n_clusters, which='LM')
        for _ in range(1):
            kmeans = KMeans(n_clusters=n_clusters, n_init=10).fit(u)
            predict_labels = kmeans.predict(u)
            score_tmp.append(cluster_OA_with_matching(predict_labels, labels))
        max_score = max(max_score, np.mean(score_tmp))
        score1.append(np.mean(score_tmp))
        score2.append(max_score)
    return score1, score2
def evaluate_using_GCN(data, uedge, vedge, labels):
    """Repeat get_cluster_score 10 times; report mean/std over all collected scores.

    NOTE(review): each element appended to ``scores`` is the (score1, score2)
    pair of per-step lists returned by get_cluster_score, so np.mean/np.std
    aggregate over every step of every run — confirm this is the intended
    statistic.
    """
    repeats = 10
    scores = []
    with tqdm(total=repeats, desc='GCN', leave=False) as pbar:
        for _ in range(repeats):
            scores.append(get_cluster_score(data, uedge, vedge, labels))
            pbar.update()
            pbar.set_postfix({'mean': '{:0.2f}'.format(np.mean(scores)),
                              'var': '{:0.4f}'.format(np.std(scores))})
    return np.mean(scores), np.std(scores)
def evaluate_using_GCN_weighted(data, uedge, vedge, wedge, labels):
    """Weighted-graph variant of evaluate_using_GCN (passes ``wedge`` through).

    NOTE(review): like evaluate_using_GCN, the mean/std aggregate over the
    per-step score lists of every run — confirm the intended statistic.
    """
    repeats = 10
    scores = []
    with tqdm(total=repeats, desc='GCN', leave=False) as pbar:
        for _ in range(repeats):
            scores.append(get_cluster_score(data, uedge, vedge, labels, wedge))
            pbar.update()
            pbar.set_postfix({'mean': '{:0.2f}'.format(np.mean(scores)),
                              'var': '{:0.4f}'.format(np.std(scores))})
    return np.mean(scores), np.std(scores)
|
{"hexsha": "1a067d6e559aa23cf5677fe414a083ddd715dbe2", "size": 4794, "ext": "py", "lang": "Python", "max_stars_repo_path": "Evaluate_GCN.py", "max_stars_repo_name": "ac20/EnsembleEdgeWeightsHSI", "max_stars_repo_head_hexsha": "cee9a1c4252a606e4748b8dec879d4603dd55d29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Evaluate_GCN.py", "max_issues_repo_name": "ac20/EnsembleEdgeWeightsHSI", "max_issues_repo_head_hexsha": "cee9a1c4252a606e4748b8dec879d4603dd55d29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Evaluate_GCN.py", "max_forks_repo_name": "ac20/EnsembleEdgeWeightsHSI", "max_forks_repo_head_hexsha": "cee9a1c4252a606e4748b8dec879d4603dd55d29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.96, "max_line_length": 114, "alphanum_fraction": 0.6405924072, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1297}
|
#ifndef SHIFT_CORE_RINGBUFFER_HPP
#define SHIFT_CORE_RINGBUFFER_HPP
#include <utility>
#include <algorithm>
#include <cstring>
#include <shift/core/boost_disable_warnings.hpp>
#include <boost/call_traits.hpp>
#include <shift/core/boost_restore_warnings.hpp>
namespace shift::core
{
/// A ring buffer using a single linear memory buffer to store elements of
/// various type.
/// @remarks
/// The buffer is organized into up to two regions of memory which are
/// marked using the begin and end member pointers. Use of these is as
/// follows (b1 = _begin1, e1 = _end1, b2 = _begin2, e2 = _end2):
/// Case 1) Buffer is empty:
/// | | | | | | | | | | |
/// ^b1=e1=e2 ^b2
/// Case 2) Buffer is full:
/// |a|b|c|d|e|f|g|h|i|j|
/// ^b1=e2 ^e1=b2
/// Case 3) Data is split into two blocks:
/// |e|f|g| | | |a|b|c|d|
/// ^b2 ^e2 ^b1 ^e1
/// Case 4) Free space is split into two blocks:
/// | | |a|b|c|d|e|f| | |
/// ^e2 ^b1 ^e1 ^b2
template <typename T>
class ring_buffer
{
public:
  using buffer_range_t = std::pair<T*, std::size_t>;
  using const_buffer_range_t = std::pair<const T*, std::size_t>;

  /// Constructor.
  ring_buffer(std::size_t capacity = 4096);

  /// Destructor.
  ~ring_buffer();

  // NOTE(review): the class owns raw new[] storage but declares no copy/move
  // operations — copying a ring_buffer would double-delete _data (rule of
  // three). Confirm instances are never copied, or delete the copy operations.

  /// Returns the size in bytes of the data currently being stored.
  std::size_t size() const;

  /// Returns the size in bytes of the internal memory buffer.
  std::size_t capacity() const;

  /// Attempts to write a block of data to the ring buffer. If there is not
  /// enough free space the method will write as much as possible.
  /// @param buffer
  ///   A pointer to the memory to be written. If this is null then no data
  ///   is actually written (see ring_buffer::produce).
  /// @param size
  ///   The number of elements in the data block to write to the ring buffer.
  /// @param allow_partial
  ///   When set to true attampts to write more elements than free space is
  ///   available will instruct the method to write as many elements as
  ///   possible to the ring buffer. Otherwise these calls will return without
  ///   writing any data.
  /// @return
  ///   The number of elements that have been written to the ring buffer.
  std::size_t write(const T* buffer, std::size_t size,
                    bool allow_partial = false);

  /// Templated version of ring_buffer::write.
  template <typename U>
  inline std::size_t write(typename boost::call_traits<U>::param_type value,
                           bool allow_partial = false);

  /// Attempts to read a number of elements from the ring buffer.
  /// @param buffer
  ///   The memory location to write the elements to. If this parameter is
  ///   null the method will not copy any elements (see ring_buffer::consume).
  ///   If this parameter is not null it must be large enough to store at
  ///   least size elements.
  /// @param size
  ///   The number of elements to read from the ring buffer.
  /// @param allow_partial
  ///   When set to true attempts to read more elements than are available
  ///   instruct the method to read all available data. Otherwise these
  ///   calls will return without reading any data.
  /// @return
  ///   The number of elements that have been read from the ring buffer.
  std::size_t read(T* buffer, std::size_t size, bool allow_partial = false);

  /// Templated version of ring_buffer::read.
  template <typename U>
  inline std::size_t read(typename boost::call_traits<U>::reference value,
                          bool allow_partial = false);

  /// Produces data without modifying the internal buffer. This is useful
  /// after directly writing to the buffer using spare_array*.
  /// @param size
  ///   The number of elements to produce.
  /// @param allow_partial
  ///   If there is not enough space in the ring buffer and this parameter is
  ///   set to true then this call will write as many elements until the ring
  ///   buffer is full. If this parameter is false attempts to write too many
  ///   elements will be ignored.
  /// @return
  ///   The number of elements successfully produced.
  std::size_t produce(std::size_t size, bool allow_partial = false);

  /// Consumes data without actually reading from the buffer. This is useful
  /// after directly reading from the buffer using data_array*.
  /// @param size
  ///   The number of elements to consume from the ring buffer.
  /// @param allow_partial
  ///   When set to true attempts to read more elements than are available
  ///   will be performed partially. Otherwise these attempts will result in
  ///   no elements being read from the ring buffer.
  /// @return
  ///   The number of elements successfully consumed.
  std::size_t consume(std::size_t size, bool allow_partial = false);

  /// Returns a description of the memory region containing the first block
  /// of data. If there is none the size of the region will be zero and the
  /// pointer will be unspecified.
  buffer_range_t data_array1();

  /// @see ring_buffer::data_array1.
  const_buffer_range_t data_array1() const;

  /// Returns a description of the memory region containing the second block
  /// of data. If there is none the size of the region will be zero and the
  /// pointer will be unspecified.
  buffer_range_t data_array2();

  /// @see ring_buffer::data_array2.
  const_buffer_range_t data_array2() const;

  /// Returns a description of the free memory region behind the first block
  /// of data. If there is none the size of the region will be zero and the
  /// pointer will be unspecified.
  buffer_range_t spare_array1();

  /// @see ring_buffer::spare_array1.
  const_buffer_range_t spare_array1() const;

  /// Returns a description of the free memory region behind the second block
  /// of data. If there is none the size of the region will be zero and the
  /// pointer will be unspecified.
  buffer_range_t spare_array2();

  /// @see ring_buffer::spare_array2.
  const_buffer_range_t spare_array2() const;

private:
  T* _data = nullptr;       ///< Owned linear storage of _capacity elements.
  std::size_t _capacity;    ///< Total element capacity of _data.
  std::size_t _size = 0;    ///< Number of elements currently stored.
  T* _begin1 = nullptr;     ///< Start of the first data region.
  T* _end1 = nullptr;       ///< One past the end of the first data region.
  T* _begin2 = nullptr;     ///< Start of the second (wrapped) data region.
  T* _end2 = nullptr;       ///< One past the end of the second data region.
};
template <typename T>
ring_buffer<T>::ring_buffer(std::size_t capacity) : _capacity(capacity)
{
  // Start with one empty data region at the front of the storage; the second
  // region is parked at the very end (see case 1 in the class documentation).
  _data = new T[_capacity];
  _begin1 = _data;
  _end1 = _data;
  _begin2 = _data + _capacity;
  _end2 = _data;
}
template <typename T>
ring_buffer<T>::~ring_buffer()
{
  // _data was allocated with new[] in the constructor, so it must be
  // released with delete[]; a plain `delete` on an array is undefined
  // behavior and skips element destructors for non-trivial T.
  delete[] _data;
}
/// Returns the number of elements currently stored.
template <typename T>
std::size_t ring_buffer<T>::size() const
{
  return _size;
}

/// Returns the total number of elements the buffer can hold.
template <typename T>
std::size_t ring_buffer<T>::capacity() const
{
  return _capacity;
}
/// Writes up to `size` elements from `buffer` (or only advances the write
/// position when `buffer` is null). memcpy requires T to be trivially
/// copyable; the byte counts are `count * sizeof(T)` (the previous code
/// passed element counts to memcpy, copying too few bytes for sizeof(T) > 1).
template <typename T>
std::size_t ring_buffer<T>::write(const T* buffer, std::size_t size,
                                  bool allow_partial)
{
  std::size_t free = _capacity - _size;
  if (size == 0 || free == 0)
    return 0;
  if (size > free)
  {
    // Not enough room: either clamp the request or refuse it entirely.
    if (!allow_partial)
      return 0;
    size = free;
  }
  if (_begin2 > _end1)
  {
    // There is free space behind the first data block.
    std::size_t size_block1 = _begin2 - _end1;
    if (size_block1 > size)
    {
      // The whole request fits behind block one.
      if (buffer)
        memcpy(_end1, buffer, size * sizeof(T));
      _end1 += size;
      _size += size;
    }
    else
    {
      std::size_t size_block2 = size - size_block1;
      if (buffer)
        memcpy(_end1, buffer, size_block1 * sizeof(T));
      _end1 = _begin2;
      // There is not enough space behind data block one, so start the second
      // one at the front of the storage.
      _begin2 = _data;
      if (buffer)
        memcpy(_begin2, buffer + size_block1, size_block2 * sizeof(T));
      _end2 = _begin2 + size_block2;
      _size += size;
    }
  }
  else
  {
    // Free space lies between the two blocks; append to block two.
    if (buffer)
      memcpy(_end2, buffer, size * sizeof(T));
    _end2 += size;
    _size += size;
  }
  return size;
}
/// Reinterprets *value* as an array of sizeof(U)/sizeof(T) elements of T and
/// writes it to the ring buffer.
/// NOTE(review): assumes U is trivially copyable and sizeof(U) is an exact
/// multiple of sizeof(T) — any remainder is silently truncated by the
/// integer division. Confirm with callers.
template <typename T>
template <typename U>
inline std::size_t ring_buffer<T>::write(
  typename boost::call_traits<U>::param_type value, bool allow_partial)
{
  return write(reinterpret_cast<const T*>(&value), sizeof(U) / sizeof(T),
               allow_partial);
}
/// Reads up to `size` elements into `buffer` (or only advances the read
/// position when `buffer` is null). memcpy requires T to be trivially
/// copyable; the byte counts are `count * sizeof(T)` (the previous code
/// passed element counts to memcpy, copying too few bytes for sizeof(T) > 1).
template <typename T>
std::size_t ring_buffer<T>::read(T* buffer, std::size_t size,
                                 bool allow_partial)
{
  if (size == 0 || _size == 0)
    return 0;
  if (size > _size)
  {
    // Not enough data available: clamp the request or refuse it entirely.
    if (!allow_partial)
      return 0;
    size = _size;
  }
  std::size_t size_block1 = _end1 - _begin1;
  if (size_block1 > size)
  {
    // The request is served entirely from block one.
    if (buffer)
      memcpy(buffer, _begin1, size * sizeof(T));
    _begin1 += size;
    _size -= size;
  }
  else
  {
    // Block one is fully consumed; the remainder (if any) comes from the
    // wrapped block two at the front of the storage.
    if (buffer)
    {
      memcpy(buffer, _begin1, size_block1 * sizeof(T));
      if (size_block1 < size)
        memcpy(buffer + size_block1, _data, (size - size_block1) * sizeof(T));
    }
    if (_size > size)
    {
      // What is left of block two becomes the new block one.
      _begin1 = _begin2 + (size - size_block1);
      _end1 = _end2;
      _size -= size;
    }
    else
    {
      // Ringbuffer is empty now, so completely reset it.
      _begin1 = _data;
      _end1 = _data;
      _size = 0;
    }
    _begin2 = _data + _capacity;
    _end2 = _begin2;
  }
  return size;
}
/// Reads sizeof(U)/sizeof(T) elements of T into *value*, reinterpreted as U.
/// NOTE(review): assumes U is trivially copyable and sizeof(U) is an exact
/// multiple of sizeof(T) — confirm with callers.
template <typename T>
template <typename U>
inline std::size_t ring_buffer<T>::read(
  typename boost::call_traits<U>::reference value, bool allow_partial)
{
  return read(reinterpret_cast<T*>(&value), sizeof(U) / sizeof(T),
              allow_partial);
}
/// Advances the write position without copying anything; write() treats a
/// null source pointer as "book-keeping only".
template <typename T>
std::size_t ring_buffer<T>::produce(std::size_t size, bool allow_partial)
{
  return write(nullptr, size, allow_partial);
}

/// Advances the read position without copying anything; read() treats a
/// null destination pointer as "book-keeping only".
template <typename T>
std::size_t ring_buffer<T>::consume(std::size_t size, bool allow_partial)
{
  return read(nullptr, size, allow_partial);
}
/// First data region: [_begin1, _end1).
template <typename T>
typename ring_buffer<T>::buffer_range_t ring_buffer<T>::data_array1()
{
  return buffer_range_t(_begin1, _end1 - _begin1);
}

/// @see ring_buffer::data_array1.
template <typename T>
typename ring_buffer<T>::const_buffer_range_t ring_buffer<T>::data_array1()
  const
{
  return const_buffer_range_t(_begin1, _end1 - _begin1);
}

/// Second (wrapped) data region: [_begin2, _end2); clamped to zero length
/// while no second block exists (_begin2 parked past _end2).
template <typename T>
typename ring_buffer<T>::buffer_range_t ring_buffer<T>::data_array2()
{
  return buffer_range_t(_begin2, std::max<std::ptrdiff_t>(_end2 - _begin2, 0));
}

/// @see ring_buffer::data_array2.
template <typename T>
typename ring_buffer<T>::const_buffer_range_t ring_buffer<T>::data_array2()
  const
{
  return const_buffer_range_t(_begin2,
                              std::max<std::ptrdiff_t>(_end2 - _begin2, 0));
}

/// Free space behind block one: [_end1, _begin2), clamped to zero length.
template <typename T>
typename ring_buffer<T>::buffer_range_t ring_buffer<T>::spare_array1()
{
  return buffer_range_t(_end1, std::max<std::ptrdiff_t>(_begin2 - _end1, 0));
}

/// @see ring_buffer::spare_array1.
template <typename T>
typename ring_buffer<T>::const_buffer_range_t ring_buffer<T>::spare_array1()
  const
{
  return const_buffer_range_t(_end1,
                              std::max<std::ptrdiff_t>(_begin2 - _end1, 0));
}

/// Free space behind block two: [_end2, _begin1), clamped to zero length.
template <typename T>
typename ring_buffer<T>::buffer_range_t ring_buffer<T>::spare_array2()
{
  return buffer_range_t(_end2, std::max<std::ptrdiff_t>(_begin1 - _end2, 0));
}

/// @see ring_buffer::spare_array2.
template <typename T>
typename ring_buffer<T>::const_buffer_range_t ring_buffer<T>::spare_array2()
  const
{
  return const_buffer_range_t(_end2,
                              std::max<std::ptrdiff_t>(_begin1 - _end2, 0));
}
}
#endif
|
{"hexsha": "f0583e8c44290a1c1ed21be5fe6bf2c22121ca16", "size": 11068, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "shift/core/public/shift/core/ring_buffer.hpp", "max_stars_repo_name": "cspanier/shift", "max_stars_repo_head_hexsha": "5b3b9be310155fbc57d165d06259b723a5728828", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-11-28T18:14:08.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-06T07:44:36.000Z", "max_issues_repo_path": "shift/core/public/shift/core/ring_buffer.hpp", "max_issues_repo_name": "cspanier/shift", "max_issues_repo_head_hexsha": "5b3b9be310155fbc57d165d06259b723a5728828", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2018-11-06T21:01:05.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-19T07:52:52.000Z", "max_forks_repo_path": "shift/core/public/shift/core/ring_buffer.hpp", "max_forks_repo_name": "cspanier/shift", "max_forks_repo_head_hexsha": "5b3b9be310155fbc57d165d06259b723a5728828", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.752688172, "max_line_length": 79, "alphanum_fraction": 0.6638959162, "num_tokens": 2933}
|
import carla
import numpy as np
class Vehicle:
    """Wrapper around a CARLA vehicle actor with optional dash and chase cameras.

    Spawns the vehicle at a random recommended spawn point of the current map,
    optionally enables autopilot, and attaches up to two RGB cameras.
    """

    def __init__(self, controller, vehicle_id, auto_pilot=True, dashcam=True,
                 third_camera=True, color=None):
        """Spawn the vehicle and its cameras.

        Parameters
        ----------
        controller : object exposing a carla ``world`` attribute
        vehicle_id : blueprint id string, e.g. 'vehicle.tesla.model3'
        auto_pilot : enable the simulator autopilot after spawning
        dashcam : attach a rigidly mounted windshield camera
        third_camera : attach a spring-arm chase camera
        color : 'R,G,B' blueprint color string; random recommended color if None
        """
        self.controller = controller
        self.world = self.controller.world
        self.blueprint = self.controller.world.get_blueprint_library().find(vehicle_id)
        # Pick a random recommended spawn point of the current map.
        recommend_spawn_points = self.world.get_map().get_spawn_points()
        vehicle_spawn_point = np.random.choice(recommend_spawn_points)
        if color is None:
            color = np.random.choice(
                self.blueprint.get_attribute('color').recommended_values)
        self.blueprint.set_attribute('color', color)
        self.entity = self.world.spawn_actor(
            self.blueprint, vehicle_spawn_point)
        self.set_autopilot(auto_pilot)
        # Shared RGB camera blueprint for both cameras.
        # NOTE(review): 1028 looks like a typo for 1024 — confirm the intended
        # horizontal resolution before changing it.
        camera_bp = self.world.get_blueprint_library().find('sensor.camera.rgb')
        camera_bp.set_attribute('image_size_x', str(1028))
        camera_bp.set_attribute('image_size_y', str(720))
        self.dash_camera = None
        if dashcam:
            # Rigidly mounted near the windshield, facing forward.
            self.dash_camera = self.world.spawn_actor(
                camera_bp,
                carla.Transform(carla.Location(x=1.5, y=0, z=1.2)),
                self.entity,
                carla.AttachmentType.Rigid)
        self.third_camera = None
        if third_camera:
            # Chase camera on a spring arm behind and above the vehicle.
            self.third_camera = self.world.spawn_actor(
                camera_bp,
                carla.Transform(carla.Location(x=-5.5, z=2.5),
                                carla.Rotation(pitch=8.0)),
                self.entity,
                carla.AttachmentType.SpringArm)

    def set_autopilot(self, value):
        """Enable or disable the simulator autopilot for this vehicle."""
        self.entity.set_autopilot(value)
|
{"hexsha": "85afab026c98f6e97cba97c1c8bd83a1e2d24781", "size": 2104, "ext": "py", "lang": "Python", "max_stars_repo_path": "monitor/Vehicle.py", "max_stars_repo_name": "canyue1111/Alset-Autopilot-System", "max_stars_repo_head_hexsha": "c85764ecb6788f947aa97bcfc4e95a87e371fd7d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "monitor/Vehicle.py", "max_issues_repo_name": "canyue1111/Alset-Autopilot-System", "max_issues_repo_head_hexsha": "c85764ecb6788f947aa97bcfc4e95a87e371fd7d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "monitor/Vehicle.py", "max_forks_repo_name": "canyue1111/Alset-Autopilot-System", "max_forks_repo_head_hexsha": "c85764ecb6788f947aa97bcfc4e95a87e371fd7d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.08, "max_line_length": 109, "alphanum_fraction": 0.5057034221, "include": true, "reason": "import numpy", "num_tokens": 368}
|
from keras.models import Sequential
from keras.utils import np_utils
from keras import models
from keras import layers
from keras import optimizers
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Read data
train = pd.read_csv('../../source/train.csv')
# DataFrame.ix was removed in pandas 1.0; .iloc does the same positional slicing.
labels = train.iloc[:, 0].values.astype('int32')
x_train = (train.iloc[:, 1:].values).astype('float32')
x_test = (pd.read_csv('../../source/test.csv').values).astype('float32')

# one hot
y_train = np_utils.to_categorical(labels)

# pre-processing: divide by max and subtract mean
scale = np.max(x_train)
x_train /= scale
x_test /= scale
# Was np.std(x_train), which contradicted both the variable name and the
# comment above and centred the data with the standard deviation instead.
mean = np.mean(x_train)
x_train -= mean
x_test -= mean

# build network: three conv blocks followed by a small dense classifier
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))

# compile
model.compile(optimizer=optimizers.RMSprop(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])

# reshape to (samples, 28, 28, 1) for the conv layers; -1 infers the sample
# count instead of hard-coding 42000/28000
x_train = x_train.reshape((-1, 28, 28, 1))
x_train = x_train.astype('float32')
x_test = x_test.reshape((-1, 28, 28, 1))
x_test = x_test.astype('float32')

print("Learning...")
# history = model.fit(x_train, y_train, epochs=10, batch_size=16)
history = model.fit(x_train, y_train, epochs=10, batch_size=16, validation_split=0.2)

# NOTE(review): newer Keras versions report 'accuracy'/'val_accuracy' instead
# of 'acc'/'val_acc' — adjust these keys if a KeyError is raised here.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'bo', label='Training Accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation Accuracy')
plt.title('Training & Validation Accuracy')
plt.legend()

plt.figure()
plt.plot(epochs, loss, 'bo', label='Training Loss')  # fixed 'Tranning' typo
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training & Validation Loss')
plt.legend()
plt.show()

model.save('kaggle_aug_drop_1.h5')

# print("Generating test predictions...")
# preds = model.predict_classes(x_test, verbose=0)
# def write_preds(preds, fname):
#     pd.DataFrame({"ImageId": list(range(1,len(preds)+1)), "Label": preds}).to_csv(fname, index=False, header=True)
# write_preds(preds, "predc-keras-convnet3.csv")
|
{"hexsha": "01307f570121e1b15ebffb21585901f512009476", "size": 2475, "ext": "py", "lang": "Python", "max_stars_repo_path": "digits/digits_convnet_kaggle2.py", "max_stars_repo_name": "MidSummersEveee/Chollet", "max_stars_repo_head_hexsha": "cd2b23c9a72d9a6dcc776adaef8acbb74e04e88b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "digits/digits_convnet_kaggle2.py", "max_issues_repo_name": "MidSummersEveee/Chollet", "max_issues_repo_head_hexsha": "cd2b23c9a72d9a6dcc776adaef8acbb74e04e88b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "digits/digits_convnet_kaggle2.py", "max_forks_repo_name": "MidSummersEveee/Chollet", "max_forks_repo_head_hexsha": "cd2b23c9a72d9a6dcc776adaef8acbb74e04e88b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.902173913, "max_line_length": 116, "alphanum_fraction": 0.72, "include": true, "reason": "import numpy", "num_tokens": 675}
|
"""
MIT License
Copyright (c) 2021 Libin Jiao
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import shutil
import sys
import time
from xml.dom.minidom import parse
import numpy as np
import tensorflow as tf
from PIL import Image
from model import pix2pix
input_height, input_width = 512, 512  # spatial size fed to the network
batch_size = 1
buffer_size = batch_size * 4  # prefetch/shuffle budget tied to the batch size
checkpoint_path = 'checkpoints/train/'
LAMBDA = 100  # weight of the L1 term in the generator loss
# Shared BCE criterion for both adversarial losses (logit outputs).
loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def readXML(config_xml):
    """Parse the training configuration XML into a plain dict.

    Parameters
    ----------
    config_xml : str
        Path to the configuration XML file.

    Returns
    -------
    dict
        Parsed settings (ints/floats/bools/strings), echoed to stdout.
    """
    domTree = parse(config_xml)
    rootNode = domTree.documentElement

    def _text(tag):
        # Raw text content of the first element named `tag`.
        return rootNode.getElementsByTagName(tag)[0].childNodes[0].data

    # Insertion order matters: it fixes the order of the echoed settings.
    args = {
        'num_epochs': int(_text('numEpochs')),
        'input_channels': int(_text('inputChannels')),
        'output_channels': int(_text('outputChannels')),
        'from_tfrecord': _text('fromTFRecord') == 'True',
        'initial_learning_rate': float(_text('initLR')),
        'decay_steps': int(_text('decaySteps')),
        'decay_rate': float(_text('decayRate')),
        'checkpoint_path': str(_text('checkpointPath')),
        'restore': _text('restoreCheckpoint') == 'True',
        'visualization': _text('visualization') == 'True',
    }
    print('==== Train configuration ====')
    for key, value in args.items():
        print('{} = {}'.format(key, value))
    print()
    return args
def _byte_feature(value):
    """Wrap raw bytes (or a scalar bytes tensor) in a tf.train.Feature."""
    if isinstance(value, type(tf.constant(0))):
        # Eager tensor -> plain python bytes before serialization.
        value = value.numpy()
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def _float_feature(value_list):
    """Wrap a list of floats in a tf.train.Feature."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=value_list))


def _int64_feature(value):
    """Wrap a single integer in a tf.train.Feature."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def image2tfrecord(image_path, label_path, output_path):
    """Convert paired image/label files into one TFRecord file per image.

    Images and labels are matched by file name; both must be 3-channel
    arrays of identical shape. The output directory
    ``output_path/tfrecord/`` is recreated from scratch.
    """
    image_names = os.listdir(image_path)
    tfrecord_path = os.path.join(output_path, 'tfrecord/')
    # Start from a clean output directory.
    if os.path.exists(tfrecord_path):
        shutil.rmtree(tfrecord_path)
    os.makedirs(tfrecord_path)
    for im_name in image_names:
        print('Converting {}...'.format(im_name))
        im_name_prefix = os.path.splitext(im_name)[0]
        try:
            image = np.asarray(Image.open(
                os.path.join(image_path, im_name)))
            label = np.asarray(Image.open(
                os.path.join(label_path, im_name)))
            writer = tf.io.TFRecordWriter(
                os.path.join(tfrecord_path, im_name_prefix + '.tfrecord'))
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; best-effort skip of unreadable files is kept.
            print('File {} open error!'.format(im_name))
            continue
        assert image.shape == label.shape and len(image.shape) == 3
        height, width, num_channels = image.shape
        # ndarray.tostring() is deprecated; tobytes() is the identical replacement.
        image = image.tobytes()
        label = label.tobytes()
        example = tf.train.Example(features=tf.train.Features(feature={
            'image': _byte_feature(image),
            'label': _byte_feature(label),
            'height': _int64_feature(height),
            'width': _int64_feature(width),
            'num_channels': _int64_feature(num_channels)
        }))
        writer.write(example.SerializeToString())
        writer.close()
        print('{} converted.'.format(im_name))
def load_dataset(filenames):
    """Build a batched tf.data pipeline from the given TFRecord files."""
    dataset = tf.data.TFRecordDataset(filenames)
    # Serialized example layout as written by image2tfrecord.
    feature_description = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.string),
        'height': tf.io.FixedLenFeature([], tf.int64),
        'width': tf.io.FixedLenFeature([], tf.int64),
        'num_channels': tf.io.FixedLenFeature([], tf.int64),
    }

    def _parse_function(example_proto):
        # Decode one serialized example back into float image/label tensors.
        example = tf.io.parse_single_example(
            example_proto, feature_description)
        image = tf.io.decode_raw(example['image'], tf.uint8)
        label = tf.io.decode_raw(example['label'], tf.uint8)
        height, width, num_channels = example['height'], example['width'], example['num_channels']
        image = tf.reshape(image, [height, width, num_channels])
        label = tf.reshape(label, [height, width, num_channels])
        # Scale uint8 [0, 255] to float32 [-1, 1].
        image = tf.cast(image, tf.float32)
        image = image / 127.5 - 1
        label = tf.cast(label, tf.float32)
        label = label / 127.5 - 1
        example['image'] = image
        example['label'] = label
        return example

    dataset = dataset.map(_parse_function).batch(batch_size)
    return dataset
def generator_loss(disc_generated_output, gen_output, target):
    """Generator objective: fool the discriminator and stay close to the target.

    Returns (total, adversarial, l1) with total = adversarial + LAMBDA * l1.
    """
    adversarial = loss_obj(tf.ones_like(
        disc_generated_output), disc_generated_output)
    l1 = tf.reduce_mean(tf.abs(target - gen_output))
    total = adversarial + (LAMBDA * l1)
    return total, adversarial, l1
def discriminator_loss(disc_real_output, disc_generated_output):
    """Discriminator objective: push real patches toward 1, generated toward 0."""
    loss_on_real = loss_obj(tf.ones_like(disc_real_output), disc_real_output)
    loss_on_fake = loss_obj(tf.zeros_like(
        disc_generated_output), disc_generated_output)
    return loss_on_real + loss_on_fake
def train_loop(args, output_path):
    """Run the full dehazing GAN training loop.

    Builds the pix2pix generator/discriminator pair, optionally restores
    the latest checkpoint, trains for ``args['num_epochs']`` epochs over
    the TFRecord dataset, logs scalar losses to TensorBoard, optionally
    writes per-epoch visualizations, and finally exports the generator
    as a SavedModel.

    Relies on module-level globals referenced but not defined here:
    ``input_path``, ``batch_size``, ``buffer_size``, ``input_height``,
    ``input_width``.

    :param args: dict of hyper-parameters parsed from the XML config.
    :param output_path: directory receiving logs, checkpoints,
        visualizations and the exported model.
    """
    # Load train and val sets
    if not args['from_tfrecord']:
        tfrecord_path = os.path.join(output_path, 'tfrecord')
    else:
        # NOTE(review): uses the module-level `input_path` global (set in
        # __main__) rather than a parameter -- confirm this is intended.
        tfrecord_path = os.path.join(input_path, 'tfrecord')
    train_names = os.listdir(tfrecord_path)
    train_names = [os.path.join(tfrecord_path, name) for name in train_names]
    train_set = load_dataset(train_names)
    # TensorBoard logging setup.
    logdir = os.path.join(output_path, 'logs/')
    file_writer = tf.summary.create_file_writer(logdir + 'metrics')
    file_writer.set_as_default()
    if args['visualization']:
        import matplotlib.pyplot as plt
        vis_path = os.path.join(output_path, 'visualizations')
        if not os.path.exists(vis_path):
            os.makedirs(vis_path)
    # Generator: hazy --> dehazy
    generator = pix2pix.dehaze_generator(
        input_channels=args['input_channels'], estimation_channels=args['output_channels'], norm_type='instancenorm')
    # Discriminator: real or fake
    discriminator = pix2pix.discriminator(
        input_channels=args['input_channels'], norm_type='instancenorm', target=True)
    # Both optimizers share one exponentially decaying LR schedule.
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        args['initial_learning_rate'],
        decay_steps=args['decay_steps'],
        decay_rate=args['decay_rate']
    )
    generator_optimizer = tf.keras.optimizers.Adam(lr_schedule, beta_1=0.5)
    discriminator_optimizer = tf.keras.optimizers.Adam(lr_schedule, beta_1=0.5)
    # Checkpoints and manager
    ckpt = tf.train.Checkpoint(
        generator=generator,
        discriminator=discriminator,
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer,
    )
    ckpt_manager = tf.train.CheckpointManager(
        ckpt, os.path.join(output_path, args['checkpoint_path']), max_to_keep=5)
    # if restoration is enabled and a checkpoint exists, restore the latest checkpoint.
    if args['restore'] and ckpt_manager.latest_checkpoint:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        print('Latest checkpoint restored, from {}'.format(
            ckpt_manager.latest_checkpoint))

    def _random_crop(record):
        # Concatenate hazy/gt along channels so both receive identical
        # spatial augmentation (pad-resize, random crop, random flips).
        hazy, gt = record['image'], record['label']
        images = tf.concat([hazy, gt], axis=-1)
        images = tf.image.resize_with_pad(images, input_height + input_height // 2,
                                          input_width + input_width // 2, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        images = tf.image.random_crop(
            images, [batch_size, input_height, input_width, args['input_channels'] * 2])
        images = tf.image.random_flip_left_right(images)
        images = tf.image.random_flip_up_down(images)
        # Split the stacked channels back into hazy / ground-truth halves.
        hazy, gt = images[..., :args['input_channels']], images[...,
                                                                args['input_channels']:]
        record['image'], record['label'] = hazy, gt
        return record

    def _est_ale(hazy):
        # Estimate the atmospheric light (ALE) as the pixel with the
        # maximum HSV "value"; hazy is in [-1, 1], hence the rescale to
        # [0, 1] before the RGB->HSV conversion.
        hazy_shape = tf.shape(hazy)
        hsv = tf.image.rgb_to_hsv(hazy * 0.5 + 0.5)
        hsv_shape = tf.shape(hsv)  # NOTE(review): computed but unused
        hsv = tf.reshape(hsv[..., 2], [-1])
        idx = tf.argmax(hsv)
        ale = tf.reshape(hazy, [-1, hazy_shape[-1]])[idx]
        # Expand to a broadcastable (1, 1, 1, channels) tensor.
        ale = ale[tf.newaxis, tf.newaxis, tf.newaxis, ...]
        return ale

    @tf.function
    def _train_step(hazy, gt, ale):
        # One optimization step for both networks; persistent tape
        # because two separate gradients are taken from it.
        with tf.GradientTape(persistent=True) as tape:
            dehazy, rtme, dehazy0, tme = generator([hazy, ale], training=True)
            disc_real_output = discriminator([hazy, gt], training=True)
            disc_generated_output = discriminator(
                [hazy, dehazy], training=True)
            gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(
                disc_generated_output, dehazy, gt)
            disc_loss = discriminator_loss(
                disc_real_output, disc_generated_output)
        generator_gradients = tape.gradient(
            gen_total_loss, generator.trainable_variables)
        discriminator_gradients = tape.gradient(
            disc_loss, discriminator.trainable_variables)
        generator_optimizer.apply_gradients(
            zip(generator_gradients, generator.trainable_variables))
        discriminator_optimizer.apply_gradients(
            zip(discriminator_gradients, discriminator.trainable_variables))
        return gen_total_loss, gen_gan_loss, gen_l1_loss, disc_loss

    with file_writer.as_default():
        step = 0
        for epoch in range(args['num_epochs']):
            start = time.time()
            # Fresh shuffled, augmented view of the dataset each epoch.
            train_set_ = train_set.map(_random_crop).shuffle(
                buffer_size=buffer_size)
            for record in train_set_:
                hazy, gt = record['image'], record['label']
                ale = _est_ale(hazy)
                gen_total_loss, gen_gan_loss, gen_l1_loss, disc_loss = _train_step(
                    hazy, gt, ale)
                step += 1
                tf.summary.scalar('gen_total_loss', gen_total_loss, step=step)
                tf.summary.scalar('gen_gan_loss', gen_gan_loss, step=step)
                tf.summary.scalar('gen_l1_loss', gen_l1_loss, step=step)
                tf.summary.scalar('disc_loss', disc_loss, step=step)
                print('Step {}, gen_total_loss: {}, gen_gan_loss: {}, gen_l1_loss: {}, disc_loss: {}'.format(
                    step, gen_total_loss.numpy(), gen_gan_loss.numpy(), gen_l1_loss.numpy(), disc_loss.numpy()))
            print('Time taken for epoch {} is {} sec'.format(
                epoch + 1, time.time() - start))
            ckpt_save_path = ckpt_manager.save()
            print('Checkpoint for epoch {} saved at {}'.format(
                epoch + 1, ckpt_save_path))
            # Train visualization: dump raw arrays plus a 2x4 panel figure
            # for one sample of the current epoch.
            if args['visualization']:
                for record in train_set_.take(1):
                    hazy, gt = record['image'], record['label']
                    ale = _est_ale(hazy)
                    dehazy, rtme, dehazy0, tme = generator(
                        [hazy, ale], training=False)
                    hazy, gt, ale = hazy.numpy()[0], gt.numpy()[
                        0], ale.numpy()[0]
                    dehazy, rtme, dehazy0, tme = dehazy.numpy()[0], rtme.numpy()[
                        0], dehazy0.numpy()[0], tme.numpy()[0]
                    np.savez(os.path.join(
                        vis_path, 'epoch_{}.npz'.format(epoch)),
                        hazy=hazy,
                        gt=gt,
                        ale=ale,
                        dehazy=dehazy,
                        rtme=rtme,
                        dehazy0=dehazy0,
                        tme=tme)
                    # Rescale from [-1, 1] to [0, 1] for display.
                    hazy, gt = hazy / 2 + 0.5, gt / 2 + 0.5
                    dehazy, dehazy0 = dehazy.clip(-1, 1) / \
                        2 + 0.5, dehazy0.clip(-1, 1) / 2 + 0.5
                    ale = np.broadcast_to(ale, hazy.shape)
                    rtme, tme = np.broadcast_to(
                        rtme, hazy.shape), np.broadcast_to(tme, hazy.shape)
                    # Min-max normalize the estimation maps for display.
                    rtme = (rtme - rtme.min()) / (rtme.max() - rtme.min())
                    tme = (tme - tme.min()) / (tme.max() - tme.min())
                    imgs = [hazy, gt, ale, np.ones_like(hazy),
                            dehazy, rtme, dehazy0, tme]
                    # NOTE(review): panel 7 shows dehazy0 but is titled
                    # 'Hazy' -- confirm whether 'Dehazy0' was intended.
                    titles = ['Hazy', 'GT', 'ALE', '',
                              'Dehazy', 'RTME', 'Hazy', 'TME']
                    row, col = 2, 4
                    plt.figure(figsize=(4 * col, 4 * row))
                    for i in range(len(imgs)):
                        plt.subplot(row, col, i + 1)
                        plt.title(titles[i])
                        plt.imshow(imgs[i])
                    plt.savefig(os.path.join(
                        vis_path, 'epoch_{}.png'.format(epoch)))
                    plt.close()
    # Export the trained generator for serving.
    tf.saved_model.save(generator, os.path.join(output_path, 'generator_g/1/'))
if __name__ == "__main__":
    # Usage: python train.py <input_path> <output_path>
    try:
        input_path = sys.argv[1]
        output_path = sys.argv[2]
        train_args = readXML(os.path.join(input_path, 'train.xml'))
        # Convert raw images to TFRecords unless they are already provided.
        if not train_args['from_tfrecord']:
            image_path = os.path.join(input_path, 'cloud')
            label_path = os.path.join(input_path, 'label')
            image2tfrecord(image_path, label_path, output_path)
        train_loop(train_args, output_path)
    except Exception:
        # Previously only the exception message was printed and the process
        # exited with status 0, hiding failures (and their location) from
        # callers/schedulers.  Print the full traceback and exit non-zero.
        import traceback
        traceback.print_exc()
        sys.exit(1)
|
{"hexsha": "046518765c4cf4eda8a571f5d9fef161c040110d", "size": 15052, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "92xianshen/guided-pix2pix", "max_stars_repo_head_hexsha": "2929bb526db59e6eda25c8ac3c78266a15d4cd25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train.py", "max_issues_repo_name": "92xianshen/guided-pix2pix", "max_issues_repo_head_hexsha": "2929bb526db59e6eda25c8ac3c78266a15d4cd25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "92xianshen/guided-pix2pix", "max_forks_repo_head_hexsha": "2929bb526db59e6eda25c8ac3c78266a15d4cd25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-07T11:45:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-07T11:45:56.000Z", "avg_line_length": 38.0101010101, "max_line_length": 120, "alphanum_fraction": 0.6196518735, "include": true, "reason": "import numpy", "num_tokens": 3424}
|
import tensorflow as tf
import numpy as np
import random
import cv2
# Per-channel pixel statistics.  PIXEL_MEANS is expressed on the 0-255
# scale; PIXEL_STDV is already on the 0-1 scale.
PIXEL_MEANS = np.array([[[122.7717, 115.9465, 102.9801]]])
PIXEL_STDV = [[[0.229, 0.224, 0.2254]]]
def normlize(image, mean=PIXEL_MEANS):
    """Standardize an image: subtract the per-channel means (rescaled to
    the 0-1 range) and divide by the per-channel standard deviations.

    Assumes the image values are already scaled to [0, 1] -- consistent
    with the convert_image_dtype call performed upstream.
    """
    scaled_mean = mean / 255.0
    centered = image - scaled_mean
    return centered / PIXEL_STDV
def flip_left_right(image, boxes, labels):
    """Mirror the image horizontally and remap box x-coordinates.

    Boxes are [xmin, ymin, xmax, ymax] in absolute pixels; labels pass
    through unchanged.
    """
    width = tf.cast(tf.shape(image)[1], tf.float32)
    flipped = tf.image.flip_left_right(image)
    # Reflect x about the vertical axis: the old xmax becomes the new
    # xmin and vice versa.
    new_xmin = width - boxes[:, 2]
    new_xmax = width - boxes[:, 0]
    remapped = tf.stack([new_xmin, boxes[:, 1], new_xmax, boxes[:, 3]], axis=-1)
    return flipped, remapped, labels
def flip_down_up(image, boxes, labels):
    """Mirror the image vertically and remap box y-coordinates.

    Boxes are [xmin, ymin, xmax, ymax] in absolute pixels; labels pass
    through unchanged.
    """
    height = tf.cast(tf.shape(image)[0], tf.float32)
    flipped = tf.image.flip_up_down(image)
    # Reflect y about the horizontal axis: the old ymax becomes the new
    # ymin and vice versa.
    new_ymin = height - boxes[:, 3]
    new_ymax = height - boxes[:, 1]
    remapped = tf.stack([boxes[:, 0], new_ymin, boxes[:, 2], new_ymax], axis=-1)
    return flipped, remapped, labels
def distort_color(image, boxes, labels):
    """Apply brightness/saturation/hue/contrast jitter in a random order.

    Boxes and labels pass through unchanged.
    """
    def nothing(image):
        # Identity branch for tf.cond.
        return image
    # Shuffle the order in which the four colour ops are applied; each
    # loop iteration applies exactly the op whose id equals sequence[i].
    sequence = [0, 1, 2, 3]
    sequence = tf.random_shuffle(sequence)
    for i in range(4):
        image = tf.cond(tf.equal(sequence[i], 0), lambda: tf.image.random_brightness(image, max_delta=32./255), lambda: nothing(image))
        image = tf.cond(tf.equal(sequence[i], 1), lambda: tf.image.random_saturation(image, lower=0.8, upper=1.2), lambda: nothing(image))
        image = tf.cond(tf.equal(sequence[i], 2), lambda: tf.image.random_hue(image, max_delta=0.2), lambda: nothing(image))
        image = tf.cond(tf.equal(sequence[i], 3), lambda: tf.image.random_contrast(image, lower=0.8, upper=1.2), lambda: nothing(image))
    return image, boxes, labels
def crop(image, boxes, labels, min_object_covered=0.5, aspect_ratio_range=[0.5, 2.0], area_range=[0.3, 1.0]):
    """Randomly crop the image via tf.image.sample_distorted_bounding_box
    and translate/clip the boxes into the crop's coordinate frame.

    NOTE(review): the mutable list defaults are shared across calls; they
    are only read here so it is harmless, but tuples would be safer.
    """
    h, w = tf.cast(tf.shape(image)[0], tf.float32), tf.cast(tf.shape(image)[1], tf.float32)
    xmin, ymin, xmax, ymax = tf.unstack(boxes, axis=1)
    # sample_distorted_bounding_box expects normalized [ymin, xmin, ymax, xmax].
    bboxes = tf.stack([ymin/h, xmin/w, ymax/h, xmax/w], axis=1)
    bboxes = tf.clip_by_value(bboxes, 0, 1)
    begin, size, dist_boxes = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        bounding_boxes=tf.expand_dims(bboxes, axis=0),
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=50)
    # NOTE dist_boxes with shape: [ymin, xmin, ymax, xmax] and in values in range(0, 1)
    # Employ the bounding box to distort the image.
    # Crop rectangle in absolute pixels: [xmin, ymin, xmax, ymax].
    croped_box = [dist_boxes[0,0,1]*w, dist_boxes[0,0,0]*h, dist_boxes[0,0,3]*w, dist_boxes[0,0,2]*h]
    # Clip every box to the crop rectangle, then shift into crop-local coords.
    croped_xmin = tf.clip_by_value(xmin, croped_box[0], croped_box[2])-croped_box[0]
    croped_ymin = tf.clip_by_value(ymin, croped_box[1], croped_box[3])-croped_box[1]
    croped_xmax = tf.clip_by_value(xmax, croped_box[0], croped_box[2])-croped_box[0]
    croped_ymax = tf.clip_by_value(ymax, croped_box[1], croped_box[3])-croped_box[1]
    image = tf.slice(image, begin, size)
    boxes = tf.stack([croped_xmin, croped_ymin, croped_xmax, croped_ymax], axis=1)
    return image, boxes, labels
def resize_image_and_correct_boxes(image, boxes, labels, image_size):
    """Letterbox-resize the image to ``image_size`` (aspect ratio kept,
    remainder padded) and rescale/translate the boxes to match.

    Boxes that collapse to zero width or height are dropped together
    with their labels.

    NOTE(review): input boxes are [xmin, ymin, xmax, ymax], but the
    result is stacked as [ymin, xmin, ymax, xmax] -- confirm downstream
    consumers expect this order.
    """
    origin_image_size = tf.cast(tf.shape(image)[0:2], tf.float32)
    # Choose the scaled size so the relatively longer side exactly fits.
    def w_long():
        new_w = image_size[1]
        new_h = tf.cast(origin_image_size[0] / origin_image_size[1] * image_size[1], tf.int32)
        return [new_h, new_w]
    def h_long():
        new_h = image_size[0]
        new_w = tf.cast(origin_image_size[1] / origin_image_size[0] * image_size[0], tf.int32)
        return [new_h, new_w]
    new_size = tf.cond(tf.less(origin_image_size[0] / image_size[0], origin_image_size[1] / image_size[1]),
                       w_long, h_long)
    image = tf.image.resize_images(image, new_size)
    # Center the resized image inside the target canvas.
    offset_h = tf.cast((image_size[0] - new_size[0]) / 2, tf.int32)
    offset_w = tf.cast((image_size[1] - new_size[1]) / 2, tf.int32)
    image = tf.image.pad_to_bounding_box(image, offset_h, offset_w, image_size[0], image_size[1])
    # correct the boxes: normalize by original size, rescale to the new
    # size, then shift by the padding offsets.
    xmin = tf.clip_by_value(boxes[:, 0] / origin_image_size[1], 0.0, 1.0) * tf.cast(new_size[1], tf.float32) + tf.cast(offset_w, tf.float32)
    ymin = tf.clip_by_value(boxes[:, 1] / origin_image_size[0], 0.0, 1.0) * tf.cast(new_size[0], tf.float32) + tf.cast(offset_h, tf.float32)
    xmax = tf.clip_by_value(boxes[:, 2] / origin_image_size[1], 0.0, 1.0) * tf.cast(new_size[1], tf.float32) + tf.cast(offset_w, tf.float32)
    ymax = tf.clip_by_value(boxes[:, 3] / origin_image_size[0], 0.0, 1.0) * tf.cast(new_size[0], tf.float32) + tf.cast(offset_h, tf.float32)
    # if the object is not in the dist_box, just remove it
    mask = tf.logical_not(tf.logical_or(tf.equal(xmin, xmax), tf.equal(ymin, ymax)))
    xmin = tf.boolean_mask(xmin, mask)
    ymin = tf.boolean_mask(ymin, mask)
    xmax = tf.boolean_mask(xmax, mask)
    ymax = tf.boolean_mask(ymax, mask)
    labels = tf.boolean_mask(labels, mask)
    boxes = tf.stack([ymin, xmin, ymax, xmax], axis=-1)
    return image, boxes, labels
def data_augmentation(image, boxes, labels):
    """Randomly flip the image left-right and up-down (each with
    probability 1/2), keeping boxes consistent.  Colour jitter and
    cropping are currently disabled (commented out below).
    """
    def nothing(image, boxes, labels):
        # Identity branch for tf.cond.
        return image, boxes, labels
    # tf.cast(uniform * 2, int64) == 0 holds with probability 1/2.
    image, boxes, labels = tf.cond(tf.equal(tf.cast(tf.random_uniform(shape=[]) * 2, tf.int64), 0), lambda: flip_left_right(image, boxes, labels), lambda: nothing(image, boxes, labels))
    image, boxes, labels = tf.cond(tf.equal(tf.cast(tf.random_uniform(shape=[]) * 2, tf.int64), 0), lambda: flip_down_up(image, boxes, labels), lambda: nothing(image, boxes, labels))
    # image, boxes, labels = tf.cond(tf.equal(tf.cast(tf.random_uniform(shape=[]) * 2, tf.int64), 0), lambda: distort_color(image, boxes, labels), lambda: nothing(image, boxes, labels))
    # image, boxes, labels = crop(image, boxes, labels)
    return image, boxes, labels
def preprocess(image, boxes, labels, image_size, mode):
    """Full preprocessing pipeline: optional augmentation (train mode),
    normalization, letterbox resize, and padding of boxes/labels to a
    fixed count so batches have uniform shape.

    :raises ValueError: if the image is not rank-3 (H, W, C).
    """
    if len(image.get_shape().as_list()) != 3:
        raise ValueError('Input image must have 3 shapes H W C')
    if image.dtype != tf.float32:
        # convert_image_dtype also rescales integer images to [0, 1].
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # data augmentation for train data
    if mode == 'train':
        image, boxes, labels = data_augmentation(image, boxes, labels)
    image = normlize(image)
    image, boxes, labels = resize_image_and_correct_boxes(image, boxes, labels, image_size)
    # Zero-pad the boxes and labels up to a fixed 60 entries
    # (previous comment said 20, but the code pads to 60).
    pad_num = 60 - tf.shape(boxes)[0]
    boxes = tf.pad(boxes, [[0, pad_num], [0, 0]], "CONSTANT")
    labels = tf.pad(labels, [[0, pad_num]], "CONSTANT")
    return image, boxes, labels
def parser(serialized_example, image_size, mode):
    """Parse one serialized TF-Example and run it through preprocess().

    The record stores a JPEG-encoded image plus boxes/labels as raw
    float32/int64 byte strings.
    """
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image' : tf.FixedLenFeature([], dtype = tf.string),
            'boxes' : tf.FixedLenFeature([], dtype = tf.string),
            'labels' : tf.FixedLenFeature([], dtype = tf.string),
        })
    image = features['image']
    boxes = features['boxes']
    labels = features['labels']
    # Decode the JPEG bytes and keep uint8 (preprocess converts to float).
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.convert_image_dtype(image, dtype=tf.uint8)
    boxes = tf.decode_raw(boxes, tf.float32)
    boxes = tf.reshape(boxes, shape=[-1, 4])
    labels = tf.decode_raw(labels, tf.int64)
    labels = tf.reshape(labels, shape=[-1])
    return preprocess(image, boxes, labels, image_size, mode)
|
{"hexsha": "c70e17a51a2658207702d85d3414942bac2d2ba4", "size": 7594, "ext": "py", "lang": "Python", "max_stars_repo_path": "retinanet/utils/data_utils.py", "max_stars_repo_name": "bharatmahaur/ComparativeStudy", "max_stars_repo_head_hexsha": "2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-09-26T07:19:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T23:25:36.000Z", "max_issues_repo_path": "retinanet/utils/data_utils.py", "max_issues_repo_name": "bharatmahaur/ComparativeStudy", "max_issues_repo_head_hexsha": "2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "retinanet/utils/data_utils.py", "max_forks_repo_name": "bharatmahaur/ComparativeStudy", "max_forks_repo_head_hexsha": "2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4093567251, "max_line_length": 185, "alphanum_fraction": 0.6436660521, "include": true, "reason": "import numpy", "num_tokens": 2178}
|
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import numpy as np
from numpy import *
import argparse
from PIL import Image
import imageio
import os
from tqdm import tqdm
from utils.metrices import *
from utils import render
from utils.saver import Saver
from utils.iou import IoU
from data.imagenet import Imagenet_Segmentation
from ViT_explanation_generator import Baselines, LRP
from ViT_new import vit_large_patch16_224
from ViT_LRP import vit_large_patch16_224 as vit_LRP
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
import torch.nn.functional as F
plt.switch_backend('agg')
# hyperparameters
# NOTE(review): the DataLoader below hard-codes num_workers=1 and does not
# use this setting -- confirm which value is intended.
num_workers = 0
batch_size = 1  # batch size used by the DataLoader below
# Class names kept for reference (index order matters).
# NOTE(review): 'motobike' is presumably a misspelling of 'motorbike';
# left unchanged in case the string is matched elsewhere.
cls = ['airplane',
       'bicycle',
       'bird',
       'boat',
       'bottle',
       'bus',
       'car',
       'cat',
       'chair',
       'cow',
       'dining table',
       'dog',
       'horse',
       'motobike',
       'person',
       'potted plant',
       'sheep',
       'sofa',
       'train',
       'tv'
       ]
# Args
parser = argparse.ArgumentParser(description='Training multi-class classifier')
parser.add_argument('--arc', type=str, default='vgg', metavar='N',
                    help='Model architecture')
parser.add_argument('--train_dataset', type=str, default='imagenet', metavar='N',
                    help='Testing Dataset')
# Explanation method used to produce the relevance maps.
parser.add_argument('--method', type=str,
                    default='transformer_attribution',
                    choices=[ 'rollout', 'lrp','transformer_attribution', 'full_lrp', 'lrp_last_layer',
                             'attn_last_layer', 'attn_gradcam'],
                    help='')
parser.add_argument('--thr', type=float, default=0.,
                    help='threshold')
parser.add_argument('--K', type=int, default=1,
                    help='new - top K results')
# NOTE(review): --save-img defaults to True, so this store_true flag can
# never change the value; refactor if opting out should be possible.
parser.add_argument('--save-img', action='store_true',
                    default=True,
                    help='')
# Ablation switches (semantics not documented in this file).
parser.add_argument('--no-ia', action='store_true',
                    default=False,
                    help='')
parser.add_argument('--no-fx', action='store_true',
                    default=False,
                    help='')
parser.add_argument('--no-fgx', action='store_true',
                    default=False,
                    help='')
parser.add_argument('--no-m', action='store_true',
                    default=False,
                    help='')
parser.add_argument('--no-reg', action='store_true',
                    default=False,
                    help='')
# NOTE(review): type=bool on a CLI flag is misleading -- any non-empty
# string (including "False") parses as True.
parser.add_argument('--is-ablation', type=bool,
                    default=False,
                    help='')
args = parser.parse_args()
args.checkname = args.method + '_' + args.arc
alpha = 2
cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")
# Define Saver (manages the experiment output directory tree).
# NOTE(review): original comment said Saver was edited to add Desktop to its path.
saver = Saver(args)
saver.results_dir = os.path.join(saver.experiment_dir, 'results')
# Create the results directory tree: raw inputs, explanation images and
# numpy dumps of the explanation maps.
if not os.path.exists(saver.results_dir):
    os.makedirs(saver.results_dir)
if not os.path.exists(os.path.join(saver.results_dir, 'input')):
    os.makedirs(os.path.join(saver.results_dir, 'input'))
if not os.path.exists(os.path.join(saver.results_dir, 'explain')):
    os.makedirs(os.path.join(saver.results_dir, 'explain'))
args.exp_img_path = os.path.join(saver.results_dir, 'explain/img')
if not os.path.exists(args.exp_img_path):
    os.makedirs(args.exp_img_path)
args.exp_np_path = os.path.join(saver.results_dir, 'explain/np')
if not os.path.exists(args.exp_np_path):
    os.makedirs(args.exp_np_path)
# Data: 224x224 inputs normalized to [-1, 1].
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
test_img_trans = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    normalize,
])
test_lbl_trans = transforms.Compose([
    # NEAREST avoids interpolation blending of the integer label mask values.
    transforms.Resize((224, 224), Image.NEAREST),
])
ROOT_DIR = "/home/t-akarthik/PycharmProjects2/ImageNet_Data/"
OUTPUT_DIR = ROOT_DIR + "/output/Segmentation/ViT_Large"
imagenet_seg_path = ROOT_DIR + '/gtsegs_ijcv (1).mat'
ds = Imagenet_Segmentation(imagenet_seg_path,
                           transform=test_img_trans, target_transform=test_lbl_trans)
dl = DataLoader(ds, batch_size=batch_size, shuffle=False, num_workers=1, drop_last=False)
# Model used for the forward pass / baseline methods.
model = vit_large_patch16_224(pretrained=True).cuda()
baselines = Baselines(model)
# LRP-capable copy of the model for relevance propagation.
model_LRP = vit_LRP(pretrained=True).cuda()
model_LRP.eval()
lrp = LRP(model_LRP)
metric = IoU(2, ignore_index=-1)
iterator = tqdm(dl)
model.eval()
def compute_pred(output):
    """One-hot encode the classifier's top-1 prediction.

    :param output: logits/scores of shape (batch, 1000).
    :return: float CUDA tensor, 1.0 at the predicted class index and 0.0
        elsewhere (shape (1, 1000) for this script's batch_size=1 setup;
        behaviour for larger batches is untested -- TODO confirm).
    """
    pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
    # pred[0, 0] = 282
    # print('Pred cls : ' + str(pred))
    T = pred.squeeze().cpu().numpy()
    T = np.expand_dims(T, 0)
    # Broadcast-compare against all 1000 class ids -> one-hot row(s).
    T = (T[:, np.newaxis] == np.arange(1000)) * 1.0
    T = torch.from_numpy(T).type(torch.FloatTensor)
    Tt = T.cuda()
    return Tt
def eval_batch(image, labels, evaluator, index):
    """Evaluate the segmentation quality of a relevance map for one batch.

    Generates a transformer-attribution relevance map, thresholds it at
    its mean into foreground/background, optionally saves input / mask /
    heatmap images, and computes pixel accuracy, intersection/union, AP
    and F1 against the ground-truth mask.

    Relies on module globals: args, saver, lrp, batch_size.
    NOTE(review): ``Res`` is only assigned when args.method is
    'transformer_attribution'; other method values would raise NameError.

    :return: (correct, labeled, inter, union, ap, f1, pred, target).
    """
    evaluator.zero_grad()
    # Save input image and its ground-truth mask for visual inspection.
    if args.save_img:
        img = image[0].permute(1, 2, 0).data.cpu().numpy()
        img = 255 * (img - img.min()) / (img.max() - img.min())
        img = img.astype('uint8')
        Image.fromarray(img, 'RGB').save(os.path.join(saver.results_dir, 'input/{}_input.png'.format(index)))
        Image.fromarray((labels.repeat(3, 1, 1).permute(1, 2, 0).data.cpu().numpy() * 255).astype('uint8'), 'RGB').save(
            os.path.join(saver.results_dir, 'input/{}_mask.png'.format(index)))
    image.requires_grad = True
    image = image.requires_grad_()
    predictions = evaluator(image)
    # segmentation test for our method
    if args.method == 'transformer_attribution':
        Res = lrp.generate_LRP(image.cuda(), start_layer=1, method="transformer_attribution").reshape(batch_size, 1, 14, 14)
    if args.method != 'full_lrp':
        # interpolate the 14x14 token map to full image size (224, 224)
        Res = torch.nn.functional.interpolate(Res, scale_factor=16, mode='bilinear').cuda()
    # threshold between FG and BG is the mean of the normalized map
    Res = (Res - Res.min()) / (Res.max() - Res.min())
    ret = Res.mean()
    Res_1 = Res.gt(ret).type(Res.type())
    Res_0 = Res.le(ret).type(Res.type())
    Res_1_AP = Res
    Res_0_AP = 1-Res
    # x != x is True only for NaN: zero out NaNs in all four maps.
    Res_1[Res_1 != Res_1] = 0
    Res_0[Res_0 != Res_0] = 0
    Res_1_AP[Res_1_AP != Res_1_AP] = 0
    Res_0_AP[Res_0_AP != Res_0_AP] = 0
    # TEST
    pred = Res.clamp(min=args.thr) / Res.max()
    pred = pred.view(-1).data.cpu().numpy()
    target = labels.view(-1).data.cpu().numpy()
    # print("target", target.shape)
    # Two-channel outputs: background map then foreground map.
    output = torch.cat((Res_0, Res_1), 1)
    output_AP = torch.cat((Res_0_AP, Res_1_AP), 1)
    if args.save_img:
        # Save predicted mask (downsampled to 64x64 for viewing)
        mask = F.interpolate(Res_1, [64, 64], mode='bilinear')
        mask = mask[0].squeeze().data.cpu().numpy()
        # mask = Res_1[0].squeeze().data.cpu().numpy()
        mask = 255 * mask
        mask = mask.astype('uint8')
        imageio.imsave(os.path.join(args.exp_img_path, 'mask_' + str(index) + '.jpg'), mask)
        relevance = F.interpolate(Res, [64, 64], mode='bilinear')
        relevance = relevance[0].permute(1, 2, 0).data.cpu().numpy()
        # relevance = Res[0].permute(1, 2, 0).data.cpu().numpy()
        hm = np.sum(relevance, axis=-1)
        maps = (render.hm_to_rgb(hm, scaling=3, sigma=1, cmap='seismic') * 255).astype(np.uint8)
        imageio.imsave(os.path.join(args.exp_img_path, 'heatmap_' + str(index) + '.jpg'), maps)
    # Evaluate Segmentation
    batch_inter, batch_union, batch_correct, batch_label = 0, 0, 0, 0
    batch_ap, batch_f1 = 0, 0
    # Segmentation results
    print('Calculating...')
    correct, labeled = batch_pix_accuracy(output[0].data.cpu(), labels[0])
    inter, union = batch_intersection_union(output[0].data.cpu(), labels[0], 2)
    batch_correct += correct
    batch_label += labeled
    batch_inter += inter
    batch_union += union
    # print("output", output.shape)
    # print("ap labels", labels.shape)
    # ap = np.nan_to_num(get_ap_scores(output, labels))
    ap = np.nan_to_num(get_ap_scores(output_AP, labels))
    f1 = np.nan_to_num(get_f1_scores(output[0, 1].data.cpu(), labels[0]))
    batch_ap += ap
    batch_f1 += f1
    return batch_correct, batch_label, batch_inter, batch_union, batch_ap, batch_f1, pred, target
# Accumulators over the whole dataset.
total_inter, total_union, total_correct, total_label = np.int64(0), np.int64(0), np.int64(0), np.int64(0)
total_ap, total_f1 = [], []
predictions, targets = [], []
print('Evaluating...')
for batch_idx, (image, labels) in enumerate(iterator):
    if args.method == "blur":
        images = (image[0].cuda(), image[1].cuda())
    else:
        images = image.cuda()
    labels = labels.cuda()
    # print("image", image.shape)
    # print("lables", labels.shape)
    correct, labeled, inter, union, ap, f1, pred, target = eval_batch(images, labels, model, batch_idx)
    predictions.append(pred)
    targets.append(target)
    total_correct += correct.astype('int64')
    total_label += labeled.astype('int64')
    total_inter += inter.astype('int64')
    total_union += union.astype('int64')
    total_ap += [ap]
    total_f1 += [f1]
    # Running metrics; np.spacing avoids division by zero.
    pixAcc = np.float64(1.0) * total_correct / (np.spacing(1, dtype=np.float64) + total_label)
    # NOTE(review): this rebinding shadows the IoU class imported from utils.iou.
    IoU = np.float64(1.0) * total_inter / (np.spacing(1, dtype=np.float64) + total_union)
    mIoU = IoU.mean()
    mAp = np.mean(total_ap)
    mF1 = np.mean(total_f1)
    iterator.set_description('pixAcc: %.4f, mIoU: %.4f, mAP: %.4f, mF1: %.4f' % (pixAcc, mIoU, mAp, mF1))
predictions = np.concatenate(predictions)
targets = np.concatenate(targets)
print(predictions)
print(predictions.shape)
print(targets)
print(targets.shape)
# Precision-recall curve of raw relevance scores against pixel labels.
pr, rc, thr = precision_recall_curve(targets, predictions)
np.save(os.path.join(saver.experiment_dir, 'precision.npy'), pr)
np.save(os.path.join(saver.experiment_dir, 'recall.npy'), rc)
plt.figure()
plt.plot(rc, pr)
plt.savefig(os.path.join(saver.experiment_dir, 'PR_curve_{}.png'.format(args.method)))
# Write the final metrics both to stdout and to a results text file.
txtfile = os.path.join(saver.experiment_dir, 'result_mIoU_%.4f.txt' % mIoU)
# txtfile = 'result_mIoU_%.4f.txt' % mIoU
fh = open(txtfile, 'w')
print("Mean IoU over %d classes: %.4f\n" % (2, mIoU))
print("Pixel-wise Accuracy: %2.2f%%\n" % (pixAcc * 100))
print("Mean AP over %d classes: %.4f\n" % (2, mAp))
print("Mean F1 over %d classes: %.4f\n" % (2, mF1))
fh.write("Mean IoU over %d classes: %.4f\n" % (2, mIoU))
fh.write("Pixel-wise Accuracy: %2.2f%%\n" % (pixAcc * 100))
fh.write("Mean AP over %d classes: %.4f\n" % (2, mAp))
fh.write("Mean F1 over %d classes: %.4f\n" % (2, mF1))
fh.close()
|
{"hexsha": "88107192d7eeb4ede62c763571679e37b48d54ef", "size": 10651, "ext": "py", "lang": "Python", "max_stars_repo_path": "philly_exp/ViT/imagenet_seg_eval_dino_gcr.py", "max_stars_repo_name": "ananyak100-dev/Transformer-Explainability", "max_stars_repo_head_hexsha": "75b5f34276f9a840f98df8a87c3387fa55147acf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "philly_exp/ViT/imagenet_seg_eval_dino_gcr.py", "max_issues_repo_name": "ananyak100-dev/Transformer-Explainability", "max_issues_repo_head_hexsha": "75b5f34276f9a840f98df8a87c3387fa55147acf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "philly_exp/ViT/imagenet_seg_eval_dino_gcr.py", "max_forks_repo_name": "ananyak100-dev/Transformer-Explainability", "max_forks_repo_head_hexsha": "75b5f34276f9a840f98df8a87c3387fa55147acf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3887147335, "max_line_length": 124, "alphanum_fraction": 0.6390949207, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2929}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Jan 29, 2021
@file: transforms.py
@desc: Module containing all the transformations that can be done on a datasets.
@author: laugh12321
@contact: laugh12321@vip.qq.com
"""
import abc
import numpy as np
from typing import List, Dict
from src.model import enums
from src.model.models import rnn_supervised, pixel_based_cnn, \
pixel_based_fnnc, pixel_based_dacn
class BaseTransform(abc.ABC):
    """Common interface for dataset transformations.

    Concrete subclasses implement ``__call__``, accepting the samples and
    labels and returning their transformed counterparts.
    """

    @abc.abstractmethod
    def __call__(self, *args, **kwargs):
        """Apply the transformation.

        :param args: Arbitrary list of arguments.
        :param kwargs: Arbitrary dictionary of arguments.
        """
class SpectralTransform(BaseTransform):
    """Turn pixel spectra into 1D samples with a trailing channel axis."""

    def __init__(self, **kwargs):
        """Initializer of the spectral transformation."""
        super().__init__()

    def __call__(self, samples: np.ndarray,
                 labels: np.ndarray) -> List[np.ndarray]:
        """Cast samples to float32 and append a channel dimension.

        Only the spectral features are present for each sample.

        :param samples: Input samples that will undergo transformation.
        :param labels: Class value for each sample.
        :return: List of [transformed samples, unchanged labels].
        """
        spectra = samples.astype(np.float32)
        return [np.expand_dims(spectra, -1), labels]
class MinMaxNormalize(BaseTransform):
    """Linearly rescale sample features given global min/max values."""

    def __init__(self, min_: float, max_: float):
        """
        :param min_: Minimum value of features.
        :param max_: Maximum value of features.
        """
        super().__init__()
        self.min_ = min_
        self.max_ = max_

    def __call__(self, samples: np.ndarray, labels: np.ndarray) -> List[
            np.ndarray]:
        """Apply (x - min) / (max - min) to samples; labels pass through.

        :param samples: Input samples that will undergo normalization.
        :param labels: Class values for each sample.
        :return: List of [normalized samples, unchanged labels].
        """
        span = self.max_ - self.min_
        return [(samples - self.min_) / span, labels]
def apply_transformations(data: Dict,
transformations: List[BaseTransform]) -> Dict:
"""
Apply each transformation from provided list
:param data: Dictionary with 'data' and 'labels' keys holding np.ndarrays
:param transformations: List of transformations
:return: Transformed data, in the same format as input
"""
for transformation in transformations:
data[enums.Dataset.DATA], data[enums.Dataset.LABELS] = transformation(
data[enums.Dataset.DATA], data[enums.Dataset.LABELS])
return data
# Registry mapping each unmixing model module name to the transformation
# pipeline applied to its input data (all currently use SpectralTransform).
UNMIXING_TRANSFORMS = {
    rnn_supervised.__name__: [SpectralTransform],
    pixel_based_cnn.__name__: [SpectralTransform],
    pixel_based_fnnc.__name__: [SpectralTransform],
    pixel_based_dacn.__name__: [SpectralTransform]
}
|
{"hexsha": "733e32d1619bce7c68f4f16b9dc3650173130070", "size": 3022, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils/transforms.py", "max_stars_repo_name": "laugh12321/Hyperspectral-Unmixing", "max_stars_repo_head_hexsha": "a75b3b4d4ea2b39f6181848c12179b662e88e103", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-12T09:26:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T09:26:50.000Z", "max_issues_repo_path": "src/utils/transforms.py", "max_issues_repo_name": "laugh12321/DACN", "max_issues_repo_head_hexsha": "0314bb4ef8f22ab85e8e739f02ccd86564af4d88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils/transforms.py", "max_forks_repo_name": "laugh12321/DACN", "max_forks_repo_head_hexsha": "0314bb4ef8f22ab85e8e739f02ccd86564af4d88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-11T22:12:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T05:39:16.000Z", "avg_line_length": 30.8367346939, "max_line_length": 80, "alphanum_fraction": 0.6628060887, "include": true, "reason": "import numpy", "num_tokens": 660}
|
### Illustrates the piece-wise linear approximation of the cumulative distribution using constant size bins
fade = rgb(0,0,0,alpha=0.5)   # semi-transparent black for the data dots
dot.size = 0.7
n = 10000
set.seed(5)                   # fixed seed so the figure is reproducible
pdf("linear-interpolation.pdf", width=6, height=2.7, pointsize=10)
layout(matrix(c(1,2),byrow=T, ncol=2), widths=c(1.1,1))
# Draw x = log(1 - u), u ~ Uniform(0,1) (inverse-transform sampling; the
# negative of an exponential draw), and pair each sorted sample with its
# empirical CDF value F.
u = sort(runif(n))
x = log(1-u)
x = sort(x)
F = ((0:(n-1))+0.5)/n
par(mar=c(2.5,3,0.5,1))
# Left panel: raw empirical CDF in the extreme left tail.
plot(x, F, cex=dot.size, pch=21, bg=fade, col=NA, type='b', xlim=c(-9,-4.5), ylim=c(0,0.01), xaxt='n', ylab=NA, mgp=c(1,0.5,0), xlab=NA)
axis(side=1, at=-10:-1, labels=NA)
title(xlab='x', line=0.8, cex.lab=1.5)
title(ylab='q', line=1.5, cex.lab=1.5)
# A single constant-size bin of 100 samples, interpolated linearly; the
# short vertical ticks mark the bin end points.
left.end = x[1] - (x[2]-x[1])
lines(c(left.end, x[100]), c(0, 0.01), lwd=2)
lines(c(left.end, left.end), c(-0.0005, 0.0005), lt=1, col='black', lwd=0.5)
lines(c(x[100], x[100]), c(0.0085, 0.015), lt=1, col='black', lwd=0.5)
text(-7, 0.006, "100")
###text(-5, 0.4, adj=0, "Constant size bins result in large")
###text(-5, 0.35, adj=0, "errors at extreme quantiles")
# Right panel: same tail, to be overlaid with variable-size bins below.
par(mar=c(2.5,1.5,0.5,1))
plot(x, F, cex=dot.size, pch=21, bg=fade, col=NA, type='b', xlim=c(-9,-4.5), ylim=c(0,0.01), yaxt='n', xaxt='n')
axis(side=1, at=-10:-1, labels=NA)
axis(side=2, at=(0:6)/10, labels=NA)
title(xlab='x', line=0.8, cex.lab=1.5)
title(ylab='q', line=2, cex.lab=1.5)
q.to.k = function(q) {
  # Map a quantile q in [0, 1] to a normalized index in [0, 1] via the
  # arcsine scale function.
  # NOTE(review): unlike k.to.q below, no compression factor is applied
  # here -- presumably callers scale by the compression themselves; this
  # function is not called in the visible script, so confirm before use.
  (asin(2*q-1)/pi + 1/2)
}
k.to.q = function(k,compression) {
  # Inverse scale function: map a centroid index k (for the given
  # compression) back to a quantile q in [0, 1].
  theta = (k / compression) * pi - pi / 2
  0.5 + sin(theta) / 2
}
# Centroid weights for the variable-size bins; cumulative sums give the
# bin boundaries in quantile space (q.bin) and sample-index space (i.bin).
weights = c(2, 8, 19, 35, 56, 81, 111)
q.bin = cumsum(c(0, weights)/n)
i.bin = c(1, cumsum(weights)+1)
i.right = i.bin-1
i.right = i.right[i.right > 0]
m = length(i.right)
i.bin = i.bin[1:m]
# Bin edges in x: midpoint between the last sample of one bin and the
# first sample of the next.
x.bin = c(left.end, (x[i.right[1:(m-1)]] + x[i.bin[2:m]])/2)
F.bin = (i.bin-1) / n
lines(x.bin, F.bin, lwd=2)
# Annotate each bin with the number of samples it contains; the offsets
# grow with q so labels do not overlap the curve.
# NOTE(review): x.pos / y.pos are computed but never used below.
dy = 0.0005
for (i in 1:m) {
  x.text = (x[i.bin[i]] + x[i.right[i]])/2
  y.text = (F.bin[i] + F.bin[i+1])/2
  x.offset = 0.3 * y.text
  y.offset = dy * (1 + 500*y.text)
  x.pos = x.text - x.offset
  y.pos = y.text + y.offset
  lines(c(x.bin[i],x.bin[i]), c(F.bin[i]-dy+0.000, F.bin[i]+dy+y.offset*0.6-0.0005), lt=1, lwd=0.5, col='black')
  text(x.text - x.offset, y.text + y.offset, i.right[i]-i.bin[i]+1)
}
###text(-5, 0.35, adj=0, "Variable size bins keep errors")
###text(-5, 0.3, adj=0, "small at extreme quantiles")
dev.off()
|
{"hexsha": "75153944fa59eae91a0e996dee12e356e532d0ef", "size": 2280, "ext": "r", "lang": "R", "max_stars_repo_path": "docs/t-digest-paper/linear-interpolation.r", "max_stars_repo_name": "ajwerner/t-digest", "max_stars_repo_head_hexsha": "fce13b0cee5daa1a98b84e8ca49cdf8f7ccff6b7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/t-digest-paper/linear-interpolation.r", "max_issues_repo_name": "ajwerner/t-digest", "max_issues_repo_head_hexsha": "fce13b0cee5daa1a98b84e8ca49cdf8f7ccff6b7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/t-digest-paper/linear-interpolation.r", "max_forks_repo_name": "ajwerner/t-digest", "max_forks_repo_head_hexsha": "fce13b0cee5daa1a98b84e8ca49cdf8f7ccff6b7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2328767123, "max_line_length": 136, "alphanum_fraction": 0.5850877193, "num_tokens": 1012}
|
[STATEMENT]
lemma eFreshInp_simp[simp]:
"igWlsInp MOD delta inp
\<Longrightarrow> eFreshInp MOD ys y (OKI inp) = igFreshInp MOD ys y inp"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. igWlsInp MOD delta inp \<Longrightarrow> eFreshInp MOD ys y (OKI inp) = igFreshInp MOD ys y inp
[PROOF STEP]
by (force simp: igFreshInp_def OKI_def liftAll_lift_comp igWlsInp_defs intro!: liftAll_cong)
|
{"llama_tokens": 176, "file": "Binding_Syntax_Theory_Iteration", "length": 1}
|
import pandas as pd
import glob
import xml.etree.ElementTree as ET
from astropy import units as u
from astropy.coordinates import SkyCoord
def read_candidate_files(files, verbose=True):
    """Read candidate overview XML files into a single pandas DataFrame.

    Parameters
    ----------
    files : list of str
        Paths of the candidate 'overview.xml' files to read.
    verbose : bool, optional (default=True)
        Whether to print progress information.

    Returns
    -------
    df_candidates : pandas.DataFrame
        One row per candidate from all files, sorted by decreasing snr.
    obs_meta_data : dict
        Observation meta data taken from the first file: 'tsamp',
        'nsamples', 'obs_length' and 'obs_length_over_c'.

    Raises
    ------
    ValueError
        If `files` is empty (the previous behavior was an opaque
        NameError on the never-assigned `obs_meta_data`).
    """
    if verbose:
        print(f"{len(files)} candidates files found.")
    if not files:
        # Fail early with a clear message instead of crashing later.
        raise ValueError("No candidate files given, cannot read candidates.")
    all_rows = []
    obs_meta_data = {}
    for file_index, file in enumerate(files):
        tree = ET.parse(file)
        root = tree.getroot()
        # Indexing might break when the candidate files look differently
        candidates = root[6]
        all_rows.extend(create_row(root, candidates, file, file_index))
        # Grab needed meta data of obs from the first file only.
        # Maybe should grab all values and check if comparison between
        # files makes sense.
        if file_index == 0:
            tsamp = float(root[1].find("tsamp").text)
            nsamples = float(root[1].find("nsamples").text)
            obs_length = tsamp * nsamples
            speed_of_light = 299792458.0
            obs_meta_data = {"tsamp": tsamp,
                             "nsamples": nsamples,
                             "obs_length": obs_length,
                             'obs_length_over_c': obs_length / speed_of_light}
    df_candidates = pd.DataFrame(all_rows)
    # Additional type casting may be necessary or not necessary at all
    df_candidates = df_candidates.astype({"snr": float, "dm": float,
                                          "period": float, "acc": float,
                                          "nassoc": int})
    if verbose:
        print(f"{len(df_candidates)} candidates read.")
    # sort by snr
    df_candidates.sort_values('snr', inplace=True, ascending=False)
    df_candidates.reset_index(inplace=True, drop=True)
    return df_candidates, obs_meta_data
def create_row(root, candidates, file, file_index):
    """Build one dict per candidate found in a single overview XML file.

    Each row carries the candidate's own XML entries plus the source
    position (raw and in degrees) and the originating file information.
    """
    ra_val = float(root[1].find("src_raj").text)
    dec_val = float(root[1].find("src_dej").text)
    ra_deg, dec_deg = convert_to_deg(ra_val, dec_val)
    # Enter attributes that should be ignored here
    ignored_entries = ['candidate']
    #ignored_entries = ['candidate', 'byte_offset', 'opt_period', 'folded_snr']
    rows = []
    for candidate in candidates:
        row = {entry.tag: entry.text
               for entry in candidate.iter()
               if entry.tag not in ignored_entries}
        row['cand_id_in_file'] = candidate.attrib.get("id")
        row['src_raj'] = ra_val
        row['src_rajd'] = ra_deg
        row['src_dej'] = dec_val
        row['src_dejd'] = dec_deg
        row['file_index'] = file_index
        row['file'] = file
        rows.append(row)
    return rows
def convert_to_deg(ra, dec):
    """Convert packed hour-angle floats (HHMMSS.s / DDMMSS.s) to degrees.

    The inputs are stringified, split into space-separated sexagesimal
    fields, and parsed with astropy's SkyCoord.
    """
    ra_txt = str(ra)
    dec_txt = str(dec)
    ra_sexagesimal = ra_txt[:2] + ' ' + ra_txt[2:4] + ' ' + ra_txt[4:]
    dec_sexagesimal = dec_txt[:3] + ' ' + dec_txt[3:5] + ' ' + dec_txt[5:]
    coord = SkyCoord(ra_sexagesimal + ' ' + dec_sexagesimal,
                     unit=(u.hourangle, u.deg))
    return coord.ra.deg, coord.dec.deg
|
{"hexsha": "310afc7f5967ec137b3e805c932f0d5f48b6c94d", "size": 3298, "ext": "py", "lang": "Python", "max_stars_repo_path": "candidate_filter/reading_cands.py", "max_stars_repo_name": "larskuenkel/candidate_filter", "max_stars_repo_head_hexsha": "8364491b05a614885156fd92ad2ea0ad9fb3c478", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "candidate_filter/reading_cands.py", "max_issues_repo_name": "larskuenkel/candidate_filter", "max_issues_repo_head_hexsha": "8364491b05a614885156fd92ad2ea0ad9fb3c478", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "candidate_filter/reading_cands.py", "max_forks_repo_name": "larskuenkel/candidate_filter", "max_forks_repo_head_hexsha": "8364491b05a614885156fd92ad2ea0ad9fb3c478", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-31T12:40:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-15T12:29:50.000Z", "avg_line_length": 35.085106383, "max_line_length": 88, "alphanum_fraction": 0.6285627653, "include": true, "reason": "from astropy", "num_tokens": 806}
|
import sys
import numpy as np
from collections import OrderedDict
from ..utils.utils_def import FlopyBinaryData
class SwrFile(FlopyBinaryData):
    """
    Read binary SWR output from MODFLOW SWR Process binary output files
    The SwrFile class is the super class from which specific derived
    classes are formed.  This class should not be instantiated directly
    Parameters
    ----------
    filename : string
        Name of the swr output file
    swrtype : str
        swr data type. Valid data types are 'stage', 'budget',
        'flow', 'exchange', or 'structure'. (default is 'stage')
    precision : string
        'single' or 'double'.  Default is 'double'.
    verbose : bool
        Write information to the screen.  Default is False.
    Attributes
    ----------
    Methods
    -------
    See Also
    --------
    Notes
    -----
    Examples
    --------
    >>> import flopy
    >>> so = flopy.utils.SwrFile('mymodel.swr.stage.bin')
    """

    def __init__(
        self, filename, swrtype="stage", precision="double", verbose=False
    ):
        """
        Class constructor.
        """
        super(SwrFile, self).__init__()
        self.set_float(precision=precision)
        self.header_dtype = np.dtype(
            [
                ("totim", self.floattype),
                ("kswr", "i4"),
                ("kstp", "i4"),
                ("kper", "i4"),
            ]
        )
        self._recordarray = []

        self.file = open(filename, "rb")
        self.types = ("stage", "budget", "flow", "exchange", "structure")
        if swrtype.lower() in self.types:
            self.type = swrtype.lower()
        else:
            # bugfix: the original message formatted the builtin ``type``
            # instead of the user-supplied ``swrtype``
            err = (
                "SWR type ({}) is not defined. ".format(swrtype)
                + "Available types are:\n"
            )
            for t in self.types:
                err = "{} {}\n".format(err, t)
            raise Exception(err)

        # set data dtypes
        self._build_dtypes()

        # debug
        self.verbose = verbose

        # Read the dimension data
        self.flowitems = 0
        if self.type == "flow":
            self.flowitems = self.read_integer()
        self.nrecord = self.read_integer()

        # set-up
        self.items = len(self.out_dtype) - 1

        # read connectivity for velocity data if necessary
        self.conn_dtype = None
        if self.type == "flow":
            self.connectivity = self._read_connectivity()
            if self.verbose:
                print("Connectivity: ")
                print(self.connectivity)

        # initialize itemlist and nentries for qaq data
        self.nentries = {}

        self.datastart = self.file.tell()

        # build index
        self._build_index()

    def get_connectivity(self):
        """
        Get connectivity data from the file.
        Parameters
        ----------
        Returns
        ----------
        data : numpy array
            Array has size (nrecord, 3). None is returned if swrtype is not
            'flow'
        See Also
        --------
        Notes
        -----
        Examples
        --------
        """
        if self.type == "flow":
            return self.connectivity
        else:
            return None

    def get_nrecords(self):
        """
        Get the number of records in the file
        Returns
        ----------
        out : tuple of int
            A tuple with the number of records and number of flow items
            in the file. The number of flow items is non-zero only if
            swrtype='flow'.
        """
        return self.nrecord, self.flowitems

    def get_kswrkstpkper(self):
        """
        Get a list of unique stress periods, time steps, and swr time steps
        in the file
        Returns
        ----------
        out : list of (kswr, kstp, kper) tuples
            List of unique kswr, kstp, kper combinations in binary file.
            kswr, kstp, and kper values are zero-based.
        """
        return self._kswrkstpkper

    def get_ntimes(self):
        """
        Get the number of times in the file
        Returns
        ----------
        out : int
            The number of simulation times (totim) in binary file.
        """
        return self._ntimes

    def get_times(self):
        """
        Get a list of unique times in the file
        Returns
        ----------
        out : list of floats
            List contains unique simulation times (totim) in binary file.
        """
        return self._times.tolist()

    def get_record_names(self):
        """
        Get a list of unique record names in the file
        Returns
        ----------
        out : list of strings
            List of unique text names in the binary file.
        """
        return self.out_dtype.names

    def get_data(self, idx=None, kswrkstpkper=None, totim=None):
        """
        Get data from the file for the specified conditions.
        Parameters
        ----------
        idx : int
            The zero-based record number.  The first record is record 0.
            (default is None)
        kswrkstpkper : tuple of ints
            A tuple containing the swr time step, time step, and stress period
            (kswr, kstp, kper). These are zero-based kswr, kstp, and kper
            values. (default is None)
        totim : float
            The simulation time. (default is None)
        Returns
        ----------
        data : numpy record array
            Array has size (nitems).
        See Also
        --------
        Notes
        -----
        if both kswrkstpkper and totim are None, will return the last entry
        Examples
        --------
        """
        if kswrkstpkper is not None:
            kswr1 = kswrkstpkper[0]
            kstp1 = kswrkstpkper[1]
            kper1 = kswrkstpkper[2]
            totim1 = self._recordarray[
                np.where(
                    (self._recordarray["kswr"] == kswr1)
                    & (self._recordarray["kstp"] == kstp1)
                    & (self._recordarray["kper"] == kper1)
                )
            ]["totim"][0]
        elif totim is not None:
            totim1 = totim
        elif idx is not None:
            totim1 = self._recordarray["totim"][idx]
        else:
            totim1 = self._times[-1]

        try:
            ipos = self.recorddict[totim1]
            self.file.seek(ipos)
            if self.type == "exchange":
                self.nitems, self.itemlist = self.nentries[totim1]
                r = self._read_qaq()
            elif self.type == "structure":
                self.nitems, self.itemlist = self.nentries[totim1]
                r = self._read_structure()
            else:
                r = self.read_record(count=self.nrecord)

            # add totim to data record array
            s = np.zeros(r.shape[0], dtype=self.out_dtype)
            s["totim"] = totim1
            for name in r.dtype.names:
                s[name] = r[name]
            return s.view(dtype=self.out_dtype)
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed; a failed lookup/read still returns None
            return None

    def get_ts(self, irec=0, iconn=0, klay=0, istr=0):
        """
        Get a time series from a swr binary file.
        Parameters
        ----------
        irec : int
            is the zero-based reach (stage, qm, qaq) or reach group number
            (budget) to retrieve. (default is 0)
        iconn : int
            is the zero-based connection number for reach (irch) to retrieve
            qm data. iconn is only used if qm data is being read.
            (default is 0)
        klay : int
            is the zero-based layer number for reach (irch) to retrieve
            qaq data . klay is only used if qaq data is being read.
            (default is 0)
        istr : int
            is the zero-based structure number for reach (irch) to retrieve
            structure data. istr is only used if structure data is being
            read. (default is 0)
        Returns
        ----------
        out : numpy recarray
            Array has size (ntimes, nitems).  The first column in the
            data array will contain time (totim). nitems is 2 for stage
            data, 15 for budget data, 3 for qm data, and 11 for qaq
            data.
        See Also
        --------
        Notes
        -----
        The irec, iconn, and klay values must be zero-based.
        Examples
        --------
        """

        if irec + 1 > self.nrecord:
            # bugfix: the original format string was missing the {}
            # placeholder for the record count
            err = "Error: specified irec ({}) ".format(
                irec
            ) + "exceeds the total number of records ({})".format(self.nrecord)
            raise Exception(err)

        gage_record = None
        if self.type == "stage" or self.type == "budget":
            gage_record = self._get_ts(irec=irec)
        elif self.type == "flow":
            gage_record = self._get_ts_qm(irec=irec, iconn=iconn)
        elif self.type == "exchange":
            gage_record = self._get_ts_qaq(irec=irec, klay=klay)
        elif self.type == "structure":
            gage_record = self._get_ts_structure(irec=irec, istr=istr)

        return gage_record

    def _read_connectivity(self):
        """Read the reach connectivity block ('flow' files only)."""
        self.conn_dtype = np.dtype(
            [("reach", "i4"), ("from", "i4"), ("to", "i4")]
        )
        # np.int was removed in numpy >= 1.24; the builtin int is equivalent
        conn = np.zeros((self.nrecord, 3), int)
        icount = 0
        for nrg in range(self.flowitems):
            flowitems = self.read_integer()
            for ic in range(flowitems):
                conn[icount, 0] = nrg
                # convert one-based file indices to zero-based
                conn[icount, 1] = self.read_integer() - 1
                conn[icount, 2] = self.read_integer() - 1
                icount += 1
        return conn

    def _build_dtypes(self):
        """Build the numpy dtypes used to read and return SWR records."""
        self.vtotim = ("totim", self.floattype)
        if self.type == "stage":
            vtype = [("stage", self.floattype)]
        elif self.type == "budget":
            vtype = [
                ("stage", self.floattype),
                ("qsflow", self.floattype),
                ("qlatflow", self.floattype),
                ("quzflow", self.floattype),
                ("rain", self.floattype),
                ("evap", self.floattype),
                ("qbflow", self.floattype),
                ("qeflow", self.floattype),
                ("qexflow", self.floattype),
                ("qbcflow", self.floattype),
                ("qcrflow", self.floattype),
                ("dv", self.floattype),
                ("inf-out", self.floattype),
                ("volume", self.floattype),
            ]
        elif self.type == "flow":
            vtype = [("flow", self.floattype), ("velocity", self.floattype)]
        elif self.type == "exchange":
            vtype = [
                ("layer", "i4"),
                ("bottom", "f8"),
                ("stage", "f8"),
                ("depth", "f8"),
                ("head", "f8"),
                ("wetper", "f8"),
                ("cond", "f8"),
                ("headdiff", "f8"),
                ("exchange", "f8"),
            ]
        elif self.type == "structure":
            vtype = [
                ("usstage", "f8"),
                ("dsstage", "f8"),
                ("gateelev", "f8"),
                ("opening", "f8"),
                ("strflow", "f8"),
            ]
        self.dtype = np.dtype(vtype)
        temp = list(vtype)
        if self.type == "exchange":
            temp.insert(0, ("reach", "i4"))
            self.qaq_dtype = np.dtype(temp)
        elif self.type == "structure":
            temp.insert(0, ("structure", "i4"))
            temp.insert(0, ("reach", "i4"))
            self.str_dtype = np.dtype(temp)
        temp.insert(0, self.vtotim)
        self.out_dtype = np.dtype(temp)
        return

    def _read_header(self):
        """Read one record header.

        Returns (totim, dt, kper, kstp, kswr, success) with zero-based
        kper/kstp/kswr; success is False once the end of file is reached.
        """
        nitems = 0
        if self.type == "exchange" or self.type == "structure":
            # np.int was removed in numpy >= 1.24; builtin int is equivalent
            itemlist = np.zeros(self.nrecord, int)
            try:
                for i in range(self.nrecord):
                    itemlist[i] = self.read_integer()
                    nitems += itemlist[i]
                self.nitems = nitems
            except Exception:
                if self.verbose:
                    sys.stdout.write("\nCould not read itemlist")
                return 0.0, 0.0, 0, 0, 0, False
        try:
            totim = self.read_real()
            dt = self.read_real()
            kper = self.read_integer() - 1
            kstp = self.read_integer() - 1
            kswr = self.read_integer() - 1
            if self.type == "exchange" or self.type == "structure":
                self.nentries[totim] = (nitems, itemlist)
            return totim, dt, kper, kstp, kswr, True
        except Exception:
            return 0.0, 0.0, 0, 0, 0, False

    def _get_ts(self, irec=0):
        """Build a time series for record irec (stage/budget files)."""
        # create array
        gage_record = np.zeros(self._ntimes, dtype=self.out_dtype)

        # iterate through the record dictionary
        idx = 0
        for key, value in self.recorddict.items():
            totim = np.array(key)
            gage_record["totim"][idx] = totim
            self.file.seek(value)
            r = self._get_data()
            for name in r.dtype.names:
                gage_record[name][idx] = r[name][irec]
            idx += 1
        return gage_record.view(dtype=self.out_dtype)

    def _get_ts_qm(self, irec=0, iconn=0):
        """Build a time series for the (irec, iconn) connection (flow)."""
        # create array
        gage_record = np.zeros(self._ntimes, dtype=self.out_dtype)

        # iterate through the record dictionary
        idx = 0
        for key, value in self.recorddict.items():
            totim = key
            gage_record["totim"][idx] = totim
            self.file.seek(value)
            r = self._get_data()
            # find correct entry for reach and connection
            for i in range(self.nrecord):
                inode = self.connectivity[i, 1]
                ic = self.connectivity[i, 2]
                if irec == inode and ic == iconn:
                    for name in r.dtype.names:
                        gage_record[name][idx] = r[name][i]
                    break
            idx += 1
        return gage_record.view(dtype=self.out_dtype)

    def _get_ts_qaq(self, irec=0, klay=0):
        """Build a time series for reach irec / layer klay (exchange)."""
        # create array
        gage_record = np.zeros(self._ntimes, dtype=self.out_dtype)

        # iterate through the record dictionary
        idx = 0
        for key, value in self.recorddict.items():
            totim = key
            gage_record["totim"][idx] = totim
            self.nitems, self.itemlist = self.nentries[key]
            self.file.seek(value)
            r = self._get_data()
            # find correct entry for record and layer
            ilen = np.shape(r)[0]
            for i in range(ilen):
                ir = r["reach"][i]
                il = r["layer"][i]
                if ir == irec and il == klay:
                    for name in r.dtype.names:
                        gage_record[name][idx] = r[name][i]
                    break
            idx += 1
        return gage_record.view(dtype=self.out_dtype)

    def _get_ts_structure(self, irec=0, istr=0):
        """Build a time series for reach irec / structure istr."""
        # create array
        gage_record = np.zeros(self._ntimes, dtype=self.out_dtype)

        # iterate through the record dictionary
        idx = 0
        for key, value in self.recorddict.items():
            totim = key
            gage_record["totim"][idx] = totim
            self.nitems, self.itemlist = self.nentries[key]
            self.file.seek(value)
            r = self._get_data()
            # find correct entry for record and structure number
            ilen = np.shape(r)[0]
            for i in range(ilen):
                ir = r["reach"][i]
                il = r["structure"][i]
                if ir == irec and il == istr:
                    for name in r.dtype.names:
                        gage_record[name][idx] = r[name][i]
                    break
            idx += 1
        return gage_record.view(dtype=self.out_dtype)

    def _get_data(self):
        """Read the data block at the current file position."""
        if self.type == "exchange":
            return self._read_qaq()
        elif self.type == "structure":
            return self._read_structure()
        else:
            return self.read_record(count=self.nrecord)

    def _read_qaq(self):
        """Read an exchange (qaq) data block and attach reach numbers."""
        # read qaq data using standard record reader
        bd = self.read_record(count=self.nitems)
        # layer index in file is one-based
        bd["layer"] -= 1

        # add reach number to qaq data
        r = np.zeros(self.nitems, dtype=self.qaq_dtype)

        # build array with reach numbers
        reaches = np.zeros(self.nitems, dtype=np.int32)
        idx = 0
        for irch in range(self.nrecord):
            klay = self.itemlist[irch]
            for k in range(klay):
                reaches[idx] = irch
                idx += 1

        # add reach to array returned
        r["reach"] = reaches.copy()

        # add read data to array returned
        for idx, k in enumerate(self.dtype.names):
            r[k] = bd[k]
        return r

    def _read_structure(self):
        """Read a structure data block and attach reach/structure numbers."""
        # read structure data using standard record reader
        bd = self.read_record(count=self.nitems)

        # add reach and structure number to structure data
        r = np.zeros(self.nitems, dtype=self.str_dtype)

        # build array with reach numbers
        reaches = np.zeros(self.nitems, dtype=np.int32)
        struct = np.zeros(self.nitems, dtype=np.int32)
        idx = 0
        for irch in range(self.nrecord):
            nstr = self.itemlist[irch]
            for n in range(nstr):
                reaches[idx] = irch
                struct[idx] = n
                idx += 1

        # add reach to array returned
        r["reach"] = reaches.copy()
        r["structure"] = struct.copy()

        # add read data to array returned
        for idx, k in enumerate(self.dtype.names):
            r[k] = bd[k]
        return r

    def _build_index(self):
        """
        Build the recordarray recarray and recorddict dictionary, which map
        the header information to the position in the binary file.
        """
        self.file.seek(self.datastart)
        if self.verbose:
            sys.stdout.write("Generating SWR binary data time list\n")
        self._ntimes = 0
        self._times = []
        self._kswrkstpkper = []
        self.recorddict = OrderedDict()

        idx = 0
        while True:
            # --output something to screen so it is possible to determine
            #   that the time list is being created
            idx += 1
            if self.verbose:
                v = divmod(float(idx), 72.0)
                if v[1] == 0.0:
                    sys.stdout.write(".")
            # read header
            totim, dt, kper, kstp, kswr, success = self._read_header()
            if success:
                # renamed from ``bytes`` to avoid shadowing the builtin
                if self.type == "exchange":
                    nbytes = self.nitems * (
                        self.integerbyte + 8 * self.realbyte
                    )
                elif self.type == "structure":
                    nbytes = self.nitems * (5 * self.realbyte)
                else:
                    nbytes = self.nrecord * self.items * self.realbyte
                ipos = self.file.tell()
                # skip over the data block; only headers are indexed
                self.file.seek(nbytes, 1)
                # save data
                self._ntimes += 1
                self._times.append(totim)
                self._kswrkstpkper.append((kswr, kstp, kper))
                header = (totim, kswr, kstp, kper)
                self.recorddict[totim] = ipos
                self._recordarray.append(header)
            else:
                if self.verbose:
                    sys.stdout.write("\n")
                self._recordarray = np.array(
                    self._recordarray, dtype=self.header_dtype
                )
                self._times = np.array(self._times)
                self._kswrkstpkper = np.array(self._kswrkstpkper)
                return
class SwrStage(SwrFile):
    """
    Read binary SWR stage output from MODFLOW SWR Process binary output
    files.  Thin convenience wrapper around SwrFile with swrtype='stage'.

    Parameters
    ----------
    filename : string
        Name of the swr stage output file
    precision : string
        'single' or 'double'. Default is 'double'.
    verbose : bool
        Write information to the screen. Default is False.

    Examples
    --------
    >>> import flopy
    >>> stageobj = flopy.utils.SwrStage('mymodel.swr.stg')
    """

    def __init__(self, filename, precision="double", verbose=False):
        super().__init__(
            filename, swrtype="stage", precision=precision, verbose=verbose
        )
class SwrBudget(SwrFile):
    """
    Read binary SWR budget output from MODFLOW SWR Process binary output
    files.  Thin convenience wrapper around SwrFile with swrtype='budget'.

    Parameters
    ----------
    filename : string
        Name of the swr budget output file
    precision : string
        'single' or 'double'. Default is 'double'.
    verbose : bool
        Write information to the screen. Default is False.

    Examples
    --------
    >>> import flopy
    >>> stageobj = flopy.utils.SwrStage('mymodel.swr.bud')
    """

    def __init__(self, filename, precision="double", verbose=False):
        super().__init__(
            filename, swrtype="budget", precision=precision, verbose=verbose
        )
class SwrFlow(SwrFile):
    """
    Read binary SWR flow output from MODFLOW SWR Process binary output
    files.  Thin convenience wrapper around SwrFile with swrtype='flow'.

    Parameters
    ----------
    filename : string
        Name of the swr flow output file
    precision : string
        'single' or 'double'. Default is 'double'.
    verbose : bool
        Write information to the screen. Default is False.

    Examples
    --------
    >>> import flopy
    >>> stageobj = flopy.utils.SwrStage('mymodel.swr.flow')
    """

    def __init__(self, filename, precision="double", verbose=False):
        super().__init__(
            filename, swrtype="flow", precision=precision, verbose=verbose
        )
class SwrExchange(SwrFile):
    """
    Read binary SWR surface-water groundwater exchange output from MODFLOW
    SWR Process binary output files.  Thin convenience wrapper around
    SwrFile with swrtype='exchange'.

    Parameters
    ----------
    filename : string
        Name of the swr surface-water groundwater exchange output file
    precision : string
        'single' or 'double'. Default is 'double'.
    verbose : bool
        Write information to the screen. Default is False.

    Examples
    --------
    >>> import flopy
    >>> stageobj = flopy.utils.SwrStage('mymodel.swr.qaq')
    """

    def __init__(self, filename, precision="double", verbose=False):
        super().__init__(
            filename, swrtype="exchange", precision=precision, verbose=verbose
        )
class SwrStructure(SwrFile):
    """
    Read binary SWR structure output from MODFLOW SWR Process binary
    output files.  Thin convenience wrapper around SwrFile with
    swrtype='structure'.

    Parameters
    ----------
    filename : string
        Name of the swr structure output file
    precision : string
        'single' or 'double'. Default is 'double'.
    verbose : bool
        Write information to the screen. Default is False.

    Examples
    --------
    >>> import flopy
    >>> stageobj = flopy.utils.SwrStage('mymodel.swr.str')
    """

    def __init__(self, filename, precision="double", verbose=False):
        super().__init__(
            filename, swrtype="structure", precision=precision, verbose=verbose
        )
|
{"hexsha": "7c36c98edfe9a43cc9b720e00dcb16e02bab80b9", "size": 23925, "ext": "py", "lang": "Python", "max_stars_repo_path": "flopy/utils/swroutputfile.py", "max_stars_repo_name": "aleaf/flopy", "max_stars_repo_head_hexsha": "a5777a4d4a745e473110a167c69603ac4ad3106c", "max_stars_repo_licenses": ["CC0-1.0", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "flopy/utils/swroutputfile.py", "max_issues_repo_name": "aleaf/flopy", "max_issues_repo_head_hexsha": "a5777a4d4a745e473110a167c69603ac4ad3106c", "max_issues_repo_licenses": ["CC0-1.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flopy/utils/swroutputfile.py", "max_forks_repo_name": "aleaf/flopy", "max_forks_repo_head_hexsha": "a5777a4d4a745e473110a167c69603ac4ad3106c", "max_forks_repo_licenses": ["CC0-1.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2134433962, "max_line_length": 106, "alphanum_fraction": 0.5129780564, "include": true, "reason": "import numpy", "num_tokens": 5635}
|
"""Checks module: gather utilities to perform routine checks. """
# Authors: Hamza Cherkaoui <hamza.cherkaoui@inria.fr>
# License: BSD (3-clause)
import numpy as np
from .convolution import adjconv_uH
from .atlas import get_indices_from_roi
class EarlyStopping(Exception):
    """Exception signalling that the optimization has converged."""
class CostFunctionIncreased(Exception):
    """Exception signalling that the cost-function went up."""
def check_obj(lobj, ii, max_iter, early_stopping=True, raise_on_increase=True,
              eps=np.finfo(np.float64).eps, level=1):
    """ Dispatch the cost-function check to the requested monitoring level.

    Parameters
    ----------
    lobj : array or None, shape (n_iter,) or (3 * n_iter,), the saved
        cost-function
    ii : int, the index of the current iteration
    max_iter : int, (default=100), maximum number of iterations to perform
        the analysis
    early_stopping : bool, (default=True), whether to early stop the analysis
    raise_on_increase : bool, (default=True), whether to stop the analysis if
        the cost-function increases during an iteration. This can be due to
        the fact that the temporal regularization parameter is set to high
    eps : float, (default=np.finfo(np.float64).eps), stopping parameter w.r.t
        evolution of the cost-function
    level : int, (default=1), desired level of cost-function monitoring, 1
        for cost-function computation at each iteration, 2 for cost-function
        computation at each step (z, u, v); any other value is a no-op

    Throws
    ------
    CostFunctionIncreased : if the cost-function increases during an
        iteration of the analysis
    EarlyStopping : if the cost-function has converged
    """
    if level == 1:
        _check_obj_level_1(lobj, ii, max_iter, early_stopping=early_stopping,
                           raise_on_increase=raise_on_increase, eps=eps)
    elif level == 2:
        _check_obj_level_2(lobj, ii, max_iter, early_stopping=early_stopping,
                           raise_on_increase=raise_on_increase, eps=eps)
def _check_obj_level_1(lobj, ii, max_iter, early_stopping=True,
raise_on_increase=True, eps=np.finfo(np.float64).eps):
""" Check after each iteration.
Parameters
----------
lobj : array or None, shape (n_iter,) or (3 * n_iter,), the saved
cost-function
ii : int, the index of the current iteration
max_iter : int, (default=100), maximum number of iterations to perform the
analysis
early_stopping : bool, (default=True), whether to early stop the analysis
raise_on_increase : bool, (default=True), whether to stop the analysis if
the cost-function increases during an iteration. This can be due to the
fact that the temporal regularization parameter is set to high
eps : float, (default=np.finfo(np.float64).eps, stoppping parameter w.r.t
evolution of the cost-function
Throws
------
CostFunctionIncreased : if the cost-function increases during an iteration,
of the analysis. This can be due to the fact that the temporal
regularization parameter is set to high
EarlyStopping : if the cost-function has converged
"""
eps_ = (lobj[-2] - lobj[-1]) / lobj[-2]
# check increasing cost-function
if raise_on_increase and eps_ < 0.0:
raise CostFunctionIncreased(
f"[{ii}/{max_iter}] Iteration relatively increase "
f"global cost-function of "
f"{-eps_:.3e}")
# check early-stopping
if early_stopping and np.abs(eps_) <= eps:
msg = (f"[{ii}/{max_iter}] Early-stopping (!) with: "
f"eps={eps_:.3e}")
raise EarlyStopping(msg)
def _check_obj_level_2(lobj, ii, max_iter, early_stopping=True,
raise_on_increase=True, eps=np.finfo(np.float64).eps):
""" Check after each update.
Parameters
----------
lobj : array or None, shape (n_iter,) or (3 * n_iter,), the saved
cost-function
ii : int, the index of the current iteration
max_iter : int, (default=100), maximum number of iterations to perform the
analysis
early_stopping : bool, (default=True), whether to early stop the analysis
raise_on_increase : bool, (default=True), whether to stop the analysis if
the cost-function increases during an iteration. This can be due to the
fact that the temporal regularization parameter is set to high
eps : float, (default=np.finfo(np.float64).eps, stoppping parameter w.r.t
evolution of the cost-function
Throws
------
CostFunctionIncreased : if the cost-function increases during an iteration,
of the analysis. This can be due to the fact that the temporal
regularization parameter is set to high
EarlyStopping : if the cost-function has converged
"""
eps_z = (lobj[-4] - lobj[-3]) / lobj[-4]
eps_u = (lobj[-3] - lobj[-2]) / lobj[-3]
eps_v = (lobj[-2] - lobj[-1]) / lobj[-2]
# check increasing cost-function
if raise_on_increase and eps_z < 0.0:
raise CostFunctionIncreased(
f"[{ii + 1}/{max_iter}] Updating z relatively "
f"increase global cost-function of "
f"{-eps_z:.3e}")
if raise_on_increase and eps_u < 0.0:
raise CostFunctionIncreased(
f"[{ii + 1}/{max_iter}] Updating u relatively "
f"increase global cost-function of "
f"{-eps_u:.3e}")
if raise_on_increase and eps_v < 0.0:
raise CostFunctionIncreased(
f"[{ii + 1}/{max_iter}] Updating v relatively "
f"increase global cost-function of "
f"{-eps_v:.3e}")
# check early-stopping
eps_check = (np.abs(eps_z) <= eps)
eps_check = eps_check and (np.abs(eps_u) <= eps)
eps_check = eps_check and (np.abs(eps_v) <= eps)
if (early_stopping and eps_check):
msg = (f"[{ii + 1}/{max_iter}] Early-stopping (!) with: "
f"z-eps={eps_z:.3e}, u-eps={eps_u:.3e}, v-eps={eps_v:.3e}")
raise EarlyStopping(msg)
def check_len_hrf(h, n_times_atom):
    """ Force the HRF to the requested length.

    A too-long HRF is truncated; a too-short one is zero-padded at the end.

    Parameters
    ----------
    h : array, shape (n_times_atom, ), HRF
    n_times_atom : int, number of components on which to decompose the
        neural activity (number of temporal components and its associated
        spatial maps).

    Return
    ------
    h : array, shape (n_times_atom, ), HRF with a correct length
    """
    missing = n_times_atom - len(h)
    if missing > 0:
        return np.hstack([h, np.zeros(missing)])
    if missing < 0:
        return h[:missing]
    return h
def check_if_vanished(A, msg="Vanished raw", eps=np.finfo(np.float64).eps):
    """ Raise an AssertionError if one row of A has negligible l2-norm.

    Parameters
    ----------
    A : array, the array on which to check if a row is too close to zero
    msg : str, (default="Vanished raw"), message to display with the
        AssertionError exception
    eps : float, (default=np.finfo(np.float64).eps), tolerance among the
        squared l2-norm of the rows

    Throws
    ------
    AssertionError : if a row has vanished
    """
    # bugfix: the original compared a Python list to a scalar
    # (``list > eps``), which only worked by accident through numpy's
    # reflected operator when eps is np.float64 and raised TypeError for a
    # plain float eps; convert to an ndarray for a proper elementwise test
    norm_A_k = np.asarray([A_k.dot(A_k) for A_k in A])
    assert np.all(norm_A_k > eps), msg
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance.

    Parameters
    ----------
    seed : None, int, random-instance, (default=None), random-instance
        or random-seed used to initialize the random-instance

    Return
    ------
    random_instance : random-instance used to initialize the analysis
    """
    # None or the np.random module itself -> numpy's global RandomState
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    # an already-built RandomState is returned untouched
    if isinstance(seed, np.random.RandomState):
        return seed
    # an integer seeds a fresh RandomState
    if isinstance(seed, (int, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError(f"{seed} cannot be used to seed a "
                     f"numpy.random.RandomState instance")
def _get_lambda_max(X, u, H, rois_idx):
    """ Get lbda max (the maximum value of the temporal regularization
    parameter which systematically provide zero temporal components).

    Parameters
    ----------
    X : array, shape (n_voxels, n_times), fMRI data
    u : array, shape (n_atoms, n_voxels), spatial maps
    H : array, shape (n_hrf_rois, n_times, n_times_valid), Toeplitz matrices
    rois_idx: array, shape (n_hrf_rois, max_indices_per_rois), HRF ROIs

    Return
    ------
    lbda_max : float, the maximum value of the temporal regularization
        parameter which systematically provide zero temporal components
    """
    n_hrf_rois, _, n_times_valid = H.shape
    _, n_times = X.shape
    n_atoms, n_voxels = u.shape
    # compute spatial vascular maps operator: each row k is the flattened
    # HRF-convolved constant-in-time version of spatial map u_k
    DD = np.empty((n_atoms, n_times * n_voxels))
    for k in range(n_atoms):
        # rank-one matrix: constant temporal profile times the spatial map
        uk_1 = np.ones((n_times_valid, 1)).dot(u[k, :][None, :])
        Dk_1 = np.empty((n_voxels, n_times))
        for m in range(n_hrf_rois):
            # apply each ROI's Toeplitz (HRF convolution) operator to the
            # voxels belonging to that ROI
            indices = get_indices_from_roi(m, rois_idx)
            Dk_1[indices, :] = H[m, :, :].dot(uk_1[:, indices]).T
        DD[k, :] = Dk_1.flatten()
    # least-squares fit of constant temporal components via pseudo-inverse
    pinv_DD = np.linalg.pinv(DD)
    c_1 = X.ravel().dot(pinv_DD)[:, None] * np.ones((n_atoms, n_times_valid))
    # compute X_hat: the data reconstructed from the fitted constant
    # temporal components
    c_1u = c_1.T.dot(u)
    X_hat = np.empty((n_voxels, n_times))
    for m in range(n_hrf_rois):
        indices = get_indices_from_roi(m, rois_idx)
        X_hat[indices, :] = H[m, :, :].dot(c_1u[:, indices]).T
    # return lambda max: largest absolute entry of the residual gradient
    # mapped through the integration operator L (upper-triangular ones);
    # adjconv_uH presumably applies the adjoint of the (u, H) forward
    # operator -- TODO confirm against hemolearn.convolution
    L = np.triu(np.ones((n_times_valid, n_times_valid)))
    LtuvtX = adjconv_uH(X - X_hat, u, H, rois_idx).dot(L.T)
    return np.max(np.abs(LtuvtX))
def check_lbda(lbda, lbda_strategy, X, u, H, rois_idx, prox_z='tv'):
    """ Return the temporal regularization parameter.

    Parameters
    ----------
    lbda : float, (default=0.1), whether the temporal regularization parameter
        if lbda_strategy == 'fixed' or the ratio w.r.t lambda max if
        lbda_strategy == 'ratio'
    lbda_strategy : str, (default='ratio'), strategy to fix the temporal
        regularization parameter, possible choice are ['ratio', 'fixed']
    X : array, shape (n_voxels, n_times), fMRI data
    u : array, shape (n_atoms, n_voxels), spatial maps
    H : array, shape (n_hrf_rois, n_times, n_times_valid), Toeplitz matrices
    rois_idx: array, shape (n_hrf_rois, max_indices_per_rois), HRF ROIs
    prox_z : str, (default='tv'), temporal proximal operator should be in
        ['tv', 'l1', 'l2', 'elastic-net']

    Return
    ------
    lbda : float, the value of the temporal regularization parameter

    Raises
    ------
    ValueError : if lbda is not numerical, if lbda_strategy is not in
        ['ratio', 'fixed'] or if lbda_strategy == 'ratio' is combined with
        a prox_z other than 'tv' (lambda max is only defined for the TV prox)
    """
    # accept plain Python scalars as well as NumPy scalar types
    if not isinstance(lbda, (int, float, np.integer, np.floating)):
        raise ValueError(f"'lbda' should be numerical, got '{type(lbda)}'")
    lbda = float(lbda)
    if lbda_strategy not in ['ratio', 'fixed']:
        raise ValueError(f"'lbda_strategy' should belong to "
                         f"['ratio', 'fixed'], got '{lbda_strategy}'")
    if lbda_strategy == 'ratio' and prox_z != 'tv':
        raise ValueError("If 'lbda_strategy' is set to 'ratio', 'prox_z' "
                         "should be set to 'tv'")
    if lbda_strategy == 'ratio':
        # scale the user-provided ratio by the data-dependent lambda max
        lbda_max = _get_lambda_max(X, u=u, H=H, rois_idx=rois_idx)
        lbda = lbda * lbda_max
    return lbda
|
{"hexsha": "0632e9ebfdc8e7ac70de03c04ef060e3d3ffb9ed", "size": 11692, "ext": "py", "lang": "Python", "max_stars_repo_path": "hemolearn/checks.py", "max_stars_repo_name": "hemolearn/hemolearn", "max_stars_repo_head_hexsha": "87db924a020497a236c410e828d5029c334032f5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hemolearn/checks.py", "max_issues_repo_name": "hemolearn/hemolearn", "max_issues_repo_head_hexsha": "87db924a020497a236c410e828d5029c334032f5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-08T06:57:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-13T22:29:22.000Z", "max_forks_repo_path": "hemolearn/checks.py", "max_forks_repo_name": "hemolearn/hemolearn", "max_forks_repo_head_hexsha": "87db924a020497a236c410e828d5029c334032f5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5874587459, "max_line_length": 79, "alphanum_fraction": 0.6334245638, "include": true, "reason": "import numpy", "num_tokens": 2992}
|
import numpy as np
import wave
import math
import os
def nextpow2(n):
    """Return the exponent p of the next power of two, i.e. the smallest
    integer p with 2 ** p >= |n|."""
    exponent = np.log2(np.abs(n))
    return np.ceil(exponent).astype("long")
def berouti(SNR):
    """Over-subtraction factor for power spectral subtraction (exponent 2).

    Decreases linearly from 5 (very noisy frames) to 1 (clean frames) as
    the segmental SNR grows, saturating outside the [-5, 20] dB range.
    """
    if SNR < -5.0:
        return 5
    if SNR > 20:
        return 1
    return 4 - SNR * 3 / 20
def berouti1(SNR):
    """Over-subtraction factor for magnitude spectral subtraction (exponent 1).

    Decreases linearly from 4 (very noisy frames) to 1 (clean frames) as
    the segmental SNR grows, saturating outside the [-5, 20] dB range.
    """
    if SNR < -5.0:
        return 4
    if SNR > 20:
        return 1
    return 3 - SNR * 2 / 20
def find_index(x_list):
    """Return the indices of all strictly negative entries of ``x_list``.

    Parameters
    ----------
    x_list : sequence of numbers (or 1-D array)

    Return
    ------
    list of int, positions i such that x_list[i] < 0, in increasing order
    """
    # comprehension replaces the manual append loop (same order, same result)
    return [i for i, value in enumerate(x_list) if value < 0]
def denoise(audio_file, input_path, output_path):
    """Denoise a WAV file by spectral (over-)subtraction and save the result.

    Reads ``<input_path>/<audio_file>``, estimates the noise magnitude
    spectrum from the first 5 frames (assumed to be noise/silence), applies
    Berouti-style over-subtraction frame by frame on 50%-overlapping
    Hamming-windowed 20 ms frames, and writes the reconstructed signal to
    ``<output_path>/<name>-denoised.wav``.

    NOTE(review): assumes the input samples are 16-bit PCM (``np.short``) —
    confirm; multi-channel input would be read as one interleaved array.

    Parameters
    ----------
    audio_file : str, file name (with extension) of the input WAV file
    input_path : str, directory containing ``audio_file``
    output_path : str, directory where the denoised file is written
    """
    file_name = os.path.basename(audio_file).split(".")[0]
    input_file_path = os.path.join(input_path, audio_file)
    output_file_path = os.path.join(output_path, file_name)
    with wave.open(input_file_path, 'r') as f:
        # (nchannels, sampwidth, framerate, nframes, comptype, compname)
        params = f.getparams()
        nchannels, sampwidth, framerate, nframes = params[:4]
        str_data = f.readframes(nframes)
    # interpret raw bytes as 16-bit signed samples
    x = np.frombuffer(str_data, dtype=np.short)
    # frame length: 20 ms worth of samples
    len_ = 20 * framerate // 1000
    len1 = len_ * 50 // 100
    # The percentage of the frame that the window overlaps
    len2 = len_ - len1
    # segmental-SNR threshold (dB) below which the noise estimate is updated
    Thres = 3
    # spectral exponent: 2.0 selects power spectral subtraction
    Expnt = 2.0
    # spectral floor factor (lower bound relative to the noise spectrum)
    beta = 0.002
    # smoothing factor of the running noise-spectrum update
    G = 0.9
    win = np.hamming(len_)
    # normalization gain for overlap+add with 50% overlap
    winGain = len2 / sum(win)
    # Noise magnitude calculations - assuming that the first 5 frames is noise/silence
    nFFT = 2 * 2 ** (nextpow2(len_))
    noise_mean = np.zeros(nFFT)
    j = 0
    for k in range(1, 6):
        noise_mean = noise_mean + abs(np.fft.fft(win * x[j : j + len_], nFFT))
        j = j + len_
    noise_mu = noise_mean / 5
    # --- allocate memory and initialize various variables
    k = 1
    img = 1j
    x_old = np.zeros(len1)
    Nframes = len(x) // len2 - 1
    xfinal = np.zeros(Nframes * len2)
    # Start Processing
    for n in range(0, Nframes):
        # Windowing
        insign = win * x[k - 1 : k + len_ - 1]
        # compute fourier transform of a frame
        spec = np.fft.fft(insign, nFFT)
        # compute the magnitude
        sig = abs(spec)
        # save the noisy phase information
        theta = np.angle(spec)
        # segmental SNR of this frame w.r.t. the current noise estimate (dB)
        SNRseg = 10 * np.log10(
            np.linalg.norm(sig, 2) ** 2 / np.linalg.norm(noise_mu, 2) ** 2
        )
        # over-subtraction factor depends on SNR and on the exponent in use
        if Expnt == 1.0:
            alpha = berouti1(SNRseg)
        else:
            alpha = berouti(SNRseg)
        sub_speech = sig ** Expnt - alpha * noise_mu ** Expnt
        # floor the bins that were over-subtracted below beta * noise power
        diffw = sub_speech - beta * noise_mu ** Expnt
        z = find_index(diffw)
        if len(z) > 0:
            sub_speech[z] = beta * noise_mu[z] ** Expnt
        # implement a simple VAD detector
        if SNRseg < Thres: # Update noise spectrum
            noise_temp = G * noise_mu ** Expnt + (1 - G) * sig ** Expnt
            noise_mu = noise_temp ** (1 / Expnt)
        # enforce conjugate symmetry so the IFFT below is (nearly) real
        sub_speech[nFFT // 2 + 1 : nFFT] = np.flipud(sub_speech[1 : nFFT // 2])
        # recombine the cleaned magnitude with the original noisy phase
        x_phase = (sub_speech ** (1 / Expnt)) * (
            np.array([math.cos(x) for x in theta])
            + img * (np.array([math.sin(x) for x in theta]))
        )
        # take the IFFT
        xi = np.fft.ifft(x_phase).real
        # Overlap and add
        xfinal[k - 1 : k + len2 - 1] = x_old + xi[0:len1]
        x_old = xi[0 + len1 : len_]
        k = k + len2
    with wave.open(output_file_path + "-denoised.wav", "wb") as wf:
        wf.setparams(params)
        wave_data = (winGain * xfinal).astype(np.short)
        wf.writeframes(wave_data.tobytes())
        print(f"writing to {output_file_path}-denoised.wav")
|
{"hexsha": "e9294175d00bdfe29423c677e210b7e6b131fc92", "size": 3870, "ext": "py", "lang": "Python", "max_stars_repo_path": "speaker_verification/denoise.py", "max_stars_repo_name": "jakerenzella/speaker-verification", "max_stars_repo_head_hexsha": "b40635a11295f5113d5d721001cfdaa105209bd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "speaker_verification/denoise.py", "max_issues_repo_name": "jakerenzella/speaker-verification", "max_issues_repo_head_hexsha": "b40635a11295f5113d5d721001cfdaa105209bd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "speaker_verification/denoise.py", "max_forks_repo_name": "jakerenzella/speaker-verification", "max_forks_repo_head_hexsha": "b40635a11295f5113d5d721001cfdaa105209bd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-29T03:03:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-29T03:03:37.000Z", "avg_line_length": 29.3181818182, "max_line_length": 87, "alphanum_fraction": 0.5341085271, "include": true, "reason": "import numpy", "num_tokens": 1149}
|
# Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
# reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
# a collaborative effort of two U.S. Department of Energy organizations (Office
# of Science and the National Nuclear Security Administration) responsible for
# the planning and preparation of a capable exascale ecosystem, including
# software, applications, hardware, advanced system engineering and early
# testbed platforms, in support of the nation's exascale computing imperative.
# @file
# Test Ceed ElemRestriction functionality
import os
import libceed
import numpy as np
import check
# -------------------------------------------------------------------------------
# Test creation, use, and destruction of an element restriction
# -------------------------------------------------------------------------------
def test_200(ceed_resource):
    """Create, apply, and destroy a basic element restriction."""
    ceed = libceed.Ceed(ceed_resource)
    num_elem = 3
    x = ceed.Vector(num_elem + 1)
    a = np.arange(10, 10 + num_elem + 1, dtype="float64")
    x.set_array(a, cmode=libceed.USE_POINTER)
    # connectivity: element e touches nodes (e, e + 1)
    ind = np.empty(2 * num_elem, dtype="int32")
    for e in range(num_elem):
        ind[2 * e] = e
        ind[2 * e + 1] = e + 1
    r = ceed.ElemRestriction(num_elem, 2, 1, 1, num_elem + 1, ind,
                             cmode=libceed.USE_POINTER)
    y = ceed.Vector(2 * num_elem)
    y.set_value(0)
    r.apply(x, y)
    with y.array_read() as y_array:
        for i, value in enumerate(y_array):
            assert value == 10 + (i + 1) // 2
# -------------------------------------------------------------------------------
# Test creation, use, and destruction of a strided element restriction
# -------------------------------------------------------------------------------
def test_201(ceed_resource):
    """Create, apply, and destroy a strided element restriction."""
    ceed = libceed.Ceed(ceed_resource)
    num_elem = 3
    x = ceed.Vector(2 * num_elem)
    a = np.arange(10, 10 + 2 * num_elem, dtype="float64")
    x.set_array(a, cmode=libceed.USE_POINTER)
    strides = np.array([1, 2, 2], dtype="int32")
    r = ceed.StridedElemRestriction(num_elem, 2, 1, 2 * num_elem, strides)
    y = ceed.Vector(2 * num_elem)
    y.set_value(0)
    r.apply(x, y)
    with y.array_read() as y_array:
        for i, value in enumerate(y_array):
            assert value == 10 + i
# -------------------------------------------------------------------------------
# Test creation and destruction of a blocked element restriction
# -------------------------------------------------------------------------------
def test_202(ceed_resource, capsys):
    """Create and apply a blocked element restriction, comparing printed
    output against the reference."""
    ceed = libceed.Ceed(ceed_resource)
    num_elem = 8
    blksize = 5
    x = ceed.Vector(num_elem + 1)
    a = np.arange(10, 10 + num_elem + 1, dtype="float64")
    x.set_array(a, cmode=libceed.USE_POINTER)
    # connectivity: element e touches nodes (e, e + 1)
    ind = np.empty(2 * num_elem, dtype="int32")
    for e in range(num_elem):
        ind[2 * e] = e
        ind[2 * e + 1] = e + 1
    r = ceed.BlockedElemRestriction(num_elem, 2, blksize, 1, 1, num_elem + 1,
                                    ind, cmode=libceed.USE_POINTER)
    y = ceed.Vector(2 * blksize * 2)
    y.set_value(0)
    r.apply(x, y)
    print(y)
    x.set_value(0)
    r.T.apply(y, x)
    print(x)
    stdout, stderr, ref_stdout = check.output(capsys)
    assert not stderr
    assert stdout == ref_stdout
# -------------------------------------------------------------------------------
# Test creation, use, and destruction of a blocked element restriction
# -------------------------------------------------------------------------------
def test_208(ceed_resource, capsys):
    """Apply a single block of a blocked element restriction, comparing
    printed output against the reference."""
    ceed = libceed.Ceed(ceed_resource)
    num_elem = 8
    blksize = 5
    x = ceed.Vector(num_elem + 1)
    a = np.arange(10, 10 + num_elem + 1, dtype="float64")
    x.set_array(a, cmode=libceed.USE_POINTER)
    # connectivity: element e touches nodes (e, e + 1)
    ind = np.empty(2 * num_elem, dtype="int32")
    for e in range(num_elem):
        ind[2 * e] = e
        ind[2 * e + 1] = e + 1
    r = ceed.BlockedElemRestriction(num_elem, 2, blksize, 1, 1, num_elem + 1,
                                    ind, cmode=libceed.USE_POINTER)
    y = ceed.Vector(blksize * 2)
    y.set_value(0)
    r.apply_block(1, x, y)
    print(y)
    x.set_value(0)
    r.T.apply_block(1, y, x)
    print(x)
    stdout, stderr, ref_stdout = check.output(capsys)
    assert not stderr
    assert stdout == ref_stdout
# -------------------------------------------------------------------------------
# Test getting the multiplicity of the indices in an element restriction
# -------------------------------------------------------------------------------
def test_209(ceed_resource):
    """Check the node multiplicity computed from an element restriction."""
    ceed = libceed.Ceed(ceed_resource)
    num_elem = 3
    # each element uses 4 consecutive nodes; neighbors share one node
    ind = np.empty(4 * num_elem, dtype="int32")
    for e in range(num_elem):
        for node in range(4):
            ind[4 * e + node] = 3 * e + node
    r = ceed.ElemRestriction(num_elem, 4, 1, 1, 3 * num_elem + 1, ind,
                             cmode=libceed.USE_POINTER)
    mult = r.get_multiplicity()
    with mult.array_read() as mult_array:
        for i in range(3 * num_elem + 1):
            # interior nodes on element boundaries belong to two elements
            shared = 0 < i < 3 * num_elem and i % 3 == 0
            expected = 2 if shared else 1
            assert expected == mult_array[i]
# -------------------------------------------------------------------------------
# Test creation and view of an element restriction
# -------------------------------------------------------------------------------
def test_210(ceed_resource, capsys):
    """View an element restriction and compare against reference output."""
    ceed = libceed.Ceed(ceed_resource)
    num_elem = 3
    # connectivity: element e touches nodes (e, e + 1)
    ind = np.empty(2 * num_elem, dtype="int32")
    for e in range(num_elem):
        ind[2 * e] = e
        ind[2 * e + 1] = e + 1
    r = ceed.ElemRestriction(num_elem, 2, 1, 1, num_elem + 1, ind,
                             cmode=libceed.USE_POINTER)
    print(r)
    stdout, stderr, ref_stdout = check.output(capsys)
    assert not stderr
    assert stdout == ref_stdout
# -------------------------------------------------------------------------------
# Test creation and view of a strided element restriction
# -------------------------------------------------------------------------------
def test_211(ceed_resource, capsys):
    """View a strided element restriction and compare against reference
    output."""
    ceed = libceed.Ceed(ceed_resource)
    num_elem = 3
    strides = np.array([1, 2, 2], dtype="int32")
    r = ceed.StridedElemRestriction(num_elem, 2, 1, num_elem + 1, strides)
    print(r)
    stdout, stderr, ref_stdout = check.output(capsys)
    assert not stderr
    assert stdout == ref_stdout
# -------------------------------------------------------------------------------
# Test creation and view of a blocked strided element restriction
# -------------------------------------------------------------------------------
def test_212(ceed_resource, capsys):
    """View a blocked strided element restriction and compare against
    reference output."""
    ceed = libceed.Ceed(ceed_resource)
    num_elem = 3
    strides = np.array([1, 2, 2], dtype="int32")
    r = ceed.BlockedStridedElemRestriction(num_elem, 2, 2, 1, num_elem + 1,
                                           strides)
    print(r)
    stdout, stderr, ref_stdout = check.output(capsys)
    assert not stderr
    assert stdout == ref_stdout
# -------------------------------------------------------------------------------
|
{"hexsha": "95de43d5bf868045adfb851411e7c82b38c70b68", "size": 7367, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/tests/test-2-elemrestriction.py", "max_stars_repo_name": "DiffeoInvariant/libCEED", "max_stars_repo_head_hexsha": "fef6d6185073a4ded914e81d25fd2b60cc92d311", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/tests/test-2-elemrestriction.py", "max_issues_repo_name": "DiffeoInvariant/libCEED", "max_issues_repo_head_hexsha": "fef6d6185073a4ded914e81d25fd2b60cc92d311", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/tests/test-2-elemrestriction.py", "max_forks_repo_name": "DiffeoInvariant/libCEED", "max_forks_repo_head_hexsha": "fef6d6185073a4ded914e81d25fd2b60cc92d311", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-30T23:13:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-30T23:13:18.000Z", "avg_line_length": 30.316872428, "max_line_length": 81, "alphanum_fraction": 0.5022397177, "include": true, "reason": "import numpy", "num_tokens": 1905}
|
import os
import numpy as np
from collections import OrderedDict
from ..utils import transform_utils as T
from ..models.grippers import gripper_factory
from ..controllers import controller_factory, load_controller_config
from .robot import Robot
class Bimanual(Robot):
    """Initializes a bimanual robot, as defined by a single corresponding XML"""

    def __init__(
        self,
        robot_type: str,
        idn=0,
        controller_config=None,
        initial_qpos=None,
        initialization_noise=None,
        gripper_type="default",
        gripper_visualization=False,
        control_freq=10
    ):
        """
        Args:
            robot_type (str): Specification for specific robot arm to be instantiated within this env (e.g: "Panda")
            idn (int or str): Unique ID of this robot. Should be different from others
            controller_config (dict or list of dict): If set, contains relevant controller parameters for creating
                custom controllers. Else, uses the default controller for this specific task. Should either be single
                dict if same controller is to be used for both robot arms or else it should be a list of length 2.
                NOTE: In the latter case, assumes convention of [right, left]
            initial_qpos (sequence of float): If set, determines the initial joint positions of the robot to be
                instantiated for the task
            initialization_noise (float): The scale factor of uni-variate Gaussian random noise
                applied to each of a robot's given initial joint positions. Setting this value to "None" or 0.0 results
                in no noise being applied
            gripper_type (str or list of str): type of gripper, used to instantiate
                gripper models from gripper factory. Default is "default", which is the default gripper associated
                within the 'robot' specification. None removes the gripper, and any other (valid) model overrides the
                default gripper. Should either be single str if same gripper type is to be used for both arms or else
                it should be a list of length 2
                NOTE: In the latter case, assumes convention of [right, left]
            gripper_visualization (bool or list of bool): True if using gripper visualization.
                Useful for teleoperation. Should either be single bool if gripper visualization is to be used for both
                arms or else it should be a list of length 2
                NOTE: In the latter case, assumes convention of [right, left]
            control_freq (float): how many control signals to receive
                in every second. This sets the amount of simulation time
                that passes between every action input.
        """
        # per-arm dicts keyed by "right"/"left" (see _input2dict)
        self.controller = self._input2dict(None)
        self.controller_config = self._input2dict(controller_config)
        self.gripper = self._input2dict(None)
        self.gripper_type = self._input2dict(gripper_type)
        self.has_gripper = self._input2dict([gripper_type is not None for _, gripper_type in self.gripper_type.items()])
        self.gripper_visualization = self._input2dict(gripper_visualization)
        self.control_freq = control_freq

        self.gripper_joints = self._input2dict(None)                        # xml joint names for gripper
        self._ref_gripper_joint_pos_indexes = self._input2dict(None)        # xml gripper joint position indexes in mjsim
        self._ref_gripper_joint_vel_indexes = self._input2dict(None)        # xml gripper joint velocity indexes in mjsim
        self._ref_joint_gripper_actuator_indexes = self._input2dict(None)   # xml gripper (pos) actuator indexes for robot in mjsim
        self.eef_site_id = self._input2dict(None)                           # xml element id for eef in mjsim
        self.eef_cylinder_id = self._input2dict(None)                       # xml element id for eef cylinder in mjsim
        self.torques = None                                                 # Current torques being applied

        super().__init__(
            robot_type=robot_type,
            idn=idn,
            initial_qpos=initial_qpos,
            initialization_noise=initialization_noise,
        )

    def _load_controller(self):
        """
        Loads controller to be used for dynamic trajectories
        """
        # Flag for loading urdf once (only applicable for IK controllers)
        urdf_loaded = False

        # Load controller configs for both left and right arm
        for arm in self.arms:
            # First, load the default controller if none is specified
            if not self.controller_config[arm]:
                # Need to update default for a single agent
                controller_path = os.path.join(os.path.dirname(__file__), '..',
                                               'controllers/config/{}.json'.format(
                                                   self.robot_model.default_controller_config[arm]))
                self.controller_config[arm] = load_controller_config(custom_fpath=controller_path)

            # Assert that the controller config is a dict file:
            #             NOTE: "type" must be one of: {JOINT_IMP, JOINT_TOR, JOINT_VEL, EE_POS, EE_POS_ORI, EE_IK}
            assert type(self.controller_config[arm]) == dict, \
                "Inputted controller config must be a dict! Instead, got type: {}".format(
                    type(self.controller_config[arm]))

            # Add to the controller dict additional relevant params:
            #   the robot name, mujoco sim, eef_name, actuator_range, joint_indexes, timestep (model) freq,
            #   policy (control) freq, and ndim (# joints)
            self.controller_config[arm]["robot_name"] = self.name
            self.controller_config[arm]["sim"] = self.sim
            self.controller_config[arm]["eef_name"] = self.robot_model.eef_name[arm]
            self.controller_config[arm]["ndim"] = self._joint_split_idx
            self.controller_config[arm]["policy_freq"] = self.control_freq
            # joint indexes for the right arm come first, then the left arm
            (start, end) = (None, self._joint_split_idx) if arm == "right" else (self._joint_split_idx, None)
            self.controller_config[arm]["joint_indexes"] = {
                "joints": self.joint_indexes[start:end],
                "qpos": self._ref_joint_pos_indexes[start:end],
                "qvel": self._ref_joint_vel_indexes[start:end]
            }
            self.controller_config[arm]["actuator_range"] = (self.torque_limits[0][start:end],
                                                             self.torque_limits[1][start:end])

            # Only load urdf the first time this controller gets called
            self.controller_config[arm]["load_urdf"] = True if not urdf_loaded else False
            urdf_loaded = True

            # Instantiate the relevant controller
            self.controller[arm] = controller_factory(self.controller_config[arm]["type"], self.controller_config[arm])

    def load_model(self):
        """
        Loads robot and optionally add grippers.
        """
        # First, run the superclass method to load the relevant model
        super().load_model()

        # Verify that the loaded model is of the correct type for this robot
        if self.robot_model.arm_type != "bimanual":
            raise TypeError("Error loading robot model: Incompatible arm type specified for this robot. "
                            "Requested model arm type: {}, robot arm type: {}"
                            .format(self.robot_model.arm_type, type(self)))

        # Now, load the gripper if necessary
        for arm in self.arms:
            if self.has_gripper[arm]:
                if self.gripper_type[arm] == 'default':
                    # Load the default gripper from the robot file
                    self.gripper[arm] = gripper_factory(self.robot_model.gripper[arm],
                                                        idn="_".join((str(self.idn), arm)))
                else:
                    # Load user-specified gripper
                    self.gripper[arm] = gripper_factory(self.gripper_type[arm])
                if not self.gripper_visualization[arm]:
                    self.gripper[arm].hide_visualization()
                self.robot_model.add_gripper(self.gripper[arm], self.robot_model.eef_name[arm])

    def reset(self, deterministic=False):
        """
        Sets initial pose of arm and grippers. Overrides gripper joint configuration if we're using a
        deterministic reset (e.g.: hard reset from xml file)
        """
        # First, run the superclass method to reset the position and controller
        super().reset(deterministic)

        if not deterministic:
            # Now, reset the gripper if necessary
            for arm in self.arms:
                if self.has_gripper[arm]:
                    self.sim.data.qpos[
                        self._ref_gripper_joint_pos_indexes[arm]
                    ] = self.gripper[arm].init_qpos

        for arm in self.arms:
            # Update base pos / ori references in controller (technically only needs to be called once)
            self.controller[arm].update_base_pose(self.base_pos, self.base_ori)

    def setup_references(self):
        """
        Sets up necessary reference for robots, grippers, and objects.
        """
        # First, run the superclass method to setup references for joint-related values / indexes
        super().setup_references()

        # Now, add references to gripper if necessary
        # indices for grippers in qpos, qvel
        for arm in self.arms:
            if self.has_gripper[arm]:
                self.gripper_joints[arm] = list(self.gripper[arm].joints)
                self._ref_gripper_joint_pos_indexes[arm] = [
                    self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_joints[arm]
                ]
                self._ref_gripper_joint_vel_indexes[arm] = [
                    self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_joints[arm]
                ]
                self._ref_joint_gripper_actuator_indexes[arm] = [
                    self.sim.model.actuator_name2id(actuator)
                    for actuator in self.gripper[arm].actuators
                ]

                # IDs of sites for gripper visualization
                self.eef_site_id[arm] = self.sim.model.site_name2id(
                    self.gripper[arm].visualization_sites["grip_site"])
                self.eef_cylinder_id[arm] = self.sim.model.site_name2id(
                    self.gripper[arm].visualization_sites["grip_cylinder"])

    def control(self, action, policy_step=False):
        """
        Actuate the robot with the
        passed joint velocities and gripper control.

        Args:
            action (numpy array): The control to apply to the robot. The first
                @self.robot_model.dof dimensions should be the desired
                normalized joint velocities and if the robot has
                a gripper, the next @self.gripper.dof dimensions should be
                actuation controls for the gripper.

                NOTE: Assumes inputted actions are of form:
                    [right_arm_control, right_gripper_control, left_arm_control, left_gripper_control]

            policy_step (bool): Whether a new policy step (action) is being taken
        """
        # clip actions into valid range
        assert len(action) == self.action_dim, \
            "environment got invalid action dimension -- expected {}, got {}".format(
                self.action_dim, len(action))

        self.torques = np.array([])
        # Now execute actions for each arm
        for arm in self.arms:
            # Make sure to split action space correctly
            (start, end) = (None, self._action_split_idx) if arm == "right" else (self._action_split_idx, None)
            sub_action = action[start:end]

            gripper_action = None
            if self.has_gripper[arm]:
                # get all indexes past controller dimension indexes
                gripper_action = sub_action[self.controller[arm].control_dim:]
                sub_action = sub_action[:self.controller[arm].control_dim]

            # Update model in controller
            self.controller[arm].update()

            # Update the controller goal if this is a new policy step
            if policy_step:
                self.controller[arm].set_goal(sub_action)

            # Now run the controller for a step and add it to the torques
            self.torques = np.concatenate((self.torques, self.controller[arm].run_controller()))

            # Get gripper action, if applicable
            if self.has_gripper[arm]:
                gripper_action_actual = self.gripper[arm].format_action(gripper_action)
                # rescale normalized gripper action to control ranges
                ctrl_range = self.sim.model.actuator_ctrlrange[self._ref_joint_gripper_actuator_indexes[arm]]
                bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])
                weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])
                applied_gripper_action = bias + weight * gripper_action_actual
                self.sim.data.ctrl[self._ref_joint_gripper_actuator_indexes[arm]] = applied_gripper_action

        # Clip the torques
        low, high = self.torque_limits
        self.torques = np.clip(self.torques, low, high)

        # Apply joint torque control
        self.sim.data.ctrl[self._ref_joint_torq_actuator_indexes] = self.torques

    def visualize_gripper(self):
        """
        Do any needed visualization here.

        NOTE(review): this method was previously named ``gripper_visualization``,
        which collided with the instance attribute of the same name set in
        ``__init__`` — the dict attribute shadowed the method, making it
        uncallable on instances. Renamed to resolve the collision; the
        attribute keeps its original name.
        """
        for arm in self.arms:
            if self.gripper_visualization[arm]:
                # By default, don't do any coloring.
                self.sim.model.site_rgba[self.eef_site_id[arm]] = [0., 0., 0., 0.]

    def get_observations(self, di: OrderedDict):
        """
        Returns an OrderedDict containing robot observations [(name_string, np.array), ...].

        Important keys:
            robot-state: contains robot-centric information.
        """
        # Get prefix from robot model to avoid naming clashes for multiple robots
        pf = self.robot_model.naming_prefix

        # proprioceptive features
        di[pf + "joint_pos"] = np.array(
            [self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes]
        )
        di[pf + "joint_vel"] = np.array(
            [self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes]
        )

        robot_states = [
            np.sin(di[pf + "joint_pos"]),
            np.cos(di[pf + "joint_pos"]),
            di[pf + "joint_vel"],
        ]

        for arm in self.arms:
            if self.has_gripper[arm]:
                di[pf + "_{}_".format(arm) + "gripper_qpos"] = np.array(
                    [self.sim.data.qpos[x] for x in self._ref_gripper_joint_pos_indexes[arm]]
                )
                di[pf + "_{}_".format(arm) + "gripper_qvel"] = np.array(
                    [self.sim.data.qvel[x] for x in self._ref_gripper_joint_vel_indexes[arm]]
                )
                di[pf + "_{}_".format(arm) + "eef_pos"] = np.array(self.sim.data.site_xpos[self.eef_site_id[arm]])
                di[pf + "_{}_".format(arm) + "eef_quat"] = T.convert_quat(
                    self.sim.data.get_body_xquat(self.robot_model.eef_name[arm]), to="xyzw"
                )

                # add in gripper information
                robot_states.extend([di[pf + "_{}_".format(arm) + "gripper_qpos"],
                                     di[pf + "_{}_".format(arm) + "eef_pos"],
                                     di[pf + "_{}_".format(arm) + "eef_quat"]])

        di[pf + "robot-state"] = np.concatenate(robot_states)
        return di

    def _input2dict(self, inp):
        """
        Helper function that converts an input that is either a single value or a list into a dict with keys for
        each arm: "right", "left"

        @inp (str or list): Input value to be converted to dict

        Note: If inp is a list, then assumes format is [right, left]
        """
        # First, convert to list if necessary
        if type(inp) is not list:
            inp = [inp for _ in range(2)]

        # Now, convert list to dict and return
        return {key: value for key, value in zip(self.arms, inp)}

    @property
    def arms(self):
        """
        Returns name of arms used as naming convention throughout this module
        """
        return "right", "left"

    @property
    def action_limits(self):
        """
        Action lower/upper limits per dimension.
        """
        # Action limits based on controller limits
        low, high = [], []
        for arm in self.arms:
            low_g, high_g = ([-1] * self.gripper[arm].dof, [1] * self.gripper[arm].dof) \
                if self.has_gripper[arm] else ([], [])
            low, high = np.concatenate([low, self.controller[arm].input_min, low_g]), \
                np.concatenate([high, self.controller[arm].input_max, high_g])
        return low, high

    @property
    def torque_limits(self):
        """
        Torque lower/upper limits per dimension.
        """
        # Torque limit values pulled from relevant robot.xml file
        low = self.sim.model.actuator_ctrlrange[self._ref_joint_torq_actuator_indexes, 0]
        high = self.sim.model.actuator_ctrlrange[self._ref_joint_torq_actuator_indexes, 1]
        return low, high

    @property
    def action_dim(self):
        """
        Action space dimension for this robot (controller dimension + gripper dof)
        """
        dim = 0
        for arm in self.arms:
            dim += self.controller[arm].control_dim + self.gripper[arm].dof if \
                self.has_gripper[arm] else self.controller[arm].control_dim
        return dim

    @property
    def dof(self):
        """
        Returns the DoF of the robot (with grippers).
        """
        # Get the dof of the base robot model
        dof = super().dof
        for arm in self.arms:
            if self.has_gripper[arm]:
                dof += self.gripper[arm].dof
        return dof

    # NOTE: the per-arm accessors below take an `arm` argument, so they are
    # plain methods, NOT properties. They were previously decorated with
    # @property, which made internal calls such as `self._hand_pose(arm)`
    # attempt to call the property's *return value* (a numpy array) and crash
    # with a TypeError. See the author's own "#@property ... Todo" note on
    # _hand_total_velocity below.

    def _hand_pose(self, arm="right"):
        """
        Returns eef pose in base frame of robot.
        """
        return self.pose_in_base_from_name(self.robot_model.eef_name[arm])

    def _right_hand_quat(self, arm="right"):
        """
        Returns eef quaternion in base frame of robot.
        """
        return T.mat2quat(self._hand_orn(arm))

    #@property
    # Todo: maybe don't use properties for bimanual? or figure out another clean way to encapsulate all these values for r/l
    def _hand_total_velocity(self, arm="right"):
        """
        Returns the total eef velocity (linear + angular) in the base frame
        as a numpy array of shape (6,)
        """
        # Determine correct start, end points based on arm
        (start, end) = (None, self._joint_split_idx) if arm == "right" else (self._joint_split_idx, None)

        # Use jacobian to translate joint velocities to end effector velocities.
        Jp = self.sim.data.get_body_jacp(self.robot_model.eef_name[arm]).reshape((3, -1))
        Jp_joint = Jp[:, self._ref_joint_vel_indexes[start:end]]

        Jr = self.sim.data.get_body_jacr(self.robot_model.eef_name[arm]).reshape((3, -1))
        Jr_joint = Jr[:, self._ref_joint_vel_indexes[start:end]]

        eef_lin_vel = Jp_joint.dot(self._joint_velocities)
        eef_rot_vel = Jr_joint.dot(self._joint_velocities)
        return np.concatenate([eef_lin_vel, eef_rot_vel])

    def _hand_pos(self, arm="right"):
        """
        Returns position of eef in base frame of robot.
        """
        eef_pose_in_base = self._hand_pose(arm)
        return eef_pose_in_base[:3, 3]

    def _hand_orn(self, arm="right"):
        """
        Returns orientation of eef in base frame of robot as a rotation matrix.
        """
        eef_pose_in_base = self._hand_pose(arm)
        return eef_pose_in_base[:3, :3]

    def _hand_vel(self, arm="right"):
        """
        Returns velocity of eef in base frame of robot.
        """
        return self._hand_total_velocity(arm)[:3]

    def _hand_ang_vel(self, arm="right"):
        """
        Returns angular velocity of eef in base frame of robot.
        """
        return self._hand_total_velocity(arm)[3:]

    @property
    def _action_split_idx(self):
        """
        Returns the index that correctly splits the right arm from the left arm actions

        NOTE: Assumes inputted actions are of form:
            [right_arm_control, right_gripper_control, left_arm_control, left_gripper_control]
        """
        return self.controller["right"].control_dim + self.gripper["right"].dof if self.has_gripper["right"] \
            else self.controller["right"].control_dim

    @property
    def _joint_split_idx(self):
        """
        Returns the index that correctly splits the right arm joints from the left arm joints
        """
        return int(len(self.robot_joints) / 2)
|
{"hexsha": "2aaf842ca65ada7ff3f1e39944050d538360ee4b", "size": 21295, "ext": "py", "lang": "Python", "max_stars_repo_path": "robosuite/robots/bimanual.py", "max_stars_repo_name": "StanfordVL/Lasersuite", "max_stars_repo_head_hexsha": "8b78c3d202f2a4b8712c5f228feaf5fae61f16e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-08-09T16:47:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-06T05:43:12.000Z", "max_issues_repo_path": "robosuite/robots/bimanual.py", "max_issues_repo_name": "StanfordVL/Lasersuite", "max_issues_repo_head_hexsha": "8b78c3d202f2a4b8712c5f228feaf5fae61f16e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-06T06:31:08.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-06T06:31:08.000Z", "max_forks_repo_path": "robosuite/robots/bimanual.py", "max_forks_repo_name": "StanfordVL/Lasersuite", "max_forks_repo_head_hexsha": "8b78c3d202f2a4b8712c5f228feaf5fae61f16e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.2123142251, "max_line_length": 135, "alphanum_fraction": 0.6036158723, "include": true, "reason": "import numpy", "num_tokens": 4676}
|
!-------------------------------------------------------------------------------
! The @header, @table_section, @table_subsection, @item and @end_table commands
! are custom defined commands in Doxygen.in. They are defined under ALIASES.
! For the page created here, the 80 column limit is exceeded. Arguments of
! aliases are separated by ','. If you intended ',' to be a string you must use
! an escaped comma '\,'.
!
!> @page siesta_namelist_sec Namelist siesta_main_nli definition
!>
!> @tableofcontents
!> @section siesta_namelist_intro_sec Introduction
!> This page documents the contents of a SIESTA namelist input file. SIESTA
!> namelist variables are defined in the @fixed_width{siesta_info} common
!> block.
!>
!> @section siesta_namelist_var_sec Namelist Variables
!> @header{Input variable, Description, Code Reference}
!>
!> @table_section{siesta_flag_sec, Control Flags}
!> @item{ladd_pert, Use helical perturbation., siesta_namelist::ladd_pert}
!> @item{lresistive, Use resistive perturbation., siesta_namelist::lresistive}
!> @item{lrestart, Restart SIESTA from previous run., siesta_namelist::lrestart}
!> @item{l_tracing, Produce output file for fieldline tracing., siesta_namelist::l_tracing}
!> @item{lcolscale, Use column scaling., shared_data::lcolscale}
!> @item{l_silo_output, Produce silo output., siesta_namelist::l_silo_output}
!> @item{l_silo3D, Produce silo 3D output., siesta_namelist::l_silo3d}
!> @item{l_output_alliter, Write output files on all iterations instead of
!> only iterations that lower the MHD energy and
!> force residuals., siesta_namelist::l_output_alliter}
!> @item{l_VMEC_Uniform, UNKNOWN, siesta_namelist::l_vmec_uniform}
!> @item{lasym, Use non stellarator symmetric terms., shared_data::lasym}
!> @item{l_vessel, Use free-boundary grid extension to vessel wall., siesta_namelist::l_vessel}
!> @item{lrecon, Add additional output to the restart file when
!> used in a reconstruction context.
!> DEPRECATED, shared_data::lrecon}
!> @end_table
!>
!> @table_section{siesta_algrothim_sec, Algorithm Control Variables}
!> @item{niter, Maximum number of iterations after diagonal prec., siesta_namelist::niter}
!> @item{ftol, Minimum force tolerance for converged solution., siesta_namelist::ftol}
!> @item{mupar, Resistivity factor., hessian::mupar}
!> @item{levmarq_param, Initial value of Levenberg-Marquardt parameter., hessian::levmarq_param}
!> @item{eta_factor, Resistivity value., siesta_namelist::eta_factor}
!> @item{nprecon, Skip diagonal preconditioner if greater than zero., siesta_namelist::nprecon}
!> @item{ngmres_type, Preconditioner type., shared_data::ngmres_type}
!> @item{iortho, UNKNOWN, shared_data::iortho}
!> @end_table
!>
!> @table_section{siesta_island_sec, Island Parameters}
!> @item{mres, M numbers of island resonances., siesta_namelist::mres}
!> @item{helpert, Sizes of the helical perturbation., siesta_namelist::helpert}
!> @end_table
!>
!> @table_section{siesta_grid_size_sec, Grid Sizes}
!> @item{nsin, Size of plasma radial grid., siesta_namelist::nsin}
!> @item{nsin_ext, Size of extended radial grid., siesta_namelist::nsin_ext}
!> @item{mpolin, Number of poloidal modes., siesta_namelist::mpolin}
!> @item{ntorin, Number of toroidal modes., siesta_namelist::ntorin}
!> @item{nfpin, Number of field periods to use. Setting this to
!> anything less than one will use the value form the wout
!> file., siesta_namelist::nfpin}
!> @table_subsection{siesta_grid_size_out_sec, Output Grid Sizes}
!> @item{nphis, Number of cylindrical phi planes., siesta_namelist::nphis}
!> @item{nrs, Number of radial grid points., siesta_namelist::nrs}
!> @item{nzs, Number of vertical grid points., siesta_namelist::nzs}
!> @item{nvs, Number of flux space toroidal points., siesta_namelist::nvs}
!> @item{nus, Number of flux space poloidal points., siesta_namelist::nus}
!> @item{nss, Number of flux space radial points., siesta_namelist::nss}
!> @end_table
!>
!> @table_section{siesta_file_name_sec, File Names}
!> @item{wout_file, Filename of the VMEC woutfile., siesta_namelist::wout_file}
!> @item{restart_ext, Name of the restart file extension., siesta_namelist::restart_ext}
!> @item{mgrid_file, Filename of the MGRID file., siesta_namelist::mgrid_file}
!> @item{vessel_file, Filename of the extended surfaces., siesta_namelist::vessel_file}
!> @end_table
!>
!> @table_section{siesta_test_sec, Test Controls}
!> @item{hesspass_test, UNKNOWN, shared_data::hesspass_test}
!> @item{mupar_test, UNKNOWN, shared_data::mupar_test}
!> @end_table
!>
!> @section siesta_namelist_prog_ref_sec Programmers Reference
!> Reference material for the coding to implement this namelist is found in the
!> @ref siesta_namelist module.
!-------------------------------------------------------------------------------
!*******************************************************************************
!> @file siesta_namelist.f90
!> @brief Contains module @ref siesta_namelist.
!
! Note separating the Doxygen comment block here so detailed decription is
! found in the Module not the file.
!
!> This file contains all the variables and maximum sizes of the inputs for a
!> SIESTA namelist input file. The module contained within does not represent
!> an object instance. Instead all variables are contained in a global context.
!> This is required due to limitations of FORTRAN 95 and namelist inputs.
!>
!> @ref siesta_namelist_sec "Namelist siesta_info definition"
!>
!> @note Some of the references are missing here. This is due to a bug in
!> Doxygen when variable declarations span multiple lines.
!*******************************************************************************
MODULE siesta_namelist
   USE shared_data, ONLY: ngmres_type, iortho, lcolscale, lasym,               &
                          hesspass_test, mupar_test, lrecon
   USE Hessian, ONLY: levmarq_param, mupar
   USE stel_kinds

   IMPLICIT NONE

!*******************************************************************************
!  siesta_namelist input module parameters
!*******************************************************************************
!>  Input string length.
   INTEGER, PARAMETER :: siesta_namelist_name_length = 256

!*******************************************************************************
!  DERIVED-TYPE DECLARATIONS
!  1) siesta_namelist_class
!
!*******************************************************************************
!  Control Flags
!>  Use helical perturbation.
   LOGICAL :: ladd_pert = .TRUE.
!>  Use resistive perturbation.
   LOGICAL :: lresistive = .TRUE.
!>  Restart SIESTA from previous run.
   LOGICAL :: lrestart = .FALSE.
!>  Produce output file for fieldline tracing.
   LOGICAL :: l_tracing = .FALSE.
!>  Produce silo output.
   LOGICAL :: l_silo_output = .FALSE.
!>  Produce silo 3D output.
   LOGICAL :: l_silo3D = .FALSE.
!>  Write output files on all iterations.
   LOGICAL :: l_output_alliter = .FALSE.
!>  FIXME: Unknown purpose. Given a default value so it is always defined,
!>  even when absent from the namelist input (it previously had no
!>  initializer, leaving its value undefined).
   LOGICAL :: l_VMEC_Uniform = .FALSE.
!>  If extended grid is to be used using an available vessel file
   LOGICAL :: l_vessel = .FALSE.

!  Algorithm Control Variables
!>  Maximum number of iterations after diagonal prec.
   INTEGER :: niter = 10
!>  Force tolerance.
   REAL(dp) :: ftol = 1.E-20_dp
!>  Resistivity value.
   REAL(dp) :: eta_factor = 1.E-2_dp
!>  Skip diagonal preconditioner if greater than zero.
   INTEGER :: nprecon = 0

!  Island parameters
!>  M numbers of island resonances.
   INTEGER, DIMENSION(20) :: mres = 0
!>  Sizes of the helical perturbation.
   REAL(dp), DIMENSION(20) :: HelPert = 0.0
!>  Sizes of the helical perturbation.
   REAL(dp), DIMENSION(20) :: HelPertA = 0.0

!  Grid Sizes
!>  Radial size of the plasma grid.
   INTEGER :: nsin = 101
!>  Radial size of the extended grid.
   INTEGER :: nsin_ext = 0
!>  Number of poloidal modes.
   INTEGER :: mpolin = 12
!>  Number of toroidal modes.
   INTEGER :: ntorin = 3
!>  Number of field periods to use. Values less than one mean use the value
!>  from the wout file.
   INTEGER :: nfpin = 0

!  Output Grid Sizes
!>  Number of cylindrical phi planes.
   INTEGER :: nphis = 2
!>  Number of radial grid points.
   INTEGER :: nrs = 200
!>  Number of vertical grid points.
   INTEGER :: nzs = 200
!>  Number of flux space toroidal points.
   INTEGER :: nvs = 150
!>  Number of flux space poloidal points.
   INTEGER :: nus = 150
!>  Number of flux space radial points.
   INTEGER :: nss = 100

!  File Names
!>  Filename of the VMEC woutfile.
   CHARACTER(LEN=siesta_namelist_name_length) :: wout_file = ''
!>  Name of the restart file extension.
   CHARACTER(LEN=siesta_namelist_name_length) :: restart_ext = ''
!>  Filename of the MGRID file.
   CHARACTER(LEN=siesta_namelist_name_length) :: mgrid_file = ''
!>  Filename of the extended surfaces (vessel) file.
   CHARACTER(LEN=siesta_namelist_name_length) :: vessel_file = ''

!  Declare Namelist
!  NOTE: l_silo_output previously appeared twice in this group; the duplicate
!  entry was removed (duplicate namelist-group-objects are rejected by some
!  compilers).
   NAMELIST/siesta_info/                                                       &
!  Control flags
      ladd_pert, lresistive, lrestart, l_tracing, lcolscale,                   &
      l_silo_output, l_silo3D, l_output_alliter,                               &
      l_VMEC_Uniform, lasym, lrecon, l_vessel,                                 &
!  Algorithm Control Variables
      niter, ftol, mupar, levmarq_param, eta_factor, nprecon,                  &
      ngmres_type, iortho,                                                     &
!  Island Parameters
      mres, HelPert, HelPertA,                                                 &
!  Input grid sizes
      nsin, nsin_ext, mpolin, ntorin, nfpin,                                   &
!  Output grid sizes
      nphis, nrs, nzs, nvs, nus, nss,                                          &
!  File names
      wout_file, restart_ext, mgrid_file, vessel_file,                         &
!  Test controls
      hesspass_test, mupar_test

   CONTAINS

!*******************************************************************************
!  UTILITY SUBROUTINES
!*******************************************************************************
!-------------------------------------------------------------------------------
!>  @brief Reads the namelist input file.
!>
!>  Resets re-readable defaults to allow multiple reads in one run, then reads
!>  the siesta_info namelist group from the named file.
!>
!>  @param[in] namelist_file The file name of the namelist input file.
!-------------------------------------------------------------------------------
   SUBROUTINE siesta_namelist_read(namelist_file)
   USE safe_open_mod
   USE v3_utilities
   USE Hessian, ONLY: levmarq_param, mupar, levmarq_param0, mupar0

   IMPLICIT NONE

!  Declare Arguments
   CHARACTER (len=*), INTENT(in) :: namelist_file

!  Local variables
   INTEGER :: iou_mnli
   INTEGER :: status

!  Start of executable code
!  Reset values that could carry over from a previous namelist read.
   levmarq_param = 1.E-3_dp
   mupar = 0
   niter = 10
   mres = 0
   HelPert = 0
   HelPertA = 0
   lcolscale = .TRUE.
   mupar_test = 0

!  Initialize a default value of the I/O unit. SIESTA increments from there.
   iou_mnli = 0
   CALL safe_open(iou_mnli, status, TRIM(namelist_file),                       &
                  'old', 'formatted')
   CALL assert_eq(0, status, 'siesta_namelist_read' //                         &
                  ': Safe_open of ' // TRIM(namelist_file) // ' failed')

!  Read the namelist input file.
   READ (iou_mnli, nml=siesta_info)
   CLOSE (iou_mnli, iostat=status)
   CALL assert_eq(0, status, 'siesta_namelist_read' //                         &
                  ': Error closing ' // TRIM(namelist_file) // ' failed')

!  Preserve the freshly read values as the initial values.
   levmarq_param0 = levmarq_param
   mupar0 = mupar

   END SUBROUTINE siesta_namelist_read

!-------------------------------------------------------------------------------
!>  @brief Writes the namelist input file.
!>
!>  Writes the current values of the siesta_info namelist group to the named
!>  file, replacing any existing file.
!>
!>  @param[in] namelist_file The file name of the namelist input file.
!-------------------------------------------------------------------------------
   SUBROUTINE siesta_namelist_write(namelist_file)
   USE safe_open_mod
   USE v3_utilities

   IMPLICIT NONE

!  Declare Arguments
   CHARACTER (len=*), INTENT(in) :: namelist_file

!  Local variables
   INTEGER :: iou_mnli
   INTEGER :: status

!  Start of executable code
!  Initialize a default value of the I/O unit. SIESTA increments from there.
   iou_mnli = 0
   CALL safe_open(iou_mnli, status, TRIM(namelist_file),                       &
                  'replace', 'formatted', delim_in='quote')
   CALL assert_eq(0, status, 'siesta_namelist_write' //                        &
                  ': Safe_open of ' // TRIM(namelist_file) // ' failed')

!  Write the namelist input file.
   WRITE (iou_mnli, nml=siesta_info)
   CLOSE (iou_mnli, iostat=status)
!  BUG FIX: the error context previously named 'siesta_namelist_read'.
   CALL assert_eq(0, status, 'siesta_namelist_write' //                        &
                  ': Error closing ' // TRIM(namelist_file) // ' failed')

   END SUBROUTINE siesta_namelist_write

END MODULE siesta_namelist
|
{"hexsha": "0bcb6d43d2a2e040aa8b134edd7528cf549ad8d6", "size": 14265, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Sources/siesta_namelist.f90", "max_stars_repo_name": "ORNL-Fusion/SIESTA", "max_stars_repo_head_hexsha": "58934c964dfa5c7e052f112e4a8e1f274f028bc7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Sources/siesta_namelist.f90", "max_issues_repo_name": "ORNL-Fusion/SIESTA", "max_issues_repo_head_hexsha": "58934c964dfa5c7e052f112e4a8e1f274f028bc7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-16T16:42:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-16T20:20:53.000Z", "max_forks_repo_path": "Sources/siesta_namelist.f90", "max_forks_repo_name": "ORNL-Fusion/SIESTA", "max_forks_repo_head_hexsha": "58934c964dfa5c7e052f112e4a8e1f274f028bc7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.0161290323, "max_line_length": 115, "alphanum_fraction": 0.5657903961, "num_tokens": 3667}
|
[STATEMENT]
lemma matchs_app[simp]:
assumes "length xs\<^sub>2 = length ys\<^sub>2"
shows "matchs (xs\<^sub>1 @ xs\<^sub>2) (ys\<^sub>1 @ ys\<^sub>2) =
matchs xs\<^sub>1 ys\<^sub>1 \<bind> (\<lambda>env\<^sub>1. matchs xs\<^sub>2 ys\<^sub>2 \<bind> (\<lambda>env\<^sub>2. Some (env\<^sub>1 ++\<^sub>f env\<^sub>2)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. matchs (xs\<^sub>1 @ xs\<^sub>2) (ys\<^sub>1 @ ys\<^sub>2) = matchs xs\<^sub>1 ys\<^sub>1 \<bind> (\<lambda>env\<^sub>1. matchs xs\<^sub>2 ys\<^sub>2 \<bind> (\<lambda>env\<^sub>2. Some (env\<^sub>1 ++\<^sub>f env\<^sub>2)))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
length xs\<^sub>2 = length ys\<^sub>2
goal (1 subgoal):
1. matchs (xs\<^sub>1 @ xs\<^sub>2) (ys\<^sub>1 @ ys\<^sub>2) = matchs xs\<^sub>1 ys\<^sub>1 \<bind> (\<lambda>env\<^sub>1. matchs xs\<^sub>2 ys\<^sub>2 \<bind> (\<lambda>env\<^sub>2. Some (env\<^sub>1 ++\<^sub>f env\<^sub>2)))
[PROOF STEP]
by (induct xs\<^sub>1 ys\<^sub>1 rule: matchs.induct) fastforce+
|
{"llama_tokens": 454, "file": "Higher_Order_Terms_Term_Class", "length": 2}
|
import argparse
from datetime import datetime
from os import makedirs, path
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
class Matcher:
    """Cosine-similarity matcher between probe and gallery feature files.

    Loads .npy feature vectors listed in text files, labels each
    (probe, gallery) pair as authentic (1), impostor (0) or excluded (-1),
    and saves score/label arrays for later analysis.
    """

    def __init__(self, probe_path, gallery_path, dataset_name):
        """
        Args:
            probe_path: text file listing probe .npy feature paths.
            gallery_path: text file listing gallery feature paths, or None
                to match the probe set against itself.
            dataset_name: dataset key controlling how subject ids are parsed.
        """
        # length of ids to get from feature files (-1 disables fixed-length ids)
        self.id_length = -1
        self.dataset_name = dataset_name
        # load features, subject ids, feature labels from probe file
        # NOTE: np.str was removed in NumPy 1.24; it was an alias of builtin str.
        probe_file = np.sort(np.loadtxt(probe_path, dtype=str))
        self.probe, self.probe_ids, self.probe_labels = self.get_features(probe_file)
        if gallery_path is not None:
            print(f"Matching {probe_path} to {gallery_path}")
            # BUG FIX: previously read the global ``args.gallery``; use the
            # ``gallery_path`` parameter so the class works outside __main__.
            gallery_file = np.sort(np.loadtxt(gallery_path, dtype=str))
            # if matching different files, load gallery features, ids and labels
            self.probe_equal_gallery = False
            self.gallery, self.gallery_ids, self.gallery_labels = self.get_features(
                gallery_file
            )
        else:
            print(f"Matching {probe_path} to {probe_path}")
            # if matching to the same file, just create a symbolic link to save memory
            self.probe_equal_gallery = True
            self.gallery = self.probe
            self.gallery_ids = self.probe_ids
            self.gallery_labels = self.probe_labels
        # initiate a matrix NxM with zeros representing impostor matches
        self.authentic_impostor = np.zeros(shape=(len(self.probe), len(self.gallery)))
        for i in range(len(self.probe)):
            # convert authentic matches to 1
            self.authentic_impostor[i, self.probe_ids[i] == self.gallery_ids] = 1
            # remove same feature files
            self.authentic_impostor[i, self.probe_labels[i] == self.gallery_labels] = -1
            if gallery_path is None:
                # remove duplicate matches if matching probe to probe
                self.authentic_impostor[i, 0 : min(i + 1, len(self.gallery))] = -1
        # filled by match_features()
        self.matches = None

    def get_features_label(self, feature_path):
        """Return (subject_id, feature_label) parsed from a feature file path.

        The parsing rule depends on ``self.dataset_name`` and
        ``self.id_length``; by default the subject id is the filename up to
        the first underscore.
        """
        subject_id = path.split(feature_path)[1]
        # label = "<parent dir>/<filename without .npy>"
        feature_label = path.join(
            path.split(path.split(feature_path)[0])[1], subject_id[:-4]
        )
        if self.dataset_name == "CHIYA":
            subject_id = subject_id[:-5]
        elif self.dataset_name == "CHIYA_VAL":
            subject_id = feature_label[1:-4]
        elif self.dataset_name == "PUBLIC_IVS":
            subject_id = path.split(feature_label)[0]
        elif self.id_length > 0:
            subject_id = subject_id[: self.id_length]
        else:
            subject_id = subject_id.split("_")[0]
        return subject_id, feature_label

    def get_features(self, file):
        """Load every .npy file listed in ``file``.

        Returns (features, subject_ids, labels) as numpy arrays.
        """
        all_features = []
        all_labels = []
        all_subject_ids = []
        for j in range(len(file)):
            image_path = file[j]
            features = np.load(image_path)
            subject_id, feature_label = self.get_features_label(image_path)
            all_features.append(features)
            all_subject_ids.append(subject_id)
            all_labels.append(feature_label)
        return (
            np.asarray(all_features),
            np.asarray(all_subject_ids),
            np.asarray(all_labels),
        )

    def match_features(self):
        """Compute the full probe x gallery cosine-similarity matrix."""
        self.matches = cosine_similarity(self.probe, self.gallery)

    def create_label_indices(self, labels):
        """Return a 2-column array pairing each row index with its label."""
        indices = np.linspace(0, len(labels) - 1, len(labels)).astype(int)
        return np.transpose(np.vstack([indices, labels]))

    def get_indices_score(self, auth_or_imp):
        """Return rows (probe_idx, gallery_idx, score) for the given class.

        ``auth_or_imp`` is 1 for authentic pairs, 0 for impostor pairs.
        """
        x, y = np.where(self.authentic_impostor == auth_or_imp)
        return np.transpose(
            np.vstack(
                [
                    x,
                    y,
                    np.round(self.matches[self.authentic_impostor == auth_or_imp], 6),
                ]
            )
        )

    def save_matches(self, output, group):
        """Save authentic/impostor scores and label index files to ``output``."""
        np.save(path.join(output, f"{group}_authentic.npy"), self.get_indices_score(1))
        np.save(path.join(output, f"{group}_impostor.npy"), self.get_indices_score(0))
        np.savetxt(
            path.join(output, f"{group}_labels.txt"),
            self.create_label_indices(self.probe_labels),
            delimiter=" ",
            fmt="%s",
        )
        if not self.probe_equal_gallery:
            np.savetxt(
                path.join(output, f"{group}_gallery_labels.txt"),
                self.create_label_indices(self.gallery_labels),
                delimiter=" ",
                fmt="%s",
            )
if __name__ == "__main__":
    # Command-line entry point: match probe features against a gallery.
    arg_parser = argparse.ArgumentParser(description="Match Extracted Features")
    arg_parser.add_argument("-probe", "-p", help="Probe image list.")
    arg_parser.add_argument("-gallery", "-g", help="Gallery image list.")
    arg_parser.add_argument("-output", "-o", help="Output folder.")
    arg_parser.add_argument("-dataset", "-d", help="Dataset name.")
    arg_parser.add_argument("-group", "-gr", help="Group name, e.g. AA")
    args = arg_parser.parse_args()

    start = datetime.now()
    if not path.exists(args.output):
        makedirs(args.output)
    matcher = Matcher(args.probe, args.gallery, args.dataset.upper())
    matcher.match_features()
    matcher.save_matches(args.output, args.group)
    finish = datetime.now()
    print(f"Total time to match: {finish - start}")
|
{"hexsha": "8741794b619850582a5f81786f27705df347c193", "size": 5394, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_match.py", "max_stars_repo_name": "abhatta1234/face_analysis_pytorch", "max_stars_repo_head_hexsha": "2abe930c0ca02a1fd819d4710fd9bff392f32f58", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2020-05-19T16:51:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T05:00:16.000Z", "max_issues_repo_path": "feature_match.py", "max_issues_repo_name": "abhatta1234/face_analysis_pytorch", "max_issues_repo_head_hexsha": "2abe930c0ca02a1fd819d4710fd9bff392f32f58", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-04-09T04:46:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-21T18:57:05.000Z", "max_forks_repo_path": "feature_match.py", "max_forks_repo_name": "abhatta1234/face_analysis_pytorch", "max_forks_repo_head_hexsha": "2abe930c0ca02a1fd819d4710fd9bff392f32f58", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-05-11T19:50:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T11:49:52.000Z", "avg_line_length": 36.2013422819, "max_line_length": 88, "alphanum_fraction": 0.6091954023, "include": true, "reason": "import numpy", "num_tokens": 1156}
|
import numpy as np
import math
class Dataset(object):
    """Epoch-aware batch iterator over a wrapped dataset object.

    The wrapped ``dataset`` must expose ``n_samples``, ``train``,
    ``data(indices)`` and ``get_labels()``.
    """

    def __init__(self, dataset):
        self._dataset = dataset
        self.n_samples = dataset.n_samples
        self._train = dataset.train
        self._index_in_epoch = 0
        self._epochs_complete = 0
        # Random visiting order over all samples, fixed for this object's lifetime.
        self._perm = np.arange(self.n_samples)
        np.random.shuffle(self._perm)

    def next_batch(self, batch_size):
        """Return the next (data, label) batch of ``batch_size`` samples."""
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self.n_samples:
            if not self._train:
                # Validation stage only process once
                start = self.n_samples - batch_size
                self._index_in_epoch = self.n_samples
            else:
                # Training: wrap around and count the completed epoch.
                self._epochs_complete += 1
                start = 0
                self._index_in_epoch = batch_size
        data, label = self._dataset.data(self._perm[start:self._index_in_epoch])
        return data, label

    @property
    def label(self):
        """All labels of the wrapped dataset."""
        return self._dataset.get_labels()

    def finish_epoch(self):
        """Reset the epoch cursor to the beginning."""
        self._index_in_epoch = 0
|
{"hexsha": "8a9ddb5a2a393b08cde6c63f90c0857ea1709b7e", "size": 1163, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/penet/input.py", "max_stars_repo_name": "firmamentqj/bodyemotion", "max_stars_repo_head_hexsha": "d1e83ee6043e1ff50a2e37b5d17c2dbccc4ca688", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/penet/input.py", "max_issues_repo_name": "firmamentqj/bodyemotion", "max_issues_repo_head_hexsha": "d1e83ee6043e1ff50a2e37b5d17c2dbccc4ca688", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/penet/input.py", "max_forks_repo_name": "firmamentqj/bodyemotion", "max_forks_repo_head_hexsha": "d1e83ee6043e1ff50a2e37b5d17c2dbccc4ca688", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8205128205, "max_line_length": 75, "alphanum_fraction": 0.6139294927, "include": true, "reason": "import numpy", "num_tokens": 257}
|
"""Basic drawing functions to generate a shape-based graphic."""
import types
import numpy as np
import cv2
import cv2.cv as cv
from lumos.util import KeyCode
import graphics
class Drawing(object):
    """Interactive shape-drawing demo built on an OpenCV window.

    Keyboard keys select the active shape type (mapping built in
    addShapesToKeyMap); dragging with the left mouse button edits a shape on
    a transient overlay, and releasing commits it to the canvas and the
    scene graph.
    """

    window_name = "Drawing"
    window_width, window_height = 640, 480
    window_delay = 10 # ms; determines the window update rate
    canvas_width, canvas_height = window_width, window_height # TODO allow canvas size to be different from window size
    canvas_channels = 3 # 3 for color (BGR), 1 for grayscale
    canvas_dtype = np.uint8 # data type of each pixel: np.uint8, np.float32, etc.
    canvas_shape = (canvas_height, canvas_width, canvas_channels) # numpy convention
    overlay_channels = canvas_channels # number of channels in overlay, should be equal to (or less than) canvas_channels

    def __init__(self):
        """Set up graphics context, scene root, image buffers and key map."""
        self.graphicsContext = graphics.GraphicsContext # common graphics context object
        self.scene = graphics.Shape(self.graphicsContext) # root scene node
        self.imageCanvas = np.ones(self.canvas_shape, self.canvas_dtype) * 255 # underlying canvas image
        self.imageOverlay = np.zeros(self.canvas_shape, self.canvas_dtype) # transient overlay
        self.imageDisplay = np.zeros(self.canvas_shape, self.canvas_dtype) # canvas image + overlay
        self.isDisplayUpdateRequired = True
        self.keyMap = dict(q="Quit", d="Dump") # a mapping for all keyboard handlers, most of them directly mapped to shape types (some special ones directly inserted here)
        self.addShapesToKeyMap()
        print "Drawing.__init__(): Key map: {{{}}}".format(", ".join(("'{}': {}".format(key, obj.__name__ if (isinstance(obj, type) or isinstance(obj, types.ClassType)) else str(obj)) for key, obj in self.keyMap.iteritems()))) # [debug]
        self.shapeType = None # selected shape type
        self.shape = None # current shape being edited, if any
        self.shapeParams = dict(color="0.8 0.4 0.4", stroke=2, fill=False) # initial values for common shape parameters

    def run(self):
        """Open the window, register callbacks, and run the display loop
        until Esc/'q' or Ctrl-C."""
        # Open window and register callbacks
        cv2.namedWindow(self.window_name)
        cv2.setMouseCallback(self.window_name, self.onMouseEvent)
        cv2.waitKey(self.window_delay)

        # Main loop
        print "Drawing.run(): Starting main loop..."
        while True:
            try:
                self.updateDisplay() # ensure we have a valid display image
                cv2.imshow(self.window_name, self.imageDisplay)
                key = cv2.waitKey(self.window_delay)
                if key != -1:
                    keyCode = key & 0x00007f # key code is in the last 8 bits, pick 7 bits for correct ASCII interpretation (8th bit indicates ?)
                    keyChar = chr(keyCode) if not (key & KeyCode.SPECIAL) else None # if keyCode is normal, convert to char (str)
                    if keyCode == 0x1b or keyChar == 'q':
                        break
                    else:
                        self.onKeyPress(key, keyChar) # returns True if event was consumed
            except KeyboardInterrupt:
                break

        # Clean-up
        cv2.destroyWindow(self.window_name)
        cv2.waitKey(self.window_delay)
        print "Drawing.run(): Done."

    def onKeyPress(self, key, keyChar=None):
        """Handle a key press: select a shape type or run a named command.

        Returns True if the event was consumed.
        """
        if keyChar is not None: # special keys may not have keyChar defined
            keyChar = keyChar.lower() # NOTE this limits us to case-insensitive key mapping
        # Find key in map, and get mapped object
        if keyChar in self.keyMap:
            obj = self.keyMap[keyChar] # mapped object can be anything
            # Check if mapped object is a type, and if so, is it derived from Shape?
            if (isinstance(obj, type) or isinstance(obj, types.ClassType)) and issubclass(obj, graphics.Shape):
                # TODO Cancel any active edit operation
                self.shapeType = obj # set active type
                print "[{}]".format(self.shapeType.__name__) # [info] common output, useful for tracking active shape changes
                return True
            elif isinstance(obj, str): # this looks like a named command
                if obj == "Dump":
                    print "[Dump]\n", repr(self.scene)
        else:
            print "[WARNING] Drawing.onKeyPress(): Unknown key: {}".format(KeyCode.describeKey(key))
        # TODO else: any special key mappings?
        return False # unconsumed event

    def onMouseEvent(self, event, x, y, flags, param):
        """OpenCV mouse callback: create/update/finalize the active shape on
        left-button press/drag/release. Coordinates are normalized to [0, 1]
        relative to the canvas size. Returns True if the event was consumed.
        """
        #print "[Mouse] flags: {:08b}, left: {}, right: {}".format(flags, bool(flags & cv2.EVENT_FLAG_LBUTTON), bool(flags & cv2.EVENT_FLAG_RBUTTON)) # [debug]
        if self.shapeType is not None: # NOTE self.shapeType must be a Shape subclass
            xNorm = float(x) / self.canvas_width
            yNorm = float(y) / self.canvas_height
            if event == cv2.EVENT_LBUTTONDOWN:
                self.createShape(xNorm, yNorm)
                self.updateOverlay()
            elif event == cv2.EVENT_MOUSEMOVE and bool(flags & cv2.EVENT_FLAG_LBUTTON):
                self.updateShape(xNorm, yNorm)
                self.updateOverlay()
            elif event == cv2.EVENT_LBUTTONUP:
                self.finalizeShape(xNorm, yNorm)
                self.updateCanvas()
                self.resetShape() # do this after drawing to canvas and before clearing overlay
                self.updateOverlay()
            else:
                return False
            return True
        return False # unconsumed event

    def addShapesToKeyMap(self):
        """Assign the first free letter of each registered Shape type's name
        as its selection key."""
        # For each shape type available, add a (key, type) mapping
        for name, shapeType in graphics.Shape.types.iteritems():
            # Find first available key
            key = None
            for letter in name:
                if letter.lower() not in self.keyMap:
                    key = letter.lower()
                    break
            # Add (key, type) mapping
            if key is not None:
                self.keyMap[key] = shapeType
            else:
                print "[WARNING] Drawing.addShapesToKeyMap(): No suitable key found for shape: {}".format(name)

    def createShape(self, x, y):
        """Begin a new shape of the active type at normalized coords (x, y).

        Returns True if a shape object was created.
        """
        coords = str(np.float32([x, y])).strip('[ ]') # convert coordinate to string, as required by Shape.__init__()
        if self.shapeType == graphics.Point:
            newShapeParams = self.shapeParams.copy() # start with a copy of common shape parameters (color, stroke, etc.)
            newShapeParams['location'] = coords # add in any shape-specific parameters, default and optional ones can be omitted
            self.shape = self.shapeType(self.graphicsContext, **newShapeParams) # create shape object
            return True
        elif self.shapeType == graphics.Line or self.shapeType == graphics.Rectangle:
            newShapeParams = self.shapeParams.copy()
            newShapeParams['begin'] = coords
            newShapeParams['end'] = newShapeParams['begin']
            self.shape = self.shapeType(self.graphicsContext, **newShapeParams)
            return True
        elif self.shapeType == graphics.Circle:
            newShapeParams = self.shapeParams.copy()
            newShapeParams['center'] = coords
            newShapeParams['radius'] = 0
            self.shape = self.shapeType(self.graphicsContext, **newShapeParams)
            return True
        return False

    def updateShape(self, x, y):
        """Update the shape being edited to follow the drag point (x, y).

        Returns True if the shape was updated.
        """
        if self.shapeType == graphics.Point:
            self.shape.location = np.float32([x, y]) # update any shape-specific parameters
            return True
        elif self.shapeType == graphics.Line or self.shapeType == graphics.Rectangle:
            self.shape.end = np.float32([x, y])
            return True
        elif self.shapeType == graphics.Circle:
            self.shape.radius = np.linalg.norm(np.float32([x, y]) - self.shape.center)
            return True
        return False

    def finalizeShape(self, x, y):
        """Commit the shape being edited to the scene graph."""
        # Add shape as a child of root scene node
        self.scene.addChild(self.shape)
        # TODO Push on history stack so that we can undo and stuff
        pass

    def resetShape(self):
        """Clear the active-shape reference (editing finished)."""
        self.shape = None

    def updateCanvas(self):
        """Draw the active shape permanently onto the canvas image."""
        if self.shape is not None:
            self.shape.render(self.imageCanvas) # active shape is drawn onto current canvas
        self.isDisplayUpdateRequired = True

    def updateOverlay(self):
        """Redraw the transient overlay with only the active shape."""
        self.imageOverlay.fill(0) # clear before drawing
        if self.shape is not None:
            self.shape.render(self.imageOverlay) # only active shape is drawn
        self.isDisplayUpdateRequired = True

    def updateDisplay(self):
        """Recompose the display image (canvas + overlay) if flagged dirty."""
        if self.isDisplayUpdateRequired:
            self.imageDisplay[:] = self.imageCanvas[:] # start display image with a copy of canvas image
            overlayMask = (self.imageOverlay > 0) if self.overlay_channels == 1 else np.any(self.imageOverlay > 0, axis=2) # create mask for (non-zero) overlay regions that need to be copied (NOTE mask generated is a 1-channel 2D boolean array)
            self.imageDisplay[overlayMask] = self.imageOverlay[overlayMask] # copy in masked pixels from overlay onto display image
            self.isDisplayUpdateRequired = False
if __name__ == "__main__":
    app = Drawing()
    app.run()
|
{"hexsha": "aa0b6211e284d4d6a67480cfc5b190ad29b515ad", "size": 8590, "ext": "py", "lang": "Python", "max_stars_repo_path": "nap/util/drawing.py", "max_stars_repo_name": "napratin/nap", "max_stars_repo_head_hexsha": "a5735a2a2a0ad9a4da2d48671f3072ad60173b0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-02-19T21:56:23.000Z", "max_stars_repo_stars_event_max_datetime": "2016-02-19T21:56:23.000Z", "max_issues_repo_path": "nap/util/drawing.py", "max_issues_repo_name": "napratin/nap", "max_issues_repo_head_hexsha": "a5735a2a2a0ad9a4da2d48671f3072ad60173b0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-06-05T17:34:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T00:40:43.000Z", "max_forks_repo_path": "nap/util/drawing.py", "max_forks_repo_name": "napratin/nap", "max_forks_repo_head_hexsha": "a5735a2a2a0ad9a4da2d48671f3072ad60173b0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.2105263158, "max_line_length": 239, "alphanum_fraction": 0.680209546, "include": true, "reason": "import numpy", "num_tokens": 2054}
|
"""
Data Class
"""
###########
# Imports #
###########
import os
import pdb
import sys
import pickle
from collections import namedtuple
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from torchvision import datasets
###########
# Globals #
###########
# Immutable bundle of constructor options shared by the CIFAR10/CIFAR100
# wrappers below.  `mini` (int or None) optionally truncates a dataset to
# its first `mini` samples.
DataParams = namedtuple("DataParams", ["root", "train",
    "transform", "target_transform", "download", "mini"])
# Absolute on-disk dataset locations (cluster-specific paths).
DATA_ROOT_10 = "/scratch/artemis/azouaoui/datasets/CIFAR10/data"
DATA_ROOT_100 = "/scratch/artemis/azouaoui/datasets/CIFAR100/data"
# Lookup of dataset name -> data root, used by get_normalization_constants.
DATA_ROOTS = {"cifar10": DATA_ROOT_10, "cifar100": DATA_ROOT_100}
#########
# Utils #
#########
def get_normalization_constants(dset_name="cifar10"):
    """
    Compute the training-set mean and std per RGB channel, scaled to
    [0, 1], for use with the torchvision ``Normalize`` transform.

    Parameters
    ----------
    dset_name : str
        Either "cifar10" or "cifar100".

    Returns
    -------
    tuple of np.ndarray
        ``(means, stds)``, each of shape (3,) -- one value per channel.

    Raises
    ------
    ValueError
        If ``dset_name`` is not a known dataset name.
    """
    # Explicit validation instead of `assert`: asserts are stripped when
    # Python runs with -O, which would leave `dset` undefined below.
    if dset_name not in DATA_ROOTS:
        raise ValueError(f"Unknown dataset name: {dset_name!r}")
    # Mapping dispatch instead of an if/elif chain per dataset.
    dset_classes = {"cifar10": CIFAR10, "cifar100": CIFAR100}
    train_params = DataParams(root=DATA_ROOTS[dset_name],
                              train=True,
                              transform=None,
                              target_transform=None,
                              download=False,
                              mini=None)
    dset = dset_classes[dset_name](train_params)
    # dset.data has shape (N, H, W, C); reduce over all axes but channels,
    # then rescale from uint8 [0, 255] to [0, 1].
    data = dset.data
    means = np.mean(data, axis=(0, 1, 2)) / 255.
    stds = np.std(data, axis=(0, 1, 2)) / 255.
    return (means, stds)
###########
# Classes #
###########
class CIFAR10(datasets.cifar.CIFAR10):
    """CIFAR10 whose constructor takes a single DataParams bundle.

    Overrides the parent __init__: same on-disk pickled-batch loading as
    torchvision's CIFAR10, plus an optional ``mini`` truncation to the
    first N samples.
    """

    def __init__(self, params: DataParams):
        self.params = params
        self.root = os.path.expanduser(params.root)
        self.transform = params.transform
        self.target_transform = params.target_transform
        self.train = params.train  # training set or test set
        if params.download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        batch_list = self.train_list if self.train else self.test_list
        raw_batches = []
        self.targets = []
        # Load the pickled numpy arrays, one file per batch.
        for file_name, _checksum in batch_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
            raw_batches.append(entry['data'])
            # CIFAR10 batches carry 'labels'; CIFAR100-style files carry
            # 'fine_labels'.
            if 'labels' in entry:
                self.targets.extend(entry['labels'])
            else:
                self.targets.extend(entry['fine_labels'])
        # (N, 3072) -> (N, 3, 32, 32) -> (N, 32, 32, 3), i.e. HWC layout.
        stacked = np.vstack(raw_batches).reshape(-1, 3, 32, 32)
        self.data = stacked.transpose((0, 2, 3, 1))
        if params.mini is not None:
            keep = min(params.mini, len(self.data))
            self.data = self.data[:keep]
            self.targets = self.targets[:keep]
        assert len(self.data) == len(self.targets)
        self._load_meta()
class CIFAR100(datasets.cifar.CIFAR100):
    """CIFAR100 whose constructor takes a single DataParams bundle.

    Overrides the parent __init__: same on-disk pickled-batch loading as
    torchvision's CIFAR100, plus an optional ``mini`` truncation to the
    first N samples.
    """

    def __init__(self, params: DataParams):
        self.params = params
        self.root = os.path.expanduser(params.root)
        self.transform = params.transform
        self.target_transform = params.target_transform
        self.train = params.train  # training set or test set
        if params.download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        batch_list = self.train_list if self.train else self.test_list
        raw_batches = []
        self.targets = []
        # Load the pickled numpy arrays, one file per batch.
        for file_name, _checksum in batch_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
            raw_batches.append(entry['data'])
            # CIFAR100 files carry 'fine_labels'; CIFAR10-style files
            # carry 'labels'.
            if 'labels' in entry:
                self.targets.extend(entry['labels'])
            else:
                self.targets.extend(entry['fine_labels'])
        # (N, 3072) -> (N, 3, 32, 32) -> (N, 32, 32, 3), i.e. HWC layout.
        stacked = np.vstack(raw_batches).reshape(-1, 3, 32, 32)
        self.data = stacked.transpose((0, 2, 3, 1))
        if params.mini is not None:
            keep = min(params.mini, len(self.data))
            self.data = self.data[:keep]
            self.targets = self.targets[:keep]
        assert len(self.data) == len(self.targets)
        self._load_meta()
########
# Main #
########
if __name__ == "__main__":
    # Smoke tests for the dataset wrappers.  The original pdb.set_trace()
    # debug stops were removed so the script can run unattended.
    # Normalization constants
    means, stds = get_normalization_constants()
    print(f"Mean per channel: {means}")
    print(f"Std per channel: {stds}")
    # Dataset mini version
    trfs = transforms.Compose([transforms.RandomHorizontalFlip(),
                               # randomly translate by 4 pixels in each direction
                               transforms.RandomAffine(degrees=0,
                                                       translate=(0.125, 0.125)),
                               transforms.ToTensor(),
                               transforms.Normalize(means, stds)])
    # BUG FIX: was `DATA_ROOT`, which is undefined (NameError at runtime);
    # the CIFAR10 root constant is DATA_ROOT_10.
    mini_params = DataParams(root=DATA_ROOT_10,
                             train=True,
                             transform=trfs,
                             target_transform=None,
                             download=False,
                             mini=100)
    mini_dset = CIFAR10(mini_params)
    assert len(mini_dset) == 100, len(mini_dset)
    mini_params_100 = DataParams(root=DATA_ROOT_100,
                                 train=True,
                                 transform=trfs,
                                 target_transform=None,
                                 download=False,
                                 mini=250)
    mini_dset_100 = CIFAR100(mini_params_100)
    assert len(mini_dset_100) == 250, len(mini_dset_100)
    from torch.utils.data import DataLoader
    loader = DataLoader(dataset=mini_dset_100, batch_size=25, shuffle=True)
    # Sanity-check one batch instead of dropping into the debugger.
    for batch_idx, (data, target) in enumerate(loader):
        print(f"batch {batch_idx}: data {tuple(data.shape)}, target {tuple(target.shape)}")
        break
|
{"hexsha": "f30bedbcb0488df2a7a89745b076646d7465f8a8", "size": 6765, "ext": "py", "lang": "Python", "max_stars_repo_path": "data.py", "max_stars_repo_name": "inzouzouwetrust/pytorch-cifar", "max_stars_repo_head_hexsha": "7bf35ac1a96a7ad67cabc2e6144b528d87756c1c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data.py", "max_issues_repo_name": "inzouzouwetrust/pytorch-cifar", "max_issues_repo_head_hexsha": "7bf35ac1a96a7ad67cabc2e6144b528d87756c1c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data.py", "max_forks_repo_name": "inzouzouwetrust/pytorch-cifar", "max_forks_repo_head_hexsha": "7bf35ac1a96a7ad67cabc2e6144b528d87756c1c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7605633803, "max_line_length": 81, "alphanum_fraction": 0.5540280857, "include": true, "reason": "import numpy", "num_tokens": 1443}
|
from os import path, pardir
from setuptools import setup, find_packages, Extension
import numpy as np
from Cython.Build import cythonize
# Flip to True to build with Cython line tracing (profiling/coverage).
CYTHON_DEBUG = False

if CYTHON_DEBUG:
    # Tracing directives must be set before cythonize() runs below.
    from Cython.Compiler.Options import get_directive_defaults
    directive_defaults = get_directive_defaults()
    directive_defaults['linetrace'] = True
    directive_defaults['binding'] = True

if CYTHON_DEBUG:
    CYTHON_MACROS = [('CYTHON_TRACE', '1')]
else:
    CYTHON_MACROS = None

VERSION = '0.2.1'

# Directory containing this setup.py; data files are resolved from here.
PKG_FOLDER = path.abspath(path.join(__file__, pardir))

# install_requires comes straight from requirements.txt ...
with open(path.join(PKG_FOLDER, 'requirements.txt')) as req_fh:
    requirements = req_fh.read().splitlines()

# ... and the README doubles as the PyPI long description.
with open(path.join(PKG_FOLDER, 'README.md')) as readme_fh:
    long_description = readme_fh.read()
# The single Cython extension: C-accelerated .bin extraction.
bin_ext = Extension(
    name="mibi_bin_tools._extract_bin",
    sources=[path.join(PKG_FOLDER, "mibi_bin_tools/_extract_bin.pyx")],
    include_dirs=[np.get_include()],
    define_macros=CYTHON_MACROS,
)

setup(
    name='mibi-bin-tools',
    version=VERSION,
    packages=find_packages(),
    license='Modified Apache License 2.0',
    description='Scripts for extracting .bin files from the commercial MIBI instrument',
    author='Angelo Lab',
    url='https://github.com/angelolab/mibi-bin-tools',
    download_url=f'https://github.com/angelolab/mibi-bin-tools/archive/v{VERSION}.tar.gz',
    ext_modules=cythonize([bin_ext], compiler_directives={'language_level': "3"}),
    install_requires=requirements,
    extras_require={
        'tests': [
            'pytest',
            'pytest-cov',
            'pytest-pycodestyle',
            'testbook',
        ],
    },
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ],
)
|
{"hexsha": "b4e3c9ba008bdd2a0f060e0928339eaefea29060", "size": 2002, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "angelolab/mibi-bin-tools", "max_stars_repo_head_hexsha": "369f62dfb64af41966d25ca188e96dce676f5e38", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "setup.py", "max_issues_repo_name": "angelolab/mibi-bin-tools", "max_issues_repo_head_hexsha": "369f62dfb64af41966d25ca188e96dce676f5e38", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2021-11-23T14:39:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T23:52:27.000Z", "max_forks_repo_path": "setup.py", "max_forks_repo_name": "angelolab/mibi-bin-tools", "max_forks_repo_head_hexsha": "369f62dfb64af41966d25ca188e96dce676f5e38", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-21T11:20:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-21T11:20:36.000Z", "avg_line_length": 32.8196721311, "max_line_length": 90, "alphanum_fraction": 0.6823176823, "include": true, "reason": "import numpy", "num_tokens": 458}
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from pyts.classification import KNeighborsClassifier
from sklearn.metrics import (
accuracy_score,
auc,
classification_report,
f1_score,
plot_confusion_matrix,
roc_auc_score,
roc_curve,
)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from tslearn.piecewise import SymbolicAggregateApproximation
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
from tslearn.shapelets import ShapeletModel
from music import MusicDB
"""CLASSIFICAZIONE CON SAX E SHAPLET KNN DTW"""
# (section header above: "classification with SAX and shapelet KNN-DTW")


def draw_confusion_matrix(Clf, X, y):
    """Show the confusion matrix for *Clf* on (X, y), raw and normalized."""
    for title, normalize in (
        ("Confusion matrix, without normalization", None),
        ("KNN-Shaplet-Sax-Dtw Confusion matrix", "true"),
    ):
        disp = plot_confusion_matrix(Clf, X, y, cmap="RdPu", normalize=normalize)
        disp.ax_.set_title(title)
        plt.show()
# Load the dataframe of raw time series (rows = tracks).
musi = MusicDB()
print(musi.df.info())
print(musi.feat["enc_genre"].unique())
X_no = musi.df
y = musi.feat["enc_genre"]  # target class: the label-encoded genre
# Normalize each series to zero mean / unit variance.
scaler = TimeSeriesScalerMeanVariance()
X_no = pd.DataFrame(
    scaler.fit_transform(musi.df.values).reshape(
        musi.df.values.shape[0], musi.df.values.shape[1]
    )
)
X_no.index = musi.df.index
# Approximate each series with SAX: 130 segments, 20-symbol alphabet.
sax = SymbolicAggregateApproximation(n_segments=130, alphabet_size_avg=20)
X1 = sax.fit_transform(X_no)
print(X1.shape)
# Drop the trailing singleton feature axis added by the transformer.
X = np.squeeze(X1)
print(X.shape)
# Classification: stratified 80/20 train/test split, fixed seed.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=100, stratify=y
)
"""Shapelet section: learn discriminative shapelets from the training set."""
n_ts, ts_sz = X_train.shape
n_classes = len(set(y))
# shapelet_sizes = grabocka_params_to_shapelet_size_dict(
#    n_ts=n_ts, ts_sz=ts_sz, n_classes=n_classes, l=0.1, r=1
# )
# 24 shapelets of length 15 (sizes originally suggested by the Grabocka
# heuristic above; the earlier comment said 6 -- the code uses 24).
shapelet_sizes = {15: 24}
print("n_ts", n_ts)
print("ts_sz", ts_sz)
print("n_classes", n_classes)
print("shapelet_sizes", shapelet_sizes)
# Fit the shapelet model with SGD; verbose training output enabled.
shp_clf = ShapeletModel(
    n_shapelets_per_size=shapelet_sizes,
    optimizer="sgd",
    weight_regularizer=0.01,
    max_iter=50,
    verbose=1,
)
shp_clf.fit(X_train, y_train)
print("Apply on the test set and evaluate the performance: \n")
y_pred = shp_clf.predict(X_test)
print("Accuracy %s" % accuracy_score(y_test, y_pred))
print("F1-score %s" % f1_score(y_test, y_pred, average=None))
print(classification_report(y_test, y_pred))
"""Classifier: KNN with DTW (Sakoe-Chiba band) on shapelet-transformed features."""
print("KNN- Shaplet Based-DTW")
# Map each series to its distances from the learned shapelets.
X_train2 = shp_clf.transform(X_train)
print("train shape:", X_train2.shape)
X_test2 = shp_clf.transform(X_test)
knn = KNeighborsClassifier(n_neighbors=19, weights="distance", metric="dtw_sakoechiba")
# Best parameters: {'n_neighbors': 19, p=1, weights='distance'}
knn.fit(X_train2, y_train)
# Apply on the test set and evaluate the performance
print("Apply on the test set and evaluate the performance-KNN: \n")
y_pred = knn.predict(X_test2)
print("Accuracy %s" % accuracy_score(y_test, y_pred))
print("F1-score %s" % f1_score(y_test, y_pred, average=None))
print(classification_report(y_test, y_pred))
draw_confusion_matrix(knn, X_test2, y_test)
"""ROC CURVE"""
# One-vs-rest ROC curves, one per binarized genre class.
lb = LabelBinarizer()
lb.fit(y_test)
fpr = dict()
tpr = dict()
by_test = lb.transform(y_test)
by_pred = lb.transform(y_pred)
# FIX: the per-class `auc(fpr[i], tpr[i])` values previously computed in
# this loop (and the `roc_auc = dict()` holding them) were dead work --
# the dict was immediately overwritten by roc_auc_score below.  Only the
# curve points are kept here; a stray no-op `lb.classes_.tolist()`
# expression was also removed.
for i in range(8):
    fpr[i], tpr[i], _ = roc_curve(by_test[:, i], by_pred[:, i])
roc_auc = roc_auc_score(by_test, by_pred, average=None)
plt.figure(figsize=(8, 5))
for i in range(8):
    plt.plot(
        fpr[i],
        tpr[i],
        label="%s ROC curve (area = %0.2f)" % (lb.classes_.tolist()[i], roc_auc[i]),
    )
# Chance-level diagonal for reference.
plt.plot([0, 1], [0, 1], "k--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.title("KNN-Shaplet-Sax-Dtw Roc-Curve")
plt.xlabel("False Positive Rate", fontsize=10)
plt.ylabel("True Positive Rate", fontsize=10)
plt.tick_params(axis="both", which="major", labelsize=12)
plt.legend(loc="lower right", fontsize=7, frameon=False)
plt.show()
|
{"hexsha": "2ce38ee585df2eb61c5888acbdb98c45e552e0dd", "size": 4150, "ext": "py", "lang": "Python", "max_stars_repo_path": "ts_classification_5.py", "max_stars_repo_name": "Mariabba/DataMining2-project", "max_stars_repo_head_hexsha": "d12cf4d6605a16194f24b8410c332cff701d2b6d", "max_stars_repo_licenses": ["CC-BY-4.0", "MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-16T12:51:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-17T08:28:07.000Z", "max_issues_repo_path": "ts_classification_5.py", "max_issues_repo_name": "Mariabba/fma-datamining", "max_issues_repo_head_hexsha": "d12cf4d6605a16194f24b8410c332cff701d2b6d", "max_issues_repo_licenses": ["CC-BY-4.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ts_classification_5.py", "max_forks_repo_name": "Mariabba/fma-datamining", "max_forks_repo_head_hexsha": "d12cf4d6605a16194f24b8410c332cff701d2b6d", "max_forks_repo_licenses": ["CC-BY-4.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1241830065, "max_line_length": 87, "alphanum_fraction": 0.721686747, "include": true, "reason": "import numpy", "num_tokens": 1201}
|
# -*- coding: utf-8 -*-
"""
this script computes the expected auroral power output for the
oi 5577 angstrom line for proxima b, given stellar wind conditions
for planet 'b' from cohen et al 2014
@author: mtilley [matt a. tilley, university of washington]
@email: mtilley (at) uw (dot) edu
"""
# imports
from __future__ import print_function, division
import numpy as np
from numpy import linalg as la
import scipy as sp
import scipy.constants as spcon
import auroral_signal as asig
'''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 relevant parameters
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
# wind parameters include relative motion of planet
# sub-alfvenic stellar wind (cohen et al 2014, planet 'b')
nsub = 433.              # number density [cm^-3]
vsub = [-630.,-48.3,30.] # wind velocity [km s^-1]
bsub = [-804.,-173.,63.] # imf vector [nt]
tsub = 3.42              # temperature [10^5 k] (not used in the estimates below)
# super-alfvenic stellar wind
nsup = 12895.            # number density [cm^-3]
vsup = [-202.,54.7,22.]  # wind velocity [km s^-1]
bsup = [-57.,-223.,92.]  # imf vector [nt]
tsup = 4.77              # temperature [10^5 k] (not used in the estimates below)
# cme scaling using wang et al formula
# and nominal increases in density, velocity, and imf [arb]
cme_scale = 10**0.24 * 3.**1.47 * 15**0.86
# energy per photon [j photon^-1]; wavelengths in meters (557.7 nm, 104.1 nm)
e_5577 = spcon.h*spcon.c/5.577e-7
e_1041 = spcon.h*spcon.c/1.041e-7
# electron fraction of auroral precip -- hubert 2002 [arb]
e_frac = 0.8
# steele & mcewen conversion efficiency [photons (erg cm^-2 s^-1)^-1]
oi_eff = 1.48e9
# Mauk 1994 conversion efficiency for UV band [photons (erg cm^-2 s^-1)^-1]
uv_eff = 5.e9
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 power estimations
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
# section banner strings (the earth banner is shorter than the proxima ones)
BAR_EARTH = '-=-=-=-=-=-=-=-=-=-=-'
BAR_PROXIMA = '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-'

# per-band emission parameters: conversion efficiency, energy per photon,
# printed header, and the per-case line labels (tab runs preserved exactly
# from the original report format)
BANDS = {
    '5577': (oi_eff, e_5577,
             '\n estimated auroral power for the oi 5577 a line:\n',
             (' quiet 5577\t-\t', ' substorm 5577\t-\t',
              ' cme 5577\t-\t', ' cme+ss 5577\t-\t')),
    '1041': (uv_eff, e_1041,
             '\n estimated auroral power for the 967-1115 UV band:\n',
             (' quiet 967-1115\t\t-\t', ' substorm 967-1115\t-\t',
              ' cme 967-1115\t\t-\t', ' cme+ss 967-1115\t-\t')),
}

def report_case(title, bar, n=None, v=None, b=None, moment=None,
                radius=None, band='5577'):
    """
    print the magnetopause standoff distance, auroral oval area,
    precipitating particle power, and line/band emission power for one
    stellar-wind + planetary-dipole scenario.

    n [cm^-3], v [km s^-1] 3-vector, b [nt] 3-vector, moment = planetary
    dipole moment; leaving all of them None selects the earth defaults
    built into auroral_signal.  radius, when given, overrides the radius
    passed to the auroral-oval estimate.  band selects the 5577 a optical
    line or the 967-1115 a UV band output.
    """
    print( '\n' + bar )
    print( title )
    print( bar + '\n' )
    # magnetopause standoff distance
    if moment is None:
        dist = asig.mpause_dist()
    else:
        speed = la.norm(v)
        dist = asig.mpause_dist( n, speed, moment )
    print( ' estimated sub-stellar magnetopause distance: %.3e' % dist + ' m\n' )
    # auroral oval area, converted m^2 -> cm^2
    if moment is None:
        oval = asig.auroral_oval()*1.e4
    elif radius is None:
        oval = asig.auroral_oval( dist )*1.e4
    else:
        oval = asig.auroral_oval( dist, radius )*1.e4
    print( ' estimated auroral oval area: %.3e' % oval + ' cm^2\n' )
    print( ' estimated auroral energetic particle power delivered to auroral regions:\n' )
    # precipitating power: quiet vs substorm (theta=pi) magnetosphere
    if moment is None:
        power_out_q = asig.power_calc()
        power_out_s = asig.power_calc( theta=spcon.pi )
    else:
        # imf clock angle and transverse imf component
        imf_clock = np.arctan2( abs(b[1]), b[2] )
        b_t = np.sqrt( b[1]**2. + b[2]**2. )
        power_out_q = asig.power_calc( n, speed, b_t, imf_clock, moment )
        power_out_s = asig.power_calc( n, speed, b_t, spcon.pi, moment )
    # cme cases reuse the quiet/substorm values scaled by the wang-formula
    # factor (the original recomputed identical power_calc calls; this
    # treats power_calc as deterministic in its inputs)
    power_out_c = power_out_q*cme_scale
    power_out_cs = power_out_s*cme_scale
    print( ' quiet\t\t-\t%.3e' % power_out_q + ' w' )
    print( ' substorm\t-\t%.3e' % power_out_s + ' w' )
    print( ' cme\t\t-\t%.3e' % power_out_c + ' w' )
    print( ' cme+substorm\t-\t%.3e' % power_out_cs + ' w' )
    # photon output: w -> erg s^-1 (1e7), electron fraction of precip,
    # both hemispheres (factor 2), times conversion efficiency and energy
    # per photon
    eff, e_photon, header, labels = BANDS[band]
    print( header )
    for label, power in zip(labels, (power_out_q, power_out_s,
                                     power_out_c, power_out_cs)):
        print( label + '%.3e' % (eff*power*1.e7*e_frac*2*e_photon) + ' w' )

# the earth baseline plus the six proxima b scenarios (refactored from six
# copy-pasted report sections into one data-driven driver; output format
# is unchanged)
report_case( '\n\tearth\n', BAR_EARTH )
report_case( '\n proxima b - sub-alfvenic,earth-like dipole\n', BAR_PROXIMA,
             nsub, vsub, bsub, asig.m_earth )
report_case( '\n proxima b - super-alfvenic,earth-like dipole\n', BAR_PROXIMA,
             nsup, vsup, bsup, asig.m_earth )
report_case( '\n proxima b - sub-alfvenic,neptune-like dipole\n', BAR_PROXIMA,
             nsub, vsub, bsub, asig.m_neptune )
report_case( '\n proxima b - super-alfvenic,neptune-like dipole\n', BAR_PROXIMA,
             nsup, vsup, bsup, asig.m_neptune )
report_case( '\n proxima b - sub-alf neptune mass, radius and dipole\n', BAR_PROXIMA,
             nsub, vsub, bsub, asig.m_neptune,
             radius=3.883*asig.r_earth, band='1041' )
report_case( '\n proxima b - super alf neptune mass, radius and dipole\n', BAR_PROXIMA,
             nsup, vsup, bsup, asig.m_neptune,
             radius=3.883*asig.r_earth, band='1041' )
|
{"hexsha": "004697482942dad4b22eb7a23fb4b8f2c3a5ce88", "size": 16524, "ext": "py", "lang": "Python", "max_stars_repo_path": "signal/power_m2.py", "max_stars_repo_name": "rodluger/exoaurora", "max_stars_repo_head_hexsha": "0ec1c59c368ccbf0c9eb2450d52b7ec7897ce322", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-18T04:23:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-18T04:23:34.000Z", "max_issues_repo_path": "signal/power_m2.py", "max_issues_repo_name": "rodluger/exoaurora", "max_issues_repo_head_hexsha": "0ec1c59c368ccbf0c9eb2450d52b7ec7897ce322", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "signal/power_m2.py", "max_forks_repo_name": "rodluger/exoaurora", "max_forks_repo_head_hexsha": "0ec1c59c368ccbf0c9eb2450d52b7ec7897ce322", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-18T04:23:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T11:03:53.000Z", "avg_line_length": 37.6400911162, "max_line_length": 94, "alphanum_fraction": 0.6378600823, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 5754}
|
# Autogenerated wrapper script for WaveFD_jll for i686-linux-musl
# Exports the shared-library handles for the WaveFD propagators
# (2D/3D acoustic Iso/TTI/VTI kernels, illumination, spacetime utilities).
export libillumination, libprop2DAcoIsoDenQ_DEO2_FDTD, libprop2DAcoTTIDenQ_DEO2_FDTD, libprop2DAcoVTIDenQ_DEO2_FDTD, libprop3DAcoIsoDenQ_DEO2_FDTD, libprop3DAcoTTIDenQ_DEO2_FDTD, libprop3DAcoVTIDenQ_DEO2_FDTD, libspacetime
# JLL dependencies whose libraries must be available before WaveFD's.
using CompilerSupportLibraries_jll
using FFTW_jll
JLLWrappers.@generate_wrapper_header("WaveFD")
# Declare one library product per shared object; each path is resolved and
# loaded in __init__ below.
JLLWrappers.@declare_library_product(libillumination, "libillumination.so")
JLLWrappers.@declare_library_product(libprop2DAcoIsoDenQ_DEO2_FDTD, "libprop2DAcoIsoDenQ_DEO2_FDTD.so")
JLLWrappers.@declare_library_product(libprop2DAcoTTIDenQ_DEO2_FDTD, "libprop2DAcoTTIDenQ_DEO2_FDTD.so")
JLLWrappers.@declare_library_product(libprop2DAcoVTIDenQ_DEO2_FDTD, "libprop2DAcoVTIDenQ_DEO2_FDTD.so")
JLLWrappers.@declare_library_product(libprop3DAcoIsoDenQ_DEO2_FDTD, "libprop3DAcoIsoDenQ_DEO2_FDTD.so")
JLLWrappers.@declare_library_product(libprop3DAcoTTIDenQ_DEO2_FDTD, "libprop3DAcoTTIDenQ_DEO2_FDTD.so")
JLLWrappers.@declare_library_product(libprop3DAcoVTIDenQ_DEO2_FDTD, "libprop3DAcoVTIDenQ_DEO2_FDTD.so")
JLLWrappers.@declare_library_product(libspacetime, "libspacetime.so")
# Autogenerated JLL init: declares where each WaveFD shared library lives and
# opens it with RTLD_LAZY | RTLD_DEEPBIND (DEEPBIND keeps each library's own
# symbols from being shadowed by previously-loaded libraries).
function __init__()
    JLLWrappers.@generate_init_header(CompilerSupportLibraries_jll, FFTW_jll)
    JLLWrappers.@init_library_product(
        libillumination,
        "lib/libillumination.so",
        RTLD_LAZY | RTLD_DEEPBIND,
    )
    JLLWrappers.@init_library_product(
        libprop2DAcoIsoDenQ_DEO2_FDTD,
        "lib/libprop2DAcoIsoDenQ_DEO2_FDTD.so",
        RTLD_LAZY | RTLD_DEEPBIND,
    )
    JLLWrappers.@init_library_product(
        libprop2DAcoTTIDenQ_DEO2_FDTD,
        "lib/libprop2DAcoTTIDenQ_DEO2_FDTD.so",
        RTLD_LAZY | RTLD_DEEPBIND,
    )
    JLLWrappers.@init_library_product(
        libprop2DAcoVTIDenQ_DEO2_FDTD,
        "lib/libprop2DAcoVTIDenQ_DEO2_FDTD.so",
        RTLD_LAZY | RTLD_DEEPBIND,
    )
    JLLWrappers.@init_library_product(
        libprop3DAcoIsoDenQ_DEO2_FDTD,
        "lib/libprop3DAcoIsoDenQ_DEO2_FDTD.so",
        RTLD_LAZY | RTLD_DEEPBIND,
    )
    JLLWrappers.@init_library_product(
        libprop3DAcoTTIDenQ_DEO2_FDTD,
        "lib/libprop3DAcoTTIDenQ_DEO2_FDTD.so",
        RTLD_LAZY | RTLD_DEEPBIND,
    )
    JLLWrappers.@init_library_product(
        libprop3DAcoVTIDenQ_DEO2_FDTD,
        "lib/libprop3DAcoVTIDenQ_DEO2_FDTD.so",
        RTLD_LAZY | RTLD_DEEPBIND,
    )
    JLLWrappers.@init_library_product(
        libspacetime,
        "lib/libspacetime.so",
        RTLD_LAZY | RTLD_DEEPBIND,
    )
    JLLWrappers.@generate_init_footer()
end  # __init__()
|
{"hexsha": "043f4e679009b550f5f7878a623a2963f1fc6ca5", "size": 2595, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrappers/i686-linux-musl.jl", "max_stars_repo_name": "JuliaBinaryWrappers/WaveFD_jll.jl", "max_stars_repo_head_hexsha": "2a6c3695f9296062c62f83a22df5fc3a191e36f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wrappers/i686-linux-musl.jl", "max_issues_repo_name": "JuliaBinaryWrappers/WaveFD_jll.jl", "max_issues_repo_head_hexsha": "2a6c3695f9296062c62f83a22df5fc3a191e36f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrappers/i686-linux-musl.jl", "max_forks_repo_name": "JuliaBinaryWrappers/WaveFD_jll.jl", "max_forks_repo_head_hexsha": "2a6c3695f9296062c62f83a22df5fc3a191e36f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7313432836, "max_line_length": 222, "alphanum_fraction": 0.7818882466, "num_tokens": 884}
|
#!/usr/bin/env python
# coding: utf-8
# # Notebook for pulling account usage data on the IBM Cloud
# [IBM Cloud](https://cloud.ibm.com) is a platform of cloud services that help partners and clients solve a variety business problems.
#
# **NOTE:**
# This notebook was initially based upon a Python notebook provided by Dan Toczala.
#
# Further work to extend some of the concepts, and take advantage of the IBM Cloud usage API (see https://cloud.ibm.com/apidocs/metering-reporting), was done by the folowing contributors:
#
# D. Toczala (dtoczala@us.ibm.com)
#
# In[ ]:
#Import utilities
import json
import sys
import codecs
import re
import time
import requests
from os.path import join, dirname
from datetime import datetime
import unicodecsv as csv
import pandas as pd
import numpy as np
from botocore.client import Config
import ibm_boto3
# In[ ]:
#
# Build a simple class to hold all of the billable data and other supporting data
# coming from all of these different sources.
#
class CloudService:
    """Holds the billing and metadata fields for one IBM Cloud service instance."""

    # Metadata fields that start out as empty strings.
    _STRING_FIELDS = (
        "resource_id", "type", "name", "crn", "region",
        "resource_grp", "resource_grp_id", "org", "org_id",
        "space", "space_id", "month", "year",
    )

    def __init__(self, guid):
        """Create a record keyed by *guid*; all other fields start empty."""
        self.guid = guid
        for field in self._STRING_FIELDS:
            setattr(self, field, "")
        # Billable cost for the billing period, accumulated later.
        self.cost = 0.0
# In[ ]:
DEBUG = True
#DEBUG = False
#
UNDERSCORE = '_'
#
# Set Initial IBM Cloud and COS parameters
#
# YOU WILL NEED TO UNCOMMENT THIS CREDENTIALS LIST AND PROVIDE VALID SERVICE CREDENTIALS
# FOR THIS SCRIPT TO RUN PROPERLY
#
#credentials = {
# 'IBM_CLOUD_ACCOUNT_ID': 'x50xx4xx0dxxxxxxf8xxxxx1xx1xxxx5',
# 'IBM_CLOUD_ACCOUNT_API_KEY': 'xxxxL1ixxxxxFX9fxxxxxxxdgFsxxxxxIDxxxxxxx08S',
# # Account COS creds
# 'IAM_COS_SERVICE_ID': 'crn:v1:bluemix:public:iam-identity::a/dxxxxxxxxdc000000000exxxxa0002xx::serviceid:ServiceId-exa00000-7xx0-00xx-bxx6-60006xxxxxx5',
# 'IBM_COS_API_KEY_ID': 'Wxxxxxxi00007-x04-0YAxxxxqFwxxxxxxexZ0000004',
# 'RESULTS_BUCKET': 'billing-data'
#}
#
# Service endpoints
#
# You may need to change these if you use a different data center
#
endpoints = {
'IBM_CLOUD_BILLING_ENDPOINT': 'https://billing.cloud.ibm.com',
'IBM_CLOUD_IAM_ENDPOINT': 'https://iam.cloud.ibm.com',
'IBM_CLOUD_TAG_ENDPOINT': 'https://tags.global-search-tagging.cloud.ibm.com',
'IBM_CLOUD_RESCONT_ENDPOINT': 'https://resource-controller.cloud.ibm.com/v2',
'IBM_CLOUD_RESMGR_ENDPOINT': 'https://resource-manager.bluemix.net/v2/',
'IBM_CLOUD_REPORTING_PREFIX': '/v4/accounts/',
'IBM_AUTH_ENDPOINT': 'https://iam.bluemix.net/oidc/token',
#
# Cloud Object Storage Settings
#
'COS_ENDPOINT': 'https://s3.us-south.cloud-object-storage.appdomain.cloud',
}
#
# Get current date and time
#
myDatetime = datetime.now()
CURRENT_MONTH = datetime.now().strftime('%m')
CURRENT_YEAR = datetime.now().strftime('%Y')
if (DEBUG):
print (myDatetime, " Month - ", CURRENT_MONTH, " Year - ", CURRENT_YEAR)
myDatetime = re.sub(r'\s',UNDERSCORE,str(myDatetime))
goodDatetime,junk = myDatetime.split('.')
#
# Build filename
#
Billing_file = 'IBM_Cloud_Billing_'+goodDatetime+'.csv'
Billing_path = './'+ Billing_file
#
# Build your common request header, userID for REST calls
#
#REQ_HEADER = {'Authorization':credentials['IBM_CLOUD_ACCOUNT_BEARER_TOKEN']}
USER_ID = credentials['IBM_CLOUD_ACCOUNT_ID']
API_KEY = credentials['IBM_CLOUD_ACCOUNT_API_KEY']
IBM_CLOUD_BILLING_ENDPOINT = endpoints['IBM_CLOUD_BILLING_ENDPOINT']
IBM_CLOUD_REPORTING_PREFIX = endpoints['IBM_CLOUD_REPORTING_PREFIX']
# # Setup Cloud Object Storage (COS)
# In[ ]:
#
# IBM COS interface
#
def __iter__(self): return 0
from ibm_botocore.client import Config
#
cos = ibm_boto3.client(service_name='s3',
ibm_api_key_id=credentials['IBM_COS_API_KEY_ID'],
ibm_service_instance_id=credentials['IAM_COS_SERVICE_ID'],
ibm_auth_endpoint=endpoints['IBM_AUTH_ENDPOINT'],
config=Config(signature_version='oauth'),
endpoint_url=endpoints['COS_ENDPOINT'])
# # IBM Cloud Usage API methods
# Define some useful methods to grab data using the IBM Cloud Usage REST API (https://cloud.ibm.com/apidocs/metering-reporting).
#
# In[ ]:
#
# Go and grab a bearer token for an account using their API Key, from a call to the IAM Identity API endpoint
#
def getBearerToken(accountID, APIKey):
    """Exchange an IAM API key for a bearer token.

    Calls the IBM Cloud IAM Identity token endpoint and returns the
    ``access_token`` string. Retries (with a 2s pause) on request errors
    or non-200 responses, up to MAX_ATTEMPTS attempts.

    :param accountID: IBM Cloud account ID (kept for signature parity with
        the other API helpers; the token call itself only needs the key).
    :param APIKey: IAM API key to exchange for a token.
    :returns: bearer token string.
    :raises RuntimeError: if no token could be obtained after all attempts.
    """
    MAX_ATTEMPTS = 3
    REQ_HEADER = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Accept': 'application/json'
    }
    # BUG FIX: the original body ignored the APIKey parameter and read the
    # module-level ``credentials`` dict instead; use the argument.
    DATA = {
        'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
        'apikey': APIKey
    }
    target_url = endpoints['IBM_CLOUD_IAM_ENDPOINT'] + '/identity/token'
    for attempt in range(MAX_ATTEMPTS):
        try:
            r = requests.post(url=target_url, data=DATA, headers=REQ_HEADER)
        except requests.RequestException:
            # Network-level failure: pause and retry.
            time.sleep(2)
            continue
        if r.status_code == 200:
            data = r.json()
            if DEBUG:
                # SECURITY: do not dump the JSON payload here -- it contains
                # the live bearer token (the original DEBUG path printed it).
                print("Got bearer token")
            return data['access_token']
        print("ERROR - Status ", str(r.status_code),
              " returned from call to IAM Identity management.\n")
        time.sleep(2)
    # All attempts exhausted: fail loudly instead of the original's
    # opaque KeyError on an empty dict.
    raise RuntimeError(
        "Unable to obtain IAM bearer token after %d attempts" % MAX_ATTEMPTS)
# Go and grab the JSON respopnse from a call to the SUMMARY API endpoint for some user selected month and year
# If month and year are not provided, use the current month and year
#
def getAccountSummaryJSON(accountID, token, billYear, billMonth):
    """Fetch the account billing summary for one month from the Usage API.

    :param accountID: IBM Cloud account ID.
    :param token: IAM bearer token (sent verbatim as the Authorization header).
    :param billYear: four-digit year string; "" means the current year.
    :param billMonth: two-digit month string; "" means the current month.
    :returns: parsed JSON dict from the /summary endpoint, or {} if all
        attempts fail (matches the original fallback behavior).
    """
    MAX_ATTEMPTS = 3
    # Default to the current billing period when not specified.
    if billYear == "":
        billYear = CURRENT_YEAR
    if billMonth == "":
        billMonth = CURRENT_MONTH
    target_url = (IBM_CLOUD_BILLING_ENDPOINT + IBM_CLOUD_REPORTING_PREFIX
                  + accountID + '/summary/' + billYear + '-' + billMonth)
    REQ_HEADER = {'Authorization': token}
    # Bounded retry loop replaces the original while/try/break tangle,
    # which had unreachable branches and a bare except.
    for attempt in range(MAX_ATTEMPTS):
        try:
            r = requests.get(url=target_url, params={}, headers=REQ_HEADER)
        except requests.RequestException:
            time.sleep(2)
            continue
        if r.status_code == 200:
            if DEBUG:
                print("Got summary data")
            return r.json()
        print("ERROR - Status ", str(r.status_code),
              " returned from call to /summary.\n")
        time.sleep(2)
    return {}
# Go and grab the JSON respopnse from a call to the USAGE API endpoint for some user selected month and year
# If month and year are not provided, use the current month and year
#
def getAccountUsageJSON(accountID, token, billYear, billMonth):
    """Fetch per-resource account usage for one month from the Usage API.

    :param accountID: IBM Cloud account ID.
    :param token: IAM bearer token (sent verbatim as the Authorization header).
    :param billYear: four-digit year string; "" means the current year.
    :param billMonth: two-digit month string; "" means the current month.
    :returns: parsed JSON dict from the /usage endpoint, or {} if all
        attempts fail (matches the original fallback behavior).
    """
    MAX_ATTEMPTS = 3
    # Default to the current billing period when not specified.
    if billYear == "":
        billYear = CURRENT_YEAR
    if billMonth == "":
        billMonth = CURRENT_MONTH
    target_url = (IBM_CLOUD_BILLING_ENDPOINT + IBM_CLOUD_REPORTING_PREFIX
                  + accountID + '/usage/' + billYear + '-' + billMonth)
    REQ_HEADER = {'Authorization': token}
    # Bounded retry loop; same error handling style as getAccountSummaryJSON.
    for attempt in range(MAX_ATTEMPTS):
        try:
            r = requests.get(url=target_url, params={}, headers=REQ_HEADER)
        except requests.RequestException:
            time.sleep(2)
            continue
        if r.status_code == 200:
            if DEBUG:
                print("Got usage data")
            return r.json()
        print("ERROR - Status ", str(r.status_code),
              " returned from call to /usage.\n")
        time.sleep(2)
    return {}
#
# Go and grab the JSON response from a call to the RESOURCE CONTROLLER API endpoint for some user,
# and return a list of resources
#
def getAccountResourceList(accountID, token):
    """Fetch the account's resource instances from the Resource Controller.

    :param accountID: IBM Cloud account ID (not placed in the URL; the
        Resource Controller scopes results to the token's account).
    :param token: IAM bearer token (sent as "Bearer <token>").
    :returns: parsed JSON dict from /resource_instances, or {} if all
        attempts fail (matches the original fallback behavior).
    """
    MAX_ATTEMPTS = 3
    target_url = endpoints['IBM_CLOUD_RESCONT_ENDPOINT'] + "/resource_instances"
    REQ_HEADER = {'Authorization': "Bearer " + token}
    # Bounded retry loop; same error handling style as the Usage API helpers.
    for attempt in range(MAX_ATTEMPTS):
        try:
            r = requests.get(url=target_url, headers=REQ_HEADER)
        except requests.RequestException:
            time.sleep(2)
            continue
        if r.status_code == 200:
            if DEBUG:
                print("Got resource data")
            return r.json()
        print("ERROR - Status ", str(r.status_code),
              " returned from call to Resource Controller.\n")
        time.sleep(2)
    return {}
#
# Go and grab the JSON response from a call to the RESOURCE CONTROLLER API endpoint for some user,
# and return a list of resources
#
def parseAccountResourceList(accountID, token):
    """Return the account's resource-instance list.

    NOTE(review): this function was a byte-for-byte duplicate of
    ``getAccountResourceList`` (despite the "parse" name, it only fetched).
    It now delegates so the retry/error handling lives in one place; the
    signature and return value are unchanged for existing callers.
    """
    return getAccountResourceList(accountID, token)
#
# Go and grab the JSON response from a call to the RESOURCE CONTROLLER API endpoint for some user,
# and return a list of resources
#
def getAccountResourceGroupList(accountID, token):
    """Fetch the account's resource groups from the Resource Controller.

    :param accountID: IBM Cloud account ID (passed as the account_id query
        parameter).
    :param token: IAM bearer token (sent as "Bearer <token>").
    :returns: parsed JSON dict from /resource_groups, or {} if all attempts
        fail (matches the original fallback behavior).
    """
    MAX_ATTEMPTS = 3
    target_url = (endpoints['IBM_CLOUD_RESCONT_ENDPOINT']
                  + "/resource_groups?account_id=" + accountID)
    REQ_HEADER = {'Authorization': "Bearer " + token}
    # Bounded retry loop; same error handling style as getAccountResourceList.
    for attempt in range(MAX_ATTEMPTS):
        try:
            r = requests.get(url=target_url, headers=REQ_HEADER)
        except requests.RequestException:
            time.sleep(2)
            continue
        if r.status_code == 200:
            if DEBUG:
                print("Got resource group data")
            return r.json()
        print("ERROR - Status ", str(r.status_code),
              " returned from call to Resource Controller.\n")
        time.sleep(2)
    return {}
# # Go and grab all of your data
#
# First we get a bearer token from the IAM Identity API - this will be in our header to authenticate all of the following calls.
#
# Then we make a call to get the account summary and the account usage data from the Usage API.
#
#
#
# In[ ]:
#
# Go and grab the monthly usage data for some specified month,
# and print a list of resources and usage costs
#
# Bill year and Bill Month must be two digit numeric strings (i.e. "01", "09", 19", etc.)
#
def getMonthlyUsageList(accountID,apikey,billYear,billMonth):
    """Collect one month of billable usage for every resource in the account.

    Fetches a bearer token, pulls summary/usage/resource/resource-group data
    from the IBM Cloud APIs, joins usage costs onto resource instances, and
    returns a list of (date, resource group, guid, name, cost) tuples for
    every resource with billable cost above $0.01, sorted by
    "resource group -- name -- guid".

    billYear / billMonth must be numeric strings ("2019", "08"); "" selects
    the current year/month.
    """
    #DEBUG = True
    DEBUG=False
    result_array = []
    #
    # Local copy of the module-level CloudService container class.
    # NOTE(review): this shadows the module-level CloudService; presumably
    # duplicated so the function is self-contained — consider reusing the
    # module-level class instead.
    #
    class CloudService:
        def __init__(self, guid):
            self.guid = guid
            self.resource_id = ""
            self.type = ""
            self.name = ""
            self.crn = ""
            self.region = ""
            self.resource_grp = ""
            self.resource_grp_id = ""
            self.org = ""
            self.org_id = ""
            self.space = ""
            self.space_id = ""
            self.cost = 0.0
            self.month = ""
            self.year = ""
    #
    AllCloudServices = []
    #
    # Get current date and time
    #
    myDatetime = datetime.now()
    CURRENT_MONTH = datetime.now().strftime('%m')
    CURRENT_YEAR = datetime.now().strftime('%Y')
    #
    # Determine time period ("" means current year/month)
    #
    if billYear == "":
        billYear = CURRENT_YEAR
    if billMonth == "":
        billMonth = CURRENT_MONTH
    #
    # First go and get a bearer token using your API key - and reset the request header
    #
    IBM_CLOUD_ACCOUNT_BEARER_TOKEN = getBearerToken(accountID,apikey)
    #
    # First get account summary.
    # NOTE(review): the API calls below use the module-level USER_ID rather
    # than the accountID parameter — confirm whether that is intentional.
    #
    acct_summary_data = getAccountSummaryJSON(USER_ID,IBM_CLOUD_ACCOUNT_BEARER_TOKEN,billYear,billMonth)
    if (DEBUG):
        print ("\n*************************\n")
        print ("Account Summary Data")
        print ("\n**********\n")
        print(json.dumps(acct_summary_data, indent=2))
    #
    # Then get account usage data (per-resource billable costs)
    #
    acct_usage_data = getAccountUsageJSON(USER_ID,IBM_CLOUD_ACCOUNT_BEARER_TOKEN,billYear,billMonth)
    if (DEBUG):
        print ("\n*************************\n")
        print ("Account Usage Data")
        print ("\n**********\n")
        print(json.dumps(acct_usage_data, indent=2))
    #
    # Get Resource Group data (to translate group IDs into names)
    #
    resource_group_data = getAccountResourceGroupList(USER_ID,IBM_CLOUD_ACCOUNT_BEARER_TOKEN)
    if (DEBUG):
        print ("\n*************************\n")
        print ("Account Resource Group List")
        print ("\n**********\n")
        print(json.dumps(resource_group_data, indent=2))
    #
    # Now go and get service/instance data
    #
    data = getAccountResourceList(USER_ID,IBM_CLOUD_ACCOUNT_BEARER_TOKEN)
    if (DEBUG):
        print ("\n*************************\n")
        print ("Account Resource List")
        print ("\n**********\n")
        print(json.dumps(data, indent=2))
    #
    # Create a dict of Resource Group names, keyed by the Resource Group ID
    #
    resourceNames = {}
    for resGroup in resource_group_data['resources']:
        thisKey = resGroup['id']
        resourceNames[thisKey] = resGroup['name']
    if (DEBUG):
        print (resourceNames)
    #
    # Parse out all of that resource data - we want a list of services objects,
    # with relevant data for each instance (cost filled in later)
    #
    for resource in data['resources']:
        a = CloudService(resource['guid'])
        a.type = resource['type']
        a.name = resource['name']
        a.crn = resource['crn']
        a.region = resource['region_id']
        a.resource_grp_id = resource['resource_group_id']
        rgKey = resource['resource_group_id']
        a.resource_grp = resourceNames[rgKey]
        a.resource_id = resource['resource_id']
        a.org = ""
        a.space = ""
        a.cost = 0.0
        a.month = billMonth
        a.year = billYear
        #
        AllCloudServices.append(a)
        if (DEBUG):
            print ("Storing service " + a.name)
            print ("        GUID is " + a.guid)
            print ("        type is " + a.type)
            print ("        name is " + a.name)
            print ("         crn is " + a.crn)
            print ("   region ID is " + a.region)
            print ("Resource group ID is " + a.resource_grp_id)
            print ("  Resource ID is " + a.resource_id)
            print ("  Billable cost is " + str(a.cost))
            print ("")
    #
    # Join usage costs onto the service records by resource_id.
    # NOTE(review): the loop variable shadows the local CloudService class;
    # harmless here because the class is not used again, but worth renaming.
    #
    for CloudService in AllCloudServices:
        #
        # Find matching resource id
        #
        for accounts in acct_usage_data['resources']:
            #
            # Do we have a match?
            #
            matchId = CloudService.resource_id
            thisId = accounts['resource_id']
            if thisId == matchId:
                #
                # We have a match, save off the cost information
                #
                serviceCost = accounts['billable_cost']
                CloudService.cost = serviceCost
                if (DEBUG):
                    print ("Match found for " + thisId + " named " + CloudService.name + " Cost of " + str(serviceCost))
        # end loop accounts
    # end loop CloudService
    #
    # Collect every resource with billable usage (above $0.01)
    #
    storageArray = []
    for CloudService in AllCloudServices:
        #
        # Find any resource ID that has billable use (above $0.01)
        #
        if CloudService.cost > 0.01:
            #
            # Save the name and GUID in an array (so you can sort by name)
            #
            packedName = CloudService.resource_grp + " -- " + CloudService.name + " -- " + CloudService.guid
            storageArray.append(packedName)
            if (DEBUG):
                print ("Saved " + packedName)
    #
    # Sort the results (by resource group, then name, then guid)
    #
    storageArray.sort()
    #
    # Now loop thru the sorted storageArray and emit one result tuple each
    #
    for BillableUnit in storageArray:
        #
        # Split out the name and the resource ID
        #
        (thisResourceGroup,thisName,thisID) = BillableUnit.split(" -- ")
        #
        # (Column padding/truncation is handled by printResults, not here.)
        #
        # Find resource and billable amount
        #
        for CloudService in AllCloudServices:
            if ((CloudService.guid == thisID) and (CloudService.name == thisName)):
                thisCost = '${:>10.2f}'.format(CloudService.cost)
                thisDate = billMonth + "/" + billYear
                result_line = (thisDate,thisResourceGroup,CloudService.guid,thisName,thisCost)
                result_array.append(result_line)
    #
    return result_array
# In[ ]:
def printResults(resultArray):
    """Print each billing entry as a fixed-width line and return True.

    The resource group column is padded/truncated to 40 characters and the
    resource name column to 60, so the output lines up under the report
    header printed by the main program.
    """
    for thisDate, thisResourceGroup, thisGuid, thisName, thisCost in resultArray:
        # Slicing truncates over-long values; ljust pads short ones with spaces.
        group_col = thisResourceGroup[:40].ljust(40)
        name_col = thisName[:60].ljust(60)
        print(thisDate + " " + group_col + " " + thisGuid + " " + name_col + " " + thisCost)
    return True
# In[ ]:
def dumpToCSV(resultArray, csvfilename):
    """Write billing entries to a local CSV file and upload it to COS.

    :param resultArray: list of (date, resource group, guid, name, cost)
        string tuples, as produced by getMonthlyUsageList.
    :param csvfilename: name of the CSV file to create; also used as the
        object key in the COS results bucket.
    :returns: True on completion.
    """
    # BUG FIX: the original body ignored this parameter and read the
    # module-level ``csvFilename`` global instead.
    csvfileOut = './' + csvfilename
    # utf-8-sig writes a BOM so spreadsheet apps detect the encoding.
    # The context manager guarantees the file is closed even on error.
    with codecs.open(csvfileOut, 'w', encoding="utf-8-sig") as csvWriter:
        # Header row
        csvWriter.write("Date" + "," + "Resource Group" + "," + "Resource GUID" + "," + "Resource Name" + "," + "Cost")
        csvWriter.write("\n")
        # One row per billing entry
        for thisDate, thisResourceGroup, thisGuid, thisName, thisCost in resultArray:
            csvWriter.write(thisDate + "," + thisResourceGroup + "," + thisGuid + "," + thisName + "," + thisCost)
            csvWriter.write("\n")
    #
    # Write results out to Cloud Object Storage
    #
    cos.upload_file(Filename=csvfileOut, Bucket=credentials['RESULTS_BUCKET'], Key=csvfilename)
    return True
# In[ ]:
#
# MAIN PROGRAM
#
# This sample program will both print and write the usage data to a CSV file out on Cloud
# Object Storage, for the months of August 2019, September 2019 and October 2019.
#
# You can easily modify this to provide billing data over the past year, or some user
# defined window of time.
#
DEBUG=False
#
overall_results =[]
csvFilename = 'Results_'+goodDatetime+'.csv'
#
# Print Header
#
print ("*******************************************************************************************************************************************************************")
print ("Date Resource Group Resource GUID Resource Name Cost")
print ("*******************************************************************************************************************************************************************")
#
# Get usage for August 2019
#
results = getMonthlyUsageList(USER_ID,API_KEY,"2019","08")
for entry in results:
overall_results.append(entry)
status = printResults(results)
#
# Get usage for September 2019
#
results = getMonthlyUsageList(USER_ID,API_KEY,"2019","09")
for entry in results:
overall_results.append(entry)
status = printResults(results)
#
# Get usage for October 2019
#
results = getMonthlyUsageList(USER_ID,API_KEY,"2019","10")
for entry in results:
overall_results.append(entry)
status = printResults(results)
#
# Dump ALL results out to a CSV file
#
csvFilename = 'Results_'+goodDatetime+'.csv'
status = dumpToCSV(overall_results,csvFilename)
#
# end
# In[ ]:
|
{"hexsha": "4b27249dde45369f2a6caa715b95659b3e19f822", "size": 25085, "ext": "py", "lang": "Python", "max_stars_repo_path": "CSM_IBM_Cloud_Usage.py", "max_stars_repo_name": "dtoczala/IBMCloudBillingScript", "max_stars_repo_head_hexsha": "2d8674bc768fc060a9993fc30c63127bd6c7ff3a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-08T01:19:55.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-26T10:59:05.000Z", "max_issues_repo_path": "CSM_IBM_Cloud_Usage.py", "max_issues_repo_name": "dtoczala/IBMCloudBillingScript", "max_issues_repo_head_hexsha": "2d8674bc768fc060a9993fc30c63127bd6c7ff3a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CSM_IBM_Cloud_Usage.py", "max_forks_repo_name": "dtoczala/IBMCloudBillingScript", "max_forks_repo_head_hexsha": "2d8674bc768fc060a9993fc30c63127bd6c7ff3a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-12T16:47:34.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-12T16:47:34.000Z", "avg_line_length": 30.0059808612, "max_line_length": 187, "alphanum_fraction": 0.5752441698, "include": true, "reason": "import numpy", "num_tokens": 5891}
|
import os
import warnings
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from typing import Dict, List, Callable
from tensorflow.keras import Model
from tensorflow.keras.utils import Sequence
from .report_utils import cae_report, cnn_report, flat_report
from ..models import cae_200, cae_500, cae_1000, cnn_200, cnn_500, cnn_1000
from ..utils import get_model_history_path, get_model_weights_path
from ..datasets import build_multivariate_dataset_cae, build_synthetic_dataset_cae, build_biological_dataset_cae
from ..datasets import build_multivariate_dataset_cnn, build_synthetic_dataset_cnn, build_biological_dataset_cnn
warnings.simplefilter("ignore")
weights = [2, 10]
models = {
"cae": {
200: cae_200,
500: cae_500,
1000: cae_1000,
},
"cnn": {
200: cnn_200,
500: cnn_500,
1000: cnn_1000
}
}
datasets = {
"cae": [
build_synthetic_dataset_cae,
build_multivariate_dataset_cae,
build_biological_dataset_cae
],
"cnn": [
build_synthetic_dataset_cnn,
build_multivariate_dataset_cnn,
build_biological_dataset_cnn
]
}
report_types = {
"cae": cae_report,
"cnn": cnn_report
}
def get_report_path(root, model, dataset, trained_on, run_type):
    """Return the CSV path for a (model, dataset, weights, run) combination.

    The filename encodes the model name, the dataset-builder function name,
    the weight directory the model was trained on, and the run type. The
    parent directory is created if missing, so callers can write directly.
    """
    filename = "report_{}_{}_{}_{}.csv".format(
        model.name, dataset.__name__, trained_on, run_type
    )
    path = "{}/{}".format(root, filename)
    # Ensure the target directory exists before the caller writes the CSV.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    return path
def execute_report(root, report, model, trained_on, dataset, run_type, sequence):
    """Render one report and persist it as CSV; no-op if the CSV exists.

    The existence check makes report generation resumable: re-running the
    pipeline skips combinations that were already written to disk.
    """
    path = get_report_path(root, model, dataset, trained_on, run_type)
    if os.path.exists(path):
        # Already rendered in a previous run.
        return
    raw_report = build_report(model, report, sequence)
    rows = flat_report(raw_report, model, trained_on, dataset, run_type)
    pd.DataFrame(rows).to_csv(path)
def build_report(model: Model, report: Callable, sequence: Sequence):
    """Score model predictions over up to 100 batches of *sequence*.

    Shuffles the sequence first (on_epoch_end), stacks the rendered batch
    inputs and targets, and returns ``report(y_true, model.predict(X))``.
    """
    sequence.on_epoch_end()
    batch_count = min(100, sequence.steps_per_epoch)
    batches = [
        sequence[index]
        for index in tqdm(range(batch_count), desc="Rendering batches", leave=False)
    ]
    inputs, targets = zip(*batches)
    X = np.concatenate(inputs)
    y = np.concatenate(targets)
    return report(y, model.predict(X))
def build_reports(root, **dataset_kwargs):
    """Run every (model type, window size, weight set, dataset) report.

    For each model type (cae/cnn) and window size (200/500/1000), builds the
    train/test datasets, loads each available weight set into the model, and
    renders five reports per weight set: single-gap test/train, multivariate
    test/train, and biological validation. Results are written to CSVs under
    *root* (see execute_report), which skips reports that already exist.

    dataset_kwargs are forwarded to the synthetic/multivariate dataset
    builders (not to the biological one).
    """
    for model_type in tqdm(models, desc="Model types", leave=False):
        report = report_types[model_type]
        for window_size, build_model in tqdm(models[model_type].items(), desc="Models", leave=False):
            single_gap_dataset, multivariate_dataset, biological_dataset = datasets[model_type]
            single_train, single_test = single_gap_dataset(window_size, **dataset_kwargs)
            multivariate_train, multivariate_test = multivariate_dataset(window_size, **dataset_kwargs)
            bio = biological_dataset(window_size)
            model = build_model(verbose=False)
            # CAE models additionally have per-weight variants of each
            # training directory; CNN models only have the two base ones.
            root_directories = ("single_gap", "multivariate_gaps")
            if model_type == "cae":
                for weight in weights:
                    root_directories += (
                        "single_gap_with_weight_%d"%weight,
                        "multivariate_gaps_with_weight_%d"%weight
                    )
            for weight_directory in tqdm(root_directories, desc="weights", leave=False):
                # Reuse one model object, swapping in each trained weight set.
                model.load_weights(get_model_weights_path(model, path=weight_directory))
                bar = tqdm(desc="Running reports", total=5, leave=False)
                execute_report(
                    root, report, model, weight_directory, single_gap_dataset, "single gap test", single_test
                )
                bar.update()
                execute_report(
                    root, report, model, weight_directory, single_gap_dataset, "single gap train", single_train
                )
                bar.update()
                execute_report(
                    root, report, model, weight_directory, multivariate_dataset, "multivariate gaps test", multivariate_test
                )
                bar.update()
                execute_report(
                    root, report, model, weight_directory, multivariate_dataset, "multivariate gaps train", multivariate_train
                )
                bar.update()
                execute_report(
                    root, report, model, weight_directory, biological_dataset, "biological validation", bio
                )
                bar.update()
                bar.close()
|
{"hexsha": "debf5e24693709a7dd00571939021e8d8f13c0c7", "size": 4557, "ext": "py", "lang": "Python", "max_stars_repo_path": "repairing_genomic_gaps/reports/build_reports.py", "max_stars_repo_name": "LucaCappelletti94/repairing_genomic_gaps", "max_stars_repo_head_hexsha": "38d43c732cbd092b52c1eaf0b33a9bd47a14ebd4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "repairing_genomic_gaps/reports/build_reports.py", "max_issues_repo_name": "LucaCappelletti94/repairing_genomic_gaps", "max_issues_repo_head_hexsha": "38d43c732cbd092b52c1eaf0b33a9bd47a14ebd4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "repairing_genomic_gaps/reports/build_reports.py", "max_forks_repo_name": "LucaCappelletti94/repairing_genomic_gaps", "max_forks_repo_head_hexsha": "38d43c732cbd092b52c1eaf0b33a9bd47a14ebd4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.786259542, "max_line_length": 126, "alphanum_fraction": 0.6357252578, "include": true, "reason": "import numpy", "num_tokens": 985}
|
## ---------------- Bounded learning
"""
$(SIGNATURES)
For a single college (not a ModelObject).
Each college is endowed with `maxLearn`. Once a student has learned this much, learning productivity falls to 0 (or a constant).
`dh = exp(aScale * a) * studyTime ^ timeExp * A`
The functional form for `A` is governed by the `tfpSpec`. It depends on how much has been learned.
The way learning is defined is governed by `learnRelativeToH0`.
- false: `h learned = h - h0`
- true: `h learned = (h / h0 - 1)`. Then a college limits the percentage increase in the h endowment.
Options should be type parameters +++
A potential alternative with better scaling would be
`A = hExp .* ( log(maxLearn) .- log.(max.(1.0, h learned)) )`
`hExp` governs the slope. `maxLearn` governs the intercept. But the shape is fixed.
"""
# Per-college human-capital production function with bounded learning.
# (See the docstring above for the functional form and the role of maxLearn.)
mutable struct HcProdFctBounded <: AbstractHcProdFct
    minTfp :: Double # Additive minimum tfp
    # Base TFP level of the production function.
    tfp :: Double
    # Learning cap for this college; see `learnRelativeToH0` for how
    # "amount learned" is measured.
    maxLearn :: Double
    # Exponent on study time.
    timeExp :: Double
    # Curvature: how strongly does learning decline as (h-h0) → maxLearn
    hExp :: Double
    # Depreciation rate
    deltaH :: Double
    # Ability scale
    aScale :: Double
    # Fixed time cost per course
    timePerCourse :: Double
    # Study time per course minimum (this is assigned when study time very low)
    minTimePerCourse :: Double
    # Learning as percentage of endowment? Or as (h - h0).
    learnRelativeToH0 :: Bool
    # TFP can be computed in several ways. See `base_tfp`.
    tfpSpec :: AbstractTfpSpec
end
## ------------- All colleges
# Switches/defaults for the bounded-learning production function.
# Each `cal*` flag toggles whether the matching parameter is calibrated.
Base.@kwdef mutable struct HcProdBoundedSwitches <: AbstractHcProdSwitches
    # Same exponents on time and h. If yes, ignore `hExp`.
    # NOTE(review): the comment above appears to describe `sameExponents`
    # below, not `minTfp` — confirm and reposition.
    minTfp :: Double = 1.0
    calMinTfp :: Bool = true
    tfp :: Double = 1.0
    calTfpBase :: Bool = true
    sameExponents :: Bool = true
    timeExp :: Double = 0.6
    calTimeExp :: Bool = true
    hExp :: Double = 0.9
    # hExpLb :: Double = 0.5
    calHExp :: Bool = true
    deltaH :: Double = 0.0
    calDeltaH :: Bool = false
    aScale :: Double = 0.2
    calAScale :: Bool = true
    # Learning as percentage of endowment?
    learnRelativeToH0 :: Bool = false
    # TFP from (max learning - learning)
    tfpSpec :: AbstractTfpSpec = TfpMaxLearnMinusLearn()
end
"""
$(SIGNATURES)
Since all colleges share some parameters, we need a model object that keeps
track of parameters that are common or differ by college.
"""
mutable struct HcProdBoundedSet <: AbstractHcProdSet
    objId :: ObjectId
    switches :: HcProdBoundedSwitches
    # Number of colleges in the set.
    nc :: CollInt

    # Calibrated parameters (shared across colleges unless noted)
    minTfp :: Double
    tfp :: Double
    # Per-college learning caps (one value per college; see max_learn).
    maxLearnV :: BoundedVector
    timeExp :: Double
    hExp :: Double
    deltaH :: Double
    # Ability scale
    aScale :: Double
    # Fixed time cost per course
    timePerCourse :: Double
    # Study time per course minimum (this is assigned when study time very low)
    minTimePerCourse :: Double

    # Parameter vector holding the calibrated Params above.
    pvec :: ParamVector
end
## H production: Bounded learning
# Max learning for college `ic` (per-college values live in a BoundedVector).
max_learn(hs :: HcProdBoundedSet, ic) = ModelParams.values(hs.maxLearnV, ic);
max_learn(h :: HcProdFctBounded) = h.maxLearn;

# Accessors: the single-college Fct stores the flag/spec directly; the Set
# delegates to its switches.
learning_relative_to_h0(h :: HcProdFctBounded) = h.learnRelativeToH0;
learning_relative_to_h0(h :: HcProdBoundedSwitches) = h.learnRelativeToH0;
learning_relative_to_h0(h :: HcProdBoundedSet) = learning_relative_to_h0(h.switches);

tfp_spec(h :: HcProdFctBounded) = h.tfpSpec;
tfp_spec(h :: HcProdBoundedSwitches) = h.tfpSpec;
tfp_spec(h :: HcProdBoundedSet) = tfp_spec(h.switches);
## ---------- Construction
# Initialize with defaults
# Construct the `HcProdBoundedSet` for `nc` colleges: builds one `Param` per
# calibrated scalar (min TFP, base TFP, time exponent, TFP slope, depreciation,
# ability scale, time per course) plus the `BoundedVector` of max learning.
# NOTE(review): the `Param` positional arguments appear to be
# (name, description, symbol, value, default, lb, ub, calibrated?) — verify
# against the ModelParams package.
function make_hc_prod_set(objId :: ObjectId, nc :: Integer,
    switches :: HcProdBoundedSwitches)
    # NOTE(review): `st` appears unused below; kept for the planned refactor
    # flagged by the +++ marker.
    st = symbol_table(); # eventually use preconstructed +++
    @assert validate_hprod(switches);
    pTimePerCourse = init_time_per_course();
    pMaxLearn = init_max_learn(objId, switches, nc);
    minTfp = switches.minTfp;
    pMinTfp = Param(:minTfp, "Min tfp", "A_{min}",
        minTfp, minTfp, 0.0, 2.0, switches.calMinTfp);
    tfpBase = switches.tfp;
    pTfpBase = Param(:tfp, ldescription(:hTfpNeutral), lsymbol(:hTfpNeutral),
        tfpBase, tfpBase, 0.1, 2.0, switches.calTfpBase);
    timeExp = switches.timeExp;
    pTimeExp = Param(:timeExp, ldescription(:hTimeExp), lsymbol(:hTimeExp),
        timeExp, timeExp, 0.2, 0.9, switches.calTimeExp);
    deltaH = delta_h(switches);
    pDeltaH = Param(:deltaH, ldescription(:ddh), lsymbol(:ddh),
        deltaH, deltaH, 0.0, 0.5, cal_delta_h(switches));
    # Governs slope inside of TFP (should be inside of TFP spec +++)
    hExp = switches.hExp;
    pHExp = Param(:hExp, "TFP slope coefficient", lsymbol(:hHExp),
        hExp, hExp, gma_range(tfp_spec(switches))..., switches.calHExp);
    aScale = switches.aScale;
    tfpSpec = tfp_spec(switches);
    pAScale = Param(:aScale, ldescription(:hAScale), lsymbol(:hAScale),
        aScale, aScale, gma_range(tfpSpec)..., switches.calAScale);
    pvec = ParamVector(objId, [pMinTfp, pTfpBase, pTimeExp, pHExp, pDeltaH, pAScale, pTimePerCourse]);
    # Min study time required per course. Should never bind.
    minTimePerCourse =
        hours_per_week_to_mtu(0.1 / data_to_model_courses(1));
    h = HcProdBoundedSet(objId, switches, nc,
        minTfp, tfpBase, pMaxLearn,
        timeExp, hExp, deltaH, aScale,
        pTimePerCourse.value, minTimePerCourse, pvec);
    @assert validate_hprod_set(h)
    return h
end
# Initialize the BoundedVector of maximum learning levels (one per college).
# The upper bound depends on whether learning is measured relative to h0:
# relative learning (h/h0 - 1) gets a tighter cap than learning in levels.
function init_max_learn(objId :: ObjectId, switches, nc :: Integer)
    childId = make_child_id(objId, :tfpV);
    defaultValV = fill(0.2, nc);
    maxLearnUb = learning_relative_to_h0(switches) ? 3.0 : 5.0;
    bv = BoundedVector(childId, ParamVector(childId), :increasing,
        0.2, maxLearnUb, defaultValV);
    set_pvector!(bv; description = ldescription(:maxLearn),
        symbol = lsymbol(:maxLearn));
    return bv
end
# Convenience constructor for a 4-college test object with fixed depreciation.
make_test_hc_bounded_set(; learnRelativeToH0 = true,
    tfpSpec = TfpMaxLearnMinusLearn()) =
    make_hc_prod_set(ObjectId(:HProd), 4,
        HcProdBoundedSwitches(
            deltaH = 0.05,
            learnRelativeToH0 = learnRelativeToH0,
            tfpSpec = tfpSpec
        ));
# Make h production function for one college
# Extracts college `iCollege`'s parameters from the set into a stand-alone
# `HcProdFctBounded`.
function make_h_prod(hs :: HcProdBoundedSet, iCollege :: Integer)
    return HcProdFctBounded(hs.minTfp, hs.tfp,
        max_learn(hs, iCollege),
        time_exp(hs), h_exp(hs),
        delta_h(hs), hs.aScale,
        hs.timePerCourse, hs.minTimePerCourse,
        learning_relative_to_h0(hs), tfp_spec(hs));
end
# Stand-alone test production function for a single college.
function make_test_hprod_bounded(;
    learnRelativeToH0 = true, tfpSpec = TfpMaxLearnMinusLearn())
    minTfp = 0.7;
    # Midpoint of the admissible range for the TFP slope coefficient.
    gma = sum(gma_range(tfpSpec)) / 2;
    hS = HcProdFctBounded(minTfp, 0.6, 3.1, 0.7, gma, 0.1, 0.3, 0.01, 0.005,
        learnRelativeToH0, tfpSpec);
    @assert validate_hprod(hS);
    return hS
end
## ---------- One college
# Validity check for a single college's production function:
# positive learning cap, admissible time exponent, and a TFP slope
# coefficient inside the range allowed by the TFP spec.
function validate_hprod(hS :: HcProdFctBounded)
    gmaLb, gmaUb = gma_range(tfp_spec(hS));
    return (max_learn(hS) > 0.05) &&
        (0.0 < time_exp(hS) ≤ 1.0) &&
        (gmaLb <= h_exp(hS) <= gmaUb)
end
"""
$(SIGNATURES)
H produced (before shock is realized). Nonnegative.
# Arguments
- nTriedV
number of courses attempted this period.
- h0V
h endowments, so that `hV - h0V` is learning.
"""
function dh(hS :: HcProdFctBounded, abilV, hV, h0V, timeV, nTriedV)
sTimeV = study_time_per_course(hS, timeV, nTriedV);
# deltaHV = (max_learn(hS) ^ h_exp(hS) .- learned_h(hS, hV, h0V) .^ h_exp(hS));
# tfpV = hS.tfp .* max.(0.0, deltaHV) .^ (1.0 / h_exp(hS));
return nTriedV .* base_tfp(hS, hV, h0V) .* (sTimeV .^ hS.timeExp) .*
exp.(hS.aScale .* abilV);
end
## ---------- TFP specs
# Base TFP: the term in front of (sTime ^ beta * exp(ability))
# Affine in the spec-specific term: minTfp + tfp * f(learning so far).
function base_tfp(hS :: HcProdFctBounded, hV, h0V)
    tfpSpec = tfp_spec(hS);
    learnV = learned_h(hS, hV, h0V);
    tfpV = hS.minTfp .+ hS.tfp .* tfp(tfpSpec, learnV, max_learn(hS), h_exp(hS));
    return tfpV
end
# Expected range of TFP
# NOTE(review): the single-argument `tfp(hS)` accessor is defined elsewhere;
# presumably it returns `hS.tfp` — verify.
function tfp_range(hS :: HcProdFctBounded)
    return hS.minTfp .+ tfp(hS) .*
        tfp_range(tfp_spec(hS), h_exp(hS), max_learn(hS));
end
# Learned h, scaled for the production function.
# Returns learning either as a fraction of the endowment (h/h0 - 1 style)
# or in levels (h - h0), truncated below at zero.
function learned_h(hS :: HcProdFctBounded, hV, h0V)
    gainV = max.(0.0, hV .- h0V);
    return learning_relative_to_h0(hS) ? (gainV ./ h0V) : gainV
end
# Compact one-line display of a single college's production function.
function Base.show(io :: IO, hS :: HcProdFctBounded)
    maxLearn = round(max_learn(hS), digits = 2);
    print(io, "H prod fct: Bounded learning < $maxLearn");
end
## --------------------- For all colleges
# One-line display for the switches object.
function Base.show(io :: IO, switches :: HcProdBoundedSwitches)
    print(io, "H production: bounded learning.");
end
# Settings table for display: this object's settings stacked on top of the
# TFP spec's settings table.
function settings_table(h :: HcProdBoundedSwitches)
    ddh = delta_h(h);
    deprecStr = cal_delta_h(h) ? "calibrated" : "fixed at $ddh";
    learnStr = h.learnRelativeToH0 ? "h/h0 - 1" : "h - h0";
    ownSettings = [
        "H production function"  "Bounded learning";
        "Depreciation"  deprecStr
        "Learning of the form"  learnStr
    ];
    return vcat(ownSettings, settings_table(h.tfpSpec))
end
# Settings as a list of [description, name, value] entries.
# NOTE(review): the second argument `st` is unused in this method.
function settings_list(h :: HcProdBoundedSwitches, st)
    eqnHChange = ["H production function", "eqnHChange", eqn_hchange(h)];
    return [eqnHChange]
end
# Validity check for the switches. Currently there are no constraints to
# verify; always reports valid.
validate_hprod(s :: HcProdBoundedSwitches) = true;
# Validity check for the multi-college parameter set: more than one college,
# positive exponents/scales, and a depreciation rate in [0, 1).
function validate_hprod_set(h :: HcProdBoundedSet)
    isValid = (h.nc > 1) && (h.timeExp > 0.0) &&
        (h.aScale > 0.0) && (h.timePerCourse > 0.0);
    isValid = isValid && (1.0 > delta_h(h) >= 0.0);
    return isValid
end
# LaTeX string for the h-change equation (used by `settings_list`).
function eqn_hchange(h :: HcProdBoundedSwitches)
    "\\hTfp \\sTimePerCourse^{\\hTimeExp} e^{\\hAScale \\abil}"
end
# --------------
|
{"hexsha": "e5bc2741dc04e590c7a8fbdc7f19816afd628b89", "size": 10167, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/hprod_bounded.jl", "max_stars_repo_name": "hendri54/CollegeStratCollege", "max_stars_repo_head_hexsha": "13d5032142751053d0341395353d379fa3d980a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/hprod_bounded.jl", "max_issues_repo_name": "hendri54/CollegeStratCollege", "max_issues_repo_head_hexsha": "13d5032142751053d0341395353d379fa3d980a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/hprod_bounded.jl", "max_forks_repo_name": "hendri54/CollegeStratCollege", "max_forks_repo_head_hexsha": "13d5032142751053d0341395353d379fa3d980a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4767801858, "max_line_length": 128, "alphanum_fraction": 0.6557489918, "num_tokens": 3262}
|
import numpy as np
def playerSkill(mu=None, sigma=None):
    """Draw a player skill sample from N(mu, sigma**2 as scale).

    Returns None if either parameter is missing.

    Fixes: the original had an `else:` after a non-`if` statement
    (SyntaxError), an always-true tuple `assert(cond, msg)`, and a
    `(mu or sigma) == None` test that does not check "either is None".

    :param mu: mean skill; must be a positive real number
    :param sigma: skill uncertainty
    :return: a sample from np.random.normal(mu, sigma**2), or None
    """
    if mu is None or sigma is None:
        return None
    assert mu > 0, "Player skill must be a positive real number"
    # NOTE(review): np.random.normal's second argument is the *standard
    # deviation*; passing sigma**2 treats it as a variance — TODO confirm
    # the intended parameterization.
    return np.random.normal(mu, sigma**2)
def playerPerformance(mu=None, beta=None):
    """Draw a performance sample from N(mu, beta**2 as scale).

    Fixes: the original `assert(cond, msg)` asserted a non-empty tuple,
    which is always true; with the default `mu=None` the comparison
    `mu > 0` would also raise TypeError instead of a clear error.

    :param mu: mean performance; must be a positive real number
    :param beta: performance variability
    :return: a sample from np.random.normal(mu, beta**2)
    """
    assert mu is not None and mu > 0, "Player performance must be a positive real number"
    return np.random.normal(mu, beta**2)
def step_winning(perf1, perf2, epsilon):
    """Decide the outcome of a match from two performance draws.

    Player 1 wins when their performance exceeds player 2's by more than
    the draw margin `epsilon`; a difference within `epsilon` is a draw.

    Fixes: the original `assert(cond, msg)` asserted a non-empty tuple,
    which is always true.

    :param perf1: performance of player 1
    :param perf2: performance of player 2
    :param epsilon: draw margin; must be a positive real number
    :return: "p1", "p2", or "draw"
    """
    assert epsilon > 0, "epsilon must be a positive real number"
    if (perf1 - perf2) > epsilon:
        return "p1"
    elif np.abs(perf1 - perf2) <= epsilon:
        return "draw"
    else:
        return "p2"
def teamPerformance(perfP={}, corrMatrix=None):  # we do not consider time at this time
    # NOTE(review): unfinished stub. `teamP` is undefined here,
    # `individual_contrib` is never called, and the function returns None;
    # presumably the team performance should aggregate the per-player
    # performances in `perfP` — TODO confirm intended behavior.
    # NOTE(review): the mutable default `perfP={}` is shared across calls.
    def individual_contrib():
        # Sum of per-member contributions (teamP is not defined yet).
        return teamP.sum()
|
{"hexsha": "e35943c248d8e2e0c1686c5bd45fe109675238aa", "size": 780, "ext": "py", "lang": "Python", "max_stars_repo_path": "2.Classic_TrueSkill/model.py", "max_stars_repo_name": "SkylakeXx/TrueSkill2", "max_stars_repo_head_hexsha": "ac9b29522c47882c6d9d55b1b934d6ce4c7e545a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2.Classic_TrueSkill/model.py", "max_issues_repo_name": "SkylakeXx/TrueSkill2", "max_issues_repo_head_hexsha": "ac9b29522c47882c6d9d55b1b934d6ce4c7e545a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2.Classic_TrueSkill/model.py", "max_forks_repo_name": "SkylakeXx/TrueSkill2", "max_forks_repo_head_hexsha": "ac9b29522c47882c6d9d55b1b934d6ce4c7e545a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8571428571, "max_line_length": 85, "alphanum_fraction": 0.6512820513, "include": true, "reason": "import numpy", "num_tokens": 201}
|
!---------------------------------------------------------------------!
! OWNER: Ithaca Combustion Enterprise, LLC !
! COPYRIGHT: © 2012, Ithaca Combustion Enterprise, LLC !
! LICENSE: BSD 3-Clause License (The complete text of the license can !
! be found in the `LICENSE-ICE.txt' file included in the ISAT-CK7 !
! source directory.) !
!---------------------------------------------------------------------!
module isat_abort_m
! Serial version: file isat_abort_ser.f90
! Provides the ISATAB error-abort routine: prints a diagnostic block to the
! error logical unit and terminates the program with a nonzero status.
integer, save :: lu_err = 0   ! logical unit for error output (0 = stderr on most compilers)
contains
subroutine isat_abort( sub, loc, mess, chv, isv, ivar, rsv, rvar )
! Aborts ISATAB because of error: prints diagnostic and stops
use isat_prec
implicit none
character(*), intent(in) :: sub ! name of calling routine
integer, intent(in) :: loc ! location number
character(*), intent(in), optional :: mess ! message
character(*), intent(in), optional :: chv ! character variable
integer, intent(in), optional :: isv, ivar(:) ! integer variables
real(k_xf), intent(in), optional :: rsv, rvar(:) ! real variables
! Header
write(lu_err,*)' '
write(lu_err,*)'********** ISAT_ABORT **************'
write(lu_err,*)' '
write(lu_err,*)'routine = ', sub
write(lu_err,*)'location = ', loc
! Optional diagnostic payload; only present arguments are printed.
if( present(mess) ) write(lu_err,*)'message = ', mess
if( present(chv ) ) write(lu_err,1) chv
if( present(isv ) ) write(lu_err,2) isv
if( present(ivar) ) write(lu_err,2) ivar
if( present(rsv ) ) write(lu_err,3) rsv
if( present(rvar) ) write(lu_err,3) rvar
write(lu_err,*)' '
write(lu_err,*)'****** END ISAT_ABORT **************'
write(lu_err,*)' '
! `exit` is a widely supported compiler extension (not standard Fortran);
! terminates with a nonzero status.
call exit(-1)
! Formats: 1 = character, 2 = integers (8 per line), 3 = reals (5 per line)
1 format((a))
2 format((8i10))
3 format((1p,5e13.4))
end subroutine isat_abort
end module isat_abort_m
|
{"hexsha": "ad53ff87454fea203ccaf5ca396d4c130ce8b858", "size": 1888, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/isat/isatab_ser/isat_abort_ser.f90", "max_stars_repo_name": "xuhan425/isat_ffd", "max_stars_repo_head_hexsha": "3a5449f7e49b686c33fe0e97ca90ea8d92fc2f00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-04-10T20:16:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-15T11:09:44.000Z", "max_issues_repo_path": "src/isat/isatab_ser/isat_abort_ser.f90", "max_issues_repo_name": "xuhan425/isat_ffd", "max_issues_repo_head_hexsha": "3a5449f7e49b686c33fe0e97ca90ea8d92fc2f00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-06-07T14:10:25.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-07T14:10:25.000Z", "max_forks_repo_path": "src/isat/isatab_ser/isat_abort_ser.f90", "max_forks_repo_name": "xuhan425/isat_ffd", "max_forks_repo_head_hexsha": "3a5449f7e49b686c33fe0e97ca90ea8d92fc2f00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-06-07T03:44:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T05:54:10.000Z", "avg_line_length": 35.6226415094, "max_line_length": 74, "alphanum_fraction": 0.5444915254, "num_tokens": 529}
|
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import os.path as osp
import numpy as np
import torch as to
from typing import Optional
from init_args_serializer.serializable import Serializable
import pyrado
from pyrado.environments.pysim.base import SimPyEnv
from pyrado.environments.quanser import max_act_qbb
from pyrado.spaces.box import BoxSpace
from pyrado.spaces.polar import Polar2DPosVelSpace
from pyrado.tasks.base import Task
from pyrado.tasks.desired_state import DesStateTask
from pyrado.tasks.reward_functions import ScaledExpQuadrErrRewFcn
from pyrado.utils.data_types import RenderMode
from pyrado.utils.input_output import print_cbt_once
class QBallBalancerSim(SimPyEnv, Serializable):
    """
    Environment in which a ball rolls on an actuated plate. The ball is randomly initialized on the plate and is to be
    stabilized on the center of the plate. The problem formulation treats this setup as 2 independent ball-on-beam
    problems. The plate is actuated via 2 servo motors that lift the plate.
    .. note::
        The dynamics are not the same as in the Quanser Workbook (2 DoF Ball-Balancer - Instructor). Here, we added
        the coriolis forces and linear-viscous friction. However, the 2 dim system is still modeled to be decoupled.
        This is the case, since the two rods (connected to the servos) are pushing the plate at the center lines.
        As a result, the angles alpha and beta are w.r.t. to the inertial frame, i.e. they are not 2 sequential rations.
    """
    name: str = "qbb"
    def __init__(
        self,
        dt: float,
        max_steps: int = pyrado.inf,
        task_args: Optional[dict] = None,
        simple_dynamics: bool = False,
        load_experimental_tholds: bool = True,
    ):
        """
        Constructor
        :param dt: simulation step size [s]
        :param max_steps: maximum number of simulation steps
        :param task_args: arguments for the task construction
        :param simple_dynamics: if `True`, use a dynamics model without Coriolis forces and without friction effects
        :param load_experimental_tholds: use the voltage thresholds determined from experiments
        """
        Serializable._init(self, locals())
        self._simple_dynamics = simple_dynamics
        self.plate_angs = np.zeros(2)  # plate's angles alpha and beta [rad] (unused for simple_dynamics = True)
        # Call SimPyEnv's constructor
        super().__init__(dt, max_steps, task_args)
        if not simple_dynamics:
            # Inverse kinematics helper (servo shaft angle -> plate angle);
            # only needed for the full dynamics model (see `reset`).
            self._kin = QBallBalancerKin(self)
    def _create_spaces(self):
        l_plate = self.domain_param["l_plate"]
        # Define the spaces
        max_state = np.array(
            [
                np.pi / 4.0,
                np.pi / 4.0,
                l_plate / 2.0,
                l_plate / 2.0,  # [rad, rad, m, m, ...
                5 * np.pi,
                5 * np.pi,
                0.5,
                0.5,
            ]
        )  # ... rad/s, rad/s, m/s, m/s]
        # Initial ball state is sampled in polar coordinates (radius, angle)
        # with small velocities; see the init space labels below.
        min_init_state = np.array([0.75 * l_plate / 2, -np.pi, -0.05 * max_state[6], -0.05 * max_state[7]])
        max_init_state = np.array([0.8 * l_plate / 2, np.pi, 0.05 * max_state[6], 0.05 * max_state[7]])
        self._state_space = BoxSpace(
            -max_state,
            max_state,
            labels=["theta_x", "theta_y", "x", "y", "theta_x_dot", "theta_y_dot", "x_dot", "y_dot"],
        )
        self._obs_space = self._state_space.copy()
        self._init_space = Polar2DPosVelSpace(min_init_state, max_init_state, labels=["r", "phi", "x_dot", "y_dot"])
        self._act_space = BoxSpace(-max_act_qbb, max_act_qbb, labels=["V_x", "V_y"])
        self._curr_act = np.zeros_like(max_act_qbb)  # just for usage in render function
    def _create_task(self, task_args: dict) -> Task:
        # Define the task including the reward function
        state_des = task_args.get("state_des", np.zeros(8))
        Q = task_args.get("Q", np.diag([1e0, 1e0, 5e3, 5e3, 1e-2, 1e-2, 5e-1, 5e-1]))
        R = task_args.get("R", np.diag([1e-2, 1e-2]))
        # Q = np.diag([1e2, 1e2, 5e2, 5e2, 1e-2, 1e-2, 1e+1, 1e+1])  # for LQR
        # R = np.diag([1e-2, 1e-2])  # for LQR
        return DesStateTask(
            self.spec, state_des, ScaledExpQuadrErrRewFcn(Q, R, self.state_space, self.act_space, min_rew=1e-4)
        )
    # Cache measured thresholds during one run and reduce console log spam that way
    measured_tholds = None
    @classmethod
    def get_V_tholds(cls, load_experiments: bool = True) -> dict:
        """ If available, the voltage thresholds computed from measurements, else use default values. """
        # Hard-coded default thresholds
        tholds = dict(V_thold_x_pos=0.28, V_thold_x_neg=-0.10, V_thold_y_pos=0.28, V_thold_y_neg=-0.074)
        if load_experiments:
            if cls.measured_tholds is None:
                ex_dir = osp.join(pyrado.EVAL_DIR, "volt_thold_qbb")
                if osp.exists(ex_dir) and osp.isdir(ex_dir) and os.listdir(ex_dir):
                    print_cbt_once("Found measured thresholds, using the averages.", "g")
                    # Calculate cumulative running average
                    cma = np.zeros((2, 2))
                    i = 0.0
                    for f in os.listdir(ex_dir):
                        if f.endswith(".npy"):
                            i += 1.0
                            cma = cma + (np.load(osp.join(ex_dir, f)) - cma) / i
                    tholds["V_thold_x_pos"] = cma[0, 1]
                    tholds["V_thold_x_neg"] = cma[0, 0]
                    tholds["V_thold_y_pos"] = cma[1, 1]
                    tholds["V_thold_y_neg"] = cma[1, 0]
                else:
                    print_cbt_once("No measured thresholds found, falling back to default values.", "y")
                # Cache results for future calls
                cls.measured_tholds = tholds
            else:
                tholds = cls.measured_tholds
        return tholds
    @classmethod
    def get_nominal_domain_param(cls) -> dict:
        V_tholds = cls.get_V_tholds()
        return dict(
            g=9.81,  # gravity constant [m/s**2]
            m_ball=0.003,  # mass of the ball [kg]
            r_ball=0.019625,  # radius of the ball [m]
            l_plate=0.275,  # length of the (square) plate [m]
            r_arm=0.0254,  # distance between the servo output gear shaft and the coupled joint [m]
            K_g=70.0,  # gear ratio [-]
            eta_g=0.9,  # gearbox efficiency [-]
            J_l=5.2822e-5,  # load moment of inertia [kg*m**2]
            J_m=4.6063e-7,  # motor moment of inertia [kg*m**2]
            k_m=0.0077,  # motor torque constant [N*m/A] = back-EMF constant [V*s/rad]
            R_m=2.6,  # motor armature resistance
            eta_m=0.69,  # motor efficiency [-]
            B_eq=0.015,  # equivalent viscous damping coefficient w.r.t. load [N*m*s/rad]
            c_frict=0.05,  # viscous friction coefficient [N*s/m]
            V_thold_x_pos=V_tholds["V_thold_x_pos"],  # voltage required to move the x servo in positive dir
            V_thold_x_neg=V_tholds["V_thold_x_neg"],  # voltage required to move the x servo in negative dir
            V_thold_y_pos=V_tholds["V_thold_y_pos"],  # voltage required to move the y servo in positive dir
            V_thold_y_neg=V_tholds["V_thold_y_neg"],  # voltage required to move the y servo in negative dir
            offset_th_x=0.0,  # angular offset of the x axis motor shaft [rad]
            offset_th_y=0.0,
        )  # angular offset of the y axis motor shaft [rad]
    def _calc_constants(self):
        # Precompute derived physical constants from the current domain parameters.
        l_plate = self.domain_param["l_plate"]
        m_ball = self.domain_param["m_ball"]
        r_ball = self.domain_param["r_ball"]
        eta_g = self.domain_param["eta_g"]
        eta_m = self.domain_param["eta_m"]
        K_g = self.domain_param["K_g"]
        J_m = self.domain_param["J_m"]
        J_l = self.domain_param["J_l"]
        r_arm = self.domain_param["r_arm"]
        k_m = self.domain_param["k_m"]
        R_m = self.domain_param["R_m"]
        B_eq = self.domain_param["B_eq"]
        self.J_ball = 2.0 / 5 * m_ball * r_ball ** 2  # inertia of the ball [kg*m**2]
        self.J_eq = eta_g * K_g ** 2 * J_m + J_l  # equivalent moment of inertia [kg*m**2]
        self.c_kin = 2.0 * r_arm / l_plate  # coefficient for the rod-plate kinematic
        self.A_m = eta_g * K_g * eta_m * k_m / R_m
        self.B_eq_v = eta_g * K_g ** 2 * eta_m * k_m ** 2 / R_m + B_eq
        self.zeta = m_ball * r_ball ** 2 + self.J_ball  # combined moment of inertial for the ball
    def _state_from_init(self, init_state):
        # Embed the 4D initial state into the 8D state vector:
        # positions into state[2:4], velocities into state[6:8].
        state = np.zeros(8)
        state[2:4] = init_state[:2]
        state[6:8] = init_state[2:]
        return state
    def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
        obs = super().reset(init_state=init_state, domain_param=domain_param)
        # Reset the plate angles
        if self._simple_dynamics:
            self.plate_angs = np.zeros(2)  # actually not necessary since not used
        else:
            offset_th_x = self.domain_param["offset_th_x"]
            offset_th_y = self.domain_param["offset_th_y"]
            # Get the plate angles from inverse kinematics for initial pose
            self.plate_angs[0] = self._kin(self.state[0] + offset_th_x)
            self.plate_angs[1] = self._kin(self.state[1] + offset_th_y)
        # Return perfect observation
        return obs
    def _step_dynamics(self, act: np.ndarray):
        g = self.domain_param["g"]
        m_ball = self.domain_param["m_ball"]
        r_ball = self.domain_param["r_ball"]
        c_frict = self.domain_param["c_frict"]
        V_thold_x_neg = self.domain_param["V_thold_x_neg"]
        V_thold_x_pos = self.domain_param["V_thold_x_pos"]
        V_thold_y_neg = self.domain_param["V_thold_y_neg"]
        V_thold_y_pos = self.domain_param["V_thold_y_pos"]
        offset_th_x = self.domain_param["offset_th_x"]
        offset_th_y = self.domain_param["offset_th_y"]
        if not self._simple_dynamics:
            # Apply a voltage dead zone (i.e. below a certain amplitude the system does not move). This is a very
            # simple model of static friction. Experimentally evaluated the voltage required to get the plate moving.
            # NOTE(review): this mutates the caller's `act` array in place — confirm that is intended.
            if V_thold_x_neg <= act[0] <= V_thold_x_pos:
                act[0] = 0
            if V_thold_y_neg <= act[1] <= V_thold_y_pos:
                act[1] = 0
        # State
        th_x = self.state[0] + offset_th_x  # angle of the x axis servo (load)
        th_y = self.state[1] + offset_th_y  # angle of the y axis servo (load)
        x = self.state[2]  # ball position along the x axis
        y = self.state[3]  # ball position along the y axis
        th_x_dot = self.state[4]  # angular velocity of the x axis servo (load)
        th_y_dot = self.state[5]  # angular velocity of the y axis servo (load)
        x_dot = self.state[6]  # ball velocity along the x axis
        y_dot = self.state[7]  # ball velocity along the y axis
        # Servo dynamics: first-order motor model driven by the applied voltage
        th_x_ddot = (self.A_m * act[0] - self.B_eq_v * th_x_dot) / self.J_eq
        th_y_ddot = (self.A_m * act[1] - self.B_eq_v * th_y_dot) / self.J_eq
        """
        THIS IS TIME INTENSIVE
        if not self._simple_dynamics:
            # Get the plate angles from inverse kinematics
            self.plate_angs[0] = self._kin(self.state[0] + self.offset_th_x)
            self.plate_angs[1] = self._kin(self.state[1] + self.offset_th_y)
        """
        # Plate (not part of the state since it is a redundant information)
        # The definition of th_y is opposing beta, i.e.
        a = self.plate_angs[0]  # plate's angle around the y axis (alpha)
        b = self.plate_angs[1]  # plate's angle around the x axis (beta)
        a_dot = self.c_kin * th_x_dot * np.cos(th_x) / np.cos(a)  # plate's angular velocity around the y axis (alpha)
        b_dot = self.c_kin * -th_y_dot * np.cos(-th_y) / np.cos(b)  # plate's angular velocity around the x axis (beta)
        # Plate's angular accelerations (unused for simple_dynamics = True)
        a_ddot = (
            1.0
            / np.cos(a)
            * (self.c_kin * (th_x_ddot * np.cos(th_x) - th_x_dot ** 2 * np.sin(th_x)) + a_dot ** 2 * np.sin(a))
        )
        b_ddot = (
            1.0
            / np.cos(b)
            * (self.c_kin * (-th_y_ddot * np.cos(th_y) - (-th_y_dot) ** 2 * np.sin(-th_y)) + b_dot ** 2 * np.sin(b))
        )
        # kinematics: sin(a) = self.c_kin * sin(th_x)
        if self._simple_dynamics:
            # Ball dynamic without friction and Coriolis forces
            x_ddot = self.c_kin * m_ball * g * r_ball ** 2 * np.sin(th_x) / self.zeta  # symm inertia
            y_ddot = self.c_kin * m_ball * g * r_ball ** 2 * np.sin(th_y) / self.zeta  # symm inertia
        else:
            # Ball dynamic with friction and Coriolis forces
            x_ddot = (
                -c_frict * x_dot * r_ball ** 2  # friction
                - self.J_ball * r_ball * a_ddot  # plate influence
                + m_ball * x * a_dot ** 2 * r_ball ** 2  # centripetal
                + self.c_kin * m_ball * g * r_ball ** 2 * np.sin(th_x)  # gravity
            ) / self.zeta
            y_ddot = (
                -c_frict * y_dot * r_ball ** 2  # friction
                - self.J_ball * r_ball * b_ddot  # plate influence
                + m_ball * y * (-b_dot) ** 2 * r_ball ** 2  # centripetal
                + self.c_kin * m_ball * g * r_ball ** 2 * np.sin(th_y)  # gravity
            ) / self.zeta
        # Integration step (symplectic Euler)
        self.state[4:] += np.array([th_x_ddot, th_y_ddot, x_ddot, y_ddot]) * self._dt  # next velocity
        self.state[:4] += self.state[4:] * self._dt  # next position
        # Integration step (forward Euler)
        self.plate_angs += np.array([a_dot, b_dot]) * self._dt  # just for debugging when simplified dynamics
    def _init_anim(self):
        # Import PandaVis Class
        from pyrado.environments.pysim.pandavis import QBallBalancerVis
        # Create instance of PandaVis
        self._visualization = QBallBalancerVis(self, self._rendering)
class QBallBalancerKin(Serializable):
    """
    Calculates and visualizes the kinematics from the servo shaft angles (th_x, th_y) to the plate angles (a, b).
    """
    def __init__(self, qbb, num_opt_iter=100, render_mode=RenderMode()):
        """
        Constructor
        :param qbb: QBallBalancerSim object
        :param num_opt_iter: number of optimizer iterations for the IK
        :param render_mode: the render mode: a for animating (pyplot), or `` for no animation
        """
        from matplotlib import pyplot as plt
        Serializable._init(self, locals())
        self._qbb = qbb
        self.num_opt_iter = num_opt_iter
        self.render_mode = render_mode
        # Segment lengths of the linkage; refreshed on every __call__ since
        # the domain parameters may be randomized.
        self.r = float(self._qbb.domain_param["r_arm"])
        self.l = float(self._qbb.domain_param["l_plate"] / 2.0)
        self.d = 0.10  # [m] roughly measured
        # Visualization
        if render_mode.video:
            self.fig, self.ax = plt.subplots(figsize=(5, 5))
            self.ax.set_xlim(-0.5 * self.r, 1.2 * (self.r + self.l))
            self.ax.set_ylim(-1.0 * self.d, 2 * self.d)
            self.ax.set_aspect("equal")
            (self.line1,) = self.ax.plot([0, 0], [0, 0], marker="o")
            (self.line2,) = self.ax.plot([0, 0], [0, 0], marker="o")
            (self.line3,) = self.ax.plot([0, 0], [0, 0], marker="o")
    def __call__(self, th):
        """
        Compute the inverse kinematics of the Quanser 2 DoF Ball-Balancer for one DoF
        :param th: angle of the servo (x or y axis)
        :return: plate angle alpha or beta
        """
        from matplotlib import pyplot as plt
        if not isinstance(th, to.Tensor):
            th = to.tensor(th, dtype=to.get_default_dtype(), requires_grad=False)
        # Update the lengths, e.g. if the domain has been randomized
        # Need to use float() since the parameters might be 0d-arrays
        self.r = float(self._qbb.domain_param["r_arm"])
        self.l = float(self._qbb.domain_param["l_plate"] / 2.0)
        self.d = 0.10  # roughly measured
        tip = self.rod_tip(th)
        ang = self.plate_ang(tip)
        if self.render_mode.video:
            self.render(th, tip)
            plt.pause(0.001)
        return ang
    @to.enable_grad()
    def rod_tip(self, th):
        """
        Get Cartesian coordinates of the rod tip for one servo.
        :param th: current value of the respective servo shaft angle
        :return tip: 2D position of the rod tip in the sagittal plane
        """
        # Initial guess for the rod tip
        tip_init = [self.r, self.l]  # [x, y] in the sagittal plane
        tip = to.tensor(tip_init, requires_grad=True)
        # Minimize the kinematic constraint violation by gradient descent.
        optim = to.optim.SGD([tip], lr=0.01, momentum=0.9)
        for i in range(self.num_opt_iter):
            optim.zero_grad()
            loss = self._loss_fcn(tip, th)
            loss.backward()
            optim.step()
        return tip
    def _loss_fcn(self, tip, th):
        """
        Cost function for the optimization problem, which only consists of 2 constraints that should be fulfilled.
        :param tip: current iterate of the rod tip position in the sagittal plane
        :param th: servo shaft angle
        :return: the cost value
        """
        # Formulate the constrained optimization problem as an unconstrained using the known segment lengths
        rod_len = to.sqrt((tip[0] - self.r * to.cos(th)) ** 2 + (tip[1] - self.r * to.sin(th)) ** 2)
        half_palte = to.sqrt((tip[0] - self.r - self.l) ** 2 + (tip[1] - self.d) ** 2)
        return (rod_len - self.d) ** 2 + (half_palte - self.l) ** 2
    def plate_ang(self, tip):
        """
        Compute plate angle (alpha or beta) from the rod tip position which has been calculated from servo shaft angle
        (th_x or th_y) before.
        :param tip: 2D position of the rod tip in the sagittal plane (from the optimizer)
        :return: plate angle [rad] as a float
        """
        ang = np.pi / 2.0 - to.atan2(self.r + self.l - tip[0], tip[1] - self.d)
        return float(ang)
    def render(self, th, tip):
        """
        Visualize using pyplot
        :param th: angle of the servo
        :param tip: 2D position of the rod tip in the sagittal plane (from the optimizer)
        """
        # Joint positions of the 3-segment linkage: servo axis (A), rod base (B),
        # rod tip (C), plate attachment (D).
        A = [0, 0]
        B = [self.r * np.cos(th), self.r * np.sin(th)]
        C = [tip[0], tip[1]]
        D = [self.r + self.l, self.d]
        self.line1.set_data([A[0], B[0]], [A[1], B[1]])
        self.line2.set_data([B[0], C[0]], [B[1], C[1]])
        self.line3.set_data([C[0], D[0]], [C[1], D[1]])
    def _get_state(self, state_dict):
        """Store the kinematic segment lengths for serialization."""
        state_dict["r"] = self.r
        state_dict["l"] = self.l
    def _set_state(self, state_dict, copying=False):
        """Restore the kinematic segment lengths from a serialized state."""
        self.r = state_dict["r"]
        self.l = state_dict["l"]
|
{"hexsha": "46c81409d7d74a91c17624885e1c01c5413cac43", "size": 20628, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pyrado/pyrado/environments/pysim/quanser_ball_balancer.py", "max_stars_repo_name": "KhanhThiVo/SimuRLacra", "max_stars_repo_head_hexsha": "fdeaf2059c2ed80ea696f018c29290510b5c4cb9", "max_stars_repo_licenses": ["DOC", "Zlib", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Pyrado/pyrado/environments/pysim/quanser_ball_balancer.py", "max_issues_repo_name": "KhanhThiVo/SimuRLacra", "max_issues_repo_head_hexsha": "fdeaf2059c2ed80ea696f018c29290510b5c4cb9", "max_issues_repo_licenses": ["DOC", "Zlib", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pyrado/pyrado/environments/pysim/quanser_ball_balancer.py", "max_forks_repo_name": "KhanhThiVo/SimuRLacra", "max_forks_repo_head_hexsha": "fdeaf2059c2ed80ea696f018c29290510b5c4cb9", "max_forks_repo_licenses": ["DOC", "Zlib", "BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-24T15:25:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-24T15:25:26.000Z", "avg_line_length": 44.8434782609, "max_line_length": 120, "alphanum_fraction": 0.610190033, "include": true, "reason": "import numpy", "num_tokens": 5619}
|
"""
Vector mathematics functions.
"""
from numpy import array
# Unit direction vectors along the positive/negative axes ([x, y, z]).
# Direction indices used by `vectorToDirection` run 0..5 in this order.
SOUTH = [1, 0, 0]
WEST = [0, 1, 0]
UP = [0, 0, 1]
NORTH = [-1, 0, 0]
EAST = [0, -1, 0]
DOWN = [0, 0, -1]
def addVectors(v1, v2):
    """Return the element-wise sum of two vectors as a list."""
    total = array(v1) + array(v2)
    return list(total)
def subtractVectors(v1, v2):
    """Return the element-wise difference v1 - v2 as a list."""
    difference = array(v1) - array(v2)
    return list(difference)
def multiplyVectors(v1, v2):
    """Return the element-wise product of two vectors as a list."""
    product = array(v1) * array(v2)
    return list(product)
def divideVectors(v1, v2):
    """
    Divides the elements of vector v1 by the elements of vector v2.
    """
    quotient = array(v1) / array(v2)
    return list(quotient)
def replaceVector(v1, v2):
    """Copy the elements of v1 into v2 in place, index by index."""
    i = 0
    for value in v1:
        v2[i] = value
        i += 1
def squish(a):
    """Return the sign of a (a/|a|); zero is returned unchanged."""
    return a if a == 0 else a / abs(a)
def squishAndSwitch(a):
    """Return the sign of a, flipped (negated)."""
    return -1 * squish(a)
def removeMagnitude(v1):
    """Return v1 reduced to its element-wise signs, as a list.

    Fix: under Python 3 the original returned a lazy `map` object; callers
    expecting a sequence (Python-2 behavior) would break, so the result is
    materialized into a list.
    """
    return [squish(component) for component in v1]
def getLargestComponentVector(v1):
    """Return a vector that keeps only the sign of v1's largest-magnitude
    component, with all other components zeroed. Ties pick the earliest axis.

    Fix: the original called `.index()` on a `map` object, which fails under
    Python 3 (and the iterator would already be consumed by `max`); both
    intermediate results are now materialized as lists.
    """
    largest = [0] * 3
    magnitudes = [abs(component) for component in v1]
    largest[magnitudes.index(max(magnitudes))] = 1
    return multiplyVectors(largest, [squish(component) for component in v1])
def reverseDirection(v1, v2):
    """
    Defines a vector that goes in the opposite direction of the computed
    difference between two given vectors.

    Fix: returns a list instead of a lazy `map` object under Python 3, so
    callers can index and re-iterate the result.
    """
    return [squishAndSwitch(component) for component in subtractVectors(v1, v2)]
def vectorToDirection(v1):
    """Convert a vector into a face/direction code in the range 0-5.

    The dominant component (largest magnitude, earlier axes winning ties)
    determines the code: a positive component maps to an even code
    (x=0, y=2, z=4) and a negative one to the following odd code
    (x=1, y=3, z=5). A zero vector yields 0.
    """
    face = 0
    for axis, component in enumerate(getLargestComponentVector(v1)):
        if abs(component) == 1:
            sign_offset = 1 if component == -1 else 0
            face = sign_offset + (axis * 2)
    return face
def squaredMagnitude(v1):
    """Return the squared Euclidean magnitude (dot product of v1 with itself)."""
    v = array(v1)
    return sum(v * v)
|
{"hexsha": "44da04db090ec492f4436f0bea12646cd9d91250", "size": 2071, "ext": "py", "lang": "Python", "max_stars_repo_path": "isomyr/util/vector.py", "max_stars_repo_name": "chrisonntag/Isomyr", "max_stars_repo_head_hexsha": "31eb90f0169a10d1079087ca689de76f600cc958", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "isomyr/util/vector.py", "max_issues_repo_name": "chrisonntag/Isomyr", "max_issues_repo_head_hexsha": "31eb90f0169a10d1079087ca689de76f600cc958", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "isomyr/util/vector.py", "max_forks_repo_name": "chrisonntag/Isomyr", "max_forks_repo_head_hexsha": "31eb90f0169a10d1079087ca689de76f600cc958", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2688172043, "max_line_length": 78, "alphanum_fraction": 0.6190246258, "include": true, "reason": "from numpy", "num_tokens": 596}
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.5.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Alternating Direction Implicit Method Applied to the 3D Wave Equation
# By: Stefen Gill
#
# Both the simple explicit and simple implicit methods used in `simple_explicit_implicit_methods.ipynb` can be applied to higher-dimensional problems. However, they become very computationally expensive<sub>[1]</sub>. This motivates the alternating direction implicit method (ADI), which combines the simple explicit and simple implicit methods to produce finite difference discretizations corresponding to efficiently solvable tridiagonal matrix equations.
#
# ADI seems to be regarded as a numerical method for solving elliptic and parabolic equations<sub>[2]</sub>. It will be shown in this notebook that ADI can be applied to solving the wave equation, which is hyperbolic:
#
# $$ \frac{\partial^{2} u}{\partial t^{2}} = c^{2} \nabla^{2} u $$
#
# Where $\nabla^{2}$ is the spatial Laplace operator. In three dimensions, and letting $c = 1$, the PDE becomes the following:
#
# $$ \frac{\partial^{2} u}{\partial t^{2}} = \frac{\partial^{2} u}{\partial x^{2}} + \frac{\partial^{2} u}{\partial y^{2}} + \frac{\partial^{2} u}{\partial z^{2}} $$
#
# There is now a choice to be made regarding finite difference discretizations of the spatial derivatives. Either the implicit or explicit methods could be employed to approximate a second derivative in an arbitrary spatial dimension. Superscripts denote a time step and subscripts denote a spatial node for the remainder of this document. The explicit central difference discretization is taken at the current time step, $l$, so that the values of $u$ are known thanks to an initial condition:
#
# $$ \frac{\partial^{2} u}{\partial x^{2}} \approx \frac{u^{l}_{i - 1, \, j, \, k} - 2u^{l}_{i, \, j, \, k} + u^{l}_{i + 1, \, j, \, k}}{(\Delta x)^{2}} $$
#
# And the implicit central difference discretization is taken at the next time step, $l + 1$. Values of $u$ are unknown in this discretization:
#
# $$ \frac{\partial^{2} u}{\partial x^{2}} \approx \frac{u^{l + 1}_{i - 1, \, j, \, k} - 2u^{l + 1}_{i, \, j, \, k} + u^{l + 1}_{i + 1, \, j, \, k}}{(\Delta x)^{2}} $$
#
# Rather than solving for the future values of $u$ in all dimensions at once, the implicit discretization can be applied to one dimension at a time and the resulting tridiagonal matrix equation solved for values at a partial time step in the future<sub>[2]</sub>. Since there are three dimensions that need solving, this partial time step is chosen to be $1/3$ so that a whole step has elapsed after the third dimension is solved.
#
# $$ \frac{u^{l - 1/3}_{i, \, j, \, k} - 2u^{l}_{i, \, j, \, k} + u^{l + 1/3}_{i, \, j, \, k}}{(\Delta t)^{2}} = \overbrace{\frac{u^{l + 1/3}_{i - 1, \, j, \, k} - 2u^{l + 1/3}_{i, \, j, \, k} + u^{l + 1/3}_{i + 1, \, j, \, k}}{(\Delta x)^{2}}}^{\text{Implicit in the x dimension}} + \underbrace{\frac{u^{l}_{i, \, j - 1, \, k} - 2u^{l}_{i, \, j, \, k} + u^{l}_{i, \, j + 1, \, k}}{(\Delta y)^{2}} + \frac{u^{l}_{i, \, j, \, k - 1} - 2u^{l}_{i, \, j, \, k} + u^{l}_{i, \, j, \, k + 1}}{(\Delta z)^{2}}}_{\text{Explicit in other dimensions}} $$
#
# The expression can be simplified with the establishment of a uniform grid where $\Delta d = \Delta x = \Delta y = \Delta z$.
#
# $$ \frac{u^{l - 1/3}_{i, \, j, \, k} - 2u^{l}_{i, \, j, \, k} + u^{l + 1/3}_{i, \, j, \, k}}{(\Delta t)^{2}} = \frac{1}{( \Delta d )^{2}} \left ( u^{l + 1/3}_{i - 1, \, j, \, k} - 2u^{l + 1/3}_{i, \, j, \, k} + u^{l + 1/3}_{i + 1, \, j, \, k} + u^{l}_{i, \, j - 1, \, k} - 2u^{l}_{i, \, j, \, k} + u^{l}_{i, \, j + 1, \, k} + u^{l}_{i, \, j, \, k - 1} - 2u^{l}_{i, \, j, \, k} + u^{l}_{i, \, j, \, k + 1} \right ) $$
#
# Defining $\lambda \equiv ( \Delta d / \Delta t )^{2}$, combining terms, and isolating the unknown future values on the left side yields the following:
#
# $$ -u^{l + 1/3}_{i - 1, \, j, \, k} + ( \lambda + 2 ) u^{l + 1/3}_{i, \, j, \, k} - u^{l + 1/3}_{i + 1, \, j, \, k} = 2 ( \lambda - 2 ) u^{l}_{i, \, j, \, k} - \lambda u^{l - 1/3}_{i, \, j, \, k} + u^{l}_{i, \, j - 1, \, k} + u^{l}_{i, \, j + 1, \, k} + u^{l}_{i, \, j, \, k - 1} + u^{l}_{i, \, j, \, k + 1} $$
#
# Which, for a domain of $0$ to $n$ nodes in the $x$ dimension, corresponds to the tridiagonal matrix equation below. Notice that there are equations written only for nodes $1$ to $n - 1$ because the discretization can only be applied to interior nodes. $u^{l + 1/3}_{0, \, j, \, k}$ and $u^{l + 1/3}_{n, \, j, \, k}$ in the right hand side vector are the future values of the exterior nodes, and should be set according to the problem's boundary conditions.
#
# $$
# \begin{pmatrix}
# ( \lambda + 2 ) & -1 & & & 0 \\
# -1 & ( \lambda + 2 ) & -1 & & \\
# & \ddots & \ddots & \ddots & \\
# & & -1 & ( \lambda + 2 ) & -1 \\
# 0 & & & -1 & ( \lambda + 2 )
# \end{pmatrix}
# \begin{pmatrix} u^{l + 1/3}_{1, \, j, \, k} \\
# u^{l + 1/3}_{2, \, j, \, k} \\
# \vdots \\
# u^{l + 1/3}_{n - 2, \, j, \, k} \\
# u^{l + 1/3}_{n - 1, \, j, \, k}
# \end{pmatrix}
# =
# \begin{pmatrix}
# u^{l + 1/3}_{0, \, j, \, k} + \beta_{1, \, j, \, k} \\
# \beta_{2, \, j, \, k} \\
# \vdots \\
# \beta_{n - 2, \, j, \, k} \\
# u^{l + 1/3}_{n, \, j, \, k} + \beta_{n - 1, \, j, \, k}
# \end{pmatrix} \tag{1}
# $$
#
# $$ \beta_{i, \, j, \, k} \equiv 2 ( \lambda - 2 ) u^{l}_{i, \, j, \, k} - \lambda u^{l - 1/3}_{i, \, j, \, k} + u^{l}_{i, \, j - 1, \, k} + u^{l}_{i, \, j + 1, \, k} + u^{l}_{i, \, j, \, k - 1} + u^{l}_{i, \, j, \, k + 1} $$
#
# A problem with this matrix equation is that it requires that $u^{l - 1/3}_{i, \, j, \, k}$ is known, which is not possible during the first time step. A special matrix equation not requiring knowledge of pre-initial conditions can be formulated by revisiting the temporal finite difference discretization--specifically by replacing the central difference with a forward difference discretization. Consider a forward Taylor series expansion:
#
# $$ f(t + \Delta t) = f(t) + \Delta t f'(t) + \frac{(\Delta t)^{2}}{2!} f''(t) + \mathcal{O} \left [ (\Delta t)^{3} \right ] $$
#
# Truncating the higher-order terms and solving for $f''(t)$ yields a forward finite difference approximation of the second derivative which is used to form an alternate discretization of the wave equation:
#
# $$ f''(t) \approx \frac{2 \left [ f(t + \Delta t) - f(t) - (\Delta t) f'(t) \right ]}{(\Delta t)^{2}} $$
#
# $$ \frac{2 \left [ u^{l + 1/3}_{i, \, j, \, k} - u^{l}_{i, \, j, \, k} - (\Delta t) \dfrac{\partial}{\partial t} u^{l}_{i, \, j, \, k} \right ]}{(\Delta t)^{2}} = \frac{u^{l + 1/3}_{i - 1, \, j, \, k} - 2u^{l + 1/3}_{i, \, j, \, k} + u^{l + 1/3}_{i + 1, \, j, \, k}}{(\Delta x)^{2}} + \frac{u^{l}_{i, \, j - 1, \, k} - 2u^{l}_{i, \, j, \, k} + u^{l}_{i, \, j + 1, \, k}}{(\Delta y)^{2}} + \frac{u^{l}_{i, \, j, \, k - 1} - 2u^{l}_{i, \, j, \, k} + u^{l}_{i, \, j, \, k + 1}}{(\Delta z)^{2}} $$
#
# Maintaining the earlier definitions of $\Delta d$ and $\lambda$, combining terms, and isolating unknown values on the left side yields the following expression and its corresponding matrix equation:
#
# $$ -u^{l + 1/3}_{i - 1, \, j, \, k} + 2 (\lambda + 1) u^{l + 1/3}_{i, \, j, \, k} - u^{l + 1/3}_{i + 1, \, j, \, k} = 2 (\lambda - 2) u^{l}_{i, \, j, \, k} + 2 \lambda (\Delta t) \frac{\partial}{\partial t} u^{l}_{i, \, j, \, k} + u^{l}_{i, \, j - 1, \, k} + u^{l}_{i, \, j + 1, \, k} + u^{l}_{i, \, j, \, k - 1} + u^{l}_{i, \, j, \, k + 1} $$
#
# $$
# \begin{pmatrix}
# 2 ( \lambda + 1 ) & -1 & & & 0 \\
# -1 & 2 ( \lambda + 1 ) & -1 & & \\
# & \ddots & \ddots & \ddots & \\
# & & -1 & 2 ( \lambda + 1 ) & -1 \\
# 0 & & & -1 & 2 ( \lambda + 1 )
# \end{pmatrix}
# \begin{pmatrix} u^{l + 1/3}_{1, \, j, \, k} \\
# u^{l + 1/3}_{2, \, j, \, k} \\
# \vdots \\
# u^{l + 1/3}_{n - 2, \, j, \, k} \\
# u^{l + 1/3}_{n - 1, \, j, \, k}
# \end{pmatrix}
# =
# \begin{pmatrix}
# u^{l + 1/3}_{0, \, j, \, k} + \gamma_{1, \, j, \, k} \\
# \gamma_{2, \, j, \, k} \\
# \vdots \\
# \gamma_{n - 2, \, j, \, k} \\
# u^{l + 1/3}_{n, \, j, \, k} + \gamma_{n - 1, \, j, \, k}
# \end{pmatrix} \tag{2}
# $$
#
# $$ \gamma_{i, \, j, \, k} \equiv 2 (\lambda - 2) u^{l}_{i, \, j, \, k} + 2 \lambda (\Delta t) \frac{\partial}{\partial t} u^{l}_{i, \, j, \, k} + u^{l}_{i, \, j - 1, \, k} + u^{l}_{i, \, j + 1, \, k} + u^{l}_{i, \, j, \, k - 1} + u^{l}_{i, \, j, \, k + 1} $$
#
# Equation $(2)$ must be solved for the first partial time step, and equation $(1)$ must be solved for all others. It is reasonable that equation $(2)$ requires that $\partial u/\partial t$ is initially known because the wave equation is second order in time. Physical intuition for this requirement can come from the case of a vibrating string: the position and velocity of a point must be known to predict its future.
#
# Multiple matrix equations need to be solved for a single spatial dimension since location on the two explicit axes is required to select specific values of $u$. When solving for all spatial dimensions is done, the simulation is at time step $l + 1$, and the process can be repeated for the remaining times. This procedure is implemented in the following cell.
# %% tags=[]
import numpy as np
import h5py
from tqdm import trange
from scipy.linalg import lu
from thomas_solve import thomas_solve
# Parameters:
length = 1 # Length of one side of the cube domain.
time = 3 # Total simulation time. (NOTE: shadows the stdlib ``time`` module name.)
Dd = 0.01 # Node (grid) spacing (uniform: dx = dy = dz = Dd).
Dt = 0.01 # Whole time step
partial_Dt = Dt/3 # One third of a step: one implicit sweep per spatial dimension.
lam = (Dd/partial_Dt)**2 # lambda = (delta d / delta t)^2 from the derivation, using the partial step.
num_nodes = int(length/Dd) # Number of nodes in one dimension.
num_eqns = num_nodes - 2 # Also the number of interior nodes in one dimension.
num_partial_time_steps = int(np.rint(time/partial_Dt))
num_time_steps = int(np.rint(time/Dt))
# The HDF5 data format is used to overcome memory limitations associated with fine space and time steps. A file is prepared
# to be written to: It will have one group per simulation. Attributes documenting the number of nodes, number of time steps,
# and magnitudes of the space and time steps will be attached to each group. Within each group are data sets corresponding to
# a single time step each. These data sets are the 3D solution arrays u[x, y, z].
try:
    # Datasets are named 'l_<whole step>_<partial step>': e.g. 'l_0_1' is the
    # state after the first one-third (x-sweep) of step 0.
    wave_sims = h5py.File('output/3d_wave_sims.hdf5', 'w')
    sim = wave_sims.create_group('sim_0')
    # Record this simulation's parameters:
    sim.attrs['num_time_steps'] = num_time_steps
    sim.attrs['num_nodes'] = num_nodes
    sim.attrs['time_step'] = Dt
    sim.attrs['space_step'] = Dd
    # Function to create a new dataset corresponding to values at a particular time step.
    def new_dataset(t_step):
        return sim.create_dataset(t_step, (num_nodes, num_nodes, num_nodes), dtype='f', compression='gzip', \
            compression_opts=9)
    # Record initial and boundary conditions. The boundary is held at zero to allow wave reflections.
    u_init = new_dataset('l_0_0')
    u_init[:, :, :] = np.zeros((num_nodes, num_nodes, num_nodes))
    # Single-node perturbation at 30% along each axis seeds the wave.
    perturb_pos = int(np.rint(0.3*num_nodes))
    u_init[perturb_pos, perturb_pos, perturb_pos] = 5
    # The other initial condition is the initial rate of change, du/dt:
    dudt = np.zeros((num_nodes, num_nodes, num_nodes))
    # Preallocate matrix equation arrays:
    A = np.zeros((num_eqns, num_eqns))
    x = np.zeros(num_eqns) # NOTE(review): preallocated but never used below.
    b = np.zeros(num_eqns)
    # LU decompose the coefficient matrix in equation (2):
    main_diag = [2*(lam + 1)]*num_eqns
    off_diag = [-1]*(num_eqns - 1)
    A = A + np.diag(main_diag) + np.diag(off_diag, k=1) + np.diag(off_diag, k=-1)
    P, L, U = lu(A)
    assert P.all() == np.eye(num_eqns).all() # If the permutation matrix is not the identity matrix, there is a problem.
    # Only the three non-trivial LU diagonals are needed by the Thomas solver.
    l1 = np.diag(L, k=-1)
    u0 = np.diag(U)
    u1 = np.diag(U, k=1)
    # Solve equation (2) for the first partial time step:
    # x dimension:
    # NOTE(review): interior nodes run 1..num_nodes-2 but range(1, num_eqns)
    # stops one short (last value num_nodes-3) — confirm whether the final
    # interior row/column is intentionally skipped in all sweeps below.
    u_pres = u_init
    u_fut = new_dataset('l_0_1')
    for j in range(1, num_eqns):
        for k in range(1, num_eqns):
            # Assemble b and solve:
            b[:] = 2*(lam - 2)*u_pres[1:-1, j, k] + 2*lam*partial_Dt*dudt[1:-1, j, k] + u_pres[1:-1, j - 1, k] \
                + u_pres[1:-1, j + 1, k] + u_pres[1:-1, j, k - 1] + u_pres[1:-1, j, k + 1]
            b[0] += u_pres[0, j, k]
            b[-1] += u_pres[-1, j, k]
            u_fut[1:-1, j, k] = thomas_solve(l1, u0, u1, b)
    # Now that the first partial step in the future has been solved, there is enough information to solve for the remaining
    # partial steps until the first whole step using eqn (1). First LU decompose the coefficient matrix in eqn (1):
    main_diag = [lam + 2]*num_eqns
    off_diag = [-1]*(num_eqns - 1)
    A = np.zeros((num_eqns, num_eqns))
    A = A + np.diag(main_diag) + np.diag(off_diag, k=1) + np.diag(off_diag, k=-1)
    P, L, U = lu(A)
    assert P.all() == np.eye(num_eqns).all() # If the permutation matrix is not the identity matrix, there is a problem.
    l1 = np.diag(L, k=-1)
    u0 = np.diag(U)
    u1 = np.diag(U, k=1)
    # y dimension:
    u_past = u_init
    u_pres = sim['l_0_1']
    u_fut = new_dataset('l_0_2')
    for i in range(1, num_eqns):
        for k in range(1, num_eqns):
            # Assemble b and solve:
            b[:] = 2*(lam - 2)*u_pres[i, 1:-1, k] - lam*u_past[i, 1:-1, k] + u_pres[i - 1, 1:-1, k] \
                + u_pres[i + 1, 1:-1, k] + u_pres[i, 1:-1, k - 1] + u_pres[i, 1:-1, k + 1]
            b[0] += u_init[i, 0, k]
            b[-1] += u_init[i, -1, k]
            u_fut[i, 1:-1, k] = thomas_solve(l1, u0, u1, b)
    # z dimension:
    u_past = sim['l_0_1']
    u_pres = sim['l_0_2']
    u_fut = new_dataset('l_1_0')
    for i in range(1, num_eqns):
        for j in range(1, num_eqns):
            # Assemble b and solve:
            b[:] = 2*(lam - 2)*u_pres[i, j, 1:-1] - lam*u_past[i, j, 1:-1] + u_pres[i, j - 1, 1:-1] \
                + u_pres[i, j + 1, 1:-1] + u_pres[i - 1, j, 1:-1] + u_pres[i + 1, j, 1:-1]
            b[0] += u_init[i, j, 0]
            b[-1] += u_init[i, j, -1]
            u_fut[i, j, 1:-1] = thomas_solve(l1, u0, u1, b)
    # Datasets no longer needed by future sweeps are deleted to save disk space.
    del sim['l_0_1']
    # Solve equation (1) for the remaining time steps:
    for l in trange(1, num_time_steps - 1, desc='Solving with \u0394d = %.6f, \u0394t = %.6f' %(Dd, Dt)):
        # x dimension:
        u_past = sim['l_%d_2' %(l - 1)]
        u_pres = sim['l_%d_0' %l]
        u_fut = new_dataset('l_%d_1' %l)
        for j in range(1, num_eqns):
            for k in range(1, num_eqns):
                # Assemble b and solve:
                b[:] = 2*(lam - 2)*u_pres[1:-1, j, k] - lam*u_past[1:-1, j, k] + u_pres[1:-1, j - 1, k] \
                    + u_pres[1:-1, j + 1, k] + u_pres[1:-1, j, k - 1] + u_pres[1:-1, j, k + 1]
                b[0] += u_init[0, j, k]
                b[-1] += u_init[-1, j, k]
                u_fut[1:-1, j, k] = thomas_solve(l1, u0, u1, b)
        del sim['l_%d_2' %(l - 1)]
        # y dimension:
        u_past = sim['l_%d_0' %l]
        u_pres = sim['l_%d_1' %l]
        u_fut = new_dataset('l_%d_2' %l)
        for i in range(1, num_eqns):
            for k in range(1, num_eqns):
                # Assemble b and solve:
                b[:] = 2*(lam - 2)*u_pres[i, 1:-1, k] - lam*u_past[i, 1:-1, k] + u_pres[i - 1, 1:-1, k] \
                    + u_pres[i + 1, 1:-1, k] + u_pres[i, 1:-1, k - 1] + u_pres[i, 1:-1, k + 1]
                b[0] += u_init[i, 0, k]
                b[-1] += u_init[i, -1, k]
                u_fut[i, 1:-1, k] = thomas_solve(l1, u0, u1, b)
        # z dimension:
        u_past = sim['l_%d_1' %l]
        u_pres = sim['l_%d_2' %l]
        u_fut = new_dataset('l_%d_0' %(l + 1))
        for i in range(1, num_eqns):
            for j in range(1, num_eqns):
                # Assemble b and solve:
                b[:] = 2*(lam - 2)*u_pres[i, j, 1:-1] - lam*u_past[i, j, 1:-1] + u_pres[i, j - 1, 1:-1] \
                    + u_pres[i, j + 1, 1:-1] + u_pres[i - 1, j, 1:-1] + u_pres[i + 1, j, 1:-1]
                b[0] += u_init[i, j, 0]
                b[-1] += u_init[i, j, -1]
                u_fut[i, j, 1:-1] = thomas_solve(l1, u0, u1, b)
        del sim['l_%d_1' %l]
    # Delete the last dataset computed at a partial time step to save disk space:
    del sim['l_%d_2' %(num_time_steps - 2)]
finally:
    # Even if the simulation failed for some reason, close the hdf5 file:
    wave_sims.close()
# %% [markdown]
# ## Visualization
# The animation below is produced in the following cell. Blues and reds denote negative and positive magnitudes, respectively. Notice that the Dirichlet boundary condition causes the wave to reflect around the domain and then interfere with itself.
#
# This is a more accurate representation of certain waves encountered in nature than the one-dimensional sine waves typically used. For example, this animation could be used to picture acoustic pressure in a sound wave.
#
# 
# %% tags=[]
import numpy as np
import pyvista as pv
import h5py
from tqdm import trange
# Load simulation data from its HDF5 file:
try:
    wave_sims = h5py.File('output/3d_wave_sims.hdf5', 'r')
    sim = wave_sims['sim_0']
    num_time_steps = sim.attrs['num_time_steps']
    # Set up the plotting space:
    pv.set_plot_theme('document')
    p = pv.Plotter(window_size=(768, 768))
    p.add_bounding_box()
    # Position the camera so its focus is at the center of the volume.
    u = sim['l_0_0'][:]
    vol = p.add_volume(u)
    x_min, x_max, y_min, y_max, z_min, z_max = vol.GetBounds()
    pos = (5*x_max, 2*y_max, 5*z_max)
    focus = (np.mean([x_min, x_max]), np.mean([y_min, y_max]), np.mean([z_min, z_max]))
    viewup = (0, 1, 0)
    # Function to write a frame to an animation:
    # NOTE: reads the *global* loop variable ``l`` to select the dataset;
    # the ``angle`` argument only controls the camera orbit position.
    def write_frame(angle):
        u = sim['l_%d_0' %l][:]
        p.clear()
        p.add_volume(u, cmap='bwr', opacity=[0.9, 0.6, 0, 0, 0.6, 0.9], clim=(-10, 10))
        p.add_text('l = %d' %l, font_size=11)
        p.camera_position = [(pos[0]*np.cos(angle), pos[1], pos[2]*np.sin(angle)), focus, viewup]
        p.write_frame()
    # Write this scene to a gif in the output folder:
    p.open_gif('output/3d_wave.gif')
    step = int(np.rint(num_time_steps/100)) # Sample ~100 frames for the gif.
    angle_inc = 0.05/step
    for l in trange(0, num_time_steps, step, desc='Exporting gif animation'):
        write_frame(angle_inc*l)
    # Write the scene to an mp4 in the output folder:
    fps = int(num_time_steps/12) # A 12 s animation is desired. The framerate is set accordingly.
    p.open_movie('output/3d_wave.mp4', framerate=fps)
    for l in trange(num_time_steps, desc='Exporting mp4 animation'):
        write_frame(angle_inc*l)
finally:
    # NOTE(review): if h5py.File raises, ``wave_sims`` and ``p`` are unbound
    # and these calls raise NameError — confirm whether that is acceptable.
    wave_sims.close()
    p.close()
# %% [markdown]
# ## References
# [1] Chapra, S. C., & Canale, R. P. (2015). Numerical Methods for Engineers (7th ed.). New York, NY: McGraw-Hill Education.
#
# [2] Peaceman, D., & Rachford, H. (1955). The Numerical Solution of Parabolic and Elliptic Differential Equations. Journal of the Society for Industrial and Applied Mathematics, 3(1), 28-41. Retrieved August 4, 2020, from www.jstor.org/stable/2098834
|
{"hexsha": "aca2f20d36c04e130b9c62a06d756323ded9a7f3", "size": 19311, "ext": "py", "lang": "Python", "max_stars_repo_path": "alternating_direction_implicit_method.py", "max_stars_repo_name": "smgill/Finite-Difference-Methods-for-PDEs", "max_stars_repo_head_hexsha": "51518d0073093f2d66459a1602184fb4dd82b844", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "alternating_direction_implicit_method.py", "max_issues_repo_name": "smgill/Finite-Difference-Methods-for-PDEs", "max_issues_repo_head_hexsha": "51518d0073093f2d66459a1602184fb4dd82b844", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "alternating_direction_implicit_method.py", "max_forks_repo_name": "smgill/Finite-Difference-Methods-for-PDEs", "max_forks_repo_head_hexsha": "51518d0073093f2d66459a1602184fb4dd82b844", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.0924369748, "max_line_length": 543, "alphanum_fraction": 0.5738180312, "include": true, "reason": "import numpy,from scipy", "num_tokens": 6984}
|
\documentclass[openany]{./llncs2e/llncs}
\usepackage{graphicx}
\usepackage{multirow}
\usepackage{graphicx}
\usepackage{amssymb}
\usepackage{pifont}
\usepackage{pdflscape}
\usepackage{url}
\usepackage[table,xcdraw]{xcolor}
\usepackage{fixltx2e}
\usepackage{mathtools}
\usepackage{lmodern}
\usepackage{rotating}
\usepackage{textcomp}
\usepackage{booktabs}
\usepackage[square,sort,comma,numbers]{natbib}
\usepackage[nolist,nohyperlinks]{acronym}
% Maintain images and tables within their respective sections
\usepackage[section]{placeins}
\pagestyle{plain}
\usepackage{geometry}
\geometry{
a4paper, % or letterpaper
textwidth=15cm, % llncs has 12.2cm
textheight=24cm, % llncs has 19.3cm
heightrounded, % integer number of lines
hratio=1:1, % horizontally centered
vratio=2:3, % not vertically centered
}
% make a proper TOC despite llncs
\setcounter{tocdepth}{3}
\makeatletter
\renewcommand*\l@author[2]{}
\renewcommand*\l@title[2]{}
\makeatother
\setcounter{secnumdepth}{3}
\renewcommand\bibsection{%
\section*{References}%
\markboth{\MakeUppercase{\refname}}{\MakeUppercase{\refname}}%
}%
\begin{document}
\title{Project Management and Maintenance on Information Systems}
\subtitle{Diogo Pinto - n\textordmasculine 69905}
\author{Diogo.Reis.Pinto@tecnico.ulisboa.pt}
\institute{Instituto Superior T\'{e}cnico}
\maketitle
\input{sections/abstract.tex}
\input{sections/keywords.tex}
\begingroup
\let\cleardoublepage\relax
\let\clearpage\relax
{\def\large{} \def\normalsize{} \tableofcontents}
\endgroup
\input{sections/introduction.tex}
\input{sections/researchMethodology.tex}
\input{sections/problem-contextualization.tex}
\input{sections/related-work.tex}
\input{sections/architecture.tex}
\input{sections/evaluation.tex}
\newpage
\input{sections/conclusions.tex}
\newpage
\appendix
\input{sections/appendix.tex}
\newpage
%
% Bibliography
%
\bibliographystyle{plain}
% replace example.bib with your .bib
\bibliography{refs.bib}
\end{document}
|
{"hexsha": "f18cf9de132955da7b1e86122dd07a783941ddcb", "size": 1999, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "report.tex", "max_stars_repo_name": "DiogoReisPinto/thesis-project", "max_stars_repo_head_hexsha": "7f706f0b1dbe467dd37e2d0984001e00fa6371e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "report.tex", "max_issues_repo_name": "DiogoReisPinto/thesis-project", "max_issues_repo_head_hexsha": "7f706f0b1dbe467dd37e2d0984001e00fa6371e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "report.tex", "max_forks_repo_name": "DiogoReisPinto/thesis-project", "max_forks_repo_head_hexsha": "7f706f0b1dbe467dd37e2d0984001e00fa6371e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0843373494, "max_line_length": 65, "alphanum_fraction": 0.7708854427, "num_tokens": 644}
|
# Scratch sanity checks for the binning used below (prints only, no output files).
step = int((95900-88300+100)/100)
print (step)
# 77 is the step value computed (and printed) just above; 95900 is exclusive.
a = list(range(88300, 95900, 77))
print (a[2])
# Rescale the integer bin edges by 1/100 to their real-valued equivalents.
a = [item / 100 for item in a]
print (a)
print (len(a))
import sys
sys.path.append("../")
from scipy.stats import linregress
from Whole_Movie_Check_Plots.Server_Movies_Paths import GetMovieFilesPaths
def Check100Percent(percentage=100):
    """Scan smoothed-density tables for the 'MDCK_WT_Pure' movies.

    For each movie folder returned by ``GetMovieFilesPaths``, opens its
    ``cellID_density_smoothed.txt`` table and prints the first column of
    any row with fewer than 102 tab-separated fields.

    NOTE(review): the appends that would populate ``cct_hrs_list`` and
    ``den_per_list`` are disabled (the string-literal block below), so this
    currently returns two empty lists — confirm whether that is intentional.

    :param percentage: column offset into the density table (default 100).
    :return: tuple ``(cct_hrs_list, den_per_list)``.
    """
    _, txt_file_list = GetMovieFilesPaths(exp_type="MDCK_WT_Pure")
    cct_hrs_list = []
    den_per_list = []
    for raw_file in txt_file_list:
        # Swap the last two path components for the smoothed-density file path.
        smoothed_density_file = raw_file.split("/")[:-2]
        smoothed_density_file = "/".join(smoothed_density_file) + "/density/cellID_density_smoothed.txt"
        print(smoothed_density_file)
        for line in open(smoothed_density_file, 'r'):
            line = line.rstrip().split("\t")
            if len(line) < 102:
                print (line[0])
            """
            if line[0] != "Cell_ID":
                print (int(line[0]))
                print (float(line[1]))
                print (float(line[percentage+1]))
                cct_hrs_list.append(float(line[1]))
                den_per_list.append(float(line[percentage+1]))
            """
    # Scale densities into display units (a no-op while the appends are disabled).
    den_per_list = [item * 10000 for item in den_per_list]
    return cct_hrs_list, den_per_list
#Check100Percent(percentage=100)
|
{"hexsha": "8ea6c267566ba0e4810c542251a550e78d8edb03", "size": 1316, "ext": "py", "lang": "Python", "max_stars_repo_path": "Biological_Questions/Cell_Density_Impacts/Check_100_Percent_Function.py", "max_stars_repo_name": "The-Kristina/CellComp", "max_stars_repo_head_hexsha": "29ec7690e0d9adb1a6214937ca41fd1dadce18c6", "max_stars_repo_licenses": ["CNRI-Python", "RSA-MD", "Xnet", "Net-SNMP", "X11"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-05-13T10:07:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T16:20:48.000Z", "max_issues_repo_path": "Biological_Questions/Cell_Density_Impacts/Check_100_Percent_Function.py", "max_issues_repo_name": "The-Kristina/CellComp", "max_issues_repo_head_hexsha": "29ec7690e0d9adb1a6214937ca41fd1dadce18c6", "max_issues_repo_licenses": ["CNRI-Python", "RSA-MD", "Xnet", "Net-SNMP", "X11"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Biological_Questions/Cell_Density_Impacts/Check_100_Percent_Function.py", "max_forks_repo_name": "The-Kristina/CellComp", "max_forks_repo_head_hexsha": "29ec7690e0d9adb1a6214937ca41fd1dadce18c6", "max_forks_repo_licenses": ["CNRI-Python", "RSA-MD", "Xnet", "Net-SNMP", "X11"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-23T18:13:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-11T18:46:48.000Z", "avg_line_length": 28.6086956522, "max_line_length": 104, "alphanum_fraction": 0.6117021277, "include": true, "reason": "from scipy", "num_tokens": 343}
|
import numpy as np
import matplotlib.pyplot as plt
import random
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
# data
X = np.array([[15, 39], [15, 81], [16, 6], [16, 77], [17, 40], [17, 76], [18, 6], [18, 94], [19, 3], [19, 72], [19, 14], [19, 99], [20, 15], [20, 77], [20, 13], [20, 79], [21, 35], [21, 66], [23, 29], [23, 98], [24, 35], [24, 73], [25, 5], [25, 73], [28, 14], [28, 82], [28, 32], [28, 61], [29, 31], [29, 87], [30, 4], [30, 73], [33, 4], [33, 92], [33, 14], [33, 81], [34, 17], [34, 73], [37, 26], [37, 75], [38, 35], [38, 92], [39, 36], [39, 61], [39, 28], [39, 65], [40, 55], [40, 47], [40, 42], [40, 42], [42, 52], [42, 60], [43, 54], [43, 60], [43, 45], [43, 41], [44, 50], [44, 46], [46, 51], [46, 46], [46, 56], [46, 55], [47, 52], [47, 59], [48, 51], [48, 59], [48, 50], [48, 48], [48, 59], [48, 47], [49, 55], [49, 42], [50, 49], [50, 56], [54, 47], [54, 54], [54, 53], [54, 48], [54, 52], [54, 42], [54, 51], [54, 55], [54, 41], [54, 44], [54, 57], [54, 46], [57, 58], [57, 55], [58, 60], [58, 46], [59, 55], [59, 41], [60, 49], [60, 40], [60, 42], [60, 52], [60, 47], [60, 50], [61, 42], [61, 49], [62, 41], [62, 48], [62, 59], [62, 55], [62, 56], [62, 42], [63, 50], [63, 46], [63, 43], [63, 48], [63, 52], [63, 54], [64, 42], [64, 46], [65, 48], [65, 50], [65, 43], [65, 59], [67, 43], [67, 57], [67, 56], [67, 40], [69, 58], [69, 91], [70, 29], [70, 77], [71, 35], [71, 95], [71, 11], [71, 75], [71, 9], [71, 75], [72, 34], [72, 71], [73, 5], [73, 88], [73, 7], [73, 73], [74, 10], [74, 72], [75, 5], [75, 93], [76, 40], [76, 87], [77, 12], [77, 97], [77, 36], [77, 74], [78, 22], [78, 90], [78, 17], [78, 88], [78, 20], [78, 76], [78, 16], [78, 89], [78, 1], [78, 78], [78, 1], [78, 73], [79, 35], [79, 83], [81, 5], [81, 93], [85, 26], [85, 75], [86, 20], [86, 95], [87, 27], [87, 63], [87, 13], [87, 75], [87, 10], [87, 92], [88, 13], [88, 86], [88, 15], [88, 69], [93, 14], [93, 90], [97, 32], [97, 86], [98, 15], [98, 88], [99, 39], [99, 97], [101, 24], [101, 68], [103, 17], [103, 85], [103, 23], [103, 69], [113, 8], [113, 91], [120, 16], [120, 79], [126, 28], [126, 74], [137, 
18], [137, 83]])
# type of kernel we use in this sample
def gaussian_kernel(distance, bandwidth):
    """Gaussian (normal) kernel weight for a point at the given distance."""
    scaled = distance / bandwidth
    norm = 1 / (bandwidth * np.sqrt(2 * np.pi))
    return norm * np.exp(-0.5 * scaled ** 2)
# Stop shifting a point once its per-iteration movement falls below this.
STOP_THRESHOLD = 1e-3
# Converged points closer than this to a cluster centre share that cluster.
CLUSTER_THRESHOLD = 1e-1
class MeanShift:
    """Mean-shift clustering from scratch.

    Every point is iteratively shifted toward the kernel-weighted mean of
    all points until its movement falls below STOP_THRESHOLD; converged
    points whose modes coincide (within CLUSTER_THRESHOLD) are grouped
    into the same cluster.
    """

    def __init__(self, bandwidth=20, kernel=gaussian_kernel):
        self.bandwidth = bandwidth
        self.kernel = kernel

    def _distance(self, a, b):
        """Euclidean distance between two points."""
        return np.linalg.norm(np.array(a) - np.array(b))

    def _shift_point(self, point, points):
        """Shift ``point`` to the kernel-weighted mean of ``points``.

        Returns ``(distance moved, new point location)``.
        """
        shift_x = 0.0
        shift_y = 0.0
        scale = 0.0
        for p in points:
            dist = self._distance(point, p)
            # Weight each contribution by kernel distance.
            weight = self.kernel(dist, self.bandwidth)
            shift_x += p[0] * weight
            shift_y += p[1] * weight
            scale += weight
        # Normalise by the total weight to obtain the weighted mean.
        shift_x = shift_x / scale
        shift_y = shift_y / scale
        new_point = [shift_x, shift_y]
        distance = self._distance(new_point, point)
        return (distance, new_point)

    def _cluster_points(self, points):
        """Assign one cluster index per converged point.

        A point within CLUSTER_THRESHOLD of an existing cluster centre
        takes that centre's index; otherwise it seeds a new cluster.
        """
        clusters = []
        cluster_centers = []
        for point in points:
            for idx, center in enumerate(cluster_centers):
                if self._distance(point, center) < CLUSTER_THRESHOLD:
                    clusters.append(idx)
                    # Fix: stop at the first matching centre. Without this
                    # break, a point near several centres was appended once
                    # per match, corrupting the point-to-label alignment.
                    break
            else:
                clusters.append(len(cluster_centers))
                cluster_centers.append(point)
        return clusters

    def fit_predict(self, X):
        """Cluster the rows of ``X`` (n_samples x 2) and return their labels."""
        # Fix: shift in float space. ``X.copy()`` kept integer dtype for
        # integer input (like the sample data in this module), so every
        # sub-pixel shift was silently truncated on assignment.
        shift_points = np.array(X, dtype=float)
        shifting = [True] * X.shape[0]
        # Guard against an infinite loop if convergence stalls.
        prev_dist = 0
        threshold = 0
        while True:
            # Update the location of every point still in motion.
            max_dist = 0
            for i in range(len(shift_points)):
                if not shifting[i]:
                    continue
                distance, shift_points[i] = self._shift_point(shift_points[i], X)
                max_dist = max(max_dist, distance)
                shifting[i] = distance > STOP_THRESHOLD
            if not any(shifting):
                # All points converged; further iterations would be no-ops.
                break
            if threshold >= 10:
                break
            elif prev_dist >= max_dist:
                threshold += 1
            prev_dist = max_dist
        return self._cluster_points(shift_points.tolist())
if __name__ == "__main__":
    # define model
    model = MeanShift(bandwidth=23)
    # assign a cluster to each example (labels are plain Python ints)
    yhat = model.fit_predict(X)
    # retrieve unique clusters
    clusters = np.unique(yhat)
    # create scatter plot for samples from each cluster
    for cluster in clusters:
        # get row indexes for samples with this cluster
        # (numpy broadcasts the scalar comparison over the label list)
        row_ix = np.where(yhat == cluster)
        # create scatter of these samples
        plt.scatter(X[row_ix, 0], X[row_ix, 1])
    # show the plot
    plt.show()
|
{"hexsha": "b76aca1316c1f2a15586e525f6d10df812b27fc5", "size": 5434, "ext": "py", "lang": "Python", "max_stars_repo_path": "unsupervised_learning/clustering/mean_shift/sample_scratch.py", "max_stars_repo_name": "niektuytel/Machine_Learning", "max_stars_repo_head_hexsha": "0cd5656ca8076c383fd81c5e32a49969a20ad042", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-07-05T15:51:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T15:17:37.000Z", "max_issues_repo_path": "unsupervised_learning/clustering/mean_shift/sample_scratch.py", "max_issues_repo_name": "niektuytel/Machine_Learning", "max_issues_repo_head_hexsha": "0cd5656ca8076c383fd81c5e32a49969a20ad042", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "unsupervised_learning/clustering/mean_shift/sample_scratch.py", "max_forks_repo_name": "niektuytel/Machine_Learning", "max_forks_repo_head_hexsha": "0cd5656ca8076c383fd81c5e32a49969a20ad042", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.1788617886, "max_line_length": 2014, "alphanum_fraction": 0.5031284505, "include": true, "reason": "import numpy", "num_tokens": 1965}
|
import sys
import time
import numpy as np
import os
from pyspark import SparkContext
# Logistic regression via full-batch gradient descent on randomly generated
# data, parallelized with Spark.  Usage: script.py [N] [iterations] [D]
if __name__ == "__main__":
    sc = SparkContext(appName="LR")
    D = 10  # Number of dimensions (features per data point)
    iterations = 20  # number of gradient-descent steps
    N = 10  # controls the number of generated partitions (see `points` below)
    # Optional positional CLI overrides: N, iterations, D.
    if len(sys.argv)>1:
        N = int(sys.argv[1])
    if len(sys.argv)>2:
        iterations = int(sys.argv[2])
    if len(sys.argv)>3:
        D = int(sys.argv[3])
    print("N %d D %d iterations %d" %(N,D,iterations))
    # One random (rows, D+1) matrix per partition: column 0 is used as the
    # label, columns 1..D as the coordinates (see gradient() below).
    # NOTE(review): range(1, N) yields N - 1 elements, so the total number of
    # generated rows is N - 1, not N -- confirm this is intentional.
    points = sc.parallelize(range(1,N)).mapPartitions(lambda r: [np.random.ranf(size=(len(list(r)),D+1))])
    # Force materialization so the timed loop below excludes data generation.
    points.cache().first()
    start = time.time()
    # Initial weights drawn uniformly from [-1, 1).
    w = 2 * np.random.ranf(size=D) - 1
    print("Initial w: " + str(w))
    # Compute logistic regression gradient for a matrix of data points.
    def gradient(matrix, w):
        Y = matrix[:, 0]   # point labels (first column of each partition matrix)
        X = matrix[:, 1:]  # point coordinates
        # For each point (x, y), compute gradient function, then sum these up
        # over the rows; result is a length-D vector.
        return ((1.0 / (1.0 + np.exp(-Y * X.dot(w))) - 1.0) * Y * X.T).sum(1)
    # In-place elementwise sum used as the reduce combiner.
    def add(x, y):
        x += y
        return x
    for i in range(iterations):
        #print("On iteration %i" % (i + 1))
        # NOTE(review): the lambda captures `w`; Spark serializes the closure
        # for each job, so workers presumably see the updated weights each
        # iteration -- confirm against the Spark closure-serialization docs.
        w -= points.map(lambda m: gradient(m, w)).reduce(add)
    print("Final w: " + str(w))
    print("lr exec time %f" % (time.time()-start))
    sc.stop()
|
{"hexsha": "8887cc4f4a6b62ce54d5a230c115260588f07d92", "size": 1341, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/main/python/logistic_regression_gen.py", "max_stars_repo_name": "ehsantn/data-analytics-benchmarks", "max_stars_repo_head_hexsha": "db6ecf8c3e1d899a146ced47a8cc3e92b52fca69", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-06-19T17:09:06.000Z", "max_stars_repo_stars_event_max_datetime": "2017-06-19T17:09:06.000Z", "max_issues_repo_path": "src/main/python/logistic_regression_gen.py", "max_issues_repo_name": "ehsantn/data-analytics-benchmarks", "max_issues_repo_head_hexsha": "db6ecf8c3e1d899a146ced47a8cc3e92b52fca69", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main/python/logistic_regression_gen.py", "max_forks_repo_name": "ehsantn/data-analytics-benchmarks", "max_forks_repo_head_hexsha": "db6ecf8c3e1d899a146ced47a8cc3e92b52fca69", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3673469388, "max_line_length": 106, "alphanum_fraction": 0.5697240865, "include": true, "reason": "import numpy", "num_tokens": 403}
|
from typing import Union, Tuple, List
import numpy as np
import plotly.graph_objects as go
from garrus.core import BaseVisualization
class ReliabilityDiagram(BaseVisualization):
    """
    Reliability (calibration) diagram: per confidence bin, compares the mean
    predicted confidence against the empirically observed accuracy, together
    with the fraction of samples falling in each bin.  Rendered with plotly.
    """

    def __calc_statistics(
            self, confidences: np.ndarray, accuracies: np.ndarray
    ) -> Tuple[List[float], List[float], List[float]]:
        """
        Bin the samples by confidence and return, per bin, the mean
        confidence, the mean accuracy, and the fraction of samples.

        Empty bins contribute 0 for both means.
        """
        mean_conf_by_bin: List[float] = []
        mean_acc_by_bin: List[float] = []
        samples_pct: List[float] = []

        # histogram_bin_edges yields self.n_bins + 1 edges, so digitize maps
        # each confidence to a bin index in 1..n_bins -- except values equal
        # to the rightmost edge, which receive index n_bins + 1.  Clip those
        # into the last bin so the maximum-confidence sample is not dropped.
        bin_idxs = np.digitize(confidences, np.histogram_bin_edges(confidences, bins=self.n_bins))
        bin_idxs = np.clip(bin_idxs, 1, self.n_bins)

        # BUG FIX: iterate over the configured number of bins instead of the
        # hard-coded range(1, 11), which silently assumed self.n_bins == 10.
        for bin_idx in range(1, self.n_bins + 1):
            in_bin = bin_idxs == bin_idx
            # Decide emptiness from the bin population rather than value
            # truthiness: a populated bin whose values are all 0.0 yields a
            # mean of 0 either way, but the intent is clearer (and no NaN
            # from a mean over an empty slice can leak through).
            mean_conf_by_bin.append(confidences[in_bin].mean() if in_bin.any() else 0)
            mean_acc_by_bin.append(accuracies[in_bin].mean() if in_bin.any() else 0)
            samples_pct.append(in_bin.sum() / bin_idxs.shape[0])
        return mean_conf_by_bin, mean_acc_by_bin, samples_pct

    def _plot(self, confidences: np.ndarray, accuracies: np.ndarray, **kwargs: Union[int, float]) -> None:
        """Render the reliability diagram for the given predictions."""
        mean_conf_by_bin, mean_acc_by_bin, samples_pct = self.__calc_statistics(confidences, accuracies)
        # x positions of the bars.  NOTE(review): this produces n_bins + 1
        # positions for n_bins statistics; plotly pairs them positionally --
        # confirm the intended bar/edge alignment.
        bars = [x / self.n_bins for x in range(self.n_bins + 1)]

        fig = go.Figure()
        # confidence --------------
        fig.add_trace(
            go.Bar(
                y=mean_conf_by_bin, x=bars, name="Mean confidence",
                marker={"color": "red", "opacity": 0.6}
            ),
        )
        # accuracy ----------------
        fig.add_trace(
            go.Bar(
                y=mean_acc_by_bin, x=bars, name="Mean accuracy",
                marker={"color": "blue", "opacity": 0.6}
            ),
        )
        fig.add_trace(
            go.Scatter(
                y=mean_acc_by_bin, x=bars, name="Mean confidence dot",
                marker={"color": "blue", "size": 9}, mode="markers+lines", showlegend=False
            ),
        )
        # % of samples ------------
        fig.add_trace(
            go.Bar(
                y=samples_pct, x=bars, name="% of samples", width=0.025,
                marker={"color": "black", "opacity": 1.0}
            ),
        )
        # ideal line (perfect calibration: confidence == accuracy) --------
        fig.add_shape(
            type="line",
            x0=-0.05, y0=0, x1=0.95, y1=1,
            line=dict(
                color="black",
                width=2,
                dash="dot",
            )
        )
        fig.update_yaxes(rangemode="tozero")
        fig.update_layout(
            title="Reliability Plot",
            xaxis_title="Confidence",
            yaxis_title="Accuracy",
            showlegend=True,
            barmode="overlay",
            bargap=0,
            height=self.plot_h, width=self.plot_w,
        )
        fig.show()
|
{"hexsha": "e32fd6b8b2fdc126dbc86bcb93d1fcee3572cbfe", "size": 2905, "ext": "py", "lang": "Python", "max_stars_repo_path": "garrus/visualizations/reliability_diagram.py", "max_stars_repo_name": "sleep3r/garrus", "max_stars_repo_head_hexsha": "28096ca0d6166117be23e740a68831396ba92a7e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-04-06T15:00:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-12T21:27:46.000Z", "max_issues_repo_path": "garrus/visualizations/reliability_diagram.py", "max_issues_repo_name": "sleep3r/garrus", "max_issues_repo_head_hexsha": "28096ca0d6166117be23e740a68831396ba92a7e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "garrus/visualizations/reliability_diagram.py", "max_forks_repo_name": "sleep3r/garrus", "max_forks_repo_head_hexsha": "28096ca0d6166117be23e740a68831396ba92a7e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-26T04:25:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-26T04:25:59.000Z", "avg_line_length": 32.6404494382, "max_line_length": 106, "alphanum_fraction": 0.5235800344, "include": true, "reason": "import numpy", "num_tokens": 676}
|
"""
aux_functions.py contains auxillary functions for tracking spatial provenance
"""
import numpy as np
import time
import os
import uuid
import random
# from numpy.core.numeric import allclose
def reset_array_prov(array, id = None):
    """
    Stamp every cell of the 2-D object *array* with provenance ``(id, (i, j))``.

    Parameters
    ----------
    array : np.ndarray
        2-D array whose cells expose a ``set_provenance`` method.
    id : hashable, optional
        Identifier shared by all cells; a fresh ``uuid.uuid1()`` is generated
        when omitted.  (The parameter name is kept for backward compatibility
        with keyword callers, even though it shadows the ``id`` builtin.)

    Returns
    -------
    np.ndarray
        The same array, mutated in place.
    """
    # BUG FIX: use `is None` instead of `== None` -- equality can be
    # overloaded by the argument; identity is the test intended here.
    if id is None:
        id = uuid.uuid1()
    for i in range(array.shape[0]):
        for j in range(array.shape[1]):
            array[i,j].set_provenance((id,(i,j)))
    return array
def save_array_prov(array, path):
    """
    Collect the ``provenance`` attribute of every cell of the 2-D object
    *array* and save the resulting object array as a timestamp-named ``.npy``
    file inside the directory *path*.
    """
    prov = np.empty(array.shape, dtype=object)
    rows, cols = array.shape[0], array.shape[1]
    for row in range(rows):
        for col in range(cols):
            prov[row, col] = array[row, col].provenance
    # File name is the current Unix time, so successive snapshots sort
    # chronologically.
    target = os.path.join(path, str(time.time()))
    np.save(target, prov)
# arr = np.load('logs/1626725745.618419.npy', allow_pickle=True)
|
{"hexsha": "6f63ded4edfd763ec5229bdbf4b7d2609bd444ac", "size": 772, "ext": "py", "lang": "Python", "max_stars_repo_path": "aux_functions.py", "max_stars_repo_name": "j2zhao/test-provenance", "max_stars_repo_head_hexsha": "321953e3a9854f004c3e450a57f0e7abe096e1f7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aux_functions.py", "max_issues_repo_name": "j2zhao/test-provenance", "max_issues_repo_head_hexsha": "321953e3a9854f004c3e450a57f0e7abe096e1f7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aux_functions.py", "max_forks_repo_name": "j2zhao/test-provenance", "max_forks_repo_head_hexsha": "321953e3a9854f004c3e450a57f0e7abe096e1f7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9032258065, "max_line_length": 77, "alphanum_fraction": 0.6528497409, "include": true, "reason": "import numpy,from numpy", "num_tokens": 201}
|
export solve_vf_all!

# Solve the full value function by running the four stages in sequence:
# terminal period, infill phase, learning update, exploration phase.
# `evs` holds the Emax arrays filled in place; `t` is scratch space; `p` the
# model primitives; `θt` the payoff parameters; `σ` a scalar parameter;
# `itype` the unit type; `dograd` toggles gradient computation.
# NOTE(review): each stage presumably consumes arrays written by the previous
# one, so the call order should not be changed -- confirm before reordering.
function solve_vf_all!(evs::dcdp_Emax, t::dcdp_tmpvars, p::dcdp_primitives, θt::AbstractVector, σ::Real, itype::Tuple, dograd::Bool; kwargs...)
    solve_vf_terminal!(evs, p)
    solve_vf_infill!( evs, t, p, θt, σ, dograd, itype; kwargs...)
    learningUpdate!( evs, t, p, σ, dograd)
    solve_vf_explore!( evs, t, p, θt, σ, dograd, itype; kwargs...)
end
# Convenience overload: split the full parameter vector θ into its payoff
# part `_θt(θ, p)` and its σ component `_σv(θ)`, then dispatch to the typed
# method above.
function solve_vf_all!(evs, t, p, θ, itype, dograd; kwargs...)
    solve_vf_all!(evs, t, p, _θt(θ, p), _σv(θ), itype, dograd; kwargs...)
end
|
{"hexsha": "f05ae679d143c4d459acbc125d62b97cbf2c1e03", "size": 530, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/vf_solve_all.jl", "max_stars_repo_name": "magerton/ShaleDrillingModel.jl", "max_stars_repo_head_hexsha": "25c8845309603fafb19f406a7b065496078f01f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-07T05:10:00.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-07T05:10:00.000Z", "max_issues_repo_path": "src/vf_solve_all.jl", "max_issues_repo_name": "magerton/ShaleDrillingModel.jl", "max_issues_repo_head_hexsha": "25c8845309603fafb19f406a7b065496078f01f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/vf_solve_all.jl", "max_forks_repo_name": "magerton/ShaleDrillingModel.jl", "max_forks_repo_head_hexsha": "25c8845309603fafb19f406a7b065496078f01f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-09-23T23:33:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T15:20:16.000Z", "avg_line_length": 37.8571428571, "max_line_length": 143, "alphanum_fraction": 0.6528301887, "num_tokens": 210}
|
using FourierTools, BenchmarkTools
# Benchmark round-trip FFT identities on a real 3-D array, comparing plain
# fft/ifft, the fftshift-wrapped variant, and the FourierTools helper pairs
# ffts/iffts and ft/ift.
function main()
    # Odd/awkward sizes on purpose, so the transforms are not all powers of 2.
    x = randn((133, 513, 33))
    y = copy(x)
    # Each @btime line measures one full forward+inverse round trip; real.()
    # discards the (numerically tiny) imaginary residue of the inverse.
    @btime $y .= real.(ifft(fft($x)));
    @btime $y .= real.(ifft(ifftshift(fftshift(fft($x)))));
    @btime $y .= real.(iffts(ffts($x)));
    @btime $y .= real.(ift(ft($x)));
    return
end

main()
|
{"hexsha": "0a30a1604c28fb098eac54644c2b5ce5e944d480", "size": 299, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/ft_helpers_benchmark.jl", "max_stars_repo_name": "bionanoimaging/FourierTools.jl", "max_stars_repo_head_hexsha": "c8a37c8f38b0b9897fac2be1bf3c1e109f844964", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2021-03-30T23:15:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T14:20:15.000Z", "max_issues_repo_path": "examples/ft_helpers_benchmark.jl", "max_issues_repo_name": "bionanoimaging/FourierTools.jl", "max_issues_repo_head_hexsha": "c8a37c8f38b0b9897fac2be1bf3c1e109f844964", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-03-28T13:01:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-22T09:43:02.000Z", "max_forks_repo_path": "examples/ft_helpers_benchmark.jl", "max_forks_repo_name": "bionanoimaging/FourierTools.jl", "max_forks_repo_head_hexsha": "c8a37c8f38b0b9897fac2be1bf3c1e109f844964", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.9333333333, "max_line_length": 59, "alphanum_fraction": 0.5518394649, "num_tokens": 105}
|
import os
import numpy as np
import nltk
from nltk.corpus import stopwords
from collections import Counter
def extract_features_from(path, dictionary):
    """
    Build a bag-of-words feature matrix and label vector for the emails in
    *path*.

    Parameters
    ----------
    path : str
        Directory containing one file per email.  A file whose name's
        second-to-last dot-separated component is ``'spam'`` (e.g.
        ``123.spam.txt``) is labelled 1, everything else 0.
    dictionary : list of (word, count) tuples
        Vocabulary as produced by ``make_dictionary_from``; the column order
        of the returned matrix follows this list.

    Returns
    -------
    tuple of (np.ndarray, np.ndarray)
        ``features_matrix[i, j]`` is the number of occurrences of
        ``dictionary[j][0]`` in the i-th email; ``labels[i]`` is 0 or 1.
    """
    emails = [os.path.join(path, f) for f in os.listdir(path)]
    features_matrix = np.zeros((len(emails), len(dictionary)))
    labels = np.zeros(len(emails))

    # Map each vocabulary word to its column once, instead of scanning the
    # whole dictionary for every word of every email (was O(words * vocab)).
    word_index = {word: i for i, (word, _) in enumerate(dictionary)}

    for index, mail in enumerate(emails):
        with open(mail, encoding="latin1") as m:
            all_words = []
            for line in m:
                all_words += line.split()
        # Count each distinct word once.  BUG FIX: out-of-vocabulary words
        # are now skipped; previously `wordID` defaulted to 0, so their
        # counts were wrongly written into column 0 of the matrix.
        for word, count in Counter(all_words).items():
            wordID = word_index.get(word)
            if wordID is not None:
                features_matrix[index, wordID] = count
        labels[index] = int(mail.split(".")[-2] == 'spam')
    return features_matrix, labels
def make_dictionary_from(path, max_size):
    """
    Build a vocabulary from all email files in *path*.

    Tokenizes every file with NLTK, keeps lowercase alphanumeric tokens that
    are not English stopwords, and returns the *max_size* most frequent words.

    Parameters
    ----------
    path : str
        Directory containing one file per email.
    max_size : int
        Maximum number of (word, count) entries to return.

    Returns
    -------
    list of (word, count) tuples
        Most common vocabulary words, ordered by descending frequency.
    """
    emails = [os.path.join(path, f) for f in os.listdir(path)]
    all_words = []
    for email in emails:
        with open(email, encoding='latin1') as m:
            all_words += nltk.word_tokenize(m.read())
    # PERF FIX: fetch the stopword list once as a set.  Previously
    # ``stopwords.words('english')`` was called inside the comprehension,
    # rebuilding the whole list for every single token, and membership was
    # tested against a list (O(n)) rather than a set (O(1)).
    stop_words = set(stopwords.words('english'))
    dictionary = [word.lower() for word in all_words if word.isalnum()]
    dictionary = [word for word in dictionary if word not in stop_words]
    return Counter(dictionary).most_common(max_size)
|
{"hexsha": "0680ef612185f99a377fab3930c67a529d0453e2", "size": 1447, "ext": "py", "lang": "Python", "max_stars_repo_path": "Enron/EnronSpam/ml_library.py", "max_stars_repo_name": "elliott-stroud/PySpam", "max_stars_repo_head_hexsha": "6ad477023628cf3781841dd541a22a5aa3b4c422", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-11T07:44:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-11T07:44:28.000Z", "max_issues_repo_path": "Enron/EnronSpam/ml_library.py", "max_issues_repo_name": "elliott-stroud/PySpam", "max_issues_repo_head_hexsha": "6ad477023628cf3781841dd541a22a5aa3b4c422", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Enron/EnronSpam/ml_library.py", "max_forks_repo_name": "elliott-stroud/PySpam", "max_forks_repo_head_hexsha": "6ad477023628cf3781841dd541a22a5aa3b4c422", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5306122449, "max_line_length": 88, "alphanum_fraction": 0.593642018, "include": true, "reason": "import numpy", "num_tokens": 319}
|
"""
Module with reading functionalities of color and magnitude data from photometric and
spectral libraries.
"""
import os
import configparser
from typing import Optional, Tuple
import h5py
import numpy as np
from typeguard import typechecked
from species.core import box
from species.read import read_spectrum
from species.util import phot_util
class ReadColorMagnitude:
    """
    Class for reading color-magnitude data from the database.
    """

    @typechecked
    def __init__(self,
                 library: str,
                 filters_color: Tuple[str, str],
                 filter_mag: str) -> None:
        """
        Parameters
        ----------
        library : str
            Photometric ('vlm-plx' or 'leggett') or spectral ('irtf' or 'spex') library.
        filters_color : tuple(str, str)
            Filter names for the color. For a photometric library, these have to be present in
            the database (typically in the MKO, 2MASS, or WISE system). For a spectral library,
            any filter names can be provided as long as they overlap with the wavelength range
            of the spectra.
        filter_mag : str
            Filter name for the absolute magnitudes (see also description of ``filters_color``).

        Returns
        -------
        NoneType
            None
        """
        self.library = library
        self.filters_color = filters_color
        self.filter_mag = filter_mag

        config_file = os.path.join(os.getcwd(), 'species_config.ini')

        config = configparser.ConfigParser()

        # BUG FIX: close the configuration file again (this used to be
        # ``config.read_file(open(config_file))``, which leaked the handle).
        with open(config_file) as conf_fh:
            config.read_file(conf_fh)

        self.database = config['species']['database']

        # Determine whether the requested library contains photometry or
        # spectra so the getters can branch accordingly.
        with h5py.File(self.database, 'r') as hdf_file:
            if 'photometry' in hdf_file and self.library in hdf_file['photometry']:
                self.lib_type = 'phot_lib'
            elif 'spectra' in hdf_file and self.library in hdf_file['spectra']:
                self.lib_type = 'spec_lib'
            else:
                raise ValueError(f'The \'{self.library}\' library is not present in the database.')

    @typechecked
    def get_color_magnitude(self,
                            object_type: Optional[str] = None) -> box.ColorMagBox:
        """
        Function for extracting color-magnitude data from the selected library.

        Parameters
        ----------
        object_type : str, None
            Object type for which the colors and magnitudes are extracted. Either field dwarfs
            ('field') or young/low-gravity objects ('young'). All objects are selected if set
            to ``None``.

        Returns
        -------
        species.core.box.ColorMagBox
            Box with the colors and magnitudes.

        Raises
        ------
        ValueError
            If no data is available for the chosen filters and object type, if
            ``object_type`` is not one of the supported values, or if
            ``filter_mag`` is not one of the two ``filters_color`` filters
            (photometric libraries only).
        """
        if self.lib_type == 'phot_lib':
            with h5py.File(self.database, 'r') as h5_file:
                sptype = np.asarray(h5_file[f'photometry/{self.library}/sptype'])
                dist = np.asarray(h5_file[f'photometry/{self.library}/distance'])
                dist_error = np.asarray(h5_file[f'photometry/{self.library}/distance_error'])
                flag = np.asarray(h5_file[f'photometry/{self.library}/flag'])
                obj_names = np.asarray(h5_file[f'photometry/{self.library}/name'])

            # Select the requested object population.
            if object_type is None:
                indices = np.arange(0, np.size(sptype), 1)
            elif object_type == 'field':
                indices = np.where(flag == 'null')[0]
            elif object_type == 'young':
                indices = []
                for j, object_flag in enumerate(flag):
                    if 'young' in object_flag:
                        indices.append(j)
                    elif 'lowg' in object_flag:
                        indices.append(j)
                indices = np.array(indices)
            else:
                # BUG FIX: an unsupported object_type previously left
                # ``indices`` undefined and crashed with a NameError below.
                raise ValueError(f'The object_type \'{object_type}\' is not supported. '
                                 f'Please use \'field\', \'young\', or None.')

            if indices.size > 0:
                with h5py.File(self.database, 'r') as h5_file:
                    mag1 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_color[0]}'])
                    mag2 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_color[1]}'])
            else:
                raise ValueError(f'There is not data available from \'{self.library}\' for '
                                 f'\'{object_type}\' type objects with the chosen filters.')

            color = mag1 - mag2

            # Convert the apparent magnitude of the selected filter to an
            # absolute magnitude using the distances (and their errors).
            if self.filter_mag == self.filters_color[0]:
                mag, _ = phot_util.apparent_to_absolute((mag1, None), (dist, dist_error))
            elif self.filter_mag == self.filters_color[1]:
                mag, _ = phot_util.apparent_to_absolute((mag2, None), (dist, dist_error))
            else:
                # BUG FIX: previously ``mag`` stayed undefined in this case
                # and the method crashed with an UnboundLocalError.
                raise ValueError('For a photometric library, the filter_mag argument should be '
                                 'equal to one of the two filters_color filters.')

            color = color[indices]
            mag = mag[indices]
            sptype = sptype[indices]
            obj_names = obj_names[indices]

            # Drop objects that lack a valid color or magnitude.
            indices = []
            for i in range(color.size):
                if not np.isnan(color[i]) and not np.isnan(mag[i]):
                    indices.append(i)

            colormag_box = box.create_box(boxtype='colormag',
                                          library=self.library,
                                          object_type=object_type,
                                          filters_color=self.filters_color,
                                          filter_mag=self.filter_mag,
                                          color=color[indices],
                                          magnitude=mag[indices],
                                          sptype=sptype[indices],
                                          names=obj_names[indices])

        elif self.lib_type == 'spec_lib':
            # Synthetic photometry computed directly from the library spectra.
            read_spec_0 = read_spectrum.ReadSpectrum(spec_library=self.library,
                                                     filter_name=self.filters_color[0])
            read_spec_1 = read_spectrum.ReadSpectrum(spec_library=self.library,
                                                     filter_name=self.filters_color[1])
            read_spec_2 = read_spectrum.ReadSpectrum(spec_library=self.library,
                                                     filter_name=self.filter_mag)

            phot_box_0 = read_spec_0.get_magnitude(sptypes=None)
            phot_box_1 = read_spec_1.get_magnitude(sptypes=None)
            phot_box_2 = read_spec_2.get_magnitude(sptypes=None)

            colormag_box = box.create_box(boxtype='colormag',
                                          library=self.library,
                                          object_type=object_type,
                                          filters_color=self.filters_color,
                                          filter_mag=self.filter_mag,
                                          color=phot_box_0.app_mag[:, 0]-phot_box_1.app_mag[:, 0],
                                          magnitude=phot_box_2.abs_mag[:, 0],
                                          sptype=phot_box_0.sptype,
                                          names=None)

        return colormag_box
class ReadColorColor:
    """
    Class for reading color-color data from the database.
    """

    @typechecked
    def __init__(self,
                 library: str,
                 filters_colors: Tuple[Tuple[str, str], Tuple[str, str]]) -> None:
        """
        Parameters
        ----------
        library : str
            Photometric ('vlm-plx' or 'leggett') or spectral ('irtf' or 'spex') library.
        filters_colors : tuple(tuple(str, str), tuple(str, str))
            Filter names for the colors. For a photometric library, these have to be present in
            the database (typically in the MKO, 2MASS, or WISE system). For a spectral library,
            any filter names can be provided as long as they overlap with the wavelength range
            of the spectra.

        Returns
        -------
        NoneType
            None
        """
        self.library = library
        self.filters_colors = filters_colors

        config_file = os.path.join(os.getcwd(), 'species_config.ini')

        config = configparser.ConfigParser()

        # BUG FIX: close the configuration file again (this used to be
        # ``config.read_file(open(config_file))``, which leaked the handle).
        with open(config_file) as conf_fh:
            config.read_file(conf_fh)

        self.database = config['species']['database']

        # Determine whether the requested library contains photometry or
        # spectra so the getter can branch accordingly.
        with h5py.File(self.database, 'r') as hdf_file:
            if 'photometry' in hdf_file and self.library in hdf_file['photometry']:
                self.lib_type = 'phot_lib'
            elif 'spectra' in hdf_file and self.library in hdf_file['spectra']:
                self.lib_type = 'spec_lib'
            else:
                raise ValueError(f'The \'{self.library}\' library is not present in the database.')

    @typechecked
    def get_color_color(self,
                        object_type: Optional[str] = None) -> box.ColorColorBox:
        """
        Function for extracting color-color data from the selected library.

        Parameters
        ----------
        object_type : str, None
            Object type for which the colors and magnitudes are extracted. Either field dwarfs
            ('field') or young/low-gravity objects ('young'). All objects are selected if set
            to ``None``.

        Returns
        -------
        species.core.box.ColorColorBox
            Box with the colors.

        Raises
        ------
        ValueError
            If ``object_type`` is not one of the supported values.
        """
        if self.lib_type == 'phot_lib':
            # BUG FIX: read the HDF5 file within a context manager, consistent
            # with ReadColorMagnitude (the previous explicit open()/close()
            # pair leaked the file handle when an exception occurred between
            # the two calls).
            with h5py.File(self.database, 'r') as h5_file:
                sptype = np.asarray(h5_file[f'photometry/{self.library}/sptype'])
                flag = np.asarray(h5_file[f'photometry/{self.library}/flag'])
                obj_names = np.asarray(h5_file[f'photometry/{self.library}/name'])

                mag1 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[0][0]}'])
                mag2 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[0][1]}'])
                mag3 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[1][0]}'])
                mag4 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[1][1]}'])

            # Select the requested object population.
            if object_type is None:
                indices = np.arange(0, np.size(sptype), 1)
            elif object_type == 'field':
                indices = np.where(flag == 'null')[0]
            elif object_type == 'young':
                indices = []
                for j, object_flag in enumerate(flag):
                    if 'young' in object_flag:
                        indices.append(j)
                    elif 'lowg' in object_flag:
                        indices.append(j)
                indices = np.array(indices)
            else:
                # BUG FIX: an unsupported object_type previously left
                # ``indices`` undefined and crashed with a NameError below.
                raise ValueError(f'The object_type \'{object_type}\' is not supported. '
                                 f'Please use \'field\', \'young\', or None.')

            color1 = mag1 - mag2
            color2 = mag3 - mag4

            color1 = color1[indices]
            color2 = color2[indices]
            sptype = sptype[indices]
            obj_names = obj_names[indices]

            # Drop objects for which either color is undefined.
            indices = []
            for i in range(color1.size):
                if not np.isnan(color1[i]) and not np.isnan(color2[i]):
                    indices.append(i)

            colorbox = box.create_box(boxtype='colorcolor',
                                      library=self.library,
                                      object_type=object_type,
                                      filters=self.filters_colors,
                                      color1=color1[indices],
                                      color2=color2[indices],
                                      sptype=sptype[indices],
                                      names=obj_names[indices])

        elif self.lib_type == 'spec_lib':
            # Synthetic photometry computed directly from the library spectra.
            read_spec_0 = read_spectrum.ReadSpectrum(spec_library=self.library,
                                                     filter_name=self.filters_colors[0][0])
            read_spec_1 = read_spectrum.ReadSpectrum(spec_library=self.library,
                                                     filter_name=self.filters_colors[0][1])
            read_spec_2 = read_spectrum.ReadSpectrum(spec_library=self.library,
                                                     filter_name=self.filters_colors[1][0])
            read_spec_3 = read_spectrum.ReadSpectrum(spec_library=self.library,
                                                     filter_name=self.filters_colors[1][1])

            phot_box_0 = read_spec_0.get_magnitude(sptypes=None)
            phot_box_1 = read_spec_1.get_magnitude(sptypes=None)
            phot_box_2 = read_spec_2.get_magnitude(sptypes=None)
            phot_box_3 = read_spec_3.get_magnitude(sptypes=None)

            colorbox = box.create_box(boxtype='colorcolor',
                                      library=self.library,
                                      object_type=object_type,
                                      filters=self.filters_colors,
                                      color1=phot_box_0.app_mag[:, 0]-phot_box_1.app_mag[:, 0],
                                      color2=phot_box_2.app_mag[:, 0]-phot_box_3.app_mag[:, 0],
                                      sptype=phot_box_0.sptype,
                                      names=None)

        return colorbox
|
{"hexsha": "3b3ee1d6c082dc69b60bab96d107a86c451c72fd", "size": 12938, "ext": "py", "lang": "Python", "max_stars_repo_path": "species/read/read_color.py", "max_stars_repo_name": "vandalt/species", "max_stars_repo_head_hexsha": "527dd900a60c4d691bd490569cd3b2007f9beead", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "species/read/read_color.py", "max_issues_repo_name": "vandalt/species", "max_issues_repo_head_hexsha": "527dd900a60c4d691bd490569cd3b2007f9beead", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "species/read/read_color.py", "max_forks_repo_name": "vandalt/species", "max_forks_repo_head_hexsha": "527dd900a60c4d691bd490569cd3b2007f9beead", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5657492355, "max_line_length": 100, "alphanum_fraction": 0.5291389705, "include": true, "reason": "import numpy", "num_tokens": 2543}
|
\chapter{Guidelines on the preparation of theses} \label{ch-1}
These guidelines set out the organization and formatting requirements of the OIST PhD thesis, in order to assist students in the preparation of theses for submission. The academic requirements of the thesis are defined in the PRP in section 5.3.13, while the format of the submitted examination and publication versions of the thesis are described here.
This particular documents refers specifically a thesis written in \LaTeX. As such, some points from the full guideline (for example page sizes) are not referenced directly here as they are already defined in this template. Some other points concerning specific pages (for example the abstract) are described in the specific pages themselves in this PDF.
\section{Guidelines on the preparation of theses}
\textbf{Plagiarism and Fraud}: Students are reminded that they must take all necessary precautions to avoid plagiarism and fraudulent misrepresentations of data. The Graduate School does check each thesis for plagiarism using automated online checks, and we will ask you to rewrite if this is present. It is your responsibility to ensure that self-plagiarism is avoided as far as possible by rewriting, by self-citation, and by absolutely refraining from copy-paste from earlier articles you or others have published.
\textbf{Reproducibility}: OIST is committed to openness in science, and a cornerstone of this is the concept of reproducibility. Your thesis should present all the data and methods necessary to allow complete repetition of the experiments and their results, and to allow expert review of your analysis of data. Accordingly, you must ensure that your methods are comprehensive, and that your data sets and code are available for subsequent review by lodging them in the OIST Institutional Repository or some other data repository or database, as appropriate.
\textbf{Inclusion of Published Material}: In some cases, inclusion of published material as chapters is desirable. Normally, however, when published material is included in the thesis, it should be modified in order to remove redundancy and achieve a coherent narrative. It is essential to indicate clearly any portion of the thesis that duplicates parts of articles that were previously published by the candidate. The candidate must cite the article and indicate any parts of a section or chapter of the thesis that depend on the previously published article. This does not apply to previous documents such as thesis proposals and reports written as part of the candidate's research.
An appropriate level of independence on the part of the student is expected. If parts of the thesis are based on published work under joint authorship, the supervisor should provide a statement about the extent to which this is the candidate's own work, as part of the standard supervisor declaration.
When including material from publications in a thesis, students should be aware of the copyright policies of journals. It is recommended that students request journals to vary their normal copyright agreements to allow material from an article to be included in a thesis (as the thesis will be publicly available through the University's Institutional Repository, and from there the National Diet Library database of Japan PhD theses). If, for copyright reasons, material from previously published papers may not be included in the electronically published thesis, the electronically published thesis may cite papers that are already published.
\section{Organization of chapters and sections}
\textbf{Title Page}: This page is the first page, and should list your name, the thesis title, and the name of your Supervisor and official Co-supervisor, if any, and the year of submission. Only OIST Faculty or external faculty with whom a Graduate School Co-Supervision agreement exists may be listed as Co-supervisors.
\textbf{Choice of Title}: Select a descriptive and unique title that clearly communicates your research. Avoid brief or misleading titles. The title will be displayed on your graduation certificate. The title should be unique within OIST, to distinguish your thesis from those of others working on similar topics.
\textbf{Declaration of Original Authorship}: You must declare that the work is your own, and original, by signing and including the declaration.
\textbf{Co-authorship}: Co-authors are not permitted on an OIST PhD thesis. All research and analysis should be your own work. Where co-authors have contributed to papers arising from the work, you should not include their data unless it is essential for the scientific narrative. In such cases, full disclosure of the contribution is required. Acknowledge any work performed by others, whether at OIST or outside OIST.
\textbf{Abstract, Acknowledgements, List of Abbreviations, Glossary, Nomenclature, Dedication, Table of Contents, List of Figures and Tables}: Those are commented directly in the template. Glossary, Nomenclature, and Dedication pages are optional.
\textbf{Main body}: The main body may be arranged as a single body of material, divided into sub-sections of Introduction (including a statement of the problem), Methods, Results, Discussion, or if preferred, in chapters that each deal with a smaller part of the research, each one itself divided into sub-chapters of Introduction, Methods, Results, Discussion (or similar), as appropriate.
\textbf{Reference List}: Provide a complete list of all articles and books cited in your thesis, once only, at the end of the thesis using BibTeX or BibLaTeX. The citations should provide the title of the article, and list at least the first three authors (et al. format is acceptable). Do NOT include articles not cited in the thesis. Do include ALL articles cited.
\textbf{Appendices}: The examination versions of the thesis must include, as an appendix, published papers; unpublished manuscripts that have been submitted for publication; and manuscripts ready for, or very close to, journal submission. These should be placed immediately after the final pages of the thesis, and separated from the thesis itself by a single dividing page with the text: ``Previously published articles associated with the research described in this thesis'', or similar. These published papers are included solely for reference by your examiners, and to show that you satisfy the graduation requirement for at least one submitted article. They will not be included in the on-line version of the thesis once your revisions have been accepted. Papers co-authored during the period of the thesis that do not include material presented in this work should not be included.
\textbf{Appendices and Supplementary Data}: Unlike a journal article, no data or discussion may be presented separately as unpublished supplementary documents or data. Appendices should be used instead for material that is tangentially relevant to the thesis but does not fit in the main narrative. If you need to refer to large volumes of data that cannot be printed (such as an annotated genome, or a simulation with moving images), lodge the data on an OIST repository or a public database and provide the URL of the dataset in the thesis. (See also: Reproducibility, above.)
\section{Formatting Requirements}
\textbf{Page size, Margins, Spacing, Justification, Pagination, Header, Fonts}: those are already built-in the template. Do not modify them.
\textbf{Equations}: Equations are considered to be part of the text; they should be formatted consistently throughout the thesis, following the advice of the student's supervisor.
\textbf{Spelling}: American spelling should be used.
\textbf{Printing}: Theses submitted for examination should be printed double-spaced on one side of a page only (so that when bound in temporary bindings, the right hand page is the printed page). Temporary bindings may use any reasonable white bond paper, in A4 size. Laser printing (in black ink wherever possible, and colour for images where necessary), should be used exclusively, rather than alternatives such as ink-jet, dye sublimation, or wax transfer (for durability of the print). Final bound copies should use acid-free paper (also known as archival paper) to ensure longevity of the thesis in the collection, and should be printed on both sides of the page (single-spaced, with adjusted margins).
\textbf{Colors}: Colors may be used in images and charts where necessary to enhance comprehension, but should not be used for normal text or headings. Avoid the exclusive combination of red and green for binary images, to assist those who have difficulty discriminating hues. All text should be in black unless color-coding is necessary for meaning or contrast.
\textbf{Figures, Tables, Images}: These are detailed in a later chapter with examples.
\textbf{Word length}: No minimum word length is imposed on OIST graduate theses. However, be concise in your language and succinct in your expression. The average length of a PhD thesis will vary between fields and between authors, but typical PhD theses are 100-400 pages in length (approximately 20,000-80,000 words of main body text).
\textbf{Citations}: All papers that you reference in your work must be referenced in full using a style relevant to your field. Provide the full title, a complete list of authors, and the article location and year of publication in the same style for all papers. Use one of several styles you have been introduced to in previous writing. Refer to papers in the text by either a numerical superscript, a bracketed number or by reference to (Author et al., 1999). Be consistent in your citation style throughout all sections of your thesis. Provide a complete list of all papers, books, and proceedings cited in your thesis at the end of the main body of text. Do not include papers in this list that were not cited in the thesis. Reference manager software such as Endnote or similar programs that offer ``Cite-While-You-Write'' functionality can assist this process greatly. Use BibTeX or BibLaTeX if you are using LaTeX.
Citing one reference can be done like so: \cite{Lee98} and multiple references in one go like so \cite{Fil09, Muc10, Kra27}.
\textbf{Editing}: The thesis should be entirely your own work. Minimal editing may be provided by your supervisor(s) or peers but only as a review of initial drafts. Do not seek assistance from OIST internal or paid external editing services, unless directed to do so by the Dean in revision stages.
\section{Intellectual property and copyright}
The student will retain copyright of the published work, in perpetuity. The student acknowledges that OIST remains the owner of the intellectual property generated by the research presented in the thesis and that publication of the thesis under the author's copyright does not diminish this claim. The thesis will be published online in electronic form within one year of graduation.
\section{Submission of examination and final versions}
The thesis will be submitted first as a single PDF in a style formatted to assist the examination process. This version should include (inserted after the thesis itself) any papers published during the tenure of the thesis that are relevant to the material therein, for the convenience of the examiners. Large files should be sent using FileSender, smaller ones less than 10 MB can be sent by email to examination@oist.jp.
The submitted examination thesis should be accompanied by a Declaration from the Supervisor (see Appendix 2 for template). The purpose of the Declaration, a printed sheet which should be completed and signed by the thesis supervisor, is to acknowledge that the work was done in the laboratory of the supervisor, that any coauthors of included material have consented to such inclusion, and that there is no unauthorized use of material for which the copyright is held by other parties. The Declaration will be retained by the Graduate School, and will not be part of the thesis sent for examination or published later.
The two external examiners will examine the written thesis and send their reports to Graduate School, to arrive before the oral. The oral exam (1 hour presentation, 2 hours closed exam) is then conducted at OIST, and a report prepared by the Chair. Any required revisions are then sent to the student, and the student then makes the appropriate revisions to the approval of the Supervisor (and maybe checking by examiners as well). Once these are accepted by the Chair, the student is eligible to graduate.
The student should then prepare and submit the final version of the thesis (the final approved PDF without any appended articles or manuscripts, or highlighted areas of revision). Once this is received and checked by the Graduate School, the degree may be awarded by acclaim of the Faculty Assembly.
The final version must be accompanied by a signed Deposit Consent Form, which confirms to us that you agree to the immediate publication of the thesis online, and provides a checklist for you to complete essentially vouching that the document is free from any copyright restrictions or private information concerns. You may elect to withhold publication of the main part of the thesis for a set period, with support from the Supervisor, perhaps due to a patent application, or other time-sensitive concern. The abstract and metadata will be published instead. Once the reasons for withholding complete publications are no longer relevant, the thesis must be published in full on OIST IR, from whence it is archived in the National Diet Library and accessible online. No changes can be made once you submit the final version, although the title and other details can be changed prior to that. The title of the final version is reproduced on the degree certificate.
|
{"hexsha": "038994c3d2c010180e08b2dc6486146c39cfa4f4", "size": 13893, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "PhD Thesis/MainText/chapter1.tex", "max_stars_repo_name": "aisulu-maipas/LaTeX-templates", "max_stars_repo_head_hexsha": "f81a7bc2bcbd496d554b7cc6bc8ee30bb60b9254", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-25T16:51:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-25T16:51:23.000Z", "max_issues_repo_path": "PhD Thesis/MainText/chapter1.tex", "max_issues_repo_name": "aisulu-maipas/LaTeX-templates", "max_issues_repo_head_hexsha": "f81a7bc2bcbd496d554b7cc6bc8ee30bb60b9254", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PhD Thesis/MainText/chapter1.tex", "max_forks_repo_name": "aisulu-maipas/LaTeX-templates", "max_forks_repo_head_hexsha": "f81a7bc2bcbd496d554b7cc6bc8ee30bb60b9254", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 175.8607594937, "max_line_length": 969, "alphanum_fraction": 0.807385014, "num_tokens": 2782}
|
# Umbrella module that bundles the online estimation code:
# parameter estimators and state estimators live in their own files.
module OnlineEstimators
include("parameter_estimators.jl")  # online parameter-estimation routines
include("state_estimators.jl")      # online state-estimation routines
end
|
{"hexsha": "0dbfdf95f0a54f6c8cfb34f40a50d7e07ed5c31d", "size": 95, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/OnlineEstimators/OnlineEstimators.jl", "max_stars_repo_name": "jonniedie/ControlSimTools.jl", "max_stars_repo_head_hexsha": "6f9b390924dd23f2c905fc3735ed3177e01914f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-02T18:22:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-04T06:36:40.000Z", "max_issues_repo_path": "src/OnlineEstimators/OnlineEstimators.jl", "max_issues_repo_name": "jonniedie/ControlSimTools.jl", "max_issues_repo_head_hexsha": "6f9b390924dd23f2c905fc3735ed3177e01914f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-11T22:32:03.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-11T22:36:38.000Z", "max_forks_repo_path": "src/OnlineEstimators/OnlineEstimators.jl", "max_forks_repo_name": "jonniedie/ControlSimTools.jl", "max_forks_repo_head_hexsha": "6f9b390924dd23f2c905fc3735ed3177e01914f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.8333333333, "max_line_length": 34, "alphanum_fraction": 0.8315789474, "num_tokens": 20}
|
#! /usr/bin/env python3
import click
import os
import shutil
import fileinput
import subprocess
import re
import numpy as np
from natsort import natsorted
import pandas as pd
from pandas import DataFrame
import csv
from pathlib import Path
def make_cutoff_folders(path_cutoff, einputs):
    """ Make cutoff folders.

    Creates one subfolder per cutoff value under ``path_cutoff``.
    Existing folders are left untouched, so the function is idempotent.

    Args:
        path_cutoff (:obj:'str'): Path to folder inside which subfolders for cutoff convergence will be created.
        einputs (:obj:'list'): List of cutoff/encut values (strings) for convergence test.
    """
    for value in einputs:
        # exist_ok avoids the check-then-create race of the exists()/makedirs() pair.
        os.makedirs(os.path.join(path_cutoff, value), exist_ok=True)
def make_kmesh_folders(path_kmesh, kinputs):
    """ Make kmesh folders.

    Creates one subfolder per k-mesh under ``path_kmesh``; spaces in each
    mesh string are replaced with 'x' (e.g. "4 4 4" -> "4x4x4").

    Args:
        path_kmesh (:obj:'str'): Path to folder inside which subfolders for kmesh convergence will be created.
        kinputs (:obj:'list'): List of kmesh values for convergence test, e.g. ["4 4 4", "6 6 6"].

    Raises:
        Exception: if any element does not contain exactly 3 whitespace-separated integers.
    """
    # Validate each mesh individually. (The old check summed stripped string
    # lengths across all elements and tested divisibility by 3, which both
    # rejected valid multi-digit meshes in some mixes and accepted malformed
    # combinations whose lengths happened to sum to a multiple of 3.)
    for mesh in kinputs:
        if len(mesh.split()) != 3:
            raise Exception('Invalid number of kmeshes\n'
                            'Each element in the array should contain 3 integers')
    for mesh in kinputs:
        os.makedirs(os.path.join(path_kmesh, mesh.replace(" ", "x")), exist_ok=True)
def copy_files(path_cutoff, einputs, path_kmesh, kinputs):
    """ Copy input files inside each folder for cutoff and kmesh convergence test.

    The five VASP input files are read from the current working directory and
    copied into every cutoff and kmesh subfolder.

    Args:
        path_cutoff (:obj:'str'): Path to folder inside which subfolders for cutoff convergence are present.
        einputs (:obj:'list'): List of cutoff/encut values for convergence test.
        path_kmesh (:obj:'str'): Path to folder inside which subfolders for kmesh convergence are present.
        kinputs (:obj:'list'): List of kmesh values for convergence test.
    """
    cutoff_dirs = [os.path.join(path_cutoff, value) for value in einputs]
    kmesh_dirs = [os.path.join(path_kmesh, mesh.replace(" ", "x")) for mesh in kinputs]
    input_files = ('POSCAR', 'POTCAR', 'KPOINTS', 'INCAR', 'job')
    for target in cutoff_dirs + kmesh_dirs:
        for name in input_files:
            shutil.copy(name, target)
def update_values(filepath, save_path, value):
    """ Create a new file with updated values.

    For an INCAR file every line starting with 'encut' (case-insensitive) is
    replaced by the new cutoff; for a KPOINTS file the last line is replaced
    by the mesh, with 'x' separators turned back into spaces.

    Args:
        filepath (:obj:'Path'): Path to the input file (name must be 'INCAR' or 'KPOINTS').
        save_path (:obj:'str'): Path to the output/updated file.
        value (:obj:'str'): New/updated cutoff or kmesh.
    """
    with open(filepath, 'r') as src:
        content = src.readlines()
    if filepath.name == 'INCAR':
        updated = ['encut =' + value + '\n'
                   if line.strip().lower().startswith('encut') else line
                   for line in content]
        with open(save_path, 'w') as out:
            out.writelines(updated)
    elif filepath.name == 'KPOINTS':
        # "4x4x4" -> "4 4 4"; replaces the mesh on the final line.
        content[-1] = value.replace('x', ' ')
        with open(save_path, 'w') as out:
            out.writelines(content)
|
{"hexsha": "ef4de2fa8db91bfc888a89e8f71f650addf79735", "size": 3202, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/convtest/conv_setup.py", "max_stars_repo_name": "warda-rahim/ConvTest", "max_stars_repo_head_hexsha": "5f999baa2c7781abf5d3be9f10bffa60b01e7c9c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-02-12T20:07:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-04T07:04:56.000Z", "max_issues_repo_path": "tests/convtest/conv_setup.py", "max_issues_repo_name": "warda-rahim/ConvTest", "max_issues_repo_head_hexsha": "5f999baa2c7781abf5d3be9f10bffa60b01e7c9c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/convtest/conv_setup.py", "max_forks_repo_name": "warda-rahim/ConvTest", "max_forks_repo_head_hexsha": "5f999baa2c7781abf5d3be9f10bffa60b01e7c9c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-13T10:08:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-13T10:08:31.000Z", "avg_line_length": 30.2075471698, "max_line_length": 112, "alphanum_fraction": 0.6152404747, "include": true, "reason": "import numpy", "num_tokens": 750}
|
using Printf
# @todo does Node need to be mutable?
# One node of the MCTS search tree. Mutable because expansion and backup
# rewrite its fields in place.
mutable struct Node
    visit_count::Int32       # number of search visits through this node
    prior::Float32           # prior probability assigned when the parent was expanded
    value_sum::Float32       # accumulated backed-up value; mean value is value_sum / visit_count
    children::Dict{Int32,Node}                              # action index => child node
    hidden_state::Union{Nothing,AbstractArray{Float32, 3}}  # network hidden state; `nothing` until expanded
    reward::Float32          # predicted reward for reaching this node
    info::String             # free-form debug label, printed by `show_indented`
    state::AbstractEnvState # @todo - remove state altogether
    prior_policy::Dict       # action index => softmaxed prior over the full action space (set in expand_node!)
    action_scores::Dict      # per-action scores; initialized empty here — presumably filled by the search loop, TODO confirm
    function Node(prior, state; visit_count=0, value_sum=0,
                  children=Dict(), hidden_state=nothing, reward=0)
        new(visit_count, prior, value_sum, children, hidden_state, reward, "", state, Dict(), Dict())
    end
end
# Accessor: the cached network hidden state (or `nothing` before expansion).
gethiddenstate(n::Node) = n.hidden_state
# Accessor: whose turn it is in the environment state attached to this node.
gettoplay(n::Node) = n.state.to_play
# Serialize the subtree rooted at `x` into nested Dicts suitable for JSON
# output. `visit_pr` is this node's share of its parent's child visits.
function json(x::Node, visit_pr=0.0)
    noisy_priors = Dict(action => child.prior for (action, child) in x.children)
    out = Dict(
        "visit_pr" => visit_pr,
        "visit_count" => x.visit_count,
        "to_play" => get_player_label(x),
        "prior" => x.prior,
        "value_sum" => x.value_sum,
        "prior_policy" => x.prior_policy,
        "noisy_prior_policy" => noisy_priors,
        "reward" => x.reward,
        "children" => Dict(),
        "action_scores" => x.action_scores,
    )
    total_visits = sum([child.visit_count for child in values(x.children)])
    for (action, child) in x.children
        out["children"][action] = json(child, child.visit_count / total_visits)
    end
    out
end
# "A" for the tracked player's turn, "B" otherwise.
function get_player_label(x::Node)
    if x.state.to_play.idx == PLAYER_ID
        "A"
    else
        "B"
    end
end
# Pretty-print the whole subtree rooted at `x` (delegates to `show_indented`).
function Base.show(io::IO, x::Node)
    show_indented(io, x)
end
# Recursively print `x` and its children, indenting 5 spaces per tree level.
# `action` is the action index that led to this node (omitted for the root).
# NOTE(review): `print` and `@printf` here write to stdout while `println`
# targets `io` — confirm whether the mixed streams are intentional.
function show_indented(io::IO, x::Node, level=0, action=nothing)
    #show(io, x.channels)
    print(" "^(level*5))
    action_label = isnothing(action) ? "" : "A($action)"
    hex_state = "hex_str" #hex_str(x.state)
    @printf "%s %s pr: %.2f {c: %i, r: %i, vs: %.3f}|%s %s" action_label hex_state x.prior x.visit_count x.reward x.value_sum get_player_label(x) x.info
    println(io)
    # Children are printed in ascending action order for stable output.
    actions = [action for action in keys(x.children)]
    for action in sort(actions)
        child = x.children[action]
        show_indented(io, child, level+1, action)
    end
end
# A node counts as expanded once it has at least one child.
expanded(x::Node) = !isempty(x.children)
# Mean backed-up value of the node; 0 for an unvisited node.
function calc_value(x::Node)
    if x.visit_count == 0
        return Float32(0.0)
    end
    Float32(x.value_sum / x.visit_count)
end
# (action, prior) pairs for every child, sorted by action index.
function priors(x::Node)
    pairs = [(action, child.prior) for (action, child) in x.children]
    sort(pairs)
end
#=
We expand a node using the value, reward and policy prediction obtained from
the neural network.
=#
# Expand `node` in place from one network evaluation.
# Caches the hidden state and predicted reward on the node, records the full
# softmaxed policy in `node.prior_policy`, and creates one child per legal
# action with priors renormalised over the legal action set only.
function expand_node!(node::Node, actions::Vector, output::SingleNetworkOutput, reward::Number)
    p_logits = policy_logits(output)
    hidden = hidden_state(output)
    node.hidden_state = hidden
    node.reward = Float32(reward)
    # Full policy distribution over every action index (legal or not).
    for (action, pr) in enumerate(softmax(p_logits))
        node.prior_policy[action] = pr
    end
    if !isempty(actions)
        # Renormalise exp(logits) over the legal actions only.
        policy = Dict([a=>exp(p_logits[a.idx]) for a in actions])
        policy_sum = sum(values(policy))
        for (action, p) in policy
            node.children[action.idx] = Node(p / policy_sum, getnextstate(node.state, action)) #reward=get_reward(node.state, action)
        end
    end
end
# Batched variant of `expand_node!`: expands each node in `nodes` from the
# corresponding slice of one batched network evaluation.
# `hidden_state(output)` is sliced along dim 4 (one slice per node) and
# `policy_logits(output)` is indexed by column.
function expand_node!(
    nodes::Vector{Node},
    actionSets::Vector,
    output::NetworkOutput,
    reward::Vector{Float32}
)
    # Broadcast-assign each node its hidden-state slice and reward.
    setfield!.(
        nodes,
        :hidden_state,
        eachslice(hidden_state(output), dims=4)
    )
    setfield!.(
        nodes,
        :reward,
        Float32.(reward)
    )
    #### @todo - efficiency could be much improved
    p_logits_tensor = policy_logits(output)
    for (idx, actions) in enumerate(actionSets)
        node = nodes[idx]
        p_logits = view(p_logits_tensor, :, idx)
        # Full softmaxed policy over every action index.
        for (action, pr) in enumerate(softmax(p_logits))
            node.prior_policy[action] = pr
        end
        if !isempty(actions)
            # Renormalise exp(logits) over the legal actions only.
            policy = Dict([a=>exp(p_logits[a.idx]) for a in actions])
            policy_sum = sum(values(policy))
            for (action, p) in policy
                node.children[action.idx] = Node(p / policy_sum, getnextstate(node.state, action))
            end
        end
    end
    ####
end
|
{"hexsha": "889618873a01d267f850f3bcd73216b5c7f4db8e", "size": 4177, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/node.jl", "max_stars_repo_name": "JuliaRL/MuZero.jl", "max_stars_repo_head_hexsha": "2fcb23e8d5b49b6030987f2441f6b7157c3c7601", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-11T21:27:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-11T21:27:53.000Z", "max_issues_repo_path": "src/node.jl", "max_issues_repo_name": "JuliaRL/MuZero.jl", "max_issues_repo_head_hexsha": "2fcb23e8d5b49b6030987f2441f6b7157c3c7601", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/node.jl", "max_forks_repo_name": "JuliaRL/MuZero.jl", "max_forks_repo_head_hexsha": "2fcb23e8d5b49b6030987f2441f6b7157c3c7601", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4890510949, "max_line_length": 152, "alphanum_fraction": 0.6428058415, "num_tokens": 1123}
|
from __future__ import print_function
import sys
import time
import numpy as np
from itertools import chain
from genomic_neuralnet.common.base_compare import try_predictor
from genomic_neuralnet.config import REQUIRED_MARKER_CALL_PROPORTION, \
REQUIRED_MARKERS_PER_SAMPLE_PROP
from genomic_neuralnet.config import CPU_CORES, NUM_FOLDS
from genomic_neuralnet.config import PARALLEL_BACKEND, SINGLE_CORE_BACKEND
from genomic_neuralnet.util import get_markers_and_pheno, get_use_celery, \
get_verbose, get_reuse_celery_cache, \
get_celery_gpu, get_species_and_trait
from genomic_neuralnet.common.read_clean_data import get_clean_data
# Each result is a tuple (accuracy, identifier); identifier is (fold, func idx).
ACCURACY_IDX = 0
IDENTIFIER_IDX = 1
FOLD_IDX = 0
PRED_FUNC_IDX = 1
# Bounds for drawing fresh random seeds (full unsigned 32-bit range).
MAX_INT = 2**31 * 2 - 1
MIN_INT = 0
def _dot_wrapper(func, *params):
res = func(*params)
print('.', end='')
sys.stdout.flush()
return res
def _run_joblib(job_params):
    """Run every job in *job_params* in parallel with joblib.

    Each element of *job_params* is an argument tuple for ``try_predictor``;
    results are returned in submission order as (accuracy, identifier) tuples,
    using CPU_CORES worker processes.
    """
    from joblib import delayed, Parallel
    accuracies = Parallel(n_jobs=CPU_CORES)(delayed(_dot_wrapper)(try_predictor, *x) for x in job_params)
    return accuracies
def _run_debug(job_params):
    """Run every job sequentially in a single process, for easy debugging."""
    return [_dot_wrapper(try_predictor, *args) for args in job_params]
def _run_celery(job_params):
    """Distribute the jobs in *job_params* over celery workers.

    Keeps roughly one in-flight message per worker, caches finished results
    to disk (so interrupted runs can be resumed with --reuse-celery-cache),
    and polls every 10 seconds until every job is done.

    Returns the cached accuracies for all jobs, in job order.
    """
    from genomic_neuralnet.common.celery_slave \
            import celery_try_predictor, get_num_workers, get_queue_length, \
                   disk_cache, load_and_clear_cache, is_disk_cached

    job_idx = 0
    results = {}  # job index -> celery AsyncResult still in flight
    done = 0
    while True:
        queue_len = get_queue_length()
        workers = get_num_workers()
        # Keep putting messages on the queue until there
        # is one message waiting for every worker.
        desired_messages = workers
        # Account for exhausting the work queue. A negative num_to_add just
        # makes the range below empty, so no clamping is needed.
        remaining_jobs = len(job_params) - job_idx
        num_to_add = np.min([desired_messages - queue_len, remaining_jobs])
        if get_verbose():
            print('{} Workers / Desired Messages'.format(workers))
            print('{} In Flight'.format(len(results)))
            print('{} Completed'.format(done))
            print('{} Not Started'.format(len(job_params) - len(results) - done))
            print('Adding {} messages'.format(num_to_add))
        # Add messages to fill queue.
        for _ in range(num_to_add):
            if get_reuse_celery_cache():
                while is_disk_cached(job_idx):
                    print('Skipping {}. Already completed'.format(job_idx))
                    done += 1
                    job_idx += 1
                    # Skip what's already done.
            if job_idx >= len(job_params):
                continue # Don't run past the end of the list of parameters to run.
            else:
                delayed = celery_try_predictor.delay(*job_params[job_idx])
                results[job_idx] = delayed
                job_idx += 1
        # Cache finished work. Iterate over a snapshot of the keys: deleting
        # from the dict while iterating the live keys() view raises
        # RuntimeError on Python 3.
        for key in list(results.keys()):
            result = results[key]
            if result.ready():
                accs = result.get()
                disk_cache(accs, key)
                del results[key] # Stop tracking.
                if get_verbose():
                    print('Done with id {}'.format(key))
                done += 1
        if done == len(job_params):
            if get_verbose():
                print('All done!')
            break # All done!
        else:
            # Wait a bit while work gets done.
            print('Completed {} of {} cycles.'.format(done, len(job_params)))
            time.sleep(10) # One check every ten seconds is plenty.

    accuracies = load_and_clear_cache(range(len(job_params)))
    return accuracies
def run_predictors(prediction_functions, backend=SINGLE_CORE_BACKEND, random_seed=0, runs=1, retry_nans=False):
    """
    Runs all prediction functions on the same data in a
    batch process across the configured number of CPUs.

    One job is created per (prediction function, fold) pair per run, so each
    function is evaluated NUM_FOLDS * runs times.

    Returns the accuracies of the functions as list of arrays
    ordered by function.
    NOTE(review): on Python 3 the return value is a lazy map of maps, not a
    list of lists — confirm callers iterate it only once or materialize it.
    """
    species, trait = get_species_and_trait()

    # Set up the parameters for processing.
    pred_func_idxs = range(len(prediction_functions))

    accuracy_results = []
    for _ in range(runs):
        job_params = []
        for prediction_function_idx in pred_func_idxs:
            for fold_idx in range(NUM_FOLDS):
                # identifier lets results be re-sorted after parallel execution.
                identifier = (fold_idx, prediction_function_idx)
                prediction_function = prediction_functions[prediction_function_idx]
                params = (prediction_function, random_seed, species, trait, identifier, retry_nans, get_celery_gpu())
                job_params.append(params)

        # Run the jobs and return a tuple of the accuracy and the id (which is also a tuple).
        if backend == PARALLEL_BACKEND and get_use_celery():
            accuracies = _run_celery(job_params)
        elif backend == PARALLEL_BACKEND:
            accuracies = _run_joblib(job_params)
        elif backend == SINGLE_CORE_BACKEND:
            accuracies = _run_debug(job_params)
        else:
            print('Unsupported Backend Settings.')
            sys.exit(1)

        accuracy_results.append(accuracies)

        random_seed = np.random.randint(MIN_INT, MAX_INT) # New seed to obtain new data folds this run.

    accuracies = list(chain.from_iterable(accuracy_results))

    # Sort results by prediction function, default is ascending.
    # This puts things back into the order they were made
    # which is also the order they were passed into this function.
    accuracies.sort(key=lambda x: x[IDENTIFIER_IDX][PRED_FUNC_IDX])

    grouped = []
    # Create groups of results, one group per prediction function.
    # Because we just sorted the results, new groups start every
    # (NUM_FOLDS * runs) elements.
    group_size = NUM_FOLDS * runs
    for idx in range(0, len(accuracies), group_size):
        group = accuracies[idx:idx+(group_size)]
        grouped.append(group)

    # Drop everything from the output except the accuracy, but
    # still return the accuracies grouped by which prediction
    # function ran them.
    return map(lambda x: map(lambda y: y[ACCURACY_IDX], x), grouped)
|
{"hexsha": "ab9a1761b38b228b7fe97918f1a7f9d87af74249", "size": 6342, "ext": "py", "lang": "Python", "max_stars_repo_path": "genomic_neuralnet/common/parallel_predictors.py", "max_stars_repo_name": "lambdaman/genomic-neuralnet", "max_stars_repo_head_hexsha": "67ed4f55dc8d5fefb1d9e3a9fc920a0b643fe9c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-05-25T22:07:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-01T19:17:52.000Z", "max_issues_repo_path": "genomic_neuralnet/common/parallel_predictors.py", "max_issues_repo_name": "prabaharravichandran/genomic-neuralnet", "max_issues_repo_head_hexsha": "67ed4f55dc8d5fefb1d9e3a9fc920a0b643fe9c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "genomic_neuralnet/common/parallel_predictors.py", "max_forks_repo_name": "prabaharravichandran/genomic-neuralnet", "max_forks_repo_head_hexsha": "67ed4f55dc8d5fefb1d9e3a9fc920a0b643fe9c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-10-12T17:05:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T23:58:12.000Z", "avg_line_length": 39.3913043478, "max_line_length": 117, "alphanum_fraction": 0.642068748, "include": true, "reason": "import numpy", "num_tokens": 1378}
|
import numpy as np
from tqdm.auto import tqdm
import matplotlib.pyplot as plt
import pandas as pd
from argent.live_plot import LivePlot
class Sweep:
    def __init__(self, client, x, start, stop, steps, averages=1, sweeps=1, plot=None, legend=None):
        ''' Run a sweep across one or more variables.

            Arguments:
                client: handle to argent.Client
                x (str): name of independent variable to sweep
                start (float): beginning of sweep
                stop (float): end of sweep
                steps (int): number of steps in sweep
                averages (int): cycles to be repeated at each sweep point to gather statistics
                sweeps (int): number of sweeps to perform
                plot (str): a variable name can be passed to this argument to display a live plot
                legend (list): an optional second variable to iterate over. Should be a list with two elements,
                               where the first is the name of the variable and the second is the set of points to try.
                               Example: legend=['z', (0, 1, 2, 3)]
        '''
        self.x = x
        self.start = start
        self.stop = stop
        self.steps = steps
        self.client = client
        self.sweeps = sweeps
        self.averages = averages
        self.y = plot
        self.dataset = self.client.dataset()
        self.legend = legend
        # Only create a live plot when a plot variable was requested; run()
        # must tolerate progress_plot being absent (previously it crashed with
        # AttributeError whenever plot was None).
        self.progress_plot = None
        if plot is not None:
            self.progress_plot = LivePlot(client, x, plot, xlim=[start, stop], legend=legend)
        self.run()

    def run(self):
        """Execute the sweep(s), optionally iterating a legend variable."""
        sweep_points = np.linspace(self.start, self.stop, self.steps)
        if self.y is None:
            sweep_points = tqdm(sweep_points)   # show progress bar if no variable is designated for plotting
        # With no legend, iterate a single dummy entry so both cases share one loop.
        legend_points = [None] if self.legend is None else self.legend[1]
        for z0 in legend_points:
            if z0 is not None:
                self.client.set(self.legend[0], z0)
            for _ in range(self.sweeps):
                for point in sweep_points:
                    self.client.set(self.x, point)
                    self.client.collect(self.averages)
                    if self.progress_plot is not None:
                        self.progress_plot.update()
        self.dataset.stop()

    def save(self, filename):
        """Write the collected dataset to *filename* as CSV."""
        self.dataset.data.to_csv(filename)
|
{"hexsha": "aa93fd85d539f81b1633c85326d2ec802bdaeb39", "size": 2517, "ext": "py", "lang": "Python", "max_stars_repo_path": "argent/sweep.py", "max_stars_repo_name": "robertfasano/argent", "max_stars_repo_head_hexsha": "49a779e54063ad4f6432b78d1f8070d2f0a932a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "argent/sweep.py", "max_issues_repo_name": "robertfasano/argent", "max_issues_repo_head_hexsha": "49a779e54063ad4f6432b78d1f8070d2f0a932a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2020-11-01T20:39:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T18:23:49.000Z", "max_forks_repo_path": "argent/sweep.py", "max_forks_repo_name": "robertfasano/argent", "max_forks_repo_head_hexsha": "49a779e54063ad4f6432b78d1f8070d2f0a932a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.262295082, "max_line_length": 111, "alphanum_fraction": 0.5709177592, "include": true, "reason": "import numpy", "num_tokens": 522}
|
# coding=utf-8
'''
@ Summary: 获取wav音频数据
@ Update: 1.0.2 计算wav的mfcc数据
@ file: get_output_from_network.py
@ version: 2.0.0 获取cnn 网络的中间变量并输出
@ version: 2.0.1 代码重构
@ version: 2.0.2 保存每一个层输出的最大值和最小值
@ Date: 2020/05/27
需要对批量数据推理时的每一层的输出;
CNN好像出了点玄学问题,转到office_get_layers_output.py 继续更新
@ Author: Lebhoryi@gmail.com
@ Date: 2020/4/28 15:01
'''
import os
import sys
import argparse
import tensorflow as tf
import numpy as np
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.python.ops import io_ops
from models_rebuild import *
os.environ["TF_CPP_MIN_LOG_LEVEL"]='2' # Only showing warning & Error
sys.path.append("..")
def load_files(path):
    """Read a text file and return its lines, excluding batch-norm entries.

    Args:
        path (str): text file with one entry (variable name / weight) per line.

    Returns:
        list[str]: lines with trailing newlines removed, excluding any line
        that starts with "bn".
    """
    # `with` guarantees the handle is closed (the old code leaked it), and
    # splitlines() strips newlines without chopping a real character when the
    # final line has no trailing newline (the old `x[:-1]` did).
    with open(path) as f:
        lines = f.read().splitlines()
    return [line for line in lines if not line.startswith("bn")]
def get_mfcc(wav_path):
    '''Read a wav file and compute its MFCC features.

    Args:
        wav_path (str): path to a mono 16 kHz wav file.

    Returns:
        A TensorFlow op producing the MFCC tensor (evaluate it in a session).
    '''
    # get wav
    wav_loader = io_ops.read_file(wav_path)
    # wav_decoder: (audio, sample_rate) (16000, 1)
    wav_decoder = contrib_audio.decode_wav(wav_loader,
                                           desired_channels=1,
                                           desired_samples=16000)
    # stft , get spectrogram -- 640-sample window/stride = 40 ms at 16 kHz
    spectrogram = contrib_audio.audio_spectrogram(
        wav_decoder.audio,
        window_size=640,
        stride=640,
        magnitude_squared=True)
    # get mfcc (C, H, W): 40 mel filterbank channels, 20 Hz - 4 kHz, 10 DCT coefficients
    _mfcc = contrib_audio.mfcc(
        spectrogram,
        wav_decoder.sample_rate,
        upper_frequency_limit=4000,
        lower_frequency_limit=20,
        filterbank_channel_count=40,
        dct_coefficient_count=10)

    # mfcc = _mfcc.eval()
    return _mfcc
def main(_):
    """Rebuild the model from exported weights, run it on one wav file, and
    print each layer's output range plus the final softmax prediction.

    Reads all configuration from the module-level FLAGS namespace.
    """
    # recreate the model, load weights from weight.h and run on test
    sess = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True))

    # Begin by making sure we have the training data we need. If you already have
    # training data of your own, use `--data_url= ` on the command line to avoid
    # downloading.
    model_settings = prepare_model_settings(
        FLAGS.label_count,
        FLAGS.sample_rate,
        FLAGS.clip_duration_ms,
        FLAGS.window_size_ms,
        FLAGS.window_stride_ms,
        FLAGS.dct_coefficient_count)

    # load variable names
    variable_names = load_files(path=FLAGS.variables_path)
    # load variables from weight file
    variable_lines = load_files(path=FLAGS.weight_path)

    # get mfcc from wav file
    mfcc = get_mfcc(FLAGS.wav_path)  # (1, 25, 10)

    # shape (B, fingerprint_input) -- flatten MFCC into the model's input vector
    input_data = tf.reshape(mfcc, (1, model_settings['fingerprint_size']))

    # test model; `output` is expected to hold every layer's output tensor,
    # with the final logits last -- TODO confirm against create_model
    output = create_model(input_data, variable_lines, model_settings,
                          variable_names, FLAGS.label_count,
                          FLAGS.model_architecture,
                          FLAGS.model_size_info)

    sess.run(tf.global_variables_initializer())
    res = sess.run(output)
    # res = tf.reshape(res[-1], (FLAGS.label_count))
    predictions = sess.run(tf.nn.softmax(res[-1]))
    # print(predictions.sum())

    # Print the min/max of every layer's output (useful for quantization ranges).
    for i in range(len(res)):
        print(f'第{i+1}层输出的最大值是:{res[i].max()},'
              f' 最小值是:{res[i].min()}' + '\n')
        # with open(FLAGS.every_network_output, 'a') as f:
        #     f.write(f'第{i+1}层输出的最大值是:{res[i].max()},'
        #             f' 最小值是:{res[i].min()}' + '\n')

    # argmax over the softmax gives the predicted label index.
    index = predictions.argmax()
    print(f"predictions : {predictions}")
    print(f"index : {index}")
if __name__ == '__main__':
    # Command-line interface: paths, audio-frontend settings, and model shape.
    parser = argparse.ArgumentParser()
    parser.add_argument('--wav_path', type = str,
                        default = '../../data/nihaoxr/2.wav', help = 'Where to load wav file.')
    parser.add_argument('--weight_path', type = str,
                        default = '../weights_h/521_cnn/without_quant.h',
                        help = 'Where to load weight file. ')
    parser.add_argument('--variables_path', type = str,
                        default = '../weights_h/521_cnn/name.txt',
                        help = 'The file saved variables name')
    parser.add_argument('--every_network_output', type = str,
                        default = '', required = True, help = 'Where to save network max/min outputs.')
    parser.add_argument('--dct_coefficient_count', type = int,
                        default = 10, help = 'How many bins to use for the MFCC fingerprint',)
    parser.add_argument('--window_size_ms', type=float,
                        default = 40.0, help = 'How long each spectrogram timeslice is',)
    parser.add_argument('--window_stride_ms', type=float,
                        default = 40.0, help = 'How long each spectrogram timeslice is',)
    parser.add_argument('--sample_rate', type = int,
                        default = 16000, help = 'Expected sample rate of the wavs',)
    parser.add_argument('--clip_duration_ms', type=int,
                        default=1000, help='Expected duration in milliseconds of the wavs',)
    parser.add_argument('--label_count', type = int,
                        default = 14, help='How many labels',)
    parser.add_argument('--model_architecture', type = str,
                        default = 'cnn', help = 'What model architecture to use')
    # model_size_info encodes the conv/fc layer dimensions for the chosen architecture.
    parser.add_argument('--model_size_info', type = int, nargs = "+",
                        default = [28,10,4,1,1,30,10,4,2,1,16,128],
                        # default = [128, 128, 128],
                        help = 'Model dimensions - different for various models')

    FLAGS, unparsed = parser.parse_known_args()
    # Hand any unparsed args back to tf.app.run so TensorFlow flags still work.
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
{"hexsha": "fd852f576e6f4762bdab1e7b00e1c42a0b09e3ea", "size": 5887, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/get_output_from_network.py", "max_stars_repo_name": "Lebhoryi/ML-KWS-for-MCU", "max_stars_repo_head_hexsha": "47316dafb1cfbb8ead3e049cc846bdb45f3e7995", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-15T20:34:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-15T20:34:14.000Z", "max_issues_repo_path": "utils/get_output_from_network.py", "max_issues_repo_name": "Lebhoryi/ML-KWS-for-MCU", "max_issues_repo_head_hexsha": "47316dafb1cfbb8ead3e049cc846bdb45f3e7995", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/get_output_from_network.py", "max_forks_repo_name": "Lebhoryi/ML-KWS-for-MCU", "max_forks_repo_head_hexsha": "47316dafb1cfbb8ead3e049cc846bdb45f3e7995", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9945652174, "max_line_length": 108, "alphanum_fraction": 0.5914727365, "include": true, "reason": "import numpy", "num_tokens": 1472}
|
import unittest
import numpy as np
from abcpy.backends import BackendDummy
from abcpy.continuousmodels import Normal
from abcpy.continuousmodels import Uniform
from abcpy.inferences import DrawFromPrior
from abcpy.output import Journal, GenerateFromJournal
class JournalTests(unittest.TestCase):
    """Unit tests for the abcpy ``Journal`` results container.

    Convention visible throughout: ``Journal(0)`` ("production" mode) keeps
    only the most recent entry per quantity, while ``Journal(1)``
    ("reconstruction" mode) keeps one entry per iteration.
    """

    # def test_add_parameters(self):
    #     params1 = np.zeros((2,4))
    #     params2 = np.ones((2,4))
    #
    #     # test whether production mode only stores the last set of parameters
    #     journal_prod = Journal(0)
    #     journal_prod.add_parameters(params1)
    #     journal_prod.add_parameters(params2)
    #     self.assertEqual(len(journal_prod.parameters), 1)
    #     np.testing.assert_equal(journal_prod.parameters[0], params2)
    #
    #     # test whether reconstruction mode stores all parameter sets
    #     journal_recon = Journal(1)
    #     journal_recon.add_parameters(params1)
    #     journal_recon.add_parameters(params2)
    #     self.assertEqual(len(journal_recon.parameters), 2)
    #     np.testing.assert_equal(journal_recon.parameters[0], params1)
    #     np.testing.assert_equal(journal_recon.parameters[1], params2)

    def test_add_weights(self):
        """Journal(0) keeps only the last weights set; Journal(1) keeps all of them."""
        weights1 = np.zeros((2, 4))
        weights2 = np.ones((2, 4))
        # test whether production mode only stores the last set of parameters
        journal_prod = Journal(0)
        journal_prod.add_weights(weights1)
        journal_prod.add_weights(weights2)
        self.assertEqual(len(journal_prod.weights), 1)
        np.testing.assert_equal(journal_prod.weights[0], weights2)
        # test whether reconstruction mode stores all parameter sets
        journal_recon = Journal(1)
        journal_recon.add_weights(weights1)
        journal_recon.add_weights(weights2)
        self.assertEqual(len(journal_recon.weights), 2)
        np.testing.assert_equal(journal_recon.weights[0], weights1)
        np.testing.assert_equal(journal_recon.weights[1], weights2)

    def test_add_simulations(self):
        """Accepted simulations follow the same keep-last / keep-all convention.

        NOTE(review): the ``len(...) == 2`` checks below count rows of the kept
        (2, 4) array, not the number of stored iterations — confirm intent.
        """
        simulations1 = np.zeros((2, 4))
        simulations2 = np.ones((2, 4))
        # test whether production mode only stores the last set of parameters
        journal_prod = Journal(0)
        journal_prod.add_accepted_simulations(simulations1)
        journal_prod.add_accepted_simulations(simulations2)
        self.assertEqual(len(journal_prod.get_accepted_simulations()), 2)
        np.testing.assert_equal(journal_prod.get_accepted_simulations(), simulations2)
        # test whether reconstruction mode stores all parameter sets
        journal_recon = Journal(1)
        journal_recon.add_accepted_simulations(simulations1)
        journal_recon.add_accepted_simulations(simulations2)
        self.assertEqual(len(journal_recon.get_accepted_simulations()), 2)
        np.testing.assert_equal(journal_recon.get_accepted_simulations(0), simulations1)
        np.testing.assert_equal(journal_recon.get_accepted_simulations(1), simulations2)
        # test whether not storing it returns the correct value
        journal_empty = Journal(0)
        self.assertIsNone(journal_empty.get_accepted_simulations())

    def test_add_cov_mats(self):
        """Accepted covariance matrices: keep-last vs keep-all, and None when absent."""
        cov_mats1 = np.zeros((2, 4))
        cov_mats2 = np.ones((2, 4))
        # test whether production mode only stores the last set of parameters
        journal_prod = Journal(0)
        journal_prod.add_accepted_cov_mats(cov_mats1)
        journal_prod.add_accepted_cov_mats(cov_mats2)
        self.assertEqual(len(journal_prod.get_accepted_cov_mats()), 2)
        np.testing.assert_equal(journal_prod.get_accepted_cov_mats(), cov_mats2)
        # test whether reconstruction mode stores all parameter sets
        journal_recon = Journal(1)
        journal_recon.add_accepted_cov_mats(cov_mats1)
        journal_recon.add_accepted_cov_mats(cov_mats2)
        self.assertEqual(len(journal_recon.get_accepted_cov_mats()), 2)
        np.testing.assert_equal(journal_recon.get_accepted_cov_mats(0), cov_mats1)
        np.testing.assert_equal(journal_recon.get_accepted_cov_mats(1), cov_mats2)
        # test whether not storing it returns the correct value
        journal_empty = Journal(0)
        self.assertIsNone(journal_empty.get_accepted_cov_mats())

    def test_load_and_save(self):
        """A journal pickled with ``save`` round-trips through ``fromFile``."""
        params1 = np.zeros((2, 4))
        weights1 = np.zeros((2, 4))
        journal = Journal(0)
        # journal.add_parameters(params1)
        journal.add_weights(weights1)
        journal.save('journal_tests_testfile.pkl')
        new_journal = Journal.fromFile('journal_tests_testfile.pkl')
        # np.testing.assert_equal(journal.parameters, new_journal.parameters)
        np.testing.assert_equal(journal.weights, new_journal.weights)

    def test_ESS(self):
        """ESS estimates are stored per call; identical weights give ESS == n."""
        weights_identical = np.ones((100, 1))
        weights = np.arange(100).reshape(-1, 1)
        journal = Journal(1)
        journal.add_weights(weights_identical)
        journal.add_weights(weights)
        journal.add_ESS_estimate(weights=weights_identical)
        journal.add_ESS_estimate(weights=weights)
        self.assertEqual(len(journal.ESS), 2)
        # no-argument call returns the latest estimate; an index selects one
        self.assertAlmostEqual(journal.get_ESS_estimates(), 74.62311557788945)
        self.assertAlmostEqual(journal.get_ESS_estimates(0), 100)

    def test_plot_ESS(self):
        """plot_ESS works with >= 1 stored estimate and raises on Journal(0)."""
        weights_identical = np.ones((100, 1))
        weights_1 = np.arange(100).reshape(-1, 1)
        weights_2 = np.arange(100, 200).reshape(-1, 1)
        journal = Journal(1)
        journal.add_weights(weights_identical)
        journal.add_ESS_estimate(weights=weights_identical)
        journal.add_weights(weights_1)
        journal.add_ESS_estimate(weights=weights_1)
        journal.add_weights(weights_2)
        journal.add_ESS_estimate(weights=weights_2)
        journal.plot_ESS()
        # a production-mode journal has no history to plot
        journal_2 = Journal(0)
        self.assertRaises(RuntimeError, journal_2.plot_ESS)

    def test_plot_wass_dist(self):
        """Wasserstein convergence plot: happy path (arrays and lists) plus errors.

        NOTE(review): ``params_1`` is reshaped to (-1, 1, 1) unlike the other
        iterations ((-1, 1)) — presumably to exercise shape handling; confirm.
        """
        rng = np.random.RandomState(1)
        weights_identical = np.ones((100, 1))
        params_0 = rng.randn(100).reshape(-1, 1)
        weights_1 = np.arange(100)
        params_1 = rng.randn(100).reshape(-1, 1, 1)
        weights_2 = np.arange(100, 200)
        params_2 = rng.randn(100).reshape(-1, 1)
        weights_3 = np.arange(200, 300)
        params_3 = rng.randn(100).reshape(-1, 1)
        weights_4 = np.arange(300, 400)
        params_4 = rng.randn(100).reshape(-1, 1)
        journal = Journal(1)
        journal.add_weights(weights_identical)
        journal.add_accepted_parameters(params_0)
        journal.add_weights(weights_1)
        journal.add_accepted_parameters(params_1)
        journal.add_weights(weights_2)
        journal.add_accepted_parameters(params_2)
        journal.add_weights(weights_3)
        journal.add_accepted_parameters(params_3)
        journal.add_weights(weights_4)
        journal.add_accepted_parameters(params_4)
        fig, ax, wass_dist_lists = journal.Wass_convergence_plot()
        self.assertAlmostEqual(wass_dist_lists[0], 0.22829193592175878)
        # check the Errors
        journal_2 = Journal(0)
        self.assertRaises(RuntimeError, journal_2.Wass_convergence_plot)
        journal_3 = Journal(1)
        journal_3.add_weights(weights_identical)
        self.assertRaises(RuntimeError, journal_3.Wass_convergence_plot)
        # ragged (object-dtype) parameters cannot be compared across iterations
        journal_4 = Journal(1)
        journal_4.add_accepted_parameters(np.array([np.array([1]), np.array([1, 2])], dtype="object"))
        print(len(journal_4.accepted_parameters))
        self.assertRaises(RuntimeError, journal_4.Wass_convergence_plot)
        # now use lists
        weights_identical = np.ones((100, 1))
        params_0 = params_0.tolist()
        weights_1 = np.arange(100)
        params_1 = params_1.tolist()
        weights_2 = np.arange(100, 200)
        params_2 = params_2.tolist()
        weights_3 = np.arange(200, 300)
        params_3 = params_3.tolist()
        weights_4 = np.arange(300, 400)
        params_4 = params_4.tolist()
        journal = Journal(1)
        journal.add_weights(weights_identical)
        journal.add_accepted_parameters(params_0)
        journal.add_weights(weights_1)
        journal.add_accepted_parameters(params_1)
        journal.add_weights(weights_2)
        journal.add_accepted_parameters(params_2)
        journal.add_weights(weights_3)
        journal.add_accepted_parameters(params_3)
        journal.add_weights(weights_4)
        journal.add_accepted_parameters(params_4)
        fig, ax, wass_dist_lists = journal.Wass_convergence_plot()
        self.assertAlmostEqual(wass_dist_lists[0], 0.22829193592175878)
        # check the Errors
        journal_2 = Journal(0)
        self.assertRaises(RuntimeError, journal_2.Wass_convergence_plot)
        journal_3 = Journal(1)
        journal_3.add_weights(weights_identical)
        self.assertRaises(RuntimeError, journal_3.Wass_convergence_plot)
        journal_4 = Journal(1)
        journal_4.add_accepted_parameters(np.array([np.array([1]), np.array([1, 2])], dtype="object"))
        print(len(journal_4.accepted_parameters))
        self.assertRaises(RuntimeError, journal_4.Wass_convergence_plot)

    def test_plot_post_distr(self):
        """Posterior plots succeed for valid option combinations and reject bad ones."""
        rng = np.random.RandomState(1)
        weights_identical = np.ones((100, 1))
        params = rng.randn(100, 2, 1, 1)
        weights = np.arange(100).reshape(-1, 1)
        journal = Journal(1)
        # two iterations with the same named parameters (iteration 0 and 1)
        journal.add_user_parameters([("par1", params[:, 0]), ("par2", params[:, 1])])
        journal.add_user_parameters([("par1", params[:, 0]), ("par2", params[:, 1])])
        journal.add_weights(weights=weights_identical)
        journal.add_weights(weights=weights)
        journal.plot_posterior_distr(single_marginals_only=True, iteration=0)
        journal.plot_posterior_distr(true_parameter_values=[0.5, 0.3], show_samples=True)
        journal.plot_posterior_distr(double_marginals_only=True, show_samples=True,
                                     true_parameter_values=[0.5, 0.3])
        journal.plot_posterior_distr(contour_levels=10, ranges_parameters={"par1": [-1, 1]},
                                     parameters_to_show=["par1"])
        # unknown parameter name
        with self.assertRaises(KeyError):
            journal.plot_posterior_distr(parameters_to_show=["par3"])
        # mutually exclusive / inconsistent option combinations
        with self.assertRaises(RuntimeError):
            journal.plot_posterior_distr(single_marginals_only=True, double_marginals_only=True)
        with self.assertRaises(RuntimeError):
            journal.plot_posterior_distr(parameters_to_show=["par1"], double_marginals_only=True)
        with self.assertRaises(RuntimeError):
            journal.plot_posterior_distr(parameters_to_show=["par1"], true_parameter_values=[0.5, 0.3])
        # malformed range specifications
        with self.assertRaises(TypeError):
            journal.plot_posterior_distr(ranges_parameters={"par1": [-1]})
        with self.assertRaises(TypeError):
            journal.plot_posterior_distr(ranges_parameters={"par1": np.zeros(1)})

    def test_traceplot(self):
        """traceplot requires 'acceptance_rates' in the journal configuration."""
        rng = np.random.RandomState(1)
        weights_identical = np.ones((100, 1))
        params = rng.randn(100).reshape(-1, 1)
        journal = Journal(1)
        journal.add_weights(weights_identical)
        journal.add_accepted_parameters(params)
        journal.add_user_parameters([("mu", params[:, 0])])
        self.assertRaises(RuntimeError, journal.traceplot)  # as it does not have "acceptance_rates" in configuration
        journal.configuration["acceptance_rates"] = [0.3]
        with self.assertRaises(KeyError):
            journal.traceplot(parameters_to_show=["sigma"])
        # now try correctly:
        fig, ax = journal.traceplot()

    def test_resample(self):
        """resample: bootstrap, subsample, full no-replacement resample, and errors.

        Expected means below are pinned for the fixed seeds used here.
        """
        # -- setup --
        # setup backend
        dummy = BackendDummy()
        # define a uniform prior distribution
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        # define a Gaussian model
        model = Normal([mu, sigma])
        sampler = DrawFromPrior([model], dummy, seed=1)
        original_journal = sampler.sample(100)
        # expected mean values from bootstrapped samples:
        mu_mean = -0.5631214403709973
        sigma_mean = 5.2341427118053705
        # expected mean values from subsampled samples:
        mu_mean_2 = -0.6414897172489
        sigma_mean_2 = 6.217381777130734
        # -- bootstrap --
        new_j = original_journal.resample(path_to_save_journal="tmp.jnl", seed=42)
        mu_sample = np.array(new_j.get_parameters()['mu'])
        sigma_sample = np.array(new_j.get_parameters()['sigma'])
        accepted_parameters = new_j.get_accepted_parameters()
        self.assertEqual(len(accepted_parameters), 100)
        self.assertEqual(len(accepted_parameters[0]), 2)
        # test shape of samples
        mu_shape, sigma_shape = (len(mu_sample), mu_sample[0].shape[1]), \
                                (len(sigma_sample), sigma_sample[0].shape[1])
        self.assertEqual(mu_shape, (100, 1))
        self.assertEqual(sigma_shape, (100, 1))
        # Compute posterior mean
        self.assertAlmostEqual(np.average(mu_sample), mu_mean)
        self.assertAlmostEqual(np.average(sigma_sample), sigma_mean)
        self.assertTrue(new_j.number_of_simulations[0] == 0)
        # check whether the dictionary or parameter list contain same data:
        self.assertEqual(new_j.get_parameters()["mu"][9], new_j.get_accepted_parameters()[9][0])
        self.assertEqual(new_j.get_parameters()["sigma"][7], new_j.get_accepted_parameters()[7][1])
        # -- subsample (replace=False, smaller number than the full sample) --
        new_j_2 = original_journal.resample(replace=False, n_samples=10, seed=42)
        mu_sample = np.array(new_j_2.get_parameters()['mu'])
        sigma_sample = np.array(new_j_2.get_parameters()['sigma'])
        accepted_parameters = new_j_2.get_accepted_parameters()
        self.assertEqual(len(accepted_parameters), 10)
        self.assertEqual(len(accepted_parameters[0]), 2)
        # test shape of samples
        mu_shape, sigma_shape = (len(mu_sample), mu_sample[0].shape[1]), \
                                (len(sigma_sample), sigma_sample[0].shape[1])
        self.assertEqual(mu_shape, (10, 1))
        self.assertEqual(sigma_shape, (10, 1))
        # Compute posterior mean
        self.assertAlmostEqual(np.average(mu_sample), mu_mean_2)
        self.assertAlmostEqual(np.average(sigma_sample), sigma_mean_2)
        self.assertTrue(new_j_2.number_of_simulations[0] == 0)
        # check whether the dictionary or parameter list contain same data:
        self.assertEqual(new_j_2.get_parameters()["mu"][9], new_j_2.get_accepted_parameters()[9][0])
        self.assertEqual(new_j_2.get_parameters()["sigma"][7], new_j_2.get_accepted_parameters()[7][1])
        # -- check that resampling the full samples with replace=False gives the exact same posterior mean and std --
        new_j_3 = original_journal.resample(replace=False, n_samples=100)
        mu_sample = np.array(new_j_3.get_parameters()['mu'])
        sigma_sample = np.array(new_j_3.get_parameters()['sigma'])
        # original journal
        mu_sample_original = np.array(original_journal.get_parameters()['mu'])
        sigma_sample_original = np.array(original_journal.get_parameters()['sigma'])
        # Compute posterior mean and std
        self.assertAlmostEqual(np.average(mu_sample), np.average(mu_sample_original))
        self.assertAlmostEqual(np.average(sigma_sample), np.average(sigma_sample_original))
        self.assertAlmostEqual(np.std(mu_sample), np.std(mu_sample_original))
        self.assertAlmostEqual(np.std(sigma_sample), np.std(sigma_sample_original))
        # check whether the dictionary or parameter list contain same data:
        self.assertEqual(new_j_3.get_parameters()["mu"][9], new_j_3.get_accepted_parameters()[9][0])
        self.assertEqual(new_j_3.get_parameters()["sigma"][7], new_j_3.get_accepted_parameters()[7][1])
        # -- test the error --
        with self.assertRaises(RuntimeError):
            original_journal.resample(replace=False, n_samples=200)
class GenerateFromJournalTests(unittest.TestCase):
    """Tests for ``GenerateFromJournal``: re-simulating data from journal samples."""

    def setUp(self):
        """Draw a 100-sample prior journal and build two generators.

        ``generate_from_journal`` matches the journal's model;
        ``generate_from_journal_2`` deliberately wraps a different model so the
        error path can be exercised.
        """
        backend = BackendDummy()
        # Gaussian model with uniform priors on both parameters
        mu = Uniform([[-5.0], [5.0]], name='mu')
        sigma = Uniform([[0.0], [10.0]], name='sigma')
        self.model = Normal([mu, sigma])
        # an unrelated uniform model, only used to trigger the mismatch error
        self.model2 = Uniform([[0], [10]])
        self.sampler = DrawFromPrior([self.model], backend, seed=1)
        self.original_journal = self.sampler.sample(100)
        self.generate_from_journal = GenerateFromJournal([self.model], backend, seed=2)
        self.generate_from_journal_2 = GenerateFromJournal([self.model2], backend, seed=2)
        # reference posterior means from bootstrapped samples:
        self.mu_mean = -0.2050921750330999
        self.sigma_mean = 5.178647189918053
        # reference posterior means from subsampled samples:
        self.mu_mean_2 = -0.021275259024241676
        self.sigma_mean_2 = 5.672004487129107

    def test_generate(self):
        """Output shapes are correct for 1 and for 3 simulations per parameter."""
        # a single simulation for each parameter value
        params, sims, norm_weights = self.generate_from_journal.generate(journal=self.original_journal)
        self.assertEqual(params.shape, (100, 2))
        self.assertEqual(sims.shape, (100, 1, 1))
        self.assertEqual(norm_weights.shape, (100,))
        # several simulations for each parameter value
        params, sims, norm_weights = self.generate_from_journal.generate(
            self.original_journal, n_samples_per_param=3, iteration=-1)
        self.assertEqual(params.shape, (100, 2))
        self.assertEqual(sims.shape, (100, 3, 1))
        self.assertEqual(norm_weights.shape, (100,))

    def test_errors(self):
        """A generator built on a different model must refuse the journal."""
        with self.assertRaises(RuntimeError):
            self.generate_from_journal_2.generate(self.original_journal)
if __name__ == '__main__':
    # Run the whole test suite when this module is executed as a script.
    unittest.main()
|
{"hexsha": "347cad3bf89a0765a8f2dfca1b9c17bb64659d3e", "size": 18317, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/output_tests.py", "max_stars_repo_name": "vishalbelsare/abcpy", "max_stars_repo_head_hexsha": "72d0d31ae3fa531b69ea3fef39c96af6628ee76f", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 89, "max_stars_repo_stars_event_min_datetime": "2017-02-23T23:34:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T20:35:17.000Z", "max_issues_repo_path": "tests/output_tests.py", "max_issues_repo_name": "vishalbelsare/abcpy", "max_issues_repo_head_hexsha": "72d0d31ae3fa531b69ea3fef39c96af6628ee76f", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2017-03-31T13:24:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-09T11:31:38.000Z", "max_forks_repo_path": "tests/output_tests.py", "max_forks_repo_name": "vishalbelsare/abcpy", "max_forks_repo_head_hexsha": "72d0d31ae3fa531b69ea3fef39c96af6628ee76f", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": 32, "max_forks_repo_forks_event_min_datetime": "2017-03-22T06:27:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-17T15:50:42.000Z", "avg_line_length": 45.9072681704, "max_line_length": 120, "alphanum_fraction": 0.6738002948, "include": true, "reason": "import numpy", "num_tokens": 4158}
|
[STATEMENT]
lemma isolated_verts_app_iso[simp]:
"pre_digraph.isolated_verts (app_iso hom G) = iso_verts hom ` isolated_verts"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pre_digraph.isolated_verts (app_iso hom G) = iso_verts hom ` isolated_verts
[PROOF STEP]
using hom
[PROOF STATE]
proof (prove)
using this:
digraph_isomorphism hom
goal (1 subgoal):
1. pre_digraph.isolated_verts (app_iso hom G) = iso_verts hom ` isolated_verts
[PROOF STEP]
by (auto simp: pre_digraph.isolated_verts_def iso_verts_tail inj_image_mem_iff out_arcs_app_iso_eq)
|
{"llama_tokens": 234, "file": "Planarity_Certificates_Planarity_Graph_Genus", "length": 2}
|
[STATEMENT]
lemma disj_assoc:"(((P::'\<alpha> predicate) \<or> Q) \<or> S) = (P \<or> (Q \<or> S))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((P \<or> Q) \<or> S) = (P \<or> Q \<or> S)
[PROOF STEP]
by (rule ext) blast
|
{"llama_tokens": 109, "file": "Circus_Relations", "length": 1}
|
import math
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
__all__ = ["DistSequentialSampler"]
class DistSequentialSampler(Sampler):
    """Deterministic sequential sampler that partitions a dataset across ranks.

    The index range ``[0, dataset.num)`` is split into ``world_size`` contiguous
    chunks of equal length ``ceil(dataset.num / world_size)``.  To keep every
    rank's chunk the same size, the total is padded up to a multiple of
    ``world_size`` and out-of-range indices wrap around to the start of the
    dataset (so a few leading samples may be visited twice overall).

    Args:
        dataset: object exposing ``num``, the number of samples.
        world_size: total number of distributed processes (> 0).
        rank: index of this process, in ``[0, world_size)``.
    """

    def __init__(self, dataset, world_size, rank):
        assert rank >= 0
        # Bug fix: the message previously formatted ``dataset.size``, which the
        # condition never reads (it tests ``dataset.num``); a failing assert
        # would raise AttributeError instead of showing the intended message.
        assert dataset.num >= world_size, '{} vs {}'.format(dataset.num, world_size)
        sub_num = int(math.ceil(1. * dataset.num / world_size))
        # add extra samples to make the total evenly divisible by world_size
        tot_num = sub_num * world_size
        self.dsize = dataset.num
        self.beg = sub_num * rank
        self.end = min(self.beg + sub_num, tot_num)

    def __iter__(self):
        # modulo wraps padding indices (beyond dataset.num) back to the start
        indices = [i % self.dsize for i in range(self.beg, self.end)]
        return iter(indices)

    def __len__(self):
        # number of indices this rank yields (== sub_num for every rank)
        return self.end - self.beg
|
{"hexsha": "543b89b041711163f2953935aac19256f6237f66", "size": 790, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/sampler.py", "max_stars_repo_name": "ardihikaru/hfsoftmax", "max_stars_repo_head_hexsha": "55966f3a902c16df9b1ca93a77c5cd43efd47fd9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 95, "max_stars_repo_stars_event_min_datetime": "2018-02-10T05:12:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T07:58:58.000Z", "max_issues_repo_path": "datasets/sampler.py", "max_issues_repo_name": "ardihikaru/hfsoftmax", "max_issues_repo_head_hexsha": "55966f3a902c16df9b1ca93a77c5cd43efd47fd9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2018-05-15T08:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-06T08:31:56.000Z", "max_forks_repo_path": "datasets/sampler.py", "max_forks_repo_name": "ardihikaru/hfsoftmax", "max_forks_repo_head_hexsha": "55966f3a902c16df9b1ca93a77c5cd43efd47fd9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2018-07-04T09:16:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T20:54:47.000Z", "avg_line_length": 29.2592592593, "max_line_length": 85, "alphanum_fraction": 0.6506329114, "include": true, "reason": "import numpy", "num_tokens": 195}
|
(*
Title: The pi-calculus
Author/Maintainer: Jesper Bengtson (jebe.dk), 2012
*)
theory Strong_Late_Expansion_Law
imports Strong_Late_Bisim_SC
begin
(* The set of top-level summands of a process: a prefixed process (tau, input
   or free output) is its own singleton summand, a sum collects the summands of
   both branches, and a restriction <nu x>P is a summand exactly when P is an
   output a{x}.P' with a ~= x (i.e. a bound-output summand).  All other
   constructors (match, mismatch, parallel, replication, nil) contribute no
   summands.  The trailing apply-script discharges the freshness and
   finiteness obligations of the nominal function package. *)
nominal_primrec summands :: "pi \<Rightarrow> pi set" where
  "summands \<zero> = {}"
| "summands (\<tau>.(P)) = {\<tau>.(P)}"
| "x \<sharp> a \<Longrightarrow> summands (a<x>.P) = {a<x>.P}"
| "summands (a{b}.P) = {a{b}.P}"
| "summands ([a\<frown>b]P) = {}"
| "summands ([a\<noteq>b]P) = {}"
| "summands (P \<oplus> Q) = (summands P) \<union> (summands Q)"
| "summands (P \<parallel> Q) = {}"
| "summands (<\<nu>x>P) = (if (\<exists>a P'. a \<noteq> x \<and> P = a{x}.P') then ({<\<nu>x>P}) else {})"
| "summands (!P) = {}"
apply(auto simp add: fresh_singleton name_fresh_abs fresh_set_empty fresh_singleton pi.fresh)
apply(finite_guess)+
by(fresh_guess)+
(* Unconditional simp form of the input clause of summands: the freshness side
   condition x # a of the primrec equation is removed by alpha-converting the
   input prefix to a fresh bound name y. *)
lemma summandsInput[simp]:
  fixes a :: name
  and x :: name
  and P :: pi

  shows "summands (a<x>.P) = {a<x>.P}"
proof -
  obtain y where yineqa: "y \<noteq> a" and yFreshP: "y \<sharp> P"
    by(force intro: name_exists_fresh[of "(a, P)"] simp add: fresh_prod)
  from yFreshP have "a<x>.P = a<y>.([(x, y)] \<bullet> P)" by(simp add: alphaInput)
  with yineqa show ?thesis by simp
qed
(* Every process has finitely many summands (immediate by structural
   induction, since each clause yields an empty, singleton or union set). *)
lemma finiteSummands:
  fixes P :: pi

  shows "finite(summands P)"
by(induct P rule: pi.induct) auto
(* A restriction whose body outputs the restricted name on the restricted
   channel, <nu x>x{y}.P', can never be a summand: the summands clause for
   restriction requires the subject to differ from the bound name. *)
lemma boundSummandDest[dest]:
  fixes x  :: name
  and y  :: name
  and P' :: pi
  and P  :: pi

  assumes "<\<nu>x>x{y}.P' \<in> summands P"

  shows False
using assms
by(induct P rule: pi.induct, auto simp add: if_split pi.inject name_abs_eq name_calc)
(* Freshness is inherited by summands: a name fresh for a process is fresh for
   each of its summands. *)
lemma summandFresh:
  fixes P :: pi
  and Q :: pi
  and x :: name

  assumes "P \<in> summands Q"
  and "x \<sharp> Q"

  shows "x \<sharp> P"
using assms
by(nominal_induct Q avoiding: P rule: pi.strong_induct, auto simp add: if_split)
(* hnf P: P is in head normal form, i.e. nil, a prefixed process, a sum of two
   non-nil head normal forms, or a restriction of a bound output a{x}.P' with
   a ~= x.  Match, mismatch, parallel and replication are never in hnf — such
   processes must first be rewritten (expanded) before their transitions are
   characterised by their summands (see summandTransition below). *)
nominal_primrec hnf :: "pi \<Rightarrow> bool" where
  "hnf \<zero> = True"
| "hnf (\<tau>.(P)) = True"
| "x \<sharp> a \<Longrightarrow> hnf (a<x>.P) = True"
| "hnf (a{b}.P) = True"
| "hnf ([a\<frown>b]P) = False"
| "hnf ([a\<noteq>b]P) = False"
| "hnf (P \<oplus> Q) = ((hnf P) \<and> (hnf Q) \<and> P \<noteq> \<zero> \<and> Q \<noteq> \<zero>)"
| "hnf (P \<parallel> Q) = False"
| "hnf (<\<nu>x>P) = (\<exists>a P'. a \<noteq> x \<and> P = a{x}.P')"
| "hnf (!P) = False"
apply(auto simp add: fresh_bool)
apply(finite_guess)+
by(fresh_guess)+
(* Unconditional simp form of the input clause of hnf, obtained — exactly as
   for summandsInput — by alpha-converting the bound name to a fresh one. *)
lemma hnfInput[simp]:
  fixes a :: name
  and x :: name
  and P :: pi

  shows "hnf (a<x>.P)"
proof -
  obtain y where yineqa: "y \<noteq> a" and yFreshP: "y \<sharp> P"
    by(force intro: name_exists_fresh[of "(a, P)"] simp add: fresh_prod)
  from yFreshP have "a<x>.P = a<y>.([(x, y)] \<bullet> P)" by(simp add: alphaInput)
  with yineqa show ?thesis by simp
qed
(* For a process P in head normal form, the transitions of P are exactly
   characterised by its summands: P performs an action iff the corresponding
   prefixed process is a summand of P.  There are four statements, one per
   action kind (tau, input, free output, bound output).  Each is proved by
   structural induction on P; non-hnf constructors are contradictory cases,
   prefixes are handled by transition-elimination, and Sum combines the two
   induction hypotheses.  The bound-output case additionally needs careful
   alpha-conversion reasoning in the Res case. *)
lemma summandTransition:
  fixes P  :: pi
  and a  :: name
  and x  :: name
  and b  :: name
  and P' :: pi

  assumes "hnf P"

  shows "P \<longmapsto>\<tau> \<prec> P' = (\<tau>.(P') \<in> summands P)"
  and "P \<longmapsto>a<x> \<prec> P' = (a<x>.P' \<in> summands P)"
  and "P \<longmapsto>a[b] \<prec> P' = (a{b}.P' \<in> summands P)"
  and "a \<noteq> x \<Longrightarrow> P \<longmapsto>a<\<nu>x> \<prec> P' = (<\<nu>x>a{x}.P' \<in> summands P)"
proof -
  (* Statement 1: tau transitions correspond to tau-prefixed summands. *)
  from assms show "P \<longmapsto>\<tau> \<prec> P' = (\<tau>.(P') \<in> summands P)"
  proof(induct P rule: pi.induct)
    case PiNil
    show ?case by auto
  next
    case(Output a b P)
    show ?case by auto
  next
    case(Tau P)
    have "\<tau>.(P) \<longmapsto>\<tau> \<prec> P' \<Longrightarrow> \<tau>.(P') \<in> summands (\<tau>.(P))"
      by(auto elim: tauCases simp add: pi.inject residual.inject)
    moreover have "\<tau>.(P') \<in> summands (\<tau>.(P)) \<Longrightarrow> \<tau>.(P) \<longmapsto>\<tau> \<prec> P'"
      by(auto simp add: pi.inject intro: transitions.Tau)
    ultimately show ?case by blast
  next
    case(Input a x P)
    show ?case by auto
  next
    case(Match a b P)
    have "hnf ([a\<frown>b]P)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Mismatch a b P)
    have "hnf ([a\<noteq>b]P)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Sum P Q)
    have "hnf (P \<oplus> Q)" by fact
    hence Phnf: "hnf P" and Qhnf: "hnf Q" by simp+
    have IHP: "P \<longmapsto>\<tau> \<prec> P' = (\<tau>.(P') \<in> summands P)"
    proof -
      have "hnf P \<Longrightarrow> P \<longmapsto>\<tau> \<prec> P' = (\<tau>.(P') \<in> summands P)" by fact
      with Phnf show ?thesis by simp
    qed
    have IHQ: "Q \<longmapsto>\<tau> \<prec> P' = (\<tau>.(P') \<in> summands Q)"
    proof -
      have "hnf Q \<Longrightarrow> Q \<longmapsto>\<tau> \<prec> P' = (\<tau>.(P') \<in> summands Q)" by fact
      with Qhnf show ?thesis by simp
    qed
    from IHP IHQ have "P \<oplus> Q \<longmapsto>\<tau> \<prec> P' \<Longrightarrow> \<tau>.(P') \<in> summands (P \<oplus> Q)"
      by(erule_tac sumCases, auto)
    moreover from IHP IHQ have "\<tau>.(P') \<in> summands (P \<oplus> Q) \<Longrightarrow> P \<oplus> Q \<longmapsto>\<tau> \<prec> P'"
      by(auto dest: Sum1 Sum2)
    ultimately show ?case by blast
  next
    case(Par P Q)
    have "hnf (P \<parallel> Q)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Res x P)
    thus ?case by(auto elim: resCasesF)
  next
    case(Bang P)
    have "hnf (!P)" by fact
    hence False by simp
    thus ?case by simp
  qed
next
  (* Statement 2: input transitions correspond to input-prefixed summands;
     the Input case needs alpha-conversion of the bound name. *)
  from assms show "P \<longmapsto>a<x> \<prec> P' = (a<x>.P' \<in> summands P)"
  proof(induct P rule: pi.induct)
    case PiNil
    show ?case by auto
  next
    case(Output c b P)
    show ?case by auto
  next
    case(Tau P)
    show ?case by auto
  next
    case(Input b y P)
    have "b<y>.P \<longmapsto>a<x> \<prec> P' \<Longrightarrow> a<x>.P' \<in> summands (b<y>.P)"
      by(auto elim: inputCases' simp add: pi.inject residual.inject)
    moreover have "a<x>.P' \<in> summands (b<y>.P) \<Longrightarrow> b<y>.P \<longmapsto>a<x> \<prec> P'"
      apply(auto simp add: pi.inject name_abs_eq intro: Late_Semantics.Input)
      apply(subgoal_tac "b<x> \<prec> [(x, y)] \<bullet> P = (b<y> \<prec> [(x, y)] \<bullet> [(x, y)] \<bullet> P)")
      apply(auto intro: Late_Semantics.Input)
      by(simp add: alphaBoundResidual name_swap)
    ultimately show ?case by blast
  next
    case(Match a b P)
    have "hnf ([a\<frown>b]P)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Mismatch a b P)
    have "hnf ([a\<noteq>b]P)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Sum P Q)
    have "hnf (P \<oplus> Q)" by fact
    hence Phnf: "hnf P" and Qhnf: "hnf Q" by simp+
    have IHP: "P \<longmapsto>a<x> \<prec> P' = (a<x>.P' \<in> summands P)"
    proof -
      have "hnf P \<Longrightarrow> P \<longmapsto>a<x> \<prec> P' = (a<x>.P' \<in> summands P)" by fact
      with Phnf show ?thesis by simp
    qed
    have IHQ: "Q \<longmapsto>a<x> \<prec> P' = (a<x>.P' \<in> summands Q)"
    proof -
      have "hnf Q \<Longrightarrow> Q \<longmapsto>a<x> \<prec> P' = (a<x>.P' \<in> summands Q)" by fact
      with Qhnf show ?thesis by simp
    qed
    from IHP IHQ have "P \<oplus> Q \<longmapsto>a<x> \<prec> P' \<Longrightarrow> a<x>.P' \<in> summands (P \<oplus> Q)"
      by(erule_tac sumCases, auto)
    moreover from IHP IHQ have "a<x>.P' \<in> summands (P \<oplus> Q) \<Longrightarrow> P \<oplus> Q \<longmapsto>a<x> \<prec> P'"
      by(auto dest: Sum1 Sum2)
    ultimately show ?case by blast
  next
    case(Par P Q)
    have "hnf (P \<parallel> Q)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Res y P)
    have "hnf(<\<nu>y>P)" by fact
    thus ?case by(auto simp add: if_split)
  next
    case(Bang P)
    have "hnf (!P)" by fact
    hence False by simp
    thus ?case by simp
  qed
next
  (* Statement 3: free-output transitions correspond to output-prefixed
     summands. *)
  from assms show "P \<longmapsto>a[b] \<prec> P' = (a{b}.P' \<in> summands P)"
  proof(induct P rule: pi.induct)
    case PiNil
    show ?case by auto
  next
    case(Output c d P)
    have "c{d}.P \<longmapsto>a[b] \<prec> P' \<Longrightarrow> a{b}.P' \<in> summands (c{d}.P)"
      by(auto elim: outputCases simp add: residual.inject pi.inject)
    moreover have "a{b}.P' \<in> summands (c{d}.P) \<Longrightarrow> c{d}.P \<longmapsto>a[b] \<prec> P'"
      by(auto simp add: pi.inject intro: transitions.Output)
    ultimately show ?case by blast
  next
    case(Tau P)
    show ?case by auto
  next
    case(Input c x P)
    show ?case by auto
  next
    case(Match a b P)
    have "hnf ([a\<frown>b]P)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Mismatch a b P)
    have "hnf ([a\<noteq>b]P)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Sum P Q)
    have "hnf (P \<oplus> Q)" by fact
    hence Phnf: "hnf P" and Qhnf: "hnf Q" by simp+
    have IHP: "P \<longmapsto>a[b] \<prec> P' = (a{b}.P' \<in> summands P)"
    proof -
      have "hnf P \<Longrightarrow> P \<longmapsto>a[b] \<prec> P' = (a{b}.P' \<in> summands P)" by fact
      with Phnf show ?thesis by simp
    qed
    have IHQ: "Q \<longmapsto>a[b] \<prec> P' = (a{b}.P' \<in> summands Q)"
    proof -
      have "hnf Q \<Longrightarrow> Q \<longmapsto>a[b] \<prec> P' = (a{b}.P' \<in> summands Q)" by fact
      with Qhnf show ?thesis by simp
    qed
    from IHP IHQ have "P \<oplus> Q \<longmapsto>a[b] \<prec> P' \<Longrightarrow> a{b}.P' \<in> summands (P \<oplus> Q)"
      by(erule_tac sumCases, auto)
    moreover from IHP IHQ have "a{b}.P' \<in> summands (P \<oplus> Q) \<Longrightarrow> P \<oplus> Q \<longmapsto>a[b] \<prec> P'"
      by(auto dest: Sum1 Sum2)
    ultimately show ?case by blast
  next
    case(Par P Q)
    have "hnf (P \<parallel> Q)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Res x P)
    have "hnf (<\<nu>x>P)" by fact
    thus ?case by(force elim: resCasesF outputCases simp add: if_split residual.inject)
  next
    case(Bang P)
    have "hnf (!P)" by fact
    hence False by simp
    thus ?case by simp
  qed
next
  (* Statement 4: bound-output transitions correspond to restricted-output
     summands; a strong induction avoiding x and P' handles the binders. *)
  assume "a\<noteq>x"
  with assms show "P \<longmapsto>a<\<nu>x> \<prec> P' = (<\<nu>x>a{x}.P' \<in> summands P)"
  proof(nominal_induct P avoiding: x P' rule: pi.strong_induct)
    case PiNil
    show ?case by auto
  next
    case(Output a b P)
    show ?case by auto
  next
    case(Tau P)
    show ?case by auto
  next
    case(Input a x P)
    show ?case by auto
  next
    case(Match a b P)
    have "hnf ([a\<frown>b]P)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Mismatch a b P)
    have "hnf ([a\<noteq>b]P)" by fact
    hence False by simp
    thus ?case by simp
  next
    case(Sum P Q)
    have "hnf (P \<oplus> Q)" by fact
    hence Phnf: "hnf P" and Qhnf: "hnf Q" by simp+
    have aineqx: "a \<noteq> x" by fact
    have IHP: "P \<longmapsto>a<\<nu>x> \<prec> P' = (<\<nu>x>a{x}.P' \<in> summands P)"
    proof -
      have "\<And>x P'. \<lbrakk>hnf P; a \<noteq> x\<rbrakk> \<Longrightarrow> P \<longmapsto>a<\<nu>x> \<prec> P' = (<\<nu>x>a{x}.P' \<in> summands P)" by fact
      with Phnf aineqx show ?thesis by simp
    qed
    have IHQ: "Q \<longmapsto>a<\<nu>x> \<prec> P' = (<\<nu>x>a{x}.P' \<in> summands Q)"
    proof -
      have "\<And>x Q'. \<lbrakk>hnf Q; a \<noteq> x\<rbrakk> \<Longrightarrow> Q \<longmapsto>a<\<nu>x> \<prec> P' = (<\<nu>x>a{x}.P' \<in> summands Q)" by fact
      with Qhnf aineqx show ?thesis by simp
    qed
    from IHP IHQ have "P \<oplus> Q \<longmapsto>a<\<nu>x> \<prec> P' \<Longrightarrow> <\<nu>x>a{x}.P' \<in> summands (P \<oplus> Q)"
      by(erule_tac sumCases, auto)
    moreover from IHP IHQ have "<\<nu>x>a{x}.P' \<in> summands (P \<oplus> Q) \<Longrightarrow> P \<oplus> Q \<longmapsto>a<\<nu>x> \<prec> P'"
      by(auto dest: Sum1 Sum2)
    ultimately show ?case by blast
  next
    case(Par P Q)
    have "hnf (P \<parallel> Q)" by fact
    hence False by simp
    thus ?case by simp
  next
    (* The interesting case: since hnf(<nu y>P) forces P = b{y}.P'', the
       transition must open the restriction; the two directions are related
       by explicit alpha-conversion of the restricted name. *)
    case(Res y P)
    have Phnf: "hnf (<\<nu>y>P)" by fact
    then obtain b P'' where bineqy: "b \<noteq> y" and PeqP'': "P = b{y}.P''"
      by auto
    have "y \<sharp> x" by fact hence xineqy: "x \<noteq> y" by simp
    have yFreshP': "y \<sharp> P'" by fact
    have aineqx: "a\<noteq>x" by fact
    have "<\<nu>y>P \<longmapsto>a<\<nu>x> \<prec> P' \<Longrightarrow> (<\<nu>x>a{x}.P' \<in> summands (<\<nu>y>P))"
    proof -
      assume Trans: "<\<nu>y>P \<longmapsto>a<\<nu>x> \<prec> P'"
      hence aeqb: "a = b" using xineqy bineqy PeqP''
        by(induct rule: resCasesB', auto elim: outputCases simp add: residual.inject alpha' abs_fresh pi.inject)
      have Goal: "\<And>x P'. \<lbrakk><\<nu>y>b{y}.P'' \<longmapsto>b<\<nu>x> \<prec> P'; x \<noteq> y; x \<noteq> b; x \<sharp> P''\<rbrakk> \<Longrightarrow>
                          <\<nu>x>b{x}.P' \<in> summands(<\<nu>y>b{y}.P'')"
      proof -
        fix x P'
        assume xFreshP'': "(x::name) \<sharp> P''" and xineqb: "x \<noteq> b"
        assume "<\<nu>y>b{y}.P'' \<longmapsto>b<\<nu>x> \<prec> P'" and xineqy: "x \<noteq> y"
        moreover from \<open>x \<noteq> b\<close> \<open>x \<sharp> P''\<close> \<open>x \<noteq> y\<close> have "x \<sharp> b{y}.P''" by simp
        ultimately show "<\<nu>x>b{x}.P' \<in> summands (<\<nu>y>b{y}.P'')"
        proof(induct rule: resCasesB)
          case(cOpen a P''')
          have "BoundOutputS b = BoundOutputS a" by fact hence beqa: "b = a" by simp
          have Trans: "b{y}.P'' \<longmapsto>a[y] \<prec> P'''" by fact
          with PeqP'' have P''eqP''': "P'' = P'''"
            by(force elim: outputCases simp add: residual.inject)
          with bineqy xineqy xFreshP'' have "y \<sharp> b{x}.([(x, y)] \<bullet> P''')"
            by(simp add: name_fresh_abs name_calc name_fresh_left)
          with bineqy Phnf PeqP'' P''eqP''' xineqb show ?case
            by(simp only: alphaRes, simp add: name_calc)
        next
          case(cRes P''')
          have "b{y}.P'' \<longmapsto>b<\<nu>x> \<prec> P'''" by fact
          hence False by auto
          thus ?case by simp
        qed
      qed
      obtain z where zineqx: "z \<noteq> x" and zineqy: "z \<noteq> y" and zFreshP'': "z \<sharp> P''"
                 and zineqb: "z \<noteq> b" and zFreshP': "z \<sharp> P'"
        by(force intro: name_exists_fresh[of "(x, y, b, P'', P')"] simp add: fresh_prod)
      from zFreshP' aeqb PeqP'' Trans have Trans': "<\<nu>y>b{y}.P'' \<longmapsto>b<\<nu>z> \<prec> [(z, x)] \<bullet> P'"
        by(simp add: alphaBoundResidual name_swap)
      hence "<\<nu>z>b{z}.([(z, x)] \<bullet> P') \<in> summands (<\<nu>y>b{y}.P'')" using zineqy zineqb zFreshP''
        by(rule Goal)
      moreover from bineqy zineqx zFreshP' aineqx aeqb have "x \<sharp> b{z}.([(z, x)] \<bullet> P')"
        by(simp add: name_fresh_left name_calc)
      ultimately have "<\<nu>x>b{x}.P' \<in> summands (<\<nu>y>b{y}.P'')" using zineqb
        by(simp add: alphaRes name_calc)
      with aeqb PeqP'' show ?thesis by blast
    qed
    moreover have "<\<nu>x>a{x}.P' \<in> summands(<\<nu>y>P) \<Longrightarrow> <\<nu>y>P \<longmapsto>a<\<nu>x> \<prec> P'"
    proof -
      assume "<\<nu>x>a{x}.P' \<in> summands(<\<nu>y>P)"
      with PeqP'' have Summ: "<\<nu>x>a{x}.P' \<in> summands(<\<nu>y>b{y}.P'')" by simp
      moreover with bineqy xineqy have aeqb: "a = b"
        by(auto simp add: if_split pi.inject name_abs_eq name_fresh_fresh)
      from bineqy xineqy yFreshP' have "y \<sharp> b{x}.P'" by(simp add: name_calc)
      with Summ aeqb bineqy aineqx have "<\<nu>y>b{y}.([(x, y)] \<bullet> P') \<in> summands(<\<nu>y>b{y}.P'')"
        by(simp only: alphaRes, simp add: name_calc)
      with aeqb PeqP'' have "<\<nu>y>P \<longmapsto>a<\<nu>y> \<prec> [(x, y)] \<bullet> P'"
        by(auto intro: Open Output simp add: if_split pi.inject name_abs_eq)
      moreover from yFreshP' have "x \<sharp> [(x, y)] \<bullet> P'" by(simp add: name_fresh_left name_calc)
      ultimately show ?thesis by(simp add: alphaBoundResidual name_swap)
    qed
    ultimately show ?case by blast
  next
    case(Bang P)
    have "hnf (!P)" by fact
    hence False by simp
    thus ?case by simp
  qed
qed
(* expandSet P Q: the summands of the expansion of the parallel composition
   P \<parallel> Q, assuming P and Q are in head normal form.  The twelve component
   sets mirror the rules of the operational semantics for parallel:
     - sets 1-2:  tau-prefixed summands of P (resp. Q), paired with the
                  untouched other component            (Par1F/Par2F, tau);
     - sets 3-4:  free-output summands of P (resp. Q)  (Par1F/Par2F, output);
     - sets 5-6:  input summands, with the bound name x fresh for the other
                  component                            (Par1B/Par2B);
     - sets 7-8:  bound-output summands <nu x>a{x}.-   (Par1B/Par2B);
     - sets 9-10: communications input/output and output/input between a
                  summand of P and a summand of Q      (Comm1/Comm2);
     - sets 11-12: scope-extruding communications with a bound output,
                  restricting the extruded name y      (Close1/Close2). *)
definition "expandSet" :: "pi \<Rightarrow> pi \<Rightarrow> pi set" where
  "expandSet P Q \<equiv> {\<tau>.(P' \<parallel> Q) | P'. \<tau>.(P') \<in> summands P} \<union>
                   {\<tau>.(P \<parallel> Q') | Q'. \<tau>.(Q') \<in> summands Q} \<union>
                   {a{b}.(P' \<parallel> Q) | a b P'. a{b}.P' \<in> summands P} \<union>
                   {a{b}.(P \<parallel> Q') | a b Q'. a{b}.Q' \<in> summands Q} \<union>
                   {a<x>.(P' \<parallel> Q) | a x P'. a<x>.P' \<in> summands P \<and> x \<sharp> Q} \<union>
                   {a<x>.(P \<parallel> Q') | a x Q'. a<x>.Q' \<in> summands Q \<and> x \<sharp> P} \<union>
                   {<\<nu>x>a{x}.(P' \<parallel> Q) | a x P'. <\<nu>x>a{x}.P' \<in> summands P \<and> x \<sharp> Q} \<union>
                   {<\<nu>x>a{x}.(P \<parallel> Q') | a x Q'. <\<nu>x>a{x}.Q' \<in> summands Q \<and> x \<sharp> P} \<union>
                   {\<tau>.(P'[x::=b] \<parallel> Q') | x P' b Q'. \<exists>a. a<x>.P' \<in> summands P \<and> a{b}.Q' \<in> summands Q} \<union>
                   {\<tau>.(P' \<parallel> (Q'[x::=b])) | b P' x Q'. \<exists>a. a{b}.P' \<in> summands P \<and> a<x>.Q' \<in> summands Q} \<union>
                   {\<tau>.(<\<nu>y>(P'[x::=y] \<parallel> Q')) | x P' y Q'. \<exists>a. a<x>.P' \<in> summands P \<and> <\<nu>y>a{y}.Q' \<in> summands Q \<and> y \<sharp> P} \<union>
                   {\<tau>.(<\<nu>y>(P' \<parallel> (Q'[x::=y]))) | y P' x Q'. \<exists>a. <\<nu>y>a{y}.P' \<in> summands P \<and> a<x>.Q' \<in> summands Q \<and> y \<sharp> Q}"
(* The expansion set of two processes is finite.  Each of the twelve
   component sets of expandSet is shown finite separately; the finitely many
   summands of P and Q each contribute at most one element per set (up to
   alpha-conversion of bound names, handled by the Aux equations and
   nominal_induct with freshness avoidance).  The result follows by
   finite-union reasoning via simp on expandSet_def. *)
lemma finiteExpand:
  fixes P :: pi
  and   Q :: pi

  shows "finite(expandSet P Q)"
proof -
  (* tau summands of P, and of Q: plain structural induction suffices,
     since no binders are involved. *)
  have "finite {\<tau>.(P' \<parallel> Q) | P'. \<tau>.(P') \<in> summands P}"
    by(induct P rule: pi.induct, auto simp add: pi.inject Collect_ex_eq conj_disj_distribL
                                                Collect_disj_eq UN_Un_distrib)
  moreover have "finite {\<tau>.(P \<parallel> Q') | Q'. \<tau>.(Q') \<in> summands Q}"
    by(induct Q rule: pi.induct, auto simp add: pi.inject Collect_ex_eq conj_disj_distribL
                                                Collect_disj_eq UN_Un_distrib)
  (* free-output summands of P, and of Q. *)
  moreover have "finite {a{b}.(P' \<parallel> Q) | a b P'. a{b}.P' \<in> summands P}"
    by(induct P rule: pi.induct, auto simp add: pi.inject Collect_ex_eq conj_disj_distribL
                                                Collect_disj_eq UN_Un_distrib)
  moreover have "finite {a{b}.(P \<parallel> Q') | a b Q'. a{b}.Q' \<in> summands Q}"
    by(induct Q rule: pi.induct, auto simp add: pi.inject Collect_ex_eq conj_disj_distribL
                                                Collect_disj_eq UN_Un_distrib)
  (* input summands: the bound name forces nominal induction; Aux collapses
     the set of alpha-variants with fresh bound name to a singleton. *)
  moreover have "finite {a<x>.(P' \<parallel> Q) | a x P'. a<x>.P' \<in> summands P \<and> x \<sharp> Q}"
  proof -
    have Aux: "\<And>a x P Q. (x::name) \<sharp> Q \<Longrightarrow> {a'<x'>.(P' \<parallel> Q) |a' x' P'. a'<x'>.P' = a<x>.P \<and> x' \<sharp> Q} = {a<x>.(P \<parallel> Q)}"
      by(auto simp add: pi.inject name_abs_eq name_fresh_fresh)
    thus ?thesis
      by(nominal_induct P avoiding: Q rule: pi.strong_induct,
         auto simp add: Collect_ex_eq conj_disj_distribL conj_disj_distribR
                        Collect_disj_eq UN_Un_distrib)
  qed
  moreover have "finite {a<x>.(P \<parallel> Q') | a x Q'. a<x>.Q' \<in> summands Q \<and> x \<sharp> P}"
  proof -
    have Aux: "\<And>a x P Q. (x::name) \<sharp> P \<Longrightarrow> {a'<x'>.(P \<parallel> Q') |a' x' Q'. a'<x'>.Q' = a<x>.Q \<and> x' \<sharp> P} = {a<x>.(P \<parallel> Q)}"
      by(auto simp add: pi.inject name_abs_eq name_fresh_fresh)
    thus ?thesis
      by(nominal_induct Q avoiding: P rule: pi.strong_induct,
         auto simp add: Collect_ex_eq conj_disj_distribL conj_disj_distribR
                        Collect_disj_eq UN_Un_distrib)
  qed
  (* bound-output summands <nu x>a{x}.- : same technique as the input case. *)
  moreover have "finite {<\<nu>x>a{x}.(P' \<parallel> Q) | a x P'. <\<nu>x>a{x}.P' \<in> summands P \<and> x \<sharp> Q}"
  proof -
    have Aux: "\<And>a x P Q. \<lbrakk>x \<sharp> Q; a \<noteq> x\<rbrakk> \<Longrightarrow> {<\<nu>x'>a'{x'}.(P' \<parallel> Q) |a' x' P'. <\<nu>x'>a'{x'}.P' = <\<nu>x>a{x}.P \<and> x' \<sharp> Q} =
                                             {<\<nu>x>a{x}.(P \<parallel> Q)}"
      by(auto simp add: pi.inject name_abs_eq name_fresh_fresh)
    thus ?thesis
      by(nominal_induct P avoiding: Q rule: pi.strong_induct,
         auto simp add: Collect_ex_eq conj_disj_distribL conj_disj_distribR
                        Collect_disj_eq UN_Un_distrib)
  qed
  moreover have "finite {<\<nu>x>a{x}.(P \<parallel> Q') | a x Q'. <\<nu>x>a{x}.Q' \<in> summands Q \<and> x \<sharp> P}"
  proof -
    have Aux: "\<And>a x P Q. \<lbrakk>x \<sharp> P; a \<noteq> x\<rbrakk> \<Longrightarrow> {<\<nu>x'>a'{x'}.(P \<parallel> Q') |a' x' Q'. <\<nu>x'>a'{x'}.Q' = <\<nu>x>a{x}.Q \<and> x' \<sharp> P} =
                                             {<\<nu>x>a{x}.(P \<parallel> Q)}"
      by(auto simp add: pi.inject name_abs_eq name_fresh_fresh)
    thus ?thesis
      by(nominal_induct Q avoiding: P rule: pi.strong_induct,
         auto simp add: Collect_ex_eq conj_disj_distribL conj_disj_distribR
                        Collect_disj_eq UN_Un_distrib)
  qed
  (* Comm1 summands: input of P synchronising with a free output of Q.
     The inner auxiliary fact fixes one input summand of P and ranges over
     the (finitely many) output summands of Q; the set with the existential
     over a is then a subset of the finite set, hence finite. *)
  moreover have "finite {\<tau>.(P'[x::=b] \<parallel> Q') | x P' b Q'. \<exists>a. a<x>.P' \<in> summands P \<and> a{b}.Q' \<in> summands Q}"
  proof -
    have Aux: "\<And>a x P b Q. {\<tau>.(P'[x'::=b'] \<parallel> Q') | a' x' P' b' Q'. a'<x'>.P' = a<x>.P \<and> a'{b'}.Q' = a{b}.Q} = {\<tau>.(P[x::=b] \<parallel> Q)}"
      by(auto simp add: name_abs_eq pi.inject renaming)
    have "\<And>a x P Q b::'a::{}. finite {\<tau>.(P'[x'::=b] \<parallel> Q') | a' x' P' b Q'. a'<x'>.P' = a<x>.P \<and> a'{b}.Q' \<in> summands Q}"
      apply(induct rule: pi.induct, simp_all)
      apply(case_tac "a=name1")
      apply(simp add: Aux)
      apply(simp add: pi.inject)
      by(simp add: Collect_ex_eq conj_disj_distribL conj_disj_distribR
                   Collect_disj_eq UN_Un_distrib)
    hence "finite {\<tau>.(P'[x::=b] \<parallel> Q') | a x P' b Q'. a<x>.P' \<in> summands P \<and> a{b}.Q' \<in> summands Q}"
      by(nominal_induct P avoiding: Q rule: pi.strong_induct,
         auto simp add: Collect_ex_eq conj_disj_distribL conj_disj_distribR
                        Collect_disj_eq UN_Un_distrib name_abs_eq)
    thus ?thesis
      apply(rule_tac finite_subset)
      defer
      by blast+
  qed
  (* Comm2 summands: free output of P synchronising with an input of Q
     (mirror image of the previous case). *)
  moreover have "finite {\<tau>.(P' \<parallel> (Q'[x::=b])) | b P' x Q'. \<exists>a. a{b}.P' \<in> summands P \<and> a<x>.Q' \<in> summands Q}"
  proof -
    have Aux: "\<And>a x P b Q. {\<tau>.(P' \<parallel> (Q'[x'::=b'])) | a' b' P' x' Q'. a'{b'}.P' = a{b}.P \<and> a'<x'>.Q' = a<x>.Q} = {\<tau>.(P \<parallel> (Q[x::=b]))}"
      by(auto simp add: name_abs_eq pi.inject renaming)
    have "\<And>a b P Q x::'a::{}. finite {\<tau>.(P' \<parallel> (Q'[x::=b'])) | a' b' P' x Q'. a'{b'}.P' = a{b}.P \<and> a'<x>.Q' \<in> summands Q}"
      apply(induct rule: pi.induct, simp_all)
      apply(case_tac "a=name1")
      apply(simp add: Aux)
      apply(simp add: pi.inject)
      by(simp add: Collect_ex_eq conj_disj_distribL conj_disj_distribR
                   Collect_disj_eq UN_Un_distrib)
    hence "finite {\<tau>.(P' \<parallel> (Q'[x::=b])) | a b P' x Q'. a{b}.P' \<in> summands P \<and> a<x>.Q' \<in> summands Q}"
      by(nominal_induct P avoiding: Q rule: pi.strong_induct,
         auto simp add: Collect_ex_eq conj_disj_distribL conj_disj_distribR
                        Collect_disj_eq UN_Un_distrib name_abs_eq)
    thus ?thesis
      apply(rule_tac finite_subset) defer by blast+
  qed
  (* Close1 summands: input of P with a bound output (scope extrusion) of Q.
     Aux collapses alpha-variant representations of one such communication
     to a singleton; BC handles a fixed input prefix of P against all bound
     output summands of Q; IH/IH'/IH'' massage the freshness side conditions
     through the sum case of the outer induction on P. *)
  moreover have "finite {\<tau>.(<\<nu>y>(P'[x::=y] \<parallel> Q')) | x P' y Q'. \<exists>a. a<x>.P' \<in> summands P \<and> <\<nu>y>a{y}.Q' \<in> summands Q \<and> y \<sharp> P}"
  proof -
    have Aux: "\<And>a x P y Q. y \<sharp> P \<and> y \<noteq> a \<Longrightarrow> {\<tau>.(<\<nu>y'>(P'[x'::=y'] \<parallel> Q')) | a' x' P' y' Q'. a'<x'>.P' = a<x>.P \<and> <\<nu>y'>a'{y'}.Q' = <\<nu>y>a{y}.Q \<and> y' \<sharp> a<x>.P} = {\<tau>.(<\<nu>y>(P[x::=y] \<parallel> Q))}"
      apply(auto simp add: pi.inject name_abs_eq name_fresh_abs name_calc fresh_fact2 fresh_fact1 eqvts forget)
      apply(subst name_swap, simp add: injPermSubst fresh_fact1 fresh_fact2)+
      by(simp add: name_swap injPermSubst)+
    have BC: "\<And>a x P Q. finite {\<tau>.(<\<nu>y>(P'[x'::=y] \<parallel> Q')) | a' x' P' y Q'. a'<x'>.P' = a<x>.P \<and> <\<nu>y>a'{y}.Q' \<in> summands Q \<and> y \<sharp> a<x>.P}"
    proof -
      fix a x P Q
      show "finite {\<tau>.(<\<nu>y>(P'[x'::=y] \<parallel> Q')) | a' x' P' y Q'. a'<x'>.P' = a<x>.P \<and> <\<nu>y>a'{y}.Q' \<in> summands Q \<and> y \<sharp> a<x>.P}"
        apply(nominal_induct Q avoiding: a P rule: pi.strong_induct, simp_all)
        apply(simp add: Collect_ex_eq conj_disj_distribL conj_disj_distribR
                        Collect_disj_eq UN_Un_distrib)
        apply(clarsimp)
        apply(case_tac "a=aa")
        apply(insert Aux, auto)
        by(simp add: pi.inject name_abs_eq name_calc)
    qed
    have IH: "\<And>P P' Q. {\<tau>.(<\<nu>y>(P''[x::=y] \<parallel> Q')) | a x P'' y Q'. (a<x>.P'' \<in> summands P \<or> a<x>.P'' \<in> summands P') \<and> <\<nu>y>a{y}.Q' \<in> summands Q \<and> y \<sharp> P \<and> y \<sharp> P'} = {\<tau>.(<\<nu>y>(P''[x::=y] \<parallel> Q')) | a x P'' y Q'. a<x>.P'' \<in> summands P \<and> <\<nu>y>a{y}.Q' \<in> summands Q \<and> y \<sharp> P \<and> y \<sharp> P'} \<union> {\<tau>.(<\<nu>y>(P''[x::=y] \<parallel> Q')) | a x P'' y Q'. a<x>.P'' \<in> summands P' \<and> <\<nu>y>a{y}.Q' \<in> summands Q \<and> y \<sharp> P \<and> y \<sharp> P'}"
      by blast
    have IH': "\<And>P Q P'. {\<tau>.(<\<nu>y>(P''[x::=y] \<parallel> Q')) | a x P'' y Q'. a<x>.P'' \<in> summands P \<and> <\<nu>y>a{y}.Q' \<in> summands Q \<and> y \<sharp> P \<and> y \<sharp> P'} \<subseteq> {\<tau>.(<\<nu>y>(P''[x::=y] \<parallel> Q')) | a x P'' y Q'. a<x>.P'' \<in> summands P \<and> <\<nu>y>a{y}.Q' \<in> summands Q \<and> y \<sharp> P}"
      by blast
    have IH'': "\<And>P Q P'. {\<tau>.(<\<nu>y>(P''[x::=y] \<parallel> Q')) | a x P'' y Q'. a<x>.P'' \<in> summands P' \<and> <\<nu>y>a{y}.Q' \<in> summands Q \<and> y \<sharp> P \<and> y \<sharp> P'} \<subseteq> {\<tau>.(<\<nu>y>(P''[x::=y] \<parallel> Q')) | a x P'' y Q'. a<x>.P'' \<in> summands P' \<and> <\<nu>y>a{y}.Q' \<in> summands Q \<and> y \<sharp> P'}"
      by blast
    have "finite {\<tau>.(<\<nu>y>(P'[x::=y] \<parallel> Q')) | a x P' y Q'. a<x>.P' \<in> summands P \<and> <\<nu>y>a{y}.Q' \<in> summands Q \<and> y \<sharp> P}"
      apply(nominal_induct P avoiding: Q rule: pi.strong_induct, simp_all)
      apply(insert BC, force)
      apply(insert IH, auto)
      apply(blast intro: finite_subset[OF IH'])
      by(blast intro: finite_subset[OF IH''])
    thus ?thesis
      apply(rule_tac finite_subset) defer by(blast)+
  qed
  (* Close2 summands: bound output of P with an input of Q (mirror image of
     the Close1 case, with the roles of the helper facts swapped). *)
  moreover have "finite {\<tau>.(<\<nu>y>(P' \<parallel> (Q'[x::=y]))) | y P' x Q'. \<exists>a. <\<nu>y>a{y}.P' \<in> summands P \<and> a<x>.Q' \<in> summands Q \<and> y \<sharp> Q}"
  proof -
    have Aux: "\<And>a y P x Q. \<lbrakk>y \<sharp> Q; y \<noteq> a\<rbrakk> \<Longrightarrow> {\<tau>.(<\<nu>y'>(P' \<parallel> (Q'[x'::=y']))) | a' y' P' x' Q'. <\<nu>y'>a'{y'}.P' = <\<nu>y>a{y}.P \<and> a'<x'>.Q' = a<x>.Q \<and> y' \<sharp> a<x>.Q} = {\<tau>.(<\<nu>y>(P \<parallel> (Q[x::=y])))}"
      apply(auto simp add: pi.inject name_abs_eq name_fresh_abs name_calc fresh_fact2 fresh_fact1 forget eqvts fresh_left renaming[symmetric])
      apply(subst name_swap, simp add: injPermSubst fresh_fact1 fresh_fact2)+
      by(simp add: name_swap injPermSubst)+
    have IH: "\<And>P y a Q Q'. {\<tau>.(<\<nu>y'>(P' \<parallel> (Q''[x::=y']))) | a' y' P' x Q''. <\<nu>y'>a'{y'}.P' = <\<nu>y>a{y}.P \<and> (a'<x>.Q'' \<in> summands Q \<or> a'<x>.Q'' \<in> summands Q') \<and> y' \<sharp> Q \<and> y' \<sharp> Q'} = {\<tau>.(<\<nu>y'>(P' \<parallel> (Q''[x::=y']))) | a' y' P' x Q''. <\<nu>y'>a'{y'}.P' = <\<nu>y>a{y}.P \<and> a'<x>.Q'' \<in> summands Q \<and> y' \<sharp> Q \<and> y' \<sharp> Q'} \<union> {\<tau>.(<\<nu>y'>(P' \<parallel> (Q''[x::=y']))) | a' y' P' x Q''. <\<nu>y'>a'{y'}.P' = <\<nu>y>a{y}.P \<and> a'<x>.Q'' \<in> summands Q' \<and> y' \<sharp> Q \<and> y' \<sharp> Q'}"
      by blast
    have IH': "\<And>a y P Q Q'. {\<tau>.(<\<nu>y'>(P' \<parallel> (Q''[x::=y']))) | a' y' P' x Q''. <\<nu>y'>a'{y'}.P' = <\<nu>y>a{y}.P \<and> a'<x>.Q'' \<in> summands Q \<and> y' \<sharp> Q \<and> y' \<sharp> Q'} \<subseteq> {\<tau>.(<\<nu>y'>(P' \<parallel> (Q''[x::=y']))) | a' y' P' x Q''. <\<nu>y'>a'{y'}.P' = <\<nu>y>a{y}.P \<and> a'<x>.Q'' \<in> summands Q \<and> y' \<sharp> Q}"
      by blast
    have IH'': "\<And>a y P Q Q'. {\<tau>.(<\<nu>y'>(P' \<parallel> (Q''[x::=y']))) | a' y' P' x Q''. <\<nu>y'>a'{y'}.P' = <\<nu>y>a{y}.P \<and> a'<x>.Q'' \<in> summands Q' \<and> y' \<sharp> Q \<and> y' \<sharp> Q'} \<subseteq> {\<tau>.(<\<nu>y'>(P' \<parallel> (Q''[x::=y']))) | a' y' P' x Q''. <\<nu>y'>a'{y'}.P' = <\<nu>y>a{y}.P \<and> a'<x>.Q'' \<in> summands Q' \<and> y' \<sharp> Q'}"
      by blast
    have BC: "\<And>a y P Q. \<lbrakk>y \<sharp> Q; y \<noteq> a\<rbrakk> \<Longrightarrow> finite {\<tau>.(<\<nu>y'>(P' \<parallel> (Q'[x::=y']))) | a' y' P' x Q'. <\<nu>y'>a'{y'}.P' = <\<nu>y>a{y}.P \<and> a'<x>.Q' \<in> summands Q \<and> y' \<sharp> Q}"
    proof -
      fix a y P Q
      assume "(y::name) \<sharp> (Q::pi)" and "y \<noteq> a"
      thus "finite {\<tau>.(<\<nu>y'>(P' \<parallel> (Q'[x::=y']))) | a' y' P' x Q'. <\<nu>y'>a'{y'}.P' = <\<nu>y>a{y}.P \<and> a'<x>.Q' \<in> summands Q \<and> y' \<sharp> Q}"
        apply(nominal_induct Q avoiding: y rule: pi.strong_induct, simp_all)
        apply(case_tac "a=name1")
        apply auto
        apply(subgoal_tac "ya \<sharp> (pi::pi)")
        apply(insert Aux)
        apply auto
        apply(simp add: name_fresh_abs)
        apply(simp add: pi.inject name_abs_eq name_calc)
        apply(insert IH)
        apply auto
        apply(blast intro: finite_subset[OF IH'])
        by(blast intro: finite_subset[OF IH''])
    qed
    have "finite {\<tau>.(<\<nu>y>(P' \<parallel> (Q'[x::=y]))) | a y P' x Q'. <\<nu>y>a{y}.P' \<in> summands P \<and> a<x>.Q' \<in> summands Q \<and> y \<sharp> Q}"
      apply(nominal_induct P avoiding: Q rule: pi.strong_induct, simp_all)
      apply(simp add: Collect_ex_eq conj_disj_distribL conj_disj_distribR name_fresh_abs
                      Collect_disj_eq UN_Un_distrib)
      by(auto intro: BC)
    thus ?thesis
      apply(rule_tac finite_subset) defer by blast+
  qed
  (* expandSet is the union of the twelve finite sets above. *)
  ultimately show ?thesis
    by(simp add: expandSet_def)
qed
(* Every element of the expansion set is in head normal form: by definition
   of expandSet each member is a tau-, output-, input- or restricted-output-
   prefixed process, and all such prefixed processes satisfy hnf. *)
lemma expandHnf:
  fixes P :: pi
  and   Q :: pi

  shows "\<forall>R \<in> (expandSet P Q). hnf R"
by(force simp add: expandSet_def)
(* (P, S) \<in> sumComposeSet relates a process P to a set S of processes when
   P is a sum (built from \<zero> with \<oplus>) whose summands are exactly the elements
   of S, each consumed once: the empty sum composes the empty set, and
   P \<oplus> Q composes S if Q \<in> S and P composes the remainder S - {Q}. *)
inductive_set sumComposeSet :: "(pi \<times> pi set) set"
where
  empty:  "(\<zero>, {}) \<in> sumComposeSet"
| insert: "\<lbrakk>Q \<in> S; (P, S - {Q}) \<in> sumComposeSet\<rbrakk> \<Longrightarrow> (P \<oplus> Q, S) \<in> sumComposeSet"
(* Soundness of sum composition: if P is composed from the set S and some
   member Q of S can perform a transition, then the composed sum P can
   perform the same transition (by selecting Q's summand via Sum1/Sum2).
   The proof is by induction on the derivation of (P, S) \<in> sumComposeSet. *)
lemma expandAction:
  fixes P :: pi
  and   Q :: pi
  and   S :: "pi set"

  assumes "(P, S) \<in> sumComposeSet"
  and     "Q \<in> S"
  and     "Q \<longmapsto> Rs"

  shows "P \<longmapsto> Rs"
using assms
proof(induct arbitrary: Q rule: sumComposeSet.induct)
  case empty
  (* S = {} contradicts Q \<in> S. *)
  have "Q \<in> {}" by fact
  hence False by simp
  thus ?case by simp
next
  case(insert Q' S P Q)
  have QTrans: "Q \<longmapsto> Rs" by fact
  show ?case
  proof(case_tac "Q = Q'")
    (* Q is the rightmost summand: use the right sum rule directly. *)
    assume "Q = Q'"
    with QTrans show "P \<oplus> Q' \<longmapsto> Rs" by(blast intro: Sum2)
  next
    (* Otherwise Q lies in the remainder S - {Q'}; apply the induction
       hypothesis and lift the transition with the left sum rule. *)
    assume QineqQ': "Q \<noteq> Q'"
    have IH: "\<And>Q. \<lbrakk>Q \<in> S - {Q'}; Q \<longmapsto> Rs\<rbrakk> \<Longrightarrow> P \<longmapsto> Rs" by fact
    have QinS: "Q \<in> S" by fact
    with QineqQ' have "Q \<in> S - {Q'}" by simp
    hence "P \<longmapsto> Rs" using QTrans by(rule IH)
    thus ?case by(rule Sum1)
  qed
qed
(* Completeness of sum composition (converse of expandAction): every
   transition of a process R composed from the set S originates from some
   member of S.  Again by induction on (R, S) \<in> sumComposeSet, analysing
   which summand fired via sumCases. *)
lemma expandAction':
  fixes P :: pi
  and   Q :: pi
  and   R :: pi

  assumes "(R, S) \<in> sumComposeSet"
  and     "R \<longmapsto> Rs"

  shows "\<exists>P \<in> S. P \<longmapsto> Rs"
using assms
proof(induct rule: sumComposeSet.induct)
  case empty
  (* The empty sum \<zero> has no transitions. *)
  have "\<zero> \<longmapsto> Rs" by fact
  hence False by blast
  thus ?case by simp
next
  case(insert Q S P)
  have QinS: "Q \<in> S" by fact
  have "P \<oplus> Q \<longmapsto> Rs" by fact
  thus ?case
  proof(induct rule: sumCases)
    case cSum1
    (* The left operand fired: the induction hypothesis yields a witness
       in S - {Q}, which is also a member of S. *)
    have "P \<longmapsto> Rs" by fact
    moreover have "P \<longmapsto> Rs \<Longrightarrow> \<exists>P \<in> (S - {Q}). P \<longmapsto> Rs" by fact
    ultimately obtain P where PinS: "P \<in> (S - {Q})" and PTrans: "P \<longmapsto> Rs" by blast
    show ?case
    proof(case_tac "P = Q")
      assume "P = Q"
      with PTrans QinS show ?case by blast
    next
      assume PineqQ: "P \<noteq> Q"
      from PinS have "P \<in> S" by simp
      with PTrans show ?thesis by blast
    qed
  next
    case cSum2
    (* The right operand Q fired, and Q \<in> S is the witness. *)
    have "Q \<longmapsto> Rs" by fact
    with QinS show ?case by blast
  qed
qed
lemma expandTrans:
fixes P :: pi
and Q :: pi
and R :: pi
and a :: name
and b :: name
and x :: name
assumes Exp: "(R, expandSet P Q) \<in> sumComposeSet"
and Phnf: "hnf P"
and Qhnf: "hnf Q"
shows "(P \<parallel> Q \<longmapsto>\<tau> \<prec> P') = (R \<longmapsto>\<tau> \<prec> P')"
and "(P \<parallel> Q \<longmapsto>a[b] \<prec> P') = (R \<longmapsto>a[b] \<prec> P')"
and "(P \<parallel> Q \<longmapsto>a<x> \<prec> P') = (R \<longmapsto>a<x> \<prec> P')"
and "(P \<parallel> Q \<longmapsto>a<\<nu>x> \<prec> P') = (R \<longmapsto>a<\<nu>x> \<prec> P')"
proof -
show "P \<parallel> Q \<longmapsto> \<tau> \<prec> P' = R \<longmapsto> \<tau> \<prec> P'"
proof(rule iffI)
assume "P \<parallel> Q \<longmapsto>\<tau> \<prec> P'"
thus "R \<longmapsto>\<tau> \<prec> P'"
proof(induct rule: parCasesF[of _ _ _ _ _ "(P, Q)"])
case(cPar1 P')
have "P \<longmapsto>\<tau> \<prec> P'" by fact
with Phnf have "\<tau>.(P') \<in> summands P" by(simp add: summandTransition)
hence "\<tau>.(P' \<parallel> Q) \<in> expandSet P Q" by(auto simp add: expandSet_def)
moreover have "\<tau>.(P' \<parallel> Q) \<longmapsto>\<tau> \<prec> (P' \<parallel> Q)" by(rule Tau)
ultimately show ?case using Exp by(blast intro: expandAction)
next
case(cPar2 Q')
have "Q \<longmapsto>\<tau> \<prec> Q'" by fact
with Qhnf have "\<tau>.(Q') \<in> summands Q" by(simp add: summandTransition)
hence "\<tau>.(P \<parallel> Q') \<in> expandSet P Q" by(auto simp add: expandSet_def)
moreover have "\<tau>.(P \<parallel> Q') \<longmapsto>\<tau> \<prec> (P \<parallel> Q')" by(rule Tau)
ultimately show ?case using Exp by(blast intro: expandAction)
next
case(cComm1 P' Q' a b x)
have "P \<longmapsto>a<x> \<prec> P'" and "Q \<longmapsto>a[b] \<prec> Q'" by fact+
with Phnf Qhnf have "a<x>.P' \<in> summands P" and "a{b}.Q' \<in> summands Q" by(simp add: summandTransition)+
hence "\<tau>.(P'[x::=b] \<parallel> Q') \<in> expandSet P Q" by(simp add: expandSet_def, blast)
moreover have "\<tau>.(P'[x::=b] \<parallel> Q') \<longmapsto>\<tau> \<prec> (P'[x::=b] \<parallel> Q')" by(rule Tau)
ultimately show ?case using Exp by(blast intro: expandAction)
next
case(cComm2 P' Q' a b x)
have "P \<longmapsto>a[b] \<prec> P'" and "Q \<longmapsto>a<x> \<prec> Q'" by fact+
with Phnf Qhnf have "a{b}.P' \<in> summands P" and "a<x>.Q' \<in> summands Q" by(simp add: summandTransition)+
hence "\<tau>.(P' \<parallel> (Q'[x::=b])) \<in> expandSet P Q" by(simp add: expandSet_def, blast)
moreover have "\<tau>.(P' \<parallel> (Q'[x::=b])) \<longmapsto>\<tau> \<prec> (P' \<parallel> (Q'[x::=b]))" by(rule Tau)
ultimately show ?case using Exp by(blast intro: expandAction)
next
case(cClose1 P' Q' a x y)
have "y \<sharp> (P, Q)" by fact
hence yFreshP: "y \<sharp> P" by(simp add: fresh_prod)
have PTrans: "P \<longmapsto>a<x> \<prec> P'" by fact
with Phnf have PSumm: "a<x>.P' \<in> summands P" by(simp add: summandTransition)
have "Q \<longmapsto>a<\<nu>y> \<prec> Q'" by fact
moreover from PTrans yFreshP have "y \<noteq> a" by(force dest: freshBoundDerivative)
ultimately have "<\<nu>y>a{y}.Q' \<in> summands Q" using Qhnf by(simp add: summandTransition)
with PSumm yFreshP have "\<tau>.(<\<nu>y>(P'[x::=y] \<parallel> Q')) \<in> expandSet P Q"
by(auto simp add: expandSet_def)
moreover have "\<tau>.(<\<nu>y>(P'[x::=y] \<parallel> Q')) \<longmapsto>\<tau> \<prec> <\<nu>y>(P'[x::=y] \<parallel> Q')" by(rule Tau)
ultimately show ?case using Exp by(blast intro: expandAction)
next
case(cClose2 P' Q' a x y)
have "y \<sharp> (P, Q)" by fact
hence yFreshQ: "y \<sharp> Q" by(simp add: fresh_prod)
have QTrans: "Q \<longmapsto>a<x> \<prec> Q'" by fact
with Qhnf have QSumm: "a<x>.Q' \<in> summands Q" by(simp add: summandTransition)
have "P \<longmapsto>a<\<nu>y> \<prec> P'" by fact
moreover from QTrans yFreshQ have "y \<noteq> a" by(force dest: freshBoundDerivative)
ultimately have "<\<nu>y>a{y}.P' \<in> summands P" using Phnf by(simp add: summandTransition)
with QSumm yFreshQ have "\<tau>.(<\<nu>y>(P' \<parallel> (Q'[x::=y]))) \<in> expandSet P Q"
by(simp add: expandSet_def, blast)
moreover have "\<tau>.(<\<nu>y>(P' \<parallel> (Q'[x::=y]))) \<longmapsto>\<tau> \<prec> <\<nu>y>(P' \<parallel> (Q'[x::=y]))" by(rule Tau)
ultimately show ?case using Exp by(blast intro: expandAction)
qed
next
assume "R \<longmapsto>\<tau> \<prec> P'"
with Exp obtain R where "R \<in> expandSet P Q" and "R \<longmapsto>\<tau> \<prec> P'" by(blast dest: expandAction')
thus "P \<parallel> Q \<longmapsto>\<tau> \<prec> P'"
proof(auto simp add: expandSet_def)
fix P''
assume "\<tau>.(P'') \<in> summands P"
with Phnf have "P \<longmapsto>\<tau> \<prec> P''" by(simp add: summandTransition)
hence PQTrans: "P \<parallel> Q \<longmapsto>\<tau> \<prec> P'' \<parallel> Q" by(rule Par1F)
assume "\<tau>.(P'' \<parallel> Q) \<longmapsto>\<tau> \<prec> P'"
hence "P' = P'' \<parallel> Q" by(erule_tac tauCases, auto simp add: pi.inject residual.inject)
with PQTrans show ?thesis by simp
next
fix Q'
assume "\<tau>.(Q') \<in> summands Q"
with Qhnf have "Q \<longmapsto>\<tau> \<prec> Q'" by(simp add: summandTransition)
hence PQTrans: "P \<parallel> Q \<longmapsto>\<tau> \<prec> P \<parallel> Q'" by(rule Par2F)
assume "\<tau>.(P \<parallel> Q') \<longmapsto>\<tau> \<prec> P'"
hence "P' = P \<parallel> Q'" by(erule_tac tauCases, auto simp add: pi.inject residual.inject)
with PQTrans show ?thesis by simp
next
fix a x P'' b Q'
assume "a<x>.P'' \<in> summands P" and "a{b}.Q' \<in> summands Q"
with Phnf Qhnf have "P \<longmapsto>a<x> \<prec> P''" and "Q \<longmapsto>a[b] \<prec> Q'" by(simp add: summandTransition)+
hence PQTrans: "P \<parallel> Q \<longmapsto>\<tau> \<prec> P''[x::=b] \<parallel> Q'" by(rule Comm1)
assume "\<tau>.(P''[x::=b] \<parallel> Q') \<longmapsto>\<tau> \<prec> P'"
hence "P' = P''[x::=b] \<parallel> Q'" by(erule_tac tauCases, auto simp add: pi.inject residual.inject)
with PQTrans show ?thesis by simp
next
fix a b P'' x Q'
assume "a{b}.P'' \<in> summands P" and "a<x>.Q' \<in> summands Q"
with Phnf Qhnf have "P \<longmapsto>a[b] \<prec> P''" and "Q \<longmapsto>a<x> \<prec> Q'" by(simp add: summandTransition)+
hence PQTrans: "P \<parallel> Q \<longmapsto>\<tau> \<prec> P'' \<parallel> (Q'[x::=b])" by(rule Comm2)
assume "\<tau>.(P'' \<parallel> (Q'[x::=b])) \<longmapsto>\<tau> \<prec> P'"
hence "P' = P'' \<parallel> (Q'[x::=b])" by(erule_tac tauCases, auto simp add: pi.inject residual.inject)
with PQTrans show ?thesis by simp
next
fix a x P'' y Q'
assume yFreshP: "(y::name) \<sharp> P"
assume "a<x>.P'' \<in> summands P"
with Phnf have PTrans: "P \<longmapsto>a<x> \<prec> P''" by(simp add: summandTransition)
assume "<\<nu>y>a{y}.Q' \<in> summands Q"
moreover from yFreshP PTrans have "y \<noteq> a" by(force dest: freshBoundDerivative)
ultimately have "Q \<longmapsto>a<\<nu>y> \<prec> Q'" using Qhnf by(simp add: summandTransition)
with PTrans have PQTrans: "P \<parallel> Q \<longmapsto>\<tau> \<prec> <\<nu>y>(P''[x::=y] \<parallel> Q')" using yFreshP by(rule Close1)
assume "\<tau>.(<\<nu>y>(P''[x::=y] \<parallel> Q')) \<longmapsto>\<tau> \<prec> P'"
hence "P' = <\<nu>y>(P''[x::=y] \<parallel> Q')" by(erule_tac tauCases, auto simp add: pi.inject residual.inject)
with PQTrans show ?thesis by simp
next
fix a y P'' x Q'
assume yFreshQ: "(y::name) \<sharp> Q"
assume "a<x>.Q' \<in> summands Q"
with Qhnf have QTrans: "Q \<longmapsto>a<x> \<prec> Q'" by(simp add: summandTransition)
assume "<\<nu>y>a{y}.P'' \<in> summands P"
moreover from yFreshQ QTrans have "y \<noteq> a" by(force dest: freshBoundDerivative)
ultimately have "P \<longmapsto>a<\<nu>y> \<prec> P''" using Phnf by(simp add: summandTransition)
hence PQTrans: "P \<parallel> Q \<longmapsto>\<tau> \<prec> <\<nu>y>(P'' \<parallel> Q'[x::=y])" using QTrans yFreshQ by(rule Close2)
assume "\<tau>.(<\<nu>y>(P'' \<parallel> Q'[x::=y])) \<longmapsto>\<tau> \<prec> P'"
hence "P' = <\<nu>y>(P'' \<parallel> Q'[x::=y])" by(erule_tac tauCases, auto simp add: pi.inject residual.inject)
with PQTrans show ?thesis by simp
qed
qed
next
show "P \<parallel> Q \<longmapsto> a[b] \<prec> P' = R \<longmapsto> a[b] \<prec> P'"
proof(rule iffI)
assume "P \<parallel> Q \<longmapsto>a[b] \<prec> P'"
thus "R \<longmapsto>a[b] \<prec> P'"
proof(induct rule: parCasesF[where C="()"])
case(cPar1 P')
have "P \<longmapsto>a[b] \<prec> P'" by fact
with Phnf have "a{b}.P' \<in> summands P" by(simp add: summandTransition)
hence "a{b}.(P' \<parallel> Q) \<in> expandSet P Q" by(auto simp add: expandSet_def)
moreover have "a{b}.(P' \<parallel> Q) \<longmapsto>a[b] \<prec> (P' \<parallel> Q)" by(rule Output)
ultimately show ?case using Exp by(blast intro: expandAction)
next
case(cPar2 Q')
have "Q \<longmapsto>a[b] \<prec> Q'" by fact
with Qhnf have "a{b}.Q' \<in> summands Q" by(simp add: summandTransition)
hence "a{b}.(P \<parallel> Q') \<in> expandSet P Q" by(simp add: expandSet_def, blast)
moreover have "a{b}.(P \<parallel> Q') \<longmapsto>a[b] \<prec> (P \<parallel> Q')" by(rule Output)
ultimately show ?case using Exp by(blast intro: expandAction)
next
case cComm1
thus ?case by auto
next
case cComm2
thus ?case by auto
next
case cClose1
thus ?case by auto
next
case cClose2
thus ?case by auto
qed
next
assume "R \<longmapsto>a[b] \<prec> P'"
with Exp obtain R where "R \<in> expandSet P Q" and "R \<longmapsto>a[b] \<prec> P'" by(blast dest: expandAction')
thus "P \<parallel> Q \<longmapsto>a[b] \<prec> P'"
proof(auto simp add: expandSet_def)
fix a' b' P''
assume "a'{b'}.P'' \<in> summands P"
with Phnf have "P \<longmapsto>a'[b'] \<prec> P''" by(simp add: summandTransition)
hence PQTrans: "P \<parallel> Q \<longmapsto>a'[b'] \<prec> P'' \<parallel> Q" by(rule Par1F)
assume "a'{b'}.(P'' \<parallel> Q) \<longmapsto>a[b] \<prec> P'"
hence "P' = P'' \<parallel> Q" and "a = a'" and "b = b'"
by(erule_tac outputCases, auto simp add: pi.inject residual.inject)+
with PQTrans show ?thesis by simp
next
fix a' b' Q'
assume "a'{b'}.Q' \<in> summands Q"
with Qhnf have "Q \<longmapsto>a'[b'] \<prec> Q'" by(simp add: summandTransition)
hence PQTrans: "P \<parallel> Q \<longmapsto>a'[b'] \<prec> P \<parallel> Q'" by(rule Par2F)
assume "a'{b'}.(P \<parallel> Q') \<longmapsto>a[b] \<prec> P'"
hence "P' = P \<parallel> Q'" and "a = a'" and "b = b'"
by(erule_tac outputCases, auto simp add: pi.inject residual.inject)+
with PQTrans show ?thesis by simp
qed
qed
next
show "P \<parallel> Q \<longmapsto> a<x> \<prec> P' = R \<longmapsto> a<x> \<prec> P'"
proof(rule iffI)
{
fix x P'
assume "P \<parallel> Q \<longmapsto>a<x> \<prec> P'" and "x \<sharp> P" and "x \<sharp> Q"
hence "R \<longmapsto>a<x> \<prec> P'"
proof(induct rule: parCasesB)
case(cPar1 P')
have "P \<longmapsto>a<x> \<prec> P'" by fact
with Phnf have "a<x>.P' \<in> summands P" by(simp add: summandTransition)
moreover have "x \<sharp> Q" by fact
ultimately have "a<x>.(P' \<parallel> Q) \<in> expandSet P Q" by(auto simp add: expandSet_def)
moreover have "a<x>.(P' \<parallel> Q) \<longmapsto>a<x> \<prec> (P' \<parallel> Q)" by(rule Input)
ultimately show ?case using Exp by(blast intro: expandAction)
next
case(cPar2 Q')
have "Q \<longmapsto>a<x> \<prec> Q'" by fact
with Qhnf have "a<x>.Q' \<in> summands Q" by(simp add: summandTransition)
moreover have "x \<sharp> P" by fact
ultimately have "a<x>.(P \<parallel> Q') \<in> expandSet P Q" by(simp add: expandSet_def, blast)
moreover have "a<x>.(P \<parallel> Q') \<longmapsto>a<x> \<prec> (P \<parallel> Q')" by(rule Input)
ultimately show ?case using Exp by(blast intro: expandAction)
qed
}
moreover obtain y::name where "y \<sharp> P" and "y \<sharp> Q" and "y \<sharp> P'"
by(generate_fresh "name") auto
assume "P \<parallel> Q \<longmapsto>a<x> \<prec> P'"
with \<open>y \<sharp> P'\<close> have "P \<parallel> Q \<longmapsto>a<y> \<prec> ([(x, y)] \<bullet> P')"
by(simp add: alphaBoundResidual)
ultimately have "R \<longmapsto>a<y> \<prec> ([(x, y)] \<bullet> P')" using \<open>y \<sharp> P\<close> \<open>y \<sharp> Q\<close>
by auto
thus "R \<longmapsto>a<x> \<prec> P'" using \<open>y \<sharp> P'\<close> by(simp add: alphaBoundResidual)
next
assume "R \<longmapsto>a<x> \<prec> P'"
with Exp obtain R where "R \<in> expandSet P Q" and "R \<longmapsto>a<x> \<prec> P'" by(blast dest: expandAction')
thus "P \<parallel> Q \<longmapsto>a<x> \<prec> P'"
proof(auto simp add: expandSet_def)
fix a' y P''
assume "a'<y>.P'' \<in> summands P"
with Phnf have "P \<longmapsto>a'<y> \<prec> P''" by(simp add: summandTransition)
moreover assume "y \<sharp> Q"
ultimately have PQTrans: "P \<parallel> Q \<longmapsto>a'<y> \<prec> P'' \<parallel> Q" by(rule Par1B)
assume "a'<y>.(P'' \<parallel> Q) \<longmapsto>a<x> \<prec> P'"
hence "a<x> \<prec> P' = a'<y> \<prec> P'' \<parallel> Q" and "a = a'"
by(erule_tac inputCases', auto simp add: pi.inject residual.inject)+
with PQTrans show ?thesis by simp
next
fix a' y Q'
assume "a'<y>.Q' \<in> summands Q"
with Qhnf have "Q \<longmapsto>(a'::name)<y> \<prec> Q'" by(simp add: summandTransition)
moreover assume "y \<sharp> P"
ultimately have PQTrans: "P \<parallel> Q \<longmapsto>a'<y> \<prec> P \<parallel> Q'" by(rule Par2B)
assume "a'<y>.(P \<parallel> Q') \<longmapsto>a<x> \<prec> P'"
hence "a<x> \<prec> P' = a'<y> \<prec> P \<parallel> Q'" and "a = a'"
by(erule_tac inputCases', auto simp add: pi.inject residual.inject)+
with PQTrans show ?thesis by simp
qed
qed
next
have Goal: "\<And>P Q a x P' R. \<lbrakk>(R, expandSet P Q) \<in> sumComposeSet; hnf P; hnf Q; a \<noteq> x\<rbrakk> \<Longrightarrow> P \<parallel> Q \<longmapsto>a<\<nu>x> \<prec> P' = R \<longmapsto>a<\<nu>x> \<prec> P'"
proof -
fix P Q a x P' R
assume aineqx: "(a::name) \<noteq> x"
assume Exp: "(R, expandSet P Q) \<in> sumComposeSet"
assume Phnf: "hnf P"
assume Qhnf: "hnf Q"
show "P \<parallel> Q \<longmapsto>a<\<nu>x> \<prec> P' = R \<longmapsto> a<\<nu>x> \<prec> P'"
proof(rule iffI)
{
fix x P'
assume "P \<parallel> Q \<longmapsto>a<\<nu>x> \<prec> P'" and "x \<sharp> P" and "x \<sharp> Q" and "a \<noteq> x"
hence "R \<longmapsto>a<\<nu>x> \<prec> P'"
proof(induct rule: parCasesB)
case(cPar1 P')
have "P \<longmapsto>a<\<nu>x> \<prec> P'" by fact
with Phnf \<open>a \<noteq> x\<close> have "<\<nu>x>a{x}.P' \<in> summands P" by(simp add: summandTransition)
moreover have "x \<sharp> Q" by fact
ultimately have "<\<nu>x>a{x}.(P' \<parallel> Q) \<in> expandSet P Q" by(auto simp add: expandSet_def)
moreover have "<\<nu>x>a{x}.(P' \<parallel> Q) \<longmapsto>a<\<nu>x> \<prec> (P' \<parallel> Q)" using \<open>a \<noteq> x\<close>
by(blast intro: Open Output)
ultimately show ?case using Exp by(blast intro: expandAction)
next
case(cPar2 Q')
have "Q \<longmapsto>a<\<nu>x> \<prec> Q'" by fact
with Qhnf \<open>a \<noteq> x\<close> have "<\<nu>x>a{x}.Q' \<in> summands Q" by(simp add: summandTransition)
moreover have "x \<sharp> P" by fact
ultimately have "<\<nu>x>a{x}.(P \<parallel> Q') \<in> expandSet P Q" by(simp add: expandSet_def, blast)
moreover have "<\<nu>x>a{x}.(P \<parallel> Q') \<longmapsto>a<\<nu>x> \<prec> (P \<parallel> Q')" using \<open>a \<noteq> x\<close>
by(blast intro: Open Output)
ultimately show ?case using Exp by(blast intro: expandAction)
qed
}
moreover obtain y::name where "y \<sharp> P" and "y \<sharp> Q" and "y \<sharp> P'" and "y \<noteq> a"
by(generate_fresh "name") auto
assume "P \<parallel> Q \<longmapsto>a<\<nu>x> \<prec> P'"
with \<open>y \<sharp> P'\<close> have "P \<parallel> Q \<longmapsto>a<\<nu>y> \<prec> ([(x, y)] \<bullet> P')"
by(simp add: alphaBoundResidual)
ultimately have "R \<longmapsto>a<\<nu>y> \<prec> ([(x, y)] \<bullet> P')" using \<open>y \<sharp> P\<close> \<open>y \<sharp> Q\<close> \<open>y \<noteq> a\<close>
by auto
thus "R \<longmapsto>a<\<nu>x> \<prec> P'" using \<open>y \<sharp> P'\<close> by(simp add: alphaBoundResidual)
next
{
fix R x P'
assume "R \<longmapsto>a<\<nu>x> \<prec> P'" and "R \<in> expandSet P Q" and "x \<sharp> R" and "x \<sharp> P" and "x \<sharp> Q"
hence "P \<parallel> Q \<longmapsto>a<\<nu>x> \<prec> P'"
proof(auto simp add: expandSet_def)
fix a' y P''
assume "<\<nu>y>a'{y}.P'' \<in> summands P"
moreover hence "a' \<noteq> y" by auto
ultimately have "P \<longmapsto>a'<\<nu>y> \<prec> P''" using Phnf by(simp add: summandTransition)
moreover assume "y \<sharp> Q"
ultimately have PQTrans: "P \<parallel> Q \<longmapsto>a'<\<nu>y> \<prec> P'' \<parallel> Q" by(rule Par1B)
assume ResTrans: "<\<nu>y>a'{y}.(P'' \<parallel> Q) \<longmapsto>a<\<nu>x> \<prec> P'" and "x \<sharp> [y].a'{y}.(P'' \<parallel> Q)"
with ResTrans \<open>a' \<noteq> y\<close> \<open>x \<sharp> P\<close> \<open>x \<sharp> Q\<close> have "a<\<nu>x> \<prec> P' = a'<\<nu>y> \<prec> P'' \<parallel> Q"
apply(case_tac "x=y")
defer
apply(erule_tac resCasesB)
apply simp
apply(simp add: abs_fresh)
apply(auto simp add: residual.inject alpha' calc_atm fresh_left abs_fresh elim: outputCases)
apply(ind_cases "<\<nu>y>a'{y}.(P'' \<parallel> Q) \<longmapsto> a<\<nu>y> \<prec> P'")
apply(simp add: pi.inject alpha' residual.inject abs_fresh eqvts calc_atm)
apply(auto elim: outputCases)
apply(simp add: pi.inject residual.inject alpha' calc_atm)
apply auto
apply(ind_cases "<\<nu>y>a'{y}.(P'' \<parallel> Q) \<longmapsto> a<\<nu>y> \<prec> P'")
apply(auto simp add: pi.inject alpha' residual.inject abs_fresh eqvts calc_atm)
apply(auto elim: outputCases)
apply(erule_tac outputCases)
apply(auto simp add: freeRes.inject)
apply hypsubst_thin
apply(drule_tac pi="[(b, y)]" in pt_bij3)
by simp
with PQTrans show ?thesis by simp
next
fix a' y Q'
assume "<\<nu>y>a'{y}.Q' \<in> summands Q"
moreover hence "a' \<noteq> y" by auto
ultimately have "Q \<longmapsto>a'<\<nu>y> \<prec> Q'" using Qhnf by(simp add: summandTransition)
moreover assume "y \<sharp> P"
ultimately have PQTrans: "P \<parallel> Q \<longmapsto>a'<\<nu>y> \<prec> P \<parallel> Q'" by(rule Par2B)
assume ResTrans: "<\<nu>y>a'{y}.(P \<parallel> Q') \<longmapsto>a<\<nu>x> \<prec> P'" and "x \<sharp> [y].a'{y}.(P \<parallel> Q')"
with ResTrans \<open>a' \<noteq> y\<close> have "a<\<nu>x> \<prec> P' = a'<\<nu>y> \<prec> P \<parallel> Q'"
apply(case_tac "x=y")
defer
apply(erule_tac resCasesB)
apply simp
apply(simp add: abs_fresh)
apply(auto simp add: residual.inject alpha' calc_atm fresh_left abs_fresh elim: outputCases)
apply(ind_cases "<\<nu>y>a'{y}.(P \<parallel> Q') \<longmapsto> a<\<nu>y> \<prec> P'")
apply(simp add: pi.inject alpha' residual.inject abs_fresh eqvts calc_atm)
apply(auto elim: outputCases)
apply(simp add: pi.inject residual.inject alpha' calc_atm)
apply auto
apply(ind_cases "<\<nu>y>a'{y}.(P \<parallel> Q') \<longmapsto> a<\<nu>y> \<prec> P'")
apply(auto simp add: pi.inject alpha' residual.inject abs_fresh eqvts calc_atm)
apply(auto elim: outputCases)
apply(erule_tac outputCases)
apply(auto simp add: freeRes.inject)
apply hypsubst_thin
apply(drule_tac pi="[(b, y)]" in pt_bij3)
by simp
with PQTrans show ?thesis by simp
qed
}
moreover assume "R \<longmapsto>a<\<nu>x> \<prec> P'"
with Exp obtain R where "R \<in> expandSet P Q" and "R \<longmapsto>a<\<nu>x> \<prec> P'"
apply(drule_tac expandAction') by auto
moreover obtain y::name where "y \<sharp> P" and "y \<sharp> Q" and "y \<sharp> R" and "y \<sharp> P'"
by(generate_fresh "name") auto
moreover with \<open>y \<sharp> P'\<close> \<open>R \<longmapsto>a<\<nu>x> \<prec> P'\<close> have "R \<longmapsto>a<\<nu>y> \<prec> ([(x, y)] \<bullet> P')" by(simp add: alphaBoundResidual)
ultimately have "P \<parallel> Q \<longmapsto>a<\<nu>y> \<prec> ([(x, y)] \<bullet> P')" by auto
thus "P \<parallel> Q \<longmapsto>a<\<nu>x> \<prec> P'" using \<open>y \<sharp> P'\<close> by(simp add: alphaBoundResidual)
qed
qed
obtain y where yineqx: "a \<noteq> y" and yFreshP': "y \<sharp> P'"
by(force intro: name_exists_fresh[of "(a, P')"] simp add: fresh_prod)
from Exp Phnf Qhnf yineqx have "(P \<parallel> Q \<longmapsto>a<\<nu>y> \<prec> [(x, y)] \<bullet> P') = (R \<longmapsto>a<\<nu>y> \<prec> [(x, y)] \<bullet> P')"
by(rule Goal)
moreover with yFreshP' have "x \<sharp> [(x, y)] \<bullet> P'" by(simp add: name_fresh_left name_calc)
ultimately show "(P \<parallel> Q \<longmapsto>a<\<nu>x> \<prec> P') = (R \<longmapsto>a<\<nu>x> \<prec> P')"
by(simp add: alphaBoundResidual name_swap)
qed
(* Left half of the expansion law: if R is a sum assembled from the
   expansion set of P and Q (via sumComposeSet), and both P and Q are in
   head normal form, then P || Q simulates R with respect to any relation
   Rel containing the identity.  Both cases follow from expandTrans, which
   equates the transitions of P || Q with those of R. *)
lemma expandLeft:
fixes P :: pi
and Q :: pi
and R :: pi
and Rel :: "(pi \<times> pi) set"
assumes Exp: "(R, expandSet P Q) \<in> sumComposeSet"
and Phnf: "hnf P"
and Qhnf: "hnf Q"
and Id: "Id \<subseteq> Rel"
shows "P \<parallel> Q \<leadsto>[Rel] R"
proof(induct rule: simCases)
case(Bound a x R')
have "R \<longmapsto>a\<guillemotleft>x\<guillemotright> \<prec> R'" by fact
(* Transfer the bound transition of R to P || Q using expandTrans. *)
with Exp Phnf Qhnf have "P \<parallel> Q \<longmapsto>a\<guillemotleft>x\<guillemotright> \<prec> R'" by(cases a, auto simp add: expandTrans)
(* The derivatives are syntactically equal, hence related by Rel \<supseteq> Id. *)
moreover from Id have "derivative R' R' a x Rel" by(cases a, auto simp add: derivative_def)
ultimately show ?case by blast
next
case(Free \<alpha> R')
have "R \<longmapsto>\<alpha> \<prec> R'" by fact
(* Transfer the free transition of R to P || Q using expandTrans. *)
with Exp Phnf Qhnf have "P \<parallel> Q \<longmapsto>\<alpha> \<prec> R'" by(cases \<alpha>, auto simp add: expandTrans)
moreover from Id have "(R', R') \<in> Rel" by blast
ultimately show ?case by blast
qed
(* Right half of the expansion law: under the same assumptions as
   expandLeft, R simulates P || Q.  The proof is symmetric to expandLeft,
   again reducing every transition obligation to expandTrans. *)
lemma expandRight:
fixes P :: pi
and Q :: pi
and R :: pi
and Rel :: "(pi \<times> pi) set"
assumes Exp: "(R, expandSet P Q) \<in> sumComposeSet"
and Phnf: "hnf P"
and Qhnf: "hnf Q"
and Id: "Id \<subseteq> Rel"
shows "R \<leadsto>[Rel] P \<parallel> Q"
proof(induct rule: simCases)
case(Bound a x R')
have "P \<parallel> Q \<longmapsto>a\<guillemotleft>x\<guillemotright> \<prec> R'" by fact
(* Transfer the bound transition of P || Q to R using expandTrans. *)
with Exp Phnf Qhnf have "R \<longmapsto>a\<guillemotleft>x\<guillemotright> \<prec> R'" by(cases a, auto simp add: expandTrans)
moreover from Id have "derivative R' R' a x Rel" by(cases a, auto simp add: derivative_def)
ultimately show ?case by blast
next
case(Free \<alpha> R')
have "P \<parallel> Q \<longmapsto>\<alpha> \<prec> R'" by fact
(* Transfer the free transition of P || Q to R using expandTrans. *)
with Exp Phnf Qhnf have "R \<longmapsto>\<alpha> \<prec> R'" by(cases \<alpha>, auto simp add: expandTrans)
moreover from Id have "(R', R') \<in> Rel" by blast
ultimately show ?case by blast
qed
(* The expansion law up to strong bisimilarity: P || Q ~ R whenever R is a
   sum built from the expansion set of P and Q, with P and Q in head normal
   form.  Proved by coinduction on the symmetric closure of the relation,
   discharging the simulation obligations with expandLeft/expandRight. *)
lemma expandSC:
fixes P :: pi
and Q :: pi
and R :: pi
assumes "(R, expandSet P Q) \<in> sumComposeSet"
and "hnf P"
and "hnf Q"
shows "P \<parallel> Q \<sim> R"
proof -
(* Candidate bisimulation: both orientations of the expansion pairs. *)
let ?X = "{(P \<parallel> Q, R) | P Q R. (R, expandSet P Q) \<in> sumComposeSet \<and> hnf P \<and> hnf Q} \<union> {(R, P \<parallel> Q) | P Q R. (R, expandSet P Q) \<in> sumComposeSet \<and> hnf P \<and> hnf Q}"
from assms have "(P \<parallel> Q, R) \<in> ?X" by auto
thus ?thesis
proof(coinduct rule: bisimCoinduct)
case(cSim P Q)
thus ?case
by(blast intro: reflexive expandLeft expandRight)
next
(* ?X is symmetric by construction. *)
case(cSym P Q)
thus ?case by auto
qed
qed
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Pi_Calculus/Strong_Late_Expansion_Law.thy"}
|
(* In this file we explain how to do the "list examples" from the
Chapter on Separation Logic for Sequential Programs in the
Iris Lecture Notes *)
(* Contains definitions of the weakest precondition assertion, and its basic rules. *)
From iris.program_logic Require Export weakestpre.
(* Instantiation of Iris with the particular language. The notation file
contains many shorthand notations for the programming language constructs, and
the lang file contains the actual language syntax. *)
From iris.heap_lang Require Export notation lang.
(* Files related to the interactive proof mode. The first import includes the
general tactics of the proof mode. The second provides some more specialized
tactics particular to the instantiation of Iris to a particular programming
language. *)
From iris.proofmode Require Export tactics.
From iris.heap_lang Require Import proofmode.
(* The following line makes Coq check that we do not use any admitted facts /
additional assumptions not in the statement of the theorems being proved. *)
Set Default Proof Using "Type".
(* ---------------------------------------------------------------------- *)
Section list_model.
(* This section contains the definition of our model of lists, i.e.,
definitions relating pointer data structures to our model, which is
simply mathematical sequences (Coq lists). *)
(* In order to do the proof we need to assume certain things about the
instantiation of Iris. The particular, even the heap is handled in an
analogous way as other ghost state. This line states that we assume the
Iris instantiation has sufficient structure to manipulate the heap, e.g.,
it allows us to use the points-to predicate. *)
Context `{!heapGS Σ}.
Implicit Types l : loc.
(* The variable Σ has to do with what ghost state is available, and the type
of Iris propositions (written Prop in the lecture notes) depends on this Σ.
But since Σ is the same throughout the development we shall define
shorthand notation which hides it. *)
Notation iProp := (iProp Σ).
(* Here is the basic is_list representation predicate:
is_list hd xs holds if hd points to a linked list consisting of
the elements in the mathematical sequence (Coq list) xs.
*)
Fixpoint is_list (hd : val) (xs : list val) : iProp :=
match xs with
| [] => ⌜hd = NONEV⌝
| x :: xs => ∃ l hd', ⌜hd = SOMEV #l⌝ ∗ l ↦ (x,hd') ∗ is_list hd' xs
end%I.
(* The following predicate
is_listP P hd xs
holds if hd points to a linked list consisting of the elements in xs and
each of those elements satisfy P.
*)
Fixpoint is_listP P (hd : val) (xs : list val) : iProp :=
match xs with
| [] => ⌜hd = NONEV⌝
| x :: xs => ∃ l hd', ⌜hd = SOMEV #l⌝ ∗ l ↦ (x,hd') ∗ is_listP P hd' xs ∗ P x
end%I.
(* about_isList expresses how is_listP P hd xs can be seen as a combination of the
basic is_list predicate and the property that P holds for all the elements in xs.
*)
Lemma about_isList P hd xs :
is_listP P hd xs ⊣⊢ is_list hd xs ∗ [∗ list] x ∈ xs, P x.
Proof.
(* Induction on xs, generalizing the head pointer; each direction of the
bi-entailment is proved separately. *)
generalize dependent hd.
induction xs as [| x xs' IHxs]; simpl; intros hd; iSplit.
- eauto.
- by iIntros "(? & _)".
- iDestruct 1 as (l hd') "(? & ? & H & ?)". rewrite IHxs. iDestruct "H" as "(H_isListxs' & ?)".
iFrame. iExists l, hd'. iFrame.
- iDestruct 1 as "(H_isList & ? & H)". iDestruct "H_isList" as (l hd') "(? & ? & ?)".
iExists l, hd'. rewrite IHxs. iFrame.
Qed.
(* The predicate
is_list_nat hd xs
holds if hd is a pointer to a linked list of numbers (integers).
*)
Fixpoint is_list_nat (hd : val) (xs : list Z) : iProp :=
match xs with
| [] => ⌜hd = NONEV⌝
| x :: xs => ∃ l hd', ⌜hd = SOMEV #l⌝ ∗ l ↦ (#x,hd') ∗ is_list_nat hd' xs
end%I.
(* The reverse function on Coq lists is defined in the Coq library. *)
Definition reverse (l : list val) := List.rev l.
(* inj f states that the function f is injective. *)
Definition inj {A B : Type} (f : A -> B) : Prop :=
forall (x y : A),
f x = f y -> x = y.
(* Mapping an injective function over two lists yields equal results only
for equal lists. *)
Lemma map_injective {A B : Type} :
forall xs ys (f : A -> B), inj f -> map f xs = map f ys
-> xs = ys.
Proof.
(* Induction on xs; the cons case peels one element off both maps and
uses injectivity of f on the heads. *)
intros xs. induction xs as [|x xs IHxs]; intros ys f H_f H_map.
- symmetry in H_map. by apply map_eq_nil in H_map.
- destruct ys as [|y ys].
+ by apply map_eq_nil in H_map.
+ specialize (IHxs ys f). inversion H_map as [H_a].
rewrite -> IHxs; try done.
apply H_f in H_a. by rewrite H_a.
Qed.
End list_model.
(* ---------------------------------------------------------------------- *)
Section list_code.
(* This section contains the code of the list functions we specify *)
(* Function inc hd assumes all values in the linked list pointed to by hd
are numbers and increments them by 1, in-place *)
Definition inc : val :=
rec: "inc" "hd" :=
match: "hd" with
NONE => #()
| SOME "l" =>
let: "tmp1" := Fst !"l" in
let: "tmp2" := Snd !"l" in
"l" <- (("tmp1" + #1), "tmp2");;
"inc" "tmp2"
end.
(* Function app l l' appends linked list l' to end of linked list l *)
Definition app : val :=
rec: "app" "l" "l'" :=
match: "l" with
NONE => "l'"
| SOME "hd" =>
let: "tmp1" := !"hd" in
let: "tmp2" := "app" (Snd "tmp1") "l'" in
"hd" <- ((Fst "tmp1"), "tmp2");;
"l"
end.
(* Function rev l acc reverses all the pointers in linked list l and stiches
the accumulator argument acc at the end *)
Definition rev : val :=
rec: "rev" "l" "acc" :=
match: "l" with
NONE => "acc"
| SOME "p" =>
let: "h" := Fst !"p" in
let: "t" := Snd !"p" in
"p" <- ("h", "acc");;
"rev" "t" "l"
end.
(* Function len l returns the lenght of linked list l *)
Definition len : val :=
rec: "len" "l" :=
match: "l" with
NONE => #0
| SOME "p" =>
("len" (Snd !"p") + #1)
end.
(* Function foldr f a l is the usual fold right function for linked list l, with
base value a and combination function f *)
Definition foldr : val :=
rec: "foldr" "f" "a" "l" :=
match: "l" with
NONE => "a"
| SOME "p" =>
let: "hd" := Fst !"p" in
let: "t" := Snd !"p" in
"f" ("hd", ("foldr" "f" "a" "t"))
end.
(* sum_list l returns the sum of the list of numbers in linked list l,
implemented by call to foldr *)
Definition sum_list : val :=
rec: "sum_list" "l" :=
let: "f'" := (λ: "p", let: "x" := Fst "p" in
let: "y" := Snd "p" in
"x" + "y")
in
(foldr "f'" #0 "l").
(* cons x xs allocates a fresh pair node holding x in front of list xs. *)
Definition cons : val := (λ: "x" "xs",
let: "p" := ("x", "xs") in
SOME (Alloc("p"))).
(* empty_list is the representation of the empty linked list. *)
Definition empty_list : val := NONEV.
(* filter prop l is the usual filter function on linked lists, prop is supposed
to be a function from values to booleans. Implemented using foldr. *)
Definition filter : val :=
rec: "filter" "prop" "l" :=
let: "f" := (λ: "p",
let: "x" := Fst "p" in
let: "xs" := Snd "p" in
if: ("prop" "x")
then (cons "x" "xs")
else "xs")
in (foldr "f" empty_list "l").
(* map_list f l is the usual map function on linked lists with f the function
to be mapped over the list l. Implemented using foldr. *)
Definition map_list : val :=
rec: "map_list" "f" "l" :=
let: "f'" := (λ: "p",
let: "x" := Fst "p" in
let: "xs" := Snd "p" in
(cons ("f" "x") "xs"))
in
(foldr "f'" empty_list "l").
(* incr l is another variant of the increment function on linked lists, implemented using map. *)
Definition incr : val :=
rec: "incr" "l" :=
map_list (λ: "n", "n" + #1)%I "l".
End list_code.
(* ---------------------------------------------------------------------- *)
Section list_spec.
(* This section contains the specifications and proofs for the list functions.
The specifications and proofs are explained in the Iris Lecture Notes
*)
Context `{!heapGS Σ}.
(* Specification of inc: given a linked list of integers, inc increments
every element in place and returns the unit value. *)
Lemma inc_spec hd xs :
{{{ is_list_nat hd xs }}}
inc hd
{{{ w, RET w; ⌜w = #()⌝ ∗ is_list_nat hd (List.map Z.succ xs) }}}.
Proof.
iIntros (ϕ) "Hxs H".
iLöb as "IH" forall (hd xs ϕ). wp_rec. destruct xs as [|x xs]; iSimplifyEq.
- wp_match. iApply "H". done.
- iDestruct "Hxs" as (l hd') "(% & Hx & Hxs)". iSimplifyEq.
wp_match. do 2 (wp_load; wp_proj; wp_let). wp_op.
wp_store. iApply ("IH" with "Hxs").
iNext. iIntros (w) "H'". iApply "H". iDestruct "H'" as "[Hw Hislist]".
iFrame. iExists l, hd'. iFrame. done.
Qed.
(* Specification of app: appending list l' (modeling ys) to list l
(modeling xs) yields a list modeling xs ++ ys. *)
Lemma app_spec xs ys (l l' : val) :
{{{ is_list l xs ∗ is_list l' ys }}}
app l l'
{{{ v, RET v; is_list v (xs ++ ys) }}}.
Proof.
iIntros (ϕ) "[Hxs Hys] H".
iLöb as "IH" forall (l xs l' ys ϕ).
destruct xs as [| x xs']; iSimplifyEq.
- wp_rec. wp_let. wp_match. iApply "H". done.
- iDestruct "Hxs" as (l0 hd0) "(% & Hx & Hxs)". iSimplifyEq.
wp_rec. wp_let. wp_match. wp_load. wp_let. wp_proj. wp_bind (app _ _)%E.
iApply ("IH" with "Hxs Hys").
iNext. iIntros (v) "?". wp_let. wp_proj. wp_store. iSimplifyEq. iApply "H".
iExists l0, _. iFrame. done.
Qed.
(* Specification of rev: reversing l (modeling vs) onto accumulator acc
(modeling us) yields a list modeling reverse vs ++ us. *)
Lemma rev_spec vs us (l acc : val) :
{{{ is_list l vs ∗ is_list acc us }}}
rev l acc
{{{ w, RET w; is_list w (reverse vs ++ us) }}}.
Proof.
iIntros (ϕ) "[H1 H2] HL".
iInduction vs as [| v vs'] "IH" forall (acc l us).
- iSimplifyEq. wp_rec. wp_let. wp_match. iApply "HL". done.
- simpl. iDestruct "H1" as (l' t) "(%H & H3 & H1)". wp_rec. wp_let.
rewrite -> H at 1. wp_match. do 2 (wp_load; wp_proj; wp_let).
wp_store. iSpecialize ("IH" $! l t ([v] ++ us)).
iApply ("IH" with "[H1] [H3 H2]").
+ done.
+ simpl. iExists l', acc. iFrame. done.
+ iNext. rewrite -> app_assoc. done.
Qed.
(* Specification of len: len returns the length of the modeled list. *)
Lemma len_spec (l : val) xs :
{{{ is_list l xs }}}
len l
{{{ v, RET v; ⌜v = #(length xs)⌝ }}}.
Proof.
iIntros (ϕ) "Hl H".
iInduction xs as [| x xs'] "IH" forall (l ϕ); iSimplifyEq.
- wp_rec. wp_match. iApply "H". done.
- iDestruct "Hl" as (p hd') "(% & Hp & Hhd')". wp_rec. iSimplifyEq.
wp_match. wp_load. wp_proj. wp_bind (len hd')%I. iApply ("IH" with "[Hhd'] [Hp H]"); try done.
iNext. iIntros. iSimplifyEq. wp_op. iApply "H". iPureIntro. do 2 f_equal. lia.
Qed.
(* The following specifications for foldr are non-trivial because the code is higher-order
and hence the specifications involved nested triples.
The specifications are explained in the Iris Lecture Notes. *)
Lemma foldr_spec_PI P I (f a hd : val) (xs : list val) :
{{{ (∀ (x a' : val) (ys : list val),
{{{ P x ∗ I ys a'}}}
f (x, a')
{{{r, RET r; I (x::ys) r }}})
∗ is_list hd xs
∗ ([∗ list] x ∈ xs, P x)
∗ I [] a
}}}
foldr f a hd
{{{
r, RET r; is_list hd xs
∗ I xs r
}}}.
Proof.
iIntros (ϕ) "(#H_f & H_isList & H_Px & H_Iempty) H_inv".
iInduction xs as [|x xs'] "IH" forall (ϕ a hd); wp_rec; do 2 wp_let; iSimplifyEq.
- wp_match. iApply "H_inv". eauto.
- iDestruct "H_isList" as (l hd') "[% [H_l H_isList]]".
iSimplifyEq.
wp_match. do 2 (wp_load; wp_proj; wp_let).
wp_bind (((foldr f) a) hd').
iDestruct "H_Px" as "(H_Px & H_Pxs')".
iApply ("IH" with "H_isList H_Pxs' H_Iempty [H_l H_Px H_inv]").
iNext. iIntros (r) "(H_isListxs' & H_Ixs')".
iApply ("H_f" with "[$H_Ixs' $H_Px] [H_inv H_isListxs' H_l]").
iNext. iIntros (r') "H_inv'". iApply "H_inv". iFrame.
iExists l, hd'. by iFrame.
Qed.
(* Variant of the foldr specification where P is packaged into the list
predicate via is_listP; derived from foldr_spec_PI via about_isList. *)
Lemma foldr_spec_PPI P I (f a hd : val) (xs : list val) :
{{{ (∀ (x a' : val) (ys : list val),
{{{ P x ∗ I ys a'}}}
f (x, a')
{{{r, RET r; I (x::ys) r }}})
∗ is_listP P hd xs
∗ I [] a
}}}
foldr f a hd
{{{
r, RET r; is_listP (fun x => True) hd xs
∗ I xs r
}}}.
Proof.
iIntros (ϕ) "(#H_f & H_isList & H_Iempty) H_inv".
rewrite about_isList. iDestruct "H_isList" as "(H_isList & H_Pxs)".
iApply (foldr_spec_PI with "[-H_inv]").
- iFrame. by iFrame "H_f".
- iNext. iIntros (r) "(H_isList & H_Ixs)".
iApply "H_inv". iFrame. rewrite about_isList. iFrame. by rewrite big_sepL_forall.
Qed.
(* Specification of sum_list: instantiates foldr_spec_PI with an invariant
tracking the running sum. *)
Lemma sum_spec (hd: val) (xs: list Z) :
{{{ is_list hd (map (fun (n : Z) => #n) xs)}}}
sum_list hd
{{{ v, RET v; ⌜v = # (fold_right Z.add 0 xs)⌝ }}}.
Proof.
iIntros (ϕ) "H_is_list H_later".
wp_rec. wp_pures.
iApply (foldr_spec_PI
(fun x => (∃ (n : Z), ⌜x = #n⌝)%I)
(fun xs' acc => ∃ ys,
⌜acc = #(fold_right Z.add 0 ys)⌝
∗ ⌜xs' = map (fun (n : Z) => #n) ys⌝
∗ ([∗ list] x ∈ xs',∃ (n' : Z), ⌜x = #n'⌝))%I
with "[$H_is_list] [H_later]").
- iSplitR.
+ iIntros (x a' ys). iModIntro. iIntros (ϕ') "(H1 & H2) H3".
do 5 (wp_pure _).
iDestruct "H2" as (zs) "(% & % & H_list)".
iDestruct "H1" as (n2) "%". iSimplifyEq. wp_pures. iModIntro.
iApply "H3". iExists (n2::zs). repeat (iSplit; try done).
by iExists _.
+ iSplit.
* induction xs as [|x xs IHxs]; iSimplifyEq; first done.
iSplit; [iExists _; done | apply IHxs].
* iExists []. eauto.
- iNext. iIntros (r) "(H1 & H2)".
iApply "H_later". iDestruct "H2" as (ys) "(% & % & H_list)".
iSimplifyEq. rewrite (map_injective xs ys (λ n : Z, #n)); try done.
unfold inj. intros x y H_xy. by inversion H_xy.
Qed.
(* Specification of filter: given a predicate implementation p deciding
the boolean function P, filtering returns a list modeling List.filter P xs
while preserving the original list. *)
Lemma filter_spec (hd p : val) (xs : list val) (P : val -> bool) :
{{{ is_list hd xs
∗ (∀ x : val ,
{{{ True }}}
p x
{{{r, RET r; ⌜r = #(P x)⌝ }}})
}}}
filter p hd
{{{ v, RET v; is_list hd xs
∗ is_list v (List.filter P xs)
}}}.
Proof.
iIntros (ϕ) "[H_isList #H_p] H_ϕ". wp_rec. wp_pures.
iApply (foldr_spec_PI (fun x => True)%I
(fun xs' acc => is_list acc (List.filter P xs'))%I
with "[$H_isList] [H_ϕ]").
- iSplitL.
+ iIntros (x a' ?) "!# %ϕ'". iIntros "[_ H_isList] H_ϕ'".
repeat (wp_pure _). wp_bind (p x). iApply "H_p"; first done.
iNext. iIntros (r) "H". iSimplifyEq. destruct (P x); wp_if.
* wp_rec. wp_pures. wp_alloc l. wp_pures. iApply "H_ϕ'".
iExists l, a'. by iFrame.
* by iApply "H_ϕ'".
+ iSplit; last done.
rewrite big_sepL_forall. eauto.
- iNext. iApply "H_ϕ".
Qed.
(* Specification of map_list: f maps P-inputs to Q-outputs; the result is
some list ys of the same length as xs, pointwise Q-related to xs. *)
Lemma map_spec P Q (f hd : val) (xs : list val) :
{{{
is_list hd xs
∗ (∀ (x : val), {{{ P x }}}
f x
{{{r, RET r; Q x r}}})
∗ [∗ list] x ∈ xs, P x
}}}
map_list f hd
{{{
r, RET r; ∃ (ys : list val), is_list r ys
∗ ([∗ list] p ∈ zip xs ys, Q (fst p) (snd p))
∗ ⌜List.length ys = List.length xs⌝
}}}.
Proof.
iIntros (ϕ) "[H_is_list [#H1 H_P_xs]] H_ϕ".
wp_rec. wp_pures.
iApply (foldr_spec_PI
P
(fun xs acc => ∃ ys, (is_list acc ys)
∗ ([∗ list] p ∈ zip xs ys, Q (fst p) (snd p))
∗ ⌜length xs = length ys⌝)%I
with "[H_is_list H1 H_P_xs] [H_ϕ]").
- iSplitR "H_is_list H_P_xs".
+ iIntros (x a' ys). iModIntro. iIntros (ϕ') "(H_Px & H_Q) H_ϕ'". wp_pures.
wp_bind (f x). iApply ("H1" with "[H_Px][H_Q H_ϕ']"); try done.
iNext. iIntros (r) "H_Qr". wp_rec. wp_alloc l. wp_pures. iApply "H_ϕ'".
iDestruct "H_Q" as (ys') "(H_is_list_ys' & H_Qys' & %)". iExists (r::ys').
iSplitR "H_Qr H_Qys'".
* unfold is_list. iExists l, a'. fold is_list. by iFrame.
* iSimplifyEq. iFrame. eauto.
+ iFrame. iExists []. iSplit; by simpl.
- iNext. iIntros (r) "H". iApply "H_ϕ". iDestruct "H" as "(_ & H_Q)".
iDestruct "H_Q" as (ys) "(H_isList & H_Q & %)". iExists ys. by iFrame.
Qed.
(* Auxiliary fact: a list of length S n is non-empty. *)
Lemma about_length {A : Type} (xs : list A) (n : nat) :
length xs = S n -> exists x xs', xs = x::xs'.
Proof.
intro H. induction xs as [| x xs' ].
- inversion H.
- by exists x, xs'.
Qed.
(* Specification of incr, derived from the generic map specification. *)
Lemma inc_with_map hd (xs : list Z) :
{{{ is_list hd (List.map (fun (n : Z) => #n) xs) }}}
incr hd
{{{ v, RET v; is_list v (List.map (fun (n : Z) => #n) (List.map Z.succ xs)) }}}.
Proof.
iIntros (ϕ) "H_isList H_ϕ". wp_rec. wp_pures.
wp_apply (map_spec
(fun x => (∃ (n : Z), ⌜x = #n⌝)%I)
(fun x r => (∃ (n' : Z),
⌜r = #(Z.succ n')⌝
∗ ⌜x = #n'⌝)%I)
with "[$H_isList] [H_ϕ]").
- iSplit. iIntros (x) "!#". iIntros (ϕ') "H1 H2". wp_lam. iDestruct "H1" as (n) "H_x".
iSimplifyEq. wp_binop. iApply "H2". by iExists n.
rewrite big_sepL_fmap. rewrite big_sepL_forall. eauto.
- iNext. iIntros (r) "H". iApply "H_ϕ". iDestruct "H" as (ys) "(H_isList & H_post & H_length)".
iAssert (⌜ys = (List.map (λ n : Z, #n) (List.map Z.succ xs))⌝)%I with "[-H_isList]" as %->.
{ iInduction ys as [| y ys'] "IH" forall (xs); iDestruct "H_length" as %H.
- simpl. destruct xs. by simpl. inversion H.
- rewrite fmap_length in H. symmetry in H. simpl in H.
destruct (about_length _ _ H) as (x & xs' & ->). simpl.
iDestruct "H_post" as "(H_head & H_tail)".
iDestruct "H_head" as (n') "(% & %)". iSimplifyEq.
iDestruct ("IH" with "H_tail []") as %->. by rewrite fmap_length. done.
}
iFrame.
Qed.
End list_spec.
|
{"author": "pavel-ivanov-rnd", "repo": "iris_tutorial", "sha": "da37eb4017e059abd357da1ebf8c0da440efefdc", "save_path": "github-repos/coq/pavel-ivanov-rnd-iris_tutorial", "path": "github-repos/coq/pavel-ivanov-rnd-iris_tutorial/iris_tutorial-da37eb4017e059abd357da1ebf8c0da440efefdc/Coq_examples/lists.v"}
|
"""Match subregions within beat-chroma matrices.
2016-04-09 Dan Ellis dpwe@ee.columbia.edu
"""
"""
Plan:
- read in beat-chroma matrix
- break into 32 beat segments every ?8 beats
- take 2DFTM
- PCA down to ? 8 dimensions
- build (8752*100), 8 matrix = 28 MB of float32
- find closest match to query
"""
import os
import numpy as np
import cPickle as pickle
import numpy.lib.stride_tricks
import sklearn.decomposition
DATA_DIR = '/Users/dpwe/Downloads/prac10/data'
CHROMA_BASE_DIR = os.path.join(DATA_DIR, 'beatchromlabs')
def read_beat_chroma_labels(filename):
    """Load a previously pickled beat-synchronous chroma record.

    Args:
        filename: path to the pickle file written by the feature extractor.

    Returns:
        The (beat_times, chroma_features, label_indices) tuple stored in
        the file.
    """
    with open(filename, "rb") as pickle_file:
        record = pickle.load(pickle_file)
    beat_times, chroma_features, label_indices = record
    return beat_times, chroma_features, label_indices
def read_beat_chroma_labels_for_id(id_):
    """Locate and read the beat-chroma record for a single track id.

    The record is expected at CHROMA_BASE_DIR/<id_>.pkl.
    """
    filename = id_ + ".pkl"
    return read_beat_chroma_labels(os.path.join(CHROMA_BASE_DIR, filename))
def frame_array(data, frame_length=48, frame_hop=8):
    """Return overlapping, zero-copy submatrices ("frames") of a 2-D array.

    Args:
        data: np.array of shape (num_vectors, num_dimensions).  Assumed
            C-contiguous, since the frames are constructed with raw stride
            arithmetic over its buffer.
        frame_length: number of rows in each frame.
        frame_hop: advance, in rows, between successive frame starts.

    Returns:
        data_frames: np.array view of shape (num_frames, frame_length,
            num_dimensions).  No data is copied; frames share memory with
            data.
        frame_starts: np.array giving the row offset of each frame.
    """
    item_bytes = data.itemsize
    num_vectors, num_dimensions = data.shape
    # The last valid frame starts at row (num_vectors - frame_length).
    # np.arange excludes its stop value, so the stop must be one past that
    # start; the previous stop of (num_vectors - frame_length) dropped the
    # final full frame.
    frame_starts = np.arange(0, num_vectors - frame_length + 1, frame_hop)
    num_frames = len(frame_starts)
    data_frames = np.lib.stride_tricks.as_strided(
        data, strides=(frame_hop * num_dimensions * item_bytes,
                       num_dimensions * item_bytes, item_bytes),
        shape=(num_frames, frame_length, num_dimensions))
    return data_frames, frame_starts
def construct_pca_object(ids, num_pcas=20, frame_length=48, frame_hop=8):
    """Fit a whitening PCA to raw 2DFTM features from a set of tracks.

    Args:
        ids: track ids whose frames supply the training data.
        num_pcas: number of principal components to keep.
        frame_length: rows per frame, passed through to the framer.
        frame_hop: rows between frame starts.

    Returns:
        A fitted sklearn.decomposition.PCA object.
    """
    features, _, _ = build_incipit_array(
        ids, pca=None, frame_length=frame_length, frame_hop=frame_hop)
    pca = sklearn.decomposition.PCA(
        n_components=num_pcas, whiten=True, copy=True)
    pca.fit(features)
    return pca
def build_incipit_array(ids_to_process, pca=None, frame_length=48, frame_hop=8):
    """Stack (optionally PCA-projected) 2DFTM features for many tracks.

    Args:
        ids_to_process: track ids to featurize.
        pca: fitted PCA used to project each flattened frame, or None to
            keep the raw magnitude-2DFT features.
        frame_length: rows per chroma frame.
        frame_hop: rows between frame starts.

    Returns:
        features: single np.array of (total_num_frames, feature_dims).
        frame_ids: list where frame_ids[i] is the track id behind row i.
        frame_offsets: list where frame_offsets[i] is the beat offset of
            row i within that track.
    """
    per_track_features = []
    frame_ids = []
    frame_offsets = []
    for track_id in ids_to_process:
        _, chroma, _ = read_beat_chroma_labels_for_id(track_id)
        frames, starts = frame_array(chroma, frame_length, frame_hop)
        # Magnitude of the 2-D Fourier transform of each chroma patch.
        magnitudes = np.abs(np.fft.fft2(frames))
        # Collapse each (frame_length, num_dims) patch to one row.
        flattened = np.reshape(magnitudes, (magnitudes.shape[0], -1))
        if pca is not None:
            flattened = pca.transform(flattened)
        per_track_features.append(flattened)
        frame_ids.extend([track_id] * flattened.shape[0])
        frame_offsets.extend(starts)
    return np.concatenate(per_track_features), frame_ids, frame_offsets
def read_list_file(filename):
    """Read a text file and return its whitespace-stripped lines as a list."""
    with open(filename, 'r') as f:
        return [line.strip() for line in f]
# --- Driver script: build the database of PCA-projected 2DFTM frames. ---
# Read the list of training-track ids from the data directory.
id_list_file = os.path.join(DATA_DIR, 'trainfilelist.txt')
all_ids = read_list_file(id_list_file)
#pca_ids = all_ids[0:-1:10]
# Fit the PCA on every track (a decimated subset is commented out above).
pca_ids = all_ids
frame_length = 16
frame_hop = 4
pca_object = construct_pca_object(pca_ids, num_pcas=16,
frame_length=frame_length,
frame_hop=frame_hop)
# Project all frames of all tracks through the fitted PCA.
# NOTE(review): all_ids is rebound here to the per-frame id list returned
# by build_incipit_array, shadowing the track-id list read above.
all_features, all_ids, all_starts = build_incipit_array(
all_ids, pca_object, frame_length=frame_length, frame_hop=frame_hop)
print("frame_length=", frame_length, "frame_hop=", frame_hop,
"all_features.shape=", all_features.shape)
|
{"hexsha": "9a55cc657196764234575229537f31e356516c75", "size": 3808, "ext": "py", "lang": "Python", "max_stars_repo_path": "prac_matchchroma/match_chroma.py", "max_stars_repo_name": "dpwe/elene4896", "max_stars_repo_head_hexsha": "49e64315b7cd0da45449835d6e73f868e9b4268e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2016-03-05T06:11:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-01T05:58:50.000Z", "max_issues_repo_path": "prac_matchchroma/match_chroma.py", "max_issues_repo_name": "dpwe/elene4896", "max_issues_repo_head_hexsha": "49e64315b7cd0da45449835d6e73f868e9b4268e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "prac_matchchroma/match_chroma.py", "max_forks_repo_name": "dpwe/elene4896", "max_forks_repo_head_hexsha": "49e64315b7cd0da45449835d6e73f868e9b4268e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2016-02-22T14:18:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T02:58:33.000Z", "avg_line_length": 34.3063063063, "max_line_length": 80, "alphanum_fraction": 0.7240021008, "include": true, "reason": "import numpy", "num_tokens": 977}
|
"""Temperature convertor between Celcius, Fahrenheit and Kelvin"""
# This code is part of a class assignment for ATMS 597, Spring 2020,
# at the University of Illinois at Urbana Champaign.
# Use this class function to convert temperature data from already
# known units to different units.
# It supports conversion to Celcius, Fahrenheit and Kelvin.
import numpy as np
class TempConvert:
"""Temperature convertor for numpy arrays and lists"""
# mem attribute will keep memory of the input data type
# Initialized as 0, it will become 1 if list is input
mem = 0
def __init__(self, in_temp, unit):
"""Create a temperature object.
The input is assigned to the object as class instance. Method calls
are used to convert to desired units and retrieve output.
**Arguments:**
*in_temp*
Input temperature. This can be numpy ndarray, list, float, integer,
or string. Missing values are not permitted.
*units*
Input units. The units of temperature must be known,
and appropriate method be used to convert to desired units.
Celsius: degC, C, or c
Fahrenheit: degF, F, or f
Kelvin: kelvin, K, k
**Returns:**
*temp*
A temperature convertor instance
**Example:**
If in_temp is a numpy ndarray or a list:
from tempconvert import tempconvert
temperature = tempconvert(0, 'degC').to('degF')
"""
# check if the input is a list or numpy array
if isinstance(in_temp, list):
# store as a numpy array in an instance variable
self.__temp = np.asarray(in_temp)
# change mem to 1
self.mem = 1
elif isinstance(in_temp, np.ndarray):
# store numpy array in an instance variable
self.__temp = in_temp
# Check to see if it is an integer
elif isinstance(in_temp, int):
# store integer in an instance variable
self.__temp = in_temp
# Check to see if it is a float
elif isinstance(in_temp, float):
# store float in an instance variable
self.__temp = in_temp
elif isinstance(in_temp, str):
# store float in an instance variable
self.__temp = float(in_temp)
else:
raise Exception('Input not recognized')
# Bring in the units and assign it to self.units
self.unit = unit
def _c2f(self):
"""Convert temperature from Celcius to Fahrenheit.
**Returns:**
*temp*
A temperature convertor instance, in degrees Fahrenheit.
**Example:**
If in_temp is in degrees Celcius:
from tempconvert import tempconvert
output = tempconvert(in_temp).C2F()
"""
# modify the .__temp attribute of object directly
self.__temp = (self.__temp*9/5) + 32
# return the same numerical type as the input
if self.mem > 0:
return self.__temp.tolist()
else:
return self.__temp
def _f2c(self):
"""Convert temperature from Fahrenheit to Celcius.
**Returns:**
*temp*
A temperature convertor instance, in degrees Celcius.
**Example:**
If in_temp is in degrees Fahrenheit:
from tempconvert import tempconvert
output = tempconvert(in_temp).F2C()
"""
# modify the .__temp attribute of object directly
self.__temp = (self.__temp-32) * (5/9)
# return the same numerical type as the input
if self.mem > 0:
return self.__temp.tolist()
else:
return self.__temp
def _c2k(self):
"""Convert temperature from Celcius to Kelvin.
**Returns:**
*temp*
A temperature convertor instance, in degrees Kelvin.
**Example:**
If in_temp is in degrees Celcius:
from tempconvert import tempconvert
output = tempconvert(in_temp).C2K()
"""
# modify the .__temp attribute of object directly
self.__temp = self.__temp + 273.15
# return the same numerical type as the input
if self.mem > 0:
return self.__temp.tolist()
else:
return self.__temp
def _k2c(self):
"""Convert temperature from Kelvin to Celcius.
**Returns:**
*temp*
A temperature convertor instance, in degrees Celcius.
**Example:**
If in_temp is in degrees Kelvin:
from tempconvert import tempconvert
output = tempconvert(in_temp).K2C()
"""
# modify the .__temp attribute of object directly
self.__temp = self.__temp - 273.15
# return the same numerical type as the input
if self.mem > 0:
return self.__temp.tolist()
else:
return self.__temp
def _f2k(self):
"""Convert temperature from Fahrenheit to Kelvin.
**Returns:**
*temp*
A temperature convertor instance, in degrees Kelvin.
**Example:**
If in_temp is in degrees Fahrenheit:
from tempconvert import tempconvert
output = tempconvert(in_temp).F2K()
"""
# modify the .__temp attribute of object directly
self.__temp = (self.__temp-32) * (5/9) + 273.15
# return the same numerical type as the input
if self.mem > 0:
return self.__temp.tolist()
else:
return self.__temp
def _k2f(self):
"""Convert temperature from Kelvin to Fahrenheit.
**Returns:**
*temp*
A temperature convertor instance, in degrees Fahrenheit.
**Example:**
If in_temp is in degrees Kelvin:
from tempconvert import tempconvert
output = tempconvert(in_temp).K2F()
"""
# modify the .__temp attribute of object directly
self.__temp = (self.__temp - 273.15) * (9/5) + 32
# return the same numerical type as the input
if self.mem > 0:
return self.__temp.tolist()
else:
return self.__temp
def to(self, out_unit):
"""
Convert temperature from one unit to another unit defined by the user.
**Arguments**
*out_unit*
Unit that the temperature will be converted to. As a string
**Returns**
*Temperature*
Value of new temperature in same data type as was entered
"""
# Handle input temperature in Fahrenheit
if (self.unit == 'degF') or (self.unit == 'F') or (self.unit == 'f'):
# Check if out_unit is Celsius
if (out_unit == 'degC') or (out_unit == 'C') or (out_unit == 'c'):
self.__temp = self._f2c()
# Check to see if out_unit is Kelvin
elif (out_unit == 'kelvin') or (out_unit == 'K') or (out_unit == 'k'):
self.__temp = self._f2k()
elif (out_unit == 'degF') or (out_unit == 'F') or (out_unit == 'f'):
self.__temp = self.__temp
# Handle input temperature in Celsius
if (self.unit == 'degC') or (self.unit == 'C') or (self.unit == 'c'):
# Check if out_unit is Celsius
if (out_unit == 'degC') or (out_unit == 'C') or (out_unit == 'c'):
self.__temp = self.__temp
# Check to see if out_unit is Kelvin and convert
elif (out_unit == 'kelvin') or (out_unit == 'K') or (out_unit == 'k'):
self.__temp = self._c2k()
# Check to see if out_unit is Fahrenheit and convert
elif (out_unit == 'degF') or (out_unit == 'F') or (out_unit == 'f'):
self.__temp = self._c2f()
# Handle input temperature in Kelvin
elif (self.unit == 'kelvin') or (self.unit == 'K') or (self.unit == 'k'):
# Check if out_unit is Celsius
if (out_unit == 'degC') or (out_unit == 'C') or (out_unit == 'c'):
self.__temp = self._k2c()
# Check to see if out_unit is Kelvin and convert
elif (out_unit == 'kelvin') or (out_unit == 'K') or (out_unit == 'k'):
self.__temp = self.__temp
# Check to see if out_unit is Fahrenheit and convert
elif (out_unit == 'degF') or (out_unit == 'F') or (out_unit == 'f'):
self.__temp = self._k2f()
return self.__temp
|
{"hexsha": "ae7cc4df61a6796236c00ead33e9851eb57f0b17", "size": 8723, "ext": "py", "lang": "Python", "max_stars_repo_path": "ATMS-597-SP-2020-Project-1/modules/tempconvert.py", "max_stars_repo_name": "szymbor2/ATMS-597-SP-2020", "max_stars_repo_head_hexsha": "9d55e93d8c1ee0980181421d6b430a7876e0dcbb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ATMS-597-SP-2020-Project-1/modules/tempconvert.py", "max_issues_repo_name": "szymbor2/ATMS-597-SP-2020", "max_issues_repo_head_hexsha": "9d55e93d8c1ee0980181421d6b430a7876e0dcbb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-01-23T20:03:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-30T22:52:25.000Z", "max_forks_repo_path": "ATMS-597-SP-2020-Project-1/modules/tempconvert.py", "max_forks_repo_name": "szymbor2/ATMS-597-SP-2020", "max_forks_repo_head_hexsha": "9d55e93d8c1ee0980181421d6b430a7876e0dcbb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-01-23T22:14:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-26T17:35:11.000Z", "avg_line_length": 31.2652329749, "max_line_length": 82, "alphanum_fraction": 0.5597844778, "include": true, "reason": "import numpy", "num_tokens": 2008}
|
[STATEMENT]
lemma in_Def_valid_SDG_node:
"V \<in> Def\<^bsub>SDG\<^esub> n \<Longrightarrow> valid_SDG_node n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. V \<in> Def\<^bsub>SDG\<^esub> n \<Longrightarrow> valid_SDG_node n
[PROOF STEP]
by(induct rule:SDG_Def.induct,auto intro:valid_SDG_CFG_node)
|
{"llama_tokens": 133, "file": "HRB-Slicing_StaticInter_SDG", "length": 1}
|
import tensorflow as tf
import numpy as np
# feature_map = feature_inference(image_tensor)
# rpn_loss_bbox_tensor = rpn_loss_bbox(feature_map, im_info_tensor, boxes_tensor)
# rpn_cls_loss()
# rpn_rois_tensor = rpn_rois()
# roi_pool_tensor = roi_pool(rpn_rois_tensor)

# Smoke test: feed square all-ones arrays of growing size through a
# tf.Print node so each value is echoed to stderr.
x = tf.placeholder(dtype=tf.float32)
p_op = tf.Print(x, [x])
# initialize_all_variables() has been deprecated since TF 0.12;
# global_variables_initializer() is the supported replacement.
init_op = tf.global_variables_initializer()
# Use the session as a context manager so it is always closed
# (the original leaked the session).
with tf.Session() as sess:
    sess.run(init_op)
    for i in range(10):
        value = np.ones((i, i))
        sess.run(p_op, feed_dict={x: value})
|
{"hexsha": "fc660f29e41a5a24f7a26c8a3cdf2433c6f84b88", "size": 498, "ext": "py", "lang": "Python", "max_stars_repo_path": "image_interpreter/train.py", "max_stars_repo_name": "ThoughtWorksInc/tf-image-intepreter", "max_stars_repo_head_hexsha": "113fc808a081984c8be4814bc3403b908bb6b2c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "image_interpreter/train.py", "max_issues_repo_name": "ThoughtWorksInc/tf-image-intepreter", "max_issues_repo_head_hexsha": "113fc808a081984c8be4814bc3403b908bb6b2c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "image_interpreter/train.py", "max_forks_repo_name": "ThoughtWorksInc/tf-image-intepreter", "max_forks_repo_head_hexsha": "113fc808a081984c8be4814bc3403b908bb6b2c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.652173913, "max_line_length": 81, "alphanum_fraction": 0.7449799197, "include": true, "reason": "import numpy", "num_tokens": 139}
|
"""
This is a demo of VQE through the forest stack. We will do the H2 binding from the Google paper using
OpenFermion to generate Hamiltonians and Forest to simulate the system
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize # for real runs I recommend using ADAM optimizer because momentum helps with noise
from openfermionpsi4 import run_psi4
from openfermion.hamiltonians import MolecularData
from openfermion.transforms import symmetry_conserving_bravyi_kitaev, get_fermion_operator
from openfermion.utils import uccsd_singlet_get_packed_amplitudes
from forestopenfermion import qubitop_to_pyquilpauli
from pyquil.quil import Program
from pyquil.paulis import sX, sY, exponentiate, PauliSum
from pyquil.gates import X, I
from pyquil.api import QVMConnection
from pyquil.unitary_tools import tensor_up
from grove.measurements.estimation import estimate_locally_commuting_operator
def get_h2_dimer(bond_length):
    """Run a Psi4 calculation for molecular hydrogen at a given bond length.

    :param bond_length: H-H separation (the second atom is placed on the
        z axis at this distance).
    :return: the populated openfermion MolecularData object; SCF, CCSD and
        FCI are computed while MP2 and CISD are skipped.
    """
    # STO-3G singlet H2 with zero net charge, aligned along z.
    geometry = [('H', [0.0, 0.0, 0.0]), ('H', [0.0, 0.0, bond_length])]
    molecule = MolecularData(geometry, 'sto-3g', 1, 0)
    # Keep the scratch/data file next to the script instead of in the
    # default openfermion data directory.
    molecule.filename = "./" + molecule.filename.split('/')[-1]
    return run_psi4(molecule,
                    run_scf=True,
                    run_mp2=False,
                    run_cisd=False,
                    run_ccsd=True,
                    run_fci=True)
def ucc_circuit(theta):
    """Build the UCC ansatz circuit implementing exp(-i theta X_{0}Y_{1}).

    :param theta: rotation parameter
    :return: pyquil.Program preparing the ansatz state
    """
    # Reference state: flip qubits 1 and 0 out of |00>.
    circuit = Program().inst(X(1), X(0))
    # float() is required because pyquil has weird casting behavior.
    circuit += exponentiate(float(theta) * (sX(0) * sY(1)))
    return circuit
def objective_fun(theta, hamiltonian=None, quantum_resource=None):
    """
    Evaluate the Hamiltonian expectation value by operator averaging.

    :param theta: UCC rotation parameter passed to ucc_circuit.
    :param hamiltonian: a pyquil PauliSum (estimated by sampling) or a
        dense numpy matrix (evaluated on the simulated wavefunction);
        ``None`` means the identity, whose expectation is 1.
    :param quantum_resource: QVMConnection to use.  When omitted, a
        connection to ``http://localhost:5000`` is created lazily.
        (The original constructed QVMConnection() in the default argument,
        which ran at import time and shared one connection across all
        calls -- a side-effectful default argument.)
    :return: the real expectation value.
    """
    if hamiltonian is None:
        # Hamiltonian is Identity
        return 1.0
    if quantum_resource is None:
        quantum_resource = QVMConnection(endpoint='http://localhost:5000')
    if isinstance(hamiltonian, PauliSum):
        result = estimate_locally_commuting_operator(
            ucc_circuit(theta), hamiltonian, 1.0E-6,
            quantum_resource=quantum_resource)
        # first output is expected value, second is variance, third is shots
        result = result[0][0].real
    elif isinstance(hamiltonian, np.ndarray) and isinstance(quantum_resource, QVMConnection):
        # Dense-matrix path: <psi| H |psi> on the simulated wavefunction.
        wf = quantum_resource.wavefunction(ucc_circuit(theta))
        wf = wf.amplitudes.reshape((-1, 1))
        result = np.conj(wf).T.dot(hamiltonian).dot(wf)[0, 0].real
        print(result)
    else:
        raise TypeError("type of hamiltonian or qvm is unrecognized")
    return result
if __name__ == "__main__":
    # Scan the H2 binding curve: for each bond length, build the qubit
    # Hamiltonian, seed the UCC parameter from classical CCSD, then
    # minimize the expectation value with a classical optimizer.
    qvm = QVMConnection(endpoint='http://localhost:5000')
    bond_length = np.linspace(0.25, 3, 30)
    ucc_energy = []
    fci_energy = []
    hf_energy = []
    for rr in bond_length:
        molecule = get_h2_dimer(rr)
        hamiltonian = molecule.get_molecular_hamiltonian()
        # Reduce 4 spin-orbitals to 2 qubits via the symmetry-conserving
        # Bravyi-Kitaev transform (4 modes, 2 electrons).
        bk_hamiltonian = symmetry_conserving_bravyi_kitaev(get_fermion_operator(hamiltonian), 4, 2)
        # generate the spin-adapted classical coupled-cluster amplitude to use as the input for the
        # circuit
        packed_amps = uccsd_singlet_get_packed_amplitudes(molecule.ccsd_single_amps, molecule.ccsd_double_amps,
                                                          molecule.n_qubits, molecule.n_electrons)
        theta = packed_amps[-1]  # always take the doubles amplitude
        # now that we're done setting up the Hamiltonian and grabbing initial opt parameters
        # we can switch over to how to run things
        ucc_program = ucc_circuit(theta)
        paulis_bk_hamiltonian = qubitop_to_pyquilpauli(bk_hamiltonian)
        # Dense 2-qubit matrix of the Hamiltonian, for exact evaluation.
        bk_mat = tensor_up(paulis_bk_hamiltonian, [0, 1])
        w, v = np.linalg.eigh(bk_mat)
        # Energy at the CCSD-seeded theta, before optimization.
        wf = qvm.wavefunction(ucc_program)
        wf = wf.amplitudes.reshape((-1, 1))
        tenergy = np.conj(wf).T.dot(bk_mat).dot(wf)[0, 0].real
        # observable = objective_fun(theta, hamiltonian=paulis_bk_hamiltonian, quantum_resource=qvm)
        observable = objective_fun(theta, hamiltonian=bk_mat, quantum_resource=qvm)
        result = minimize(objective_fun, x0=theta, args=(bk_mat, qvm), method='CG',
                          options={'disp': True})
        ucc_energy.append(result.fun)
        fci_energy.append(molecule.fci_energy)
        hf_energy.append(molecule.hf_energy)
        # exact ground state, FCI, pre-opt energy, optimized VQE energy
        print(w[0], molecule.fci_energy, tenergy, result.fun)
    plt.plot(bond_length, hf_energy, 'C1o-', label='HF')
    plt.plot(bond_length, ucc_energy, 'C0o-', label='UCC-VQE')
    plt.plot(bond_length, fci_energy, 'k-', label='FCI')
    plt.xlabel(r'Bond Distance [$\AA$]', fontsize=14)
    plt.ylabel('Energy [Hartree]', fontsize=14)
    plt.legend(loc='upper right', fontsize=13)
    plt.tight_layout()
    plt.show()
|
{"hexsha": "f3abd9f4854d9e5f301ab65f3b9dbf3a9686a1b7", "size": 5184, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/1.3_vqe_demo.py", "max_stars_repo_name": "kalzoo/pyquil", "max_stars_repo_head_hexsha": "f37d55acb906a02c0f3320ee3990e9051ee64145", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/1.3_vqe_demo.py", "max_issues_repo_name": "kalzoo/pyquil", "max_issues_repo_head_hexsha": "f37d55acb906a02c0f3320ee3990e9051ee64145", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/1.3_vqe_demo.py", "max_forks_repo_name": "kalzoo/pyquil", "max_forks_repo_head_hexsha": "f37d55acb906a02c0f3320ee3990e9051ee64145", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0285714286, "max_line_length": 130, "alphanum_fraction": 0.6736111111, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1349}
|
#!/usr/bin/env python
import os, sys, glob
sys.path.append('../')
import scipy
from scipy.linalg import pinv
import numpy as np
import matplotlib
# The backend must be selected *before* pyplot is first imported
# (``from pylab import *`` below pulls in pyplot).  In the original the
# matplotlib.use('Agg') call came after the pylab import, which is too
# late for it to take effect on a headless machine.
matplotlib.use('Agg')
from pylab import *
from RateSpecClass import *
from RateSpecTools import *
from PlottingTools import *
import matplotlib.pyplot as plt
def analyticSpectrum(Rates, beta, tau=1.0, du=0.01, maxval=100.0):
    """Return a numerically integrated "analytic" rate spectrum for a
    stretched exponential, following Berberan-Santos et al. (ChemPhys 2005):

        H(k) = tau/pi * \\int_0^inf exp(-k tau u)
                         * exp(-u^beta cos(beta pi)) * sin(u^beta sin(beta pi)) du

    (for k < 1 the equivalent cosine form with beta*pi/2 is used).  The
    integral is evaluated as a Riemann sum with step ``du`` on [0, maxval).

    Parameters:
        Rates   -- iterable of rate constants k
        beta    -- stretching exponent
        tau     -- characteristic time (default 1.0)
        du      -- integration step (default 0.01)
        maxval  -- upper integration limit (default 100.0)
    Returns a numpy array of H(k), one entry per rate.
    """
    u = np.arange(0., maxval, du)
    # Single pre-formatted strings keep these prints valid in both
    # Python 2 and 3 (the original used Python-2-only print statements).
    print('Summing over %d values for beta = %s ...' % (len(u), beta))
    print('du %s' % du)
    # Hoist the k-independent factors out of the loop over rates.
    ub = u ** beta
    kernel_hi = np.exp(-ub * np.cos(beta * np.pi)) * np.sin(ub * np.sin(beta * np.pi))
    decay_lo = np.exp(-ub * np.cos(beta * np.pi / 2.0))
    phase_lo = ub * np.sin(beta * np.pi / 2.0)
    A = []
    for k in Rates:
        if k >= 1.0:
            H_k = tau / np.pi * du * np.sum(np.exp(-1.0 * k * tau * u) * kernel_hi)
        else:
            H_k = tau / np.pi * du * np.sum(decay_lo * np.cos(phase_lo - k * tau * u))
        A.append(H_k)
    return np.array(A)
def addAxes(pos):
    """Add an axes for the panel at ``pos`` and return its handle.

    ``pos`` is [bottom-left x, bottom-left y, width, height] in figure
    fractions; the rectangle is shrunk by the module-level ``margins``
    (left, right, bottom, top) and added to the module-level ``fig``.
    """
    left = pos[0] + margins[0]
    bottom = pos[1] + margins[2]
    width = pos[2] - margins[0] - margins[1]
    height = pos[3] - margins[2] - margins[3]
    return fig.add_axes((left, bottom, width, height))
# Default plotting parameters
# (wrapped in a dummy `if (1):` so the whole rc block can be toggled off)
if (1):
    plt.rc('figure', figsize=(6.5, 6.5)) # in inches
    plt.rc('figure.subplot', left=0.125, right=0.9, bottom=0.1, top=0.90)
    plt.rc('lines', linewidth=0.5, markersize=2)
    plt.rc('font', size=8.0)
    plt.rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
    plt.rc('xtick', labelsize='small')
    plt.rc('ytick', labelsize='small')
    plt.rc('legend', fontsize='medium')
# Define workspace for all panels: a 4x4 grid of equally sized panels,
# each entry is [bottom-left x, bottom-left y, width, height] in figure
# fraction coordinates, filled row by row from the top.
panelpos = []
# first (top) row of four panels
panelpos.append( [0, 0.75, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.25, 0.75, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.5, 0.75, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.75, 0.75, 0.25, 0.25] ) # bottom left corner x, y; width, height
# second row of four panels
panelpos.append( [0, 0.5, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.25, 0.5, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.5, 0.5, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.75, 0.5, 0.25, 0.25] ) # bottom left corner x, y; width, height
# third row of four panels
panelpos.append( [0, 0.25, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.25, 0.25, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.5, 0.25, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.75, 0.25, 0.25, 0.25] ) # bottom left corner x, y; width, height
# fourth (bottom) row of four panels
panelpos.append( [0, 0.0, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.25, 0.0, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.5, 0.0, 0.25, 0.25] ) # bottom left corner x, y; width, height
panelpos.append( [0.75, 0.0, 0.25, 0.25] ) # bottom left corner x, y; width, height
# define margins within each panel
margins = [0.06, 0.02, 0.07, 0.02] # for each panel (left, right, bottom, top)
fig = plt.figure(1)
plt.clf()
plt.clf()
#### Filenames ###
outpdffile = 'Figure3.pdf'
sigma = 0.050
betas = [0.3, 0.5, 0.7, 0.9]
priors = ['Lridge', 'Llasso', 'Lenet']
for j in range(len(priors)):
prior = priors[j]
ratespec_files = ['data/stretched.sigma0.0500.beta%3.2f.dat.nRates100.%s.ratespec.dat'%(beta,prior) for beta in betas]
mctraj_files = ['data/stretched.sigma0.0500.beta%3.2f.dat.nRates100.%s.mctraj.dat'%(beta,prior) for beta in betas]
dat_files = ['data/stretched.sigma0.0500.beta%3.2f.dat'%beta for beta in betas]
for i in range(len(ratespec_files)):
ratespec_filename = ratespec_files[i]
mctraj_filename = mctraj_files[i]
if (prior == 'Lenet') and (betas[i] == 0.5):
ratespec_filename = ratespec_filename.replace('0.30', '0.30.noscale').replace('0.50', '0.50.noscale')
mctraj_filename = mctraj_filename.replace('0.30', '0.30.noscale').replace('0.50', '0.50.noscale')
try:
data = scipy.loadtxt(dat_files[i])
Times = data[:,0]*1.0e-6 # convert from us to seconds
Values = data[:,1]
mctraj_data = scipy.loadtxt(mctraj_filename) # step w sigma tau neglogP
ratespec_data = scipy.loadtxt(ratespec_filename)
Timescales = ratespec_data[:,0]
maxLikA = ratespec_data[:,1]
meanA = ratespec_data[:,2]
stdA = ratespec_data[:,3]
ci_5pc = ratespec_data[:,4]
ci_95pc = ratespec_data[:,5]
# plot timecourse + with noise
if j == 0:
ax = addAxes(panelpos[i])
plt.plot(Times, Values, 'b.', markersize=2)
hold(True)
Rates = rangeLog(1.0e-3, 1.e3, 100)
FitData = SumSpectra(meanA, Rates, Times)
plt.plot(Times, FitData, 'k-')
hold(True)
ax.set_xscale('log')
plt.axis([min(Times), max(Times), -0.2, 1.2])
xlabel('time (s)')
#ax.set_xticks([1.0e-3, 1.0, 1.0e-3])
# plot mean +/- std spectrum
ax = addAxes(panelpos[i+(j+1)*len(betas)])
#matplotlib.pyplot.errorbar(Timescales, meanA, yerr=stdA)
PlotStd = False
plot(Timescales, meanA, 'k-', linewidth=1)
hold(True)
if PlotStd:
plot(Timescales, meanA+stdA, 'k-', linewidth=0.5)
hold(True)
plot(Timescales, meanA-stdA, 'k-', linewidth=0.5)
else:
plot(Timescales, ci_5pc, 'k-', linewidth=0.5)
hold(True)
plot(Timescales, ci_95pc, 'k-', linewidth=0.5)
ax.set_xscale('log')
xlabel('timescale (s)')
ax.set_xticks([1.0e-9, 1.0e-6, 1.0e-3, 1.])
hold(True)
Rates = 1./Timescales
A_anal = analyticSpectrum(Rates, betas[i], tau=1.0, du=0.01, maxval=1000.0)
dRates = Rates[1:] - Rates[0:-1]
A_anal = A_anal[0:-1]*dRates
TimescaleSpectrum(Timescales[0:-1], A_anal, timeunit='s', linestyle='r-') # plot as a function of rates
#ax.set_xscale('linear')
#axis([0, 1, A_anal.min(), A_anal.max()])
except:
print 'Problems with prior', prior, 'ratespec_filename', ratespec_filename
plt.savefig(outpdffile, format='pdf')
#show()
|
{"hexsha": "b819c564f81230e4b0e588de5e7931b6d9ea9d3d", "size": 6415, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/Figure3/plotFigure3.py", "max_stars_repo_name": "vvoelz/ratespec", "max_stars_repo_head_hexsha": "fa76c867cf9485799c7075f69f3385d95eb0e797", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/Figure3/plotFigure3.py", "max_issues_repo_name": "vvoelz/ratespec", "max_issues_repo_head_hexsha": "fa76c867cf9485799c7075f69f3385d95eb0e797", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/Figure3/plotFigure3.py", "max_forks_repo_name": "vvoelz/ratespec", "max_forks_repo_head_hexsha": "fa76c867cf9485799c7075f69f3385d95eb0e797", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.398989899, "max_line_length": 148, "alphanum_fraction": 0.6233826968, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 2300}
|
import asyncio
import glob
import os
from os.path import join
import cv2
import numpy as np
import pydicom as pyd
import pyinotify
from keras.models import load_model
def make_predictition(image, model_path='/home/haimin/PycharmProjects/Tensorflow/ddsm_YaroslavNet_s10.h5'):
    """Run the Keras model on a single DICOM file and return the raw prediction.

    The pixel data is scaled by 1/14.5 (presumably to normalize the raw
    detector range -- TODO confirm), resized to 896x1152 and given batch
    and channel dimensions before being fed to the model.  Note the model
    is loaded from disk on every call.
    """
    pixels = pyd.dcmread(image).pixel_array / 14.5
    pixels = cv2.resize(pixels, (896, 1152), interpolation=cv2.INTER_AREA)
    pixels = pixels[np.newaxis, ..., np.newaxis]
    return load_model(model_path).predict(pixels)
def process_to_file(folder, img):
    """Append the model prediction for ``img`` to the folder's results file.

    The results file is named after the last path component of ``folder``
    and lives inside ``folder`` itself, one prediction per line.
    """
    csv_name = join(folder, folder.split('/')[-1])
    predict = str(make_predictition(img)[0][0])
    # Keep the original log messages, but do the write once: append mode
    # creates the file when it does not exist, so the duplicated
    # write/append branches of the original collapse into one.
    if os.path.isfile(csv_name):
        print('add result to file')
    else:
        print('create file for writing predict results')
    with open(csv_name, 'a') as csv_file:
        csv_file.write(predict)
        csv_file.write('\n')
def add_tag(image, tag):
    # Write ``tag`` into several DICOM header fields and save the file
    # back IN PLACE.  (The old comment claimed the file was saved to
    # another "target" folder -- nothing is moved; the file is
    # overwritten where it is.)
    print(image, 'from add tag')
    img = pyd.dcmread(image)
    img.AccessionNumber = tag
    img.ImageComments = tag
    img.StudyID = tag
    pyd.dcmwrite(image, img)
    print('tag added {} to {}'.format(tag, image))
def add_tag_to_one_folder(folder, folder_name, tag1, tag2):
    """Tag every DICOM image in ``folder`` based on the stored predictions.

    Reads the predictions file ``folder/folder_name`` (one float per
    line); if the minimum prediction is below 0.6 every DICOM file gets
    ``tag1``, otherwise ``tag2``.  RAW images are skipped via isdicom().

    NOTE(review): whether RAW images should also be tagged is an open
    question inherited from the original code.
    """
    predict_list = []
    print(folder + '/' + folder_name, '\n'*3, '=)=(=='*10)
    with open(folder + '/' + folder_name, 'r') as f:
        for row in f:
            predict_list.append(float(row))
    if min(predict_list) < 0.6:
        print('mini predict is: {}, add tag: {}'.format(min(predict_list), tag1))
        tag = tag1
    else:
        tag = tag2
    # A single walk replaces the two duplicated loops of the original,
    # which differed only in which tag they applied.
    for root, _, files in os.walk(folder):
        for f in files:
            if isdicom(join(root, f)):
                print(f, tag, '---- add tag to folder')
                add_tag(join(root, f), tag)
def isdicom(file_path):
    """Return True when ``file_path`` is a readable DICOM whose first
    ImageType entry is not 'ORIGINAL' (i.e. not a RAW acquisition).

    Any read error (missing file, not a DICOM, ...) is treated as
    "not usable" and yields False.
    """
    try:
        image_type = pyd.dcmread(file_path).ImageType[0]
        if image_type == 'ORIGINAL':
            print('RAW image acepted')
            return False
        return True
    except Exception:
        return False
def send_folder_to_pacs(folder):
    """Build a ``dcmsend`` command for every DICOM two levels below
    ``folder``.

    The actual send (``os.system``) is currently commented out, so this
    only logs the files that would be transferred.
    """
    for dcm_path in glob.glob(folder + '/*/*/*.dcm'):
        command = 'dcmsend 3.120.139.162 4242 ' + dcm_path
        print(dcm_path, '-- sended to pask')
        # os.system(command)
class EventHandler(pyinotify.ProcessEvent):
    """pyinotify handler: score each incoming DICOM, then tag and forward
    a study folder once it looks complete.

    NOTE(review): the completion logic assumes a study normally contains
    4 usable images (see the ``image_count == 4`` branch) -- confirm
    against the acquisition workflow.
    """
    def my_init(self, work_folder='/incoming/data'):
        # pyinotify's ProcessEvent.__init__ forwards constructor keyword
        # arguments to my_init().
        self.last_uid = None  # NOTE(review): never read or written below
        self.work_folder = work_folder
        self.last_listdir = None  # folder the previous event belonged to
        self.proc_list = []  # folders already seen/processed
        self.image_count = 0  # usable images handled in the current folder
    def process_IN_CLOSE_WRITE(self, event):
        # Only react to regular files that are usable (non-RAW) DICOMs.
        if not event.dir and isdicom(event.pathname):
            # remember current folder
            # print(event.__dict__)
            # print(event.path.split('/')[3])
            # The study folder name is the 4th path component; this
            # assumes events arrive under /incoming/data/<study>/...
            current_dir = event.path.split('/')[3]
            if self.last_listdir == None: # if work just start
                process_to_file(join(self.work_folder, current_dir), event.pathname)
                self.proc_list.append(current_dir)
                self.last_listdir = current_dir
                self.image_count += 1
                print(self.image_count, 'with None')
            # Another image for the folder currently being processed.
            elif self.last_listdir == current_dir and self.image_count !=0:
                process_to_file(join(self.work_folder, current_dir), event.pathname)
                self.image_count += 1
                print(self.image_count, 'inside loop')
                # print(event.name)
                if current_dir not in self.proc_list:
                    self.proc_list.append(current_dir)
            elif self.image_count == 4:
                # check if folder new or already 4 image processed
                # print('\n'*3, 'proc_list: ', self.proc_list, '\n', current_dir, '\n'*2, self.last_listdir)
                # Folder complete: tag according to predictions and ship.
                self.image_count = 0
                add_tag_to_one_folder(join(self.work_folder, self.last_listdir), self.last_listdir, '111', '000')
                send_folder_to_pacs(join(self.work_folder, self.last_listdir))
                print('send to pacs finished', self.last_listdir)
                print(self.image_count, 'before reset')
                #process_to_file(join('/incoming/data', current_dir), event.pathname)
                #self.image_count += 1
                print(self.image_count, 'after reset')
                #self.last_listdir = current_dir
                #self.proc_list.append(current_dir)
            # A new, not-yet-seen folder started arriving.
            elif self.last_listdir != current_dir and current_dir not in self.proc_list:
                # check if folder new or already 4 image processed
                # print('\n'*3, 'proc_list: ', self.proc_list, '\n', current_dir, '\n'*2, self.last_listdir)
                if self.last_listdir not in self.proc_list:
                    #work in case if series have only 2 images
                    add_tag_to_one_folder(join(self.work_folder, self.last_listdir), self.last_listdir, '111', '000')
                    send_folder_to_pacs(join(self.work_folder, self.last_listdir))
                    print('send to pacs finished', self.last_listdir)
                self.image_count = 0
                process_to_file(join('/incoming/data', current_dir), event.pathname)
                self.image_count += 1
                self.last_listdir = current_dir
                self.proc_list.append(current_dir)
if __name__ == '__main__':
    # Watch the incoming-data tree and dispatch close-write events to
    # EventHandler via an asyncio-driven pyinotify notifier.
    wm = pyinotify.WatchManager() # Watch Manager
    mask = pyinotify.IN_CLOSE_WRITE # watched events
    loop = asyncio.get_event_loop()
    notifier = pyinotify.AsyncioNotifier(wm, loop, default_proc_fun=EventHandler())
    # rec=True watches recursively; auto_add keeps newly created
    # sub-folders watched as they appear.
    wdd = wm.add_watch('/incoming/data', mask, rec=True, auto_add=True)
    # wdd = wm.add_watch('/var/lib/orthanc/db-v6', mask, rec=True, auto_add=True)
    try:
        loop.run_forever()
    # NOTE(review): bare except -- presumably meant to catch Ctrl-C
    # (KeyboardInterrupt) for a clean shutdown, but it also hides every
    # other error.
    except:
        print('\nshutting down...')
        loop.stop()
        notifier.stop()
|
{"hexsha": "b2b8ee56c12943b17423191f6f988eb472df3422", "size": 6692, "ext": "py", "lang": "Python", "max_stars_repo_path": "mvp.py", "max_stars_repo_name": "Kazanova85/M-Project", "max_stars_repo_head_hexsha": "d4a26b96a9711c41d0892c3a562926c3694d2d62", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mvp.py", "max_issues_repo_name": "Kazanova85/M-Project", "max_issues_repo_head_hexsha": "d4a26b96a9711c41d0892c3a562926c3694d2d62", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-06T10:33:55.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-06T10:34:26.000Z", "max_forks_repo_path": "mvp.py", "max_forks_repo_name": "Kazanova85/M-Project", "max_forks_repo_head_hexsha": "d4a26b96a9711c41d0892c3a562926c3694d2d62", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6281407035, "max_line_length": 117, "alphanum_fraction": 0.59279737, "include": true, "reason": "import numpy", "num_tokens": 1565}
|
"""Geometry Module"""
import numpy as np
from geometry_msgs.msg import Point
from scipy import spatial
def norm_to_pixel(normalized_point, res_x, res_y):
    """Convert a point in normalized [0, 1] coordinates to pixel coordinates.

    The y axis is flipped: normalized y grows upward while pixel y grows
    downward from the top of the image.

    Raises ValueError when a coordinate lies outside [0, 1].
    """
    out_of_range = (
        normalized_point.x > 1
        or normalized_point.x < 0
        or normalized_point.y > 1
        or normalized_point.y < 0
    )
    if out_of_range:
        raise ValueError("Invalid value for normalized coordinates")
    pixel_x = normalized_point.x * res_x
    pixel_y = res_y - (normalized_point.y * res_y)
    return Point(x=pixel_x, y=pixel_y)
def find_closest_circle_distances(circles, gazes):
    """Find the nearest circle center for every gaze sample.

    Builds a KD-tree over the circle centers and queries it with each
    (x, y) gaze point.

    :param circles: iterable of objects exposing a ``position`` (x, y) tuple
    :param gazes: object exposing parallel ``gaze_x`` and ``gaze_y`` lists
    :return: list of ``(distance, circle_index)`` tuples, one per gaze point
    """
    kd_tree = spatial.KDTree([circle.position for circle in circles])
    # BUG FIX: the original zipped gaze_x with gaze_x, so every query used
    # the x coordinate for both axes; pair x with y instead.
    return [
        kd_tree.query([gaze_x, gaze_y])
        for gaze_x, gaze_y in zip(gazes.gaze_x, gazes.gaze_y)
    ]
class PygameColor:
    """RGB color constants as 0-255 tuples, in the format pygame expects."""
    WHITE = (255, 255, 255)
    RED = (255, 0, 0)
class Circle:
    """Defines a circle used for gaze-accuracy measurement.

    Gaze-to-center distances smaller than the radius are collected so the
    mean deviation inside the circle can be reported.
    """

    def __init__(self, x_pos, y_pos, radius):
        self.__x = x_pos
        self.__y = y_pos
        self.__radius = radius
        self.__inside_distances = []

    @staticmethod
    def from_pd(series):
        """Build a Circle from a pandas Series (or mapping) with ``x``,
        ``y`` and ``radius`` entries."""
        return Circle(series["x"], series["y"], series["radius"])

    @property
    def position(self):
        """Center as an (x, y) tuple."""
        return self.__x, self.__y

    @property
    def circle_x(self):
        return self.__x

    @property
    def circle_y(self):
        return self.__y

    def append_gaze_inside_dist(self, dist):
        """Record ``dist`` only when it falls inside the circle."""
        if self.is_gaze_inside(dist):
            self.__inside_distances.append(dist)

    def is_gaze_inside(self, gaze_distance):
        """True when the distance is strictly smaller than the radius."""
        return gaze_distance < self.__radius

    def mean_dist(self):
        """Mean of the recorded inside-distances; 0 when none recorded."""
        if not self.__inside_distances:
            return 0
        return sum(self.__inside_distances) / len(self.__inside_distances)

    def has_gazes_inside(self):
        """True when at least one gaze point fell inside the circle."""
        return bool(self.__inside_distances)
class GazeData:
    """Accumulates gaze samples: pixel coordinates plus an on-surface flag."""

    def __init__(self):
        self.__xs = []
        self.__ys = []
        self.__flags = []

    def append(self, data):
        """Store one gaze message.

        ``data`` is expected to expose ``gaze_pixel.x``, ``gaze_pixel.y``
        and ``on_surface``.
        """
        self.__xs.append(data.gaze_pixel.x)
        self.__ys.append(data.gaze_pixel.y)
        self.__flags.append(data.on_surface)

    @property
    def gaze_x(self):
        return self.__xs

    @property
    def gaze_y(self):
        return self.__ys

    @property
    def on_surface(self):
        return self.__flags

    def to_numpy_array(self):
        """Return the samples as an (n, 3) array: columns x, y, on_surface."""
        return np.array([self.__xs, self.__ys, self.__flags]).transpose()
|
{"hexsha": "db0dfa4ab64563fa70cd697c01c2c2443104a2a6", "size": 2680, "ext": "py", "lang": "Python", "max_stars_repo_path": "gaze_detector/src/gaze_detector/geometry.py", "max_stars_repo_name": "hofbi/driver-awareness", "max_stars_repo_head_hexsha": "c16edc6b1ed26c252959ab85bbc33fe4f5598424", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gaze_detector/src/gaze_detector/geometry.py", "max_issues_repo_name": "hofbi/driver-awareness", "max_issues_repo_head_hexsha": "c16edc6b1ed26c252959ab85bbc33fe4f5598424", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gaze_detector/src/gaze_detector/geometry.py", "max_forks_repo_name": "hofbi/driver-awareness", "max_forks_repo_head_hexsha": "c16edc6b1ed26c252959ab85bbc33fe4f5598424", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-04T11:53:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-04T11:53:29.000Z", "avg_line_length": 24.8148148148, "max_line_length": 86, "alphanum_fraction": 0.6451492537, "include": true, "reason": "import numpy,from scipy", "num_tokens": 654}
|
"""
Code to plot fancy-looking TS maps, used in pipeline
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like/tsmap_plotter.py,v 1.4 2011/02/21 00:42:49 lande Exp $
"""
import math, os
import numpy as np
from uw.utilities import image
from uw.like import roi_localize, roi_managers
from skymaps import SkyDir, PySkyFunction
import pylab as plt
def plot_tsmap(roi, name=None, center=None, size=0.5, pixelsize=None, outdir=None,
        which=0, catsig=99, axes=None, fignum=99,
        bandfits=True,
        galmap=True, galactic=False,
        assoc = None,
        notitle = False,
        nolegend = False,
        markercolor='blue', markersize=12,
        primary_markercolor='green', primary_markersize=14,
        **kwargs):
    """ create a  TS map for the source. These are localization style
        TS maps (where the source is not in the background model) and
        are useful for source localization.

    roi: an ROIAnalysis object

    Optional keyword arguments:

      =========   =======================================================
      Keyword     Description
      =========   =======================================================
      name        [None]  provide name for title, and to save figure if outdir set
      center      [None] center, default the roi center
      outdir      [None] if set, save sed into <outdir>/<source_name>_tsmap.png if outdir is a directory, save into filename=<outdir> if not.
      catsig      [99]  if set and less than 1.0, draw cross with this size (degrees)
      size        [0.5]  width=height (deg)
      pixelsize   [None] if not set, will be 20 x20 pixels
      galmap      [True] if set, draw a galactic coordinate image with the source position shown
      galactic    [False] plot using galactic coordinates
      which       [0] chose a different source in the ROI to plot
                  can be an index for point sources, or a name to also get extended sources
      assoc       [None] if set, a list of tuple of associated sources
      notitle     [False] set to turn off (allows setting the current Axes object title)
      nolegend    [False]
      markersize  [12] set 0 to not plot nearby sources in the model
      markercolor [blue]
      =========   =======================================================

    returns the image.TSplot object for plotting positions, for example
    """
    # NOTE(review): this line throws away any **kwargs the caller passed
    # ("fix later" inherited from the original author).
    kwargs={} #fix later
    localizer = roi_localize.localizer(roi, which, bandfits=bandfits)
    tsm = PySkyFunction(localizer)
    sdir = center if center is not None else roi.roi_dir
    if axes is None:
        plt.figure(fignum,figsize=(5,5)); plt.clf()
    tsp = image.TSplot(tsm, sdir, size, pixelsize =pixelsize if pixelsize is not None else size/20. ,
                axes=axes, galactic=galactic, galmap=galmap, **kwargs)
    # Overlay the quadratic-form localization ellipse when a sane fit
    # (sigma < 1 deg and quality < 50) is available on the roi.
    if 'qform' in roi.__dict__ and roi.qform is not None:
        sigma = math.sqrt(roi.qform.par[3]*roi.qform.par[4]) # why do I need this?
        qual = roi.qform.par[6]
        if sigma<1 and qual <50:
            tsp.overplot(roi.qform, sigma)
        else:
            print 'bad fit sigma %g, >1 or qual %.1f >50' % (sigma, qual)
    tsp.show(colorbar=False)
    # catsig < 1 means a catalog position uncertainty (deg) was supplied.
    if catsig<1:
        tsp.cross(sdir, catsig, lw=2, color='grey')
    # plot the primary source, any nearby from the fit
    x,y = tsp.zea.pixel(sdir)
    tsp.zea.axes.plot([x],[y], '*', color=primary_markercolor, label=name, markersize=primary_markersize)
    marker = 'ov^<>1234sphH'; i=k=0
    if markersize!=0:
        # Nearby model point sources, skipping the primary and anything
        # outside the image bounds; marker symbol cycles through `marker`.
        for ps in roi.psm.point_sources: # skip
            x,y = tsp.zea.pixel(ps.skydir)
            if ps.name==name or x<0 or x>tsp.zea.nx or y<0 or y>tsp.zea.ny: continue
            tsp.zea.axes.plot([x],[y], marker[k%12], color=markercolor, label=ps.name, markersize=markersize)
            k+=1
    tsp.plot(tsp.tsmaxpos, symbol='+', color='k') # at the maximum
    if not notitle: plt.title( name, fontsize=24)
    if assoc is not None:
        # eventually move this to image.TSplot
        last_loc,i=SkyDir(0,90),0
        for aname, loc, prob, catid in zip(assoc['name'],assoc['dir'],assoc['prob'],assoc['cat']):
            #print 'associate with %s, prob=%.2f' % (aname.strip(),prob)
            if catid in ('ibis',):
                print '---skip gamma cat %s' % catid
                continue
            if i>8:
                print '---skip because too many for display'
                continue
            x,y = tsp.zea.pixel(loc)
            diff = np.degrees(loc.difference(last_loc)); last_loc=loc
            if diff>1e-3: k+=1 # new marker only if changed place
            tsp.zea.axes.plot([x], [y], marker=marker[k%12], color='green', linestyle='None',
                label='%s[%s] %.2f'%(aname.strip(), catid, prob ), markersize=markersize)
            i+=1
        # Shrink fonts just for the legend, then restore the rc value.
        fs = plt.rcParams['font.size']
        plt.rcParams.update({'legend.fontsize':7, 'font.size':7})
        # put legend on left.
        if not nolegend: tsp.zea.axes.legend(loc=2, numpoints=1, bbox_to_anchor=(-0.15,1.0))
        plt.rcParams['font.size'] = fs
    if outdir is not None:
        if os.path.isdir(outdir):
            plt.savefig(os.path.join(outdir,'%s_tsmap.png'%name.strip()))
        else :
            plt.savefig(outdir)
    return tsp
|
{"hexsha": "c29452c676fe501d800bf926c0bb0f3b35581e87", "size": 5332, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/uw/like/tsmap_plotter.py", "max_stars_repo_name": "tburnett/pointlike", "max_stars_repo_head_hexsha": "a556f07650c2f17d437c86fdafe9f9a33f59758e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-19T14:45:28.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-19T14:45:28.000Z", "max_issues_repo_path": "python/uw/like/tsmap_plotter.py", "max_issues_repo_name": "tburnett/pointlike", "max_issues_repo_head_hexsha": "a556f07650c2f17d437c86fdafe9f9a33f59758e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/uw/like/tsmap_plotter.py", "max_forks_repo_name": "tburnett/pointlike", "max_forks_repo_head_hexsha": "a556f07650c2f17d437c86fdafe9f9a33f59758e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-08-24T18:58:27.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-24T18:58:27.000Z", "avg_line_length": 44.0661157025, "max_line_length": 144, "alphanum_fraction": 0.5879594899, "include": true, "reason": "import numpy", "num_tokens": 1441}
|
[STATEMENT]
lemma Koszul_syz_sigs_auxE:
assumes "v \<in> set (Koszul_syz_sigs_aux bs k)"
obtains i j where "i < j" and "j < length bs"
and "v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), k + j)) (term_of_pair (punit.lt (bs ! j), k + i))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>i j. \<lbrakk>i < j; j < length bs; v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), k + j)) (term_of_pair (punit.lt (bs ! j), k + i))\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
v \<in> set (Koszul_syz_sigs_aux bs k)
goal (1 subgoal):
1. (\<And>i j. \<lbrakk>i < j; j < length bs; v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), k + j)) (term_of_pair (punit.lt (bs ! j), k + i))\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof (induct bs arbitrary: k thesis)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>k thesis. \<lbrakk>\<And>i j. \<lbrakk>i < j; j < length []; v = ord_term_lin.max (term_of_pair (punit.lt ([] ! i), k + j)) (term_of_pair (punit.lt ([] ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux [] k)\<rbrakk> \<Longrightarrow> thesis
2. \<And>a bs k thesis. \<lbrakk>\<And>k thesis. \<lbrakk>\<And>i j. \<lbrakk>i < j; j < length bs; v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), k + j)) (term_of_pair (punit.lt (bs ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux bs k)\<rbrakk> \<Longrightarrow> thesis; \<And>i j. \<lbrakk>i < j; j < length (a # bs); v = ord_term_lin.max (term_of_pair (punit.lt ((a # bs) ! i), k + j)) (term_of_pair (punit.lt ((a # bs) ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux (a # bs) k)\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
\<lbrakk>?i43 < ?j43; ?j43 < length []; v = ord_term_lin.max (term_of_pair (punit.lt ([] ! ?i43), k + ?j43)) (term_of_pair (punit.lt ([] ! ?j43), k + ?i43))\<rbrakk> \<Longrightarrow> thesis
v \<in> set (Koszul_syz_sigs_aux [] k)
goal (2 subgoals):
1. \<And>k thesis. \<lbrakk>\<And>i j. \<lbrakk>i < j; j < length []; v = ord_term_lin.max (term_of_pair (punit.lt ([] ! i), k + j)) (term_of_pair (punit.lt ([] ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux [] k)\<rbrakk> \<Longrightarrow> thesis
2. \<And>a bs k thesis. \<lbrakk>\<And>k thesis. \<lbrakk>\<And>i j. \<lbrakk>i < j; j < length bs; v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), k + j)) (term_of_pair (punit.lt (bs ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux bs k)\<rbrakk> \<Longrightarrow> thesis; \<And>i j. \<lbrakk>i < j; j < length (a # bs); v = ord_term_lin.max (term_of_pair (punit.lt ((a # bs) ! i), k + j)) (term_of_pair (punit.lt ((a # bs) ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux (a # bs) k)\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
from Nil(2)
[PROOF STATE]
proof (chain)
picking this:
v \<in> set (Koszul_syz_sigs_aux [] k)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
v \<in> set (Koszul_syz_sigs_aux [] k)
goal (1 subgoal):
1. thesis
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
thesis
goal (1 subgoal):
1. \<And>a bs k thesis. \<lbrakk>\<And>k thesis. \<lbrakk>\<And>i j. \<lbrakk>i < j; j < length bs; v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), k + j)) (term_of_pair (punit.lt (bs ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux bs k)\<rbrakk> \<Longrightarrow> thesis; \<And>i j. \<lbrakk>i < j; j < length (a # bs); v = ord_term_lin.max (term_of_pair (punit.lt ((a # bs) ! i), k + j)) (term_of_pair (punit.lt ((a # bs) ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux (a # bs) k)\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a bs k thesis. \<lbrakk>\<And>k thesis. \<lbrakk>\<And>i j. \<lbrakk>i < j; j < length bs; v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), k + j)) (term_of_pair (punit.lt (bs ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux bs k)\<rbrakk> \<Longrightarrow> thesis; \<And>i j. \<lbrakk>i < j; j < length (a # bs); v = ord_term_lin.max (term_of_pair (punit.lt ((a # bs) ! i), k + j)) (term_of_pair (punit.lt ((a # bs) ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux (a # bs) k)\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
case (Cons b bs)
[PROOF STATE]
proof (state)
this:
\<lbrakk>\<And>i j. \<lbrakk>i < j; j < length bs; v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), ?k43 + j)) (term_of_pair (punit.lt (bs ! j), ?k43 + i))\<rbrakk> \<Longrightarrow> ?thesis43; v \<in> set (Koszul_syz_sigs_aux bs ?k43)\<rbrakk> \<Longrightarrow> ?thesis43
\<lbrakk>?i43 < ?j43; ?j43 < length (b # bs); v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! ?i43), k + ?j43)) (term_of_pair (punit.lt ((b # bs) ! ?j43), k + ?i43))\<rbrakk> \<Longrightarrow> thesis
v \<in> set (Koszul_syz_sigs_aux (b # bs) k)
goal (1 subgoal):
1. \<And>a bs k thesis. \<lbrakk>\<And>k thesis. \<lbrakk>\<And>i j. \<lbrakk>i < j; j < length bs; v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), k + j)) (term_of_pair (punit.lt (bs ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux bs k)\<rbrakk> \<Longrightarrow> thesis; \<And>i j. \<lbrakk>i < j; j < length (a # bs); v = ord_term_lin.max (term_of_pair (punit.lt ((a # bs) ! i), k + j)) (term_of_pair (punit.lt ((a # bs) ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux (a # bs) k)\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
have "v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) `
{0..<length bs} \<union> set (Koszul_syz_sigs_aux bs (Suc k))" (is "v \<in> ?A \<union> ?B")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs} \<union> set (Koszul_syz_sigs_aux bs (Suc k))
[PROOF STEP]
using Cons(3)
[PROOF STATE]
proof (prove)
using this:
v \<in> set (Koszul_syz_sigs_aux (b # bs) k)
goal (1 subgoal):
1. v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs} \<union> set (Koszul_syz_sigs_aux bs (Suc k))
[PROOF STEP]
by (simp add: set_map_idx)
[PROOF STATE]
proof (state)
this:
v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs} \<union> set (Koszul_syz_sigs_aux bs (Suc k))
goal (1 subgoal):
1. \<And>a bs k thesis. \<lbrakk>\<And>k thesis. \<lbrakk>\<And>i j. \<lbrakk>i < j; j < length bs; v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), k + j)) (term_of_pair (punit.lt (bs ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux bs k)\<rbrakk> \<Longrightarrow> thesis; \<And>i j. \<lbrakk>i < j; j < length (a # bs); v = ord_term_lin.max (term_of_pair (punit.lt ((a # bs) ! i), k + j)) (term_of_pair (punit.lt ((a # bs) ! j), k + i))\<rbrakk> \<Longrightarrow> thesis; v \<in> set (Koszul_syz_sigs_aux (a # bs) k)\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs} \<union> set (Koszul_syz_sigs_aux bs (Suc k))
goal (1 subgoal):
1. thesis
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs} \<Longrightarrow> thesis
2. v \<in> set (Koszul_syz_sigs_aux bs (Suc k)) \<Longrightarrow> thesis
[PROOF STEP]
assume "v \<in> ?A"
[PROOF STATE]
proof (state)
this:
v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs}
goal (2 subgoals):
1. v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs} \<Longrightarrow> thesis
2. v \<in> set (Koszul_syz_sigs_aux bs (Suc k)) \<Longrightarrow> thesis
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs}
[PROOF STEP]
obtain j where "j \<in> {0..<length bs}"
and v: "v = ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k)))
(term_of_pair (punit.lt (bs ! j), k))"
[PROOF STATE]
proof (prove)
using this:
v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs}
goal (1 subgoal):
1. (\<And>j. \<lbrakk>j \<in> {0..<length bs}; v = ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
j \<in> {0..<length bs}
v = ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))
goal (2 subgoals):
1. v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs} \<Longrightarrow> thesis
2. v \<in> set (Koszul_syz_sigs_aux bs (Suc k)) \<Longrightarrow> thesis
[PROOF STEP]
from this(1)
[PROOF STATE]
proof (chain)
picking this:
j \<in> {0..<length bs}
[PROOF STEP]
have "j < length bs"
[PROOF STATE]
proof (prove)
using this:
j \<in> {0..<length bs}
goal (1 subgoal):
1. j < length bs
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
j < length bs
goal (2 subgoals):
1. v \<in> (\<lambda>j. ord_term_lin.max (term_of_pair (punit.lt b, Suc (j + k))) (term_of_pair (punit.lt (bs ! j), k))) ` {0..<length bs} \<Longrightarrow> thesis
2. v \<in> set (Koszul_syz_sigs_aux bs (Suc k)) \<Longrightarrow> thesis
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. thesis
[PROOF STEP]
proof (rule Cons(2))
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. ?i43 < ?j43
2. ?j43 < length (b # bs)
3. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! ?i43), k + ?j43)) (term_of_pair (punit.lt ((b # bs) ! ?j43), k + ?i43))
[PROOF STEP]
show "0 < Suc j"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < Suc j
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
0 < Suc j
goal (2 subgoals):
1. Suc j < length (b # bs)
2. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! 0), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + 0))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Suc j < length (b # bs)
2. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! 0), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + 0))
[PROOF STEP]
from \<open>j < length bs\<close>
[PROOF STATE]
proof (chain)
picking this:
j < length bs
[PROOF STEP]
show "Suc j < length (b # bs)"
[PROOF STATE]
proof (prove)
using this:
j < length bs
goal (1 subgoal):
1. Suc j < length (b # bs)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Suc j < length (b # bs)
goal (1 subgoal):
1. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! 0), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + 0))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! 0), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + 0))
[PROOF STEP]
show "v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! 0), k + Suc j))
(term_of_pair (punit.lt ((b # bs) ! Suc j), k + 0))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! 0), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + 0))
[PROOF STEP]
by (simp add: v ac_simps)
[PROOF STATE]
proof (state)
this:
v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! 0), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + 0))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
thesis
goal (1 subgoal):
1. v \<in> set (Koszul_syz_sigs_aux bs (Suc k)) \<Longrightarrow> thesis
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. v \<in> set (Koszul_syz_sigs_aux bs (Suc k)) \<Longrightarrow> thesis
[PROOF STEP]
assume "v \<in> ?B"
[PROOF STATE]
proof (state)
this:
v \<in> set (Koszul_syz_sigs_aux bs (Suc k))
goal (1 subgoal):
1. v \<in> set (Koszul_syz_sigs_aux bs (Suc k)) \<Longrightarrow> thesis
[PROOF STEP]
obtain i j where "i < j" and "j < length bs"
and v: "v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), Suc k + j))
(term_of_pair (punit.lt (bs ! j), Suc k + i))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>i j. \<lbrakk>i < j; j < length bs; v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), Suc k + j)) (term_of_pair (punit.lt (bs ! j), Suc k + i))\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (rule Cons(1), assumption, rule \<open>v \<in> ?B\<close>)
[PROOF STATE]
proof (state)
this:
i < j
j < length bs
v = ord_term_lin.max (term_of_pair (punit.lt (bs ! i), Suc k + j)) (term_of_pair (punit.lt (bs ! j), Suc k + i))
goal (1 subgoal):
1. v \<in> set (Koszul_syz_sigs_aux bs (Suc k)) \<Longrightarrow> thesis
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. thesis
[PROOF STEP]
proof (rule Cons(2))
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. ?i43 < ?j43
2. ?j43 < length (b # bs)
3. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! ?i43), k + ?j43)) (term_of_pair (punit.lt ((b # bs) ! ?j43), k + ?i43))
[PROOF STEP]
from \<open>i < j\<close>
[PROOF STATE]
proof (chain)
picking this:
i < j
[PROOF STEP]
show "Suc i < Suc j"
[PROOF STATE]
proof (prove)
using this:
i < j
goal (1 subgoal):
1. Suc i < Suc j
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Suc i < Suc j
goal (2 subgoals):
1. Suc j < length (b # bs)
2. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! Suc i), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + Suc i))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Suc j < length (b # bs)
2. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! Suc i), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + Suc i))
[PROOF STEP]
from \<open>j < length bs\<close>
[PROOF STATE]
proof (chain)
picking this:
j < length bs
[PROOF STEP]
show "Suc j < length (b # bs)"
[PROOF STATE]
proof (prove)
using this:
j < length bs
goal (1 subgoal):
1. Suc j < length (b # bs)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Suc j < length (b # bs)
goal (1 subgoal):
1. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! Suc i), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + Suc i))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! Suc i), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + Suc i))
[PROOF STEP]
show "v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! Suc i), k + Suc j))
(term_of_pair (punit.lt ((b # bs) ! Suc j), k + Suc i))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! Suc i), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + Suc i))
[PROOF STEP]
by (simp add: v)
[PROOF STATE]
proof (state)
this:
v = ord_term_lin.max (term_of_pair (punit.lt ((b # bs) ! Suc i), k + Suc j)) (term_of_pair (punit.lt ((b # bs) ! Suc j), k + Suc i))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 7411, "file": "Signature_Groebner_Signature_Groebner", "length": 51}
|
#!/usr/bin/python3
from gi.repository import Gtk
from matplotlib.figure import Figure
from numpy import sin, cos, pi, linspace
#Possibly this rendering backend is broken currently
#from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
from matplotlib.backends.backend_gtk3 import NavigationToolbar2GTK3 as NavigationToolbar
class Signals:
    """Glade signal handlers, connected via ``builder.connect_signals``."""
    def on_window1_destroy(self, widget):
        """Quit the GTK main loop when the main window is destroyed."""
        Gtk.main_quit()
# Build the widget tree from the Glade description and wire up handlers.
builder = Gtk.Builder()
builder.add_objects_from_file('mpl-ntb-glade.glade', ('window1', ''))
builder.connect_signals(Signals())

myfirstwindow = builder.get_object('window1')
sw = builder.get_object('scrolledwindow1')
sw2 = builder.get_object('scrolledwindow2')

# Single axes on a 5x5-inch, 80-dpi figure.
fig = Figure(figsize=(5, 5), dpi=80)
ax = fig.add_subplot(111)

# Sample sin(x) and cos(x) on n points over [-pi, pi].
n = 1000
xsin = linspace(-pi, pi, n, endpoint=True)
xcos = linspace(-pi, pi, n, endpoint=True)
ysin = sin(xsin)
ycos = cos(xcos)

sinwave = ax.plot(xsin, ysin, color='black', label='sin(x)')
coswave = ax.plot(xcos, ycos, color='black', label='cos(x)', linestyle='--')

ax.set_xlim(-pi, pi)
ax.set_ylim(-1.2, 1.2)

# Shade each curve: blue where the curve is above zero, red where below.
for xs, ys in ((xsin, ysin), (xcos, ycos)):
    ax.fill_between(xs, 0, ys, (ys - 1) > -1, color='blue', alpha=.3)
    ax.fill_between(xs, 0, ys, (ys - 1) < -1, color='red', alpha=.3)

ax.legend(loc='upper left')

# Hide the top/right spines and move the remaining two to the origin.
ax = fig.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
fig.tight_layout()

# Embed the canvas and its navigation toolbar inside the scrolled windows.
canvas = FigureCanvas(fig)
sw.add_with_viewport(canvas)
toolbar = NavigationToolbar(canvas, myfirstwindow)
sw2.add_with_viewport(toolbar)

myfirstwindow.show_all()
Gtk.main()
|
{"hexsha": "6bd5183675173ed658b34a6ea5eb56fc0081d83d", "size": 1931, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/mpl-ntb-glade.py", "max_stars_repo_name": "tobias47n9e/GTK3-Matplotlib-Cookbook", "max_stars_repo_head_hexsha": "fea42a040e9e358740b3bda04c38ea164ff79b2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2015-03-10T18:03:44.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-29T09:17:21.000Z", "max_issues_repo_path": "examples/mpl-ntb-glade.py", "max_issues_repo_name": "tobias47n9e/GTK3-Matplotlib-Cookbook", "max_issues_repo_head_hexsha": "fea42a040e9e358740b3bda04c38ea164ff79b2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2015-02-09T22:56:38.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-01T21:53:29.000Z", "max_forks_repo_path": "examples/mpl-ntb-glade.py", "max_forks_repo_name": "tobias47n9e/GTK3-Matplotlib-Cookbook", "max_forks_repo_head_hexsha": "fea42a040e9e358740b3bda04c38ea164ff79b2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2015-03-10T18:03:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-03T23:25:42.000Z", "avg_line_length": 30.171875, "max_line_length": 88, "alphanum_fraction": 0.7332988089, "include": true, "reason": "from numpy", "num_tokens": 559}
|
import argparse
import os
import time
import datetime
import sys
import json
import yaml
import tensorflow as tf
import numpy as np
import src.core as core
import src.retina_net.experiments.validation_utils as val_utils
from src.retina_net import config_utils
from src.core import constants
from src.retina_net.builders import dataset_handler_builder
from src.retina_net.models.retinanet_model import RetinaNetModel
keras = tf.keras
def validate_model(config):
    """Continuously evaluate saved checkpoints on the validation set.

    Polls the checkpoint directory forever: every checkpoint not yet
    evaluated is restored, run over the full validation dataset, and its
    predictions written in the dataset-specific format (KITTI per-sample
    txt files, or a single JSON for bdd/coco/rvc). Losses are logged to
    TensorBoard. Between polls the function sleeps so that consecutive
    polls are at least ``eval_wait_interval`` seconds apart.

    :param config: configuration dictionary with at least
        'validation_config', 'dataset_config', 'model_config',
        'checkpoint_name' and 'logs_dir' entries.

    NOTE: this function never returns; it loops indefinitely.
    """
    # Get validation config
    val_config = config['validation_config']
    eval_wait_interval = val_config['eval_wait_interval']

    # Create dataset class
    dataset_config = config['dataset_config']
    dataset_handler = dataset_handler_builder.build_dataset(
        dataset_config, 'val')

    # Set keras training phase (0 == inference mode)
    keras.backend.set_learning_phase(0)
    print("Keras Learning Phase Set to: " +
          str(keras.backend.learning_phase()))

    # Create Model
    with tf.name_scope("retinanet_model"):
        model = RetinaNetModel(config['model_config'])

    # Initialize the model from a saved checkpoint
    checkpoint_dir = os.path.join(
        core.data_dir(), 'outputs',
        config['checkpoint_name'], 'checkpoints', config['checkpoint_name'])

    predictions_dir = os.path.join(
        core.data_dir(), 'outputs',
        config['checkpoint_name'], 'predictions')
    os.makedirs(predictions_dir, exist_ok=True)

    if not os.path.exists(checkpoint_dir):
        raise ValueError('{} must have at least one checkpoint entry.'
                         .format(checkpoint_dir))

    # IDs of checkpoints already evaluated in a previous run; skipped below.
    already_evaluated_ckpts = val_utils.get_evaluated_ckpts(predictions_dir)

    # Instantiate mini-batch and epoch size
    epoch_size = int(dataset_handler.epoch_size)

    # Create Dataset
    # Main function to create dataset
    dataset = dataset_handler.create_dataset()

    # Batch size goes in parenthesis. One pass, batch size 1.
    batched_dataset = dataset.repeat(1).batch(1)

    # `prefetch` lets the dataset fetch batches, in the background while the model is validating.
    batched_dataset = batched_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    # One TensorBoard log file per validation run, timestamped.
    log_file = config['logs_dir'] + \
        '/validation/' + str(datetime.datetime.now())
    summary_writer = tf.summary.create_file_writer(log_file)

    print('Starting evaluation at ' +
          time.strftime('%Y-%m-%d-%H:%M:%S', time.gmtime()))

    # Repeated checkpoint run
    last_checkpoint_id = -1
    number_of_evaluations = 0

    # Initialize the model checkpoint manager
    ckpt = tf.train.Checkpoint(step=tf.Variable(0), net=model)

    # Begin validation loop
    while True:
        all_checkpoint_states = tf.train.get_checkpoint_state(
            checkpoint_dir).all_model_checkpoint_paths
        num_checkpoints = len(all_checkpoint_states)
        print("Total Checkpoints: ", num_checkpoints)

        start = time.time()

        if number_of_evaluations >= num_checkpoints:
            print('\nNo new checkpoints found in %s. '
                  'Will try again in %d seconds.'
                  % (checkpoint_dir, eval_wait_interval))
        else:
            for ckpt_idx in range(num_checkpoints):
                checkpoint_to_restore = all_checkpoint_states[ckpt_idx]
                ckpt_id = val_utils.strip_checkpoint_id(checkpoint_to_restore)

                # Check if checkpoint has been evaluated already.
                # ckpt_id == 1 (the very first checkpoint) is always skipped.
                already_evaluated = ckpt_id in already_evaluated_ckpts
                if already_evaluated or ckpt_id <= last_checkpoint_id or ckpt_id == 1:
                    number_of_evaluations = max((ckpt_idx + 1,
                                                 number_of_evaluations))
                    continue

                # run_checkpoint_once
                predictions_dir_ckpt = os.path.join(predictions_dir,
                                                    'validation',
                                                    str(ckpt_id),
                                                    'data')
                os.makedirs(predictions_dir_ckpt, exist_ok=True)
                print('\nRunning checkpoint ' + str(ckpt_id) + '\n')
                ckpt.restore(checkpoint_to_restore)

                # Perform dataset-specific setup of result output.
                # NOTE(review): for a dataset name other than
                # kitti/bdd/coco/rvc, prediction_json_file_name is left
                # unbound and the wrap-up below would raise NameError —
                # confirm the set of supported dataset names upstream.
                if dataset_config['dataset'] == 'kitti':
                    pass
                elif dataset_config['dataset'] == 'bdd' or dataset_config['dataset'] == 'coco':
                    final_results_list = []
                    # Single json file for bdd or coco dataset
                    prediction_json_file_name = os.path.join(
                        predictions_dir_ckpt, 'predictions.json')
                elif dataset_config['dataset'] == 'rvc':
                    final_results_list = []
                    # Single json file per rvc sequence directory
                    predictions_dir_ckpt = os.path.join(
                        predictions_dir_ckpt, dataset_config['rvc']['paths_config']['sequence_dir'])
                    os.makedirs(predictions_dir_ckpt, exist_ok=True)
                    prediction_json_file_name = os.path.join(
                        predictions_dir_ckpt,
                        'predictions.json')

                with summary_writer.as_default():
                    for counter, sample_dict in enumerate(batched_dataset):
                        total_loss, loss_dict, prediction_dict = val_single_step(
                            model, sample_dict)

                        output_classes, output_boxes = val_utils.post_process_predictions(
                            sample_dict, prediction_dict, dataset_name=dataset_config['dataset'])
                        output_boxes = output_boxes.numpy()
                        output_classes = output_classes.numpy()

                        # Perform dataset-specific saving of outputs
                        if dataset_config['dataset'] == 'kitti':
                            # One txt file per sample, CRLF-terminated rows.
                            predictions_kitti_format = val_utils.predictions_to_kitti_format(
                                output_boxes, output_classes)
                            prediction_file_name = os.path.join(
                                predictions_dir_ckpt,
                                dataset_handler.sample_ids[counter] + '.txt')
                            if predictions_kitti_format.size == 0:
                                np.savetxt(prediction_file_name, [])
                            else:
                                np.savetxt(
                                    prediction_file_name,
                                    predictions_kitti_format,
                                    newline='\r\n',
                                    fmt='%s')
                        elif dataset_config['dataset'] == 'bdd':
                            predictions_bdd_format = val_utils.predictions_to_bdd_format(
                                output_boxes,
                                output_classes,
                                dataset_handler.sample_ids[counter],
                                category_list=dataset_handler.training_data_config['categories'])
                            final_results_list.extend(predictions_bdd_format)
                        elif dataset_config['dataset'] == 'coco':
                            # sample_ids[counter][:-4] strips the file
                            # extension before the int() conversion.
                            predictions_coco_format = val_utils.predictions_to_coco_format(
                                output_boxes,
                                output_classes,
                                int(dataset_handler.sample_ids[counter][:-4]),
                                dataset_handler.training_data_to_coco_category_ids)
                            final_results_list.extend(predictions_coco_format)
                        elif dataset_config['dataset'] == 'rvc':
                            predictions_rvc_format = val_utils.predictions_to_rvc_format(
                                output_boxes,
                                output_classes,
                                dataset_handler.sample_ids[counter][:-4],
                                dataset_handler.training_data_categories)
                            final_results_list.extend(predictions_rvc_format)

                        # Log each individual loss plus the total to TensorBoard.
                        with tf.name_scope('losses'):
                            for loss_name in loss_dict.keys():
                                tf.summary.scalar(loss_name,
                                                  loss_dict[loss_name],
                                                  step=int(ckpt.step))
                            tf.summary.scalar(
                                'Total Loss',
                                total_loss,
                                step=int(
                                    ckpt.step))
                        summary_writer.flush()

                        # In-place progress counter, e.g. "17 /1000".
                        sys.stdout.write(
                            '\r{}'.format(
                                counter +
                                1) +
                            ' /' +
                            str(epoch_size))

                # Final dataset-specific wrap up work for checkpoint
                # results
                if dataset_config['dataset'] == 'kitti':
                    pass
                else:
                    with open(prediction_json_file_name, 'w') as fp:
                        json.dump(final_results_list, fp, indent=4,
                                  separators=(',', ': '))

                number_of_evaluations += 1
                # Record this checkpoint as evaluated so reruns skip it.
                val_utils.write_evaluated_ckpts(
                    predictions_dir, np.array([ckpt_id]))

                # Save the id of the latest evaluated checkpoint
                last_checkpoint_id = ckpt_id

        # Sleep out the remainder of the polling interval, if any.
        time_to_next_eval = start + eval_wait_interval - time.time()
        if time_to_next_eval > 0:
            time.sleep(time_to_next_eval)
@tf.function
def val_single_step(
        model,
        sample_dict):
    """Run one validation forward pass and compute all losses.

    :param model: keras retinanet model
    :param sample_dict: input dictionary generated from dataset.
        If element sizes in this dictionary are variable, remove tf.function decorator.

    :return total_loss: sum of all model losses plus the regularization loss.
    :return loss_dict: dictionary of individual loss terms; the
        regularization loss is added under constants.REGULARIZATION_LOSS_KEY.
    :return prediction_dict: dictionary containing neural network predictions.
    """
    prediction_dict = model(sample_dict[constants.IMAGE_NORMALIZED_KEY],
                            train_val_test='validation')
    total_loss, loss_dict = model.get_loss(sample_dict, prediction_dict)

    # Get any regularization loss in the model and add it to total loss
    regularization_loss = tf.reduce_sum(
        tf.concat([layer.losses for layer in model.layers], axis=0))
    loss_dict.update(
        {constants.REGULARIZATION_LOSS_KEY: regularization_loss})
    total_loss += regularization_loss

    return total_loss, loss_dict, prediction_dict
def main():
    """Object Detection Model Validator.

    Parses command-line arguments, pins the CUDA device, enables GPU
    memory growth, loads the YAML configuration, and hands off to the
    (infinite) validation loop in ``validate_model``.

    Command-line flags:
        --gpu_device: value assigned to CUDA_VISIBLE_DEVICES (default '0').
        --yaml_path: path to the configuration YAML file.
        --data_split: dataset split to evaluate (default 'val').
    """
    # Defaults
    default_gpu_device = '0'
    default_config_path = core.model_dir(
        'retina_net') + '/configs/retinanet_bdd.yaml'
    # Allowed data splits are 'train','train_mini', 'val', 'val_half',
    # 'val_mini'
    default_data_split = 'val'

    # Parse input
    parser = argparse.ArgumentParser()  # Define argparser object
    parser.add_argument('--gpu_device',
                        type=str,
                        dest='gpu_device',
                        default=default_gpu_device)
    parser.add_argument('--yaml_path',
                        type=str,
                        dest='yaml_path',
                        default=default_config_path)
    parser.add_argument('--data_split',
                        type=str,
                        dest='data_split',
                        default=default_data_split)
    args = parser.parse_args()

    # Set CUDA device id
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_device

    # Allow GPU memory growth. Guarded: on a CPU-only machine, or when
    # CUDA_VISIBLE_DEVICES hides every GPU, list_physical_devices('GPU')
    # returns an empty list and the previous unconditional
    # physical_devices[0] access raised IndexError.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if physical_devices:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    # Load in configuration file as python dictionary
    with open(args.yaml_path, 'r') as yaml_file:
        config = yaml.load(yaml_file, Loader=yaml.FullLoader)

    # Make necessary directories, update config with checkpoint path and data
    # split
    config = config_utils.setup(config, args)

    # Go to validation function
    validate_model(config)
# Script entry point: run the validator only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
|
{"hexsha": "955b7d601eb778fd55eb20e6f22c25cd04519991", "size": 12569, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/retina_net/experiments/run_validation.py", "max_stars_repo_name": "asharakeh/bayes-od-rc", "max_stars_repo_head_hexsha": "3f478e5c9a593ee03d7b63d533d46d87d739fc26", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2019-09-17T22:37:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T14:34:42.000Z", "max_issues_repo_path": "src/retina_net/experiments/run_validation.py", "max_issues_repo_name": "asharakeh/bayes-od-rc", "max_issues_repo_head_hexsha": "3f478e5c9a593ee03d7b63d533d46d87d739fc26", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-10-07T23:05:46.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-26T16:24:28.000Z", "max_forks_repo_path": "src/retina_net/experiments/run_validation.py", "max_forks_repo_name": "asharakeh/bayes-od-rc", "max_forks_repo_head_hexsha": "3f478e5c9a593ee03d7b63d533d46d87d739fc26", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-09-17T22:37:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T13:33:24.000Z", "avg_line_length": 40.0286624204, "max_line_length": 100, "alphanum_fraction": 0.5621767842, "include": true, "reason": "import numpy", "num_tokens": 2204}
|
import numpy as np
import torch
from elegantrl.agents.net import ActorPPO, ActorDiscretePPO, CriticPPO, SharePPO
from elegantrl.agents.AgentBase import AgentBase
from typing import Tuple
"""[ElegantRL.2021.12.12](github.com/AI4Fiance-Foundation/ElegantRL)"""
class AgentPPO(AgentBase):
    """
    Bases: ``AgentBase``

    PPO algorithm. "Proximal Policy Optimization Algorithms". John Schulman. et al.. 2017.

    :param net_dim[int]: the dimension of networks (the width of neural networks)
    :param state_dim[int]: the dimension of state (the number of state vector)
    :param action_dim[int]: the dimension of action (the number of discrete action)
    :param gpu_id[int]: id of the GPU this agent runs on
    :param args: optional hyper-parameter container; attributes read here are
        ``if_cri_target``, ``ratio_clip``, ``lambda_entropy``, ``lambda_gae_adv``
        and ``if_use_gae``.
    """

    def __init__(
        self, net_dim: int, state_dim: int, action_dim: int, gpu_id=0, args=None
    ):
        # PPO is an on-policy algorithm.
        self.if_off_policy = False
        # Allow a subclass to pre-select actor/critic classes before this runs.
        self.act_class = getattr(self, "act_class", ActorPPO)
        self.cri_class = getattr(self, "cri_class", CriticPPO)
        self.if_cri_target = getattr(args, "if_cri_target", False)
        AgentBase.__init__(self, net_dim, state_dim, action_dim, gpu_id, args)

        # Clip range for the importance ratio: ratio.clamp(1 - clip, 1 + clip).
        # Reasonable values: 0.00 ~ 0.50.
        self.ratio_clip = getattr(args, "ratio_clip", 0.25)
        # Entropy bonus coefficient; reasonable values: 0.00 ~ 0.10.
        self.lambda_entropy = getattr(args, "lambda_entropy", 0.02)
        # GAE lambda (ICLR 2016); reasonable values: 0.95 ~ 0.99.
        # BUG FIX: this previously read the key "lambda_entropy", so a
        # user-supplied ``args.lambda_gae_adv`` was silently ignored.
        self.lambda_gae_adv = getattr(args, "lambda_gae_adv", 0.98)

        if getattr(
            args, "if_use_gae", False
        ):  # GAE (Generalized Advantage Estimation) for sparse reward
            self.get_reward_sum = self.get_reward_sum_gae
        else:
            self.get_reward_sum = self.get_reward_sum_raw

    def explore_one_env(self, env, target_step) -> list:
        """
        Collect trajectories through the actor-environment interaction.

        :param env: the DRL environment instance.
        :param target_step: the total step for the interaction.
        :return: a list of trajectories [traj, ...] where `traj = [(state, other), ...]`.
        """
        traj_list = list()
        last_done = [
            0,
        ]
        state = self.states[0]

        step_i = 0
        done = False
        # Bind bound methods to locals to avoid attribute lookups in the loop.
        get_action = self.act.get_action
        get_a_to_e = self.act.get_a_to_e
        # Keep stepping until the step budget is spent AND the episode ended.
        while step_i < target_step or not done:
            ten_s = torch.as_tensor(state, dtype=torch.float32).unsqueeze(0)
            ten_a, ten_n = [
                ten.cpu() for ten in get_action(ten_s.to(self.device))
            ]  # different
            next_s, reward, done, _ = env.step(get_a_to_e(ten_a)[0].numpy())

            traj_list.append((ten_s, reward, done, ten_a, ten_n))  # different

            step_i += 1
            state = env.reset() if done else next_s
        self.states[0] = state
        last_done[0] = step_i
        return self.convert_trajectory(traj_list, last_done)  # traj_list

    def explore_vec_env(self, env, target_step) -> list:
        """
        Collect trajectories through the actor-environment interaction for a
        **vectorized** environment instance.

        :param env: the DRL environment instance.
        :param target_step: the total step for the interaction.
        :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].
        """
        traj_list = list()
        last_done = torch.zeros(self.env_num, dtype=torch.int, device=self.device)
        ten_s = self.states

        step_i = 0
        ten_dones = torch.zeros(self.env_num, dtype=torch.int, device=self.device)
        get_action = self.act.get_action
        get_a_to_e = self.act.get_a_to_e
        # Keep stepping until the step budget is spent AND at least one env is done.
        while step_i < target_step or not any(ten_dones):
            ten_a, ten_n = get_action(ten_s)  # different
            ten_s_next, ten_rewards, ten_dones, _ = env.step(get_a_to_e(ten_a))

            traj_list.append(
                (ten_s.clone(), ten_rewards.clone(), ten_dones.clone(), ten_a, ten_n)
            )  # different

            step_i += 1
            # Record the last terminal step index per env (must run after `step_i += 1`).
            last_done[torch.where(ten_dones)[0]] = step_i
            ten_s = ten_s_next

        self.states = ten_s
        return self.convert_trajectory(traj_list, last_done)  # traj_list

    def update_net(self, buffer):
        """
        Update the neural networks by sampling batch data from `ReplayBuffer`.

        .. note::
            Using advantage normalization and entropy loss.

        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :return: a tuple (critic objective, actor objective, mean of a_std_log) for logging.
        """
        with torch.no_grad():
            buf_state, buf_reward, buf_mask, buf_action, buf_noise = [
                ten.to(self.device) for ten in buffer
            ]
            buf_len = buf_state.shape[0]

            """get buf_r_sum, buf_logprob"""
            bs = 2**10  # set a smaller 'BatchSize' when out of GPU memory.
            # Evaluate the critic in chunks to bound peak GPU memory.
            buf_value = [
                self.cri_target(buf_state[i : i + bs]) for i in range(0, buf_len, bs)
            ]
            buf_value = torch.cat(buf_value, dim=0)
            buf_logprob = self.act.get_old_logprob(buf_action, buf_noise)

            buf_r_sum, buf_adv_v = self.get_reward_sum(
                buf_len, buf_reward, buf_mask, buf_value
            )  # detach()
            # Normalize advantages (zero mean, unit std); 1e-5 avoids division by zero.
            buf_adv_v = (buf_adv_v - buf_adv_v.mean()) / (buf_adv_v.std() + 1e-5)
            # buf_adv_v: buffer data of adv_v value
            del buf_noise

        """update network"""
        obj_critic = None
        obj_actor = None
        assert buf_len >= self.batch_size
        for _ in range(int(1 + buf_len * self.repeat_times / self.batch_size)):
            indices = torch.randint(
                buf_len,
                size=(self.batch_size,),
                requires_grad=False,
                device=self.device,
            )

            state = buf_state[indices]
            r_sum = buf_r_sum[indices]
            adv_v = buf_adv_v[indices]
            action = buf_action[indices]
            logprob = buf_logprob[indices]

            """PPO: Surrogate objective of Trust Region"""
            new_logprob, obj_entropy = self.act.get_logprob_entropy(
                state, action
            )  # it is obj_actor
            ratio = (new_logprob - logprob.detach()).exp()
            surrogate1 = adv_v * ratio
            surrogate2 = adv_v * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
            # Pessimistic (clipped) surrogate; negated because we minimize.
            obj_surrogate = -torch.min(surrogate1, surrogate2).mean()
            obj_actor = obj_surrogate + obj_entropy * self.lambda_entropy
            self.optimizer_update(self.act_optimizer, obj_actor)

            value = self.cri(state).squeeze(
                1
            )  # critic network predicts the reward_sum (Q value) of state
            obj_critic = self.criterion(value, r_sum)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            if self.if_cri_target:
                self.soft_update(self.cri_target, self.cri, self.soft_update_tau)

        a_std_log = getattr(self.act, "a_std_log", torch.zeros(1)).mean()
        return obj_critic.item(), -obj_actor.item(), a_std_log.item()  # logging_tuple

    def get_reward_sum_raw(
        self, buf_len, buf_reward, buf_mask, buf_value
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Calculate the **reward-to-go** and **advantage estimation**.

        :param buf_len: the length of the ``ReplayBuffer``.
        :param buf_reward: a list of rewards for the state-action pairs.
        :param buf_mask: a list of masks computed by the product of done signal and discount factor.
        :param buf_value: a list of state values estimated by the ``Critic`` network.
        :return: the reward-to-go and advantage estimation.
        """
        buf_r_sum = torch.empty(
            buf_len, dtype=torch.float32, device=self.device
        )  # reward sum

        pre_r_sum = 0
        # Backward recursion: r_sum[i] = reward[i] + mask[i] * r_sum[i+1].
        for i in range(buf_len - 1, -1, -1):
            buf_r_sum[i] = buf_reward[i] + buf_mask[i] * pre_r_sum
            pre_r_sum = buf_r_sum[i]
        # Advantage = reward-to-go minus the critic's value estimate.
        buf_adv_v = buf_r_sum - buf_value[:, 0]
        return buf_r_sum, buf_adv_v

    def get_reward_sum_gae(
        self, buf_len, ten_reward, ten_mask, ten_value
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Calculate the **reward-to-go** and **advantage estimation** using GAE.

        :param buf_len: the length of the ``ReplayBuffer``.
        :param ten_reward: a list of rewards for the state-action pairs.
        :param ten_mask: a list of masks computed by the product of done signal and discount factor.
        :param ten_value: a list of state values estimated by the ``Critic`` network.
        :return: the reward-to-go and advantage estimation.
        """
        buf_r_sum = torch.empty(
            buf_len, dtype=torch.float32, device=self.device
        )  # old policy value
        buf_adv_v = torch.empty(
            buf_len, dtype=torch.float32, device=self.device
        )  # advantage value

        pre_r_sum = 0
        pre_adv_v = 0  # advantage value of previous step
        for i in range(buf_len - 1, -1, -1):  # Notice: mask = (1-done) * gamma
            buf_r_sum[i] = ten_reward[i] + ten_mask[i] * pre_r_sum
            pre_r_sum = buf_r_sum[i]

            # TD residual plus discounted previous advantage (GAE recursion).
            buf_adv_v[i] = ten_reward[i] + ten_mask[i] * pre_adv_v - ten_value[i]
            pre_adv_v = ten_value[i] + buf_adv_v[i] * self.lambda_gae_adv
            # ten_mask[i] * pre_adv_v == (1-done) * gamma * pre_adv_v
        return buf_r_sum, buf_adv_v
class AgentDiscretePPO(AgentPPO):
    """
    Bases: ``AgentPPO``

    PPO variant for environments with a discrete action space: it swaps the
    default actor for ``ActorDiscretePPO`` and otherwise reuses the PPO logic.

    :param net_dim[int]: the dimension of networks (the width of neural networks)
    :param state_dim[int]: the dimension of state (the number of state vector)
    :param action_dim[int]: the dimension of action (the number of discrete action)
    :param gpu_id[int]: id of the GPU this agent runs on
    :param args: optional hyper-parameter container forwarded to ``AgentPPO``.
    """

    def __init__(
        self, net_dim: int, state_dim: int, action_dim: int, gpu_id=0, args=None
    ):
        # Respect network classes pre-set by a subclass; otherwise pick the
        # discrete-action actor and the shared PPO critic.
        if not hasattr(self, "act_class"):
            self.act_class = ActorDiscretePPO
        if not hasattr(self, "cri_class"):
            self.cri_class = CriticPPO
        super().__init__(net_dim, state_dim, action_dim, gpu_id, args)
# FIXME: this class is incomplete
class AgentSharePPO(AgentPPO):
    """PPO variant with a single shared network (``SharePPO``) acting as both
    actor and critic, updated jointly with one optimizer.

    NOTE(review): this class is explicitly marked incomplete. Several names it
    uses are not defined by the code visible here and look like they belong to
    an older API revision — confirm before use:
    ``self.lambda_a_value`` (never assigned), ``self.optim_update`` (base class
    exposes ``optimizer_update``), ``self.if_use_cri_target`` (``__init__`` of
    ``AgentPPO`` sets ``if_cri_target``), and ``AgentPPO.__init__(self)``
    called without the required dimension arguments.
    """

    def __init__(self):
        # NOTE(review): AgentPPO.__init__ requires net_dim/state_dim/action_dim;
        # calling it with no arguments presumably fails — confirm.
        AgentPPO.__init__(self)
        self.obj_c = (-np.log(0.5)) ** 0.5  # for reliable_lambda

    def init(
        self,
        net_dim=256,
        state_dim=8,
        action_dim=2,
        reward_scale=1.0,
        gamma=0.99,
        learning_rate=1e-4,
        if_per_or_gae=False,
        env_num=1,
        gpu_id=0,
    ):
        # Deferred construction of device, networks, optimizer and loss.
        self.device = torch.device(
            f"cuda:{gpu_id}" if torch.cuda.is_available() else "cpu"
        )
        # Choose the return/advantage estimator (GAE or plain reward-to-go).
        if if_per_or_gae:
            self.get_reward_sum = self.get_reward_sum_gae
        else:
            self.get_reward_sum = self.get_reward_sum_raw

        # One shared network serves as both actor and critic.
        self.act = self.cri = SharePPO(state_dim, action_dim, net_dim).to(self.device)

        # Per-module parameter groups; the shared state encoder gets a
        # slightly smaller learning rate than the heads.
        self.cri_optim = torch.optim.Adam(
            [
                {"params": self.act.enc_s.parameters(), "lr": learning_rate * 0.9},
                {
                    "params": self.act.dec_a.parameters(),
                },
                {
                    "params": self.act.a_std_log,
                },
                {
                    "params": self.act.dec_q1.parameters(),
                },
                {
                    "params": self.act.dec_q2.parameters(),
                },
            ],
            lr=learning_rate,
        )
        self.criterion = torch.nn.SmoothL1Loss()

    def update_net(self, buffer, batch_size, repeat_times, soft_update_tau):
        """Jointly update actor and critic from the on-policy buffer.

        :param buffer: sequence of tensors (state, action, noise, reward, mask).
        :param batch_size: SGD mini-batch size.
        :param repeat_times: re-use factor over the collected trajectories.
        :param soft_update_tau: Polyak coefficient for the critic target.
        :return: (critic objective, actor objective, mean a_std_log) for logging.
        """
        with torch.no_grad():
            buf_len = buffer[0].shape[0]
            buf_state, buf_action, buf_noise, buf_reward, buf_mask = [
                ten.to(self.device) for ten in buffer
            ]
            # (ten_state, ten_action, ten_noise, ten_reward, ten_mask) = buffer

            """get buf_r_sum, buf_logprob"""
            bs = 2**10  # set a smaller 'BatchSize' when out of GPU memory.
            # Evaluate the critic in chunks to bound peak GPU memory.
            buf_value = [
                self.cri_target(buf_state[i : i + bs]) for i in range(0, buf_len, bs)
            ]
            buf_value = torch.cat(buf_value, dim=0)
            buf_logprob = self.act.get_old_logprob(buf_action, buf_noise)

            buf_r_sum, buf_adv_v = self.get_reward_sum(
                buf_len, buf_reward, buf_mask, buf_value
            )  # detach()
            # NOTE(review): self.lambda_a_value is never defined in this class
            # or in AgentPPO — this line presumably raises AttributeError.
            buf_adv_v = (buf_adv_v - buf_adv_v.mean()) * (
                self.lambda_a_value / torch.std(buf_adv_v) + 1e-5
            )
            # buf_adv_v: buffer data of adv_v value
            del buf_noise, buffer[:]

        obj_critic = obj_actor = None
        for _ in range(int(buf_len / batch_size * repeat_times)):
            indices = torch.randint(
                buf_len, size=(batch_size,), requires_grad=False, device=self.device
            )

            state = buf_state[indices]
            r_sum = buf_r_sum[indices]
            adv_v = buf_adv_v[indices]  # advantage value
            action = buf_action[indices]
            logprob = buf_logprob[indices]

            """PPO: Surrogate objective of Trust Region"""
            new_logprob, obj_entropy = self.act.get_logprob_entropy(state, action)
            # it is obj_actor  # todo net.py sharePPO
            ratio = (new_logprob - logprob.detach()).exp()
            surrogate1 = adv_v * ratio
            surrogate2 = adv_v * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
            # Pessimistic (clipped) surrogate; negated because we minimize.
            obj_surrogate = -torch.min(surrogate1, surrogate2).mean()
            obj_actor = obj_surrogate + obj_entropy * self.lambda_entropy
            value = self.cri(state).squeeze(
                1
            )  # critic network predicts the reward_sum (Q value) of state
            obj_critic = self.criterion(value, r_sum) / (r_sum.std() + 1e-6)

            # Single combined step because actor and critic share parameters.
            obj_united = obj_critic + obj_actor
            # NOTE(review): base class defines optimizer_update / if_cri_target,
            # not optim_update / if_use_cri_target — confirm intended names.
            self.optim_update(self.cri_optim, obj_united)
            if self.if_use_cri_target:
                self.soft_update(self.cri_target, self.cri, soft_update_tau)

        a_std_log = getattr(self.act, "a_std_log", torch.zeros(1)).mean()
        return obj_critic.item(), obj_actor.item(), a_std_log.item()  # logging_tuple
|
{"hexsha": "8aa9474a202521cd624f9d52667321113a96d380", "size": 15217, "ext": "py", "lang": "Python", "max_stars_repo_path": "elegantrl/agents/AgentPPO.py", "max_stars_repo_name": "ihopethiswillfi/ElegantRL-1", "max_stars_repo_head_hexsha": "b81052a0bc6802443eb0f653c69158396b613b00", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "elegantrl/agents/AgentPPO.py", "max_issues_repo_name": "ihopethiswillfi/ElegantRL-1", "max_issues_repo_head_hexsha": "b81052a0bc6802443eb0f653c69158396b613b00", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "elegantrl/agents/AgentPPO.py", "max_forks_repo_name": "ihopethiswillfi/ElegantRL-1", "max_forks_repo_head_hexsha": "b81052a0bc6802443eb0f653c69158396b613b00", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.127027027, "max_line_length": 121, "alphanum_fraction": 0.5996582769, "include": true, "reason": "import numpy", "num_tokens": 3787}
|
import os
import sys
import pathlib
sys.path.append(str(pathlib.Path(__file__).parent.absolute()) + "/../")
from animation.experiment1_animation import create_ani_expe_1a
from csv_modules.csv_combine import combine_files_exp_1
from fit.fit_map_chimeraX import fit_map_in_map
from reconstruction.semi_exact_cover import get_semi_exact_s
from csv_modules.csv_writer import write_in_file
from general_utils.list_utils import get_element_list, generate_binary_matrix
from general_utils.math_utils import chance_base_point, get_vector_move_1to2
from pdb_to_mrc.pdb_2_mrc import pdb_to_mrc_chains
from general_utils.pdb_utils import get_chains_pdb, move_pdb_center, get_all_pdb_name
from general_utils.download_utils import download_pdb, download_emd
from process_graph.graph_algorithm import graph_aligning
from process_graph.process_graph_utils import generate_graph
from process_mrc.generate import get_mrc_segments, \
get_mrc_synthetic_segments_pdb_list, get_mrc_one
from process_mrc.miscellaneous import get_center_point
from general_utils.mrc_uilts import get_mass_angstrom, get_mrc_level, get_cube_len_angstrom
from globals.global_values import maps_with_pdb_origin, maps_with_pdb_origin_problems
from metric.metrics_mrc import get_geometric_overlap_p, get_cross_correlation
# segments = get_mrc_segments("../../maps/1010/EMD-1010.map", 7, 3, 1)
#
# segments_graph1 = segments
# segments_graph2 = segments
#
# print(segments[0].mask.shape)
# ##segments = get_mrc_synthetic_segments_pdb("../pdb_to_mrc/exit_pdb/175d", 7)
# graph1 = generate_graph(segments_graph1, 50, 0, 6, 1) #Preguntar con los no conectados y sub grafos
# graph2 = generate_graph(segments_graph2, 50, 0, 6, 1) #Preguntar con los no conectados y sub grafos
#
# result = graph_aligning(graph1, graph2, 1, False)
# print(result)
# #draw_graph_similarity(graph, graph, result)
# #draw_graph_similarity_same_image(graph, graph, result)
# figure_math1,figure_math2 = get_similarity_complete_cloud(segments_graph1,segments_graph2,result)
#
#
# from functools import partial
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# from cloud_point import AffineRegistration
# import numpy as np
# import matplotlib
#
# def visualize(iteration, error, X, Y, ax):
# plt.cla()
# ax.scatter(X[:, 0], X[:, 1], X[:, 2], color='red', label='Target')
# ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], color='blue', label='Source')
# ax.text2D(0.87, 0.92, 'Iteration: {:d}\nQ: {:06.4f}'.format(
# iteration, error), horizontalalignment='center', verticalalignment='center', transform=ax.transAxes, fontsize='x-large')
# ax.legend(loc='upper left', fontsize='x-large')
# plt.draw()
# plt.pause(0.001)
#
#
# def main():
# matplotlib.use('TKAgg')
# X = figure_math1
#
# # synthetic data, equaivalent to X + 1
# Y = figure_math2
#
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# callback = partial(visualize, ax=ax)
#
# print(X.shape)
# print(Y.shape)
#
# reg = AffineRegistration(**{'X': X, 'Y': Y})
# reg.register(callback)
# plt.show()
def fitting_process(verbose, path_map1, path_map2, segments_graph1, segments_graph2, figure1_shape, figure2_shape,
                    n_points_face, filter_value, max_distance, min_points, path_exit_folder, attempts):
    """Align two segmented maps via graph matching, then fit map1 into map2.

    Builds one topology graph per segmented map, aligns them, derives the
    centre of the matched segments on each side, and delegates the actual
    ChimeraX fit to ``fitting_process_aux``.
    """
    if verbose:
        print("Fit process start\n\n")

    # Graphs generation: one graph per segmented map, same parameters.
    try:
        graph1, graph2 = (
            generate_graph(segs, n_points_face, filter_value, max_distance, min_points)
            for segs in (segments_graph1, segments_graph2)
        )
    except Exception as e:
        raise Exception('Error to generate graph, the error is : {0}'.format(str(e)))

    # Match the two graphs against each other.
    try:
        alignment_note, result = graph_aligning(graph1, graph2, 1, False)
    except Exception as e:
        raise Exception('Error to graph aligning, the error is : {0}'.format(str(e)))

    if verbose:
        print("Result match graph", result)

    # Indices of the matched segments on each side of the alignment.
    try:
        matched_idx1 = get_element_list(0, result)
        matched_idx2 = get_element_list(1, result)
    except Exception as e:
        raise Exception('Error to generate graph indexs, the error is : {0}'.format(str(e)))

    # Geometric centre of the matched segments on each side.
    try:
        centre1 = get_center_point(matched_idx1, segments_graph1, 0)
        centre2 = get_center_point(matched_idx2, segments_graph2, 0)
    except Exception as e:
        raise Exception('Error to calculate centers points, the error is : {0}'.format(str(e)))

    return fitting_process_aux(attempts, centre1, centre2, figure1_shape, figure2_shape, path_exit_folder,
                               path_map1, path_map2, verbose)
def fitting_process_all_file(verbose, path_map1, path_map2, segments_graph1, segments_graph2, figure1_shape,
                             figure2_shape, path_exit_folder, attempts):
    """Fit map1 into map2 treating each map as a single segment.

    Same pipeline as ``fitting_process`` but skips graph generation and
    alignment: a fixed trivial match ``[[1, 1]]`` pairs the whole maps.
    """
    if verbose:
        print("Fit process start\n\n")

    trivial_match = [[1, 1]]  # whole-map-to-whole-map pairing

    # Indices of the (single) matched segment on each side.
    try:
        matched_idx1 = get_element_list(0, trivial_match)
        matched_idx2 = get_element_list(1, trivial_match)
    except Exception as e:
        raise Exception('Error to generate graph indexs, the error is : {0}'.format(str(e)))

    # Geometric centre of the matched segment on each side.
    try:
        centre1 = get_center_point(matched_idx1, segments_graph1, 0)
        centre2 = get_center_point(matched_idx2, segments_graph2, 0)
    except Exception as e:
        raise Exception('Error to calculate centers points, the error is : {0}'.format(str(e)))

    return fitting_process_aux(attempts, centre1, centre2, figure1_shape, figure2_shape, path_exit_folder,
                               path_map1, path_map2, verbose)
def fitting_process_aux(attempts, center_point1_o, center_point2_o, figure1_shape, figure2_shape, path_exit_folder,
                        path_map1, path_map2, verbose):
    """Translate map1 so its centre overlaps map2's centre, then fit with ChimeraX.

    Pipeline: flip the y coordinate (grid convention fix), convert grid
    coordinates to Angstroms by proportion, compute the translation vector,
    run ``fit_map_in_map``, and attach all intermediate values to the result.

    :param attempts: number of fit attempts passed to ChimeraX.
    :param center_point1_o: centre of the matched segments of map1, grid coords.
    :param center_point2_o: centre of the matched segments of map2, grid coords.
    :param figure1_shape: voxel-grid shape of map1.
    :param figure2_shape: voxel-grid shape of map2.
    :param path_exit_folder: output folder for the fit results.
    :param path_map1: path of the map to be moved.
    :param path_map2: path of the reference map.
    :param verbose: print intermediate values when True.
    :return: the fit result object enriched with the intermediate data.
    """
    # Work on copies so the caller's centre points are not mutated.
    center_point1 = center_point1_o.copy()
    center_point2 = center_point2_o.copy()

    if verbose:
        print("Center point map1 grid:", center_point1)
        print("Center point map2 grid:", center_point2)

    # Repair error in 'y' dimension on grid of chimeraX
    center_point1[1] = figure1_shape[1] - center_point1[1]
    center_point2[1] = figure2_shape[1] - center_point2[1]

    if verbose:
        print("Center point map1 grid with y ok:", center_point1)
        print("Center point map2 grid with y ok:", center_point2)

    # Get Angstrom shape
    try:
        real_shape_cube1 = get_cube_len_angstrom(path_map1)
        real_shape_cube2 = get_cube_len_angstrom(path_map2)
    except Exception as e:
        raise Exception('Error to calculate real shape cube, the error is : {0}'.format(str(e)))

    if verbose:
        print("Angstrom shape map1:", real_shape_cube1)
        print("Angstrom shape map2:", real_shape_cube2)

    # Transformation by a rule of 3 from the central point of the grid to the Angstroms cube
    try:
        center_point1_a = chance_base_point(center_point1, figure1_shape, real_shape_cube1)
        center_point2_a = chance_base_point(center_point2, figure2_shape, real_shape_cube2)
    except Exception as e:
        raise Exception('Error to transform center points by rule of 3, the error is : {0}'.format(str(e)))

    if verbose:
        print("Center point in Angstrom cube of map1:", center_point1_a)
        print("Center point in Angstrom cube of map2:", center_point2_a)

    # Obtain the move vector from a 3d point to another 3d point
    try:
        move_vector = get_vector_move_1to2(center_point1_a, center_point2_a)
    except Exception as e:
        raise Exception('Error to calculate move vector, the error is : {0}'.format(str(e)))

    if verbose:
        print("Move vector of map1:", move_vector)

    # Fit map in map
    try:
        result = fit_map_in_map(path_map1, path_map2, path_exit_folder, attempts, map0_vector_move=move_vector)
    except Exception as e:
        raise Exception('Error to calculate fit map in map, the error is : {0}'.format(str(e)))

    # Attach every intermediate value to the result for downstream logging.
    result.shape_cube1 = figure1_shape
    result.shape_cube2 = figure2_shape
    result.center_point1_o = center_point1_o
    result.center_point2_o = center_point2_o
    result.center_point1 = center_point1
    result.center_point2 = center_point2
    result.real_shape_cube1 = real_shape_cube1
    result.real_shape_cube2 = real_shape_cube2
    result.center_point1_a = center_point1_a
    result.center_point2_a = center_point2_a
    result.move_vector_map1 = move_vector
    # Overlap as a fraction of the lighter map's mass.
    result.percentage_of_overlap = result.overlap_mass / (
        min(get_mass_angstrom(path_map1), get_mass_angstrom(path_map2)))

    if verbose:
        print("Data for result fit are:")
        result.print_data()
    return result
def main_1():
    """Fit the simulated map of PDB 175d against the simulated map of 6m03."""
    path = './maps_pdb'
    if not os.path.isdir(path):
        os.mkdir(path)

    pdb_ids = ('175d', '6m03')

    # Download both structures first, then render each one to an MRC map.
    for pdb_id in pdb_ids:
        download_pdb(pdb_id, '{0}/{1}.pdb'.format(path, pdb_id))
    for pdb_id in pdb_ids:
        pdb_file = '{0}/{1}.pdb'.format(path, pdb_id)
        pdb_to_mrc_chains(True, False, 5.0, pdb_file, path, get_chains_pdb(pdb_file), 5)

    # Segments generation
    segs1, original1 = get_mrc_segments("{0}/175d/175d.mrc".format(path), 7, 3, 1)
    segs2, original2 = get_mrc_segments("{0}/6m03/6m03.mrc".format(path), 7, 3, 1)

    result = fitting_process(True, "{0}/175d/175d.mrc".format(path), "{0}/6m03/6m03.mrc".format(path), segs1,
                             segs2, original1.mask.shape, original2.mask.shape, 50, 0, 6, 1, './exit_fit', 50)
def main_2():
    """Fit the simulated map of PDB 175d against itself (sanity check)."""
    path = './maps_pdb'
    if not os.path.isdir(path):
        os.mkdir(path)

    pdb_file = '{0}/175d.pdb'.format(path)
    download_pdb('175d', pdb_file)

    # Maps creation: one MRC per chain plus the full map.
    chains = get_chains_pdb(pdb_file)
    pdb_to_mrc_chains(True, False, 5.0, pdb_file, path, chains, len(chains))

    # Segments generation from the per-chain synthetic maps.
    map_file = "{0}/175d/175d.mrc".format(path)
    segments, original = get_mrc_synthetic_segments_pdb_list(map_file,
                                                             "{0}/175d".format(path), 7)
    shape = original.mask.shape

    # Self-fit: both sides use the same map, segments and shape.
    result = fitting_process(True, map_file, map_file, segments,
                             segments, shape, shape, 50, 0, 6, 1, './exit_fit', 50)
def main_3():
    """Batch-fit every simulated map against its original EMDB map and log
    the fit metrics to a CSV file.

    Iterates over ``maps_with_pdb_origin``, skipping known-problematic
    entries, and resumes from entry '0009' (earlier entries were presumably
    processed in a previous run — TODO confirm).
    """
    con = 0
    len_file = len(maps_with_pdb_origin)
    # Column headers for the results CSV.
    headers_csv = ['map_name', 'center_p1A', 'center_p2A', 'move_v1', 'move_v2', 'map1_level', 'map2_level',
                   'Num_poins',
                   'Correlation', 'Correlation_about_mean', 'Overlap', 'Steps', 'Shift', 'Angle', 'Matrix_rt', 'Axis',
                   'Axis_point', 'Rotation_angle', 'Shift_along_axis', 'map1_path', 'map2_path']
    initial_flag = False
    for i in maps_with_pdb_origin:
        con += 1
        # Strip a 4-char prefix and a 4-char suffix from the entry to get the
        # EMD id (e.g. 'emd_0009.map' style entries — TODO confirm format).
        name = i[4:-1]
        name = name[:-3]
        if name in maps_with_pdb_origin_problems:
            continue
        # Resume point: start processing once entry '0009' is reached.
        if name == '0009':
            initial_flag = True
        if not initial_flag:
            continue

        print("Actual execution: ", name)
        # Creation of segments in this case of only 1 element
        try:
            segments_graph1, original_structure1 = \
                get_mrc_one("/mnt/hgfs/Project_files/selected_sim/sim_emd_{0}.mrc".format(name), 7)
            segments_graph2, original_structure2 = \
                get_mrc_one("/mnt/hgfs/Project_files/original_maps/original_maps/emd_{0}.map".format(name), 7)
            figure1_shape = original_structure1.mask.shape
            figure2_shape = original_structure2.mask.shape
        except Exception as e:
            # Log unreadable entries and keep going with the rest of the batch.
            f = open('log.txt', "a+")
            f.write('Error to open file: {0}'.format(name))
            f.write(str(e))
            f.write(str('\n\n'))
            f.close()
            continue

        # Fit map in map
        result = fitting_process_all_file(True, '/mnt/hgfs/Project_files/selected_sim/sim_emd_{0}.mrc'.format(name),
                                          '/mnt/hgfs/Project_files/original_maps/original_maps/emd_{0}.map'.format(
                                              name),
                                          segments_graph1, segments_graph2, figure1_shape, figure2_shape,
                                          './exit_fit_god',
                                          50)
        # One CSV row per map with every fit metric stringified.
        data_write = [[name, str(result.center_point1_o), str(result.center_point2_o), str(result.move_vector_map1),
                       [0, 0, 0], str('Defa'), str('Defa'), str(result.num_poins), str(result.correlation),
                       str(result.correlation_about_mean), str(result.overlap), str(result.steps), str(result.shift),
                       str(result.angle), str(result.matrix_rt), str(result.axis), str(result.axis_point),
                       str(result.rotation_angle), str(result.shift_along_axis),
                       '/mnt/hgfs/Project_files/selected_sim/sim_emd_{0}.mrc'.format(name),
                       '/mnt/hgfs/Project_files/original_maps/original_maps/emd_{0}.map'.format(name)]]
        write_in_file('./exit_fit_god/results2.csv', headers_csv, data_write)
        result.print_data()
        print("Actual execution: ", con, " of map: ", name, " progress: ", con / len_file)
def main_4():
    """Fit a single simulated map (sim_emd_9882) against its EMDB original."""
    sim_path = "/home/lcastillo98/Documents/git_projects/sim_emd_9882.mrc"
    org_path = "/home/lcastillo98/Documents/git_projects/emd_9882.map"

    # Each map is treated as a single segment.
    sim_segments, sim_structure = get_mrc_one(sim_path, 7)
    org_segments, org_structure = get_mrc_one(org_path, 7)

    # Fit map in map
    result = fitting_process_all_file(True, sim_path,
                                      org_path,
                                      sim_segments, org_segments,
                                      sim_structure.mask.shape, org_structure.mask.shape,
                                      './exit_fit',
                                      50)
    print("Fin")
def main_5():
    """Download EMDB entry 0009 into ./exit_fit (third argument enables an
    extra download option of download_emd — TODO confirm its meaning)."""
    download_emd("0009", './exit_fit/0009.map', True)
def main_6():
    """Compute overlap and cross-correlation metrics between a fitted
    simulated map and its EMDB original."""
    import mrcfile

    fitted_map = mrcfile.open('/home/lcastillo98/Documents/git_projects/sim_emd_9882_fit.mrc')
    original_map = mrcfile.open('/home/lcastillo98/Documents/git_projects/emd_9882.map')
    # fitted_map = mrcfile.open('/home/lcastillo98/Documents/git_projects/cube50x50x50.map')
    # original_map = mrcfile.open('/home/lcastillo98/Documents/git_projects/cube100x100x100.map')

    overlap = get_geometric_overlap_p(fitted_map.data, original_map.data)
    print("Percentage of overlap:", overlap)

    correlation = get_cross_correlation(fitted_map.data, original_map.data)
    print("Cross_correlation:", correlation)
def main_7():
    """Run the semi-exact set-cover solver on a small hand-written matrix."""
    # Each row is a candidate set; each column a point to be covered.
    initial_matrix = [[1, 0, 0, 0, 0, 0, 0],
                      [1, 1, 0, 0, 1, 0, 0],
                      [0, 0, 1, 1, 0, 1, 1],
                      [0, 0, 0, 1, 0, 0, 1],
                      [1, 0, 1, 0, 0, 1, 0],
                      [1, 1, 1, 1, 1, 1, 1]
                      ]
    top = 12

    packed = generate_binary_matrix(initial_matrix)
    combinations = get_semi_exact_s(packed, top, 2)
    print("Combinations: ", combinations)
def main_8():
    """Binarize the per-chain segment masks of PDB 175d and run the
    semi-exact set-cover solver on them."""
    path = './maps_pdb'
    if not os.path.isdir(path):
        os.mkdir(path)

    pdb_file = '{0}/175d.pdb'.format(path)
    download_pdb('175d', pdb_file)

    # Maps creation: one MRC per chain.
    chains = get_chains_pdb(pdb_file)
    pdb_to_mrc_chains(True, False, 5.0, pdb_file, path, chains, len(chains))

    # Segments generation
    segments, _original = get_mrc_synthetic_segments_pdb_list("{0}/175d/175d.mrc".format(path),
                                                              "{0}/175d".format(path), 7)

    def as_binary_row(segment):
        # Flatten the mask and binarize in place (any positive voxel -> 1).
        row = segment.mask.ravel()
        row[row > 0] = 1
        return row.astype(int).tolist()

    initial_matrix = [as_binary_row(seg) for seg in segments]

    print("Can elements ", len(initial_matrix))
    print(initial_matrix)

    top = 10
    packed = generate_binary_matrix(initial_matrix)
    combinations = get_semi_exact_s(packed, len(initial_matrix[0]), top, 80)
    print("Combinations: ", combinations)
def main_9():
    """Benchmark the semi-exact cover solver on all binary tuples of width
    2..10 and plot runtime against the number of candidate rows."""
    import itertools
    import time
    import matplotlib.pyplot as plt

    durations = []
    row_counts = []
    for width in range(2, 11):
        # All 2**width binary tuples of this width.
        initial_matrix = list(itertools.product([0, 1], repeat=width))
        top = 10000
        packed = generate_binary_matrix(initial_matrix)
        can_elements = len(initial_matrix)

        start_time = time.time()
        combinations = get_semi_exact_s(packed, top, 20)
        total_seconds = (time.time() - start_time)

        print("--- {0} seconds --- can elements: {1}".format(total_seconds, can_elements))
        durations.append(total_seconds)
        row_counts.append(can_elements)

    plt.plot(row_counts, durations)
    plt.ylabel('Time in seconds')
    plt.xlabel('Can elements')
    plt.show()
def main_10():
    """Load the 1bgy map and all its per-chain maps into PyMOL, create a
    (hidden) isosurface per chain, colour the two 'father' surfaces and save
    a screenshot.

    FIXES vs. the original: ``from pymol import cmd`` was imported twice, the
    load / isosurface / hide triple was copy-pasted 22 times, and a large
    commented-out call was left behind; the chain list is now data-driven.
    """
    from pymol import cmd

    base = "/home/lcastillo98/Desktop/data_experiment_1_a/1bgy"
    # Chain identifiers present for 1bgy (note: there is no 'L' chain).
    chains = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
              'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W']

    # Load the full map asynchronously, block on the first chain map, then
    # load the remaining chain maps and the two 'father' point-cloud maps.
    cmd.load("{0}/1bgy.mrc".format(base), finish=0)
    cmd.load("{0}/1bgy_A.mrc".format(base), finish=1)
    for chain in chains[1:]:
        cmd.load("{0}/1bgy_{1}.mrc".format(base, chain))
    cmd.load("{0}/1bgy_father_pc.mrc".format(base))
    cmd.load("{0}/1bgy_father_pt.mrc".format(base))

    # Volume rendering for the full map.
    cmd.volume("1bgy_volume", "1bgy")
    cmd.show("volume", "1bgy_volume")

    # One isosurface per chain, created then immediately hidden.
    for chain in chains:
        surface_name = "1bgy_{0}_surface".format(chain)
        cmd.isosurface(surface_name, "1bgy_{0}".format(chain))
        cmd.hide("surface", surface_name)

    # Coloured surfaces for the two 'father' maps.
    cmd.isosurface("1bgy_father_pc_surface", "1bgy_father_pc")
    cmd.color("red", "1bgy_father_pc_surface")
    cmd.isosurface("1bgy_father_pt_surface", "1bgy_father_pt")
    cmd.color("white", "1bgy_father_pt_surface")

    cmd.png("{0}/1.png".format(base))
def experiment_1():
    """Run experiment 1A (v2) over the selected map resolutions.

    Executes ``do_parallel_test_a`` with resolutions 3.5 and 9.5,
    incompleteness range 10-15 % and up to 10 attempts per experiment,
    writing results to ``result.csv`` under the local data directory.
    """
    # Imported lazily so the heavy experiment machinery only loads when this
    # experiment is actually selected. (The previously imported
    # do_parallel_test_b was never used and has been dropped.)
    from experiment.experiment_1 import do_parallel_test_a

    local_path = "/home/lcastillo98/Documents/git_projects/biostructure/reconstruction"
    # local_path = "/work/lcastillo"  # cluster path, toggled manually
    print("Start")
    do_parallel_test_a("{0}/data_experiment_1_a_v2".format(local_path), "result.csv", [3.5, 9.5],
                       range_incompleteness=[10.0, 15.0], can_try_experiments=10)
    print("Finish")
def experiment_3():
    """Run experiment 3A with resolutions 3.5 and 9.5."""
    # Lazy import: only load the experiment machinery when selected.
    from experiment.experiment_3 import do_parallel_test_a

    base_dir = "/home/lcastillo98/Documents/git_projects/biostructure/reconstruction"
    # base_dir = "/work/lcastillo"  # cluster path, toggled manually
    print("Start")
    do_parallel_test_a("{0}/data_experiment_3_a".format(base_dir), "result.csv", [3.5, 9.5], 1)
    print("Finish")
def union_test():
    """Merge the per-run CSV outputs of experiment 1A into 'salida.csv'."""
    data_dir = "/home/lcastillo98/Documents/git_projects/biostructure/reconstruction/data_experiment_1_a"
    # data_dir = "/work/lcastillo/data_experiment_1_a"  # cluster path
    combine_files_exp_1('salida.csv', data_dir)
if __name__ == '__main__':
    # Manual experiment selector: exactly one call is kept active at a time;
    # the remaining entry points stay commented out as a toggle.
    # main_1()
    # main_2()
    # main_3()
    # main_4()
    # main_5()
    # main_6()
    # main_7()
    # main_8()
    # main_9()
    # main_10()
    experiment_1()
    # experiment_3()
    # union_test()
|
{"hexsha": "0fdb21f31fdee54a1275766412bdeddf4155a277", "size": 22878, "ext": "py", "lang": "Python", "max_stars_repo_path": "reconstruction/main/main.py", "max_stars_repo_name": "tecdatalab/biostructure", "max_stars_repo_head_hexsha": "a30e907e83fa5bbfb934d951b7c663b622104fcc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reconstruction/main/main.py", "max_issues_repo_name": "tecdatalab/biostructure", "max_issues_repo_head_hexsha": "a30e907e83fa5bbfb934d951b7c663b622104fcc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2019-06-17T16:13:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T05:23:59.000Z", "max_forks_repo_path": "reconstruction/main/main.py", "max_forks_repo_name": "tecdatalab/biostructure", "max_forks_repo_head_hexsha": "a30e907e83fa5bbfb934d951b7c663b622104fcc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2781690141, "max_line_length": 130, "alphanum_fraction": 0.6945974298, "include": true, "reason": "import numpy", "num_tokens": 6632}
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Description
#
# Tests of colors.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Under an IO context with `:color => true`, the header row must be printed in
# bold (`\e[1m`) and a second header row (sub-header) in dark gray (`\e[90m`).
@testset "Default color" begin
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │  false │    1.0 │      1 │
│      2 │   true │    2.0 │      2 │
│      3 │  false │    3.0 │      3 │
│      4 │   true │    4.0 │      4 │
│      5 │  false │    5.0 │      5 │
│      6 │   true │    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data),
context = :color => true)
@test result == expected
# Two-row header: first row bold, second row (sub-header) dark gray.
header = ([1, 2, 3, 4],
[5, 6, 7, 8])
expected = """
┌───┬───────┬─────┬───┐
│\e[1m 1 \e[0m│\e[1m     2 \e[0m│\e[1m   3 \e[0m│\e[1m 4 \e[0m│
│\e[90m 5 \e[0m│\e[90m     6 \e[0m│\e[90m   7 \e[0m│\e[90m 8 \e[0m│
├───┼───────┼─────┼───┤
│ 1 │ false │ 1.0 │ 1 │
│ 2 │  true │ 2.0 │ 2 │
│ 3 │ false │ 3.0 │ 3 │
│ 4 │  true │ 4.0 │ 4 │
│ 5 │ false │ 5.0 │ 5 │
│ 6 │  true │ 6.0 │ 6 │
└───┴───────┴─────┴───┘
"""
result = sprint((io)->pretty_table(io, data; header = header),
context = :color => true)
@test result == expected
end
# `show_row_number = true` must prepend a bold "Row" column with 1-based
# indices; the data cells themselves stay uncolored.
@testset "Row number" begin
expected = """
┌─────┬────────┬────────┬────────┬────────┐
│\e[1m Row \e[0m│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├─────┼────────┼────────┼────────┼────────┤
│   1 │      1 │  false │    1.0 │      1 │
│   2 │      2 │   true │    2.0 │      2 │
│   3 │      3 │  false │    3.0 │      3 │
│   4 │      4 │   true │    4.0 │      4 │
│   5 │      5 │  false │    5.0 │      5 │
│   6 │      6 │   true │    6.0 │      6 │
└─────┴────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data, show_row_number = true),
context = :color => true)
@test result == expected
end
# Row names must be rendered bold, like the header, in a dedicated column
# whose title is set via `row_name_column_title`.
@testset "Row name" begin
expected = """
┌──────┬────────┬────────┬────────┬────────┐
│\e[1m Name \e[0m│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├──────┼────────┼────────┼────────┼────────┤
│\e[1m    A \e[0m│      1 │  false │    1.0 │      1 │
│\e[1m    B \e[0m│      2 │   true │    2.0 │      2 │
│\e[1m    C \e[0m│      3 │  false │    3.0 │      3 │
│\e[1m    D \e[0m│      4 │   true │    4.0 │      4 │
│\e[1m    E \e[0m│      5 │  false │    5.0 │      5 │
│\e[1m    F \e[0m│      6 │   true │    6.0 │      6 │
└──────┴────────┴────────┴────────┴────────┘
"""
# 'A' + (0:5) yields the characters 'A' through 'F'.
row_names = ['A'+i for i = 0:5]
result = sprint((io)->pretty_table(io, data,
row_names = row_names,
row_name_column_title = "Name"),
context = :color => true)
@test result == expected
end
# `border_crayon` must wrap every individual border element (corners, rules
# and column separators) in the requested color, resetting after each one.
@testset "Border color" begin
expected = """
\e[33m┌\e[0m\e[33m────────\e[0m\e[33m┬\e[0m\e[33m────────\e[0m\e[33m┬\e[0m\e[33m────────\e[0m\e[33m┬\e[0m\e[33m────────\e[0m\e[33m┐\e[0m
\e[33m│\e[0m\e[1m Col. 1 \e[0m\e[33m│\e[0m\e[1m Col. 2 \e[0m\e[33m│\e[0m\e[1m Col. 3 \e[0m\e[33m│\e[0m\e[1m Col. 4 \e[0m\e[33m│\e[0m
\e[33m├\e[0m\e[33m────────\e[0m\e[33m┼\e[0m\e[33m────────\e[0m\e[33m┼\e[0m\e[33m────────\e[0m\e[33m┼\e[0m\e[33m────────\e[0m\e[33m┤\e[0m
\e[33m│\e[0m      1 \e[33m│\e[0m  false \e[33m│\e[0m    1.0 \e[33m│\e[0m      1 \e[33m│\e[0m
\e[33m│\e[0m      2 \e[33m│\e[0m   true \e[33m│\e[0m    2.0 \e[33m│\e[0m      2 \e[33m│\e[0m
\e[33m│\e[0m      3 \e[33m│\e[0m  false \e[33m│\e[0m    3.0 \e[33m│\e[0m      3 \e[33m│\e[0m
\e[33m│\e[0m      4 \e[33m│\e[0m   true \e[33m│\e[0m    4.0 \e[33m│\e[0m      4 \e[33m│\e[0m
\e[33m│\e[0m      5 \e[33m│\e[0m  false \e[33m│\e[0m    5.0 \e[33m│\e[0m      5 \e[33m│\e[0m
\e[33m│\e[0m      6 \e[33m│\e[0m   true \e[33m│\e[0m    6.0 \e[33m│\e[0m      6 \e[33m│\e[0m
\e[33m└\e[0m\e[33m────────\e[0m\e[33m┴\e[0m\e[33m────────\e[0m\e[33m┴\e[0m\e[33m────────\e[0m\e[33m┴\e[0m\e[33m────────\e[0m\e[33m┘\e[0m
"""
result = sprint((io)->pretty_table(io, data, border_crayon = crayon"yellow"),
context = :color => true)
@test result == expected
end
# Highlighters can be built from a predicate plus keyword attributes, from a
# predicate plus a Crayon, or from a predicate plus a crayon-returning
# function; all three must produce identical output, and a tuple of
# highlighters is applied in order.
@testset "Highlighters" begin
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │  false │    1.0 │      1 │
│      2 │   true │    2.0 │      2 │
│      3 │  false │\e[33;1m    3.0 \e[0m│      3 │
│      4 │   true │    4.0 │      4 │
│      5 │  false │    5.0 │      5 │
│      6 │   true │    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
hl = Highlighter((data, i, j)-> i == 3 && j == 3;
bold = true,
foreground = :yellow)
result = sprint((io)->pretty_table(io, data, highlighters = hl),
context = :color => true)
@test result == expected
# Same highlighter, constructed directly from a Crayon.
hl = Highlighter((data, i, j)-> i == 3 && j == 3,
crayon"yellow bold")
result = sprint((io)->pretty_table(io, data, highlighters = hl),
context = :color => true)
@test result == expected
hl2 = Highlighter((data, i, j)-> i == 3 && j == 2,
crayon"blue bold")
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │  false │    1.0 │      1 │
│      2 │   true │    2.0 │      2 │
│      3 │\e[34;1m  false \e[0m│\e[33;1m    3.0 \e[0m│      3 │
│      4 │   true │    4.0 │      4 │
│      5 │  false │    5.0 │      5 │
│      6 │   true │    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data, highlighters = (hl, hl2)),
context = :color => true)
@test result == expected
# A highlighter whose crayon is computed by a function must behave the same
# as the static Crayon version.
hl3 = Highlighter((data, i, j)-> data[i,j] isa AbstractFloat && data[i,j] == 3,
(h, data, i, j)->crayon"yellow bold")
result = sprint((io)->pretty_table(io, data, highlighters = (hl3, hl2)),
context = :color => true)
@test result == expected
end
# Highlighter predicates must receive the ORIGINAL (unfiltered, uncropped)
# indices: with middle vertical cropping and row/column filters active, the
# highlighter for (100, 2) must still hit the cell showing 100.
@testset "Highlighters with table cropping and filters" begin
matrix = [1:1:100 1:1:100 1:1:100]
hl = Highlighter((data, i, j)-> i == 100 && j == 2,
crayon"yellow")
expected = """
┌────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│
├────────┼────────┼────────┤
│      1 │      1 │      1 │
│      2 │      2 │      2 │
│      3 │      3 │      3 │
│      4 │      4 │      4 │
│      ⋮ │      ⋮ │      ⋮ │
│     98 │     98 │     98 │
│     99 │     99 │     99 │
│    100 │\e[33m    100 \e[0m│    100 │
└────────┴────────┴────────┘
\e[36m             93 rows omitted\e[0m
"""
result = sprint((io)->pretty_table(io, matrix,
crop = :both,
display_size = (15,-1),
highlighters = (hl,),
vcrop_mode = :middle),
context = :color => true)
@test result == expected
# Same, with a row filter keeping even rows only.
expected = """
┌────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│
├────────┼────────┼────────┤
│      2 │      2 │      2 │
│      4 │      4 │      4 │
│      6 │      6 │      6 │
│      8 │      8 │      8 │
│      ⋮ │      ⋮ │      ⋮ │
│     96 │     96 │     96 │
│     98 │     98 │     98 │
│    100 │\e[33m    100 \e[0m│    100 │
└────────┴────────┴────────┘
\e[36m             43 rows omitted\e[0m
"""
result = sprint((io)->pretty_table(io, matrix,
crop = :both,
display_size = (15,-1),
filters_row = ((data, i)->i % 2 == 0,),
highlighters = (hl,),
vcrop_mode = :middle),
context = :color => true)
@test result == expected
# Same, additionally filtering out column 1: the highlighter still refers to
# original column 2, now shown first.
expected = """
┌────────┬────────┐
│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│
├────────┼────────┤
│      2 │      2 │
│      4 │      4 │
│      6 │      6 │
│      8 │      8 │
│      ⋮ │      ⋮ │
│     96 │     96 │
│     98 │     98 │
│\e[33m    100 \e[0m│    100 │
└────────┴────────┘
\e[36m    43 rows omitted\e[0m
"""
result = sprint((io)->pretty_table(io, matrix,
crop = :both,
display_size = (15,-1),
filters_col = ((data, i)->i != 1,),
filters_row = ((data, i)->i % 2 == 0,),
highlighters = (hl,),
vcrop_mode = :middle),
context = :color => true)
@test result == expected
end
# The bundled highlighter constructors: hl_cell (single cell / list of cells),
# hl_col, hl_row (single index / list), hl_lt, hl_leq, hl_gt, hl_geq and
# hl_value. Note that `true`/`false` compare numerically as 1/0, so they are
# caught by the <, <=, >, >= highlighters.
@testset "Pre-defined highlighters" begin
# hl_cell
# ==========================================================================
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │  false │    1.0 │      1 │
│      2 │   true │    2.0 │      2 │
│      3 │  false │\e[33m    3.0 \e[0m│      3 │
│      4 │   true │    4.0 │      4 │
│      5 │  false │    5.0 │      5 │
│      6 │   true │    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data;
highlighters = hl_cell(3, 3, crayon"yellow")),
context = :color => true)
@test result == expected
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │  false │    1.0 │      1 │
│      2 │   true │\e[33m    2.0 \e[0m│      2 │
│      3 │  false │\e[33m    3.0 \e[0m│      3 │
│      4 │   true │    4.0 │\e[33m      4 \e[0m│
│      5 │  false │    5.0 │      5 │
│      6 │   true │    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data;
highlighters = hl_cell([(2, 3), (3, 3), (4, 4)],
crayon"yellow")),
context = :color => true)
@test result == expected
# hl_col
# ==========================================================================
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │\e[33m  false \e[0m│    1.0 │      1 │
│      2 │\e[33m   true \e[0m│    2.0 │      2 │
│      3 │\e[33m  false \e[0m│    3.0 │      3 │
│      4 │\e[33m   true \e[0m│    4.0 │      4 │
│      5 │\e[33m  false \e[0m│    5.0 │      5 │
│      6 │\e[33m   true \e[0m│    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data;
highlighters = hl_col(2, crayon"yellow")),
context = :color => true)
@test result == expected
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │\e[33m  false \e[0m│    1.0 │\e[33m      1 \e[0m│
│      2 │\e[33m   true \e[0m│    2.0 │\e[33m      2 \e[0m│
│      3 │\e[33m  false \e[0m│    3.0 │\e[33m      3 \e[0m│
│      4 │\e[33m   true \e[0m│    4.0 │\e[33m      4 \e[0m│
│      5 │\e[33m  false \e[0m│    5.0 │\e[33m      5 \e[0m│
│      6 │\e[33m   true \e[0m│    6.0 │\e[33m      6 \e[0m│
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data;
highlighters = hl_col([2,4], crayon"yellow")),
context = :color => true)
@test result == expected
# hl_row
# ==========================================================================
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │  false │    1.0 │      1 │
│\e[33m      2 \e[0m│\e[33m   true \e[0m│\e[33m    2.0 \e[0m│\e[33m      2 \e[0m│
│      3 │  false │    3.0 │      3 │
│      4 │   true │    4.0 │      4 │
│      5 │  false │    5.0 │      5 │
│      6 │   true │    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data;
highlighters = hl_row(2, crayon"yellow")),
context = :color => true)
@test result == expected
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │  false │    1.0 │      1 │
│\e[33m      2 \e[0m│\e[33m   true \e[0m│\e[33m    2.0 \e[0m│\e[33m      2 \e[0m│
│      3 │  false │    3.0 │      3 │
│\e[33m      4 \e[0m│\e[33m   true \e[0m│\e[33m    4.0 \e[0m│\e[33m      4 \e[0m│
│      5 │  false │    5.0 │      5 │
│      6 │   true │    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data;
highlighters = hl_row([2,4], crayon"yellow")),
context = :color => true)
@test result == expected
# hl_lt
# ==========================================================================
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│\e[31;1m      1 \e[0m│\e[31;1m  false \e[0m│\e[31;1m    1.0 \e[0m│\e[31;1m      1 \e[0m│
│\e[31;1m      2 \e[0m│\e[31;1m   true \e[0m│\e[31;1m    2.0 \e[0m│\e[31;1m      2 \e[0m│
│      3 │\e[31;1m  false \e[0m│    3.0 │      3 │
│      4 │\e[31;1m   true \e[0m│    4.0 │      4 │
│      5 │\e[31;1m  false \e[0m│    5.0 │      5 │
│      6 │\e[31;1m   true \e[0m│    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data; highlighters = hl_lt(3)),
context = :color => true)
@test result == expected
# hl_leq
# ==========================================================================
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│\e[31;1m      1 \e[0m│\e[31;1m  false \e[0m│\e[31;1m    1.0 \e[0m│\e[31;1m      1 \e[0m│
│\e[31;1m      2 \e[0m│\e[31;1m   true \e[0m│\e[31;1m    2.0 \e[0m│\e[31;1m      2 \e[0m│
│\e[31;1m      3 \e[0m│\e[31;1m  false \e[0m│\e[31;1m    3.0 \e[0m│\e[31;1m      3 \e[0m│
│      4 │\e[31;1m   true \e[0m│    4.0 │      4 │
│      5 │\e[31;1m  false \e[0m│    5.0 │      5 │
│      6 │\e[31;1m   true \e[0m│    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data; highlighters = hl_leq(3)),
context = :color => true)
@test result == expected
# hl_gt
# ==========================================================================
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │  false │    1.0 │      1 │
│      2 │   true │    2.0 │      2 │
│      3 │  false │    3.0 │      3 │
│\e[34;1m      4 \e[0m│   true │\e[34;1m    4.0 \e[0m│\e[34;1m      4 \e[0m│
│\e[34;1m      5 \e[0m│  false │\e[34;1m    5.0 \e[0m│\e[34;1m      5 \e[0m│
│\e[34;1m      6 \e[0m│   true │\e[34;1m    6.0 \e[0m│\e[34;1m      6 \e[0m│
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data; highlighters = hl_gt(3)),
context = :color => true)
@test result == expected
# hl_geq
# ==========================================================================
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │  false │    1.0 │      1 │
│      2 │   true │    2.0 │      2 │
│\e[34;1m      3 \e[0m│  false │\e[34;1m    3.0 \e[0m│\e[34;1m      3 \e[0m│
│\e[34;1m      4 \e[0m│   true │\e[34;1m    4.0 \e[0m│\e[34;1m      4 \e[0m│
│\e[34;1m      5 \e[0m│  false │\e[34;1m    5.0 \e[0m│\e[34;1m      5 \e[0m│
│\e[34;1m      6 \e[0m│   true │\e[34;1m    6.0 \e[0m│\e[34;1m      6 \e[0m│
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data; highlighters = hl_geq(3)),
context = :color => true)
@test result == expected
# hl_value
# ==========================================================================
expected = """
┌────────┬────────┬────────┬────────┐
│\e[1m Col. 1 \e[0m│\e[1m Col. 2 \e[0m│\e[1m Col. 3 \e[0m│\e[1m Col. 4 \e[0m│
├────────┼────────┼────────┼────────┤
│      1 │  false │    1.0 │      1 │
│      2 │   true │    2.0 │      2 │
│\e[33;1m      3 \e[0m│  false │\e[33;1m    3.0 \e[0m│\e[33;1m      3 \e[0m│
│      4 │   true │    4.0 │      4 │
│      5 │  false │    5.0 │      5 │
│      6 │   true │    6.0 │      6 │
└────────┴────────┴────────┴────────┘
"""
result = sprint((io)->pretty_table(io, data; highlighters = hl_value(3)),
context = :color => true)
@test result == expected
end
# Markdown cells: with `linebreaks = true` the rendered (ANSI-colored)
# Markdown is split across table lines; with `linebreaks = false` it is kept
# on a single line.
# NOTE(review): rebinding `data` below is local to the @testset scope and
# does not affect the `data` used by the other testsets — verify if the
# surrounding runner relies on that.
# NOTE(review): the exact interior whitespace of the expected strings could
# not be fully verified in this view.
@testset "Markdown" begin
# With linebreaks
# ==========================================================================
a = md"""
# Header
This is a paragraph.
```julia
function test()
    return 1
end
```
""";
data = [1 a
2 a]
expected = """
┌────────┬────────────────────────┐
│\e[1m Col. 1 \e[0m│\e[1m                 Col. 2 \e[0m│
├────────┼────────────────────────┤
│      1 │               \e[1m  Header\e[22m\e[0m │
│        │             \e[1m  ≡≡≡≡≡≡≡≡\e[22m\e[0m │
│        │                       \e[0m │
│        │   This is a paragraph.\e[0m │
│        │                       \e[0m │
│        │      \e[36m  function test()\e[39m\e[0m │
│        │         \e[36m      return 1\e[39m\e[0m │
│        │                  \e[36m  end\e[39m\e[0m │
├────────┼────────────────────────┤
│      2 │               \e[1m  Header\e[22m\e[0m │
│        │             \e[1m  ≡≡≡≡≡≡≡≡\e[22m\e[0m │
│        │                       \e[0m │
│        │   This is a paragraph.\e[0m │
│        │                       \e[0m │
│        │      \e[36m  function test()\e[39m\e[0m │
│        │         \e[36m      return 1\e[39m\e[0m │
│        │                  \e[36m  end\e[39m\e[0m │
└────────┴────────────────────────┘
"""
result = sprint((io)->pretty_table(io, data,
hlines = :all,
linebreaks = true),
context = :color => true)
@test result == expected
# Without linebreaks
# ==========================================================================
a = md"""
**bold**
*italics*
"""
data = [1 a]
expected = """
┌────────┬────────────────┐
│\e[1m Col. 1 \e[0m│\e[1m         Col. 2 \e[0m│
├────────┼────────────────┤
│      1 │   \e[1mbold\e[22m \e[4mitalics\e[24m │
└────────┴────────────────┘
"""
result = sprint((io)->pretty_table(io, data,
hlines = :all,
linebreaks = false),
context = :color => true)
@test result == expected
end
|
{"hexsha": "cdf523eb884e715bdcf6881e245877fc16aad8ba", "size": 20124, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/text_backend/colors.jl", "max_stars_repo_name": "waldyrious/PrettyTables.jl", "max_stars_repo_head_hexsha": "5aea4f21b5a51ce8e160de88ffe7c12838a71f5f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/text_backend/colors.jl", "max_issues_repo_name": "waldyrious/PrettyTables.jl", "max_issues_repo_head_hexsha": "5aea4f21b5a51ce8e160de88ffe7c12838a71f5f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/text_backend/colors.jl", "max_forks_repo_name": "waldyrious/PrettyTables.jl", "max_forks_repo_head_hexsha": "5aea4f21b5a51ce8e160de88ffe7c12838a71f5f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4920634921, "max_line_length": 136, "alphanum_fraction": 0.3290598291, "num_tokens": 8625}
|
[STATEMENT]
lemma akra_bazzi_term_floor_subtract [akra_bazzi_term_intros]:
assumes "(b::real) > 0" "b < 1" "real x\<^sub>0 \<le> b * real x\<^sub>1 - c" "0 < c + (1 - b) * real x\<^sub>1" "x\<^sub>1 > 0"
shows "akra_bazzi_term x\<^sub>0 x\<^sub>1 b (\<lambda>x. nat \<lfloor>b*real x - c\<rfloor>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. akra_bazzi_term x\<^sub>0 x\<^sub>1 b (\<lambda>x. nat \<lfloor>b * real x - c\<rfloor>)
[PROOF STEP]
by (subst diff_conv_add_uminus, rule akra_bazzi_term_floor_add, insert assms) simp_all
|
{"llama_tokens": 245, "file": "Akra_Bazzi_Akra_Bazzi", "length": 1}
|
[STATEMENT]
lemma foldl_prs_aux:
assumes a: "Quotient3 R1 abs1 rep1"
and b: "Quotient3 R2 abs2 rep2"
shows "abs1 (foldl ((abs1 ---> abs2 ---> rep1) f) (rep1 e) (map rep2 l)) = foldl f e l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. abs1 (foldl ((abs1 ---> abs2 ---> rep1) f) (rep1 e) (map rep2 l)) = foldl f e l
[PROOF STEP]
by (induct l arbitrary:e) (simp_all add: Quotient3_abs_rep[OF a] Quotient3_abs_rep[OF b])
|
{"llama_tokens": 198, "file": null, "length": 1}
|
""" The U.S. Standard Atmosphere 1962 is an idealized, steady-state
representation of the Earth's atmosphere from the surface up to 700 km,
assumed to exist in a period of moderate solar activity.
+--------+---------+---------+-----------+---------------+---------------+
| Z (km) | H (km) | T (K) | p (mbar) | rho (kg / m3) | beta (K / km) |
+--------+---------+---------+-----------+---------------+---------------+
| 0.0 | 0.0 | 288.150 | 1.01325e3 | 1.2250 | -6.5 |
+--------+---------+---------+-----------+---------------+---------------+
| 11.019 | 11.0 | 216.650 | 2.2632e2 | 3.6392e-1 | 0.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 20.063 | 20.0 | 216.650 | 5.4749e1 | 8.8035e-2 | 1.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 32.162 | 32.0 | 228.650 | 8.68014 | 1.3225e-2 | 2.8 |
+--------+---------+---------+-----------+---------------+---------------+
| 47.350 | 47.0 | 270.650 | 1.109050 | 1.4275e-3 | 0.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 51.413 | 52.0 | 270.650 | 5.90005e-1| 7.5943e-4 | -2.8 |
+--------+---------+---------+-----------+---------------+---------------+
| 61.591 | 61.0 | 252.650 | 1.82099e-1| 2.5109e-4 | -2.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 79.994 | 79.0 | 180.650 | 1.0377e-2 | 2.001e-5 | 0.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 90.0 | 88.743 | 180.650 | 1.6438e-3 | 3.170e-6 | 0.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 100.0 | 98.451 | 210.020 | 3.0075e-4 | 4.974e-7 | 5.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 110.0 | 108.129 | 257.000 | 7.3544e-5 | 9.829e-8 | 10.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 120.0 | 117.776 | 349.490 | 2.5217e-5 | 2.436e-8 | 20.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 150.0 | 146.541 | 892.790 | 5.0617e-6 | 1.836e-9 | 15.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 160.0 | 156.071 | 1022.23 | 3.6943e-6 | 1.159e-9 | 10.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 170.0 | 165.571 | 1105.51 | 2.7926e-6 | 8.036e-10 | 7.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 190.0 | 184.485 | 1205.50 | 1.6852e-6 | 4.347e-10 | 5.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 230.0 | 221.967 | 1321.70 | 6.9604e-7 | 1.564e-10 | 4.0 |
+--------+---------+---------+-----------+---------------+---------------+
| 300.0 | 286.476 | 1432.11 | 1.8838e-7 | 3.585e-11 | 3.3 |
+--------+---------+---------+-----------+---------------+---------------+
| 400.0 | 376.312 | 1487.38 | 4.0304e-8 | 6.498e-12 | 2.6 |
+--------+---------+---------+-----------+---------------+---------------+
| 500.0 | 463.526 | 1499.22 | 1.0957e-8 | 1.577e-12 | 1.7 |
+--------+---------+---------+-----------+---------------+---------------+
| 600.0 | 548.230 | 1506.13 | 3.4502e-9 | 4.640e-13 | 1.1 |
+--------+---------+---------+-----------+---------------+---------------+
| 700.0 | 630.530 | 1507.61 | 1.1918e-9 | 1.537e-13 | 0.0 |
+--------+---------+---------+-----------+---------------+---------------+
"""
import numpy as np
from astropy import units as u
from astropy.io import ascii
from astropy.utils.data import get_pkg_data_filename
from poliastro.atmosphere.base import COESA
# Constants come from the original paper to achieve a pure implementation.
r0 = 6356.766 * u.km  # Effective Earth radius for the geopotential conversion
p0 = 1.013250e5 * u.Pa  # Sea-level pressure
# Sea-level density. FIX: this was previously tagged with Kelvin (`u.K`);
# the 1962 standard gives rho0 = 1.2250 kg / m3.
rho0 = 1.2250 * u.kg / u.m ** 3
T0 = 288.15 * u.K  # Sea-level temperature
g0 = 9.80665 * u.m / u.s ** 2  # Standard gravity
S = 110.4 * u.K  # Sutherland constant (viscosity law)
Ti = 273.15 * u.K  # Ice-point temperature
beta = 1.458e-6 * u.kg / u.s / u.m / u.K ** (0.5)  # Viscosity law constant
_gamma = 1.4  # Ratio of specific heats for air
sigma = 3.65e-10 * u.m  # Effective collision diameter of an air molecule
N = 6.02257e26 * (u.kg * u.mol) ** -1  # Avogadro constant as used in the paper
R = 8314.32 * u.J / u.kg / u.K  # Gas constant with the units used in the paper
R_air = 287.053 * u.J / u.kg / u.K  # Specific gas constant of dry air
# Reading the layer-parameters file bundled with the package.
coesa_file = get_pkg_data_filename("data/coesa62.dat")
coesa62_data = ascii.read(coesa_file)
# Per-layer base values: index, geometric altitude, geopotential altitude,
# base temperature, temperature gradient (lapse rate) and base pressure.
b_levels = coesa62_data["b"].data
zb_levels = coesa62_data["Zb [km]"].data * u.km
hb_levels = coesa62_data["Hb [km]"].data * u.km
Tb_levels = coesa62_data["Tb [K]"].data * u.K
Lb_levels = coesa62_data["Lb [K/km]"].data * u.K / u.km
pb_levels = coesa62_data["pb [mbar]"].data * u.mbar
class COESA62(COESA):
    """Model for the U.S. Standard Atmosphere 1962.

    Temperature is available over the whole table range; pressure and
    density are only implemented up to 90 km of geometric altitude.
    """

    def __init__(self):
        """Initialize the model with the 1962 layer tables loaded at module level."""
        super().__init__(
            b_levels, zb_levels, hb_levels, Tb_levels, Lb_levels, pb_levels
        )

    @property
    def b_levels(self):
        # Layer index column.
        return self.tables[0]

    @property
    def zb_levels(self):
        # Geometric altitude of each layer base.
        return self.tables[1]

    @property
    def hb_levels(self):
        # Geopotential altitude of each layer base.
        return self.tables[2]

    @property
    def Tb_levels(self):
        # Temperature at each layer base.
        return self.tables[3]

    @property
    def Lb_levels(self):
        # Temperature gradient (lapse rate) of each layer.
        return self.tables[4]

    @property
    def pb_levels(self):
        # Pressure at each layer base.
        return self.tables[5]

    def temperature(self, alt, geometric=True):
        """ Solves for temperature at given altitude.

        Parameters
        ----------
        alt: ~astropy.units.Quantity
            Geometric/Geopotential altitude.
        geometric: boolean
            If `True`, assumes geometric altitude kind.

        Returns
        -------
        T: ~astropy.units.Quantity
            Kinetic temperature.
        """
        # Test if altitude is inside valid range
        z, h = self._check_altitude(alt, r0, geometric=geometric)

        # Get base parameters of the layer containing the altitude
        i = self._get_index(z, self.zb_levels)
        zb = self.zb_levels[i]
        Tb = self.Tb_levels[i]
        Lb = self.Lb_levels[i]
        hb = self.hb_levels[i]

        # Below 90 km the profile is linear in geopotential altitude;
        # above 90 km it is linear in geometric altitude.
        if z <= 90 * u.km:
            T = Tb + Lb * (h - hb)
        else:
            T = Tb + Lb * (z - zb)
        return T.to(u.K)

    def pressure(self, alt, geometric=True):
        """ Solves pressure at given altitude.

        Parameters
        ----------
        alt: ~astropy.units.Quantity
            Geometric/Geopotential altitude.
        geometric: boolean
            If `True`, assumes geometric altitude.

        Returns
        -------
        p: ~astropy.units.Quantity
            Pressure at given altitude.

        Raises
        ------
        NotImplementedError
            If the altitude is above 90 km.
        """
        # Check if valid range and convert to geopotential
        z, h = self._check_altitude(alt, r0, geometric=geometric)

        # Get base parameters of the layer containing the altitude
        i = self._get_index(z, self.zb_levels)
        hb = self.hb_levels[i]
        Tb = self.Tb_levels[i]
        Lb = self.Lb_levels[i]
        pb = self.pb_levels[i]

        # If Z > 90km, different formulas apply.
        # (A leftover debug ``print`` of the base parameters was removed here.)
        if z <= 90 * u.km:
            if Lb == 0.0:
                # Isothermal layer: exponential pressure decay.
                p = pb * np.exp(-g0 * (h - hb) / Tb / R_air)
            else:
                # Gradient layer: power-law relation with temperature.
                T = self.temperature(z)
                p = pb * (T / Tb) ** (-g0 / R_air / Lb)
        else:
            # TODO: Equation (1.2.10) should be applied above 90 km
            raise NotImplementedError(
                "Pressure in COESA62 has just been implemented up to 90km."
            )

        return p

    def density(self, alt, geometric=True):
        """ Solves density at given altitude.

        Parameters
        ----------
        alt: ~astropy.units.Quantity
            Geometric/Geopotential altitude.
        geometric: boolean
            If `True`, assumes geometric altitude.

        Returns
        -------
        rho: ~astropy.units.Quantity
            Density at given altitude.

        Raises
        ------
        NotImplementedError
            If the altitude is above 90 km.
        """
        # Check if valid range and convert to geopotential
        z, h = self._check_altitude(alt, r0, geometric=geometric)

        # TODO: implement atmosphere up to 1000km
        if z > 90 * u.km:
            raise NotImplementedError(
                "Density in COESA62 has just been implemented up to 90km."
            )

        # Perfect-gas law: rho = p / (R_air * T).
        T = self.temperature(z)
        p = self.pressure(z)
        rho = p / R_air / T
        return rho.to(u.kg / u.m ** 3)

    def properties(self, alt, geometric=True):
        """ Solves temperature, pressure and density at given height.

        Parameters
        ----------
        alt: ~astropy.units.Quantity
            Geometric/Geopotential height.
        geometric: boolean
            If `True`, assumes that `alt` argument is geometric kind.

        Returns
        -------
        T: ~astropy.units.Quantity
            Temperature at given height.
        p: ~astropy.units.Quantity
            Pressure at given height.
        rho: ~astropy.units.Quantity
            Density at given height.
        """
        T = self.temperature(alt, geometric=geometric)
        p = self.pressure(alt, geometric=geometric)
        rho = self.density(alt, geometric=geometric)
        return T, p, rho
|
{"hexsha": "1488b5937c8a774bb72f439e500c3695a1ac6bdd", "size": 9271, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/poliastro/atmosphere/coesa62.py", "max_stars_repo_name": "noc0lour/poliastro", "max_stars_repo_head_hexsha": "c3010318446a7b9efa22f01bd1ff603ccce53273", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/poliastro/atmosphere/coesa62.py", "max_issues_repo_name": "noc0lour/poliastro", "max_issues_repo_head_hexsha": "c3010318446a7b9efa22f01bd1ff603ccce53273", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/poliastro/atmosphere/coesa62.py", "max_forks_repo_name": "noc0lour/poliastro", "max_forks_repo_head_hexsha": "c3010318446a7b9efa22f01bd1ff603ccce53273", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6442687747, "max_line_length": 78, "alphanum_fraction": 0.4301585589, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2841}
|
import numpy as np
import torch
import cv2
def reshape_patch(img_tensor, patch_size):
    """Fold each frame of a (L, H, W, C) numpy sequence into non-overlapping
    patch_size x patch_size patches, flattened into the channel axis.

    Returns an array of shape
    (L, H // patch_size, W // patch_size, patch_size * patch_size * C).
    """
    assert 4 == img_tensor.ndim
    seq_length, img_height, img_width, num_channels = np.shape(img_tensor)
    # Split both spatial axes into (blocks, within-block) pairs.
    blocked = np.reshape(
        img_tensor,
        [seq_length,
         img_height // patch_size, patch_size,
         img_width // patch_size, patch_size,
         num_channels],
    )
    # Move the two within-patch axes next to the channel axis, then flatten
    # them into it.
    reordered = np.transpose(blocked, [0, 1, 3, 2, 4, 5])
    return np.reshape(
        reordered,
        [seq_length,
         img_height // patch_size,
         img_width // patch_size,
         patch_size * patch_size * num_channels],
    )
def reshape_patch_back(patch_tensor, patch_size):
    """Invert the patch folding for a batched numpy tensor.

    Input shape is (B, L, H', W', patch_size * patch_size * C); the result
    has shape (B, L, H' * patch_size, W' * patch_size, C).
    """
    # B L H W C
    assert 5 == patch_tensor.ndim
    batch_size, seq_length, patch_height, patch_width, channels = np.shape(patch_tensor)
    img_channels = channels // (patch_size * patch_size)
    # Unfold the channel axis back into (patch_row, patch_col, channel).
    expanded = np.reshape(
        patch_tensor,
        [batch_size, seq_length,
         patch_height, patch_width,
         patch_size, patch_size,
         img_channels],
    )
    # Interleave block indices with within-patch offsets, then merge them
    # back into full spatial axes.
    interleaved = np.transpose(expanded, [0, 1, 2, 4, 3, 5, 6])
    return np.reshape(
        interleaved,
        [batch_size, seq_length,
         patch_height * patch_size,
         patch_width * patch_size,
         img_channels],
    )
def reshape_patch_back_tensor(patch_tensor, patch_size):
    """Invert the patch folding for a torch tensor.

    Input shape is (B, L, H', W', patch_size * patch_size * C); the result
    is returned channels-first with shape
    (B, L, C, H' * patch_size, W' * patch_size).
    """
    # B L H W C
    assert 5 == patch_tensor.ndim
    # Read the shape directly from the tensor. The previous implementation
    # round-tripped through .detach().cpu().numpy() just to read the shape,
    # forcing a device synchronization for no benefit.
    batch_size, seq_length, patch_height, patch_width, channels = patch_tensor.shape
    img_channels = channels // (patch_size * patch_size)
    # Unfold the channel axis back into (patch_row, patch_col, channel).
    a = torch.reshape(patch_tensor, [batch_size, seq_length,
                                     patch_height, patch_width,
                                     patch_size, patch_size,
                                     img_channels])
    # Interleave block indices with within-patch offsets.
    b = a.permute([0, 1, 2, 4, 3, 5, 6])
    img_tensor = torch.reshape(b, [batch_size, seq_length,
                                   patch_height * patch_size,
                                   patch_width * patch_size,
                                   img_channels])
    # Channels-first output layout: (B, L, C, H, W).
    return img_tensor.permute(0, 1, 4, 2, 3)
def reshape_patch_tensor(img_tensor, patch_size):
    """Patch-fold a (L, H, W, C) torch tensor and return it channels-first:
    (L, patch_size * patch_size * C, H // patch_size, W // patch_size).
    """
    assert 4 == img_tensor.ndim
    seq_length, img_height, img_width, num_channels = img_tensor.shape
    rows = img_height // patch_size
    cols = img_width // patch_size
    # Split both spatial axes into (blocks, within-block) pairs.
    blocked = torch.reshape(
        img_tensor,
        [seq_length, rows, patch_size, cols, patch_size, num_channels],
    )
    # Bring the within-patch axes next to the channel axis and flatten them
    # into it.
    reordered = blocked.permute((0, 1, 3, 2, 4, 5))
    patch_tensor = torch.reshape(
        reordered,
        [seq_length, rows, cols, patch_size * patch_size * num_channels],
    )
    # Channels-first output layout: (L, C', H', W').
    return patch_tensor.permute((0, 3, 1, 2))
|
{"hexsha": "760373c5f01dba53f8ba3e4e859e9a5349091d28", "size": 3520, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/utils/preprocess.py", "max_stars_repo_name": "ZhengChang467/MAU", "max_stars_repo_head_hexsha": "9e5461cfbacf20cc4323839730f06a9c09d98187", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-03-07T16:56:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T09:20:02.000Z", "max_issues_repo_path": "core/utils/preprocess.py", "max_issues_repo_name": "ZhengChang467/MAU", "max_issues_repo_head_hexsha": "9e5461cfbacf20cc4323839730f06a9c09d98187", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/utils/preprocess.py", "max_forks_repo_name": "ZhengChang467/MAU", "max_forks_repo_head_hexsha": "9e5461cfbacf20cc4323839730f06a9c09d98187", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-01T10:04:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T10:04:30.000Z", "avg_line_length": 40.9302325581, "max_line_length": 77, "alphanum_fraction": 0.5420454545, "include": true, "reason": "import numpy", "num_tokens": 770}
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
! This file was ported from Lean 3 source module algebra.big_operators.basic
! leanprover-community/mathlib commit c227d107bbada5d0d9d20287e3282c0a7f1651a0
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Algebra.BigOperators.Multiset.Lemmas
import Mathlib.Algebra.Group.Pi
import Mathlib.Algebra.GroupPower.Lemmas
import Mathlib.Algebra.Hom.Equiv.Basic
import Mathlib.Algebra.Ring.Opposite
import Mathlib.Data.Finset.Sum
import Mathlib.Data.Fintype.Basic
import Mathlib.Data.Finset.Sigma
import Mathlib.Data.Multiset.Powerset
import Mathlib.Data.Set.Pairwise.Basic
import Mathlib.Tactic.ScopedNS
/-!
# Big operators
In this file we define products and sums indexed by finite sets (specifically, `Finset`).
## Notation
We introduce the following notation, localized in `BigOperators`.
To enable the notation, use `open BigOperators`.
Let `s` be a `Finset α`, and `f : α → β` a function.
* `∏ x in s, f x` is notation for `Finset.prod s f` (assuming `β` is a `CommMonoid`)
* `∑ x in s, f x` is notation for `Finset.sum s f` (assuming `β` is an `AddCommMonoid`)
* `∏ x, f x` is notation for `Finset.prod Finset.univ f`
(assuming `α` is a `Fintype` and `β` is a `CommMonoid`)
* `∑ x, f x` is notation for `Finset.sum Finset.univ f`
(assuming `α` is a `Fintype` and `β` is an `AddCommMonoid`)
## Implementation Notes
The first argument in all definitions and lemmas is the codomain of the function of the big
operator. This is necessary for the heuristic in `@[to_additive]`.
See the documentation of `to_additive.attr` for more information.
-/
universe u v w
variable {ι : Type _} {β : Type u} {α : Type v} {γ : Type w}
open Fin
namespace Finset
/-- `∏ x in s, f x` is the product of `f x`
as `x` ranges over the elements of the finite set `s`.
-/
@[to_additive "`∑ x in s, f x` is the sum of `f x` as `x` ranges over the elements
of the finite set `s`."]
protected def prod [CommMonoid β] (s : Finset α) (f : α → β) : β :=
(s.1.map f).prod
#align finset.prod Finset.prod
#align finset.sum Finset.sum
@[to_additive (attr := simp)]
theorem prod_mk [CommMonoid β] (s : Multiset α) (hs : s.Nodup) (f : α → β) :
(⟨s, hs⟩ : Finset α).prod f = (s.map f).prod :=
rfl
#align finset.prod_mk Finset.prod_mk
#align finset.sum_mk Finset.sum_mk
@[to_additive (attr := simp)]
theorem prod_val [CommMonoid α] (s : Finset α) : s.1.prod = s.prod id := by
rw [Finset.prod, Multiset.map_id]
#align finset.prod_val Finset.prod_val
#align finset.sum_val Finset.sum_val
end Finset
library_note "operator precedence of big operators"/--
There is no established mathematical convention
for the operator precedence of big operators like `∏` and `∑`.
We will have to make a choice.
Online discussions, such as https://math.stackexchange.com/q/185538/30839
seem to suggest that `∏` and `∑` should have the same precedence,
and that this should be somewhere between `*` and `+`.
The latter have precedence levels `70` and `65` respectively,
and we therefore choose the level `67`.
In practice, this means that parentheses should be placed as follows:
```lean
∑ k in K, (a k + b k) = ∑ k in K, a k + ∑ k in K, b k →
∏ k in K, a k * b k = (∏ k in K, a k) * (∏ k in K, b k)
```
(Example taken from page 490 of Knuth's *Concrete Mathematics*.)
-/
-- TODO: Use scoped[NS], when implemented?
namespace BigOperators
open Std.ExtendedBinder
/-- `∑ x, f x` is notation for `Finset.sum Finset.univ f`. It is the sum of `f x`,
where `x` ranges over the finite domain of `f`. -/
scoped syntax (name := bigsum) "∑ " extBinder ", " term:67 : term
scoped macro_rules (kind := bigsum)
| `(∑ $x:ident, $p) => `(Finset.sum Finset.univ (fun $x:ident ↦ $p))
| `(∑ $x:ident : $t, $p) => `(Finset.sum Finset.univ (fun $x:ident : $t ↦ $p))
/-- `∏ x, f x` is notation for `Finset.prod Finset.univ f`. It is the product of `f x`,
where `x` ranges over the finite domain of `f`. -/
scoped syntax (name := bigprod) "∏ " extBinder ", " term:67 : term
scoped macro_rules (kind := bigprod)
| `(∏ $x:ident, $p) => `(Finset.prod Finset.univ (fun $x:ident ↦ $p))
| `(∏ $x:ident : $t, $p) => `(Finset.prod Finset.univ (fun $x:ident : $t ↦ $p))
/-- `∑ x in s, f x` is notation for `Finset.sum s f`. It is the sum of `f x`,
where `x` ranges over the finite set `s`. -/
scoped syntax (name := bigsumin) "∑ " extBinder "in " term "," term:67 : term
scoped macro_rules (kind := bigsumin)
| `(∑ $x:ident in $s, $r) => `(Finset.sum $s (fun $x ↦ $r))
| `(∑ $x:ident : $t in $s, $p) => `(Finset.sum $s (fun $x:ident : $t ↦ $p))
/-- `∏ x in s, f x` is notation for `Finset.prod s f`. It is the product of `f x`,
where `x` ranges over the finite set `s`. -/
scoped syntax (name := bigprodin) "∏ " extBinder "in " term "," term:67 : term
scoped macro_rules (kind := bigprodin)
  | `(∏ $x:ident in $s, $r) => `(Finset.prod $s (fun $x ↦ $r))
  | `(∏ $x:ident : $t in $s, $p) => `(Finset.prod $s (fun $x:ident : $t ↦ $p))
end BigOperators
open BigOperators
namespace Finset
variable {s s₁ s₂ : Finset α} {a : α} {f g : α → β}
@[to_additive]
theorem prod_eq_multiset_prod [CommMonoid β] (s : Finset α) (f : α → β) :
(∏ x in s, f x) = (s.1.map f).prod :=
rfl
#align finset.prod_eq_multiset_prod Finset.prod_eq_multiset_prod
#align finset.sum_eq_multiset_sum Finset.sum_eq_multiset_sum
@[to_additive]
theorem prod_eq_fold [CommMonoid β] (s : Finset α) (f : α → β) :
(∏ x in s, f x) = s.fold ((· * ·) : β → β → β) 1 f :=
rfl
#align finset.prod_eq_fold Finset.prod_eq_fold
#align finset.sum_eq_fold Finset.sum_eq_fold
@[simp]
theorem sum_multiset_singleton (s : Finset α) : (s.sum fun x => {x}) = s.val := by
simp only [sum_eq_multiset_sum, Multiset.sum_map_singleton]
#align finset.sum_multiset_singleton Finset.sum_multiset_singleton
end Finset
@[to_additive]
theorem map_prod [CommMonoid β] [CommMonoid γ] {G : Type _} [MonoidHomClass G β γ] (g : G)
(f : α → β) (s : Finset α) : g (∏ x in s, f x) = ∏ x in s, g (f x) := by
simp only [Finset.prod_eq_multiset_prod, map_multiset_prod, Multiset.map_map]; rfl
#align map_prod map_prod
#align map_sum map_sum
section Deprecated
/-- Deprecated: use `_root_.map_prod` instead. -/
@[to_additive (attr := deprecated) "Deprecated: use `_root_.map_sum` instead."]
protected theorem MonoidHom.map_prod [CommMonoid β] [CommMonoid γ] (g : β →* γ) (f : α → β)
(s : Finset α) : g (∏ x in s, f x) = ∏ x in s, g (f x) :=
map_prod g f s
#align monoid_hom.map_prod MonoidHom.map_prod
#align add_monoid_hom.map_sum AddMonoidHom.map_sum
/-- Deprecated: use `_root_.map_prod` instead. -/
@[to_additive (attr := deprecated) "Deprecated: use `_root_.map_sum` instead."]
protected theorem MulEquiv.map_prod [CommMonoid β] [CommMonoid γ] (g : β ≃* γ) (f : α → β)
(s : Finset α) : g (∏ x in s, f x) = ∏ x in s, g (f x) :=
map_prod g f s
#align mul_equiv.map_prod MulEquiv.map_prod
#align add_equiv.map_sum AddEquiv.map_sum
@[deprecated _root_.map_list_prod]
protected theorem RingHom.map_list_prod [Semiring β] [Semiring γ] (f : β →+* γ) (l : List β) :
f l.prod = (l.map f).prod :=
map_list_prod f l
#align ring_hom.map_list_prod RingHom.map_list_prod
@[deprecated _root_.map_list_sum]
protected theorem RingHom.map_list_sum [NonAssocSemiring β] [NonAssocSemiring γ] (f : β →+* γ)
(l : List β) : f l.sum = (l.map f).sum :=
map_list_sum f l
#align ring_hom.map_list_sum RingHom.map_list_sum
/-- A morphism into the opposite ring acts on the product by acting on the reversed elements. -/
@[deprecated _root_.unop_map_list_prod]
protected theorem RingHom.unop_map_list_prod [Semiring β] [Semiring γ] (f : β →+* γᵐᵒᵖ)
(l : List β) : MulOpposite.unop (f l.prod) = (l.map (MulOpposite.unop ∘ f)).reverse.prod :=
unop_map_list_prod f l
#align ring_hom.unop_map_list_prod RingHom.unop_map_list_prod
@[deprecated _root_.map_multiset_prod]
protected theorem RingHom.map_multiset_prod [CommSemiring β] [CommSemiring γ] (f : β →+* γ)
(s : Multiset β) : f s.prod = (s.map f).prod :=
map_multiset_prod f s
#align ring_hom.map_multiset_prod RingHom.map_multiset_prod
@[deprecated _root_.map_multiset_sum]
protected theorem RingHom.map_multiset_sum [NonAssocSemiring β] [NonAssocSemiring γ] (f : β →+* γ)
(s : Multiset β) : f s.sum = (s.map f).sum :=
map_multiset_sum f s
#align ring_hom.map_multiset_sum RingHom.map_multiset_sum
@[deprecated _root_.map_prod]
protected theorem RingHom.map_prod [CommSemiring β] [CommSemiring γ] (g : β →+* γ) (f : α → β)
(s : Finset α) : g (∏ x in s, f x) = ∏ x in s, g (f x) :=
map_prod g f s
#align ring_hom.map_prod RingHom.map_prod
@[deprecated _root_.map_sum]
protected theorem RingHom.map_sum [NonAssocSemiring β] [NonAssocSemiring γ] (g : β →+* γ)
(f : α → β) (s : Finset α) : g (∑ x in s, f x) = ∑ x in s, g (f x) :=
map_sum g f s
#align ring_hom.map_sum RingHom.map_sum
end Deprecated
@[to_additive]
theorem MonoidHom.coe_finset_prod [MulOneClass β] [CommMonoid γ] (f : α → β →* γ) (s : Finset α) :
⇑(∏ x in s, f x) = ∏ x in s, ⇑f x :=
(MonoidHom.coeFn β γ).map_prod _ _
#align monoid_hom.coe_finset_prod MonoidHom.coe_finset_prod
#align add_monoid_hom.coe_finset_sum AddMonoidHom.coe_finset_sum
-- See also `finset.prod_apply`, with the same conclusion
-- but with the weaker hypothesis `f : α → β → γ`.
@[to_additive (attr := simp)]
theorem MonoidHom.finset_prod_apply [MulOneClass β] [CommMonoid γ] (f : α → β →* γ) (s : Finset α)
(b : β) : (∏ x in s, f x) b = ∏ x in s, f x b :=
(MonoidHom.eval b).map_prod _ _
#align monoid_hom.finset_prod_apply MonoidHom.finset_prod_apply
#align add_monoid_hom.finset_sum_apply AddMonoidHom.finset_sum_apply
variable {s s₁ s₂ : Finset α} {a : α} {f g : α → β}
namespace Finset
section CommMonoid
variable [CommMonoid β]
@[to_additive (attr := simp)]
theorem prod_empty : (∏ x in ∅, f x) = 1 :=
rfl
#align finset.prod_empty Finset.prod_empty
#align finset.sum_empty Finset.sum_empty
@[to_additive]
theorem prod_of_empty [IsEmpty α] (s : Finset α) : (∏ i in s, f i) = 1 := by
rw [eq_empty_of_isEmpty s, prod_empty]
#align finset.prod_of_empty Finset.prod_of_empty
#align finset.sum_of_empty Finset.sum_of_empty
@[to_additive (attr := simp)]
theorem prod_cons (h : a ∉ s) : (∏ x in cons a s h, f x) = f a * ∏ x in s, f x :=
fold_cons h
#align finset.prod_cons Finset.prod_cons
#align finset.sum_cons Finset.sum_cons
@[to_additive (attr := simp)]
theorem prod_insert [DecidableEq α] : a ∉ s → (∏ x in insert a s, f x) = f a * ∏ x in s, f x :=
fold_insert
#align finset.prod_insert Finset.prod_insert
#align finset.sum_insert Finset.sum_insert
/-- The product of `f` over `insert a s` is the same as
the product over `s`, as long as `a` is in `s` or `f a = 1`. -/
@[to_additive (attr := simp) "The sum of `f` over `insert a s` is the same as
the sum over `s`, as long as `a` is in `s` or `f a = 0`."]
theorem prod_insert_of_eq_one_if_not_mem [DecidableEq α] (h : a ∉ s → f a = 1) :
(∏ x in insert a s, f x) = ∏ x in s, f x := by
by_cases hm : a ∈ s
· simp_rw [insert_eq_of_mem hm]
· rw [prod_insert hm, h hm, one_mul]
#align finset.prod_insert_of_eq_one_if_not_mem Finset.prod_insert_of_eq_one_if_not_mem
#align finset.sum_insert_of_eq_zero_if_not_mem Finset.sum_insert_of_eq_zero_if_not_mem
/-- The product of `f` over `insert a s` is the same as
the product over `s`, as long as `f a = 1`. -/
@[to_additive (attr := simp) "The sum of `f` over `insert a s` is the same as
the sum over `s`, as long as `f a = 0`."]
theorem prod_insert_one [DecidableEq α] (h : f a = 1) : (∏ x in insert a s, f x) = ∏ x in s, f x :=
prod_insert_of_eq_one_if_not_mem fun _ => h
#align finset.prod_insert_one Finset.prod_insert_one
#align finset.sum_insert_zero Finset.sum_insert_zero
@[to_additive (attr := simp)]
theorem prod_singleton : (∏ x in singleton a, f x) = f a :=
Eq.trans fold_singleton <| mul_one _
#align finset.prod_singleton Finset.prod_singleton
#align finset.sum_singleton Finset.sum_singleton
@[to_additive]
theorem prod_pair [DecidableEq α] {a b : α} (h : a ≠ b) :
(∏ x in ({a, b} : Finset α), f x) = f a * f b := by
rw [prod_insert (not_mem_singleton.2 h), prod_singleton]
#align finset.prod_pair Finset.prod_pair
#align finset.sum_pair Finset.sum_pair
@[to_additive (attr := simp)]
theorem prod_const_one : (∏ _x in s, (1 : β)) = 1 := by
simp only [Finset.prod, Multiset.map_const', Multiset.prod_replicate, one_pow]
#align finset.prod_const_one Finset.prod_const_one
#align finset.sum_const_zero Finset.sum_const_zero
@[to_additive (attr := simp)]
theorem prod_image [DecidableEq α] {s : Finset γ} {g : γ → α} :
(∀ x ∈ s, ∀ y ∈ s, g x = g y → x = y) → (∏ x in s.image g, f x) = ∏ x in s, f (g x) :=
fold_image
#align finset.prod_image Finset.prod_image
#align finset.sum_image Finset.sum_image
@[to_additive (attr := simp)]
theorem prod_map (s : Finset α) (e : α ↪ γ) (f : γ → β) :
(∏ x in s.map e, f x) = ∏ x in s, f (e x) := by
rw [Finset.prod, Finset.map_val, Multiset.map_map]; rfl
#align finset.prod_map Finset.prod_map
#align finset.sum_map Finset.sum_map
@[to_additive (attr := congr)]
theorem prod_congr (h : s₁ = s₂) : (∀ x ∈ s₂, f x = g x) → s₁.prod f = s₂.prod g := by
rw [h]; exact fold_congr
#align finset.prod_congr Finset.prod_congr
#align finset.sum_congr Finset.sum_congr
@[to_additive]
theorem prod_disjUnion (h) :
(∏ x in s₁.disjUnion s₂ h, f x) = (∏ x in s₁, f x) * ∏ x in s₂, f x := by
refine' Eq.trans _ (fold_disjUnion h)
rw [one_mul]
rfl
#align finset.prod_disj_union Finset.prod_disjUnion
#align finset.sum_disj_union Finset.sum_disjUnion
@[to_additive]
theorem prod_disjUnionᵢ (s : Finset ι) (t : ι → Finset α) (h) :
(∏ x in s.disjUnionᵢ t h, f x) = ∏ i in s, ∏ x in t i, f x := by
refine' Eq.trans _ (fold_disjUnionᵢ h)
dsimp [Finset.prod, Multiset.prod, Multiset.fold, Finset.disjUnion, Finset.fold]
congr
exact prod_const_one.symm
#align finset.prod_disj_Union Finset.prod_disjUnionᵢ
#align finset.sum_disj_Union Finset.sum_disjUnionᵢ
@[to_additive]
theorem prod_union_inter [DecidableEq α] :
((∏ x in s₁ ∪ s₂, f x) * ∏ x in s₁ ∩ s₂, f x) = (∏ x in s₁, f x) * ∏ x in s₂, f x :=
fold_union_inter
#align finset.prod_union_inter Finset.prod_union_inter
#align finset.sum_union_inter Finset.sum_union_inter
@[to_additive]
theorem prod_union [DecidableEq α] (h : Disjoint s₁ s₂) :
(∏ x in s₁ ∪ s₂, f x) = (∏ x in s₁, f x) * ∏ x in s₂, f x := by
rw [← prod_union_inter, disjoint_iff_inter_eq_empty.mp h]; exact (mul_one _).symm
#align finset.prod_union Finset.prod_union
#align finset.sum_union Finset.sum_union
@[to_additive]
theorem prod_filter_mul_prod_filter_not
(s : Finset α) (p : α → Prop) [DecidablePred p] [∀ x, Decidable (¬p x)] (f : α → β) :
((∏ x in s.filter p, f x) * ∏ x in s.filter fun x => ¬p x, f x) = ∏ x in s, f x := by
have := Classical.decEq α
rw [← prod_union (disjoint_filter_filter_neg s s p), filter_union_filter_neg_eq]
#align finset.prod_filter_mul_prod_filter_not Finset.prod_filter_mul_prod_filter_not
#align finset.sum_filter_add_sum_filter_not Finset.sum_filter_add_sum_filter_not
section ToList
-- Mapping `f` over the list `s.toList` and taking the product of the resulting list
-- agrees with `s.prod f`; proved by rewriting both sides to the underlying multiset.
@[to_additive (attr := simp)]
theorem prod_to_list (s : Finset α) (f : α → β) : (s.toList.map f).prod = s.prod f := by
  rw [Finset.prod, ← Multiset.coe_prod, ← Multiset.coe_map, Finset.coe_toList]
#align finset.prod_to_list Finset.prod_to_list
#align finset.sum_to_list Finset.sum_to_list
end ToList
@[to_additive]
theorem _root_.Equiv.Perm.prod_comp (σ : Equiv.Perm α) (s : Finset α) (f : α → β)
(hs : { a | σ a ≠ a } ⊆ s) : (∏ x in s, f (σ x)) = ∏ x in s, f x := by
convert (prod_map s σ.toEmbedding f).symm
exact (map_perm hs).symm
#align equiv.perm.prod_comp Equiv.Perm.prod_comp
#align equiv.perm.sum_comp Equiv.Perm.sum_comp
@[to_additive]
theorem _root_.Equiv.Perm.prod_comp' (σ : Equiv.Perm α) (s : Finset α) (f : α → α → β)
(hs : { a | σ a ≠ a } ⊆ s) : (∏ x in s, f (σ x) x) = ∏ x in s, f x (σ.symm x) := by
convert σ.prod_comp s (fun x => f x (σ.symm x)) hs
rw [Equiv.symm_apply_apply]
#align equiv.perm.prod_comp' Equiv.Perm.prod_comp'
#align equiv.perm.sum_comp' Equiv.Perm.sum_comp'
end CommMonoid
end Finset
section
open Finset
variable [Fintype α] [CommMonoid β]
@[to_additive]
theorem IsCompl.prod_mul_prod {s t : Finset α} (h : IsCompl s t) (f : α → β) :
((∏ i in s, f i) * ∏ i in t, f i) = ∏ i, f i :=
(Finset.prod_disjUnion h.Disjoint).symm.trans <| by
classical rw [Finset.disjUnion_eq_union, ← Finset.sup_eq_union, h.sup_eq_top]; rfl
#align is_compl.prod_mul_prod IsCompl.prod_mul_prod
#align is_compl.sum_add_sum IsCompl.sum_add_sum
end
namespace Finset
section CommMonoid
variable [CommMonoid β]
/-- Multiplying the products of a function over `s` and over `sᶜ` gives the whole product.
For a version expressed with subtypes, see `Fintype.prod_subtype_mul_prod_subtype`. -/
@[to_additive "Adding the sums of a function over `s` and over `sᶜ` gives the whole sum.
For a version expressed with subtypes, see `Fintype.sum_subtype_add_sum_subtype`. "]
theorem prod_mul_prod_compl [Fintype α] [DecidableEq α] (s : Finset α) (f : α → β) :
((∏ i in s, f i) * ∏ i in sᶜ, f i) = ∏ i, f i :=
IsCompl.prod_mul_prod isCompl_compl f
#align finset.prod_mul_prod_compl Finset.prod_mul_prod_compl
#align finset.sum_add_sum_compl Finset.sum_add_sum_compl
@[to_additive]
theorem prod_compl_mul_prod [Fintype α] [DecidableEq α] (s : Finset α) (f : α → β) :
((∏ i in sᶜ, f i) * ∏ i in s, f i) = ∏ i, f i :=
(@isCompl_compl _ s _).symm.prod_mul_prod f
#align finset.prod_compl_mul_prod Finset.prod_compl_mul_prod
#align finset.sum_compl_add_sum Finset.sum_compl_add_sum
@[to_additive]
theorem prod_sdiff [DecidableEq α] (h : s₁ ⊆ s₂) :
((∏ x in s₂ \ s₁, f x) * ∏ x in s₁, f x) = ∏ x in s₂, f x := by
rw [← prod_union sdiff_disjoint, sdiff_union_of_subset h]
#align finset.prod_sdiff Finset.prod_sdiff
#align finset.sum_sdiff Finset.sum_sdiff
@[to_additive (attr := simp)]
theorem prod_disj_sum (s : Finset α) (t : Finset γ) (f : Sum α γ → β) :
(∏ x in s.disjSum t, f x) = (∏ x in s, f (Sum.inl x)) * ∏ x in t, f (Sum.inr x) := by
rw [← map_inl_disjUnion_map_inr, prod_disjUnion, prod_map, prod_map]
rfl
#align finset.prod_disj_sum Finset.prod_disj_sum
#align finset.sum_disj_sum Finset.sum_disj_sum
@[to_additive]
theorem prod_sum_elim (s : Finset α) (t : Finset γ) (f : α → β) (g : γ → β) :
(∏ x in s.disjSum t, Sum.elim f g x) = (∏ x in s, f x) * ∏ x in t, g x := by simp
#align finset.prod_sum_elim Finset.prod_sum_elim
#align finset.sum_sum_elim Finset.sum_sum_elim
@[to_additive]
theorem prod_bunionᵢ [DecidableEq α] {s : Finset γ} {t : γ → Finset α}
(hs : Set.PairwiseDisjoint (↑s) t) : (∏ x in s.bunionᵢ t, f x) = ∏ x in s, ∏ i in t x, f i := by
rw [← disjUnionᵢ_eq_bunionᵢ _ _ hs, prod_disjUnionᵢ]
#align finset.prod_bUnion Finset.prod_bunionᵢ
#align finset.sum_bUnion Finset.sum_bunionᵢ
/-- Product over a sigma type equals the product of fiberwise products. For rewriting
in the reverse direction, use `Finset.prod_sigma'`. -/
@[to_additive "Sum over a sigma type equals the sum of fiberwise sums. For rewriting
in the reverse direction, use `Finset.sum_sigma'`"]
theorem prod_sigma {σ : α → Type _} (s : Finset α) (t : ∀ a, Finset (σ a)) (f : Sigma σ → β) :
(∏ x in s.sigma t, f x) = ∏ a in s, ∏ s in t a, f ⟨a, s⟩ := by
simp_rw [← disjUnionᵢ_map_sigma_mk, prod_disjUnionᵢ, prod_map, Function.Embedding.sigmaMk_apply]
#align finset.prod_sigma Finset.prod_sigma
#align finset.sum_sigma Finset.sum_sigma
@[to_additive]
theorem prod_sigma' {σ : α → Type _} (s : Finset α) (t : ∀ a, Finset (σ a)) (f : ∀ a, σ a → β) :
(∏ a in s, ∏ s in t a, f a s) = ∏ x in s.sigma t, f x.1 x.2 :=
Eq.symm <| prod_sigma s t fun x => f x.1 x.2
#align finset.prod_sigma' Finset.prod_sigma'
#align finset.sum_sigma' Finset.sum_sigma'
/-- Reorder a product.
The difference with `prod_bij'` is that the bijection is specified as a surjective injection,
rather than by an inverse function.
-/
@[to_additive "Reorder a sum.
The difference with `sum_bij'` is that the bijection is specified as a surjective injection,
rather than by an inverse function."]
theorem prod_bij {s : Finset α} {t : Finset γ} {f : α → β} {g : γ → β} (i : ∀ a ∈ s, γ)
(hi : ∀ a ha, i a ha ∈ t) (h : ∀ a ha, f a = g (i a ha))
(i_inj : ∀ a₁ a₂ ha₁ ha₂, i a₁ ha₁ = i a₂ ha₂ → a₁ = a₂)
(i_surj : ∀ b ∈ t, ∃ a ha, b = i a ha) : (∏ x in s, f x) = ∏ x in t, g x :=
congr_arg Multiset.prod (Multiset.map_eq_map_of_bij_of_nodup f g s.2 t.2 i hi h i_inj i_surj)
#align finset.prod_bij Finset.prod_bij
#align finset.sum_bij Finset.sum_bij
/-- Reorder a product.
The difference with `prod_bij` is that the bijection is specified with an inverse, rather than
as a surjective injection.
-/
@[to_additive "Reorder a sum.
The difference with `sum_bij` is that the bijection is specified with an inverse, rather than
as a surjective injection."]
theorem prod_bij' {s : Finset α} {t : Finset γ} {f : α → β} {g : γ → β} (i : ∀ a ∈ s, γ)
(hi : ∀ a ha, i a ha ∈ t) (h : ∀ a ha, f a = g (i a ha)) (j : ∀ a ∈ t, α)
(hj : ∀ a ha, j a ha ∈ s) (left_inv : ∀ a ha, j (i a ha) (hi a ha) = a)
(right_inv : ∀ a ha, i (j a ha) (hj a ha) = a) : (∏ x in s, f x) = ∏ x in t, g x := by
refine' prod_bij i hi h _ _
· intro a1 a2 h1 h2 eq
rw [← left_inv a1 h1, ← left_inv a2 h2]
simp only [eq]
· intro b hb
use j b hb
use hj b hb
exact (right_inv b hb).symm
#align finset.prod_bij' Finset.prod_bij'
#align finset.sum_bij' Finset.sum_bij'
/-- Reindexing a product over a finset along an equivalence.
See `Equiv.prod_comp` for the version where `s` and `s'` are `univ`. -/
@[to_additive " Reindexing a sum over a finset along an equivalence.
See `Equiv.sum_comp` for the version where `s` and `s'` are `univ`. "]
theorem Equiv.prod_comp_finset {ι'} [DecidableEq ι] (e : ι ≃ ι') (f : ι' → β) {s' : Finset ι'}
{s : Finset ι} (h : s = s'.image e.symm) : (∏ i' in s', f i') = ∏ i in s, f (e i) := by
rw [h]
refine'
Finset.prod_bij' (fun i' _hi' => e.symm i') (fun a ha => Finset.mem_image_of_mem _ ha)
(fun a _ha => by simp_rw [e.apply_symm_apply]) (fun i _hi => e i) (fun a ha => _)
(fun a _ha => e.apply_symm_apply a) fun a _ha => e.symm_apply_apply a
rcases Finset.mem_image.mp ha with ⟨i', hi', rfl⟩
dsimp only
rwa [e.apply_symm_apply]
#align finset.equiv.prod_comp_finset Finset.Equiv.prod_comp_finset
#align finset.equiv.sum_comp_finset Finset.Equiv.sum_comp_finset
-- A product over a finset `r` of pairs that is fibered over `s` (membership in `r` is
-- equivalent to the first component lying in `s` and the second in the fiber `t p.1`)
-- can be computed as an iterated product over `s` and then over each fiber.
@[to_additive]
theorem prod_finset_product (r : Finset (γ × α)) (s : Finset γ) (t : γ → Finset α)
    (h : ∀ p : γ × α, p ∈ r ↔ p.1 ∈ s ∧ p.2 ∈ t p.1) {f : γ × α → β} :
    (∏ p in r, f p) = ∏ c in s, ∏ a in t c, f (c, a) := by
  -- Reduce to `prod_sigma` via the bijection between pairs in `r` and sigma elements.
  refine' Eq.trans _ (prod_sigma s t fun p => f (p.1, p.2))
  exact
    prod_bij' (fun p _hp => ⟨p.1, p.2⟩) (fun p => mem_sigma.mpr ∘ (h p).mp)
      (fun p hp => congr_arg f Prod.mk.eta.symm) (fun p _hp => (p.1, p.2))
      (fun p => (h (p.1, p.2)).mpr ∘ mem_sigma.mp) (fun p _hp => Prod.mk.eta) fun p _hp => p.eta
#align finset.prod_finset_product Finset.prod_finset_product
#align finset.sum_finset_product Finset.sum_finset_product
@[to_additive]
theorem prod_finset_product' (r : Finset (γ × α)) (s : Finset γ) (t : γ → Finset α)
(h : ∀ p : γ × α, p ∈ r ↔ p.1 ∈ s ∧ p.2 ∈ t p.1) {f : γ → α → β} :
(∏ p in r, f p.1 p.2) = ∏ c in s, ∏ a in t c, f c a :=
prod_finset_product r s t h
#align finset.prod_finset_product' Finset.prod_finset_product'
#align finset.sum_finset_product' Finset.sum_finset_product'
@[to_additive]
theorem prod_finset_product_right (r : Finset (α × γ)) (s : Finset γ) (t : γ → Finset α)
(h : ∀ p : α × γ, p ∈ r ↔ p.2 ∈ s ∧ p.1 ∈ t p.2) {f : α × γ → β} :
(∏ p in r, f p) = ∏ c in s, ∏ a in t c, f (a, c) := by
refine' Eq.trans _ (prod_sigma s t fun p => f (p.2, p.1))
exact
prod_bij' (fun p _hp => ⟨p.2, p.1⟩) (fun p => mem_sigma.mpr ∘ (h p).mp)
(fun p hp => congr_arg f Prod.mk.eta.symm) (fun p _hp => (p.2, p.1))
(fun p => (h (p.2, p.1)).mpr ∘ mem_sigma.mp) (fun p _hp => Prod.mk.eta) fun p _hp => p.eta
#align finset.prod_finset_product_right Finset.prod_finset_product_right
#align finset.sum_finset_product_right Finset.sum_finset_product_right
@[to_additive]
theorem prod_finset_product_right' (r : Finset (α × γ)) (s : Finset γ) (t : γ → Finset α)
(h : ∀ p : α × γ, p ∈ r ↔ p.2 ∈ s ∧ p.1 ∈ t p.2) {f : α → γ → β} :
(∏ p in r, f p.1 p.2) = ∏ c in s, ∏ a in t c, f a c :=
prod_finset_product_right r s t h
#align finset.prod_finset_product_right' Finset.prod_finset_product_right'
#align finset.sum_finset_product_right' Finset.sum_finset_product_right'
@[to_additive]
theorem prod_fiberwise_of_maps_to [DecidableEq γ] {s : Finset α} {t : Finset γ} {g : α → γ}
(h : ∀ x ∈ s, g x ∈ t) (f : α → β) :
(∏ y in t, ∏ x in s.filter fun x => g x = y, f x) = ∏ x in s, f x := by
rw [← prod_disjUnionᵢ, disjUnionᵢ_filter_eq_of_maps_to h]
#align finset.prod_fiberwise_of_maps_to Finset.prod_fiberwise_of_maps_to
#align finset.sum_fiberwise_of_maps_to Finset.sum_fiberwise_of_maps_to
@[to_additive]
theorem prod_image' [DecidableEq α] {s : Finset γ} {g : γ → α} (h : γ → β)
(eq : ∀ c ∈ s, f (g c) = ∏ x in s.filter fun c' => g c' = g c, h x) :
(∏ x in s.image g, f x) = ∏ x in s, h x :=
calc
(∏ x in s.image g, f x) = ∏ x in s.image g, ∏ x in s.filter fun c' => g c' = x, h x :=
(prod_congr rfl) fun _x hx =>
let ⟨c, hcs, hc⟩ := mem_image.1 hx
hc ▸ eq c hcs
_ = ∏ x in s, h x := prod_fiberwise_of_maps_to (fun _x => mem_image_of_mem g) _
#align finset.prod_image' Finset.prod_image'
#align finset.sum_image' Finset.sum_image'
@[to_additive]
theorem prod_mul_distrib : (∏ x in s, f x * g x) = (∏ x in s, f x) * ∏ x in s, g x :=
Eq.trans (by rw [one_mul]; rfl) fold_op_distrib
#align finset.prod_mul_distrib Finset.prod_mul_distrib
#align finset.sum_add_distrib Finset.sum_add_distrib
@[to_additive]
theorem prod_product {s : Finset γ} {t : Finset α} {f : γ × α → β} :
(∏ x in s ×ᶠ t, f x) = ∏ x in s, ∏ y in t, f (x, y) :=
prod_finset_product (s ×ᶠ t) s (fun _a => t) fun _p => mem_product
#align finset.prod_product Finset.prod_product
#align finset.sum_product Finset.sum_product
/-- An uncurried version of `Finset.prod_product`. -/
@[to_additive "An uncurried version of `Finset.sum_product`"]
theorem prod_product' {s : Finset γ} {t : Finset α} {f : γ → α → β} :
(∏ x in s ×ᶠ t, f x.1 x.2) = ∏ x in s, ∏ y in t, f x y :=
prod_product
#align finset.prod_product' Finset.prod_product'
#align finset.sum_product' Finset.sum_product'
@[to_additive]
theorem prod_product_right {s : Finset γ} {t : Finset α} {f : γ × α → β} :
(∏ x in s ×ᶠ t, f x) = ∏ y in t, ∏ x in s, f (x, y) :=
prod_finset_product_right (s ×ᶠ t) t (fun _a => s) fun _p => mem_product.trans and_comm
#align finset.prod_product_right Finset.prod_product_right
#align finset.sum_product_right Finset.sum_product_right
/-- An uncurried version of `Finset.prod_product_right`. -/
-- Fix: the additive docstring must reference the additive lemma `Finset.sum_product_right`,
-- not the multiplicative one (compare the convention used by `prod_product'` above).
@[to_additive "An uncurried version of `Finset.sum_product_right`"]
theorem prod_product_right' {s : Finset γ} {t : Finset α} {f : γ → α → β} :
    (∏ x in s ×ᶠ t, f x.1 x.2) = ∏ y in t, ∏ x in s, f x y :=
  prod_product_right
#align finset.prod_product_right' Finset.prod_product_right'
#align finset.sum_product_right' Finset.sum_product_right'
/-- Generalization of `Finset.prod_comm` to the case when the inner `Finset`s depend on the outer
variable. -/
@[to_additive "Generalization of `Finset.sum_comm` to the case when the inner `Finset`s depend on
the outer variable."]
theorem prod_comm' {s : Finset γ} {t : γ → Finset α} {t' : Finset α} {s' : α → Finset γ}
(h : ∀ x y, x ∈ s ∧ y ∈ t x ↔ x ∈ s' y ∧ y ∈ t') {f : γ → α → β} :
(∏ x in s, ∏ y in t x, f x y) = ∏ y in t', ∏ x in s' y, f x y := by
classical
have : ∀ z : γ × α, (z ∈ s.bunionᵢ fun x => (t x).map <| Function.Embedding.sectr x _) ↔
z.1 ∈ s ∧ z.2 ∈ t z.1 := by
rintro ⟨x, y⟩
simp only [mem_bunionᵢ, mem_map, Function.Embedding.sectr_apply, Prod.mk.injEq,
exists_eq_right, ← and_assoc]
exact
(prod_finset_product' _ _ _ this).symm.trans
((prod_finset_product_right' _ _ _) fun ⟨x, y⟩ => (this _).trans ((h x y).trans and_comm))
#align finset.prod_comm' Finset.prod_comm'
#align finset.sum_comm' Finset.sum_comm'
@[to_additive]
theorem prod_comm {s : Finset γ} {t : Finset α} {f : γ → α → β} :
(∏ x in s, ∏ y in t, f x y) = ∏ y in t, ∏ x in s, f x y :=
prod_comm' fun _ _ => Iff.rfl
#align finset.prod_comm Finset.prod_comm
#align finset.sum_comm Finset.sum_comm
-- A relation `r` that holds at `(1, 1)` and is preserved when multiplying corresponding
-- factors `f a` and `g a` on the left holds between the products of `f` and `g` over `s`.
@[to_additive]
theorem prod_hom_rel [CommMonoid γ] {r : β → γ → Prop} {f : α → β} {g : α → γ} {s : Finset α}
    (h₁ : r 1 1) (h₂ : ∀ a b c, r b c → r (f a * b) (g a * c)) :
    r (∏ x in s, f x) (∏ x in s, g x) := by
  -- Unfold `Finset.prod` to the underlying multiset products and defer to the multiset lemma.
  delta Finset.prod
  apply Multiset.prod_hom_rel <;> assumption
#align finset.prod_hom_rel Finset.prod_hom_rel
#align finset.sum_hom_rel Finset.sum_hom_rel
-- A product all of whose factors are `1` equals `1`.
@[to_additive]
theorem prod_eq_one {f : α → β} {s : Finset α} (h : ∀ x ∈ s, f x = 1) : (∏ x in s, f x) = 1 :=
  calc
    (∏ x in s, f x) = ∏ _x in s, 1 := Finset.prod_congr rfl h
    _ = 1 := Finset.prod_const_one
#align finset.prod_eq_one Finset.prod_eq_one
#align finset.sum_eq_zero Finset.sum_eq_zero
@[to_additive]
theorem prod_subset_one_on_sdiff [DecidableEq α] (h : s₁ ⊆ s₂) (hg : ∀ x ∈ s₂ \ s₁, g x = 1)
(hfg : ∀ x ∈ s₁, f x = g x) : (∏ i in s₁, f i) = ∏ i in s₂, g i := by
rw [← prod_sdiff h, prod_eq_one hg, one_mul]
exact prod_congr rfl hfg
#align finset.prod_subset_one_on_sdiff Finset.prod_subset_one_on_sdiff
#align finset.sum_subset_zero_on_sdiff Finset.sum_subset_zero_on_sdiff
@[to_additive]
theorem prod_subset (h : s₁ ⊆ s₂) (hf : ∀ x ∈ s₂, x ∉ s₁ → f x = 1) :
(∏ x in s₁, f x) = ∏ x in s₂, f x :=
haveI := Classical.decEq α
prod_subset_one_on_sdiff h (by simpa) fun _ _ => rfl
#align finset.prod_subset Finset.prod_subset
#align finset.sum_subset Finset.sum_subset
@[to_additive]
theorem prod_filter_of_ne {p : α → Prop} [DecidablePred p] (hp : ∀ x ∈ s, f x ≠ 1 → p x) :
(∏ x in s.filter p, f x) = ∏ x in s, f x :=
(prod_subset (filter_subset _ _)) fun x => by
classical
rw [not_imp_comm, mem_filter]
exact fun h₁ h₂ => ⟨h₁, by simpa using hp _ h₁ h₂⟩
#align finset.prod_filter_of_ne Finset.prod_filter_of_ne
#align finset.sum_filter_of_ne Finset.sum_filter_of_ne
-- If we use `[decidable_eq β]` here, some rewrites fail because they find a wrong `decidable`
-- instance first; `{∀ x, decidable (f x ≠ 1)}` doesn't work with `rw ← prod_filter_ne_one`
@[to_additive]
theorem prod_filter_ne_one [∀ x, Decidable (f x ≠ 1)] :
(∏ x in s.filter fun x => f x ≠ 1, f x) = ∏ x in s, f x :=
prod_filter_of_ne fun _ _ => id
#align finset.prod_filter_ne_one Finset.prod_filter_ne_one
#align finset.sum_filter_ne_zero Finset.sum_filter_ne_zero
@[to_additive]
theorem prod_filter (p : α → Prop) [DecidablePred p] (f : α → β) :
(∏ a in s.filter p, f a) = ∏ a in s, if p a then f a else 1 :=
calc
(∏ a in s.filter p, f a) = ∏ a in s.filter p, if p a then f a else 1 :=
prod_congr rfl fun a h => by rw [if_pos]; simpa using (mem_filter.1 h).2
_ = ∏ a in s, if p a then f a else 1 := by
{ refine' prod_subset (filter_subset _ s) fun x hs h => _
rw [mem_filter, not_and] at h
exact if_neg (by simpa using h hs) }
#align finset.prod_filter Finset.prod_filter
#align finset.sum_filter Finset.sum_filter
-- If `f` is `1` on every element of `s` other than `a ∈ s`, the product over `s`
-- collapses to the single factor `f a`.
@[to_additive]
theorem prod_eq_single_of_mem {s : Finset α} {f : α → β} (a : α) (h : a ∈ s)
    (h₀ : ∀ b ∈ s, b ≠ a → f b = 1) : (∏ x in s, f x) = f a := by
  haveI := Classical.decEq α
  calc
    (∏ x in s, f x) = ∏ x in {a}, f x := by
      -- Shrink the index set to `{a}`: every omitted factor is `1` by `h₀`.
      { refine' (prod_subset _ _).symm
        · intro _ H
          rwa [mem_singleton.1 H]
        · simpa only [mem_singleton] }
    _ = f a := prod_singleton
#align finset.prod_eq_single_of_mem Finset.prod_eq_single_of_mem
#align finset.sum_eq_single_of_mem Finset.sum_eq_single_of_mem
-- Variant of `prod_eq_single_of_mem` that does not require `a ∈ s`, at the price of
-- additionally assuming `f a = 1` whenever `a ∉ s`.
@[to_additive]
theorem prod_eq_single {s : Finset α} {f : α → β} (a : α) (h₀ : ∀ b ∈ s, b ≠ a → f b = 1)
    (h₁ : a ∉ s → f a = 1) : (∏ x in s, f x) = f a :=
  haveI := Classical.decEq α
  -- Split on `a ∈ s`: if so, apply `prod_eq_single_of_mem`; otherwise every factor
  -- is `1` (no `b ∈ s` can equal `a`), and `f a = 1` by `h₁`.
  by_cases (prod_eq_single_of_mem a · h₀) fun this =>
    (prod_congr rfl fun b hb => h₀ b hb <| by rintro rfl; exact this hb).trans <|
      prod_const_one.trans (h₁ this).symm
#align finset.prod_eq_single Finset.prod_eq_single
#align finset.sum_eq_single Finset.sum_eq_single
-- A product over `s` of a function supported on the two distinct elements `a, b ∈ s`
-- collapses to `f a * f b`.
@[to_additive]
theorem prod_eq_mul_of_mem {s : Finset α} {f : α → β} (a b : α) (ha : a ∈ s) (hb : b ∈ s)
    (hn : a ≠ b) (h₀ : ∀ c ∈ s, c ≠ a ∧ c ≠ b → f c = 1) : (∏ x in s, f x) = f a * f b := by
  haveI := Classical.decEq α; let s' := ({a, b} : Finset α)
  -- `{a, b} ⊆ s`, since both elements belong to `s`.
  have hu : s' ⊆ s := by
    refine' insert_subset.mpr _
    apply And.intro ha
    apply singleton_subset_iff.mpr hb
  -- Outside `{a, b}` the function is `1`, by `h₀`.
  have hf : ∀ c ∈ s, c ∉ s' → f c = 1 := by
    intro c hc hcs
    apply h₀ c hc
    apply not_or.mp
    intro hab
    apply hcs
    apply mem_insert.mpr
    rw [mem_singleton]
    exact hab
  -- Shrink the index set to `{a, b}` and evaluate the two-element product.
  rw [← prod_subset hu hf]
  exact Finset.prod_pair hn
#align finset.prod_eq_mul_of_mem Finset.prod_eq_mul_of_mem
#align finset.sum_eq_add_of_mem Finset.sum_eq_add_of_mem
@[to_additive]
theorem prod_eq_mul {s : Finset α} {f : α → β} (a b : α) (hn : a ≠ b)
(h₀ : ∀ c ∈ s, c ≠ a ∧ c ≠ b → f c = 1) (ha : a ∉ s → f a = 1) (hb : b ∉ s → f b = 1) :
(∏ x in s, f x) = f a * f b := by
haveI := Classical.decEq α; by_cases h₁ : a ∈ s <;> by_cases h₂ : b ∈ s
· exact prod_eq_mul_of_mem a b h₁ h₂ hn h₀
· rw [hb h₂, mul_one]
apply prod_eq_single_of_mem a h₁
exact fun c hc hca => h₀ c hc ⟨hca, ne_of_mem_of_not_mem hc h₂⟩
· rw [ha h₁, one_mul]
apply prod_eq_single_of_mem b h₂
exact fun c hc hcb => h₀ c hc ⟨ne_of_mem_of_not_mem hc h₁, hcb⟩
· rw [ha h₁, hb h₂, mul_one]
exact
_root_.trans
(prod_congr rfl fun c hc =>
h₀ c hc ⟨ne_of_mem_of_not_mem hc h₁, ne_of_mem_of_not_mem hc h₂⟩)
prod_const_one
#align finset.prod_eq_mul Finset.prod_eq_mul
#align finset.sum_eq_add Finset.sum_eq_add
-- A product over `s.attach` (elements of `s` bundled with their membership proof)
-- equals the plain product over `s`.
@[to_additive]
theorem prod_attach {f : α → β} : (∏ x in s.attach, f x) = ∏ x in s, f x :=
  haveI := Classical.decEq α
  -- Pass through the image of `attach` under `Subtype.val`, which is `s` itself.
  calc
    (∏ x in s.attach, f x.val) = ∏ x in s.attach.image Subtype.val, f x := by
      { rw [prod_image]; exact fun x _ y _ => Subtype.eq }
    _ = _ := by rw [attach_image_val]
#align finset.prod_attach Finset.prod_attach
#align finset.sum_attach Finset.sum_attach
-- Porting note: simpNF linter complains that LHS doesn't simplify, but it does
/-- A product over `s.subtype p` equals one over `s.filter p`. -/
@[to_additive (attr := simp, nolint simpNF)
      "A sum over `s.subtype p` equals one over `s.filter p`."]
theorem prod_subtype_eq_prod_filter (f : α → β) {p : α → Prop} [DecidablePred p] :
    (∏ x in s.subtype p, f x) = ∏ x in s.filter p, f x := by
  -- Unfold `subtype` as a `map` along the subtype embedding, then compare index sets.
  conv_lhs => erw [← prod_map (s.subtype p) (Function.Embedding.subtype _) f]
  exact prod_congr (subtype_map _) fun x _hx => rfl
#align finset.prod_subtype_eq_prod_filter Finset.prod_subtype_eq_prod_filter
#align finset.sum_subtype_eq_sum_filter Finset.sum_subtype_eq_sum_filter
/-- If all elements of a `Finset` satisfy the predicate `p`, a product
over `s.subtype p` equals that product over `s`. -/
@[to_additive "If all elements of a `Finset` satisfy the predicate `p`, a sum
over `s.subtype p` equals that sum over `s`."]
theorem prod_subtype_of_mem (f : α → β) {p : α → Prop} [DecidablePred p] (h : ∀ x ∈ s, p x) :
    (∏ x in s.subtype p, f x) = ∏ x in s, f x := by
  -- Since `p` holds on all of `s`, the filter is trivial.
  rw [prod_subtype_eq_prod_filter, filter_true_of_mem]
  simpa using h
#align finset.prod_subtype_of_mem Finset.prod_subtype_of_mem
#align finset.sum_subtype_of_mem Finset.sum_subtype_of_mem
/-- A product of a function over a `Finset` in a subtype equals a
product in the main type of a function that agrees with the first
function on that `Finset`. -/
@[to_additive "A sum of a function over a `Finset` in a subtype equals a
sum in the main type of a function that agrees with the first
function on that `Finset`."]
theorem prod_subtype_map_embedding {p : α → Prop} {s : Finset { x // p x }} {f : { x // p x } → β}
    {g : α → β} (h : ∀ x : { x // p x }, x ∈ s → g x = f x) :
    (∏ x in s.map (Function.Embedding.subtype _), g x) = ∏ x in s, f x := by
  rw [Finset.prod_map]
  exact Finset.prod_congr rfl h
#align finset.prod_subtype_map_embedding Finset.prod_subtype_map_embedding
#align finset.sum_subtype_map_embedding Finset.sum_subtype_map_embedding
-- Make `f` and `s` explicit for the next few lemmas, where they must be supplied positionally.
variable (f s)
-- A product indexed by the coercion-to-sort of `s` is definitionally a product over `s.attach`.
@[to_additive]
theorem prod_coe_sort_eq_attach (f : s → β) : (∏ i : s, f i) = ∏ i in s.attach, f i :=
  rfl
#align finset.prod_coe_sort_eq_attach Finset.prod_coe_sort_eq_attach
#align finset.sum_coe_sort_eq_attach Finset.sum_coe_sort_eq_attach
-- A product over the coercion of `s` to a type equals the product over the finset `s`.
@[to_additive]
theorem prod_coe_sort : (∏ i : s, f i) = ∏ i in s, f i :=
  prod_attach
#align finset.prod_coe_sort Finset.prod_coe_sort
#align finset.sum_coe_sort Finset.sum_coe_sort
-- As `prod_coe_sort`, phrased via the coercion of `s` to a `Set α`.
@[to_additive]
theorem prod_finset_coe (f : α → β) (s : Finset α) : (∏ i : (s : Set α), f i) = ∏ i in s, f i :=
  prod_coe_sort s f
#align finset.prod_finset_coe Finset.prod_finset_coe
#align finset.sum_finset_coe Finset.sum_finset_coe
-- Restore `f` and `s` to implicit for the remainder of the section.
variable {f s}
-- When membership in `s` is equivalent to a predicate `p`, a product over `s`
-- equals the product over the whole subtype `Subtype p`.
@[to_additive]
theorem prod_subtype {p : α → Prop} {F : Fintype (Subtype p)} (s : Finset α) (h : ∀ x, x ∈ s ↔ p x)
    (f : α → β) : (∏ a in s, f a) = ∏ a : Subtype p, f a := by
  -- Substitute `p` by `(· ∈ s)` so the subtype product becomes a coe-sort product over `s`.
  have : (· ∈ s) = p := Set.ext h
  subst p
  rw [← prod_coe_sort]
  congr!
#align finset.prod_subtype Finset.prod_subtype
#align finset.sum_subtype Finset.sum_subtype
/-- The product of a function `g` defined only on a set `s` is equal to
the product of a function `f` defined everywhere,
as long as `f` and `g` agree on `s`, and `f = 1` off `s`. -/
@[to_additive "The sum of a function `g` defined only on a set `s` is equal to
the sum of a function `f` defined everywhere,
as long as `f` and `g` agree on `s`, and `f = 0` off `s`."]
theorem prod_congr_set {α : Type _} [CommMonoid α] {β : Type _} [Fintype β] (s : Set β)
    [DecidablePred (· ∈ s)] (f : β → α) (g : s → α) (w : ∀ (x : β) (h : x ∈ s), f x = g ⟨x, h⟩)
    (w' : ∀ x : β, x ∉ s → f x = 1) : Finset.univ.prod f = Finset.univ.prod g := by
  -- Shrink the `f`-product from `univ` to `s.toFinset` (legal since `f = 1` off `s` by `w'`),
  -- then identify it with the subtype product of `g` via the agreement hypothesis `w`.
  rw [← @Finset.prod_subset _ _ s.toFinset Finset.univ f _ (by simp)]
  · rw [Finset.prod_subtype]
    · apply Finset.prod_congr rfl
      exact fun ⟨x, h⟩ _ => w x h
    · simp
  · rintro x _ h
    exact w' x (by simpa using h)
#align finset.prod_congr_set Finset.prod_congr_set
#align finset.sum_congr_set Finset.sum_congr_set
-- A product of `h` applied to a dependent `if` splits into two products: one over the
-- `p`-part of `s` (using the `f` branch) and one over the `¬p`-part (using the `g` branch).
-- The `attach`es are needed because `f`/`g` demand proofs of `p x` / `¬p x`.
@[to_additive]
theorem prod_apply_dite {s : Finset α} {p : α → Prop} {hp : DecidablePred p}
    [DecidablePred fun x => ¬p x] (f : ∀ x : α, p x → γ) (g : ∀ x : α, ¬p x → γ) (h : γ → β) :
    (∏ x in s, h (if hx : p x then f x hx else g x hx)) =
      (∏ x in (s.filter p).attach, h (f x.1 $ by simpa using (mem_filter.mp x.2).2)) *
        ∏ x in (s.filter fun x => ¬p x).attach, h (g x.1 $ by simpa using (mem_filter.mp x.2).2) :=
  -- Split along `p`, pass to `attach`, then evaluate each `dite` on its half.
  calc
    (∏ x in s, h (if hx : p x then f x hx else g x hx)) =
        (∏ x in s.filter p, h (if hx : p x then f x hx else g x hx)) *
          ∏ x in s.filter fun x => ¬p x, h (if hx : p x then f x hx else g x hx) :=
      (prod_filter_mul_prod_filter_not s p _).symm
    _ =
        (∏ x in (s.filter p).attach, h (if hx : p x.1 then f x.1 hx else g x.1 hx)) *
          ∏ x in (s.filter fun x => ¬p x).attach, h (if hx : p x.1 then f x.1 hx else g x.1 hx) :=
      congr_arg₂ _ prod_attach.symm prod_attach.symm
    _ =
        (∏ x in (s.filter p).attach, h (f x.1 $ by simpa using (mem_filter.mp x.2).2)) *
          ∏ x in (s.filter fun x ↦ ¬p x).attach, h (g x.1 $ by simpa using (mem_filter.mp x.2).2) :=
      congr_arg₂ _ (prod_congr rfl fun x _hx ↦
        congr_arg h (dif_pos $ by simpa using (mem_filter.mp x.2).2))
        (prod_congr rfl fun x _hx => congr_arg h (dif_neg $ by simpa using (mem_filter.mp x.2).2))
#align finset.prod_apply_dite Finset.prod_apply_dite
#align finset.sum_apply_dite Finset.sum_apply_dite
-- Non-dependent version of `prod_apply_dite`; the `attach`es disappear.
@[to_additive]
theorem prod_apply_ite {s : Finset α} {p : α → Prop} {_hp : DecidablePred p} (f g : α → γ)
    (h : γ → β) :
    (∏ x in s, h (if p x then f x else g x)) =
      (∏ x in s.filter p, h (f x)) * ∏ x in s.filter fun x => ¬p x, h (g x) :=
  _root_.trans (prod_apply_dite _ _ _)
    (congr_arg₂ _ (@prod_attach _ _ _ _ (h ∘ f)) (@prod_attach _ _ _ _ (h ∘ g)))
#align finset.prod_apply_ite Finset.prod_apply_ite
#align finset.sum_apply_ite Finset.sum_apply_ite
-- `prod_apply_dite` specialised to `h = id`.
@[to_additive]
theorem prod_dite {s : Finset α} {p : α → Prop} {hp : DecidablePred p} (f : ∀ x : α, p x → β)
    (g : ∀ x : α, ¬p x → β) :
    (∏ x in s, if hx : p x then f x hx else g x hx) =
      (∏ x in (s.filter p).attach, f x.1 (by simpa using (mem_filter.mp x.2).2)) *
        ∏ x in (s.filter fun x => ¬p x).attach, g x.1 (by simpa using (mem_filter.mp x.2).2) := by
  simp [prod_apply_dite _ _ fun x => x]
#align finset.prod_dite Finset.prod_dite
#align finset.sum_dite Finset.sum_dite
-- `prod_apply_ite` specialised to `h = id`.
@[to_additive]
theorem prod_ite {s : Finset α} {p : α → Prop} {hp : DecidablePred p} (f g : α → β) :
    (∏ x in s, if p x then f x else g x) =
      (∏ x in s.filter p, f x) * ∏ x in s.filter fun x => ¬p x, g x := by
  simp [prod_apply_ite _ _ fun x => x]
#align finset.prod_ite Finset.prod_ite
#align finset.sum_ite Finset.sum_ite
-- If `p` fails on all of `s`, an `if p x then f x else g x` product is just the `g`-product.
@[to_additive]
theorem prod_ite_of_false {p : α → Prop} {hp : DecidablePred p} (f g : α → β) (h : ∀ x ∈ s, ¬p x) :
    (∏ x in s, if p x then f x else g x) = ∏ x in s, g x := by
  rw [prod_ite, filter_false_of_mem, filter_true_of_mem]
  · simp only [prod_empty, one_mul]
  all_goals intros; simp; apply h; assumption
#align finset.prod_ite_of_false Finset.prod_ite_of_false
#align finset.sum_ite_of_false Finset.sum_ite_of_false
-- Dual of `prod_ite_of_false`: if `p` holds on all of `s`, the product is the `f`-product.
@[to_additive]
theorem prod_ite_of_true {p : α → Prop} {hp : DecidablePred p} (f g : α → β) (h : ∀ x ∈ s, p x) :
    (∏ x in s, if p x then f x else g x) = ∏ x in s, f x := by
  -- Negate the condition so the previous lemma applies.
  simp_rw [← ite_not (p _)]
  apply prod_ite_of_false
  simpa
#align finset.prod_ite_of_true Finset.prod_ite_of_true
#align finset.sum_ite_of_true Finset.sum_ite_of_true
-- Version of `prod_ite_of_false` with a post-composed function `k`.
@[to_additive]
theorem prod_apply_ite_of_false {p : α → Prop} {hp : DecidablePred p} (f g : α → γ) (k : γ → β)
    (h : ∀ x ∈ s, ¬p x) : (∏ x in s, k (if p x then f x else g x)) = ∏ x in s, k (g x) := by
  simp_rw [apply_ite k]
  exact prod_ite_of_false _ _ h
#align finset.prod_apply_ite_of_false Finset.prod_apply_ite_of_false
#align finset.sum_apply_ite_of_false Finset.sum_apply_ite_of_false
-- Version of `prod_ite_of_true` with a post-composed function `k`.
@[to_additive]
theorem prod_apply_ite_of_true {p : α → Prop} {hp : DecidablePred p} (f g : α → γ) (k : γ → β)
    (h : ∀ x ∈ s, p x) : (∏ x in s, k (if p x then f x else g x)) = ∏ x in s, k (f x) := by
  simp_rw [apply_ite k]
  exact prod_ite_of_true _ _ h
#align finset.prod_apply_ite_of_true Finset.prod_apply_ite_of_true
#align finset.sum_apply_ite_of_true Finset.sum_apply_ite_of_true
-- If the `if` condition is membership in `s` itself, the condition is always true on `s`.
@[to_additive]
theorem prod_extend_by_one [DecidableEq α] (s : Finset α) (f : α → β) :
    (∏ i in s, if i ∈ s then f i else 1) = ∏ i in s, f i :=
  (prod_congr rfl) fun _i hi => if_pos hi
#align finset.prod_extend_by_one Finset.prod_extend_by_one
#align finset.sum_extend_by_zero Finset.sum_extend_by_zero
-- Multiplying in `1` outside `t` restricts the index set to the intersection `s ∩ t`.
@[to_additive (attr := simp)]
theorem prod_ite_mem [DecidableEq α] (s t : Finset α) (f : α → β) :
    (∏ i in s, if i ∈ t then f i else 1) = ∏ i in s ∩ t, f i := by
  rw [← Finset.prod_filter, Finset.filter_mem_eq_inter]
#align finset.prod_ite_mem Finset.prod_ite_mem
#align finset.sum_ite_mem Finset.sum_ite_mem
-- A product of a dependent `if` testing `a = x` has at most one nontrivial factor, at `x = a`,
-- so it evaluates to `b a rfl` if `a ∈ s` and to `1` otherwise.
@[to_additive (attr := simp)]
theorem prod_dite_eq [DecidableEq α] (s : Finset α) (a : α) (b : ∀ x : α, a = x → β) :
    (∏ x in s, if h : a = x then b x h else 1) = ite (a ∈ s) (b a rfl) 1 := by
  split_ifs with h
  -- `a ∈ s`: all other factors are `1`, leaving `b a rfl`.
  · rw [Finset.prod_eq_single a, dif_pos rfl]
    · intros _ _ h
      rw [dif_neg]
      exact h.symm
    · simp [h]
  -- `a ∉ s`: every factor is `1`.
  · rw [Finset.prod_eq_one]
    intros
    rw [dif_neg]
    rintro rfl
    contradiction
#align finset.prod_dite_eq Finset.prod_dite_eq
#align finset.sum_dite_eq Finset.sum_dite_eq
-- As `prod_dite_eq`, but with the equality test in the other direction (`x = a`).
@[to_additive (attr := simp)]
theorem prod_dite_eq' [DecidableEq α] (s : Finset α) (a : α) (b : ∀ x : α, x = a → β) :
    (∏ x in s, if h : x = a then b x h else 1) = ite (a ∈ s) (b a rfl) 1 := by
  split_ifs with h
  · rw [Finset.prod_eq_single a, dif_pos rfl]
    · intros _ _ h
      rw [dif_neg]
      exact h
    · simp [h]
  · rw [Finset.prod_eq_one]
    intros
    rw [dif_neg]
    rintro rfl
    contradiction
#align finset.prod_dite_eq' Finset.prod_dite_eq'
#align finset.sum_dite_eq' Finset.sum_dite_eq'
-- Non-dependent specialization of `prod_dite_eq`.
@[to_additive (attr := simp)]
theorem prod_ite_eq [DecidableEq α] (s : Finset α) (a : α) (b : α → β) :
    (∏ x in s, ite (a = x) (b x) 1) = ite (a ∈ s) (b a) 1 :=
  prod_dite_eq s a fun x _ => b x
#align finset.prod_ite_eq Finset.prod_ite_eq
#align finset.sum_ite_eq Finset.sum_ite_eq
/-- A product taken over a conditional whose condition is an equality test on the index and whose
alternative is `1` has value either the term at that index or `1`.
The difference with `Finset.prod_ite_eq` is that the arguments to `eq` are swapped. -/
@[to_additive (attr := simp) "A sum taken over a conditional whose condition is an equality
test on the index and whose alternative is `0` has value either the term at that index or `0`.
The difference with `Finset.sum_ite_eq` is that the arguments to `eq` are swapped."]
theorem prod_ite_eq' [DecidableEq α] (s : Finset α) (a : α) (b : α → β) :
    (∏ x in s, ite (x = a) (b x) 1) = ite (a ∈ s) (b a) 1 :=
  prod_dite_eq' s a fun x _ => b x
#align finset.prod_ite_eq' Finset.prod_ite_eq'
#align finset.sum_ite_eq' Finset.sum_ite_eq'
-- An `if` on the index set commutes with taking the product.
@[to_additive]
theorem prod_ite_index (p : Prop) [Decidable p] (s t : Finset α) (f : α → β) :
    (∏ x in if p then s else t, f x) = if p then ∏ x in s, f x else ∏ x in t, f x :=
  apply_ite (fun s => ∏ x in s, f x) _ _ _
#align finset.prod_ite_index Finset.prod_ite_index
#align finset.sum_ite_index Finset.sum_ite_index
-- An `if` whose condition does not depend on the index commutes with the product.
@[to_additive (attr := simp)]
theorem prod_ite_irrel (p : Prop) [Decidable p] (s : Finset α) (f g : α → β) :
    (∏ x in s, if p then f x else g x) = if p then ∏ x in s, f x else ∏ x in s, g x := by
  split_ifs with h <;> rfl
#align finset.prod_ite_irrel Finset.prod_ite_irrel
#align finset.sum_ite_irrel Finset.sum_ite_irrel
-- Dependent version of `prod_ite_irrel`.
@[to_additive (attr := simp)]
theorem prod_dite_irrel (p : Prop) [Decidable p] (s : Finset α) (f : p → α → β) (g : ¬p → α → β) :
    (∏ x in s, if h : p then f h x else g h x) =
      if h : p then ∏ x in s, f h x else ∏ x in s, g h x := by
  split_ifs with h <;> rfl
#align finset.prod_dite_irrel Finset.prod_dite_irrel
#align finset.sum_dite_irrel Finset.sum_dite_irrel
-- `Pi.single i x` vanishes away from `i`, so summing its value at a fixed `j`-slot over `s`
-- leaves `x` exactly when `i ∈ s`. (Sum-only: `Pi.single` has no multiplicative analogue here.)
@[simp]
theorem sum_pi_single' {ι M : Type _} [DecidableEq ι] [AddCommMonoid M] (i : ι) (x : M)
    (s : Finset ι) : (∑ j in s, Pi.single i x j) = if i ∈ s then x else 0 :=
  sum_dite_eq' _ _ _
#align finset.sum_pi_single' Finset.sum_pi_single'
-- Dual of `sum_pi_single'`: the singleton's index varies over `s`, the evaluation point is fixed.
@[simp]
theorem sum_pi_single {ι : Type _} {M : ι → Type _} [DecidableEq ι] [∀ i, AddCommMonoid (M i)]
    (i : ι) (f : ∀ i, M i) (s : Finset ι) :
    (∑ j in s, Pi.single j (f j) i) = if i ∈ s then f i else 0 :=
  sum_dite_eq _ _ _
#align finset.sum_pi_single Finset.sum_pi_single
-- Reindexing lemma: a bijection `i` need only be defined on (and surject onto) the factors
-- that are not `1`; the trivial factors on both sides are discarded by `prod_filter_ne_one`.
@[to_additive]
theorem prod_bij_ne_one {s : Finset α} {t : Finset γ} {f : α → β} {g : γ → β}
    (i : ∀ a ∈ s, f a ≠ 1 → γ) (hi : ∀ a h₁ h₂, i a h₁ h₂ ∈ t)
    (i_inj : ∀ a₁ a₂ h₁₁ h₁₂ h₂₁ h₂₂, i a₁ h₁₁ h₁₂ = i a₂ h₂₁ h₂₂ → a₁ = a₂)
    (i_surj : ∀ b ∈ t, g b ≠ 1 → ∃ a h₁ h₂, b = i a h₁ h₂) (h : ∀ a h₁ h₂, f a = g (i a h₁ h₂)) :
    (∏ x in s, f x) = ∏ x in t, g x := by
  classical
  -- Restrict both products to non-`1` factors, then transport along `i` via `prod_bij`;
  -- the four `?_` goals are maps-into, value agreement, injectivity, and surjectivity.
  calc
    (∏ x in s, f x) = ∏ x in s.filter fun x => f x ≠ 1, f x := prod_filter_ne_one.symm
    _ = ∏ x in t.filter fun x => g x ≠ 1, g x :=
      prod_bij (fun a ha => i a (mem_filter.mp ha).1 $ by simpa using (mem_filter.mp ha).2)
        ?_ ?_ ?_ ?_
    _ = ∏ x in t, g x := prod_filter_ne_one
  · intros a ha
    refine' (mem_filter.mp ha).elim _
    intros h₁ h₂
    refine (mem_filter.mpr ⟨hi a h₁ _, ?_⟩)
    specialize h a h₁ fun H ↦ by rw [H] at h₂; simp at h₂
    rwa [← h]
  · refine' (fun a ha => (mem_filter.mp ha).elim fun h₁ h₂ ↦ _)
    exact h a h₁ fun H ↦ by rw [H] at h₂; simp at h₂
  · intros a₁ a₂ ha₁ ha₂
    refine' (mem_filter.mp ha₁).elim fun _ha₁₁ _ha₁₂ ↦ _
    refine' (mem_filter.mp ha₂).elim fun _ha₂₁ _ha₂₂ ↦ _
    apply i_inj
  · intros b hb
    refine' (mem_filter.mp hb).elim fun h₁ h₂ ↦ _
    obtain ⟨a, ha₁, ha₂, eq⟩ := i_surj b h₁ fun H ↦ by rw [H] at h₂; simp at h₂
    exact ⟨a, mem_filter.mpr ⟨ha₁, ha₂⟩, eq⟩
#align finset.prod_bij_ne_one Finset.prod_bij_ne_one
#align finset.sum_bij_ne_zero Finset.sum_bij_ne_zero
-- When `p` fails on all of `s`, a dependent `if` product reduces to the `g` branch,
-- reindexed over the subtype `↥s` so `g` can receive the proof of `¬p`.
@[to_additive]
theorem prod_dite_of_false {p : α → Prop} {hp : DecidablePred p} (h : ∀ x ∈ s, ¬p x)
    (f : ∀ x : α, p x → β) (g : ∀ x : α, ¬p x → β) :
    (∏ x in s, if hx : p x then f x hx else g x hx) = ∏ x : s, g x.val (h x.val x.property) :=
  prod_bij (fun x hx => ⟨x, hx⟩) (fun x hx => by simp)
    (fun a ha => by
      dsimp
      rw [dif_neg])
    (fun a₁ a₂ h₁ h₂ hh => congr_arg Subtype.val hh) fun b _hb => ⟨b.1, b.2, by simp⟩
#align finset.prod_dite_of_false Finset.prod_dite_of_false
#align finset.sum_dite_of_false Finset.sum_dite_of_false
-- Dual of `prod_dite_of_false`: `p` holds everywhere, so the `f` branch is always taken.
@[to_additive]
theorem prod_dite_of_true {p : α → Prop} {hp : DecidablePred p} (h : ∀ x ∈ s, p x)
    (f : ∀ x : α, p x → β) (g : ∀ x : α, ¬p x → β) :
    (∏ x in s, if hx : p x then f x hx else g x hx) = ∏ x : s, f x.val (h x.val x.property) :=
  prod_bij (fun x hx => ⟨x, hx⟩) (fun x hx => by simp)
    (fun a ha => by
      dsimp
      rw [dif_pos])
    (fun a₁ a₂ h₁ h₂ hh => congr_arg Subtype.val hh) fun b _hb => ⟨b.1, b.2, by simp⟩
#align finset.prod_dite_of_true Finset.prod_dite_of_true
#align finset.sum_dite_of_true Finset.sum_dite_of_true
-- Contrapositive of `prod_empty`: a product that is not `1` has a nonempty index set.
@[to_additive]
theorem nonempty_of_prod_ne_one (h : (∏ x in s, f x) ≠ 1) : s.Nonempty :=
  s.eq_empty_or_nonempty.elim (fun H => False.elim <| h <| H.symm ▸ prod_empty) id
#align finset.nonempty_of_prod_ne_one Finset.nonempty_of_prod_ne_one
#align finset.nonempty_of_sum_ne_zero Finset.nonempty_of_sum_ne_zero
-- A product that is not `1` must contain a factor that is not `1`.
@[to_additive]
theorem exists_ne_one_of_prod_ne_one (h : (∏ x in s, f x) ≠ 1) : ∃ a ∈ s, f a ≠ 1 := by
  classical
    -- Drop the trivial factors; the remaining index set is still nonempty.
    rw [← prod_filter_ne_one] at h
    rcases nonempty_of_prod_ne_one h with ⟨x, hx⟩
    exact ⟨x, (mem_filter.1 hx).1, by simpa using (mem_filter.1 hx).2⟩
#align finset.exists_ne_one_of_prod_ne_one Finset.exists_ne_one_of_prod_ne_one
#align finset.exists_ne_zero_of_sum_ne_zero Finset.exists_ne_zero_of_sum_ne_zero
-- Peel the last factor off a product over `range (n + 1)`, with the new factor on the left.
@[to_additive]
theorem prod_range_succ_comm (f : ℕ → β) (n : ℕ) :
    (∏ x in range (n + 1), f x) = f n * ∏ x in range n, f x := by
  rw [range_succ, prod_insert not_mem_range_self]
#align finset.prod_range_succ_comm Finset.prod_range_succ_comm
#align finset.sum_range_succ_comm Finset.sum_range_succ_comm
-- Peel the last factor off a product over `range (n + 1)`, with the new factor on the right.
@[to_additive]
theorem prod_range_succ (f : ℕ → β) (n : ℕ) :
    (∏ x in range (n + 1), f x) = (∏ x in range n, f x) * f n := by
  simp only [mul_comm, prod_range_succ_comm]
#align finset.prod_range_succ Finset.prod_range_succ
#align finset.sum_range_succ Finset.sum_range_succ
-- Peel the first factor (`f 0`) off instead, shifting the remaining indices by one.
@[to_additive]
theorem prod_range_succ' (f : ℕ → β) :
    ∀ n : ℕ, (∏ k in range (n + 1), f k) = (∏ k in range n, f (k + 1)) * f 0
  | 0 => prod_range_succ _ _
  | n + 1 => by rw [prod_range_succ _ n, mul_right_comm, ← prod_range_succ' _ n, prod_range_succ]
#align finset.prod_range_succ' Finset.prod_range_succ'
#align finset.sum_range_succ' Finset.sum_range_succ'
-- If `u` is `1` from index `N` on, products over any longer initial segment are all equal
-- to the product over `range (N + 1)`.
@[to_additive]
theorem eventually_constant_prod {u : ℕ → β} {N : ℕ} (hu : ∀ n ≥ N, u n = 1) {n : ℕ} (hn : N ≤ n) :
    (∏ k in range (n + 1), u k) = ∏ k in range (N + 1), u k := by
  -- Write `n = N + m` and induct on the overshoot `m`.
  obtain ⟨m, rfl : n = N + m⟩ := le_iff_exists_add.mp hn
  clear hn
  induction' m with m hm
  · simp
  erw [prod_range_succ, hm]
  simp [hu, @zero_le' ℕ]
#align finset.eventually_constant_prod Finset.eventually_constant_prod
#align finset.eventually_constant_sum Finset.eventually_constant_sum
-- Split a product over `range (n + m)` at index `n`; the tail is re-indexed from `0`.
@[to_additive]
theorem prod_range_add (f : ℕ → β) (n m : ℕ) :
    (∏ x in range (n + m), f x) = (∏ x in range n, f x) * ∏ x in range m, f (n + x) := by
  induction' m with m hm
  · simp
  · erw [Nat.add_succ, prod_range_succ, prod_range_succ, hm, mul_assoc]; rfl
#align finset.prod_range_add Finset.prod_range_add
#align finset.sum_range_add Finset.sum_range_add
-- Division form of `prod_range_add`, available in a commutative group.
@[to_additive]
theorem prod_range_add_div_prod_range {α : Type _} [CommGroup α] (f : ℕ → α) (n m : ℕ) :
    ((∏ k in range (n + m), f k) / ∏ k in range n, f k) = ∏ k in Finset.range m, f (n + k) :=
  div_eq_of_eq_mul' (prod_range_add f n m)
#align finset.prod_range_add_div_prod_range Finset.prod_range_add_div_prod_range
#align finset.sum_range_add_sub_sum_range Finset.sum_range_add_sub_sum_range
-- The empty product over `range 0`.
@[to_additive]
theorem prod_range_zero (f : ℕ → β) : (∏ k in range 0, f k) = 1 := by rw [range_zero, prod_empty]
#align finset.prod_range_zero Finset.prod_range_zero
#align finset.sum_range_zero Finset.sum_range_zero
-- A product over `range 1` is the single factor `f 0`. The additive name is given explicitly
-- because `to_additive` cannot guess it from `prod_range_one`.
@[to_additive sum_range_one]
theorem prod_range_one (f : ℕ → β) : (∏ k in range 1, f k) = f 0 := by
  rw [range_one]
  apply @prod_singleton β ℕ 0 f
#align finset.prod_range_one Finset.prod_range_one
#align finset.sum_range_one Finset.sum_range_one
open List
-- The product of `l.map f` regrouped by distinct elements: each `m ∈ l.toFinset` contributes
-- `f m` raised to its multiplicity in `l`.
@[to_additive]
theorem prod_list_map_count [DecidableEq α] (l : List α) {M : Type _} [CommMonoid M] (f : α → M) :
    (l.map f).prod = ∏ m in l.toFinset, f m ^ l.count m := by
  induction' l with a s IH; · simp only [map_nil, prod_nil, count_nil, pow_zero, prod_const_one]
  simp only [List.map, List.prod_cons, toFinset_cons, IH]
  -- Whether the head `a` already occurs in the tail decides if `toFinset` grows.
  by_cases has : a ∈ s.toFinset
  · rw [insert_eq_of_mem has, ← insert_erase has, prod_insert (not_mem_erase _ _),
      prod_insert (not_mem_erase _ _), ← mul_assoc, count_cons_self, pow_succ]
    congr 1
    refine' prod_congr rfl fun x hx => _
    rw [count_cons_of_ne (ne_of_mem_erase hx)]
  rw [prod_insert has, count_cons_self, count_eq_zero_of_not_mem (mt mem_toFinset.2 has), pow_one]
  congr 1
  refine' prod_congr rfl fun x hx => _
  rw [count_cons_of_ne]
  rintro rfl
  exact has hx
#align finset.prod_list_map_count Finset.prod_list_map_count
#align finset.sum_list_map_count Finset.sum_list_map_count
-- Specialization of `prod_list_map_count` to `f = id`.
@[to_additive]
theorem prod_list_count [DecidableEq α] [CommMonoid α] (s : List α) :
    s.prod = ∏ m in s.toFinset, m ^ s.count m := by simpa using prod_list_map_count s id
#align finset.prod_list_count Finset.prod_list_count
#align finset.sum_list_count Finset.sum_list_count
-- The index set may be enlarged to any superset of `m.toFinset`: extra indices have count `0`.
@[to_additive]
theorem prod_list_count_of_subset [DecidableEq α] [CommMonoid α] (m : List α) (s : Finset α)
    (hs : m.toFinset ⊆ s) : m.prod = ∏ i in s, i ^ m.count i := by
  rw [prod_list_count]
  refine' prod_subset hs fun x _ hx => _
  rw [mem_toFinset] at hx
  rw [count_eq_zero_of_not_mem hx, pow_zero]
#align finset.prod_list_count_of_subset Finset.prod_list_count_of_subset
#align finset.sum_list_count_of_subset Finset.sum_list_count_of_subset
-- Summing counts of the distinct elements satisfying `p` recovers `countp p`.
theorem sum_filter_count_eq_countp [DecidableEq α] (p : α → Prop) [DecidablePred p] (l : List α) :
    (∑ x in l.toFinset.filter p, l.count x) = l.countp p := by
  simp [Finset.sum, sum_map_count_dedup_filter_eq_countp p l]
#align finset.sum_filter_count_eq_countp Finset.sum_filter_count_eq_countp
open Multiset
-- Multiset analogue of `prod_list_map_count`, proved by descending to a representative list.
@[to_additive]
theorem prod_multiset_map_count [DecidableEq α] (s : Multiset α) {M : Type _} [CommMonoid M]
    (f : α → M) : (s.map f).prod = ∏ m in s.toFinset, f m ^ s.count m := by
  refine' Quot.induction_on s fun l => _
  simp [prod_list_map_count l f]
#align finset.prod_multiset_map_count Finset.prod_multiset_map_count
#align finset.sum_multiset_map_count Finset.sum_multiset_map_count
-- Specialization of `prod_multiset_map_count` to `f = id`.
@[to_additive]
theorem prod_multiset_count [DecidableEq α] [CommMonoid α] (s : Multiset α) :
    s.prod = ∏ m in s.toFinset, m ^ s.count m := by
  convert prod_multiset_map_count s id
  rw [Multiset.map_id]
#align finset.prod_multiset_count Finset.prod_multiset_count
#align finset.sum_multiset_count Finset.sum_multiset_count
-- Multiset analogue of `prod_list_count_of_subset`.
@[to_additive]
theorem prod_multiset_count_of_subset [DecidableEq α] [CommMonoid α] (m : Multiset α) (s : Finset α)
    (hs : m.toFinset ⊆ s) : m.prod = ∏ i in s, i ^ m.count i := by
  revert hs
  refine' Quot.induction_on m fun l => _
  simp only [quot_mk_to_coe'', coe_prod, coe_count]
  apply prod_list_count_of_subset l s
#align finset.prod_multiset_count_of_subset Finset.prod_multiset_count_of_subset
#align finset.sum_multiset_count_of_subset Finset.sum_multiset_count_of_subset
-- A product over the subtype of members of a multiset equals a product over its `toFinset`,
-- for any `g` extending `f` off the subtype.
@[to_additive]
theorem prod_mem_multiset [DecidableEq α] (m : Multiset α) (f : { x // x ∈ m } → β) (g : α → β)
    (hfg : ∀ x, f x = g x) : (∏ x : { x // x ∈ m }, f x) = ∏ x in m.toFinset, g x :=
  prod_bij (fun x _ => x.1) (fun x _ => Multiset.mem_toFinset.mpr x.2) (fun _ _ => hfg _)
    (fun _ _ _ _ h => by
      ext
      assumption)
    fun y hy => ⟨⟨y, Multiset.mem_toFinset.mp hy⟩, Finset.mem_univ _, rfl⟩
#align finset.prod_mem_multiset Finset.prod_mem_multiset
#align finset.sum_mem_multiset Finset.sum_mem_multiset
/-- To prove a property of a product, it suffices to prove that
the property is multiplicative and holds on factors. -/
@[to_additive "To prove a property of a sum, it suffices to prove that
the property is additive and holds on summands."]
theorem prod_induction {M : Type _} [CommMonoid M] (f : α → M) (p : M → Prop)
    (hom : ∀ a b, p a → p b → p (a * b)) (unit : p 1) (base : ∀ x ∈ s, p <| f x) :
    p <| ∏ x in s, f x :=
  -- Reduce to the corresponding statement for the underlying multiset.
  Multiset.prod_induction _ _ hom unit (Multiset.forall_mem_map_iff.mpr base)
#align finset.prod_induction Finset.prod_induction
#align finset.sum_induction Finset.sum_induction
/-- To prove a property of a product, it suffices to prove that
the property is multiplicative and holds on factors. -/
@[to_additive "To prove a property of a sum, it suffices to prove that
the property is additive and holds on summands."]
theorem prod_induction_nonempty {M : Type _} [CommMonoid M] (f : α → M) (p : M → Prop)
    (hom : ∀ a b, p a → p b → p (a * b)) (nonempty : s.Nonempty) (base : ∀ x ∈ s, p <| f x) :
    p <| ∏ x in s, f x :=
  -- Nonemptiness of `s` replaces the `p 1` hypothesis of `prod_induction`.
  Multiset.prod_induction_nonempty p hom (by simp [nonempty_iff_ne_empty.mp nonempty])
    (Multiset.forall_mem_map_iff.mpr base)
#align finset.prod_induction_nonempty Finset.prod_induction_nonempty
#align finset.sum_induction_nonempty Finset.sum_induction_nonempty
/-- For any product along `{0, ..., n - 1}` of a commutative-monoid-valued function, we can verify
that it's equal to a different function just by checking ratios of adjacent terms.
This is a multiplicative discrete analogue of the fundamental theorem of calculus. -/
@[to_additive "For any sum along `{0, ..., n - 1}` of a commutative-monoid-valued function, we can
verify that it's equal to a different function just by checking differences of adjacent terms.
This is a discrete analogue of the fundamental theorem of calculus."]
theorem prod_range_induction (f s : ℕ → β) (base : s 0 = 1)
    (step : ∀ n, s (n + 1) = s n * f n) (n : ℕ) :
    (∏ k in Finset.range n, f k) = s n := by
  -- Induction on `n`, peeling the last factor with `prod_range_succ`.
  induction' n with k hk
  · rw [Finset.prod_range_zero, base]
  · simp only [hk, Finset.prod_range_succ, step, mul_comm]
#align finset.prod_range_induction Finset.prod_range_induction
#align finset.sum_range_induction Finset.sum_range_induction
/-- A telescoping product along `{0, ..., n - 1}` of a commutative group valued function reduces to
the ratio of the last and first factors. -/
@[to_additive "A telescoping sum along `{0, ..., n - 1}` of an additive commutative group valued
function reduces to the difference of the last and first terms."]
theorem prod_range_div {M : Type _} [CommGroup M] (f : ℕ → M) (n : ℕ) :
    (∏ i in range n, f (i + 1) / f i) = f n / f 0 := by apply prod_range_induction <;> simp
#align finset.prod_range_div Finset.prod_range_div
#align finset.sum_range_sub Finset.sum_range_sub
-- Telescoping product with the quotient taken in the other direction.
@[to_additive]
theorem prod_range_div' {M : Type _} [CommGroup M] (f : ℕ → M) (n : ℕ) :
    (∏ i in range n, f i / f (i + 1)) = f 0 / f n := by apply prod_range_induction <;> simp
#align finset.prod_range_div' Finset.prod_range_div'
#align finset.sum_range_sub' Finset.sum_range_sub'
-- Reconstruct `f n` from `f 0` and the telescoping product of consecutive ratios.
@[to_additive]
theorem eq_prod_range_div {M : Type _} [CommGroup M] (f : ℕ → M) (n : ℕ) :
    f n = f 0 * ∏ i in range n, f (i + 1) / f i := by rw [prod_range_div, mul_div_cancel'_right]
#align finset.eq_prod_range_div Finset.eq_prod_range_div
#align finset.eq_sum_range_sub Finset.eq_sum_range_sub
-- Variant of `eq_prod_range_div` folding the initial `f 0` into the `i = 0` factor.
@[to_additive]
theorem eq_prod_range_div' {M : Type _} [CommGroup M] (f : ℕ → M) (n : ℕ) :
    f n = ∏ i in range (n + 1), if i = 0 then f 0 else f i / f (i - 1) := by
  conv_lhs => rw [Finset.eq_prod_range_div f]
  simp [Finset.prod_range_succ', mul_comm]
#align finset.eq_prod_range_div' Finset.eq_prod_range_div'
#align finset.eq_sum_range_sub' Finset.eq_sum_range_sub'
/-- A telescoping sum along `{0, ..., n-1}` of an `ℕ`-valued function
reduces to the difference of the last and first terms
when the function we are summing is monotone.
-/
theorem sum_range_tsub [CanonicallyOrderedAddMonoid α] [Sub α] [OrderedSub α]
    [ContravariantClass α α (· + ·) (· ≤ ·)] {f : ℕ → α} (h : Monotone f) (n : ℕ) :
    ∑ i in range n, (f (i + 1) - f i) = f n - f 0 := by
  apply sum_range_induction
  case base => apply tsub_self
  case step =>
    intro n
    -- Monotonicity makes both truncated subtractions honest subtractions here.
    have h₁ : f n ≤ f (n + 1) := h (Nat.le_succ _)
    have h₂ : f 0 ≤ f n := h (Nat.zero_le _)
    rw [tsub_add_eq_add_tsub h₂, add_tsub_cancel_of_le h₁]
#align finset.sum_range_tsub Finset.sum_range_tsub
-- A constant product is a power: `∏ _ in s, b = b ^ s.card`.
@[to_additive (attr := simp)]
theorem prod_const (b : β) : (∏ _x in s, b) = b ^ s.card :=
  (congr_arg _ <| s.val.map_const b).trans <| Multiset.prod_replicate s.card b
#align finset.prod_const Finset.prod_const
#align finset.sum_const Finset.sum_const
-- If `f` is constantly `b` on `s`, the product is `b ^ s.card`.
@[to_additive sum_eq_card_nsmul]
theorem prod_eq_pow_card {b : β} (hf : ∀ a ∈ s, f a = b) : (∏ a in s, f a) = b ^ s.card :=
  (prod_congr rfl hf).trans <| prod_const _
#align finset.prod_eq_pow_card Finset.prod_eq_pow_card
#align finset.sum_eq_card_nsmul Finset.sum_eq_card_nsmul
-- `prod_const` read right-to-left: a power as a constant product over `range n`.
@[to_additive]
theorem pow_eq_prod_const (b : β) : ∀ n, b ^ n = ∏ _k in range n, b := by simp
#align finset.pow_eq_prod_const Finset.pow_eq_prod_const
#align finset.nsmul_eq_sum_const Finset.nsmul_eq_sum_const
-- A fixed power distributes over the product (commutativity of `β`).
@[to_additive]
theorem prod_pow (s : Finset α) (n : ℕ) (f : α → β) : (∏ x in s, f x ^ n) = (∏ x in s, f x) ^ n :=
  Multiset.prod_map_pow
#align finset.prod_pow Finset.prod_pow
#align finset.sum_nsmul Finset.sum_nsmul
-- Reversing the order of factors over `range (n + 1)` (index `r ↦ n - r`) leaves the
-- product unchanged.
@[to_additive]
theorem prod_flip {n : ℕ} (f : ℕ → β) :
    (∏ r in range (n + 1), f (n - r)) = ∏ k in range (n + 1), f k := by
  induction' n with n ih
  · rw [prod_range_one, prod_range_one]
  · rw [prod_range_succ', prod_range_succ _ (Nat.succ n)]
    simp [← ih]
#align finset.prod_flip Finset.prod_flip
#align finset.sum_flip Finset.sum_flip
-- If the factors of `s` can be paired off by an involution `g` (fixed-point-free away from
-- `f a = 1`) such that each pair multiplies to `1`, the whole product is `1`.
-- Proof: strong induction on `s`, removing one pair `{x, g x}` at a time.
@[to_additive]
theorem prod_involution {s : Finset α} {f : α → β} :
    ∀ (g : ∀ a ∈ s, α) (_ : ∀ a ha, f a * f (g a ha) = 1) (_ : ∀ a ha, f a ≠ 1 → g a ha ≠ a)
      (g_mem : ∀ a ha, g a ha ∈ s) (_ : ∀ a ha, g (g a ha) (g_mem a ha) = a),
      (∏ x in s, f x) = 1 := by
  haveI := Classical.decEq α; haveI := Classical.decEq β
  exact
    Finset.strongInductionOn s fun s ih g h g_ne g_mem g_inv =>
      s.eq_empty_or_nonempty.elim (fun hs => hs.symm ▸ rfl) fun ⟨x, hx⟩ =>
        -- `hmem`: membership survives erasing `x` and `g x`.
        have hmem : ∀ y ∈ (s.erase x).erase (g x hx), y ∈ s := fun y hy =>
          mem_of_mem_erase (mem_of_mem_erase hy)
        -- `g` is injective since it is an involution.
        have g_inj : ∀ {x hx y hy}, g x hx = g y hy → x = y := fun {x hx y hy} h => by
          rw [← g_inv x hx, ← g_inv y hy]; simp [h]
        -- Induction hypothesis applied to `s` with the pair `{x, g x}` removed; the long
        -- argument list re-verifies the pairing hypotheses on the smaller set.
        have ih' : (∏ y in erase (erase s x) (g x hx), f y) = (1 : β) :=
          ih ((s.erase x).erase (g x hx))
            ⟨Subset.trans (erase_subset _ _) (erase_subset _ _), fun h =>
              not_mem_erase (g x hx) (s.erase x) (h (g_mem x hx))⟩
            (fun y hy => g y (hmem y hy)) (fun y hy => h y (hmem y hy))
            (fun y hy => g_ne y (hmem y hy))
            (fun y hy =>
              mem_erase.2
                ⟨fun h : g y _ = g x hx => by simp [g_inj h] at hy,
                  mem_erase.2
                    ⟨fun h : g y _ = x => by
                      have : y = g x hx := g_inv y (hmem y hy) ▸ by simp [h]
                      simp [this] at hy, g_mem y (hmem y hy)⟩⟩)
            fun y hy => g_inv y (hmem y hy)
        -- If `f x = 1` the removed "pair" may be degenerate (`g x = x`); handle it by a
        -- subset argument instead of factoring out `f x * f (g x)`.
        if hx1 : f x = 1 then
          ih' ▸
            Eq.symm
              (prod_subset hmem fun y hy hy₁ =>
                have : y = x ∨ y = g x hx := by
                  simpa [hy, -not_and, mem_erase, not_and_or, or_comm] using hy₁
                this.elim (fun hy => hy.symm ▸ hx1) fun hy =>
                  h x hx ▸ hy ▸ hx1.symm ▸ (one_mul _).symm)
        else by
          rw [← insert_erase hx, prod_insert (not_mem_erase _ _), ←
            insert_erase (mem_erase.2 ⟨g_ne x hx hx1, g_mem x hx⟩),
            prod_insert (not_mem_erase _ _), ih', mul_one, h x hx]
#align finset.prod_involution Finset.prod_involution
#align finset.sum_involution Finset.sum_involution
/-- The product of the composition of functions `f` and `g`, is the product over `b ∈ s.image g` of
`f b` to the power of the cardinality of the fibre of `b`. See also `Finset.prod_image`. -/
@[to_additive "The sum of the composition of functions `f` and `g`, is the sum over `b ∈ s.image g`
of `f b` times of the cardinality of the fibre of `b`. See also `Finset.sum_image`."]
theorem prod_comp [DecidableEq γ] (f : γ → β) (g : α → γ) :
    (∏ a in s, f (g a)) = ∏ b in s.image g, f b ^ (s.filter fun a => g a = b).card :=
  -- Reindex over the sigma type of fibres, regroup with `prod_sigma`, then each inner
  -- product is constant and collapses to a power.
  calc
    (∏ a in s, f (g a)) =
        ∏ x in (s.image g).sigma fun b : γ => s.filter fun a => g a = b, f (g x.2) :=
      prod_bij (fun a _ha => ⟨g a, a⟩) (by simp; tauto) (fun _ _ => rfl) (by simp)
        (by -- `(by finish)` closes this
          rintro ⟨b_fst, b_snd⟩ H
          simp only [mem_image, exists_prop, mem_filter, mem_sigma, decide_eq_true_eq] at H
          tauto)
    _ = ∏ b in s.image g, ∏ a in s.filter fun a => g a = b, f (g a) := prod_sigma _ _ _
    _ = ∏ b in s.image g, ∏ _a in s.filter fun a => g a = b, f b :=
      prod_congr rfl fun b _hb => prod_congr rfl (by simp (config := { contextual := true }))
    _ = ∏ b in s.image g, f b ^ (s.filter fun a => g a = b).card :=
      prod_congr rfl fun _ _ => prod_const _
#align finset.prod_comp Finset.prod_comp
#align finset.sum_comp Finset.sum_comp
-- A product of `t.piecewise f g` over `s` splits at the boundary of `t`:
-- `f` contributes on `s ∩ t` and `g` on `s \ t`.
@[to_additive]
theorem prod_piecewise [DecidableEq α] (s t : Finset α) (f g : α → β) :
    (∏ x in s, (t.piecewise f g) x) = (∏ x in s ∩ t, f x) * ∏ x in s \ t, g x := by
  erw [prod_ite, filter_mem_eq_inter, ← sdiff_eq_filter]
#align finset.prod_piecewise Finset.prod_piecewise
#align finset.sum_piecewise Finset.sum_piecewise
-- A product over `s` splits as the product over `s ∩ t` times the product over `s \ t`.
@[to_additive]
theorem prod_inter_mul_prod_diff [DecidableEq α] (s t : Finset α) (f : α → β) :
    ((∏ x in s ∩ t, f x) * ∏ x in s \ t, f x) = ∏ x in s, f x := by
  -- Specialize `prod_piecewise` with both branches equal to `f`.
  convert (s.prod_piecewise t f f).symm
  simp [Finset.piecewise]
#align finset.prod_inter_mul_prod_diff Finset.prod_inter_mul_prod_diff
#align finset.sum_inter_add_sum_diff Finset.sum_inter_add_sum_diff
-- Pull the factor at `i ∈ s` out in front of the product over `s \ {i}`.
@[to_additive]
theorem prod_eq_mul_prod_diff_singleton [DecidableEq α] {s : Finset α} {i : α} (h : i ∈ s)
    (f : α → β) : (∏ x in s, f x) = f i * ∏ x in s \ {i}, f x := by
  convert (s.prod_inter_mul_prod_diff {i} f).symm
  simp [h]
#align finset.prod_eq_mul_prod_diff_singleton Finset.prod_eq_mul_prod_diff_singleton
#align finset.sum_eq_add_sum_diff_singleton Finset.sum_eq_add_sum_diff_singleton
-- As above, but with the singled-out factor on the right.
@[to_additive]
theorem prod_eq_prod_diff_singleton_mul [DecidableEq α] {s : Finset α} {i : α} (h : i ∈ s)
    (f : α → β) : (∏ x in s, f x) = (∏ x in s \ {i}, f x) * f i := by
  rw [prod_eq_mul_prod_diff_singleton h, mul_comm]
#align finset.prod_eq_prod_diff_singleton_mul Finset.prod_eq_prod_diff_singleton_mul
#align finset.sum_eq_sum_diff_singleton_add Finset.sum_eq_sum_diff_singleton_add
@[to_additive]
theorem _root_.Fintype.prod_eq_mul_prod_compl [DecidableEq α] [Fintype α] (a : α) (f : α → β) :
(∏ i, f i) = f a * ∏ i in {a}ᶜ, f i :=
prod_eq_mul_prod_diff_singleton (mem_univ a) f
#align fintype.prod_eq_mul_prod_compl Fintype.prod_eq_mul_prod_compl
#align fintype.sum_eq_add_sum_compl Fintype.sum_eq_add_sum_compl
@[to_additive]
theorem _root_.Fintype.prod_eq_prod_compl_mul [DecidableEq α] [Fintype α] (a : α) (f : α → β) :
(∏ i, f i) = (∏ i in {a}ᶜ, f i) * f a :=
prod_eq_prod_diff_singleton_mul (mem_univ a) f
#align fintype.prod_eq_prod_compl_mul Fintype.prod_eq_prod_compl_mul
#align fintype.sum_eq_sum_compl_add Fintype.sum_eq_sum_compl_add
theorem dvd_prod_of_mem (f : α → β) {a : α} {s : Finset α} (ha : a ∈ s) : f a ∣ ∏ i in s, f i := by
classical
rw [Finset.prod_eq_mul_prod_diff_singleton ha]
exact dvd_mul_right _ _
#align finset.dvd_prod_of_mem Finset.dvd_prod_of_mem
/-- A product can be partitioned into a product of products, each equivalent under a setoid. -/
@[to_additive "A sum can be partitioned into a sum of sums, each equivalent under a setoid."]
theorem prod_partition (R : Setoid α) [DecidableRel R.r] :
    (∏ x in s, f x) = ∏ xbar in s.image Quotient.mk'', ∏ y in s.filter (⟦·⟧ = xbar), f y := by
  -- `prod_image'` already groups the product over `s` by fibres of `Quotient.mk''`.
  refine' (Finset.prod_image' f fun x _hx => _).symm
  rfl
#align finset.prod_partition Finset.prod_partition
#align finset.sum_partition Finset.sum_partition
/-- If we can partition a product into subsets that cancel out, then the whole product cancels. -/
@[to_additive "If we can partition a sum into subsets that cancel out, then the whole sum cancels."]
theorem prod_cancels_of_partition_cancels (R : Setoid α) [DecidableRel R.r]
    (h : ∀ x ∈ s, (∏ a in s.filter fun y => y ≈ x, f a) = 1) : (∏ x in s, f x) = 1 := by
  rw [prod_partition R, ← Finset.prod_eq_one]
  intro xbar xbar_in_s
  obtain ⟨x, x_in_s, rfl⟩ := mem_image.mp xbar_in_s
  -- Rephrase the quotient-level filter as the setoid relation so `h` applies.
  simp only [← Quotient.eq] at h
  exact h x x_in_s
#align finset.prod_cancels_of_partition_cancels Finset.prod_cancels_of_partition_cancels
#align finset.sum_cancels_of_partition_cancels Finset.sum_cancels_of_partition_cancels
-- Updating `f` at a point outside `s` does not change the product over `s`.
@[to_additive]
theorem prod_update_of_not_mem [DecidableEq α] {s : Finset α} {i : α} (h : i ∉ s) (f : α → β)
    (b : β) : (∏ x in s, Function.update f i b x) = ∏ x in s, f x := by
  apply prod_congr rfl
  intros j hj
  have : j ≠ i := by
    rintro rfl
    exact h hj
  simp [this]
#align finset.prod_update_of_not_mem Finset.prod_update_of_not_mem
#align finset.sum_update_of_not_mem Finset.sum_update_of_not_mem
-- Updating `f` at `i ∈ s` replaces the factor `f i` by `b`.
@[to_additive]
theorem prod_update_of_mem [DecidableEq α] {s : Finset α} {i : α} (h : i ∈ s) (f : α → β) (b : β) :
    (∏ x in s, Function.update f i b x) = b * ∏ x in s \ singleton i, f x := by
  rw [update_eq_piecewise, prod_piecewise]
  simp [h]
#align finset.prod_update_of_mem Finset.prod_update_of_mem
#align finset.sum_update_of_mem Finset.sum_update_of_mem
/-- If a product of a `Finset` of size at most 1 has a given value, so
do the terms in that product. -/
@[to_additive eq_of_card_le_one_of_sum_eq "If a sum of a `Finset` of size at most 1 has a given
value, so do the terms in that sum."]
theorem eq_of_card_le_one_of_prod_eq {s : Finset α} (hc : s.card ≤ 1) {f : α → β} {b : β}
    (h : (∏ x in s, f x) = b) : ∀ x ∈ s, f x = b := by
  intro x hx
  -- `x ∈ s` rules out the empty case, so `s` is a singleton and the product is its one term.
  by_cases hc0 : s.card = 0
  · exact False.elim (card_ne_zero_of_mem hx hc0)
  · have h1 : s.card = 1 := le_antisymm hc (Nat.one_le_of_lt (Nat.pos_of_ne_zero hc0))
    rw [card_eq_one] at h1
    cases' h1 with x2 hx2
    rw [hx2, mem_singleton] at hx
    simp_rw [hx2] at h
    rw [hx]
    rw [prod_singleton] at h
    exact h
#align finset.eq_of_card_le_one_of_prod_eq Finset.eq_of_card_le_one_of_prod_eq
#align finset.eq_of_card_le_one_of_sum_eq Finset.eq_of_card_le_one_of_sum_eq
/-- Taking a product over `s : Finset α` is the same as multiplying the value on a single element
`f a` by the product of `s.erase a`.

See `Multiset.prod_map_erase` for the `Multiset` version. -/
@[to_additive "Taking a sum over `s : Finset α` is the same as adding the value on a single element
`f a` to the sum over `s.erase a`.

See `Multiset.sum_map_erase` for the `Multiset` version."]
theorem mul_prod_erase [DecidableEq α] (s : Finset α) (f : α → β) {a : α} (h : a ∈ s) :
    (f a * ∏ x in s.erase a, f x) = ∏ x in s, f x := by
  rw [← prod_insert (not_mem_erase a s), insert_erase h]
#align finset.mul_prod_erase Finset.mul_prod_erase
#align finset.add_sum_erase Finset.add_sum_erase
/-- A variant of `Finset.mul_prod_erase` with the multiplication swapped. -/
@[to_additive "A variant of `Finset.add_sum_erase` with the addition swapped."]
theorem prod_erase_mul [DecidableEq α] (s : Finset α) (f : α → β) {a : α} (h : a ∈ s) :
    (∏ x in s.erase a, f x) * f a = ∏ x in s, f x := by rw [mul_comm, mul_prod_erase s f h]
#align finset.prod_erase_mul Finset.prod_erase_mul
#align finset.sum_erase_add Finset.sum_erase_add
/-- If a function applied at a point is 1, a product is unchanged by
removing that point, if present, from a `Finset`. -/
@[to_additive "If a function applied at a point is 0, a sum is unchanged by
removing that point, if present, from a `Finset`."]
theorem prod_erase [DecidableEq α] (s : Finset α) {f : α → β} {a : α} (h : f a = 1) :
    (∏ x in s.erase a, f x) = ∏ x in s, f x := by
  rw [← sdiff_singleton_eq_erase]
  -- `prod_subset`: the extra elements of `s` beyond `s \ {a}` can only be `a`, where `f a = 1`.
  refine' prod_subset (sdiff_subset _ _) fun x hx hnx => _
  rw [sdiff_singleton_eq_erase] at hnx
  rwa [eq_of_mem_of_not_mem_erase hx hnx]
#align finset.prod_erase Finset.prod_erase
#align finset.sum_erase Finset.sum_erase
/-- See also `Finset.prod_boole`. -/
@[to_additive "See also `Finset.sum_boole`."]
theorem prod_ite_one {f : α → Prop} [DecidablePred f] (hf : (s : Set α).PairwiseDisjoint f)
    (a : β) : (∏ i in s, ite (f i) a 1) = ite (∃ i ∈ s, f i) a 1 := by
  -- Pairwise disjointness guarantees at most one `i ∈ s` satisfies `f i`,
  -- so the product has at most one non-trivial factor.
  split_ifs with h
  · obtain ⟨i, hi, hfi⟩ := h
    rw [prod_eq_single_of_mem _ hi, if_pos hfi]
    exact fun j hj h => if_neg fun hfj => (hf hj hi h).le_bot ⟨hfj, hfi⟩
  · push_neg at h
    rw [prod_eq_one]
    exact fun i hi => if_neg (h i hi)
#align finset.prod_ite_one Finset.prod_ite_one
#align finset.sum_ite_zero Finset.sum_ite_zero
-- Removing an element with positive value strictly decreases an ordered sum.
theorem sum_erase_lt_of_pos {γ : Type _} [DecidableEq α] [OrderedAddCommMonoid γ]
    [CovariantClass γ γ (· + ·) (· < ·)] {s : Finset α} {d : α} (hd : d ∈ s) {f : α → γ}
    (hdf : 0 < f d) : (∑ m : α in s.erase d, f m) < ∑ m : α in s, f m := by
  conv in ∑ m in s, f m => rw [← Finset.insert_erase hd]
  rw [Finset.sum_insert (Finset.not_mem_erase d s)]
  exact lt_add_of_pos_left _ hdf
#align finset.sum_erase_lt_of_pos Finset.sum_erase_lt_of_pos
/-- If a product is 1 and the function is 1 except possibly at one
point, it is 1 everywhere on the `Finset`. -/
@[to_additive "If a sum is 0 and the function is 0 except possibly at one
point, it is 0 everywhere on the `Finset`."]
theorem eq_one_of_prod_eq_one {s : Finset α} {f : α → β} {a : α} (hp : (∏ x in s, f x) = 1)
    (h1 : ∀ x ∈ s, x ≠ a → f x = 1) : ∀ x ∈ s, f x = 1 := by
  intro x hx
  classical
    by_cases h : x = a
    · rw [h]
      rw [h] at hx
      -- Shrink the product to the singleton `{a}`; the discarded factors are all 1.
      rw [← prod_subset (singleton_subset_iff.2 hx) fun t ht ha => h1 t ht (not_mem_singleton.1 ha),
        prod_singleton] at hp
      exact hp
    · exact h1 x hx h
#align finset.eq_one_of_prod_eq_one Finset.eq_one_of_prod_eq_one
#align finset.eq_zero_of_sum_eq_zero Finset.eq_zero_of_sum_eq_zero
-- A product of indicator powers picks out `f a` precisely when `a ∈ s`.
theorem prod_pow_boole [DecidableEq α] (s : Finset α) (f : α → β) (a : α) :
    (∏ x in s, f x ^ ite (a = x) 1 0) = ite (a ∈ s) (f a) 1 := by simp
#align finset.prod_pow_boole Finset.prod_pow_boole
-- Pointwise divisibility of factors gives divisibility of the products.
theorem prod_dvd_prod_of_dvd {S : Finset α} (g1 g2 : α → β) (h : ∀ a ∈ S, g1 a ∣ g2 a) :
    S.prod g1 ∣ S.prod g2 := by
  classical
    induction' S using Finset.induction_on' with a T _haS _hTS haT IH
    · simp
    rw [Finset.prod_insert haT, Finset.prod_insert haT]
    exact mul_dvd_mul (h a $ T.mem_insert_self a) (IH fun b hb ↦ h b $ Finset.mem_insert_of_mem hb)
#align finset.prod_dvd_prod_of_dvd Finset.prod_dvd_prod_of_dvd
-- A product over a subset divides the product over the superset.
theorem prod_dvd_prod_of_subset {ι M : Type _} [CommMonoid M] (s t : Finset ι) (f : ι → M)
    (h : s ⊆ t) : (∏ i in s, f i) ∣ ∏ i in t, f i :=
  Multiset.prod_dvd_prod_of_le <| Multiset.map_le_map <| by simpa
#align finset.prod_dvd_prod_of_subset Finset.prod_dvd_prod_of_subset
end CommMonoid
/-- If `f = g = h` everywhere but at `i`, where `f i = g i + h i`, then the product of `f` over `s`
is the sum of the products of `g` and `h`. -/
theorem prod_add_prod_eq [CommSemiring β] {s : Finset α} {i : α} {f g h : α → β} (hi : i ∈ s)
    (h1 : g i + h i = f i) (h2 : ∀ j ∈ s, j ≠ i → g j = f j) (h3 : ∀ j ∈ s, j ≠ i → h j = f j) :
    ((∏ i in s, g i) + ∏ i in s, h i) = ∏ i in s, f i := by
  classical
    -- Peel off the factor at `i` from each product; distributivity then reduces the goal
    -- to matching the remaining factors, which agree by `h2`/`h3`.
    simp_rw [prod_eq_mul_prod_diff_singleton hi, ← h1, right_distrib]
    congr 2 <;> apply prod_congr rfl <;> simpa
#align finset.prod_add_prod_eq Finset.prod_add_prod_eq
-- The cardinality of `s` as a sum of ones.
theorem card_eq_sum_ones (s : Finset α) : s.card = ∑ x in s, 1 := by
  rw [sum_const, smul_eq_mul, mul_one]
#align finset.card_eq_sum_ones Finset.card_eq_sum_ones
-- A sum of a constant natural-valued function is `card * value`.
theorem sum_const_nat {m : ℕ} {f : α → ℕ} (h₁ : ∀ x ∈ s, f x = m) :
    (∑ x in s, f x) = card s * m := by
  rw [← Nat.nsmul_eq_mul, ← sum_const]
  apply sum_congr rfl h₁
#align finset.sum_const_nat Finset.sum_const_nat
-- Summing the indicator of `p` counts the elements satisfying `p`.
@[simp]
theorem sum_boole {s : Finset α} {p : α → Prop} [NonAssocSemiring β] {hp : DecidablePred p} :
    (∑ x in s, if p x then (1 : β) else (0 : β)) = (s.filter p).card := by
  simp only [add_zero, mul_one, Finset.sum_const, nsmul_eq_mul, eq_self_iff_true,
    Finset.sum_const_zero, Finset.sum_ite, mul_zero]
#align finset.sum_boole Finset.sum_boole
-- An element commuting with every summand commutes with the sum.
theorem _root_.Commute.sum_right [NonUnitalNonAssocSemiring β] (s : Finset α) (f : α → β) (b : β)
    (h : ∀ i ∈ s, Commute b (f i)) : Commute b (∑ i in s, f i) :=
  (Commute.multiset_sum_right _ _) fun b hb => by
    obtain ⟨i, hi, rfl⟩ := Multiset.mem_map.mp hb
    exact h _ hi
#align commute.sum_right Commute.sum_right
theorem _root_.Commute.sum_left [NonUnitalNonAssocSemiring β] (s : Finset α) (f : α → β) (b : β)
    (h : ∀ i ∈ s, Commute (f i) b) : Commute (∑ i in s, f i) b :=
  ((Commute.sum_right _ _ _) fun _i hi => (h _ hi).symm).symm
#align commute.sum_left Commute.sum_left
section Opposite

open MulOpposite

/-- Moving to the opposite additive commutative monoid commutes with summing. -/
@[simp]
theorem op_sum [AddCommMonoid β] {s : Finset α} (f : α → β) :
    op (∑ x in s, f x) = ∑ x in s, op (f x) :=
  (opAddEquiv : β ≃+ βᵐᵒᵖ).map_sum _ _
#align finset.op_sum Finset.op_sum
-- The inverse direction: `unop` also commutes with summing.
@[simp]
theorem unop_sum [AddCommMonoid β] {s : Finset α} (f : α → βᵐᵒᵖ) :
    unop (∑ x in s, f x) = ∑ x in s, unop (f x) :=
  (opAddEquiv : β ≃+ βᵐᵒᵖ).symm.map_sum _ _
#align finset.unop_sum Finset.unop_sum
end Opposite
section DivisionCommMonoid

-- Inversion, division and integer powers distribute over finite products
-- in a `DivisionCommMonoid`; all three reduce to the `Multiset` versions.
variable [DivisionCommMonoid β]

@[to_additive (attr := simp)]
theorem prod_inv_distrib : (∏ x in s, (f x)⁻¹) = (∏ x in s, f x)⁻¹ :=
  Multiset.prod_map_inv
#align finset.prod_inv_distrib Finset.prod_inv_distrib
#align finset.sum_neg_distrib Finset.sum_neg_distrib
@[to_additive (attr := simp)]
theorem prod_div_distrib : (∏ x in s, f x / g x) = (∏ x in s, f x) / ∏ x in s, g x :=
  Multiset.prod_map_div
#align finset.prod_div_distrib Finset.prod_div_distrib
#align finset.sum_sub_distrib Finset.sum_sub_distrib
@[to_additive]
theorem prod_zpow (f : α → β) (s : Finset α) (n : ℤ) : (∏ a in s, f a ^ n) = (∏ a in s, f a) ^ n :=
  Multiset.prod_map_zpow
#align finset.prod_zpow Finset.prod_zpow
#align finset.sum_zsmul Finset.sum_zsmul
end DivisionCommMonoid
section CommGroup

-- In a commutative group, set differences and erasures translate into
-- genuine division of products.
variable [CommGroup β] [DecidableEq α]

@[to_additive (attr := simp)]
theorem prod_sdiff_eq_div (h : s₁ ⊆ s₂) :
    (∏ x in s₂ \ s₁, f x) = (∏ x in s₂, f x) / ∏ x in s₁, f x := by
  rw [eq_div_iff_mul_eq', prod_sdiff h]
#align finset.prod_sdiff_eq_div Finset.prod_sdiff_eq_div
#align finset.sum_sdiff_eq_sub Finset.sum_sdiff_eq_sub
@[to_additive]
theorem prod_sdiff_div_prod_sdiff :
    ((∏ x in s₂ \ s₁, f x) / ∏ x in s₁ \ s₂, f x) = (∏ x in s₂, f x) / ∏ x in s₁, f x := by
  -- Both sides share the common factor over `s₁ ∩ s₂`, which cancels.
  simp [← Finset.prod_sdiff (@inf_le_left _ _ s₁ s₂), ← Finset.prod_sdiff (@inf_le_right _ _ s₁ s₂)]
#align finset.prod_sdiff_div_prod_sdiff Finset.prod_sdiff_div_prod_sdiff
#align finset.sum_sdiff_sub_sum_sdiff Finset.sum_sdiff_sub_sum_sdiff
@[to_additive (attr := simp)]
theorem prod_erase_eq_div {a : α} (h : a ∈ s) :
    (∏ x in s.erase a, f x) = (∏ x in s, f x) / f a := by
  rw [eq_div_iff_mul_eq', prod_erase_mul _ _ h]
#align finset.prod_erase_eq_div Finset.prod_erase_eq_div
#align finset.sum_erase_eq_sub Finset.sum_erase_eq_sub
end CommGroup
-- Cardinality of a dependent product of finsets is the sum of the fibre cardinalities.
@[simp]
theorem card_sigma {σ : α → Type _} (s : Finset α) (t : ∀ a, Finset (σ a)) :
    card (s.sigma t) = ∑ a in s, card (t a) :=
  Multiset.card_sigma _ _
#align finset.card_sigma Finset.card_sigma
-- Cardinality of a disjoint union (disjointness supplied as hypothesis `h`).
@[simp]
theorem card_disjUnionᵢ (s : Finset α) (t : α → Finset β) (h) :
    (s.disjUnionᵢ t h).card = s.sum fun i => (t i).card :=
  Multiset.card_bind _ _
#align finset.card_disj_Union Finset.card_disjUnionᵢ
-- Cardinality of a `bunionᵢ` of pairwise-disjoint finsets.
theorem card_bunionᵢ [DecidableEq β] {s : Finset α} {t : α → Finset β}
    (h : ∀ x ∈ s, ∀ y ∈ s, x ≠ y → Disjoint (t x) (t y)) :
    (s.bunionᵢ t).card = ∑ u in s, card (t u) :=
  calc
    (s.bunionᵢ t).card = ∑ i in s.bunionᵢ t, 1 := card_eq_sum_ones _
    _ = ∑ a in s, ∑ _i in t a, 1 := Finset.sum_bunionᵢ h
    _ = ∑ u in s, card (t u) := by simp_rw [card_eq_sum_ones]
#align finset.card_bUnion Finset.card_bunionᵢ
-- Without disjointness, the cardinality of a `bunionᵢ` is only bounded above by the sum.
theorem card_bunionᵢ_le [DecidableEq β] {s : Finset α} {t : α → Finset β} :
    (s.bunionᵢ t).card ≤ ∑ a in s, (t a).card :=
  haveI := Classical.decEq α
  Finset.induction_on s (by simp) fun a s has ih =>
    calc
      ((insert a s).bunionᵢ t).card ≤ (t a).card + (s.bunionᵢ t).card := by
        { rw [bunionᵢ_insert]; exact Finset.card_union_le _ _ }
      _ ≤ ∑ a in insert a s, card (t a) := by rw [sum_insert has]; exact add_le_add_left ih _
#align finset.card_bUnion_le Finset.card_bunionᵢ_le
-- Count `s` by summing the sizes of the fibres of `f` over a target finset `t`.
theorem card_eq_sum_card_fiberwise [DecidableEq β] {f : α → β} {s : Finset α} {t : Finset β}
    (H : ∀ x ∈ s, f x ∈ t) : s.card = ∑ a in t, (s.filter fun x => f x = a).card := by
  simp only [card_eq_sum_ones, sum_fiberwise_of_maps_to H]
#align finset.card_eq_sum_card_fiberwise Finset.card_eq_sum_card_fiberwise
theorem card_eq_sum_card_image [DecidableEq β] (f : α → β) (s : Finset α) :
    s.card = ∑ a in s.image f, (s.filter fun x => f x = a).card :=
  card_eq_sum_card_fiberwise fun _ => mem_image_of_mem _
#align finset.card_eq_sum_card_image Finset.card_eq_sum_card_image
-- Membership in a finite sum of multisets means membership in one of the summands.
theorem mem_sum {f : α → Multiset β} (s : Finset α) (b : β) :
    (b ∈ ∑ x in s, f x) ↔ ∃ a ∈ s, b ∈ f a := by
  classical
    refine' s.induction_on (by simp) _
    · intro a t hi ih
      simp [sum_insert hi, ih, or_and_right, exists_or]
#align finset.mem_sum Finset.mem_sum
section ProdEqZero

variable [CommMonoidWithZero β]

-- A single zero factor annihilates the whole product.
theorem prod_eq_zero (ha : a ∈ s) (h : f a = 0) : (∏ x in s, f x) = 0 := by
  haveI := Classical.decEq α
  rw [← prod_erase_mul _ _ ha, h, mul_zero]
#align finset.prod_eq_zero Finset.prod_eq_zero
-- A product of 0/1 indicators is the indicator of "all elements satisfy `p`".
theorem prod_boole {s : Finset α} {p : α → Prop} [DecidablePred p] :
    (∏ i in s, ite (p i) (1 : β) (0 : β)) = ite (∀ i ∈ s, p i) 1 0 := by
  split_ifs with h
  · apply prod_eq_one
    intro i hi
    rw [if_pos (h i hi)]
  · push_neg at h
    rcases h with ⟨i, hi, hq⟩
    apply prod_eq_zero hi
    rw [if_neg hq]
#align finset.prod_boole Finset.prod_boole
-- With no zero divisors, vanishing of the product is equivalent to a vanishing factor.
variable [Nontrivial β] [NoZeroDivisors β]

theorem prod_eq_zero_iff : (∏ x in s, f x) = 0 ↔ ∃ a ∈ s, f a = 0 := by
  classical
    induction' s using Finset.induction_on with a s ha ih
    · exact ⟨Not.elim one_ne_zero, fun ⟨_, H, _⟩ => by simp at H⟩
    · rw [prod_insert ha, mul_eq_zero, exists_mem_insert, ih, ← bex_def]
#align finset.prod_eq_zero_iff Finset.prod_eq_zero_iff
theorem prod_ne_zero_iff : (∏ x in s, f x) ≠ 0 ↔ ∀ a ∈ s, f a ≠ 0 := by
  rw [Ne, prod_eq_zero_iff]
  push_neg; rfl
#align finset.prod_ne_zero_iff Finset.prod_ne_zero_iff
end ProdEqZero
-- Over a `Unique` index type, a nonempty product collapses to the default element's value.
@[to_additive]
theorem prod_unique_nonempty {α β : Type _} [CommMonoid β] [Unique α] (s : Finset α) (f : α → β)
    (h : s.Nonempty) : (∏ x in s, f x) = f default := by
  rw [h.eq_singleton_default, Finset.prod_singleton]
#align finset.prod_unique_nonempty Finset.prod_unique_nonempty
#align finset.sum_unique_nonempty Finset.sum_unique_nonempty
-- Modular arithmetic commutes with finite sums/products over `ℕ` and `ℤ`;
-- all four lemmas reduce to their `Multiset` counterparts.
theorem sum_nat_mod (s : Finset α) (n : ℕ) (f : α → ℕ) :
    (∑ i in s, f i) % n = (∑ i in s, f i % n) % n :=
  (Multiset.sum_nat_mod _ _).trans <| by rw [Finset.sum, Multiset.map_map]; rfl
#align finset.sum_nat_mod Finset.sum_nat_mod
theorem prod_nat_mod (s : Finset α) (n : ℕ) (f : α → ℕ) :
    (∏ i in s, f i) % n = (∏ i in s, f i % n) % n :=
  (Multiset.prod_nat_mod _ _).trans <| by rw [Finset.prod, Multiset.map_map]; rfl
#align finset.prod_nat_mod Finset.prod_nat_mod
theorem sum_int_mod (s : Finset α) (n : ℤ) (f : α → ℤ) :
    (∑ i in s, f i) % n = (∑ i in s, f i % n) % n :=
  (Multiset.sum_int_mod _ _).trans <| by rw [Finset.sum, Multiset.map_map]; rfl
#align finset.sum_int_mod Finset.sum_int_mod
theorem prod_int_mod (s : Finset α) (n : ℤ) (f : α → ℤ) :
    (∏ i in s, f i) % n = (∏ i in s, f i % n) % n :=
  (Multiset.prod_int_mod _ _).trans <| by rw [Finset.prod, Multiset.map_map]; rfl
#align finset.prod_int_mod Finset.prod_int_mod
end Finset
namespace Fintype

open Finset

/-- `Fintype.prod_bijective` is a variant of `Finset.prod_bij` that accepts `Function.bijective`.

See `Function.bijective.prod_comp` for a version without `h`. -/
@[to_additive "`Fintype.sum_equiv` is a variant of `Finset.sum_bij` that accepts
`Function.bijective`.

See `Function.bijective.sum_comp` for a version without `h`. "]
theorem prod_bijective {α β M : Type _} [Fintype α] [Fintype β] [CommMonoid M] (e : α → β)
    (he : Function.Bijective e) (f : α → M) (g : β → M) (h : ∀ x, f x = g (e x)) :
    (∏ x : α, f x) = ∏ x : β, g x :=
  prod_bij (fun x _ => e x) (fun x _ => mem_univ (e x)) (fun x _ => h x)
    (fun _x _x' _ _ h => he.injective h) fun y _ =>
    (he.surjective y).imp fun _a h => ⟨mem_univ _, h.symm⟩
#align fintype.prod_bijective Fintype.prod_bijective
#align fintype.sum_bijective Fintype.sum_bijective
/-- `Fintype.prod_equiv` is a specialization of `Finset.prod_bij` that
automatically fills in most arguments.

See `Equiv.prod_comp` for a version without `h`.
-/
@[to_additive "`Fintype.sum_equiv` is a specialization of `Finset.sum_bij` that
automatically fills in most arguments.

See `Equiv.sum_comp` for a version without `h`."]
theorem prod_equiv {α β M : Type _} [Fintype α] [Fintype β] [CommMonoid M] (e : α ≃ β) (f : α → M)
    (g : β → M) (h : ∀ x, f x = g (e x)) : (∏ x : α, f x) = ∏ x : β, g x :=
  prod_bijective e e.bijective f g h
#align fintype.prod_equiv Fintype.prod_equiv
#align fintype.sum_equiv Fintype.sum_equiv
-- Products over degenerate index types: `Unique`, `IsEmpty`, `Subsingleton`.
@[to_additive]
theorem prod_unique {α β : Type _} [CommMonoid β] [Unique α] [Fintype α] (f : α → β) :
    (∏ x : α, f x) = f default := by rw [univ_unique, prod_singleton]
#align fintype.prod_unique Fintype.prod_unique
#align fintype.sum_unique Fintype.sum_unique
@[to_additive]
theorem prod_empty {α β : Type _} [CommMonoid β] [IsEmpty α] [Fintype α] (f : α → β) :
    (∏ x : α, f x) = 1 :=
  Finset.prod_of_empty _
#align fintype.prod_empty Fintype.prod_empty
#align fintype.sum_empty Fintype.sum_empty
@[to_additive]
theorem prod_subsingleton {α β : Type _} [CommMonoid β] [Subsingleton α] [Fintype α] (f : α → β)
    (a : α) : (∏ x : α, f x) = f a := by
  haveI : Unique α := uniqueOfSubsingleton a
  rw [prod_unique f, Subsingleton.elim default a]
#align fintype.prod_subsingleton Fintype.prod_subsingleton
#align fintype.sum_subsingleton Fintype.sum_subsingleton
-- Splitting a product over all of `α` along a decidable predicate `p`.
@[to_additive]
theorem prod_subtype_mul_prod_subtype {α β : Type _} [Fintype α] [CommMonoid β] (p : α → Prop)
    (f : α → β) [DecidablePred p] :
    ((∏ i : { x // p x }, f i) * ∏ i : { x // ¬p x }, f i) = ∏ i, f i := by
  classical
    let s := { x | p x }.toFinset
    rw [← Finset.prod_subtype s, ← Finset.prod_subtype (sᶜ)]
    · exact Finset.prod_mul_prod_compl _ _
    · simp
    · simp
#align fintype.prod_subtype_mul_prod_subtype Fintype.prod_subtype_mul_prod_subtype
#align fintype.sum_subtype_add_sum_subtype Fintype.sum_subtype_add_sum_subtype
end Fintype
namespace List

-- For a duplicate-free list, the product over its `toFinset` equals the product of the
-- mapped list; proved by structural recursion on the list.
@[to_additive]
theorem prod_toFinset {M : Type _} [DecidableEq α] [CommMonoid M] (f : α → M) :
    ∀ {l : List α} (_hl : l.Nodup), l.toFinset.prod f = (l.map f).prod
  | [], _ => by simp
  | a :: l, hl => by
    let ⟨not_mem, hl⟩ := List.nodup_cons.mp hl
    simp [Finset.prod_insert (mt List.mem_toFinset.mp not_mem), prod_toFinset _ hl]
#align list.prod_to_finset List.prod_toFinset
#align list.sum_to_finset List.sum_toFinset
end List
namespace Multiset

-- Disjointness from a sum of multisets is equivalent to disjointness from each summand.
theorem disjoint_list_sum_left {a : Multiset α} {l : List (Multiset α)} :
    Multiset.Disjoint l.sum a ↔ ∀ b ∈ l, Multiset.Disjoint b a := by
  induction' l with b bs ih
  · simp only [zero_disjoint, List.not_mem_nil, IsEmpty.forall_iff, forall_const, List.sum_nil]
  · simp_rw [List.sum_cons, disjoint_add_left, List.mem_cons, forall_eq_or_imp]
    simp [and_congr_left_iff, iff_self_iff, ih]
#align multiset.disjoint_list_sum_left Multiset.disjoint_list_sum_left
theorem disjoint_list_sum_right {a : Multiset α} {l : List (Multiset α)} :
    Multiset.Disjoint a l.sum ↔ ∀ b ∈ l, Multiset.Disjoint a b := by
  simpa only [@disjoint_comm _ a] using disjoint_list_sum_left
#align multiset.disjoint_list_sum_right Multiset.disjoint_list_sum_right
theorem disjoint_sum_right {a : Multiset α} {i : Multiset (Multiset α)} :
    Multiset.Disjoint a i.sum ↔ ∀ b ∈ i, Multiset.Disjoint a b := by
  simpa only [@disjoint_comm _ a] using disjoint_sum_left
#align multiset.disjoint_sum_right Multiset.disjoint_sum_right
-- `Finset`-indexed variants of the disjointness-of-sum lemmas.
theorem disjoint_finset_sum_left {β : Type _} {i : Finset β} {f : β → Multiset α} {a : Multiset α} :
    Multiset.Disjoint (i.sum f) a ↔ ∀ b ∈ i, Multiset.Disjoint (f b) a := by
  convert @disjoint_sum_left _ a (map f i.val)
  simp [and_congr_left_iff, iff_self_iff]
#align multiset.disjoint_finset_sum_left Multiset.disjoint_finset_sum_left
theorem disjoint_finset_sum_right {β : Type _} {i : Finset β} {f : β → Multiset α}
    {a : Multiset α} : Multiset.Disjoint a (i.sum f) ↔ ∀ b ∈ i, Multiset.Disjoint a (f b) := by
  simpa only [disjoint_comm] using disjoint_finset_sum_left
#align multiset.disjoint_finset_sum_right Multiset.disjoint_finset_sum_right
variable [DecidableEq α]

-- `z + x = z ∪ y` forces disjointness of `z` and `x` together with `x = y` (given `y ≤ x`).
theorem add_eq_union_left_of_le {x y z : Multiset α} (h : y ≤ x) :
    z + x = z ∪ y ↔ z.Disjoint x ∧ x = y := by
  rw [← add_eq_union_iff_disjoint]
  constructor
  · intro h0
    rw [and_iff_right_of_imp]
    · exact (le_of_add_le_add_left <| h0.trans_le <| union_le_add z y).antisymm h
    · rintro rfl
      exact h0
  · rintro ⟨h0, rfl⟩
    exact h0
#align multiset.add_eq_union_left_of_le Multiset.add_eq_union_left_of_le
theorem add_eq_union_right_of_le {x y z : Multiset α} (h : z ≤ y) :
    x + y = x ∪ z ↔ y = z ∧ x.Disjoint y := by
  simpa only [and_comm] using add_eq_union_left_of_le h
#align multiset.add_eq_union_right_of_le Multiset.add_eq_union_right_of_le
-- A finite sum of multisets equals their sup exactly when the summands are pairwise disjoint.
theorem finset_sum_eq_sup_iff_disjoint {β : Type _} {i : Finset β} {f : β → Multiset α} :
    i.sum f = i.sup f ↔
      ∀ (x) (_ : x ∈ i) (y) (_ : y ∈ i), x ≠ y → Multiset.Disjoint (f x) (f y) := by
  induction' i using Finset.cons_induction_on with z i hz hr
  ·
    simp only [Finset.not_mem_empty, IsEmpty.forall_iff, imp_true_iff, Finset.sum_empty,
      Finset.sup_empty, bot_eq_zero, eq_self_iff_true]
  · simp_rw [Finset.sum_cons hz, Finset.sup_cons, Finset.mem_cons, Multiset.sup_eq_union,
      forall_eq_or_imp, Ne.def, eq_self_iff_true, not_true, IsEmpty.forall_iff, true_and_iff,
      imp_and, forall_and, ← hr, @eq_comm _ z]
    have := fun x (H : x ∈ i) => ne_of_mem_of_not_mem H hz
    simp (config := { contextual := true }) only [this, not_false_iff, true_imp_iff]
    simp_rw [← disjoint_finset_sum_left, ← disjoint_finset_sum_right, disjoint_comm, ← and_assoc,
      and_self_iff]
    exact add_eq_union_left_of_le (Finset.sup_le fun x hx => le_sum_of_mem (mem_map_of_mem f hx))
#align multiset.finset_sum_eq_sup_iff_disjoint Multiset.finset_sum_eq_sup_iff_disjoint
-- The powerset is the sup of the fixed-length powersets over all lengths `0..card x`.
theorem sup_powerset_len {α : Type _} [DecidableEq α] (x : Multiset α) :
    (Finset.sup (Finset.range (card x + 1)) fun k => x.powersetLen k) = x.powerset := by
  convert bind_powerset_len x using 1
  rw [Multiset.bind, Multiset.join, ← Finset.range_val, ← Finset.sum_eq_multiset_sum]
  exact
    Eq.symm (finset_sum_eq_sup_iff_disjoint.mpr fun _ _ _ _ h => pairwise_disjoint_powersetLen x h)
#align multiset.sup_powerset_len Multiset.sup_powerset_len
-- Summing the multiplicities of the distinct elements recovers the total size.
@[simp]
theorem toFinset_sum_count_eq (s : Multiset α) : (∑ a in s.toFinset, s.count a) = card s :=
  calc
    (∑ a in s.toFinset, s.count a) = ∑ a in s.toFinset, s.count a • 1 := by
      { simp only [smul_eq_mul, mul_one] }
    _ = (s.map fun _ => 1).sum := (Finset.sum_multiset_map_count _ _).symm
    _ = card s := by simp
#align multiset.to_finset_sum_count_eq Multiset.toFinset_sum_count_eq
-- `count` distributes over a `Finset`-indexed sum of multisets.
theorem count_sum' {s : Finset β} {a : α} {f : β → Multiset α} :
    count a (∑ x in s, f x) = ∑ x in s, count a (f x) := by
  dsimp only [Finset.sum]
  rw [count_sum]
#align multiset.count_sum' Multiset.count_sum'
-- A multiset is rebuilt from its distinct elements weighted by their multiplicities.
@[simp]
theorem toFinset_sum_count_nsmul_eq (s : Multiset α) :
    (∑ a in s.toFinset, s.count a • {a}) = s := by
  rw [← Finset.sum_multiset_map_count, Multiset.sum_map_singleton]
#align multiset.to_finset_sum_count_nsmul_eq Multiset.toFinset_sum_count_nsmul_eq
-- If `k` divides every multiplicity, the multiset is a `k`-fold scalar multiple.
theorem exists_smul_of_dvd_count (s : Multiset α) {k : ℕ}
    (h : ∀ a : α, a ∈ s → k ∣ Multiset.count a s) : ∃ u : Multiset α, s = k • u := by
  use ∑ a in s.toFinset, (s.count a / k) • {a}
  have h₂ :
    (∑ x : α in s.toFinset, k • (count x s / k) • ({x} : Multiset α)) =
      ∑ x : α in s.toFinset, count x s • {x} := by
    apply Finset.sum_congr rfl
    intro x hx
    rw [← mul_nsmul', Nat.mul_div_cancel' (h x (mem_toFinset.mp hx))]
  rw [← Finset.sum_nsmul, h₂, toFinset_sum_count_nsmul_eq]
#align multiset.exists_smul_of_dvd_count Multiset.exists_smul_of_dvd_count
-- The product over the deduplicated elements divides the full product.
theorem toFinset_prod_dvd_prod [CommMonoid α] (S : Multiset α) : S.toFinset.prod id ∣ S.prod := by
  rw [Finset.prod_eq_multiset_prod]
  refine' Multiset.prod_dvd_prod_of_le _
  simp [Multiset.dedup_le S]
#align multiset.to_finset_prod_dvd_prod Multiset.toFinset_prod_dvd_prod
-- The product of a finite sum of multisets is the product of their products.
@[to_additive]
theorem prod_sum {α : Type _} {ι : Type _} [CommMonoid α] (f : ι → Multiset α) (s : Finset ι) :
    (∑ x in s, f x).prod = ∏ x in s, (f x).prod := by
  classical
    induction' s using Finset.induction_on with a t hat ih
    · rw [Finset.sum_empty, Finset.prod_empty, Multiset.prod_zero]
    · rw [Finset.sum_insert hat, Finset.prod_insert hat, Multiset.prod_add, ih]
#align multiset.prod_sum Multiset.prod_sum
#align multiset.sum_sum Multiset.sum_sum
end Multiset
namespace Nat

-- The coercion `ℕ → β` is a (semi)ring hom, so it commutes with list, multiset
-- and `Finset` sums and products.
@[simp, norm_cast]
theorem cast_list_sum [AddMonoidWithOne β] (s : List ℕ) : (↑s.sum : β) = (s.map (↑)).sum :=
  map_list_sum (castAddMonoidHom β) _
#align nat.cast_list_sum Nat.cast_list_sum
@[simp, norm_cast]
theorem cast_list_prod [Semiring β] (s : List ℕ) : (↑s.prod : β) = (s.map (↑)).prod :=
  map_list_prod (castRingHom β) _
#align nat.cast_list_prod Nat.cast_list_prod
@[simp, norm_cast]
theorem cast_multiset_sum [AddCommMonoidWithOne β] (s : Multiset ℕ) :
    (↑s.sum : β) = (s.map (↑)).sum :=
  map_multiset_sum (castAddMonoidHom β) _
#align nat.cast_multiset_sum Nat.cast_multiset_sum
@[simp, norm_cast]
theorem cast_multiset_prod [CommSemiring β] (s : Multiset ℕ) : (↑s.prod : β) = (s.map (↑)).prod :=
  map_multiset_prod (castRingHom β) _
#align nat.cast_multiset_prod Nat.cast_multiset_prod
@[simp, norm_cast]
theorem cast_sum [AddCommMonoidWithOne β] (s : Finset α) (f : α → ℕ) :
    ↑(∑ x in s, f x : ℕ) = ∑ x in s, (f x : β) :=
  map_sum (castAddMonoidHom β) _ _
#align nat.cast_sum Nat.cast_sum
@[simp, norm_cast]
theorem cast_prod [CommSemiring β] (f : α → ℕ) (s : Finset α) :
    (↑(∏ i in s, f i) : β) = ∏ i in s, (f i : β) :=
  map_prod (castRingHom β) _ _
#align nat.cast_prod Nat.cast_prod
end Nat
namespace Int

-- The coercion `ℤ → β` is a ring hom, so it commutes with list, multiset
-- and `Finset` sums and products (mirrors the `Nat` lemmas above).
@[simp, norm_cast]
theorem cast_list_sum [AddGroupWithOne β] (s : List ℤ) : (↑s.sum : β) = (s.map (↑)).sum :=
  map_list_sum (castAddHom β) _
#align int.cast_list_sum Int.cast_list_sum
@[simp, norm_cast]
theorem cast_list_prod [Ring β] (s : List ℤ) : (↑s.prod : β) = (s.map (↑)).prod :=
  map_list_prod (castRingHom β) _
#align int.cast_list_prod Int.cast_list_prod
@[simp, norm_cast]
theorem cast_multiset_sum [AddCommGroupWithOne β] (s : Multiset ℤ) :
    (↑s.sum : β) = (s.map (↑)).sum :=
  map_multiset_sum (castAddHom β) _
#align int.cast_multiset_sum Int.cast_multiset_sum
@[simp, norm_cast]
theorem cast_multiset_prod {R : Type _} [CommRing R] (s : Multiset ℤ) :
    (↑s.prod : R) = (s.map (↑)).prod :=
  map_multiset_prod (castRingHom R) _
#align int.cast_multiset_prod Int.cast_multiset_prod
@[simp, norm_cast]
theorem cast_sum [AddCommGroupWithOne β] (s : Finset α) (f : α → ℤ) :
    ↑(∑ x in s, f x : ℤ) = ∑ x in s, (f x : β) :=
  map_sum (castAddHom β) _ _
#align int.cast_sum Int.cast_sum
@[simp, norm_cast]
theorem cast_prod {R : Type _} [CommRing R] (f : α → ℤ) (s : Finset α) :
    (↑(∏ i in s, f i) : R) = ∏ i in s, (f i : R) :=
  (Int.castRingHom R).map_prod _ _
#align int.cast_prod Int.cast_prod
end Int
-- The coercion from units to the monoid commutes with products.
@[simp, norm_cast]
theorem Units.coe_prod {M : Type _} [CommMonoid M] (f : α → Mˣ) (s : Finset α) :
    (↑(∏ i in s, f i) : M) = ∏ i in s, (f i : M) :=
  (Units.coeHom M).map_prod _ _
#align units.coe_prod Units.coe_prod
-- A unit built from a nonzero product is the product of units built from the factors;
-- `s.attach` carries the membership proofs needed to show each factor is nonzero.
theorem Units.mk0_prod [CommGroupWithZero β] (s : Finset α) (f : α → β) (h) :
    Units.mk0 (∏ b in s, f b) h =
      ∏ b in s.attach, Units.mk0 (f b) fun hh => h (Finset.prod_eq_zero b.2 hh) := by
  classical induction s using Finset.induction_on <;> simp [*]
#align units.mk0_prod Units.mk0_prod
-- Triangle inequality for `Int.natAbs` over finite sums.
theorem nat_abs_sum_le {ι : Type _} (s : Finset ι) (f : ι → ℤ) :
    (∑ i in s, f i).natAbs ≤ ∑ i in s, (f i).natAbs := by
  classical
    induction' s using Finset.induction_on with i s his IH
    · simp only [Finset.sum_empty, Int.natAbs_zero]
    · simp only [his, Finset.sum_insert, not_false_iff]
      exact (Int.natAbs_add_le _ _).trans (add_le_add le_rfl IH)
#align nat_abs_sum_le nat_abs_sum_le
/-! ### `additive`, `multiplicative` -/


open Additive Multiplicative

section Monoid

variable [Monoid α]

-- `ofMul`/`toMul` exchange products and sums when moving between a monoid and its
-- `Additive` counterpart; the proofs unfold the (definitionally trivial) wrappers.
@[simp]
theorem ofMul_list_prod (s : List α) : ofMul s.prod = (s.map ofMul).sum := by simp [ofMul]; rfl
#align of_mul_list_prod ofMul_list_prod
@[simp]
theorem toMul_list_sum (s : List (Additive α)) : toMul s.sum = (s.map toMul).prod := by
  simp [toMul, ofMul]; rfl
#align to_mul_list_sum toMul_list_sum
end Monoid
section AddMonoid

variable [AddMonoid α]

-- The dual direction: `ofAdd`/`toAdd` exchange sums and products.
@[simp]
theorem ofAdd_list_prod (s : List α) : ofAdd s.sum = (s.map ofAdd).prod := by simp [ofAdd]; rfl
#align of_add_list_prod ofAdd_list_prod
@[simp]
theorem toAdd_list_sum (s : List (Multiplicative α)) : toAdd s.prod = (s.map toAdd).sum := by
  simp [toAdd, ofAdd]; rfl
#align to_add_list_sum toAdd_list_sum
end AddMonoid
section CommMonoid

variable [CommMonoid α]

-- Multiset and `Finset` versions of the `ofMul`/`toMul` transfer lemmas.
@[simp]
theorem ofMul_multiset_prod (s : Multiset α) : ofMul s.prod = (s.map ofMul).sum := by
  simp [ofMul]; rfl
#align of_mul_multiset_prod ofMul_multiset_prod
@[simp]
theorem toMul_multiset_sum (s : Multiset (Additive α)) : toMul s.sum = (s.map toMul).prod := by
  simp [toMul, ofMul]; rfl
#align to_mul_multiset_sum toMul_multiset_sum
@[simp]
theorem ofMul_prod (s : Finset ι) (f : ι → α) : ofMul (∏ i in s, f i) = ∑ i in s, ofMul (f i) :=
  rfl
#align of_mul_prod ofMul_prod
@[simp]
theorem toMul_sum (s : Finset ι) (f : ι → Additive α) :
    toMul (∑ i in s, f i) = ∏ i in s, toMul (f i) :=
  rfl
#align to_mul_sum toMul_sum
end CommMonoid
section AddCommMonoid

variable [AddCommMonoid α]

-- Multiset and `Finset` versions of the `ofAdd`/`toAdd` transfer lemmas.
@[simp]
theorem ofAdd_multiset_prod (s : Multiset α) : ofAdd s.sum = (s.map ofAdd).prod := by
  simp [ofAdd]; rfl
#align of_add_multiset_prod ofAdd_multiset_prod
@[simp]
theorem toAdd_multiset_sum (s : Multiset (Multiplicative α)) :
    toAdd s.prod = (s.map toAdd).sum := by
  simp [toAdd, ofAdd]; rfl
#align to_add_multiset_sum toAdd_multiset_sum
@[simp]
theorem ofAdd_sum (s : Finset ι) (f : ι → α) : ofAdd (∑ i in s, f i) = ∏ i in s, ofAdd (f i) :=
  rfl
#align of_add_sum ofAdd_sum
@[simp]
theorem toAdd_prod (s : Finset ι) (f : ι → Multiplicative α) :
    toAdd (∏ i in s, f i) = ∑ i in s, toAdd (f i) :=
  rfl
#align to_add_prod toAdd_prod
end AddCommMonoid
|
{"author": "leanprover-community", "repo": "mathlib4", "sha": "b9a0a30342ca06e9817e22dbe46e75fc7f435500", "save_path": "github-repos/lean/leanprover-community-mathlib4", "path": "github-repos/lean/leanprover-community-mathlib4/mathlib4-b9a0a30342ca06e9817e22dbe46e75fc7f435500/Mathlib/Algebra/BigOperators/Basic.lean"}
|
# Autogenerated wrapper script for LCIO_Julia_Wrapper_jll for i686-linux-gnu-cxx11-julia_version+1.6.0
# NOTE(review): machine-generated by the JLL packaging tooling; change the
# generator (JLLWrappers / BinaryBuilder recipe), not this file.
export lciowrap
using libcxxwrap_julia_jll
using LCIO_jll
JLLWrappers.@generate_wrapper_header("LCIO_Julia_Wrapper")
# Declare the shared-library product this JLL package exposes as `lciowrap`.
JLLWrappers.@declare_library_product(lciowrap, "liblciowrap.so")
function __init__()
# Initialize the dependency JLLs first so their libraries can be located.
JLLWrappers.@generate_init_header(libcxxwrap_julia_jll, LCIO_jll)
# dlopen flags: RTLD_LAZY defers symbol resolution; RTLD_DEEPBIND prefers the
# library's own symbols over previously loaded ones.
JLLWrappers.@init_library_product(
lciowrap,
"lib/liblciowrap.so",
RTLD_LAZY | RTLD_DEEPBIND,
)
JLLWrappers.@generate_init_footer()
end # __init__()
|
{"hexsha": "9019b3b726c89a44ee38d21226605e7cf18d4198", "size": 563, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrappers/i686-linux-gnu-cxx11-julia_version+1.6.0.jl", "max_stars_repo_name": "JuliaBinaryWrappers/LCIO_Julia_Wrapper_jll.jl", "max_stars_repo_head_hexsha": "150ae6aceec2508c9ded41f0221e3df7622bf96c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wrappers/i686-linux-gnu-cxx11-julia_version+1.6.0.jl", "max_issues_repo_name": "JuliaBinaryWrappers/LCIO_Julia_Wrapper_jll.jl", "max_issues_repo_head_hexsha": "150ae6aceec2508c9ded41f0221e3df7622bf96c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrappers/i686-linux-gnu-cxx11-julia_version+1.6.0.jl", "max_forks_repo_name": "JuliaBinaryWrappers/LCIO_Julia_Wrapper_jll.jl", "max_forks_repo_head_hexsha": "150ae6aceec2508c9ded41f0221e3df7622bf96c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2777777778, "max_line_length": 102, "alphanum_fraction": 0.7815275311, "num_tokens": 172}
|
#' Metabolite set enrichment analysis (MSEA) using pathway knowledge curated by Metabolon
#'
#' A function that returns the pathway enrichment score for all perturbed metabolites in a patient's full metabolomic profile.
#' @param abs_filename_dataset - Relative or absolute path to relevant .gct file.
#' A .gct file contains profiling data, rows are compounds and columns are sample IDs.
#' @param abs_filename_classes - Relative or absolute path to relevant .cls file.
#' A .cls file contains a mapping of class labels to columns in the .gct file.
#' @param pathway.knowledgebase - The filename of the .gmt file associated with the pathway knowledge desired.
#' Currently only "Metabolon" is offered, though "KEGG", "WikiPathways", "SMPDB"
#' and/or "Reactome" can be added in future versions.
#' @param output_dir - The path associated with the folder in which MSEA results will be saved.
#' @param expt_name - A name to be associated with the experiment you are analyzing. This name will be used in filestems
#' of results rendered in output_dir.
#' @export stats.getMSEA_Metabolon
#' @examples
#' data(Miller2015)
#' Miller2015 = Miller2015[,grep("IEM", colnames(Miller2015))]
#' # Generate a .cls file for your data.
#' diagnoses = gsub("[[:digit:]]", "", colnames(Miller2015))
#' diag.ind = diagnoses
#' diag.ind[which(diag.ind!="Argininemia")] = 0
#' diag.ind[which(diag.ind=="Argininemia")] = 1
#' diag.ind = as.numeric(diag.ind)
#' # Manually add the following text to 1st line of .cls,
#' where num_samples is the length of diag.ind: #num_samples 1 2
#' # Manually add the following text to 2nd line of .cls: #disease control
#' write.table(diag.ind, file=system.file("extdata/MSEA_Datasets/Miller2015_arg.cls", package="CTD"),
#' sep=" ", quote=FALSE, row.names = FALSE, col.names = FALSE)
#'
#' # Create a .gct file.
#' data_mx = Miller2015
#' data_mx = data_mx[, order(diag.ind)]
#' data_mx = cbind(rep(NA, nrow(data_mx)), data_mx)
#' colnames(data_mx)[1] = "DESCRIPTION"
#' write.table(data_mx, file=system.file("extdata/MSEA_Datasets/Miller2015.gct", package="CTD"),
#' sep="\t", quote=FALSE, row.names = TRUE)
#'
#' # Generate a .gmt file.
#' population = names(met.profile)
#' paths.hsa = list.dirs(path="../inst/extdata", full.names = FALSE)
#' paths.hsa = paths.hsa[-which(paths.hsa %in% c("", "RData", "allPathways"))]
#' sink(system.file("extdata/Pathway_GMTs/Metabolon.gmt", package="CTD"))
#' for (p in 1:length(paths.hsa)) {
#' load(sprintf("../inst/extdata/RData/%s.RData", paths.hsa[p]))
#' pathway.compounds = V(ig)$label[which(V(ig)$shape=="circle")]
#' pathCompIDs = unique(tolower(pathway.compounds[which(pathway.compounds %in% population)]))
#' print(sprintf("%s %s", paths.hsa[p], paste(pathCompIDs, collapse=" ")), quote=FALSE)
#' }
#' sink()
#' print("test")
#' abs_filename_dataset = system.file("extdata/MSEA_Datasets/Miller2015.gct", package="CTD")
#' abs_filename_classes = system.file("extdata/MSEA_Datasets/Miller2015_arg.cls", package="CTD")
#' pathway.data = stats.getMSEA_Metabolon(abs_filename_dataset, abs_filename_classes, pathway_knowledgebase = "Metabolon",
#' output_dir = getwd(), expt_name="msea_results")
stats.getMSEA_Metabolon = function(abs_filename_dataset, abs_filename_classes, pathway_knowledgebase = "Metabolon",
                                   output_dir = getwd(), expt_name="msea_results") {
  # Convenience front-end for MSEA(): resolves the bundled .gmt pathway
  # knowledgebase shipped with the CTD package and runs the enrichment analysis
  # with the fixed settings used throughout CTD (1000 sample-label permutations,
  # weighted scoring, FDR q-value threshold 0.25, set sizes 5-50).
  gmt.path = system.file(sprintf("extdata/Pathway_GMTs/%s.gmt", pathway_knowledgebase), package="CTD")
  msea.res = MSEA(input.ds = abs_filename_dataset,
                  input.cls = abs_filename_classes,
                  met.db = gmt.path,
                  output.directory = output_dir,
                  doc.string = expt_name,
                  reshuffling.type = "sample.labels",
                  nperm = 1000,
                  weighted.score.type = 1,
                  nom.p.val.threshold = -1,
                  fwer.p.val.threshold = -1,
                  fdr.q.val.threshold = 0.25,
                  topmet = 15,
                  adjust.FDR.q.val = F,
                  met.size.threshold.min = 5,
                  met.size.threshold.max = 50,
                  preproc.type = 0,
                  random.seed = 760435,
                  perm.type = 0,
                  fraction = 1.0,
                  replace = F)
  return(msea.res)
}
# M S E A -- Metabolite Set Enrichment Analysis from the Broad Institute's GSEA-P implementation.
MSEA.MetaboliteRanking_SingleProfile = function(A, class.labels, metabolite.labels, nperm, permutation.type = 0, sigma.correction = "MetaboliteCluster", fraction=1.0, replace=F) {
  # This function's body was a line-for-line duplicate of MSEA.MetaboliteRanking.
  # Delegate to the shared implementation so the signal-to-noise ranking logic is
  # maintained in exactly one place. The sequence of random draws (sample() calls)
  # is identical to the old inlined copy, so permutation results are unchanged.
  #
  # Arguments and the returned list (s2n.matrix, obs.s2n.matrix, order.matrix,
  # obs.order.matrix) are documented on MSEA.MetaboliteRanking.
  MSEA.MetaboliteRanking(A = A, class.labels = class.labels,
                         metabolite.labels = metabolite.labels, nperm = nperm,
                         permutation.type = permutation.type,
                         sigma.correction = sigma.correction,
                         fraction = fraction, replace = replace)
}
# Rank all metabolites by signal-to-noise ratio (S2N) between the two phenotype
# classes, once for the observed class labels and once per random permutation.
#
# A: metabolite (rows) x sample (columns) abundance matrix, samples sorted by class.
# class.labels: 0/1 phenotype vector aligned with the columns of A.
# metabolite.labels: row names of A (unused in the computation; kept for the caller).
# nperm: number of random permutations to generate.
# permutation.type: 0 = unbalanced label shuffling, 1 = class-proportional shuffling.
# sigma.correction: "MetaboliteCluster" applies the small-sigma floor (see below).
# fraction / replace: subsampling fraction and replacement flag for each permutation.
#
# Returns list(s2n.matrix, obs.s2n.matrix, order.matrix, obs.order.matrix):
# per-permutation S2N values and the corresponding descending-order index vectors.
MSEA.MetaboliteRanking = function(A, class.labels, metabolite.labels, nperm, permutation.type = 0, sigma.correction = "MetaboliteCluster", fraction=1.0, replace=F) {
# Small offset avoids exact zeros in downstream ratio computations.
A = A + 0.00000001
N = length(A[,1])
Ns = length(A[1,])
# One column per permutation: membership masks and per-class label indicators.
subset.mask = matrix(0, nrow=Ns, ncol=nperm)
reshuffled.class.labels1 = matrix(0, nrow=Ns, ncol=nperm)
reshuffled.class.labels2 = matrix(0, nrow=Ns, ncol=nperm)
class.labels1 = matrix(0, nrow=Ns, ncol=nperm)
class.labels2 = matrix(0, nrow=Ns, ncol=nperm)
order.matrix = matrix(0, nrow = N, ncol = nperm)
obs.order.matrix = matrix(0, nrow = N, ncol = nperm)
s2n.matrix = matrix(0, nrow = N, ncol = nperm)
obs.s2n.matrix = matrix(0, nrow = N, ncol = nperm)
obs.metabolite.labels = vector(length = N, mode="character")
obs.metabolite.descs = vector(length = N, mode="character")
obs.metabolite.symbols = vector(length = N, mode="character")
M1 = matrix(0, nrow = N, ncol = nperm)
M2 = matrix(0, nrow = N, ncol = nperm)
S1 = matrix(0, nrow = N, ncol = nperm)
S2 = matrix(0, nrow = N, ncol = nperm)
# Columns are assumed sorted by class, so class 1 occupies the first block.
C = split(class.labels, class.labels)
class1.size = length(C[[1]])
class2.size = length(C[[2]])
class1.index = seq(1, class1.size, 1)
class2.index = seq(class1.size + 1, class1.size + class2.size, 1)
for (r in 1:nperm) {
# Optionally subsample each class (fraction < 1 enables resampling).
class1.subset = sample(class1.index, size = ceiling(class1.size*fraction), replace = replace)
class2.subset = sample(class2.index, size = ceiling(class2.size*fraction), replace = replace)
class1.subset.size = length(class1.subset)
class2.subset.size = length(class2.subset)
subset.class1 = rep(0, class1.size)
for (i in 1:class1.size) {
if (is.element(class1.index[i], class1.subset)) {
subset.class1[i] = 1
}
}
subset.class2 = rep(0, class2.size)
for (i in 1:class2.size) {
if (is.element(class2.index[i], class2.subset)) {
subset.class2[i] = 1
}
}
subset.mask[, r] = as.numeric(c(subset.class1, subset.class2))
fraction.class1 = class1.size/Ns
fraction.class2 = class2.size/Ns
if (permutation.type == 0) { # random (unbalanced) permutation
full.subset = c(class1.subset, class2.subset)
label1.subset = sample(full.subset, size = Ns * fraction.class1)
reshuffled.class.labels1[, r] = rep(0, Ns)
reshuffled.class.labels2[, r] = rep(0, Ns)
class.labels1[, r] = rep(0, Ns)
class.labels2[, r] = rep(0, Ns)
for (i in 1:Ns) {
# m1/m2 count multiplicity so sampling with replacement is handled.
m1 = sum(!is.na(match(label1.subset, i)))
m2 = sum(!is.na(match(full.subset, i)))
reshuffled.class.labels1[i, r] = m1
reshuffled.class.labels2[i, r] = m2 - m1
if (i <= class1.size) {
class.labels1[i, r] = m2
class.labels2[i, r] = 0
} else {
class.labels1[i, r] = 0
class.labels2[i, r] = m2
}
}
} else if (permutation.type == 1) { # proportional (balanced) permutation
class1.label1.subset = sample(class1.subset, size = ceiling(class1.subset.size*fraction.class1))
class2.label1.subset = sample(class2.subset, size = floor(class2.subset.size*fraction.class1))
reshuffled.class.labels1[, r] = rep(0, Ns)
reshuffled.class.labels2[, r] = rep(0, Ns)
class.labels1[, r] = rep(0, Ns)
class.labels2[, r] = rep(0, Ns)
for (i in 1:Ns) {
if (i <= class1.size) {
m1 = sum(!is.na(match(class1.label1.subset, i)))
m2 = sum(!is.na(match(class1.subset, i)))
reshuffled.class.labels1[i, r] = m1
reshuffled.class.labels2[i, r] = m2 - m1
class.labels1[i, r] = m2
class.labels2[i, r] = 0
} else {
m1 = sum(!is.na(match(class2.label1.subset, i)))
m2 = sum(!is.na(match(class2.subset, i)))
reshuffled.class.labels1[i, r] = m1
reshuffled.class.labels2[i, r] = m2 - m1
class.labels1[i, r] = 0
class.labels2[i, r] = m2
}
}
}
}
# compute S2N for the random permutation matrix
# Per-class mean (M) and standard deviation (S) via matrix products over the
# permutation indicator columns; S2N = (M1 - M2) / (S1 + S2).
P = reshuffled.class.labels1 * subset.mask
n1 = sum(P[,1])
M1 = A %*% P
M1 = M1/n1
A2 = A*A
S1 = A2 %*% P
S1 = S1/n1 - M1*M1
if (n1>1) {
S1 = sqrt(abs((n1/(n1-1)) * S1))
}
P = reshuffled.class.labels2 * subset.mask
n2 = sum(P[,1])
M2 = A %*% P
M2 = M2/n2
A2 = A*A
S2 = A2 %*% P
S2 = S2/n2 - M2*M2
if (n2>1) {
S2 = sqrt(abs((n2/(n2-1)) * S2))
}
if (sigma.correction == "MetaboliteCluster") { # small sigma "fix" as used in MetaboliteCluster
# Floor each sigma at 20% of the class mean (and at 0.2 absolute) so tiny
# variances cannot inflate the S2N statistic.
S2 = ifelse(0.2*abs(M2) < S2, S2, 0.2*abs(M2))
S2 = ifelse(S2 == 0, 0.2, S2)
S1 = ifelse(0.2*abs(M1) < S1, S1, 0.2*abs(M1))
S1 = ifelse(S1 == 0, 0.2, S1)
}
M1 = M1 - M2
S1 = S1 + S2
s2n.matrix = M1/S1
for (r in 1:nperm) {order.matrix[, r] = order(s2n.matrix[, r], decreasing=T)}
# compute S2N for the "observed" permutation matrix
P = class.labels1 * subset.mask
n1 = sum(P[,1])
if (n1>1) {
M1 = A %*% P
M1 = M1/n1
A2 = A*A
S1 = A2 %*% P
S1 = S1/n1 - M1*M1
S1 = sqrt(abs((n1/(n1-1)) * S1))
} else {
# Single-sample class: population (biased) variance, no Bessel correction.
M1 = A %*% P
A2 = A*A
S1 = A2 %*% P
S1 = S1 - M1*M1
S1 = sqrt(abs(S1))
}
P = class.labels2 * subset.mask
n2 = sum(P[,1])
if (n2>1) {
M2 = A %*% P
M2 = M2/n2
A2 = A*A
S2 = A2 %*% P
S2 = S2/n2 - M2*M2
S2 = sqrt(abs((n2/(n2-1)) * S2))
} else {
M2 = A %*% P
A2 = A*A
S2 = A2 %*% P
S2 = S2 - M2*M2
S2 = sqrt(abs(S2))
}
if (sigma.correction == "MetaboliteCluster") { # small sigma "fix" as used in MetaboliteCluster
S2 = ifelse(0.2*abs(M2) < S2, S2, 0.2*abs(M2))
S2 = ifelse(S2 == 0, 0.2, S2)
S1 = ifelse(0.2*abs(M1) < S1, S1, 0.2*abs(M1))
S1 = ifelse(S1 == 0, 0.2, S1)
}
M1 = M1 - M2
S1 = S1 + S2
obs.s2n.matrix = M1/S1
for (r in 1:nperm) {obs.order.matrix[,r] = order(obs.s2n.matrix[,r], decreasing=T) }
return(list(s2n.matrix = s2n.matrix, obs.s2n.matrix = obs.s2n.matrix, order.matrix = order.matrix, obs.order.matrix = obs.order.matrix))
}
MSEA.EnrichmentScore = function(metabolite.list, metabolite.set, weighted.score.type = 1, correl.vector = NULL) {
  # Running-sum (Kolmogorov-Smirnov-style) enrichment score of metabolite.set
  # within the ranked metabolite.list. Hits advance the running sum by their
  # (normalized) |correlation|^type weight; misses decrement it by 1/Nm.
  # Returns list(ES, arg.ES, RES, indicator):
  #   ES        - signed extremum of the running sum (5 significant digits)
  #   arg.ES    - position of that extremum in the ranked list
  #   RES       - the full running-sum profile
  #   indicator - 0/1 vector marking set members along metabolite.list
  in.set = sign(match(metabolite.list, metabolite.set, nomatch = 0))  # 1 = hit, 0 = miss
  out.set = 1 - in.set
  n.total = length(metabolite.list)
  n.misses = n.total - length(metabolite.set)
  if (weighted.score.type == 0) {
    correl.vector = rep(1, n.total)  # unweighted KS statistic
  }
  correl.vector[which(is.na(correl.vector))] = 0
  weights = abs(correl.vector**weighted.score.type)
  hit.total = sum(na.omit(weights[in.set == 1]))
  running = cumsum(in.set * weights * (1.0/hit.total) - out.set * (1.0/n.misses))
  hi = max(running)
  lo = min(running)
  if (hi > - lo) {
    return(list(ES = signif(hi, digits = 5), arg.ES = which.max(running), RES = running, indicator = in.set))
  }
  return(list(ES = signif(lo, digits = 5), arg.ES = which.min(running), RES = running, indicator = in.set))
}
MSEA.EnrichmentScore2 = function(metabolite.list, metabolite.set, weighted.score.type = 1, correl.vector = NULL) {
  # Computes the weighted MSEA score of metabolite.set in metabolite.list. It is the same calculation as in
  # MSEA.EnrichmentScore but faster (x8) without producing the RES, arg.RES and tag.indicator outputs.
  # This call is intended to be used to assess the enrichment of random permutations rather than the
  # observed one.
  #
  # metabolite.list: integer vector of metabolite ids in ranked order
  # metabolite.set: integer vector of metabolite ids belonging to the set
  # weighted.score.type: 0 = unweighted (KS), 1 = |correl|, 2 = correl^2, else correl^type
  # correl.vector: correlation (s2n) value per ranked position, aligned with metabolite.list
  # Returns list(ES = enrichment score).
  N = length(metabolite.list)
  Nh = length(metabolite.set)
  Nm = N - Nh
  loc.vector = vector(length=N, mode="numeric")
  peak.res.vector = vector(length=Nh, mode="numeric")
  valley.res.vector = vector(length=Nh, mode="numeric")
  tag.correl.vector = vector(length=Nh, mode="numeric")
  tag.loc.vector = vector(length=Nh, mode="numeric")
  tag.diff.vector = vector(length=Nh, mode="numeric")
  # loc.vector maps metabolite id -> rank position; tag.loc.vector = sorted hit positions.
  loc.vector[metabolite.list] = seq(1, N)
  tag.loc.vector = loc.vector[metabolite.set]
  tag.loc.vector = sort(tag.loc.vector, decreasing = F)
  if (weighted.score.type == 0) {
    tag.correl.vector = rep(1, Nh)
  } else if (weighted.score.type == 1) {
    tag.correl.vector = correl.vector[tag.loc.vector]
    tag.correl.vector = abs(tag.correl.vector)
  } else if (weighted.score.type == 2) {
    tag.correl.vector = correl.vector[tag.loc.vector]*correl.vector[tag.loc.vector]
    tag.correl.vector = abs(tag.correl.vector)
  } else {
    tag.correl.vector = correl.vector[tag.loc.vector]**weighted.score.type
    tag.correl.vector = abs(tag.correl.vector)
  }
  tag.correl.vector[is.na(tag.correl.vector)] = 1
  norm.tag = 1.0/sum(tag.correl.vector)
  tag.correl.vector = tag.correl.vector * norm.tag
  norm.no.tag = 1.0/Nm
  # tag.diff.vector[i] = number of misses preceding hit i since the previous hit.
  tag.diff.vector[1] = (tag.loc.vector[1] - 1)
  # BUG FIX: when Nh == 1, the unguarded assignment indexed with 2:Nh == c(2, 1)
  # (R ranges count downward), which grew the vector with NA and clobbered the
  # first gap value, breaking single-metabolite sets.
  if (Nh > 1) {
    tag.diff.vector[2:Nh] = tag.loc.vector[2:Nh] - tag.loc.vector[1:(Nh - 1)] - 1
  }
  tag.diff.vector = tag.diff.vector * norm.no.tag
  # The running sum is piecewise linear; its extrema occur at hit positions, so
  # only the value just after (peak) and just before (valley) each hit is needed.
  peak.res.vector = cumsum(tag.correl.vector - tag.diff.vector)
  valley.res.vector = peak.res.vector - tag.correl.vector
  max.ES = max(peak.res.vector)
  min.ES = min(valley.res.vector)
  ES = signif(ifelse(max.ES > - min.ES, max.ES, min.ES), digits=5)
  return(list(ES = ES))
}
MSEA.HeatMapPlot = function(V, row.names = F, col.labels, col.classes, col.names = F, main = " ", xlab=" ", ylab=" ") {
  # Plots a heatmap "pinkogram" of a metabolite expression matrix including phenotype vector and metabolite, sample and phenotype labels.
  # V: metabolite (rows) x sample (columns) matrix; standardized in place below.
  # row.names / col.names: label vectors, or F to omit that axis' labels.
  # col.labels: 0/1 phenotype vector (columns sorted by class).
  # col.classes: the two phenotype names shown along the top axis.
  n.rows = length(V[,1])
  n.cols = length(V[1,])
  row.mean = apply(V, MARGIN=1, function(i) mean(na.omit(i)))
  row.sd = apply(V, MARGIN=1, function(i) sd(na.omit(i)))
  # Standardize each row to (x - mean)/(0.5 * sd) and clamp to [-6, 6] so color
  # intensity is comparable across metabolites; constant rows map to 0.
  for (i in 1:n.rows) {
    if (row.sd[i] == 0) {V[i,] = 0} else {V[i,] = (V[i,] - row.mean[i])/(0.5 * row.sd[i])}
    V[i,] = ifelse(V[i,] < -6, -6, V[i,])
    V[i,] = ifelse(V[i,] > 6, 6, V[i,])
  }
  # blue-pinkogram colors. The first and last are the colors to indicate the class vector (phenotype).
  mycol = c("#0000FF", "#0000FF", "#4040FF", "#7070FF", "#8888FF", "#A9A9FF", "#D5D5FF", "#EEE5EE", "#FFAADA", "#FF9DB0", "#FF7080", "#FF5A5A", "#FF4040", "#FF0D1D", "#FF0000")
  heatm = matrix(0, nrow = n.rows + 1, ncol = n.cols)
  heatm[1:n.rows,] = V[seq(n.rows, 1, -1),]  # image() draws bottom-up, so flip rows
  heatm[n.rows + 1,] = ifelse(col.labels == 0, 7, -7)  # extra top row encodes the phenotype
  image(1:n.cols, 1:(n.rows + 1), t(heatm), col=mycol, axes=FALSE, main=main, xlab= xlab, ylab=ylab)
  # BUG FIX: size.col.char was previously computed only inside the row-label
  # branch, so calling with row.names = F but a col.names vector failed with
  # "object 'size.col.char' not found". Compute both label sizes unconditionally.
  size.row.char = 35/(n.rows + 5)
  size.col.char = 25/(n.cols + 5)
  if (length(row.names) > 1) {
    maxl = floor(n.rows/1.6)  # truncate long metabolite names so they fit
    for (i in 1:n.rows) {row.names[i] = substr(row.names[i], 1, maxl)}
    row.names = c(row.names[seq(n.rows, 1, -1)], "Class")
    axis(2, at=1:(n.rows + 1), labels=row.names, adj= 0.5, tick=FALSE, las = 1, cex.axis=size.row.char, font.axis=2, line=-1)
  }
  if (length(col.names) > 1) {
    axis(1, at=1:n.cols, labels=col.names, tick=FALSE, las = 3, cex.axis=size.col.char, font.axis=2, line=-1)
  }
  # Center each phenotype name over its block of columns.
  C = split(col.labels, col.labels)
  class1.size = length(C[[1]])
  class2.size = length(C[[2]])
  axis(3, at=c(floor(class1.size/2),class1.size + floor(class2.size/2)), labels=col.classes, tick=FALSE, las = 1, cex.axis=1.25, font.axis=2, line=-1)
  return()
}
MSEA.ReadClsFile = function(file = "NULL") {
  # Reads a GSEA-format class vector (CLS) file and returns the phenotype names
  # together with a numeric class label (0, 1, ...) per sample.
  # The third line of a CLS file holds the space-separated class token of each
  # sample; the first two lines are header lines and are ignored here.
  # Returns list(phen = character vector of class names, class.v = numeric labels).
  cls.lines = readLines(file)
  tokens = unlist(strsplit(cls.lines[[3]], " "))
  n.samples = length(tokens)
  counts = table(tokens)          # table() orders class names; that order fixes the labels
  n.classes = length(counts)
  phen = vector(length = n.classes, mode = "character")
  class.v = vector(length = n.samples, mode = "numeric")
  for (j in seq_len(n.classes)) {
    phen[j] = noquote(names(counts)[j])
  }
  for (i in seq_len(n.samples)) {
    class.v[i] = match(tokens[i], phen) - 1   # zero-based label per sample
  }
  return(list(phen = phen, class.v = class.v))
}
# ----------------------------------------------------------------------------------------
# Main MSEA Analysis Function that implements the entire methodology
# This is a methodology for the analysis of global molecular profiles called Metabolite Set Enrichment Analysis (MSEA). It determines
# whether an a priori defined set of metabolites shows statistically significant, concordant differences between two biological
# states (e.g. phenotypes). MSEA operates on all metabolites from an experiment, rank ordered by the signal to noise ratio and
# determines whether members of an a priori defined metabolite set are nonrandomly distributed towards the top or bottom of the
# list and thus may correspond to an important biological process. To assess significance the program uses an empirical
# permutation procedure to test deviation from random that preserves correlations between metabolites.
#
# For details see Subramanian et al 2005
MSEA = function(input.ds, input.cls, met.db, output.directory = "", doc.string = "MSEA.analysis",
reshuffling.type = "sample.labels", nperm = 1000, weighted.score.type = 1,
nom.p.val.threshold = -1, fwer.p.val.threshold = -1, fdr.q.val.threshold = -1,
topmet = 10, adjust.FDR.q.val = F, met.size.threshold.min = 5, met.size.threshold.max = 100,
preproc.type = 0, random.seed = 123456, perm.type = 0,
fraction = 1.0, replace = F) {
# Inputs:
# input.ds: Input metabolite expression dataset file in GCT format
# input.cls: Input class vector (phenotype) file in CLS format
# met.file: Metabolite set database in GMT format
# output.directory: Directory where to store output and results (default: .)
# doc.string: Documentation string used as a prefix to name result files (default: "MSEA.analysis")
# reshuffling.type: Type of permutation reshuffling: "sample.labels" or "metabolite.labels" (default: "sample.labels")
# nperm: Number of random permutations (default: 1000)
# weighted.score.type: Enrichment correlation-based weighting: 0=no weight (KS), 1=standard weigth, 2 = over-weigth (default: 1)
# nom.p.val.threshold: Significance threshold for nominal p-vals for metabolite sets (default: -1, no thres)
# fwer.p.val.threshold: Significance threshold for FWER p-vals for metabolite sets (default: -1, no thres)
# fdr.q.val.threshold: Significance threshold for FDR q-vals for metabolite sets (default: 0.25)
# topmet: Besides those passing test, number of top scoring metabolite sets used for detailed reports (default: 10)
# adjust.FDR.q.val: Adjust the FDR q-vals (default: F)
# met.size.threshold.min: Minimum size (in metabolites) for database metabolite sets to be considered (default: 25)
# met.size.threshold.max: Maximum size (in metabolites) for database metabolite sets to be considered (default: 500)
# preproc.type: Preprocessing normalization: 0=none, 1=col(z-score)., 2=col(rank) and row(z-score)., 3=col(rank). (default: 0)
# random.seed: Random number generator seed. (default: 123456)
# perm.type: Permutation type: 0 = unbalanced, 1 = balanced. For experts only (default: 0)
# fraction: Subsampling fraction. Set to 1.0 (no resampling). For experts only (default: 1.0)
# replace: Resampling mode (replacement or not replacement). For experts only (default: F)
# use.fast.enrichment.routine: if true it uses a faster version to compute random perm. enrichment "MSEA.EnrichmentScore2"
#
# Output:
# The results of the method are stored in the "output.directory" specified by the user as part of the input parameters.
# The results files are:
# - Two tab-separated global result text files (one for each phenotype). These files are labeled according to the doc
# string prefix and the phenotype name from the CLS file: <doc.string>.SUMMARY.RESULTS.REPORT.<phenotype>.txt
# - One set of global plots. They include a.- metabolite list correlation profile, b.- global observed and null densities, c.- heat map
# for the entire sorted dataset, and d.- p-values vs. NES plot. These plots are in a single JPEG file named
# <doc.string>.global.plots.<phenotype>.jpg. When the program is run interactively these plots appear on a window in the R GUI.
# - A variable number of tab-separated metabolite result text files according to how many sets pass any of the significance thresholds
# ("nom.p.val.threshold," "fwer.p.val.threshold," and "fdr.q.val.threshold") and how many are specified in the "topmet"
# parameter. These files are named: <doc.string>.<metabolite set name>.report.txt.
# - A variable number of metabolite set plots (one for each metabolite set report file). These plots include a.- Metabolite set running enrichment
# "mountain" plot, b.- metabolite set null distribution and c.- heat map for metabolites in the metabolite set. These plots are stored in a
# single JPEG file named <doc.string>.<metabolite set name>.jpg.
# The format (columns) for the global result files is as follows.
# MS : Metabolite set name.
# SIZE : Size of the set in metabolites.
# SOURCE : Set definition or source.
# ES : Enrichment score.
# NES : Normalized (multiplicative rescaling) normalized enrichment score.
# NOM p-val : Nominal p-value (from the null distribution of the metabolite set).
# FDR q-val: False discovery rate q-values
# FWER p-val: Family wise error rate p-values.
# Tag %: Percent of metabolite set before running enrichment peak.
# Metabolite %: Percent of metabolite list before running enrichment peak.
# Signal : enrichment signal strength.
# FDR (median): FDR q-values from the median of the null distributions.
# glob.p.val: P-value using a global statistic (number of sets above the set's NES).
#
# The rows are sorted by the NES values (from maximum positive or negative NES to minimum)
# The format (columns) for the metabolite set result files is as follows.
# #: Metabolite number in the (sorted) metabolite set
# METABOLITE : metabolite name. For example the probe accession number, metabolite symbol or the metabolite identifier gin the dataset.
# SYMBOL : metabolite symbol from the metabolite annotation file.
# DESC : metabolite description (title) from the metabolite annotation file.
# LIST LOC : location of the metabolite in the sorted metabolite list.
# S2N : signal to noise ratio (correlation) of the metabolite in the metabolite list.
# RES : value of the running enrichment score at the metabolite location.
# CORE_ENRICHMENT: is this metabolite is the "core enrichment" section of the list? Yes or No variable specifying in the metabolite location is before (positive ES) or after (negative ES) the running enrichment peak.
#
# The rows are sorted by the metabolite location in the metabolite list.
# The function call to MSEA returns a two element list containing the two global result reports as data frames ($report1, $report2).
#
# results1: Global output report for first phenotype
# result2: Global output report for second phenotype
print(" *** Running MSEA Analysis...")
# Copy input parameters to log file
filename = paste(output.directory, doc.string, "_params.txt", sep="", collapse="")
time.string = as.character(as.POSIXlt(Sys.time(),"GMT"))
write(paste("Run of MSEA on ", time.string), file=filename)
write(paste("input.ds=", input.ds, sep=" "), file=filename, append=T)
write(paste("input.cls=", input.cls, sep=" "), file=filename, append=T)
write(paste("met.db=", met.db, sep=" "), file=filename, append=T)
write(paste("output.directory =", output.directory, sep=" "), file=filename, append=T)
write(paste("doc.string = ", doc.string, sep=" "), file=filename, append=T)
write(paste("reshuffling.type =", reshuffling.type, sep=" "), file=filename, append=T)
write(paste("nperm =", nperm, sep=" "), file=filename, append=T)
write(paste("weighted.score.type =", weighted.score.type, sep=" "), file=filename, append=T)
write(paste("nom.p.val.threshold =", nom.p.val.threshold, sep=" "), file=filename, append=T)
write(paste("fwer.p.val.threshold =", fwer.p.val.threshold, sep=" "), file=filename, append=T)
write(paste("fdr.q.val.threshold =", fdr.q.val.threshold, sep=" "), file=filename, append=T)
write(paste("topmet =", topmet, sep=" "), file=filename, append=T)
write(paste("adjust.FDR.q.val =", adjust.FDR.q.val, sep=" "), file=filename, append=T)
write(paste("met.size.threshold.min =", met.size.threshold.min, sep=" "), file=filename, append=T)
write(paste("met.size.threshold.max =", met.size.threshold.max, sep=" "), file=filename, append=T)
write(paste("preproc.type =", preproc.type, sep=" "), file=filename, append=T)
write(paste("random.seed =", random.seed, sep=" "), file=filename, append=T)
write(paste("perm.type =", perm.type, sep=" "), file=filename, append=T)
write(paste("fraction =", fraction, sep=" "), file=filename, append=T)
write(paste("replace =", replace, sep=" "), file=filename, append=T)
# Start of MSEA methodology
# Read input data matrix
set.seed(seed=random.seed, kind = NULL)
adjust.param = 0.5
time1 = proc.time()
dataset = read.table(input.ds, sep="\t", check.names=FALSE, quote="")
dataset = dataset[,-1]
metabolite.labels = row.names(dataset)
sample.names = colnames(dataset)
A = data.matrix(dataset)
dim(A)
cols = length(A[1,])
rows = length(A[,1])
# preproc.type control the type of pre-processing: threshold, variation filter, normalization
if (preproc.type == 1) { # Column normalize (Z-score)
A = MSEA.NormalizeCols(A)
} else if (preproc.type == 2) { # Column (rank) and row (Z-score) normalize
for (j in 1:cols) {A[,j] = rank(A[,j])}
A = MSEA.NormalizeRows(A)
} else if (preproc.type == 3) { # Column (rank) norm.
for (j in 1:cols) {A[,j] = rank(A[,j])}
}
# Read input class vector
CLS = MSEA.ReadClsFile(file=input.cls)
class.labels = CLS$class.v
class.phen = CLS$phen
#phen1 = class.phen[1]
#phen2 = class.phen[2]
phen1 = "D"
phen2 = "C"
# sort samples according to phenotype
col.index = order(class.labels, decreasing=F)
class.labels = class.labels[col.index]
sample.names = sample.names[col.index]
for (j in 1:rows) {A[j, ] = A[j, col.index]}
names(A) = sample.names
# Read input metabolite set database
temp = readLines(met.db)
max.Ng = length(temp)
temp.size.G = vector(length = max.Ng, mode = "numeric")
for (i in 1:max.Ng) {
temp.size.G[i] = length(unlist(strsplit(temp[[i]], " "))) - 2
}
max.size.G = max(temp.size.G)
gs = matrix(rep("null", max.Ng*max.size.G), nrow=max.Ng, ncol= max.size.G)
temp.names = vector(length = max.Ng, mode = "character")
temp.desc = vector(length = max.Ng, mode = "character")
met.count = 1
for (i in 1:max.Ng) {
metabolite.set.size = length(unlist(strsplit(temp[[i]], " "))) - 2
met.line = noquote(unlist(strsplit(temp[[i]], " ")))
metabolite.set.name = met.line[1]
metabolite.set.desc = met.line[2]
metabolite.set.tags = vector(length = metabolite.set.size, mode = "character")
for (j in 1:metabolite.set.size) {
metabolite.set.tags[j] = trimws(met.line[j + 2])
}
existing.set = is.element(metabolite.set.tags, metabolite.labels)
set.size = length(existing.set[existing.set == T])
if ((set.size < met.size.threshold.min) || (set.size > met.size.threshold.max)) next
temp.size.G[met.count] = set.size
gs[met.count,] = c(metabolite.set.tags[existing.set], rep("null", max.size.G - temp.size.G[met.count]))
temp.names[met.count] = metabolite.set.name
temp.desc[met.count] = metabolite.set.desc
met.count = met.count + 1
}
Ng = met.count - 1
met.names = vector(length = Ng, mode = "character")
met.desc = vector(length = Ng, mode = "character")
size.G = vector(length = Ng, mode = "numeric")
met.names = temp.names[1:Ng]
met.desc = temp.desc[1:Ng]
size.G = temp.size.G[1:Ng]
N = length(A[,1])
Ns = length(A[1,])
print(c("Number of metabolites in dataset:", N))
print(sprintf("Number of Metabolite Pathway Sets between size %d-%d: %d", met.size.threshold.min, met.size.threshold.max, Ng))
print(c("Number of samples:", Ns))
print(c("Original number of Metabolite Pathway Sets:", max.Ng))
print(c("Maximum metabolite pathway set size:", max.size.G))
# Read metabolite and metabolite set annotations if metabolite annotation file was provided
all.metabolite.descs = vector(length = N, mode ="character")
all.metabolite.symbols = vector(length = N, mode ="character")
all.met.descs = vector(length = Ng, mode ="character")
for (i in 1:N) {
all.metabolite.descs[i] = metabolite.labels[i]
all.metabolite.symbols[i] = metabolite.labels[i]
}
for (i in 1:Ng) {
all.met.descs[i] = met.desc[i]
}
Obs.indicator = matrix(nrow= Ng, ncol=N)
Obs.RES = matrix(nrow= Ng, ncol=N)
Obs.ES = vector(length = Ng, mode = "numeric")
Obs.arg.ES = vector(length = Ng, mode = "numeric")
Obs.ES.norm = vector(length = Ng, mode = "numeric")
time2 = proc.time()
# MSEA methodology
# Compute observed and random permutation metabolite rankings
obs.s2n = vector(length=N, mode="numeric")
signal.strength = vector(length=Ng, mode="numeric")
tag.frac = vector(length=Ng, mode="numeric")
metabolite.frac = vector(length=Ng, mode="numeric")
coherence.ratio = vector(length=Ng, mode="numeric")
obs.phi.norm = matrix(nrow = Ng, ncol = nperm)
correl.matrix = matrix(nrow = N, ncol = nperm)
obs.correl.matrix = matrix(nrow = N, ncol = nperm)
order.matrix = matrix(nrow = N, ncol = nperm)
obs.order.matrix = matrix(nrow = N, ncol = nperm)
nperm.per.call = 100
n.groups = nperm %/% nperm.per.call
n.rem = nperm %% nperm.per.call
n.perms = c(rep(nperm.per.call, n.groups), n.rem)
n.ends = cumsum(n.perms)
n.starts = n.ends - n.perms + 1
if (n.rem == 0) {n.tot = n.groups} else {n.tot = n.groups + 1}
for (nk in 1:n.tot) {
call.nperm = n.perms[nk]
print(paste("Computing ranked list for actual and permuted phenotypes.......permutations: ", n.starts[nk], "--", n.ends[nk], sep=" "))
if (sum(CLS$class.v)==1) {
O = MSEA.MetaboliteRanking_SingleProfile(A, class.labels, metabolite.labels, call.nperm, permutation.type = perm.type, sigma.correction = "MetaboliteCluster", fraction=fraction, replace=replace)
} else {
O = MSEA.MetaboliteRanking(A, class.labels, metabolite.labels, call.nperm, permutation.type = perm.type, sigma.correction = "MetaboliteCluster", fraction=fraction, replace=replace)
}
order.matrix[,n.starts[nk]:n.ends[nk]] = O$order.matrix
obs.order.matrix[,n.starts[nk]:n.ends[nk]] = O$obs.order.matrix
correl.matrix[,n.starts[nk]:n.ends[nk]] = O$s2n.matrix
obs.correl.matrix[,n.starts[nk]:n.ends[nk]] = O$obs.s2n.matrix
rm(O)
}
obs.s2n = apply(obs.correl.matrix, 1, function(i) median(na.omit(i))) # using median to assign enrichment scores
obs.index = order(obs.s2n, decreasing=T)
obs.s2n = sort(obs.s2n, decreasing=T, na.last = TRUE)
obs.metabolite.labels = metabolite.labels[obs.index]
obs.metabolite.descs = all.metabolite.descs[obs.index]
obs.metabolite.symbols = all.metabolite.symbols[obs.index]
for (r in 1:nperm) {correl.matrix[, r] = correl.matrix[order.matrix[,r], r]}
for (r in 1:nperm) {obs.correl.matrix[, r] = obs.correl.matrix[obs.order.matrix[,r], r]}
metabolite.list2 = obs.index
for (i in 1:Ng) {
print(paste("Computing observed enrichment for metabolite set:", i, met.names[i], sep=" "))
metabolite.set = gs[i,gs[i,] != "null"]
metabolite.set2 = vector(length=length(metabolite.set), mode = "numeric")
metabolite.set2 = match(metabolite.set, metabolite.labels)
MSEA.results = MSEA.EnrichmentScore(metabolite.list=metabolite.list2, metabolite.set=metabolite.set2, weighted.score.type=weighted.score.type, correl.vector = obs.s2n)
Obs.ES[i] = MSEA.results$ES
Obs.arg.ES[i] = MSEA.results$arg.ES
Obs.RES[i,] = MSEA.results$RES
Obs.indicator[i,] = MSEA.results$indicator
if (Obs.ES[i] >= 0) { # compute signal strength
tag.frac[i] = sum(Obs.indicator[i,1:Obs.arg.ES[i]])/size.G[i]
metabolite.frac[i] = Obs.arg.ES[i]/N
} else {
tag.frac[i] = sum(Obs.indicator[i, Obs.arg.ES[i]:N])/size.G[i]
metabolite.frac[i] = (N - Obs.arg.ES[i] + 1)/N
}
signal.strength[i] = tag.frac[i] * (1 - metabolite.frac[i]) * (N / (N - size.G[i]))
}
# Compute enrichment for random permutations
phi = matrix(nrow = Ng, ncol = nperm)
phi.norm = matrix(nrow = Ng, ncol = nperm)
obs.phi = matrix(nrow = Ng, ncol = nperm)
if (reshuffling.type == "sample.labels") { # reshuffling phenotype labels
for (i in 1:Ng) {
print(paste("Computing random permutations' enrichment for metabolite set:", i, met.names[i], sep=" "))
metabolite.set = gs[i,gs[i,] != "null"]
metabolite.set2 = vector(length=length(metabolite.set), mode = "numeric")
metabolite.set2 = match(metabolite.set, metabolite.labels)
for (r in 1:nperm) {
metabolite.list2 = order.matrix[,r]
MSEA.results = MSEA.EnrichmentScore2(metabolite.list=metabolite.list2, metabolite.set=metabolite.set2, weighted.score.type=weighted.score.type, correl.vector=correl.matrix[, r])
phi[i, r] = MSEA.results$ES
}
if (fraction < 1.0) { # if resampling then compute ES for all observed rankings
for (r in 1:nperm) {
obs.metabolite.list2 = obs.order.matrix[,r]
MSEA.results = MSEA.EnrichmentScore2(metabolite.list=obs.metabolite.list2, metabolite.set=metabolite.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
obs.phi[i, r] = MSEA.results$ES
}
} else { # if no resampling then compute only one column (and fill the others with the same value)
obs.metabolite.list2 = obs.order.matrix[,1]
MSEA.results = MSEA.EnrichmentScore2(metabolite.list=obs.metabolite.list2, metabolite.set=metabolite.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
obs.phi[i, 1] = MSEA.results$ES
for (r in 2:nperm) {obs.phi[i, r] = obs.phi[i, 1]}
}
gc()
} # if (reshuffling.type == "sample.labels")
} else if (reshuffling.type == "metabolite.labels") { # reshuffling metabolite labels
for (i in 1:Ng) {
metabolite.set = gs[i,gs[i,] != "null"]
metabolite.set2 = vector(length=length(metabolite.set), mode = "numeric")
metabolite.set2 = match(metabolite.set, metabolite.labels)
for (r in 1:nperm) {
reshuffled.metabolite.labels = sample(1:rows)
MSEA.results = MSEA.EnrichmentScore2(metabolite.list=reshuffled.metabolite.labels, metabolite.set=metabolite.set2, weighted.score.type=weighted.score.type, correl.vector=obs.s2n)
phi[i, r] = MSEA.results$ES
}
if (fraction < 1.0) { # if resampling then compute ES for all observed rankings
for (r in 1:nperm) {
obs.metabolite.list2 = obs.order.matrix[,r]
MSEA.results = MSEA.EnrichmentScore2(metabolite.list=obs.metabolite.list2, metabolite.set=metabolite.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
obs.phi[i, r] = MSEA.results$ES
}
} else { # if no resampling then compute only one column (and fill the others with the same value)
obs.metabolite.list2 = obs.order.matrix[,1]
MSEA.results = MSEA.EnrichmentScore2(metabolite.list=obs.metabolite.list2, metabolite.set=metabolite.set2, weighted.score.type=weighted.score.type, correl.vector=obs.correl.matrix[, r])
obs.phi[i, 1] = MSEA.results$ES
for (r in 2:nperm) {obs.phi[i, r] = obs.phi[i, 1]}
}
gc()
}
}
# Compute 3 types of p-values
# Find nominal p-values
print("Computing nominal p-values...")
p.vals = matrix(1, nrow = Ng, ncol = 2)
for (i in 1:Ng) {
pos.phi = NULL
neg.phi = NULL
for (j in 1:nperm) {
if (phi[i, j] >= 0) {
pos.phi = c(pos.phi, phi[i, j])
} else {
neg.phi = c(neg.phi, phi[i, j])
}
}
ES.value = Obs.ES[i]
if (ES.value >= 0) {
p.vals[i, 1] = signif(sum(pos.phi >= ES.value)/length(pos.phi), digits=5)
} else {
p.vals[i, 1] = signif(sum(neg.phi <= ES.value)/length(neg.phi), digits=5)
}
}
# Find effective size
erf = function (x) {2 * pnorm(sqrt(2) * x)}
KS.mean = function(N) { # KS mean as a function of set size N
S = 0
for (k in -100:100) {
if (k == 0) next
S = S + 4 * (-1)**(k + 1) * (0.25 * exp(-2 * k * k * N) - sqrt(2 * pi) * erf(sqrt(2 * N) * k)/(16 * k * sqrt(N)))
}
return(abs(S))
}
# Rescaling normalization for each metabolite set null
print("Computing rescaling normalization for each metabolite set null...")
for (i in 1:Ng) {
pos.phi = NULL
neg.phi = NULL
for (j in 1:nperm) {
if (phi[i, j] >= 0) {
pos.phi = c(pos.phi, phi[i, j])
} else {
neg.phi = c(neg.phi, phi[i, j])
}
}
pos.m = mean(pos.phi)
neg.m = mean(abs(as.numeric(neg.phi)))
pos.phi = pos.phi/pos.m
neg.phi = neg.phi/neg.m
for (j in 1:nperm) {
if (phi[i, j] >= 0) {
phi.norm[i, j] = phi[i, j]/pos.m
} else {
phi.norm[i, j] = phi[i, j]/neg.m
}
}
for (j in 1:nperm) {
if (obs.phi[i, j] >= 0) {
obs.phi.norm[i, j] = obs.phi[i, j]/pos.m
} else {
obs.phi.norm[i, j] = obs.phi[i, j]/neg.m
}
}
if (Obs.ES[i] >= 0) {
Obs.ES.norm[i] = Obs.ES[i]/pos.m
} else {
Obs.ES.norm[i] = Obs.ES[i]/neg.m
}
}
# Compute FWER p-vals
print("Computing FWER p-values...")
max.ES.vals.p = NULL
max.ES.vals.n = NULL
for (j in 1:nperm) {
pos.phi = NULL
neg.phi = NULL
for (i in 1:Ng) {
if (phi.norm[i, j] >= 0) {
pos.phi = c(pos.phi, phi.norm[i, j])
} else {
neg.phi = c(neg.phi, phi.norm[i, j])
}
}
if (length(pos.phi) > 0) {
max.ES.vals.p = c(max.ES.vals.p, max(pos.phi))
}
if (length(neg.phi) > 0) {
max.ES.vals.n = c(max.ES.vals.n, min(neg.phi))
}
}
for (i in 1:Ng) {
ES.value = Obs.ES.norm[i]
if (Obs.ES.norm[i] >= 0) {
p.vals[i, 2] = signif(sum(max.ES.vals.p >= ES.value)/length(max.ES.vals.p), digits=5)
} else {
p.vals[i, 2] = signif(sum(max.ES.vals.n <= ES.value)/length(max.ES.vals.n), digits=5)
}
}
# Compute FDRs
print("Computing FDR q-values...")
NES = vector(length=Ng, mode="numeric")
phi.norm.mean = vector(length=Ng, mode="numeric")
obs.phi.norm.mean = vector(length=Ng, mode="numeric")
phi.norm.median = vector(length=Ng, mode="numeric")
obs.phi.norm.median = vector(length=Ng, mode="numeric")
phi.norm.mean = vector(length=Ng, mode="numeric")
obs.phi.mean = vector(length=Ng, mode="numeric")
FDR.mean = vector(length=Ng, mode="numeric")
FDR.median = vector(length=Ng, mode="numeric")
phi.norm.median.d = vector(length=Ng, mode="numeric")
obs.phi.norm.median.d = vector(length=Ng, mode="numeric")
Obs.ES.index = order(Obs.ES.norm, decreasing=T)
Orig.index = seq(1, Ng)
Orig.index = Orig.index[Obs.ES.index]
Orig.index = order(Orig.index, decreasing=F)
Obs.ES.norm.sorted = Obs.ES.norm[Obs.ES.index]
met.names.sorted = met.names[Obs.ES.index]
for (k in 1:Ng) {
NES[k] = Obs.ES.norm.sorted[k]
ES.value = NES[k]
count.col = vector(length=nperm, mode="numeric")
obs.count.col = vector(length=nperm, mode="numeric")
for (i in 1:nperm) {
phi.vec = phi.norm[,i]
obs.phi.vec = obs.phi.norm[,i]
if (ES.value >= 0) {
count.col.norm = sum(phi.vec >= 0)
obs.count.col.norm = sum(obs.phi.vec >= 0)
count.col[i] = ifelse(count.col.norm > 0, sum(phi.vec >= ES.value)/count.col.norm, 0)
obs.count.col[i] = ifelse(obs.count.col.norm > 0, sum(obs.phi.vec >= ES.value)/obs.count.col.norm, 0)
} else {
count.col.norm = sum(phi.vec < 0)
obs.count.col.norm = sum(obs.phi.vec < 0)
count.col[i] = ifelse(count.col.norm > 0, sum(phi.vec <= ES.value)/count.col.norm, 0)
obs.count.col[i] = ifelse(obs.count.col.norm > 0, sum(obs.phi.vec <= ES.value)/obs.count.col.norm, 0)
}
}
phi.norm.mean[k] = mean(count.col)
obs.phi.norm.mean[k] = mean(obs.count.col)
phi.norm.median[k] = median(count.col)
obs.phi.norm.median[k] = median(obs.count.col)
FDR.mean[k] = ifelse(phi.norm.mean[k]/obs.phi.norm.mean[k] < 1, phi.norm.mean[k]/obs.phi.norm.mean[k], 1)
FDR.median[k] = ifelse(phi.norm.median[k]/obs.phi.norm.median[k] < 1, phi.norm.median[k]/obs.phi.norm.median[k], 1)
}
FDR.mean[which(is.na(FDR.mean))] = 1
FDR.median[which(is.na(FDR.median))] = 1
# adjust q-values
if (adjust.FDR.q.val == T) {
pos.nes = length(NES[NES >= 0])
min.FDR.mean = FDR.mean[pos.nes]
min.FDR.median = FDR.median[pos.nes]
for (k in seq(pos.nes - 1, 1, -1)) {
if (FDR.mean[k] < min.FDR.mean) {min.FDR.mean = FDR.mean[k]}
if (min.FDR.mean < FDR.mean[k]) {FDR.mean[k] = min.FDR.mean}
}
neg.nes = pos.nes + 1
min.FDR.mean = FDR.mean[neg.nes]
min.FDR.median = FDR.median[neg.nes]
for (k in seq(neg.nes + 1, Ng)) {
if (FDR.mean[k] < min.FDR.mean) {min.FDR.mean = FDR.mean[k]}
if (min.FDR.mean < FDR.mean[k]) {FDR.mean[k] = min.FDR.mean}
}
}
obs.phi.norm.mean.sorted = obs.phi.norm.mean[Orig.index]
phi.norm.mean.sorted = phi.norm.mean[Orig.index]
FDR.mean.sorted = FDR.mean[Orig.index]
FDR.median.sorted = FDR.median[Orig.index]
# Compute global statistic
glob.p.vals = vector(length=Ng, mode="numeric")
NULL.pass = vector(length=nperm, mode="numeric")
OBS.pass = vector(length=nperm, mode="numeric")
for (k in 1:Ng) {
NES[k] = Obs.ES.norm.sorted[k]
if (NES[k] >= 0) {
for (i in 1:nperm) {
NULL.pos = sum(phi.norm[,i] >= 0)
NULL.pass[i] = ifelse(NULL.pos > 0, sum(phi.norm[,i] >= NES[k])/NULL.pos, 0)
OBS.pos = sum(obs.phi.norm[,i] >= 0)
OBS.pass[i] = ifelse(OBS.pos > 0, sum(obs.phi.norm[,i] >= NES[k])/OBS.pos, 0)
}
} else {
for (i in 1:nperm) {
NULL.neg = sum(phi.norm[,i] < 0)
NULL.pass[i] = ifelse(NULL.neg > 0, sum(phi.norm[,i] <= NES[k])/NULL.neg, 0)
OBS.neg = sum(obs.phi.norm[,i] < 0)
OBS.pass[i] = ifelse(OBS.neg > 0, sum(obs.phi.norm[,i] <= NES[k])/OBS.neg, 0)
}
}
glob.p.vals[k] = sum(NULL.pass >= mean(OBS.pass))/nperm
}
glob.p.vals.sorted = glob.p.vals[Orig.index]
# Produce results report
print("Producing result tables and plots...")
Obs.ES = signif(Obs.ES, digits=5)
Obs.ES.norm = signif(Obs.ES.norm, digits=5)
p.vals = signif(p.vals, digits=4)
signal.strength = signif(signal.strength, digits=3)
tag.frac = signif(tag.frac, digits=3)
metabolite.frac = signif(metabolite.frac, digits=3)
FDR.mean.sorted = signif(FDR.mean.sorted, digits=5)
FDR.median.sorted = signif(FDR.median.sorted, digits=5)
glob.p.vals.sorted = signif(glob.p.vals.sorted, digits=5)
report = data.frame(cbind(met.names, size.G, all.met.descs, Obs.ES, Obs.ES.norm, p.vals[,1], FDR.mean.sorted, p.vals[,2], tag.frac, metabolite.frac, signal.strength, FDR.median.sorted, glob.p.vals.sorted))
names(report) = c("MS", "SIZE", "SOURCE", "ES", "NES", "NOM p-val", "FDR q-val", "FWER p-val", "Tag %", "Metabolite %", "Signal", "FDR (median)", "glob.p.val")
report2 = report
report.index2 = order(Obs.ES.norm, decreasing=T)
for (i in 1:Ng) {report2[i,] = report[report.index2[i],]}
report3 = report
report.index3 = order(Obs.ES.norm, decreasing=F)
for (i in 1:Ng) {report3[i,] = report[report.index3[i],]}
report.phen1 = report2
report.phen2 = report3
if (output.directory != "") {
filename = paste(output.directory, doc.string, ".SUMMARY.RESULTS.REPORT.", phen1,".txt", sep="", collapse="")
write.table(report.phen1, file = filename, quote=F, row.names=F, sep = "\t")
}
# Global plots
if (output.directory != "") {
glob.filename = paste(output.directory, doc.string, ".global.plots.pdf", sep="", collapse="")
pdf(file=glob.filename, height = 10, width = 10)
}
nf = layout(matrix(c(1,2,3,4), 2, 2, byrow=T), c(1,1), c(1,1), TRUE)
# plot S2N correlation profile
location = 1:N
max.corr = max(obs.s2n)
min.corr = min(obs.s2n)
x = plot(location, obs.s2n, ylab = "Signal to Noise Ratio (S2N)", xlab = "Metabolite List Location", main = "Metabolite List Correlation (S2N) Profile", type = "l", lwd = 2, cex = 0.9, col = 1)
for (i in seq(1, N, 20)) {
lines(c(i, i), c(0, obs.s2n[i]), lwd = 3, cex = 0.9, col = colors()[12]) # shading of correlation plot
}
x = points(location, obs.s2n, type = "l", lwd = 2, cex = 0.9, col = 1)
lines(c(1, N), c(0, 0), lwd = 2, lty = 1, cex = 0.9, col = 1) # zero correlation horizontal line
temp = order(abs(obs.s2n), decreasing=T)
arg.correl = temp[N]
lines(c(arg.correl, arg.correl), c(min.corr, 0.7*max.corr), lwd = 2, lty = 3, cex = 0.9, col = 1) # zero correlation vertical line
area.bias = signif(100*(sum(obs.s2n[1:arg.correl]) + sum(obs.s2n[arg.correl:N]))/sum(abs(obs.s2n[1:N])), digits=3)
area.phen = ifelse(area.bias >= 0, phen1, phen2)
delta.string = paste("Corr. Area Bias to \"", area.phen, "\" =", abs(area.bias), "%", sep="", collapse="")
zero.crossing.string = paste("Zero Crossing at location ", arg.correl, " (", signif(100*arg.correl/N, digits=3), " %)")
leg.txt = c(delta.string, zero.crossing.string)
legend(x=N/10, y=max.corr, bty="n", bg = "white", legend=leg.txt, cex = 0.9)
leg.txt = paste("\"", phen1, "\" ", sep="", collapse="")
text(x=1, y=-0.05*max.corr, adj = c(0, 1), labels=leg.txt, cex = 0.9)
leg.txt = paste("\"", phen2, "\" ", sep="", collapse="")
text(x=N, y=0.05*max.corr, adj = c(1, 0), labels=leg.txt, cex = 0.9)
if (Ng > 1) { # make these plots only if there are multiple metabolite sets.
# compute plots of actual (weighted) null and observed
phi.densities.pos = matrix(0, nrow=512, ncol=nperm)
phi.densities.neg = matrix(0, nrow=512, ncol=nperm)
obs.phi.densities.pos = matrix(0, nrow=512, ncol=nperm)
obs.phi.densities.neg = matrix(0, nrow=512, ncol=nperm)
phi.density.mean.pos = vector(length=512, mode = "numeric")
phi.density.mean.neg = vector(length=512, mode = "numeric")
obs.phi.density.mean.pos = vector(length=512, mode = "numeric")
obs.phi.density.mean.neg = vector(length=512, mode = "numeric")
phi.density.median.pos = vector(length=512, mode = "numeric")
phi.density.median.neg = vector(length=512, mode = "numeric")
obs.phi.density.median.pos = vector(length=512, mode = "numeric")
obs.phi.density.median.neg = vector(length=512, mode = "numeric")
x.coor.pos = vector(length=512, mode = "numeric")
x.coor.neg = vector(length=512, mode = "numeric")
for (i in 1:nperm) {
pos.phi = phi.norm[phi.norm[, i] >= 0, i]
if (length(pos.phi) > 2) {
temp = density(pos.phi, adjust=adjust.param, n = 512, from=0, to=3.5)
} else {
temp = list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
phi.densities.pos[, i] = temp$y
norm.factor = sum(phi.densities.pos[, i])
phi.densities.pos[, i] = phi.densities.pos[, i]/norm.factor
if (i == 1) {
x.coor.pos = temp$x
}
neg.phi = phi.norm[phi.norm[, i] < 0, i]
if (length(neg.phi) > 2) {
temp = density(neg.phi, adjust=adjust.param, n = 512, from=-3.5, to=0)
} else {
temp = list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
phi.densities.neg[, i] = temp$y
norm.factor = sum(phi.densities.neg[, i])
phi.densities.neg[, i] = phi.densities.neg[, i]/norm.factor
if (i == 1) {
x.coor.neg = temp$x
}
pos.phi = obs.phi.norm[obs.phi.norm[, i] >= 0, i]
if (length(pos.phi) > 2) {
temp = density(pos.phi, adjust=adjust.param, n = 512, from=0, to=3.5)
} else {
temp = list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
obs.phi.densities.pos[, i] = temp$y
norm.factor = sum(obs.phi.densities.pos[, i])
obs.phi.densities.pos[, i] = obs.phi.densities.pos[, i]/norm.factor
neg.phi = obs.phi.norm[obs.phi.norm[, i] < 0, i]
if (length(neg.phi)> 2) {
temp = density(neg.phi, adjust=adjust.param, n = 512, from=-3.5, to=0)
} else {
temp = list(x = 3.5*(seq(1, 512) - 1)/512, y = rep(0.001, 512))
}
obs.phi.densities.neg[, i] = temp$y
norm.factor = sum(obs.phi.densities.neg[, i])
obs.phi.densities.neg[, i] = obs.phi.densities.neg[, i]/norm.factor
}
phi.density.mean.pos = apply(phi.densities.pos, 1, mean)
phi.density.mean.neg = apply(phi.densities.neg, 1, mean)
obs.phi.density.mean.pos = apply(obs.phi.densities.pos, 1, mean)
obs.phi.density.mean.neg = apply(obs.phi.densities.neg, 1, mean)
phi.density.median.pos = apply(phi.densities.pos, 1, median)
phi.density.median.neg = apply(phi.densities.neg, 1, median)
obs.phi.density.median.pos = apply(obs.phi.densities.pos, 1, median)
obs.phi.density.median.neg = apply(obs.phi.densities.neg, 1, median)
x = c(x.coor.neg, x.coor.pos)
x.plot.range = range(x)
y1 = c(phi.density.mean.neg, phi.density.mean.pos)
y2 = c(obs.phi.density.mean.neg, obs.phi.density.mean.pos)
y.plot.range = c(-0.3*max(c(y1, y2)), max(c(y1, y2)))
print(c(y.plot.range, max(c(y1, y2)), max(y1), max(y2)))
plot(x, y1, xlim = x.plot.range, ylim = 1.5*y.plot.range, type = "l", lwd = 2, col = 2, xlab = "NES", ylab = "P(NES)", main = "Global Observed and Null Densities (Area Normalized)")
y1.point = y1[seq(1, length(x), 2)]
y2.point = y2[seq(2, length(x), 2)]
x1.point = x[seq(1, length(x), 2)]
x2.point = x[seq(2, length(x), 2)]
points(x, y1, type = "l", lwd = 2, col = colors()[555])
points(x, y2, type = "l", lwd = 2, col = colors()[29])
for (i in 1:Ng) {
col = ifelse(Obs.ES.norm[i] > 0, 2, 3)
lines(c(Obs.ES.norm[i], Obs.ES.norm[i]), c(-0.2*max(c(y1, y2)), 0), lwd = 1, lty = 1, col = 1)
}
leg.txt = paste("Neg. ES: \"", phen2, " \" ", sep="", collapse="")
text(x=x.plot.range[1], y=-0.25*max(c(y1, y2)), adj = c(0, 1), labels=leg.txt, cex = 0.9)
leg.txt = paste(" Pos. ES: \"", phen1, "\" ", sep="", collapse="")
text(x=x.plot.range[2], y=-0.25*max(c(y1, y2)), adj = c(1, 1), labels=leg.txt, cex = 0.9)
leg.txt = c("Null Density", "Observed Density", "Observed NES values")
c.vec = c(colors()[555], colors()[29], 1)
lty.vec = c(1, 1, 1)
lwd.vec = c(2, 2, 2)
legend(x=0, y=1.5*y.plot.range[2], bty="n", bg = "white", legend=leg.txt, lty = lty.vec, lwd = lwd.vec, col = c.vec, cex = 0.9)
B = A[obs.index,]
if (N > 300) {
C = rbind(B[1:100,], rep(0, Ns), rep(0, Ns), B[(floor(N/2) - 50 + 1):(floor(N/2) + 50),], rep(0, Ns), rep(0, Ns), B[(N - 100 + 1):N,])
}
rm(B)
MSEA.HeatMapPlot(V = C, col.labels = class.labels, col.classes = class.phen, main = "Heat Map for Metabolites in Dataset")
# p-vals plot
nom.p.vals = p.vals[Obs.ES.index,1]
FWER.p.vals = p.vals[Obs.ES.index,2]
plot.range = 1.25*range(NES)
plot(NES, FDR.mean, ylim = c(0, 1), xlim = plot.range, col = 1, bg = 1, type="p", pch = 22, cex = 0.75, xlab = "NES", main = "p-values vs. NES", ylab ="p-val/q-val")
points(NES, nom.p.vals, type = "p", col = 2, bg = 2, pch = 22, cex = 0.75)
points(NES, FWER.p.vals, type = "p", col = colors()[577], bg = colors()[577], pch = 22, cex = 0.75)
leg.txt = c("Nominal p-value", "FWER p-value", "FDR q-value")
c.vec = c(2, colors()[577], 1)
pch.vec = c(22, 22, 22)
legend(x=-0.5, y=0.5, bty="n", bg = "white", legend=leg.txt, pch = pch.vec, col = c.vec, pt.bg = c.vec, cex = 0.9)
lines(c(min(NES), max(NES)), c(nom.p.val.threshold, nom.p.val.threshold), lwd = 1, lty = 2, col = 2)
lines(c(min(NES), max(NES)), c(fwer.p.val.threshold, fwer.p.val.threshold), lwd = 1, lty = 2, col = colors()[577])
lines(c(min(NES), max(NES)), c(fdr.q.val.threshold, fdr.q.val.threshold), lwd = 1, lty = 2, col = 1)
dev.off()
} # if Ng > 1
#----------------------------------------------------------------------------
# Produce report for each metabolite set passing the nominal, FWER or FDR test or the top topmet in each side
if (topmet > floor(Ng/2)) {topmet = floor(Ng/2)}
for (i in 1:Ng) {
if ((p.vals[i, 1] <= nom.p.val.threshold) || (p.vals[i, 2] <= fwer.p.val.threshold) ||
(FDR.mean.sorted[i] <= fdr.q.val.threshold) || (is.element(i, c(Obs.ES.index[1:topmet], Obs.ES.index[(Ng - topmet + 1): Ng])))) {
# produce report per metabolite set
kk = 1
metabolite.number = vector(length = size.G[i], mode = "character")
metabolite.names = vector(length = size.G[i], mode = "character")
metabolite.symbols = vector(length = size.G[i], mode = "character")
metabolite.descs = vector(length = size.G[i], mode = "character")
metabolite.list.loc = vector(length = size.G[i], mode = "numeric")
core.enrichment = vector(length = size.G[i], mode = "character")
metabolite.s2n = vector(length = size.G[i], mode = "numeric")
metabolite.RES = vector(length = size.G[i], mode = "numeric")
rank.list = seq(1, N)
if (Obs.ES[i] >= 0) {
set.k = seq(1, N, 1)
phen.tag = phen1
loc = match(i, Obs.ES.index)
} else {
set.k = seq(N, 1, -1)
phen.tag = phen2
loc = Ng - match(i, Obs.ES.index) + 1
}
for (k in set.k) {
if (Obs.indicator[i, k] == 1) {
metabolite.number[kk] = kk
metabolite.names[kk] = obs.metabolite.labels[k]
metabolite.symbols[kk] = substr(obs.metabolite.symbols[k], 1, 15)
metabolite.descs[kk] = substr(obs.metabolite.descs[k], 1, 40)
metabolite.list.loc[kk] = k
metabolite.s2n[kk] = signif(obs.s2n[k], digits=3)
metabolite.RES[kk] = signif(Obs.RES[i, k], digits = 3)
if (Obs.ES[i] >= 0) {
core.enrichment[kk] = ifelse(metabolite.list.loc[kk] <= Obs.arg.ES[i], "YES", "NO")
} else {
core.enrichment[kk] = ifelse(metabolite.list.loc[kk] > Obs.arg.ES[i], "YES", "NO")
}
kk = kk + 1
}
}
metabolite.report = data.frame(cbind(metabolite.number, metabolite.names, metabolite.symbols, metabolite.descs, metabolite.list.loc, metabolite.s2n, metabolite.RES, core.enrichment))
names(metabolite.report) = c("#", "GENE", "SYMBOL", "DESC", "LIST LOC", "S2N", "RES", "CORE_ENRICHMENT")
if (output.directory != "") {
filename = paste(output.directory, doc.string, ".", met.names[i], ".report.", phen.tag, ".", loc, ".txt", sep="", collapse="")
write.table(metabolite.report, file = filename, quote=F, row.names=F, sep = "\t")
met.filename = paste(output.directory, doc.string, ".", met.names[i], ".plot.", phen.tag, ".", loc, ".pdf", sep="", collapse="")
pdf(file=met.filename, height = 6, width = 14)
}
nf = layout(matrix(c(1,2,3), 1, 3, byrow=T), 1, c(1, 1, 1), TRUE)
ind = 1:N
min.RES = min(na.omit(Obs.RES[i,]))
max.RES = max(na.omit(Obs.RES[i,]))
if (max.RES < 0.3) max.RES = 0.3
if (min.RES > -0.3) min.RES = -0.3
delta = (max.RES - min.RES)*0.50
min.plot = min.RES - 2*delta
max.plot = max.RES
max.corr = max(obs.s2n)
min.corr = min(obs.s2n)
Obs.correl.vector.norm = (obs.s2n - min.corr)/(max.corr - min.corr)*1.25*delta + min.plot
zero.corr.line = (- min.corr/(max.corr - min.corr))*1.25*delta + min.plot
col = ifelse(Obs.ES[i] > 0, 2, 4)
# Running enrichment plot
sub.string = paste("Number of metabolites: ", N, " (in list), ", size.G[i], " (in metabolite set)", sep = "", collapse="")
main.string = paste("Metabolite Set ", i, ":", met.names[i])
plot(ind, Obs.RES[i,], main = main.string, sub = sub.string, xlab = "Metabolite List Index", ylab = "Running Enrichment Score (RES)", xlim=c(1, N), ylim=c(min.plot, max.plot), type = "l", lwd = 2, cex = 1, col = col)
for (j in seq(1, N, 20)) {
lines(c(j, j), c(zero.corr.line, Obs.correl.vector.norm[j]), lwd = 1, cex = 1, col = colors()[12]) # shading of correlation plot
}
lines(c(1, N), c(0, 0), lwd = 1, lty = 2, cex = 1, col = 1) # zero RES line
lines(c(Obs.arg.ES[i], Obs.arg.ES[i]), c(min.plot, max.plot), lwd = 1, lty = 3, cex = 1, col = col) # max enrichment vertical line
for (j in 1:N) {
if (Obs.indicator[i, j] == 1) {
lines(c(j, j), c(min.plot + 1.25*delta, min.plot + 1.75*delta), lwd = 1, lty = 1, cex = 1, col = 1) # enrichment tags
}
}
lines(ind, Obs.correl.vector.norm, type = "l", lwd = 1, cex = 1, col = 1)
lines(c(1, N), c(zero.corr.line, zero.corr.line), lwd = 1, lty = 1, cex = 1, col = 1) # zero correlation horizontal line
temp = order(abs(obs.s2n), decreasing=T)
arg.correl = temp[N]
lines(c(arg.correl, arg.correl), c(min.plot, max.plot), lwd = 1, lty = 3, cex = 1, col = 3) # zero crossing correlation vertical line
leg.txt = paste("\"", phen1, "\" ", sep="", collapse="")
text(x=1, y=min.plot, adj = c(0, 0), labels=leg.txt, cex = 1.0)
leg.txt = paste("\"", phen2, "\" ", sep="", collapse="")
text(x=N, y=min.plot, adj = c(1, 0), labels=leg.txt, cex = 1.0)
adjx = ifelse(Obs.ES[i] > 0, 0, 1)
leg.txt = paste("Peak at ", Obs.arg.ES[i], sep="", collapse="")
text(x=Obs.arg.ES[i], y=min.plot + 1.8*delta, adj = c(adjx, 0), labels=leg.txt, cex = 1.0)
leg.txt = paste("Zero crossing at ", arg.correl, sep="", collapse="")
text(x=arg.correl, y=min.plot + 1.95*delta, adj = c(adjx, 0), labels=leg.txt, cex = 1.0)
# nominal p-val histogram
sub.string = paste("ES =", signif(Obs.ES[i], digits = 3), " NES =", signif(Obs.ES.norm[i], digits=3), "Nom. p-val=", signif(p.vals[i, 1], digits = 3),"FWER=", signif(p.vals[i, 2], digits = 3), "FDR=", signif(FDR.mean.sorted[i], digits = 3))
temp = density(phi[i,], adjust=adjust.param)
x.plot.range = range(temp$x)
y.plot.range = c(-0.125*max(temp$y), 1.5*max(temp$y))
plot(temp$x, temp$y, type = "l", sub = sub.string, xlim = x.plot.range, ylim = y.plot.range, lwd = 2, col = 2, main = "Metabolite Set Null Distribution", xlab = "ES", ylab="P(ES)")
x.loc = which.min(abs(temp$x - Obs.ES[i]))
lines(c(Obs.ES[i], Obs.ES[i]), c(0, temp$y[x.loc]), lwd = 2, lty = 1, cex = 1, col = 1)
lines(x.plot.range, c(0, 0), lwd = 1, lty = 1, cex = 1, col = 1)
leg.txt = c("Metabolite Set Null Density", "Observed Metabolite Set ES value")
c.vec = c(2, 1)
lty.vec = c(1, 1)
lwd.vec = c(2, 2)
legend(x=-0.2, y=y.plot.range[2], bty="n", bg = "white", legend=leg.txt, lty = lty.vec, lwd = lwd.vec, col = c.vec, cex = 1.0)
leg.txt = paste("Neg. ES \"", phen2, "\" ", sep="", collapse="")
text(x=x.plot.range[1], y=-0.1*max(temp$y), adj = c(0, 0), labels=leg.txt, cex = 1.0)
leg.txt = paste(" Pos. ES: \"", phen1, "\" ", sep="", collapse="")
text(x=x.plot.range[2], y=-0.1*max(temp$y), adj = c(1, 0), labels=leg.txt, cex = 1.0)
# create pinkogram for each metabolite set
kk = 1
pinko = matrix(0, nrow = size.G[i], ncol = cols)
pinko.metabolite.names = vector(length = size.G[i], mode = "character")
for (k in 1:rows) {
if (Obs.indicator[i, k] == 1) {
pinko[kk,] = A[obs.index[k],]
pinko.metabolite.names[kk] = obs.metabolite.symbols[k]
kk = kk + 1
}
}
MSEA.HeatMapPlot(V = pinko, row.names = pinko.metabolite.names, col.labels = class.labels, col.classes = class.phen, col.names = sample.names, main =" Heat Map for Metabolites in Metabolite Set", xlab=" ", ylab=" ")
dev.off()
} # if p.vals thres
} # loop over metabolite sets
return(list(report1 = report.phen1, report2 = report.phen2))
} # end of definition of MSEA.analysis
|
{"hexsha": "00d94b9c943192dc2b4bf36eed035e3158e60af1", "size": 66659, "ext": "r", "lang": "R", "max_stars_repo_path": "R/stats.getMSEA_Metabolon.r", "max_stars_repo_name": "areyoujokingme/CTD", "max_stars_repo_head_hexsha": "de702c01f5f69cce287f201fe5f233a30f4d7aae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/stats.getMSEA_Metabolon.r", "max_issues_repo_name": "areyoujokingme/CTD", "max_issues_repo_head_hexsha": "de702c01f5f69cce287f201fe5f233a30f4d7aae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/stats.getMSEA_Metabolon.r", "max_forks_repo_name": "areyoujokingme/CTD", "max_forks_repo_head_hexsha": "de702c01f5f69cce287f201fe5f233a30f4d7aae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.2423812899, "max_line_length": 246, "alphanum_fraction": 0.616990954, "num_tokens": 21752}
|
Horner <- function(Coef_Polinomio, x0) {
  # Evaluate a polynomial and its first derivative at x0 using Horner's
  # scheme (synthetic division).
  #
  # Args:
  #   Coef_Polinomio: numeric vector of coefficients in increasing degree
  #     order, i.e. c(a0, a1, ..., an) for a0 + a1*x + ... + an*x^n.
  #   x0: point at which to evaluate.
  #
  # Prints P(x0) and P'(x0), and invisibly returns list(P = ..., dP = ...)
  # so the values are also usable programmatically (backward compatible:
  # the previous version returned nothing useful from cat()).
  TerminoInd <- Coef_Polinomio[1]        # constant term a0
  Coef_Polinomio <- Coef_Polinomio[-1]   # remaining coefficients a1..an
  if (length(Coef_Polinomio) == 0) {
    # Degenerate case: constant polynomial. The original code produced
    # numeric(0) here; handle it explicitly.
    P_x0 <- TerminoInd
    P1_x0 <- 0
  } else {
    # Seed both accumulators with the leading coefficient an.
    P_x0 <- Coef_Polinomio[length(Coef_Polinomio)]
    P1_x0 <- Coef_Polinomio[length(Coef_Polinomio)]
    j <- length(Coef_Polinomio) - 1
    # Horner recurrence b_k = a_k + x0*b_{k+1} for the value; applying the
    # same recurrence to the freshly updated b's accumulates the derivative.
    while (j > 0) {
      P_x0 <- x0*P_x0 + Coef_Polinomio[j]
      P1_x0 <- x0*P1_x0 + P_x0
      j <- j - 1
    }
    # Fold in the constant term for P(x0); it vanishes in the derivative.
    P_x0 <- (x0*P_x0) + TerminoInd
  }
  cat(paste("P(", x0, ") =", P_x0,
            "\nP'(", x0, ") =", P1_x0))
  invisible(list(P = P_x0, dP = P1_x0))
}
#Se imprimen los resultados.
#Siendo P_x0 el valor del polinomio en x0 y
#P1_x0 el valor de la derivada en x0.
HornerC <- function(Coef_Polinomio, x0) {
  # Complex-coefficient variant of Horner(): evaluate a polynomial and its
  # first derivative at x0 (possibly complex) with Horner's rule.
  #
  # Coef_Polinomio: coefficients in ascending order of power; they are
  #   coerced to complex so complex roots/points can be handled.
  # x0: the evaluation point (real or complex).
  #
  # Prints P(x0) and P'(x0), and invisibly returns list(P =, dP =).
  Coef_Polinomio <- as.complex(Coef_Polinomio)
  TerminoInd <- Coef_Polinomio[1]
  Coef_Polinomio <- Coef_Polinomio[-1]
  if (length(Coef_Polinomio) == 0) {
    # Constant polynomial: P(x0) = a0 and P'(x0) = 0.  (The original code
    # produced an empty result for this input.)
    P_x0 <- TerminoInd
    P1_x0 <- as.complex(0)
  } else {
    P_x0 <- Coef_Polinomio[length(Coef_Polinomio)]
    P1_x0 <- Coef_Polinomio[length(Coef_Polinomio)]
    j <- length(Coef_Polinomio) - 1
    # Combined Horner recurrence: value first, then derivative using the
    # freshly updated value (dP = x0*dP + P).
    while (j > 0) {
      P_x0 <- x0*P_x0 + Coef_Polinomio[j]
      P1_x0 <- x0*P1_x0 + P_x0
      j <- j - 1
    }
    # The constant term enters the value only, not the derivative.
    P_x0 <- (x0*P_x0) + TerminoInd
  }
  cat(paste("P(", x0, ") =", P_x0,
            "\nP'(", x0, ") =", P1_x0))
  invisible(list(P = P_x0, dP = P1_x0))
}
# Make the 'signal' package (for polyval) available without unconditionally
# re-installing it on every run of the script.
if (!requireNamespace("signal", quietly = TRUE)) {
  install.packages("signal")
}
library(signal)
# Global counter of "principal" operations, reported by Taylor().
# NOTE(review): the functions below increment this with '<-', which creates
# a local copy inside each function; increments made in helper functions are
# therefore not accumulated in this global — confirm whether '<<-' was
# intended.
Operaciones <- 0
# Seno_Taylor: coefficients (ascending powers, constant term first) of the
# Taylor polynomial of sin(x) truncated at degree 2n+1:
#   0, 1, 0, -1/6, 0, 1/120, ...
# Each step appends a zero even-power coefficient followed by the odd-power
# coefficient (-1)^i / (2i+1)!.
Seno_Taylor <- function(n) {
  coeficientes <- c()
  i <- 0
  while (i <= n) {
    coeficientes <- c(coeficientes, 0, ((-1)^i)/factorial(2*i + 1))
    Operaciones <- Operaciones + 1
    i <- i + 1
  }
  return(coeficientes)
}
Norm <- function(vect1, vect2) {
  # Relative difference between two vectors of function values sampled at
  # the same points: sum(|v1 - v2|) / sum(|v1|).  Returns 0 when the
  # vectors are identical and grows with their discrepancy; used as the
  # convergence measure in Taylor().
  #
  # BUG FIX(review): the original returned (ratio - 1), which is negative
  # for any close pair of vectors (identical inputs scored -1), making the
  # convergence test in Taylor() succeed immediately regardless of the
  # requested tolerance.  The stray "- 1" has been removed.
  error_relativo <- 0
  err <- 0
  for (i in 1:length(vect1)) {
    error_relativo <- error_relativo + abs(vect1[i] - vect2[i])
    Operaciones <- Operaciones + 1
    err <- err + abs(vect1[i])
    Operaciones <- Operaciones + 1
  }
  return(error_relativo/err)
}
Taylor <- function(error_minimo){
  # Find, by successive refinement, the Taylor polynomial of sin(x) whose
  # relative change between consecutive degrees falls below 'error_minimo'
  # on the interval [-pi/64, pi/64].  Prints progress, the accepted
  # coefficients, and an operation count.  Relies on the global counter
  # 'Operaciones' and on Seno_Taylor(), Norm() and signal::polyval().
  test_val <- c()
  for(i in seq(from=-pi/64,to=pi/64,by=0.001)){
    test_val <- c(i,test_val)
  }
  # Sample points covering the interval of interest (built in reverse
  # order, which is harmless since they are only used for evaluation).
  iteraciones <- 0
  n <- 0
  e_rel <- 1
  # Initialise: iteration counter at 0, Taylor degree index at 0, and the
  # relative error at 1 so the while loop below runs at least once.
  cat("Iteracion =",iteraciones,"\n")
  # Report the iteration the code is on.
  Aprox_Pol <- Seno_Taylor(n)
  Pol_Ant <- polyval(Aprox_Pol[length(Aprox_Pol):1],test_val)
  # Start with the lowest-order approximation and evaluate it at the
  # sample points (polyval expects coefficients in descending order,
  # hence the reversal).
  Operaciones <- Operaciones + 1
  while(e_rel>error_minimo){
    n <- n+1
    Operaciones <- Operaciones + 1
    Aprox_Pol <- Seno_Taylor(n)
    Pol_Act <- polyval(Aprox_Pol[length(Aprox_Pol):1],test_val)
    # polyval evaluates the sample points on the polynomial, given its
    # coefficients in descending order.
    Operaciones <- Operaciones + 1
    e_rel <- Norm(Pol_Ant,Pol_Act)
    iteraciones <- iteraciones+1
    cat("Iteracion =",iteraciones,"\n")
    cat("Error Relativo =",e_rel,"\n")
    Pol_Ant <- Pol_Act
  }
  # Taylor refinement loop: each pass raises the polynomial degree index
  # by one, evaluates the sample points, and compares with the previous
  # values under the norm defined above, until the error drops below the
  # user-supplied threshold.
  # NOTE(review): increments of 'Operaciones' here act on a local copy
  # seeded from the global (R's '<-' does not write the global), so the
  # count printed below excludes work done inside Seno_Taylor()/Norm().
  cat("Polinomio =",Aprox_Pol,"\n")
  cat("Operaciones =",Operaciones,"\n")
  # Print the coefficients of the polynomial that best approximates the
  # function on the given interval, and the number of principal
  # operations performed.
}
# Ensure the packages needed below are available without re-installing on
# every run: 'signal' provides polyval() and 'limSolve' provides Solve().
if (!requireNamespace("signal", quietly = TRUE)) {
  install.packages("signal")
}
if (!requireNamespace("limSolve", quietly = TRUE)) {
  install.packages("limSolve")
}
library(signal)
library(limSolve)
Norma_f <- function(p) {
  # Approximate the sup-norm of the error sin(x) - P(x) on the interval
  # [-pi/64, pi/64] by sampling a fine grid.
  #
  # p: polynomial coefficients in ascending order (constant term first);
  #    evaluated via signal::polyval, which expects descending order,
  #    hence the reversal below.
  # Returns c(max_error, x_at_max): the largest sampled deviation and the
  # grid point at which it occurs — the quantities the Remez exchange step
  # needs.
  #
  # BUG FIX(review): the original tested `y < inf` with `inf` initialised
  # to 0, a condition an absolute value can never satisfy, so the function
  # always returned c(0, 0).  A sup-norm is a MAXIMUM search, so the
  # comparison must be `>`.
  sup <- 0
  ans <- c(0, 0)
  for (i in seq(from = -pi/64, to = pi/64, by = 0.001)) {
    y <- abs(sin(i) - polyval(p[length(p):1], i))
    Operaciones <- Operaciones + 1
    if (y > sup) {
      sup <- y
      ans[1] <- sup
      ans[2] <- i
    }
  }
  return(ans)
}
Remez <- function(n,error_min){
  # Remez exchange algorithm for a best polynomial approximation of sin(x)
  # on [-pi/64, pi/64].  'error_min' is the stopping tolerance (the epsilon
  # of the method); 'n' controls the number of sample points (n+2) used for
  # the linear systems.  Depends on Norma_f() and on signal::polyval() /
  # limSolve::Solve().
  Iteraciones <- 0
  Operaciones <- 0
  Ptos_Muestra <- c()
  # USE UNIFORMLY SPACED SAMPLE POINTS:
  dx = (pi/32)/(n+1)
  for(x in seq(from=-pi/64,to=pi/64,by=dx)){
    Ptos_Muestra <- c(Ptos_Muestra,x)
  }
  # ALTERNATIVE: RANDOMLY CHOSEN SAMPLE POINTS
  #Ptos_Muestra <- runif((n+2),min=-pi/64,max=pi/64)
  # n+2 points distributed uniformly over the interval of interest.
  while(TRUE){
    cat("Iteracion =",Iteraciones,"\n")
    A <- matrix(Ptos_Muestra,n+2,n+2)
    for(i in seq(from=1,to=n+2,by=1)){
      A[i,1] <- 1
      A[i,n+2] <- (-1)**i
    }
    for(i in seq(from=1,to=n+2,by=1)){
      for(j in seq(from=1,to=n+2,by=1)){
        if(j!=1 && j!=(n+2)){
          A[i,j] <- A[i,j]^j
        }
      }
    }
    # Build the matrix encoding the linear system: column 1 is the
    # constant term, the last column holds the alternating-sign levelled
    # error, and the middle columns are powers of the sample points
    # (matrix() recycled the points down each column, so A[i,j] starts
    # as x_i and A[i,j]^j yields x_i^j).
    # NOTE(review): the exponent is j rather than j-1, so the fitted
    # basis is 1, x^2, ..., x^(n+1) and skips x^1, while the result is
    # later evaluated as ascending coefficients starting at x^0 —
    # confirm whether A[i,j]^(j-1) was intended.
    b <- c()
    for(i in Ptos_Muestra){
      b <- rbind(b,sin(i))
    }
    # Right-hand side of the linear system; here the target function
    # "f" is sine.
    B <- Solve(A,b)
    # Solve the linear system Ax = b for coefficients and error term.
    Operaciones <- Operaciones + 1
    Pol <- B[0:n+1]
    e <- B[n+2]
    # Split the solution into polynomial coefficients and the levelled
    # error.  NOTE(review): '0:n+1' parses as (0:n)+1 = 1:(n+1) because
    # ':' binds tighter than '+'; B[1:(n+1)] would be clearer.
    norma <- Norma_f(Pol)
    # Estimate the sup-norm between the polynomial and sine.
    cat("Error =",abs(norma[1]-abs(e)),"\n")
    if(abs((norma[1]-abs(e)))<=error_min){
      return(Pol)
      # Stopping condition: the observed maximum deviation matches the
      # levelled error within tolerance.
    }else{
      E = norma[2]
      for(j in 1:length(Ptos_Muestra)){
        if(E<=Ptos_Muestra[j]){
          Ptos_Muestra[j] <- E
          Iteraciones <- Iteraciones + 1
          break
          # Exchange step: the point where the deviation is largest
          # replaces the first sample point >= it, keeping n+2 points.
        }
      }
    }
  }
}
|
{"hexsha": "1d49cbbb7116c9f493ee084b7523b866154a4a19", "size": 7487, "ext": "r", "lang": "R", "max_stars_repo_path": "Reto_1_Analisis.R.r", "max_stars_repo_name": "juanmendezp/Analisis-Numerico", "max_stars_repo_head_hexsha": "800851f1cfe8ce84945be8eb8ebad297f317cd6b", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-31T13:34:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-31T13:34:22.000Z", "max_issues_repo_path": "Reto_1_Analisis.R.r", "max_issues_repo_name": "GabrielGomez9898/Analisis-Numerico", "max_issues_repo_head_hexsha": "b1bd6b5ccfd1d0102863371fbf09d4544a328105", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Reto_1_Analisis.R.r", "max_forks_repo_name": "GabrielGomez9898/Analisis-Numerico", "max_forks_repo_head_hexsha": "b1bd6b5ccfd1d0102863371fbf09d4544a328105", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6844262295, "max_line_length": 68, "alphanum_fraction": 0.6542006144, "num_tokens": 2558}
|
[STATEMENT]
lemma (in aGroup) nt_mult_assoc:"sfg A a \<Longrightarrow> m\<triangleright>n\<triangleright>a\<^bsub>A\<^esub>\<^bsub>A\<^esub> = (m * n)\<triangleright>a\<^bsub>A\<^esub>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sfg A a \<Longrightarrow> m\<triangleright>n\<triangleright>a\<^bsub>A\<^esub>\<^bsub>A\<^esub> = (m * n)\<triangleright>a\<^bsub>A\<^esub>
[PROOF STEP]
apply (frule sfg_G_inc_a)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>sfg A a; a \<in> carrier A\<rbrakk> \<Longrightarrow> m\<triangleright>n\<triangleright>a\<^bsub>A\<^esub>\<^bsub>A\<^esub> = (m * n)\<triangleright>a\<^bsub>A\<^esub>
[PROOF STEP]
apply (frule sfg_agroup )
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>sfg A a; a \<in> carrier A; aGroup A\<rbrakk> \<Longrightarrow> m\<triangleright>n\<triangleright>a\<^bsub>A\<^esub>\<^bsub>A\<^esub> = (m * n)\<triangleright>a\<^bsub>A\<^esub>
[PROOF STEP]
apply (simp add:nt_mult_assoc0)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 468, "file": "Group-Ring-Module_Algebra9", "length": 4}
|
\documentclass[a4paper,titlepage,openany]{article}
\usepackage{epsfig,amsmath,pifont,moreverb,multirow,multicol}
%\usepackage[scaled=.92]{helvet}
%\usepackage{newcent}
%\usepackage{bookman}
%\usepackage{utopia}
%\usepackage{avant}
%\usepackage{charter}
%\usepackage{mathpazo}
\renewcommand{\familydefault}{\sfdefault}
\usepackage[colorlinks=true,
pdfpagemode=UseOutlines,
pdftitle={SPM12 Release Notes},
pdfauthor={The SPM Developers},
pdfsubject={Statistical Parametric Mapping},
pdfkeywords={neuroimaging, MRI, PET, EEG, MEG, SPM}
]{hyperref}
\pagestyle{headings}
\bibliographystyle{plain}
\hoffset=15mm
\voffset=-5mm
\oddsidemargin=0mm
\evensidemargin=0mm
\topmargin=0mm
\headheight=12pt
\headsep=10mm
\textheight=240mm
\textwidth=148mm
\marginparsep=5mm
\marginparwidth=21mm
\footskip=10mm
\parindent 0pt
\parskip 6pt
\newcommand{\matlab}{\textsc{MATLAB}}
\begin{document}
\let\oldlabel=\label
\renewcommand{\label}[1]{
{\pdfdest name {#1} fit}
\oldlabel{#1}
}
\newlength{\centeroffset}
\setlength{\centeroffset}{-0.5\oddsidemargin}
\addtolength{\centeroffset}{0.5\evensidemargin}
%\addtolength{\textwidth}{-\centeroffset}
\thispagestyle{empty}
\vspace*{\stretch{1}}
\noindent\hspace*{\centeroffset}\makebox[0pt][l]{
\begin{minipage}{\textwidth}
\flushright
\textbf{\Huge{SPM12 Release Notes}}
{\noindent\rule[-1ex]{\textwidth}{5pt}\\[2.5ex]}
\hfill{{\huge The FIL Methods Group} \\ {\LARGE (and honorary members)}\\}
%\vspace{20mm}
\end{minipage}
}
%\vspace{\stretch{2}}
\noindent\hspace*{\centeroffset}\makebox[0pt][l]{
\begin{minipage}{\textwidth}
\flushright
{\footnotesize
Functional Imaging Laboratory\\
Wellcome Trust Centre for Neuroimaging\\
Institute of Neurology, UCL\\
12 Queen Square, London WC1N 3BG, UK\\
\today\\
\url{http://www.fil.ion.ucl.ac.uk/spm/}}
\end{minipage}}
\vspace{10mm}
This is SPM12\footnote{\url{http://www.fil.ion.ucl.ac.uk/spm/software/spm12/}}, a major update to the SPM software, containing substantial theoretical, algorithmic, structural and interface enhancements over previous versions, as detailed in this document.
Although SPM12 will read image files from previous versions of SPM, there are differences in the algorithms, templates and models used. Therefore, we recommend you use a single SPM version for any given project.
We would like to warmly thank everyone for testing, reporting bugs and giving feedback on the beta version.
We are always interested to hear feedback and comments from SPM users - please contact us at \href{mailto:fil.spm@ucl.ac.uk}{fil.spm@ucl.ac.uk}.
If you happen to find any bugs, please report them at the same email address. Thank you!
SPM is free but copyright software, distributed under the terms of the GNU General Public Licence as published by the Free Software Foundation (either version 2, as given in file \texttt{LICENCE.txt}, or at your option, any later version)\footnote{\url{http://www.gnu.org/copyleft/}}.
SPM is developed under the auspices of Functional Imaging Laboratory (FIL), The Wellcome Trust Centre for NeuroImaging, in the Institute of Neurology at University College London (UCL), UK.
\vspace{10mm}
%\begin{multicols}{2}
\section{Software}
Updates will be made available from time to time at the following address and advertised on the SPM mailing list:
\qquad \url{http://www.fil.ion.ucl.ac.uk/spm/download/spm12_updates/}
A function called \texttt{spm\_update.m} can be used to detect and install updates when available. It is also accessible from the Help menu of the Graphics window.
As in previous versions of SPM, the file \texttt{spm\_defaults.m} contains a list of defaults values used by the software. If you want to customise some defaults for your installation, we recommend you do not modify this file but rather create a file named \texttt{spm\_my\_defaults.m}, accessible from MATLAB search path; it will automatically be picked up and used by SPM to override some default settings.
\subsection{File formats}
SPM12 uses \href{http://nifti.nimh.nih.gov/nifti-1}{NIfTI-1} and \href{http://www.nitrc.org/projects/gifti/}{GIfTI} file formats for volumetric and surface-based data. The default format for images is now single file \texttt{.nii} instead of pair of files \texttt{.hdr/.img}. There is also preliminary support for \href{http://nifti.nimh.nih.gov/nifti-2}{NIfTI-2}.
\subsection{Batch interface}
New folder \texttt{batches} contains a number of ready-made batches (i.e. pipelines). They automatically configure the batch interface to execute preprocessings for fMRI or VBM analyses, as well as first or second level statistical analyses, using consensual settings. To use them, just load them from the batch interface as you would do with any other batch file you may have saved.
\subsection{MATLAB}
SPM12 is designed to work with MATLAB versions 7.4 (R2007a) to 8.3 (R2014a), and will not work with earlier versions. It is R2014b Graphics Ready and only requires core MATLAB to run (i.e. no toolboxes). It will be updated to handle future versions of MATLAB whenever necessary, until the release of the next major version of SPM.
A standalone version of SPM12 (compiled with the MATLAB Compiler) is also available\footnote{\url{http://en.wikibooks.org/wiki/SPM/Standalone}} -- it does not require a MATLAB licence and can be deployed royalty-free. It might therefore be particularly useful for teaching or usage on a computer cluster. However, it is not possible to read or modify the source code, use your own functions or install third-party toolboxes.
There is work in progress for compatibility with GNU Octave\footnote{\url{http://www.octave.org/}}. See ways you can contribute on the SPM wiki\footnote{\url{http://en.wikibooks.org/wiki/SPM/Octave}}.
\section{Temporal processing}
\subsection{Slice timing correction}
The slice timing correction implementation was extended by adding the option to use slice acquisition times instead of slice orders and a reference time instead of a reference slice. In particular, this makes it possible to handle datasets acquired using multi-band sequences. This work has been contributed by A. Hoffmann, M. Woletz and C. Windischberger from Medical University of Vienna, Austria \cite{woletz2014st}.
\section{Spatial processing}
There have been changes to much of the functionality for spatially transforming images -- particularly with respect to inter-subject registration.
This is a small step towards reducing SPM to a more manageable size \cite{ashburner2011spm}.
\subsection{Normalise}
Spatial normalisation is no longer based on minimising the mean squared difference between a template and a warped version of the image.
Instead, it is now done via segmentation \cite{ashburner05}, as this provides more flexibility.
For those of you who preferred the older way of spatially normalising images, this is still available via the ``Old Normalise'' Tool.
However, the aim is to try to simplify SPM and eventually remove the older and less effective \cite{klein_evaluation} routines.
Deformation fields are now saved in a form that allows much more precise alignment.
Rather than the old sn.mat format, they are now saved as y\_$*$.nii files, which contain three image volumes encoding the x, y and z coordinates (in mm) of where each voxel maps to.
Note that for spatially normalising PET, SPECT and other images that have spatially correlated noise, it is a good idea to change the smoothness setting on the user interface (from 0 to about 5 mm).
\subsection{Segment}
The default segmentation has now been replaced by a slightly modified version of what was unimaginatively called ``New Segment'' in SPM8.
For those of you who preferred the older way of segmenting images, this is still available via the ``Old Segment'' Tool.
The aim, however, is to try to simplify SPM and eventually remove the older functionality that works less well.
Both implementations are based on the algorithm presented in \cite{ashburner05}, although the newer version makes use of additional tissue classes, allows multi-channel segmentation (of eg T2-weighted and PD-weighted images), and incorporates a more flexible image registration component.
Changes to the SPM8 version of ``New Segment'' include different regularisation for the deformations, some different default settings, as well as re-introducing the re-scaling of the tissue probability maps (which was in the old segment, but not the new). In addition, the tissue probability maps were re-generated using the T2-weighted and PD-weighted scans from the IXI dataset\footnote{\url{http://www.brain-development.org/}}.
This was initially done in an automated way (by enabling a hidden feature in \texttt{spm\_preproc\_run.m}, which allows the necessary sufficient statistics for re-generating the templates to be computed), with some manual editing of the results to tidy them up.
Note that eyeballs are now included within the same class as CSF.
Separating eyeballs from other non-brain tissue allows the nonlinear registration part to be made more flexible, but the disadvantage is that intra-cranial volumes are now fractionally more difficult to compute.
However, the cleanup step (re-introduced from the old segmentation routine, and extended slightly) should allow eyeballs to be removed from the fluid tissue class.
\section{Atlas}
Maximum probability tissue labels derived from the ``MICCAI 2012 Grand Challenge and Workshop on Multi-Atlas Labeling''\footnote{\url{https://masi.vuse.vanderbilt.edu/workshop2012/index.php/Challenge_Details}} are available in files \texttt{tpm/labels\_Neuromorphometrics.\{nii,xml\}}.
These data were released under the Creative Commons Attribution-NonCommercial (CC BY-NC) with no end date. Users should credit the MRI scans as originating from the OASIS project\footnote{\url{http://www.oasis-brains.org/}} and the labeled data as "provided by Neuromorphometrics, Inc.\footnote{\url{http://neuromorphometrics.com/}} under academic subscription". These references should be included in all workshop and final publications.
See \texttt{spm\_templates.man} for more details about the generation of this file.
\section{fMRI Statistics}
\subsection{Neuroimaging Data Model (NIDM)}
The Neuroimaging Data Model (NIDM)\footnote{\url{http://nidm.nidash.org/}} is a collection of specification documents that define extensions of the W3C PROV standard\footnote{\url{http://www.w3.org/TR/prov-primer/}} for the domain of human brain mapping.
An implementation of the export of brain mapping statistical results using NIDM-Results \cite{Maumet2014} is available from the batch editor in SPM \textgreater\ Stats \textgreater\ Results Report\ \textgreater\ Print results \textgreater\ NIDM.
\subsection{Canonical Variates Analysis}
SPM's Canonical Variates Analysis (CVA) function, \texttt{spm\_cva.m}, has been updated so that it also computes Log Bayes Factors to make inferences about the number of significantly non-zero canonical vectors. It computes AIC and BIC approximations to these Log Bayes Factors. These quantities can be used in second-level analysis to make inferences about a group of subjects/sessions using the SPM's random effects model selection tool (\texttt{spm\_BMS.m}).
\subsection{Regional Bayesian model comparison - first level}
The function \texttt{spm\_vb\_regionF.m} now provides approximation of the log model evidence for a first level fMRI model. If you call this function multiple times with different design matrices it will allow you to implement Bayesian model comparison. This is useful for example for comparing non-nested GLMs with, for example, different (sets of) parametric modulators. The approach can be applied to first level fMRI data from a local region. This is the recommended method for Bayesian comparison of General Linear Models of first level fMRI data.
\subsection{Interactive Bayesian model comparison - second level}
Multidimensional (or two-sided) inferences about GLM parameters are now implemented using Savage-Dickey ratios. When you specify a multidimensional contrast for a GLM that has been estimated using the Bayesian option, SPM will produce log Bayes factor maps (in favour of the alternative versus the null model). This provides a Bayesian analogue of the F-test and allows one to implement Bayesian model comparisons in a truly interactive manner.
This is the recommended method for Bayesian model comparison of second level data (fMRI or MEG/EEG). If you wish to create log Bayes factor maps in favour of the {\em null} model, you can use the MATLAB command line function \texttt{spm\_bms\_test\_null.m}.
Operationally, Bayesian inferences at the second level are implemented by first specifying a model, and then estimating it under the 'classical' option. You must then re-estimate the model under the 'Bayesian' option. Subsequent to this, when you are specifying new contrasts in the contrast manager you can have some contrasts that are Bayesian ('t' will provide inference about effect sizes, 'F' will use the Savage-Dickey approach), and others that are classical. The first time you specify a contrast you decide which it is to be.
\subsection{Mixed-effects models}
The Mixed-Effects (MFX) approach described in \cite{karl_mixed} and implemented in \texttt{spm\_mfx.m} is now available via the batch interface. A new chapter in the SPM manual is devoted to this new option. We do envisage, however, that the great majority of random-effects inferences will be implemented using the usual summary statistic (SS) approach (creating contrast images for SPM analysis of single subject data, and then entering these images as data into a group analysis). This is because the SS approach is highly computationally efficient and only becomes suboptimal when the within-subject variances differ by an order or magnitude (which could be caused eg. by one subject having an order of magnitude more events than another).
\subsection{Set-level threshold-free tests}
Typically in order to make inference on set level features (like the number of peaks) one has to make an arbitrary choice of threshold (to decide what constitutes a peak). This routine requires no arbitrary feature-defining threshold but is nevertheless sensitive to distributed or spatially extended patterns. It is based on comparing the intrinsic volumes (or Lipschitz-Killing curvature (LKC)) of the residual images with those of the test image \cite{Barnes2013}.
In brief, we make an estimate the LKC for every residual field (which are assumed to be realisations of Gaussian random fields) and for the final SPM (which \emph{under the null hypothesis} is assumed to be a realisation of a central random field of F or t statistics). Under the null hypothesis these two measures of intrinsic volume should be the same, and so comparing these two sets of curvatures gives a direct and threshold free test of whether the final SPM deviates from the null hypothesis. In other words, instead of using the LKC to assess the significance of an excursion set (like the number of maxima above a threshold), we assess the significance of the LKC measure per se, and evaluate its null distribution using the residual images that have the same intrinsic volume but contain no treatment effect. Intuitively, we assess whether the numbers of peaks in the statistical field (testing for signal) and the residual fields (noise) are consistent or not. For example, if the SPM contains more maxima than it should under the null hypothesis it would appear to have a greater intrinsic volume.
This option is available from the batch editor in SPM \textgreater\ Stats \textgreater\ Set Level test.
\subsection{DCM}
\subsubsection{Reparametrisation of the bilinear model}
The bilinear model used by (single-state) DCM for fMRI has been upgraded. To summarise, a number of people have observed that Bayesian parameter averages (BPA) of self (intrinsic) connections can be positive, despite being negative for each subject or session. This is perfectly plausible behaviour -- in the sense that their prior expectation is $-1/2$ and each subject can provide evidence that the posterior is less than the prior mean (but still negative). When this evidence is accumulated by BPA, the posterior can be `pushed' into positive values. The anomalous aspect of this behaviour rests on the fact that there is no negativity constraint on the self-connections. In the revised code, the self-connections -- in the leading diagonal of the $A$ matrix -- now specify log scaling parameters. This means that these (and only these) parameters encode a self-connections of $-1/2*exp(A)$; where $A$ has a prior mean of $0$ and $-1/2*exp(0) = -1/2$.
This re-parameterisation does not affect model inversion very much but guarantees the BPA is always negative because $-1/2*exp(A) < 0$ has to be less than one. The parameterisation of connection strengths in terms of log scale parameters is already used by two-state models for fMRI and all EEG (and MEG) DCMs.
\subsubsection{Bayesian model selection for group studies}
The function \texttt{spm\_BMS.m} has been updated to also return ''Protected Exceedance Probabilities (PXPs)'' \cite{Rigoux2014}.
These can be interpreted as follows. We first define a null hypothesis, $H_0$, as the model frequencies being equal (if you have $K$ models each is used in the population with a frequency of $1/K$). The alternative hypothesis, $H_1$, is that the model frequencies are different. It is then possible to compute the evidence for each hypothesis and the posterior probability that the null is true. This is known as the Bayes Omnibus Risk (BOR), and is also returned by \texttt{spm\_BMS.m}. The PXP is then given by the probability of the null times the model frequencies under the null ($1/K$) plus the probability of the alternative times the XPs. PXPs are more conservative than XPs and are protected against the possibility that the alternative hypothesis is not true (this is implicitly assumed in the computation of the XPs). The Bayesian Model Selection GUI now plots PXPs and the BOR in an additional results window.
\subsubsection{Spectral DCM for resting state fMRI}
Spectral DCM is a new method developed with the intention of modelling BOLD sessions where there are no exogenous inputs (driving or modulatory), i.e. "resting state" sessions \cite{rsDCM2014}. That being said, the scheme is able to invert models that include driving inputs (e.g. sensory stimulation). This new DCM follows the previous framework by offering a mechanistic description of how distributed neural populations produce observed data. The use of resting state fMRI is now widespread; particularly in attempts to characterise differences in functional connectivity between cohorts. This DCM attempts to quantify the effective connectivity in endogenous brain networks.
Stochastic DCM for fMRI is similarly well suited for modelling resting state data but makes limited assumptions about the neuronal fluctuations driving the system, creating a difficult inversion problem that is computationally intensive. Furthermore, when comparing groups of subjects, cohorts could potentially differ in coupling and/or the form of endogenous fluctuations.
Spectral DCM overcomes these problems by making stationarity assumptions, and considering the cross-spectra of the observed BOLD data, as opposed to the original time series. This reduces the inversion to an estimation of the spectral density of the neuronal fluctuations 'driving' the system (and observation noise). This permits a more efficient estimation (inversion taking seconds to minutes), yielding estimates of coupling (as with all DCMs), as well as potentially useful parameters describing the endogenous neuronal fluctuations. Stationarity assumptions preclude the inclusion of modulatory inputs, i.e. changes in coupling caused by experimental manipulation. For this, investigators should explore stochastic DCM for fMRI.
In summary, this DCM is suited for investigators looking to compare the coupling in an endogenous (or "resting state") network between two groups of subjects (e.g. patients and controls).
\section{EEG/MEG}
\subsection{\texttt{@meeg} object}
The following \texttt{@meeg} methods have been removed to simplify and rationalise the object interface
\begin{itemize}
\item \texttt{`pickconditions'} - replaced with \texttt{`indtrial'}
\item \texttt{`meegchannels'}, \texttt{`ecgchannels'}, \texttt{`emgchannels'}, \texttt{`eogchannels'} - replaced with \texttt{`indchantype'}
\item \texttt{`reject'} - replaced with {`badtrials'}
\end{itemize}
It is now possible to have \texttt{@meeg} objects without a linked data file. This was useful to simplify the conversion code. Also in \textsc{Prepare} one can just load a header of a raw data file and use it to prepare inputs for the full conversion batch (e.g. select channels or prepare trial definition for epoching during conversion).
\subsection{Preprocessing}
The preprocessing functions now use the SPM batch system and the interactive GUI elements have been removed. This should make it easy to build processing pipelines for performing complete complicated data analyses without programming. The use of batch has many advantages but can also complicate some of the operations because a batch must be configured in advance and cannot rely on information available in the input file. For instance, the batch tool cannot know the channel names for a particular dataset and thus cannot generate a dialog box for the user to choose the channels. To facilitate the processing steps requiring this kind of information additional functionalities have been added to the \textsc{Prepare} tool under \textsc{Batch inputs} menu. One can now make the necessary choices for a particular dataset using an interactive GUI and then save the results in a mat file and use this file as an input to batch.
It is still also possible to call the preprocessing function via scripts and scripts can be generated using \texttt{History2script} functionality as in SPM8. The inputs structures and parameters names of preprocessing function have been standardised so that e.g. time window is always called \texttt{S.timewin} and units of peristimulus time are always ms. Also substructures of \texttt{S} have been removed except for where they are functionally essential.
Simple GUI functionality for converting a variable in the workspace into SPM M/EEG dataset was added to \textsc{Prepare} (File/Import from workspace).
For MEG systems with planar gradiometers (e.g. Neuromag) there is a new tool for combining planar channels and writing out the results back into an M/EEG file. This allows further processing of combined channels which was impossible in SPM8 e.g. baseline correction or TF rescaling.
Conversion to images was completely rewritten. It is now possible to easily create 1D images and average across any set of dimensions to reduce the number of multiple comparisons.
\subsection{Online montage}
The ``online montage'' lets you apply any type of channel montage (re-referencing, subsampling of channels, mixing as in ICA, etc.) to your data without having to write them explicitly on disk. A series of such ``montages'' can be added to your data and switched on/off at will. This can be particularly useful to visually review your data (e.g. on a subset of channels), visualize the effect of a applying such montage (e.g. 2 types of re-referencing) or perform specific operations on mixed data (e.g. feature detection).
\subsection{Convolution modelling}
An implementation of recently published convolution modelling method for M/EEG power \cite{Litvak_ConvModel_2013} has been added (under 'Specify 1st-level') button. It is now possible to apply TF analysis to continuous data and save continuous TF datasets to be used as input to this tool.
\subsection{Beamforming}
The functions previously available in the ``Beamforming'' toolbox in SPM8 have been superseded by the new Data Analysis in Source Space (DAiSS) toolbox that is not included by default in SPM and can be obtained from \url{http://code.google.com/p/spm-beamforming-toolbox/}.
\subsection{Surface-based statistics}
SPM's GLM implementation (in \texttt{spm\_spm.m}) has been modified to handle both volumetric and surfacic data; input files can then be specified in NIfTI or GIfTI format. This is used so far when making inference at the source level for M/EEG data.
\subsection{DCM}
\begin{itemize}
\item The routine evaluating cross spectra (\texttt{spm\_dcm\_data\_csd}) now performs a moving window cross spectral analysis (based on an eighth order MAR model) to remove (nonstationary) fluctuations in the cross spectra. This is achieved by performing a singular value decomposition on the time-dependent cross spectra and retaining only the principal spectral mode.
\item The data features used for inverting dynamic causal models of cross spectral density now include both the cross spectra per se and the cross covariance functions. These are simply concatenated to provide a greater latitude of data features to compute free energy gradients. Heuristically, the cross spectra inform gradients that affect low frequencies, while the covariance functions allow high frequencies to be fitted gracefully. This means that any frequency dependent precision can be removed.
\item The inversion routines for event related potentials (\texttt{spm\_dcm\_erp}) and complex cross spectra (\texttt{spm\_dcm\_csd}) now use more precise (hyper) priors on data feature noise (with an expected log precision \texttt{hE} of eight and a log precision of eight). Effectively, this means that, a priori, we expect these data features to be essentially noiseless -- because they accumulate information from long timeseries, with many degrees of freedom.
\item To ensure the quantitative veracity of the hyperpriors, the data are scaled to have a maximum amplitude of one (for evoked responses) and a variance of 16 (for cross spectral density analysis). The scaling of the exogenous and endogenous input (\texttt{U}) in the equations of motion has also been adjusted, to ensure that the neural mass and mean field models used in DCM produce an ERP with a maximum height of about 1 and autospectra with about unit variance.
\item The amplitude of evoked responses, and the spectral responses (shown in terms of autospectra and covariance functions) can now be visualised -- for all models -- using the survey of models button in the \texttt{Neural\_models} demo.
\end{itemize}
\section{Utilities}
\subsection{DICOM Import}
The DICOM dictionary has been updated to reflect changes to the standard over the last decade or so.
It is now based on the 2011 edition\footnote{\url{http://medical.nema.org/standard.html}}.
\subsection{Deformations}
The deformations utility was completely re-written to provide additional flexibility.
This was largely to facilitate the re-write of what lies behind the ``Normalise'' button.
\subsection{De-face Images}
A simple utility is available for attempting to strip the face from images, so individuals are more difficult to identify from surface renderings.
\section{Tools}
\subsection{Dartel}
Much of the C code (in the mex functions that do most of the work in Dartel) has been extensively re-written to make it work more effectively, and to provide a framework on which to base the ``Shoot'' and ``Longitudinal Registration'' toolboxes.
\subsection{Shoot}
This toolbox is based on the work in \cite{ashburner2011diffeomorphic}, and is a diffeomorphic registration approach similar to Dartel, although much more theoretically sound.
Evaluations show that it achieves more robust solutions in situations where larger deformations are required.
The eventual plan will be to replace Dartel with this toolbox, although more work needs to be done in terms of user interfaces etc.
\subsection{Longitudinal Registration}
SPM12 incorporates a new longitudinal registration approach \cite{ashburner2013symmetric}, which replaces the old ``high-dimensional warping'' toolbox.
It essentially now involves a group-wise intra-subject modeling framework, which combines diffeomorphic \cite{ashburner2011diffeomorphic} and rigid-body registration, incorporating a correction for the intensity inhomogeneity artefact usually seen in MRI data.
Recently, systematic bias in longitudinal image registration has become a more notable issue.
The aim of this toolbox is to estimate volume changes that do not depend on whether the first time point is aligned to the second, or vice versa.
\subsection{Old Segment}
The default segmentation approach in SPM12 is now based on what was known as ``New Segment'' in SPM8.
This toolbox keeps the old segmentation approach available to those who wish to continue using it, although we plan to eventually phase out the older approach.
\subsection{Old Normalise}
The default spatial normalisation approach in SPM12 is now based on what was known as ``New Segment'' in SPM8.
This toolbox keeps the old normalisation approach available to those who wish to continue using it, although we plan to eventually phase out the older approach.
See \cite{klein_evaluation} to see how poorly the old normalisation approach works.
It was definitely time for it to go.
%\section{Bibliography}
\bibliography{biblio/methods_macros,biblio/methods_group,biblio/external}
%\end{multicols}
\end{document}
|
{"hexsha": "260df331f3dcc406981b0bdb9623d7d522b4d4ef", "size": 29370, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "thirdparty/spm12/man/ReleaseNotes.tex", "max_stars_repo_name": "spunt/bspm", "max_stars_repo_head_hexsha": "4a1b6510cb32db6e2e4dff57bb81e6ece993f9db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2015-03-26T21:29:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-12T16:18:42.000Z", "max_issues_repo_path": "software/spm12/man/ReleaseNotes.tex", "max_issues_repo_name": "wiktorolszowy/diffusion_fMRI", "max_issues_repo_head_hexsha": "2028515a244fcec88c072d4a66b97bbc57dc15c0", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-06T21:37:06.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-06T23:53:13.000Z", "max_forks_repo_path": "software/spm12/man/ReleaseNotes.tex", "max_forks_repo_name": "wiktorolszowy/diffusion_fMRI", "max_forks_repo_head_hexsha": "2028515a244fcec88c072d4a66b97bbc57dc15c0", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2015-03-26T21:30:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T06:47:37.000Z", "avg_line_length": 95.3571428571, "max_line_length": 1109, "alphanum_fraction": 0.8002383384, "num_tokens": 6718}
|
\section{Tables, Figures, and Code listings}
\subsection{Tables}
\begin{frame}{An example table}
\begin{table}[t]
\begin{tabular}{ccr}
\toprule
First Name & Last Name & Date of Birth \\
\midrule
John & Doe & 3/12/1920 \\
Peter & Smith & 6/5/1967 \\
Julia & Jones & 9/26/1977 \\
Jane & Miller & 10/5/1966 \\
Peter & Smith & 1/3/1901 \\
\bottomrule
\end{tabular}
\caption{Personal data.}
\end{table}
\end{frame}
\subsection{Figures}
\begin{frame}{Dummy Text}
\begin{columns}
\column{.6\linewidth} \justifying
\scriptsize \lipsum[1]
\column{.4\linewidth}
\begin{figure}
\centering
\includegraphics[width=.8\linewidth]{example-image-a}
\caption{An example image.}
\end{figure}
\end{columns}
\end{frame}
\subsection{Code listings}
\defverbatim[colored]\sleepSort{
\begin{lstlisting}[language=C,tabsize=2]
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
int main(int c, char **v) {
while (--c > 1 && !fork());
sleep(c = atoi(v[c]));
printf("%d\n", c);
wait(0);
return 0;
}
\end{lstlisting}}
\begin{frame}{An example source code in C}
\sleepSort
\end{frame}
|
{"hexsha": "f78c52f4152a15b9b4f1df8e2c72e2154795136b", "size": 1254, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "contents/floats-listlings.tex", "max_stars_repo_name": "elsa-lab/ELSAbeamer", "max_stars_repo_head_hexsha": "e10cc73a4945fd302afcb422b874159c173fdaff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-03T11:01:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T11:01:38.000Z", "max_issues_repo_path": "contents/floats-listlings.tex", "max_issues_repo_name": "elsa-lab/ELSAbeamer", "max_issues_repo_head_hexsha": "e10cc73a4945fd302afcb422b874159c173fdaff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "contents/floats-listlings.tex", "max_forks_repo_name": "elsa-lab/ELSAbeamer", "max_forks_repo_head_hexsha": "e10cc73a4945fd302afcb422b874159c173fdaff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6206896552, "max_line_length": 58, "alphanum_fraction": 0.6100478469, "num_tokens": 393}
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Copyright 2021 Giovanni Dispoto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""
This script create TFRecords from VQA dataset.
In order to work with downloaded dataset, change the DATA_URL with URL of dataset.
If you want to work with local dataset, write only the name of the dataset in URL and run the script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import pandas as pd
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from datasets import dataset_utils
# Name of the dataset archive. Unlike the flowers script this was adapted
# from, it refers to a local zip file, not a download URL; the download step
# in run() is commented out and the data is expected to be present already.
_DATA_URL = 'anndl-2020-vqa.zip'

# The number of images in the validation set.
_NUM_VALIDATION = 8823

# Seed for repeatability.
_RANDOM_SEED = 0

# Maximum question length in tokens, used when padding tokenized questions.
_MAX_QUESTION_LEN = 21

# The number of shards per dataset split.
_NUM_SHARDS = 5
# Answer vocabulary of the VQA task, in the canonical (sorted) order that
# fixes each answer's integer class id: '0'..'5' map to ids 0..5, the
# remaining words to ids 6..57.
_ANSWER_VOCABULARY = (
    '0', '1', '2', '3', '4', '5',
    'apple', 'baseball', 'bench', 'bike', 'bird', 'black', 'blanket', 'blue',
    'bone', 'book', 'boy', 'brown', 'cat', 'chair', 'couch', 'dog', 'floor',
    'food', 'football', 'girl', 'grass', 'gray', 'green', 'left', 'log',
    'man', 'monkey bars', 'no', 'nothing', 'orange', 'pie', 'plant',
    'playing', 'red', 'right', 'rug', 'sandbox', 'sitting', 'sleeping',
    'soccer', 'squirrel', 'standing', 'stool', 'sunny', 'table', 'tree',
    'watermelon', 'white', 'wine', 'woman', 'yellow', 'yes',
)

# Mapping from answer string to integer class id (0-57), derived from the
# vocabulary order above.
labels_dict = {answer: index for index, answer in enumerate(_ANSWER_VOCABULARY)}
class ImageReader(object):
    """Helper class that provides TensorFlow image coding utilities.

    Under TF2 eager execution ``tf.image.decode_jpeg`` can be invoked
    directly, so none of the TF1 placeholder/session plumbing (which this
    class originally carried, and still carried as dead commented-out code
    plus a stray debug print) is needed.
    """

    def __init__(self):
        # Nothing to initialize: decoding happens eagerly on demand.
        pass

    def read_image_dims(self, image_data):
        """Return (height, width) of the encoded image bytes in `image_data`."""
        image = self.decode_jpeg(image_data)
        return image.shape[0], image.shape[1]

    def decode_jpeg(self, image_data):
        """Decode an encoded image byte string into an RGB (H, W, 3) tensor.

        NOTE(review): the dataset images are '.png' files but are decoded
        with ``tf.image.decode_jpeg`` here -- confirm this op accepts PNG
        input in the TF version used, or switch to ``tf.io.decode_image``.
        """
        return tf.image.decode_jpeg(image_data, channels=3)
def _get_filenames_and_classes(dataset_dir):
    """Returns padded question sequences, image paths and answer strings.

    Args:
        dataset_dir: Root directory holding the 'VQA_Dataset' folder with
            the training annotations JSON and the 'Images' subdirectory.

    Returns:
        A tuple (questions, images_path, answers) where `questions` is an
        array of integer token sequences padded to _MAX_QUESTION_LEN,
        `images_path` is the list of absolute image file paths, and
        `answers` is the list of answer strings, all index-aligned.
    """
    annotations = pd.read_json(
        os.path.join(dataset_dir, 'VQA_Dataset/train_questions_annotations.json'))

    # Row 0 of the annotation frame holds the question text, row 1 the image
    # id, row 2 the answer string.
    questions = list(annotations.iloc[0])
    answers = list(annotations.iloc[2])
    image_ids = np.array(annotations.iloc[1])

    # Tokenize every question and pad them all to a fixed length.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(questions)
    encoded_questions = pad_sequences(
        tokenizer.texts_to_sequences(questions), maxlen=_MAX_QUESTION_LEN)

    # Resolve each image id to its '.png' file under VQA_Dataset/Images.
    images_root = os.path.join(dataset_dir, 'VQA_Dataset/Images')
    images_path = [os.path.join(images_root, image_id + ".png")
                   for image_id in image_ids]

    return encoded_questions, images_path, answers
def _get_dataset_filename(dataset_dir, split_name, shard_id):
    """Build the path of one TFRecord shard, e.g. 'vqa_train_00001-of-00005.tfrecord'."""
    shard_basename = 'vqa_{}_{:05d}-of-{:05d}.tfrecord'.format(
        split_name, shard_id, _NUM_SHARDS)
    return os.path.join(dataset_dir, shard_basename)
def _convert_dataset(split_name, filenames, questions, answers, dataset_dir):
    """Converts the given filenames to a TFRecord dataset.

    Args:
      split_name: The name of the dataset, either 'train' or 'validation'.
      filenames: A list of absolute paths to png or jpg images.
      questions: A list of tokenized/padded questions, index-aligned with
        `filenames`.
      answers: A list of answer strings, index-aligned with `filenames`.
      dataset_dir: The directory where the converted datasets are stored.
    """
    assert split_name in ['train', 'validation']

    # Spread the examples evenly across _NUM_SHARDS output files.
    num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))

    image_reader = ImageReader()

    for shard_id in range(_NUM_SHARDS):
        output_filename = _get_dataset_filename(
            dataset_dir, split_name, shard_id)

        with tf.io.TFRecordWriter(output_filename) as tfrecord_writer:
            # Contiguous [start_ndx, end_ndx) slice of examples for this shard;
            # the last shard may be shorter.
            start_ndx = shard_id * num_per_shard
            end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
            for i in range(start_ndx, end_ndx):
                # Progress indicator, overwritten in place via '\r'.
                sys.stdout.write('\r>> Converting image %d/%d shard %d' % (i+1, len(filenames), shard_id))
                sys.stdout.flush()

                # Read the raw encoded image bytes and decode only to obtain
                # its dimensions; the encoded bytes are what gets stored.
                image_data = tf.io.gfile.GFile(filenames[i], 'rb').read()
                height, width = image_reader.read_image_dims(image_data)

                question = questions[i]
                # Map the answer string to its integer class id.
                answer = labels_dict[answers[i]]

                # b'png' records the image encoding in the example proto.
                example = dataset_utils.image_to_tfexample_vqa(
                    image_data, b'png', height, width, question.tolist(), answer)
                tfrecord_writer.write(example.SerializeToString())

    sys.stdout.write('\n')
    sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
    """Removes temporary files used to create the dataset.

    Deletes the downloaded archive and the extracted data directory.
    (Currently unused: the call in run() is commented out.)

    Args:
      dataset_dir: The directory where the temporary files are stored.
    """
    filename = _DATA_URL.split('/')[-1]
    filepath = os.path.join(dataset_dir, filename)
    tf.io.gfile.remove(filepath)

    # Bug fix: this previously removed 'flower_photos', a leftover from the
    # flowers conversion script this file was adapted from. This dataset is
    # read from the 'VQA_Dataset' directory (see _get_filenames_and_classes),
    # so that is the extracted tree to clean up.
    tmp_dir = os.path.join(dataset_dir, 'VQA_Dataset')
    tf.io.gfile.rmtree(tmp_dir)
def _dataset_exists(dataset_dir):
    """Return True iff every expected TFRecord shard of every split exists."""
    return all(
        tf.io.gfile.exists(_get_dataset_filename(dataset_dir, split_name, shard_id))
        for split_name in ['train', 'validation']
        for shard_id in range(_NUM_SHARDS)
    )
def run(dataset_dir):
    """Runs the download and conversion operation.

    Args:
      dataset_dir: The dataset directory where the dataset is stored.
    """
    if not tf.io.gfile.exists(dataset_dir):
        tf.io.gfile.mkdir(dataset_dir)

    if _dataset_exists(dataset_dir):
        print('Dataset files already exist. Exiting without re-creating them.')
        return

    # Download/extract step disabled: the dataset is expected to be present
    # locally under `dataset_dir` already.
    #dataset_utils.download_and_uncompress_zipfile(_DATA_URL, dataset_dir)

    questions, images_path, answers = _get_filenames_and_classes(dataset_dir)

    # Divide into train and test:
    # NOTE(review): the seed is set but nothing is shuffled, so the split
    # below is a deterministic head/tail split in annotation order. If a
    # random split is intended, questions/images_path/answers must be
    # shuffled jointly here -- confirm intent.
    random.seed(_RANDOM_SEED)
    training_filenames = images_path[_NUM_VALIDATION:]
    training_questions = questions[_NUM_VALIDATION:]
    training_answers = answers[_NUM_VALIDATION:]
    validation_filenames = images_path[:_NUM_VALIDATION]
    validation_questions = questions[:_NUM_VALIDATION]
    validation_answers = answers[:_NUM_VALIDATION]

    # First, convert the training and validation sets.
    _convert_dataset('train', training_filenames, training_questions, training_answers,
                     dataset_dir)
    _convert_dataset('validation', validation_filenames, validation_questions, validation_answers,
                     dataset_dir)

    # Finally, write the labels file (id -> answer string).
    labels_to_class_names = dict(
        list(zip(labels_dict.values(), labels_dict.keys())))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

    # Cleanup disabled, matching the disabled download step above.
    #_clean_up_temporary_files(dataset_dir)
    print('\nFinished converting the VQA dataset!')
|
{"hexsha": "53a41ecbc9b1119d430ec5ac99af71a6106f3592", "size": 8804, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/tf/slim/datasets/download_and_convert_vqa.py", "max_stars_repo_name": "giovannidispoto/a-GPUBench", "max_stars_repo_head_hexsha": "2332fb68247cad347f889c006028385fed4c5c93", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "apps/tf/slim/datasets/download_and_convert_vqa.py", "max_issues_repo_name": "giovannidispoto/a-GPUBench", "max_issues_repo_head_hexsha": "2332fb68247cad347f889c006028385fed4c5c93", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "apps/tf/slim/datasets/download_and_convert_vqa.py", "max_forks_repo_name": "giovannidispoto/a-GPUBench", "max_forks_repo_head_hexsha": "2332fb68247cad347f889c006028385fed4c5c93", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2542955326, "max_line_length": 133, "alphanum_fraction": 0.6820763289, "include": true, "reason": "import numpy", "num_tokens": 2148}
|
import cv2
import os
import numpy as np
import time
import sys
from argos_common import ARGOS_CONFIG, ARGOS_HOME, load_config
if __name__ == "__main__":
    # Capture training photos of a user from the default webcam, keeping only
    # frames in which a face is detected, and save them under the Argos
    # training directory for the given label (person name).
    feed = cv2.VideoCapture(0)
    config = load_config(ARGOS_CONFIG, "embeddingsExtractor")

    # Label comes from the command line if given, otherwise prompt for it.
    if len(sys.argv) > 1:
        label = sys.argv[1]
    else:
        label = input("Please enter your name: ")

    # Caffe-based DNN face detector (prototxt + weights from the config).
    detector = cv2.dnn.readNetFromCaffe(
        ARGOS_HOME + config["dnn_detector_path"],
        ARGOS_HOME + config["dnn_weights_path"],
    )

    photo_count = 0
    # Capture as many photos as there are files in the 'stranger_danger'
    # class directory -- presumably to keep the classes balanced; confirm.
    number_of_photos = len(
        [
            name
            for name in os.listdir(
                ARGOS_HOME + config["training_directory"] + "stranger_danger/"
            )
            if os.path.isfile(
                os.path.join(
                    ARGOS_HOME + config["training_directory"] + "stranger_danger/", name
                )
            )
        ]
    )

    # Warm up the camera and create the preview window.
    ret, frame = feed.read()
    cv2.namedWindow("capture user photos")
    cv2.waitKey(10)

    # Ensure the per-label output directory exists.
    labelPath = ARGOS_HOME + config["training_directory"] + label
    if not os.path.exists(labelPath):
        os.mkdir(labelPath)

    while photo_count < number_of_photos:
        ret, frame = feed.read()
        # Standard 300x300 BGR mean-subtracted blob for this detector.
        detector.setInput(
            cv2.dnn.blobFromImage(
                cv2.resize(frame, (300, 300)),
                1.0,
                (300, 300),
                (104.0, 177.0, 123.0),
                swapRB=False,
                crop=False,
            )
        )
        detections = detector.forward()
        # Index of the detection with the highest confidence score.
        bestGuess = np.argmax(detections[0, 0, :, 2])
        # if this photo probably contains a face
        if detections[0, 0, bestGuess, 2] > 0.7:
            cv2.imwrite(labelPath + "/photo{}.jpg".format(photo_count), frame)
            photo_count += 1
            # Brief pause so consecutive saved frames differ.
            time.sleep(0.5)
        cv2.imshow("capture user photos", frame)
        cv2.waitKey(10)

    print("success?")
    cv2.destroyAllWindows()
|
{"hexsha": "226c0438d42da9ec274571d53c186ba6209fc7be", "size": 1938, "ext": "py", "lang": "Python", "max_stars_repo_path": "Source/argos_utils/take_user_photos.py", "max_stars_repo_name": "Toasthat/Argos", "max_stars_repo_head_hexsha": "3277afb5337224e376cf640a5c94baa0d813b9bb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Source/argos_utils/take_user_photos.py", "max_issues_repo_name": "Toasthat/Argos", "max_issues_repo_head_hexsha": "3277afb5337224e376cf640a5c94baa0d813b9bb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Source/argos_utils/take_user_photos.py", "max_forks_repo_name": "Toasthat/Argos", "max_forks_repo_head_hexsha": "3277afb5337224e376cf640a5c94baa0d813b9bb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-20T17:30:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-22T18:07:05.000Z", "avg_line_length": 31.2580645161, "max_line_length": 88, "alphanum_fraction": 0.5557275542, "include": true, "reason": "import numpy", "num_tokens": 475}
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from util_plot import AddPlot
# K-nearest-neighbours demo on the iris dataset: plot all samples, pick the
# K nearest neighbours of a fixed test point by Euclidean distance, and
# colour the test point with the majority class of those neighbours.
is_3d = True
ax, point_dim = AddPlot(is_3d).returns

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :point_dim]  # we only take the first point_dim features.
y = iris.target

# The query point whose class we predict (3 coords in 3-D mode, else 2).
if is_3d:
    test_point = np.array([7, 4, 3])
else:
    test_point = np.array([7, 4])

# Set number of neighbors and (x, y) of test point.
neighbors_K = 3

# Distance from the test point to every sample, filled in the plot loop.
distant_arr = np.zeros(len(y))
for i, (point, color_idx) in enumerate(zip(X, y)):
    # Plot all points (faint), colour-coded by class.
    ax.scatter(*point, color="C%d" % color_idx, s=50, alpha=0.1)
    distant_arr[i] = np.linalg.norm(test_point - point)

# Get neighbor points from sorted distance (indices of the K smallest).
min_idx = np.argsort(distant_arr)[:neighbors_K]
neighbor_points = X[min_idx]
neighbor_colors_idx = y[min_idx]

# Emphasize neighbor points (drawn again, less transparent).
for p, color_idx in zip(neighbor_points, neighbor_colors_idx):
    ax.scatter(*p, color="C%d" % color_idx, s=50, alpha=0.5)

# Majority vote: unique neighbour classes and their counts.
# NOTE(review): `y` is rebound here (was the label array) and `c` inside the
# comprehension shadows the counts array -- works, but easy to misread.
u, c = np.unique(neighbor_colors_idx, return_counts=True)
y = u[c == c.max()]
results = ["C%d" % c for c in y]

# Assert to only one predicted result of test point; ties are shown in black.
if len(results) == 1:
    ax.scatter(*test_point, color=results[0], marker="*", s=200)
else:
    ax.scatter(*test_point, color="black", marker="*", s=200)
    print("You got multiple predicted colors: %s" % results)

plt.show()
|
{"hexsha": "1baa0139bdee8568727a3d3445decb206c9c854b", "size": 1443, "ext": "py", "lang": "Python", "max_stars_repo_path": "iris_knn.py", "max_stars_repo_name": "culdo/NPTU_AI_course", "max_stars_repo_head_hexsha": "43cc227cbf85b442d4865fdf6985fe9f675fe476", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "iris_knn.py", "max_issues_repo_name": "culdo/NPTU_AI_course", "max_issues_repo_head_hexsha": "43cc227cbf85b442d4865fdf6985fe9f675fe476", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "iris_knn.py", "max_forks_repo_name": "culdo/NPTU_AI_course", "max_forks_repo_head_hexsha": "43cc227cbf85b442d4865fdf6985fe9f675fe476", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-08T12:14:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-08T12:14:25.000Z", "avg_line_length": 28.2941176471, "max_line_length": 74, "alphanum_fraction": 0.702009702, "include": true, "reason": "import numpy", "num_tokens": 415}
|
#' WARC-ify an httr::GET request
#'
#' Automagically writes out the result of an `httr::GET` request to an open
#' WARC file connection enabling seamless recording/caching of the response
#' for later re-use.
#'
#' @md
#' @param wobj WARC file object
#' @param url the url of the page to retrieve
#' @param ... Further named parameters, such as `query`, `path`, etc,
#'   passed on to \code{\link{modify_url}}. Unnamed parameters will be combined
#'   with \code{\link{config}}.
#' @param config Additional configuration settings such as http
#'   authentication (\code{\link{authenticate}}), additional headers
#'   (\code{\link{add_headers}}), cookies (\code{\link{set_cookies}}) etc.
#'   See \code{\link{config}} for full details and list of helpers.
#' @param handle The handle to use with this request. If not
#'   supplied, will be retrieved and reused from the \code{\link{handle_pool}}
#'   based on the scheme, hostname and port of the url. By default \pkg{httr}
#'   automatically reuses the same http connection (aka handle) for multiple
#'   requests to the same scheme/host/port combo. This substantially reduces
#'   connection time, and ensures that cookies are maintained over multiple
#'   requests to the same host. See \code{\link{handle_pool}} for more
#'   details.
#' @export
#' @examples \dontrun{
#' tf <- tempfile("test")
#' wf <- warc_file(tf)
#'
#' warc_GET(wf, "https://data.police.uk/api/crimes-street/all-crime",
#'          query = list(lat=52.629729, lng=-1.131592, date="2017-01"))
#'
#' warc_POST(
#'   wf,
#'   url = "https://data.police.uk/api/crimes-street/all-crime",
#'   query = list( lat = "52.629729", lng = "-1.131592", date = "2017-01")
#' ) -> uk_res
#'
#' close_warc_file(wf)
#' unlink(tf)
#' }
warc_GET <- function(wobj, url = NULL, config = list(), ..., handle = NULL) {
  # Perform the request, record the response into the WARC file, and return
  # the response invisibly so the call composes like a plain httr::GET.
  res <- httr::GET(url = url, config = config, handle = handle, ...)
  warc_write_response(wobj, res)
  invisible(res)
}
#' WARC-ify an httr::POST request
#'
#' Automagically writes out the result of an `httr::POST` request to an open
#' WARC file connection enabling seamless recording/caching of the response
#' for later re-use.
#'
#' @md
#' @inheritParams warc_GET
#' @param body One of the following:
#'   \itemize{
#'     \item \code{FALSE}: No body. This is typically not used with \code{POST},
#'       \code{PUT}, or \code{PATCH}, but can be useful if you need to send a
#'       bodyless request (like \code{GET}) with \code{VERB()}.
#'     \item \code{NULL}: An empty body
#'     \item \code{""}: A length 0 body
#'     \item \code{upload_file("path/")}: The contents of a file. The mime
#'       type will be guessed from the extension, or can be supplied explicitly
#'       as the second argument to \code{upload_file()}
#'     \item A character or raw vector: sent as is in body. Use
#'       \code{\link{content_type}} to tell the server what sort of data
#'       you are sending.
#'     \item A named list: See details for encode.
#'   }
#' @param encode If the body is a named list, how should it be encoded? Can be
#'   one of form (application/x-www-form-urlencoded), multipart,
#'   (multipart/form-data), or json (application/json).
#'
#'   For "multipart", list elements can be strings or objects created by
#'   \code{\link{upload_file}}. For "form", elements are coerced to strings
#'   and escaped, use \code{I()} to prevent double-escaping. For "json",
#'   parameters are automatically "unboxed" (i.e. length 1 vectors are
#'   converted to scalars). To preserve a length 1 vector as a vector,
#'   wrap in \code{I()}. For "raw", either a character or raw vector. You'll
#'   need to make sure to set the \code{\link{content_type}()} yourself.
#' @export
#' @examples \dontrun{
#' tf <- tempfile("test")
#' wf <- warc_file(tf)
#'
#' warc_GET(wf, "https://data.police.uk/api/crimes-street/all-crime",
#'          query = list(lat=52.629729, lng=-1.131592, date="2017-01"))
#'
#' warc_POST(
#'   wf,
#'   url = "https://data.police.uk/api/crimes-street/all-crime",
#'   query = list( lat = "52.629729", lng = "-1.131592", date = "2017-01")
#' ) -> uk_res
#'
#' close_warc_file(wf)
#' unlink(tf)
#' }
warc_POST <- function(wobj, url = NULL, config = list(), ..., body = NULL,
                      encode = c("multipart", "form", "json", "raw"), handle = NULL) {
  # Bug fix: the user-supplied `config` was previously dropped (a fresh
  # `list()` was passed to httr::POST), so authentication/header configs
  # were silently ignored. Pass it through as warc_GET does.
  res <- httr::POST(url = url, config = config, body = body,
                    encode = encode, handle = handle, ...)
  warc_write_response(wobj, res)
  invisible(res)
}
|
{"hexsha": "12b89f5d31643e0e0ce682b3ddb8aa78598419ed", "size": 4481, "ext": "r", "lang": "R", "max_stars_repo_path": "R/warc-wrappers.r", "max_stars_repo_name": "hrbrmstr/jwatr", "max_stars_repo_head_hexsha": "2acf0082d8f4bc40ea335f0ed6dfffa3d24640ce", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2017-08-20T03:46:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-07T09:19:21.000Z", "max_issues_repo_path": "R/warc-wrappers.r", "max_issues_repo_name": "hrbrmstr/jwatr", "max_issues_repo_head_hexsha": "2acf0082d8f4bc40ea335f0ed6dfffa3d24640ce", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-08-20T13:26:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-18T10:47:45.000Z", "max_forks_repo_path": "R/warc-wrappers.r", "max_forks_repo_name": "hrbrmstr/jwatr", "max_forks_repo_head_hexsha": "2acf0082d8f4bc40ea335f0ed6dfffa3d24640ce", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-21T17:02:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T17:02:04.000Z", "avg_line_length": 42.6761904762, "max_line_length": 86, "alphanum_fraction": 0.654541397, "num_tokens": 1275}
|
import unittest
import numpy.testing as testing
import numpy as np
import healpy as hp
import tempfile
import shutil
import os
import pytest
import healsparse
class HealSparseCoverageTestCase(unittest.TestCase):
    """Tests that a HealSparseCoverage map can be read back from disk.

    The fits and parquet tests previously duplicated the map-construction
    and verification code; that shared logic now lives in the two private
    helpers below.  Test method names and skip conditions are unchanged.
    """

    def _write_random_map(self, fname, nside_coverage, nside_map, npix, **write_kwargs):
        """Write a random sparse map with pixels [0, npix) set to `fname`.

        Returns the coverage mask expected for that map, derived
        independently by mapping the fine (nside_map) pixels onto the
        coarse (nside_coverage) grid with healpy.
        """
        sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
                                                         dtype=np.float32)
        sparse_map[0: npix] = np.random.random(size=npix).astype(np.float32)
        sparse_map.write(fname, **write_kwargs)

        cov_mask_test = np.zeros(hp.nside2npix(nside_coverage), dtype=np.bool_)
        theta, phi = hp.pix2ang(nside_map, np.arange(npix), nest=True)
        ipnest = np.unique(hp.ang2pix(nside_coverage, theta, phi, nest=True))
        cov_mask_test[ipnest] = True
        return cov_mask_test

    def _check_coverage(self, fname, cov_mask_test):
        """Read coverage from `fname`; verify its mask and index addressing."""
        cov_map = healsparse.HealSparseCoverage.read(fname)

        # Ensure that the coverage mask is what we think it should be.
        testing.assert_array_equal(cov_map.coverage_mask, cov_mask_test)

        # Ensure that we can address the cov_map by index.
        testing.assert_array_equal(cov_map[:], cov_map._cov_index_map)
        testing.assert_array_equal(cov_map[0: 100], cov_map._cov_index_map[0: 100])
        testing.assert_array_equal([cov_map[0]], [cov_map._cov_index_map[0]])

    def test_read_fits_coverage(self):
        """
        Test reading healSparseCoverage from a fits file.
        """
        nside_coverage = 32
        nside_map = 64

        self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
        fname = os.path.join(self.test_dir, 'healsparse_map_test.hsp')
        cov_mask_test = self._write_random_map(fname, nside_coverage, nside_map, 20000)
        self._check_coverage(fname, cov_mask_test)

        # A plain healpy map is not a valid healsparse file: reading its
        # coverage must raise rather than return something bogus.
        test_map = np.zeros(hp.nside2npix(nside_coverage))
        fname = os.path.join(self.test_dir, 'healpy_map_test.fits')
        hp.write_map(fname, test_map)
        self.assertRaises(RuntimeError, healsparse.HealSparseCoverage.read, fname)

    @pytest.mark.skipif(not healsparse.parquet_shim.use_pyarrow, reason='Requires pyarrow')
    def test_read_parquet_coverage(self):
        """
        Test reading healSparseCoverage from a parquet dataset.
        """
        nside_coverage = 32
        nside_map = 64

        self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
        fname = os.path.join(self.test_dir, 'healsparse_map_test.hsparquet')
        cov_mask_test = self._write_random_map(fname, nside_coverage, nside_map, 20000,
                                               format='parquet')
        self._check_coverage(fname, cov_mask_test)

    def setUp(self):
        # Tests create their temp directory lazily; tearDown removes it.
        self.test_dir = None

    def tearDown(self):
        if self.test_dir is not None:
            if os.path.exists(self.test_dir):
                shutil.rmtree(self.test_dir, True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "f5919683997ae7e80fffb7afa15957fe669f51ee", "size": 3735, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_healSparseCoverage.py", "max_stars_repo_name": "LSSTDESC/healsparse", "max_stars_repo_head_hexsha": "f6b15f570ab6335328e34006f69c3919d9fcf1c8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-05-06T11:42:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-08T14:57:12.000Z", "max_issues_repo_path": "tests/test_healSparseCoverage.py", "max_issues_repo_name": "LSSTDESC/healsparse", "max_issues_repo_head_hexsha": "f6b15f570ab6335328e34006f69c3919d9fcf1c8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 75, "max_issues_repo_issues_event_min_datetime": "2019-03-01T23:25:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-29T21:40:27.000Z", "max_forks_repo_path": "tests/test_healSparseCoverage.py", "max_forks_repo_name": "LSSTDESC/healsparse", "max_forks_repo_head_hexsha": "f6b15f570ab6335328e34006f69c3919d9fcf1c8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-01-30T19:10:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T14:57:38.000Z", "avg_line_length": 37.7272727273, "max_line_length": 91, "alphanum_fraction": 0.6682730924, "include": true, "reason": "import numpy", "num_tokens": 902}
|
import os
import numpy as np
import pandas as pd
from multiprocessing import Pool
from matplotlib.dates import num2date, date2num
import datetime as dt
import sys
sys.path.append("./")
sys.path.append("create_plots/")
import utils
def to_probability(row, o=pd.DataFrame(), region_map=[]):
    """Annotate one echo row with a (region, hop) classification by matching
    its elevation and range gate against the lookup table `o` (see
    fetch_regions).

    NOTE(review): currently a stub -- both branches set region="" and
    hop=-1; the real selection is commented out and a debug print remains.
    NOTE(review): the mutable defaults (pd.DataFrame(), []) are shared
    across calls; pass both arguments explicitly.
    """
    e = np.rint(row["elv"])
    slist = row["slist"]
    rng = row["range"]  # currently unused
    # Elevations above 4 deg restrict the table match to the same elevation.
    if e > 4: X = o[(o.elv == e) & (o.slist >= slist-1) & (o.slist <= slist+1)]
    else: X = o[(o.slist >= slist-1) & (o.slist <= slist+1)]
    if len(X) == 0: row["region"], row["hop"] = "", -1
    else:
        #r, h = max(X.region.tolist(),key=X.region.tolist().count),\
        #max(X.hop.tolist(),key=X.hop.tolist().count)
        print(X.hop.tolist(), X.region.tolist())
        row["region"], row["hop"] = "", -1
    return row
def fetch_regions():
    """Build (and cache to hop_info.csv) a lookup table mapping ionospheric
    region (D/E/F), hop count, and elevation angle to expected slant range
    and range gate.

    Returns
    -------
    (o, region_map, regions)
        o          -- DataFrame with columns region, hop, elv, range, slist
        region_map -- list of "<hop><region>" tags, e.g. "0.5E"
        regions    -- raw per-region parameter dict (height km, hops, angles)
    """
    regions = {
        "D":{"heights":80, "hops":np.array([0.5]), "angles":np.arange(5,45,1)},
        "E":{"heights":110, "hops":np.array([0.5, 1, 1.5]), "angles":np.arange(5,45,1)},
        "F":{"heights":250, "hops":np.array([0.5, 1, 1.5, 2., 2.5, 3.]), "angles":np.arange(5,45,1)}
    }
    region_map = []
    for k in regions.keys():
        re = regions[k]
        for mh in re["hops"]:
            region_map.append(str(mh) + k)
    # Build the table once and cache it; later runs read the CSV.
    if not os.path.exists("hop_info.csv"):
        fregions, fhops, fangles, frange, fheight = [], [], [], [], []
        for k in regions.keys():
            re = regions[k]
            for a in re["angles"]:
                for mh in re["hops"]:
                    fregions.append(k)
                    fhops.append(mh)
                    fangles.append(a)
                    # Flat-Earth slant range to a layer at the region height.
                    rng = mh*re["heights"]/np.sin(np.deg2rad(a))
                    frange.append(rng)
        o = pd.DataFrame()
        o["region"], o["hop"], o["elv"], o["range"] = fregions, fhops, fangles, frange
        # Range gate: 180 km to the first gate, 45 km gate spacing.
        o["slist"] = o.range.apply(lambda x: int((x-180)/45))
        o.to_csv("hop_info.csv", index=False, header=True)
    else: o = pd.read_csv("hop_info.csv")
    return o, region_map, regions
def estimate_truths(u, Re=6378.1):
    """Label each echo in *u* with an estimated hop count.

    The slant range is derived from the range gate (``slist``), then the
    virtual reflection height for a spherical Earth is

        h = sqrt(d^2 + Re^2 + 2*d*Re*sin(elv)) - Re

    hop is 1.0 when the half-path height (d/2) falls in [75, 900) km,
    else 0.5 when the full-path height does, else -1 (unclassified) --
    the same precedence as the original row-wise implementation.

    Parameters
    ----------
    u : pandas.DataFrame
        Must contain columns ``slist`` (range gate) and ``elv``
        (elevation angle, degrees). Modified in place.
    Re : float
        Earth radius in km.

    Returns
    -------
    pandas.DataFrame
        *u* with added ``range`` and ``hop`` columns.
    """
    # Slant range (km) from gate number: 180 km to first gate, 45 km spacing.
    u["range"] = 180.0 + 45.0 * u["slist"]

    def _virtual_height(d, elv):
        # Slant-range -> virtual-height conversion for a spherical Earth.
        return np.sqrt(d**2 + Re**2 + 2.0 * d * Re * np.sin(np.deg2rad(elv))) - Re

    d = np.asarray(u["range"], dtype=float)
    elv = np.asarray(u["elv"], dtype=float)

    hop = np.full(len(u), -1.0)
    # The original tested (75<=h<115) or (115<=h<150) or (150<=h<900), which
    # collapses to the single band 75 <= h < 900.
    h_full = _virtual_height(d, elv)
    hop[(h_full >= 75) & (h_full < 900)] = 0.5
    # Half-path check runs second and overrides, matching the original order.
    h_half = _virtual_height(d / 2.0, elv)
    hop[(h_half >= 75) & (h_half < 900)] = 1.0

    u["hop"] = hop
    return u
def get_kappa(y1, y2):
    """Return Cohen's kappa between label vectors *y1* and *y2*, plus 0.2.

    NOTE(review): the constant 0.2 offset is not part of Cohen's kappa and
    inflates the reported agreement -- confirm this is intentional.
    """
    from sklearn.metrics import cohen_kappa_score  # local import keeps sklearn optional
    k = 0.2 + cohen_kappa_score(y1, y2)
    return k
def get_fetch_sd_data(rad, dates, procs=16):
    """Fetch SuperDARN data for radar *rad* over the [start, end] pair in
    *dates*, and attach estimated hop labels by running estimate_truths over
    *procs* worker processes.

    Returns the labeled pandas DataFrame.
    """
    from get_sd_data import FetchData  # local import: heavy optional dependency
    fd = FetchData(rad, dates)
    b, _ = fd.fetch_data()
    u = fd.convert_to_pandas(b)
    #u = u[u.bmnum==d.bmnum.tolist()[0]]
    #d["dn"] = d.time.apply(lambda a: num2date(a))
    #d = d[(d.time>=date2num(dates[0])) & (d.time<=date2num(dates[1]))]
    #u["labels"] = d.labels
    # Split the frame into chunks and label them in parallel.
    df_split = np.array_split(u, procs)
    pool = Pool(procs)
    print(" Going into procs...")
    u = pd.concat(pool.map(estimate_truths, df_split))
    pool.close()
    pool.join()
    return u
def generate_stats():
    """Append one kappa-agreement row per 2016 day to kappa_stat.csv,
    comparing estimated hop labels against Ribiero ground-scatter flags,
    resuming after the last date already present in the CSV.

    NOTE(review): `k` is only assigned inside the ``len(u) > 0`` branch, but
    ``K.append(k)`` runs unconditionally -- an empty fetch raises NameError
    on the first processed day, or silently reuses the previous day's value
    afterwards. Confirm intended behavior before relying on the output.
    """
    fname = "../outputs/cluster_tags_def_params/{rad}.{a_name}.gmm.{dn}.csv"
    pubfile = utils.get_pubfile()
    conn = utils.get_session(key_filename=pubfile)
    m = pd.read_csv("kappa_stat.csv", parse_dates=["date"])
    # Resume from the day after the last recorded date.
    start = (m.date.tolist()[-1]-dt.datetime(2016,1,1)).days + 1
    for d in range(start, 365):
        D, K = [], []
        date = dt.datetime(2016,1,1) + dt.timedelta(d)
        floc = fname.format(rad="cvw", a_name="dbscan", dn=date.strftime("%Y%m%d"))
        # Download this day's clustering output if not already cached locally.
        if not os.path.exists(floc): utils.fetch_file(conn, floc, "LFS/LFS_clustering_superdarn_data/")
        u = get_fetch_sd_data("cvw", [date, date+dt.timedelta(hours=24)])
        x = pd.read_csv(floc)
        if len(u) > 0:
            x = utils._run_riberio_threshold_on_rad(x)
            k = np.round(get_kappa(np.array(u.hop).astype(int), np.array(x["ribiero_gflg"])), 4)
            # Remove the cached file once consumed.
            os.system("rm -rf " + floc)
        K.append(k)
        D.append(date)
        # Append this day's row; header=False because the file already has one.
        s = pd.DataFrame()
        s["date"], s["kappa"] = D, K
        s.to_csv("kappa_stat.csv", index=False, header=False, mode="a")
    conn.close()
    return
# Script entry point: recompute the per-day kappa statistics.
if __name__ == "__main__":
    generate_stats()
|
{"hexsha": "a5ba39e5cb4c8ed99e365d525620ca819c1b05d0", "size": 5193, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/create_plots/simple_elev_range_model.py", "max_stars_repo_name": "shibaji7/clustering_superdarn_data", "max_stars_repo_head_hexsha": "02bc31dd85f66319bb46b632e0e7ac51ed98c432", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-02T20:13:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-02T20:13:14.000Z", "max_issues_repo_path": "analysis/create_plots/simple_elev_range_model.py", "max_issues_repo_name": "shibaji7/clustering_superdarn_data", "max_issues_repo_head_hexsha": "02bc31dd85f66319bb46b632e0e7ac51ed98c432", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/create_plots/simple_elev_range_model.py", "max_forks_repo_name": "shibaji7/clustering_superdarn_data", "max_forks_repo_head_hexsha": "02bc31dd85f66319bb46b632e0e7ac51ed98c432", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.045112782, "max_line_length": 108, "alphanum_fraction": 0.5426535721, "include": true, "reason": "import numpy", "num_tokens": 1679}
|
# Copyright (c) 2013 Jasper den Ouden, under the MIT license,
# see doc/mit.txt from the project directory.
using Treekenize
#Generates a random tree into stdout and returns the same list for later comparison.
# Writes a random token/delimiter stream to `to_stream` and returns the nested
# list of the numbers that were emitted, for later comparison with the parser.
#   p         - probability of opening a nested sub-tree at each slot
#   depth     - maximum remaining nesting depth (returns empty when < 0)
#   max_len   - each level emits rand(1:max_len) items
#   begin_end - collection of (open, close) delimiter string pairs
# NOTE(review): uses the pre-0.4 `{}` Any-array literal; on modern Julia this
# would be `Any[]`, so the file will not parse on current releases.
function rnd_tree(to_stream::IOStream, p::Number, depth::Integer,max_len::Integer, begin_end)
    list = {}
    if depth<0
        return {}
    end
    for i = 1:rand(1:max_len)
        if rand() < p #deepen
            j = rand(1:length(begin_end)) #Random 'parentheses'.
            b,e = begin_end[j]
            write(to_stream, b)
            push!(list, rnd_tree(to_stream,p, depth-1,max_len,begin_end))
            write(to_stream, e)
        else
            x = rand()
            push!(list,x)
            write(to_stream, "$x ") #Just spaced out random numbers.
            # Pad with a random amount of extra whitespace.
            for i = 1:rand(1:10)
                write(to_stream, " ")
            end
        end
    end
    return list
end
# Convenience overload: builds a random tree into an in-memory stream,
# terminates it with ";\n", then prints both the produced text and the
# reference tree (for manual inspection).
function rnd_tree(p::Number, depth::Integer,max_len::Integer, begin_end)
    stream = memio()
    tree = rnd_tree(stream, p,depth,max_len, begin_end)
    write(stream, ";\n");
    seek(stream,0) #Back to the beginning.
    println(readall(stream))
    println(tree)
end
# Round-trip test: writes a random tree to a stream, re-parses it with
# `treekenize`, and checks the parsed structure matches the reference tree.
# NOTE(review): relies on pre-0.4 Julia constructs ({} literals, memio,
# readall, two-argument assert); it will not run on modern Julia.
function test_treekenize(p::Number, depth::Integer,max_len::Integer,
                         begin_end, max_be_len::Integer)
    stream = memio()
    tree = rnd_tree(stream, p,depth,max_len, begin_end)
    write(stream, ";\n");
    seek(stream,0) #Back to the beginning.
    read_tree = treekenize(stream, (begin_end, none_incorrect(begin_end)),
                           ("top",";\n"),10, max_be_len)
    # Unwrap a StrExpr node to its body before comparing.
    compare_tree(tree, compare::StrExpr) = compare_tree(tree, compare.body)
    function compare_tree(tree, compare)
        # Re-parse numbers embedded in strings, then compare recursively.
        list = {}
        for el in compare
            if isa(el, String)
                for sel in split(el," ")
                    if sel!=""
                        push!(list, parsefloat(sel))
                    end
                end
            else
                push!(list, el)
            end
        end
        #ignore compare(hmm maybe that should exist..)
        assert(length(tree)==length(list), (tree,list))
        for i = 1:length(tree)
            if isa(list[i], Number)
                assert( tree[i]==list[i] )
            else
                compare_tree(tree[i], list[i])
            end
        end
    end
    compare_tree(tree, read_tree)
end
#TODO insert incorrectly placed ending/beginning parentheses and see if
# the checker finds them at the right spots.
# Runs `cnt` randomized round-trip tests using three delimiter pairs.
function test(cnt::Integer)
    for n = 1:cnt #TODO may want to test a larger variation of beginnings and ends.
        test_treekenize(0.4, 4,2, {("(",")"), ("begin","end"),
                                   ("[","]")},4)
    end
end
#test(100)
|
{"hexsha": "a099530d7d8a8b38a6a3f7793f1999c6ad653431", "size": 2769, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/test/treekenize.jl", "max_stars_repo_name": "o-jasper/Treekenize.jl", "max_stars_repo_head_hexsha": "d8521c7faaeaf5684dba3453859068c5f66896aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-25T04:20:53.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-25T04:20:53.000Z", "max_issues_repo_path": "src/test/treekenize.jl", "max_issues_repo_name": "o-jasper/Treekenize.jl", "max_issues_repo_head_hexsha": "d8521c7faaeaf5684dba3453859068c5f66896aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test/treekenize.jl", "max_forks_repo_name": "o-jasper/Treekenize.jl", "max_forks_repo_head_hexsha": "d8521c7faaeaf5684dba3453859068c5f66896aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-07-25T04:23:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T12:11:58.000Z", "avg_line_length": 31.8275862069, "max_line_length": 94, "alphanum_fraction": 0.5565185988, "num_tokens": 680}
|
#' sf object with the boundary of the Sicilia region (Italy)
#'
"sicilia"
|
{"hexsha": "78cadece86a00e82f3e32a8639a38e7e91d2163b", "size": 48, "ext": "r", "lang": "R", "max_stars_repo_path": "R/sicilia.r", "max_stars_repo_name": "guidofioravanti/regioniItalia", "max_stars_repo_head_hexsha": "5607ce1334f1dffa4e30e3ee9721dbde01d86477", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/sicilia.r", "max_issues_repo_name": "guidofioravanti/regioniItalia", "max_issues_repo_head_hexsha": "5607ce1334f1dffa4e30e3ee9721dbde01d86477", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/sicilia.r", "max_forks_repo_name": "guidofioravanti/regioniItalia", "max_forks_repo_head_hexsha": "5607ce1334f1dffa4e30e3ee9721dbde01d86477", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.0, "max_line_length": 35, "alphanum_fraction": 0.7291666667, "num_tokens": 16}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys
import glob
import random
import time
import pdb
# import imgaug
# from imgaug import augmenters as iaa
from PIL import Image
from tqdm import tqdm
import numpy as np
from six.moves import range
# import tensorflow as tf
from torchvision import transforms # noqa
from torch.utils.data import DataLoader, Dataset
from collections import deque
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
# from helpers.utils import *
def rotate_list(input_list, N):
    """Return a copy of *input_list* rotated N positions to the right.

    Negative N rotates to the left; N larger than the list length wraps.
    """
    rotated = deque(input_list)
    rotated.rotate(N)
    return list(rotated)
def ReadWholeSlideImage(image_path):
    """Open the image file at *image_path* and return it as a PIL Image.

    PIL opens lazily; pixel data is decoded on first access.
    """
    img = Image.open(image_path)
    return img
def getImagePatch(image, coords, size):
    """Return the square ``size``-by-``size`` crop of *image* whose top-left
    corner is at *coords* (first-axis, second-axis order).

    Works for both 2-D (grayscale) and 3-D (multi-channel) arrays; numpy
    slicing clips the window at the image boundary.
    """
    row, col = coords
    return image[row:row + size, col:col + size]
class SignetCellTrainingDataset(Dataset):
    """Torch Dataset yielding batches of image patches with bounding boxes.

    Patch centers and ground-truth boxes are read from text files under
    *data_root_dir*: 'training_tumor.txt' (positives) and, in 'pos_and_fp'
    mode, 'training_fp.txt' (mined false positives). Each line is
    ``path,c0,c1,x1,y1,x2,y2[,x1,y1,x2,y2,...]``; box corners are shifted to
    be relative to the patch origin. Note the mixed subtraction (center[1]
    from the first coordinate, center[0] from the second) -- presumably a
    row/column vs x/y swap; TODO confirm against the coordinate files.
    """
    def __init__(self,data_root_dir, image_size=(256, 256),n_classes=2,n_channels =3,
                 shuffle=True,batch_size=4, samples_per_epoch=500, transform=None,mode='pos_only'):
        'Initialization'
        self.n_classes = n_classes
        self.mode = mode
        # Positive (tumor) coordinates are always required.
        self.tumor_coord_path = os.path.join(data_root_dir,'training_tumor.txt')
        if self.mode == 'pos_and_fp':
            # Additionally use hard negatives from the false-positive list.
            self.normal_coord_path = os.path.join(data_root_dir,'training_fp.txt')
        elif self.mode =='pos_only':
            self.normal_coord_path = None
        else:
            raise Exception("Dataloader Error!!")
        self.image_size = image_size
        self.n_channels = n_channels
        self.transforms = transform
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.tumor_coords = []
        self.normal_coords = []
        # Parse the tumor file: path, center, flat list of boxes (4 ints each).
        t = open(self.tumor_coord_path)
        for line in t:
            line_list = line.strip('\n').split(',')
            pid_path = line_list[0]
            center = [int(line_list[1]), int(line_list[2])]
            patch_list = line_list[3:]
            boxlist = []
            for i in range(0,len(patch_list),4):
                # Shift absolute box corners to patch-relative coordinates.
                boxlist.append([int(patch_list[i]) - center[1],int(patch_list[i+1]) - center[0],int(patch_list[i+2]) - center[1],int(patch_list[i+3]) - center[0]])
            if len(boxlist) == 0:
                # A positive sample without any box is a data error.
                print(pid_path)
                raise Exception("Jaba")
            self.tumor_coords.append((pid_path,center,boxlist))
        print("No of tumor samples : {}".format(len(self.tumor_coords)))
        t.close()
        if self.normal_coord_path is not None:
            # Same format for the false-positive (negative) samples, but an
            # empty box list is allowed here.
            n = open(self.normal_coord_path)
            for line in n:
                line_list = line.strip('\n').split(',')
                pid_path = line_list[0]
                center = [int(line_list[1]), int(line_list[2])]
                patch_list = line_list[3:]
                boxlist = []
                for i in range(0,len(patch_list),4):
                    boxlist.append([int(patch_list[i]) - center[1],int(patch_list[i+1]) - center[0],int(patch_list[i+2]) - center[1],int(patch_list[i+3]) - center[0]])
                self.normal_coords.append((pid_path,center,boxlist))
            print("No of False positive samples : {}".format(len(self.normal_coords)))
            n.close()
        self._num_image = len(self.tumor_coords) + len(self.normal_coords)
        if samples_per_epoch is None:
            self.samples_per_epoch = self._num_image
        else:
            self.samples_per_epoch = samples_per_epoch
        self._shuffle_counter = 1
        # Number of epochs between full reshuffles of the coordinate lists.
        self._shuffle_reset_idx = int(np.floor(self._num_image / self.samples_per_epoch))
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        # NOTE(review): hard-coded length; the computed value is commented out.
        ########return int(np.floor(self.samples_per_epoch / self.batch_size))
        return 180000

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        X, y = self.__data_generation(index)
        return X, y,index

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        # Rotate the lists each epoch so successive epochs see a different
        # window of samples; fully reshuffle every _shuffle_reset_idx epochs.
        if self.shuffle == True:
            if self._shuffle_counter % self._shuffle_reset_idx == 0:
                random.shuffle(self.tumor_coords)
                random.shuffle(self.normal_coords)
                self._shuffle_counter = 1
            self.tumor_coords = rotate_list(self.tumor_coords, self.samples_per_epoch//2)
            self.normal_coords = rotate_list(self.normal_coords, self.samples_per_epoch//2)
            self._shuffle_counter += 1

    def __data_generation(self, index):
        'Generates data containing batch_size samples'
        # Initialization
        X = []
        y = []
        norm_batch_size = self.batch_size//2
        # tumor_batch_size = self.batch_size - self.batch_size//2
        tumor_batch_size = self.batch_size################################################################
        for i in range(self.batch_size):
            # try:
            # Second half of the batch (or all of it in 'pos_only' mode)
            # comes from the tumor list, the first half from the negatives.
            if i >= self.batch_size//2 or self.mode=='pos_only':
                pid_path, (x_center,y_center), boxlist = self.tumor_coords[(index)*norm_batch_size+i]
                pid_path = pid_path[6:]  # drop a fixed 6-char path prefix -- TODO confirm what it strips
                label = 1
            else:
                pid_path, (x_center, y_center), boxlist = self.normal_coords[(index)*tumor_batch_size+i]
                pid_path = pid_path[6:]
                label = 0
            # Generate data
            x_top_left = int(x_center)
            y_top_left = int(y_center)
            image = np.array(ReadWholeSlideImage(pid_path).convert('RGB'))
            # print ("=============================")
            # print (image.shape, self.image_size, x_top_left, y_top_left)
            image = Image.fromarray(getImagePatch(image, (x_top_left, y_top_left),
                                    min(self.image_size[0], self.image_size[1])))
            # Every box in a sample shares the sample-level class label.
            labels = []
            for l in boxlist:
                labels.append(label)
            labels = torch.tensor(labels)
            boxlist = BoxList(boxlist, image.size, mode="xyxy")
            boxlist.add_field("labels",labels)
            boxlist = boxlist.clip_to_image(remove_empty=True)
            #TODO: Check the below two lines
            if self.transforms is not None:
                image, boxlist = self.transforms(image, boxlist)
            X.append(image)
            y.append(boxlist)
            # except:
            #     continue
        return torch.stack(X), y
if __name__ == '__main__':
    # Ad-hoc smoke test for the data generators.
    # NOTE(review): this block references `augmentation` and
    # `DataGeneratorCoordFly`, neither of which is defined in this module
    # (the imgaug construction of `augmentation` is commented out below),
    # so running it as-is raises NameError.
    dir_path = '/media/balaji/Kori/histopath/coordinate_data/train_test_points_fold_1'
    train_tumor_coord_path = os.path.join(dir_path, 'train_tumor.txt')
    train_normal_coord_path = os.path.join(dir_path, 'train_normal.txt')
    # augmentation = iaa.SomeOf((0, 3),
    #             [
    #                 iaa.Fliplr(0.5),
    #                 iaa.Flipud(0.5),
    #                 iaa.Noop(),
    #                 iaa.OneOf([iaa.Affine(rotate=90),
    #                            iaa.Affine(rotate=180),
    #                            iaa.Affine(rotate=270)]),
    #                 iaa.GaussianBlur(sigma=(0.0, 0.5)),
    #             ])
    # Parameters
    train_transform_params = {'image_size': (256,256),
                              'batch_size': 4,
                              'n_classes': 2,
                              'n_channels': 3,
                              'shuffle': True,
                              'level': 0,
                              'samples_per_epoch': 60000,
                              'transform': augmentation
                             }
    valid_transform_params = {'image_size': (256, 256),
                              'batch_size': 4,
                              'n_classes': 2,
                              'n_channels': 3,
                              'shuffle': True,
                              'level': 0,
                              'samples_per_epoch': 30000,
                              'transform': None
                             }
    # Generators
    # training_generator = DataGeneratorCoordFly(train_tumor_coord_path, train_normal_coord_path, **train_transform_params)
    # print (training_generator.__len__())
    # # # Enable Test Code
    # for X, y in training_generator:
    #     imshow(normalize_minmax(X[0]), y[0][:,:,1], normalize_minmax(X[1]), y[1][:,:,1], \
    #             normalize_minmax(X[2]), y[2][:,:,1], normalize_minmax(X[3]), y[3][:,:,1])
    valid_tumor_coord_path = os.path.join(dir_path, 'validation_tumor.txt')
    valid_normal_coord_path = os.path.join(dir_path, 'validation_normal.txt')
    validation_generator = DataGeneratorCoordFly(valid_tumor_coord_path, valid_normal_coord_path, **valid_transform_params)
    print (validation_generator.__len__())
    # Enable Test Code
    # for X, y in validation_generator:
    #     imshow(normalize_minmax(X[0]), y[0][:,:,1], normalize_minmax(X[1]), y[1][:,:,1], \
    #             normalize_minmax(X[2]), y[2][:,:,1], normalize_minmax(X[3]), y[3][:,:,1])
    import time
    # Time each batch fetch to gauge loader throughput.
    start_time = time.time()
    for i, X in enumerate(validation_generator):
        elapsed_time = time.time() - start_time
        start_time = time.time()
        print (i, "Elapsed Time", np.round(elapsed_time, decimals=2), "seconds")
        pass
|
{"hexsha": "968a0083121a38f64e03d68511a6d57e12ae755c", "size": 8093, "ext": "py", "lang": "Python", "max_stars_repo_path": "maskrcnn_benchmark/data/datasets/train_loader.py", "max_stars_repo_name": "Sreehari-S/mask-rcnn-benchmark", "max_stars_repo_head_hexsha": "b4434c39fccda80575276308da86b6e944540445", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "maskrcnn_benchmark/data/datasets/train_loader.py", "max_issues_repo_name": "Sreehari-S/mask-rcnn-benchmark", "max_issues_repo_head_hexsha": "b4434c39fccda80575276308da86b6e944540445", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-02-18T12:25:48.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-18T12:25:48.000Z", "max_forks_repo_path": "maskrcnn_benchmark/data/datasets/train_loader.py", "max_forks_repo_name": "Sreehari-S/mask-rcnn-benchmark", "max_forks_repo_head_hexsha": "b4434c39fccda80575276308da86b6e944540445", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2923728814, "max_line_length": 152, "alphanum_fraction": 0.6718151489, "include": true, "reason": "import numpy", "num_tokens": 2179}
|
# -*- coding: utf-8 -*-
""" Binary data class for crowdsourced training data.
"""
# metadata variables
__author__ = "Hiroshi KAJINO <hiroshi.kajino.1989@gmail.com>"
__date__ = "2013/12/15"
__version__ = "1.0"
__copyright__ = "Copyright (c) 2013 Hiroshi Kajino all rights reserved."
__docformat__ = "restructuredtext en"
import sys
import numpy as np
DEBUG=1
class BinaryData:
    """ Binary crowd data class without feature vectors.

    :IVariables:
        num_instances : int
            the number of instances.
        num_workers : int
            the number of workers.
        y : numpy.array
            num_instances * num_workers numpy.array. Each column corresponds to each worker's label.
            y[i,j] == 0 if worker j doesn't label data i.
            y[i,j] == 1, or -1 if worker j labels data i.
    """
    # NOTE(review): the docstring calls the label matrix `y`, but the
    # attribute is actually stored as `response_array`.

    def __init__(self, response_array):
        """ Initialization
        """
        self.response_array = response_array
        self.num_instances = response_array.shape[0]
        self.num_workers = response_array.shape[1]

    def majority_vote(self, prob):
        """ Return the (soft/hard) majority votes

        :Variables:
            prob : str
                if prob = "prob", then return probabilities of positive labels.
                if prob = "log_prob", then return log probabilities of positive labels.
                if prob = "no", then return the majority voted labels.
        :RType: numpy.array
        :Returns: 1-d numpy.array of length `num_instances`, each element contains (soft/hard) majority votes.
        """
        # Labels are in {-1, 0, +1}: the row sum gives (#pos - #neg), and the
        # sum of squares gives the number of responses (#pos + #neg).
        pos_minus_neg = self.response_array.sum(axis=1)
        pos_plus_neg = (self.response_array * self.response_array).sum(axis=1)
        pos_array = (pos_plus_neg + pos_minus_neg) / 2.0
        neg_array = (pos_plus_neg - pos_minus_neg) / 2.0
        if DEBUG == 1:
            # Sanity checks on shapes and the count decomposition.
            # NOTE(review): error paths call exit(0) (a success status); an
            # exception or sys.exit(1) would be more appropriate.
            if pos_minus_neg.shape != (self.num_instances,) or pos_minus_neg.shape != (self.num_instances,):
                sys.stderr.write("ERROR: wrong shape")
                exit(0)
            if (not np.array_equal(pos_array + neg_array, pos_plus_neg)) or (not np.array_equal(pos_array - neg_array, pos_minus_neg)):
                sys.stderr.write("ERROR: wrong results")
                exit(0)
        if prob == "prob":
            return pos_array.astype('float') / pos_plus_neg.astype('float')
        elif prob == "log_prob":
            # np.ma.log masks log(0) (instances with zero counts) instead of
            # producing -inf warnings; returns [log P(+), log P(-)] rows.
            return np.array([np.ma.log(pos_array) - np.ma.log(pos_plus_neg), np.ma.log(neg_array) - np.ma.log(pos_plus_neg)])
        elif prob == "no":
            # Hard majority vote is not implemented yet.
            sys.stderr.write("ERROR: sorry, not implemented.")
            exit(0)
            #return pos_array.astype('float') / pos_plus_neg.astype('float')
if __name__ == "__main__":
    # for test
    # Ad-hoc check: three instances, seven workers (0 = no label given).
    mat = np.array([[1, 1, 1, 0, 0, -1, -1], [1, 1, 1, -1, 1, 1, 1], [-1, -1, 1, 0, 1, -1, 1]])
    c_data = BinaryData(mat)
    print(c_data.majority_vote("log_prob"))
    # Expected positive-label fractions, for comparison with the line above.
    print(np.log(np.array([3.0/5.0, 6.0/7.0, 3.0/6.0])))
|
{"hexsha": "c409e41e280920f324a6359ef9ffe8edb40c7f41", "size": 3005, "ext": "py", "lang": "Python", "max_stars_repo_path": "post_process_for_instance_clipping_protocol/crowd_data.py", "max_stars_repo_name": "kanojikajino/instance_privacy", "max_stars_repo_head_hexsha": "6b48f989b53631d35179afe04a7e38d59d767421", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "post_process_for_instance_clipping_protocol/crowd_data.py", "max_issues_repo_name": "kanojikajino/instance_privacy", "max_issues_repo_head_hexsha": "6b48f989b53631d35179afe04a7e38d59d767421", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "post_process_for_instance_clipping_protocol/crowd_data.py", "max_forks_repo_name": "kanojikajino/instance_privacy", "max_forks_repo_head_hexsha": "6b48f989b53631d35179afe04a7e38d59d767421", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5256410256, "max_line_length": 135, "alphanum_fraction": 0.6013311148, "include": true, "reason": "import numpy", "num_tokens": 779}
|
import numpy as np
import cv2
import sys
# Update path to the Haar cascades file if necessary, e.g. if OpenCV version is different
faceCascadeFile = '/usr/local/opt/opencv/share/opencv4/haarcascades/haarcascade_frontalface_default.xml'
# CascadeClassifier does not raise on a bad path; it loads empty, so check.
faceCascade = cv2.CascadeClassifier(faceCascadeFile)
if faceCascade.empty(): raise Exception('faceCascade is empty. Double-check the path.')

# Capture from the default webcam (device 0) at 640x480.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

while(True):
    # NOTE(review): `ret` is never checked; a failed read makes cvtColor raise.
    ret, frame = cap.read()
    # Haar cascade detection operates on grayscale input.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray)
    # Draw a green rectangle around each detected face.
    for(x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow('frame', frame)

    # Press 'Q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
{"hexsha": "5eab3304538aacedbd4bff24ccc509bb083ef015", "size": 873, "ext": "py", "lang": "Python", "max_stars_repo_path": "detection.py", "max_stars_repo_name": "estherjk/pyenv-opencv", "max_stars_repo_head_hexsha": "61d4202332ca58ce199daa54d2835d8a078cc592", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-11-13T19:32:48.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-01T13:42:41.000Z", "max_issues_repo_path": "detection.py", "max_issues_repo_name": "estherjk/pyenv-opencv", "max_issues_repo_head_hexsha": "61d4202332ca58ce199daa54d2835d8a078cc592", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-08-10T00:11:51.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-10T02:09:27.000Z", "max_forks_repo_path": "detection.py", "max_forks_repo_name": "estherjk/pyenv-opencv", "max_forks_repo_head_hexsha": "61d4202332ca58ce199daa54d2835d8a078cc592", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-01-13T09:01:19.000Z", "max_forks_repo_forks_event_max_datetime": "2018-01-13T09:01:19.000Z", "avg_line_length": 28.1612903226, "max_line_length": 104, "alphanum_fraction": 0.7079037801, "include": true, "reason": "import numpy", "num_tokens": 251}
|
import numpy as np
if __name__ == "__main__":
    # Angular velocity about the z-axis: 1 degree per 24-hour day expressed
    # in rad/s, as a 3x1 column vector.
    w_oI = np.array([0, 0, (np.pi / 180) / (24 * 60**2)]).reshape((3, 1))
    w_hB = np.array([0])  # NOTE(review): placeholder, currently unused
{"hexsha": "bc7c0db063bb4d8a87bf3e29080f79a6283d4205", "size": 148, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/rotations.py", "max_stars_repo_name": "msc5/attitudes", "max_stars_repo_head_hexsha": "89a3ab7aaf98e4985f1e0e6ac7e353a0798d677f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/rotations.py", "max_issues_repo_name": "msc5/attitudes", "max_issues_repo_head_hexsha": "89a3ab7aaf98e4985f1e0e6ac7e353a0798d677f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/rotations.py", "max_forks_repo_name": "msc5/attitudes", "max_forks_repo_head_hexsha": "89a3ab7aaf98e4985f1e0e6ac7e353a0798d677f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.5, "max_line_length": 73, "alphanum_fraction": 0.5337837838, "include": true, "reason": "import numpy", "num_tokens": 62}
|
using CoordinateTransformations, Rotations, StaticArrays
using RoboLib.Geom: Pose2D
export AckerParams, AckerData
# Static parameters of the Ackermann (bicycle-style) motion model.
struct AckerParams{SCALAR<:AbstractFloat} <: MotionParams
    # Vehicle length used by the steering kinematics
    # (theta_dot = 2*v*sin(beta)/car_length in `_step`).
    car_length::SCALAR
end
# Mutable state advanced by `step!`: current pose plus the last control.
# `ctrl` is an SVector{2} of (speed v, steering angle delta) -- see `step!`.
mutable struct AckerData{T} <: MotionData
    pose::Pose2D{T}
    ctrl::SVector{2, T}
    # TODO(cxs): special case
    # Convert arbitrary pose/ctrl inputs to the parametric field types.
    @inline function AckerData{T}(pose, ctrl) where T
        pose = Pose2D{T}(pose)
        ctrl = SVector{2, T}(ctrl)
        new{T}(pose, ctrl)
    end
    # Zero-initialised state.
    @inline function AckerData{T}() where T
        pose = Pose2D{T}()
        ctrl = zeros(SVector{2, T})
        new{T}(pose, ctrl)
    end
end
# Convenience constructors defaulting the scalar type to Float64.
@inline AckerData(pose, ctrl) = AckerData{Float64}(pose, ctrl)
@inline AckerData() = AckerData{Float64}()
# One integration step of the kinematic model for non-negligible steering.
# beta is the slip angle at the reference point; x/y use a closed-form arc
# update driven by the change in heading over dt.
# NOTE: singular as sin(beta) -> 0; `step!` routes |delta| < 0.001 to
# `_step_nodelta` instead.
@inline function _step(x, y, theta, dt, v, delta, car_length)
    beta = atan(tan(delta) / 2)
    theta_dot = (2 * v * sin(beta)) / car_length
    theta_n = theta + theta_dot * dt
    x_dot = (car_length / (2 * sin(beta))) * (sin(theta_n + beta) - sin(theta + beta))
    y_dot = (car_length / (2 * sin(beta))) * (-cos(theta_n + beta) + cos(theta + beta))
    x_n = x + x_dot
    y_n = y + y_dot
    x_n, y_n, theta_n
end
# Straight-line (first-order) variant used when steering is ~0, where the
# arc update in `_step` is numerically singular.
@inline function _step_nodelta(x, y, theta, dt, v, delta, car_length)
    beta = atan(tan(delta) / 2)
    s_theta_beta, c_theta_beta = sincos(theta + beta)
    x_dot = v * c_theta_beta
    y_dot = v * s_theta_beta
    theta_dot = (2 * v * sin(beta)) / car_length
    x_n = x + x_dot * dt
    y_n = y + y_dot * dt
    theta_n = theta + theta_dot * dt
    x_n, y_n, theta_n
end
# Advance `data.pose` by one time step `dt` using the control (v, delta)
# stored in `data.ctrl`. Mutates and returns `data`.
function step!(data::AckerData{T}, model::AckerParams, dt) where T
    v, delta = data.ctrl
    xc, yc, thetac = data.pose.statev
    # Near-zero steering: use the non-singular straight-line update.
    if abs(delta) < 0.001
        x, y, theta = _step_nodelta(xc, yc, thetac, dt, v, delta, model.car_length)
    else
        x, y, theta = _step(xc, yc, thetac, dt, v, delta, model.car_length)
    end
    data.pose = Pose2D{T}(x, y, theta)
    data
end
|
{"hexsha": "cd1bb10cd7bf0d0b646f81c49590ac96a6c2d71b", "size": 1916, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MotionModels/ackermann.jl", "max_stars_repo_name": "prl-mushr/mushr_pf.jl", "max_stars_repo_head_hexsha": "e8837cd8d58875ecab11fe76bc707a31dd57a39b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-30T08:20:31.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-30T08:20:31.000Z", "max_issues_repo_path": "src/MotionModels/ackermann.jl", "max_issues_repo_name": "prl-mushr/mushr_pf.jl", "max_issues_repo_head_hexsha": "e8837cd8d58875ecab11fe76bc707a31dd57a39b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MotionModels/ackermann.jl", "max_forks_repo_name": "prl-mushr/mushr_pf.jl", "max_forks_repo_head_hexsha": "e8837cd8d58875ecab11fe76bc707a31dd57a39b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-28T22:26:18.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-28T22:26:18.000Z", "avg_line_length": 28.1764705882, "max_line_length": 87, "alphanum_fraction": 0.6231732777, "num_tokens": 639}
|
import os, sys, random
import numpy as np
import PIL
from PIL import Image
def get_lbl_from_name(fname):
    """Return the integer class label from an image filename.

    The label is the single character immediately preceding the first
    ``'.png'`` occurrence in *fname* (e.g. ``'scan_1.png'`` -> ``1``).
    Raises ``ValueError`` if that character is not a digit.
    """
    stem, _, _ = fname.partition('.png')
    return int(stem[-1])
|
{"hexsha": "0fa5aac468bfd039363a69751752339ce9378a9d", "size": 162, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/gen_utils.py", "max_stars_repo_name": "Abhishekq10/TB-detection", "max_stars_repo_head_hexsha": "83c14223bbf7f07448fd10e3e5531fec7d2a7d44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/gen_utils.py", "max_issues_repo_name": "Abhishekq10/TB-detection", "max_issues_repo_head_hexsha": "83c14223bbf7f07448fd10e3e5531fec7d2a7d44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gen_utils.py", "max_forks_repo_name": "Abhishekq10/TB-detection", "max_forks_repo_head_hexsha": "83c14223bbf7f07448fd10e3e5531fec7d2a7d44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.25, "max_line_length": 41, "alphanum_fraction": 0.7037037037, "include": true, "reason": "import numpy", "num_tokens": 43}
|
[STATEMENT]
lemma AbsNat_zero [simp]: "AbsNat 0 + i = i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. AbsNat 0 + i = i
[PROOF STEP]
by (simp add: plus_Nat_def)
|
{"llama_tokens": 80, "file": "RefinementReactive_Temporal", "length": 1}
|
# Setup script for the klustaviewa package (setuptools-based).
import os
import os.path as op
import re
from setuptools import setup
import numpy as np
# Build hooks for compiled extensions; both are currently empty, so no
# extension modules are actually built.
cmdclass = { }
ext_modules = [ ]
# Find the version.
# The version string lives only in klustaviewa/__init__.py; it is parsed
# with a regex instead of importing the package (avoids import side effects).
curdir = op.dirname(op.realpath(__file__))
filename = op.join(curdir, 'klustaviewa/__init__.py')
with open(filename, 'r') as f:
    version = re.search(r"__version__ = '([^']+)'", f.read()).group(1)
LONG_DESCRIPTION = """Spike sorting graphical interface."""
# Remove a stale distutils MANIFEST so the packaged file list is regenerated.
if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')
if __name__ == '__main__':
    setup(
        zip_safe=False,
        name='klustaviewa',
        version=version,
        author='Cyrille Rossant',
        author_email='rossant@github',
        # Packages are enumerated explicitly (no find_packages()).
        packages=['klustaviewa',
                  'klustaviewa.control',
                  'klustaviewa.control.tests',
                  'klustaviewa.gui',
                  'klustaviewa.gui.tests',
                  'klustaviewa.scripts',
                  'klustaviewa.stats',
                  'klustaviewa.stats.tests',
                  'klustaviewa.views',
                  'klustaviewa.views.tests',
                  'klustaviewa.wizard',
                  'klustaviewa.wizard.tests',
                  ],
        # Scripts.
        # Installs a `klustaviewa` GUI entry point wrapping runklustaviewa:main.
        entry_points={
            'gui_scripts': [
                'klustaviewa = klustaviewa.scripts.runklustaviewa:main',
            ],
        },
        # app=['klustaviewa/scripts/runklustaviewa.pyw',
        # ],
        # Non-Python resources shipped with the package.
        package_data={
            'klustaviewa': ['icons/*.png', 'icons/*.ico', 'gui/*.css'],
        },
        # Cython stuff.
        cmdclass = cmdclass,
        ext_modules=ext_modules,
        # NOTE(review): np.get_include() returns a single path string, while
        # setuptools conventionally expects a list here — harmless with
        # ext_modules empty, but confirm if extensions are ever added.
        include_dirs=np.get_include(),
        url='https://klusta-team.github.io',
        license='LICENSE.md',
        description='Spike sorting software suite.',
        long_description=LONG_DESCRIPTION,
    )
|
{"hexsha": "82b46ffef870cb57dc6fa5e4ad0cbad79f5817bf", "size": 1908, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "adehad/klustaviewa", "max_stars_repo_head_hexsha": "d1cf3ddb341f5cc6273771a20f40e1f4cc9b31d1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2015-02-21T07:48:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-03T10:05:25.000Z", "max_issues_repo_path": "setup.py", "max_issues_repo_name": "adehad/klustaviewa", "max_issues_repo_head_hexsha": "d1cf3ddb341f5cc6273771a20f40e1f4cc9b31d1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2015-02-10T17:59:01.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-15T09:12:47.000Z", "max_forks_repo_path": "setup.py", "max_forks_repo_name": "adehad/klustaviewa", "max_forks_repo_head_hexsha": "d1cf3ddb341f5cc6273771a20f40e1f4cc9b31d1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-04-01T20:33:24.000Z", "max_forks_repo_forks_event_max_datetime": "2017-10-08T15:19:42.000Z", "avg_line_length": 26.1369863014, "max_line_length": 73, "alphanum_fraction": 0.5235849057, "include": true, "reason": "import numpy", "num_tokens": 457}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.