text stringlengths 0 27.1M | meta dict |
|---|---|
import os
import glob
import random
import numpy as np
import subprocess
import audiosegment
import inflect
from num2words import num2words
inflect_engine = inflect.engine()
PAD = '_'
EOS = '~'
PUNC = '!\'(),-.:;?`'
SPACE = ' '
SYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
en_symbols = SYMBOLS + PAD + EOS + PUNC + SPACE
_symbol_to_id = {s: i for i, s in enumerate(en_symbols)}
# Translation table deleting every punctuation character remove_punc strips.
# str.maketrans with a third argument maps each character to None, so the
# deletion happens in one C-level pass instead of 14 chained .replace calls.
_PUNC_DELETE_TABLE = str.maketrans('', '', ',.?*()[]/{};:&')

def remove_punc(text):
    """Return *text* with punctuation characters removed.

    Deletes exactly: , . ? * ( ) [ ] / { } ; : &
    Apostrophes, hyphens and quotation marks are intentionally preserved,
    matching the original chained-.replace implementation.
    """
    return text.translate(_PUNC_DELETE_TABLE)
def process_blizzard(text: str, txt_filepath: str) -> bool:
    """Clean one Blizzard transcript and verify it encodes with en_symbols.

    Strips annotation markers and bracket characters, expands &, %, $ into
    words, and spells out cardinal numbers, ordinals (1st/2nd/3rd/4th...),
    degree tokens ('42deg'/'42d') and pound tokens ('L42'/'42L').  Finally
    it tries to map every character of the cleaned text (plus EOS) to a
    symbol id; on failure it prints the file path plus the original and
    cleaned text for inspection.

    :param text: raw transcript text.
    :param txt_filepath: path of the transcript, used only for diagnostics.
    :return: True if every character is encodable, False otherwise.
    """
    original_text = text
    # Drop Blizzard-specific annotation markers.
    text = text.replace('@ ', '').replace('# ', '').replace('| ', '')
    # THE FOLLOWING LINE ONLY EXISTS FOR CLEANED BLIZZARD
    # BECAUSE I'M TRYING TO REUSE WEIGHTS FROM EXISTING DATA WHICH DIDN'T
    # HAVE QUOTES. EVENTUALLY, I DO NEED TO FIND A WAY TO SUPPORT THIS
    # SYMBOL AS IT'S A CRITICAL ONE IN DEFINING HOW SPEECH SOUNDS
    text = text.replace('"', '')
    text = text.replace(']', '')
    text = text.replace('[', '')
    text = text.replace('/', '')
    text = text.replace('}', '')
    text = text.replace('{', '')
    text = text.replace('*', '')
    text = text.replace('<c', '')
    # Symbols with a natural spoken form are spelled out rather than dropped.
    text = text.replace('&', ' and ')
    text = text.replace('%', ' percent ')
    text = text.replace('$', ' dollars ')
    no_punc_text = text.replace(',','').replace('.','').replace('?','')
    # (numeric value, original token) pairs for plain cardinal numbers.
    easy_numbers = [
        (int(remove_punc(i)), i)
        for i in text.split()
        if remove_punc(i).isdigit()
    ]
    # Now find and replace ordinal numbers
    ordinal_number_strs = [
        (int(remove_punc(i).replace('st','').replace('th','').replace('rd','').replace('nd','')), i)
        for i in text.split() if
        (
            remove_punc(i).endswith('st') or
            remove_punc(i).endswith('th') or
            remove_punc(i).endswith('rd') or
            remove_punc(i).endswith('nd')
        ) and
        (
            remove_punc(i) \
            .replace('st','') \
            .replace('th','') \
            .replace('rd','') \
            .replace('nd','') \
            .isdigit())
    ]
    # Raw tokens like '42deg' or '42d'; kept as strings, parsed in the loop below.
    degrees_number_strs = [
        i for i in no_punc_text.split() if
        (i.endswith('deg') or i.endswith('d')) and
        i.replace('deg','').replace('d','').isdigit()
    ]
    # (value, original token) pairs for pound-sterling tokens like 'L42' or '42L'.
    pounds_number_strs = [
        (int(remove_punc(i).replace('L','')), i)
        for i in text.split() if
        (i.endswith('L') or i.startswith('L')) and
        remove_punc(i).replace('L','').isdigit()
    ]
    # Replace longer tokens first so e.g. '123' is expanded before '12'.
    # NOTE(review): for the tuple lists the key is len(str(tuple)), which only
    # roughly tracks the token's length; verify this ordering suffices.
    easy_numbers = sorted(easy_numbers, key=lambda x: -1*len(str(x)))
    ordinal_number_strs = sorted(ordinal_number_strs, key=lambda x: -1*len(str(x)))
    degrees_number_strs = sorted(degrees_number_strs, key=lambda x: -1*len(str(x)))
    pounds_number_strs = sorted(pounds_number_strs, key=lambda x: -1*len(str(x)))
    for number, number_str in easy_numbers:
        number_text = inflect_engine.number_to_words(number)
        # Year-like values read better grouped in pairs ('nineteen ninety').
        if number > 1000 and number < 2200:
            number_text = inflect_engine.number_to_words(number, group=2)
        number_text = number_text.replace(',', '')
        number_text = ' ' + number_text.replace('-', ' ') + ' '
        # NOTE(review): plain substring replace; could also hit digits embedded
        # inside other tokens — confirm acceptable for this corpus.
        text = text.replace(number_str, number_text)
    # print(txt_filepath)
    # print(original_text)
    # print(text)
    # print('')
    for ordinal_number, ordinal_number_str in ordinal_number_strs:
        number_text = num2words(ordinal_number, to='ordinal')
        number_text = number_text.replace(',', '')
        number_text = ' ' + number_text.replace('-', ' ') + ' '
        text = text.replace(ordinal_number_str, number_text)
    # print(txt_filepath)
    # print(original_text)
    # print(text)
    # print('')
    for degree_number in degrees_number_strs:
        number = int(degree_number.replace('deg','').replace('d',''))
        number_text = inflect_engine.number_to_words(number)
        number_text = number_text.replace(',', '')
        number_text = ' ' + number_text.replace('-', ' ') + ' '
        text = text.replace(str(degree_number), number_text + ' degrees')
    # print(txt_filepath)
    # print(original_text)
    # print(text)
    # print('')
    for pound_number, pount_str in pounds_number_strs:
        number_text = inflect_engine.number_to_words(pound_number)
        number_text = number_text.replace(',', '')
        number_text = ' ' + number_text.replace('-', ' ') + ' '
        text = text.replace(pount_str, number_text + ' pounds')
    # print(txt_filepath)
    # print(original_text)
    # print(text)
    # print('')
    # Tidy spacing artifacts left by the expansions above.
    # NOTE(review): both arguments of the next replace render as a single
    # space; presumably the first was a double space — confirm against the
    # original file.
    text = text.replace(' ', ' ')
    text = text.replace(' ,', ',')
    text = text.replace(' .', '.')
    text = text.replace(' !', '!')
    text = text + EOS
    seq = None
    try:
        # Raises KeyError on any character outside en_symbols.
        seq = [_symbol_to_id[c] for c in text]
    except Exception as e:
        print(txt_filepath)
        print(original_text)
        print(text)
        print('')
        return False
    return True
# --- Script entry: scan every Blizzard transcript for un-encodable characters ---
# Collect all .txt transcripts from both the train and test splits.
txt_train_path = os.path.join('datasets', 'complete_blizzard/train_txt')
txt_test_path = os.path.join('datasets', 'complete_blizzard/test_txt')
txt_file_list = glob.glob(
    os.path.join(txt_train_path, '**', '*.txt'),
    recursive=True
) + glob.glob(
    os.path.join(txt_test_path, '**', '*.txt'),
    recursive=True
)
# process_blizzard prints the offending file and text whenever a character
# falls outside en_symbols; the boolean result is otherwise unused here.
for txt_filepath in txt_file_list:
    f = open(txt_filepath, "r")
    sentence = f.read().strip()
    f.close()
    success = process_blizzard(sentence, txt_filepath) | {
"alphanum_fraction": 0.5743090561,
"author": null,
"avg_line_length": 32.8742857143,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8592828d96fc4e10b90596f1d5ccc37cd88fa301",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-03-24T16:54:41.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-03-24T16:54:41.000Z",
"max_forks_repo_head_hexsha": "81043200e89e78cba4d065bb4bb262383d49e702",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "abhinavvadrevu/MelNet",
"max_forks_repo_path": "scripts/find_weird_chars.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "81043200e89e78cba4d065bb4bb262383d49e702",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "abhinavvadrevu/MelNet",
"max_issues_repo_path": "scripts/find_weird_chars.py",
"max_line_length": 100,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "81043200e89e78cba4d065bb4bb262383d49e702",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "abhinavvadrevu/MelNet",
"max_stars_repo_path": "scripts/find_weird_chars.py",
"max_stars_repo_stars_event_max_datetime": "2021-06-10T22:42:38.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-06-10T22:42:38.000Z",
"num_tokens": 1448,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5753
} |
// __BEGIN_LICENSE__
// Copyright (c) 2009-2013, United States Government as represented by the
// Administrator of the National Aeronautics and Space Administration. All
// rights reserved.
//
// The NGT platform is licensed under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance with the
// License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// __END_LICENSE__
/// \file stereo_corr.cc
///
#include <asp/Tools/stereo.h>
#include <vw/InterestPoint.h>
#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics.hpp>
#include <vw/Stereo/PreFilter.h>
#include <vw/Stereo/CorrelationView.h>
#include <vw/Stereo/CostFunctions.h>
#include <vw/Stereo/DisparityMap.h>
#include <asp/Core/DemDisparity.h>
#include <asp/Core/LocalHomography.h>
using namespace vw;
using namespace vw::stereo;
using namespace asp;
namespace vw {
  // Register a pixel-format ID so generic disk I/O can handle masked
  // 5-channel float pixels.  NOTE(review): 6 channels presumably because
  // PixelMask carries one extra validity channel — confirm.
  template<> struct PixelFormatID<PixelMask<Vector<float, 5> > > { static const PixelFormatEnum value = VW_PIXEL_GENERIC_6_CHANNEL; };
}
// Produce the low-resolution disparity D_sub for the sub-sampled image
// pair (or delegate to a DEM in seed_mode 2), then derive the full-image
// search range from it and store it in stereo_settings().search_range.
void produce_lowres_disparity( Options & opt ) {
  // Full-resolution masks and the sub-sampled image pair written by
  // earlier pipeline stages under the output prefix.
  DiskImageView<vw::uint8> Lmask(opt.out_prefix + "-lMask.tif"),
    Rmask(opt.out_prefix + "-rMask.tif");
  DiskImageView<PixelGray<float> > left_sub( opt.out_prefix+"-L_sub.tif" ),
    right_sub( opt.out_prefix+"-R_sub.tif" );
  // Per-axis ratio of sub-sampled to full-resolution dimensions.
  Vector2 downsample_scale( double(left_sub.cols()) / double(Lmask.cols()),
                            double(left_sub.rows()) / double(Lmask.rows()) );
  DiskImageView<uint8> left_mask_sub ( opt.out_prefix+"-lMask_sub.tif" ),
    right_mask_sub( opt.out_prefix+"-rMask_sub.tif" );
  // Scale the current full-resolution search range down to sub-image pixels.
  BBox2i search_range( floor(elem_prod(downsample_scale,stereo_settings().search_range.min())),
                       ceil (elem_prod(downsample_scale,stereo_settings().search_range.max())) );
  if ( stereo_settings().seed_mode == 1 ) {
    // Use low-res correlation to get the low-res disparity
    Vector2i expansion( search_range.width(),
                        search_range.height() );
    expansion *= stereo_settings().seed_percent_pad / 2.0f;
    // Expand by the user selected amount. Default is 25%.
    search_range.min() -= expansion;
    search_range.max() += expansion;
    VW_OUT(DebugMessage,"asp") << "D_sub search range: "
                               << search_range << " px\n";
    // Below we use on purpose stereo::CROSS_CORRELATION instead of
    // user's choice of correlation method, since this is the most
    // accurate, as well as reasonably fast for sub-sampled images.
    stereo::CostFunctionType cost_mode = stereo::CROSS_CORRELATION; // hard-coded
    Vector2i kernel_size = stereo_settings().corr_kernel;
    int corr_timeout = 5*stereo_settings().corr_timeout; // 5x, so try hard
    double seconds_per_op = 0.0;
    if (corr_timeout > 0)
      seconds_per_op = calc_seconds_per_op(cost_mode, left_sub,
                                           right_sub, kernel_size);
    // Correlate, filter outliers, and write D_sub in one lazy pipeline.
    // NOTE(review): the 1, 1, 2.0, 0.5 arguments are outlier-filter
    // parameters (presumably half-kernel and thresholds) — confirm against
    // rm_outliers_using_thresh's signature.
    asp::block_write_gdal_image
      (opt.out_prefix + "-D_sub.tif",
       rm_outliers_using_thresh
       (stereo::pyramid_correlate
        (left_sub, right_sub,
         left_mask_sub, right_mask_sub,
         stereo::LaplacianOfGaussian(stereo_settings().slogW),
         search_range, kernel_size, cost_mode,
         corr_timeout, seconds_per_op,
         stereo_settings().xcorr_threshold, stereo_settings().corr_max_levels),
        1, 1, 2.0, 0.5
        ), opt,
       TerminalProgressCallback("asp", "\t--> Low-resolution disparity:")
       );
  }else if ( stereo_settings().seed_mode == 2 ) {
    // Use a DEM to get the low-res disparity
    boost::shared_ptr<camera::CameraModel> left_camera_model, right_camera_model;
    opt.session->camera_models(left_camera_model, right_camera_model);
    produce_dem_disparity(opt, left_camera_model, right_camera_model);
  }else if ( stereo_settings().seed_mode == 3 ) {
    // D_sub is already generated by now by sparse_disp
  }
  // Read D_sub back and convert its disparity range to full resolution.
  ImageView<PixelMask<Vector2i> > sub_disp;
  read_image( sub_disp, opt.out_prefix + "-D_sub.tif" );
  search_range = stereo::get_disparity_range( sub_disp );
  VW_OUT(DebugMessage,"asp") << "D_sub resolved search range: "
                             << search_range << " px\n";
  search_range.min() = floor(elem_quot(search_range.min(),downsample_scale));
  search_range.max() = ceil(elem_quot(search_range.max(),downsample_scale));
  stereo_settings().search_range = search_range;
}
// Stage 1: determine the stereo search range — from user settings, from
// interest points, or from the low-resolution disparity D_sub — and
// produce (or reuse a cached) D_sub plus local homographies if requested.
void lowres_correlation( Options & opt ) {
  vw_out() << "\n[ " << current_posix_time_string()
           << " ] : Stage 1 --> LOW-RESOLUTION CORRELATION \n";
  // Working out search range if need be
  if (stereo_settings().is_search_defined()) {
    vw_out() << "\t--> Using user-defined search range.\n";
  }else if (stereo_settings().seed_mode == 2){
    // Do nothing as we will compute the search range based on D_sub
  }else if (stereo_settings().seed_mode == 3){
    // Do nothing as low-res disparity is already done by sparse_disp
  } else {
    // Match file between the input files
    std::string match_filename
      = ip::match_filename(opt.out_prefix, opt.in_file1, opt.in_file2);
    if (!fs::exists(match_filename)) {
      // If there is not any match files for the input image. Let's
      // gather some IP quickly from the low resolution images. This
      // routine should only run for:
      //   Pinhole + Epipolar
      //   Pinhole + None
      //   DG + None
      // Everything else should gather IP's all the time.
      // Average the x/y sub-sampling ratios of both images (4 terms / 4).
      double sub_scale =
        sum(elem_quot( Vector2(file_image_size( opt.out_prefix+"-L_sub.tif" )),
                       Vector2(file_image_size( opt.out_prefix+"-L.tif" ) ) )) +
        sum(elem_quot( Vector2(file_image_size( opt.out_prefix+"-R_sub.tif" )),
                       Vector2(file_image_size( opt.out_prefix+"-R.tif" ) ) ));
      sub_scale /= 4.0f;
      stereo_settings().search_range =
        approximate_search_range(opt.out_prefix,
                                 opt.out_prefix+"-L_sub.tif",
                                 opt.out_prefix+"-R_sub.tif",
                                 sub_scale );
    } else {
      // There exists a matchfile out there.
      std::vector<ip::InterestPoint> ip1, ip2;
      ip::read_binary_match_file( match_filename, ip1, ip2 );
      // Apply the alignment transforms (identity when no -align-*.exr
      // exists) to each matched pair and grow the search range over
      // all resulting disparities.
      Matrix<double> align_left_matrix = math::identity_matrix<3>();
      Matrix<double> align_right_matrix = math::identity_matrix<3>();
      if ( fs::exists(opt.out_prefix+"-align-L.exr") )
        read_matrix(align_left_matrix, opt.out_prefix + "-align-L.exr");
      if ( fs::exists(opt.out_prefix+"-align-R.exr") )
        read_matrix(align_right_matrix, opt.out_prefix + "-align-R.exr");
      BBox2 search_range;
      for ( size_t i = 0; i < ip1.size(); i++ ) {
        Vector3 r = align_right_matrix * Vector3(ip2[i].x, ip2[i].y, 1);
        Vector3 l = align_left_matrix * Vector3(ip1[i].x, ip1[i].y, 1);
        r /= r[2]; // normalize homogeneous coordinates
        l /= l[2];
        search_range.grow( subvector(r,0,2) - subvector(l,0,2) );
      }
      stereo_settings().search_range = grow_bbox_to_int( search_range );
    }
    vw_out() << "\t--> Detected search range: " << stereo_settings().search_range << "\n";
  }
  DiskImageView<vw::uint8> Lmask(opt.out_prefix + "-lMask.tif"),
    Rmask(opt.out_prefix + "-rMask.tif");
  // Performing disparity on sub images
  if ( stereo_settings().seed_mode > 0 ) {
    // Reuse prior existing D_sub if it exists
    bool rebuild = false;
    std::string sub_disp_file = opt.out_prefix+"-D_sub.tif";
    try {
      // Silence file-IO log chatter while probing whether the cached
      // D_sub opens cleanly; the config is restored on every path.
      vw_log().console_log().rule_set().add_rule(-1,"fileio");
      DiskImageView<PixelMask<Vector2i> > test(sub_disp_file);
      vw_settings().reload_config();
    } catch (vw::IOErr const& e) {
      // Missing file.
      vw_settings().reload_config();
      rebuild = true;
    } catch (vw::ArgumentErr const& e ) {
      // Throws on a corrupted file.
      vw_settings().reload_config();
      rebuild = true;
    }
    if ( rebuild )
      produce_lowres_disparity( opt );
    else
      vw_out() << "\t--> Using cached low-resolution disparity: " << sub_disp_file << "\n";
  }
  // Create the local homographies based on D_sub
  if (stereo_settings().seed_mode > 0 && stereo_settings().use_local_homography){
    std::string local_hom_file = opt.out_prefix + "-local_hom.txt";
    try {
      // If the homography file reads back fine, keep it; otherwise rebuild.
      ImageView<Matrix3x3> local_hom;
      read_local_homographies(local_hom_file, local_hom);
    } catch (vw::IOErr const& e) {
      create_local_homographies(opt);
    }
  }
  vw_out() << "\n[ " << current_posix_time_string()
           << " ] : LOW-RESOLUTION CORRELATION FINISHED \n";
}
// This correlator takes a low resolution disparity image (D_sub) as an
// input so that it may narrow its search range for each tile that is
// processed; optionally a disparity-spread image and per-tile local
// homographies refine the seeding further.
template <class Image1T, class Image2T, class Mask1T, class Mask2T, class SeedDispT, class PProcT>
class SeededCorrelatorView : public ImageViewBase<SeededCorrelatorView<Image1T, Image2T, Mask1T, Mask2T, SeedDispT, PProcT > > {
  Image1T m_left_image;
  Image2T m_right_image;
  Mask1T m_left_mask;
  Mask2T m_right_mask;
  SeedDispT m_sub_disp;        // low-resolution seed disparity (D_sub)
  SeedDispT m_sub_disp_spread; // optional per-pixel spread (D_sub_spread); empty if absent
  ImageView<Matrix3x3> const& m_local_hom; // per-tile homographies (use_local_homography)
  PProcT m_preproc_func;
  // Settings
  Vector2 m_upscale_factor; // full-res dims / D_sub dims, per axis
  BBox2i m_seed_bbox;       // bounding box of D_sub
  BBox2i m_trans_crop_win;  // disparity is computed only inside this window
  Vector2i m_kernel_size;
  stereo::CostFunctionType m_cost_mode;
  int m_corr_timeout;
  double m_seconds_per_op;
public:
  SeededCorrelatorView( ImageViewBase<Image1T> const& left_image,
                        ImageViewBase<Image2T> const& right_image,
                        ImageViewBase<Mask1T> const& left_mask,
                        ImageViewBase<Mask2T> const& right_mask,
                        ImageViewBase<SeedDispT> const& sub_disp,
                        ImageViewBase<SeedDispT> const& sub_disp_spread,
                        ImageView<Matrix3x3> const& local_hom,
                        stereo::PreFilterBase<PProcT> const& filter,
                        BBox2i trans_crop_win,
                        Vector2i const& kernel_size,
                        stereo::CostFunctionType cost_mode,
                        int corr_timeout, double seconds_per_op) :
    m_left_image (left_image.impl()), m_right_image (right_image.impl ()),
    m_left_mask (left_mask.impl ()), m_right_mask (right_mask.impl ()),
    m_sub_disp (sub_disp.impl ()), m_sub_disp_spread(sub_disp_spread.impl()),
    m_local_hom (local_hom), m_preproc_func( filter.impl() ),
    m_trans_crop_win(trans_crop_win),
    m_kernel_size (kernel_size), m_cost_mode(cost_mode),
    m_corr_timeout (corr_timeout), m_seconds_per_op(seconds_per_op){
    m_upscale_factor[0] = double(m_left_image.cols()) / m_sub_disp.cols();
    m_upscale_factor[1] = double(m_left_image.rows()) / m_sub_disp.rows();
    m_seed_bbox = bounding_box( m_sub_disp );
  }
  // Image View interface
  typedef PixelMask<Vector2i> pixel_type;
  typedef pixel_type result_type;
  typedef ProceduralPixelAccessor<SeededCorrelatorView> pixel_accessor;
  inline int32 cols () const { return m_left_image.cols(); }
  inline int32 rows () const { return m_left_image.rows(); }
  inline int32 planes() const { return 1; }
  inline pixel_accessor origin() const { return pixel_accessor( *this, 0, 0 ); }
  // Per-pixel access is deliberately unsupported: this view only makes
  // sense rasterized tile by tile via prerasterize().
  inline pixel_type operator()( double /*i*/, double /*j*/, int32 /*p*/ = 0 ) const {
    vw_throw(NoImplErr() << "SeededCorrelatorView::operator()(...) is not implemented");
    return pixel_type();
  }
  typedef CropView<ImageView<pixel_type> > prerasterize_type;
  inline prerasterize_type prerasterize(BBox2i const& bbox) const {
    // We do stereo only in m_trans_crop_win. Skip the current tile if
    // it does not intersect this region.
    BBox2i intersection = bbox; intersection.crop(m_trans_crop_win);
    if (intersection.empty()){
      return prerasterize_type(ImageView<pixel_type>(bbox.width(),
                                                     bbox.height()),
                               -bbox.min().x(), -bbox.min().y(),
                               cols(), rows() );
    }
    CropView<ImageView<pixel_type> > disparity = prerasterize_helper(bbox);
    // Set to invalid the disparity outside m_trans_crop_win.
    for (int col = bbox.min().x(); col < bbox.max().x(); col++){
      for (int row = bbox.min().y(); row < bbox.max().y(); row++){
        if (!m_trans_crop_win.contains(Vector2(col, row))){
          disparity(col, row) = pixel_type();
        }
      }
    }
    return disparity;
  }
  // Does the actual work for one tile: derives the local search range
  // from the seed data (or from the global settings) and runs the
  // pyramid correlator, optionally through a local homography.
  inline prerasterize_type prerasterize_helper(BBox2i const& bbox) const {
    bool use_local_homography = stereo_settings().use_local_homography;
    Matrix<double> lowres_hom = math::identity_matrix<3>();
    Matrix<double> fullres_hom = math::identity_matrix<3>();
    ImageViewRef<typename Image2T::pixel_type> right_trans_img;
    ImageViewRef<typename Mask2T::pixel_type > right_trans_mask;
    bool do_round = true; // round integer disparities after transform
    // User strategies
    BBox2f local_search_range;
    if ( stereo_settings().seed_mode > 0 ) {
      // The low-res version of bbox
      BBox2i seed_bbox( elem_quot(bbox.min(), m_upscale_factor),
                        elem_quot(bbox.max(), m_upscale_factor) );
      seed_bbox.expand(1);
      seed_bbox.crop( m_seed_bbox );
      VW_OUT(DebugMessage, "stereo") << "Getting disparity range for : "
                                     << seed_bbox << "\n";
      SeedDispT disparity_in_box = crop( m_sub_disp, seed_bbox );
      if (!use_local_homography){
        local_search_range = stereo::get_disparity_range( disparity_in_box );
      }else{
        int ts = Options::corr_tile_size();
        lowres_hom = m_local_hom(bbox.min().x()/ts, bbox.min().y()/ts);
        local_search_range = stereo::get_disparity_range
          (transform_disparities(do_round, seed_bbox,
                                 lowres_hom, disparity_in_box));
      }
      bool has_sub_disp_spread = ( m_sub_disp_spread.cols() != 0 && m_sub_disp_spread.rows() != 0 );
      // Sanity check: If m_sub_disp_spread was provided, it better have
      // the same size as sub_disp.
      // BUG FIX: this test used '&&' between the two size comparisons, so
      // it only threw when BOTH dimensions differed; a spread image
      // mismatched in just one dimension slipped through. Use '||'.
      if ( has_sub_disp_spread &&
           ( m_sub_disp_spread.cols() != m_sub_disp.cols() ||
             m_sub_disp_spread.rows() != m_sub_disp.rows() ) ){
        vw_throw( ArgumentErr() << "stereo_corr: D_sub and D_sub_spread must have equal sizes.\n");
      }
      if (has_sub_disp_spread){
        // Expand the disparity range by m_sub_disp_spread.
        SeedDispT spread_in_box = crop( m_sub_disp_spread, seed_bbox );
        if (!use_local_homography){
          BBox2f spread = stereo::get_disparity_range( spread_in_box );
          local_search_range.min() -= spread.max();
          local_search_range.max() += spread.max();
        }else{
          // With a homography the spread must be transformed together
          // with the disparity; take the union of upper and lower bounds.
          SeedDispT upper_disp
            = transform_disparities(do_round, seed_bbox, lowres_hom,
                                    disparity_in_box + spread_in_box);
          SeedDispT lower_disp
            = transform_disparities(do_round, seed_bbox, lowres_hom,
                                    disparity_in_box - spread_in_box);
          BBox2f upper_range = stereo::get_disparity_range(upper_disp);
          BBox2f lower_range = stereo::get_disparity_range(lower_disp);
          local_search_range = upper_range;
          local_search_range.grow(lower_range);
        }
      }
      if (use_local_homography){
        // Lift the low-res homography to full resolution and pre-warp the
        // right image (and its mask) so the correlator sees aligned tiles.
        Vector3 upscale( m_upscale_factor[0], m_upscale_factor[1], 1 );
        Vector3 dnscale( 1.0/m_upscale_factor[0], 1.0/m_upscale_factor[1], 1 );
        fullres_hom = diagonal_matrix(upscale)*lowres_hom*diagonal_matrix(dnscale);
        ImageViewRef< PixelMask<typename Image2T::pixel_type> >
          right_trans_masked_img
          = transform (copy_mask( m_right_image.impl(),
                                  create_mask(m_right_mask.impl()) ),
                       HomographyTransform(fullres_hom),
                       m_left_image.impl().cols(), m_left_image.impl().rows());
        right_trans_img = apply_mask(right_trans_masked_img);
        right_trans_mask
          = channel_cast_rescale<uint8>(select_channel(right_trans_masked_img, 1));
      }
      local_search_range = grow_bbox_to_int(local_search_range);
      // Expand local_search_range by 1. This is necessary since
      // m_sub_disp is integer-valued, and perhaps the search
      // range was supposed to be a fraction of integer bigger.
      local_search_range.expand(1);
      // Scale the search range to full-resolution
      local_search_range.min() = floor(elem_prod(local_search_range.min(),
                                                 m_upscale_factor));
      local_search_range.max() = ceil(elem_prod(local_search_range.max(),
                                                m_upscale_factor));
      VW_OUT(DebugMessage, "stereo") << "SeededCorrelatorView("
                                     << bbox << ") search range "
                                     << local_search_range << " vs "
                                     << stereo_settings().search_range << "\n";
    } else{
      local_search_range = stereo_settings().search_range;
      VW_OUT(DebugMessage,"stereo") << "Searching with "
                                    << stereo_settings().search_range << "\n";
    }
    if (use_local_homography){
      typedef stereo::PyramidCorrelationView<Image1T, ImageViewRef<typename Image2T::pixel_type>, Mask1T,ImageViewRef<typename Mask2T::pixel_type>, PProcT> CorrView;
      CorrView corr_view( m_left_image, right_trans_img,
                          m_left_mask, right_trans_mask,
                          m_preproc_func, local_search_range,
                          m_kernel_size, m_cost_mode,
                          m_corr_timeout, m_seconds_per_op,
                          stereo_settings().xcorr_threshold,
                          stereo_settings().corr_max_levels );
      return corr_view.prerasterize(bbox);
    }else{
      typedef stereo::PyramidCorrelationView<Image1T, Image2T, Mask1T, Mask2T, PProcT> CorrView;
      CorrView corr_view( m_left_image, m_right_image,
                          m_left_mask, m_right_mask,
                          m_preproc_func, local_search_range,
                          m_kernel_size, m_cost_mode,
                          m_corr_timeout, m_seconds_per_op,
                          stereo_settings().xcorr_threshold,
                          stereo_settings().corr_max_levels );
      return corr_view.prerasterize(bbox);
    }
  }
  template <class DestT>
  inline void rasterize(DestT const& dest, BBox2i bbox) const {
    vw::rasterize(prerasterize(bbox), dest, bbox);
  }
};
/// Free-function helper: deduces the template arguments from its inputs
/// and builds the matching SeededCorrelatorView. Purely a convenience
/// wrapper; all arguments are forwarded unchanged to the constructor.
template <class Image1T, class Image2T, class Mask1T, class Mask2T, class SeedDispT, class PProcT>
SeededCorrelatorView<Image1T, Image2T, Mask1T, Mask2T, SeedDispT, PProcT>
seeded_correlation( ImageViewBase<Image1T> const& left,
                    ImageViewBase<Image2T> const& right,
                    ImageViewBase<Mask1T> const& lmask,
                    ImageViewBase<Mask2T> const& rmask,
                    ImageViewBase<SeedDispT> const& sub_disp,
                    ImageViewBase<SeedDispT> const& sub_disp_spread,
                    ImageView<Matrix3x3> const& local_hom,
                    stereo::PreFilterBase<PProcT> const& filter,
                    BBox2i trans_crop_win,
                    Vector2i const& kernel_size,
                    stereo::CostFunctionType cost_type,
                    int corr_timeout, double seconds_per_op) {
  return SeededCorrelatorView<Image1T, Image2T, Mask1T, Mask2T, SeedDispT, PProcT>(
      left.impl(), right.impl(), lmask.impl(), rmask.impl(),
      sub_disp.impl(), sub_disp_spread.impl(), local_hom, filter.impl(),
      trans_crop_win, kernel_size, cost_type, corr_timeout, seconds_per_op );
}
// Full-resolution correlation stage: runs the low-resolution stage first
// (which seeds the search range), then correlates the full image pair via
// SeededCorrelatorView with the configured pre-filter, and writes D.tif.
void stereo_correlation( Options& opt ) {
  lowres_correlation(opt);
  if (stereo_settings().compute_low_res_disparity_only) return;
  vw_out() << "\n[ " << current_posix_time_string()
           << " ] : Stage 1 --> CORRELATION \n";
  // Provide the user with some feedback of what we are actually going
  // to use.
  vw_out() << "\t--------------------------------------------------\n";
  vw_out() << "\t Kernel Size: " << stereo_settings().corr_kernel << std::endl;
  if ( stereo_settings().seed_mode > 0 )
    vw_out() << "\t Refined Search: "
             << stereo_settings().search_range << std::endl;
  else
    vw_out() << "\t Search Range: "
             << stereo_settings().search_range << std::endl;
  vw_out() << "\t Cost Mode: " << stereo_settings().cost_mode << std::endl;
  vw_out(DebugMessage) << "\t XCorr Threshold: " << stereo_settings().xcorr_threshold << std::endl;
  vw_out(DebugMessage) << "\t Prefilter: " << stereo_settings().pre_filter_mode << std::endl;
  vw_out(DebugMessage) << "\t Prefilter Size: " << stereo_settings().slogW << std::endl;
  vw_out() << "\t--------------------------------------------------\n";
  // Load up for the actual native resolution processing
  DiskImageView<PixelGray<float> > left_disk_image(opt.out_prefix+"-L.tif"),
    right_disk_image(opt.out_prefix+"-R.tif");
  DiskImageView<vw::uint8> Lmask(opt.out_prefix + "-lMask.tif"),
    Rmask(opt.out_prefix + "-rMask.tif");
  // Low-resolution seed disparity; only used when seeding is on.
  ImageViewRef<PixelMask<Vector2i> > sub_disp;
  if ( stereo_settings().seed_mode > 0 )
    sub_disp =
      DiskImageView<PixelMask<Vector2i> >(opt.out_prefix+"-D_sub.tif");
  ImageViewRef<PixelMask<Vector2i> > sub_disp_spread;
  if ( stereo_settings().seed_mode == 2 || stereo_settings().seed_mode == 3 ){
    // D_sub_spread is mandatory for seed_mode 2 and 3.
    sub_disp_spread =
      DiskImageView<PixelMask<Vector2i> >(opt.out_prefix+"-D_sub_spread.tif");
  }else if ( stereo_settings().seed_mode == 1 ){
    // D_sub_spread is optional for seed_mode 1, we use it only if
    // it is provided.
    try {
      sub_disp_spread =
        DiskImageView<PixelMask<Vector2i> >(opt.out_prefix+"-D_sub_spread.tif");
    }
    catch (vw::IOErr const& e) {}
    catch (vw::ArgumentErr const& e) {}
  }
  ImageView<Matrix3x3> local_hom;
  if ( stereo_settings().seed_mode > 0 && stereo_settings().use_local_homography ){
    std::string local_hom_file = opt.out_prefix + "-local_hom.txt";
    read_local_homographies(local_hom_file, local_hom);
  }
  // Map the numeric cost-mode setting onto the correlator's enum.
  stereo::CostFunctionType cost_mode;
  if (stereo_settings().cost_mode == 0) cost_mode = stereo::ABSOLUTE_DIFFERENCE;
  else if (stereo_settings().cost_mode == 1) cost_mode = stereo::SQUARED_DIFFERENCE;
  else if (stereo_settings().cost_mode == 2) cost_mode = stereo::CROSS_CORRELATION;
  else
    vw_throw( ArgumentErr() << "Unknown value " << stereo_settings().cost_mode
              << " for cost-mode.\n" );
  ImageViewRef<PixelMask<Vector2i> > fullres_disparity;
  Vector2i kernel_size = stereo_settings().corr_kernel;
  BBox2i trans_crop_win = stereo_settings().trans_crop_win;
  int corr_timeout = stereo_settings().corr_timeout;
  // Estimate per-operation cost so the correlator can honor corr_timeout.
  double seconds_per_op = 0.0;
  if (corr_timeout > 0)
    seconds_per_op = calc_seconds_per_op(cost_mode, left_disk_image, right_disk_image,
                                         kernel_size);
  // Build the (lazy) full-resolution disparity view with the chosen pre-filter.
  if ( stereo_settings().pre_filter_mode == 2 ) {
    vw_out() << "\t--> Using LOG pre-processing filter with "
             << stereo_settings().slogW << " sigma blur.\n";
    fullres_disparity =
      seeded_correlation( left_disk_image, right_disk_image, Lmask, Rmask,
                          sub_disp, sub_disp_spread, local_hom,
                          stereo::LaplacianOfGaussian(stereo_settings().slogW),
                          trans_crop_win, kernel_size, cost_mode, corr_timeout,
                          seconds_per_op );
  } else if ( stereo_settings().pre_filter_mode == 1 ) {
    vw_out() << "\t--> Using Subtracted Mean pre-processing filter with "
             << stereo_settings().slogW << " sigma blur.\n";
    fullres_disparity =
      seeded_correlation( left_disk_image, right_disk_image, Lmask, Rmask,
                          sub_disp, sub_disp_spread, local_hom,
                          stereo::SubtractedMean(stereo_settings().slogW),
                          trans_crop_win, kernel_size, cost_mode, corr_timeout,
                          seconds_per_op );
  } else {
    vw_out() << "\t--> Using NO pre-processing filter." << std::endl;
    fullres_disparity =
      seeded_correlation( left_disk_image, right_disk_image, Lmask, Rmask,
                          sub_disp, sub_disp_spread, local_hom,
                          stereo::NullOperation(),
                          trans_crop_win, kernel_size, cost_mode, corr_timeout,
                          seconds_per_op );
  }
  // Rasterizing the view here is what triggers the actual correlation work.
  std::string d_file = opt.out_prefix + "-D.tif";
  vw_out() << "Writing: " << d_file << "\n";
  asp::block_write_gdal_image(d_file,
                              fullres_disparity, opt,
                              TerminalProgressCallback("asp", "\t--> Correlation :") );
  vw_out() << "\n[ " << current_posix_time_string()
           << " ] : CORRELATION FINISHED \n";
}
/// Entry point: register the stereo sessions, parse the command line,
/// enlarge the raster tile size for the integer correlator, and run the
/// correlation stage. Errors are handled by the standard ASP catch macro.
int main(int argc, char* argv[]) {
  stereo_register_sessions();
  Options opt;
  try {
    handle_arguments( argc, argv, opt,
                      CorrelationDescription() );
    // The integer correlator requires large tiles, so override the
    // default raster tile size with the correlator's preferred one.
    const int tile_size = Options::corr_tile_size();
    opt.raster_tile_size = Vector2i(tile_size, tile_size);
    // Hand off to the internal processing stages.
    stereo_correlation( opt );
  } ASP_STANDARD_CATCHES;
  return 0;
}
| {
"alphanum_fraction": 0.628457295,
"author": null,
"avg_line_length": 44.0284757119,
"converted": null,
"ext": "cc",
"file": null,
"hexsha": "f379cc966d45bf82fbba67c1fddb2b85769eeddb",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a9cb9129013f278e9f65e435193b735a6b051eb9",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "fenglang12345/StereoPipeline-2.4.0",
"max_forks_repo_path": "src/asp/Tools/stereo_corr.cc",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a9cb9129013f278e9f65e435193b735a6b051eb9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "fenglang12345/StereoPipeline-2.4.0",
"max_issues_repo_path": "src/asp/Tools/stereo_corr.cc",
"max_line_length": 165,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a9cb9129013f278e9f65e435193b735a6b051eb9",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "fenglang12345/StereoPipeline-2.4.0",
"max_stars_repo_path": "src/asp/Tools/stereo_corr.cc",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 6325,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 26285
} |
# Import Libraries
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
def stack_models(df_prepared, df_target, model_1, model_2, model_3, model_4):
    """
    Stack four fitted models and blend their predictions with a Random Forest
    Regressor (the "blender"); evaluate the blender by cross-validation on the
    stacking set, then train it on the full stacking set.

    :param df_prepared: stacking-set features fed to each base model's .predict
    :param df_target: stacking-set target values
    :param model_1: first fitted base estimator exposing .predict
    :param model_2: second fitted base estimator exposing .predict
    :param model_3: third fitted base estimator exposing .predict
    :param model_4: fourth fitted base estimator exposing .predict
    :return: tuple of (trained blender model, mean of its cross-validated RMSE)
    """
    estimators = [model_1, model_2, model_3, model_4]
    # Each column of the blender's training matrix is one base model's predictions.
    stack_predictions = np.empty((df_prepared.shape[0], len(estimators)), dtype=np.float32)
    for index, estimator in enumerate(estimators):
        stack_predictions[:, index] = np.reshape(estimator.predict(df_prepared), (df_prepared.shape[0],))
    rf_blender = RandomForestRegressor(n_estimators=20, random_state=123)
    # cv=3 matches the documented intent; the original comment said "# cross
    # validation sets =3" but relied on sklearn's default fold count.
    val_scores = cross_val_score(rf_blender, stack_predictions, df_target,
                                 scoring='neg_mean_squared_error', cv=3, n_jobs=-1)
    # Fit the blender on the full stacking set so the returned model is actually
    # trained, as the docstring promises (cross_val_score only fits internal clones).
    rf_blender.fit(stack_predictions, df_target)
    return rf_blender, np.mean(np.sqrt(np.array(val_scores) * -1))
| {
"alphanum_fraction": 0.7377972466,
"author": null,
"avg_line_length": 47,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "12206ca200b4b6557e0ffb1c3afef3ff04d949e4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-11-20T20:52:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-11-20T20:52:32.000Z",
"max_forks_repo_head_hexsha": "83e536db35383b7e5266cf8370405b20aa4641b0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rohansurve212/Black_Friday_Data_Hack",
"max_forks_repo_path": "src/blend_stacked_models.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "83e536db35383b7e5266cf8370405b20aa4641b0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rohansurve212/Black_Friday_Data_Hack",
"max_issues_repo_path": "src/blend_stacked_models.py",
"max_line_length": 119,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "83e536db35383b7e5266cf8370405b20aa4641b0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "rohansurve212/Black_Friday_Data_Hack",
"max_stars_repo_path": "src/blend_stacked_models.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 367,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1598
} |
[STATEMENT]
lemma secureTT_secure: "secureTT \<Longrightarrow> secure"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. secureTT \<Longrightarrow> secure
[PROOF STEP]
unfolding secureTT_def secure_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>tr vl vl1. validSystemTrace tr \<and> TT tr \<and> B vl vl1 \<and> V tr = vl \<longrightarrow> (\<exists>tr1. validSystemTrace tr1 \<and> TT tr1 \<and> O tr1 = O tr \<and> V tr1 = vl1) \<Longrightarrow> \<forall>tr vl vl1. validSystemTrace tr \<and> TT tr \<and> B vl vl1 \<and> V tr = vl \<longrightarrow> (\<exists>tr1. validSystemTrace tr1 \<and> O tr1 = O tr \<and> V tr1 = vl1)
[PROOF STEP]
by blast | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Bounded_Deducibility_Security_BD_Security_Triggers",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 244,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
#include <boost/simd/include/native.hpp>
#include <boost/simd/preprocessor/stack_buffer.hpp>
#include <boost/simd/include/functions/aligned_load.hpp>
using boost::simd::aligned_load;
using boost::simd::native;
int main()
{
  // simd_t: the native SIMD vector of doubles for the currently targeted extension.
  typedef native<double,BOOST_SIMD_DEFAULT_EXTENSION> simd_t;
  // Aligned stack buffer of 15 doubles used as the load source.
  // NOTE(review): 15 is presumably chosen to leave room for the offset /
  // "misaligned" variants below — confirm against the example's docs.
  BOOST_SIMD_ALIGNED_STACK_BUFFER( data, double, 15 );
  // Regular scalar load
  double d = aligned_load<double>(&data[0]);
  // Scalar load with offset
  d = aligned_load<double>(&data[0],2);
  // Scalar load with "misalignment"
  d = aligned_load<double,2>(&data[0]+2);
  // Scalar load with "misalignment" and offset
  d = aligned_load<double,2>(&data[2],2);
  // Regular SIMD load
  simd_t vd = aligned_load<simd_t >(&data[0]);
  // SIMD load with offset
  vd = aligned_load<simd_t >(&data[0], simd_t::size());
  // SIMD load with "misalignment"
  vd = aligned_load<simd_t,2>(&data[0]+2);
  // SIMD load with "misalignment" and offset
  vd = aligned_load<simd_t,2>(&data[2],simd_t::size());
}
| {
"alphanum_fraction": 0.699195171,
"author": null,
"avg_line_length": 26.8648648649,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "0e61852902dba43e464e3e60443a12107293d8c9",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2021-07-31T12:46:14.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-12-02T12:59:17.000Z",
"max_forks_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "psiha/nt2",
"max_forks_repo_path": "modules/boost/simd/sdk/examples/memory/aligned_load.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "psiha/nt2",
"max_issues_repo_path": "modules/boost/simd/sdk/examples/memory/aligned_load.cpp",
"max_line_length": 61,
"max_stars_count": 34,
"max_stars_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "psiha/nt2",
"max_stars_repo_path": "modules/boost/simd/sdk/examples/memory/aligned_load.cpp",
"max_stars_repo_stars_event_max_datetime": "2022-01-04T02:18:13.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-05-19T18:10:17.000Z",
"num_tokens": 279,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 994
} |
import numpy as np
from nntoolbox.losses import PinballLoss
import torch
class TestPinball:
    """Unit tests for nntoolbox.losses.PinballLoss."""

    def test_pinball(self):
        """
        Check PinballLoss(tau=0.1) against the reference value adopted from
        https://www.tensorflow.org/addons/api_docs/python/tfa/losses/PinballLoss
        """
        target = torch.from_numpy(np.array([0., 0., 1., 1.]))
        # Renamed from ``input`` to avoid shadowing the builtin of the same name.
        prediction = torch.from_numpy(np.array([1., 1., 1., 0.]))
        loss = PinballLoss(tau=0.1)
        assert abs(loss(prediction, target).item() - 0.475) < 1e-3
"alphanum_fraction": 0.6209150327,
"author": null,
"avg_line_length": 30.6,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8de78c3cbd764308810f46e2e966f28cfb6bc324",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-08-07T10:07:09.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-08-07T10:07:09.000Z",
"max_forks_repo_head_hexsha": "689b9924d3c88a433f8f350b89c13a878ac7d7c3",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "nhatsmrt/nn-toolbox",
"max_forks_repo_path": "tests/test_pinball.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "689b9924d3c88a433f8f350b89c13a878ac7d7c3",
"max_issues_repo_issues_event_max_datetime": "2022-01-18T22:21:57.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-01-18T22:21:57.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "nhatsmrt/nn-toolbox",
"max_issues_repo_path": "tests/test_pinball.py",
"max_line_length": 91,
"max_stars_count": 16,
"max_stars_repo_head_hexsha": "689b9924d3c88a433f8f350b89c13a878ac7d7c3",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "nhatsmrt/nn-toolbox",
"max_stars_repo_path": "tests/test_pinball.py",
"max_stars_repo_stars_event_max_datetime": "2020-09-08T13:52:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-07-11T15:57:41.000Z",
"num_tokens": 137,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 459
} |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
from sklearn.isotonic import IsotonicRegression
from .curve_fit import project_convex_lip
class _BaseShapeIV:
def predict(self, X):
inds = np.searchsorted(self.x_, X[:, 0])
lb_x = self.x_[np.clip(inds - 1, 0, len(self.coef_) - 1)]
lb_y = self.coef_[np.clip(inds - 1, 0, len(self.coef_) - 1)]
ub_x = self.x_[np.clip(inds, 0, len(self.coef_) - 1)]
ub_y = self.coef_[np.clip(inds, 0, len(self.coef_) - 1)]
y_pred = lb_y.copy()
filt = (ub_x != lb_x)
y_pred[filt] += (ub_y[filt] - lb_y[filt]) * \
(X[filt, 0] - lb_x[filt]) / (ub_x[filt] - lb_x[filt])
return y_pred
class ShapeIV(_BaseShapeIV):
    """Shape-constrained IV regression fit by a primal-dual (saddle-point) iteration.

    Both the structural function theta and the adversarial test function w are
    parameterized as a difference of an increasing and a decreasing isotonic
    fit (theta = theta_plus - theta_minus, w = w_plus - w_minus), updated by
    alternating projected-gradient steps. The returned estimate is the average
    of the iterates, evaluated via ``_BaseShapeIV.predict``.
    """

    def __init__(self, lambda_w=1, y_min=0, y_max=1, eta_theta='auto', eta_w='auto',
                 n_iter=2000, tol=1e-2, monotonic=None):
        # lambda_w: L2 penalty weight on the test function w.
        # y_min / y_max: box constraints on the fitted values.
        # eta_theta / eta_w: primal / dual step sizes; 'auto' -> sqrt(1/(n*T)).
        # n_iter: number of primal-dual iterations T.
        # tol: stored but not read in fit (kept for interface compatibility).
        # monotonic: None, 'increasing' or 'decreasing' constraint on theta.
        self.lambda_w = lambda_w
        self.eta_theta = eta_theta
        self.eta_w = eta_w
        self.n_iter = n_iter
        self.tol = tol
        self.monotonic = monotonic
        self.y_min = y_min
        self.y_max = y_max

    def fit(self, Z, X, Y):
        """Fit on instruments Z (n,1), treatment X (n,1), outcome Y (flattened).

        Returns self; knots/values are stored sorted in ``x_``/``coef_`` (and
        ``z_``/``w_`` for the test function), averaged over all iterates.
        """
        T = self.n_iter
        assert X.shape[1] == 1
        assert Z.shape[1] == 1
        Y = Y.flatten()
        n = X.shape[0]
        eta_theta = np.sqrt(
            1 / (n * T)) if self.eta_theta == 'auto' else self.eta_theta
        eta_w = np.sqrt(
            1 / (n * T)) if self.eta_w == 'auto' else self.eta_w
        # Iterate histories; row t is the t-th iterate, row 0 the zero init.
        theta_plus = np.zeros((T + 1, n))
        theta_minus = np.zeros((T + 1, n))
        w_plus = np.zeros((T + 1, n))
        w_minus = np.zeros((T + 1, n))
        inds_x = np.argsort(X[:, 0])
        inds_z = np.argsort(Z[:, 0])
        # Isotonic projectors for each signed component; w's admissible range
        # is wider than theta's ([y_min - y_max, 2*y_max]).
        est_theta_plus = IsotonicRegression(
            y_min=self.y_min, y_max=self.y_max, out_of_bounds='clip')
        est_theta_minus = IsotonicRegression(
            y_min=self.y_min, y_max=self.y_max, out_of_bounds='clip', increasing=False)
        est_w_plus = IsotonicRegression(
            y_min=self.y_min - self.y_max, y_max=2 * self.y_max, out_of_bounds='clip')
        est_w_minus = IsotonicRegression(
            y_min=self.y_min - self.y_max, y_max=2 * self.y_max, out_of_bounds='clip', increasing=False)
        for t in np.arange(1, T + 1):
            cor = w_plus[t - 1] - w_minus[t - 1]
            # Primal (theta) gradient step followed by isotonic projection.
            if self.monotonic != 'decreasing':
                theta_plus[t] = est_theta_plus.fit(
                    X[:, 0], theta_plus[t - 1] + eta_theta * cor).predict(X[:, 0])
            if self.monotonic != 'increasing':
                theta_minus[t] = - est_theta_minus.fit(
                    X[:, 0], - (theta_minus[t - 1] - eta_theta * cor)).predict(X[:, 0])
            # Dual (w) step on the residual, with L2 regularization term reg_w.
            res = Y - (theta_plus[t - 1] - theta_minus[t - 1])
            reg_w = 2 * self.lambda_w * (w_plus[t - 1] - w_minus[t - 1])
            # NOTE(review): this update uses eta_theta while the w_minus update
            # below (and LipschitzShapeIV.fit) use eta_w — this looks like a
            # typo for eta_w; confirm against the intended algorithm before
            # changing (the two coincide under the default 'auto' step sizes).
            w_plus[t] = est_w_plus.fit(Z[:, 0], w_plus[t - 1] +
                                       eta_theta * (res - reg_w)).predict(Z[:, 0])
            w_minus[t] = - est_w_minus.fit(Z[:, 0], - (w_minus[t - 1] +
                                                       eta_w * (- res + reg_w))).predict(Z[:, 0])
        # Average over iterates; store sorted for piecewise-linear predict().
        self.coef_ = np.mean(theta_plus - theta_minus, axis=0)[inds_x]
        self.x_ = X[inds_x, 0]
        self.w_ = np.mean(w_plus - w_minus, axis=0)[inds_z]
        self.z_ = Z[inds_z, 0]
        self.all_coef_ = theta_plus[:, inds_x] - theta_minus[:, inds_x]
        self.all_w = w_plus[:, inds_z] - w_minus[:, inds_z]
        return self
class LipschitzShapeIV(_BaseShapeIV):
    """Shape-constrained IV regression like ``ShapeIV``, but each gradient step
    is projected onto a Lipschitz (and optionally convex/concave) function
    class via ``project_convex_lip`` instead of plain isotonic regression.
    """

    def __init__(self, L=1, convexity=None, lambda_w=1, y_min=0, y_max=1, eta_theta='auto', eta_w='auto',
                 n_iter=2000, tol=1e-2, monotonic=None, n_projection_subsamples=None,
                 max_projection_iters=100):
        # L: Lipschitz bound for theta (None disables the bound; w uses 2*L).
        # convexity: convexity/concavity constraint forwarded to the projector.
        # n_projection_subsamples / max_projection_iters: projector controls.
        # Remaining parameters mirror ShapeIV.__init__.
        self.convexity = convexity
        self.L = L
        self.lambda_w = lambda_w
        self.eta_theta = eta_theta
        self.eta_w = eta_w
        self.n_iter = n_iter
        self.tol = tol
        self.monotonic = monotonic
        self.y_min = y_min
        self.y_max = y_max
        self.n_projection_subsamples = n_projection_subsamples
        self.max_projection_iters = max_projection_iters

    def fit(self, Z, X, Y):
        """Fit on instruments Z (n,1), treatment X (n,1), outcome Y; returns self."""
        T = self.n_iter
        assert X.shape[1] == 1
        assert Z.shape[1] == 1
        Y = Y.flatten()
        n = X.shape[0]
        eta_theta = np.sqrt(
            1 / (n * T)) if self.eta_theta == 'auto' else self.eta_theta
        eta_w = np.sqrt(
            1 / (n * T)) if self.eta_w == 'auto' else self.eta_w
        # Iterate histories; row 0 is the all-zero initialization.
        theta_plus = np.zeros((T + 1, n))
        theta_minus = np.zeros((T + 1, n))
        w_plus = np.zeros((T + 1, n))
        w_minus = np.zeros((T + 1, n))
        inds_x = np.argsort(X[:, 0])
        inds_z = np.argsort(Z[:, 0])
        for t in np.arange(1, T + 1):
            cor = w_plus[t - 1] - w_minus[t - 1]
            # Primal step with projection onto the Lipschitz/convex class.
            if self.monotonic != 'decreasing':
                theta_plus[t] = project_convex_lip(
                    X[:, 0], theta_plus[t - 1] + eta_theta * cor,
                    convexity=self.convexity, monotone='increasing',
                    L=self.L, ymin=self.y_min, ymax=self.y_max,
                    n_subsamples=self.n_projection_subsamples, max_iters=self.max_projection_iters)
            if self.monotonic != 'increasing':
                theta_minus[t] = - project_convex_lip(
                    X[:, 0], - (theta_minus[t - 1] - eta_theta * cor),
                    convexity=self.convexity, monotone='decreasing',
                    L=self.L, ymin=self.y_min, ymax=self.y_max,
                    n_subsamples=self.n_projection_subsamples, max_iters=self.max_projection_iters)
            # Dual step; w uses a doubled Lipschitz bound and a wider range
            # ([y_min - y_max, 2*y_max]) than theta.
            res = Y - (theta_plus[t - 1] - theta_minus[t - 1])
            reg_w = 2 * self.lambda_w * (w_plus[t - 1] - w_minus[t - 1])
            w_plus[t] = project_convex_lip(Z[:, 0], w_plus[t - 1] +
                                           eta_w * (res - reg_w),
                                           convexity=self.convexity, monotone='increasing', L=(2 * self.L if self.L is not None else None),
                                           ymin=self.y_min - self.y_max, ymax=2 * self.y_max,
                                           n_subsamples=self.n_projection_subsamples,
                                           max_iters=self.max_projection_iters)
            w_minus[t] = - project_convex_lip(Z[:, 0], - (w_minus[t - 1] +
                                                          eta_w * (- res + reg_w)),
                                              convexity=self.convexity, monotone='decreasing', L=(2 * self.L if self.L is not None else None),
                                              ymin=(self.y_min - self.y_max), ymax=2 * self.y_max,
                                              n_subsamples=self.n_projection_subsamples,
                                              max_iters=self.max_projection_iters)
        # Average over iterates; stored sorted for piecewise-linear predict().
        self.coef_ = np.mean(theta_plus - theta_minus, axis=0)[inds_x]
        self.x_ = X[inds_x, 0]
        self.w_ = np.mean(w_plus - w_minus, axis=0)[inds_z]
        self.z_ = Z[inds_z, 0]
        self.all_coef_ = theta_plus[:, inds_x] - theta_minus[:, inds_x]
        self.all_w = w_plus[:, inds_z] - w_minus[:, inds_z]
        return self
| {
"alphanum_fraction": 0.5245339912,
"author": null,
"avg_line_length": 43.6886227545,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b88ea10c8ec44659bb6e4fbd6e8a28e8bebd3a58",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 10,
"max_forks_repo_forks_event_max_datetime": "2022-01-10T23:42:37.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-12-05T17:12:49.000Z",
"max_forks_repo_head_hexsha": "7a5cd51353c8a81e16c01220b71f77e4e1102add",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "microsoft/AdversarialGMM",
"max_forks_repo_path": "mliv/shape/shapeiv.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7a5cd51353c8a81e16c01220b71f77e4e1102add",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "microsoft/AdversarialGMM",
"max_issues_repo_path": "mliv/shape/shapeiv.py",
"max_line_length": 142,
"max_stars_count": 23,
"max_stars_repo_head_hexsha": "7a5cd51353c8a81e16c01220b71f77e4e1102add",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "microsoft/AdversarialGMM",
"max_stars_repo_path": "mliv/shape/shapeiv.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-26T04:11:14.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-12-01T22:55:40.000Z",
"num_tokens": 2013,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7296
} |
from ..diversity import compound_class
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
def compound_class_plot(formula_list,
                        mass_list = None,
                        method = 'MSCC',
                        **kwargs):
    """
    Docstring for function PyKrev.compound_class_plot
    ====================
    This function takes a list of molecular formula strings and plots a bar chart of the compound classes present.
    Use
    ----
    compound_class_plot(Y)
    Returns the figure and axes handles and a tuple containing a list of compound class assignments and a dictionary containing the compound class counts
    Parameters
    ----------
    Y: A list of molecular formula strings.
    mass_list: a list of mz values to pass to pk.compound_class -> required for the MSCC algorithm. Defaults to an empty list.
    method: the compound-class algorithm name passed through to pk.compound_class (default 'MSCC').
    **kwargs: key word arguments to plt.bar
    """
    # Avoid the mutable-default-argument pitfall: the original signature used
    # ``mass_list = []``, a single list shared across all calls.
    if mass_list is None:
        mass_list = []
    compoundClass, cclassCounts = compound_class(formula_list, mass_list, method = method)
    # One pass over the counts dict keeps labels and values aligned by construction.
    labels = [str(c) for c in cclassCounts]
    values = list(cclassCounts.values())
    x_pos = list(range(len(labels)))
    plt.bar(x_pos, values, **kwargs)
    plt.xticks(x_pos, labels, rotation = 'vertical')
    plt.xlabel("Compound class")
    plt.ylabel("Counts")
    fig = plt.gcf()
    ax = plt.gca()
    return fig, ax, (compoundClass, cclassCounts)
"alphanum_fraction": 0.6490641711,
"author": null,
"avg_line_length": 34.7906976744,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bada6d532f820ea31a6af84366421420c00d2988",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-09-23T16:03:03.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-09-23T16:03:03.000Z",
"max_forks_repo_head_hexsha": "1a328fccded962f309e951c8509b87a82c3d3ae6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "erikafreeman/pykrev",
"max_forks_repo_path": "pykrev/plotting/compound_class_plot.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1a328fccded962f309e951c8509b87a82c3d3ae6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "erikafreeman/pykrev",
"max_issues_repo_path": "pykrev/plotting/compound_class_plot.py",
"max_line_length": 153,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "1a328fccded962f309e951c8509b87a82c3d3ae6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Kzra/pykrev",
"max_stars_repo_path": "pykrev/plotting/compound_class_plot.py",
"max_stars_repo_stars_event_max_datetime": "2021-10-04T16:17:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-02-18T10:19:13.000Z",
"num_tokens": 336,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1496
} |
import os
import os.path
import copy
import hashlib
import errno
import numpy as np
from numpy.testing import assert_array_almost_equal
from parse_config import args
from data.noise import build_for_cifar100
def check_integrity(fpath, md5):
    """Return True iff ``fpath`` exists and its MD5 hex digest equals ``md5``."""
    if not os.path.isfile(fpath):
        return False
    hasher = hashlib.md5()
    with open(fpath, 'rb') as handle:
        # Stream in 1 MB chunks so large files never load fully into memory.
        while True:
            chunk = handle.read(1024 * 1024)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest() == md5
def download_url(url, root, filename, md5):
    """Download ``url`` into ``root/filename`` unless a verified copy exists.

    A file is considered valid when its MD5 digest matches ``md5`` (see
    ``check_integrity``). On a failed https download, retries once over http;
    a failed non-https download is re-raised (the original silently swallowed
    it, leaving callers to assume the file existed).
    """
    # six.moves was only needed for Python 2; this file is Python 3.
    import urllib.request
    import urllib.error
    root = os.path.expanduser(root)
    fpath = os.path.join(root, filename)
    # exist_ok replaces the original's manual errno.EEXIST dance.
    os.makedirs(root, exist_ok=True)
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:
        try:
            print('Downloading ' + url + ' to ' + fpath)
            urllib.request.urlretrieve(url, fpath)
        except (urllib.error.URLError, IOError):
            # Narrowed from a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.
            if url[:5] == 'https':
                url = url.replace('https:', 'http:')
                print('Failed download. Trying https -> http instead.'
                      ' Downloading ' + url + ' to ' + fpath)
                urllib.request.urlretrieve(url, fpath)
            else:
                raise
def list_dir(root, prefix=False):
    """List all directories at a given root
    Args:
        root (str): Path to directory whose folders need to be listed
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the directories found
    """
    root = os.path.expanduser(root)
    names = [entry for entry in os.listdir(root)
             if os.path.isdir(os.path.join(root, entry))]
    if prefix is True:
        names = [os.path.join(root, entry) for entry in names]
    return names
def list_files(root, suffix, prefix=False):
    """List all files ending with a suffix at a given root
    Args:
        root (str): Path to directory whose folders need to be listed
        suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
            It uses the Python "str.endswith" method and is passed directly
        prefix (bool, optional): If true, prepends the path to each result, otherwise
            only returns the name of the files found
    """
    root = os.path.expanduser(root)
    matched = [entry for entry in os.listdir(root)
               if os.path.isfile(os.path.join(root, entry)) and entry.endswith(suffix)]
    if prefix is True:
        matched = [os.path.join(root, entry) for entry in matched]
    return matched
# basic function
def multiclass_noisify(y, P, random_state):
    """ Flip classes according to transition probability matrix T.
    It expects a number between 0 and the number of classes - 1.
    """
    assert P.shape[0] == P.shape[1]
    assert np.max(y) < P.shape[0]
    # P must be row-stochastic with non-negative entries.
    assert_array_almost_equal(P.sum(axis=1), np.ones(P.shape[1]))
    assert (P >= 0.0).all()
    noisy = y.copy()
    rng = np.random.RandomState(random_state)
    for idx in np.arange(y.shape[0]):
        label = y[idx]
        # NOTE: ``label`` is a length-1 array when y is a column vector
        # (as built by dataset_split), so P[label, :][0] selects that
        # label's transition row.
        draw = rng.multinomial(1, P[label, :][0], 1)[0]
        noisy[idx] = np.where(draw == 1)[0]
    return noisy
# noisify_pairflip call the function "multiclass_noisify"
def noisify_pairflip(y_train, noise, random_state=None, nb_classes=10):
    """mistakes:
    flip in the pair

    Inject pair-flip label noise: each affected class is confused with one
    specific other class with probability ``noise``. The flip pairs depend on
    the module-level ``args.dataset`` (mnist / cifar10 / cifar100 / binary
    fallback). Returns (noisy_labels, actual_noise_rate, transition_matrix P).
    """
    print('----------Pair noise----------')
    P = np.eye(nb_classes)
    n = noise
    if n > 0.0:
        if args.dataset == 'mnist':
            """mistakes:
            1 <- 7
            2 -> 7
            3 -> 8
            5 <-> 6
            """
            # 1 <- 7
            P[7, 7], P[7, 1] = 1. - n, n
            # 2 -> 7
            P[2, 2], P[2, 7] = 1. - n, n
            # 5 <-> 6
            P[5, 5], P[5, 6] = 1. - n, n
            P[6, 6], P[6, 5] = 1. - n, n
            # 3 -> 8
            P[3, 3], P[3, 8] = 1. - n, n
            y_train_noisy = multiclass_noisify(y_train, P=P, random_state=random_state)
            actual_noise = (y_train_noisy != y_train).mean()
            assert actual_noise > 0.0
            print('Actual noise %.2f' % actual_noise)
            y_train = y_train_noisy
            return y_train, actual_noise, P
        elif args.dataset == 'cifar10':
            """mistakes:
            automobile <- truck
            bird -> airplane
            cat <-> dog
            deer -> horse
            """
            # automobile <- truck
            P[9, 9], P[9, 1] = 1. - n, n
            # bird -> airplane
            P[2, 2], P[2, 0] = 1. - n, n
            # cat <-> dog
            P[3, 3], P[3, 5] = 1. - n, n
            P[5, 5], P[5, 3] = 1. - n, n
            # deer -> horse (class 4 -> class 7; the original comment here
            # wrongly said "automobile -> truck")
            P[4, 4], P[4, 7] = 1. - n, n
            y_train_noisy = multiclass_noisify(y_train, P=P, random_state=random_state)
            actual_noise = (y_train_noisy != y_train).mean()
            assert actual_noise > 0.0
            print('Actual noise %.2f' % actual_noise)
            y_train = y_train_noisy
            return y_train, actual_noise, P
        elif args.dataset == 'cifar100':
            # Pair-flip confined within each 5-class superclass block.
            nb_superclasses = 20
            nb_subclasses = 5
            for i in np.arange(nb_superclasses):
                init, end = i * nb_subclasses, (i+1) * nb_subclasses
                P[init:end, init:end] = build_for_cifar100(nb_subclasses, n)
            y_train_noisy = multiclass_noisify(y_train, P=P, random_state=random_state)
            actual_noise = (y_train_noisy != y_train).mean()
            assert actual_noise > 0.0
            print('Actual noise %.2f' % actual_noise)
            y_train = y_train_noisy
            return y_train, actual_noise, P
        else:  # binary classes
            """mistakes:
            1 -> 0: n
            0 -> 1: .05
            """
            P[1, 1], P[1, 0] = 1.0 - n, n
            P[0, 0], P[0, 1] = 0.95, 0.05
            y_train_noisy = multiclass_noisify(y_train, P=P, random_state=random_state)
            actual_noise = (y_train_noisy != y_train).mean()
            assert actual_noise > 0.0
            print('Actual noise %.2f' % actual_noise)
            y_train = y_train_noisy
            return y_train, actual_noise, P
    else:
        print('Actual noise %.2f, not a right one, right range is (0.0, 1.0)' % noise)
        return y_train, noise, P
def noisify_multiclass_symmetric(y_train, noise, random_state=None, nb_classes=10):
    """mistakes:
    flip in the symmetric way
    """
    print('----------Symmetric noise----------')
    n = noise
    # Uniform off-diagonal mass n/(nb_classes-1); diagonal keeps 1-n.
    P = np.full((nb_classes, nb_classes), n / (nb_classes - 1))
    if n > 0.0:
        np.fill_diagonal(P, 1. - n)
        y_train_noisy = multiclass_noisify(y_train, P=P, random_state=random_state)
        actual_noise = (y_train_noisy != y_train).mean()
        assert actual_noise > 0.0
        print('Actual noise %.2f' % actual_noise)
        return y_train_noisy, actual_noise, P
    print('Actual noise %.2f' % noise)
    return y_train, noise, P
def noisify(dataset='mnist', nb_classes=10, train_labels=None, noise_type=None, noise_rate=0, random_state=1):
    """Dispatch to the requested label-noise generator.

    :param dataset: kept for interface compatibility (the generators read the
        module-level ``args.dataset`` instead — NOTE(review): confirm intent).
    :param noise_type: 'pairflip' or 'symmetric'.
    :param random_state: seed forwarded to the generator. The original
        hardcoded random_state=1, silently ignoring this parameter.
    :return: (noisy_labels, actual_noise_rate)
    :raises ValueError: for an unrecognized noise_type (previously a NameError).
    """
    global train_noisy_labels, actual_noise_rate
    if noise_type == 'pairflip':
        train_noisy_labels, actual_noise_rate, _ = noisify_pairflip(
            train_labels, noise_rate, random_state=random_state, nb_classes=nb_classes)
    elif noise_type == 'symmetric':
        train_noisy_labels, actual_noise_rate, _ = noisify_multiclass_symmetric(
            train_labels, noise_rate, random_state=random_state, nb_classes=nb_classes)
    else:
        raise ValueError("noise_type must be 'pairflip' or 'symmetric', got %r" % (noise_type,))
    return train_noisy_labels, actual_noise_rate
def norm(T):
    """Row-normalize T so every row sums to 1 (row-stochastic matrix).

    keepdims makes the division broadcast along rows; the original divided by
    a 1-D vector, which divided each COLUMN j by row-sum j instead of
    normalizing rows (the error went unnoticed on symmetric matrices).
    """
    row_sum = np.sum(T, 1, keepdims=True)
    T_norm = T / row_sum
    return T_norm
def error(T, T_true):
    """Relative L1 error of T against T_true: ||T - T_true||_1 / ||T_true||_1."""
    return np.abs(T - T_true).sum() / np.abs(T_true).sum()
def transition_matrix_generate(noise_rate=0.5, num_classes=10):
    """Build a symmetric-noise transition matrix.

    Diagonal entries are 1 - noise_rate; off-diagonal entries share the
    remaining mass uniformly (noise_rate / (num_classes - 1)), so every row
    sums to 1.

    Fixes a degenerate case: the original returned an all-zero matrix for
    noise_rate == 0 instead of the identity.
    """
    n = noise_rate
    P = np.full((num_classes, num_classes), n / (num_classes - 1))
    np.fill_diagonal(P, 1. - n)
    return P
def fit(X, num_classes, filter_outlier=False):
    """Estimate a noise transition matrix T from predicted posteriors X.

    For each class i, the "anchor" sample with the largest posterior for class
    i is selected, and its full posterior row becomes T[i]. With
    filter_outlier=True the top ~3% of posteriors for class i are excluded
    when choosing the anchor, guarding against overconfident outliers.

    :param X: (n_samples, num_classes) array of class posteriors.
    :param num_classes: number of classes c; T has shape (c, c).
    :param filter_outlier: whether to ignore the extreme upper tail.
    :return: estimated (c, c) transition matrix.
    """
    c = num_classes
    T = np.empty((c, c))
    eta_corr = X
    for i in np.arange(c):
        if not filter_outlier:
            idx_best = np.argmax(eta_corr[:, i])
        else:
            # method= replaces the interpolation= keyword removed in NumPy 2.0.
            eta_thresh = np.percentile(eta_corr[:, i], 97, method='higher')
            # Copy before zeroing: the original wrote through a column view and
            # silently mutated the caller's X (corrupting later T rows too).
            robust_eta = eta_corr[:, i].copy()
            robust_eta[robust_eta >= eta_thresh] = 0.0
            idx_best = np.argmax(robust_eta)
        for j in np.arange(c):
            T[i, j] = eta_corr[idx_best, j]
    return T
# flip clean labels to noisy labels
# train set and val set split
def dataset_split(train_images, train_labels, noise_rate=0.0, split_per=0.9, random_seed=1, num_classes=10):
    """Inject label noise (type chosen by module-level ``args.noise_type``) and
    split the data into train/validation subsets.

    :param train_images: (n, ...) array of samples, split along axis 0.
    :param train_labels: (n,) array of clean integer labels.
    :param noise_rate: fraction of labels to corrupt (0.0 disables noise).
    :param split_per: fraction of samples assigned to the training split.
    :param random_seed: seed for both the noise generator and the split.
    :param num_classes: number of label classes.
    :return: (train_set, val_set, train_labels, val_labels) with NOISY labels.

    NOTE(review): for ``args.noise_type`` values other than 'symmetric' /
    'pairflip' the labels silently pass through unchanged, and the realized
    noise rate returned by the generators is discarded — confirm intent.
    """
    # The noise generators expect labels as a column vector (see
    # multiclass_noisify's P[label, :][0] indexing).
    clean_train_labels = train_labels[:, np.newaxis]
    noisy_labels = clean_train_labels
    if args.noise_type == 'symmetric' and noise_rate > 0.0:
        noisy_labels, real_noise_rate, transition_matrix = noisify_multiclass_symmetric(clean_train_labels,
                                                                                        noise=noise_rate,
                                                                                        random_state=random_seed,
                                                                                        nb_classes=num_classes)
    elif args.noise_type == 'pairflip' and noise_rate > 0.0:
        noisy_labels, real_noise_rate, _ = noisify_pairflip(clean_train_labels, noise=noise_rate, random_state=random_seed,
                                                            nb_classes=num_classes)
    noisy_labels = noisy_labels.squeeze()
    num_samples = int(noisy_labels.shape[0])
    # Deterministic split: sample the train indices, validation is the rest.
    np.random.seed(random_seed)
    train_set_index = np.random.choice(num_samples, int(num_samples * split_per), replace=False)
    index = np.arange(train_images.shape[0])
    val_set_index = np.delete(index, train_set_index)
    train_set, val_set = train_images[train_set_index, :], train_images[val_set_index, :]
    train_labels, val_labels = noisy_labels[train_set_index], noisy_labels[val_set_index]
    return train_set, val_set, train_labels, val_labels
| {
"alphanum_fraction": 0.5386627147,
"author": null,
"avg_line_length": 35.7352024922,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "19a06d14e7b3b786e9f319c7d46af3edfd1e436d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "57b52dc4873f8eba7b8332db0ca3e593c2e3ffa8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Billy1900/Noise-Adaption-Layer",
"max_forks_repo_path": "data/data_utils.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "57b52dc4873f8eba7b8332db0ca3e593c2e3ffa8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Billy1900/Noise-Adaption-Layer",
"max_issues_repo_path": "data/data_utils.py",
"max_line_length": 124,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "57b52dc4873f8eba7b8332db0ca3e593c2e3ffa8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Billy1900/Noise-Adaption-Layer",
"max_stars_repo_path": "data/data_utils.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-10T02:35:21.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-27T11:44:56.000Z",
"num_tokens": 2941,
"path": null,
"reason": "import numpy,from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11471
} |
# Let's look at how the API is used by importing a simple pretrained imagenet
# model and testing it out on a picture of a dog. We start with our imports, and
# can import our ResNet50 model-getter and decode_predictions tool for
# retrieving labels
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tfmodelzoo import ResNet50, decode_predictions
# Read the demo image from the working directory (expects 'dog.jpg' to exist).
dog_img = plt.imread('dog.jpg')
# Create a session and pass it to our model getter. This will download the
# weights and return two tensors: the input and the output of the model
# NOTE(review): tf.Session is TensorFlow 1.x API; under TF2 this requires
# tf.compat.v1 — confirm the targeted TensorFlow version.
sess = tf.Session()
data, softmax = ResNet50(sess, weights='imagenet')
# We can now go ahead and use these models in TensorFlow, adding more operations
# or simply running them. I'll put my dog through the model and see how it does:
predictions = sess.run(softmax, {data: np.array([dog_img])})
# Now we can decode our predictions:
print('Top 10 Predictions w/ Confidence:')
for i, tup in enumerate(decode_predictions(predictions, top=10)[0]):
    print("{}\t— {}".format(tup[2], tup[1]))
# Top 10 Predictions w/ Confidence:
# 0.8906792998313904 — beagle
# 0.061354268342256546 — Weimaraner
# 0.012612144462764263 — EntleBucher
# 0.010248360224068165 — Walker_hound
# 0.006116565316915512 — English_foxhound
# 0.004969390109181404 — bluetick
# 0.0017149088671430945 — redbone
# 0.001614996581338346 — Labrador_retriever
# 0.0015570599352940917 — basset
# 0.0012122730258852243 — dalmatian
# The above should be the output when the model is run!
| {
"alphanum_fraction": 0.7597103357,
"author": null,
"avg_line_length": 35.3255813953,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "210f55df88643738d1b7bb7f68deee0df19cf436",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2020-01-03T15:22:01.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-07-18T05:43:34.000Z",
"max_forks_repo_head_hexsha": "93ddba2916730fbdcdf01f9a1c07e8dddf594349",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "joeddav/tensorflow-modelzoo",
"max_forks_repo_path": "demo/demo.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "93ddba2916730fbdcdf01f9a1c07e8dddf594349",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "joeddav/tensorflow-modelzoo",
"max_issues_repo_path": "demo/demo.py",
"max_line_length": 80,
"max_stars_count": 7,
"max_stars_repo_head_hexsha": "93ddba2916730fbdcdf01f9a1c07e8dddf594349",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "joeddav/tensorflow-modelzoo",
"max_stars_repo_path": "demo/demo.py",
"max_stars_repo_stars_event_max_datetime": "2019-04-19T05:46:20.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-05-09T22:03:47.000Z",
"num_tokens": 442,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1519
} |
[STATEMENT]
lemma invpst_baldR: "invpst l \<Longrightarrow> invpst r \<Longrightarrow> invpst (baldR l a r)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>invpst l; invpst r\<rbrakk> \<Longrightarrow> invpst (baldR l a r)
[PROOF STEP]
by (cases "(l,a,r)" rule: baldR.cases) (auto simp: invpst_baliL) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Priority_Search_Trees_PST_RBT",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 135,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
/*******************************************************************************
procmon, Copyright (c) 2014, The Regents of the University of California,
through Lawrence Berkeley National Laboratory (subject to receipt of any
required approvals from the U.S. Dept. of Energy). All rights reserved.
If you have questions about your rights to use or distribute this software,
please contact Berkeley Lab's Technology Transfer Department at TTD@lbl.gov.
The LICENSE file in the root directory of the source code archive describes the
licensing and distribution rights and restrictions on this software.
Author: Douglas Jacobsen <dmj@nersc.gov>
*******************************************************************************/
#ifndef __PROCMON_CONFIG_HH_
#define __PROCMON_CONFIG_HH_
#include "ProcData.hh"
#include <unistd.h>
#include <stdlib.h>
#include <vector>
#include <pwd.h>
#include <grp.h>
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <boost/bind.hpp>
#include <boost/regex.hpp>
#include <iostream>
#include <fstream>
namespace po = boost::program_options;
namespace fs = boost::filesystem;
using namespace std;
#include "config.h"
#ifndef DEFAULT_FREQUENCY
#define DEFAULT_FREQUENCY 60
#define DEFAULT_INITIAL_PHASE 0
#define DEFAULT_INITIAL_PHASE_FREQUENCY 1
#define DEFAULT_OUTPUT_FLAGS 0
#define DEFAULT_SYSTEM "cluster"
#define DEFAULT_IDENTIFIER "proc"
#define DEFAULT_SUBIDENTIFIER "mon"
#define DEFAULT_AMQP_HOST "localhost"
#define DEFAULT_AMQP_PORT 5672
#define DEFAULT_AMQP_VHOST "/"
#define DEFAULT_AMQP_EXCHANGE_NAME "procmon"
#define DEFAULT_AMQP_USER "guest"
#define DEFAULT_AMQP_PASSWORD "guest"
#define DEFAULT_AMQP_FRAMESIZE 131072
#endif
time_t getBootTime();
int fileFillBuffer(FILE* fp, char* buffer, int buffSize, char** sptr, char** ptr, char** eptr);
/**
 * Lightweight exception type carrying a caller-supplied message string.
 *
 * The message pointer is stored as-is (not copied), so the caller must pass
 * a string whose lifetime outlives the exception — in practice a literal.
 */
class ProcmonException : public exception {
    public:
    ProcmonException(const char* t_error) : error(t_error) { }

    /// Return the message supplied at construction.
    virtual const char* what() const throw() {
        return error;
    }

    private:
    const char *error;  // borrowed pointer; not owned by this object
};
/**
 * ProcmonConfig collects every runtime setting for procmon.
 *
 * Settings come from three layers, parsed in this order so that earlier
 * stores win under boost::program_options semantics:
 *   1. the command line,
 *   2. an optional user config file (--config.file),
 *   3. the system-wide procmon.conf (SYSTEM_CONFIG_DIR or $PROCMON_DIR).
 * After parsing, parseOptions() validates ranges and derives the output
 * flags.  The resulting values are exposed as public members.
 */
class ProcmonConfig {
    protected:
    po::options_description procmonOptions;  // all registered option groups
    string userConfigFile;
    int maxIterations;       // debug: stop after N iterations (0 = unlimited)
    int syslog_facility;
    int syslog_priority_min;

    /* FUTURE FEATURES
    vector<string> contextProcessNames;
    vector<boost::regex> contextProcessRegexes;
    */

    // Notifier callbacks: translate facility/priority names to syslog ints.
    void setSyslogFacility(const string &facility);
    void setSyslogPriorityMin(const string &priority);

    public:
    inline int getSyslogFacility() const {
        return syslog_facility;
    }
    inline int getSyslogPriorityMin() const {
        return syslog_priority_min;
    }
    /* FUTURE FEATURES
    inline const vector<boost::regex> &getContextProcessRegexes() {
        return contextProcessRegexes;
    }
    */

    /* Configurable monitoring options */
    int targetPPid;          // root of the monitored process hierarchy
    int frequency;           // seconds between measurements (steady state)
    int initialFrequency;    // seconds between measurements (initial phase)
    int initialPhase;        // length of the initial phase in seconds
    bool daemonize;
    bool dummy;
    bool verbose;
    bool craylock;
    int maxfd;               // max per-process fds to track (0 = none)
#ifdef SECURED
    int target_uid;
    int target_gid;
    string user;
    string group;
#endif
    string system;
    string identifier;
    string subidentifier;
    string identifier_env;
    string subidentifier_env;
    string identifier_cgroup;
    string subidentifier_cgroup;
    boost::regex *identifier_cgroup_regex;     // owned; freed in destructor
    boost::regex *subidentifier_cgroup_regex;  // owned; freed in destructor
    int gid_range_min;
    int gid_range_max;

    /* Derived monitoring inputs */
    int tgtGid;
    int tgtSid;
    int tgtPgid;
    long clockTicksPerSec;
    long pageSize;
    time_t boottime;
    string hostname;

    /* Output Options */
    unsigned int outputFlags;
    string outputTextFilename;
#ifdef USE_HDF5
    string outputHDF5Filename;
#endif
    bool noOutput;
    string pidfile;

#ifdef USE_AMQP
    /* AMQP options */
    string mqServer;           // raw (possibly comma-separated) server spec
    vector<string> mqServers;  // parsed list; one is chosen at random
    unsigned int mqPort;
    string mqVHost;
    string mqUser;
    string mqPassword;
    string mqExchangeName;
    unsigned int mqFrameSize;
#endif

    /**
     * Initialize every member to its compiled-in default, capture
     * context-derived values (hostname, clock ticks, page size, boot time),
     * and register the option descriptions.
     */
    ProcmonConfig() {
        gid_range_min = -1;
        gid_range_max = -1;

        /* Initialize defaults */
        targetPPid = 1;
        frequency = DEFAULT_FREQUENCY;
        initialFrequency = DEFAULT_INITIAL_PHASE_FREQUENCY;
        initialPhase = DEFAULT_INITIAL_PHASE;
        clockTicksPerSec = 0;
        pageSize = 0;
        daemonize = false;
        dummy = false;
        verbose = false;
        craylock = false;
        outputFlags = DEFAULT_OUTPUT_FLAGS;
        tgtGid = 0;
        maxfd = 0;
        tgtSid = 0;
        tgtPgid = 0;
        maxIterations = 0;
#ifdef SECURED
        target_uid = -1;
        target_gid = -1;
#endif
#ifdef USE_AMQP
        mqServer = DEFAULT_AMQP_HOST;
        mqPort = DEFAULT_AMQP_PORT;
        mqUser = DEFAULT_AMQP_USER;
        mqPassword = DEFAULT_AMQP_PASSWORD;
        mqVHost = DEFAULT_AMQP_VHOST;
        mqExchangeName = DEFAULT_AMQP_EXCHANGE_NAME;
        mqFrameSize = DEFAULT_AMQP_FRAMESIZE;
#endif
        identifier = DEFAULT_IDENTIFIER;
        subidentifier = DEFAULT_SUBIDENTIFIER;
        identifier_cgroup_regex = NULL;
        subidentifier_cgroup_regex = NULL;
        pidfile = "";
        noOutput = false;

        /* Setup Context-derived values */
        char buffer[BUFFER_SIZE];
        bzero(buffer, BUFFER_SIZE);
        if (gethostname(buffer, BUFFER_SIZE) == 0) {
            hostname = buffer;
        }
        clockTicksPerSec = sysconf(_SC_CLK_TCK);
        pageSize = sysconf(_SC_PAGESIZE);
        boottime = getBootTime();

        setupDefaultProcmonOptions();
    }

    ~ProcmonConfig() {
        if (identifier_cgroup_regex != NULL) {
            delete identifier_cgroup_regex;
        }
        if (subidentifier_cgroup_regex != NULL) {
            delete subidentifier_cgroup_regex;
        }
    }

    /// Register all option groups on procmonOptions.
    void setupDefaultProcmonOptions() {
        po::options_description basic("Basic Options");
        basic.add_options()
            ("version", "Print version information")
            ("help,h", "Print help message")
            ("verbose,v", "Print extra information (syslog mirrored to stderr)")
            ("daemonize,d", "Daemonize the procmon process")
            ("craylock,c", "Create and lock /tmp/procmon; exit if no lock")
            ("config.file", po::value<string>(&userConfigFile)
                ->default_value(""), "Configuration file from which to read "
                "options.")
            ("syslog.facility", po::value<string>()
                ->notifier(boost::bind(&ProcmonConfig::setSyslogFacility, this, _1))
                ->default_value("USER"), "Name of syslog facility to use for"
                " info/error output. Supports: DAEMON, USER, LOCAL0 - LOCAL7")
            ("syslog.level.min", po::value<string>()
                ->notifier(boost::bind(&ProcmonConfig::setSyslogPriorityMin, this, _1))
                ->default_value("NOTICE"), "Minimum level of log data to send."
                " Supports: EMERG, ALERT, CRIT, ERR, WARNING, NOTICE, INFO, "
                " DEBUG" )
        ;
        procmonOptions.add(basic);

        po::options_description configOptions("Configuration Options");
        configOptions.add_options()
            ("frequency,f", po::value<int>(&frequency)
                ->default_value(DEFAULT_FREQUENCY), "Time elapsed between "
                "measurements during normal data collection (in seconds)")
            // NOTE(review): this default is DEFAULT_INITIAL_PHASE_FREQUENCY
            // rather than DEFAULT_INITIAL_PHASE (0).  Changing it would make
            // the default config fail the initialPhase > 0 validation below,
            // so it is left as-is; confirm the intended default upstream.
            ("initialphase,i", po::value<int>(&initialPhase)
                ->default_value(DEFAULT_INITIAL_PHASE_FREQUENCY), "Length of the "
                "initial phase (in seconds)")
            ("initialfrequency,F", po::value<int>(&initialFrequency)
                ->default_value(DEFAULT_INITIAL_PHASE_FREQUENCY), "Time elapsed "
                "between measurements during initial phase (in seconds)")
            ("nooutput,n", "prevent any type of output from being generated "
                "(for testing)")
            ("outputtext,o", po::value<string>(&outputTextFilename)->default_value(""),
                "filename for text output (optional)")
#ifdef USE_HDF5
            // FIX: the option previously bound to a nonexistent member
            // `outputhdf5`, which fails to compile when USE_HDF5 is defined;
            // the declared member is outputHDF5Filename.
            ("outputhdf5,O", po::value<string>(&outputHDF5Filename)->default_value(""),
                "filename for HDF5 output (optional)")
#endif
            ("pid,q", po::value<string>(&pidfile)->default_value(""), "filename for "
                "optional pid file")
            ("debug.maxiterations", po::value<int>(&maxIterations)
                ->default_value(0), "Debugging: max iterations to complete")
#ifdef SECURED
            ("user,u", po::value<string>(&user), "username/uid to "
                "setuid")
            ("group,r", po::value<string>(&group), "group/gid to "
                "setgid")
#endif
        ;
        procmonOptions.add(configOptions);

        po::options_description contextOptions("Monitoring Context Options");
        contextOptions.add_options()
            ("system", po::value<string>(&system)->default_value(DEFAULT_SYSTEM),
                "system tag")
            ("identifier,I", po::value<string>(&identifier)
                ->default_value(DEFAULT_IDENTIFIER), "identifier for tagging data")
            ("subidentifier,S", po::value<string>(&subidentifier)
                ->default_value(DEFAULT_SUBIDENTIFIER), "secondary identifier for "
                "tagging")
            ("no_system", "Do not use the system tag in the context identifier"
                " string")
        ;
        procmonOptions.add(contextOptions);

        po::options_description processOptions("Process Monitoring Options");
        processOptions.add_options()
            ("ppid,p", po::value<pid_t>(&targetPPid)->default_value(1), "Parent process ID "
                "of monitoring hierarchy")
            ("group_min,g", po::value<int>(&gid_range_min), "Minimum "
                "group id in GridEngine gid range (second group process ident)")
            ("group_max,G", po::value<int>(&gid_range_max), "Maximum "
                "group id in GridEngine gid range (second group process ident)")
            ("sid,s", po::value<int>(&tgtSid)->implicit_value(getsid(0)),
                "Track any processes (or children) matching the specified"
                "session id. Use self with just -s")
            ("pgrp,l", po::value<int>(&tgtPgid)
                ->implicit_value(getpgrp()), "Track any processes (or children)"
                " matching the specied pgrp id. Use self with just -l")
            ("fd_max,W", po::value<int>(&maxfd)->default_value(0), "Maximum"
                " number of fds per process to track")
            /* FUTURE FEATURES
            ("contextProcess", po::value<vector<string> >(&contextProcessNames)
                ->composing(), "If specified, regex matching argv[0] of "
                "processes which should be considered \"roots\" of process "
                "trees for the purpose of setting context")
            */
            ("dummy", "Send dummy message instead of performing any processing")
            ("identifier_env,x", po::value<string>(&identifier_env)
                ->default_value(""), "Read identifier from process environment with "
                "specified environment value")
            ("subidentifier_env,X", po::value<string>(&subidentifier_env)
                ->default_value(""), "Read subidentifier from process environment "
                "with specified environment value")
            ("identifier_cgroup", po::value<string>(&identifier_cgroup)
                ->default_value(""), "Read identifier from cgroup specified cgroup set")
            ("subidentifier_cgroup", po::value<string>(&subidentifier_cgroup)
                ->default_value(""), "Read subidentifier from cgroup specified cgroup set")
        ;
        procmonOptions.add(processOptions);

#ifdef USE_AMQP
        po::options_description amqpConfig("AMQP Configuration Options");
        amqpConfig.add_options()
            ("mqhostname,H", po::value<string>(&mqServer)
                ->default_value(DEFAULT_AMQP_HOST), "hostname for AMQP Server")
            ("mqport,P", po::value<unsigned int>(&mqPort)
                ->default_value(DEFAULT_AMQP_PORT), "port for AMQP Server")
            ("mqvhost,Q", po::value<string>(&mqVHost)
                ->default_value(DEFAULT_AMQP_VHOST), "virtual-host for AMQP"
                " Server")
            ("mqexchange,E", po::value<string>(&mqExchangeName)
                ->default_value(DEFAULT_AMQP_EXCHANGE_NAME), "exchange name for"
                " AMQP Server")
            ("mquser,U", po::value<string>(&mqUser), "username for AMQP "
                "Server (default built-in)")
            ("mqpassword,Y", po::value<string>(&mqPassword), "password for "
                "AMQP Server (default built-in)")
            ("mqframe,R", po::value<unsigned int>(&mqFrameSize)
                ->default_value(DEFAULT_AMQP_FRAMESIZE), "maximum frame size "
                "for AMQP Messages (bytes)")
        ;
        procmonOptions.add(amqpConfig);
#endif
    }

    /**
     * Parse the command line and config files, then validate the result.
     * Prints usage and calls exit() on any invalid setting.
     */
    void parseOptions(int argc, char **argv) {
        /* determine the system-wide config file location */
        string baseConfigFile = string(SYSTEM_CONFIG_DIR) + "/procmon.conf";
        char *configEnv = NULL;
        if ((configEnv = getenv("PROCMON_DIR")) != NULL) {
            baseConfigFile = string(configEnv) + "/procmon.conf";
        }

        /* read command line options first; po::store keeps the first value
         * seen, so command line > user config > system config */
        po::variables_map vm;
        try {
            po::store(po::command_line_parser(argc, argv).options(procmonOptions).run(), vm);
            if (vm.count("config.file") > 0) {
                userConfigFile = vm["config.file"].as<string>();
                if (userConfigFile.length() > 0 && fs::exists(userConfigFile)) {
                    ifstream ifs(userConfigFile.c_str());
                    if (!ifs) {
                        // FIX: previously `throw &e` threw a pointer to a
                        // stack-local, which is UB and would bypass the
                        // catch (exception &) below; throw by value instead.
                        throw invalid_argument(string("Config file doesn't exist: ") + userConfigFile);
                    }
                    po::store(po::parse_config_file(ifs, procmonOptions), vm);
                    ifs.close();
                }
            }
            if (fs::exists(baseConfigFile)) {
                ifstream input(baseConfigFile.c_str());
                if (!input) {
                    // FIX: same pointer-throw bug as above.
                    throw invalid_argument("Base config file not readable: " + baseConfigFile);
                }
                po::store(po::parse_config_file(input, procmonOptions), vm);
                input.close();
            }
        } catch (exception &e) {
            cout << e.what() << endl;
            cout << procmonOptions << endl;
            exit(1);
        }
        po::notify(vm);

        if (vm.count("help")) {
            cout << procmonOptions << endl;
            exit(0);
        }
        if (vm.count("version")) {
            version();
            exit(0);
        }

        craylock = vm.count("craylock") != 0;
        daemonize = vm.count("daemonize") != 0;
        dummy = vm.count("dummy") != 0;
        verbose = vm.count("verbose") != 0;
        noOutput = vm.count("nooutput") != 0;

        if (frequency <= 0) {
            cerr << "Frequency must be at least 1 second!" << endl;
            cerr << procmonOptions << endl;
            exit(1);
        }
        if (initialPhase <= 0) {
            cerr << "Initial-phase, if specified, must be at least 1 second!" << endl;
            cerr << procmonOptions << endl;
            exit(1);
        }
        if (initialFrequency <= 0) {
            cerr << "Initial-frequency, if specified, must be at least 1 second!" << endl;
            cerr << procmonOptions << endl;
            exit(1);
        }
#ifdef SECURED
        if (vm.count("user")) {
            /* accept either a username or a numeric uid */
            struct passwd *user_data = getpwnam(user.c_str());
            if (user_data == NULL) {
                int uid = atoi(user.c_str());
                if (uid > 0) {
                    user_data = getpwuid(uid);
                    if (user_data != NULL) {
                        target_uid = user_data->pw_uid;
                    }
                }
            } else {
                target_uid = user_data->pw_uid;
            }
            if (target_uid <= 0) {
                cerr << "user, if specified, must resolve to a uid > 0" << endl;
                cerr << procmonOptions << endl;
                exit(1);
            }
        }
        if (vm.count("group")) {
            /* accept either a group name or a numeric gid */
            struct group *group_data = getgrnam(group.c_str());
            if (group_data == NULL) {
                int gid = atoi(group.c_str());
                group_data = getgrgid(gid);
                if (group_data != NULL) {
                    target_gid = group_data->gr_gid;
                }
            } else {
                target_gid = group_data->gr_gid;
            }
        }
#endif
        if (targetPPid <= 0) {
            cerr << "ppid must be at least 1 (should be 1 to track all user-space processes)" << endl;
            cerr << procmonOptions << endl;
            exit(1);
        }
        if (vm.count("fd_max")) {
            if (maxfd < 0) {
                cerr << "fd_max, if specified, must be at least 0!" << endl;
                cerr << procmonOptions << endl;
                exit(1);
            }
        }
        if (vm.count("group_min")) {
            if (gid_range_min <= 0) {
                cerr << "gid range minimum, if specified, must be at least 1!" << endl;
                cerr << procmonOptions << endl;
                exit(1);
            }
        }
        if (vm.count("group_max")) {
            if (gid_range_max <= 0) {
                cerr << "gid range maximum, if specified, must be at least 1!" << endl;
                cerr << procmonOptions << endl;
                exit(1);
            }
        }
        if (vm.count("sid")) {
            if (tgtSid <= 0) {
                cerr << "specified sid must be at least 1!" << endl;
                cerr << procmonOptions << endl;
                exit(1);
            }
        }
        if (vm.count("pgrp")) {
            if (tgtPgid <= 0) {
                cerr << "specified pgrp id must be at least 1!" << endl;
                cerr << procmonOptions << endl;
                exit(1);
            }
        }

        if (outputTextFilename != "") {
            outputFlags |= OUTPUT_TYPE_TEXT;
        }
#ifdef USE_HDF5
        if (outputHDF5Filename != "") {
            outputFlags |= OUTPUT_TYPE_HDF5;
        }
#endif
#ifdef USE_AMQP
        /* split the comma-separated server list and pick one at random */
        char *servers = strdup(mqServer.c_str());
        char *token = strtok(servers, ",");
        // FIX: seed with XOR; OR-ing pid and time biases the seed toward
        // all-ones and collapses distinct (pid, time) pairs.
        srand(getpid() ^ time(NULL));
        while (token != NULL) {
            mqServers.push_back(string(token));
            token = strtok(NULL, ",");
        }
        free(servers);
        // FIX: guard against an empty server list (e.g. --mqhostname "");
        // rand() % 0 is undefined behavior.
        if (!mqServers.empty()) {
            mqServer = mqServers[rand() % mqServers.size()];
        }
        if (mqServer != "" && mqServer != "__NONE__") {
            outputFlags |= OUTPUT_TYPE_AMQP;
        }
#endif
        if (noOutput) {
            outputFlags = OUTPUT_TYPE_NONE;
        }
        if (outputFlags == 0) {
            cerr << "No output mechanism specified (text, hdf5, or AMQP)" << endl;
            cerr << procmonOptions << endl;
            exit(1);
        }

        // deal with finding secondary gid mapping if applicable
        if (gid_range_min > 0 && gid_range_max > 0) {
            if (gid_range_min > gid_range_max) {
                int temp = gid_range_min;
                gid_range_min = gid_range_max;
                gid_range_max = temp;
            }
            gid_t processGids[512];
            int foundGroups = getgroups(512, processGids);
            for (int i = 0; i < foundGroups; i++) {
                if (processGids[i] >= gid_range_min && processGids[i] <= gid_range_max) {
                    tgtGid = processGids[i];
                    break;
                }
            }
        }

        // deal with cgroup identifier
        // NOTE(review): if parseOptions were ever called twice, resetting
        // these to NULL would leak the previous regex objects.
        identifier_cgroup_regex = NULL;
        subidentifier_cgroup_regex = NULL;
        if (identifier_cgroup != "") {
            identifier_cgroup_regex = new boost::regex(identifier_cgroup);
        }
        if (subidentifier_cgroup != "") {
            subidentifier_cgroup_regex = new boost::regex(subidentifier_cgroup);
        }
    }

    inline int getMaxIterations() const {
        return maxIterations;
    }

    const string getContext();

    /// Print the version banner and exit.
    void version() {
        cout << "Procmon " << PROCMON_VERSION;
#ifdef SECURED
        cout << " (interactive)";
#endif
        cout << endl;
        exit(0);
    }

    friend ostream& operator<<(ostream&, ProcmonConfig&);
};
ostream& operator<<(ostream& os, const ProcmonConfig& pc);
/* want to use template specialization or functors to perform process discovery */
/**
 * Holds the set of discovered pids plus flags recording which per-process
 * data sources were successfully read.  getPids() is defined elsewhere.
 */
class ProcessHierarchy {
    private:
    // NOTE(review): presumably set while reading the per-process status /
    // stat / mstat data — confirm against the out-of-line implementation.
    bool readStatus;
    bool readStat;
    bool readMStat;
    vector<pid_t> pids;

    public:
    const vector<pid_t>& getPids();
    // FIX: dropped the meaningless top-level `const` on by-value returns
    // (it has no effect and triggers -Wignored-qualifiers).
    inline bool didReadStatus() const { return readStatus; }
    inline bool didReadStat() const { return readStat; }
    inline bool didReadMStat() const { return readMStat; }
};
#endif
| {
"alphanum_fraction": 0.5685949631,
"author": null,
"avg_line_length": 35.3835845896,
"converted": null,
"ext": "hh",
"file": null,
"hexsha": "6269ddc48e5ca5e3fe28780105933a6380d2f739",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c6e67d63e7c9c24f85a46b6d8965b8c615097edc",
"max_forks_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_forks_repo_name": "glennklockwood/procmon",
"max_forks_repo_path": "src/procmon.hh",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c6e67d63e7c9c24f85a46b6d8965b8c615097edc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_issues_repo_name": "glennklockwood/procmon",
"max_issues_repo_path": "src/procmon.hh",
"max_line_length": 102,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c6e67d63e7c9c24f85a46b6d8965b8c615097edc",
"max_stars_repo_licenses": [
"BSD-3-Clause-LBNL"
],
"max_stars_repo_name": "glennklockwood/procmon",
"max_stars_repo_path": "src/procmon.hh",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4632,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 21124
} |
! Structural 3D FEM driver: reads the problem, solves it, then writes
! displacement, stress, and strain results for GiD post-processing.
program main
  use DataInputM
  use Structural3DApplicationM
  use StructuralStrategyM
  use SolvingStrategyM
  use GIDDataOutputM
  implicit none
  type(Structural3DApplicationDT) :: application
  type(StructuralStrategyDT)      :: strategy
  type(SolvingStrategyDT)         :: solvingStrategy
  ! Read the mesh/material/boundary data and run the solver.
  call initFEM3D(application)
  solvingStrategy = InitSolvingStrategy(strategy, application%model)
  call solvingStrategy%useStrategy()
  ! Emit results; each printResults call writes one result set.
  call initDataOutput()
  ! Nodal displacements (3 dof per node).
  ! NOTE(review): unlike the later calls this one passes no `type`
  ! argument — presumably printResults defaults it; confirm.
  call printResults(resultName = 'Displacement' &
       , step = 1 &
       , graphType = 'Vector' &
       , locationName = 'onNodes' &
       , resultNumber = application%model%getnNode() &
       , nDof = 3 &
       , component1 = application%model%dof )
  ! Normal stresses at tetrahedral Gauss points.
  call printResults(resultName = 'NormalStressOnTetras' &
       , type = 'Tetrahedra' &
       , step = 1 &
       , graphType = 'Vector' &
       , locationName = 'onGaussPoints' &
       , gaussPoints = application%model%normalStress%tetraGPoint &
       , resultNumber = size(application%model%normalStress%tetraElemID) &
       , elemID = application%model%normalStress%tetraElemID &
       , component1 = application%model%normalStress%tetraNS(:,1) &
       , component2 = application%model%normalStress%tetraNS(:,2) &
       , component3 = application%model%normalStress%tetraNS(:,3) )
  ! Normal stresses at hexahedral Gauss points.
  call printResults(resultName = 'NormalStressOnHexas' &
       , type = 'Hexahedra' &
       , step = 1 &
       , graphType = 'Vector' &
       , locationName = 'onGaussPoints' &
       , gaussPoints = application%model%normalStress%hexaGPoint &
       , resultNumber = size(application%model%normalStress%hexaElemID) &
       , elemID = application%model%normalStress%hexaElemID &
       , component1 = application%model%normalStress%hexaNS(:,1) &
       , component2 = application%model%normalStress%hexaNS(:,2) &
       , component3 = application%model%normalStress%hexaNS(:,3) )
  ! Shear stresses at tetrahedral Gauss points.
  call printResults(resultName = 'ShearStressOnTetras' &
       , type = 'Tetrahedra' &
       , step = 1 &
       , graphType = 'Vector' &
       , locationName = 'onGaussPoints' &
       , gaussPoints = application%model%shearStress%tetraGPoint &
       , resultNumber = size(application%model%shearStress%tetraElemID) &
       , elemID = application%model%shearStress%tetraElemID &
       , component1 = application%model%shearStress%tetraShS(:,1) &
       , component2 = application%model%shearStress%tetraShS(:,2) &
       , component3 = application%model%shearStress%tetraShS(:,3) )
  ! Shear stresses at hexahedral Gauss points.
  call printResults(resultName = 'ShearStressOnHexas' &
       , type = 'Hexahedra' &
       , step = 1 &
       , graphType = 'Vector' &
       , locationName = 'onGaussPoints' &
       , gaussPoints = application%model%shearStress%hexaGPoint &
       , resultNumber = size(application%model%shearStress%hexaElemID) &
       , elemID = application%model%shearStress%hexaElemID &
       , component1 = application%model%shearStress%hexaShS(:,1) &
       , component2 = application%model%shearStress%hexaShS(:,2) &
       , component3 = application%model%shearStress%hexaShS(:,3) )
  ! Strains at tetrahedral Gauss points.
  call printResults(resultName = 'StrainStressOnTetras' &
       , type = 'Tetrahedra' &
       , step = 1 &
       , graphType = 'Vector' &
       , locationName = 'onGaussPoints' &
       , gaussPoints = application%model%strain%tetraGPoint &
       , resultNumber = size(application%model%strain%tetraElemID) &
       , elemID = application%model%strain%tetraElemID &
       , component1 = application%model%strain%tetraEp(:,1) &
       , component2 = application%model%strain%tetraEp(:,2) &
       , component3 = application%model%strain%tetraEp(:,3) )
  ! Strains at hexahedral Gauss points.
  call printResults(resultName = 'StrainStressOnHexas' &
       , type = 'Hexahedra' &
       , step = 1 &
       , graphType = 'Vector' &
       , locationName = 'onGaussPoints' &
       , gaussPoints = application%model%strain%hexaGPoint &
       , resultNumber = size(application%model%strain%hexaElemID) &
       , elemID = application%model%strain%hexaElemID &
       , component1 = application%model%strain%hexaEp(:,1) &
       , component2 = application%model%strain%hexaEp(:,2) &
       , component3 = application%model%strain%hexaEp(:,3) )
end program main
| {
"alphanum_fraction": 0.4899328859,
"author": null,
"avg_line_length": 58.3711340206,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "2259e7a63bfd671cebefb528f8143a1d2728b682",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2022-03-30T10:45:13.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-12-19T08:02:30.000Z",
"max_forks_repo_head_hexsha": "abcfcc4945024bd6bd5832bc3ef3d4e1b0df3b91",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ponfo/Project790",
"max_forks_repo_path": "applications/Structural3D/main.f90",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "abcfcc4945024bd6bd5832bc3ef3d4e1b0df3b91",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T03:14:16.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-03-29T03:14:16.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ponfo/Project790",
"max_issues_repo_path": "applications/Structural3D/main.f90",
"max_line_length": 75,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "abcfcc4945024bd6bd5832bc3ef3d4e1b0df3b91",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ponfo/Project790",
"max_stars_repo_path": "applications/Structural3D/main.f90",
"max_stars_repo_stars_event_max_datetime": "2020-07-23T22:25:21.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-07-13T23:33:40.000Z",
"num_tokens": 1333,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 5662
} |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import stim
import numpy as np
def test_basic():
    # Qubits start in |0>, so measurement is deterministically False until a
    # gate flips the state; X makes it deterministically True; reset restores it.
    s = stim.TableauSimulator()
    assert s.measure(0) is False
    assert s.measure(0) is False
    s.x(0)
    assert s.measure(0) is True
    assert s.measure(0) is True
    s.reset(0)
    assert s.measure(0) is False
    # H twice is the identity on qubit 0; sqrt_x twice equals a full X on
    # qubit 1, so the pair measures [False, True].
    s.h(0)
    s.h(0)
    s.sqrt_x(1)
    s.sqrt_x(1)
    assert s.measure_many(0, 1) == [False, True]
def test_access_tableau():
    # The simulator's tableau grows lazily: it stays size 0/1/2 until a
    # higher-indexed qubit is first touched.
    s = stim.TableauSimulator()
    assert s.current_inverse_tableau() == stim.Tableau(0)
    s.h(0)
    assert s.current_inverse_tableau() == stim.Tableau.from_named_gate("H")
    s.h(0)
    # H is self-inverse, so the tableau is back to the identity (size 1).
    assert s.current_inverse_tableau() == stim.Tableau(1)
    s.h(1)
    s.h(1)
    assert s.current_inverse_tableau() == stim.Tableau(2)
    s.h(2)
    # One H on qubit 2 exchanges X and Z there while leaving qubits 0-1 alone.
    assert s.current_inverse_tableau() == stim.Tableau.from_conjugated_generators(
        xs=[
            stim.PauliString("X__"),
            stim.PauliString("_X_"),
            stim.PauliString("__Z"),
        ],
        zs=[
            stim.PauliString("Z__"),
            stim.PauliString("_Z_"),
            stim.PauliString("__X"),
        ],
    )
@pytest.mark.parametrize("name", [
    "x",
    "y",
    "z",
    "h",
    "h_xy",
    "h_yz",
    "sqrt_x",
    "sqrt_x_dag",
    "sqrt_y",
    "sqrt_y_dag",
    "s",
    "s_dag",
    "swap",
    "iswap",
    "iswap_dag",
    "xcx",
    "xcy",
    "xcz",
    "ycx",
    "ycy",
    "ycz",
    "cnot",
    "cy",
    "cz",
])
def test_gates_present(name: str):
    # For each named gate, applying it via the simulator method must match
    # applying it via a stim.Circuit on an identically prepared state.
    t = stim.Tableau.from_named_gate(name)
    n = len(t)
    s1 = stim.TableauSimulator()
    s2 = stim.TableauSimulator()
    # Prepare the same state on both simulators: each target qubit k is
    # paired with ancilla k+n via H + CNOT.
    for k in range(n):
        s1.h(k)
        s2.h(k)
        s1.cnot(k, k + n)
        s2.cnot(k, k + n)
    getattr(s1, name)(*range(n))
    s2.do(stim.Circuit(f"{name} " + " ".join(str(e) for e in range(n))))
    assert s1.current_inverse_tableau() == s2.current_inverse_tableau()
def test_do():
    """A circuit applied via do() updates the inverse tableau (S inverts to S_DAG)."""
    sim = stim.TableauSimulator()
    circuit = stim.Circuit("""
        S 0
    """)
    sim.do(circuit)
    assert stim.Tableau.from_named_gate("S_DAG") == sim.current_inverse_tableau()
def test_peek_bloch():
    # Track how the reported single-qubit state changes after each gate.
    s = stim.TableauSimulator()
    assert s.peek_bloch(0) == stim.PauliString("+Z")
    s.x(0)
    assert s.peek_bloch(0) == stim.PauliString("-Z")
    s.h(0)
    assert s.peek_bloch(0) == stim.PauliString("-X")
    s.sqrt_x(1)
    assert s.peek_bloch(1) == stim.PauliString("-Y")
    # After entangling via CZ, neither qubit has a definite single-qubit
    # Pauli state, so both report identity.
    s.cz(0, 1)
    assert s.peek_bloch(0) == stim.PauliString("+I")
    assert s.peek_bloch(1) == stim.PauliString("+I")
def test_copy():
    """copy() yields an equal-state but distinct simulator object."""
    sim = stim.TableauSimulator()
    sim.h(0)
    duplicate = sim.copy()
    assert duplicate.current_inverse_tableau() == sim.current_inverse_tableau()
    assert duplicate is not sim
def test_paulis():
    # Apply a PauliString via do(), then undo it with the matching x/y/z
    # calls (Z on 0-2, Y on 4-6, X on 8-10); unwinding the H/CNOT prep must
    # leave every qubit back in |0>.
    s = stim.TableauSimulator()
    s.h(*range(0, 22, 2))
    s.cnot(*range(22))
    s.do(stim.PauliString("ZZZ_YYY_XXX"))
    s.z(0, 1, 2)
    s.y(4, 5, 6)
    s.x(8, 9, 10)
    s.cnot(*range(22))
    s.h(*range(0, 22, 2))
    assert s.measure_many(*range(22)) == [False] * 22

    # Large Pauli strings: Z**500 leaves |0...0> unchanged, X**500 flips all.
    s = stim.TableauSimulator()
    s.do(stim.PauliString("Z" * 500))
    assert s.measure_many(*range(500)) == [False] * 500
    s.do(stim.PauliString("X" * 500))
    assert s.measure_many(*range(500)) == [True] * 500
def test_measure_kickback():
    # Deterministic measurements report a None kickback; random ones report
    # the Pauli operation that distinguishes the two outcome branches.
    s = stim.TableauSimulator()
    assert s.measure_kickback(0) == (False, None)
    assert s.measure_kickback(0) == (False, None)
    assert s.current_measurement_record() == [False, False]
    s.h(0)
    v = s.measure_kickback(0)
    assert isinstance(v[0], bool)
    assert v[1] == stim.PauliString("X")
    # Re-measuring after collapse is deterministic, and every measurement
    # (kickback or not) is appended to the record.
    assert s.measure_kickback(0) == (v[0], None)
    assert s.current_measurement_record() == [False, False, v[0], v[0]]

    # On a Bell pair the kickback spans both qubits ("XX"), regardless of
    # which half is measured first.
    s = stim.TableauSimulator()
    s.h(0)
    s.cnot(0, 1)
    v = s.measure_kickback(0)
    assert isinstance(v[0], bool)
    assert v[1] == stim.PauliString("XX")
    assert s.measure_kickback(0) == (v[0], None)

    s = stim.TableauSimulator()
    s.h(0)
    s.cnot(0, 1)
    v = s.measure_kickback(1)
    assert isinstance(v[0], bool)
    assert v[1] == stim.PauliString("XX")
    assert s.measure_kickback(0) == (v[0], None)
def test_post_select_using_measure_kickback():
    s = stim.TableauSimulator()

    def pseudo_post_select(qubit, desired_result):
        # Force a measurement outcome: if the random result differs from the
        # desired one, apply the kickback to flip to the other branch.
        m, kick = s.measure_kickback(qubit)
        if m != desired_result:
            if kick is None:
                raise ValueError("Deterministic measurement differed from desired result.")
            s.do(kick)

    # Prepare a GHZ state, post-select qubit 2 to True; the remaining qubits
    # must then all measure True.
    s.h(0)
    s.cnot(0, 1)
    s.cnot(0, 2)
    pseudo_post_select(qubit=2, desired_result=True)
    assert s.measure_many(0, 1, 2) == [True, True, True]
def test_measure_kickback_random_branches():
    """Check that a random measurement's kickback maps one branch onto the other."""
    # Start from a random 8-qubit stabilizer state.
    s = stim.TableauSimulator()
    s.set_inverse_tableau(stim.Tableau.random(8))
    r = s.peek_bloch(4)
    if r[0] == 3:  # Pauli index 3 (+-Z): qubit 4 is deterministic, no kickback to test.
        assert s.measure_kickback(4) == (r.sign == -1, None)
        return
    # Qubit 4 is random: sample independent copies until both outcomes are seen.
    post_false = None
    post_true = None
    for _ in range(100):
        if post_false is not None and post_true is not None:
            break
        copy = s.copy()
        if copy.measure(4):
            post_true = copy
        else:
            post_false = copy
    assert post_false is not None and post_true is not None
    result, kick = s.measure_kickback(4)
    assert isinstance(kick, stim.PauliString) and len(kick) == 8
    # Normalize to the False branch first, then apply the kickback once more
    # to reach the True branch; compare canonical stabilizers each time.
    if result:
        s.do(kick)
    assert s.canonical_stabilizers() == post_false.canonical_stabilizers()
    s.do(kick)
    assert s.canonical_stabilizers() == post_true.canonical_stabilizers()
def test_set_num_qubits():
    sim = stim.TableauSimulator()
    sim.h(0)
    sim.cnot(0, 1)
    sim.cnot(0, 2)
    sim.cnot(0, 3)
    before = sim.current_inverse_tableau()
    # Growing and shrinking back is a no-op while the extra qubits are untouched.
    sim.set_num_qubits(8)
    sim.set_num_qubits(4)
    assert sim.current_inverse_tableau() == before
    assert sim.peek_bloch(0) == stim.PauliString("_")
    # Shrinking away a qubit that became entangled collapses qubit 0 onto a Z axis.
    sim.set_num_qubits(8)
    sim.set_num_qubits(4)
    sim.cnot(0, 4)
    sim.set_num_qubits(4)
    assert sim.peek_bloch(0) in [stim.PauliString("+Z"), stim.PauliString("-Z")]
def test_canonical_stabilizers():
    # Build a graph state on a 3-vertex path.
    sim = stim.TableauSimulator()
    for q in range(3):
        sim.h(q)
    sim.cz(0, 1)
    sim.cz(1, 2)
    expected = [
        stim.PauliString("+X_X"),
        stim.PauliString("+ZXZ"),
        stim.PauliString("+_ZX"),
    ]
    assert sim.canonical_stabilizers() == expected
    # An S gate on the middle qubit turns its X into Y and flips a sign.
    sim.s(1)
    assert sim.canonical_stabilizers() == [
        stim.PauliString("+X_X"),
        stim.PauliString("-ZXY"),
        stim.PauliString("+_ZX"),
    ]
def test_classical_control_cnot():
    sim = stim.TableauSimulator()
    # Referencing a measurement record entry before any measurement is an error.
    with pytest.raises(IndexError, match="beginning of time"):
        sim.cnot(stim.target_rec(-1), 0)
    # Classical control on a recorded False leaves the target alone.
    assert not sim.measure(1)
    sim.cnot(stim.target_rec(-1), 0)
    assert not sim.measure(0)
    # Classical control on a recorded True flips the target.
    sim.x(1)
    assert sim.measure(1)
    sim.cnot(stim.target_rec(-1), 0)
    assert sim.measure(0)
def test_collision():
    sim = stim.TableauSimulator()
    # A two-qubit gate rejects a duplicated target within a single pair...
    with pytest.raises(ValueError, match="same qubit"):
        sim.cnot(0, 0)
    with pytest.raises(ValueError, match="same qubit"):
        sim.swap(0, 1, 2, 2)
    # ...but the same qubit may appear across different pairs.
    sim.swap(0, 2, 2, 1)
def is_parallel_state_vector(actual, expected) -> bool:
    """Return True when the two unit vectors are equal up to a global phase.

    `expected` must be one-dimensional and both inputs must be normalized
    (asserted). Vectors of different lengths simply compare unequal.
    """
    a = np.array(actual, dtype=np.complex64)
    b = np.array(expected, dtype=np.complex64)
    assert b.ndim == 1
    if a.shape != b.shape:
        return False
    # Both inputs are required to be normalized quantum states.
    assert abs(np.linalg.norm(a) - 1) < 1e-4
    assert abs(np.linalg.norm(b) - 1) < 1e-4
    # |<a|b>| is 1 exactly when the states differ only by a global phase.
    overlap = np.dot(a, np.conj(b))
    return abs(abs(overlap) - 1) < 1e-4
def test_is_parallel_state_vector():
    quarter = [0.5, 0.5, 0.5, 0.5]
    # Single-amplitude states match regardless of global phase.
    assert is_parallel_state_vector([1], [1])
    assert is_parallel_state_vector([1], [1j])
    assert is_parallel_state_vector([1j], [1])
    # A length mismatch is just "not parallel", not an error.
    assert not is_parallel_state_vector([1], [1, 2])
    # A uniform superposition matches globally-phased copies of itself.
    assert is_parallel_state_vector(quarter, quarter)
    assert is_parallel_state_vector(quarter, [0.5j, 0.5j, 0.5j, 0.5j])
    assert is_parallel_state_vector(quarter, [-0.5j, -0.5j, -0.5j, -0.5j])
    # A relative phase on one amplitude breaks parallelism.
    assert not is_parallel_state_vector(quarter, [-0.5j, -0.5j, -0.5j, 0.5j])
    assert not is_parallel_state_vector(quarter, [1, 0, 0, 0])
def test_to_state_vector():
    sim = stim.TableauSimulator()
    # With zero qubits the state vector is the scalar amplitude 1.
    assert is_parallel_state_vector(sim.state_vector(), [1])
    sim.set_num_qubits(1)
    assert is_parallel_state_vector(sim.state_vector(), [1, 0])
    sim.set_num_qubits(2)
    sim.x(0)
    assert is_parallel_state_vector(sim.state_vector(), [0, 1, 0, 0])
    sim.h(1)
    assert is_parallel_state_vector(sim.state_vector(), [0, 0.5**0.5, 0, 0.5**0.5])
    sim.h(0)
    assert is_parallel_state_vector(sim.state_vector(), [0.5, -0.5, 0.5, -0.5])
    sim.cnot(1, 0)
    assert is_parallel_state_vector(sim.state_vector(), [0.5, -0.5, -0.5, 0.5])
    sim.x(2)
    assert is_parallel_state_vector(sim.state_vector(), [0, 0, 0, 0, 0.5, -0.5, -0.5, 0.5])
    # After x(2) only amplitudes with qubit 2 set are nonzero; reshaping shows
    # qubit 2 appearing on the first axis of the (2, 2, 2) cube.
    amps = sim.state_vector().reshape((2,) * 3)
    assert amps[0, 0, 0] == 0
    assert amps[1, 0, 0] != 0
    assert amps[0, 1, 0] == 0
    assert amps[0, 0, 1] == 0
def test_peek_observable_expectation():
    # Prepare a GHZ state, then flip qubit 0.
    sim = stim.TableauSimulator()
    sim.do(stim.Circuit('''
        H 0
        CNOT 0 1 0 2
        X 0
    '''))
    expect = sim.peek_observable_expectation
    # Z-parities: the pairs touching the flipped qubit 0 are -1.
    assert expect(stim.PauliString("ZZ_")) == -1
    assert expect(stim.PauliString("_ZZ")) == 1
    assert expect(stim.PauliString("Z_Z")) == -1
    # X/Y products that stabilize the state, including explicit signs.
    assert expect(stim.PauliString("XXX")) == 1
    assert expect(stim.PauliString("-XXX")) == -1
    assert expect(stim.PauliString("YYX")) == +1
    assert expect(stim.PauliString("XYY")) == -1
    # Identity observables just report their own sign.
    assert expect(stim.PauliString("")) == 1
    assert expect(stim.PauliString("-I")) == -1
    assert expect(stim.PauliString("_____")) == 1
    # Observables over extra qubits work; anti-commuting observables give 0.
    assert expect(stim.PauliString("XXXZZZZZ")) == 1
    assert expect(stim.PauliString("XXXZZZZX")) == 0
    # Imaginary signs are rejected.
    with pytest.raises(ValueError, match="imaginary sign"):
        expect(stim.PauliString("iZZ"))
    with pytest.raises(ValueError, match="imaginary sign"):
        expect(stim.PauliString("-iZZ"))
| {
"alphanum_fraction": 0.6227923406,
"author": null,
"avg_line_length": 29.2336956522,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "fe8ad2c02b216bbb8e3ff21781deeac633419a46",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 20,
"max_forks_repo_forks_event_max_datetime": "2022-03-15T04:40:12.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-03-09T14:10:13.000Z",
"max_forks_repo_head_hexsha": "503de420b1e56e90d7f44337ead1065a2ae26740",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "noajshu/Stim",
"max_forks_repo_path": "src/stim/simulators/tableau_simulator_pybind_test.py",
"max_issues_count": 95,
"max_issues_repo_head_hexsha": "503de420b1e56e90d7f44337ead1065a2ae26740",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T08:53:44.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-04T00:11:30.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "noajshu/Stim",
"max_issues_repo_path": "src/stim/simulators/tableau_simulator_pybind_test.py",
"max_line_length": 91,
"max_stars_count": 99,
"max_stars_repo_head_hexsha": "503de420b1e56e90d7f44337ead1065a2ae26740",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "noajshu/Stim",
"max_stars_repo_path": "src/stim/simulators/tableau_simulator_pybind_test.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-22T11:39:08.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-03-03T19:03:25.000Z",
"num_tokens": 3494,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10758
} |
program tel
!     Translate global matrix indices (irow, icol) for a problem with 6
!     unknowns per node on an n x m x l structured grid back into grid
!     coordinates (i, j, k) and the unknown number iu, printing matches.
      integer n, m, l
      parameter (n=96, m=38, l=13)
      integer i, j, k, irow, icol, iu, itel
      write(6,*) 'give irow'
      read(5,*) irow
      write(6,*) 'give icol'
      read(5,*) icol
!     Enumerate every unknown. itel is its 1-based global index: iu varies
!     fastest, then i, then j, then k.
      do i = 1, n
         do j = 1, m
            do k = 1, l
               do iu = 1, 6
                  itel = 6*((k-1)*n*m + n*(j-1) + i - 1) + iu
                  if (itel .eq. irow) write(6,*) i, j, k, iu, itel
                  if (itel .eq. icol) write(6,*) i, j, k, iu, itel
               enddo
            enddo
         enddo
      enddo
      end
| {
"alphanum_fraction": 0.4411764706,
"author": null,
"avg_line_length": 21.76,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "fe7d8bdaaf0d5f1630f0a3486b9c46c19de6f3b3",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2019-03-27T12:24:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-02-18T10:17:59.000Z",
"max_forks_repo_head_hexsha": "9cc3ecd8945f6f7fd1f640819c20f5d63fcab194",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "nlesc-smcm/i-emic",
"max_forks_repo_path": "legacy/tel.f",
"max_issues_count": 83,
"max_issues_repo_head_hexsha": "47e01f0413abff57f43f58ac2e1244f6ae948a16",
"max_issues_repo_issues_event_max_datetime": "2022-01-20T10:45:40.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-03-04T10:07:02.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Sbte/i-emic",
"max_issues_repo_path": "legacy/tel.f",
"max_line_length": 44,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "47e01f0413abff57f43f58ac2e1244f6ae948a16",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Sbte/i-emic",
"max_stars_repo_path": "legacy/tel.f",
"max_stars_repo_stars_event_max_datetime": "2021-01-18T09:53:59.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-30T08:37:53.000Z",
"num_tokens": 205,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 544
} |
"""
A basic parser for tped plink formated files to a more convenient HDF5 format.
"""
import time
import h5py
import scipy as sp
def parse_single_12tped_to_hdf5(in_file_prefix='/home/bv25/data/Ls154/Ls154_12',
                                out_file_prefix='/home/bv25/data/Ls154/Ls154_12',
                                impute_type='mode', filter_monomorphic_snps=True,
                                missing_val_thr=0.1):
    """
    Parses plink 12 formatted tped file and stores it in a HDF5 file. It requires the h5py and scipy package.

    Ideally the genotypes are imputed apriory, otherwise a rough imputation
    (the most common genotype) is used for missing genotypes.

    Reads three files sharing `in_file_prefix`: the .tped genotypes, the
    .bim SNP map, and the .tfam individuals file.  Writes one HDF5 file at
    `out_file_prefix`.hdf5 with an 'indiv_data' group and, per chromosome,
    a 'genot_data/chrom_<name>' group of lzf-compressed datasets.

    `impute_type` -- only 'mode' is supported (fill missing genotypes with
        the most common one); anything else raises.
    `filter_monomorphic_snps` -- drop SNPs where all individuals share one genotype.
    `missing_val_thr` -- drop SNPs whose missing-call count exceeds this
        fraction (of 2 * number of individuals).

    Notes:
        Assumes the files are in diploid format!
        Python 2 code (print statements).
    """
    print 'Starting to parse genotypes'
    genotype_data = {}
    # NOTE(review): opened without an explicit mode; h5py's default may create
    # or append depending on version -- confirm intended behavior.
    h5py_file = h5py.File(out_file_prefix + '.hdf5')
    genotype_data['hdf5p_file'] = h5py_file
    genot_group = h5py_file.create_group('genot_data')
    indiv_group = h5py_file.create_group('indiv_data')
    # Running totals accumulated across all chromosomes.
    tot_num_snps = 0
    tot_num_missing_val_snps_removed = 0
    tot_num_ambiguous_loc_removed = 0
    curr_chrom = 1
    print 'Working on chromosome %d' % curr_chrom
    # The three plink files share a common prefix.
    g_filename = '%s.tped' % (in_file_prefix)
    s_filename = '%s.bim' % (in_file_prefix)
    i_filename = '%s.tfam' % (in_file_prefix)
    indiv_ids = []
    phenotypes = []
    sex = []
    print 'Parsing individuals file: %s' % i_filename
    # .tfam columns used: 0 = family/individual id, 4 = sex, 5 = phenotype.
    with open(i_filename) as f:
        for line in f:
            l = line.split()
            iid = l[0]
            indiv_ids.append(iid)
            sex.append(int(l[4]))
            phenotypes.append(float(l[5]))
    tot_num_indiv = len(indiv_ids)
    print 'Storing individual data in individ. group'
    indiv_group.create_dataset('indiv_ids', data=indiv_ids)
    indiv_group.create_dataset('sex', data=sex)
    indiv_group.create_dataset('phenotypes', data=phenotypes)
    num_indiv = len(indiv_ids)
    print 'Found %d Individuals' % (num_indiv)
    print 'Parsing nucleotide map'
    # Build SNP id -> (allele1, allele2) from the .bim file and record the
    # order in which chromosomes first appear.
    nt_map = {}
    chromsomoes = []
    curr_chrom = 0
    with open(s_filename) as f:
        for line in f:
            l = line.split()
            chrom = l[0]
            if chrom != curr_chrom:
                chromsomoes.append(chrom)
                curr_chrom = chrom
            nt_map[l[1]] = (l[4], l[5])
    # A repeated chromosome name would mean the file is not grouped by chromosome.
    assert len(chromsomoes) == len(set(chromsomoes)), 'Chromosomes need to be in order.'
    curr_chrom = chromsomoes[0]
    position = -1
    # Initializing containers.
    snps_mat = []
    positions = []
    sids = []
    nts_list = []
    nt_counts_list = []
    missing_counts = []
    freqs = []
    num_missing_removed = 0
    num_monomorphic_removed = 0
    num_ambiguous_loc_removed = 0
    t0 = time.time()
    print 'Starting to parse SNP files'
    gf = open(g_filename)
    for g_line in gf:
        # if random.random() > 0.01:
        #     continue
        gl = g_line.split()
        chrom = gl[0]
        if chrom != curr_chrom:
            # Chromosome boundary: flush the finished chromosome's data
            # to the HDF5 file and reset the per-chromosome containers.
            print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed
            print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed
            print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed
            print 'Number of SNPs retained: %d' % len(positions)
            print 'Number of individuals: %d' % num_indiv
            snps = sp.array(snps_mat, dtype='int8')
            h5py_chrom_group = genot_group.create_group('chrom_%s' % curr_chrom)
            h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)
            h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)
            h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)
            h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)
            h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)
            h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)
            h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids)
            tot_num_snps += len(positions)
            tot_num_missing_val_snps_removed += num_missing_removed
            tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed
            h5py_file.flush()
            t1 = time.time()
            t = t1 - t0
            print 'It took %d minutes and %0.2f seconds to parse Chromosome %s.' % (t / 60, t % 60, curr_chrom)
            t0 = time.time()
            # Reset containers
            snps_mat = []
            positions = []
            sids = []
            nts_list = []
            nt_counts_list = []
            missing_counts = []
            freqs = []
            num_missing_removed = 0
            num_ambiguous = 0  # NOTE(review): assigned but never read anywhere.
            num_monomorphic_removed = 0
            num_ambiguous_loc_removed = 0
            curr_chrom = chrom
        sid = gl[1]
        prev_position = position
        position = int(gl[3])
        # Skipping unmappable locations: duplicated or zero base positions.
        if position == prev_position:
            num_ambiguous_loc_removed += 1
            continue
        if position == 0:
            num_ambiguous_loc_removed += 1
            continue
        nt = nt_map[sid]
        # Genotype calls start at field 4: two integer allele calls per individual.
        snp0 = sp.array(map(int, (g_line.strip()).split()[4:]), 'int8')
        a = sp.arange(tot_num_indiv * 2)
        even_map = a % 2 == 0
        odd_map = a % 2 == 1
        # Sum each individual's two 1/2-coded calls into a 0/1/2 genotype;
        # negative results (a 0 "missing" call was present) become code 9.
        snp = snp0[even_map] + snp0[odd_map] - 2
        snp[snp < 0] = 9
        bin_counts = sp.bincount(snp)
        if len(bin_counts) > 3:
            # More than three bins means code 9 occurred, i.e. missing calls.
            missing_count = bin_counts[-1]
            # Filtering SNPs with too many missing values
            if missing_count > missing_val_thr * 2 * num_indiv:
                num_missing_removed += 1
                continue
            elif impute_type == 'mode':
                # Fill missing genotypes with the most common genotype.
                nt_counts = bin_counts[:3]
                v = sp.argmax(nt_counts)
                snp[snp == 9] = v
                bin_counts = sp.bincount(snp)
            else:
                raise Exception('Imputation type is unknown')
        else:
            missing_count = 0
        assert len(bin_counts) < 4, 'Issues with nucleotides.'
        # Pad the genotype counts out to the three classes 0/1/2.
        nt_counts = bin_counts[:3]
        if len(nt_counts) == 2:
            nt_counts = sp.array([nt_counts[0], nt_counts[1], 0])
        elif len(nt_counts) == 1:
            nt_counts = sp.array([nt_counts[0], 0, 0])
        # Removing monomorphic SNPs
        if filter_monomorphic_snps:
            if max(nt_counts) == sum(nt_counts):
                num_monomorphic_removed += 1
                continue
        # Allele frequency: mean genotype divided by ploidy.
        freq = sp.mean(snp) / 2.0
        snps_mat.append(snp)
        positions.append(position)
        sids.append(sid)
        nts_list.append(nt)
        nt_counts_list.append(nt_counts)
        missing_counts.append(missing_count)
        freqs.append(freq)
    # End of file: flush the final chromosome's data the same way.
    print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed
    print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed
    print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed
    print 'Number of SNPs retained: %d' % len(positions)
    print 'Number of individuals: %d' % num_indiv
    snps = sp.array(snps_mat, dtype='int8')
    h5py_chrom_group = genot_group.create_group('chrom_%s' % chrom)
    h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)
    h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)
    h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)
    h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)
    h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)
    h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)
    h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids)
    tot_num_snps += len(positions)
    tot_num_missing_val_snps_removed += num_missing_removed
    tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed
    h5py_file.create_dataset('num_snps', data=sp.array(tot_num_snps))
    h5py_file.flush()
    t1 = time.time()
    t = t1 - t0
    print 'It took %d minutes and %0.2f seconds to parse chromosome %s.' % (t / 60, t % 60, chrom)
    gf.close()
    print 'Total number of SNPs parsed successfully was: %d' % tot_num_snps
    print 'Total number of SNPs removed due to too many missing values: %d' % tot_num_missing_val_snps_removed
    print 'Total number of SNPs removed due to ambiguous locations: %d' % tot_num_ambiguous_loc_removed
    h5py_file.close()
    print 'Done parsing genotypes.'
| {
"alphanum_fraction": 0.6025880031,
"author": null,
"avg_line_length": 37.6818181818,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a5aaeafcc6c6cdf0e803a585d3237a19f8f0df47",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 8,
"max_forks_repo_forks_event_max_datetime": "2022-02-11T19:56:19.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-02-16T07:35:59.000Z",
"max_forks_repo_head_hexsha": "766b889d4f5e97f4c9a960e3a007b125137ba796",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bvilhjal/mixmogam",
"max_forks_repo_path": "plink2hdf5.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "766b889d4f5e97f4c9a960e3a007b125137ba796",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bvilhjal/mixmogam",
"max_issues_repo_path": "plink2hdf5.py",
"max_line_length": 111,
"max_stars_count": 15,
"max_stars_repo_head_hexsha": "766b889d4f5e97f4c9a960e3a007b125137ba796",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bvilhjal/mixmogam",
"max_stars_repo_path": "plink2hdf5.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-22T12:13:21.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-08-02T05:39:06.000Z",
"num_tokens": 2348,
"path": null,
"reason": "import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9119
} |
# ##############################################
# Implementation of a (k,n) threshold scheme #
################################################
import sys
import numpy as np
# Parameters that needs to be changed manually are marked with: <--
# Values that need manual editing are marked with: <--
par_nbr = 1  #<-- participant number
n = 5  #<--
k = 3  #<--
polynom = np.poly1d([14, 4, 16])  #<-- participant's private polynom (descending order, largest first)
if(n > 8 or k < 3 or k >= n):
    sys.exit("Invalid parameters")

# This participant's share: its own polynomial evaluated at its number plus
# the values received from the collaborating participants.
par_sum = polynom(par_nbr) + 45 + 57 + 30 + 39  #<--
col_sum = [0] * n
col_sum[par_nbr - 1] = par_sum
col_sum[2 - 1] = 471  #<--
col_sum[4 - 1] = 1381  #<--
#col_sum[5 - 1] = 50751 #<--
#col_sum[6 - 1] = 101700 #<--

# Lagrange interpolation at x = 0 over the known shares (entries that are
# nonzero) reconstructs the shared secret.
tot_sum = 0
for i in range(n):
    if col_sum[i] == 0:
        continue
    # x-coordinates of the other participating shares.
    others = [j + 1 for j in range(n) if col_sum[j] != 0 and j != i]
    num = 1
    for x in others:
        num *= x
    dem = 1
    for x in others:
        dem *= x - (i + 1)
    tot_sum += (num / dem) * col_sum[i]
print(tot_sum)
| {
"alphanum_fraction": 0.5047454702,
"author": null,
"avg_line_length": 24.1458333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3162b9522006d2fcd9123dfd6dd03fce1b0754c6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e9d520efdb7c958740f7672c809a2d8c3a5761bf",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "OttossonJoel/SecurityAlgorithms",
"max_forks_repo_path": "threshold_scheme.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e9d520efdb7c958740f7672c809a2d8c3a5761bf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "OttossonJoel/SecurityAlgorithms",
"max_issues_repo_path": "threshold_scheme.py",
"max_line_length": 104,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e9d520efdb7c958740f7672c809a2d8c3a5761bf",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "OttossonJoel/SecurityAlgorithms",
"max_stars_repo_path": "threshold_scheme.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 368,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1159
} |
import cv2
import numpy as np
import sys
import shutil
import os
class Crop():
    """Interactive polygon-labelling tool for one image (OpenCV GUI).

    Left-click adds a vertex, right-click closes the current polygon,
    middle-click discards the polygon in progress.  Holding SHIFT applies
    the same actions to "extruded" (excluded) polygons instead.
    Keyboard in `do_crop`: (e)rase unfinished, (r)estore, (n)ext/skip,
    (v)iew result and save the mask.
    """

    def __init__(self, image_name):
        self.image_name = image_name
        ##### Filled polygons
        # lpnts : polygon currently being built (shape (1, n_points, 2))
        self.lpnts = np.empty((1,0,2), dtype=np.int32)
        # rpnts : ready (closed) polygons
        self.rpnts = []
        ##### Extruded polygons
        # lextpnts: extruded polygon currently being built
        self.lextpnts = np.empty((1,0,2), dtype=np.int32)
        # rextpnts : ready (closed) extruded polygons
        self.rextpnts = []
        # image is drawn on; copied_image is the last committed state used
        # to undo an in-progress polygon.
        self.image = cv2.imread(self.image_name,-1)
        self.copied_image = self.image.copy()

    def crop_for_mouse(self,event,x,y,flags= None,parameters = None):
        """Mouse callback: build filled (or, with SHIFT, extruded) polygons."""
        self.event = event
        self.x = x
        self.y = y
        self.flags = flags
        self.parameters = parameters
        SHIFT_FLAG = 16 #this may vary on different OS/version of openCV
        # add point to polygon being built
        if self.event == cv2.EVENT_LBUTTONDOWN:
            if (SHIFT_FLAG == (flags & SHIFT_FLAG)):
                # SHIFT held: extend the extruded polygon (drawn red).
                self.lextpnts = np.append(self.lextpnts, np.array([[[self.x, self.y]]]), axis=1)
                cv2.polylines(self.image, [self.lextpnts], False, (0, 0, 255))
            else:
                # Extend the filled polygon (drawn green).
                self.lpnts = np.append(self.lpnts, np.array([[[self.x, self.y]]]), axis=1)
                cv2.polylines(self.image, [self.lpnts], False, (0, 255, 0))
        # add last point to polygon being built (close the polygon)
        elif self.event == cv2.EVENT_RBUTTONDOWN:
            if (SHIFT_FLAG == (flags & SHIFT_FLAG)):
                self.lextpnts = np.append(self.lextpnts, np.array([[[self.x, self.y]]]), axis=1)
                cv2.polylines(self.image, [self.lextpnts], True, (0, 0, 255))
                # Commit the closed polygon and start a fresh one.
                self.rextpnts.append(self.lextpnts)
                self.lextpnts = np.empty((1,0,2), dtype=np.int32)
                self.copied_image = self.image.copy()
            else:
                self.lpnts = np.append(self.lpnts, np.array([[[self.x, self.y]]]), axis=1)
                cv2.polylines(self.image, [self.lpnts], True, (0, 255, 0))
                self.rpnts.append(self.lpnts)
                self.lpnts = np.empty((1,0,2), dtype=np.int32)
                self.copied_image = self.image.copy()
        # erase polygon being built
        elif self.event == cv2.EVENT_MBUTTONDOWN:
            if (SHIFT_FLAG == (flags & SHIFT_FLAG)):
                self.lextpnts = np.empty((1,0,2), dtype=np.int32)
                self.image = self.copied_image.copy()
            else:
                self.lpnts = np.empty((1,0,2), dtype=np.int32)
                self.image = self.copied_image.copy()

    def do_crop(self):
        """Run the labelling UI loop, then save either the mask or the raw image.

        On (v): moves the original into ./PV/ and writes a black-and-white
        mask (white = available, black = extruded) into ./PV/labels/.
        On (n): moves the original into ./noPV/ without labelling.
        """
        cv2.namedWindow("CROP", cv2.WINDOW_NORMAL) ## Magnifying the window for more precise labelling
        cv2.resizeWindow("CROP", 800, 800) ## 1250
        cv2.setMouseCallback("CROP",self.crop_for_mouse)
        self.skip = False
        while True:
            cv2.imshow("CROP",self.image)
            cv2.setMouseCallback("CROP",self.crop_for_mouse)
            keypress = cv2.waitKey(1)
            #(e)rase last non closed polygon/extruded polygon
            if keypress == ord('e'):
                self.image = self.copied_image.copy() ## RC -- this command doesn't really work in practice
                self.lpnts = np.empty((1,0,2), dtype=np.int32)
                self.lextpnts = np.empty((1,0,2), dtype=np.int32)
            #(r)estore initial situation
            if keypress == ord('r'):
                self.image = cv2.imread(self.image_name,-1)
                self.copied_image = self.image.copy()
                self.lpnts = np.empty((1,0,2), dtype=np.int32)
                self.rpnts = []
                self.lextpnts = np.empty((1,0,2), dtype=np.int32)
                self.rextpnts = []
            #(n)ext file
            if keypress == ord('n'):
                self.skip = True
                break
            #(v)izualize result and go to next file
            if keypress == ord('v'):
                self.skip = False
                break
        if self.skip is False:
            image_path = self.image_name.split(os.path.sep)
            image_orig_path = os.sep.join(image_path[:-1]) + '/PV/' + image_path[-1]
            print("Saving original image to ", image_orig_path)
            shutil.move(self.image_name, image_orig_path) ## RC comment: once final it should become --> shutil.move(self.image_name, image_path) ## shutil.copyfile
            mask = np.zeros(self.image.shape, dtype=np.uint8)
            channel_count = self.image.shape[2]
            #white in output mask (i.e available for PV)
            ignore_mask_color = (255,)*channel_count
            #black in output mask (i.e not available for PV)-> used for extruded polygons
            extrude_mask_color = (0,)*channel_count
            for point in self.rpnts:
                cv2.fillPoly(mask, point, ignore_mask_color)
            #Extrude polygons (painted last so they win over filled ones)
            for point in self.rextpnts:
                cv2.fillPoly(mask, point, extrude_mask_color)
            masked_image = cv2.bitwise_and(self.image,mask)
            image_path = os.sep.join(image_path[:-1]) + '/PV/labels/'\
                + image_path[-1][:-4] + '_label' + \
                image_path[-1][-4:]
            print("Saving labelled image to ", image_path)
            cv2.imshow("ROI", masked_image)
            cv2.imwrite(image_path, mask) ## Writing the B&W mask (instead of masked_image)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        else:
            image_path = self.image_name.split(os.path.sep)
            image_path = os.sep.join(image_path[:-1]) + '/noPV/' + image_path[-1]
            print("Saving original image to ", image_path)
            shutil.move(self.image_name, image_path) ## RC comment: once final it should become --> shutil.move(self.image_name, image_path)
| {
"alphanum_fraction": 0.5486885246,
"author": null,
"avg_line_length": 41.7808219178,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f5771bcb94145faec8c3e9dda1947970d4dcec1e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2021-07-16T13:44:01.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-07-07T08:39:47.000Z",
"max_forks_repo_head_hexsha": "2568eb25e5b9eaf3c09ce5f4721e71694c1d7440",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jschmidtnj/productstudio",
"max_forks_repo_path": "rooftop-detection/labelling_tool/crop.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2568eb25e5b9eaf3c09ce5f4721e71694c1d7440",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jschmidtnj/productstudio",
"max_issues_repo_path": "rooftop-detection/labelling_tool/crop.py",
"max_line_length": 165,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "2568eb25e5b9eaf3c09ce5f4721e71694c1d7440",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jschmidtnj/productstudio",
"max_stars_repo_path": "rooftop-detection/labelling_tool/crop.py",
"max_stars_repo_stars_event_max_datetime": "2021-04-23T21:11:19.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-01-29T09:34:58.000Z",
"num_tokens": 1478,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6100
} |
import numpy as np
import time
# Sample pseudo min/max/average metrics once per minute for five minutes,
# then print a timestamped summary line for each sample.
numMin = []
numMax = []
numAvg = []
ts = []

# Print today's date once, then switch the format to hour:minute timestamps.
tformat = "%d/%m/%Y"
print(time.strftime(tformat))
tformat = "%H:%M"
#tformat = "%H:%M:%S"

for _ in range(5):
    numMin.append(np.random.randint(1, high=3))
    numMax.append(np.random.randint(10, high=15))
    numAvg.append(np.random.randint(3, high=10))
    ts.append(time.strftime(tformat))
    # Progress tick without a newline while waiting out the next minute.
    print("!", end='', flush=True)
    time.sleep(60)
print('')

for stamp, lo, hi, avg in zip(ts, numMin, numMax, numAvg):
    print(stamp, "Min:", lo, "Max:", hi, "Average:", avg)
| {
"alphanum_fraction": 0.6070763501,
"author": null,
"avg_line_length": 23.347826087,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6f9ca5ad8d28ec9cf4943e99b308f4d4f822535a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "17184a09ff6123da50562a4a4841a0d7fc75b9d9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "suakao/Python-for-Network-Engineer-Training",
"max_forks_repo_path": "Python for Network Engineer By MUT/pyw13.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "17184a09ff6123da50562a4a4841a0d7fc75b9d9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "suakao/Python-for-Network-Engineer-Training",
"max_issues_repo_path": "Python for Network Engineer By MUT/pyw13.py",
"max_line_length": 71,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "17184a09ff6123da50562a4a4841a0d7fc75b9d9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "suakao/Python-for-Network-Engineer-Training",
"max_stars_repo_path": "Python for Network Engineer By MUT/pyw13.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 163,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 537
} |
#!/usr/bin/env python
import os
import argparse
import subprocess
import numpy as np
import pandas as pd
from Bio import Seq
import pyranges as pr
from pathlib import Path
from collections import defaultdict
def main(fasta_file, codon_list, input_file, output, aa_length):
    """Extract candidate micropeptide sequences from a GTF plus a genome fasta.

    Parameters:
        fasta_file: uncompressed genome fasta to pull nucleotide sequences from.
        codon_list: DNA start codons that may begin a micropeptide (e.g. ["ATG"]).
        input_file: GTF file with candidate regions (from the filter_samples script).
        output: output path; falls back to "Micropeptide_data.csv" when empty
            or when a file already exists at that path.
        aa_length: two-element [min, max]; peptides with min < len <= max are kept.

    Writes a tab-separated table of regions, peptide sequences and generated
    Protein_IDs to the chosen output path. Returns None.
    """
    results_name = (
        output
        if output and not os.path.isfile(output)
        else "Micropeptide_data.csv"
    )
    # BUG FIX: pathlib's Path.suffix returns the literal extension ".gz"
    # (no glob star), so the original comparison against "*.gz" never
    # matched and compressed input fell through to the grep/fasta code.
    if Path(fasta_file).suffix == ".gz":
        print(
            "Unable to process compressed fasta. Please uncompress before starting."
        )
    else:
        # Collect the chromosome names present in the fasta headers.
        chromosomes = (
            subprocess.run(
                f"grep '^>' {fasta_file} | cut -c 2- | sort -V | uniq",
                shell=True,
                capture_output=True,
            )
            .stdout.decode("utf-8")
            .strip()
            .split("\n")
        )
        # Keep only GTF entries on chromosomes that the fasta actually has.
        input_df = pr.read_gtf(input_file)
        input_df = input_df.subset(lambda df: df.Chromosome.isin(chromosomes))
        input_df = input_df.drop(["Source", "Score", "Frame", "transcript_id"])
        input_df.Chromosome = input_df.Chromosome.cat.remove_unused_categories()
        input_df.Fasta = pr.get_fasta(input_df, fasta_file)
        codons = codon_list
        peptides = list()
        # Translate every region in all three reading frames, starting at each
        # occurrence of a requested start codon, up to the first stop codon.
        for seq in input_df.Fasta:
            orfs = [seq[i:] for i in range(3)]
            local_peptides = list()
            for orf in orfs:
                kmers = defaultdict(list)
                for i in range(0, len(orf) - 2, 3):
                    kmers[orf[i : i + 3]].append(i)
                for c in codons:
                    local_peptides += [
                        str(Seq.translate(orf[i:], to_stop=True)) for i in kmers[c]
                    ]
            # Deduplicate peptides found from multiple start positions.
            peptides.append(list(set(local_peptides)))
        final_df = input_df.df
        final_df["Sequence"] = peptides
        final_df = final_df.explode("Sequence")
        # Drop empty translations, then keep peptides with min < length <= max.
        final_df = final_df[final_df["Sequence"].str.strip().astype(bool)]
        final_df = final_df[
            (final_df["Sequence"].str.len() <= aa_length[1])
            & (final_df["Sequence"].str.len() > aa_length[0])
        ].reset_index(drop=True)
        # Assign sequential Protein_1, Protein_2, ... identifiers.
        final_df["Protein_ID"] = "Protein_" + pd.Series(
            np.arange(1, len(final_df) + 1, 1)
        ).astype(str)
        final_df.to_csv(results_name, header=True, index=False, sep="\t")
if __name__ == "__main__":
    # Command-line interface: parse arguments and hand them to main().
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--fasta_file",
        "-ff",
        type=str,
        required=True,
        help="Provide the genome fasta file to extract the nucleotide sequence from. Must be uncompressed.",
    )
    # One or more start codons; defaults to the canonical ATG.
    parser.add_argument(
        "--codons",
        "-c",
        type=str,
        default=["ATG"],
        nargs="+",
        required=False,
        help="Provide the codons by which the micropeptides start in DNA sequence.",
    )
    parser.add_argument(
        "--input_file",
        "-i",
        type=str,
        required=False,
        help="Provide the GTF file obtained from running the filter_samples script.",
    )
    parser.add_argument(
        "--output",
        "-o",
        type=str,
        required=False,
        help="Output path to save the filtered dataset file. Default format is csv with tabular separation.",
    )
    # Two values are expected (min and max); main() indexes both ends.
    parser.add_argument(
        "--length",
        "-l",
        type=int,
        nargs="+",
        required=False,
        default=[0, 100],
        help="Peptides with more length than the specified value will be filtered out.",
    )
    args = parser.parse_args()
    main(
        args.fasta_file,
        args.codons,
        args.input_file,
        args.output,
        args.length,
    )
"alphanum_fraction": 0.577994429,
"author": null,
"avg_line_length": 28.046875,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1dec6566bfb7b7aa25fcc168faa08f4a0c363194",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "810a6d06316bb824fc51ca38118cdf4e9e80995c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Akazhiel/Micropep",
"max_forks_repo_path": "pepget.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "810a6d06316bb824fc51ca38118cdf4e9e80995c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Akazhiel/Micropep",
"max_issues_repo_path": "pepget.py",
"max_line_length": 109,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "810a6d06316bb824fc51ca38118cdf4e9e80995c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Akazhiel/Micropep",
"max_stars_repo_path": "pepget.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 841,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3590
} |
/-
Copyright (c) 2021 Joseph Myers. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Joseph Myers
-/
import linear_algebra.ray
import linear_algebra.determinant
/-!
# Orientations of modules
This file defines orientations of modules.
## Main definitions
* `orientation` is a type synonym for `module.ray` for the case where the module is that of
alternating maps from a module to its underlying ring. An orientation may be associated with an
alternating map or with a basis.
* `module.oriented` is a type class for a choice of orientation of a module that is considered
the positive orientation.
## Implementation notes
`orientation` is defined for an arbitrary index type, but the main intended use case is when
that index type is a `fintype` and there exists a basis of the same cardinality.
## References
* https://en.wikipedia.org/wiki/Orientation_(vector_space)
-/
noncomputable theory
open_locale big_operators
section ordered_comm_semiring
variables (R : Type*) [strict_ordered_comm_semiring R]
variables (M : Type*) [add_comm_monoid M] [module R M]
variables {N : Type*} [add_comm_monoid N] [module R N]
variables (ι : Type*)
/-- An orientation of a module, intended to be used when `ι` is a `fintype` with the same
cardinality as a basis. -/
abbreviation orientation := module.ray R (alternating_map R M R ι)
/-- A type class fixing an orientation of a module. -/
class module.oriented :=
(positive_orientation : orientation R M ι)
export module.oriented (positive_orientation)
variables {R M}
/-- An equivalence between modules implies an equivalence between orientations. -/
def orientation.map (e : M ≃ₗ[R] N) : orientation R M ι ≃ orientation R N ι :=
module.ray.map $ alternating_map.dom_lcongr R R ι R e
/-- `orientation.map` sends the ray of a nonzero alternating map `v` to the ray of
`v` precomposed with `e.symm` on every argument; this holds definitionally. -/
@[simp] lemma orientation.map_apply (e : M ≃ₗ[R] N) (v : alternating_map R M R ι)
  (hv : v ≠ 0) :
  orientation.map ι e (ray_of_ne_zero _ v hv) = ray_of_ne_zero _ (v.comp_linear_map e.symm)
    (mt (v.comp_linear_equiv_eq_zero_iff e.symm).mp hv) := rfl
/-- Mapping orientations along the identity equivalence is the identity. -/
@[simp] lemma orientation.map_refl :
  (orientation.map ι $ linear_equiv.refl R M) = equiv.refl _ :=
by rw [orientation.map, alternating_map.dom_lcongr_refl, module.ray.map_refl]
/-- The inverse of `orientation.map ι e` is `orientation.map ι e.symm`. -/
@[simp] lemma orientation.map_symm (e : M ≃ₗ[R] N) :
  (orientation.map ι e).symm = orientation.map ι e.symm := rfl
/-- A module is canonically oriented with respect to an empty index type. -/
@[priority 100] instance is_empty.oriented [nontrivial R] [is_empty ι] :
module.oriented R M ι :=
{ positive_orientation := ray_of_ne_zero R (alternating_map.const_linear_equiv_of_is_empty 1) $
alternating_map.const_linear_equiv_of_is_empty.injective.ne (by simp) }
/-- Over an empty index type, mapping along any equivalence preserves the canonical
positive orientation; this holds definitionally. -/
@[simp] lemma orientation.map_positive_orientation_of_is_empty [nontrivial R] [is_empty ι]
  (f : M ≃ₗ[R] N) :
  orientation.map ι f positive_orientation = positive_orientation :=
rfl
/-- Over an empty index type, `orientation.map` along any self-equivalence is the
identity: an alternating map taking no arguments is unchanged by precomposition. -/
@[simp] lemma orientation.map_of_is_empty [is_empty ι] (x : orientation R M ι) (f : M ≃ₗ[R] M) :
  orientation.map ι f x = x :=
begin
  induction x using module.ray.ind with g hg,
  rw orientation.map_apply,
  congr,
  ext i,
  rw alternating_map.comp_linear_map_apply,
  congr,
end
end ordered_comm_semiring
section ordered_comm_ring
variables {R : Type*} [strict_ordered_comm_ring R]
variables {M N : Type*} [add_comm_group M] [add_comm_group N] [module R M] [module R N]
/-- `orientation.map` commutes with negation of the orientation. -/
@[simp] protected lemma orientation.map_neg {ι : Type*} (f : M ≃ₗ[R] N)
  (x : orientation R M ι) :
  orientation.map ι f (-x) = - orientation.map ι f x :=
module.ray.map_neg _ x
namespace basis
variables {ι : Type*}
/-- The value of `orientation.map` when the index type has the cardinality of a basis, in terms
of `f.det`. -/
lemma map_orientation_eq_det_inv_smul [finite ι] (e : basis ι R M)
(x : orientation R M ι) (f : M ≃ₗ[R] M) : orientation.map ι f x = (f.det)⁻¹ • x :=
begin
casesI nonempty_fintype ι,
letI := classical.dec_eq ι,
induction x using module.ray.ind with g hg,
rw [orientation.map_apply, smul_ray_of_ne_zero, ray_eq_iff, units.smul_def,
(g.comp_linear_map ↑f.symm).eq_smul_basis_det e, g.eq_smul_basis_det e,
alternating_map.comp_linear_map_apply, alternating_map.smul_apply, basis.det_comp,
basis.det_self, mul_one, smul_eq_mul, mul_comm, mul_smul, linear_equiv.coe_inv_det],
end
variables [fintype ι] [decidable_eq ι]
/-- The orientation given by a basis. -/
protected def orientation [nontrivial R] (e : basis ι R M) : orientation R M ι :=
ray_of_ne_zero R _ e.det_ne_zero
/-- Transporting a basis along `f` transports its orientation by
`orientation.map ι f`. -/
lemma orientation_map [nontrivial R] (e : basis ι R M)
  (f : M ≃ₗ[R] N) : (e.map f).orientation = orientation.map ι f e.orientation :=
by simp_rw [basis.orientation, orientation.map_apply, basis.det_map']
/-- The orientation given by a basis derived using `units_smul`, in terms of the product of those
units. -/
lemma orientation_units_smul [nontrivial R] (e : basis ι R M) (w : ι → units R) :
(e.units_smul w).orientation = (∏ i, w i)⁻¹ • e.orientation :=
begin
rw [basis.orientation, basis.orientation, smul_ray_of_ne_zero, ray_eq_iff,
e.det.eq_smul_basis_det (e.units_smul w), det_units_smul_self, units.smul_def, smul_smul],
norm_cast,
simp
end
/-- Over an empty index type, every basis gives the canonical positive orientation. -/
@[simp] lemma orientation_is_empty [nontrivial R] [is_empty ι] (b : basis ι R M) :
  b.orientation = positive_orientation :=
begin
  congrm ray_of_ne_zero _ _ _,
  convert b.det_is_empty,
end
end basis
end ordered_comm_ring
section linear_ordered_comm_ring
variables {R : Type*} [linear_ordered_comm_ring R]
variables {M : Type*} [add_comm_group M] [module R M]
variables {ι : Type*}
namespace orientation
/-- A module `M` over a linearly ordered commutative ring has precisely two "orientations" with
respect to an empty index type. (Note that these are only orientations of `M` of in the conventional
mathematical sense if `M` is zero-dimensional.) -/
lemma eq_or_eq_neg_of_is_empty [nontrivial R] [is_empty ι] (o : orientation R M ι) :
o = positive_orientation ∨ o = - positive_orientation :=
begin
induction o using module.ray.ind with x hx,
dsimp [positive_orientation],
simp only [ray_eq_iff, same_ray_neg_swap],
rw same_ray_or_same_ray_neg_iff_not_linear_independent,
intros h,
let a : R := alternating_map.const_linear_equiv_of_is_empty.symm x,
have H : linear_independent R ![a, 1],
{ convert h.map' ↑alternating_map.const_linear_equiv_of_is_empty.symm (linear_equiv.ker _),
ext i,
fin_cases i;
simp [a] },
rw linear_independent_iff' at H,
simpa using H finset.univ ![1, -a] (by simp [fin.sum_univ_succ]) 0 (by simp),
end
end orientation
namespace basis
variables [fintype ι] [decidable_eq ι]
/-- The orientations given by two bases are equal if and only if the determinant of one basis
with respect to the other is positive. -/
lemma orientation_eq_iff_det_pos (e₁ e₂ : basis ι R M) :
e₁.orientation = e₂.orientation ↔ 0 < e₁.det e₂ :=
calc e₁.orientation = e₂.orientation ↔ same_ray R e₁.det e₂.det : ray_eq_iff _ _
... ↔ same_ray R (e₁.det e₂ • e₂.det) e₂.det : by rw [← e₁.det.eq_smul_basis_det e₂]
... ↔ 0 < e₁.det e₂ : same_ray_smul_left_iff_of_ne e₂.det_ne_zero (e₁.is_unit_det e₂).ne_zero
/-- Given a basis, any orientation equals the orientation given by that basis or its negation. -/
lemma orientation_eq_or_eq_neg (e : basis ι R M) (x : orientation R M ι) :
x = e.orientation ∨ x = -e.orientation :=
begin
induction x using module.ray.ind with x hx,
rw ← x.map_basis_ne_zero_iff e at hx,
rwa [basis.orientation, ray_eq_iff, neg_ray_of_ne_zero, ray_eq_iff, x.eq_smul_basis_det e,
same_ray_neg_smul_left_iff_of_ne e.det_ne_zero hx,
same_ray_smul_left_iff_of_ne e.det_ne_zero hx, lt_or_lt_iff_ne, ne_comm]
end
/-- Given a basis, an orientation equals the negation of that given by that basis if and only
if it does not equal that given by that basis. -/
lemma orientation_ne_iff_eq_neg (e : basis ι R M) (x : orientation R M ι) :
x ≠ e.orientation ↔ x = -e.orientation :=
⟨λ h, (e.orientation_eq_or_eq_neg x).resolve_left h,
λ h, h.symm ▸ (module.ray.ne_neg_self e.orientation).symm⟩
/-- Composing a basis with a linear equiv gives the same orientation if and only if the
determinant is positive. -/
lemma orientation_comp_linear_equiv_eq_iff_det_pos (e : basis ι R M) (f : M ≃ₗ[R] M) :
(e.map f).orientation = e.orientation ↔ 0 < (f : M →ₗ[R] M).det :=
by rw [orientation_map, e.map_orientation_eq_det_inv_smul, units_inv_smul, units_smul_eq_self_iff,
linear_equiv.coe_det]
/-- Composing a basis with a linear equiv gives the negation of that orientation if and only if
the determinant is negative. -/
lemma orientation_comp_linear_equiv_eq_neg_iff_det_neg (e : basis ι R M) (f : M ≃ₗ[R] M) :
(e.map f).orientation = -e.orientation ↔ (f : M →ₗ[R] M).det < 0 :=
by rw [orientation_map, e.map_orientation_eq_det_inv_smul, units_inv_smul, units_smul_eq_neg_iff,
linear_equiv.coe_det]
/-- Negating a single basis vector (represented using `units_smul`) negates the corresponding
orientation. -/
@[simp] lemma orientation_neg_single [nontrivial R] (e : basis ι R M) (i : ι) :
(e.units_smul (function.update 1 i (-1))).orientation = -e.orientation :=
begin
rw [orientation_units_smul, finset.prod_update_of_mem (finset.mem_univ _)],
simp
end
/-- Given a basis and an orientation, return a basis giving that orientation: either the original
basis, or one constructed by negating a single (arbitrary) basis vector. -/
def adjust_to_orientation [nontrivial R] [nonempty ι] (e : basis ι R M) (x : orientation R M ι) :
basis ι R M :=
by haveI := classical.dec_eq (orientation R M ι); exact if e.orientation = x then e else
(e.units_smul (function.update 1 (classical.arbitrary ι) (-1)))
/-- `adjust_to_orientation` gives a basis with the required orientation. -/
@[simp] lemma orientation_adjust_to_orientation [nontrivial R] [nonempty ι] (e : basis ι R M)
(x : orientation R M ι) : (e.adjust_to_orientation x).orientation = x :=
begin
rw adjust_to_orientation,
split_ifs with h,
{ exact h },
{ rw [orientation_neg_single, eq_comm, ←orientation_ne_iff_eq_neg, ne_comm],
exact h }
end
/-- Every basis vector from `adjust_to_orientation` is either that from the original basis or its
negation. -/
lemma adjust_to_orientation_apply_eq_or_eq_neg [nontrivial R] [nonempty ι] (e : basis ι R M)
(x : orientation R M ι) (i : ι) :
e.adjust_to_orientation x i = e i ∨ e.adjust_to_orientation x i = -(e i) :=
begin
rw adjust_to_orientation,
split_ifs with h,
{ simp },
{ by_cases hi : i = classical.arbitrary ι;
simp [units_smul_apply, hi] }
end
lemma det_adjust_to_orientation [nontrivial R] [nonempty ι] (e : basis ι R M)
(x : orientation R M ι) :
(e.adjust_to_orientation x).det = e.det ∨ (e.adjust_to_orientation x).det = - e.det :=
begin
dsimp [basis.adjust_to_orientation],
split_ifs,
{ left,
refl },
{ right,
simp [e.det_units_smul, ← units.coe_prod, finset.prod_update_of_mem] }
end
@[simp] lemma abs_det_adjust_to_orientation [nontrivial R] [nonempty ι] (e : basis ι R M)
(x : orientation R M ι) (v : ι → M) :
|(e.adjust_to_orientation x).det v| = |e.det v| :=
begin
cases e.det_adjust_to_orientation x with h h;
simp [h]
end
end basis
end linear_ordered_comm_ring
section linear_ordered_field
variables {R : Type*} [linear_ordered_field R]
variables {M : Type*} [add_comm_group M] [module R M]
variables {ι : Type*}
namespace orientation
variables [fintype ι] [_i : finite_dimensional R M]
open finite_dimensional
include _i
/-- If the index type has cardinality equal to the finite dimension, any two orientations are
equal or negations. -/
lemma eq_or_eq_neg (x₁ x₂ : orientation R M ι) (h : fintype.card ι = finrank R M) :
x₁ = x₂ ∨ x₁ = -x₂ :=
begin
have e := (fin_basis R M).reindex (fintype.equiv_fin_of_card_eq h).symm,
letI := classical.dec_eq ι,
rcases e.orientation_eq_or_eq_neg x₁ with h₁|h₁;
rcases e.orientation_eq_or_eq_neg x₂ with h₂|h₂;
simp [h₁, h₂]
end
/-- If the index type has cardinality equal to the finite dimension, an orientation equals the
negation of another orientation if and only if they are not equal. -/
lemma ne_iff_eq_neg (x₁ x₂ : orientation R M ι) (h : fintype.card ι = finrank R M) :
x₁ ≠ x₂ ↔ x₁ = -x₂ :=
⟨λ hn, (eq_or_eq_neg x₁ x₂ h).resolve_left hn, λ he, he.symm ▸ (module.ray.ne_neg_self x₂).symm⟩
/-- The value of `orientation.map` when the index type has cardinality equal to the finite
dimension, in terms of `f.det`. -/
lemma map_eq_det_inv_smul (x : orientation R M ι) (f : M ≃ₗ[R] M)
(h : fintype.card ι = finrank R M) :
orientation.map ι f x = (f.det)⁻¹ • x :=
begin
have e := (fin_basis R M).reindex (fintype.equiv_fin_of_card_eq h).symm,
exact e.map_orientation_eq_det_inv_smul x f
end
omit _i
/-- If the index type has cardinality equal to the finite dimension, composing an alternating
map with the same linear equiv on each argument gives the same orientation if and only if the
determinant is positive. -/
lemma map_eq_iff_det_pos (x : orientation R M ι) (f : M ≃ₗ[R] M)
(h : fintype.card ι = finrank R M) :
orientation.map ι f x = x ↔ 0 < (f : M →ₗ[R] M).det :=
begin
casesI is_empty_or_nonempty ι,
{ have H : finrank R M = 0,
{ refine h.symm.trans _,
convert fintype.card_of_is_empty,
apply_instance },
simp [linear_map.det_eq_one_of_finrank_eq_zero H] },
have H : 0 < finrank R M,
{ rw ← h,
exact fintype.card_pos },
haveI : finite_dimensional R M := finite_dimensional_of_finrank H,
rw [map_eq_det_inv_smul _ _ h, units_inv_smul, units_smul_eq_self_iff, linear_equiv.coe_det]
end
/-- If the index type has cardinality equal to the finite dimension, composing an alternating
map with the same linear equiv on each argument gives the negation of that orientation if and
only if the determinant is negative. -/
lemma map_eq_neg_iff_det_neg (x : orientation R M ι) (f : M ≃ₗ[R] M)
(h : fintype.card ι = finrank R M) :
orientation.map ι f x = -x ↔ (f : M →ₗ[R] M).det < 0 :=
begin
casesI is_empty_or_nonempty ι,
{ have H : finrank R M = 0,
{ refine h.symm.trans _,
convert fintype.card_of_is_empty,
apply_instance },
simp [linear_map.det_eq_one_of_finrank_eq_zero H, module.ray.ne_neg_self x] },
have H : 0 < finrank R M,
{ rw ← h,
exact fintype.card_pos },
haveI : finite_dimensional R M := finite_dimensional_of_finrank H,
rw [map_eq_det_inv_smul _ _ h, units_inv_smul, units_smul_eq_neg_iff, linear_equiv.coe_det]
end
include _i
/-- If the index type has cardinality equal to the finite dimension, a basis with the given
orientation. -/
def some_basis [nonempty ι] [decidable_eq ι] (x : orientation R M ι)
(h : fintype.card ι = finrank R M) :
basis ι R M :=
((fin_basis R M).reindex (fintype.equiv_fin_of_card_eq h).symm).adjust_to_orientation x
/-- `some_basis` gives a basis with the required orientation. -/
@[simp] lemma some_basis_orientation [nonempty ι] [decidable_eq ι] (x : orientation R M ι)
(h : fintype.card ι = finrank R M) : (x.some_basis h).orientation = x :=
basis.orientation_adjust_to_orientation _ _
end orientation
end linear_ordered_field
| {
"alphanum_fraction": null,
"author": "leanprover-community",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/linear_algebra/orientation.lean",
"reason": null,
"repo": "mathlib",
"save_path": "github-repos/lean/leanprover-community-mathlib",
"sha": "5e526d18cea33550268dcbbddcb822d5cde40654",
"size": null
} |
[STATEMENT]
lemma in_conc_True[iff]:
"\<And>L R. fin (conc L R) (True#p) = False"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>L R. fin (RegExp2NAe.conc L R) (True # p) = False
[PROOF STEP]
by (simp add:conc_def) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Functional-Automata_RegExp2NAe",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 106,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
# Call Syntax
# Construct a Citizen via the @shared_var macro using typed keyword-style
# field declarations with default values.
person = @shared_var Citizen(
    name::String = "Amin",
    number::Float64 = 20.0,
)
# Plain constructor call with keyword arguments for comparison.
person2 = Citizen(name = "Not-Amin", number = 1)
@test person.name == person2.name
# NOTE(review): this compares person2.number with itself and is trivially
# true; presumably one side was meant to be person.number -- confirm.
@test person2.number == person2.number
| {
"alphanum_fraction": 0.6024096386,
"author": null,
"avg_line_length": 22.6363636364,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "1388c6ae193d553967a7d92e48fe12579fdf515e",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-01-02T16:03:06.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-02T16:03:06.000Z",
"max_forks_repo_head_hexsha": "7f61959c82a7d4036cc96fa78df8a914621a06e6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "aminya/VarStructs.jl",
"max_forks_repo_path": "test/@shared_var.jl",
"max_issues_count": 5,
"max_issues_repo_head_hexsha": "7f61959c82a7d4036cc96fa78df8a914621a06e6",
"max_issues_repo_issues_event_max_datetime": "2022-01-02T13:40:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-04-29T11:23:31.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "aminya/VarStructs.jl",
"max_issues_repo_path": "test/@shared_var.jl",
"max_line_length": 48,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "7f61959c82a7d4036cc96fa78df8a914621a06e6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "aminya/VarStructs.jl",
"max_stars_repo_path": "test/@shared_var.jl",
"max_stars_repo_stars_event_max_datetime": "2022-01-02T11:00:13.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-08-10T01:30:44.000Z",
"num_tokens": 66,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 249
} |
REBOL [
    Title: "Red compile error test script"
    Author: "Peter W A Wood"
    File: %compile-error-test.r
    Rights: "Copyright (C) 2013-2015 Peter W A Wood. All rights reserved."
    License: "BSD-3 - https://github.com/red/red/blob/origin/BSD-3-License.txt"
]
~~~start-file~~~ "Red compile errors"
; Regression test for issue #608: an unterminated string literal must be
; reported as a syntax error by the Red compiler rather than accepted.
--test-- "ce-1 issue #608"
--compile-this-red {s: "open ended string}
--assert-msg? "*** Syntax Error: Invalid string! value"
~~~end-file~~~
| {
"alphanum_fraction": 0.6541019956,
"author": null,
"avg_line_length": 28.1875,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "48d3d8375eecee54ea6d7397dade63a048a1770b",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-07-06T00:26:05.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-07-06T00:26:05.000Z",
"max_forks_repo_head_hexsha": "e80f0759eaef05e18d65426cf25b7e5727a6737e",
"max_forks_repo_licenses": [
"BSL-1.0",
"BSD-3-Clause"
],
"max_forks_repo_name": "0branch/Red",
"max_forks_repo_path": "tests/source/compiler/compile-error-test.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e80f0759eaef05e18d65426cf25b7e5727a6737e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSL-1.0",
"BSD-3-Clause"
],
"max_issues_repo_name": "0branch/Red",
"max_issues_repo_path": "tests/source/compiler/compile-error-test.r",
"max_line_length": 76,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "75978498db2599d20235b6135c8bf6ac5c46ec53",
"max_stars_repo_licenses": [
"BSL-1.0",
"BSD-3-Clause"
],
"max_stars_repo_name": "StephaneVeneri/red",
"max_stars_repo_path": "tests/source/compiler/compile-error-test.r",
"max_stars_repo_stars_event_max_datetime": "2021-10-31T16:55:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-09-15T21:23:06.000Z",
"num_tokens": 143,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 451
} |
from bayesianpy.network import NetworkFactory
class Selector:
    """Base class for stepping through variable combinations in feature selection.

    Holds the target variable plus the continuous and discrete variable names;
    subclasses override `next_combination` / `_get_vars` to define which
    combinations are actually visited.
    """

    def __init__(self, target, continuous=None, discrete=None):
        """Create a selector.

        :param target: name of the target variable; must appear in
            `continuous` or `discrete` (otherwise the index lookup below
            raises ValueError).
        :param continuous: iterable of continuous variable names.
        :param discrete: iterable of discrete variable names.
        """
        # Mutable default arguments (`continuous=[], discrete=[]`) replaced
        # with None; behaviour is unchanged because the originals were
        # always copied with list().
        self.target = target
        self._continuous = list(continuous) if continuous is not None else []
        self._discrete = list(discrete) if discrete is not None else []
        self._index = -1
        # Continuous variables first, then discrete -- the index tests
        # below rely on this ordering.
        self._all_variables = self._continuous + self._discrete
        # Position of the target within the combined list.
        self._tix = self._all_variables.index(target)

    def _c_length(self):
        """Number of continuous variables."""
        return len(self._continuous)

    def _d_length(self):
        """Number of discrete variables."""
        return len(self._discrete)

    def _is_continuous(self, ix):
        """True if index `ix` into `_all_variables` is a continuous variable."""
        return ix < self._c_length()

    def _is_discrete(self, ix):
        """True if index `ix` into `_all_variables` is a discrete variable."""
        return ix >= self._c_length()

    def next_combination(self):
        """Advance to the next combination; return False when exhausted."""
        self._index += 1
        if self._index >= len(self._all_variables):
            return False
        return True
class UnivariateSelector(Selector):
    """Selector that pairs each variable in turn with the target (univariate screening)."""

    def __init__(self, target, continuous=None, discrete=None):
        """Same arguments as `Selector`; mutable default arguments replaced with None."""
        Selector.__init__(self, target,
                          continuous if continuous is not None else [],
                          discrete if discrete is not None else [])

    def _get_vars(self):
        # Current candidate variable plus the target.
        return [self._all_variables[self._index], self._all_variables[self._tix]]

    def get_discrete_variables(self):
        """Yield the discrete variables of the current combination."""
        for v in self._get_vars():
            if v in self._discrete:
                yield v

    def get_continuous_variables(self):
        """Yield the continuous variables of the current combination."""
        for v in self._get_vars():
            if v in self._continuous:
                yield v

    def get_key_variables(self):
        """Return the current combination without the target itself."""
        variables = self._get_vars()
        variables.pop(variables.index(self.target))
        return variables

    def next_combination(self):
        """Advance to the next variable; return False when exhausted."""
        self._index += 1
        if self._index >= len(self._all_variables):
            return False
        # Skip the combination that would pair the target with itself.
        v = self._get_vars()
        if len(np.unique(v)) < len(v):
            return self.next_combination()
        return True
import numpy as np
import itertools
class CartesianProductSelector(Selector):
    """Selector over all n-tuples of distinct, non-target variable indices."""

    def __init__(self, target, continuous=None, discrete=None, n=2):
        """Build every ordered n-tuple of variable indices up front.

        :param n: tuple size; must leave room for at least one non-target
            variable, i.e. n <= len(all variables) - 1.
        """
        # Mutable default arguments replaced with None (Selector copies them).
        Selector.__init__(self, target,
                          continuous if continuous is not None else [],
                          discrete if discrete is not None else [])
        self._total = len(self._all_variables)
        if n > self._total - 1:
            raise ValueError("n needs to be less or equal to the total length of the variable array - 1")
        # Cartesian product of the index range with itself n times.  The
        # set() wrapper is kept from the original; itertools.product already
        # yields unique tuples, so its only effect is to scramble ordering.
        list_of_lists = [range(self._total) for _ in range(n)]
        self._combinations = list(set(itertools.product(*list_of_lists)))
        # NOTE: Selector.__init__ already sets self._tix to the same value;
        # kept for parity with the original implementation.
        self._tix = self._all_variables.index(target)

    def _get_vars(self):
        """Return the variable names of the current index tuple, target always included."""
        r = []
        for ix in self._indexes:
            r.append(self._all_variables[ix])
        if self.target not in r:
            r.append(self.target)
        return r

    def get_total_combinations(self):
        """Count combinations that survive filtering (distinct indices, target-free)."""
        return len([item for item in self._combinations if self._filter_combination(item)])

    def _filter_combination(self, combination):
        """Reject tuples with repeated indices or tuples containing the target."""
        if len(np.unique(combination)) != len(combination):
            return False
        if self._tix in combination:
            return False
        return True

    def get_key_variables(self):
        """Return the current combination without the target itself."""
        variables = self._get_vars()
        variables.pop(variables.index(self.target))
        return variables

    def get_discrete_variables(self):
        """Yield the discrete variables of the current combination."""
        for v in self._get_vars():
            if v in self._discrete:
                yield v

    def get_continuous_variables(self):
        """Yield the continuous variables of the current combination."""
        for v in self._get_vars():
            if v in self._continuous:
                yield v

    def next_combination(self):
        """Advance to the next valid combination; return False when exhausted."""
        self._index += 1
        if self._index >= len(self._combinations):
            return False
        self._indexes = list(self._combinations[self._index])
        # Recurse past filtered-out tuples (duplicates / target-containing).
        if not self._filter_combination(self._indexes):
            return self.next_combination()
        return True
#l = CartesianProductSelector('c1', continuous=['c1', 'c2', 'c3'], discrete=['d1','d2','d3'], n=4)
#from collections import Counter
#c = Counter()
#while l.next_combination():
#
# for i in [v for v in l.get_continuous_variables()]:
# c[i] += 1
# for i in [v for v in l.get_discrete_variables()]:
# c[i] += 1
class LeaveSomeOutSelector(Selector):
    """Selector that drops a sliding window of `some` variables per combination."""

    def __init__(self, target, continuous=None, discrete=None, some=1):
        """:param some: number of variables left out per combination; must not
        exceed the total number of variables."""
        # Mutable default arguments replaced with None (Selector copies them).
        Selector.__init__(self, target,
                          continuous if continuous is not None else [],
                          discrete if discrete is not None else [])
        self._some = some
        if some > len(self._all_variables):
            raise ValueError("Some cannot be greater than the total number of columns")

    def _get_vars(self):
        """Return all variables except the current left-out window (target always kept)."""
        variables = list(self._all_variables)
        start_index = self._index
        # When the window would run past the end of the list it wraps around;
        # the wraparound arithmetic below is preserved verbatim from the
        # original -- NOTE(review): verify it is the intended behaviour.
        if self._index + self._some > len(variables):
            start_index = abs(len(variables) - (self._index + self._some))
            r = variables[start_index : self._index]
        else:
            r = variables[ : start_index] + variables[start_index + self._some:]
        if self.target not in r:
            r.append(self.target)
        return r

    def get_discrete_variables(self):
        """Yield the discrete variables of the current combination."""
        if self._index == -1:
            raise ValueError("Call next_combination first")
        for v in self._get_vars():
            if v in self._discrete:
                yield v

    def get_continuous_variables(self):
        """Yield the continuous variables of the current combination."""
        if self._index == -1:
            raise ValueError("Call next_combination first")
        for v in self._get_vars():
            if v in self._continuous:
                yield v

    def get_key_variables(self):
        """Return the variables left out of the current combination."""
        if self._index == -1:
            raise ValueError("Call next_combination first")
        return list(set(self._all_variables) - set(self._get_vars()))
#l = LeaveSomeOutSelector('c1', continuous=['c1', 'c2', 'c3'], discrete=['d1', 'd2', 'd3'], some=3)
#while l.next_combination():
# print(l._index)
# print(list(l.get_continuous_variables()))
# print(list(l.get_discrete_variables()))
class IterativeSelector(Selector):
    """Selector that takes growing prefixes (or suffixes) of a fixed ordering."""

    def __init__(self, target, continuous=None, discrete=None, ordering=None, addition=True):
        """:param ordering: fixed order in which variables are introduced.
        :param addition: True takes a growing prefix of `ordering`;
            False takes a growing suffix.
        """
        # Mutable default arguments replaced with None.  The original stored
        # the shared `ordering=[]` default object directly -- a genuine
        # shared-state hazard fixed here without changing behaviour for
        # callers that pass their own list.
        Selector.__init__(self, target,
                          continuous if continuous is not None else [],
                          discrete if discrete is not None else [])
        self._ordering = ordering if ordering is not None else []
        self._addition = addition

    def _get_vars(self):
        """Return the current prefix/suffix of the ordering, target always included."""
        variables = list(self._ordering)
        start_index = self._index + 1
        if self._addition:
            r = variables[:start_index]
        else:
            r = variables[-start_index:]
        if self.target not in r:
            r.append(self.target)
        return r

    def get_discrete_variables(self):
        """Yield the discrete variables of the current combination."""
        if self._index == -1:
            raise ValueError("Call next_combination first")
        for v in self._get_vars():
            if v in self._discrete:
                yield v

    def get_continuous_variables(self):
        """Yield the continuous variables of the current combination."""
        if self._index == -1:
            raise ValueError("Call next_combination first")
        for v in self._get_vars():
            if v in self._continuous:
                yield v

    def get_key_variables(self):
        """Return the current combination without the target itself."""
        if self._index == -1:
            raise ValueError("Call next_combination first")
        variables = self._get_vars()
        variables.pop(variables.index(self.target))
        return variables

    def next_combination(self):
        """Advance one step through the ordering; return False when exhausted."""
        self._index += 1
        if self._index >= len(self._ordering):
            return False
        return True
class ForwardFirstGreedySelector(IterativeSelector):
    """Greedy forward selection: take ever-larger prefixes of `ordering`."""

    def __init__(self, target, continuous=None, discrete=None, ordering=None):
        # Mutable defaults replaced with None; normalised here so this class
        # also works against the original IterativeSelector signature.
        IterativeSelector.__init__(self, target,
                                   continuous=continuous if continuous is not None else [],
                                   discrete=discrete if discrete is not None else [],
                                   ordering=ordering if ordering is not None else [],
                                   addition=True)
class BackFirstGreedySelector(IterativeSelector):
    """Greedy backward selection: take ever-larger suffixes of `ordering`."""

    def __init__(self, target, continuous=None, discrete=None, ordering=None):
        # Mutable defaults replaced with None; normalised here so this class
        # also works against the original IterativeSelector signature.
        IterativeSelector.__init__(self, target,
                                   continuous=continuous if continuous is not None else [],
                                   discrete=discrete if discrete is not None else [],
                                   ordering=ordering if ordering is not None else [],
                                   addition=False)
from sklearn.metrics import r2_score
def continuous_score(x, y):
    """R^2 of predictions `y` against actuals `x`, averaged uniformly over outputs."""
    return r2_score(x, y, multioutput='uniform_average')
from sklearn.metrics import accuracy_score
def discrete_score(x, y):
    """Accuracy of the 'MaxStateLikelihood' entry of `y` against actuals `x`.

    Assumes `y` is a mapping/DataFrame exposing a 'MaxStateLikelihood'
    column -- TODO confirm against the prediction output format.
    """
    return accuracy_score(x, y['MaxStateLikelihood'])
from sklearn.metrics import confusion_matrix
def fmeasure_score(predicted, actual):
    """Sensitivity/specificity/F-measure from a binary confusion matrix.

    NOTE(review): sklearn's binary confusion_matrix flattens as
    (tn, fp, fn, tp), while _fmeasure is declared as (tp, fp, fn, tn) --
    the positional splat below may swap the roles of the two classes;
    verify before trusting the labels of the returned measures.
    """
    return _fmeasure(*confusion_matrix(predicted, actual).flatten())
def _fmeasure(tp, fp, fn, tn):
"""Computes effectiveness measures given a confusion matrix."""
specificity = tn / (tn + fp)
sensitivity = tp / (tp + fn)
fmeasure = 2 * (specificity * sensitivity) / (specificity + sensitivity)
return { 'sensitivity': sensitivity, 'specificity': specificity, 'fmeasure': fmeasure }
import numpy as np
from collections import defaultdict
def summarise_results(results, ascending=True):
    """Aggregate per-combination scores down to per-variable totals.

    Each key of `results` is a comma-separated list of variable names mapped
    to a list of scores.  The mean score of every combination is credited to
    each variable it contains, and (variable, total) pairs are returned
    sorted by total (ascending by default).
    """
    totals = defaultdict(float)
    for combo_key, scores in results.items():
        mean_score = float(np.average(scores))
        for variable in combo_key.split(","):
            totals[variable] += mean_score
    return sorted(totals.items(), key=lambda item: item[1], reverse=not ascending)
def summarise_best_combinations(results):
    """Return (combination key, mean score) pairs, highest-scoring first."""
    means = [(combo_key, np.average(scores)) for combo_key, scores in results.items()]
    return sorted(means, key=lambda item: item[1], reverse=True)
from collections import OrderedDict
from sklearn.cross_validation import KFold
class VariableSelectionWrapper:
def __init__(self, selector, score_func, logger):
self._selector = selector
self._score_func = score_func
self._logger = logger
self._models = []
def pick_vars(self, data, n_folds=3):
kf = KFold(data.shape[0], n_folds=n_folds, shuffle=True)
results = OrderedDict()
network_factory = NetworkFactory(data, self._logger)
self._logger.debug("Written dataset")
i = 0
while self._selector.next_combination():
self._logger.debug("Combination: {}".format( ",".join(self._selector.get_key_variables())))
network = network_factory.create(discrete=list(self._selector.get_discrete_variables()), continuous=list(self._selector.get_continuous_variables()))
key = ",".join(self._selector.get_key_variables())
results.update({ key: [] })
for k, (train_indexes, test_indexes) in enumerate(kf):
_, X_test = data.ix[train_indexes], data.ix[test_indexes]
trained_model = network_factory.create_trained_model(network, train_indexes)
self._models.append(trained_model)
r = trained_model.predict(test_indexes, targets=[self._selector.target])
score = self._score_func(X_test[self._selector.target], r[self._selector.target])
self._logger.debug("Score i={}, k={}: {}".format( i, k, score))
results[key].append(score)
i += 1
return results | {
"alphanum_fraction": 0.6360615268,
"author": null,
"avg_line_length": 31.7228915663,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0b1609756709f590f373cf3d374183b4b4b2171b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 5,
"max_forks_repo_forks_event_max_datetime": "2021-12-22T21:26:22.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-03-09T04:53:53.000Z",
"max_forks_repo_head_hexsha": "a5f69ac6153b010051019e442a27274e5f38eed2",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "rohankumardubey/bayesianpy",
"max_forks_repo_path": "bayesianpy/ml.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a5f69ac6153b010051019e442a27274e5f38eed2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "rohankumardubey/bayesianpy",
"max_issues_repo_path": "bayesianpy/ml.py",
"max_line_length": 160,
"max_stars_count": 19,
"max_stars_repo_head_hexsha": "a5f69ac6153b010051019e442a27274e5f38eed2",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "rohankumardubey/bayesianpy",
"max_stars_repo_path": "bayesianpy/ml.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-02T19:29:26.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-03-29T14:55:30.000Z",
"num_tokens": 2352,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10532
} |
[STATEMENT]
lemma Subset_fresh_iff [simp]: "a \<sharp> t SUBS u \<longleftrightarrow> a \<sharp> t \<and> a \<sharp> u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<sharp> t SUBS u = (a \<sharp> t \<and> a \<sharp> u)
[PROOF STEP]
apply (rule obtain_fresh [where x="(t, u)"])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>aa. atom aa \<sharp> (t, u) \<Longrightarrow> a \<sharp> t SUBS u = (a \<sharp> t \<and> a \<sharp> u)
[PROOF STEP]
apply (subst Subset.simps, auto)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Incompleteness_Predicates",
"hexsha": null,
"include": null,
"lang": null,
"length": 3,
"llama_tokens": 236,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
#%% Import modules
"""Train and evaluate a CNN neural decoder session by session.

For every session under ./Data_I the model is trained, the held-out test
set is decoded in batches, the predictions are de-normalized with the
per-session statistics, and R^2 scores for position / velocity /
acceleration (x and y) are printed and saved to R2_CNN.npy.
"""
import time
from get_data import get_data
import model as m
from train import train
from comp import comp
import numpy as np
from os import listdir
from sklearn.metrics import r2_score
import torch
import torch.optim as optim
import torch.nn as nn
#%%
tStart = time.time()  # wall-clock start; total runtime reported at the end
#%% Path
dpath = './Data_I'
data_list = listdir(dpath)
#%% Selection session
single_session = bool(0)
# 1: load a single session; 0: load data from multiple sessions
if single_session:
    # Load a single session
    session = 0
    # Index of the session to load, starting from 0 (37 sessions in total)
    data_list = data_list[session:session+1]
else:
    # Load multiple sessions
    session_start = 0
    # Inclusive range of session indices to load
    session_end = 36
    data_list = data_list[session_start:session_end+1]
data_list.sort()
print('\n---------------')
print('Num of session: ', len(data_list))
#%% Get data
print('\nGet data...')
DATA, NOR = get_data(dpath, data_list)
#%% Model and parameters
model = m.CNN()
Epoch = 30
lr = 1e-4 # Learning rate
single_optim = optim.Adam(model.parameters(), lr=lr) # Optimizer
loss_MSE = nn.MSELoss() # Loss function
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#%% For test
# Test-set inputs/targets and per-session normalization statistics.
FR_ORDER_TEST = DATA["FR_ORDER_TEST"]
POS_TEST = DATA["POS_TEST"]
VEL_TEST = DATA["VEL_TEST"]
ACC_TEST = DATA["ACC_TEST"]
POS_MAX = NOR["POS_MAX"]
POS_MIN = NOR["POS_MIN"]
POS_MEAN = NOR["POS_MEAN"]
VEL_MAX = NOR["VEL_MAX"]
VEL_MIN = NOR["VEL_MIN"]
VEL_MEAN = NOR["VEL_MEAN"]
ACC_MAX = NOR["ACC_MAX"]
ACC_MIN = NOR["ACC_MIN"]
ACC_MEAN = NOR["ACC_MEAN"]
# Per-session de-normalized predictions and R^2 scores.
Pred_POS_X = []
Pred_POS_Y = []
Pred_VEL_X = []
Pred_VEL_Y = []
Pred_ACC_X = []
Pred_ACC_Y = []
R2_POS_X = []
R2_POS_Y = []
R2_VEL_X = []
R2_VEL_Y = []
R2_ACC_X = []
R2_ACC_Y = []
#%% Training
for ss in range(len(data_list)):
    # NOTE(review): this aliases (does not copy) the model, so weights carry
    # over from one session to the next — confirm that is intended.
    single_model = model
    print('\nSession_' + str(ss+1))
    print('-----Training-----')
    train(DATA, ss, Epoch, single_model, single_optim, loss_MSE)
    #%% Testing
    print('-----Testing-----')
    test_unsort_data = torch.from_numpy(FR_ORDER_TEST[ss]).type(torch.FloatTensor)
    test_label = np.concatenate((POS_TEST[ss], VEL_TEST[ss], ACC_TEST[ss]), axis=1)
    test_label = torch.from_numpy(test_label).type(torch.FloatTensor)
    single_test_dataset = torch.utils.data.TensorDataset(test_unsort_data, test_label)
    single_test_dataloader = torch.utils.data.DataLoader(dataset = single_test_dataset, batch_size=32, shuffle=False)
    # Reload the weights written during training — assumes train() saved
    # 'model.pth'; TODO confirm against train().
    model_train = torch.load('model.pth')
    with torch.no_grad():
        for n_ts, (Data_ts, Label_ts) in enumerate (single_test_dataloader):
            fr_data = Data_ts
            fr_data = fr_data.to(device)
            out_pos, out_vel, out_acc = model_train(fr_data)
            out_vel = out_vel.cpu().data.numpy()
            out_acc = out_acc.cpu().data.numpy()
            out_pos = out_pos.cpu().data.numpy()
            # Accumulate batch outputs into whole-session arrays.
            if n_ts == 0:
                pred_vel = out_vel
                pred_acc = out_acc
                pred_pos = out_pos
            else:
                pred_vel = np.concatenate((pred_vel, out_vel), axis=0)
                pred_acc = np.concatenate((pred_acc, out_acc), axis=0)
                pred_pos = np.concatenate((pred_pos, out_pos), axis=0)
    # R2 and Pred
    pred_px , pred_py = pred_pos[:, 0], pred_pos[:, 1]
    real_px , real_py = POS_TEST[ss][:, 0], POS_TEST[ss][:, 1]
    pred_vx , pred_vy = pred_vel[:, 0], pred_vel[:, 1]
    real_vx , real_vy = VEL_TEST[ss][:, 0], VEL_TEST[ss][:, 1]
    pred_ax , pred_ay = pred_acc[:, 0], pred_acc[:, 1]
    real_ax , real_ay = ACC_TEST[ss][:, 0], ACC_TEST[ss][:, 1]
    # De-normalize predictions: scaled by (max - min), shifted by mean.
    # NOTE(review): the R^2 calls below compare these de-normalized
    # predictions against the raw *_TEST targets — verify both sides are in
    # the same scale, otherwise the scores mix normalized and physical units.
    Pred_POS_X.append(pred_px *(POS_MAX[ss][0,0] - POS_MIN[ss][0,0]) + POS_MEAN[ss][0,0])
    Pred_POS_Y.append(pred_py *(POS_MAX[ss][0,1] - POS_MIN[ss][0,1]) + POS_MEAN[ss][0,1])
    Pred_VEL_X.append(pred_vx *(VEL_MAX[ss][0,0] - VEL_MIN[ss][0,0]) + VEL_MEAN[ss][0,0])
    Pred_VEL_Y.append(pred_vy *(VEL_MAX[ss][0,1] - VEL_MIN[ss][0,1]) + VEL_MEAN[ss][0,1])
    Pred_ACC_X.append(pred_ax *(ACC_MAX[ss][0,0] - ACC_MIN[ss][0,0]) + ACC_MEAN[ss][0,0])
    Pred_ACC_Y.append(pred_ay *(ACC_MAX[ss][0,1] - ACC_MIN[ss][0,1]) + ACC_MEAN[ss][0,1])
    print('r2_px :', np.round(r2_score(real_px, Pred_POS_X[ss]),4))
    print('r2_py :', np.round(r2_score(real_py, Pred_POS_Y[ss]),4))
    print('r2_vx :', np.round(r2_score(real_vx, Pred_VEL_X[ss]),4))
    print('r2_vy :', np.round(r2_score(real_vy, Pred_VEL_Y[ss]),4))
    print('r2_ax :', np.round(r2_score(real_ax, Pred_ACC_X[ss]),4))
    print('r2_ay :', np.round(r2_score(real_ay, Pred_ACC_Y[ss]),4))
    R2_POS_X.append(np.round(r2_score(real_px, Pred_POS_X[ss]),4))
    R2_POS_Y.append(np.round(r2_score(real_py, Pred_POS_Y[ss]),4))
    R2_VEL_X.append(np.round(r2_score(real_vx, Pred_VEL_X[ss]),4))
    R2_VEL_Y.append(np.round(r2_score(real_vy, Pred_VEL_Y[ss]),4))
    R2_ACC_X.append(np.round(r2_score(real_ax, Pred_ACC_X[ss]),4))
    R2_ACC_Y.append(np.round(r2_score(real_ay, Pred_ACC_Y[ss]),4))
# Persist the six R^2 arrays, in a fixed order, into one .npy stream.
with open('R2_CNN.npy', 'wb') as f:
    np.save(f, R2_POS_X)
    np.save(f, R2_POS_Y)
    np.save(f, R2_VEL_X)
    np.save(f, R2_VEL_Y)
    np.save(f, R2_ACC_X)
    np.save(f, R2_ACC_Y)
#%% Compare
# =============================================================================
# with open('R2_CNN.npy', 'rb') as f:
#     R2_POS_X = np.load(f)
#     R2_POS_Y = np.load(f)
#     R2_VEL_X = np.load(f)
#     R2_VEL_Y = np.load(f)
#     R2_ACC_X = np.load(f)
#     R2_ACC_Y = np.load(f)
# =============================================================================
# The comparison plot only makes sense when all 37 sessions were processed.
if len(data_list) != 37:
    print("Only used in all sessions")
else:
    comp(R2_POS_X, R2_POS_Y, R2_VEL_X, R2_VEL_Y, R2_ACC_X, R2_ACC_Y)
#%%
tEnd = time.time()
print ("\n" + "It cost {:.4f} sec" .format(tEnd-tStart))
"alphanum_fraction": 0.595107339,
"author": null,
"avg_line_length": 33.0164835165,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "52212593f934c160a224bbc4225bbf075418a51c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "af8849d3bab160cce7e51621a3c21fd3e8bf764f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Abner0627/nc_lab_abner",
"max_forks_repo_path": "main.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "af8849d3bab160cce7e51621a3c21fd3e8bf764f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Abner0627/nc_lab_abner",
"max_issues_repo_path": "main.py",
"max_line_length": 117,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "af8849d3bab160cce7e51621a3c21fd3e8bf764f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Abner0627/nc_lab_abner",
"max_stars_repo_path": "main.py",
"max_stars_repo_stars_event_max_datetime": "2020-10-18T02:11:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-16T14:12:31.000Z",
"num_tokens": 1841,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6009
} |
import .QueueUnion: Range, Queues
import Printf: @printf

const EPS = 1e-10   # slopes with |slope| <= EPS are treated as numerically zero
const DEBUG = false # compile-time switch for the @static trace printouts

# Deprecated entry points: the old clip_front/clip_back pairs are expressed
# in terms of the unified `clip` with a Val(true)/Val(false) direction flag.
@deprecate(
    clip_front(elements, pqs, i, slope, offset, t),
    clip(elements, Ref(pqs, i), +slope, +offset - t, Val(true))
)
@deprecate(
    clip_back(elements, pqs, i, slope, offset, t),
    clip(elements, Ref(pqs, i), -slope, -offset + t, Val(false))
)
@deprecate(
    clip_front(qs::Queues{Event}, i::I, slope::F, offset::F, t::F) where {F,I},
    clip(qs, i, slope, offset - t, Val(true))
)
@deprecate(
    clip_back(qs::Queues{Event}, i::I, slope::F, offset::F, t::F) where {F,I},
    clip(qs, i, -slope, -offset + t, Val(false))
)

# Convenience method: unpack a Queues container into its event vector and a
# Ref to the i-th queue range.
# NOTE(review): this forwards 5 arguments while the method below takes 6
# (an extra Val(check)); presumably a matching 5-argument method exists
# elsewhere in the package — confirm.
clip(qs::Queues{Event}, i::I, slope::F, offset::F, v::Val{D}) where {F,I,D} =
    clip(qs.events, Ref(qs.pq, i), slope, offset, v)

"""
    clip(elements, pq, slope, offset, Val(forward), Val(check))

Consume events from one end of the queue range `pq` while the running line
`slope*x + offset` is negative at the event position, accumulating each
event's slope and intercept; then compute the crossing point `x` and store a
new boundary event there. Returns `x`. With `check == true`, a (near-)zero
final slope yields `-Inf`/`+Inf` instead of dividing by zero.
`forward` selects which end of the range is consumed (front vs. back).
The range referenced by `pq` is updated in place.
"""
function clip(
    elements::Vector{Event},
    pq::Ref{Range},
    slope::F,
    offset::F,
    ::Val{forward},
    ::Val{check},
)::F where {F,forward,check}
    local start::Int = pq[].start
    local stop::Int = pq[].stop
    # First candidate event at the consuming end of the range.
    local e::Event = elements[forward ? start : stop]::Event
    @static DEBUG && local dir::String = forward ? "f" : "b"
    @static DEBUG && @printf("clip_%s: (%+g, %+.2f) [%d] %s\n",
        dir, slope, offset, check, string(elements[start:stop]))
    # Absorb events while the line is still below zero at the event position.
    while start <= stop && slope * e.x + offset < 0
        offset += intercept(e)
        slope += e.slope
        @static DEBUG && @printf("  lip_%s: (%+g, %+.2f)\n", dir, slope, offset)
        e = elements[forward ? start += 1 : stop -= 1]
    end
    local x::F = if check && abs(slope) <= EPS
        # Degenerate (flat) line: crossing is at infinity in this direction.
        forward ? -Inf : +Inf
    else
        let x::F = -offset/slope
            # Write the new boundary event back, re-extending the range by one.
            elements[forward ? start -= 1 : stop += 1] = Event(x, slope)
            x
        end
    end
    @static DEBUG && @printf("   ip_%s: --> %g\n", dir, x)
    pq[] = Range(start, stop)
    return x
end
| {
"alphanum_fraction": 0.552928533,
"author": null,
"avg_line_length": 27.776119403,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "8505692ff02f1c93a2e008280a9c154bb0e2b497",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "24a5cebf101180822198806c0a4131b0efb7a36d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "EQt/treelas",
"max_forks_repo_path": "julia/src/clip.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "24a5cebf101180822198806c0a4131b0efb7a36d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "EQt/treelas",
"max_issues_repo_path": "julia/src/clip.jl",
"max_line_length": 85,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "24a5cebf101180822198806c0a4131b0efb7a36d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "EQt/treelas",
"max_stars_repo_path": "julia/src/clip.jl",
"max_stars_repo_stars_event_max_datetime": "2021-04-05T17:42:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-06-18T13:31:26.000Z",
"num_tokens": 613,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1861
} |
from __future__ import absolute_import, division, print_function
"""
This is for 3D selection in Glue 3d scatter plot viewer.
"""
import numpy as np
from glue.core import Data
from glue.config import viewer_tool
from glue.viewers.common.tool import CheckableTool
from glue.core.command import ApplySubsetState
from glue.core.roi import RectangularROI, CircularROI, PolygonalROI, Projected3dROI
from glue.core.subset import RoiSubsetState3d
from ..utils import as_matrix_transform
from ..extern.vispy.scene import Rectangle, Line, Ellipse
# Backward-compatibility for reading files
from .compat import MultiMaskSubsetState # noqa
MultiElementSubsetState = MultiMaskSubsetState
class VispyMouseMode(CheckableTool):
    """Abstract base class for 3D selection modes in the vispy viewers.

    Concrete subclasses (lasso, rectangle, circular, point) implement the
    mouse handlers; this class provides the shared plumbing: access to the
    visible data layers, ROI/subset application, progress reporting and the
    visual-to-canvas projection matrix.
    """

    def __init__(self, viewer):
        super(VispyMouseMode, self).__init__(viewer)
        self._vispy_widget = viewer._vispy_widget
        self.current_visible_array = None

    def get_visible_data(self):
        """Return the visible Data layers and the canvas visual."""
        datasets = []
        # Walk the layer artists, collecting Data objects (not subsets).
        for artist in self.viewer._layer_artist_container:
            layer = artist.layer
            if isinstance(layer, Data):
                datasets.append(layer)
            # There is a single visual per canvas, so keeping the last one
            # seen is sufficient.
            visual = artist.visual
        return datasets, visual

    def iter_data_layer_artists(self):
        """Yield each layer artist whose layer is a Data object."""
        for artist in self.viewer._layer_artist_container:
            if not isinstance(artist.layer, Data):
                continue
            yield artist

    def apply_roi(self, roi):
        """Wrap *roi* in a 3D subset state using the viewer's attributes."""
        state = self.viewer.state
        subset_state = RoiSubsetState3d(state.x_att, state.y_att,
                                        state.z_att, roi)
        self.apply_subset_state(subset_state)

    def apply_subset_state(self, subset_state):
        """Apply *subset_state* through the undo-able command stack."""
        command = ApplySubsetState(data_collection=self.viewer._data,
                                   subset_state=subset_state)
        self.viewer.session.command_stack.do(command)

    def set_progress(self, value):
        """Show selection progress in the status bar; negative clears it."""
        message = ('' if value < 0
                   else 'Calculating selection - {0}%'.format(int(value)))
        self.viewer.show_status(message)

    @property
    def projection_matrix(self):
        """Transposed visual->canvas transform of the first data layer."""
        # Use the first data layer (may come directly from the viewer in
        # the future).
        first_artist = next(self.iter_data_layer_artists())
        transform = first_artist.visual.get_transform(map_from='visual',
                                                      map_to='canvas')
        return as_matrix_transform(transform).matrix.T
@viewer_tool
class LassoSelectionMode(VispyMouseMode):
    """Free-hand (lasso) selection: the dragged path is closed into a
    polygon and projected into the 3D scene."""

    icon = 'glue_lasso'
    tool_id = 'vispy:lasso'
    action_text = 'Select data using a lasso selection'

    def __init__(self, viewer):
        super(LassoSelectionMode, self).__init__(viewer)
        self.line = Line(color='purple',
                         width=2, method='agg',
                         parent=self._vispy_widget.canvas.scene)

    def activate(self):
        self.reset()

    def reset(self):
        """Clear the recorded path and hide the visual."""
        self.line_pos = []
        self.line.set_data(np.zeros((0, 2), dtype=float))
        self.line.parent = None

    def press(self, event):
        if event.button != 1:
            return
        self.line_pos.append(event.pos)

    def move(self, event):
        if event.button != 1 or not event.is_dragging:
            return
        # Extend the path and redraw it on the canvas.
        self.line_pos.append(event.pos)
        self.line.set_data(np.array(self.line_pos, dtype=float))
        self.line.parent = self._vispy_widget.canvas.scene

    def release(self, event):
        if event.button != 1:
            return
        if self.line_pos:
            # Close the path into a polygon and project it into 3D.
            vx, vy = np.array(self.line_pos).transpose()
            roi = Projected3dROI(roi_2d=PolygonalROI(vx, vy),
                                 projection_matrix=self.projection_matrix)
            self.apply_roi(roi)
        self.reset()
        self.viewer.toolbar.active_tool = None
@viewer_tool
class RectangleSelectionMode(VispyMouseMode):
    """Drag-to-select with a screen-space rectangle, projected into 3D."""

    icon = 'glue_square'
    tool_id = 'vispy:rectangle'
    action_text = 'Select data using a rectangular selection'

    def __init__(self, viewer):
        super(RectangleSelectionMode, self).__init__(viewer)
        self.rectangle = Rectangle(center=(0, 0), width=1, height=1, border_width=2,
                                   color=(0, 0, 0, 0), border_color='purple')

    def activate(self):
        self.reset()

    def reset(self):
        """Forget both corners and hide the visual."""
        self.corner1 = None
        self.corner2 = None
        self.rectangle.parent = None

    def press(self, event):
        if event.button != 1:
            return
        self.corner1 = event.pos

    def move(self, event):
        if event.button != 1 or not event.is_dragging:
            return
        self.corner2 = event.pos
        x1, y1 = self.corner1
        x2, y2 = self.corner2
        width = abs(x2 - x1)
        height = abs(y2 - y1)
        # Only draw once the rectangle is non-degenerate.
        if width > 0 and height > 0:
            self.rectangle.center = 0.5 * (x1 + x2), 0.5 * (y1 + y2)
            self.rectangle.width = width
            self.rectangle.height = height
            self.rectangle.parent = self._vispy_widget.canvas.scene

    @property
    def bounds(self):
        """Return (xmin, xmax, ymin, ymax) of the dragged rectangle."""
        x_lo, x_hi = sorted((self.corner1[0], self.corner2[0]))
        y_lo, y_hi = sorted((self.corner1[1], self.corner2[1]))
        return (x_lo, x_hi, y_lo, y_hi)

    def release(self, event):
        if event.button != 1:
            return
        if self.corner2 is not None:
            roi = Projected3dROI(roi_2d=RectangularROI(*self.bounds),
                                 projection_matrix=self.projection_matrix)
            self.apply_roi(roi)
        self.reset()
        self.viewer.toolbar.active_tool = None
@viewer_tool
class CircleSelectionMode(VispyMouseMode):
    """Drag-to-select with a screen-space circle, projected into 3D."""

    icon = 'glue_circle'
    tool_id = 'vispy:circle'
    action_text = 'Select data using a circular selection'

    def __init__(self, viewer):
        super(CircleSelectionMode, self).__init__(viewer)
        self.ellipse = Ellipse(center=(0, 0), radius=1, border_width=2,
                               color=(0, 0, 0, 0), border_color='purple')

    def activate(self):
        self.reset()

    def reset(self):
        """Forget the center/radius and hide the visual."""
        self.center = None
        self.radius = 0
        self.ellipse.parent = None

    def press(self, event):
        if event.button != 1:
            return
        self.center = event.pos

    def move(self, event):
        if event.button != 1 or not event.is_dragging:
            return
        # Radius follows the larger of the horizontal/vertical offsets.
        dx = abs(event.pos[0] - self.center[0])
        dy = abs(event.pos[1] - self.center[1])
        self.radius = max(dx, dy)
        if self.radius > 0:
            self.ellipse.center = self.center
            self.ellipse.radius = self.radius
            self.ellipse.parent = self._vispy_widget.canvas.scene

    def release(self, event):
        if event.button != 1:
            return
        if self.radius > 0:
            roi = Projected3dROI(roi_2d=CircularROI(self.center[0],
                                                    self.center[1],
                                                    self.radius),
                                 projection_matrix=self.projection_matrix)
            self.apply_roi(roi)
        self.reset()
        self.viewer.toolbar.active_tool = None
| {
"alphanum_fraction": 0.6076327584,
"author": null,
"avg_line_length": 32.2938596491,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "675f0643e4fbcbdc8e76d0603c77eab0d12053f3",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d940705f4ba95f8d7a9a74d37fb68c71080b490a",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "jzuhone/glue-vispy-viewers",
"max_forks_repo_path": "glue_vispy_viewers/common/selection_tools.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d940705f4ba95f8d7a9a74d37fb68c71080b490a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "jzuhone/glue-vispy-viewers",
"max_issues_repo_path": "glue_vispy_viewers/common/selection_tools.py",
"max_line_length": 89,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d940705f4ba95f8d7a9a74d37fb68c71080b490a",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "jzuhone/glue-vispy-viewers",
"max_stars_repo_path": "glue_vispy_viewers/common/selection_tools.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1664,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7363
} |
import tensorflow as tf
"""Evaluate a trained SSD300 model on the MS COCO val2017 set.

Builds the SSD300 inference graph, loads pretrained weights, runs prediction
over the dataset, writes detections to a COCO-format JSON results file and
scores them with the official COCOeval bounding-box metrics.
"""
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
import numpy as np
from matplotlib import pyplot as plt
import sys
import os
import warnings
import logging
# TODO: Specify the directory that contains the `pycocotools` here.
pycocotools_dir = '../cocoapi/PythonAPI/'
if pycocotools_dir not in sys.path:
    sys.path.insert(0, pycocotools_dir)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from models.keras_ssd300 import ssd_300
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from data_generator.object_detection_2d_data_generator import DataGenerator
from eval_utils.coco_utils import get_coco_category_maps, predict_all_to_json
# Erase terminal warning
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=Warning)
tf.get_logger().setLevel('INFO')
tf.autograph.set_verbosity(0)
tf.get_logger().setLevel(logging.ERROR)
# %matplotlib inline
# Set the input image size for the model.
img_height = 300
img_width = 300
# 1: Build the Keras model
K.clear_session() # Clear previous models from memory.
# 80 classes = MS COCO object categories; 'inference' mode appends the
# decoding layers (confidence thresholding + NMS) to the network.
model = ssd_300(image_size=(img_height, img_width, 3),
                n_classes=80,
                mode='inference',
                l2_regularization=0.0005,
                scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05], # The scales for Pascal VOC are [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05]
                aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5],
                                         [1.0, 2.0, 0.5]],
                two_boxes_for_ar1=True,
                steps=[8, 16, 32, 64, 100, 300],
                offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
                clip_boxes=False,
                variances=[0.1, 0.1, 0.2, 0.2],
                normalize_coords=True,
                subtract_mean=[123, 117, 104],
                swap_channels=[2, 1, 0],
                confidence_thresh=0.01,
                iou_threshold=0.45,
                top_k=200,
                nms_max_output_size=400)
# 2: Load the trained weights into the model.
# TODO: Set the path of the trained weights.
weights_path = '/home/gennosuke/ssd-keras-master-v2/weights/VGG_coco_SSD_300x300_iter_400000.h5'
model.load_weights(weights_path, by_name=True)
# 3: Compile the model so that Keras won't complain the next time you load it.
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
# model.summary()
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
dataset = DataGenerator()
# TODO: Set the paths to the dataset here.
MS_COCO_dataset_images_dir = '/home/gennosuke/ROLO-master/benchmark/cocoData/val2017/'
MS_COCO_dataset_annotations_filename = '/home/gennosuke/ROLO-master/benchmark/cocoData/annotations/instances_val2017.json'
dataset.parse_json(images_dirs=[MS_COCO_dataset_images_dir],
                   annotations_filenames=[MS_COCO_dataset_annotations_filename],
                   ground_truth_available=False, # It doesn't matter whether you set this `True` or `False` because the ground truth won't be used anyway, but the parsing goes faster if you don't load the ground truth.
                   include_classes='all',
                   ret=False)
# We need the `classes_to_cats` dictionary. Read the documentation of this function to understand why.
cats_to_classes, classes_to_cats, cats_to_names, classes_to_names = get_coco_category_maps(MS_COCO_dataset_annotations_filename)
# TODO: Set the desired output file name and the batch size.
results_file = 'detections_val2017_ssd300_results.json'
batch_size = 20 # Ideally, choose a batch size that divides the number of images in the dataset.
# Run inference over the whole dataset and dump COCO-format detections.
predict_all_to_json(out_file=results_file,
                    model=model,
                    img_height=img_height,
                    img_width=img_width,
                    classes_to_cats=classes_to_cats,
                    data_generator=dataset,
                    batch_size=batch_size,
                    data_generator_mode='resize',
                    model_mode='inference',
                    confidence_thresh=0.01,
                    iou_threshold=0.45,
                    top_k=200,
                    normalize_coords=True)
# Score the detections against the ground truth with the official COCO API.
coco_gt = COCO(MS_COCO_dataset_annotations_filename)
coco_dt = coco_gt.loadRes(results_file)
image_ids = sorted(coco_gt.getImgIds())
cocoEval = COCOeval(cocoGt=coco_gt,
                    cocoDt=coco_dt,
                    iouType='bbox')
cocoEval.params.imgIds = image_ids
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
"alphanum_fraction": 0.6661639962,
"author": null,
"avg_line_length": 39.8872180451,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a731d09114df5d6444b6a876bbd110f4eb92e8a7",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ba39ef02d421d3e0c6fa9a1bf0891e4518f89c5d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "GennosukeKoita/ssd-keras",
"max_forks_repo_path": "ssd300_evaluation_COCO.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ba39ef02d421d3e0c6fa9a1bf0891e4518f89c5d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "GennosukeKoita/ssd-keras",
"max_issues_repo_path": "ssd300_evaluation_COCO.py",
"max_line_length": 218,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ba39ef02d421d3e0c6fa9a1bf0891e4518f89c5d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "GennosukeKoita/ssd-keras",
"max_stars_repo_path": "ssd300_evaluation_COCO.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1390,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5305
} |
!****h* ROBODoc/H5D (F03)
!
! NAME
! H5D_PROVISIONAL
!
! PURPOSE
! This file contains Fortran 90 and Fortran 2003 interfaces for H5D functions.
! It contains the same functions as H5Dff_F90.f90 but includes the
! Fortran 2003 functions and the interface listings. This file will be compiled
! instead of H5Dff_F90.f90 if Fortran 2003 functions are enabled.
!
! COPYRIGHT
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
! Copyright by The HDF Group. *
! Copyright by the Board of Trustees of the University of Illinois. *
! All rights reserved. *
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
! the files COPYING and Copyright.html. COPYING can be found at the root *
! of the source code distribution tree; Copyright.html can be found at the *
! root level of an installed copy of the electronic HDF5 document set and *
! is linked from the top-level documents page. It can also be found at *
! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
! access to either file, you may request a copy from help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
! (1) The maximum rank of an array allowed in Fortran is 7, therefore
! we only provide an interface for arrays up to and including rank 7.
!
! (2) Unfortunately we are using a generic interface and one of the factors
! used in determining the proper routine to select is that of the array
! rank being passed. Therefore, we can not create just one subroutine for
! each array type (integer, real, etc...) and use a
! rank 1 array of assumed size to handle multiple ranks, i.e.
! (i.e. integer, dimension(*) :: ... )
! (i.e. real , dimension(*) :: ... ) etc...
!
! (3) Could not place the USE, INTRINSIC :: ISO_C_BINDING in the module header because it may
! conflict with the USE, INTRINSIC :: ISO_C_BINDING included in the user's program. Moved
! the statement instead to each subroutine.
!
!
! (4) C_LOC and character strings according to the Fortran 2003 standard:
!
! 15.1.2.5 C_LOC(X)
!
! Argument. X shall either
!
! (A) have interoperable type and type parameters and be
! (a) a variable that has the TARGET attribute and is interoperable,
! (b) an allocated allocatable variable that has the TARGET attribute
! and is not an array of zero size, or
! (c) an associated scalar pointer, or
! (B) be a nonpolymorphic scalar, have no length type parameters, and be
! (a) a nonallocatable, nonpointer variable that has the TARGET attribute,
! (b) an allocated allocatable variable that has the TARGET attribute, or
! (c) an associated pointer.
!
! - When X is a character, for interoperability the standard is:
!
! 15.2.1 Interoperability of intrinsic types
!
! ...if the type is character, interoperability also requires that the length type parameter
! be omitted or be specified by an initialization expression whose value is one.
!
! THEREFORE compilers that have not extended the standard require the
! argument in C_LOC to be of the variant:
!
! CHARACTER(LEN=1), TARGET :: chr
! or
! CHARACTER, TARGET :: chr
!
! *** IMPORTANT ***
! If you add a new H5D function you must add the function name to the
! Windows dll file 'hdf5_fortrandll.def' in the fortran/src directory.
! This is needed for Windows based operating systems.
!
!*****
MODULE H5D_PROVISIONAL
  USE H5GLOBAL

  ! Generic write interface: dispatches h5dwrite_f on the Fortran type and
  ! rank (scalar through rank 7) of the data buffer.
  INTERFACE h5dwrite_f
     MODULE PROCEDURE h5dwrite_reference_obj
     MODULE PROCEDURE h5dwrite_reference_dsetreg
     MODULE PROCEDURE h5dwrite_integer_scalar
     MODULE PROCEDURE h5dwrite_integer_1
     MODULE PROCEDURE h5dwrite_integer_2
     MODULE PROCEDURE h5dwrite_integer_3
     MODULE PROCEDURE h5dwrite_integer_4
     MODULE PROCEDURE h5dwrite_integer_5
     MODULE PROCEDURE h5dwrite_integer_6
     MODULE PROCEDURE h5dwrite_integer_7
     MODULE PROCEDURE h5dwrite_char_scalar
     MODULE PROCEDURE h5dwrite_char_1
     MODULE PROCEDURE h5dwrite_char_2
     MODULE PROCEDURE h5dwrite_char_3
     MODULE PROCEDURE h5dwrite_char_4
     MODULE PROCEDURE h5dwrite_char_5
     MODULE PROCEDURE h5dwrite_char_6
     MODULE PROCEDURE h5dwrite_char_7
     MODULE PROCEDURE h5dwrite_real_scalar
     MODULE PROCEDURE h5dwrite_real_1
     MODULE PROCEDURE h5dwrite_real_2
     MODULE PROCEDURE h5dwrite_real_3
     MODULE PROCEDURE h5dwrite_real_4
     MODULE PROCEDURE h5dwrite_real_5
     MODULE PROCEDURE h5dwrite_real_6
     MODULE PROCEDURE h5dwrite_real_7
     ! This is the preferred way to call h5dwrite
     ! by passing an address
     MODULE PROCEDURE h5dwrite_ptr
  END INTERFACE

  ! Generic read interface: mirrors h5dwrite_f for reading.
  INTERFACE h5dread_f
     MODULE PROCEDURE h5dread_reference_obj
     MODULE PROCEDURE h5dread_reference_dsetreg
     MODULE PROCEDURE h5dread_integer_scalar
     MODULE PROCEDURE h5dread_integer_1
     MODULE PROCEDURE h5dread_integer_2
     MODULE PROCEDURE h5dread_integer_3
     MODULE PROCEDURE h5dread_integer_4
     MODULE PROCEDURE h5dread_integer_5
     MODULE PROCEDURE h5dread_integer_6
     MODULE PROCEDURE h5dread_integer_7
     MODULE PROCEDURE h5dread_char_scalar
     MODULE PROCEDURE h5dread_char_1
     MODULE PROCEDURE h5dread_char_2
     MODULE PROCEDURE h5dread_char_3
     MODULE PROCEDURE h5dread_char_4
     MODULE PROCEDURE h5dread_char_5
     MODULE PROCEDURE h5dread_char_6
     MODULE PROCEDURE h5dread_char_7
     MODULE PROCEDURE h5dread_real_scalar
     MODULE PROCEDURE h5dread_real_1
     MODULE PROCEDURE h5dread_real_2
     MODULE PROCEDURE h5dread_real_3
     MODULE PROCEDURE h5dread_real_4
     MODULE PROCEDURE h5dread_real_5
     MODULE PROCEDURE h5dread_real_6
     MODULE PROCEDURE h5dread_real_7
     ! This is the preferred way to call h5dread
     ! by passing an address
     MODULE PROCEDURE h5dread_ptr
  END INTERFACE

  ! Interface for the function used to pass the C pointer of the buffer
  ! to the C H5Dwrite routine
  INTERFACE
     INTEGER FUNCTION h5dwrite_f_c(dset_id, mem_type_id, &
          mem_space_id_default , &
          file_space_id_default, &
          xfer_prp_default, buf )
       USE H5GLOBAL
       USE, INTRINSIC :: ISO_C_BINDING
       !DEC$IF DEFINED(HDF5F90_WINDOWS)
       !DEC$ATTRIBUTES C,reference,decorate,alias:'H5DWRITE_F_C'::h5dwrite_f_c
       !DEC$ENDIF
       INTEGER(HID_T), INTENT(IN) :: dset_id       ! Dataset identifier
       INTEGER(HID_T), INTENT(IN) :: mem_type_id   ! Memory datatype identifier
       INTEGER(HID_T) :: mem_space_id_default
       INTEGER(HID_T) :: file_space_id_default
       INTEGER(HID_T) :: xfer_prp_default
       TYPE(C_PTR), VALUE :: buf                   ! C address of the data buffer
     END FUNCTION h5dwrite_f_c
  END INTERFACE

  ! Interface for the function used to pass the C pointer of the buffer
  ! to the C H5Dread routine
  INTERFACE
     INTEGER FUNCTION h5dread_f_c(dset_id, mem_type_id, &
          mem_space_id_default, &
          file_space_id_default, &
          xfer_prp_default, buf)
       USE H5GLOBAL
       USE, INTRINSIC :: ISO_C_BINDING
       !DEC$IF DEFINED(HDF5F90_WINDOWS)
       !DEC$ATTRIBUTES C,reference,decorate,alias:'H5DREAD_F_C'::h5dread_f_c
       !DEC$ENDIF
       INTEGER(HID_T), INTENT(IN) :: dset_id       ! Dataset identifier
       INTEGER(HID_T), INTENT(IN) :: mem_type_id   ! Memory datatype identifier
       INTEGER(HID_T) :: mem_space_id_default
       INTEGER(HID_T) :: file_space_id_default
       INTEGER(HID_T) :: xfer_prp_default
       TYPE(C_PTR), VALUE :: buf                   ! C address of the data buffer
     END FUNCTION h5dread_f_c
  END INTERFACE

  ! Generic fill interface: dispatches h5dfill_f on the fill value's type.
  INTERFACE h5dfill_f
     MODULE PROCEDURE h5dfill_integer
     MODULE PROCEDURE h5dfill_real
     MODULE PROCEDURE h5dfill_char
  END INTERFACE

  ! Interface for the function used to pass the C pointer of the buffer
  ! to the C H5Dfill routine
  INTERFACE
     INTEGER FUNCTION h5dfill_c(f_ptr_fill_value, fill_type_id, space_id, &
          f_ptr_buf, mem_type_id)
       USE H5GLOBAL
       USE, INTRINSIC :: ISO_C_BINDING
       !DEC$IF DEFINED(HDF5F90_WINDOWS)
       !DEC$ATTRIBUTES C,reference,decorate,alias:'H5DFILL_C'::h5dfill_c
       !DEC$ENDIF
       TYPE(C_PTR), VALUE :: f_ptr_fill_value      ! C address of the fill value
       INTEGER(HID_T) :: fill_type_id ! Fill value datatype identifier
       INTEGER(HID_T), INTENT(IN) :: space_id ! Memory dataspace selection identifier
       TYPE(C_PTR), VALUE :: f_ptr_buf             ! C address of the buffer to fill
       INTEGER(HID_T) :: mem_type_id
     END FUNCTION h5dfill_c
  END INTERFACE

CONTAINS
  ! Writes a buffer of object references (hobj_ref_t_f) to a dataset by
  ! passing the C address of the first element to h5dwrite_f_c.
  ! Optional dataspace/transfer-property arguments default to H5S_ALL_F /
  ! H5P_DEFAULT_F when absent. The status is returned in hdferr.
  SUBROUTINE h5dwrite_reference_obj(dset_id, mem_type_id, buf, dims, hdferr, &
       mem_space_id, file_space_id, xfer_prp)
    USE, INTRINSIC :: ISO_C_BINDING
    IMPLICIT NONE
    INTEGER(HID_T), INTENT(IN) :: dset_id   ! Dataset identifier
    INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
    INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: dims ! size of the bufffer buf
    TYPE(hobj_ref_t_f), DIMENSION(dims(1)), INTENT(IN), TARGET :: buf ! Data buffer
    INTEGER, INTENT(OUT) :: hdferr            ! Error code
    INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identfier
    INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identfier
    INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
    INTEGER(HID_T) :: xfer_prp_default
    INTEGER(HID_T) :: mem_space_id_default
    INTEGER(HID_T) :: file_space_id_default
    TYPE(C_PTR) :: f_ptr

    ! Resolve the optional arguments to their library defaults.
    xfer_prp_default = H5P_DEFAULT_F
    mem_space_id_default = H5S_ALL_F
    file_space_id_default = H5S_ALL_F
    IF(PRESENT(xfer_prp)) xfer_prp_default = xfer_prp
    IF(PRESENT(mem_space_id)) mem_space_id_default = mem_space_id
    IF(PRESENT(file_space_id)) file_space_id_default = file_space_id

    ! Hand the address of the first buffer element to the C wrapper.
    f_ptr = C_LOC(buf(1))
    hdferr = h5dwrite_f_c(dset_id, mem_type_id, mem_space_id_default, &
         file_space_id_default, xfer_prp_default, f_ptr)
  END SUBROUTINE h5dwrite_reference_obj
!
! Writes an array of dataset-region references to the dataset identified
! by dset_id.  The Fortran reference records are flattened into a
! contiguous INTEGER buffer before being handed to the C layer.
!
! NOTE: the original code also computed C_LOC(buf(1)) into a local f_ptr
! that was never used (the C call takes ref_buf instead); that dead local,
! its assignment, and the ISO_C_BINDING use that existed only for it have
! been removed.
!
SUBROUTINE h5dwrite_reference_dsetreg(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), DIMENSION(*), INTENT(IN) :: dims     ! Size of the buffer buf
  TYPE(hdset_reg_ref_t_f), DIMENSION(dims(1)), INTENT(IN), TARGET :: buf ! Region references to write
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: xfer_prp_default
  INTEGER(HID_T) :: mem_space_id_default
  INTEGER(HID_T) :: file_space_id_default
  INTEGER, ALLOCATABLE, DIMENSION(:) :: ref_buf          ! Flattened copy of buf for the C call
  INTEGER :: i, j
  INTERFACE
     INTEGER FUNCTION h5dwrite_ref_reg_c(dset_id, mem_type_id,&
          mem_space_id_default, &
          file_space_id_default, xfer_prp_default, ref_buf, dims)
       USE H5GLOBAL
       !DEC$IF DEFINED(HDF5F90_WINDOWS)
       !DEC$ATTRIBUTES C,reference,decorate,alias:'H5DWRITE_REF_REG_C'::h5dwrite_ref_reg_c
       !DEC$ENDIF
       INTEGER(HID_T), INTENT(IN) :: dset_id
       INTEGER(HID_T), INTENT(IN) :: mem_type_id
       INTEGER(HID_T) :: xfer_prp_default
       INTEGER(HID_T) :: mem_space_id_default
       INTEGER(HID_T) :: file_space_id_default
       INTEGER, DIMENSION(*) :: ref_buf
       INTEGER(HSIZE_T), DIMENSION(*) :: dims
     END FUNCTION h5dwrite_ref_reg_c
  END INTERFACE

  ! Substitute library defaults for any omitted optional identifiers.
  xfer_prp_default = H5P_DEFAULT_F
  mem_space_id_default = H5S_ALL_F
  file_space_id_default = H5S_ALL_F
  IF(PRESENT(xfer_prp)) xfer_prp_default = xfer_prp
  IF(PRESENT(mem_space_id)) mem_space_id_default = mem_space_id
  IF(PRESENT(file_space_id)) file_space_id_default = file_space_id

  ALLOCATE(ref_buf(REF_REG_BUF_LEN*dims(1)), stat=hdferr)
  IF (hdferr .NE. 0) THEN
     ! Allocation failed: report the error without calling the C layer.
     hdferr = -1
     RETURN
  END IF

  ! Flatten each reference record into the contiguous integer buffer.
  DO j = 1, dims(1)
     DO i = 1, REF_REG_BUF_LEN
        ref_buf(REF_REG_BUF_LEN*(j-1) + i) = buf(j)%ref(i)
     END DO
  END DO

  hdferr = h5dwrite_ref_reg_c(dset_id, mem_type_id, mem_space_id_default, &
       file_space_id_default, xfer_prp_default, ref_buf, dims)
  DEALLOCATE(ref_buf)
END SUBROUTINE h5dwrite_reference_dsetreg
!
! Writes a scalar INTEGER value to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_integer_scalar(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER, INTENT(IN), TARGET :: buf                     ! Value to be written
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Size of the buffer buf
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the scalar to the C layer, which performs the write.
  buf_ptr = C_LOC(buf)
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_integer_scalar
!
! Writes a rank-1 INTEGER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_integer_1(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  INTEGER, INTENT(IN), DIMENSION(dims(1)), TARGET :: buf ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_integer_1
!
! Writes a rank-2 INTEGER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_integer_2(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  INTEGER, INTENT(IN), &
       DIMENSION(dims(1),dims(2)), TARGET :: buf         ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_integer_2
!
! Writes a rank-3 INTEGER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_integer_3(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  INTEGER, INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3)), TARGET :: buf ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_integer_3
!
! Writes a rank-4 INTEGER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_integer_4(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  INTEGER, INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4)), TARGET :: buf ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1,1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_integer_4
!
! Writes a rank-5 INTEGER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_integer_5(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  INTEGER, INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5)), TARGET :: buf ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1,1,1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_integer_5
!
! Writes a rank-6 INTEGER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_integer_6(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  INTEGER, INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6)), TARGET :: buf ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1,1,1,1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_integer_6
!
! Writes a rank-7 INTEGER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_integer_7(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  INTEGER, INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7)), TARGET :: buf ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1,1,1,1,1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_integer_7
!
! Writes a scalar character string to the dataset identified by dset_id.
! Delegates to h5dwrite_char_scalar_fix so the string is received there
! with a constant length, which C_LOC requires.
!
SUBROUTINE h5dwrite_char_scalar(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Size of the buffer buf
  CHARACTER(*), INTENT(IN), TARGET :: buf                ! String to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier

  CALL h5dwrite_char_scalar_fix(dset_id, mem_type_id, buf, LEN(buf), dims, hdferr, &
       mem_space_id, file_space_id, xfer_prp)
END SUBROUTINE h5dwrite_char_scalar
!
! Writes a scalar character string whose length buf_len is passed
! explicitly.  Receiving the string with a constant length lets C_LOC
! take the address of its first character.
!
SUBROUTINE h5dwrite_char_scalar_fix(dset_id, mem_type_id, buf, buf_len, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Size of the buffer buf
  INTEGER, INTENT(IN) :: buf_len                         ! Length of buf
  CHARACTER(LEN=buf_len), INTENT(IN), TARGET :: buf      ! String to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first character to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1:1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_char_scalar_fix
!
! Writes a rank-1 CHARACTER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_char_1(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  CHARACTER(LEN=*), INTENT(IN), DIMENSION(dims(1)), TARGET :: buf ! Strings to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first character of the first element to the C layer.
  buf_ptr = C_LOC(buf(1)(1:1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_char_1
!
! Writes a rank-2 CHARACTER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_char_2(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  CHARACTER(LEN=*), INTENT(IN), &
       DIMENSION(dims(1),dims(2)), TARGET :: buf         ! Strings to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first character of the first element to the C layer.
  buf_ptr = C_LOC(buf(1,1)(1:1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_char_2
!
! Writes a rank-3 CHARACTER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_char_3(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  CHARACTER(LEN=*), INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3)), TARGET :: buf ! Strings to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first character of the first element to the C layer.
  buf_ptr = C_LOC(buf(1,1,1)(1:1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_char_3
!
! Writes a rank-4 CHARACTER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_char_4(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  CHARACTER(LEN=*), INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4)), TARGET :: buf ! Strings to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first character of the first element to the C layer.
  buf_ptr = C_LOC(buf(1,1,1,1)(1:1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_char_4
!
! Writes a rank-5 CHARACTER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_char_5(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  CHARACTER(LEN=*), INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5)), TARGET :: buf ! Strings to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first character of the first element to the C layer.
  buf_ptr = C_LOC(buf(1,1,1,1,1)(1:1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_char_5
!
! Writes a rank-6 CHARACTER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_char_6(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  CHARACTER(LEN=*), INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6)), TARGET :: buf ! Strings to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first character of the first element to the C layer.
  buf_ptr = C_LOC(buf(1,1,1,1,1,1)(1:1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_char_6
!
! Writes a rank-7 CHARACTER array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_char_7(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  CHARACTER(LEN=*), INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7)), TARGET :: buf ! Strings to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first character of the first element to the C layer.
  buf_ptr = C_LOC(buf(1,1,1,1,1,1,1)(1:1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_char_7
!
! Writes a scalar REAL value to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_real_scalar(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Size of the buffer buf
  REAL, INTENT(IN), TARGET :: buf                        ! Value to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the scalar to the C layer, which performs the write.
  buf_ptr = C_LOC(buf)
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_real_scalar
!
! Writes a rank-1 REAL array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_real_1(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  REAL, INTENT(IN), &
       DIMENSION(dims(1)), TARGET :: buf                 ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_real_1
!
! Writes a rank-2 REAL array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_real_2(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  REAL, INTENT(IN), &
       DIMENSION(dims(1),dims(2)), TARGET :: buf         ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_real_2
!
! Writes a rank-3 REAL array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_real_3(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  REAL, INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3)), TARGET :: buf ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_real_3
!
! Writes a rank-4 REAL array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_real_4(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  REAL, INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4)), TARGET :: buf ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1,1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_real_4
!
! Writes a rank-5 REAL array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_real_5(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  REAL, INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5)), TARGET :: buf ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1,1,1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_real_5
!
! Writes a rank-6 REAL array to the dataset identified by dset_id.
!
SUBROUTINE h5dwrite_real_6(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id                  ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id              ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims     ! Extent of buf in each dimension
  REAL, INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6)), TARGET :: buf ! Data to be written
  INTEGER, INTENT(OUT) :: hdferr                         ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id   ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id  ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp       ! Transfer property list identifier
  INTEGER(HID_T) :: plist_id, mspace_id, fspace_id
  TYPE(C_PTR) :: buf_ptr

  ! Substitute library defaults for any omitted optional identifiers.
  IF (PRESENT(xfer_prp)) THEN
     plist_id = xfer_prp
  ELSE
     plist_id = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF

  ! Hand the address of the first element to the C layer, which performs the write.
  buf_ptr = C_LOC(buf(1,1,1,1,1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, fspace_id, &
       plist_id, buf_ptr)
END SUBROUTINE h5dwrite_real_6
SUBROUTINE h5dwrite_real_7(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  REAL, INTENT(IN), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7)), TARGET :: buf ! Data to write
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  ! Hand the C layer the address of the first buffer element.
  buf_ptr = C_LOC(buf(1,1,1,1,1,1,1))
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dwrite_real_7
!
! NAME
! h5dread_f
!
! PURPOSE
! Reads raw data from the specified dataset into buf,
! converting from file datatype and dataspace to memory
! datatype and dataspace.
!
! Inputs:
! dset_id - dataset identifier
! mem_type_id - memory type identifier
! dims - 1-dim array of size 7; dims(k) has the size
! - of k-th dimension of the buf array
! Outputs:
! buf - buffer to read data in
! hdferr: - error code
! Success: 0
! Failure: -1
! Optional parameters:
! mem_space_id - memory dataspace identifier
! file_space_id - file dataspace identifier
! xfer_prp - transfer property list identifier
!
! AUTHOR
! Elena Pourmal
! August 12, 1999
!
! HISTORY
! Explicit Fortran interfaces were added for
! called C functions (it is needed for Windows
! port). February 28, 2001
!
! dims parameter was added to make code portable;
! n parameter was replaced with dims parameter in
! the h5dwrite_reference_obj and h5dwrite_reference_dsetreg
! functions. April 2, 2001
!
! NOTES
! This function is overloaded to read INTEGER,
! REAL, DOUBLE PRECISION and CHARACTER buffers
! up to 7 dimensions, and one dimensional buffers
! of the TYPE(hobj_ref_t_f) and TYPE(hdset_reg_ref_t_f)
! types.
!
SUBROUTINE h5dread_reference_obj(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(1) = number of references in buf
  TYPE(hobj_ref_t_f), INTENT(INOUT) , &
       DIMENSION(dims(1)), TARGET :: buf    ! Object references read from the dataset
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  ! Hand the C layer the address of the first reference element.
  buf_ptr = C_LOC(buf(1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_reference_obj
! Reads an array of dataset-region references. Unlike the other overloads,
! this one cannot pass the Fortran derived type straight to C: it reads the
! raw reference words into a flat INTEGER scratch buffer via
! h5dread_ref_reg_c and then unpacks REF_REG_BUF_LEN words per element into
! buf(j)%ref. hdferr is 0 on success, -1 on failure (including allocation
! failure of the scratch buffer).
SUBROUTINE h5dread_reference_dsetreg(dset_id, mem_type_id, buf, dims, hdferr, &
  mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(1) = number of references in buf
  TYPE(hdset_reg_ref_t_f), INTENT(INOUT), &
    DIMENSION(dims(1)), TARGET :: buf ! Region references read from the dataset
  INTEGER, INTENT(OUT) :: hdferr ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp ! Transfer property list identifier
  INTEGER(HID_T) :: xfer_prp_default
  INTEGER(HID_T) :: mem_space_id_default
  INTEGER(HID_T) :: file_space_id_default
  TYPE(C_PTR) :: f_ptr
  ! Flat scratch buffer the C routine fills; REF_REG_BUF_LEN words per reference.
  INTEGER, ALLOCATABLE, DIMENSION(:) :: ref_buf
  INTEGER :: i,j
  ! Explicit interface for the C worker routine (needed for the Windows port).
  INTERFACE
    INTEGER FUNCTION h5dread_ref_reg_c(dset_id, mem_type_id,&
      mem_space_id_default, &
      file_space_id_default, xfer_prp_default, ref_buf, dims)
      USE H5GLOBAL
      !DEC$IF DEFINED(HDF5F90_WINDOWS)
      !DEC$ATTRIBUTES C,reference,decorate,alias:'H5DREAD_REF_REG_C'::h5dread_ref_reg_c
      !DEC$ENDIF
      INTEGER(HID_T), INTENT(IN) :: dset_id
      INTEGER(HID_T), INTENT(IN) :: mem_type_id
      INTEGER(HID_T) :: xfer_prp_default
      INTEGER(HID_T) :: mem_space_id_default
      INTEGER(HID_T) :: file_space_id_default
      INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims
      INTEGER, DIMENSION(*) :: ref_buf
    END FUNCTION h5dread_ref_reg_c
  END INTERFACE
  ! Bail out with -1 if the scratch buffer cannot be allocated.
  ALLOCATE(ref_buf(REF_REG_BUF_LEN*dims(1)), stat=hdferr)
  IF (hdferr .NE. 0) THEN
    hdferr = -1
    RETURN
  ENDIF
  ! Fall back to the library defaults for any identifier the caller omitted.
  xfer_prp_default = H5P_DEFAULT_F
  mem_space_id_default = H5S_ALL_F
  file_space_id_default = H5S_ALL_F
  IF(PRESENT(xfer_prp)) xfer_prp_default = xfer_prp
  IF(PRESENT(mem_space_id)) mem_space_id_default = mem_space_id
  IF(PRESENT(file_space_id)) file_space_id_default = file_space_id
  hdferr = h5dread_ref_reg_c(dset_id, mem_type_id, mem_space_id_default, &
    file_space_id_default, xfer_prp_default, ref_buf, dims)
  ! Unpack the flat scratch words into the derived-type reference array:
  ! element j receives words REF_REG_BUF_LEN*(j-1)+1 .. REF_REG_BUF_LEN*j.
  DO j = 1, dims(1)
    DO i = 1, REF_REG_BUF_LEN
      buf(j)%ref(i) = ref_buf(REF_REG_BUF_LEN*(j-1) + i)
    ENDDO
  ENDDO
  DEALLOCATE(ref_buf)
END SUBROUTINE h5dread_reference_dsetreg
SUBROUTINE h5dread_integer_scalar(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! Unused for a scalar; kept for the generic interface
  INTEGER, INTENT(INOUT) , TARGET :: buf    ! Scalar receiving the value read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf)
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_integer_scalar
SUBROUTINE h5dread_integer_1(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  INTEGER, INTENT(INOUT), &
       DIMENSION(dims(1)) , TARGET :: buf   ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf(1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_integer_1
SUBROUTINE h5dread_integer_2(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  INTEGER, INTENT(INOUT), &
       DIMENSION(dims(1),dims(2)) , TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf(1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_integer_2
SUBROUTINE h5dread_integer_3(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  INTEGER, INTENT(INOUT), &
       DIMENSION(dims(1),dims(2),dims(3)) , TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf(1,1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_integer_3
SUBROUTINE h5dread_integer_4(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  INTEGER, INTENT(INOUT), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4)) , TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf(1,1,1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_integer_4
SUBROUTINE h5dread_integer_5(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  INTEGER, INTENT(INOUT), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5)) , TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf(1,1,1,1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_integer_5
SUBROUTINE h5dread_integer_6(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  INTEGER, INTENT(INOUT), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6)) , TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf(1,1,1,1,1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_integer_6
SUBROUTINE h5dread_integer_7(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  INTEGER, INTENT(INOUT), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7)) , TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf(1,1,1,1,1,1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_integer_7
! Reads a scalar character value. This overload only captures the assumed
! length of buf and forwards to h5dread_char_scalar_fix, which does the
! actual read; the optional identifiers are passed through unchanged
! (an absent optional stays absent in the callee).
SUBROUTINE h5dread_char_scalar(dset_id, mem_type_id, buf, dims, hdferr, &
  mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims
  CHARACTER(LEN=*), INTENT(INOUT) :: buf ! Data buffer
  INTEGER, INTENT(OUT) :: hdferr ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp ! Transfer property list identifier
  ! NOTE(review): the original declared xfer_prp_default, mem_space_id_default
  ! and file_space_id_default here but never used them; removed as dead locals.
  CALL h5dread_char_scalar_fix(dset_id, mem_type_id, buf, LEN(buf), dims, hdferr, &
    mem_space_id, file_space_id, xfer_prp)
END SUBROUTINE h5dread_char_scalar
SUBROUTINE h5dread_char_scalar_fix(dset_id, mem_type_id, buf, buf_len, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims
  INTEGER, INTENT(IN) :: buf_len            ! Explicit length of buf
  CHARACTER(LEN=buf_len), INTENT(INOUT), TARGET :: buf ! Data buffer being filled
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  ! Address of the first character of the buffer.
  buf_ptr = C_LOC(buf(1:1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_char_scalar_fix
SUBROUTINE h5dread_char_1(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  CHARACTER(LEN=*), INTENT(INOUT), &
       DIMENSION(dims(1)), TARGET :: buf    ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  ! Address of the first character of the first element.
  buf_ptr = C_LOC(buf(1)(1:1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_char_1
SUBROUTINE h5dread_char_2(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  CHARACTER(LEN=*), INTENT(INOUT), &
       DIMENSION(dims(1),dims(2)), TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  ! Address of the first character of the first element.
  buf_ptr = C_LOC(buf(1,1)(1:1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_char_2
SUBROUTINE h5dread_char_3(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  CHARACTER(LEN=*), INTENT(INOUT), &
       DIMENSION(dims(1),dims(2),dims(3)), TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  ! Address of the first character of the first element.
  buf_ptr = C_LOC(buf(1,1,1)(1:1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_char_3
SUBROUTINE h5dread_char_4(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  CHARACTER(LEN=*), INTENT(INOUT), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4)), TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  ! Address of the first character of the first element.
  buf_ptr = C_LOC(buf(1,1,1,1)(1:1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_char_4
SUBROUTINE h5dread_char_5(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  CHARACTER(LEN=*), INTENT(INOUT), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5)), TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  ! Address of the first character of the first element.
  buf_ptr = C_LOC(buf(1,1,1,1,1)(1:1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_char_5
SUBROUTINE h5dread_char_6(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  CHARACTER(LEN=*), INTENT(INOUT), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6)), TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  ! Address of the first character of the first element.
  buf_ptr = C_LOC(buf(1,1,1,1,1,1)(1:1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_char_6
SUBROUTINE h5dread_char_7(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  CHARACTER(LEN=*), INTENT(INOUT), &
       DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7)), TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  ! Address of the first character of the first element.
  buf_ptr = C_LOC(buf(1,1,1,1,1,1,1)(1:1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_char_7
SUBROUTINE h5dread_real_scalar(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! Unused for a scalar; kept for the generic interface
  REAL, INTENT(INOUT) , TARGET :: buf       ! Scalar receiving the value read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf)
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_real_scalar
SUBROUTINE h5dread_real_1(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  REAL, INTENT(INOUT), &
       DIMENSION(dims(1)) , TARGET :: buf   ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf(1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_real_1
SUBROUTINE h5dread_real_2(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id     ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! dims(k) = extent of k-th dimension of buf
  REAL, INTENT(INOUT), &
       DIMENSION(dims(1),dims(2)) , TARGET :: buf ! Array receiving the data read
  INTEGER, INTENT(OUT) :: hdferr            ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: mspace_id
  INTEGER(HID_T) :: fspace_id
  INTEGER(HID_T) :: xfer_id
  TYPE(C_PTR) :: buf_ptr
  ! Resolve each optional identifier to the library-wide default
  ! when the caller did not supply it.
  IF (PRESENT(mem_space_id)) THEN
     mspace_id = mem_space_id
  ELSE
     mspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     fspace_id = file_space_id
  ELSE
     fspace_id = H5S_ALL_F
  END IF
  IF (PRESENT(xfer_prp)) THEN
     xfer_id = xfer_prp
  ELSE
     xfer_id = H5P_DEFAULT_F
  END IF
  buf_ptr = C_LOC(buf(1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mspace_id, &
       fspace_id, xfer_id, buf_ptr)
END SUBROUTINE h5dread_real_2
SUBROUTINE h5dread_real_3(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id      ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id  ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! Extent of buf in each dimension
  REAL, INTENT(INOUT), DIMENSION(dims(1),dims(2),dims(3)), TARGET :: buf ! Destination data buffer
  INTEGER, INTENT(OUT) :: hdferr             ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: xfer_prp_use
  INTEGER(HID_T) :: mem_space_use
  INTEGER(HID_T) :: file_space_use
  TYPE(C_PTR) :: buf_ptr

  ! Substitute the library-wide defaults for any omitted optional identifier.
  IF (PRESENT(xfer_prp)) THEN
     xfer_prp_use = xfer_prp
  ELSE
     xfer_prp_use = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mem_space_use = mem_space_id
  ELSE
     mem_space_use = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     file_space_use = file_space_id
  ELSE
     file_space_use = H5S_ALL_F
  END IF

  ! Hand the C stub the address of the first buffer element.
  buf_ptr = C_LOC(buf(1,1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mem_space_use, &
       file_space_use, xfer_prp_use, buf_ptr)
END SUBROUTINE h5dread_real_3
SUBROUTINE h5dread_real_4(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id      ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id  ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! Extent of buf in each dimension
  REAL, INTENT(INOUT), DIMENSION(dims(1),dims(2),dims(3),dims(4)), TARGET :: buf ! Destination data buffer
  INTEGER, INTENT(OUT) :: hdferr             ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: xfer_prp_use
  INTEGER(HID_T) :: mem_space_use
  INTEGER(HID_T) :: file_space_use
  TYPE(C_PTR) :: buf_ptr

  ! Substitute the library-wide defaults for any omitted optional identifier.
  IF (PRESENT(xfer_prp)) THEN
     xfer_prp_use = xfer_prp
  ELSE
     xfer_prp_use = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mem_space_use = mem_space_id
  ELSE
     mem_space_use = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     file_space_use = file_space_id
  ELSE
     file_space_use = H5S_ALL_F
  END IF

  ! Hand the C stub the address of the first buffer element.
  buf_ptr = C_LOC(buf(1,1,1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mem_space_use, &
       file_space_use, xfer_prp_use, buf_ptr)
END SUBROUTINE h5dread_real_4
SUBROUTINE h5dread_real_5(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id      ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id  ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! Extent of buf in each dimension
  REAL, INTENT(INOUT), DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5)), TARGET :: buf ! Destination data buffer
  INTEGER, INTENT(OUT) :: hdferr             ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: xfer_prp_use
  INTEGER(HID_T) :: mem_space_use
  INTEGER(HID_T) :: file_space_use
  TYPE(C_PTR) :: buf_ptr

  ! Substitute the library-wide defaults for any omitted optional identifier.
  IF (PRESENT(xfer_prp)) THEN
     xfer_prp_use = xfer_prp
  ELSE
     xfer_prp_use = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mem_space_use = mem_space_id
  ELSE
     mem_space_use = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     file_space_use = file_space_id
  ELSE
     file_space_use = H5S_ALL_F
  END IF

  ! Hand the C stub the address of the first buffer element.
  buf_ptr = C_LOC(buf(1,1,1,1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mem_space_use, &
       file_space_use, xfer_prp_use, buf_ptr)
END SUBROUTINE h5dread_real_5
SUBROUTINE h5dread_real_6(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id      ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id  ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! Extent of buf in each dimension
  REAL, INTENT(INOUT), DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6)), TARGET :: buf ! Destination data buffer
  INTEGER, INTENT(OUT) :: hdferr             ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: xfer_prp_use
  INTEGER(HID_T) :: mem_space_use
  INTEGER(HID_T) :: file_space_use
  TYPE(C_PTR) :: buf_ptr

  ! Substitute the library-wide defaults for any omitted optional identifier.
  IF (PRESENT(xfer_prp)) THEN
     xfer_prp_use = xfer_prp
  ELSE
     xfer_prp_use = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mem_space_use = mem_space_id
  ELSE
     mem_space_use = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     file_space_use = file_space_id
  ELSE
     file_space_use = H5S_ALL_F
  END IF

  ! Hand the C stub the address of the first buffer element.
  buf_ptr = C_LOC(buf(1,1,1,1,1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mem_space_use, &
       file_space_use, xfer_prp_use, buf_ptr)
END SUBROUTINE h5dread_real_6
SUBROUTINE h5dread_real_7(dset_id, mem_type_id, buf, dims, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id      ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id  ! Memory datatype identifier
  INTEGER(HSIZE_T), INTENT(IN), DIMENSION(*) :: dims ! Extent of buf in each dimension
  REAL, INTENT(INOUT), DIMENSION(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7)), TARGET :: buf ! Destination data buffer
  INTEGER, INTENT(OUT) :: hdferr             ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: xfer_prp_use
  INTEGER(HID_T) :: mem_space_use
  INTEGER(HID_T) :: file_space_use
  TYPE(C_PTR) :: buf_ptr

  ! Substitute the library-wide defaults for any omitted optional identifier.
  IF (PRESENT(xfer_prp)) THEN
     xfer_prp_use = xfer_prp
  ELSE
     xfer_prp_use = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mem_space_use = mem_space_id
  ELSE
     mem_space_use = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     file_space_use = file_space_id
  ELSE
     file_space_use = H5S_ALL_F
  END IF

  ! Hand the C stub the address of the first buffer element.
  buf_ptr = C_LOC(buf(1,1,1,1,1,1,1))
  hdferr = h5dread_f_c(dset_id, mem_type_id, mem_space_use, &
       file_space_use, xfer_prp_use, buf_ptr)
END SUBROUTINE h5dread_real_7
!****s* H5D (F03)/h5dwrite_f_F03
!
! NAME
! h5dwrite_f_F03
!
! PURPOSE
! Writes raw data from a dataset into a buffer.
!
! Inputs:
! dset_id - Identifier of the dataset to write to.
! mem_type_id - Identifier of the memory datatype.
! buf - Buffer with data to be written to the file.
!
! Outputs:
! hdferr - Returns 0 if successful and -1 if fails
!
! Optional parameters:
! mem_space_id - Identifier of the memory dataspace.
! file_space_id - Identifier of the dataset's dataspace in the file.
! xfer_prp - Identifier of a transfer property list for this I/O operation.
!
! AUTHOR
! M. Scot Breitenfeld
! September 17, 2011
!
! Fortran2003 Interface:
!! SUBROUTINE h5dwrite_f(dset_id, mem_type_id, buf, hdferr, &
!! mem_space_id, file_space_id, xfer_prp)
!! INTEGER(HID_T), INTENT(IN) :: dset_id
!! INTEGER(HID_T), INTENT(IN) :: mem_type_id
!! TYPE(C_PTR) , INTENT(IN) :: buf
!! INTEGER , INTENT(OUT) :: hdferr
!! INTEGER(HID_T), INTENT(IN) , OPTIONAL :: mem_space_id
!! INTEGER(HID_T), INTENT(IN) , OPTIONAL :: file_space_id
!! INTEGER(HID_T), INTENT(IN) , OPTIONAL :: xfer_prp
!*****
SUBROUTINE h5dwrite_ptr(dset_id, mem_type_id, buf, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id      ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id  ! Memory datatype identifier
  TYPE(C_PTR), INTENT(IN) :: buf             ! C pointer to the data to write
  INTEGER, INTENT(OUT) :: hdferr             ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: xfer_prp_use
  INTEGER(HID_T) :: mem_space_use
  INTEGER(HID_T) :: file_space_use

  ! Substitute the library-wide defaults for any omitted optional identifier.
  IF (PRESENT(xfer_prp)) THEN
     xfer_prp_use = xfer_prp
  ELSE
     xfer_prp_use = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mem_space_use = mem_space_id
  ELSE
     mem_space_use = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     file_space_use = file_space_id
  ELSE
     file_space_use = H5S_ALL_F
  END IF

  ! buf is already a C pointer, so it is forwarded unchanged.
  hdferr = h5dwrite_f_c(dset_id, mem_type_id, mem_space_use, &
       file_space_use, xfer_prp_use, buf)
END SUBROUTINE h5dwrite_ptr
!****s* H5D (F03)/h5dread_f_F03
!
! NAME
! h5dread_f_F03
!
! PURPOSE
! Reads raw data from a dataset into a buffer.
!
! Inputs:
! dset_id - Identifier of the dataset read from.
! mem_type_id - Identifier of the memory datatype.
!
! Outputs:
! buf - Buffer to receive data read from file.
! hdferr - Returns 0 if successful and -1 if fails
!
! Optional parameters:
! mem_space_id - Identifier of the memory dataspace.
! file_space_id - Identifier of the dataset's dataspace in the file.
! xfer_prp - Identifier of a transfer property list for this I/O operation.
!
! AUTHOR
! M. Scot Breitenfeld
! September 17, 2011
!
! Fortran2003 Interface:
!! SUBROUTINE h5dread_f(dset_id, mem_type_id, buf, hdferr, &
!! mem_space_id, file_space_id, xfer_prp)
!! INTEGER(HID_T), INTENT(IN) :: dset_id
!! INTEGER(HID_T), INTENT(IN) :: mem_type_id
!! TYPE(C_PTR) , INTENT(INOUT) :: buf
!! INTEGER , INTENT(OUT) :: hdferr
!! INTEGER(HID_T), INTENT(IN) , OPTIONAL :: mem_space_id
!! INTEGER(HID_T), INTENT(IN) , OPTIONAL :: file_space_id
!! INTEGER(HID_T), INTENT(IN) , OPTIONAL :: xfer_prp
!*****
SUBROUTINE h5dread_ptr(dset_id, mem_type_id, buf, hdferr, &
     mem_space_id, file_space_id, xfer_prp)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: dset_id      ! Dataset identifier
  INTEGER(HID_T), INTENT(IN) :: mem_type_id  ! Memory datatype identifier
  TYPE(C_PTR), INTENT(INOUT) :: buf          ! C pointer to the receiving buffer
  INTEGER, INTENT(OUT) :: hdferr             ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: mem_space_id  ! Memory dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: file_space_id ! File dataspace identifier
  INTEGER(HID_T), OPTIONAL, INTENT(IN) :: xfer_prp      ! Transfer property list identifier
  INTEGER(HID_T) :: xfer_prp_use
  INTEGER(HID_T) :: mem_space_use
  INTEGER(HID_T) :: file_space_use

  ! Substitute the library-wide defaults for any omitted optional identifier.
  IF (PRESENT(xfer_prp)) THEN
     xfer_prp_use = xfer_prp
  ELSE
     xfer_prp_use = H5P_DEFAULT_F
  END IF
  IF (PRESENT(mem_space_id)) THEN
     mem_space_use = mem_space_id
  ELSE
     mem_space_use = H5S_ALL_F
  END IF
  IF (PRESENT(file_space_id)) THEN
     file_space_use = file_space_id
  ELSE
     file_space_use = H5S_ALL_F
  END IF

  ! buf is already a C pointer, so it is forwarded unchanged.
  hdferr = h5dread_f_c(dset_id, mem_type_id, mem_space_use, &
       file_space_use, xfer_prp_use, buf)
END SUBROUTINE h5dread_ptr
!
! NAME
! h5dfill_integer
!
! PURPOSE
! Fills dataspace elements with a fill value in a memory buffer.
! Only INTEGER, CHARACTER, REAL and DOUBLE PRECISION datatypes
! of the fillvalues and buffers are supported. Buffer and fillvalue
! are assumed to have the same datatype.
! Only one-dimensional buffers are supported.
!
! Inputs:
! fill_value - fill value
! space_id - memory space selection identifier
! buf - data buffer in memory to apply the selection to
! (a one-dimensional array)
! Outputs:
! hdferr: - error code
! Success: 0
! Failure: -1
! AUTHOR
! Elena Pourmal
! March 12, 2003
!
!
SUBROUTINE h5dfill_integer(fill_value, space_id, buf, hdferr)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER, INTENT(IN), TARGET :: fill_value  ! Fill value
  INTEGER(HID_T), INTENT(IN) :: space_id     ! Memory dataspace selection identifier
  INTEGER, INTENT(IN), DIMENSION(*), TARGET :: buf ! Memory buffer to fill in
  INTEGER, INTENT(OUT) :: hdferr             ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T) :: fill_type_id             ! Datatype of the fill value
  INTEGER(HID_T) :: mem_type_id              ! Datatype of the buffer
  TYPE(C_PTR) :: fill_ptr                    ! C address of fill_value
  TYPE(C_PTR) :: buf_ptr                     ! C address of buf

  ! Both the fill value and the buffer are native Fortran INTEGERs.
  fill_type_id = H5T_NATIVE_INTEGER
  mem_type_id = H5T_NATIVE_INTEGER
  fill_ptr = C_LOC(fill_value)
  buf_ptr = C_LOC(buf(1))
  hdferr = h5dfill_c(fill_ptr, fill_type_id, space_id, &
       buf_ptr, mem_type_id)
END SUBROUTINE h5dfill_integer
!
! NAME
! h5dfill_real
!
! PURPOSE
! Fills dataspace elements with a fill value in a memory buffer.
! Only INTEGER, CHARACTER, REAL and DOUBLE PRECISION datatypes
! of the fillvalues and buffers are supported. Buffer and fillvalue
! are assumed to have the same datatype.
! Only one-dimensional buffers are supported.
!
! Inputs:
! fill_value - fill value
! space_id - memory space selection identifier
! buf - data buffer in memory to apply the selection to
! (a one-dimensional array)
! Outputs:
! hdferr: - error code
! Success: 0
! Failure: -1
!
! AUTHOR
! Elena Pourmal
! March 12, 2003
!
SUBROUTINE h5dfill_real(fill_valuer, space_id, buf, hdferr)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  REAL, INTENT(IN), TARGET :: fill_valuer    ! Fill value
  INTEGER(HID_T), INTENT(IN) :: space_id     ! Memory dataspace selection identifier
  REAL, INTENT(IN), DIMENSION(*), TARGET :: buf ! Memory buffer to fill in
  INTEGER, INTENT(OUT) :: hdferr             ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T) :: fill_type_id             ! Datatype of the fill value
  INTEGER(HID_T) :: mem_type_id              ! Datatype of the buffer
  TYPE(C_PTR) :: fill_ptr                    ! C address of fill_valuer
  TYPE(C_PTR) :: buf_ptr                     ! C address of buf

  ! Both the fill value and the buffer are native Fortran REALs.
  fill_type_id = H5T_NATIVE_REAL
  mem_type_id = H5T_NATIVE_REAL
  fill_ptr = C_LOC(fill_valuer)
  buf_ptr = C_LOC(buf(1))
  hdferr = h5dfill_c(fill_ptr, fill_type_id, space_id, &
       buf_ptr, mem_type_id)
END SUBROUTINE h5dfill_real
!
! NAME
! h5dfill_char
!
! PURPOSE
! Fills dataspace elements with a fill value in a memory buffer.
! Only INTEGER, CHARACTER, REAL and DOUBLE PRECISION datatypes
! of the fillvalues and buffers are supported. Buffer and fillvalue
! are assumed to have the same datatype.
! Only one-dimensional buffers are supported.
!
! Inputs:
! fill_value - fill value
! space_id - memory space selection identifier
! buf - data buffer in memory to apply the selection to
! (a one-dimensional array)
! Outputs:
! hdferr: - error code
! Success: 0
! Failure: -1
! AUTHOR
! Elena Pourmal
! March 12, 2003
!
SUBROUTINE h5dfill_char(fill_value, space_id, buf, hdferr)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  CHARACTER, INTENT(IN), TARGET :: fill_value ! Fill value
  INTEGER(HID_T), INTENT(IN) :: space_id     ! Memory dataspace selection identifier
  CHARACTER, INTENT(IN), DIMENSION(*), TARGET :: buf ! Memory buffer to fill in
  INTEGER, INTENT(OUT) :: hdferr             ! Error code: 0 on success, -1 on failure
  INTEGER(HID_T) :: fill_type_id             ! Datatype of the fill value
  INTEGER(HID_T) :: mem_type_id              ! Datatype of the buffer
  TYPE(C_PTR) :: f_ptr_fill_value            ! C pointer to fill_value
  TYPE(C_PTR) :: f_ptr_buf                   ! C pointer to buf
  f_ptr_fill_value = C_LOC(fill_value)
  f_ptr_buf = C_LOC(buf(1))
  ! BUG FIX: fill_type_id and mem_type_id were previously passed to
  ! h5dfill_c without ever being assigned (compare h5dfill_integer /
  ! h5dfill_real, which set H5T_NATIVE_INTEGER / H5T_NATIVE_REAL).
  ! Initialize both to the native character datatype.
  fill_type_id = H5T_NATIVE_CHARACTER
  mem_type_id = H5T_NATIVE_CHARACTER
  hdferr = h5dfill_c(f_ptr_fill_value, fill_type_id, space_id, &
       f_ptr_buf, mem_type_id)
END SUBROUTINE h5dfill_char
!
!****s* H5D (F03)/h5dvlen_reclaim_f
! NAME
! h5dvlen_reclaim_f
!
! PURPOSE
! Reclaims VL datatype memory buffers.
!
! Inputs:
!
! type_id - Identifier of the datatype.
! space_id - Identifier of the dataspace.
! plist_id - Identifier of the property list used to create the buffer.
! buf - Pointer to the buffer to be reclaimed.
!
! Outputs:
! hdferr - Returns 0 if successful and -1 if fails
!
! AUTHOR
! M. Scot Breitenfeld
! January 11, 2011
!
! Fortran2003 Interface:
SUBROUTINE h5dvlen_reclaim_f(type_id, space_id, plist_id, buf, hdferr)
  USE, INTRINSIC :: ISO_C_BINDING
  IMPLICIT NONE
  INTEGER(HID_T), INTENT(IN) :: type_id   ! Identifier of the VL datatype
  INTEGER(HID_T), INTENT(IN) :: space_id  ! Identifier of the dataspace
  INTEGER(HID_T), INTENT(IN) :: plist_id  ! Property list used to create the buffer
  TYPE(C_PTR) , INTENT(INOUT) :: buf      ! Pointer to the buffer to be reclaimed
  INTEGER , INTENT(OUT) :: hdferr         ! Error code: 0 on success, -1 on failure
  !*****
  ! Interface to the C stub. The !DEC$ directives below are compiler
  ! directives required for Windows builds and must not be altered.
  INTERFACE
     INTEGER FUNCTION h5dvlen_reclaim_c(type_id, space_id, plist_id, buf)
       USE H5GLOBAL
       USE, INTRINSIC :: ISO_C_BINDING
       !DEC$IF DEFINED(HDF5F90_WINDOWS)
       !DEC$ATTRIBUTES C,reference,decorate,alias:'H5DVLEN_RECLAIM_C'::h5dvlen_reclaim_c
       !DEC$ENDIF
       INTEGER(HID_T) :: type_id
       INTEGER(HID_T) :: space_id
       INTEGER(HID_T) :: plist_id
       TYPE(C_PTR), VALUE :: buf
     END FUNCTION h5dvlen_reclaim_c
  END INTERFACE
  ! Delegate directly to the C implementation.
  hdferr = H5Dvlen_reclaim_c(type_id, space_id, plist_id, buf)
END SUBROUTINE H5Dvlen_reclaim_f
END MODULE H5D_PROVISIONAL
| {
"alphanum_fraction": 0.7078845885,
"author": null,
"avg_line_length": 41.6191268191,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "61d5dfc72fb42458e27c615d74bfc4931cab92d8",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "833d8ca015deecdfa5d0aca01211632cdaca9e58",
"max_forks_repo_licenses": [
"MIT-0"
],
"max_forks_repo_name": "pbasting/cactus",
"max_forks_repo_path": "submodules/hdf5/fortran/src/H5Dff_F03.f90",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "833d8ca015deecdfa5d0aca01211632cdaca9e58",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT-0"
],
"max_issues_repo_name": "pbasting/cactus",
"max_issues_repo_path": "submodules/hdf5/fortran/src/H5Dff_F03.f90",
"max_line_length": 94,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "833d8ca015deecdfa5d0aca01211632cdaca9e58",
"max_stars_repo_licenses": [
"MIT-0"
],
"max_stars_repo_name": "pbasting/cactus",
"max_stars_repo_path": "submodules/hdf5/fortran/src/H5Dff_F03.f90",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 30337,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 100094
} |
#!python3 ./infer.py
from time import sleep
from picamera import PiCamera
import numpy as np
import tarfile
import tempfile
import os
import timeit
import tvm
from tvm.contrib import graph_runtime as runtime
from tvm.contrib.download import download_testdata
from scipy.special import softmax

# Download and load the ImageNet class labels so inference results can be
# reported in human-readable form.
labels_url = "https://s3.amazonaws.com/onnx-model-zoo/synset.txt"
labels_path = download_testdata(labels_url, "synset.txt", module="data")
with open(labels_path, "r") as f:
    labels = [l.rstrip() for l in f]

# Open the library module that was generated by tvmc. This requires unzipping
# the file to a new location, then loading the runtime graph, the parameters,
# and the generated library code.
with tempfile.TemporaryDirectory() as tmp_dir:
    # open the library
    t = tarfile.open("resnet50-v2-7.tvm")
    #t = tarfile.open("resnet50-v2-7.tuned200.tvm")
    t.extractall(tmp_dir)
    # load the runtime graph
    graph = open(os.path.join(tmp_dir, "mod.json")).read()
    # load the model parameters
    params = bytearray(open(os.path.join(tmp_dir, "mod.params"), "rb").read())
    # load the compiled library
    lib = tvm.runtime.load_module(os.path.join(tmp_dir, "mod.so"))
    # create the runtime module from the runtime graph and library, targeting
    # the cpu
    module = runtime.create(graph, lib, tvm.context("cpu"))
    # load the model parameters into the module
    module.load_params(params)
    # NOTE(review): mod.so is dlopened before the temporary directory is
    # removed, so `module` appears usable after the `with` exits — confirm
    # on the target platform.

# Initialize the camera; since we're down-scaling we'll use a low
# resolution sensor mode.
camera = PiCamera(sensor_mode=4)
# Set the resolution to the 224x224 input size expected by ResNet.
camera.resolution = (224,224)
camera.annotate_text_size = 12
# The preview launched below in infer() is optional: it shows the image,
# sets some exposure parameters, and keeps the annotation on screen.
def infer():
    """Capture one camera frame, classify it with ResNet50, and report results.

    Reads the module-level `camera`, `module` (TVM graph runtime) and
    `labels` objects. Prints the five most likely labels and draws them on
    the camera preview annotation. Returns None.
    """
    camera.start_preview()
    camera.exposure_mode = 'auto'
    camera.iso = 800
    # Remember the annotation currently on screen so it can be redrawn
    # while this inference runs.
    previous_text = camera.annotate_text
    # Prepare a numpy buffer to capture the image in.
    # BUG FIX: the raw 'rgb' capture produces bytes in 0..255, so the
    # buffer must be unsigned. With np.int8, values above 127 wrapped to
    # negative numbers and corrupted the normalization below.
    image_data = np.empty((224, 224, 3), dtype=np.uint8)
    # The overlay becomes part of the image, so clear it before capturing.
    camera.annotate_text = ""
    # Capture the image in rgb format.
    camera.capture(image_data, "rgb")
    # Show the previous prediction over the image while inference is run.
    camera.annotate_text = previous_text
    # Convert the image to floating point values.
    image_data = image_data.astype("float32")
    # Reorder HWC -> CHW as expected by ResNet.
    image_data = np.transpose(image_data, (2, 0, 1))
    # Per-channel normalization constants expected by ResNet.
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_stddev = np.array([0.229, 0.224, 0.225])
    norm_img_data = np.zeros(image_data.shape).astype("float32")
    for i in range(image_data.shape[0]):
        norm_img_data[i, :, :] = (image_data[i, :, :] / 255 - imagenet_mean[i]) / imagenet_stddev[i]
    # Add the batch dimension, as expected by ResNet.
    input_data = np.expand_dims(norm_img_data, axis=0)
    # Set the input and run the model.
    module.set_input("data", input_data)
    module.run()
    # Get the output tensor (1000 ImageNet class scores).
    output_shape = (1, 1000)
    tvm_output = module.get_output(0, tvm.nd.empty(output_shape)).asnumpy()
    # Compute the confidence of each classification, and sort.
    scores = np.squeeze(softmax(tvm_output))
    ranks = np.argsort(scores)[::-1]
    # Output the five most likely labels.
    # BUG FIX: the original created an unused `Annotate_text = []` (case
    # typo) and never pushed the new predictions back to the camera, so
    # the preview annotation was never updated. Collect the lines and set
    # the annotation at the end.
    predictions = []
    for rank in ranks[0:5]:
        line = "%s probability=%f" % (labels[rank], scores[rank])
        predictions.append(line)
        print(line)
    print("---")
    camera.annotate_text = "\n".join(predictions)
# Benchmark loop: repeatedly time batches of 10 inferences and report the
# mean wall-clock time per batch element. Runs until interrupted.
timer = timeit.Timer(infer)
RUNS_PER_BATCH = 10
while True:
    elapsed = timer.timeit(number=RUNS_PER_BATCH)
    print("Average time for %s runs: %s" % (RUNS_PER_BATCH, elapsed / RUNS_PER_BATCH))
| {
"alphanum_fraction": 0.666743649,
"author": null,
"avg_line_length": 34.64,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "727497497893439fdac26756aab2e9691adf903d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b10f0e0ac97660933197a596e44e6429f7f89125",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "hogepodge/tvm-rpi",
"max_forks_repo_path": "infer.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b10f0e0ac97660933197a596e44e6429f7f89125",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "hogepodge/tvm-rpi",
"max_issues_repo_path": "infer.py",
"max_line_length": 104,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b10f0e0ac97660933197a596e44e6429f7f89125",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "hogepodge/tvm-rpi",
"max_stars_repo_path": "infer.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1069,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4330
} |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.filtering."""
import collections
import itertools
import re
import types
from typing import Any, Callable, Sequence, Set, Tuple
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import basic
from haiku._src import data_structures
from haiku._src import filtering
from haiku._src import test_utils
from haiku._src import transform
import jax
import jax.numpy as jnp
def jax_fn_with_filter(
    jax_fn: Callable[..., Any],
    f: Callable[..., Any],
    predicate: Callable[[str, str, jnp.ndarray], bool],
    **jax_fn_kwargs) -> Callable[..., Any]:
  """Applies a jax transformation to `f` after splitting its params argument.

  Works in two steps:
  1. `f`, which is expected to take a `Params` data structure as its first
     argument, is wrapped so its parameters arrive as a bipartition of the
     original structure.
  2. The wrapper is transformed with `jax_fn`, and the result is exposed
     again under `f`'s original signature, with `predicate` doing the
     partitioning of the incoming `Params`.

  Args:
    jax_fn: jax transformation, e.g. `jax.grad` or `jax.jacobian`.
    f: callable to be transformed.
    predicate: predicate used to partition `f`'s input parameters.
    **jax_fn_kwargs: kwargs forwarded to `jax_fn`.

  Returns:
    Function calling the jax-transformed wrapper of `f`.
  """
  def split_f(selected, rest, *args, **kwargs):
    # Reassemble the two partitions before delegating to the original f.
    return f(filtering.merge(selected, rest), *args, **kwargs)

  transformed = jax_fn(split_f, **jax_fn_kwargs)

  def apply_with_filter(p, *args, **kwargs):
    selected, rest = filtering.partition(predicate, p)
    return transformed(selected, rest, *args, **kwargs)

  return apply_with_filter
def get_net(x):
  """Two stacked 1-unit Linear layers with constant inits; returns the mean output."""
  def constant_inits(v):
    # Weights initialized to v, biases to 1.5 * v.
    return dict(
        w_init=lambda *args: v * jnp.ones((1, 1)),
        b_init=lambda *args: v * 1.5 * jnp.ones((1,)))

  h = basic.Linear(output_size=1, name="first_layer", **constant_inits(1.0))(x)
  h = basic.Linear(output_size=1, name="second_layer", **constant_inits(3.0))(h)
  return jnp.mean(h)
def get_names(params) -> Set[str]:
  """Returns the set of 'path/name' strings present in a params structure."""
  return {f"{path}/{name}"
          for path, module in params.items()
          for name in module}
def to_set(params) -> Set[Tuple[str, Sequence[float]]]:
  """Flattens a params structure into ('path/key', tuple-of-values) pairs."""
  entries = set()
  for path, module in params.items():
    for key, value in module.items():
      # Pull the value to host and flatten so it is hashable.
      flat = tuple(jax.device_get(value).flatten())
      entries.add(("/".join((path, key)), flat))
  return entries
def compile_regex(regex):
  """Compiles *regex*; a sequence of patterns is OR-joined into one pattern."""
  if isinstance(regex, str):
    return re.compile(regex)
  # Wrap each alternative in a group so alternation binds correctly.
  return re.compile("|".join(f"({r})" for r in regex))
class FilteringTest(parameterized.TestCase):
def test_partition(self):
init_fn, _ = transform.transform(get_net)
params = init_fn(jax.random.PRNGKey(428), jnp.ones((1, 1)))
# parse by layer
first_layer_params, second_layer_params = filtering.partition(
lambda module_name, *_: module_name == "first_layer",
params)
self.assertEqual(
get_names(first_layer_params),
set(["first_layer/w", "first_layer/b"]))
self.assertEqual(
get_names(second_layer_params),
set(["second_layer/w", "second_layer/b"]))
# parse by variable type
weights, biases = filtering.partition(
lambda module_name, name, _: name == "w",
params) # pytype: disable=wrong-arg-types
self.assertEqual(
get_names(weights),
set(["first_layer/w", "second_layer/w"]))
self.assertEqual(
get_names(biases),
set(["first_layer/b", "second_layer/b"]))
# Compose regexes
regex = compile_regex(["first_layer.*", ".*w"])
matching, not_matching = filtering.partition(
lambda module_name, name, _: regex.match(f"{module_name}/{name}"),
params)
self.assertEqual(
get_names(matching),
set(["first_layer/w", "first_layer/b", "second_layer/w"]))
self.assertEqual(
get_names(not_matching),
set(["second_layer/b"]))
matching, not_matching = filtering.partition(
lambda mod_name, name, _: mod_name == "first_layer" and name != "w",
params)
self.assertEqual(
get_names(matching),
set(["first_layer/b"]))
self.assertEqual(
get_names(not_matching),
set(["first_layer/w", "second_layer/w", "second_layer/b"]))
@parameterized.parameters(*range(1, 8))
def test_partition_n(self, n):
cnt = itertools.count()
fn = lambda m, n, v: next(cnt)
structure = {f"layer_{i}": {"w": None} for i in range(n)}
structures = filtering.partition_n(fn, structure, n)
self.assertLen(structures, n)
self.assertEqual(filtering.merge(*structures), structure)
for i, substructure in enumerate(structures):
expected = {f"layer_{i}": {"w": None}}
self.assertEqual(substructure, expected)
@parameterized.parameters(*range(1, 8))
def test_partition_n_merge_isomorphism(self, n):
cnt = itertools.count()
fn = lambda m, n, v: next(cnt)
input_structure = {f"layer_{i}": {"w": None} for i in range(n)}
structures = filtering.partition_n(fn, input_structure, n)
merged_structure = filtering.merge(*structures)
self.assertEqual(merged_structure, input_structure)
@parameterized.parameters(*range(1, 8))
def test_traverse(self, n):
structure = {f"layer_{i}": {"w": "wv", "b": "bv"}
for i in reversed(range(n))}
expected = []
for i in range(n):
expected.append((f"layer_{i}", "b", "bv"))
expected.append((f"layer_{i}", "w", "wv"))
actual = list(filtering.traverse(structure))
self.assertEqual(expected, actual)
def test_filter(self):
init_fn, _ = transform.transform(get_net)
params = init_fn(jax.random.PRNGKey(428), jnp.ones((1, 1)))
second_layer_params = filtering.filter(
lambda module_name, *_: module_name == "second_layer",
params)
self.assertEqual(
get_names(second_layer_params),
set(["second_layer/w", "second_layer/b"]))
biases = filtering.filter(
lambda module_name, name, _: name == "b",
params) # pytype: disable=wrong-arg-types
self.assertEqual(
get_names(biases),
set(["first_layer/b", "second_layer/b"]))
def test_transforms_with_filer(self):
# Note to make sense of test:
#
# out = (w0 + b0) * w1 + b1
# = w0 * w1 + b0 * w1 + b1
# doutdw0 = w1
# doutdw1 = w0 + b0
# with w0 = 1.0, b0 = 1.5, w1 = 3.0, b1 = 4.5
init_fn, apply_fn = transform.transform(get_net)
inputs = jnp.ones((1, 1))
params = init_fn(jax.random.PRNGKey(428), inputs)
df_fn = jax_fn_with_filter(
jax_fn=jax.grad,
f=apply_fn,
predicate=lambda module_name, name, _: name == "w")
df = df_fn(params, None, inputs)
self.assertEqual(
to_set(df),
set([("first_layer/w", (3.0,)), ("second_layer/w", (2.5,))]))
fn = jax_fn_with_filter(
jax_fn=jax.value_and_grad,
f=apply_fn,
predicate=lambda module_name, name, _: name == "w")
v = fn(params, None, inputs)
self.assertEqual(v[0], jnp.array([12.0]))
self.assertEqual(to_set(df), to_set(v[1]))
def get_stacked_net(x):
y = get_net(x)
return jnp.stack([y, 2.0 * y])
_, apply_fn = transform.transform(get_stacked_net)
jf_fn = jax_fn_with_filter(
jax_fn=jax.jacobian,
f=apply_fn,
predicate=lambda module_name, name, _: name == "w")
jf = jf_fn(params, None, inputs)
self.assertEqual(
to_set(jf),
set([("first_layer/w", (3.0, 6.0)), ("second_layer/w", (2.5, 5.0))]))
def test_map(self):
init_fn, _ = transform.transform(get_net)
params = init_fn(jax.random.PRNGKey(428), jnp.ones((1, 1)))
# parse by layer
def map_fn(module_name, name, v):
del name
if "first_layer" in module_name:
return v
else:
return 2. * v
new_params = filtering.map(map_fn, params)
self.assertLen(jax.tree_leaves(new_params), 4)
first_layer_params, second_layer_params = filtering.partition(
lambda module_name, *_: module_name == "first_layer",
params)
for mn in first_layer_params:
for n in first_layer_params[mn]:
self.assertEqual(params[mn][n], new_params[mn][n])
for mn in second_layer_params:
for n in second_layer_params[mn]:
self.assertEqual(2. * params[mn][n], new_params[mn][n])
@test_utils.with_environ("HAIKU_FLATMAPPING", None)
def test_output_type_default(self):
self.assert_output_type(data_structures.FlatMapping)
@test_utils.with_environ("HAIKU_FLATMAPPING", "0")
def test_output_type_env_var_0(self):
self.assert_output_type(dict)
@test_utils.with_environ("HAIKU_FLATMAPPING", "1")
def test_output_type_env_var_1(self):
  # HAIKU_FLATMAPPING=1 explicitly requests FlatMapping outputs.
  self.assert_output_type(data_structures.FlatMapping)
@test_utils.with_environ("HAIKU_FLATMAPPING", "0")
def test_merge_different_mappings(self):
  """filtering.merge accepts heterogeneous mapping types."""
  from_defaultdict = collections.defaultdict(dict)
  from_defaultdict["foo"]["bar"] = 1
  plain = {"foo": {"baz": 2}}
  read_only = types.MappingProxyType({"foo": {"bat": 3}})
  merged = filtering.merge(from_defaultdict, plain, read_only)
  self.assertEqual(merged, {"foo": {"bar": 1, "baz": 2, "bat": 3}})
def assert_output_type(self, out_cls):
  """Asserts every filtering op returns `out_cls` for dict and FlatMapping inputs."""

  def check_type(structure):
    # Exact type check on purpose — the container class itself must match.
    self.assertEqual(type(structure), out_cls)

  for in_cls in (dict, data_structures.FlatMapping):
    with self.subTest(str(in_cls)):
      single_a = in_cls({"m1": in_cls({"w": None})})
      single_b = in_cls({"m2": in_cls({"w": None})})
      many = in_cls({f"{i}": in_cls({"w": None}) for i in range(5)})

      check_type(filtering.filter(lambda m, n, v: True, single_a))
      check_type(filtering.map(lambda m, n, v: v, single_a))
      check_type(filtering.merge(single_a, single_b))
      for part in filtering.partition(lambda m, n, v: int(m) > 1, many):
        check_type(part)
      for part in filtering.partition_n(lambda m, n, v: int(m), many, 5):
        check_type(part)
# Script entry point: delegate to the absl test runner.
if __name__ == "__main__":
  absltest.main()
| {
"alphanum_fraction": 0.6486585478,
"author": null,
"avg_line_length": 34.5601265823,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "86d66ab3523131f07d7fcd5402670e46fc1ceb8c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c5777c12bb55c9d026967d75094e90aa74102e8d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "zhongwen/dm-haiku",
"max_forks_repo_path": "haiku/_src/filtering_test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c5777c12bb55c9d026967d75094e90aa74102e8d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "zhongwen/dm-haiku",
"max_issues_repo_path": "haiku/_src/filtering_test.py",
"max_line_length": 80,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c5777c12bb55c9d026967d75094e90aa74102e8d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "zhongwen/dm-haiku",
"max_stars_repo_path": "haiku/_src/filtering_test.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2880,
"path": null,
"reason": "import jax",
"repo": null,
"save_path": null,
"sha": null,
"size": 10921
} |
[STATEMENT]
lemma Cl_F: "Br_1 \<B> \<Longrightarrow> Br_3 \<B> \<Longrightarrow> \<forall>A. Cl(\<F> A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Br_1 \<B>; Br_3 \<B>\<rbrakk> \<Longrightarrow> \<forall>A w. \<C> (\<F> A) w = \<F> A w
[PROOF STEP]
by (metis CF_rel Cl_fr_def FB4 Fr_4_def eq_ext' join_def) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Topological_Semantics_topo_border_algebra",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 146,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import os
import csv
import numpy as np
import torch
import pandas as pd
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Compare two MNIST-generation runs: without and with the entropy loss term.
    base_path = os.path.join('results', 'mnistgen_ent')
    # Plain constants — the original used f-strings with nothing to interpolate.
    exp_names = ["mnistgen_ent1", "mnistgen_ent2"]

    # Load each experiment's training log CSV.
    dfs = []
    for exp_name in exp_names:
        results_path = os.path.join(base_path, f"results_{exp_name}")
        file_name = f"log_{exp_name}.csv"
        dfs.append(pd.read_csv(os.path.join(results_path, file_name)))
    df1, df2 = dfs

    # Overlay NLL loss and the GMM entropy lower bound for both runs.
    plt.figure()
    plt.plot(df1["epoch"], df1["nll_loss"], label="NLL Loss, no entropy loss", linestyle=":")
    plt.plot(df1["epoch"], df1["gmm_ent_lb"], label="GMM Entropy LB, no entropy loss", linestyle=":")
    plt.plot(df2["epoch"], df2["nll_loss"], label="NLL Loss, alpha=1.0", linestyle="-.")
    plt.plot(df2["epoch"], df2["gmm_ent_lb"], label="GMM Entropy LB, alpha=1.0", linestyle="-.")
    plt.legend()
    plt.show()
    print("done")
| {
"alphanum_fraction": 0.647,
"author": null,
"avg_line_length": 34.4827586207,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "207dfc0cb06d458a04c950d15aa2f4e19338a602",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3acd8683f05cf1d850fe7d0ebb4e24496050adb4",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "monoclecat/SPFlow",
"max_forks_repo_path": "experiments/plot_log_csv.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3acd8683f05cf1d850fe7d0ebb4e24496050adb4",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "monoclecat/SPFlow",
"max_issues_repo_path": "experiments/plot_log_csv.py",
"max_line_length": 101,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3acd8683f05cf1d850fe7d0ebb4e24496050adb4",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "monoclecat/SPFlow",
"max_stars_repo_path": "experiments/plot_log_csv.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 289,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1000
} |
import numpy as np
from rdkit.Chem import Mol
from rdkit.Chem import DataStructs
from rdkit.Chem import rdFingerprintGenerator
import rdkit.Chem.Descriptors as Desc
def _fingerprint_fn_bits(generator):
    """Wraps an RDKit fingerprint generator into a mol -> bit-array function."""
    def _fp(mol: Mol):
        bit_vector = generator.GetFingerprint(mol)
        # ConvertToNumpyArray fills the array in place from the bit vector.
        out = np.zeros((0,), dtype=np.int8)
        DataStructs.ConvertToNumpyArray(bit_vector, out)
        return out
    return _fp
def _fingerprint_fn_count(generator):
    """Wraps an RDKit fingerprint generator into a mol -> count-array function."""
    def _fp(mol: Mol):
        count_vector = generator.GetCountFingerprint(mol)
        # ConvertToNumpyArray fills the array in place from the count vector.
        out = np.zeros((0,), dtype=np.int8)
        DataStructs.ConvertToNumpyArray(count_vector, out)
        return out
    return _fp
def all_rdkit(exception_list=None):
    """Gets a list of all RDKit descriptors.

    This will return a set of features for all descriptors in the
    rdkit.Chem.Descriptors.descList property. To allow repeatability, this
    list is hardcoded from RDKit 2019.09.3.

    Args:
        exception_list: Optional iterable of descriptor names to exclude.
            Defaults to excluding nothing.

    Returns:
        A list of feature functions, one per non-excluded descriptor.
    """
    # None sentinel instead of a mutable `[]` default (shared across calls);
    # a set makes the membership test in the comprehension O(1).
    exceptions = set(exception_list) if exception_list is not None else set()
    descriptors_2019_09_3 = [
        'MaxEStateIndex', 'MinEStateIndex', 'MaxAbsEStateIndex',
        'MinAbsEStateIndex', 'qed', 'MolWt', 'HeavyAtomMolWt', 'ExactMolWt',
        'NumValenceElectrons', 'NumRadicalElectrons', 'MaxPartialCharge',
        'MinPartialCharge', 'MaxAbsPartialCharge', 'MinAbsPartialCharge',
        'FpDensityMorgan1', 'FpDensityMorgan2', 'FpDensityMorgan3', 'BalabanJ',
        'BertzCT', 'Chi0', 'Chi0n', 'Chi0v', 'Chi1', 'Chi1n', 'Chi1v', 'Chi2n',
        'Chi2v', 'Chi3n', 'Chi3v', 'Chi4n', 'Chi4v', 'HallKierAlpha', 'Ipc',
        'Kappa1', 'Kappa2', 'Kappa3', 'LabuteASA', 'PEOE_VSA1', 'PEOE_VSA10',
        'PEOE_VSA11', 'PEOE_VSA12', 'PEOE_VSA13', 'PEOE_VSA14', 'PEOE_VSA2',
        'PEOE_VSA3', 'PEOE_VSA4', 'PEOE_VSA5', 'PEOE_VSA6', 'PEOE_VSA7',
        'PEOE_VSA8', 'PEOE_VSA9', 'SMR_VSA1', 'SMR_VSA10', 'SMR_VSA2',
        'SMR_VSA3', 'SMR_VSA4', 'SMR_VSA5', 'SMR_VSA6', 'SMR_VSA7', 'SMR_VSA8',
        'SMR_VSA9', 'SlogP_VSA1', 'SlogP_VSA10', 'SlogP_VSA11', 'SlogP_VSA12',
        'SlogP_VSA2', 'SlogP_VSA3', 'SlogP_VSA4', 'SlogP_VSA5', 'SlogP_VSA6',
        'SlogP_VSA7', 'SlogP_VSA8', 'SlogP_VSA9', 'TPSA', 'EState_VSA1',
        'EState_VSA10', 'EState_VSA11', 'EState_VSA2', 'EState_VSA3',
        'EState_VSA4', 'EState_VSA5', 'EState_VSA6', 'EState_VSA7',
        'EState_VSA8', 'EState_VSA9', 'VSA_EState1', 'VSA_EState10',
        'VSA_EState2', 'VSA_EState3', 'VSA_EState4', 'VSA_EState5',
        'VSA_EState6', 'VSA_EState7', 'VSA_EState8', 'VSA_EState9',
        'FractionCSP3', 'HeavyAtomCount', 'NHOHCount', 'NOCount',
        'NumAliphaticCarbocycles', 'NumAliphaticHeterocycles',
        'NumAliphaticRings', 'NumAromaticCarbocycles',
        'NumAromaticHeterocycles', 'NumAromaticRings', 'NumHAcceptors',
        'NumHDonors', 'NumHeteroatoms', 'NumRotatableBonds',
        'NumSaturatedCarbocycles', 'NumSaturatedHeterocycles',
        'NumSaturatedRings', 'RingCount', 'MolLogP', 'MolMR', 'fr_Al_COO',
        'fr_Al_OH', 'fr_Al_OH_noTert', 'fr_ArN', 'fr_Ar_COO', 'fr_Ar_N',
        'fr_Ar_NH', 'fr_Ar_OH', 'fr_COO', 'fr_COO2', 'fr_C_O', 'fr_C_O_noCOO',
        'fr_C_S', 'fr_HOCCN', 'fr_Imine', 'fr_NH0', 'fr_NH1', 'fr_NH2',
        'fr_N_O', 'fr_Ndealkylation1', 'fr_Ndealkylation2', 'fr_Nhpyrrole',
        'fr_SH', 'fr_aldehyde', 'fr_alkyl_carbamate', 'fr_alkyl_halide',
        'fr_allylic_oxid', 'fr_amide', 'fr_amidine', 'fr_aniline',
        'fr_aryl_methyl', 'fr_azide', 'fr_azo', 'fr_barbitur', 'fr_benzene',
        'fr_benzodiazepine', 'fr_bicyclic', 'fr_diazo', 'fr_dihydropyridine',
        'fr_epoxide', 'fr_ester', 'fr_ether', 'fr_furan', 'fr_guanido',
        'fr_halogen', 'fr_hdrzine', 'fr_hdrzone', 'fr_imidazole', 'fr_imide',
        'fr_isocyan', 'fr_isothiocyan', 'fr_ketone', 'fr_ketone_Topliss',
        'fr_lactam', 'fr_lactone', 'fr_methoxy', 'fr_morpholine', 'fr_nitrile',
        'fr_nitro', 'fr_nitro_arom', 'fr_nitro_arom_nonortho', 'fr_nitroso',
        'fr_oxazole', 'fr_oxime', 'fr_para_hydroxylation', 'fr_phenol',
        'fr_phenol_noOrthoHbond', 'fr_phos_acid', 'fr_phos_ester',
        'fr_piperdine', 'fr_piperzine', 'fr_priamide', 'fr_prisulfonamd',
        'fr_pyridine', 'fr_quatN', 'fr_sulfide', 'fr_sulfonamd', 'fr_sulfone',
        'fr_term_acetylene', 'fr_tetrazole', 'fr_thiazole', 'fr_thiocyan',
        'fr_thiophene', 'fr_unbrch_alkane', 'fr_urea']
    return [rdkit(descriptor) for descriptor in descriptors_2019_09_3
            if descriptor not in exceptions]
def fingerprint_atompair(fpSize=2048, count=False):
    """Atom pair fingerprint (list of int).

    Args:
        fpSize: Size of the generated fingerprint (defaults to 2048).
        count: The default value of False will generate fingerprint bits
            (0 or 1) whereas a value of True will generate the count of each
            fingerprint value.
    """
    generator = rdFingerprintGenerator.GetAtomPairGenerator(fpSize=fpSize)
    # Pick the count- or bit-based wrapper, then label it for reproducibility.
    wrap = _fingerprint_fn_count if count else _fingerprint_fn_bits
    fingerprint_fn = wrap(generator)
    fingerprint_fn.__name__ = f'fingerprint_atompair(fpSize={fpSize},count={count})'
    return fingerprint_fn
def fingerprint_morgan(radius, fpSize=2048, count=False):
    """Morgan fingerprint of the specified size (list of int).

    Args:
        radius: The number of iterations to grow the fingerprint.
        fpSize: Size of the generated fingerprint (defaults to 2048).
        count: The default value of False will generate fingerprint bits
            (0 or 1) whereas a value of True will generate the count of each
            fingerprint value.
    """
    generator = rdFingerprintGenerator.GetMorganGenerator(radius=radius,
                                                          fpSize=fpSize)
    # Pick the count- or bit-based wrapper, then label it for reproducibility.
    wrap = _fingerprint_fn_count if count else _fingerprint_fn_bits
    fingerprint_fn = wrap(generator)
    fingerprint_fn.__name__ = f'fingerprint_morgan(radius={radius},fpSize={fpSize},count={count})'
    return fingerprint_fn
def logp(mol: Mol) -> float:
    """LogP of the molecule, as computed by ``Desc.MolLogP`` (float)."""
    return Desc.MolLogP(mol)
def molwt(mol: Mol) -> float:
    """Molecular weight, as computed by ``Desc.MolWt`` (float)."""
    return Desc.MolWt(mol)
def num_atoms(mol: Mol) -> int:
    """Total atom count of the molecule (int)."""
    return mol.GetNumAtoms()
def num_bonds(mol: Mol) -> int:
    """Total bond count of the molecule (int)."""
    return mol.GetNumBonds()
def num_h_donors(mol: Mol) -> int:
    """Hydrogen bond donor count, via ``Desc.NumHDonors`` (int)."""
    return Desc.NumHDonors(mol)
def num_h_acceptors(mol: Mol) -> int:
    """Hydrogen bond acceptor count, via ``Desc.NumHAcceptors`` (int)."""
    return Desc.NumHAcceptors(mol)
def num_heavy_atoms(mol: Mol) -> int:
    """Heavy (non-hydrogen) atom count, via ``Desc.HeavyAtomCount`` (int)."""
    return Desc.HeavyAtomCount(mol)
def rdkit(name: str):
    """Feature from a specified RDKit descriptor.

    The descriptor should be the name for the corresponding descriptor
    in rdkit.Chem.Descriptors.

    Args:
        name: The name of the descriptor.
    """
    descriptor_fn = getattr(Desc, name)

    def _rdkit(mol: Mol):
        return descriptor_fn(mol)

    # Label the closure so feature lists identify which descriptor it wraps.
    _rdkit.__name__ = f'rdkit({name})'
    return _rdkit
def tpsa(mol: Mol) -> float:
    """Topological polar surface area, via ``Desc.TPSA`` (float)."""
    return Desc.TPSA(mol)
| {
"alphanum_fraction": 0.6535433071,
"author": null,
"avg_line_length": 37.7743589744,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7b54b4dc903d833c3e25018b3a742feccf83023e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "83efc7ea66d2def860a3e04ccd70d77fb689fddc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Andy-Wilkinson/ChemMLToolkit",
"max_forks_repo_path": "chemmltoolkit/features/moleculeFeatures.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "83efc7ea66d2def860a3e04ccd70d77fb689fddc",
"max_issues_repo_issues_event_max_datetime": "2021-11-28T21:09:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-11-28T21:09:30.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Andy-Wilkinson/ChemMLToolk",
"max_issues_repo_path": "chemmltoolkit/features/moleculeFeatures.py",
"max_line_length": 79,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "83efc7ea66d2def860a3e04ccd70d77fb689fddc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Andy-Wilkinson/ChemMLToolk",
"max_stars_repo_path": "chemmltoolkit/features/moleculeFeatures.py",
"max_stars_repo_stars_event_max_datetime": "2019-10-30T03:43:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-10-30T03:43:24.000Z",
"num_tokens": 2353,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7366
} |
//
// asio.hpp
// ~~~~~~~~
//
// Copyright (c) 2003-2017 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See www.boost.org/libs/asio for documentation.
//
#ifndef BOOST_ASIO_HPP
#define BOOST_ASIO_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include <boost/asio/associated_allocator.hpp>
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/async_result.hpp>
#include <boost/asio/basic_datagram_socket.hpp>
#include <boost/asio/basic_deadline_timer.hpp>
#include <boost/asio/basic_io_object.hpp>
#include <boost/asio/basic_raw_socket.hpp>
#include <boost/asio/basic_seq_packet_socket.hpp>
#include <boost/asio/basic_serial_port.hpp>
#include <boost/asio/basic_signal_set.hpp>
#include <boost/asio/basic_socket_acceptor.hpp>
#include <boost/asio/basic_socket_iostream.hpp>
#include <boost/asio/basic_socket_streambuf.hpp>
#include <boost/asio/basic_stream_socket.hpp>
#include <boost/asio/basic_streambuf.hpp>
#include <boost/asio/basic_waitable_timer.hpp>
#include <boost/asio/bind_executor.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/buffered_read_stream_fwd.hpp>
#include <boost/asio/buffered_read_stream.hpp>
#include <boost/asio/buffered_stream_fwd.hpp>
#include <boost/asio/buffered_stream.hpp>
#include <boost/asio/buffered_write_stream_fwd.hpp>
#include <boost/asio/buffered_write_stream.hpp>
#include <boost/asio/buffers_iterator.hpp>
#include <boost/asio/completion_condition.hpp>
#include <boost/asio/connect.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/datagram_socket_service.hpp>
#include <boost/asio/deadline_timer_service.hpp>
#include <boost/asio/deadline_timer.hpp>
#include <boost/asio/defer.hpp>
#include <boost/asio/dispatch.hpp>
#include <boost/asio/error.hpp>
#include <boost/asio/execution_context.hpp>
#include <boost/asio/executor.hpp>
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/generic/basic_endpoint.hpp>
#include <boost/asio/generic/datagram_protocol.hpp>
#include <boost/asio/generic/raw_protocol.hpp>
#include <boost/asio/generic/seq_packet_protocol.hpp>
#include <boost/asio/generic/stream_protocol.hpp>
#include <boost/asio/handler_alloc_hook.hpp>
#include <boost/asio/handler_continuation_hook.hpp>
#include <boost/asio/handler_invoke_hook.hpp>
#include <boost/asio/handler_type.hpp>
#include <boost/asio/high_resolution_timer.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_context_strand.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/io_service_strand.hpp>
#include <boost/asio/ip/address.hpp>
#include <boost/asio/ip/address_v4.hpp>
#include <boost/asio/ip/address_v4_iterator.hpp>
#include <boost/asio/ip/address_v4_range.hpp>
#include <boost/asio/ip/address_v6.hpp>
#include <boost/asio/ip/address_v6_iterator.hpp>
#include <boost/asio/ip/address_v6_range.hpp>
#include <boost/asio/ip/bad_address_cast.hpp>
#include <boost/asio/ip/basic_endpoint.hpp>
#include <boost/asio/ip/basic_resolver.hpp>
#include <boost/asio/ip/basic_resolver_entry.hpp>
#include <boost/asio/ip/basic_resolver_iterator.hpp>
#include <boost/asio/ip/basic_resolver_query.hpp>
#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/ip/icmp.hpp>
#include <boost/asio/ip/multicast.hpp>
#include <boost/asio/ip/resolver_base.hpp>
#include <boost/asio/ip/resolver_query_base.hpp>
#include <boost/asio/ip/resolver_service.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ip/udp.hpp>
#include <boost/asio/ip/unicast.hpp>
#include <boost/asio/ip/v6_only.hpp>
#include <boost/asio/is_executor.hpp>
#include <boost/asio/is_read_buffered.hpp>
#include <boost/asio/is_write_buffered.hpp>
#include <boost/asio/local/basic_endpoint.hpp>
#include <boost/asio/local/connect_pair.hpp>
#include <boost/asio/local/datagram_protocol.hpp>
#include <boost/asio/local/stream_protocol.hpp>
#include <boost/asio/packaged_task.hpp>
#include <boost/asio/placeholders.hpp>
#include <boost/asio/posix/basic_descriptor.hpp>
#include <boost/asio/posix/basic_stream_descriptor.hpp>
#include <boost/asio/posix/descriptor.hpp>
#include <boost/asio/posix/descriptor_base.hpp>
#include <boost/asio/posix/stream_descriptor.hpp>
#include <boost/asio/posix/stream_descriptor_service.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/raw_socket_service.hpp>
#include <boost/asio/read.hpp>
#include <boost/asio/read_at.hpp>
#include <boost/asio/read_until.hpp>
#include <boost/asio/seq_packet_socket_service.hpp>
#include <boost/asio/serial_port.hpp>
#include <boost/asio/serial_port_base.hpp>
#include <boost/asio/serial_port_service.hpp>
#include <boost/asio/signal_set.hpp>
#include <boost/asio/signal_set_service.hpp>
#include <boost/asio/socket_acceptor_service.hpp>
#include <boost/asio/socket_base.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/stream_socket_service.hpp>
#include <boost/asio/streambuf.hpp>
#include <boost/asio/system_context.hpp>
#include <boost/asio/system_executor.hpp>
#include <boost/asio/system_timer.hpp>
#include <boost/asio/thread_pool.hpp>
#include <boost/asio/time_traits.hpp>
#include <boost/asio/use_future.hpp>
#include <boost/asio/uses_executor.hpp>
#include <boost/asio/version.hpp>
#include <boost/asio/wait_traits.hpp>
#include <boost/asio/waitable_timer_service.hpp>
#include <boost/asio/windows/basic_handle.hpp>
#include <boost/asio/windows/basic_object_handle.hpp>
#include <boost/asio/windows/basic_random_access_handle.hpp>
#include <boost/asio/windows/basic_stream_handle.hpp>
#include <boost/asio/windows/object_handle.hpp>
#include <boost/asio/windows/object_handle_service.hpp>
#include <boost/asio/windows/overlapped_handle.hpp>
#include <boost/asio/windows/overlapped_ptr.hpp>
#include <boost/asio/windows/random_access_handle.hpp>
#include <boost/asio/windows/random_access_handle_service.hpp>
#include <boost/asio/windows/stream_handle.hpp>
#include <boost/asio/windows/stream_handle_service.hpp>
#include <boost/asio/write.hpp>
#include <boost/asio/write_at.hpp>
#endif // BOOST_ASIO_HPP
| {
"alphanum_fraction": 0.8005455712,
"author": null,
"avg_line_length": 41,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "c8179c7ec074c5dd51c8ca17cb48db699f07f1df",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 122,
"max_forks_repo_forks_event_max_datetime": "2022-02-22T14:25:49.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-12-22T17:38:09.000Z",
"max_forks_repo_head_hexsha": "ae78ae582c6132964b7ef838a74cda9c075e74dc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "fictheader/fcolorwheel",
"max_forks_repo_path": "tether/boost/asio.hpp",
"max_issues_count": 203,
"max_issues_repo_head_hexsha": "ae78ae582c6132964b7ef838a74cda9c075e74dc",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T20:46:55.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-12-27T12:09:03.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "fictheader/fcolorwheel",
"max_issues_repo_path": "tether/boost/asio.hpp",
"max_line_length": 79,
"max_stars_count": 918,
"max_stars_repo_head_hexsha": "ae78ae582c6132964b7ef838a74cda9c075e74dc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "fictheader/fcolorwheel",
"max_stars_repo_path": "tether/boost/asio.hpp",
"max_stars_repo_stars_event_max_datetime": "2022-03-22T06:21:35.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-12-22T02:53:08.000Z",
"num_tokens": 1512,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 6232
} |
#importing required modules
import pandas as pd
import numpy as np
#function to create or check required files
def create_file():
    """Create the expense/income CSV files if they do not already exist.

    A missing file is seeded with one row per fiscal month (Apr..Mar) and
    NaN in every category column; existing files are left untouched.
    """
    months = ["Apr", "May", "Jun", "Jul", "Aug", "Sep",
              "Oct", "Nov", "Dec", "Jan", "Feb", "Mar"]
    expense_columns = ['Purchase', 'Electricity', 'Telecom', 'Rent',
                       'Interest', 'Salary/Wages', 'Maintenance', 'Tax',
                       'Advertisement', 'Insurance', 'Other']
    income_columns = ['Sales', 'Interest_inc', 'Rent_inc',
                      'Bad_Debts_Recovered', 'Commission', 'Other_inc']
    # EAFP: a readable file means it already exists; otherwise seed it.
    # np.nan replaces np.NaN — the NaN alias was removed in NumPy 2.0.
    try:
        pd.read_csv('ent_expense.csv')
    except FileNotFoundError:
        exp = pd.DataFrame({col: np.nan for col in expense_columns},
                           index=months)
        exp.to_csv('ent_expense.csv')
    try:
        pd.read_csv('ent_income.csv')
    except FileNotFoundError:
        inc = pd.DataFrame({col: np.nan for col in income_columns},
                           index=months)
        inc.to_csv("ent_income.csv")
# When run as a script, ensure the CSV data files exist.
if __name__ == '__main__':
    create_file()
"alphanum_fraction": 0.6472868217,
"author": null,
"avg_line_length": 49.1428571429,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c4c5460d7718979818551e2c0597f6f2db2095de",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "10e813579d7b61f61f736f0bc3c434184e9045e5",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "SirSnehGupta/MonTra",
"max_forks_repo_path": "ent_csv_creation.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "10e813579d7b61f61f736f0bc3c434184e9045e5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "SirSnehGupta/MonTra",
"max_issues_repo_path": "ent_csv_creation.py",
"max_line_length": 323,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "1a05ea0fadb10faca80da613ced42bef6af64ffb",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "SirSnehGupta/I.P.-Project",
"max_stars_repo_path": "ent_csv_creation.py",
"max_stars_repo_stars_event_max_datetime": "2020-12-09T12:00:48.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-12-09T12:00:48.000Z",
"num_tokens": 270,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1032
} |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import os
import numpy as np
import torch
from core.evaluate import accuracy
from core.inference import get_final_preds
from core.inference import get_max_preds
from utils.transforms import flip_back
from utils.vis import save_debug_images
from utils.utils import get_network_grad_flow
# from utils.vis import save_pretty_debug_images as save_debug_images
logger = logging.getLogger(__name__)
# --------------------------------------------------------------------------------
def train_lambda_012(config, train_loader, model, criterion_lambda, criterion, optimizer, epoch,
                     output_dir, tb_log_dir, writer_dict, print_prefix=''):
    """Trains `model` for one epoch on batches carrying three annotation sets.

    Each batch provides three ground-truth target sets (a, b, c). The model is
    run three times per batch, once per 2-dim binary lambda conditioning
    vector ([0,0], [1,0], [0,1]), and a separate optimizer step is taken after
    each of the three losses.

    Args:
        config: experiment config; only PRINT_FREQ is read here.
        train_loader: yields (input, target/weight/meta for a, b and c).
        model: network taking (input, lambda_vec); trained in place.
        criterion_lambda: per-sample loss (reduced with .mean() below).
        criterion: unused in this function.
        optimizer: stepped three times per batch.
        epoch: current epoch index (used for logging/filenames only).
        output_dir: directory where debug images are written.
        tb_log_dir: unused in this function.
        writer_dict: holds 'writer' and 'train_global_steps' for TensorBoard.
        print_prefix: tag appended to debug-image filenames.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()
    model_grads = AverageMeter()
    diversity_losses = AverageMeter()  # NOTE(review): never updated below
    pose_losses = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target_a, target_weight_a, meta_a, target_b, target_weight_b, meta_b, target_c, target_weight_c, meta_c) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        B, C, H, W = input.shape
        ##--- 0s and 1s--------
        # Build the three B x 2 binary conditioning vectors.
        # `lambda_val` is only a readability marker here; it is never read.
        lambda_val = 0 ##binary dim0: 0, dim1: 0
        lambda_vec_zero = torch.zeros(B, 2).cuda()
        lambda_val = 1 ##binary dim0: 1, dim1: 0
        lambda_vec_one = torch.zeros(B, 2).cuda()
        lambda_vec_one[:, 0] += 1
        lambda_val = 2 ##binary dim0: 0, dim1: 1
        lambda_vec_two = torch.zeros(B, 2).cuda()
        lambda_vec_two[:, 1] += 1
        # lambda_val = torch.cat([torch.zeros(B), torch.zeros(B)+1, torch.zeros(B)+2], dim=0) ### 3B x 2
        # lambda_vec = torch.cat([lambda_vec_zero, lambda_vec_one, lambda_vec_two], dim=0) ### 3B x 2
        # --------------duplicate-----------------------------
        # num_candidates = 3
        # input = torch.cat([input]*num_candidates, dim=0)
        # target_a = torch.cat([target_a]*num_candidates, dim=0)
        # target_weight_a = torch.cat([target_weight_a]*num_candidates, dim=0)
        # meta_a['joints'] = torch.cat([meta_a['joints']]*num_candidates, dim=0)
        # meta_a['joints_vis'] = torch.cat([meta_a['joints_vis']]*num_candidates, dim=0)
        # target_b = torch.cat([target_b]*num_candidates, dim=0)
        # target_weight_b = torch.cat([target_weight_b]*num_candidates, dim=0)
        # meta_b['joints'] = torch.cat([meta_b['joints']]*num_candidates, dim=0)
        # meta_b['joints_vis'] = torch.cat([meta_b['joints_vis']]*num_candidates, dim=0)
        # target_c = torch.cat([target_c]*num_candidates, dim=0)
        # target_weight_c = torch.cat([target_weight_c]*num_candidates, dim=0)
        # meta_c['joints'] = torch.cat([meta_c['joints']]*num_candidates, dim=0)
        # meta_c['joints_vis'] = torch.cat([meta_c['joints_vis']]*num_candidates, dim=0)
        # # --------------------------------
        # # compute output
        # outputs = model(input, lambda_vec)
        # target_a = target_a.cuda(non_blocking=True)
        # target_weight_a = target_weight_a.cuda(non_blocking=True)
        # target_b = target_b.cuda(non_blocking=True)
        # target_weight_b = target_weight_b.cuda(non_blocking=True)
        # target_c = target_c.cuda(non_blocking=True)
        # target_weight_c = target_weight_c.cuda(non_blocking=True)
        # output = outputs
        # start_idx = 0; end_idx = start_idx + B
        # loss_a_lambda = criterion_lambda(output[start_idx:end_idx], target_a, target_weight_a) ##size = B
        # start_idx = B; end_idx = start_idx + B
        # loss_b_lambda = criterion_lambda(output[start_idx:end_idx], target_b, target_weight_b) ##size = B
        # start_idx = 2*B; end_idx = start_idx + B
        # loss_c_lambda = criterion_lambda(output[start_idx:end_idx], target_c, target_weight_c) ##size = B
        # pose_loss = loss_a_lambda.mean() + loss_b_lambda.mean() + loss_c_lambda.mean()
        # loss = pose_loss
        # # compute gradient and do update step
        # optimizer.zero_grad()
        # loss.backward()
        # optimizer.step()
        # # --------------------------------
        target_a = target_a.cuda(non_blocking=True)
        target_weight_a = target_weight_a.cuda(non_blocking=True)
        target_b = target_b.cuda(non_blocking=True)
        target_weight_b = target_weight_b.cuda(non_blocking=True)
        target_c = target_c.cuda(non_blocking=True)
        target_weight_c = target_weight_c.cuda(non_blocking=True)
        # --------------------------------
        # compute output
        # One forward/backward/optimizer-step per annotation set.
        outputs_zero = model(input, lambda_vec_zero)
        loss_a_lambda = criterion_lambda(outputs_zero, target_a, target_weight_a) ##size = B
        loss_a = loss_a_lambda.mean()
        optimizer.zero_grad()
        loss_a.backward()
        optimizer.step()
        # --------------------------------
        outputs_one = model(input, lambda_vec_one)
        loss_b_lambda = criterion_lambda(outputs_one, target_b, target_weight_b) ##size = B
        loss_b = loss_b_lambda.mean()
        optimizer.zero_grad()
        loss_b.backward()
        optimizer.step()
        # --------------------------------
        outputs_two = model(input, lambda_vec_two)
        loss_c_lambda = criterion_lambda(outputs_two, target_c, target_weight_c) ##size = B
        loss_c = loss_c_lambda.mean()
        optimizer.zero_grad()
        loss_c.backward()
        optimizer.step()
        # --------------------------------
        # Stack the three forward passes (3B along dim 0) for accuracy/vis.
        output = torch.cat([outputs_zero, outputs_one, outputs_two], dim=0)
        loss = loss_a + loss_b + loss_c
        pose_loss = loss
        model_grad = get_network_grad_flow(model)
        model_grads.update(model_grad)
        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        pose_losses.update(pose_loss.item(), input.size(0))
        start_idx = 0; end_idx = start_idx + B
        _, avg_acc_a, cnt_a, pred_a = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_a.detach().cpu().numpy())
        start_idx = B; end_idx = start_idx + B
        _, avg_acc_b, cnt_b, pred_b = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_b.detach().cpu().numpy())
        start_idx = 2*B; end_idx = start_idx + B
        _, avg_acc_c, cnt_c, pred_c = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_c.detach().cpu().numpy())
        # Only annotation set `a` feeds the running accuracy meter.
        acc.update(avg_acc_a, cnt_a)
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        msg = 'Epoch: [{0}][{1}/{2}]\t' \
              'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
              'Speed {speed:.1f} samples/s\t' \
              'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
              'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
              'Accuracy {acc.val:.3f} ({acc.avg:.3f})\t' \
              'model_grad {model_grad.val:.6f} ({model_grad.avg:.6f})\t' \
              'PoseLoss {pose_loss.val:.5f} ({pose_loss.avg:.5f})\t'.format(
                  epoch, i, len(train_loader), batch_time=batch_time,
                  speed=input.size(0)/batch_time.val,
                  data_time=data_time, loss=losses, acc=acc,
                  model_grad=model_grads,
                  pose_loss=pose_losses)
        logger.info(msg)
        if i % config.PRINT_FREQ == 0:
            save_size = min(16, B)
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss', losses.val, global_steps)
            writer.add_scalar('train_acc', acc.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1
            meta_a['pred_joints_vis'] = torch.ones_like(meta_a['joints_vis'])
            meta_b['pred_joints_vis'] = torch.ones_like(meta_b['joints_vis'])
            meta_c['pred_joints_vis'] = torch.ones_like(meta_c['joints_vis'])
            prefix = '{}_epoch_{:09d}_iter_{}_{}'.format(os.path.join(output_dir, 'train'), epoch, i, print_prefix)
            # Channel index [2,1,0] reverses channel order (BGR<->RGB for vis);
            # pred*4 presumably maps heatmap coords to input scale — confirm.
            start_idx = 0; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_a, target_a[:save_size], (pred_a*4)[:save_size], output[start_idx:end_idx], prefix, suffix='a')
            start_idx = B; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_b, target_b[:save_size], (pred_b*4)[:save_size], output[start_idx:end_idx], prefix, suffix='b')
            start_idx = 2*B; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_c, target_c[:save_size], (pred_c*4)[:save_size], output[start_idx:end_idx], prefix, suffix='c')
    return
# --------------------------------------------------------------------------------
def train_lambda_0123(config, train_loader, model, criterion_lambda, criterion, optimizer, epoch,
                      output_dir, tb_log_dir, writer_dict, print_prefix=''):
    """Train one epoch with four binary lambda conditionings.

    Every batch is forwarded four times with a 2-dim lambda vector set to
    (0,0), (1,0), (0,1) and (1,1) respectively; targets a/b/c/d are the
    ground truths matching those four settings.  The optimizer is stepped
    after each individual forward (four sequential updates per batch, not
    one joint loss).

    Args:
        config: experiment config (PRINT_FREQ is read here).
        train_loader: yields (input, target/weight/meta for a, b, c and d).
        model: network called as model(input, lambda_vec).
        criterion_lambda: per-sample loss; returns a tensor of size B.
        criterion: unused; kept for signature compatibility with the
            sibling train_* functions.
        optimizer: optimizer stepped once per lambda setting.
        epoch: current epoch index (used for logging / file names only).
        output_dir: directory that receives debug images.
        tb_log_dir: unused; kept for signature compatibility.
        writer_dict: holds the tensorboard writer and its global step.
        print_prefix: suffix appended to debug-image file names.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()
    model_grads = AverageMeter()
    pose_losses = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target_a, target_weight_a, meta_a, target_b, target_weight_b, meta_b, target_c, target_weight_c, meta_c, target_d, target_weight_d, meta_d) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        B, C, H, W = input.shape
        # Build the four binary lambda vectors:
        #   zero  -> (0, 0), one -> (1, 0), two -> (0, 1), three -> (1, 1)
        lambda_vec_zero = torch.zeros(B, 2).cuda()
        lambda_vec_one = torch.zeros(B, 2).cuda()
        lambda_vec_one[:, 0] += 1
        lambda_vec_two = torch.zeros(B, 2).cuda()
        lambda_vec_two[:, 1] += 1
        lambda_vec_three = torch.zeros(B, 2).cuda()
        lambda_vec_three[:, 0] += 1
        lambda_vec_three[:, 1] += 1
        # --------------------------------
        target_a = target_a.cuda(non_blocking=True)
        target_weight_a = target_weight_a.cuda(non_blocking=True)
        target_b = target_b.cuda(non_blocking=True)
        target_weight_b = target_weight_b.cuda(non_blocking=True)
        target_c = target_c.cuda(non_blocking=True)
        target_weight_c = target_weight_c.cuda(non_blocking=True)
        target_d = target_d.cuda(non_blocking=True)
        target_weight_d = target_weight_d.cuda(non_blocking=True)
        # --------------------------------
        # compute output and update per lambda setting (sequential steps)
        outputs_zero = model(input, lambda_vec_zero)
        loss_a_lambda = criterion_lambda(outputs_zero, target_a, target_weight_a) ##size = B
        loss_a = loss_a_lambda.mean()
        optimizer.zero_grad()
        loss_a.backward()
        optimizer.step()
        # --------------------------------
        outputs_one = model(input, lambda_vec_one)
        loss_b_lambda = criterion_lambda(outputs_one, target_b, target_weight_b) ##size = B
        loss_b = loss_b_lambda.mean()
        optimizer.zero_grad()
        loss_b.backward()
        optimizer.step()
        # --------------------------------
        outputs_two = model(input, lambda_vec_two)
        loss_c_lambda = criterion_lambda(outputs_two, target_c, target_weight_c) ##size = B
        loss_c = loss_c_lambda.mean()
        optimizer.zero_grad()
        loss_c.backward()
        optimizer.step()
        # --------------------------------
        outputs_three = model(input, lambda_vec_three)
        loss_d_lambda = criterion_lambda(outputs_three, target_d, target_weight_d) ##size = B
        loss_d = loss_d_lambda.mean()
        optimizer.zero_grad()
        loss_d.backward()
        optimizer.step()
        # --------------------------------
        # stack the four heads along the batch dim: [4*B, ...]
        output = torch.cat([outputs_zero, outputs_one, outputs_two, outputs_three], dim=0)
        loss = loss_a + loss_b + loss_c + loss_d
        pose_loss = loss
        model_grad = get_network_grad_flow(model)
        model_grads.update(model_grad)
        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        pose_losses.update(pose_loss.item(), input.size(0))
        start_idx = 0; end_idx = start_idx + B
        _, avg_acc_a, cnt_a, pred_a = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_a.detach().cpu().numpy())
        start_idx = B; end_idx = start_idx + B
        _, avg_acc_b, cnt_b, pred_b = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_b.detach().cpu().numpy())
        start_idx = 2*B; end_idx = start_idx + B
        _, avg_acc_c, cnt_c, pred_c = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_c.detach().cpu().numpy())
        start_idx = 3*B; end_idx = start_idx + B
        _, avg_acc_d, cnt_d, pred_d = accuracy(output[start_idx:end_idx].detach().cpu().numpy(),
                                               target_d.detach().cpu().numpy())
        # NOTE: only the (0,0)-lambda accuracy feeds the running meter
        acc.update(avg_acc_a, cnt_a)
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        msg = 'Epoch: [{0}][{1}/{2}]\t' \
              'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
              'Speed {speed:.1f} samples/s\t' \
              'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
              'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
              'Accuracy {acc.val:.3f} ({acc.avg:.3f})\t' \
              'model_grad {model_grad.val:.6f} ({model_grad.avg:.6f})\t' \
              'PoseLoss {pose_loss.val:.5f} ({pose_loss.avg:.5f})\t'.format(
                  epoch, i, len(train_loader), batch_time=batch_time,
                  speed=input.size(0)/batch_time.val,
                  data_time=data_time, loss=losses, acc=acc,
                  model_grad=model_grads,
                  pose_loss=pose_losses)
        logger.info(msg)
        if i % config.PRINT_FREQ == 0:
            save_size = min(16, B)
            writer = writer_dict['writer']
            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train_loss', losses.val, global_steps)
            writer.add_scalar('train_acc', acc.val, global_steps)
            writer_dict['train_global_steps'] = global_steps + 1
            meta_a['pred_joints_vis'] = torch.ones_like(meta_a['joints_vis'])
            meta_b['pred_joints_vis'] = torch.ones_like(meta_b['joints_vis'])
            meta_c['pred_joints_vis'] = torch.ones_like(meta_c['joints_vis'])
            meta_d['pred_joints_vis'] = torch.ones_like(meta_d['joints_vis'])
            prefix = '{}_epoch_{:09d}_iter_{}_{}'.format(os.path.join(output_dir, 'train'), epoch, i, print_prefix)
            # input[:, [2,1,0]] flips channel order — presumably BGR<->RGB for saving; TODO confirm
            start_idx = 0; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_a, target_a[:save_size], (pred_a*4)[:save_size], output[start_idx:end_idx], prefix, suffix='a')
            start_idx = B; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_b, target_b[:save_size], (pred_b*4)[:save_size], output[start_idx:end_idx], prefix, suffix='b')
            start_idx = 2*B; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_c, target_c[:save_size], (pred_c*4)[:save_size], output[start_idx:end_idx], prefix, suffix='c')
            start_idx = 3*B; end_idx = start_idx + save_size
            save_debug_images(config, input[:save_size, [2,1,0], :, :], meta_d, target_d[:save_size], (pred_d*4)[:save_size], output[start_idx:end_idx], prefix, suffix='d')
    return
# --------------------------------------------------------------------------------
# markdown format output
def _print_name_value(name_value, full_arch_name):
    """Log *name_value* (metric name -> value) as a two-row markdown table."""
    metric_names = name_value.keys()
    metric_values = name_value.values()
    num_values = len(name_value)
    header_cells = ' '.join('| {}'.format(metric) for metric in metric_names)
    logger.info('| Arch ' + header_cells + ' |')
    logger.info('|---' * (num_values + 1) + '|')
    # keep the arch column narrow: abbreviate long architecture names
    if len(full_arch_name) > 15:
        full_arch_name = full_arch_name[:8] + '...'
    value_cells = ' '.join('| {:.3f}'.format(value) for value in metric_values)
    logger.info('| ' + full_arch_name + ' ' + value_cells + ' |')
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        # avoid dividing by zero when no samples have been counted yet
        if self.count != 0:
            self.avg = self.sum / self.count
        else:
            self.avg = 0
| {
"alphanum_fraction": 0.5784743287,
"author": null,
"avg_line_length": 41.4965034965,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a67007d27ee7183cb1e0ccb11f76309155fb9881",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2022-03-28T09:48:34.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-10-14T01:27:42.000Z",
"max_forks_repo_head_hexsha": "eb11b7792dd91d698c1bdf16e1f44d673a8cb00f",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "ValterH/MIPNet",
"max_forks_repo_path": "lib/core/train_general_sequential.py",
"max_issues_count": 8,
"max_issues_repo_head_hexsha": "eb11b7792dd91d698c1bdf16e1f44d673a8cb00f",
"max_issues_repo_issues_event_max_datetime": "2022-03-25T12:48:10.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-10-21T14:04:03.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "ValterH/MIPNet",
"max_issues_repo_path": "lib/core/train_general_sequential.py",
"max_line_length": 186,
"max_stars_count": 35,
"max_stars_repo_head_hexsha": "eb11b7792dd91d698c1bdf16e1f44d673a8cb00f",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "ValterH/MIPNet",
"max_stars_repo_path": "lib/core/train_general_sequential.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T12:46:02.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-10-12T02:58:10.000Z",
"num_tokens": 4280,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 17802
} |
import os
import sys
import json
import yaml
import datetime
import numpy as np
from ptranking.base.ranker import LTRFRAME_TYPE
from ptranking.data.data_utils import SPLIT_TYPE
from ptranking.ltr_adhoc.eval.ltr import LTREvaluator
from ptranking.ltr_adhoc.eval.parameter import ValidationTape
from ptranking.ltr_diversification.util.div_data import DIVDataset, RerankDIVDataset
from ptranking.metric.metric_utils import metric_results_to_string, get_opt_model
from ptranking.ltr_diversification.eval.div_parameter import DivDataSetting, DivEvalSetting, DivScoringFunctionParameter, DivSummaryTape, DivCVTape
from ptranking.ltr_diversification.score_and_sort.daletor import DALETOR, DALETORParameter
from ptranking.ltr_diversification.score_and_sort.div_prob_ranker import DivProbRanker, DivProbRankerParameter
# Supported diversified-ranking model ids (load_ranker checks the same list).
LTR_DIV_MODEL = ['DALETOR', 'DivLambdaRank', 'DivProbRanker', 'DivSoftRank', 'DivTwinRank']
####
# Grid-search notes:
# 1> opt as a grid choice; 2> learning rate; 3> self.b for risk-aware ranking;
# DALETOR: 1> temperature; 2> opt
# presort as the argument;
####
class DivLTREvaluator(LTREvaluator):
    """Cross-validation evaluator for search-result diversification models."""

    def __init__(self, frame_id=LTRFRAME_TYPE.Diversification, cuda=None):
        super(DivLTREvaluator, self).__init__(frame_id=frame_id, cuda=cuda)
        '''
        Since it is time-consuming to generate the ideal diversified ranking dynamically,
        we make it as a global True.
        '''
        self.presort = True

    def determine_files(self, data_splits=None, fold_k=None):
        """Map fold_k (1-5) to (train, vali, test) split-file lists.

        Relies on Python's negative indexing of fold_ids to rotate the five
        folds: test = fold_k, vali = the following fold, train = the
        remaining three.
        """
        fold_ids = [1, 2, 3, 4, 5]
        file_test = data_splits[fold_ids[fold_k-1]]
        file_vali = data_splits[fold_ids[fold_k-5]]
        file_train = data_splits[fold_ids[fold_k-4]] + data_splits[fold_ids[fold_k-3]] + data_splits[fold_ids[fold_k-2]]
        #print("file_test", file_test)
        #print("file_vali", file_vali)
        #print("file_train", file_train)
        return file_train, file_vali, file_test

    def load_data(self, eval_dict=None, data_dict=None, fold_k=None, discriminator=None):
        """
        Build the fold-k train/test/vali datasets.

        We note that it is impossible to perform processing over multiple queries,
        since q_doc_rele_mat may differ from query to query.
        A RerankDIVDataset is used when a discriminator is supplied (re-ranking),
        otherwise a plain DIVDataset.
        @param eval_dict:
        @param data_dict:
        @param fold_k:
        @param discriminator: optional pretrained ranker used for re-ranking.
        @return: (train_data, test_data, vali_data)
        """
        file_train, file_vali, file_test = self.determine_files(data_splits=self.data_splits, fold_k=fold_k)
        fold_dir = data_dict['dir_data'] + 'folder' + str(fold_k) + '/'
        if discriminator is not None:
            train_data = \
                RerankDIVDataset(list_as_file=file_train, split_type=SPLIT_TYPE.Train, fold_dir=fold_dir,
                                 data_dict=data_dict, dictQueryRepresentation=self.dictQueryRepresentation,
                                 dictDocumentRepresentation=self.dictDocumentRepresentation,
                                 dictQueryPermutaion=self.dictQueryPermutaion, presort=self.presort,
                                 dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics, buffer=True,
                                 discriminator=discriminator, eval_dict=eval_dict)
            test_data = \
                RerankDIVDataset(list_as_file=file_test, split_type=SPLIT_TYPE.Test, fold_dir=fold_dir,
                                 data_dict=data_dict, dictQueryRepresentation=self.dictQueryRepresentation,
                                 dictQueryPermutaion=self.dictQueryPermutaion,
                                 dictDocumentRepresentation=self.dictDocumentRepresentation,
                                 dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics,
                                 presort=self.presort, discriminator=discriminator, buffer=True, eval_dict=eval_dict)
            vali_data = \
                RerankDIVDataset(list_as_file=file_vali, split_type=SPLIT_TYPE.Validation, fold_dir=fold_dir,
                                 data_dict=data_dict, dictQueryRepresentation=self.dictQueryRepresentation,
                                 dictDocumentRepresentation=self.dictDocumentRepresentation,
                                 dictQueryPermutaion=self.dictQueryPermutaion,
                                 dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics,
                                 buffer=True, presort=self.presort, discriminator=discriminator, eval_dict=eval_dict)
        else:
            train_data = \
                DIVDataset(list_as_file=file_train, split_type=SPLIT_TYPE.Train, fold_dir=fold_dir, data_dict=data_dict,
                           dictQueryRepresentation=self.dictQueryRepresentation,
                           dictDocumentRepresentation=self.dictDocumentRepresentation,
                           dictQueryPermutaion=self.dictQueryPermutaion, buffer=True, presort=self.presort,
                           dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics,
                           add_noise=data_dict['add_noise'], std_delta=data_dict['std_delta'])
            test_data = \
                DIVDataset(list_as_file=file_test, split_type=SPLIT_TYPE.Test, fold_dir=fold_dir,
                           data_dict=data_dict, dictQueryRepresentation=self.dictQueryRepresentation,
                           dictDocumentRepresentation=self.dictDocumentRepresentation,
                           dictQueryPermutaion=self.dictQueryPermutaion, presort=self.presort,
                           dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics, buffer=True,
                           add_noise=data_dict['add_noise'], std_delta=data_dict['std_delta'])
            vali_data = \
                DIVDataset(list_as_file=file_vali, split_type=SPLIT_TYPE.Validation, fold_dir=fold_dir,
                           data_dict=data_dict, dictQueryRepresentation=self.dictQueryRepresentation,
                           dictDocumentRepresentation=self.dictDocumentRepresentation,
                           dictQueryPermutaion=self.dictQueryPermutaion, presort=self.presort,
                           dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics, buffer=True,
                           add_noise=data_dict['add_noise'], std_delta=data_dict['std_delta'])
        return train_data, test_data, vali_data

    def save_as_qrels(self, dictQueryPermutaion, dictQueryDocumentSubtopics, dir=None, data_id=None):
        """Dump TREC-style qrels (one line per query/subtopic/doc) unless the
        target file already exists."""
        target_file = '/'.join([dir, data_id+'_qrels.txt'])
        if os.path.isfile(target_file):
            return
        else:
            qrels_writer = open(target_file, 'w')
            for q_id in dictQueryDocumentSubtopics.keys():
                q_doc_subtopics = dictQueryDocumentSubtopics[q_id]
                perm_docs = dictQueryPermutaion[q_id]['permutation']
                # get max subtopic_id
                max_subtopic_id = 0
                for list_subtopic_id in q_doc_subtopics.values():
                    for subtopic_id in list_subtopic_id:
                        if int(subtopic_id) > max_subtopic_id:
                            max_subtopic_id = int(subtopic_id)
                # generate qrels: 1 if the doc covers the subtopic, else 0
                for doc in perm_docs:
                    if doc not in q_doc_subtopics:
                        for i in range(1, max_subtopic_id+1):
                            qrels_writer.write(' '.join([q_id, str(i), doc, "0\n"]))
                    else:
                        covered_subtopics = q_doc_subtopics[doc]
                        if len(covered_subtopics) == 0:
                            for i in range(1, max_subtopic_id + 1):
                                qrels_writer.write(' '.join([q_id, str(i), doc, "0\n"]))
                        else:
                            for i in range(1, max_subtopic_id + 1):
                                if str(i) in covered_subtopics:
                                    qrels_writer.write(' '.join([q_id, str(i), doc, "1\n"]))
                                else:
                                    qrels_writer.write(' '.join([q_id, str(i), doc, "0\n"]))
            #==
            qrels_writer.flush()
            qrels_writer.close()

    def load_raw_data(self, eval_dict=None, data_dict=None, fold_k=None):
        """Load fold splits plus the query/document representations and
        subtopic annotations from *data_dict['dir_data']* into instance
        attributes (data_splits, dictQueryPermutaion, ...)."""
        root = data_dict['dir_data']
        query_permutation_file = root + 'query_permutation.json'
        query_representation_file = root + 'query_representation.dat'
        document_representation_file = root + 'doc_representation.dat'
        query_document_subtopics_file = root + 'query_doc.json'
        fold_num = 5
        self.data_splits = dict()
        # NOTE: the loop variable deliberately shadows the fold_k parameter —
        # all five folds are always loaded.
        for fold_k in range(1, fold_num + 1):
            with open(root + 'folder'+ str(fold_k) + '/config.yml') as confFile:
                ''' Using the provided splits for a fair comparison '''
                self.data_splits[fold_k] = yaml.load(confFile, Loader=yaml.FullLoader)['test_set']
        #print('self.data_splits', self.data_splits)
        '''
        total number: 198
        {query_id: {'alphaDCG':*, 'permutation':[list of documents (the number of documents per query is different)]}}
        '''
        with open(query_permutation_file) as self.fileQueryPermutaion:
            self.dictQueryPermutaion = json.load(self.fileQueryPermutaion)
        with open(query_representation_file) as self.fileQueryRepresentation:
            self.dictQueryRepresentation = json.load(self.fileQueryRepresentation)
        for query in self.dictQueryRepresentation: # each query is represented as a float vector
            # np.float64 (the former np.float alias was removed in NumPy 1.24)
            self.dictQueryRepresentation[query] = np.matrix([self.dictQueryRepresentation[query]], dtype=np.float64)
        with open(document_representation_file) as self.fileDocumentRepresentation:
            self.dictDocumentRepresentation = json.load(self.fileDocumentRepresentation)
        for doc in self.dictDocumentRepresentation:
            self.dictDocumentRepresentation[doc] = np.matrix([self.dictDocumentRepresentation[doc]], dtype=np.float64)
        with open(query_document_subtopics_file) as self.fileQueryDocumentSubtopics:
            self.dictQueryDocumentSubtopics = json.load(self.fileQueryDocumentSubtopics)
        '''
        self.save_as_qrels(dictQueryPermutaion=self.dictQueryPermutaion,
                           dictQueryDocumentSubtopics=self.dictQueryDocumentSubtopics,
                           dir=data_dict['dir_data'], data_id=data_dict['data_id'])
        '''

    def setup_output(self, data_dict=None, eval_dict=None, reproduce=False):
        """
        Update output directory
        :param data_dict:
        :param eval_dict:
        :param reproduce: reuse the grid-style directory layout when reproducing.
        :return: the run-specific output directory (created if needed).
        """
        model_id = self.model_parameter.model_id
        grid_search, do_vali, dir_output = eval_dict['grid_search'], eval_dict['do_validation'], eval_dict['dir_output']
        if grid_search or reproduce:
            dir_root = dir_output + '_'.join(['gpu', 'grid', model_id]) + '/' if self.gpu else dir_output + '_'.join(['grid', model_id]) + '/'
        else:
            dir_root = dir_output
        eval_dict['dir_root'] = dir_root
        if not os.path.exists(dir_root): os.makedirs(dir_root)
        sf_str = self.sf_parameter.to_para_string()
        data_eval_str = '_'.join([self.data_setting.to_data_setting_string(),
                                  self.eval_setting.to_eval_setting_string()])
        file_prefix = '_'.join([model_id, 'SF', sf_str, data_eval_str])
        dir_run = dir_root + file_prefix + '/' # run-specific outputs
        model_para_string = self.model_parameter.to_para_string()
        if len(model_para_string) > 0:
            dir_run = dir_run + model_para_string + '/'
        eval_dict['dir_run'] = dir_run
        if not os.path.exists(dir_run):
            os.makedirs(dir_run)
        return dir_run

    def setup_eval(self, data_dict, eval_dict, sf_para_dict, model_para_dict):
        """
        Finalize the evaluation setting correspondingly
        :param data_dict:
        :param eval_dict:
        :param sf_para_dict:
        :param model_para_dict:
        :return:
        """
        sf_para_dict[sf_para_dict['sf_id']].update(dict(num_features=data_dict['num_features']))
        self.dir_run = self.setup_output(data_dict, eval_dict)
        if eval_dict['do_log'] and not self.eval_setting.debug:
            # redirect stdout into a timestamped log file inside the run dir
            time_str = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")
            sys.stdout = open(self.dir_run + '_'.join(['log', time_str]) + '.txt', "w")
        #if self.do_summary: self.summary_writer = SummaryWriter(self.dir_run + 'summary')

    def div_cv_reproduce(self, data_dict=None, eval_dict=None, sf_para_dict=None, div_para_dict=None):
        """Re-evaluate previously buffered models over the five folds and
        return the cross-validated average scores."""
        self.display_information(data_dict, div_para_dict)
        self.load_raw_data(data_dict=data_dict)
        sf_para_dict[sf_para_dict['sf_id']].update(dict(num_features=data_dict['num_features']))
        model_id = div_para_dict['model_id']
        max_label = data_dict['max_label']
        log_step = eval_dict['log_step']
        vali_metric, vali_k, cutoffs = eval_dict['vali_metric'], eval_dict['vali_k'], eval_dict['cutoffs']
        epochs, do_vali, do_summary = eval_dict['epochs'], eval_dict['do_validation'], eval_dict['do_summary']
        fold_num = 5
        dir_run = self.setup_output(data_dict, eval_dict, reproduce=True)
        cv_tape = DivCVTape(model_id=model_id, fold_num=fold_num, cutoffs=cutoffs, do_validation=do_vali,reproduce=True)
        ranker = self.load_ranker(model_para_dict=div_para_dict, sf_para_dict=sf_para_dict)
        for fold_k in range(1, fold_num + 1): # evaluation over k-fold data
            ranker.init() # initialize or reset with the same random initialization
            _, test_data, _ = self.load_data(data_dict=data_dict, fold_k=fold_k)
            cv_tape.fold_evaluation_reproduce(ranker=ranker, test_data=test_data, dir_run=dir_run,
                                              max_label=max_label, fold_k=fold_k, model_id=model_id)
        andcg_cv_avg_scores = cv_tape.get_cv_performance() # for comparison among different settings of hyper-parameters
        return andcg_cv_avg_scores

    def load_pretrained_model(self, model, dir_run, fold_k):
        """Load the fold-k optimal buffered checkpoint into *model*."""
        subdir = '-'.join(['Fold', str(fold_k)])
        run_fold_k_dir = os.path.join(dir_run, subdir)
        fold_k_buffered_model_names = os.listdir(run_fold_k_dir)
        fold_opt_model_name = get_opt_model(fold_k_buffered_model_names)
        fold_opt_model = os.path.join(run_fold_k_dir, fold_opt_model_name)
        model.load(file_model=fold_opt_model, context='cpu_gpu')

    def div_cv_eval(self, data_dict=None, eval_dict=None, sf_para_dict=None, div_para_dict=None, **kwargs):
        """Full 5-fold training/validation/testing loop; returns the
        cross-validated average scores.  When eval_dict['rerank'] is set, a
        pretrained discriminator re-ranks the data (d_sf_para_dict and
        d_div_para_dict must then be supplied via kwargs)."""
        self.display_information(data_dict, div_para_dict)
        self.setup_eval(data_dict, eval_dict, sf_para_dict, div_para_dict)
        self.load_raw_data(data_dict=data_dict)
        ranker = self.load_ranker(model_para_dict=div_para_dict, sf_para_dict=sf_para_dict)
        ranker.uniform_eval_setting(eval_dict=eval_dict)
        model_id = div_para_dict['model_id']
        max_label = data_dict['max_label']
        log_step = eval_dict['log_step']
        vali_metric, vali_k, cutoffs = eval_dict['vali_metric'], eval_dict['vali_k'], eval_dict['cutoffs']
        epochs, do_vali, do_summary = eval_dict['epochs'], eval_dict['do_validation'], eval_dict['do_summary']
        fold_num = 5
        cv_tape = DivCVTape(model_id=model_id, fold_num=fold_num, cutoffs=cutoffs, do_validation=do_vali)
        if eval_dict['rerank']:
            d_sf_para_dict, d_div_para_dict = kwargs['d_sf_para_dict'], kwargs['d_div_para_dict']
            d_sf_para_dict[d_sf_para_dict['sf_id']].update(dict(num_features=data_dict['num_features']))
            discriminator = self.load_ranker(model_para_dict=d_div_para_dict, sf_para_dict=d_sf_para_dict)
        else:
            discriminator = None
        for fold_k in range(1, fold_num + 1): # evaluation over k-fold data
            ranker.init() # initialize or reset with the same random initialization
            if eval_dict['rerank']:
                discriminator.init()
                self.load_pretrained_model(model=discriminator, dir_run=eval_dict['rerank_model_dir'], fold_k=fold_k)
            train_data, test_data, vali_data = self.load_data(data_dict=data_dict, fold_k=fold_k, eval_dict=eval_dict,
                                                              discriminator=discriminator)
            if do_vali:
                vali_tape = ValidationTape(num_epochs=epochs, validation_metric=vali_metric, validation_at_k=vali_k,
                                           fold_k=fold_k, dir_run=self.dir_run)
            if do_summary:
                summary_tape = DivSummaryTape(do_validation=do_vali, cutoffs=cutoffs, gpu=self.gpu)
            for epoch_k in range(1, epochs + 1):
                torch_fold_k_epoch_k_loss, stop_training = ranker.div_train(train_data=train_data, epoch_k=epoch_k)
                ranker.scheduler.step() # adaptive learning rate with step_size=40, gamma=0.5
                if stop_training:
                    print('training is failed !')
                    break
                if (do_summary or do_vali) and (epoch_k % log_step == 0 or epoch_k == 1): # stepwise check
                    if do_vali: # per-step validation score
                        vali_metric_value = ranker.div_validation(
                            vali_data=vali_data, vali_metric=vali_metric, k=vali_k, max_label=max_label, device='cpu')
                        vali_tape.epoch_validation(ranker=ranker, epoch_k=epoch_k,
                                                   metric_value=vali_metric_value.squeeze(-1).data.numpy())
                    if do_summary: # summarize per-step performance w.r.t. train, test
                        summary_tape.epoch_summary(torch_epoch_k_loss=torch_fold_k_epoch_k_loss, ranker=ranker,
                                                   train_data=train_data, vali_data=vali_data, test_data=test_data)
            if do_vali: # loading the fold-wise optimal model for later testing
                ranker.load(vali_tape.get_optimal_path())
                vali_tape.clear_fold_buffer(fold_k=fold_k)
            else: # buffer the model after a fixed number of training-epoches if no validation is deployed
                fold_optimal_checkpoint = '-'.join(['Fold', str(fold_k)])
                ranker.save(dir=self.dir_run + fold_optimal_checkpoint + '/',
                            name='_'.join(['net_params_epoch', str(epoch_k)]) + '.pkl')
            cv_tape.fold_evaluation(model_id=model_id, ranker=ranker, fold_k=fold_k, test_data=test_data, max_label=max_label)
        andcg_cv_avg_scores = cv_tape.get_cv_performance() # for comparison among different settings of hyper-parameters
        return andcg_cv_avg_scores

    def load_ranker(self, sf_para_dict, model_para_dict):
        """
        Load a ranker correspondingly
        :param sf_para_dict:
        :param model_para_dict:
        :return: an instance of the class named by model_para_dict['model_id'].
        """
        model_id = model_para_dict['model_id']
        if model_id in ['DALETOR', 'DivLambdaRank', 'DivProbRanker', 'DivSoftRank', 'DivTwinRank']:
            ranker = globals()[model_id](sf_para_dict=sf_para_dict, model_para_dict=model_para_dict,
                                         gpu=self.gpu, device=self.device)
        else:
            raise NotImplementedError
        return ranker

    def log_max(self, data_dict=None, max_cv_avg_scores=None, sf_para_dict=None, eval_dict=None, log_para_str=None):
        ''' Log the best performance across grid search and the corresponding setting '''
        dir_root, cutoffs = eval_dict['dir_root'], eval_dict['cutoffs']
        data_id = data_dict['data_id']
        sf_str = self.sf_parameter.to_para_string(log=True)
        data_eval_str = self.data_setting.to_data_setting_string(log=True) +'\n'+ self.eval_setting.to_eval_setting_string(log=True)
        with open(file=dir_root + '/' + '_'.join([data_id, sf_para_dict['sf_id'], 'max.txt']), mode='w') as max_writer:
            max_writer.write('\n\n'.join([data_eval_str, sf_str, log_para_str, metric_results_to_string(max_cv_avg_scores, cutoffs, metric='aNDCG')]))

    def set_data_setting(self, debug=False, data_id=None, dir_data=None, div_data_json=None):
        """Create the data setting either from a json file or from arguments."""
        if div_data_json is not None:
            self.data_setting = DivDataSetting(div_data_json=div_data_json)
        else:
            self.data_setting = DivDataSetting(debug=debug, data_id=data_id, dir_data=dir_data)

    def set_eval_setting(self, debug=False, dir_output=None, div_eval_json=None):
        """Create the evaluation setting either from a json file or from arguments."""
        if div_eval_json is not None:
            self.eval_setting = DivEvalSetting(debug=debug, div_eval_json=div_eval_json)
        else:
            self.eval_setting = DivEvalSetting(debug=debug, dir_output=dir_output)

    def set_scoring_function_setting(self, debug=None, sf_id=None, sf_json=None):
        """Create the scoring-function setting either from a json file or from arguments."""
        if sf_json is not None:
            self.sf_parameter = DivScoringFunctionParameter(sf_json=sf_json)
        else:
            self.sf_parameter = DivScoringFunctionParameter(debug=debug, sf_id=sf_id)

    def set_model_setting(self, debug=False, model_id=None, para_json=None):
        """Instantiate <model_id>Parameter either from a json file or defaults."""
        if para_json is not None:
            self.model_parameter = globals()[model_id + "Parameter"](para_json=para_json)
        else:
            self.model_parameter = globals()[model_id + "Parameter"](debug=debug)

    def run(self, debug=False, model_id=None, sf_id=None, config_with_json=None,
            dir_json=None, data_id=None, dir_data=None, dir_output=None, grid_search=False, reproduce=False):
        """Entry point: dispatch to grid_run/point_run depending on whether a
        json config directory is used and whether grid search is requested."""
        if config_with_json:
            assert dir_json is not None
            if reproduce:
                self.point_run(debug=debug, model_id=model_id, dir_json=dir_json, reproduce=reproduce)
            else:
                self.grid_run(debug=debug, model_id=model_id, dir_json=dir_json)
        else:
            assert sf_id in ['pointsf', 'listsf', 'listsf_co']
            if grid_search:
                self.grid_run(debug=debug, model_id=model_id, sf_id=sf_id,
                              data_id=data_id, dir_data=dir_data, dir_output=dir_output)
            else:
                self.point_run(debug=debug, model_id=model_id, sf_id=sf_id,
                               data_id=data_id, dir_data=dir_data, dir_output=dir_output)

    def grid_run(self, debug=True, model_id=None, sf_id=None, data_id=None, dir_data=None, dir_output=None, dir_json=None):
        """
        Perform diversified ranking based on grid search of optimal parameter setting
        """
        if dir_json is not None:
            div_data_eval_sf_json = dir_json + 'Div_Data_Eval_ScoringFunction.json'
            para_json = dir_json + model_id + "Parameter.json"
            self.set_eval_setting(debug=debug, div_eval_json=div_data_eval_sf_json)
            self.set_data_setting(div_data_json=div_data_eval_sf_json)
            self.set_scoring_function_setting(sf_json=div_data_eval_sf_json)
            self.set_model_setting(model_id=model_id, para_json=para_json)
        else:
            self.set_eval_setting(debug=debug, dir_output=dir_output)
            self.set_data_setting(debug=debug, data_id=data_id, dir_data=dir_data)
            self.set_scoring_function_setting(debug=debug, sf_id=sf_id)
            self.set_model_setting(debug=debug, model_id=model_id)
        ''' select the best setting through grid search '''
        vali_k, cutoffs = 5, [1, 3, 5, 10, 20, 50] # cutoffs should be consistent w.r.t. eval_dict
        max_cv_avg_scores = np.zeros(len(cutoffs)) # fold average
        k_index = cutoffs.index(vali_k)
        max_common_para_dict, max_sf_para_dict, max_div_para_dict = None, None, None
        for data_dict in self.iterate_data_setting():
            for eval_dict in self.iterate_eval_setting():
                if eval_dict['rerank']:
                    d_sf_para_dict, d_div_para_dict = self.get_rerank_para_dicts(eval_dict=eval_dict)
                else:
                    d_sf_para_dict, d_div_para_dict = None, None
                for sf_para_dict in self.iterate_scoring_function_setting():
                    for div_para_dict in self.iterate_model_setting():
                        curr_cv_avg_scores = \
                            self.div_cv_eval(data_dict=data_dict, eval_dict=eval_dict,
                                             sf_para_dict=sf_para_dict, div_para_dict=div_para_dict,
                                             d_sf_para_dict=d_sf_para_dict, d_div_para_dict=d_div_para_dict)
                        if curr_cv_avg_scores[k_index] > max_cv_avg_scores[k_index]:
                            max_cv_avg_scores, max_sf_para_dict, max_eval_dict, max_div_para_dict = \
                                curr_cv_avg_scores, sf_para_dict, eval_dict, div_para_dict
        # log max setting
        self.log_max(data_dict=data_dict, eval_dict=max_eval_dict,
                     max_cv_avg_scores=max_cv_avg_scores, sf_para_dict=max_sf_para_dict,
                     log_para_str=self.model_parameter.to_para_string(log=True, given_para_dict=max_div_para_dict))

    def get_rerank_para_dicts(self, eval_dict):
        """Build the discriminator's default parameter dicts from the re-rank
        config directory named in eval_dict."""
        rerank_dir = eval_dict['rerank_dir']
        rerank_model_id = eval_dict['rerank_model_id']
        rerank_div_data_eval_sf_json = rerank_dir + 'Div_Data_Eval_ScoringFunction.json'
        rerank_para_json = rerank_dir + rerank_model_id + "Parameter.json"
        rerank_sf_parameter = DivScoringFunctionParameter(sf_json=rerank_div_data_eval_sf_json)
        rerank_model_parameter = globals()[rerank_model_id + "Parameter"](para_json=rerank_para_json)
        d_sf_para_dict = rerank_sf_parameter.default_para_dict()
        d_div_para_dict = rerank_model_parameter.default_para_dict()
        return d_sf_para_dict, d_div_para_dict

    def point_run(self, debug=False, model_id=None, sf_id=None, data_id=None, dir_data=None, dir_output=None,
                  dir_json=None, reproduce=False):
        """
        Run a single (non-grid) evaluation, optionally reproducing buffered models.
        :param debug:
        :param model_id:
        :param data_id:
        :param dir_data:
        :param dir_output:
        :return:
        """
        if dir_json is None:
            self.set_eval_setting(debug=debug, dir_output=dir_output)
            self.set_data_setting(debug=debug, data_id=data_id, dir_data=dir_data)
            self.set_scoring_function_setting(debug=debug, sf_id=sf_id)
            self.set_model_setting(debug=debug, model_id=model_id)
        else:
            div_data_eval_sf_json = dir_json + 'Div_Data_Eval_ScoringFunction.json'
            para_json = dir_json + model_id + "Parameter.json"
            self.set_eval_setting(debug=debug, div_eval_json=div_data_eval_sf_json)
            self.set_data_setting(div_data_json=div_data_eval_sf_json)
            self.set_scoring_function_setting(sf_json=div_data_eval_sf_json)
            self.set_model_setting(model_id=model_id, para_json=para_json)
        data_dict = self.get_default_data_setting()
        eval_dict = self.get_default_eval_setting()
        sf_para_dict = self.get_default_scoring_function_setting()
        div_model_para_dict = self.get_default_model_setting()
        if eval_dict['rerank']:
            d_sf_para_dict, d_div_para_dict = self.get_rerank_para_dicts(eval_dict=eval_dict)
            self.div_cv_eval(data_dict=data_dict, eval_dict=eval_dict, sf_para_dict=sf_para_dict,
                             div_para_dict=div_model_para_dict,
                             d_sf_para_dict=d_sf_para_dict, d_div_para_dict=d_div_para_dict)
        else:
            if reproduce:
                self.div_cv_reproduce(data_dict=data_dict, eval_dict=eval_dict, sf_para_dict=sf_para_dict,
                                      div_para_dict=div_model_para_dict)
            else:
                self.div_cv_eval(data_dict=data_dict, eval_dict=eval_dict, sf_para_dict=sf_para_dict,
                                 div_para_dict=div_model_para_dict)
| {
"alphanum_fraction": 0.6482585504,
"author": null,
"avg_line_length": 52.056261343,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d6c4df9ba339a7a7094461a3267f481a5b638759",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 11,
"max_forks_repo_forks_event_max_datetime": "2020-03-12T06:28:35.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-09-28T07:17:51.000Z",
"max_forks_repo_head_hexsha": "2794e6e086bcd87ce177f40194339e9b825e9f4c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ii-research-ranking/ptranking",
"max_forks_repo_path": "ptranking/ltr_diversification/eval/ltr_diversification.py",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "2794e6e086bcd87ce177f40194339e9b825e9f4c",
"max_issues_repo_issues_event_max_datetime": "2020-01-05T12:35:12.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-09-27T06:59:02.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ii-research-ranking/ptranking",
"max_issues_repo_path": "ptranking/ltr_diversification/eval/ltr_diversification.py",
"max_line_length": 150,
"max_stars_count": 64,
"max_stars_repo_head_hexsha": "2794e6e086bcd87ce177f40194339e9b825e9f4c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ii-research-ranking/ptranking",
"max_stars_repo_path": "ptranking/ltr_diversification/eval/ltr_diversification.py",
"max_stars_repo_stars_event_max_datetime": "2020-04-30T07:54:04.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-09-19T17:04:04.000Z",
"num_tokens": 6290,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 28683
} |
import cv2
import numpy as np
import svm_train as st
# Get the biggest contour
def getMaxContour(contours, minArea=200):
    """Return the contour with the largest area, provided it exceeds ``minArea``.

    Falls back to an empty ``np.array`` when ``contours`` is empty or no
    contour's area is above the threshold.
    """
    best = np.array([])
    best_area = minArea
    for candidate in contours:
        candidate_area = cv2.contourArea(candidate)
        if candidate_area > best_area:
            best_area = candidate_area
            best = candidate
    return best
# Get gesture image by prediction.
def getGestureImg(cnt,img,th1,model):
    """Classify the hand region bounded by contour ``cnt`` and return the
    matching reference gesture image plus its letter label.

    ``img`` is modified in place: the bounding box is drawn on it as feedback.
    ``th1`` is a binary mask applied to the crop before classification.
    ``model`` is the trained SVM passed to ``st.predict``.
    """
    # Bounding box of the contour; drawn onto the live image for feedback.
    x,y,w,h = cv2.boundingRect(cnt)
    cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
    # Crop the hand and keep only the pixels allowed by the threshold mask.
    imgT=img[y:y+h,x:x+w]
    imgT=cv2.bitwise_and(imgT,imgT,mask=th1[y:y+h,x:x+w])
    # Normalize to the classifier's expected 200x200 grayscale input.
    imgT=cv2.resize(imgT,(200,200))
    imgTG=cv2.cvtColor(imgT,cv2.COLOR_BGR2GRAY)
    resp=st.predict(model,imgTG)
    # resp[1] is a 1-based class index; +64 maps 1->'A', 2->'B', ... (ASCII).
    # assumes 'TrainData/<letter>_2.jpg' exists for every class -- TODO confirm
    return img,chr(int(resp[1])+64) | {
"alphanum_fraction": 0.649870801,
"author": null,
"avg_line_length": 28.6666666667,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "35fa3eba7dc577cb274a27465d86f711e8f1fd6f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "90a53a023555c2fcfbd40c4df73c9f2f31a8b92a",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Gunnika/VoiceBox",
"max_forks_repo_path": "ASL-Translator/util.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "90a53a023555c2fcfbd40c4df73c9f2f31a8b92a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Gunnika/VoiceBox",
"max_issues_repo_path": "ASL-Translator/util.py",
"max_line_length": 62,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "90a53a023555c2fcfbd40c4df73c9f2f31a8b92a",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Gunnika/VoiceBox",
"max_stars_repo_path": "ASL-Translator/util.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 242,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 774
} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides a Processor that identifies unexpected atoms such as PTMs and
protonations, and canonicalizes their attributes based on modifications known
in the forcefield.
"""
from collections import defaultdict
import itertools
import networkx as nx
from .processor import Processor
from ..log_helpers import StyleAdapter, get_logger
from ..utils import format_atom_string
# Module-level logger wrapped in StyleAdapter for brace-style lazy formatting.
LOGGER = StyleAdapter(get_logger(__name__))
class PTMGraphMatcher(nx.isomorphism.GraphMatcher):
    """
    Graph matcher encoding the node-equality rules used to match PTMs.
    """
    # G1 >= G2; G1 is the found residue; G2 the PTM reference
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def semantic_feasibility(self, node1, node2):
        """
        Return True iff ``node1`` and ``node2`` should be considered equal.
        Both nodes must agree on whether they are marked as PTM atoms; PTM
        atoms are then compared by element, all other atoms by atom name.
        """
        attrs1 = self.G1.nodes[node1]
        attrs2 = self.G2.nodes[node2]
        if attrs1.get('PTM_atom', False) != attrs2['PTM_atom']:
            return False
        compare_key = 'element' if attrs2['PTM_atom'] else 'atomname'
        return attrs1[compare_key] == attrs2[compare_key]
def find_ptm_atoms(molecule):
    """
    Finds all atoms in molecule that have the node attribute ``PTM_atom`` set
    to a value that evaluates to ``True``. ``molecule`` will be traversed
    starting at these atoms until all marked atoms are visited such that they
    are identified per "branch", and for every branch the anchor node is known.
    The anchor node is the node(s) which are not PTM atoms and share an edge
    with the traversed branch.

    Parameters
    ----------
    molecule : networkx.Graph

    Returns
    -------
    list[tuple[set, set]]
        ``[({ptm atom indices}, {anchor indices}), ...]``. Ptm atom indices are
        connected, and are connected to the rest of molecule via anchor
        indices.
    """
    # Atomnames have already been fixed, and missing atoms have been added.
    # In addition, unrecognized atoms have been labeled with the PTM attribute.
    extra_atoms = set(n_idx for n_idx in molecule
                      if molecule.nodes[n_idx].get('PTM_atom', False))
    ptms = []
    while extra_atoms:
        # First PTM atom we'll look at
        first = next(iter(extra_atoms))
        anchors = set()
        # PTM atoms we've found
        atoms = set()
        # Atoms we still need to see this traversal
        to_see = set([first])
        # Traverse in molecule.
        for orig, succ in nx.bfs_successors(molecule, first):
            # We've seen orig, so remove it
            if orig in to_see:
                to_see.remove(orig)
            if orig in extra_atoms:
                # If this is a PTM atom, we want to see its neighbours as
                # well.
                to_see.update(succ)
                atoms.add(orig)
            else:
                # Else, it's an attachment point for this PTM
                anchors.add(orig)
            if not to_see:
                # We've traversed the interesting bit of the tree
                break
        # Although we know how far our tree spans we may still have work to do
        # for terminal nodes: bfs_successors never yields leaves as 'orig', so
        # anything left in to_see has to be classified here by hand.
        for node in to_see:
            if node in extra_atoms:
                atoms.add(node)
            else:
                anchors.add(node)
        # Don't revisit atoms claimed by this branch in later iterations.
        extra_atoms -= atoms
        ptms.append((atoms, anchors))
    return ptms
def identify_ptms(residue, residue_ptms, known_ptms):
    """
    Identifies all PTMs from ``known_ptms`` necessary to describe all PTM
    atoms in ``residue_ptms``. PTMs are taken such that all PTM atoms in
    ``residue`` are covered by applying PTMs from ``known_ptms`` in order.

    Nodes in ``residue`` must have correct ``atomname`` attributes, and may
    not be missing. In addition, every PTM must be anchored to a non-PTM atom.

    Parameters
    ----------
    residue : networkx.Graph
        The residues involved with these PTMs. Need not be connected.
    residue_ptms : list[tuple[set, set]]
        As returned by ``find_ptm_atoms``, but only those relevant for
        ``residue``.
    known_ptms : collections.abc.Sequence[tuple[networkx.Graph, PTMGraphMatcher]]
        The graph nodes must have the `PTM_atom` attribute (True or False,
        True for atoms that describe where the PTM attaches), an `atomname`
        attribute, and optionally a `replace` dict of
        ``{attribute_name: new_value}`` pairs (``'atomname': None`` marks the
        node for removal). The graph itself needs a 'name' attribute.

    Returns
    -------
    list[tuple[networkx.Graph, dict]]
        All PTMs from ``known_ptms`` needed to describe the PTM atoms in
        ``residue`` along with a ``dict`` of node correspondences. The order
        of ``known_ptms`` is preserved.

    Raises
    ------
    KeyError
        Not all PTM atoms in ``residue`` can be covered with ``known_ptms``.
    """
    uncovered = set()
    for ptm_idxs, anchor_idxs in residue_ptms:
        uncovered |= ptm_idxs | anchor_idxs
    return _cover_graph(residue, uncovered, known_ptms)
def _cover_graph(graph, to_cover, fragments):
# BASECASE: to_cover is empty
if not to_cover:
return []
# All non-PTM atoms in residue are always available for matching...
available = set(n_idx for n_idx in graph
if not graph.nodes[n_idx].get('PTM_atom', False))
# ... and add those we still need to cover
available.update(to_cover)
# REDUCTION: Apply one of fragments, remove those atoms from to_cover
# COMBINATION: add the applied option to the output.
for idx, option in enumerate(fragments):
graphlet, matcher = option
matches = list(matcher.subgraph_isomorphisms_iter())
# Matches: [{graph_idxs: fragment_idxs}, {...}, ...]
for match in matches:
matching = set(match.keys())
# TODO: one of the matching atoms must be an anchor. Should be
# handled by PTMGraphMatcher already, assuming every PTM graph has
# at least one non-ptm atom specified
if matching <= available:
# Continue with the remaining ptm atoms, and try just this
# option and all smaller.
try:
rest_cover = _cover_graph(graph, to_cover - matching, fragments[idx:])
except KeyError:
continue
return [(graphlet, match)] + rest_cover
raise KeyError('Could not identify PTM')
def allowed_ptms(residue, res_ptms, known_ptms):
    """
    Yield every PTM in ``known_ptms`` that might be relevant for ``residue``.

    Parameters
    ----------
    residue : networkx.Graph
    res_ptms : list[tuple[set, set]]
        As returned by ``find_ptm_atoms``. Currently not used.
    known_ptms : collections.abc.Iterable[networkx.Graph]

    Yields
    ------
    tuple[networkx.Graph, PTMGraphMatcher]
        All graphs in ``known_ptms`` that are subgraphs of ``residue``,
        paired with the matcher that proved it.
    """
    # TODO: filter by element count first
    for candidate in known_ptms:
        matcher = PTMGraphMatcher(residue, candidate)
        if matcher.subgraph_is_isomorphic():
            yield candidate, matcher
def fix_ptm(molecule):
    '''
    Canonizes all PTM atoms in molecule, and labels the relevant residues with
    which PTMs were recognized. Modifies ``molecule`` such that atomnames of
    PTM atoms are corrected, and the relevant residues have been labeled with
    which PTMs were recognized.

    Parameters
    ----------
    molecule : networkx.Graph
        Must not have missing atoms, and atomnames must be correct. Atoms which
        could not be recognized must be labeled with the attribute
        PTM_atom=True.
    '''
    ptm_atoms = find_ptm_atoms(molecule)

    # Sort key: the sorted resids of a branch's anchor atoms, so branches
    # anchored to the same residues become adjacent for the groupby below.
    def key_func(ptm_atoms):
        node_idxs = ptm_atoms[-1]  # The anchors
        return sorted(molecule.nodes[idx]['resid'] for idx in node_idxs)
    ptm_atoms = sorted(ptm_atoms, key=key_func)

    # Map resid -> list of node indices belonging to that residue.
    resid_to_idxs = defaultdict(list)
    for n_idx in molecule:
        residx = molecule.nodes[n_idx]['resid']
        resid_to_idxs[residx].append(n_idx)
    resid_to_idxs = dict(resid_to_idxs)

    known_ptms = molecule.force_field.modifications

    for resids, res_ptms in itertools.groupby(ptm_atoms, key_func):
        # How to solve this graph covering problem
        # Filter known_ptms, such that
        #   element_count(known_ptm) <= element_count(found)
        # Filter known_ptms, such that known_ptm <= found (subgraph of).
        #   Note that this does mean that the PTMs in the PDB *must* be
        #   complete. So no missing atoms.
        # Find all the exactly covering combinations.
        # Pick the best solution, such that the maximum size of the applied
        #   PTMs is maximal. (3, 2) > (3, 1, 1) > (2, 2, 1)
        #   Numbers are sizes of applied PTMs
        # The last two steps are combined by recursively trying the largest
        # option in identify_ptms
        res_ptms = list(res_ptms)
        n_idxs = set()
        for resid in resids:
            n_idxs.update(resid_to_idxs[resid])
        # TODO: Maybe use graph_utils.make_residue_graph? Or rewrite that
        #       function?
        residue = molecule.subgraph(n_idxs)
        options = allowed_ptms(residue, res_ptms, known_ptms)
        # TODO/FIXME: This includes anchors in sorting by size.
        options = sorted(options, key=lambda opt: len(opt[0]), reverse=True)
        try:
            identified = identify_ptms(residue, res_ptms, options)
        except KeyError:
            LOGGER.exception('Could not identify the modifications for'
                             ' residues {}, involving atoms {}',
                             ['{resname}{resid}'.format(**molecule.nodes[resid_to_idxs[resid][0]])
                              for resid in sorted(set(resids))],
                             ['{atomid}-{atomname}'.format(**molecule.nodes[idx])
                              for idxs in res_ptms for idx in idxs[0]],
                             type='unknown-input')
            raise
        # Why this mess? There can be multiple PTMs for a single (set of)
        # residue(s); and a single PTM can span multiple residues.
        LOGGER.info("Identified the modifications {} on residues {}",
                    [out[0].graph['name'] for out in identified],
                    ['{resname}{resid}'.format(**molecule.nodes[resid_to_idxs[resid][0]])
                     for resid in resids])
        for ptm, match in identified:
            for mol_idx, ptm_idx in match.items():
                ptm_node = ptm.nodes[ptm_idx]
                mol_node = molecule.nodes[mol_idx]
                # Names of PTM atoms still need to be corrected, and for some
                # non PTM atoms attributes need to change.
                # Nodes with 'replace': {'atomname': None} will be removed.
                if ptm_node['PTM_atom'] or 'replace' in ptm_node:
                    # Keep a one-node subgraph copy of the original atom data.
                    mol_node['graph'] = molecule.subgraph([mol_idx]).copy()
                    # Attributes to apply: the PTM node's own attributes,
                    # overridden by its 'replace' dict (which itself is not
                    # copied onto the molecule node).
                    to_replace = ptm_node.copy()
                    if 'replace' in to_replace:
                        del to_replace['replace']
                    to_replace.update(ptm_node.get('replace', dict()))
                    for attr_name, val in to_replace.items():
                        if attr_name == 'atomname' and val is None:
                            LOGGER.debug('Removing atom {}',
                                         format_atom_string(mol_node),
                                         type='remove-atom')
                            molecule.remove_node(mol_idx)
                            n_idxs.remove(mol_idx)
                            resid_to_idxs[mol_node['resid']].remove(mol_idx)
                            # The atom is gone: no further attributes to set.
                            break
                        if mol_node.get(attr_name) != val:
                            fmt = 'Changing attribute {} from {} to {} for atom {}'
                            # NOTE(review): mol_node[attr_name] raises KeyError
                            # when the attribute is absent, although the .get()
                            # above tolerates that case -- confirm replaced
                            # attributes always pre-exist on the node.
                            LOGGER.debug(fmt, attr_name, mol_node[attr_name],
                                         val, format_atom_string(mol_node),
                                         type='change-atom')
                            mol_node[attr_name] = val
            for n_idx in n_idxs:
                # Tag every atom of the involved residues with this PTM.
                molecule.nodes[n_idx]['modifications'] = molecule.nodes[n_idx].get('modifications', [])
                molecule.nodes[n_idx]['modifications'].append(ptm)
class CanonicalizeModifications(Processor):
    """Processor that identifies and canonicalizes PTM atoms in molecules."""

    def run_molecule(self, molecule):
        """Apply :func:`fix_ptm` to ``molecule`` in place and return it."""
        fix_ptm(molecule)
        return molecule
| {
"alphanum_fraction": 0.6091748372,
"author": null,
"avg_line_length": 40.9765395894,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "502fd3e66cd31c6d7362ee65920565c9874dfac3",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "975593c9191fe379d836bdab3d953fb739e1a6f2",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "NanduTej/vermouth-martinize",
"max_forks_repo_path": "vermouth/processors/canonicalize_modifications.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "975593c9191fe379d836bdab3d953fb739e1a6f2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "NanduTej/vermouth-martinize",
"max_issues_repo_path": "vermouth/processors/canonicalize_modifications.py",
"max_line_length": 103,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "975593c9191fe379d836bdab3d953fb739e1a6f2",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "NanduTej/vermouth-martinize",
"max_stars_repo_path": "vermouth/processors/canonicalize_modifications.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3158,
"path": null,
"reason": "import networkx",
"repo": null,
"save_path": null,
"sha": null,
"size": 13973
} |
#!/usr/bin/env python
"""
Key '0' - To select areas of background
Key '1' - To select areas of cervix
Key '2' - To select areas of channel
Key 'l' - go to next image
Key 'k' - go to previous iamge
Key 'd' - inc thickness
Key 'a' - dec thickness
Key 'r' - To reset mask
Key 's' - To save the results
Key 'm' - move processed image
Usage:
channel_marker {/path/to/directory-with-images-in-PNG}
"""
# Python 2/3 compatibility
from __future__ import print_function
import os
import numpy as np
import cv2
import sys
import glob
import shutil
import matplotlib.pyplot as plt
# Window titles for the two OpenCV views.
def_win_input_img = 'input-image'
def_win_output_msk = 'output-mask'
# BGR colors used to paint each label class.
def_BLACK = [0, 0, 0]
def_CHANNEL_COLOR = [120, 155, 75]
def_CERVIX_COLOR = [255, 255, 255]
# Draw specs: 'color' for on-screen painting, 'val' stored in the label mask,
# 'alpha' is the alpha-channel value used to (de)serialize the mask to PNG.
def_DRAW_BG = {'color' : def_BLACK, 'val' : 0, 'alpha': 64}
def_DRAW_CERVIX = {'color' : def_CERVIX_COLOR, 'val' : 1, 'alpha': 255}
def_DRAW_CHANNEL = {'color' : def_CHANNEL_COLOR, 'val' : 2, 'alpha': 128}
# def_DRAWS drives on-screen painting; note it omits def_DRAW_CERVIX --
# presumably the cervix class is shown only via the blended output view.
def_DRAWS = [def_DRAW_BG, def_DRAW_CHANNEL]
# Used for alpha <-> label-value conversion; covers all three classes.
def_ALPHA_TO_VAL = [def_DRAW_BG, def_DRAW_CERVIX, def_DRAW_CHANNEL]
# Last known mouse position (shared by the mouse callback and redraw).
globX = 10
globY = 10
is_first_run = True
# setting up flags
# NOTE(review): 'drawing' appears unused; Dataset._is_drawing is used instead.
drawing = False # flag for drawing curves
thickness = 9 # brush thickness
datasetPtr = None
class Dataset:
    """Bookkeeping for a directory of ``*.jpg-automasked.png`` images.

    Tracks the current image index, loads images together with their
    alpha-channel-encoded masks, and moves accepted/rejected images into the
    ``processed-images``/``denied`` subdirectories.
    """
    _ix = None
    _iy = None
    #
    # mskPrefix is appended (as a suffix) to an image path to name its mask file
    mskPrefix = '-processed.png'
    outPrefix = 'processed-images'  # subdirectory for accepted images
    denPrefix = 'denied'            # subdirectory for rejected images
    wdir = None       # absolute working directory, or None when unbound
    pathImgs = None   # sorted array of image paths, or None before scanning
    cidx = None       # index of the current image
    #
    _orig = None      # raw image as read from disk (BGRA expected)
    _img_rgb = None   # color part of _orig
    _msk = None       # label mask (values from the def_DRAW_* 'val' fields)
    _img_draw = None  # preview image with the mask painted on top
    _is_drawing = False
    _value = None     # currently selected draw spec (color/val/alpha)

    def __init__(self, pdir=None):
        """Optionally bind to directory ``pdir`` and scan it for images."""
        if pdir is not None:
            self.wdir = os.path.abspath(pdir)
            self.cidx = 0
            self.updateImages()

    def updateImages(self):
        """Re-scan the working directory and refresh the image list."""
        if (self.wdir is not None):
            tlst = glob.glob('{0}/*.jpg-automasked.png'.format(self.wdir))
            self.pathImgs = np.array(sorted(tlst))
            # BUGFIX: was 'self.cidx > len(tlst)'. An index *equal* to the
            # list length is already out of range and must be reset too.
            if (self.cidx >= len(tlst)):
                self.cidx = 0
            self._value = def_DRAW_CHANNEL

    def isOk(self):
        """True when a directory is bound and has been scanned."""
        return (self.wdir is not None) and (self.pathImgs is not None)

    def getNumImages(self):
        """Number of images found, or None when not initialized."""
        if self.isOk():
            return len(self.pathImgs)

    def getCurrentIdx(self):
        """Index of the current image."""
        return self.cidx

    def getNumProcessedImages(self):
        """Number of images already moved to the output directory."""
        if self.isOk():
            if os.path.isdir(self.outputDir()):
                lstImg = glob.glob('{0}/*.jpg-automasked.png'.format(self.outputDir()))
                return len(lstImg)
            else:
                return 0

    def prevImageIdx(self):
        """Step to the previous image (wraps around); True on success."""
        if self.isOk():
            self.cidx -= 1
            if self.cidx < 0:
                self.cidx = self.getNumImages() - 1
            return True
        return False

    def nextImageIdx(self):
        """Step to the next image (wraps around); True on success."""
        if self.isOk():
            self.cidx += 1
            if self.cidx >= self.getNumImages():
                self.cidx = 0
            return True
        return False

    def _getPathImg(self):
        """Path of the current image."""
        return str(self.pathImgs[self.cidx])

    def _getPathImgProc(self):
        """Path of the current image's processed (masked) file."""
        return self._getPathMsk()

    def _getPathMsk(self):
        """Mask path: the image path with the mask suffix appended."""
        return '{0}{1}'.format(self._getPathImg(), self.mskPrefix)

    def outputDir(self):
        """Path of the processed-images directory (created on demand)."""
        if self.isOk():
            outDir = os.path.join(self.wdir, self.outPrefix)
            if not os.path.isdir(outDir):
                os.makedirs(outDir)
            return outDir

    def deniedDir(self):
        """Path of the denied-images directory (created on demand)."""
        if self.isOk():
            denDir = os.path.join(self.wdir, self.denPrefix)
            if not os.path.isdir(denDir):
                os.makedirs(denDir)
            return denDir

    def _toString(self):
        if not self.isOk():
            return 'Dataset is not initialized...'
        else:
            return 'Dataset: #Images/#Processed = {0}/{1}, current = {2}'.format(
                self.getNumImages(),
                self.getNumProcessedImages(),
                self.getCurrentIdx())

    def __str__(self):
        return self._toString()

    def __repr__(self):
        return self._toString()

    #
    def loadCurretImage(self):
        """Load the current image (with alpha) and rebuild mask/preview."""
        if self.isOk():
            if self.getNumImages() < 1:
                print (' !!! WARNING !!! cant find files in directory [{0}], skip...'.format(self.wdir))
                return
            self._orig = cv2.imread(self._getPathImg(), cv2.IMREAD_UNCHANGED)
            self._img_rgb = self._orig[:, :, :3].copy()
            self.resetMask()

    def resetMask(self):
        """Rebuild the label mask from the image's alpha channel."""
        tmsk = self._orig[:, :, 3]
        # convert the alpha-encoded mask to label values
        self._msk = np.zeros(tmsk.shape, np.uint8)
        for mask_type in def_ALPHA_TO_VAL:
            self._msk[tmsk == mask_type['alpha']] = mask_type['val']
        self._img_draw = draw_mask_on_image(self._img_rgb, self._msk)
        self._is_drawing = False

    def saveMasked(self):
        """Write the current image with the mask re-encoded into alpha."""
        if self.isOk():
            retMsk = np.zeros(self._msk.shape, dtype=np.uint8)
            for mask_type in def_ALPHA_TO_VAL:
                retMsk[self._msk == mask_type['val']] = mask_type['alpha']
            retMasked = np.dstack((self._img_rgb, retMsk))
            fout = self._getPathImgProc()
            cv2.imwrite(fout, retMasked)
            print (':: SAVE to [{0}]'.format(fout))

    def moveProcessedImage(self):
        """Move the current image and its saved mask to the output dir."""
        if self.isOk():
            pathImg = self._getPathImg()
            pathMsk = self._getPathImgProc()
            dirOut = self.outputDir()
            if os.path.isfile(pathImg) and os.path.isfile(pathMsk):
                shutil.move(pathImg, dirOut)
                shutil.move(pathMsk, dirOut)
                self.updateImages()
                self.loadCurretImage()
                print (':: MOVE from [{0}] to [{1}]'.format(pathMsk, dirOut))
            else:
                print ('\t***Image is not processed!, skip... [{0}]'.format(pathMsk))
            print(self._toString())

    def moveDeniedImage(self):
        """Move the current image (and its mask, if any) to the denied dir."""
        if self.isOk():
            pathImg = self._getPathImg()
            pathMsk = self._getPathImgProc()
            dirDen = self.deniedDir()
            if os.path.isfile(pathImg):
                shutil.move(pathImg, dirDen)
                if os.path.isfile(pathMsk):
                    shutil.move(pathMsk, dirDen)
                self.updateImages()
                self.loadCurretImage()
                print ('DENIED :: MOVE from [{0}] to [{1}]'.format(pathImg, dirDen))
            else:
                print ('\t***Image is not processed!, skip... [{0}]'.format(pathImg))
def draw_mask_on_image(pimg, pmsk):
    """Return a copy of ``pimg`` with every drawable label painted in its color."""
    overlay = pimg.copy()
    for draw_spec in def_DRAWS:
        overlay[pmsk == draw_spec['val']] = draw_spec['color']
    return overlay
def mark_position(pdataset, px, py, pthickness):
    """Paint a filled brush dot at (px, py) onto both the preview and the mask."""
    center = (px, py)
    cv2.circle(pdataset._img_draw, center, pthickness, pdataset._value['color'], -1)
    cv2.circle(pdataset._msk, center, pthickness, pdataset._value['val'], -1)
def draw_all_windows():
    """Create the two OpenCV windows on first call, then redraw both views."""
    global globX, globY, is_first_run, datasetPtr
    if is_first_run:
        # One-time setup: windows, mouse callbacks, and the radius trackbar.
        cv2.namedWindow(def_win_input_img, cv2.WINDOW_NORMAL | cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow(def_win_output_msk, cv2.WINDOW_NORMAL | cv2.WINDOW_AUTOSIZE)
        cv2.setMouseCallback(def_win_input_img, on_mouse)
        cv2.setMouseCallback(def_win_output_msk, on_mouse)
        cv2.moveWindow(def_win_input_img, 600, 50)
        cv2.createTrackbar('Radius', def_win_output_msk, 0, 50, on_track_mouse_size)
        cv2.setTrackbarPos('Radius', def_win_output_msk, thickness)
        is_first_run = False
    # BUGFIX: was 'if datasetPtr.isOk:' -- a bound method is always truthy,
    # so the guard never worked; it must actually be called.
    if datasetPtr.isOk():
        timg1 = datasetPtr._img_draw.copy()
        timg2 = datasetPtr._img_rgb.copy()
        # (0) blend each labeled region's color into the clean image
        for mask_type in def_DRAWS:
            c_img = timg2.copy()
            c_img[datasetPtr._msk == mask_type['val']] = np.array(mask_type['color'])
            timg2 = cv2.addWeighted(timg2, 0.6, c_img, 0.4, 0)
        # (1) draw current mouse pointer
        colorCircle = datasetPtr._value['color']
        cv2.circle(timg1, (globX, globY), thickness, colorCircle)
        cv2.circle(timg1, (globX, globY), 1, colorCircle)
        cv2.circle(timg2, (globX, globY), thickness, colorCircle)
        #
        cv2.imshow(def_win_input_img, timg1)
        cv2.imshow(def_win_output_msk, timg2)
def on_mouse(event, x, y, flags, param):
    """Mouse callback: track the cursor and paint while the left button is held."""
    global globX, globY, datasetPtr
    globX, globY = x, y
    # draw touchup curves
    if event == cv2.EVENT_LBUTTONDOWN:
        datasetPtr._is_drawing = True
        mark_position(datasetPtr, x, y, thickness)
    elif event == cv2.EVENT_MOUSEMOVE and datasetPtr._is_drawing:
        mark_position(datasetPtr, x, y, thickness)
    elif event == cv2.EVENT_LBUTTONUP and datasetPtr._is_drawing:
        datasetPtr._is_drawing = False
        mark_position(datasetPtr, x, y, thickness)
    draw_all_windows()
def on_track_mouse_size(x):
    """Trackbar callback: update the global brush thickness (minimum 1)."""
    global thickness
    thickness = 1 + x
if __name__ == '__main__':
    # Require the image directory as the single command-line argument.
    if len(sys.argv) < 2:
        print ('Usage: {0} {{/path/to/dir/with/images}}'.format(sys.argv[0]))
        sys.exit(0)
    else:
        datasetPtr = Dataset(pdir=sys.argv[1])
    print (datasetPtr)
    datasetPtr.loadCurretImage()
    draw_all_windows()
    # Main event loop: dispatch on single key presses until ESC.
    while True:
        k = cv2.waitKey() & 255
        # key bindings
        if k == 27:  # esc to exit
            break
        elif k == ord('0'):
            print(" mark BACKGROUND regions with left mouse button \n")
            datasetPtr._value = def_DRAW_BG
        elif k == ord('1'):
            print(" mark CERVIX regions with left mouse button \n")
            datasetPtr._value = def_DRAW_CERVIX
        elif k == ord('2'):
            print(" mark CHANNEL regions with left mouse button \n")
            datasetPtr._value = def_DRAW_CHANNEL
        elif k == ord('r'):
            # re-derive the mask from the image's alpha channel
            datasetPtr.resetMask()
        elif k == ord('k'):
            # previous image
            if datasetPtr.prevImageIdx():
                datasetPtr.loadCurretImage()
            else:
                print ('!!! Cannt load previous image: {0}'.format(datasetPtr._toString()))
        elif k == ord('l'):
            # next image
            if datasetPtr.nextImageIdx():
                datasetPtr.loadCurretImage()
            else:
                print ('!!! Cannt load next image: {0}'.format(datasetPtr._toString()))
        elif k == ord('s'):
            datasetPtr.saveMasked()
        elif k == ord('m'):
            datasetPtr.moveProcessedImage()
            print(datasetPtr)
        elif k == ord('d'):
            # NOTE(review): the module docstring says 'd'/'a' change the brush
            # thickness, but here 'd' moves the image to the denied folder and
            # 'e'/'q' change the thickness -- confirm which is intended.
            datasetPtr.moveDeniedImage()
            print(datasetPtr)
        elif k == ord('h'):
            print (__doc__)
        elif k == ord('e'):
            thickness += 1
        elif k == ord('q'):
            thickness -= 1
        # redraw both windows after every key press
        draw_all_windows()
| {
"alphanum_fraction": 0.5802332038,
"author": null,
"avg_line_length": 33.3518518519,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5a74c26749b5e20b1f1cbfced1ce96721e9adad9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2021-07-20T15:21:58.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-06-27T07:14:06.000Z",
"max_forks_repo_head_hexsha": "7cb7cb308b43de4f85a09053723e50c368c05891",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "gakarak/Challenge_Cervical_Cancer_Screening-",
"max_forks_repo_path": "code/src01_Dataset_Segmentation_with_GrabCut_HelpScript/channel_marker_v1.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7cb7cb308b43de4f85a09053723e50c368c05891",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "gakarak/Challenge_Cervical_Cancer_Screening-",
"max_issues_repo_path": "code/src01_Dataset_Segmentation_with_GrabCut_HelpScript/channel_marker_v1.py",
"max_line_length": 104,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7cb7cb308b43de4f85a09053723e50c368c05891",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "gakarak/Challenge_Cervical_Cancer_Screening-",
"max_stars_repo_path": "code/src01_Dataset_Segmentation_with_GrabCut_HelpScript/channel_marker_v1.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2779,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10806
} |
def azureml_main(frame1):
    """Azure ML Studio entry point: save bar plots of the categorical columns
    and box plots of the numeric columns, split by the ``readmi_class`` label.

    Parameters
    ----------
    frame1 : pandas.DataFrame
        Input data. Must contain a ``readmi_class`` column holding
        'YES'/'NO' values; presumably it is the last column, which is why the
        bar-plot loop stops at ``num_cols - 1`` -- TODO confirm.

    Returns
    -------
    pandas.DataFrame
        ``frame1``, unchanged (Azure ML modules must return a data frame).
    """
    import matplotlib
    matplotlib.use('agg')  # headless backend; figures are only written to files
    import numpy as np
    import matplotlib.pyplot as plt
    # (The original unused 'pandas' and 'statsmodels' imports were removed.)

    Azure = True
    names = list(frame1)
    num_cols = frame1.shape[1]
    numeric_dtypes = [np.int64, np.int32, np.float64]

    ## Bar plots for the string (categorical) columns, by readmi_class.
    for indx in range(num_cols - 1):
        # .iloc/.loc replace DataFrame.ix, which was removed from pandas.
        if frame1.iloc[:, indx].dtype not in numeric_dtypes:
            temp1 = frame1.loc[frame1.readmi_class == 'YES', names[indx]].value_counts()
            temp0 = frame1.loc[frame1.readmi_class == 'NO', names[indx]].value_counts()
            fig = plt.figure(figsize=(12, 6))
            fig.clf()
            ax1 = fig.add_subplot(1, 2, 1)
            ax0 = fig.add_subplot(1, 2, 2)
            temp1.plot(kind='bar', ax=ax1)
            ax1.set_title('Values of ' + names[indx] + '\n for readmitted patients')
            temp0.plot(kind='bar', ax=ax0)
            ax0.set_title('Values of ' + names[indx] + '\n for patients not readmitted')
            if Azure:
                fig.savefig('bar_' + names[indx] + '.png')
            plt.close(fig)  # free the figure; they otherwise accumulate per column

    ## Box plots for the numeric columns.
    for indx in range(num_cols):
        if frame1.iloc[:, indx].dtype in numeric_dtypes:
            temp1 = frame1.loc[frame1.readmi_class == 'YES', names[indx]]
            temp0 = frame1.loc[frame1.readmi_class == 'NO', names[indx]]
            fig = plt.figure(figsize=(12, 6))
            fig.clf()
            ax1 = fig.add_subplot(1, 2, 1)
            ax0 = fig.add_subplot(1, 2, 2)
            # .to_numpy() replaces the removed Series.as_matrix().
            ax1.boxplot(temp1.to_numpy())
            ax1.set_title('Box plot of ' + names[indx] + '\n for readmitted patients')
            ax0.boxplot(temp0.to_numpy())
            ax0.set_title('Box plot of ' + names[indx] + '\n for patients not readmitted')
            if Azure:
                fig.savefig('box_' + names[indx] + '.png')
            plt.close(fig)
    return frame1
| {
"alphanum_fraction": 0.5516098485,
"author": null,
"avg_line_length": 41.4117647059,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5744ca34e9a48c1fb7a7a25ba1e5613b7df89d9a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2d166cc18ced32d9bf01620d83555d70c688a627",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "applecool/DataScience",
"max_forks_repo_path": "Azure ML Studio Experiments/Diabetes_Model/DiabetesVis.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2d166cc18ced32d9bf01620d83555d70c688a627",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "applecool/DataScience",
"max_issues_repo_path": "Azure ML Studio Experiments/Diabetes_Model/DiabetesVis.py",
"max_line_length": 91,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2d166cc18ced32d9bf01620d83555d70c688a627",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "applecool/DataScience",
"max_stars_repo_path": "Azure ML Studio Experiments/Diabetes_Model/DiabetesVis.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 550,
"path": null,
"reason": "import numpy,import statsmodels",
"repo": null,
"save_path": null,
"sha": null,
"size": 2112
} |
[STATEMENT]
lemma nn_integral_C:
assumes "m \<le> m'" and f[measurable]: "f \<in> borel_measurable (PiM {0..<n+m} M)"
and nonneg: "\<And>x. x \<in> space (PiM {0..<n+m} M) \<Longrightarrow> 0 \<le> f x"
and x: "x \<in> space (PiM {0..<n} M)"
shows "(\<integral>\<^sup>+x. f x \<partial>C n m x) = (\<integral>\<^sup>+x. f (restrict x {0..<n+m}) \<partial>C n m' x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n m' x
[PROOF STEP]
using \<open>m \<le> m'\<close>
[PROOF STATE]
proof (prove)
using this:
m \<le> m'
goal (1 subgoal):
1. integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n m' x
[PROOF STEP]
proof (induction rule: dec_induct)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n m x
2. \<And>na. \<lbrakk>m \<le> na; na < m'; integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n na x\<rbrakk> \<Longrightarrow> integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n (Suc na) x
[PROOF STEP]
case (step i)
[PROOF STATE]
proof (state)
this:
m \<le> i
i < m'
integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n i x
goal (2 subgoals):
1. integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n m x
2. \<And>na. \<lbrakk>m \<le> na; na < m'; integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n na x\<rbrakk> \<Longrightarrow> integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n (Suc na) x
[PROOF STEP]
let ?E = "\<lambda>x. f (restrict x {0..<n + m})" and ?C = "\<lambda>i f. \<integral>\<^sup>+x. f x \<partial>C n i x"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n m x
2. \<And>na. \<lbrakk>m \<le> na; na < m'; integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n na x\<rbrakk> \<Longrightarrow> integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n (Suc na) x
[PROOF STEP]
from \<open>m\<le>i\<close> x
[PROOF STATE]
proof (chain)
picking this:
m \<le> i
x \<in> space (Pi\<^sub>M {0..<n} M)
[PROOF STEP]
have "?C i ?E = ?C (Suc i) ?E"
[PROOF STATE]
proof (prove)
using this:
m \<le> i
x \<in> space (Pi\<^sub>M {0..<n} M)
goal (1 subgoal):
1. \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n i x = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n (Suc i) x
[PROOF STEP]
by (auto simp: nn_integral_bind[where B="PiM {0 ..< Suc (n + i)} M"] space_C nn_integral_eP
intro!: nn_integral_cong)
(simp add: space_PiM PiE_iff nonneg prob_space.emeasure_space_1[OF prob_space_P])
[PROOF STATE]
proof (state)
this:
\<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n i x = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n (Suc i) x
goal (2 subgoals):
1. integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n m x
2. \<And>na. \<lbrakk>m \<le> na; na < m'; integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n na x\<rbrakk> \<Longrightarrow> integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n (Suc na) x
[PROOF STEP]
with step
[PROOF STATE]
proof (chain)
picking this:
m \<le> i
i < m'
integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n i x
\<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n i x = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n (Suc i) x
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
m \<le> i
i < m'
integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n i x
\<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n i x = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n (Suc i) x
goal (1 subgoal):
1. integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n (Suc i) x
[PROOF STEP]
by (simp del: restrict_apply)
[PROOF STATE]
proof (state)
this:
integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n (Suc i) x
goal (1 subgoal):
1. integral\<^sup>N (C n m x) f = \<integral>\<^sup>+ x. f (restrict x {0..<n + m}) \<partial>C n m x
[PROOF STEP]
qed (auto simp: space_PiM space_C[OF x] simp del: restrict_apply intro!: nn_integral_cong) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 11,
"llama_tokens": 2147,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
# -*- coding: utf-8 -*-
"""
This example solves a plug-flow reactor problem of hydrogen-oxygen combustion.
The PFR is computed by two approaches: The simulation of a Lagrangian fluid
particle, and the simulation of a chain of reactors.
"""

import cantera as ct
import numpy as np

#######################################################################
# Input Parameters
#######################################################################

T_0 = 1500.0  # inlet temperature [K]
pressure = ct.one_atm  # constant pressure [Pa]
composition_0 = 'H2:2, O2:1, AR:0.1'
length = 1.5e-7  # *approximate* PFR length [m]
u_0 = .006  # inflow velocity [m/s]
area = 1.e-4  # cross-sectional area [m**2]

# input file containing the reaction mechanism
reaction_mechanism = 'h2o2.xml'

# Resolution: The PFR will be simulated by 'n_steps' time steps or by a chain
# of 'n_steps' stirred reactors.
n_steps = 2000

#####################################################################
#####################################################################
# Method 1: Lagrangian Particle Simulation
#####################################################################
# A Lagrangian particle is considered which travels through the PFR. Its
# state change is computed by upwind time stepping. The PFR result is produced
# by transforming the temporal resolution into spatial locations.
# The spatial discretization is therefore not provided a priori but is instead
# a result of the transformation.

# import the gas model and set the initial conditions
gas1 = ct.Solution(reaction_mechanism)
gas1.TPX = T_0, pressure, composition_0
mass_flow_rate1 = u_0 * gas1.density * area

# create a new reactor
r1 = ct.IdealGasConstPressureReactor(gas1)
# create a reactor network for performing time integration
sim1 = ct.ReactorNet([r1])

# approximate a time step to achieve a similar resolution as in the next method
t_total = length / u_0
dt = t_total / n_steps
# define time, space, and other information vectors
t1 = (np.arange(n_steps) + 1) * dt
z1 = np.zeros_like(t1)
u1 = np.zeros_like(t1)
states1 = ct.SolutionArray(r1.thermo)
for n1, t_i in enumerate(t1):
    # perform time integration
    sim1.advance(t_i)
    # compute velocity and transform into space
    u1[n1] = mass_flow_rate1 / area / r1.thermo.density
    # on the first iteration z1[n1 - 1] wraps to z1[-1], which is still 0.0,
    # so the spatial march starts at the inlet as intended
    z1[n1] = z1[n1 - 1] + u1[n1] * dt
    states1.append(r1.thermo.state)

#####################################################################
#####################################################################
# Method 2: Chain of Reactors
#####################################################################
# The plug flow reactor is represented by a linear chain of zero-dimensional
# reactors. The gas at the inlet to the first one has the specified inlet
# composition, and for all others the inlet composition is fixed at the
# composition of the reactor immediately upstream. Since in a PFR model there
# is no diffusion, the upstream reactors are not affected by any downstream
# reactors, and therefore the problem may be solved by simply marching from
# the first to last reactor, integrating each one to steady state.
# (This approach is analogous to the one presented in 'surf_pfr.py', which
# additionally includes surface chemistry)

# import the gas model and set the initial conditions
gas2 = ct.Solution(reaction_mechanism)
gas2.TPX = T_0, pressure, composition_0
mass_flow_rate2 = u_0 * gas2.density * area
dz = length / n_steps
r_vol = area * dz

# create a new reactor
r2 = ct.IdealGasReactor(gas2)
r2.volume = r_vol

# create a reservoir to represent the reactor immediately upstream. Note
# that the gas object is set already to the state of the upstream reactor
upstream = ct.Reservoir(gas2, name='upstream')

# create a reservoir for the reactor to exhaust into. The composition of
# this reservoir is irrelevant.
downstream = ct.Reservoir(gas2, name='downstream')

# The mass flow rate into the reactor will be fixed by using a
# MassFlowController object.
m = ct.MassFlowController(upstream, r2, mdot=mass_flow_rate2)

# We need an outlet to the downstream reservoir. This will determine the
# pressure in the reactor. The value of K will only affect the transient
# pressure difference.
v = ct.PressureController(r2, downstream, master=m, K=1e-5)

sim2 = ct.ReactorNet([r2])

# define time, space, and other information vectors
z2 = (np.arange(n_steps) + 1) * dz
t_r2 = np.zeros_like(z2)  # residence time in each reactor
u2 = np.zeros_like(z2)
t2 = np.zeros_like(z2)
states2 = ct.SolutionArray(r2.thermo)
# iterate through the PFR cells
for n in range(n_steps):
    # Set the state of the reservoir to match that of the previous reactor
    gas2.TDY = r2.thermo.TDY
    upstream.syncState()
    # integrate the reactor forward in time until steady state is reached
    sim2.reinitialize()
    sim2.advance_to_steady_state()
    # compute velocity and transform into time
    u2[n] = mass_flow_rate2 / area / r2.thermo.density
    t_r2[n] = r2.mass / mass_flow_rate2  # residence time in this reactor
    t2[n] = np.sum(t_r2)
    # write output data
    states2.append(r2.thermo.state)

#####################################################################
#####################################################################
# Compare Results in matplotlib
#####################################################################

import matplotlib.pyplot as plt

plt.figure()
plt.plot(z1, states1.T, label='Lagrangian Particle')
plt.plot(z2, states2.T, label='Reactor Chain')
plt.xlabel('$z$ [m]')
plt.ylabel('$T$ [K]')
plt.legend(loc=0)
# Bug fix: save BEFORE show(). With interactive backends, show() blocks and
# the figure may be closed (saved blank) by the time savefig() runs.
plt.savefig('pfr_T_z.png')
plt.show()

plt.figure()
plt.plot(t1, states1.X[:, gas1.species_index('H2')], label='Lagrangian Particle')
plt.plot(t2, states2.X[:, gas2.species_index('H2')], label='Reactor Chain')
plt.xlabel('$t$ [s]')
plt.ylabel('$X_{H_2}$ [-]')
plt.legend(loc=0)
# Bug fix: save before show() for the same reason as above.
plt.savefig('pfr_XH2_t.png')
plt.show()
| {
"alphanum_fraction": 0.6446855399,
"author": null,
"avg_line_length": 37.1006289308,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "982da873a7279e840837dc2e38fcb09fa1412577",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "54ff4acb95721c54996b2b6378498dd439b42b6d",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "vincentrobin/conda",
"max_forks_repo_path": "Cantera-data-examples/examples/reactors/pfr.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "54ff4acb95721c54996b2b6378498dd439b42b6d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "vincentrobin/conda",
"max_issues_repo_path": "Cantera-data-examples/examples/reactors/pfr.py",
"max_line_length": 81,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "54ff4acb95721c54996b2b6378498dd439b42b6d",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "vincentrobin/conda",
"max_stars_repo_path": "Cantera-data-examples/examples/reactors/pfr.py",
"max_stars_repo_stars_event_max_datetime": "2020-10-09T05:19:16.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-09T05:19:16.000Z",
"num_tokens": 1450,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5899
} |
#!/usr/bin/env python
# generative model associated with compression approaches to give us insights
# into how to compress better
import numpy as np
import matplotlib.pyplot as plt
# from .datasets import viz
# ================================================================ main
def unif_nbits(N=200, B=16, M=8, deltaB=2):
    """Sample a synthetic signed-integer series whose per-block bit width varies.

    The series is generated in blocks of ``M`` samples. Each block gets a bit
    budget drawn from a Gaussian centered near ``B - 6`` (clipped to [0, B]);
    the block's deltas are then uniform over the signed range representable in
    that many bits. The deltas are accumulated into a random walk clipped to
    the signed ``B``-bit range.

    Parameters
    ----------
    N : int
        Number of samples; must be a multiple of ``M``.
    B : int
        Bit width of the output values (walk is clipped to signed B bits).
    M : int
        Block size.
    deltaB : int
        Unused; retained for interface compatibility (it parameterized an
        alternative uniform random-walk strategy for the bit budgets).

    Returns
    -------
    numpy.ndarray
        int32 array of shape ``(N,)``.
    """
    assert N % M == 0  # must use a whole number of blocks
    # Bug fix: N / M is a float on Python 3 and breaks np.random.randn and
    # np.zeros below; use integer division.
    N_blocks = N // M

    # Gaussian bit budgets per block, clipped to [0, B] and rounded to int.
    maxes = (np.random.randn(N_blocks) * B / 8) + B - 6
    maxes = np.clip(maxes, 0, B)
    maxes = np.floor(maxes + .5).astype(np.int32)

    # Draw each block's deltas uniformly within its signed bit budget.
    dx = np.zeros((N_blocks, M), dtype=np.int32)
    for i, maxbits in enumerate(maxes):
        if maxbits == 0:
            dx[i, :] = 0
            continue
        minval = -(1 << (maxbits - 1))
        maxval = -minval - 1  # symmetric signed range, e.g. [-8, 7] for 4 bits
        dx[i, :] = np.random.randint(minval, maxval + 1, size=M)
    dx = dx.ravel()[1:]

    # Accumulate the deltas into a walk clipped to the signed B-bit range.
    # NOTE(review): x[0] = dx[0] and the loop's first step adds dx[0] again,
    # so the first delta is applied twice; preserved as-is from the original.
    x = np.zeros(N, dtype=np.int32)
    minval = -(1 << (B - 1))
    maxval = -minval - 1
    x[0] = dx[0]
    for i, delta in enumerate(dx):
        x[i + 1] = np.clip(x[i] + delta, minval, maxval)
    return x
def main():
    """Plot a few sample sequences from each generative model side by side."""
    nbits = 16       # number of bits
    nsamples = 200   # number of samples
    block_len = 8    # block size
    repeats = 5

    fig, axes = plt.subplots(2, 2, figsize=(10, 7))

    axes[0, 0].set_title('unif nbits walk')
    for _ in range(repeats):
        seq = unif_nbits(N=nsamples, M=block_len, B=nbits)
        axes[0, 0].plot(seq)

    axes[0, 1].set_title('gauss rand walk')
    for _ in range(repeats):
        walk = np.cumsum(np.random.randn(nsamples))
        axes[0, 1].plot(walk)

    plt.show()
# Script entry point: render the sample plots when run directly.
if __name__ == '__main__':
    main()
| {
"alphanum_fraction": 0.5485631139,
"author": null,
"avg_line_length": 27.7676767677,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8e24d5f3e118b8d375b390d1933cc188d324be1f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 10,
"max_forks_repo_forks_event_max_datetime": "2022-02-18T22:28:23.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-03-08T09:04:29.000Z",
"max_forks_repo_head_hexsha": "a056cdb67d049669875ab5487359aca99ae873ea",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "memetb/sprintz",
"max_forks_repo_path": "python/generative.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "a056cdb67d049669875ab5487359aca99ae873ea",
"max_issues_repo_issues_event_max_datetime": "2020-11-09T01:37:08.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-03-27T23:29:15.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "memetb/sprintz",
"max_issues_repo_path": "python/generative.py",
"max_line_length": 77,
"max_stars_count": 45,
"max_stars_repo_head_hexsha": "a056cdb67d049669875ab5487359aca99ae873ea",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "dblalock/sprintz",
"max_stars_repo_path": "python/generative.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-08T05:42:06.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-02T15:50:25.000Z",
"num_tokens": 878,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2749
} |
\section{The one-pass distributed algorithm}
The essence of the distributed strategy is to achieve almost perfect
parallelism, by splitting the input matrix into several smaller
matrices called \emph{jobs}. \\
\[
A^{m \times n} =
\begin{bmatrix}
A_1^{m \times c_1} \mid A_2^{m \times c_2} \mid \cdots \mid A_k^{m \times c_k}
\end{bmatrix}
\suchthat \sum_{i=1}^k c_i = n
\]
\\
A subset of these smaller matrices or \emph{jobs} is assigned to each
node in the cluster, depending on their capabilities; the
objective is to assign matrices that fit into the node's RAM
memory. Each node will calculate the SVD factorization of the
submatrices assigned, but merging those results into a single
SVD approximation that covers all the input data it received. At the
end, a global merge step across all the nodes is performed, giving the
global SVD approximation for original matrix $A$. The
\cref{alg:svd-dist} describes the overall distributed algorithm: \\
\begin{algorithm}
\label{alg:svd-dist}
\caption{Distributed-SVD: Distributed SVD for LSI (global)}
%
\setstretch{1.35}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}
\DontPrintSemicolon
%
\Input{Truncation factor $k$, queue of jobs $A= [A_1, A_2, \dots ]$}
%
\Output{Matrices $U^{m \times k}$ and $\Sigma^{k \times k}$,
from the SVD decomp. of $A$}
%
\For {\textbf{all} (node $i$ in cluster)}
{
$B_i \gets \text{subset of the queue of jobs } [A_1,A_2,\dots]$ \;
%
$P_i = (U_i,\Sigma_i) \gets \func{SVD-Node}(k,B_i)$ \;
}
$(U,\Sigma) \gets \func{Reduce}(\func{Merge-SVD},[P_1,P_2,\dots])$ \;
%
return $(U, \Sigma)$ \;
\end{algorithm}
\hfill
The first important detail of the algorithm just shown is that we do
not compute the matrix $V$ of the SVD factorization. The reason is
explained at the end of the last section; for the moment, let us just
say that this matrix is not required for our
purposes. \\
We can also observe the map-reduce pattern in this algorithm, with the map
part being the iteration done over $p$ nodes (in parallel); and the
reduce part being the final merge of those partial results. The
\cref{alg:svd-dist-node} describes the part done inside each node.
\begin{algorithm}
\label{alg:svd-dist-node}
\caption{SVD-Node: Distributed SVD for LSI (node)}
%
\setstretch{1.35}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}
\DontPrintSemicolon
%
\Input{Truncation factor $k$, queue of jobs $A_1,A_2,\dots$}
%
\Output{Matrices $U^{m \times k}$ and $\Sigma^{k
\times k}$, from the SVD of $[A_1,A_2,\dots]$}
%
$P = (U,\Sigma) \gets 0^{m \times k} 0^{k \times k}$ \;
%
\For {each job $A_i$}
{
$\prim{P} = (\prim{U},\prim{\Sigma}) \gets \func{Basecase-SVD}(k,A_i)$ \;
%
$P = (U^{m \times k},\Sigma^{k \times k}) \gets \func{Merge-SVD}(k, P, \prim{P})$ \;
}
%
return $(U,\Sigma)$ \;
\end{algorithm}
\hfill
It is important to realize that the iteration in this
\cref{alg:svd-dist-node} is done serially, but that the procedure
$\func{Basecase-SVD}$ that resolves the SVD of a
matrix that fits in memory (base case), internally may exploit the
multicore or vectorial capabilities of the node computer. This
procedure serves as a black box SVD calculator, and \Rehurek mentions
at least two algorithms which can be plugged on its place: \\
\begin{enumerate}
\item The Lanczos algorithm as implemented by SVDLIBC (\cite{svdlibc}),
which in turn is based on SVDPACKC written by Berry et al
(\cite{svdpackc}), which in turn is based on its Fortran77
predecessor SVDPACK (\cite{svdpack}). All of them ultimately based
on seminal paper by Berry \cite{berry92} (which in turn comes from
his PhD thesis \cite{berry91}). \\
\item A custom stochastic algorithm based on the work of Halko et al
(see \cite{halko11}).
\end{enumerate}
\hfill
For the scope of this project, we considered it appropriate to focus
only on the Lanczos-based algorithm, as that is essentially what we
described in the previous chapter. In that sense, the work of \Rehurek
is interesting because, by using the divide-and-conquer strategy for
the SVD problem, he leverages the decades of research and the
numerical accuracy of the work done by Berry et al. At the same time,
his key contribution becomes the procedure $\func{Merge-SVD}$, which
we will describe in further sections. \\
| {
"alphanum_fraction": 0.7141222248,
"author": null,
"avg_line_length": 36.7142857143,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "3589a82772411f72d0744f9e02cd07290317e2f9",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3db2aed30f124e79d60dd7aa6c012ddd05bdce7f",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "rzavalet/svd-lsi-project-master",
"max_forks_repo_path": "svd-dist-alg.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3db2aed30f124e79d60dd7aa6c012ddd05bdce7f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "rzavalet/svd-lsi-project-master",
"max_issues_repo_path": "svd-dist-alg.tex",
"max_line_length": 88,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3db2aed30f124e79d60dd7aa6c012ddd05bdce7f",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "rzavalet/svd-lsi-project-master",
"max_stars_repo_path": "svd-dist-alg.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1331,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 4369
} |
# Copyright Contributors to the Tapqir project.
# SPDX-License-Identifier: Apache-2.0
import math
from collections import defaultdict
from functools import partial
from pathlib import Path
import numpy as np
import pandas as pd
import pyro
import pyro.distributions as dist
import torch
from pyro.ops.indexing import Vindex
from pyro.ops.stats import hpdi, quantile
from sklearn.metrics import (
confusion_matrix,
matthews_corrcoef,
precision_score,
recall_score,
)
from tapqir.distributions import AffineBeta
from tapqir.distributions.util import gaussian_spots
pyro.enable_validation(False)
def snr(
    data: torch.Tensor,
    width: torch.Tensor,
    x: torch.Tensor,
    y: torch.Tensor,
    target_locs: torch.Tensor,
    background: torch.Tensor,
    gain: float,
    offset_mean: float,
    offset_var: float,
    P: int,
    theta_probs: torch.Tensor,
) -> torch.Tensor:
    r"""
    Calculate the signal-to-noise ratio for spots whose theta probability
    exceeds 0.5.

    Total signal:

    .. math::
        \mu_{knf} = \sum_{ij} I_{nfij}
        \mathcal{N}(i, j \mid x_{knf}, y_{knf}, w_{knf})`

    Noise:

    .. math::
        \sigma^2_{knf} = \sigma^2_{\text{offset}}
        + \mu_{knf} \text{gain}`

    Signal-to-noise ratio:

    .. math::
        \text{SNR}_{knf} =
        \dfrac{\mu_{knf} - b_{nf} - \mu_{\text{offset}}}{\sigma_{knf}}
        \text{ for } \theta_{nf} = k`
    """
    # Unit-height Gaussian kernels centered at each spot act as per-pixel
    # weights for integrating the image.
    unit_height = torch.ones(1, device=torch.device("cpu"))
    weights = gaussian_spots(
        unit_height,
        width,
        x,
        y,
        target_locs,
        P,
    )

    # Weighted sum of the background- and offset-corrected image over the
    # two pixel dimensions.
    corrected = data - background[..., None, None] - offset_mean
    signal = (corrected * weights).sum(dim=(-2, -1))

    noise = torch.sqrt(offset_var + background * gain)
    ratio = signal / noise

    # Keep only entries where the theta probability is above 0.5.
    return ratio[theta_probs > 0.5]
def save_stats(model, path, CI=0.95, save_matlab=False):
    """
    Compute posterior summary statistics for a fitted model and save them.

    Highest-posterior-density credible intervals of mass ``CI`` and means are
    computed for the global variational parameters and, batch-wise, for the
    local per-spot parameters. Results are stored on the model
    (``model.params``, ``model.summary``) and, if ``path`` is given, written
    to disk (``torch.save``, CSV summary, optional MATLAB ``.mat``).

    :param model: fitted model exposing ``_global_params``, ``data``, and
        variational parameters registered in the pyro param store.
    :param path: output directory, or None to skip saving.
    :param CI: credible-interval mass, e.g. 0.95 for a 95% interval.
    :param save_matlab: also write the statistics as a MATLAB ``.mat`` file.
    """
    # Consistency fix: column labels were created as f"{int(100*CI)}% ..."
    # but assignments hard-coded "95% ...", which silently created extra
    # columns whenever CI != 0.95. Use one pair of labels throughout.
    ll_label = f"{int(100*CI)}% LL"
    ul_label = f"{int(100*CI)}% UL"

    # global parameters
    global_params = model._global_params
    summary = pd.DataFrame(
        index=global_params,
        columns=["Mean", ll_label, ul_label],
    )
    # local parameters
    local_params = [
        "height",
        "width",
        "x",
        "y",
        "background",
    ]
    ci_stats = defaultdict(partial(defaultdict, list))

    num_samples = 10000
    for param in global_params:
        # Reconstruct each global posterior from its variational parameters.
        if param == "gain":
            fn = dist.Gamma(
                pyro.param("gain_loc") * pyro.param("gain_beta"),
                pyro.param("gain_beta"),
            )
        elif param == "pi":
            fn = dist.Dirichlet(pyro.param("pi_mean") * pyro.param("pi_size"))
        elif param == "lamda":
            fn = dist.Gamma(
                pyro.param("lamda_loc") * pyro.param("lamda_beta"),
                pyro.param("lamda_beta"),
            )
        elif param == "proximity":
            fn = AffineBeta(
                pyro.param("proximity_loc"),
                pyro.param("proximity_size"),
                0,
                (model.data.P + 1) / math.sqrt(12),
            )
        elif param == "trans":
            fn = dist.Dirichlet(
                pyro.param("trans_mean") * pyro.param("trans_size")
            ).to_event(1)
        else:
            raise NotImplementedError
        samples = fn.sample((num_samples,)).data.squeeze()
        ci_stats[param] = {}
        LL, UL = hpdi(
            samples,
            CI,
            dim=0,
        )
        ci_stats[param]["LL"] = LL.cpu()
        ci_stats[param]["UL"] = UL.cpu()
        ci_stats[param]["Mean"] = fn.mean.data.squeeze().cpu()

        # calculate Keq = pi[1] / (1 - pi[1]) (odds of the second pi component)
        if param == "pi":
            ci_stats["Keq"] = {}
            LL, UL = hpdi(samples[:, 1] / (1 - samples[:, 1]), CI, dim=0)
            ci_stats["Keq"]["LL"] = LL.cpu()
            ci_stats["Keq"]["UL"] = UL.cpu()
            ci_stats["Keq"]["Mean"] = (samples[:, 1] / (1 - samples[:, 1])).mean().cpu()

    # Local parameters are sampled batch-wise over AOIs (ndx) and frames
    # (fdx); this does not need to be very accurate, so use fewer samples.
    num_samples = 1000
    for param in local_params:
        LL, UL, Mean = [], [], []
        for ndx in torch.split(torch.arange(model.data.Nt), model.nbatch_size):
            ndx = ndx[:, None]
            kdx = torch.arange(model.K)[:, None, None]
            ll, ul, mean = [], [], []
            for fdx in torch.split(torch.arange(model.data.F), model.fbatch_size):
                if param == "background":
                    fn = dist.Gamma(
                        Vindex(pyro.param("b_loc"))[ndx, fdx]
                        * Vindex(pyro.param("b_beta"))[ndx, fdx],
                        Vindex(pyro.param("b_beta"))[ndx, fdx],
                    )
                elif param == "height":
                    fn = dist.Gamma(
                        Vindex(pyro.param("h_loc"))[kdx, ndx, fdx]
                        * Vindex(pyro.param("h_beta"))[kdx, ndx, fdx],
                        Vindex(pyro.param("h_beta"))[kdx, ndx, fdx],
                    )
                elif param == "width":
                    fn = AffineBeta(
                        Vindex(pyro.param("w_mean"))[kdx, ndx, fdx],
                        Vindex(pyro.param("w_size"))[kdx, ndx, fdx],
                        0.75,
                        2.25,
                    )
                elif param == "x":
                    # NOTE(review): x and y both read the shared
                    # pyro.param("size") (not "x_size"/"y_size") — confirm
                    # this matches the parameter names used in the guide.
                    fn = AffineBeta(
                        Vindex(pyro.param("x_mean"))[kdx, ndx, fdx],
                        Vindex(pyro.param("size"))[kdx, ndx, fdx],
                        -(model.data.P + 1) / 2,
                        (model.data.P + 1) / 2,
                    )
                elif param == "y":
                    fn = AffineBeta(
                        Vindex(pyro.param("y_mean"))[kdx, ndx, fdx],
                        Vindex(pyro.param("size"))[kdx, ndx, fdx],
                        -(model.data.P + 1) / 2,
                        (model.data.P + 1) / 2,
                    )
                else:
                    raise NotImplementedError
                samples = fn.sample((num_samples,)).data.squeeze()
                l, u = hpdi(
                    samples,
                    CI,
                    dim=0,
                )
                m = fn.mean.data.squeeze()
                ll.append(l)
                ul.append(u)
                mean.append(m)
            # The original attached these as ``for ... else`` blocks; the
            # loops contain no ``break``, so the else-bodies always ran and
            # are inlined here with identical behavior.
            LL.append(torch.cat(ll, -1))
            UL.append(torch.cat(ul, -1))
            Mean.append(torch.cat(mean, -1))
        ci_stats[param]["LL"] = torch.cat(LL, -2).cpu()
        ci_stats[param]["UL"] = torch.cat(UL, -2).cpu()
        ci_stats[param]["Mean"] = torch.cat(Mean, -2).cpu()

    # Fill the summary table from the computed statistics.
    for param in global_params:
        if param == "pi":
            summary.loc[param, "Mean"] = ci_stats[param]["Mean"][1].item()
            summary.loc[param, ll_label] = ci_stats[param]["LL"][1].item()
            summary.loc[param, ul_label] = ci_stats[param]["UL"][1].item()
            # Keq
            summary.loc["Keq", "Mean"] = ci_stats["Keq"]["Mean"].item()
            summary.loc["Keq", ll_label] = ci_stats["Keq"]["LL"].item()
            summary.loc["Keq", ul_label] = ci_stats["Keq"]["UL"].item()
        elif param == "trans":
            # Off-diagonal entries of the transition matrix are reported as
            # kon = trans[0, 1] and koff = trans[1, 0].
            summary.loc["kon", "Mean"] = ci_stats[param]["Mean"][0, 1].item()
            summary.loc["kon", ll_label] = ci_stats[param]["LL"][0, 1].item()
            summary.loc["kon", ul_label] = ci_stats[param]["UL"][0, 1].item()
            summary.loc["koff", "Mean"] = ci_stats[param]["Mean"][1, 0].item()
            summary.loc["koff", ll_label] = ci_stats[param]["LL"][1, 0].item()
            summary.loc["koff", ul_label] = ci_stats[param]["UL"][1, 0].item()
        else:
            summary.loc[param, "Mean"] = ci_stats[param]["Mean"].item()
            summary.loc[param, ll_label] = ci_stats[param]["LL"].item()
            summary.loc[param, ul_label] = ci_stats[param]["UL"].item()

    # Posterior probability tensors already computed by the model.
    ci_stats["m_probs"] = model.m_probs.data.cpu()
    ci_stats["theta_probs"] = model.theta_probs.data.cpu()
    ci_stats["z_probs"] = model.z_probs.data.cpu()
    ci_stats["z_map"] = model.z_map.data.cpu()

    model.params = ci_stats

    # snr
    summary.loc["SNR", "Mean"] = (
        snr(
            model.data.images[:, :, model.cdx],
            ci_stats["width"]["Mean"],
            ci_stats["x"]["Mean"],
            ci_stats["y"]["Mean"],
            model.data.xy[:, :, model.cdx],
            ci_stats["background"]["Mean"],
            ci_stats["gain"]["Mean"],
            model.data.offset.mean,
            model.data.offset.var,
            model.data.P,
            model.theta_probs,
        )
        .mean()
        .item()
    )

    # classification statistics (only when ground-truth labels are available)
    if model.data.labels is not None:
        pred_labels = model.z_map[model.data.is_ontarget].cpu().numpy().ravel()
        true_labels = model.data.labels["z"][: model.data.N, :, model.cdx].ravel()
        with np.errstate(divide="ignore", invalid="ignore"):
            summary.loc["MCC", "Mean"] = matthews_corrcoef(true_labels, pred_labels)
        summary.loc["Recall", "Mean"] = recall_score(
            true_labels, pred_labels, zero_division=0
        )
        summary.loc["Precision", "Mean"] = precision_score(
            true_labels, pred_labels, zero_division=0
        )
        (
            summary.loc["TN", "Mean"],
            summary.loc["FP", "Mean"],
            summary.loc["FN", "Mean"],
            summary.loc["TP", "Mean"],
        ) = confusion_matrix(true_labels, pred_labels, labels=(0, 1)).ravel()

        # HPD interval of z_probs restricted to label-positive samples.
        mask = torch.from_numpy(model.data.labels["z"][: model.data.N, :, model.cdx])
        samples = torch.masked_select(model.z_probs[model.data.is_ontarget].cpu(), mask)
        if len(samples):
            z_ll, z_ul = hpdi(samples, CI)
            summary.loc["p(specific)", "Mean"] = quantile(samples, 0.5).item()
            summary.loc["p(specific)", ll_label] = z_ll.item()
            summary.loc["p(specific)", ul_label] = z_ul.item()
        else:
            summary.loc["p(specific)", "Mean"] = 0.0
            summary.loc["p(specific)", ll_label] = 0.0
            summary.loc["p(specific)", ul_label] = 0.0

    model.summary = summary

    if path is not None:
        path = Path(path)
        torch.save(ci_stats, path / f"{model.full_name}-params.tpqr")
        if save_matlab:
            from scipy.io import savemat

            # MATLAB cannot store torch tensors; convert everything to numpy.
            for param, field in ci_stats.items():
                if param in (
                    "m_probs",
                    "theta_probs",
                    "z_probs",
                    "z_map",
                ):
                    ci_stats[param] = field.numpy()
                    continue
                for stat, value in field.items():
                    ci_stats[param][stat] = value.cpu().numpy()
            savemat(path / f"{model.full_name}-params.mat", ci_stats)
        summary.to_csv(
            path / f"{model.full_name}-summary.csv",
        )
| {
"alphanum_fraction": 0.4933762649,
"author": null,
"avg_line_length": 35.1779935275,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "13edc955e54506b3359158d83638564c67cf7eee",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-05-30T21:54:37.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-30T21:54:37.000Z",
"max_forks_repo_head_hexsha": "60da3fda1632d4309ff7d0ffeeab5940a020963a",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "gelles-brandeis/tapqir",
"max_forks_repo_path": "tapqir/utils/stats.py",
"max_issues_count": 12,
"max_issues_repo_head_hexsha": "60da3fda1632d4309ff7d0ffeeab5940a020963a",
"max_issues_repo_issues_event_max_datetime": "2022-02-22T14:59:46.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-06-04T03:38:47.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "gelles-brandeis/tapqir",
"max_issues_repo_path": "tapqir/utils/stats.py",
"max_line_length": 88,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "60da3fda1632d4309ff7d0ffeeab5940a020963a",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "gelles-brandeis/tapqir",
"max_stars_repo_path": "tapqir/utils/stats.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T20:01:18.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-06-24T20:44:10.000Z",
"num_tokens": 2739,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10870
} |
"""Colour related classes and functions."""
from collections import deque
import random
import numpy
def wrap_hue(value):
    """Normalize *value* into the hue range [0, 360)."""
    # Python's % yields a non-negative result for a positive modulus, which
    # is exactly what the original pair of while-loops computed.
    return value % 360
def rainbow(gap):
    """Return a generator factory that steps around the hue wheel by *gap*."""
    def hue_iterator():
        # Random starting point, then an arithmetic progression modulo 360.
        start = wrap_hue(random.randint(0, 359) * gap)
        step = 0
        while True:
            yield wrap_hue(start + step * gap)
            step += 1
    return hue_iterator
def hue_selector(x, y):
    """Endlessly yield uniform random integers from the closed range [x, y]."""
    # iter(callable, sentinel) calls until the sentinel appears; randint
    # never returns None, so this is an infinite stream.
    yield from iter(lambda: random.randint(x, y), None)
def random_hue():
    """Yield hues uniformly at random from the full wheel [0, 359]."""
    for hue in hue_selector(0, 359):
        yield hue
def reds():
    """Yield random reddish hues: offsets in [0, 120] shifted by 300, wrapped."""
    for offset in hue_selector(0, 120):
        yield wrap_hue(300 + offset)
def greens():
    """Yield random greenish hues in [60, 180]."""
    for hue in hue_selector(60, 180):
        yield hue
def blues():
    """Yield random bluish hues in [180, 300]."""
    for hue in hue_selector(180, 300):
        yield hue
class ColourPicker:
    """Turn audio energy samples into (hue, saturation, brightness) triples."""

    default_buffer_size = 48

    # Candidate hue generators; repeated entries weight the random choice.
    themes = [
        rainbow(30), rainbow(-30),
        random_hue, random_hue, random_hue, random_hue,
        reds, reds, blues, blues, greens, greens,
    ]

    def __init__(self, buffer_size: int = default_buffer_size):
        self.change_theme()
        # Rolling window of recent energy values used to scale sat/brightness.
        self.buffer = deque(maxlen=buffer_size)

    def change_theme(self):
        """Instantiate a freshly chosen hue generator at random."""
        self.theme = random.choice(self.themes)()
        print(f'Changed theme: {self.theme.__name__}')

    def fit_value(self, minimum, maximum, minimum_value, maximum_value, value):
        """Linearly map *value* from [minimum, maximum] to [minimum_value, maximum_value]."""
        slope, intercept = numpy.polyfit(
            (minimum, maximum), (minimum_value, maximum_value), 1
        )
        return slope * value + intercept

    def clamp_byte(self, value):
        """Clamp *value* into the byte range [0, 255]."""
        return min(255, max(value, 0))

    def pick(self, energy):
        """Return a (hue, saturation, brightness) triple for an energy sample."""
        self.buffer.append(energy)
        hue = next(self.theme)
        lowest = min(self.buffer)
        highest = max(self.buffer)
        saturation = self.clamp_byte(round(self.fit_value(lowest, highest, 220, 255, energy)))
        brightness = self.clamp_byte(round(self.fit_value(lowest, highest, 150, 220, energy)))
        return hue, saturation, brightness
# Manual test harness: drive the picker from live audio when run directly.
if __name__ == '__main__':
    from .audio import listen

    colour_picker = ColourPicker()
    counter = 0

    # Called once per audio event: print a colour and rotate the theme
    # every 10 events.
    def handler(beat):
        global counter
        print(colour_picker.pick(beat))
        counter += 1
        if counter >= 10:
            colour_picker.change_theme()
            counter = 0

    listen(handler)
| {
"alphanum_fraction": 0.6145393068,
"author": null,
"avg_line_length": 20.7543859649,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "22c3972f63f9a39de5b503d0450c88b2cf12e3fc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "bc256a07877a464c074f00dec53f480f97944983",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "thomasleese/partay",
"max_forks_repo_path": "partay/colours.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "bc256a07877a464c074f00dec53f480f97944983",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "thomasleese/partay",
"max_issues_repo_path": "partay/colours.py",
"max_line_length": 95,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "bc256a07877a464c074f00dec53f480f97944983",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "thomasleese/partay",
"max_stars_repo_path": "partay/colours.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 572,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2366
} |
"""
Class Features
Name: lib_data_io_ascii
Author(s): Francesco Avanzi (francesco.avanzi@cimafoundation.org), Fabio Delogu (fabio.delogu@cimafoundation.org)
Date: '20210603'
Version: '1.0.0'
"""
#######################################################################################
# Libraries
import logging
import rasterio
import os
import numpy as np
from rasterio.crs import CRS
from lib_utils_io import create_darray_2d
from lib_info_args import logger_name
from lib_info_args import proj_epsg as proj_epsg_default
# Logging
log_stream = logging.getLogger(logger_name)
logging.getLogger("rasterio").setLevel(logging.WARNING)
# Debug
import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Default method variable(s)
info_default_fields = ['nrows', 'ncols', 'xll', 'yll', 'res']
map_default_fields = ['nrows', 'ncols', 'xllcorner', 'yllcorner', 'cellsize']
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to extract data grid information
def extract_data_grid(geo_data, geo_x_values, geo_y_values, geo_transform, geo_bbox=None,
                      tag_geo_values='data',
                      tag_geo_x='geo_x', tag_geo_y='geo_y',
                      tag_nodata='nodata_value', value_no_data=-9999.0):
    """Build a grid-description dictionary from a georeferenced 2D dataset.

    :param geo_data: 2D array of data values
    :param geo_x_values: 1D array of x (longitude) coordinates
    :param geo_y_values: 1D array of y (latitude) coordinates
    :param geo_transform: affine transform sequence; index 0 is read as the
        cell size, 2 as the x offset and 5 as the y offset
        (GDAL/rasterio Affine layout — note index 5 is the *top* edge in
        that convention; TODO confirm the intended corner semantics)
    :param geo_bbox: optional (xll, yll, ...) bounding box overriding the
        corner taken from geo_transform
    :param tag_geo_values, tag_geo_x, tag_geo_y: output dictionary keys
    :param tag_nodata: key for the no-data marker
    :param value_no_data: default no-data value
    :return: dict with nrows/ncols/xllcorner/yllcorner/cellsize, the data
        array, the coordinate arrays and a no-data marker
    """
    # Corner coordinates: explicit bbox wins over the transform offsets.
    if geo_bbox is not None:
        xll_corner, yll_corner = geo_bbox[0], geo_bbox[1]
    else:
        xll_corner, yll_corner = geo_transform[2], geo_transform[5]

    data_grid = {
        'nrows': geo_y_values.shape[0],
        'ncols': geo_x_values.shape[0],
        'xllcorner': xll_corner,
        'yllcorner': yll_corner,
        'cellsize': abs(geo_transform[0]),
        tag_geo_values: geo_data,
        tag_geo_x: geo_x_values,
        tag_geo_y: geo_y_values,
    }

    # Guarantee a no-data marker even when the caller's tags did not set one.
    data_grid.setdefault(tag_nodata, value_no_data)
    return data_grid
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create data grid information
def create_data_grid(info_grid, info_expected_fields=None, map_expected_fields=None,
                     tag_geo_values='data',
                     tag_geo_x='geo_x', tag_geo_y='geo_y',
                     tag_nodata='nodata_value', value_no_data=-9999.0, value_default_data=1):
    """Create a grid-description dictionary from corner/resolution info.

    :param info_grid: dict carrying at least the info_expected_fields
        (defaults: nrows, ncols, xll, yll, res), all non-None
    :param info_expected_fields: field names expected in info_grid
    :param map_expected_fields: corresponding output field names
    :param value_default_data: value used to fill the synthetic data array
    :return: dict with grid metadata, a constant data array and cell-centre
        coordinate arrays (latitudes stored north-up), or None when the
        input information is missing or malformed (a warning is logged)
    """
    if info_expected_fields is None:
        info_expected_fields = info_default_fields
    if map_expected_fields is None:
        map_expected_fields = map_default_fields

    # Guard clauses replace the original nested-if pyramid.
    if info_grid is None:
        log_stream.warning(' ===> Grid information are null. Datasets will be set to None')
        return None
    if not isinstance(info_grid, dict):
        log_stream.warning(' ===> Grid information are not in dictionary format. Datasets will be set to None')
        return None
    if not set(info_expected_fields).issubset(info_grid):
        log_stream.warning(' ===> Grid information are not enough. Datasets will be set to None')
        return None
    # BUGFIX: the original tested any(v is not None for v in info_grid.values()),
    # which let a partially-defined grid through and crashed on the arithmetic
    # below; all expected fields must be defined, matching the warning text.
    if not all(info_grid[field] is not None for field in info_expected_fields):
        log_stream.warning(' ===> Grid information are not all defined. Datasets will be set to None')
        return None

    data_grid = {map_field: info_grid[info_field]
                 for info_field, map_field in zip(info_expected_fields, map_expected_fields)}

    xll = data_grid['xllcorner']
    yll = data_grid['yllcorner']
    nrows = data_grid['nrows']
    ncols = data_grid['ncols']
    res = data_grid['cellsize']

    # Cell-centre coordinates of the regular grid.
    geo_x_values = np.arange(xll + res / 2, xll + res / 2 + res * ncols, res)
    geo_y_values = np.arange(yll + res / 2, yll + res / 2 + res * nrows, res)
    geo_x_values_2d, geo_y_values_2d = np.meshgrid(geo_x_values, geo_y_values)

    # Store latitudes north-up (first row = largest latitude).
    geo_y_upper = geo_y_values_2d[0, 0]
    geo_y_lower = geo_y_values_2d[-1, 0]
    if geo_y_lower > geo_y_upper:
        geo_y_values_2d = np.flipud(geo_y_values_2d)

    # Synthetic constant-valued data array matching the grid shape.
    geo_data_values = np.zeros([geo_y_values.shape[0], geo_x_values.shape[0]])
    geo_data_values[:, :] = value_default_data

    data_grid[tag_geo_values] = geo_data_values
    data_grid[tag_geo_x] = geo_x_values_2d[0, :]
    data_grid[tag_geo_y] = geo_y_values_2d[:, 0]

    data_grid.setdefault(tag_nodata, value_no_data)
    return data_grid
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to read an ascii grid file
def read_data_grid(file_name, output_format='data_array', output_dtype='float32'):
    """Read an ascii grid file with rasterio.

    :param file_name: path of the ascii grid file
    :param output_format: 'data_array' (via create_darray_2d) or 'dictionary'
    :param output_dtype: declared output type
        # NOTE(review): output_dtype is currently unused in the body
    :return: the data object, or None when the file could not be opened
    """
    try:
        # NOTE(review): dset is never closed; consider `with rasterio.open(...)`.
        dset = rasterio.open(file_name)
        bounds = dset.bounds
        res = dset.res
        transform = dset.transform
        data = dset.read()
        # Fall back to the default EPSG when the file carries no CRS.
        if dset.crs is None:
            crs = CRS.from_string(proj_epsg_default)
        else:
            crs = dset.crs
        values = data[0, :, :]
        decimal_round = 7

        # Rebuild cell-centre coordinates from the outer bounds.
        center_right = bounds.right - (res[0] / 2)
        center_left = bounds.left + (res[0] / 2)
        center_top = bounds.top - (res[1] / 2)
        center_bottom = bounds.bottom + (res[1] / 2)

        # NOTE(review): the lat endpoint pads with res[0]/2 but steps by
        # res[1] — fine for square cells, suspicious otherwise; confirm.
        lon = np.arange(center_left, center_right + np.abs(res[0] / 2), np.abs(res[0]), float)
        lat = np.arange(center_bottom, center_top + np.abs(res[0] / 2), np.abs(res[1]), float)
        lons, lats = np.meshgrid(lon, lat)

        # Consistency checks between rebuilt coordinates and the bounds.
        # NOTE(review): assert is stripped under `python -O`; raise explicitly
        # if this validation matters in production.
        min_lon_round = round(np.min(lons), decimal_round)
        max_lon_round = round(np.max(lons), decimal_round)
        min_lat_round = round(np.min(lats), decimal_round)
        max_lat_round = round(np.max(lats), decimal_round)
        center_right_round = round(center_right, decimal_round)
        center_left_round = round(center_left, decimal_round)
        center_bottom_round = round(center_bottom, decimal_round)
        center_top_round = round(center_top, decimal_round)
        assert min_lon_round == center_left_round
        assert max_lon_round == center_right_round
        assert min_lat_round == center_bottom_round
        assert max_lat_round == center_top_round

        # Store latitudes north-up (first row = largest latitude).
        lats = np.flipud(lats)

        if output_format == 'data_array':
            data_obj = create_darray_2d(values, lons[0, :], lats[:, 0],
                                        coord_name_x='west_east', coord_name_y='south_north',
                                        dim_name_x='west_east', dim_name_y='south_north')
        elif output_format == 'dictionary':
            data_obj = {'values': values, 'longitude': lons[0, :], 'latitude': lats[:, 0],
                        'transform': transform, 'crs': crs,
                        'bbox': [bounds.left, bounds.bottom, bounds.right, bounds.top],
                        'bb_left': bounds.left, 'bb_right': bounds.right,
                        'bb_top': bounds.top, 'bb_bottom': bounds.bottom,
                        'res_lon': res[0], 'res_lat': res[1]}
        else:
            log_stream.error(' ===> File static "' + file_name + '" output format not allowed')
            raise NotImplementedError('Case not implemented yet')
    except IOError as io_error:
        # Unreadable files degrade to None with a warning instead of raising.
        data_obj = None
        log_stream.warning(' ===> File static in ascii grid was not correctly open with error "' + str(io_error) + '"')
        log_stream.warning(' ===> Filename "' + os.path.split(file_name)[1] + '"')
    return data_obj
# -------------------------------------------------------------------------------------
| {
"alphanum_fraction": 0.5431573976,
"author": null,
"avg_line_length": 42.7727272727,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "83dfb7337ac8ecf14479ddc82c780295ba1f350c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "cbb1f347f558fbfaa8d564441931989bc833a02d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "c-hydro/fp-s3m",
"max_forks_repo_path": "tools/s3m_source2nc_converter/lib_data_io_ascii.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "cbb1f347f558fbfaa8d564441931989bc833a02d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "c-hydro/fp-s3m",
"max_issues_repo_path": "tools/s3m_source2nc_converter/lib_data_io_ascii.py",
"max_line_length": 119,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "cbb1f347f558fbfaa8d564441931989bc833a02d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "c-hydro/fp-s3m",
"max_stars_repo_path": "tools/s3m_source2nc_converter/lib_data_io_ascii.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1838,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8469
} |
[STATEMENT]
lemma finite_ImageI:
assumes "finite A"
assumes "\<And>a. a\<in>A \<Longrightarrow> finite (R``{a})"
shows "finite (R``A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite (R `` A)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. finite (R `` A)
[PROOF STEP]
note [[simproc add: finite_Collect]]
[PROOF STATE]
proof (state)
this:
TERM _
goal (1 subgoal):
1. finite (R `` A)
[PROOF STEP]
have "R``A = \<Union>{R``{a} | a. a:A}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. R `` A = \<Union> {R `` {a} |a. a \<in> A}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
R `` A = \<Union> {R `` {a} |a. a \<in> A}
goal (1 subgoal):
1. finite (R `` A)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
R `` A = \<Union> {R `` {a} |a. a \<in> A}
goal (1 subgoal):
1. finite (R `` A)
[PROOF STEP]
have "finite (\<Union>{R``{a} | a. a:A})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite (\<Union> {R `` {a} |a. a \<in> A})
[PROOF STEP]
apply (rule finite_Union)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. finite {R `` {a} |a. a \<in> A}
2. \<And>M. M \<in> {R `` {a} |a. a \<in> A} \<Longrightarrow> finite M
[PROOF STEP]
apply (simp add: assms)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>M. M \<in> {R `` {a} |a. a \<in> A} \<Longrightarrow> finite M
[PROOF STEP]
apply (clarsimp simp: assms)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
finite (\<Union> {R `` {a} |a. a \<in> A})
goal (1 subgoal):
1. finite (R `` A)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
finite (R `` A)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
finite (R `` A)
goal (1 subgoal):
1. finite (R `` A)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
finite (R `` A)
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "CAVA_LTL_Modelchecker_SM_Lib_SOS_Misc_Add",
"hexsha": null,
"include": null,
"lang": null,
"length": 14,
"llama_tokens": 865,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import sys
import unittest
import keras2onnx
import numpy as np
from keras2onnx.proto import keras, is_tf_keras
from keras.applications.vgg16 import VGG16
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../tests/'))
from test_utils import run_onnx_runtime
import tensorflow as tf
from keras.utils import conv_utils
from keras.engine import Layer, InputSpec
import keras.backend as K
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
# Short aliases for the Keras layer/model classes used throughout this module.
Activation = keras.layers.Activation
BatchNormalization = keras.layers.BatchNormalization
Conv2D = keras.layers.Conv2D
concatenate = keras.layers.concatenate
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Reshape = keras.layers.Reshape
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
Sequential = keras.models.Sequential
Model = keras.models.Model
def _to_list(x):
if isinstance(x, list):
return x
return [x]
def _collect_input_shape(input_tensors):
    """Return the static shape(s) of one or more tensors.

    A single tensor yields a single shape tuple; a list yields a list of
    shapes. A tensor whose shape cannot be inspected maps to ``None``.
    """
    shapes = []
    for tensor in _to_list(input_tensors):
        try:
            shapes.append(K.int_shape(tensor))
        except Exception as exc:
            # Best effort: report and fall back to None for this tensor.
            print(exc)
            shapes.append(None)
    return shapes[0] if len(shapes) == 1 else shapes
def _permute_dimensions(x, pattern):
    """Transpose tensor *x* so its axes follow *pattern* (tf.transpose wrapper)."""
    return tf.transpose(x, perm=pattern)
def _resie_image(x, target_layer, target_shape, data_format):
    """Resize *x* (nearest-neighbour) to the spatial size of *target_layer*.

    # NOTE(review): the name looks like a typo for "_resize_image"; kept
    # as-is because Interpolate.call references it by this name.
    """
    if data_format == 'channels_first':
        # Spatial dims are axes 2-3; transpose to NHWC for tf.image, then back.
        new_shape = tf.shape(target_layer)[2:]
        x = _permute_dimensions(x, [0, 2, 3, 1])
        x = tf.image.resize_nearest_neighbor(x, new_shape)
        x = _permute_dimensions(x, [0, 3, 1, 2])
        # Restore the static shape information lost by the dynamic resize.
        x.set_shape((None, None, target_shape[2], target_shape[3]))
        return x
    elif data_format == 'channels_last':
        new_shape = tf.shape(target_layer)[1:3]
        x = tf.image.resize_nearest_neighbor(x, new_shape)
        x.set_shape((None, target_shape[1], target_shape[2], None))
        return x
    else:
        raise ValueError('Unknown data_format: ' + str(data_format))
class Interpolate(Layer):
    """Custom Keras layer that resizes its 4D input to the spatial size of
    *target_layer* using nearest-neighbour interpolation (see _resie_image).
    """

    def __init__(self, target_layer, data_format=None, **kwargs):
        super(Interpolate, self).__init__(**kwargs)
        self.target_layer = target_layer
        # Static shape of the target, captured once at construction time.
        self.target_shape = _collect_input_shape(target_layer)
        # self.data_format = conv_utils.normalize_data_format(data_format)
        self.data_format = K.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=4)

    def compute_output_shape(self, input_shape):
        # Batch and channel dims pass through; spatial dims come from target.
        if self.data_format == 'channels_first':
            height = self.target_shape[2]
            width = self.target_shape[3]
            return (input_shape[0],
                    input_shape[1],
                    height,
                    width)
        elif self.data_format == 'channels_last':
            height = self.target_shape[1]
            width = self.target_shape[2]
            return (input_shape[0],
                    height,
                    width,
                    input_shape[3])

    def call(self, inputs, **kwargs):
        return _resie_image(inputs, self.target_layer, self.target_shape, self.data_format)
def up_conv(input_tensor, filters):
    """Decoder stage: 1x1 conv (filters[0]) then 3x3 conv (filters[1]),
    each followed by batch normalisation and a ReLU activation.
    """
    out = Conv2D(filters[0], kernel_size=1)(input_tensor)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = Conv2D(filters[1], kernel_size=3, padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    return out
def conv_cls(input_tensor, num_class):
    """Score head: a small conv stack ending in a sigmoid feature map."""
    x = Conv2D(32, kernel_size=3, padding='same', activation='relu')(input_tensor)
    x = Conv2D(32, kernel_size=3, padding='same', activation='relu')(x)
    x = Conv2D(16, kernel_size=3, padding='same', activation='relu')(x)
    x = Conv2D(16, kernel_size=1, padding='same', activation='relu')(x)
    # NOTE(review): num_class is used as the *kernel size* while the filter
    # count stays 16; the upstream CRAFT head uses Conv2D(num_class,
    # kernel_size=1). Confirm this is intentional — downstream code only
    # reads channels 0 and 1 of the result.
    x = Conv2D(16, kernel_size=num_class, padding='same', activation='sigmoid')(x)
    return x
def VGG16_UNet(weights=None, input_tensor=None, pooling=None):
    """Build the CRAFT text detector: a VGG16 encoder with a U-Net-style
    decoder that emits per-pixel region and affinity score maps.

    :return: the pair (region_score, affinity_score) tensors.
    """
    vgg16 = VGG16(include_top=False, weights=weights, input_tensor=input_tensor, pooling=pooling)

    # Encoder bottom: extra pooling plus dilated convs on top of block5.
    source = vgg16.get_layer('block5_conv3').output
    x = MaxPooling2D(3, strides=1, padding='same', name='block5_pool')(source)
    x = Conv2D(1024, kernel_size=3, padding='same', dilation_rate=6)(x)
    x = Conv2D(1024, kernel_size=1)(x)

    # Decoder: resize to each skip connection, concatenate, then up_conv.
    x = Interpolate(target_layer=source, name='resize_1')(x)
    x = concatenate([x, source])
    x = up_conv(x, [512, 256])

    source = vgg16.get_layer('block4_conv3').output
    x = Interpolate(target_layer=source, name='resize_2')(x)
    x = concatenate([x, source])
    x = up_conv(x, [256, 128])

    source = vgg16.get_layer('block3_conv3').output
    x = Interpolate(target_layer=source, name='resize_3')(x)
    x = concatenate([x, source])
    x = up_conv(x, [128, 64])

    source = vgg16.get_layer('block2_conv2').output
    x = Interpolate(target_layer=source, name='resize_4')(x)
    x = concatenate([x, source])
    feature = up_conv(x, [64, 32])

    x = conv_cls(feature, 2)
    # Channel 0 carries the region score, channel 1 the affinity score.
    region_score = Lambda(lambda layer: layer[:, :, :, 0])(x)
    affinity_score = Lambda(lambda layer: layer[:, :, :, 1])(x)

    return region_score, affinity_score
# From https://github.com/RubanSeven/CRAFT_keras/blob/master/net/vgg16.py
class TestCRAFT(unittest.TestCase):
    """End-to-end conversion test: CRAFT (VGG16-UNet) Keras model to ONNX."""

    def setUp(self):
        # Converted model files registered here are removed in tearDown.
        self.model_files = []

    def tearDown(self):
        for fl in self.model_files:
            os.remove(fl)

    @unittest.skipIf(get_maximum_opset_supported() < 10,
                     "Need Upsample 10+ support.")
    def test_CRAFT(self):
        # input_image = Input(shape=(None, None, 3)) -- Need fixed input shape
        input_image = Input(shape=(512, 512, 3))
        region, affinity = VGG16_UNet(input_tensor=input_image, weights=None)
        keras_model = Model(input_image, [region, affinity], name='vgg16_unet')
        x = np.random.rand(1, 512, 512, 3).astype(np.float32)
        # Keras prediction is the reference output for the ONNX comparison.
        expected = keras_model.predict(x)
        onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, x, expected, self.model_files))
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
| {
"alphanum_fraction": 0.6593245228,
"author": null,
"avg_line_length": 35.1030927835,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bc886a9617dbc7d15f807bfb65a4004abdf51d0f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3b0ccecabdba66704aa7fd260d08f2a6dccfe3cb",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "szha/keras-onnx",
"max_forks_repo_path": "applications/nightly_build/test_craft.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3b0ccecabdba66704aa7fd260d08f2a6dccfe3cb",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "szha/keras-onnx",
"max_issues_repo_path": "applications/nightly_build/test_craft.py",
"max_line_length": 107,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3b0ccecabdba66704aa7fd260d08f2a6dccfe3cb",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "szha/keras-onnx",
"max_stars_repo_path": "applications/nightly_build/test_craft.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1704,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6810
} |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
def getWeightGrad(self, grad):
    """Gradient hook: append a CPU copy of the weight gradient to *self*."""
    self.weight_grad.append(grad.to("cpu"))
def getInputGrad(self, grad):
    """Gradient hook: append a CPU copy of the input gradient to *self*."""
    self.input_grad.append(grad.to("cpu"))
class TestSlowConvDilated2dBackward(TestCase):
    """Compare dilated conv2d forward and backward results between CPU and NPU."""

    # Scratch lists used by the (currently commented-out) gradient hooks.
    weight_grad = []
    input_grad = []

    def cpu_op_exec(self, input1, weight, bias1, stride=1, padding=0, dilation=2, groups=1):
        # Reference path: conv2d forward + backward on the CPU.
        weight1 = weight  # NOTE(review): unused local
        input1.requires_grad = True
        #input1.register_hook(lambda grad: self.getInputGrad(grad))
        weight.requires_grad = True
        #weight.register_hook(lambda grad: self.getWeightGrad(grad))
        bias1.requires_grad = True
        res_forward = torch.nn.functional.conv2d(input1, weight, bias1, stride, padding, dilation, groups)
        # Backprop an all-ones gradient to populate .grad on the leaves.
        grads = torch.ones_like(res_forward).float()
        res_forward.backward(grads)
        res_forward = res_forward.detach().numpy()
        return res_forward, input1.grad, weight.grad

    def npu_op_exec(self, input1, weight, bias1, stride=1, padding=0, dilation=2, groups=1):
        # Device path: same computation on the NPU, results copied back to CPU.
        weight1 = weight  # NOTE(review): unused local
        input1.requires_grad = True
        #input1.register_hook(lambda grad: self.getInputGrad(grad))
        weight.requires_grad = True
        #weight.register_hook(lambda grad: self.getWeightGrad(grad))
        bias1 = bias1.to("npu")
        bias1.requires_grad = True
        res_forward =torch.nn.functional.conv2d(input1, weight, bias1, stride, padding, dilation, groups)
        grads = torch.ones_like(res_forward).float()
        res_forward.backward(grads)
        grads = grads.to("npu")  # NOTE(review): dead store — grads is unused afterwards
        res_forward = res_forward.to("cpu")
        res_forward = res_forward.detach().numpy()
        return res_forward, input1.grad.to("cpu"), weight.grad.to("cpu")

    def test_slow_conv_dilated2d_backward_shape_format(self, device):
        weight_grad = []  # NOTE(review): unused locals shadowing the class attributes
        input_grad = []
        # Each item: [dtype, format code, shape]; the middle field is
        # presumably an NPU tensor format — confirm with create_common_tensor.
        shape_format = [
            [np.float32, 0, (64, 1, 16, 14)],
            [np.float32, 3, (256, 1, 8, 8)],
            [np.float32, 4, (32, 1, 8, 8)],
            [np.float32, 0, (10, 1, 16, 16)]
        ]
        for item in shape_format:
            self.weight_grad.clear()
            self.input_grad.clear()
            cpu_input1, npu_input1 = create_common_tensor(item, -2, 2)
            cpu_weight, npu_weight = create_common_tensor([np.float32, 0, (3, 1, 2, 2)], -2, 2)
            cpu_bias, npu_bias = create_common_tensor([np.float32, 0, (3)], 1, 100)
            cpu_output, cpu_input_grad, cpu_weight_grad =self.cpu_op_exec(cpu_input1, cpu_weight, bias1=cpu_bias)
            npu_output, npu_input_grad, npu_weight_grad = self.npu_op_exec(npu_input1, npu_weight, bias1=npu_bias)
            # Looser relative tolerance for gradients than for the forward pass.
            self.assertRtolEqual(cpu_output, npu_output, 0.001)
            self.assertRtolEqual(cpu_input_grad, npu_input_grad, 0.01)
            self.assertRtolEqual(cpu_weight_grad, npu_weight_grad, 0.01)
# Register the test class for every available device type except the CPU.
instantiate_device_type_tests(TestSlowConvDilated2dBackward, globals(), except_for="cpu")

if __name__ == "__main__":
    run_tests()
"alphanum_fraction": 0.6523170423,
"author": null,
"avg_line_length": 40.7113402062,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "68688ae6faaaf28701889744bc013d134d7ce2e5",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "Ascend/pytorch",
"max_forks_repo_path": "test/test_npu/test_network_ops/test_slow_conv_dilated2d_backward.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"max_issues_repo_issues_event_max_datetime": "2021-11-12T08:28:13.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-11-12T07:23:03.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "Ascend/pytorch",
"max_issues_repo_path": "test/test_npu/test_network_ops/test_slow_conv_dilated2d_backward.py",
"max_line_length": 114,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "Ascend/pytorch",
"max_stars_repo_path": "test/test_npu/test_network_ops/test_slow_conv_dilated2d_backward.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-02T03:07:35.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-02T03:07:35.000Z",
"num_tokens": 966,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3949
} |
import logging
from absl import app
from absl import flags
import numpy as np
import torch
from bgrl import *
log = logging.getLogger(__name__)
FLAGS = flags.FLAGS
# Dataset.
flags.DEFINE_enum('dataset', 'coauthor-cs',
['amazon-computers', 'amazon-photos', 'coauthor-cs', 'coauthor-physics', 'wiki-cs'],
'Which graph dataset to use.')
flags.DEFINE_string('dataset_dir', './data', 'Where the dataset resides.')
# Architecture.
flags.DEFINE_multi_integer('graph_encoder_layer', None, 'Conv layer sizes.')
flags.DEFINE_string('ckpt_path', None, 'Path to checkpoint.')
def main(argv):
    """Evaluate a pre-trained BGRL graph encoder with a linear probe."""
    # use CUDA_VISIBLE_DEVICES to select gpu
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    log.info('Using {} for evaluation.'.format(device))

    # Load data; wiki-cs ships its own canonical train/val/test split masks.
    if FLAGS.dataset != 'wiki-cs':
        dataset = get_dataset(FLAGS.dataset_dir, FLAGS.dataset)
    else:
        dataset, train_masks, val_masks, test_masks = get_wiki_cs(FLAGS.dataset_dir)
    data = dataset[0]  # all dataset include one graph
    log.info('Dataset {}, {}.'.format(dataset.__class__.__name__, data))
    data = data.to(device)  # permanently move in gpu memory

    # Build the encoder and restore the trained weights.
    # NOTE(review): representation_size is computed but never used below.
    input_size, representation_size = data.x.size(1), FLAGS.graph_encoder_layer[-1]
    encoder = GCN([input_size] + FLAGS.graph_encoder_layer, batchnorm=True)   # 512, 256, 128
    load_trained_encoder(encoder, FLAGS.ckpt_path, device)
    encoder.eval()

    # Compute frozen node representations for the whole graph.
    representations, labels = compute_representations(encoder, dataset, device)

    # Linear probe: logistic regression on the frozen embeddings.
    if FLAGS.dataset != 'wiki-cs':
        score = fit_logistic_regression(representations.cpu().numpy(), labels.cpu().numpy())[0]
    else:
        scores = fit_logistic_regression_preset_splits(representations.cpu().numpy(), labels.cpu().numpy(),
                                                       train_masks, val_masks, test_masks)
        score = np.mean(scores)

    print('Test score: %.5f' %score)
if __name__ == "__main__":
    # Log the runtime version, then hand control to absl for flag parsing.
    log.info('PyTorch version: %s' % torch.__version__)
    app.run(main)
| {
"alphanum_fraction": 0.6769157994,
"author": null,
"avg_line_length": 35.2333333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "07d87da2d021c610fc4c1a1187cf1d82112242be",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2022-03-16T06:22:05.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-02-08T13:29:09.000Z",
"max_forks_repo_head_hexsha": "c79ae37c228a5c3ee23c7cd8726fe6f5dd8c9675",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "devvrit/bgrl",
"max_forks_repo_path": "linear_eval_transductive.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "c79ae37c228a5c3ee23c7cd8726fe6f5dd8c9675",
"max_issues_repo_issues_event_max_datetime": "2021-11-08T19:52:04.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-11-08T19:52:04.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "devvrit/bgrl",
"max_issues_repo_path": "linear_eval_transductive.py",
"max_line_length": 107,
"max_stars_count": 27,
"max_stars_repo_head_hexsha": "c79ae37c228a5c3ee23c7cd8726fe6f5dd8c9675",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "devvrit/bgrl",
"max_stars_repo_path": "linear_eval_transductive.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-26T16:47:42.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-08T00:54:37.000Z",
"num_tokens": 494,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2114
} |
      SUBROUTINE DIAGN(N,A,D,V,EPS)
* Diagonalization of a real symmetric NxN matrix
* Using the Jacobi method (from Numerical Recipes)
* In:  N   - matrix order
*      A   - symmetric matrix; its upper triangle is destroyed
*      EPS - convergence threshold on the off-diagonal sum
* Out: D   - eigenvalues (unsorted; see SORTN/SORTNA)
*      V   - eigenvectors, column I corresponding to D(I)
* NOTE(review): if NMAX sweeps pass without convergence the routine
* returns silently with the values found so far.
      IMPLICIT NONE
      INTEGER N,I,J,IP,IQ,NMAX
      PARAMETER(NMAX=500)
      DOUBLE PRECISION A(N,N),D(N),V(N,N),B(N),Z(N)
      DOUBLE PRECISION EPS,SM,THR,G,H,C,S,T,THETA,TAU
* Initialise V to the identity; copy the diagonal of A into B and D
      DO IP=1,N
       DO IQ=1,N
        V(IP,IQ)=0d0
       ENDDO
       V(IP,IP)=1d0
       B(IP)=A(IP,IP)
       D(IP)=B(IP)
       Z(IP)=0d0
      ENDDO
* Jacobi sweeps
      DO I=1,NMAX
* Sum of the moduli of the off-diagonal elements
       SM=0d0
       DO IP=1,N-1
        DO IQ=IP+1,N
         SM=SM+DABS(A(IP,IQ))
        ENDDO
       ENDDO
* Converged: the off-diagonal part is below threshold
       IF(SM.LE.EPS)RETURN
* During the first three sweeps rotate only elements above THR
       IF(I.LT.4)THEN
        THR=.2d0*SM/N**2
       ELSE
        THR=0d0
       ENDIF
       DO IP=1,N-1
        DO IQ=IP+1,N
* After the fourth sweep, zero elements that are already negligible
         IF(DABS(A(IP,IQ)).LT.EPS*MIN(DABS(D(IP)),DABS(D(IQ)))
     .      .AND. I.GT.4)THEN
          A(IP,IQ)=0d0
         ELSEIF(DABS(A(IP,IQ)).GT.THR)THEN
          H=D(IQ)-D(IP)
* Rotation parameters: T=tan, C=cos, S=sin, TAU=tan(theta/2)
          IF(DABS(A(IP,IQ)).LT.EPS*DABS(H))THEN
           T=A(IP,IQ)/H
          ELSE
           THETA=.5d0*H/A(IP,IQ)
           T=1d0/(DABS(THETA)+DSQRT(1d0+THETA**2))
           IF(THETA.LT.0d0)T=-T
          ENDIF
          C=1d0/DSQRT(1d0+T**2)
          S=T*C
          TAU=S/(1d0+C)
          H=T*A(IP,IQ)
          Z(IP)=Z(IP)-H
          Z(IQ)=Z(IQ)+H
          D(IP)=D(IP)-H
          D(IQ)=D(IQ)+H
          A(IP,IQ)=0d0
* Apply the rotation to rows/columns 1..IP-1
          DO J=1,IP-1
           G=A(J,IP)
           H=A(J,IQ)
           A(J,IP)=G-S*(H+G*TAU)
           A(J,IQ)=H+S*(G-H*TAU)
          ENDDO
* ... to the strip between IP and IQ
          DO J=IP+1,IQ-1
           G=A(IP,J)
           H=A(J,IQ)
           A(IP,J)=G-S*(H+G*TAU)
           A(J,IQ)=H+S*(G-H*TAU)
          ENDDO
* ... and to columns IQ+1..N
          DO J=IQ+1,N
           G=A(IP,J)
           H=A(IQ,J)
           A(IP,J)=G-S*(H+G*TAU)
           A(IQ,J)=H+S*(G-H*TAU)
          ENDDO
* Accumulate the rotation into the eigenvector matrix
          DO J=1,N
           G=V(J,IP)
           H=V(J,IQ)
           V(J,IP)=G-S*(H+G*TAU)
           V(J,IQ)=H+S*(G-H*TAU)
          ENDDO
         ENDIF
        ENDDO
       ENDDO
* Fold the accumulated corrections Z into B and D; reset Z
       DO IP=1,N
        B(IP)=B(IP)+Z(IP)
        D(IP)=B(IP)
        Z(IP)=0d0
       ENDDO
      ENDDO
      RETURN
      END
      SUBROUTINE SORTN(N,D,V)
* Reordering of the eigenvalues D(I), I=1..N
* and corresponding eigenvectors V(J,I), J=1..N in ascending order
* (from Numerical Recipes)
* Straight selection sort: O(N**2), fine for small matrices.
      IMPLICIT NONE
      INTEGER N,I,J,K
      DOUBLE PRECISION D(N),V(N,N),P
      DO I=1,N-1
* Locate the smallest remaining eigenvalue D(K)
       K=I
       P=D(I)
       DO J=I+1,N
        IF(D(J).LT.P)THEN
         K=J
         P=D(J)
        ENDIF
       ENDDO
* Swap eigenvalues I and K together with their eigenvector columns
       IF(K.NE.I)THEN
        D(K)=D(I)
        D(I)=P
        DO J=1,N
         P=V(J,I)
         V(J,I)=V(J,K)
         V(J,K)=P
        ENDDO
       ENDIF
      ENDDO
      RETURN
      END
      SUBROUTINE SORTNA(N,D,V)
* Reordering of the absolute value of the eigenvalues D(I), I=1..N
* and corresponding eigenvectors V(J,I), J=1..N in ascending order
* (from Numerical Recipes)
* Same selection sort as SORTN, but comparing |D(J)| instead of D(J).
      IMPLICIT NONE
      INTEGER N,I,J,K
      DOUBLE PRECISION D(N),V(N,N),P
      DO I=1,N-1
* Locate the eigenvalue of smallest modulus among the remaining ones
       K=I
       P=D(I)
       DO J=I+1,N
        IF(DABS(D(J)).LT.DABS(P))THEN
         K=J
         P=D(J)
        ENDIF
       ENDDO
* Swap eigenvalues I and K together with their eigenvector columns
       IF(K.NE.I)THEN
        D(K)=D(I)
        D(I)=P
        DO J=1,N
         P=V(J,I)
         V(J,I)=V(J,K)
         V(J,K)=P
        ENDDO
       ENDIF
      ENDDO
      RETURN
      END
      DOUBLE PRECISION FUNCTION RUNM(Q,NF)
* Subroutine to calculate the quark running masses
* Returns the running mass of quark flavour NF (1..6) at scale Q,
* evolving across the flavour thresholds (HDECAY-style; relies on
* ALPHAS and the common blocks below).
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      IMPLICIT INTEGER (I-N)
      PARAMETER (NN=6)
      PARAMETER (ZETA3 = 1.202056903159594d0)
      DIMENSION AM(NN),YMSB(NN)
      COMMON/ALS/XLAMBDA,AMCA,AMBA,AMTA,N0A
      COMMON/GAUGE/ALSMZ,ALEMMZ,GF,gg1,gg2,S2TW
      COMMON/SMSPEC/AMS,AMC,AMB,AMBP,AMT,AMTAU,AMMUON,AMZ,AMW
* Statement functions: beta-function (B0..B2) and anomalous-dimension
* (G0..G2) coefficients for NF active flavours, and the derived C1/C2
      B0(NF)= (33d0-2d0*NF)/12d0
      B1(NF)= (102d0-38d0/3d0*NF)/16d0
      B2(NF)= (2857d0/2d0-5033d0/18d0*NF+325d0/54d0*NF**2)/64d0
      G0(NF)= 1d0
      G1(NF)= (202d0/3d0-20d0/9d0*NF)/16d0
      G2(NF)= (1249d0-(2216d0/27d0+160d0/3d0*ZETA3)*NF
     .       - 140d0/81d0*NF**2)/64d0
      C1(NF)= G1(NF)/B0(NF) - B1(NF)*G0(NF)/B0(NF)**2
      C2(NF)= ((G1(NF)/B0(NF) - B1(NF)*G0(NF)/B0(NF)**2)**2
     .       + G2(NF)/B0(NF) + B1(NF)**2*G0(NF)/B0(NF)**3
     .       - B1(NF)*G1(NF)/B0(NF)**2 - B2(NF)*G0(NF)/B0(NF)**2)/2d0
* TRAN: pole-to-running conversion; CQ: mass evolution function
      TRAN(X,XK)= 1d0+4d0/3d0*ALPHAS(X,2)/PI+XK*(ALPHAS(X,2)/PI)**2
      CQ(X,NF)= (2d0*B0(NF)*X)**(G0(NF)/B0(NF))
     .          * (1d0+C1(NF)*X+C2(NF)*X**2)
      PI= 4d0*DATAN(1d0)
      ACC= 1d-8
* Input quark masses (u and d treated as massless)
      AM(1)= 0
      AM(2)= 0
      AM(3)= AMS
      AM(4)= AMC
      AM(5)= AMBP
      AM(6)= AMT
* Matching coefficient corrected for the lighter-quark mass ratios
      XK= 16.11d0
      DO 1 I=1,NF-1
       XK= XK - 1.04d0*(1d0-AM(I)/AM(NF))
    1 CONTINUE
      IF(NF.GE.4)THEN
       XMSB= AM(NF)/TRAN(AM(NF),0d0)
       XMHAT= XMSB/CQ(ALPHAS(AM(NF),2)/PI,NF)
      ELSE
       XMSB= 0
       XMHAT= 0
      ENDIF
* Evolve the running mass of flavour NF across the flavour thresholds
      YMSB(3)= AMS
      IF(NF.EQ.3)THEN
       YMSB(4)= YMSB(3)*CQ(ALPHAS(AM(4),2)/PI,3)/
     .          CQ(ALPHAS(1d0,2)/PI,3)
       YMSB(5)= YMSB(4)*CQ(ALPHAS(AM(5),2)/PI,4)/
     .          CQ(ALPHAS(AM(4),2)/PI,4)
       YMSB(6)= YMSB(5)*CQ(ALPHAS(AM(6),2)/PI,5)/
     .          CQ(ALPHAS(AM(5),2)/PI,5)
      ELSEIF(NF.EQ.4)THEN
       YMSB(4)= XMSB
       YMSB(5)= YMSB(4)*CQ(ALPHAS(AM(5),2)/PI,4)/
     .          CQ(ALPHAS(AM(4),2)/PI,4)
       YMSB(6)= YMSB(5)*CQ(ALPHAS(AM(6),2)/PI,5)/
     .          CQ(ALPHAS(AM(5),2)/PI,5)
      ELSEIF(NF.EQ.5)THEN
       YMSB(5)= XMSB
       YMSB(4)= YMSB(5)*CQ(ALPHAS(AM(4),2)/PI,4)/
     .          CQ(ALPHAS(AM(5),2)/PI,4)
       YMSB(6)= YMSB(5)*CQ(ALPHAS(AM(6),2)/PI,5)/
     .          CQ(ALPHAS(AM(5),2)/PI,5)
      ELSEIF(NF.EQ.6)THEN
       YMSB(6)= XMSB
       YMSB(5)= YMSB(6)*CQ(ALPHAS(AM(5),2)/PI,5)/
     .          CQ(ALPHAS(AM(6),2)/PI,5)
       YMSB(4)= YMSB(5)*CQ(ALPHAS(AM(4),2)/PI,4)/
     .          CQ(ALPHAS(AM(5),2)/PI,4)
      ENDIF
* Select the active-flavour band containing the requested scale Q
      IF(Q.LT.AMC)THEN
       N0= 3
       Q0= 1d0
      ELSEIF(Q.LE.AMBP)THEN
       N0= 4
       Q0= AMC
      ELSEIF(Q.LE.AMT)THEN
       N0= 5
       Q0= AMBP
      ELSE
       N0= 6
       Q0= AMT
      ENDIF
      IF(NF.GT.3)THEN
       XKFAC= TRAN(AM(NF),0d0)/TRAN(AM(NF),XK)
      ELSE
       XKFAC= 1d0
      ENDIF
* Run from the reference scale Q0 to Q within the N0-flavour scheme
      RUNM= YMSB(N0)*CQ(ALPHAS(Q,2)/PI,N0)/
     .      CQ(ALPHAS(Q0,2)/PI,N0)
     .      * XKFAC
      RETURN
      END
* Running alpha_s and aux. subroutines/functions as in HDECAY
      DOUBLE PRECISION FUNCTION ALPHAS(Q,N)
* Running strong coupling alpha_s(Q).
* N=1 selects the one-loop formula (with Lambda set XLB1);
* any other N selects the two-loop formula (with set XLB2).
* Flavour thresholds AMC/AMBP/AMT and the Lambda arrays are taken
* from the common blocks filled by ALSINI.
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      IMPLICIT INTEGER (I-N)
      DIMENSION XLB(6)
      COMMON/ALSLAM/XLB1(6),XLB2(6)
      COMMON/ALS/XLAMBDA,AMC,AMBP,AMT,N0
* Statement functions: beta coefficients and the 1- and 2-loop
* alpha_s expressions for NF flavours at scale X
      B0(NF)=33d0-2d0*NF
      B1(NF)=6d0*(153d0-19d0*NF)/B0(NF)**2
      ALS1(NF,X)=12d0*PI/(B0(NF)*DLOG(X**2/XLB(NF)**2))
      ALS2(NF,X)=12d0*PI/(B0(NF)*DLOG(X**2/XLB(NF)**2))
     .          *(1d0-B1(NF)*DLOG(DLOG(X**2/XLB(NF)**2))
     .          /DLOG(X**2/XLB(NF)**2))
      PI=4d0*DATAN(1d0)
* Pick the Lambda set matching the requested loop order
      IF(N.EQ.1)THEN
       DO 1 I=1,6
        XLB(I)=XLB1(I)
    1  CONTINUE
      ELSE
       DO 2 I=1,6
        XLB(I)=XLB2(I)
    2  CONTINUE
      ENDIF
* Number of active flavours at the scale Q
      IF(Q.LT.AMC)THEN
       NF=3
      ELSEIF(Q.LE.AMBP)THEN
       NF=4
      ELSEIF(Q.LE.AMT)THEN
       NF=5
      ELSE
       NF=6
      ENDIF
* Evaluate the coupling at the requested loop order
      IF(N.EQ.1)THEN
       ALPHAS=ALS1(NF,Q)
      ELSE
       ALPHAS=ALS2(NF,Q)
      ENDIF
      RETURN
      END
* ALSINI(ACC): initialise the Lambda_QCD arrays used by ALPHAS.
* Starting from XLAMBDA defined for N0 flavours (COMMON/ALS/), the
* Lambdas for 3..6 flavours are obtained by matching alpha_s across
* the quark-mass thresholds AMC/AMBP/AMT.  XLB1 holds the one-loop
* values (closed-form matching); XLB2 holds the two-loop values,
* refined iteratively by XITER to accuracy ACC.
      SUBROUTINE ALSINI(ACC)
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      IMPLICIT INTEGER (I-N)
      DIMENSION XLB(6)
      COMMON/ALSLAM/XLB1(6),XLB2(6)
      COMMON/ALS/XLAMBDA,AMC,AMBP,AMT,N0
      PI=4d0*DATAN(1d0)
* Entries 1 and 2 (NF=1,2) are never used; zero them explicitly
      XLB1(1)=0d0
      XLB1(2)=0d0
      XLB2(1)=0d0
      XLB2(2)=0d0
* --- one-loop matching: Lambda(NF+-1) from Lambda(NF) at threshold
      IF(N0.EQ.3)THEN
      XLB(3)=XLAMBDA
      XLB(4)=XLB(3)*(XLB(3)/AMC)**(2d0/25d0)
      XLB(5)=XLB(4)*(XLB(4)/AMBP)**(2d0/23d0)
      XLB(6)=XLB(5)*(XLB(5)/AMT)**(2d0/21d0)
      ELSEIF(N0.EQ.4)THEN
      XLB(4)=XLAMBDA
      XLB(5)=XLB(4)*(XLB(4)/AMBP)**(2d0/23d0)
      XLB(3)=XLB(4)*(XLB(4)/AMC)**(-2d0/27d0)
      XLB(6)=XLB(5)*(XLB(5)/AMT)**(2d0/21d0)
      ELSEIF(N0.EQ.5)THEN
      XLB(5)=XLAMBDA
      XLB(4)=XLB(5)*(XLB(5)/AMBP)**(-2d0/25d0)
      XLB(3)=XLB(4)*(XLB(4)/AMC)**(-2d0/27d0)
      XLB(6)=XLB(5)*(XLB(5)/AMT)**(2d0/21d0)
      ELSEIF(N0.EQ.6)THEN
      XLB(6)=XLAMBDA
      XLB(5)=XLB(6)*(XLB(6)/AMT)**(-2d0/23d0)
      XLB(4)=XLB(5)*(XLB(5)/AMBP)**(-2d0/25d0)
      XLB(3)=XLB(4)*(XLB(4)/AMC)**(-2d0/27d0)
      ENDIF
      DO 1 I=1,6
      XLB1(I)=XLB(I)
    1 CONTINUE
* --- two-loop matching: analytic estimate, then XITER refinement
      IF(N0.EQ.3)THEN
      XLB(3)=XLAMBDA
      XLB(4)=XLB(3)*(XLB(3)/AMC)**(2d0/25d0)
     . *(2d0*DLOG(AMC/XLB(3)))**(-107d0/1875d0)
      XLB(4)=XITER(AMC,XLB(3),3,XLB(4),4,ACC)
      XLB(5)=XLB(4)*(XLB(4)/AMBP)**(2d0/23d0)
     . *(2d0*DLOG(AMBP/XLB(4)))**(-963d0/13225d0)
      XLB(5)=XITER(AMBP,XLB(4),4,XLB(5),5,ACC)
      XLB(6)=XLB(5)*(XLB(5)/AMT)**(2d0/21d0)
     . *(2d0*DLOG(AMT/XLB(5)))**(-321d0/3381d0)
      XLB(6)=XITER(AMT,XLB(5),5,XLB(6),6,ACC)
      ELSEIF(N0.EQ.4)THEN
      XLB(4)=XLAMBDA
      XLB(5)=XLB(4)*(XLB(4)/AMBP)**(2d0/23d0)
     . *(2d0*DLOG(AMBP/XLB(4)))**(-963d0/13225d0)
      XLB(5)=XITER(AMBP,XLB(4),4,XLB(5),5,ACC)
      XLB(3)=XLB(4)*(XLB(4)/AMC)**(-2d0/27d0)
     . *(2d0*DLOG(AMC/XLB(4)))**(107d0/2025d0)
      XLB(3)=XITER(AMC,XLB(4),4,XLB(3),3,ACC)
      XLB(6)=XLB(5)*(XLB(5)/AMT)**(2d0/21d0)
     . *(2d0*DLOG(AMT/XLB(5)))**(-321d0/3381d0)
      XLB(6)=XITER(AMT,XLB(5),5,XLB(6),6,ACC)
      ELSEIF(N0.EQ.5)THEN
      XLB(5)=XLAMBDA
      XLB(4)=XLB(5)*(XLB(5)/AMBP)**(-2d0/25d0)
     . *(2d0*DLOG(AMBP/XLB(5)))**(963d0/14375d0)
      XLB(4)=XITER(AMBP,XLB(5),5,XLB(4),4,ACC)
      XLB(3)=XLB(4)*(XLB(4)/AMC)**(-2d0/27d0)
     . *(2d0*DLOG(AMC/XLB(4)))**(107d0/2025d0)
      XLB(3)=XITER(AMC,XLB(4),4,XLB(3),3,ACC)
      XLB(6)=XLB(5)*(XLB(5)/AMT)**(2d0/21d0)
     . *(2d0*DLOG(AMT/XLB(5)))**(-321d0/3381d0)
      XLB(6)=XITER(AMT,XLB(5),5,XLB(6),6,ACC)
      ELSEIF(N0.EQ.6)THEN
      XLB(6)=XLAMBDA
      XLB(5)=XLB(6)*(XLB(6)/AMT)**(-2d0/23d0)
     . *(2d0*DLOG(AMT/XLB(6)))**(321d0/3703d0)
      XLB(5)=XITER(AMT,XLB(6),6,XLB(5),5,ACC)
      XLB(4)=XLB(5)*(XLB(5)/AMBP)**(-2d0/25d0)
     . *(2d0*DLOG(AMBP/XLB(5)))**(963d0/14375d0)
      XLB(4)=XITER(AMBP,XLB(5),5,XLB(4),4,ACC)
      XLB(3)=XLB(4)*(XLB(4)/AMC)**(-2d0/27d0)
     . *(2d0*DLOG(AMC/XLB(4)))**(107d0/2025d0)
      XLB(3)=XITER(AMC,XLB(4),4,XLB(3),3,ACC)
      ENDIF
      DO 2 I=1,6
      XLB2(I)=XLB(I)
    2 CONTINUE
      RETURN
      END
* XITER(Q,XLB1,NF1,XLB,NF2,ACC): iterate the NF2-flavour Lambda until
* the two-loop alpha_s with NF2 flavours matches the two-loop alpha_s
* with NF1 flavours (Lambda=XLB1) at the threshold scale Q, to
* relative accuracy ACC.  XLB is the starting guess; the converged
* Lambda is returned.
      DOUBLE PRECISION FUNCTION XITER(Q,XLB1,NF1,XLB,NF2,ACC)
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      IMPLICIT INTEGER (I-N)
* Statement functions: two-loop alpha_s and the fixed-point update XIT
      B0(NF)=33d0-2d0*NF
      B1(NF)=6d0*(153d0-19d0*NF)/B0(NF)**2
      ALS2(NF,X,XLB)=12d0*PI/(B0(NF)*DLOG(X**2/XLB**2))
     . *(1d0-B1(NF)*DLOG(DLOG(X**2/XLB**2))
     . /DLOG(X**2/XLB**2))
      AA(NF)=12d0*PI/B0(NF)
      BB(NF)=B1(NF)/AA(NF)
      XIT(A,B,X)=A/2d0*(1d0+DSQRT(1d0-4d0*B*DLOG(X)))
      PI=4d0*DATAN(1d0)
      XLB2=XLB
      II=0
* Fixed-point iteration on XLB2; II counts iterations (not bounded)
    1 II=II+1
      X=DLOG(Q**2/XLB2**2)
      ALP=ALS2(NF1,Q,XLB1)
      A=AA(NF2)/ALP
      B=BB(NF2)*ALP
      XX=XIT(A,B,X)
      XLB2=Q*DEXP(-XX/2d0)
* Convergence check: relative difference of the two couplings at Q
      Y1=ALS2(NF1,Q,XLB1)
      Y2=ALS2(NF2,Q,XLB2)
      DY=DABS(Y2-Y1)/Y1
      IF(DY.GE.ACC) GOTO 1
      XITER=XLB2
      RETURN
      END
      DOUBLE PRECISION FUNCTION XITLA(NO,ALP,ACC)
* Iteration routine to determine improved Lambdas.
* Determines the 5-flavour Lambda reproducing alpha_s(MZ)=ALP at
* NO-loop order (NO=1: closed form; NO=2: fixed-point iteration to
* relative accuracy ACC).
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      IMPLICIT INTEGER (I-N)
      COMMON/GAUGE/ALSMZ,ALEMMZ,GF,gg1,gg2,S2TW
      COMMON/SMSPEC/AMS,AMC,AMB,AMBP,AMT,AMTAU,AMMUON,AMZ,AMW
* Statement functions as in XITER
      B0(NF)=33d0-2d0*NF
      B1(NF)=6d0*(153d0-19d0*NF)/B0(NF)**2
      ALS2(NF,X,XLB)=12d0*PI/(B0(NF)*DLOG(X**2/XLB**2))
     . *(1d0-B1(NF)*DLOG(DLOG(X**2/XLB**2))
     . /DLOG(X**2/XLB**2))
      AA(NF)=12d0*PI/B0(NF)
      BB(NF)=B1(NF)/AA(NF)
      XIT(A,B,X)=A/2d0*(1d0+DSQRT(1d0-4d0*B*DLOG(X)))
      PI=4d0*DATAN(1d0)
      NF=5
      Q=AMZ
* One-loop Lambda (exact); also the starting point for the iteration
      XLB=Q*DEXP(-AA(NF)/ALP/2d0)
      IF(NO.EQ.1)GOTO 111
      II=0
    1 II=II+1
      X=DLOG(Q**2/XLB**2)
      A=AA(NF)/ALP
      B=BB(NF)*ALP
      XX=XIT(A,B,X)
      XLB=Q*DEXP(-XX/2d0)
      Y1=ALP
      Y2=ALS2(NF,Q,XLB)
      DY=DABS(Y2-Y1)/Y1
      IF(DY.GE.ACC) GOTO 1
  111 XITLA=XLB
      RETURN
      END
      DOUBLE PRECISION FUNCTION FINT(Z,XX,YY)
* One-dimensional cubic interpolation
* Z = wanted point
* XX = array of 4 discrete x-values around Z
* YY = array of 4 discrete function-values around Z
* Uses 4-point Lagrange interpolation in log(x)/log(y) space, so the
* result is exact for pure power laws y = c*x**p.  All XX and YY
* entries must be positive (DLOG is applied to them).
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      DIMENSION XX(4),YY(4)
      X = DLOG(Z)
      X0=DLOG(XX(1))
      X1=DLOG(XX(2))
      X2=DLOG(XX(3))
      X3=DLOG(XX(4))
      Y0=DLOG(YY(1))
      Y1=DLOG(YY(2))
      Y2=DLOG(YY(3))
      Y3=DLOG(YY(4))
* Lagrange basis polynomials evaluated at X
      A0=(X-X1)*(X-X2)*(X-X3)/(X0-X1)/(X0-X2)/(X0-X3)
      A1=(X-X0)*(X-X2)*(X-X3)/(X1-X0)/(X1-X2)/(X1-X3)
      A2=(X-X0)*(X-X1)*(X-X3)/(X2-X0)/(X2-X1)/(X2-X3)
      A3=(X-X0)*(X-X1)*(X-X2)/(X3-X0)/(X3-X1)/(X3-X2)
      FINT=DEXP(A0*Y0+A1*Y1+A2*Y2+A3*Y3)
      RETURN
      END
* Spence function and auxiliary functions as in HDECAY
      DOUBLE PRECISION FUNCTION SP(X)
* REAL dilogarithm (Spence-function)
* Thin wrapper: evaluates the complex dilogarithm LI2 at X+0i and
* returns its real part.
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      DOUBLE COMPLEX CX,LI2
      CX = DCMPLX(X,0d0)
      SP = DREAL(LI2(CX))
      RETURN
      END
      DOUBLE COMPLEX FUNCTION LI2(X)
* COMPLEX dilogarithm (Spence-function)
* Maps the argument into the region where the Taylor series (CLI2)
* converges well, using the standard dilogarithm functional
* identities; ZETA2 must have been set by BERNINI (COMMON/CONST/).
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      IMPLICIT INTEGER (I-N)
      DOUBLE COMPLEX X,Y,CLI2
      COMMON/CONST/ZETA2,ZETA3
      ZERO=1d-16
      XR=DREAL(X)
      XI=DIMAG(X)
      R2=XR*XR+XI*XI
      LI2=0
* Tiny argument: Li2(x) ~ x
      IF(R2.LE.ZERO)THEN
      LI2=X
      RETURN
      ENDIF
* RR = Re(1/x), used to choose the mapping for |x|>1
      RR=XR/R2
* Special real points on the unit circle: Li2(1)=zeta2, Li2(-1)=-zeta2/2
      IF(R2.EQ.1d0.AND.XI.EQ.0d0)THEN
      IF(XR.EQ.1d0)THEN
      LI2=DCMPLX(ZETA2)
      ELSE
      LI2=-DCMPLX(ZETA2/2d0)
      ENDIF
      RETURN
* |x|>1: map via y=(x-1)/x or y=1/x depending on Re(1/x)
      ELSEIF(R2.GT.1d0.AND.RR.GT.0.5d0)THEN
      Y=(X-1d0)/X
      LI2=CLI2(Y)+ZETA2-CDLOG(X)*CDLOG(1d0-X)+0.5d0*CDLOG(X)**2
      RETURN
      ELSEIF(R2.GT.1d0.AND.RR.LE.0.5d0)THEN
      Y=1d0/X
      LI2=-CLI2(Y)-ZETA2-0.5d0*CDLOG(-X)**2
      RETURN
* |x|<=1: map via y=1-x when Re(x)>1/2, else evaluate directly
      ELSEIF(R2.LE.1d0.AND.XR.GT.0.5d0)THEN
      Y=1d0-X
      LI2=-CLI2(Y)+ZETA2-CDLOG(X)*CDLOG(1d0-X)
      RETURN
      ELSEIF(R2.LE.1d0.AND.XR.LE.0.5d0)THEN
      Y=X
      LI2=CLI2(Y)
      RETURN
      ENDIF
      END
      DOUBLE COMPLEX FUNCTION CLI2(X)
* Taylor-expansion for complex dilogarithm (Spence-function)
* Evaluates Li2(x) as a series in z=-log(1-x) using the Bernoulli
* coefficients B2 prepared by BERNINI (NBER terms), via Horner's
* scheme.  Intended only for arguments already mapped into the
* convergence region by LI2.
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      IMPLICIT INTEGER (I-N)
      DOUBLE COMPLEX X,Z
      COMMON/BERNOULLI/B2(18),B12(18),B3(18)
      COMMON/POLY/NBER
      N=NBER-1
      Z=-CDLOG(1d0-X)
* Horner evaluation of the series coefficients in z
      CLI2=B2(NBER)
      DO 111 I=N,1,-1
      CLI2=Z*CLI2+B2(I)
  111 CONTINUE
      CLI2=Z**2*CLI2+Z
      RETURN
      END
      DOUBLE PRECISION FUNCTION FACULT(N)
* DOUBLE PRECISION version of FACULTY
* Returns N! as a double precision value; FACULT(0)=1.
* N is assumed non-negative.
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      IMPLICIT INTEGER (I-N)
      FACULT=1d0
      IF(N.EQ.0)RETURN
      DO 999 I=1,N
      FACULT=FACULT*DFLOAT(I)
  999 CONTINUE
      RETURN
      END
      SUBROUTINE BERNINI(N)
* Initialization of coefficients for polylogarithms
* Fills the series-coefficient tables B2/B12/B3 (COMMON/BERNOULLI/)
* from the Bernoulli numbers B(1..18), sets the number of series
* terms NBER=N (COMMON/POLY/), and the constants ZETA2, ZETA3
* (COMMON/CONST/).  Must be called once before LI2/CLI2 are used.
      IMPLICIT DOUBLE PRECISION (A-H,O-Z)
      IMPLICIT INTEGER (I-N)
      DIMENSION B(18),PB(19)
      COMMON/BERNOULLI/B2(18),B12(18),B3(18)
      COMMON/CONST/ZETA2,ZETA3
      COMMON/POLY/NBER
      NBER=N
      PI=4d0*DATAN(1d0)
* Bernoulli numbers B_1..B_18 (odd ones beyond B_1 vanish)
      B(1)=-1d0/2d0
      B(2)=1d0/6d0
      B(3)=0d0
      B(4)=-1d0/30d0
      B(5)=0d0
      B(6)=1d0/42d0
      B(7)=0d0
      B(8)=-1d0/30d0
      B(9)=0d0
      B(10)=5d0/66d0
      B(11)=0d0
      B(12)=-691d0/2730d0
      B(13)=0d0
      B(14)=7d0/6d0
      B(15)=0d0
      B(16)=-3617d0/510d0
      B(17)=0d0
      B(18)=43867d0/798d0
      ZETA2=PI**2/6d0
      ZETA3=1.202056903159594d0
* B2: dilogarithm series coefficients; B12: auxiliary table;
* PB: shifted copy of B used to build B3 below
      DO 995 I=1,18
      B2(I)=B(I)/FACULT(I+1)
      B12(I)=DFLOAT(I+1)/FACULT(I+2)*B(I)/2d0
      PB(I+1)=B(I)
      B3(I)=0d0
  995 CONTINUE
      PB(1)=1d0
* B3: convolution of Bernoulli numbers (trilogarithm coefficients)
      DO 996 I=1,18
      DO 996 J=0,I
      B3(I)=B3(I)+PB(J+1)*PB(I-J+1)/FACULT(I-J)/FACULT(J+1)
     . /DFLOAT(I+1)
  996 CONTINUE
      RETURN
      END
*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* Passarino-Veltman one- and two-points functions A0, B0 and B1
* orig from LoopTools, http://www.feynarts.de/looptools/
* taken from Suspect2.3, modified by S. Kraml, 7 March 2005
*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* NMA0(m2,q): finite part of the one-point function A0 for squared
* mass m2 at renormalisation scale squared q; zero for massless case.
      double precision function NMA0(m2,q)
      implicit none
      double precision m2,q
      if(m2.ne.0d0) then
        NMA0 = m2 * (1d0-dlog( m2/q ))
      else
        NMA0 = 0d0
      endif
      end
      double precision function NMB0(p,m1,m2,q)
* note: all input is quadratical: p=p^2, m1=m1^2, m2=m2^2, q=q^2
* Finite part of the two-point function B0(p; m1, m2) at scale q,
* following the LoopTools implementation: the general case uses the
* roots x1,x2/y1,y2 of the kinematic polynomial (see roots()) and the
* auxiliary series fpv/xlogx; separate branches handle the
* zero-momentum and equal-mass limits.  Returns the real part.
      implicit none
      double precision p, m1, m2
      double precision mudim2, divergence, lambda2, q
      double precision acc, eps, minacc
      double complex x1, x2, y1, y2, r, be0
      double complex Ieps, onePeps, oneMeps
      COMMON/cutoff/mudim2, divergence, lambda2
      parameter (acc = 1d-12)
      parameter (eps = 1d-20)
      parameter (Ieps = (0d0,1d0)*eps)
      parameter (onePeps = 1d0 + Ieps)
      parameter (oneMeps = 1d0 - Ieps)
      double complex fpv, xlogx
      external fpv, xlogx
* no divergent/IR pieces kept; renormalisation scale set to q
      divergence = 0d0
      lambda2 = 0d0
      mudim2 = q
      minacc = acc*(m1 + m2)
* general case
      if(abs(p) .gt. minacc) then
        CALL roots(p, m1, m2, x1, x2, y1, y2, r)
        if(abs(y1) .gt. .5d0 .and. abs(y2) .gt. .5d0) then
          be0 = -log(m2/mudim2) -
     +      fpv(1, x1, y1) - fpv(1, x2, y2)
        else if(abs(x1) .lt. 10d0 .and. abs(x2) .lt. 10d0) then
          be0 = 2 - log(p*oneMeps/mudim2) +
     +      xlogx(-x1) + xlogx(-x2) - xlogx(y1) - xlogx(y2)
        else if(abs(x1) .gt. .5d0 .and. abs(x2) .gt. .5d0) then
          be0 = -log(m1/mudim2) -
     +      fpv(1, y1, x1) - fpv(1, y2, x2)
        else
* numerically hopeless configuration: flag with a huge value
          be0 = 1d100
        endif
* zero momentum
      else if(abs(m1 - m2) .gt. minacc) then
        x2 = oneMeps*m1/(m1 - m2)
        y2 = oneMeps*m2/(m2 - m1)
        if(abs(y2) .gt. .5d0) then
          be0 = -log(m2/mudim2) - fpv(1, x2, y2)
        else
          be0 = -log(m1/mudim2) - fpv(1, y2, x2)
        endif
      else
* zero momentum and (nearly) equal masses
        be0 = -log(m2/mudim2)
      endif
      NMB0 = dble(be0 + divergence)
      end
      double precision function NMB1(s,mi,mj,q)
* note: all input is quadratical: s=p^2, mi=m1^2, mj=m2^2, q=q^2
* Two-point function B1, reduced to A0 and B0 via the standard
* Passarino-Veltman relation; for equal masses B1 = B0/2.
* NOTE(review): the general branch divides by s -- s=0 with mi/=mj
* is not guarded against here.
      implicit none
      double precision s,mi,mj,NMB0,NMA0,q
      if(mi.eq.mj) then
        NMB1 = NMB0(s,mi,mj,q)/2d0
      else
        NMB1= (NMA0(mj,q) - NMA0(mi,q)
     .    + (s+mi-mj)*NMB0(s,mi,mj,q))/(2d0*s)
      endif
      end
*---------------------------------------------------------------------
* auxiliary functions used by the B0,B1 two-point functions
* from Looptools http://www.feynarts.de/looptools/
*---------------------------------------------------------------------
* roots(p,m1,m2,x1,x2,y1,y2,r): roots of the kinematic polynomial of
* B0.  r is the square root of the Kaellen function; x1,x2 (y1,y2)
* are the two roots associated with m1 (m2), with y_i = 1 - x_i.
* The better-conditioned root is kept and the other recomputed from
* the product of roots; a small imaginary part (+-Ieps) encodes the
* causal prescription.
      subroutine roots(p, m1, m2, x1, x2, y1, y2, r)
      implicit none
      double precision p, m1, m2
      double complex x1, x2, y1, y2, r
      double precision mudim2, divergence, lambda2
      COMMON/cutoff/mudim2, divergence, lambda2
      double precision acc, eps
      double complex Ieps, onePeps, oneMeps
      parameter (acc = 1d-12)
      parameter (eps = 1d-20)
      parameter (Ieps = (0d0,1d0)*eps)
      parameter (onePeps = 1d0 + Ieps)
      parameter (oneMeps = 1d0 - Ieps)
      double precision q
      r = sqrt(dcmplx(p*(p - 2*(m1 + m2)) + (m1 - m2)**2))
      q = p + m1 - m2
      x1 = (q + r)/2d0/p
      x2 = (q - r)/2d0/p
* recompute the smaller root from x1*x2 = m1/p to avoid cancellation
      if(abs(x2) .gt. abs(x1)) then
        x1 = m1/p/x2
      else if(abs(x1) .gt. abs(x2)) then
        x2 = m1/p/x1
      endif
      x1 = x1 + abs(p*x1)/p*Ieps
      x2 = x2 - abs(p*x2)/p*Ieps
      q = p - m1 + m2
      y2 = (q + r)/2d0/p
      y1 = (q - r)/2d0/p
      if(abs(y2) .gt. abs(y1)) then
        y1 = m2/p/y2
      else if(abs(y1) .gt. abs(y2)) then
        y2 = m2/p/y1
      endif
      y1 = y1 - abs(p*y1)/p*Ieps
      y2 = y2 + abs(p*y2)/p*Ieps
      end
      double complex function fpv(n, x, y)
* LoopTools auxiliary: the function f_n(x) appearing in the
* Passarino-Veltman reduction, with y = 1 - x passed explicitly for
* numerical stability.  Small |x| uses the closed form / finite sum;
* large |x| uses an inverse-power series truncated at 30 terms or
* when the relative term size drops below acc**2.
      implicit none
      integer n
      double complex x, y
      double precision mudim2, divergence, lambda2
      COMMON/cutoff/mudim2, divergence, lambda2
      double precision acc, eps
      double complex Ieps, onePeps, oneMeps
      parameter (acc = 1d-12)
      parameter (eps = 1d-20)
      parameter (Ieps = (0,1)*eps)
      parameter (onePeps = 1 + Ieps)
      parameter (oneMeps = 1 - Ieps)
      integer m
      double complex xm
      if(abs(x) .lt. 10d0) then
        if(n .eq. 0) then
          fpv = -log(-y/x)
        else if(abs(x) .lt. acc) then
          fpv = -1d0/n
        else
* finite sum plus logarithmic remainder
          fpv = 0
          xm = 1
          do m = 0, n - 1
            fpv = fpv - xm/(n - m)
            xm = xm*x
          enddo
          fpv = fpv - xm*log(-y/x)
        endif
      else
* asymptotic inverse-power series for large |x|
        fpv = 0
        xm = 1
        do m = 1, 30
          xm = xm/x
          fpv = fpv + xm/(m + n)
          if(abs(xm/fpv) .lt. acc**2) return
        enddo
      endif
      end
* yfpv(n,x,y) = y*fpv(n,x,y), with the y=0 case short-circuited to 0
* so fpv's logarithm is never evaluated at a vanishing argument.
      double complex function yfpv(n, x, y)
      implicit none
      integer n
      double complex x, y
      double complex fpv
      external fpv
      if(abs(y) .eq. 0d0) then
        yfpv = 0
      else
        yfpv = y*fpv(n, x, y)
      endif
      end
* xlogx(x) = x*log(x), continuously extended to 0 at x=0.
      double complex function xlogx(x)
      implicit none
      double complex x
      if(abs(x) .eq. 0d0) then
        xlogx = 0
      else
        xlogx = x*log(x)
      endif
      end
      DOUBLE PRECISION FUNCTION RUNMB(Q)
* Subroutine to calculate the running b quark mass for Q > MB
* Two-loop QCD running of MB (from COMMON/SMSPEC/) up to the scale Q
* using alpha_s at order 2; for Q > MT the evolution is split into a
* 5-flavour step MB->MT (U5MTMB) and a 6-flavour step MT->Q (U6QMT).
      IMPLICIT NONE
      DOUBLE PRECISION Q
      DOUBLE PRECISION PI,ALPHAS,ALMB,ALMT,ALQ,U5MTMB,U6QMT
      DOUBLE PRECISION MS,MC,MB,MBP,MT,MTAU,MMUON,MZ,MW
      COMMON/SMSPEC/MS,MC,MB,MBP,MT,MTAU,MMUON,MZ,MW
      PI=4d0*DATAN(1d0)
      ALQ=ALPHAS(Q,2)
      ALMB=ALPHAS(MB,2)
      IF(Q.LE.MT) THEN
* 5 active flavours all the way from MB to Q
      RUNMB=MB*(ALQ/ALMB)**(12d0/23d0)*(1d0+7462d0*(ALQ-ALMB)/
     . (4d0*PI*1587d0))
      ELSE
* evolve MB->MT with 5 flavours, then MT->Q with 6 flavours
      ALMT=ALPHAS(MT,2)
      U5MTMB=(ALMT/ALMB)**(12d0/23d0)*(1d0+7462d0*(ALMT-ALMB)/
     . (4d0*PI*1587d0))
      U6QMT=(ALQ/ALMT)**(4d0/7d0)*(1d0+7398d0*(ALQ-ALMT)/
     . (4d0*PI*1323d0))
      RUNMB=MB*U6QMT*U5MTMB
      ENDIF
      END
      DOUBLE PRECISION FUNCTION RAN2(IDUM)
* Simple multiplicative congruential random number generator with
* multiplier 16807 and modulus 2**31-1 (Park-Miller constants),
* implemented in double precision.  Updates the seed IDUM in place
* and returns a value in [0,1).
* NOTE(review): the INT(...+0.5d0) rounding of the modular product is
* unusual for this generator -- confirm the intended sequence.
      IMPLICIT NONE
      INTEGER IDUM
      DOUBLE PRECISION DA,DB,DC
      PARAMETER(DA=16807d0,DB=2147483647d0,DC=2147483648d0)
      IDUM=INT(ABS(MOD(DA*IDUM,DB)+0.5d0))
      RAN2=DFLOAT(IDUM)/DC
      END
      DOUBLE PRECISION FUNCTION GAU(IDUM)
* Standard-normal deviate via the Marsaglia polar (Box-Muller)
* method, driven by RAN2 with seed IDUM.  Each accepted pair of
* uniforms yields two deviates; the second is cached in SET (flagged
* by ISET) and returned on the next call without consuming randoms.
      IMPLICIT NONE
      INTEGER IDUM,ISET
      DOUBLE PRECISION F,SET,R,V1,V2,RAN2
      SAVE ISET,SET
      DATA ISET/0/
      IF(ISET.EQ.0)THEN
* rejection sampling of a point inside the unit circle
    5 V1=2d0*RAN2(IDUM)-1d0
      V2=2d0*RAN2(IDUM)-1d0
      R=V1**2+V2**2
      IF(R.GE.1d0.OR.R.EQ.0d0)GOTO 5
      F=DSQRT(-2d0*DLOG(R)/R)
      SET=V1*F
      GAU=V2*F
      ISET=1
      ELSE
      GAU=SET
      ISET=0
      ENDIF
      END
      DOUBLE PRECISION FUNCTION CLEOTAU(MX)
* CLEO constraints on BR(Y -> A gamma)*BR(A -> tau tau)
* Piecewise-linear interpolation of the tabulated upper limits X(N)
* at masses M(N); returns 0 for MX outside the tabulated mass range.
      IMPLICIT NONE
      INTEGER I,N
      PARAMETER (N=17)
      DOUBLE PRECISION MX,X(N),M(N)
      DATA M/3.75d0,4.25d0,4.75d0,5.1d0,5.8d0,6.15d0,6.6d0,7d0,
     . 7.4d0,7.6d0,8d0,8.25d0,8.6d0,9d0,9.25d0,9.35d0,9.41d0/
      DATA X/2.9d-5,2.5d-5,2d-5,2.3d-5,5.1d-5,2.5d-5,2.5d-5,2.7d-5,
     . 4.5d-5,3.7d-5,2.7d-5,7.2d-5,6.8d-5,8.6d-5,2.1d-4,2.85d-4,
     . 4.75d-4/
      CLEOTAU=0d0
      IF(MX.LT.M(1).OR.MX.GT.M(N))RETURN
* find the enclosing interval and interpolate linearly
      DO I=2,N
      IF(MX.LT.M(I))THEN
      CLEOTAU=X(I-1)+(MX-M(I-1))/(M(I)-M(I-1))*(X(I)-X(I-1))
      RETURN
      ENDIF
      ENDDO
      END
      DOUBLE PRECISION FUNCTION CLEOMU(MX)
* CLEO constraints on BR(Y -> A gamma)*BR(A -> mu mu)
* Same interpolation scheme as CLEOTAU; the table here is a constant
* limit of 9d-6 over 0.25 < MX < 3.75, 0 outside.
      IMPLICIT NONE
      INTEGER I,N
      PARAMETER (N=2)
      DOUBLE PRECISION MX,X(N),M(N)
      DATA M/.25d0,3.75d0/
      DATA X/9d-6,9d-6/
      CLEOMU=0d0
      IF(MX.LT.M(1).OR.MX.GT.M(N))RETURN
      DO I=2,N
      IF(MX.LT.M(I))THEN
      CLEOMU=X(I-1)+(MX-M(I-1))/(M(I)-M(I-1))*(X(I)-X(I-1))
      RETURN
      ENDIF
      ENDDO
      END
      DOUBLE PRECISION FUNCTION DEV(N,NN)
* Normalised deviation between two values: (NN-N)/(|N|+|NN|),
* which lies in [-1,1]; exactly 0 when N equals NN (also avoiding
* 0/0 for N=NN=0).
      IMPLICIT NONE
      DOUBLE PRECISION N,NN
      IF(N.EQ.NN)THEN
      DEV=0d0
      ELSE
      DEV=(NN-N)/(DABS(N)+DABS(NN))
      ENDIF
      END
      DOUBLE PRECISION FUNCTION DDSIN(X)
* DSIN with results below 1d-15 in magnitude flushed to exactly 0,
* suppressing rounding noise at multiples of pi.
      IMPLICIT NONE
      DOUBLE PRECISION X,D
      D=DSIN(X)
      IF(DABS(D).LT.1d-15)THEN
      DDSIN=0d0
      ELSE
      DDSIN=D
      ENDIF
      END
      DOUBLE PRECISION FUNCTION DDCOS(X)
* DCOS with results below 1d-15 in magnitude flushed to exactly 0,
* suppressing rounding noise at odd multiples of pi/2.
      IMPLICIT NONE
      DOUBLE PRECISION X,D
      D=DCOS(X)
      IF(DABS(D).LT.1d-15)THEN
      DDCOS=0d0
      ELSE
      DDCOS=D
      ENDIF
      END
| {
"alphanum_fraction": 0.501404269,
"author": null,
"avg_line_length": 23.6470588235,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "96d99188be206cbbf9b4c281e9d51a2655da3ed7",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-01-15T12:22:30.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-15T12:22:30.000Z",
"max_forks_repo_head_hexsha": "2d78a85413cc85789cc4fee8ec991eb2a0563ef8",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "yuanfangtardis/vscode_project",
"max_forks_repo_path": "Externals/micromegas_4.3.5/Packages/NMSSMTools_5.1.1/sources/subfun.f",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2d78a85413cc85789cc4fee8ec991eb2a0563ef8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "yuanfangtardis/vscode_project",
"max_issues_repo_path": "Externals/micromegas_4.3.5/Packages/NMSSMTools_5.1.1/sources/subfun.f",
"max_line_length": 72,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2d78a85413cc85789cc4fee8ec991eb2a0563ef8",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "yuanfangtardis/vscode_project",
"max_stars_repo_path": "Externals/micromegas_4.3.5/Packages/NMSSMTools_5.1.1/sources/subfun.f",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 10761,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 24924
} |
# confusion matrix is set up correctly
# Sanity check: the confusion matrix ranger stores on the fitted model must be
# identical to one built manually from the model's (OOB) predictions.
test_that("Included and own confusion matrix give identical results", {
  data(iris)
  tmp <- ranger::ranger(Species ~., data = iris)
  confusion_matrix <- table(true = iris$Species,
                            predicted = tmp$predictions)
  expect_equal(confusion_matrix, tmp$confusion.matrix)
})
# ranger_classification
# ranger_classification returns one result row per class for each grid row;
# fixtures test_grid / augmented_input_multi come from the test setup files.
test_that("Length of output equals input times classes", {
  test_grid$Target <- as.character(test_grid$Target)
  # number of response factor levels in the training set of the first list item
  classes <- length(levels(augmented_input_multi[[1]][["train_set"]][[unique(test_grid$Target)]]))
  results <- purrr::pmap(cbind(test_grid, .row = rownames(test_grid)), master_grid = test_grid,
    phyloseq2ML::ranger_classification, the_list = augmented_input_multi)
  expect_equal(nrow(results[[1]]), classes / length(test_grid$Target)* nrow(test_grid))
})
# Consistency of the per-class confusion counts: TP + FN must equal the number
# of true cases per class, and TP + FN + FP + TN must equal the sample count.
test_that("Number of cases per class corresponds to TP + FN for each class, also
  Number of Samples fits number of predicted cases", {
  # class frequencies of the true response in the training set
  trues <- table(augmented_input_multi[[test_grid$ML_object[1]]][["train_set"]][[test_grid$Target[1]]])
  test_grid$results <- purrr::pmap(cbind(test_grid, .row = rownames(test_grid)),
    ranger_classification, the_list = augmented_input_multi, master_grid = test_grid)
  results_df <- as.data.frame(tidyr::unnest(test_grid, results))
  result_sub <- subset(results_df, ML_object == test_grid$ML_object[1])
  predicteds <- result_sub[["True_positive"]] + result_sub[["False_negative"]]
  expect_true(all(result_sub[["Number_of_samples"]] == result_sub[["True_positive"]] + result_sub[["False_negative"]] +
    result_sub[["False_positive"]] + result_sub[["True_negative"]]))
  expect_true(all(predicteds %in% trues))
})
# Defect fixed: the original expectation was
#   expect_equal(length(unique(results[[1]]$Class)), classes, 2)
# where the positional third argument `2` is consumed as the comparison
# *tolerance* by expect_equal(), making the "2 classes" check nearly vacuous.
# The intent -- binary input yields exactly 2 classes -- is now asserted
# explicitly.
test_that("Detect 2 classes for binary_classification", {
  test_grid$Target <- as.character(test_grid$Target)
  # number of response factor levels in the binary training set
  classes <- length(levels(augmented_input_binary[[1]][["train_set"]][[unique(test_grid$Target)]]))
  results <- purrr::pmap(cbind(test_grid, .row = rownames(test_grid)), master_grid = test_grid,
    phyloseq2ML::ranger_classification, the_list = augmented_input_binary)
  # binary input must have exactly 2 levels, and the results must cover them
  expect_equal(classes, 2)
  expect_equal(length(unique(results[[1]]$Class)), classes)
})
# Multi-class input: the classes reported in the result rows must cover all
# factor levels of the response variable.
test_that("Detect correct number of classes for multi_classification", {
  test_grid$Target <- as.character(test_grid$Target)
  classes <- length(levels(augmented_input_multi[[1]][["train_set"]][[unique(test_grid$Target)]]))
  results <- purrr::pmap(cbind(test_grid, .row = rownames(test_grid)), master_grid = test_grid,
    phyloseq2ML::ranger_classification, the_list = augmented_input_multi)
  expect_equal(length(unique(results[[1]]$Class)), classes)
})
# Input-validation tests: each malformed input to ranger_classification must
# raise an error.
test_that("Breaks if ML_object names in master_grid do not match list item names", {
  expect_error(purrr::pmap(cbind(test_grid, .row = rownames(test_grid)), master_grid = test_grid,
    phyloseq2ML::ranger_classification, the_list = splitted_input_multi)
  )
})
test_that("Breaks if master_grid$Target is not character", {
  # a factor Target (instead of character) must be rejected
  test_grid$Target <- as.factor(test_grid$Target)
  expect_error(purrr::pmap(cbind(test_grid, .row = rownames(test_grid)), master_grid = test_grid,
    phyloseq2ML::ranger_classification, the_list = augmented_input_multi)
  )
})
test_that("Breaks if master_grid does not contain required columns", {
  # parameter_df lacks the columns ranger_classification expects
  expect_error(purrr::pmap(cbind(parameter_df, .row = rownames(parameter_df)),
    master_grid = parameter_df, phyloseq2ML::ranger_classification, the_list =
    augmented_input_multi)
  )
})
test_that("Breaks if the input list is not correct", {
  # a data.frame instead of the expected list of input sets must be rejected
  expect_error(purrr::pmap(cbind(test_grid, .row = rownames(test_grid)),
    master_grid = test_grid, phyloseq2ML::ranger_classification, the_list =
    test_grid)
  )
})
# Defect fixed: the test description was garbled ("... is not a factor is not
# correct"); the assertion itself is unchanged.  A regression input
# (augmented_regression) has a non-factor response and must be rejected by the
# classification routine.
test_that("Breaks if the actual response variable is not a factor", {
  expect_error(purrr::pmap(cbind(test_grid, .row = rownames(test_grid)),
    master_grid = test_grid, phyloseq2ML::ranger_classification, the_list =
    augmented_regression)
  )
})
# store_classification
# Argument checks for store_classification: missing test set at prediction
# step, and a non-prediction object passed as predicted_rf.
test_that("Breaks if no test_set is provided", {
  data(iris)
  tmp <- ranger::ranger(Species ~., data = iris)
  tmp2 <- predict(tmp, data = iris)
  confusion_matrix <- table(true = iris$Species,
                            predicted = tmp2$predictions)
  # step = "prediction" requires a test_set; NULL must raise an error
  expect_error(phyloseq2ML::store_classification(trained_rf = tmp, predicted_rf = tmp2,
    confusion_matrix = confusion_matrix, n_classes = 3,
    step = "prediction", test_set = NULL)
  )
})
test_that("Breaks if no prediction.ranger class object is provided", {
  data(iris)
  tmp <- ranger::ranger(Species ~., data = iris)
  tmp2 <- predict(tmp, data = iris)
  confusion_matrix <- table(true = iris$Species,
                            predicted = tmp2$predictions)
  # passing the fitted model (tmp) where a prediction object is expected
  expect_error(phyloseq2ML::store_classification(trained_rf = tmp, predicted_rf = tmp,
    confusion_matrix = confusion_matrix, n_classes = 3,
    step = "prediction", test_set = iris)
  )
})
# Further store_classification argument checks: each missing or invalid
# argument must raise an error.
test_that("Breaks if classes are not provided", {
  data(iris)
  tmp <- ranger::ranger(Species ~., data = iris)
  expect_error(phyloseq2ML::store_classification(tmp, tmp$confusion.matrix,
    n_classes = NULL, step = "training")
  )
})
test_that("Breaks for using wrong step string", {
  data(iris)
  tmp <- ranger::ranger(Species ~., data = iris)
  # step must be one of the supported step names, not "no"
  expect_error(phyloseq2ML::store_classification(tmp, tmp$confusion.matrix,
    n_classes = 3, step = "no")
  )
})
test_that("Breaks if not using ranger object", {
  data(iris)
  tmp <- ranger::ranger(Species ~., data = iris)
  expect_error(phyloseq2ML::store_classification("hello", confusion_matrix = tmp$confusion.matrix,
    n_classes = 3, step = "training")
  )
})
test_that("Breaks if no confusion matrix is provided", {
  data(iris)
  tmp <- ranger::ranger(Species ~., data = iris)
  expect_error(phyloseq2ML::store_classification(tmp, confusion_matrix = NULL,
    n_classes = 3, step = "training")
  )
})
# prediction_accuracy
# prediction_accuracy requires a ranger prediction object and a data.frame
# test set, and returns a numeric accuracy value.
test_that("Breaks for non-prediction object", {
  data(iris)
  tmp <- ranger::ranger(Species ~., data = iris)
  expect_error(phyloseq2ML::prediction_accuracy(tmp, iris))
})
test_that("Breaks if test set is not a data.frame", {
  data(iris)
  tmp <- ranger::ranger(Species ~., data = iris)
  prediction <- stats::predict(tmp, data = iris)
  expect_error(phyloseq2ML::prediction_accuracy(prediction, "hello"))
})
test_that("Returns numeric value", {
  data(iris)
  tmp <- ranger::ranger(Species ~., data = iris)
  prediction <- stats::predict(tmp, data = iris)
  expect_true(is.numeric(phyloseq2ML::prediction_accuracy(prediction, iris)))
})
# classification_metrics
# classification_metrics requires a non-empty result table and a numeric
# sample count.
test_that("Breaks if number of samples argument is not numeric", {
  df <- data.frame()
  expect_error(phyloseq2ML::classification_metrics(df, "hello"))
})
test_that("Breaks if result table is empty", {
  df <- data.frame()
  # df$Number_of_samples on an empty data.frame is NULL, and the empty table
  # itself must also be rejected
  expect_error(phyloseq2ML::classification_metrics(df, df$Number_of_samples))
})
| {
"alphanum_fraction": 0.729689808,
"author": null,
"avg_line_length": 39.3604651163,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "715ae27cb9a5e51d92b130a29a33adbb1ac51892",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "391ce779d9cb816ca109ab6f89bd7246c27c784c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "RJ333/phyloseq2ML",
"max_forks_repo_path": "tests/testthat/test_ranger_classification.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "391ce779d9cb816ca109ab6f89bd7246c27c784c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "RJ333/phyloseq2ML",
"max_issues_repo_path": "tests/testthat/test_ranger_classification.r",
"max_line_length": 119,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "391ce779d9cb816ca109ab6f89bd7246c27c784c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "RJ333/phyloseq2ML",
"max_stars_repo_path": "tests/testthat/test_ranger_classification.r",
"max_stars_repo_stars_event_max_datetime": "2021-02-05T14:20:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-02-05T14:20:05.000Z",
"num_tokens": 1768,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 6770
} |
import os
import numpy as np
import pandas as pd
import pg8000
from sqlalchemy import create_engine
import datetime
import scipy.optimize
import scipy.interpolate
# templatenumeric = np.zeros((10, 1)) * np.nan
# templatebool = np.zeros((10, 1), dtype=bool)
# Connection to the ECCO-BIWA lake database.
# NOTE(review): credentials are hard-coded in the URL -- consider moving them
# to environment variables or a config file.
engine = create_engine('postgresql+pg8000://user:password@vm-srv-finstad.vm.ntnu.no/ecco_biwa_db')
# Lakes (ebint ids) whose aggregation step has not been completed yet.
sqlagnotdone = "select ebint from mylake.kt_orders o where o.lastaggdone is null"
orders = pd.read_sql(sqlagnotdone, engine)
if len(orders) == 0:
    # Nothing to aggregate: terminate immediately.
    # os._exit skips interpreter cleanup (no atexit handlers, buffers not
    # flushed) -- presumably intentional for this batch job; verify.
    os._exit(1)
ee = orders['ebint'].values
# ## just for COMSAT
# comsat = pd.read_csv('COMSAT_utf8.csv')
# ee = comsat.ebint.values
## get relevant mms
# Scenario/model combinations ("mm") with the first/last year of each decade.
sqlgetmm = """select mm, ya0 as y0, yb1 as y1
from mylake.mm
where mm = 22 or mm = 26 or mm = 27 or mm = 28"""
mms = pd.read_sql(sqlgetmm, engine)
first_time = True
# ee = ee[:100]
for ei, e in enumerate(ee):
print('%s: %s' % (ei, e))
h = hex(e).rstrip('L').lstrip('0x')
eh = h
while len(eh) < 6:
eh = '0' + eh
d1, d2, d3 = eh[:2], eh[:4], eh[:6]
# ## acknowledge it started
# sqlupdate1 = """update kt_orders set lastaggtry = current_timestamp
# where ebint = %s""" % e
# _ = engine.execute(sqlupdate1)
# going through initial decade as the reference
print('this current version does NOT work for multiple RCMs, reference to the historical needs to be RCM specific')
mm0, y00, y10 = mms.iloc[0, :]
wd0 = os.path.join('/data/lakes', d1, d2, d3, '%02d' % mm0)
ndays0 = (datetime.date(y10, 12, 31) - datetime.date(y00, 1, 1)).days + 1
dates0 = [datetime.date(y00, 1, 1) + datetime.timedelta(d)
for d in range(ndays0)]
dti0 = pd.DatetimeIndex(dates0)
period0 = pd.PeriodIndex(dates0, freq='D')
if not os.path.exists(os.path.join(wd0, 'Tzt.csv.gz')):
continue
t0 = pd.read_csv(os.path.join(wd0, 'Tzt.csv.gz'),
compression='gzip', header=None)
t0.index = period0
h0 = pd.read_csv(os.path.join(wd0, 'His.csv.gz'),
compression='gzip', header=None)
h0.index = period0
q0 = pd.read_csv(os.path.join(wd0, 'Qst.csv.gz'),
compression='gzip', header=None)
q0.index = period0
k0 = pd.read_csv(os.path.join(wd0, 'Kzt.csv.gz'),
compression='gzip', header=None)
k0.index = period0
# weqs0 = h0.iloc[:, 1] * h0.iloc[:, 5] / 1000
# Hs * rho_snow / rho_freshwater rho_fw
icesnowatt0 = np.exp(-5 * h0.iloc[:, 0]) * np.exp(-15 * h0.iloc[:, 1])
icesnowatt0.iloc[np.logical_not(h0.iloc[:, 6] == 1).values] = 1
# exp(-lambda_i * Hi) * exp(-lambda_s * (rho_fw/rho_snow)*weqs)
sw15 = q0.iloc[:, 0] * icesnowatt0 * 0.45 * np.exp(-1 * 15) if \
t0.shape[1] >= 15 \
else pd.DataFrame(np.zeros((ndays0, 1)) * np.nan)
sw15.index = period0
sw15 = np.log(sw15)
sw15 = sw15.resample('M')
sw15 = sw15.groupby(sw15.index.month).mean()
fr15pr = sw15 # Frode's PAR level at 15m
sw05 = q0.iloc[:, 0] * icesnowatt0 * 0.45 * np.exp(-1 * 5) if \
t0.shape[1] >= 5 \
else pd.DataFrame(np.zeros((ndays0, 1)) * np.nan)
sw05.index = period0
sw05 = np.log(sw05)
sw05 = sw05.resample('M')
sw05 = sw05.groupby(sw05.index.month).mean()
# Frode's PAR level at 05m
fr15tr = (t0.iloc[:, 14] + t0.iloc[:, 15]) / 2 if \
t0.shape[1] >= 16 \
else pd.DataFrame(np.zeros((ndays0, 1)) * np.nan)
fr15tr.index = period0
fr15tr = fr15tr.resample('M')
fr15troriginal = fr15tr
fr15tr[fr15tr < 3.98] = np.nan
grouped15 = fr15tr.groupby(fr15tr.index.month)
allwarm15 = np.array([np.logical_not(np.isnan(g)).sum() == 10
for k, g in grouped15])
fr15tr = grouped15.mean()
fr15tr[np.logical_not(allwarm15)] = np.nan
## Frode's 15m temperature ref
fr05tr = (t0.iloc[:, 4] + t0.iloc[:, 5]) / 2 if \
t0.shape[1] >= 6 \
else pd.DataFrame(np.zeros((ndays0, 1)) * np.nan)
fr05tr.index = period0
fr05tr = fr05tr.resample('M')
fr05troriginal = fr05tr
fr05tr[fr05tr < 3.98] = np.nan
grouped05 = fr05tr.groupby(fr05tr.index.month)
allwarm05 = np.array([np.logical_not(np.isnan(g)).sum() == 10
for k, g in grouped05])
fr05tr = grouped05.mean()
fr05tr[np.logical_not(allwarm05)] = np.nan
## Frode's 05m temperature ref
print('this current version does NOT work with varying extinction coefficients')
# Hs * rho_snow / rho_freshwater rho_fw
icesnowatt = np.exp(-5 * h0.iloc[:, 0]) * np.exp(-15 * h0.iloc[:, 1])
icesnowatt.iloc[np.logical_not(h0.iloc[:, 6] == 1).values] = 1
# exp(-lambda_i * Hi) * exp(-lambda_s * (rho_fw/rho_snow)*weqs)
sw15 = q0.iloc[:, 0] * icesnowatt * 0.45 * np.exp(-1 * 15) if \
t0.shape[1] >= 15 \
else pd.DataFrame(np.zeros((ndays0, 1)) * np.nan)
sw15.index = period0
sw15 = np.log(sw15)
sw15 = sw15.resample('M')
sw15 = sw15.groupby(sw15.index.month).mean()
fr15pr = sw15
sw05 = q0.iloc[:, 0] * icesnowatt * 0.45 * np.exp(-1 * 5) if \
t0.shape[1] >= 5 \
else pd.DataFrame(np.zeros((ndays0, 1)) * np.nan)
sw05.index = period0
sw05 = np.log(sw05)
sw05 = sw05.resample('M')
sw05 = sw05.groupby(sw05.index.month).mean()
fr05pr = sw05
# e: ebint
# mm: myriadmyriad
# sim: sim_id
# y0: first year of the decade
# y1: last year of the decade
# ndays: number of days in the decade
# dti: pd.DatetimeIndex of the dates in the decade
# t: temperature profile [day x depth]
# h: ice variables [day x variable]
# q: heat variables [day x variable]
# k:
# r: rho, water density [day x depth]
# dr: delta rho [day x (depth - 1)]
# stratified: bool [day]
# pycd: max pycnocline or mixing depth [day]
# if mixed completely lake's maximum depth
#
# pcn00 ... pcn12: annual and monthly mixing depth [day]
# tsw00 ... tws12: annual and monthly temperature surface water [day]
for mm, y0, y1 in mms.itertuples(index=False):
sim = mm * 2e7 + e # the sim_id
wd = os.path.join('/data/lakes', d1, d2, d3, '%02d' % mm)
# wd = os.path.join('/Users/kojito/Desktop/lakes', d1, d2, d3, '%02d' % mm)
ndays = (datetime.date(y1, 12, 31) - datetime.date(y0, 1, 1)).days + 1
dates = [datetime.date(y0, 1, 1) + datetime.timedelta(d)
for d in range(ndays)]
dti = pd.DatetimeIndex(dates)
period = pd.PeriodIndex(dates, freq='D')
if not os.path.exists(os.path.join(wd, 'Tzt.csv.gz')):
continue
t = pd.read_csv(os.path.join(wd, 'Tzt.csv.gz'),
compression='gzip', header=None)
t.index = period
h = pd.read_csv(os.path.join(wd, 'His.csv.gz'),
compression='gzip', header=None)
h.index = period
q = pd.read_csv(os.path.join(wd, 'Qst.csv.gz'),
compression='gzip', header=None)
q.index = period
k = pd.read_csv(os.path.join(wd, 'Kzt.csv.gz'),
compression='gzip', header=None)
k.index = period
if np.logical_not(np.isnan(t.values)).sum() == 0:
continue
# rho density
r = 999.842594 + t * (6.793952e-2 + t * (-9.09529e-3 +
t * (1.001685e-4 + t * (-1.120083e-6 + t * 6.536332e-9))))
# delta rho
dr = np.concatenate((np.repeat(np.array([[0]]), r.shape[0], axis=0),
r.iloc[:, :-1].values - r.iloc[:, 1:].values),
axis=1)
dr = np.abs(dr)
drd0, drd1 = dr.shape
drthreshold = 0.05
dr[dr < drthreshold] = 0
dr[t.values < 3.98] = 0
## this .values is important as pd.DataFrame < 3.98 does not work
drz = dr * np.arange(drd1).reshape((1, drd1)).repeat(drd0, axis=0)
pycd = drz.sum(axis=1) / dr.sum(axis=1)
stratified = np.logical_not(np.isnan(pycd))
stratified = pd.DataFrame(stratified)
stratified.index = dti
pycd[np.logical_not(stratified.iloc[:, 0].values)] = drd1 - 1.0 # if not stratified mix to the bottom
pycd = pd.DataFrame(pycd)
pycd.index = dti
# pcd01-pcd12 # depth
pcd = pycd.resample('M').values.reshape((10, 12))
# pcd00
pcd00 = pcd.mean(axis=1).reshape((10, 1))
# pcn01-pcn12 # number of stratified days in a month
pcn = stratified.resample('M', how='sum').values.reshape((10, 12))
# pcn00
pcn00 = pcn.mean(axis=1).reshape((10, 1))
# tsw01-tsw12
tsw = t.iloc[:, 0].resample('M').reshape((10, 12))
# tsw00
tsw00 = tsw.mean(axis=1).reshape((10, 1))
ndice = h.iloc[:, 6].resample('M', how='sum').values.reshape((10, 12))
ndice00 = ndice.sum(axis=1).reshape((10, 1))
########
# first work on warm stratification and lake categories
# ice will be done later and only in between years
########
# Per-year (10 rows, one per year) diagnostic arrays:
# csomeice: is there at least one ice-covered day?
# clongstrat: is there unusually long stratification?
# (if yes stats mix... are meaningless)
# # cwarm: does it at least go up > 3.98 degrees C?
# maxdoy: doy at which temperature at surface is at its maximum
# mixspr0: spring mixing doy starts (first day t > 3.98)
# mixspr1: spring mixing doy ends (if stratification turns on)
# mixaut0: autumn mixing doy starts (if stratification has been on)
# mixaut1: autumn mixing doy ends (last day t > 3.98)
# cwarm = False ## can be sub'ed with "not np.isnan(maxdoy)"
csomeice = np.zeros((10, 1), dtype=bool)
clongstrat = np.zeros((10, 1), dtype=bool)
# NaN means "undefined for this year" until the loop below fills it in
maxdoy = np.zeros((10, 1)) * np.nan
mixspr0 = np.zeros((10, 1)) * np.nan
mixspr1 = np.zeros((10, 1)) * np.nan
mixaut0 = np.zeros((10, 1)) * np.nan
mixaut1 = np.zeros((10, 1)) * np.nan
# ndmix and nemix ONLY MEANINGFUL for cat 4 or 7
ndmix = np.zeros((10, 1)) * np.nan # number of mixing days
nemix = np.zeros((10, 1)) * np.nan # number of mixing events
category = np.zeros((10, 1)) * np.nan
# lake categories:
# 0: unknown
# 1: amictic
# 2: cold monomictic
# 3: continuous cold polymictic
# 4: discontinuous cold polymictic
# 5: dimictic
# 6: continuous warm polymictic
# 7: discontinuous warm polymictic
# 8: warm monomictic
# Classify each year's mixing regime and extract warm-stratification
# timing (indentation reconstructed from the flow of the continue
# statements and comment numbering I-VI).
for yi, thisy in enumerate(t.resample('A').index.year):
    # per-year defaults (False / NaN) -- only overwritten when defined
    thiscsomeice = False
    # thisclongstrat = False not used
    thismaxdoy = np.nan
    thismixspr0 = np.nan
    thismixspr1 = np.nan
    thismixaut0 = np.nan
    thismixaut1 = np.nan
    thisndmix = np.nan
    thisnemix = np.nan
    # ndaysthisyear = pd.to_datetime('%s-12-31' % thisy).dayofyear
    thish = h[h.index.year == thisy]
    # I. AMICTIC if completely ice-covered
    if np.all(thish.iloc[:, 6] == 1):
        category[yi, 0] = 1
        # keep everything else default
        continue
    thist = t[t.index.year == thisy]
    s = thist.iloc[:, 0]  # surface-layer temperature series for this year
    smax = s.max()
    # II. COLD MONOMICTIC if t at maxdoy < 3.98
    if smax < 3.98:
        category[yi, 0] = 2
        # keep everything else default
        continue
    thismaxdoy = s[s == s.max()].index.dayofyear[0] # just pick the 1st
    maxdoy[yi, 0] = thismaxdoy
    thiscsomeice = np.any(thish.iloc[:, 6] == 1)
    csomeice[yi, 0] = thiscsomeice
    # needed later unless category = 8
    thisstrat = stratified[stratified.index.year == thisy]
    # III. if always stratified put it together with WARM MONOMICTIC
    if np.all(thisstrat):
        category[yi, 0] = 8
        clongstrat[yi, 0] = True
        # keep everything else default
        continue
    # check for stratification anyway
    elif np.any(thisstrat):
        # spring mixing ends the day before the first stratified day;
        # autumn mixing starts the day after the last stratified day
        thismixspr1 = thisstrat[thisstrat.iloc[:, 0]].index.dayofyear.min() - 1
        thismixaut0 = thisstrat[thisstrat.iloc[:, 0]].index.dayofyear.max() + 1
        ## fix for the situation where the first day > 3.98 is already stratified
        if (s[s > 3.98].index.dayofyear.min() - 1) == thismixspr1:
            thismixspr0 = thismixspr1
        else:
            # first non-stratified warm day before the temperature maximum
            thismixspr0 = thisstrat[np.logical_not(thisstrat.iloc[:, 0]) & \
                                    (thisstrat.index.dayofyear < thismaxdoy) & \
                                    (s > 3.98).values] \
                                   .index.dayofyear.min()
        if (s[s > 3.98].index.dayofyear.max() + 1) == thismixaut0:
            thismixaut1 = thismixaut0
        else:
            # last non-stratified warm day after the temperature maximum
            thismixaut1 = thisstrat[np.logical_not(thisstrat.iloc[:, 0]) & \
                                    (thisstrat.index.dayofyear > thismaxdoy) & \
                                    (s > 3.98).values] \
                                   .index.dayofyear.max()
        mixspr0[yi, 0] = thismixspr0 # can be overwritten in IV
        mixspr1[yi, 0] = thismixspr1
        mixaut0[yi, 0] = thismixaut0
        mixaut1[yi, 0] = thismixaut1 # can be overwritten in IV
    # else: all thismix.... remains np.nan
    # # IV. if either end (1 January or 31 December) is warmer than 3.98 then
    # # put together with WARM MONOMICTIC
    # # the year started warm
    # flag4a = np.all(s[s.index.dayofyear < thismaxdoy] > 3.98):
    # # the year ended warm
    # flag4b = np.all(s[s.index.dayofyear > thismaxdoy] > 3.98):
    # if flag4a and flag4b:
    #     mixspr0[yi, 0] = thismixspr0 = np.nan
    #     mixaut1[yi, 0] = thismixaut1 = np.nan
    #     clongstrat[yi, 0] = True
    #     # keep everything else default
    #     # if
    #     continue
    # V. if always mixed (i.e., no warm stratification) either
    # CONTINUOUS COLD POLYMICTIC or CONTINUOUS WARM POLYMICTIC
    if np.all(np.logical_not(thisstrat)):
        category[yi, 0] = 3 if thiscsomeice else 6
        continue
    # check the stability of warm stratification
    st = thisstrat[(thisstrat.index.dayofyear > thismixspr1) \
                   & (thisstrat.index.dayofyear < thismixaut0)]
    # VI. if stratification is stable either
    # DIMICTIC or WARM MONOMICTIC
    # otherwise DISCONTINUOUS COLD POLYMICTIC or
    # DISCONTINUOUS WARM POLYMICTIC,
    # and calculate ndmix and nemix
    if np.all(st):
        category[yi, 0] = 5 if thiscsomeice else 8
    elif st.shape[0] <= 4: ## too short stratification
        category[yi, 0] = 4 if thiscsomeice else 7
        thisndmix = 0 # overwrite on np.nan
        thisnemix = 0 # overwrite on np.nan
    elif np.all(st.iloc[4:, 0]) and st.shape[0] > 10:
        # if only unstable in the beginning consider stable
        category[yi, 0] = 5 if thiscsomeice else 8
    else:
        category[yi, 0] = 4 if thiscsomeice else 7
        # get number of days and events of mixing
        thisndmix = 0 # overwrite on np.nan
        thisnemix = 0 # overwrite on np.nan
        # only consider stratification period > 4 days
        # ignore the first 4 days for initial unstable stratification
        thisndmix = np.logical_not(st.iloc[4:, 0]).sum()
        # for the number of events only count the number of first days
        # NOTE(review): the day count above skips the first 4 days but the
        # event scan below covers the whole period -- confirm intended
        if thisndmix >= 1:
            previous = False
            for d in np.logical_not(st.iloc[:, 0]):
                if previous: # if mixed on previous day do not accrue nemix
                    if not d:
                        previous = False # mixing ended on this day
                elif d:
                    thisnemix += 1
                    previous = True # mixing started on this day
    ndmix[yi, 0] = thisndmix
    nemix[yi, 0] = thisnemix
######
# now work on ice related matters
######
# Per-year ice-cover diagnostics (winter records are attributed to the
# later of the two years they span, so year 0 stays undefined):
# y0 missing
# record for winter y0 to y1 is recorded in y1
# iceon: first ice-covered doy of the year before (can be negative)
# iceof: last ice-covered doy of the year
# icedu: number of days inclusive between iceon and iceof
# ndtha: number of days between iceon and iceof not ice-covered
# netha: number of events between iceon and iceof not ice-covered
# clongice: unusually long ( > 365 days) of ice-cover
# in the year or the year before or the year after
# above stats not meaningful
iceon = np.zeros((10, 1)) * np.nan
iceof = np.zeros((10, 1)) * np.nan
icedu = np.zeros((10, 1))
icedu[0, 0] = np.nan
ndtha = np.zeros((10, 1)) * np.nan
netha = np.zeros((10, 1)) * np.nan
clongice = np.zeros((10, 1), dtype=bool)
# Derive ice-on/ice-off timing and ice-cover duration for each winter.
# (Indentation reconstructed from the I-IV comment numbering and the
# continue statements.)
for yi, thisy in enumerate(t.resample('A').index.year):
    if yi == 0:
        continue
    md0 = maxdoy[yi - 1, 0] # maxdoy the year before
    md1 = maxdoy[yi, 0] # maxdoy the year
    # I. if neither md0 and md1 exists, then completely ice-covered
    if np.isnan(md0) and np.isnan(md1):
        icedu[yi, 0] = 365
        clongice[yi, 0] = True
        continue
    # II. if only md0 is non-existent, record iceon
    elif np.isnan(md0):
        icedu[yi, 0] = 365
        # NOTE(review): md0 is NaN in this branch, so the
        # (dayofyear > md0) comparison is all-False and only the
        # (year == thisy) half of the OR contributes to the selection
        iceon[yi, 0] = (h[(((h.index.year == (thisy - 1)) \
                           & (h.index.dayofyear > md0)) \
                          | (h.index.year == thisy))\
                         & (h.iloc[:, 6] == 1)] \
                        .index.min().start_time.to_pydatetime().date() \
                        - datetime.date(thisy - 1, 12, 31)).days
        # either last year days since maxdoy or this year,
        # AND ice-covered --- take the date and count how many days
        # from the New Year's Eve the year before
        clongice[yi, 0] = True
        continue
    # III. if only md1 is non-existent, record iceof,
    # and do complex calculation of icedu
    elif np.isnan(md1):
        # NOTE(review): md1 is NaN in this branch, so the
        # (dayofyear < md1) filter is all-False and the selection below
        # is always empty; .index.max() on an empty index looks like a
        # bug -- confirm whether the md1 filter should be dropped here
        iceof[yi, 0] = thisiceof = h[(h.index.year == thisy) \
                                     & (h.index.dayofyear < md1) \
                                     & (h.iloc[:, 6] == 1)] \
                                    .index.max().dayofyear
        # find the year (immediately before this year)
        # that did not have complete ice cover
        # if not found, then record icedu = 365
        nonfully = None # the yi for the year that does not have complete ice-cover
        # (the original comment claimed a default of 1; the code actually
        # defaults to None and falls back to icedu = 365 below)
        for yii in range(yi-1, 0, -1):
            if not np.isnan(maxdoy[yii, 0]):
                nonfully = yii
                break
        if nonfully is None:
            icedu[yi, 0] = 365
        else:
            icedu[yi, 0] = min(365, (365 - iceon[nonfully, 0]) + 1 + thisiceof)
        clongice[yi, 0] = True
        continue
    # IV. if both md0 and md1 are available,
    # calculate iceon like II.
    # calculate iceof like III
    # and also calculate ndtha and netha
    else:
        # all days with ice (may be intermittent)
        icx = h[(((h.index.year == (thisy - 1)) \
                  & (h.index.dayofyear > md0)) \
                 | ((h.index.year == thisy) \
                    & (h.index.dayofyear < md1))) \
                & (h.iloc[:, 6] == 1)]
        if icx.shape[0] == 0:
            # this means no ice
            continue
        iceon[yi, 0] = (h[(((h.index.year == (thisy - 1)) \
                           & (h.index.dayofyear > md0)) \
                          | (h.index.year == thisy))\
                         & (h.iloc[:, 6] == 1)] \
                        .index.min().start_time.to_pydatetime().date() \
                        - datetime.date(thisy - 1, 12, 31)).days
        thisiceon = iceon[yi, 0]
        # either last year days since maxdoy or this year,
        # AND ice-covered --- take the date and count how many days
        # from the New Year's Eve the year before
        iceof[yi, 0] = h[(h.index.year == thisy) \
                         & (h.index.dayofyear < md1) \
                         & (h.iloc[:, 6] == 1)] \
                        .index.max().dayofyear
        thisiceof = iceof[yi, 0]
        # (EITHER last year since maxdoy OR this year until maxdoy)
        # AND ice-covered
        ic = h[icx.index.min():icx.index.max()] # works charming
        icedu[yi, 0] = ic.shape[0]
        ic = ic.iloc[:, 6]
        ic = (ic == 1).values
        # get number of open-water (thaw) days and events within the
        # ice-cover period
        thisndtha = 0 # overwrite on np.nan
        thisnetha = 0 # overwrite on np.nan
        thisndtha = np.logical_not(ic).sum()
        # for the number of events only count the number of first days
        if thisndtha > 1:
            previous = False
            for d in np.logical_not(ic):
                if previous: # if not ice-covered on previous day do not accrue
                    if not d:
                        previous = False # open-water ended on this day
                elif d:
                    thisnetha += 1
                    previous = True # open-water started on this day
        ndtha[yi, 0] = thisndtha
        netha[yi, 0] = thisnetha
######
# other variables
######
## Frode's variables
## reference fr15t.loc[month]
## target t.reample('M').iloc[13, :]
# For each month, find the vertical coordinate (layer centres assumed at
# x = layer_index + 0.5) at which the monthly mean temperature profile
# crosses the monthly reference temperature fr15tr, by bisecting a linear
# interpolant of (profile - reference).
fr15tall = np.nan * fr15troriginal
fr15tall.index = t.resample('M').index
for monthi in range(fr15tall.shape[0]):
    m = fr15tall.index.month[monthi]
    yt = fr15tr.loc[m]  # reference temperature for this calendar month
    if np.any(np.isnan(yt)):
        continue
    if yt < 3.98:
        # skip references below the temperature of maximum density
        continue
    y = t.resample('M').iloc[monthi, :]  # monthly mean temperature profile
    x = np.arange(t0.shape[1]) + 0.5     # layer-centre coordinates
    if np.all((y - yt) > 0):
        # whole column warmer than reference: no crossing, leave NaN
        continue
    if np.all((y - yt) < 0):
        # whole column colder than reference: crossing at the surface
        fr15tall[monthi] = 0
        continue
    # bracket the sign change, then bisect the interpolated difference
    a = max(x[((y - yt) > 0).values])
    b = min(x[((y - yt) < 0).values])
    f = scipy.interpolate.interp1d(x, y - yt)
    fr15tall[monthi] = scipy.optimize.bisect(f, a, b, xtol=0.001)
# tttt = fr15tall.groupby(fr15tall.index.month).mean()
# print(tttt[tttt.notnull()])
fr15t = fr15tall.values.reshape((10, 12))
# Same crossing-depth calculation as the fr15 loop above, against the
# fr05tr reference temperatures.
# NOTE(review): duplicated verbatim from the fr15 loop -- a shared helper
# would keep the two copies from drifting apart.
fr05tall = np.nan * fr05troriginal
fr05tall.index = t.resample('M').index
for monthi in range(fr05tall.shape[0]):
    m = fr05tall.index.month[monthi]
    yt = fr05tr.loc[m]  # reference temperature for this calendar month
    if np.any(np.isnan(yt)):
        continue
    if yt < 3.98:
        # skip references below the temperature of maximum density
        continue
    y = t.resample('M').iloc[monthi, :]  # monthly mean temperature profile
    x = np.arange(t0.shape[1]) + 0.5     # layer-centre coordinates
    if np.all((y - yt) > 0):
        # whole column warmer than reference: no crossing, leave NaN
        continue
    if np.all((y - yt) < 0):
        # whole column colder than reference: crossing at the surface
        fr05tall[monthi] = 0
        continue
    # bracket the sign change, then bisect the interpolated difference
    a = max(x[((y - yt) > 0).values])
    b = min(x[((y - yt) < 0).values])
    f = scipy.interpolate.interp1d(x, y - yt)
    fr05tall[monthi] = scipy.optimize.bisect(f, a, b, xtol=0.001)
# tttt = fr05tall.groupby(fr05tall.index.month).mean()
# print(tttt[tttt.notnull()])
fr05t = fr05tall.values.reshape((10, 12))
# Shortwave attenuation by ice (h column 0) and snow (h column 1); days
# without ice cover (h column 6 != 1) get no attenuation (factor 1).
icesnowatt = np.exp(-5 * h.iloc[:, 0]) * np.exp(-15 * h.iloc[:, 1])
icesnowatt.iloc[np.logical_not(h.iloc[:, 6] == 1).values] = 1
# shortwave reaching the water, scaled by 0.45 (PAR-like fraction)
incomingsw = q.iloc[:, 0] * icesnowatt * 0.45
incomingsw.index = period
# Monthly light-level depths: assuming exponential decay with unit
# attenuation coefficient, the depth at which irradiance drops to the
# (log-scale) monthly reference refsw is -log(exp(refsw) / I0).
fr15pall = np.nan * fr05troriginal
fr15pall.index = t.resample('M').index
fr05pall = np.nan * fr05troriginal
fr05pall.index = t.resample('M').index
for monthi in range(fr05tall.shape[0]):
    m = fr05tall.index.month[monthi]
    year = fr05tall.index.year[monthi]
    refsw15 = fr15pr.loc[m]  # monthly reference (log irradiance)
    refsw05 = fr05pr.loc[m]
    sub = incomingsw[(incomingsw.index.year == year) &
                     (incomingsw.index.month == m)]
    fr15pall[monthi] = np.mean(-1 * np.log(np.exp(refsw15) / sub))
    fr05pall[monthi] = np.mean(-1 * np.log(np.exp(refsw05) / sub))
fr15p = fr15pall.values.reshape((10, 12))
fr05p = fr05pall.values.reshape((10, 12))
# Mask non-physical values: infinities (zero irradiance months) and
# depths below the bottom of the modelled column.
fr15p[np.isinf(fr15p)] = np.nan
fr15p[fr15p > t.shape[1]] = np.nan
fr05p[np.isinf(fr05p)] = np.nan
# BUG FIX: this mask previously tested fr15p (copy-paste error from the
# two lines above), so fr05p was NaN-masked by the wrong array's values.
fr05p[fr05p > t.shape[1]] = np.nan
# form into shape: one row per year, columns in the exact order expected
# by the `cols` list defined below (2 id columns + 116 data columns)
a = np.concatenate((tsw00, tsw, pcd00, pcd, pcn00, pcn, ndice00, ndice,
                    csomeice, clongstrat, clongice,
                    maxdoy, mixspr0, mixspr1, mixaut0, mixaut1,
                    ndmix, nemix, category,
                    icedu, iceon, iceof, ndtha, netha,
                    fr15t, fr05t, fr15p, fr05p), axis=1)
dim0 = 10
# The commented-out variant below appended mean/median/min/max summary
# rows (labelled 100-103) after the 10 yearly rows:
# dim0 = 10 + 4
# dim1 = a.shape[1]
# a100 = a.mean(axis=0).reshape((1, dim1))
# a101 = np.median(a, axis=0).reshape((1, dim1))
# a102 = a.min(axis=0).reshape((1, dim1))
# a103 = a.max(axis=0).reshape((1, dim1))
# y = np.array(range(10) + [100, 101, 102, 103]).reshape((dim0, 1))
# b = np.concatenate((a, a100, a101, a102, a103), axis=0)
y = np.array(np.arange(10).reshape((dim0, 1)))
b = a
# prepend the simulation id and year-index columns
c = np.concatenate((np.array([[sim]]).repeat(dim0, axis=0), y, b),
                   axis=1)
# accumulate rows across simulations into `result`
if first_time:
    result = c
    first_time = False
else:
    result = np.concatenate((result, c), axis=0)
# Column names for the assembled rows (order must match the
# np.concatenate that built `a` above).
# NOTE(review): surface-temperature columns are labelled 'tws*' although
# the source arrays are named tsw* -- confirm which spelling downstream
# consumers expect before renaming either side.
cols = ['sim_id', 'y',
        'tws00', 'tws01', 'tws02', 'tws03', 'tws04', 'tws05', 'tws06',
        'tws07', 'tws08', 'tws09', 'tws10', 'tws11', 'tws12',
        'pcd00', 'pcd01', 'pcd02', 'pcd03', 'pcd04', 'pcd05', 'pcd06',
        'pcd07', 'pcd08', 'pcd09', 'pcd10', 'pcd11', 'pcd12',
        'pcn00', 'pcn01', 'pcn02', 'pcn03', 'pcn04', 'pcn05', 'pcn06',
        'pcn07', 'pcn08', 'pcn09', 'pcn10', 'pcn11', 'pcn12',
        'ice00', 'ice01', 'ice02', 'ice03', 'ice04', 'ice05', 'ice06',
        'ice07', 'ice08', 'ice09', 'ice10', 'ice11', 'ice12',
        'csomeice', 'clongstrat', 'clongice',
        'maxdoy', 'mixspr0', 'mixspr1', 'mixaut0', 'mixaut1',
        'ndmix', 'nemix', 'category',
        'icedu', 'iceon', 'iceof', 'ndtha', 'netha',
        'f15t01', 'f15t02', 'f15t03', 'f15t04', 'f15t05', 'f15t06',
        'f15t07', 'f15t08', 'f15t09', 'f15t10', 'f15t11', 'f15t12',
        'f05t01', 'f05t02', 'f05t03', 'f05t04', 'f05t05', 'f05t06',
        'f05t07', 'f05t08', 'f05t09', 'f05t10', 'f05t11', 'f05t12',
        'f15p01', 'f15p02', 'f15p03', 'f15p04', 'f15p05', 'f15p06',
        'f15p07', 'f15p08', 'f15p09', 'f15p10', 'f15p11', 'f15p12',
        'f05p01', 'f05p02', 'f05p03', 'f05p04', 'f05p05', 'f05p06',
        'f05p07', 'f05p08', 'f05p09', 'f05p10', 'f05p11', 'f05p12']
result = pd.DataFrame(result, columns=cols)
## still need to do something with the iceon after 31st December
## comsat
# In [63]: result22 = result
# In [64]: result22['ebint'] = result.sim_id - 440000000
# In [65]: agg = result22.groupby('sim_id').mean()
# In [66]: agg.to_csv('comsat_DMI_mean-2001-2010.csv')
# In [67]: m = agg.merge(comsat)
# restore boolean dtype lost when the flags went through the float
# concatenation above
result['csomeice'] = result['csomeice'].astype(bool)
result['clongstrat'] = result['clongstrat'].astype(bool)
result['clongice'] = result['clongice'].astype(bool)
result.to_csv('test20151110a.csv')
result.to_sql(name='sim3', con=engine, schema='mylake',
              if_exists='append', index=False)
| {
"alphanum_fraction": 0.5080531033,
"author": null,
"avg_line_length": 44.0326409496,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0d5a2a2065e1789183178f07d8fadea93bb3a79e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "28338f4f27476cab56a856a6f3be2be91d637194",
"max_forks_repo_licenses": [
"FSFAP"
],
"max_forks_repo_name": "kojitominaga/eb",
"max_forks_repo_path": "postprocessing/digest.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "28338f4f27476cab56a856a6f3be2be91d637194",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"FSFAP"
],
"max_issues_repo_name": "kojitominaga/eb",
"max_issues_repo_path": "postprocessing/digest.py",
"max_line_length": 119,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "5eaf4de30c89ff1e855a6be493105d1201f07f74",
"max_stars_repo_licenses": [
"FSFAP"
],
"max_stars_repo_name": "kojitominaga/scratch",
"max_stars_repo_path": "270k/eb/digest.py",
"max_stars_repo_stars_event_max_datetime": "2017-11-02T03:52:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-11-02T03:52:05.000Z",
"num_tokens": 8805,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 29678
} |
!-------------------------------------------------------------------------------
! Copyright (c) 2021, Whitman T. Dailey
! All rights reserved.
!
! Redistribution and use in source and binary forms, with or without
! modification, are permitted provided that the following conditions are met:
! 1. Redistributions of source code must retain the above copyright notice, this
! list of conditions and the following disclaimer.
! 2. Redistributions in binary form must reproduce the above copyright notice,
! this list of conditions and the following disclaimer in the documentation
! and/or other materials provided with the distribution.
! 3. Neither the name of the copyright holder nor the names of its
! contributors may be used to endorse or promote products derived from
! this software without specific prior written permission.
!
! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
! DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
! FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
!-------------------------------------------------------------------------------
Module Xscat
Use Kinds, Only: dp
Implicit None
Private
Public :: Thompson_AD
Public :: Coherent_AD
Public :: KN_AD
Public :: Incoherent_AD
Public :: mu_KN
Public :: mu_Thomp
Public :: mu_CohS
Public :: mu_IncS
Public :: Isotropic_Azimuth
Public :: Isotropic_mu
Public :: Isotropic_Omega_Hat
Public :: mu_w_2_OmegaHat
Public :: Post_IS_Alpha
Integer, Parameter :: nZm = 30
Integer, Parameter :: nZs = 100
Integer :: ii
Real(dp), Parameter :: Za(1:nZs) = (/ (Real(ii,dp), ii = 1,nZs) /)
!The following parameters define curve fits for the Atomic Form Factor for Z = 1-30
!Values and formulae from:
! Muhammad, W. & Lee, S.H. New Empirical Equation for the Atomic Form Factor Function in the
! Momentum Transfer Range q=0-50A-1 for the Elements Z=1-30, PLoS ONE, 8,
! 2013.
Real(dp), Parameter :: rm(1:nZm) = (/ 2._dp, &
& 1.994_dp, &
& 2.934_dp, &
& 2.998_dp, &
& 2.9952_dp, &
& 2.87_dp, &
& 3.0808_dp, &
& 2.5968_dp, &
& 2.676_dp, &
& 2.6768_dp, &
& 2.4892_dp, &
& 2.573_dp, &
& 2.5948_dp, &
& 2.7396_dp, &
& 2.8128_dp, &
& 2.9188_dp, &
& 3.0348_dp, &
& 3.1704_dp, &
& 3.1948_dp, &
& 3.1412_dp, &
& 2.5908_dp, &
& 2.5368_dp, &
& 2.7128_dp, &
& 2.7208_dp, &
& 2.536_dp, &
& 2.58_dp, &
& 2.632_dp, &
& 2.648_dp, &
& 2.648_dp, &
& 2.78_dp /)
Real(dp), Parameter :: a1m(1:nZm) = ( (/ 1._dp, & !Arithmetic (*z, **r) performed in parameter definition to reduce
& 1.07342_dp, & ! repeated arithmetic in AFF evaluation
& 0.38714_dp, &
& 0.28152_dp, &
& 0.2303_dp, &
& 0.18732_dp, &
& 0.13286_dp, &
& 0.0939_dp, &
& 0.08332_dp, &
& 0.07474_dp, &
& 0.0539_dp, &
& 0.05214_dp, &
& 0.04874_dp, &
& 0.04866_dp, &
& 0.04704_dp, &
& 0.0459_dp, &
& 0.04484_dp, &
& 0.0439_dp, &
& 0.0416_dp, &
& 0.03902_dp, &
& 0.03036_dp, &
& 0.02814_dp, &
& 0.02944_dp, &
& 0.02866_dp, &
& 0.02488_dp, &
& 0.02458_dp, &
& 0.02438_dp, &
& 0.02378_dp, &
& 0.02328_dp, &
& 0.02398_dp /) * Za(1:nZm) )**rm
Real(dp), Parameter :: a2m(1:nZm) = ( (/ 1.E5_dp, & !Arithmetic (*z, **r) performed in parameter definition to reduce
& 0.12568_dp, & ! repeated arithmetic in AFF evaluation
& 0.07786_dp, &
& 0.04686_dp, &
& 0.03316_dp, &
& 0.02471_dp, &
& 0.02926_dp, &
& 0.02472_dp, &
& 0.02226_dp, &
& 0.02016_dp, &
& 0.01797_dp, &
& 0.01662_dp, &
& 0.01539_dp, &
& 0.01441_dp, &
& 0.01354_dp, &
& 0.01276_dp, &
& 0.0121_dp, &
& 0.01151_dp, &
& 0.01105_dp, &
& 0.01046_dp, &
& 0.00964_dp, &
& 0.00923_dp, &
& 0.00877_dp, &
& 0.00854_dp, &
& 0.00813_dp, &
& 0.00785_dp, &
& 0.00763_dp, &
& 0.00727_dp, &
& 0.00712_dp, &
& 0.00677_dp /) * Za(1:nZm) )**rm
Real(dp), Parameter :: a3m(1:nZm) = ( (/ 1.E5_dp, & !Arithmetic (*z) performed in parameter definition to reduce
& 0.14111_dp, & ! repeated arithmetic in AFF evaluation
& 0.01882_dp, &
& 0.01342_dp, &
& 0.01083_dp, &
& 0.01421_dp, &
& 0.00181_dp, &
& 0.00813_dp, &
& 0.00534_dp, &
& 0.004_dp, &
& 0.00855_dp, &
& 0.00493_dp, &
& 0.00361_dp, &
& 0.00192_dp, &
& 0.00121_dp, &
& 6.60E-4_dp, &
& 3.10E-4_dp, &
& 1.07E-4_dp, &
& 7.92E-5_dp, &
& 1.06E-4_dp, &
& 0.00112_dp, &
& 0.00116_dp, &
& 6.29E-4_dp, &
& 5.43E-4_dp, &
& 8.91E-4_dp, &
& 7.26E-4_dp, &
& 5.73E-4_dp, &
& 5.05E-4_dp, &
& 4.61E-4_dp, &
& 2.30E-4_dp /) * Za(1:nZm) )
Real(dp), Parameter :: b1m(1:nZm) = (/ 1._dp, &
& 0.64272_dp, &
& 0.09257_dp, &
& 0.01825_dp, &
& 0.00808_dp, &
& 0.00503_dp, &
& 0.2496_dp, &
& 0.13992_dp, &
& 0.09631_dp, &
& 0.0664_dp, &
& 0.0382_dp, &
& 0.02571_dp, &
& 0.01839_dp, &
& 0.01224_dp, &
& 0.00832_dp, &
& 0.00561_dp, &
& 0.00368_dp, &
& 0.00232_dp, &
& 0.00176_dp, &
& 0.00155_dp, &
& 0.00331_dp, &
& 0.00307_dp, &
& 0.00212_dp, &
& 0.00167_dp, &
& 0.00207_dp, &
& 0.00171_dp, &
& 0.00137_dp, &
& 0.00123_dp, &
& 0.00102_dp, &
& 7.35E-4_dp /)
Real(dp), Parameter :: b2m(1:nZm) = (/ 1._dp, &
& 0.0845_dp, &
& 0.554_dp, &
& 0.1465_dp, &
& 0.06074_dp, &
& 0.03378_dp, &
& 0.00127_dp, &
& 0.00135_dp, &
& 0.00101_dp, &
& 8.15E-4_dp, &
& 7.56E-4_dp, &
& 5.95E-4_dp, &
& 4.96E-4_dp, &
& 3.82E-4_dp, &
& 3.14E-4_dp, &
& 2.54E-4_dp, &
& 2.06E-4_dp, &
& 1.66E-4_dp, &
& 1.48E-4_dp, &
& 1.38E-4_dp, &
& 1.83E-4_dp, &
& 1.73E-4_dp, &
& 1.36E-4_dp, &
& 1.26E-4_dp, &
& 1.32E-4_dp, &
& 1.18E-4_dp, &
& 1.05E-4_dp, &
& 9.46E-5_dp, &
& 8.91E-5_dp, &
& 7.09E-5_dp /)
Real(dp), Parameter :: b3m(1:nZm) = (/ 1._dp, &
& 0.17253_dp, &
& 0.00128_dp, &
& 5.80E-4_dp, &
& 3.69E-4_dp, &
& 5.79E-4_dp, &
& 7.60E-4_dp, &
& 0.01953_dp, &
& 0.00698_dp, &
& 0.00361_dp, &
& 0.11457_dp, &
& 0.03657_dp, &
& 0.02043_dp, &
& 0.00521_dp, &
& 0.00187_dp, &
& 5.16E-4_dp, &
& 1.06E-4_dp, &
& 1.19E-5_dp, &
& 6.49E-6_dp, &
& 1.21E-5_dp, &
& 0.0016_dp, &
& 0.00172_dp, &
& 4.76E-4_dp, &
& 3.24E-4_dp, &
& 9.25E-4_dp, &
& 5.92E-4_dp, &
& 3.55E-4_dp, &
& 2.69E-4_dp, &
& 2.12E-4_dp, &
& 5.28E-5_dp /)
Real(dp), Parameter :: cm(1:nZm) = 1._dp / &
& ( a1m / a1m**rm + & !Muhammad & Lee (2013) AFF evaluated at x=0 (for normalization)
& a2m / (2._dp**rm * a2m)**2 + &
& a3m**rm / (4._dp*a3m**2)**2 )
!The following parameters define curve fits for the Atomic Form Factor for Z = 31-100
!Values and formulae from:
! Z = 31-100:
! Szaloki, Imre. Empirical Formulations for Atomic Form Factor and Incoherent Scattering Functions,
! X-ray Spectrometry, Vol 25, p.21-28, 1996.
!The following parameters define curve fits for the Incoherent Scattering Function for Z = 1-100
!Values and formulae from:
! Szaloki, Imre. Empirical Formulations for Atomic Form Factor and Incoherent Scattering Functions,
! X-ray Spectrometry, Vol 25, p.21-28, 1996
Real(dp), Parameter :: x1(1:nZs) = (/ 0.05_dp, & !referred to as q1 in the reference
& 0.04_dp, &
& 0.07_dp, &
& 0.1_dp, &
& 0.125_dp, &
& 0.1_dp, &
& 0.1_dp, &
& 0.07_dp, &
& 0.09_dp, &
& 0.09_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.05_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.1_dp, &
& 0.04_dp, &
& 0.05_dp, &
& 0.05_dp, &
& 0.025_dp, &
& 0.03_dp, &
& 0.04_dp, &
& 0.03_dp, &
& 0.03_dp, &
& 0.07_dp, &
& 0.03_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.09_dp, &
& 0.09_dp, &
& 0.04_dp, &
& 0.05_dp, &
& 0.1_dp, &
& 0.04_dp, &
& 0.05_dp, &
& 0.05_dp, &
& 0.05_dp, &
& 0.05_dp, &
& 0.05_dp, &
& 0.03_dp, &
& 0.04_dp, &
& 0.05_dp, &
& 0.1_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.04_dp, &
& 0.07_dp, &
& 0.09_dp, &
& 0.09_dp, &
& 0.09_dp, &
& 0.09_dp, &
& 0.04_dp, &
& 0.04_dp, &
& 0.02_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.05_dp, &
& 0.05_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.03_dp, &
& 0.03_dp, &
& 0.03_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.07_dp, &
& 0.09_dp, &
& 0.09_dp, &
& 0.04_dp, &
& 0.04_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.025_dp, &
& 0.05_dp, &
& 0.05_dp, &
& 0.05_dp, &
& 0.05_dp /)
Real(dp), Parameter :: d1(1:nZs) = (/ -0.0197_dp, &
& 0.3041_dp, &
& -0.1391_dp, &
& -0.0592_dp, &
& 0.0145_dp, &
& 0.0011_dp, &
& -0.0941_dp, &
& -0.0055_dp, &
& -0.1036_dp, &
& -0.0652_dp, &
& -0.0493_dp, &
& -0.1070_dp, &
& -0.0027_dp, &
& -0.0997_dp, &
& -0.1516_dp, &
& -0.1852_dp, &
& -0.0408_dp, &
& -0.0665_dp, &
& -0.0419_dp, &
& -0.0340_dp, &
& -0.0349_dp, &
& -0.0156_dp, &
& -0.0330_dp, &
& -0.0108_dp, &
& -0.0174_dp, &
& -0.0280_dp, &
& -0.0177_dp, &
& -0.0263_dp, &
& -0.0061_dp, &
& -0.0289_dp, &
& -0.0107_dp, &
& -0.0027_dp, &
& -0.0150_dp, &
& -0.0034_dp, &
& -0.0136_dp, &
& -0.0058_dp, &
& -0.0402_dp, &
& -0.0063_dp, &
& -0.0421_dp, &
& -0.0573_dp, &
& -0.0449_dp, &
& -0.0473_dp, &
& -0.0416_dp, &
& -0.0961_dp, &
& -0.3292_dp, &
& -0.1438_dp, &
& 0.0070_dp, &
& -0.2189_dp, &
& -0.4304_dp, &
& -0.2109_dp, &
& 0.862_dp, &
& -0.1808_dp, &
& -0.3347_dp, &
& -0.2632_dp, &
& -0.2109_dp, &
& -0.9138_dp, &
& -0.3386_dp, &
& -0.6022_dp, &
& -0.7030_dp, &
& -0.5836_dp, &
& -0.6086_dp, &
& -0.5146_dp, &
& -0.5679_dp, &
& -0.2125_dp, &
& -0.2921_dp, &
& -0.5423_dp, &
& -0.5276_dp, &
& -0.3955_dp, &
& -0.5309_dp, &
& -0.3734_dp, &
& -0.2836_dp, &
& -0.4854_dp, &
& -0.5371_dp, &
& -0.4548_dp, &
& 0.0757_dp, &
& -0.0351_dp, &
& -0.0037_dp, &
& -0.0436_dp, &
& -0.2577_dp, &
& -0.3896_dp, &
& -0.1529_dp, &
& -0.2124_dp, &
& -0.4104_dp, &
& -0.1400_dp, &
& -0.3421_dp, &
& -0.1104_dp, &
& -0.9244_dp, &
& -0.7224_dp, &
& -0.6601_dp, &
& -0.6586_dp, &
& -0.6373_dp, &
& -0.5755_dp, &
& -0.7476_dp, &
& -0.6070_dp, &
& -0.5474_dp, &
& -0.2714_dp, &
& -0.2289_dp, &
& -0.4307_dp, &
& 0.1362_dp, &
& 0.0498_dp /)
Real(dp), Parameter :: d2(1:nZs) = (/ 46.658_dp, &
& 15.786_dp, &
& 298.736_dp, &
& 192.903_dp, &
& 163.483_dp, &
& 142.681_dp, &
& 140.260_dp, &
& 116.887_dp, &
& 104.404_dp, &
& 94.148_dp, &
& 381.537_dp, &
& 438.065_dp, &
& 424.210_dp, &
& 388.077_dp, &
& 348.382_dp, &
& 315.476_dp, &
& 279.197_dp, &
& 256.179_dp, &
& 718.922_dp, &
& 811.177_dp, &
& 759.683_dp, &
& 673.093_dp, &
& 649.698_dp, &
& 502.138_dp, &
& 564.433_dp, &
& 544.439_dp, &
& 518.395_dp, &
& 492.754_dp, &
& 391.062_dp, &
& 457.749_dp, &
& 475.219_dp, &
& 454.054_dp, &
& 436.395_dp, &
& 398.440_dp, &
& 387.307_dp, &
& 369.160_dp, &
& 887.728_dp, &
& 1006.909_dp, &
& 979.206_dp, &
& 935.639_dp, &
& 760.984_dp, &
& 722.571_dp, &
& 791.705_dp, &
& 613.128_dp, &
& 618.716_dp, &
& 420.385_dp, &
& 556.313_dp, &
& 644.889_dp, &
& 679.343_dp, &
& 651.565_dp, &
& 614.953_dp, &
& 602.015_dp, &
& 584.936_dp, &
& 565.540_dp, &
& 1172.401_dp, &
& 1426.524_dp, &
& 1293.899_dp, &
& 1295.463_dp, &
& 1314.655_dp, &
& 1278.915_dp, &
& 1256.848_dp, &
& 1225.067_dp, &
& 1207.566_dp, &
& 1143.609_dp, &
& 1128.846_dp, &
& 1134.899_dp, &
& 1125.777_dp, &
& 1106.509_dp, &
& 1078.094_dp, &
& 1072.986_dp, &
& 1020.140_dp, &
& 970.937_dp, &
& 956.855_dp, &
& 924.169_dp, &
& 886.166_dp, &
& 836.890_dp, &
& 815.217_dp, &
& 677.738_dp, &
& 659.168_dp, &
& 756.299_dp, &
& 773.885_dp, &
& 770.247_dp, &
& 752.468_dp, &
& 739.357_dp, &
& 710.700_dp, &
& 694.447_dp, &
& 1352.893_dp, &
& 1654.435_dp, &
& 1568.488_dp, &
& 1502.561_dp, &
& 1496.796_dp, &
& 1467.799_dp, &
& 1440.744_dp, &
& 1478.360_dp, &
& 1439.856_dp, &
& 1376.419_dp, &
& 1350.819_dp, &
& 1327.815_dp, &
& 1319.578_dp, &
& 1305.774_dp /)
Real(dp), Parameter :: d3(1:nZs) = (/ -135.793_dp, &
& 195.533_dp, &
& -2077.247_dp, &
& -816.409_dp, &
& -557.029_dp, &
& -465.650_dp, &
& -394.998_dp, &
& -267.197_dp, &
& -214.004_dp, &
& -191.236_dp, &
& -2407.050_dp, &
& -2400.246_dp, &
& -2113.027_dp, &
& -1733.065_dp, &
& -1396.561_dp, &
& -1136.386_dp, &
& -868.474_dp, &
& -748.725_dp, &
& -5676.406_dp, &
& -5464.647_dp, &
& -4820.361_dp, &
& -3437.101_dp, &
& -3538.582_dp, &
& -2564.686_dp, &
& -2560.102_dp, &
& -2603.434_dp, &
& -2513.015_dp, &
& -2197.567_dp, &
& -1717.878_dp, &
& -2066.283_dp, &
& -2135.928_dp, &
& -1878.231_dp, &
& -1737.722_dp, &
& -1256.156_dp, &
& -1256.884_dp, &
& -1175.850_dp, &
& -6984.753_dp, &
& -6947.309_dp, &
& -6341.764_dp, &
& -5744.201_dp, &
& -4310.872_dp, &
& -3936.302_dp, &
& -4074.343_dp, &
& -2304.286_dp, &
& -2945.652_dp, &
& -1322.242_dp, &
& -2436.547_dp, &
& -2969.544_dp, &
& -3186.124_dp, &
& -2842.961_dp, &
& -2533.116_dp, &
& -2353.106_dp, &
& -2178.860_dp, &
& -2042.619_dp, &
& -9491.180_dp, &
& -11183.018_dp, &
& -8075.629_dp, &
& -8681.688_dp, &
& -9366.551_dp, &
& -8868.613_dp, &
& -8646.801_dp, &
& -8212.132_dp, &
& -8074.852_dp, &
& -7338.880_dp, &
& -7192.771_dp, &
& -7209.270_dp, &
& -7216.450_dp, &
& -7013.419_dp, &
& -6617.335_dp, &
& -6689.479_dp, &
& -5677.107_dp, &
& -4976.506_dp, &
& -5171.432_dp, &
& -4836.051_dp, &
& -4427.990_dp, &
& -4085.797_dp, &
& -3890.139_dp, &
& -2941.423_dp, &
& -2803.800_dp, &
& -3421.362_dp, &
& -3477.152_dp, &
& -3348.351_dp, &
& -3201.578_dp, &
& -2996.164_dp, &
& -2697.170_dp, &
& -2576.404_dp, &
& -10938.622_dp, &
& -13223.625_dp, &
& -11089.486_dp, &
& -9838.163_dp, &
& -10166.004_dp, &
& -9819.182_dp, &
& -9428.355_dp, &
& -10299.179_dp, &
& -9680.469_dp, &
& -8667.332_dp, &
& -8701.113_dp, &
& -8709.175_dp, &
& -8631.409_dp, &
& -8506.791_dp /)
Real(dp), Parameter :: t1(1:nZs) = (/ 15.027_dp, &
& 12.571_dp, &
& 12.166_dp, &
& 12.165_dp, &
& 12.395_dp, &
& 7.220_dp, &
& 5.809_dp, &
& 3.825_dp, &
& 2.807_dp, &
& 24.538_dp, &
& 24.539_dp, &
& 5.055_dp, &
& 5.365_dp, &
& 5.364_dp, &
& 5.278_dp, &
& 5.454_dp, &
& 4.634_dp, &
& 4.291_dp, &
& 0.469_dp, &
& 0.709_dp, &
& 0.673_dp, &
& 0.646_dp, &
& 0.602_dp, &
& 0.482_dp, &
& 0.505_dp, &
& 0.468_dp, &
& 0.443_dp, &
& 0.401_dp, &
& 0.392_dp, &
& 0.345_dp, &
& 0.365_dp, &
& 0.381_dp, &
& 0.406_dp, &
& 0.448_dp, &
& 0.495_dp, &
& 0.489_dp, &
& 0.455_dp, &
& 0.456_dp, &
& 0.482_dp, &
& 0.496_dp, &
& 0.488_dp, &
& 0.473_dp, &
& 0.473_dp, &
& 0.294_dp, &
& 0.294_dp, &
& 0.395_dp, &
& 0.411_dp, &
& 0.455_dp, &
& 0.393_dp, &
& 0.397_dp, &
& 0.401_dp, &
& 0.406_dp, &
& 0.412_dp, &
& 0.401_dp, &
& 0.394_dp, &
& 0.392_dp, &
& 0.399_dp, &
& 0.391_dp, &
& 0.374_dp, &
& 0.367_dp, &
& 0.359_dp, &
& 0.351_dp, &
& 0.344_dp, &
& 0.339_dp, &
& 0.334_dp, &
& 0.324_dp, &
& 0.318_dp, &
& 0.345_dp, &
& 0.305_dp, &
& 0.299_dp, &
& 0.301_dp, &
& 0.302_dp, &
& 0.314_dp, &
& 0.303_dp, &
& 0.300_dp, &
& 0.300_dp, &
& 0.301_dp, &
& 0.305_dp, &
& 0.302_dp, &
& 0.297_dp, &
& 0.283_dp, &
& 0.303_dp, &
& 0.310_dp, &
& 0.314_dp, &
& 0.318_dp, &
& 0.323_dp, &
& 0.315_dp, &
& 0.314_dp, &
& 0.321_dp, &
& 0.325_dp, &
& 0.320_dp, &
& 0.310_dp, &
& 0.307_dp, &
& 0.296_dp, &
& 0.291_dp, &
& 0.290_dp, &
& 0.287_dp, &
& 0.276_dp, &
& 0.280_dp, &
& 0.276_dp /)
Real(dp), Parameter :: t2(1:nZs) = (/ -40.675_dp, &
& 3.383_dp, &
& 2.212_dp, &
& 2.202_dp, &
& 2.771_dp, &
& 2.774_dp, &
& 2.392_dp, &
& 2.003_dp, &
& 0.984_dp, &
& 9.492_dp, &
& 10.001_dp, &
& 10.602_dp, &
& 9.462_dp, &
& 9.477_dp, &
& 9.773_dp, &
& 9.906_dp, &
& 9.518_dp, &
& 9.360_dp, &
& 12.871_dp, &
& 9.436_dp, &
& 10.385_dp, &
& 12.546_dp, &
& 13.590_dp, &
& 15.970_dp, &
& 16.871_dp, &
& 18.311_dp, &
& 17.843_dp, &
& 21.195_dp, &
& 20.780_dp, &
& 22.227_dp, &
& 22.376_dp, &
& 22.103_dp, &
& 21.804_dp, &
& 22.226_dp, &
& 19.718_dp, &
& 18.319_dp, &
& 22.160_dp, &
& 21.177_dp, &
& 19.672_dp, &
& 18.807_dp, &
& 19.649_dp, &
& 20.688_dp, &
& 21.632_dp, &
& 31.122_dp, &
& 31.122_dp, &
& 24.386_dp, &
& 24.874_dp, &
& 22.410_dp, &
& 27.857_dp, &
& 25.923_dp, &
& 25.319_dp, &
& 24.881_dp, &
& 24.390_dp, &
& 24.154_dp, &
& 27.887_dp, &
& 27.791_dp, &
& 28.282_dp, &
& 29.034_dp, &
& 31.105_dp, &
& 32.146_dp, &
& 33.301_dp, &
& 34.491_dp, &
& 35.590_dp, &
& 34.338_dp, &
& 35.318_dp, &
& 38.853_dp, &
& 40.034_dp, &
& 37.467_dp, &
& 42.409_dp, &
& 43.564_dp, &
& 43.630_dp, &
& 43.682_dp, &
& 41.961_dp, &
& 43.630_dp, &
& 44.276_dp, &
& 42.043_dp, &
& 41.905_dp, &
& 41.777_dp, &
& 42.356_dp, &
& 43.194_dp, &
& 45.383_dp, &
& 42.106_dp, &
& 40.808_dp, &
& 39.885_dp, &
& 38.579_dp, &
& 37.429_dp, &
& 41.020_dp, &
& 40.893_dp, &
& 40.249_dp, &
& 39.205_dp, &
& 40.411_dp, &
& 42.405_dp, &
& 43.157_dp, &
& 45.833_dp, &
& 47.099_dp, &
& 47.233_dp, &
& 45.801_dp, &
& 48.287_dp, &
& 47.877_dp, &
& 48.774_dp /)
Real(dp), Parameter :: t3(1:nZs) = (/ 15.264_dp, &
& 5.910_dp, &
& 3.034_dp, &
& 2.235_dp, &
& 2.538_dp, &
& 1.656_dp, &
& 1.211_dp, &
& 0.983_dp, &
& 1.166_dp, &
& 2.387_dp, &
& 2.041_dp, &
& 1.797_dp, &
& 1.451_dp, &
& 1.352_dp, &
& 1.162_dp, &
& 1.081_dp, &
& 0.954_dp, &
& 0.886_dp, &
& 2.325_dp, &
& 2.806_dp, &
& 2.698_dp, &
& 2.628_dp, &
& 2.421_dp, &
& 2.040_dp, &
& 1.991_dp, &
& 1.865_dp, &
& 1.777_dp, &
& 1.642_dp, &
& 1.608_dp, &
& 1.482_dp, &
& 1.465_dp, &
& 1.470_dp, &
& 1.510_dp, &
& 1.603_dp, &
& 1.762_dp, &
& 1.751_dp, &
& 1.599_dp, &
& 1.586_dp, &
& 1.709_dp, &
& 1.816_dp, &
& 1.842_dp, &
& 1.803_dp, &
& 1.829_dp, &
& 1.386_dp, &
& 1.386_dp, &
& 1.617_dp, &
& 1.646_dp, &
& 1.725_dp, &
& 1.566_dp, &
& 1.602_dp, &
& 1.644_dp, &
& 1.700_dp, &
& 1.770_dp, &
& 1.945_dp, &
& 1.675_dp, &
& 1.669_dp, &
& 1.752_dp, &
& 1.703_dp, &
& 1.572_dp, &
& 1.523_dp, &
& 1.471_dp, &
& 1.418_dp, &
& 1.372_dp, &
& 1.353_dp, &
& 1.321_dp, &
& 1.264_dp, &
& 1.227_dp, &
& 1.268_dp, &
& 1.157_dp, &
& 1.125_dp, &
& 1.126_dp, &
& 1.128_dp, &
& 1.159_dp, &
& 1.133_dp, &
& 1.124_dp, &
& 1.126_dp, &
& 1.139_dp, &
& 1.170_dp, &
& 1.166_dp, &
& 1.137_dp, &
& 1.100_dp, &
& 1.172_dp, &
& 1.219_dp, &
& 1.255_dp, &
& 1.289_dp, &
& 1.347_dp, &
& 1.286_dp, &
& 1.276_dp, &
& 1.352_dp, &
& 1.408_dp, &
& 1.367_dp, &
& 1.319_dp, &
& 1.307_dp, &
& 1.229_dp, &
& 1.202_dp, &
& 1.210_dp, &
& 1.199_dp, &
& 1.157_dp, &
& 1.165_dp, &
& 1.150_dp /)
Contains
Function Isotropic_Azimuth(RNG) Result(w)
    !Samples an azimuthal scattering angle w (omega), uniform on [0,2pi)
    Use Kinds, Only: dp
    Use Global, Only: TwoPi
    Use PRNGs, Only: ACORN_Type
    Implicit None
    Real(dp) :: w  !azimuthal angle [radians]
    Type(ACORN_Type), Intent(InOut) :: RNG  !pseudo-random number stream
    !scale a unit-interval variate up to a full revolution
    w = RNG%r() * TwoPi
End Function Isotropic_Azimuth
Function Isotropic_mu(RNG) Result(mu)
    !Samples mu = cos(theta), uniform on [1,-1) (polar angles [0,pi)),
    !giving an isotropic polar scattering angle
    Use Kinds, Only: dp
    Use PRNGs, Only: ACORN_Type
    Implicit None
    Real(dp) :: mu  !cosine of the polar scattering angle
    Type(ACORN_Type), Intent(InOut) :: RNG  !pseudo-random number stream
    !map a unit-interval variate linearly onto [-1,1)
    mu = -1._dp + 2._dp * RNG%r()
End Function Isotropic_mu
Function mu_w_2_OmegaHat(xi,w) Result(OmegaHat)
    !Builds the unit direction vector from a polar cosine (xi) and an
    !azimuthal rotation (w) about zHat
    Use Kinds, Only: dp
    Implicit None
    Real(dp) :: OmegaHat(1:3)  !unit direction vector (x,y,z)
    Real(dp), Intent(In) :: xi  !cosine of the polar scattering angle (z-component)
    Real(dp), Intent(In) :: w  !azimuthal scattering angle [radians]
    Real(dp) :: sin_theta  !sine of the polar angle, magnitude of the in-plane component
    sin_theta = Sqrt(1._dp - xi**2)
    !resolve the in-plane component onto xHat and yHat
    OmegaHat = (/ sin_theta * Cos(w), sin_theta * Sin(w), xi /)
End Function mu_w_2_OmegaHat
Function Isotropic_Omega_hat(RNG) Result(OmegaHat)
    !Returns an isotropically distributed random unit direction
    Use Kinds, Only: dp
    Use PRNGs, Only: ACORN_Type
    Implicit None
    Real(dp):: OmegaHat(1:3)  !unit direction vector
    Type(ACORN_Type), Intent(InOut) :: RNG  !pseudo-random number stream
    Real(dp):: polar_cosine  !cos(theta), theta measured from zHat
    Real(dp):: azimuth  !angle from xHat toward yHat around zHat
    !draw the two angles as separate statements (not nested calls) so the
    !order of RNG draws is well-defined: polar cosine first, then azimuth
    polar_cosine = Isotropic_mu(RNG)
    azimuth = Isotropic_Azimuth(RNG)
    OmegaHat = mu_w_2_OmegaHat(polar_cosine,azimuth)
End Function Isotropic_Omega_hat
Function mu_Thomp(RNG) Result(mu)
    !Samples mu = cos(theta) from the Thompson angular distribution on
    ![1,-1) (angles [0,pi)) by direct inversion of the CDF
    Use Kinds, Only: dp
    Use PRNGs, Only: ACORN_Type
    Implicit None
    Real(dp) :: mu  !cosine of the polar scattering angle
    Type(ACORN_Type), Intent(InOut) :: RNG  !pseudo-random number stream
    Real(dp) :: u  !uniform variate on [0,1)
    Real(dp) :: q  !cube-root argument of the inverted CDF
    Real(dp), Parameter :: one_third = 1._dp / 3._dp
    Real(dp), Parameter :: two_thirds = 2._dp / 3._dp
    u = RNG%r()
    !closed-form inversion of the cubic CDF of the Thompson distribution
    q = 2._dp - 4._dp * u + Sqrt(5._dp + 16._dp*u*(u-1._dp))
    mu = (1._dp - q**two_thirds) / (q**one_third)
End Function mu_Thomp
Function Thompson_AD(mu) Result(ds)
    !Evaluates the normalized Thompson angular distribution at polar cosine mu
    Use Kinds, Only: dp
    Implicit None
    Real(dp) :: ds  !differential cross section element for scatter at cosine mu
    Real(dp),Intent(In) :: mu  !cosine of the desired scatter angle
    ds = (1._dp + mu*mu) * 0.5_dp
End Function Thompson_AD
Function Atomic_Form_Factor_scaled(Zi,mu,alpha) Result(aff)
!Returns normalized atomic form factor for scattering by an atom with atomic number Zi for a scatter with
!incident energy alpha (in electron rest masses) and scattering angle cosine mu.
!Coefficient tables (cm, a1m..a3m, b1m..b3m, rm) and the cutoff nZm are module-level parameters.
Use Kinds, Only: dp
Use Global, Only: TwoPi
Use Global, Only: m0c2 ![keV] rest mass energy of an electron
Use Global, Only: speed_of_light ![km/s] speed of light in a vacuum
Use Global, Only: h_Planck ![J*s] Planck constant
Use Global, Only: keV_per_Joule ![keV/J]
Use Global, Only: a0_Bohr ![m] Bohr Radius
Implicit None
Real(dp) :: aff ![] value of the atomic form factor, normalized to its value at max momentum transfer
Integer, Intent(In) :: Zi !atomic number of the target atom
Real(dp), Intent(In) :: mu !cosine of the scattering angle
Real(dp), Intent(In) :: alpha !Ratio of photon energy to electron rest mass energy
Real(dp) :: x !argument to atomic form factor function
Real(dp), Parameter :: q2x = TwoPi * a0_bohr * m0c2 / (keV_per_Joule * h_Planck * speed_of_light * 1.E3_dp)
![m*keV / (kev/J * J*s * km/s * 1000m/km)] = [] (unitless from inclusion of a0)
!Conversion factor for momentum transfer to x (argument to AFF), arithmetic (*TwoPi, *a0) added
!and factor of 1/2 omitted to reduce repeated arithmetic later
!Evaluate argument to AFF
x = q2x * alpha * Sqrt(0.5_dp * (1 - mu)) !factor of 2 omitted to reduce arithmetic
!corresponding factor of 1/2 was omitted from definition of parameter q2x
!Evaluate AFF
If (Zi .LE. nZm) Then !use the formulation from Muhammad & Lee (2013)
aff = cm(Zi) * ( a1m(Zi) / (a1m(Zi) + b1m(Zi)*x**rm(Zi))**rm(Zi) + &
& a2m(Zi) / (2._dp**rm(Zi) * a2m(Zi) + b2m(Zi)*x**2)**2 + &
& a3m(Zi)**rm(Zi) / (4._dp*a3m(Zi)**2 + b3m(Zi)*x**2)**2 )
Else !use the formulation from Szaloki (1996)
!NOTE(review): this branch is entirely commented out, so aff is returned
!UNDEFINED whenever Zi > nZm. Confirm that callers guarantee Zi <= nZm,
!or restore the Szaloki (1996) formulation below.
! If (x .LT. x1(Zi)) Then !use f21 formulation
! aff = as(Zi) * Exp(-b1s(Zi)*x) + (Za(Zi) - as(Zi)*Exp(-cs(Zi)*x))
! Else If (x .LT. x2(Zi)) Then !use f22 formulation
! aff = f21(Zi) * Exp( b2s(Zi) * (x1(Zi)-x) )
! Else !x.GT.x2(Zi), use f23 formulation
! aff = f22(Zi) * Exp( b3s(Zi) * (x2(Zi)-x) )
! End If
! aff = aff / Zs(Zi)
End If
End Function Atomic_Form_Factor_scaled
Function Coherent_AD(Z,mu,alpha) Result(ds)
    !Angular distribution of coherently (Rayleigh) scattered photons from an
    !atom with atomic number Z: the Thompson distribution at mu, weighted by
    !the squared (scaled) atomic form factor
    Use Kinds, Only: dp
    Implicit None
    Real(dp) :: ds  !differential cross section element for scatter at cosine mu
    Integer, Intent(In) :: Z  !atomic number of the target atom
    Real(dp), Intent(In) :: mu  !cosine of the desired scattering angle
    Real(dp), Intent(In) :: alpha  !ratio of gamma energy to electron rest mass energy
    Real(dp) :: aff  !scaled atomic form factor at this (Z,mu,alpha)
    aff = Atomic_Form_Factor_scaled(Z,mu,alpha)
    ds = aff * aff * Thompson_AD(mu)
End Function Coherent_AD
Function mu_CohS(RNG,Z,alpha) Result(mu)
    !Samples the polar scattering cosine for coherent scatter from an atom
    !with atomic number Z, by geometric rejection against the Thompson
    !distribution (which majorizes the coherent angular distribution)
    Use Kinds, Only: dp
    Use PRNGs, Only: ACORN_type
    Implicit None
    Real(dp) :: mu  !cosine of the polar scattering angle
    Type(ACORN_type), Intent(InOut) :: RNG  !pseudo-random number stream
    Integer, Intent(In) :: Z  !atomic number of the target atom
    Real(dp), Intent(In) :: alpha ![] energy of the incident photon, in rest mass energy of an electron
    Real(dp) :: f_coh  !coherent-scatter angular distribution at the proposed mu
    Do
        !propose a cosine from the Thompson distribution
        mu = mu_Thomp(RNG)
        f_coh = Coherent_AD(Z,mu,alpha)
        !accept when the test point under the Thompson envelope falls below
        !the coherent distribution; otherwise repeat
        If ( f_coh .GT. Thompson_AD(mu)*RNG%r() ) Exit
    End Do
End Function mu_CohS
Function mu_KN(RNG,alpha) Result(mu)
    !Samples mu = cos(theta) from the Klein-Nishina distribution on [1,-1)
    !(angles [0,pi)) by geometric rejection against the Thompson distribution
    Use Kinds, Only: dp
    Use PRNGs, Only: ACORN_Type
    Implicit None
    Real(dp) :: mu  !cosine of the polar scattering angle
    Type(ACORN_Type), Intent(InOut) :: RNG  !pseudo-random number stream
    Real(dp), Intent(In) :: alpha ![] energy of the incident photon, in rest mass energy of an electron
    Real(dp) :: f_kn  !scaled Klein-Nishina density at the proposed mu
    Real(dp) :: one_plus_mu2  !(1 + mu**2), shared subexpression
    Real(dp) :: denom  !(1 + alpha*(1 - mu)), shared subexpression
    Do
        !propose a cosine from the Thompson distribution
        mu = mu_Thomp(RNG)
        !KN density scaled onto 0 < f(mu) < 2: the common factor cancels
        !against the identically scaled Thompson test value below, saving a
        !multiplication on each side of the acceptance check
        one_plus_mu2 = 1._dp + mu**2
        denom = 1._dp + alpha * (1._dp - mu)
        f_kn = one_plus_mu2 * ( 1._dp + (alpha * (1._dp - mu))**2 / (one_plus_mu2 * denom) ) / denom**2
        !accept when the test point under the (scaled) Thompson envelope
        !falls below the scaled KN density; otherwise repeat
        If ( f_kn .GT. one_plus_mu2*RNG%r() ) Exit
    End Do
End Function mu_KN
Function KN_AD(mu,a) Result(ds)
    !Evaluates the normalized Klein-Nishina angular distribution at cosine mu
    !for an incident photon of energy a (in electron rest masses)
    Use Kinds, Only: dp
    Implicit None
    Real(dp) :: ds  !differential cross section element for scatter at cosine mu
    Real(dp), Intent(In) :: mu  !cosine of the desired scatter angle
    Real(dp), Intent(In) :: a  !ratio of photon energy to electron rest mass energy
    Real(dp) :: one_plus_mu2  !(1 + mu**2), shared subexpression
    Real(dp) :: denom  !(1 + a*(1 - mu)), the Compton energy-shift denominator
    one_plus_mu2 = 1._dp + mu**2
    denom = 1._dp + a * (1._dp - mu)
    ds = 0.5_dp * one_plus_mu2 * ( 1._dp + (a * (1._dp - mu))**2 / (one_plus_mu2 * denom) ) / denom**2
End Function KN_AD
Function Incoh_Scat_Func_scaled(Zi,mu,alpha0,alpha) Result(isf)
!Returns normalized incoherent scattering function for scattering by an atom with atomic number Zi for a scatter with
!incident energy alpha (in electron rest masses) and scattering angle cosine mu.
!Note the secondary output: alpha (Intent(Out)) returns the post-scatter photon energy.
!Fit coefficient tables (d1..d3, t1..t3, x1, Za) are module-level parameters indexed by Zi.
Use Kinds, Only: dp
Use Global, Only: m0c2 ![keV] rest mass energy of an electron
Use Global, Only: speed_of_light ![km/s] speed of light in a vacuum
Use Global, Only: h_Planck ![J*s] Planck constant
Use Global, Only: keV_per_Joule ![keV/J]
Implicit None
Real(dp) :: isf ![] value of the incoherent scattering function at mu, normalized to its value at max momentum transfer
Integer, Intent(In) :: Zi !atomic number of the target atom
Real(dp), Intent(In) :: mu !cosine of the scattering angle
Real(dp), Intent(In) :: alpha0 !Ratio of photon energy to electron rest mass energy before scatter
Real(dp), Intent(Out) :: alpha !Ratio of photon energy to electron rest mass energy after scatter
Real(dp) :: x !argument to incoherent scattering function
Real(dp) :: x_max !argument to incoherent scattering function evaluated for mu=-1 (max momentum transfer)
Real(dp), Parameter :: q2x = 0.5_dp * m0c2 / (keV_per_Joule * h_Planck * speed_of_light * 1.E13_dp)
![keV / (kev/J * J*s * km/s * 1000m/km * 10**10Angstroms/m)] = [1/Angstroms]
!Conversion factor for momentum transfer to x (argument to ISF)
!Evaluate argument to ISF (x)
!post-scatter energy follows from the Compton energy-shift relation
alpha = Post_IS_Alpha(alpha0,mu)
x = q2x * Sqrt(alpha0**2 + alpha**2 - 2._dp * alpha0 * alpha * mu)
!x_max corresponds to full backscatter (mu = -1), used for normalization
x_max = q2x * (alpha0 + alpha)
!Evaluate ISF and normalize
isf = s(x) / s(x_max)
Contains
Function s(xx)
!Piecewise fit to the incoherent scattering function: a cubic polynomial in
!(Exp(x) - 1) below the per-element break point x1(Zi), and a two-term
!exponential approach to Za(Zi) above it
Use Kinds, Only: dp
Implicit None
Real(dp) :: s
Real(dp), Intent(In) :: xx
Real(dp) :: s0,s1 !intermediate values for evaluating the incoherent scattering function
If (x1(Zi) .LT. xx) Then !x greater than critical value (x1), use "s2" formulation
!s1 is the low-x polynomial evaluated AT the break point, ensuring continuity
s0 = Exp(x1(Zi)) - 1._dp
s1 = s0 * ( d1(Zi) + s0 * (d2(Zi) + s0 * d3(Zi)) )
s = (Za(Zi) - s1 - t2(Zi)) * (1._dp - Exp( t1(Zi) * (x1(Zi) - xx) )) + &
& t2(Zi) * (1._dp - Exp( t3(Zi) * (x1(Zi) - xx) )) + s1
Else !otherwise x less than or equal to critical value (x1), use "s1" formulation
!Horner form of the cubic in (Exp(x) - 1)
s0 = Exp(xx) - 1._dp
s = s0 * ( d1(Zi) + s0 * (d2(Zi) + s0 * d3(Zi)) )
!d1*(Exp(x) - 1._dp) + d2*(Exp(x) - 1._dp)**2 + d3*(Exp(x) - 1._dp)**3
End If
End Function s
End Function Incoh_Scat_Func_scaled
Function Post_IS_Alpha(alpha0,mu) Result(alpha)
!Compton energy-shift relation: photon energy after an incoherent scatter
!through an angle with cosine mu, given the pre-scatter energy alpha0
!(both energies expressed in electron rest-mass units)
Use Kinds, Only: dp
Implicit None
Real(dp) :: alpha !Ratio of gamma energy to electron rest mass energy after scatter
Real(dp),Intent(In) :: alpha0 !Ratio of gamma energy to electron rest mass energy before scatter
Real(dp),Intent(In) :: mu !cosine of the scattering angle
alpha = alpha0 / (1._dp + alpha0*(1._dp - mu))
End Function Post_IS_Alpha
Function Incoherent_AD(Z,mu,alpha0,alpha) Result(ds)
    !Angular distribution of incoherently (Compton) scattered photons from an
    !atom with atomic number Z: the Klein-Nishina density at mu, weighted by
    !the scaled incoherent scattering function. Also returns, via alpha, the
    !post-scatter photon energy.
    Use Kinds, Only: dp
    Implicit None
    Real(dp) :: ds  !differential cross section element for scatter at cosine mu
    Integer, Intent(In) :: Z  !atomic number of the target atom
    Real(dp),Intent(In) :: mu  !cosine of the desired scattering angle
    Real(dp),Intent(In) :: alpha0  !ratio of gamma energy to electron rest mass energy before scatter
    Real(dp),Intent(Out) :: alpha  !ratio of gamma energy to electron rest mass energy after scatter
    Real(dp) :: isf  !scaled incoherent scattering function (its evaluation also sets alpha)
    isf = Incoh_Scat_Func_scaled(Z,mu,alpha0,alpha)
    ds = isf * KN_AD(mu,alpha0)
End Function Incoherent_AD
Subroutine mu_IncS(RNG,Z,a0,mu,a)
    !Samples the polar scattering cosine (and the resulting photon energy)
    !for incoherent scatter from an atom with atomic number Z, by rejection
    !against the Klein-Nishina distribution
    Use Kinds, Only: dp
    Use PRNGs, Only: ACORN_type
    Implicit None
    Type(ACORN_type), Intent(InOut) :: RNG  !pseudo-random number stream
    Integer, Intent(In) :: Z  !atomic number of the target atom
    Real(dp),Intent(In) :: a0 ![] energy of the incident photon, in rest mass energy of an electron
    Real(dp),Intent(Out) :: mu  !cosine of the polar scattering angle
    Real(dp),Intent(Out) :: a ![] energy of the scattered photon, in rest mass energy of an electron
    Real(dp) :: f_inc  !incoherent angular distribution at the proposed mu
    Do
        !propose a cosine from the Klein-Nishina distribution
        mu = mu_KN(RNG,a0)
        !evaluating the angular distribution also returns the post-scatter energy in a
        f_inc = Incoherent_AD(Z,mu,a0,a)
        !accept when the test point under the KN envelope falls below the
        !incoherent distribution; otherwise repeat
        If ( f_inc .GT. KN_AD(mu,a0)*RNG%r() ) Exit
    End Do
End Subroutine mu_IncS
End Module Xscat
| {
"alphanum_fraction": 0.27610304,
"author": null,
"avg_line_length": 55.313771518,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "045951bfee67b863802f455802f2b1e219be0ac2",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c05ff2d218e51ddce1855bef0bc2981deccbb620",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "wtdailey/XrayToolKit",
"max_forks_repo_path": "source/2trans/Xscat.f90",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c05ff2d218e51ddce1855bef0bc2981deccbb620",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "wtdailey/XrayToolKit",
"max_issues_repo_path": "source/2trans/Xscat.f90",
"max_line_length": 131,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c05ff2d218e51ddce1855bef0bc2981deccbb620",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "wtdailey/XrayToolKit",
"max_stars_repo_path": "source/2trans/Xscat.f90",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 15639,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 70691
} |
"""MPI-INF-3DHP dataset."""
import copy
import json
import os
import pickle as pk
import numpy as np
import scipy.misc
import torch.utils.data as data
from hybrik.utils.bbox import bbox_clip_xyxy, bbox_xywh_to_xyxy
from hybrik.utils.pose_utils import cam2pixel_matrix, pixel2cam_matrix, reconstruction_error
from hybrik.utils.presets import SimpleTransform3DSMPL
class HP3D(data.Dataset):
    """ MPI-INF-3DHP dataset.

    Parameters
    ----------
    ann_file: str,
        Annotation split tag; expanded to the annotation json file path.
    root: str, default './data/3dhp'
        Path to the 3dhp dataset.
    train: bool, default is True
        If true, will set as training mode.
    skip_empty: bool, default is True
        Whether skip entire image if no valid label is found.
    """
    CLASSES = ['person']
    # 1-based indices into the 28-joint skeleton, converted to 0-based:
    # the 17 joints used for evaluation
    EVAL_JOINTS = [i - 1 for i in [8, 6, 15, 16, 17, 10, 11, 12, 24, 25, 26, 19, 20, 21, 5, 4, 7]]
    # mapping used to reorder ground-truth joints into the 17-joint layout below
    EVAL_JOINTS_17 = [
        14,
        11, 12, 13,
        8, 9, 10,
        15, 1,
        16, 0,
        5, 6, 7,
        2, 3, 4
    ]
    joints_name_17 = (
        'Pelvis',                               # 0
        'L_Hip', 'L_Knee', 'L_Ankle',           # 3
        'R_Hip', 'R_Knee', 'R_Ankle',           # 6
        'Torso', 'Neck',                        # 8
        'Nose', 'Head',                         # 10
        'L_Shoulder', 'L_Elbow', 'L_Wrist',     # 13
        'R_Shoulder', 'R_Elbow', 'R_Wrist',     # 16
    )
    # EVAL_JOINTS = [10, 8, 14, 15, 16, 11, 12, 13, 1, 2, 3, 4, 5, 6, 0, 7, 9]  # h36m -> 3dhp
    # num_joints = 28
    # half-extent of the 3D bounding volume used to (de)normalize depth, in mm
    bbox_3d_shape = (2000, 2000, 2000)
    # the full 28-joint 3DHP skeleton used in training mode
    joints_name = ('spine3', 'spine4', 'spine2', 'spine', 'pelvis',            # 4
                   'neck', 'head', 'head_top',                                 # 7
                   'left_clavicle', 'left_shoulder', 'left_elbow',             # 10
                   'left_wrist', 'left_hand', 'right_clavicle',                # 13
                   'right_shoulder', 'right_elbow', 'right_wrist',             # 16
                   'right_hand', 'left_hip', 'left_knee',                      # 19
                   'left_ankle', 'left_foot', 'left_toe',                      # 22
                   'right_hip', 'right_knee', 'right_ankle', 'right_foot', 'right_toe')  # 27
    skeleton = ((0, 2), (1, 0), (2, 3), (3, 4),
                (5, 1), (6, 5), (7, 6), (8, 1), (9, 8), (10, 9),
                (11, 10), (12, 11), (13, 1), (14, 13), (15, 14), (16, 15),
                (17, 16), (18, 4), (19, 18), (20, 19), (21, 20), (22, 21),
                (23, 4), (24, 23), (25, 24), (26, 25), (27, 26)
                )
    # NOTE: this second assignment overrides the 28-joint skeleton above;
    # the effective class attribute is this 17-joint bone list
    skeleton = (
        (1, 0), (2, 1), (3, 2),     # 2
        (4, 0), (5, 4), (6, 5),     # 5
        (7, 0), (8, 7),             # 7
        (9, 8), (10, 9),            # 9
        (11, 7), (12, 11), (13, 12),  # 12
        (14, 7), (15, 14), (16, 15),  # 15
    )
    mean_bone_len = None
    # sequence ids of the official test split
    test_seqs = (1, 2, 3, 4, 5, 6)
    # joint-group -> indices (in eval joint order) used for PCK/AUC reporting
    joint_groups = {'Head': [0], 'Neck': [1], 'Shou': [2, 5], 'Elbow': [3, 6], 'Wrist': [4, 7], 'Hip': [8, 11], 'Knee': [9, 12], 'Ankle': [10, 13]}
    # activity_name full name: ('Standing/Walking','Exercising','Sitting','Reaching/Crouching','On The Floor','Sports','Miscellaneous')
    activity_name = ('Stand', 'Exe', 'Sit', 'Reach', 'Floor', 'Sports', 'Miscell')
    # PCK correctness threshold in mm, and AUC sweep thresholds (0..150 mm step 5)
    pck_thres = 150
    auc_thres = list(range(0, 155, 5))
    def __init__(self,
                 cfg,
                 ann_file,
                 root='./data/3dhp',
                 train=True,
                 skip_empty=True,
                 dpg=False,
                 lazy_import=False):
        """Build the sample index and the preprocessing transform.

        Args:
            cfg: experiment config node; DATASET.*, MODEL.* and LOSS entries are read.
            ann_file: split tag, expanded to ``annotation_mpi_inf_3dhp_{ann_file}.json``.
            root: dataset root directory.
            train: training mode (28 joints) vs. test mode (17 eval joints).
            skip_empty: whether to skip images with no valid label.
            dpg: extra augmentation flag forwarded to the transform — TODO confirm semantics.
            lazy_import: allow loading cached ``*_annot_keypoint.pkl`` annotations.
        """
        self._cfg = cfg
        self._ann_file = os.path.join(
            root, f'annotation_mpi_inf_3dhp_{ann_file}.json')
        self._lazy_import = lazy_import
        self._root = root
        self._skip_empty = skip_empty
        self._train = train
        self._dpg = dpg
        # augmentation / model-geometry settings pulled from the config
        self._scale_factor = cfg.DATASET.SCALE_FACTOR
        self._color_factor = cfg.DATASET.COLOR_FACTOR
        self._rot = cfg.DATASET.ROT_FACTOR
        self._input_size = cfg.MODEL.IMAGE_SIZE
        self._output_size = cfg.MODEL.HEATMAP_SIZE
        self._occlusion = cfg.DATASET.OCCLUSION
        self._crop = cfg.MODEL.EXTRA.CROP
        self._sigma = cfg.MODEL.EXTRA.SIGMA
        self._depth_dim = cfg.MODEL.EXTRA.DEPTH_DIM
        self._check_centers = False
        self.num_class = len(self.CLASSES)
        # training uses the full 28-joint skeleton; evaluation uses 17 joints
        self.num_joints = 28 if self._train else 17
        self.num_joints_half_body = cfg.DATASET.NUM_JOINTS_HALF_BODY
        self.prob_half_body = cfg.DATASET.PROB_HALF_BODY
        self.augment = cfg.MODEL.EXTRA.AUGMENT
        self._loss_type = cfg.LOSS['TYPE']
        self.kinematic = cfg.MODEL.EXTRA.get('KINEMATIC', False)
        self.upper_body_ids = (7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
        self.lower_body_ids = (0, 1, 2, 3, 4, 5, 6)
        # NOTE(review): self.transformation is only assigned for the
        # 'simple_smpl_3d' preset; any other preset leaves it undefined and
        # __getitem__ would fail — confirm this is intended.
        if cfg.MODEL.EXTRA.PRESET == 'simple_smpl_3d':
            self.transformation = SimpleTransform3DSMPL(
                self, scale_factor=self._scale_factor,
                color_factor=self._color_factor,
                occlusion=False,
                input_size=self._input_size,
                output_size=self._output_size,
                depth_dim=self._depth_dim,
                bbox_3d_shape=self.bbox_3d_shape,
                rot=self._rot, sigma=self._sigma,
                train=self._train, add_dpg=self._dpg,
                loss_type=self._loss_type, two_d=True)
        # in test mode the joints are reordered by EVAL_JOINTS, so the pelvis /
        # shoulder indices must be remapped into that ordering
        self.root_idx = self.joints_name.index('pelvis') if self._train else self.EVAL_JOINTS.index(self.joints_name.index('pelvis'))
        self.root_idx_17 = 0
        self.lshoulder_idx = self.joints_name.index('left_shoulder') if self._train else self.EVAL_JOINTS.index(self.joints_name.index('left_shoulder'))
        self.rshoulder_idx = self.joints_name.index('right_shoulder') if self._train else self.EVAL_JOINTS.index(self.joints_name.index('right_shoulder'))
        self._items, self._labels = self._lazy_load_json()
    def __getitem__(self, idx):
        """Return ``(image, target_dict, img_id, bbox)`` for sample ``idx``."""
        # get image id
        img_path = self._items[idx]
        img_id = int(self._labels[idx]['img_id'])
        # load ground truth, including bbox, keypoints, image size
        # (deep copy because the transform mutates the label dict)
        label = copy.deepcopy(self._labels[idx])
        # NOTE(review): scipy.misc.imread was deprecated and removed in
        # SciPy >= 1.2, so this requires an old SciPy with Pillow installed —
        # consider imageio.imread / cv2.imread; confirm the pinned environment.
        img = scipy.misc.imread(img_path, mode='RGB')
        # transform ground truth into training label and apply data augmentation
        target = self.transformation(img, label)
        img = target.pop('image')
        bbox = target.pop('bbox')
        return img, target, img_id, bbox
def __len__(self):
return len(self._items)
def _lazy_load_json(self):
if os.path.exists(self._ann_file + '_annot_keypoint.pkl') and self._lazy_import:
print('Lazy load annot...')
with open(self._ann_file + '_annot_keypoint.pkl', 'rb') as fid:
items, labels = pk.load(fid)
else:
items, labels = self._load_jsons()
try:
with open(self._ann_file + '_annot_keypoint.pkl', 'wb') as fid:
pk.dump((items, labels), fid, pk.HIGHEST_PROTOCOL)
except Exception as e:
print(e)
print('Skip writing to .pkl file.')
return items, labels
    def _load_jsons(self):
        """Load all image paths and labels from JSON annotation files into buffer."""
        items = []
        labels = []
        with open(self._ann_file, 'r') as fid:
            database = json.load(fid)
        # iterate through the annotations
        # (assumes images[] and annotations[] are aligned one-to-one — TODO confirm)
        for ann_image, ann_annotations in zip(database['images'], database['annotations']):
            # merge image-level and annotation-level fields into one dict;
            # image-level keys must not collide with each other
            ann = dict()
            for k, v in ann_image.items():
                assert k not in ann.keys()
                ann[k] = v
            for k, v in ann_annotations.items():
                ann[k] = v
            image_id = ann['image_id']
            width, height = ann['width'], ann['height']
            # bbox stored as xywh; convert and clip to the image extent
            xmin, ymin, xmax, ymax = bbox_clip_xyxy(
                bbox_xywh_to_xyxy(ann['bbox']), width, height)
            intrinsic_param = np.array(ann['cam_param']['intrinsic_param'], dtype=np.float32)
            # focal lengths and principal point from the intrinsic matrix
            f = np.array([intrinsic_param[0, 0], intrinsic_param[1, 1]], dtype=np.float32)
            c = np.array([intrinsic_param[0, 2], intrinsic_param[1, 2]], dtype=np.float32)
            joint_cam = np.array(ann['keypoints_cam'])
            joint_img = cam2pixel_matrix(joint_cam, intrinsic_param)
            # make the depth channel root-relative
            joint_img[:, 2] = joint_img[:, 2] - joint_cam[self.root_idx, 2]
            # all joints treated as visible
            joint_vis = np.ones((self.num_joints, 3))
            root_cam = joint_cam[self.root_idx]
            abs_path = os.path.join(self._root, 'mpi_inf_3dhp_{}_set'.format('train' if self._train else 'test'), ann['file_name'])
            items.append(abs_path)
            labels.append({
                'bbox': (xmin, ymin, xmax, ymax),
                'img_id': image_id,
                'img_path': abs_path,
                'img_name': ann['file_name'],
                'width': width,
                'height': height,
                'joint_img': joint_img,
                'joint_vis': joint_vis,
                'joint_cam': joint_cam,
                'root_cam': root_cam,
                'intrinsic_param': intrinsic_param,
                'f': f,
                'c': c
            })
            # activity labels only exist for the test split
            if not self._train:
                labels[-1]['activity_id'] = ann['activity_id']
        return items, labels
@property
def joint_pairs(self):
"""Joint pairs which defines the pairs of joint to be swapped
when the image is flipped horizontally."""
hp3d_joint_pairs = ((8, 13), (9, 14), (10, 15), (11, 16), (12, 17),
(18, 23), (19, 24), (20, 25), (21, 26), (22, 27))
return hp3d_joint_pairs
# return ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13)) # h36m pairs
def _get_box_center_area(self, bbox):
"""Get bbox center"""
c = np.array([(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0])
area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
return c, area
def _get_keypoints_center_count(self, keypoints):
"""Get geometric center of all keypoints"""
keypoint_x = np.sum(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0))
keypoint_y = np.sum(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0))
num = float(np.sum(keypoints[:, 0, 1]))
return np.array([keypoint_x / num, keypoint_y / num]), num
def add_thorax(self, joint_coord):
thorax = (joint_coord[self.lshoulder_idx, :] + joint_coord[self.rshoulder_idx, :]) * 0.5
thorax = thorax.reshape((1, 3))
joint_coord = np.concatenate((joint_coord, thorax), axis=0)
return joint_coord
    def _calc_metric_per_class(self, error, seq_idx_dict):
        """Compute per-class MPJPE, PCK and AUC from a per-sample joint-error matrix.

        Args:
            error: (num_samples, num_joints) array of per-joint errors.
            seq_idx_dict: class id -> list of sample indices in that class.

        Returns:
            Three parallel lists (one entry per class): per-joint MPJPE with the
            overall mean appended, per-joint-group PCK, and per-joint-group AUC
            (each of the latter two with a joint-count-weighted total in the
            last slot).
        """
        seq_mpjpes_list = []
        seq_pck_array_list = []
        seq_auc_array_list = []
        for i in seq_idx_dict.keys():
            # restrict the error matrix to the samples of this class
            seq_error = np.take(error, seq_idx_dict[i], axis=0)
            seq_mpjpes = np.mean(seq_error, axis=0)
            # append the mean over all joints as the final element
            seq_mpjpes = np.concatenate((seq_mpjpes, np.array([np.mean(seq_error)])), 0)
            joint_count = 0
            num_frames = seq_error.shape[0]
            num_joint_groups = len(self.joint_groups.keys())
            num_thres = len(self.auc_thres)
            # calculate pck & auc curve
            # (last row/slot of each array accumulates a joint-count-weighted total)
            seq_pck_curve_array = np.zeros((num_joint_groups + 1, num_thres))
            seq_pck_array = np.zeros((num_joint_groups + 1))
            seq_auc_array = np.zeros((num_joint_groups + 1))
            # transval of joint groups
            for j_idx, j in enumerate(self.joint_groups.keys()):
                seq_jgroup_error = np.take(seq_error, self.joint_groups[j], axis=1)
                # transval of all thresholds
                for t_idx, t in enumerate(self.auc_thres):
                    # fraction of this group's joints under threshold t
                    seq_pck_curve_array[j_idx, t_idx] = np.sum(seq_jgroup_error < t) / (len(self.joint_groups[j]) * num_frames)
                joint_count += len(self.joint_groups[j])
                seq_pck_curve_array[-1, :] += seq_pck_curve_array[j_idx, :] * len(self.joint_groups[j])
                # AUC = mean of the PCK curve over the threshold sweep, in percent
                seq_auc_array[j_idx] = 100 * np.sum(seq_pck_curve_array[j_idx]) / num_thres
                seq_pck_array[j_idx] = 100 * np.sum(seq_jgroup_error < self.pck_thres) / (len(self.joint_groups[j]) * num_frames)
                seq_pck_array[-1] += seq_pck_array[j_idx] * len(self.joint_groups[j])
            # normalize the weighted totals by the number of joints covered
            seq_pck_array[-1] /= joint_count
            seq_pck_curve_array[-1, :] /= joint_count
            seq_auc_array[-1] = 100 * np.sum(seq_pck_curve_array[-1, :]) / num_thres
            seq_mpjpes_list.append(seq_mpjpes)
            seq_pck_array_list.append(seq_pck_array)
            seq_auc_array_list.append(seq_auc_array)
        return seq_mpjpes_list, seq_pck_array_list, seq_auc_array_list
    def evaluate_xyz_17(self, preds, result_dir):
        """Evaluate root-relative 17-joint 3D predictions against the test labels.

        Computes per-sample joint errors (MPJPE, PA-MPJPE, per-axis), prints a
        per-sequence / per-activity MPJPE, PCK and AUC report, writes the
        predictions as JSON to ``result_dir`` (a file path), and returns the
        total MPJPE.
        """
        print('Evaluation start...')
        gts = self._labels
        assert len(gts) == len(preds)
        sample_num = len(gts)
        seq_idx_dict = {k: [] for k in self.test_seqs}
        act_idx_dict = {k: [] for k in range(len(self.activity_name))}
        pred_save = []
        error = np.zeros((sample_num, 17))  # joint error
        error_pa = np.zeros((sample_num, 17))  # joint error
        error_x = np.zeros((sample_num, 17))  # joint error
        error_y = np.zeros((sample_num, 17))  # joint error
        error_z = np.zeros((sample_num, 17))  # joint error
        # error for each sequence
        for n in range(sample_num):
            gt = gts[n]
            img_name = gt['img_name']
            # intrinsic_param = gt['intrinsic_param']
            bbox = gt['bbox']
            gt_3d_root = gt['root_cam']
            gt_3d_kpt = gt['joint_cam']
            # reorder ground truth into the 17-joint evaluation layout
            gt_3d_kpt = np.take(gt_3d_kpt, self.EVAL_JOINTS_17, axis=0)
            # gt_vis = gt['joint_vis']
            # de-normalize prediction back to millimeters
            pred_3d_kpt = preds[n]['xyz_17'].copy() * self.bbox_3d_shape[2]
            # root joint alignment
            pred_3d_kpt = pred_3d_kpt - pred_3d_kpt[self.root_idx_17]
            gt_3d_kpt = gt_3d_kpt - gt_3d_kpt[self.root_idx_17]
            # if self.protocol == 1:
            #     # rigid alignment for PA MPJPE (protocol #1)
            pred_3d_kpt_pa = reconstruction_error(pred_3d_kpt, gt_3d_kpt)
            # hard-coded switch: PA alignment is computed for error_pa only,
            # not substituted into the main error
            align = False
            if align:
                pred_3d_kpt = pred_3d_kpt_pa
            # exclude thorax
            # pred_3d_kpt = np.take(pred_3d_kpt, self.EVAL_JOINTS, axis=0)
            # pred_3d_kpt_pa = np.take(pred_3d_kpt_pa, self.EVAL_JOINTS, axis=0)
            # gt_3d_kpt = np.take(gt_3d_kpt, self.EVAL_JOINTS, axis=0)
            # error calculate
            error[n] = np.sqrt(np.sum((pred_3d_kpt - gt_3d_kpt)**2, 1))
            error_pa[n] = np.sqrt(np.sum((pred_3d_kpt_pa - gt_3d_kpt)**2, 1))
            error_x[n] = np.abs(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])
            error_y[n] = np.abs(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])
            error_z[n] = np.abs(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])
            # record idx per seq or act
            # sequence id is the digit in the third-from-last path component
            # (e.g. '.../TS1/...'); assumes that fixed layout — TODO confirm
            seq_id = int(img_name.split('/')[-3][2])
            seq_idx_dict[seq_id].append(n)
            act_idx_dict[int(gt['activity_id']) - 1].append(n)
            img_name = gt['img_path']
            # prediction save
            pred_save.append({'img_name': img_name, 'joint_cam': pred_3d_kpt.tolist(
            ), 'bbox': [float(_) for _ in bbox], 'root_cam': gt_3d_root.tolist()})  # joint_cam is root-relative coordinate
        # total error
        tot_err = np.mean(error)
        tot_err_pa = np.mean(error_pa)
        tot_err_x = np.mean(error_x)
        tot_err_y = np.mean(error_y)
        tot_err_z = np.mean(error_z)
        # NOTE(review): the ':2f' specs below mean width-2 fixed-point (default
        # 6 decimals), not 2 decimals; only the y term uses ':.2f'. Probably all
        # were meant to be ':.2f' — confirm before changing the report format.
        eval_summary = f'PA MPJPE >> tot: {tot_err_pa:2f}; MPJPE >> tot: {tot_err:2f}, x: {tot_err_x:2f}, y: {tot_err_y:.2f}, z: {tot_err_z:2f}\n'
        seq_mpjpes_list, seq_pck_array_list, seq_auc_array_list = self._calc_metric_per_class(error, seq_idx_dict)
        act_mpjpes_list, act_pck_array_list, act_auc_array_list = self._calc_metric_per_class(error, act_idx_dict)
        all_mpjpes_list, all_pck_array_list, all_auc_array_list = self._calc_metric_per_class(error, {0: list(range(sample_num))})
        # Summary mpjpe per sequence
        eval_summary += '#' * 10 + 'MPJPE per sequence\n'
        # eval_summary += ''.join(['MPJPE\t'] + [self.joints_name[j] + ' ' for j in self.EVAL_JOINTS_17] + ['Average\n'])
        total_mpjpe = 0
        for i_idx, i in enumerate(self.test_seqs):
            eval_summary += ''.join([f'TS{i}\t'] + ['{:.2f}\t'.format(seq_mpjpes_list[i_idx][j]) for j in range(seq_mpjpes_list[i_idx].shape[0])] + ['\n'])
            total_mpjpe += seq_mpjpes_list[i_idx][-1]
        total_mpjpe /= len(self.test_seqs)
        eval_summary += f'Avg MPJPE >> tot: {total_mpjpe:2f}\n'
        # Summary pck per sequence
        eval_summary += '#' * 10 + 'PCK per sequence\n'
        # eval_summary += ''.join(['PCK\t'] + [k + '\t' for k in self.joint_groups.keys()] + ['Total\n'])
        total_pck = 0
        for i_idx, i in enumerate(self.test_seqs):
            eval_summary += ''.join([f'TS{i}\t'] + ['{:.2f}\t'.format(seq_pck_array_list[i_idx][j]) for j in range(seq_pck_array_list[i_idx].shape[0])] + ['\n'])
            total_pck += seq_pck_array_list[i_idx][-1]
        total_pck /= len(self.test_seqs)
        eval_summary += f'Avg PCK >> tot: {total_pck:2f}\n'
        # Summary auc per sequence
        eval_summary += '#' * 10 + 'AUC per sequence\n'
        # eval_summary += ''.join(['AUC\t'] + [k + '\t' for k in self.joint_groups.keys()] + ['Total\n'])
        total_auc = 0
        for i_idx, i in enumerate(self.test_seqs):
            eval_summary += ''.join([f'TS{i}\t'] + ['{:.2f}\t'.format(seq_auc_array_list[i_idx][j]) for j in range(seq_auc_array_list[i_idx].shape[0])] + ['\n'])
            total_auc += seq_auc_array_list[i_idx][-1]
        total_auc /= len(self.test_seqs)
        eval_summary += f'Avg AUC >> tot: {total_auc:2f}\n'
        # Summary mpjpe per action
        eval_summary += '#' * 10 + 'MPJPE per action\n'
        # eval_summary += ''.join(['MPJPE\t'] + [self.joints_name[j] + ' ' for j in self.EVAL_JOINTS_17] + ['Average\n'])
        total_mpjpe = 0
        for i_idx, i in enumerate(self.activity_name):
            eval_summary += ''.join([f'{i}\t'] + ['{:.2f}\t'.format(act_mpjpes_list[i_idx][j]) for j in range(act_mpjpes_list[i_idx].shape[0])] + ['\n'])
            total_mpjpe += act_mpjpes_list[i_idx][-1]
        total_mpjpe /= len(self.activity_name)
        eval_summary += ''.join(['All\t'] + ['{:.2f}\t'.format(all_mpjpes_list[0][j]) for j in range(all_mpjpes_list[0].shape[0])] + ['\n'])
        eval_summary += f'Avg MPJPE >> tot: {total_mpjpe:2f}\n'
        # Summary pck per action
        eval_summary += '#' * 10 + 'PCK per action\n'
        # eval_summary += ''.join(['PCK\t'] + [k + '\t' for k in self.joint_groups.keys()] + ['Total\n'])
        total_pck = 0
        for i_idx, i in enumerate(self.activity_name):
            eval_summary += ''.join([f'{i}\t'] + ['{:.2f}\t'.format(act_pck_array_list[i_idx][j]) for j in range(act_pck_array_list[i_idx].shape[0])] + ['\n'])
            total_pck += act_pck_array_list[i_idx][-1]
        total_pck /= len(self.activity_name)
        eval_summary += ''.join(['All\t'] + ['{:.2f}\t'.format(all_pck_array_list[0][j]) for j in range(all_pck_array_list[0].shape[0])] + ['\n'])
        eval_summary += f'Avg PCK >> tot: {total_pck:2f}\n'
        # Summary auc per action
        eval_summary += '#' * 10 + 'AUC per action\n'
        # eval_summary += ''.join(['AUC\t'] + [k + '\t' for k in self.joint_groups.keys()] + ['Total\n'])
        total_auc = 0
        for i_idx, i in enumerate(self.activity_name):
            eval_summary += ''.join([f'{i}\t'] + ['{:.2f}\t'.format(act_auc_array_list[i_idx][j]) for j in range(act_auc_array_list[i_idx].shape[0])] + ['\n'])
            total_auc += act_auc_array_list[i_idx][-1]
        total_auc /= len(self.activity_name)
        eval_summary += ''.join(['All\t'] + ['{:.2f}\t'.format(all_auc_array_list[0][j]) for j in range(all_auc_array_list[0].shape[0])] + ['\n'])
        eval_summary += f'Avg AUC >> tot: {total_auc:2f}\n'
        print(eval_summary)
        # prediction save
        with open(result_dir, 'w') as f:
            json.dump(pred_save, f)
        print("Test result is saved at " + result_dir)
        return tot_err
def evaluate(self, preds, result_dir):
    """Evaluate 3D pose predictions against the dataset labels.

    Each prediction is restored to camera coordinates, root-aligned,
    and compared with the ground truth.  MPJPE, PA-MPJPE, PCK, and AUC
    are reported overall, per test sequence, and per activity; the
    per-sample predictions are written to ``result_dir`` as JSON.

    Args:
        preds: per-sample (num_joints, 3) arrays of (u, v, normalized
            depth) predictions, ordered like ``self._labels``.
        result_dir: path of the output JSON file.

    Returns:
        float: mean MPJPE over all samples and evaluated joints.
    """
    print('Evaluation start...')
    gts = self._labels
    assert len(gts) == len(preds)
    sample_num = len(gts)
    # bucket sample indices by test sequence and by activity
    seq_idx_dict = {k: [] for k in self.test_seqs}
    act_idx_dict = {k: [] for k in range(len(self.activity_name))}
    pred_save = []
    # per-sample, per-joint errors: Euclidean, Procrustes-aligned, per axis
    error = np.zeros((sample_num, len(self.EVAL_JOINTS)))
    error_pa = np.zeros((sample_num, len(self.EVAL_JOINTS)))
    error_x = np.zeros((sample_num, len(self.EVAL_JOINTS)))
    error_y = np.zeros((sample_num, len(self.EVAL_JOINTS)))
    error_z = np.zeros((sample_num, len(self.EVAL_JOINTS)))
    for n in range(sample_num):
        gt = gts[n]
        img_name = gt['img_name']
        intrinsic_param = gt['intrinsic_param']
        bbox = gt['bbox']
        gt_3d_root = gt['root_cam']
        gt_3d_kpt = gt['joint_cam']
        # restore the depth channel to camera space (x/y stay in pixels)
        pred_2d_kpt = preds[n].copy()
        pred_2d_kpt[:, 2] = pred_2d_kpt[:, 2] * self.bbox_3d_shape[0] + gt_3d_root[2]
        # back-project to the camera coordinate system
        pred_3d_kpt = pixel2cam_matrix(pred_2d_kpt, intrinsic_param)
        # root joint alignment
        pred_3d_kpt = pred_3d_kpt - pred_3d_kpt[self.root_idx]
        gt_3d_kpt = gt_3d_kpt - gt_3d_kpt[self.root_idx]
        # rigid (Procrustes) alignment for the PA-MPJPE metric
        pred_3d_kpt_pa = reconstruction_error(pred_3d_kpt, gt_3d_kpt)
        error[n] = np.sqrt(np.sum((pred_3d_kpt - gt_3d_kpt)**2, 1))
        error_pa[n] = np.sqrt(np.sum((pred_3d_kpt_pa - gt_3d_kpt)**2, 1))
        error_x[n] = np.abs(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])
        error_y[n] = np.abs(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])
        error_z[n] = np.abs(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])
        # record this sample's index under its sequence and activity;
        # the sequence id is the digit parsed from the directory name
        # (e.g. '.../TS3/...') — TODO confirm against the dataset layout
        seq_id = int(img_name.split('/')[-3][2])
        seq_idx_dict[seq_id].append(n)
        act_idx_dict[int(gt['activity_id']) - 1].append(n)
        img_name = gt['img_path']
        # joint_cam is a root-relative coordinate
        pred_save.append({'img_name': img_name, 'joint_cam': pred_3d_kpt.tolist(
        ), 'bbox': [float(_) for _ in bbox], 'root_cam': gt_3d_root.tolist()})
    # overall errors averaged over samples and joints
    tot_err = np.mean(error)
    tot_err_pa = np.mean(error_pa)
    tot_err_x = np.mean(error_x)
    tot_err_y = np.mean(error_y)
    tot_err_z = np.mean(error_z)
    # NOTE: ':.2f' fixes the earlier ':2f' specs, which meant
    # "minimum field width 2" and printed six decimal places.
    eval_summary = f'PA MPJPE >> tot: {tot_err_pa:.2f}; MPJPE >> tot: {tot_err:.2f}, x: {tot_err_x:.2f}, y: {tot_err_y:.2f}, z: {tot_err_z:.2f}\n'
    seq_mpjpes_list, seq_pck_array_list, seq_auc_array_list = self._calc_metric_per_class(error, seq_idx_dict)
    act_mpjpes_list, act_pck_array_list, act_auc_array_list = self._calc_metric_per_class(error, act_idx_dict)
    all_mpjpes_list, all_pck_array_list, all_auc_array_list = self._calc_metric_per_class(error, {0: list(range(sample_num))})
    # ---- MPJPE per sequence ----
    eval_summary += '#' * 10 + 'MPJPE per sequence\n'
    eval_summary += ''.join(['MPJPE\t'] + [self.joints_name[j] + ' ' for j in self.EVAL_JOINTS] + ['Average\n'])
    total_mpjpe = 0
    for i_idx, i in enumerate(self.test_seqs):
        eval_summary += ''.join([f'TS{i}\t'] + ['{:.2f}\t'.format(seq_mpjpes_list[i_idx][j]) for j in range(seq_mpjpes_list[i_idx].shape[0])] + ['\n'])
        total_mpjpe += seq_mpjpes_list[i_idx][-1]
    total_mpjpe /= len(self.test_seqs)
    eval_summary += f'Avg MPJPE >> tot: {total_mpjpe:.2f}\n'
    # ---- PCK per sequence ----
    eval_summary += '#' * 10 + 'PCK per sequence\n'
    eval_summary += ''.join(['PCK\t'] + [k + '\t' for k in self.joint_groups.keys()] + ['Total\n'])
    total_pck = 0
    for i_idx, i in enumerate(self.test_seqs):
        eval_summary += ''.join([f'TS{i}\t'] + ['{:.2f}\t'.format(seq_pck_array_list[i_idx][j]) for j in range(seq_pck_array_list[i_idx].shape[0])] + ['\n'])
        total_pck += seq_pck_array_list[i_idx][-1]
    total_pck /= len(self.test_seqs)
    eval_summary += f'Avg PCK >> tot: {total_pck:.2f}\n'
    # ---- AUC per sequence ----
    eval_summary += '#' * 10 + 'AUC per sequence\n'
    eval_summary += ''.join(['AUC\t'] + [k + '\t' for k in self.joint_groups.keys()] + ['Total\n'])
    total_auc = 0
    for i_idx, i in enumerate(self.test_seqs):
        eval_summary += ''.join([f'TS{i}\t'] + ['{:.2f}\t'.format(seq_auc_array_list[i_idx][j]) for j in range(seq_auc_array_list[i_idx].shape[0])] + ['\n'])
        total_auc += seq_auc_array_list[i_idx][-1]
    total_auc /= len(self.test_seqs)
    eval_summary += f'Avg AUC >> tot: {total_auc:.2f}\n'
    # ---- MPJPE per action ----
    eval_summary += '#' * 10 + 'MPJPE per action\n'
    eval_summary += ''.join(['MPJPE\t'] + [self.joints_name[j] + ' ' for j in self.EVAL_JOINTS] + ['Average\n'])
    total_mpjpe = 0
    for i_idx, i in enumerate(self.activity_name):
        eval_summary += ''.join([f'{i}\t'] + ['{:.2f}\t'.format(act_mpjpes_list[i_idx][j]) for j in range(act_mpjpes_list[i_idx].shape[0])] + ['\n'])
        total_mpjpe += act_mpjpes_list[i_idx][-1]
    total_mpjpe /= len(self.activity_name)
    eval_summary += ''.join(['All\t'] + ['{:.2f}\t'.format(all_mpjpes_list[0][j]) for j in range(all_mpjpes_list[0].shape[0])] + ['\n'])
    eval_summary += f'Avg MPJPE >> tot: {total_mpjpe:.2f}\n'
    # ---- PCK per action ----
    eval_summary += '#' * 10 + 'PCK per action\n'
    eval_summary += ''.join(['PCK\t'] + [k + '\t' for k in self.joint_groups.keys()] + ['Total\n'])
    total_pck = 0
    for i_idx, i in enumerate(self.activity_name):
        eval_summary += ''.join([f'{i}\t'] + ['{:.2f}\t'.format(act_pck_array_list[i_idx][j]) for j in range(act_pck_array_list[i_idx].shape[0])] + ['\n'])
        total_pck += act_pck_array_list[i_idx][-1]
    total_pck /= len(self.activity_name)
    eval_summary += ''.join(['All\t'] + ['{:.2f}\t'.format(all_pck_array_list[0][j]) for j in range(all_pck_array_list[0].shape[0])] + ['\n'])
    eval_summary += f'Avg PCK >> tot: {total_pck:.2f}\n'
    # ---- AUC per action ----
    eval_summary += '#' * 10 + 'AUC per action\n'
    eval_summary += ''.join(['AUC\t'] + [k + '\t' for k in self.joint_groups.keys()] + ['Total\n'])
    total_auc = 0
    for i_idx, i in enumerate(self.activity_name):
        eval_summary += ''.join([f'{i}\t'] + ['{:.2f}\t'.format(act_auc_array_list[i_idx][j]) for j in range(act_auc_array_list[i_idx].shape[0])] + ['\n'])
        total_auc += act_auc_array_list[i_idx][-1]
    total_auc /= len(self.activity_name)
    eval_summary += ''.join(['All\t'] + ['{:.2f}\t'.format(all_auc_array_list[0][j]) for j in range(all_auc_array_list[0].shape[0])] + ['\n'])
    eval_summary += f'Avg AUC >> tot: {total_auc:.2f}\n'
    print(eval_summary)
    # save the per-sample predictions
    with open(result_dir, 'w') as f:
        json.dump(pred_save, f)
    print("Test result is saved at " + result_dir)
    return tot_err
| {
"alphanum_fraction": 0.5666935004,
"author": null,
"avg_line_length": 47.7775919732,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a9b9ea59ba46b50c243767cde4912df019b26e9f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 30,
"max_forks_repo_forks_event_max_datetime": "2022-03-09T06:06:28.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-03-04T07:18:03.000Z",
"max_forks_repo_head_hexsha": "ae1bc3cea0cc5aa98fb512eeb295c3478b0c598f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pengyun1314123/HybrIK",
"max_forks_repo_path": "hybrik/datasets/hp3d.py",
"max_issues_count": 62,
"max_issues_repo_head_hexsha": "ae1bc3cea0cc5aa98fb512eeb295c3478b0c598f",
"max_issues_repo_issues_event_max_datetime": "2022-03-15T11:55:58.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-01-08T02:06:47.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pengyun1314123/HybrIK",
"max_issues_repo_path": "hybrik/datasets/hp3d.py",
"max_line_length": 161,
"max_stars_count": 287,
"max_stars_repo_head_hexsha": "aff6aeda06e627fc48f7d7c2bffb2245393d7584",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "uyoung-jeong/HybrIK",
"max_stars_repo_path": "hybrik/datasets/hp3d.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T16:03:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-30T12:45:20.000Z",
"num_tokens": 8426,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 28571
} |
# coding: utf-8
# # Assignment 3: Recommendation systems
#
# Here we'll implement a content-based recommendation algorithm.
# It will use the list of genres for a movie as the content.
# The data come from the MovieLens project: http://grouplens.org/datasets/movielens/
# Note that I have not provided many doctests for this one. I strongly
# recommend that you write your own for each function to ensure your
# implementation is correct.
# Please only use these imports.
from collections import Counter, defaultdict
import math
import numpy as np
import os
import pandas as pd
import re
from scipy.sparse import csr_matrix
import urllib.request
import zipfile
def download_data():
    """ DONE. Download and unzip data.
    """
    url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'
    archive = 'ml-latest-small.zip'
    urllib.request.urlretrieve(url, archive)
    # context manager guarantees the archive handle is closed
    with zipfile.ZipFile(archive) as zfile:
        zfile.extractall()
def tokenize_string(my_string):
    """ DONE. You should use this in your tokenize function.

    Lower-cases the input and returns its tokens, where a token is a
    maximal run of word characters and hyphens (so 'Sci-Fi' stays one
    token).
    """
    # Raw string: '\w' is an invalid escape in a plain string literal
    # and triggers a DeprecationWarning on modern Python.
    return re.findall(r'[\w\-]+', my_string.lower())
def tokenize(movies):
    """
    Append a new column to the movies DataFrame with header 'tokens'.
    This will contain a list of strings, one per token, extracted
    from the 'genre' field of each movie. Use the tokenize_string method above.
    Note: you may modify the movies parameter directly; no need to make
    a new copy.
    Params:
      movies...The movies DataFrame
    Returns:
      The movies DataFrame, augmented to include a new column called 'tokens'.
    >>> movies = pd.DataFrame([[123, 'Horror|Romance'], [456, 'Sci-Fi']], columns=['movieId', 'genres'])
    >>> movies = tokenize(movies)
    >>> movies['tokens'].tolist()
    [['horror', 'romance'], ['sci-fi']]
    """
    # One token list per movie, derived from the pipe-separated genres.
    # (Replaces an append loop that was followed by an unreachable `pass`.)
    movies['tokens'] = [tokenize_string(genres) for genres in movies['genres']]
    return movies
def featurize(movies):
    """
    Append a new column to the movies DataFrame with header 'features'.
    Each row will contain a csr_matrix of shape (1, num_features). Each
    entry in this matrix will contain the tf-idf value of the term, as
    defined in class:
    tfidf(i, d) := tf(i, d) / max_k tf(k, d) * log10(N/df(i))
    where:
    i is a term
    d is a document (movie)
    tf(i, d) is the frequency of term i in document d
    max_k tf(k, d) is the maximum frequency of any term in document d
    N is the number of documents (movies)
    df(i) is the number of unique documents containing term i
    Params:
      movies...The movies DataFrame
    Returns:
      A tuple containing:
      - The movies DataFrame, which has been modified to include a column named 'features'.
      - The vocab, a dict from term to int. Make sure the vocab is sorted alphabetically as in a2 (e.g., {'aardvark': 0, 'boy': 1, ...})
    """
    N = len(movies)
    # df(i) must count *unique documents* containing term i, so collapse
    # each token list to a set first (the old code counted every
    # occurrence, inflating df for terms repeated within one document).
    df = Counter()
    for tokens in movies['tokens']:
        df.update(set(tokens))
    # alphabetical vocabulary: term -> column index
    vocab = {term: idx for idx, term in enumerate(sorted(df))}
    num_features = len(vocab)
    feature_list = []
    for tokens in movies['tokens']:
        tf = Counter(tokens)  # computed once per document, not per token
        max_k = tf.most_common(1)[0][1]  # highest term frequency in this doc
        row = [0.0] * num_features
        for token, count in tf.items():
            row[vocab[token]] = count / max_k * math.log10(N / df[token])
        feature_list.append(csr_matrix([row]))
    movies['features'] = feature_list
    return movies, vocab
def train_test_split(ratings):
    """DONE.
    Returns a random split of the ratings matrix into a training and testing set.
    """
    # every 1000th row goes to the test split; the rest is training data
    n = len(ratings)
    test_idx = sorted(range(0, n, 1000))
    train_idx = sorted(set(range(n)).difference(test_idx))
    return ratings.iloc[train_idx], ratings.iloc[test_idx]
def cosine_sim(a, b):
    """
    Compute the cosine similarity between two 1-d csr_matrices.
    Each matrix represents the tf-idf feature vector of a movie.
    Params:
      a...A csr_matrix with shape (1, number_features)
      b...A csr_matrix with shape (1, number_features)
    Returns:
      A float. The cosine similarity, defined as: dot(a, b) / ||a|| * ||b||
      where ||a|| indicates the Euclidean norm (aka L2 norm) of vector a.
    """
    # Work directly on the sparse matrices instead of densifying them;
    # also fixes the misspelled 'lenght_*' locals and removes the dead
    # `pass` that followed the return.
    norm_a = np.sqrt(a.multiply(a).sum())
    norm_b = np.sqrt(b.multiply(b).sum())
    dot = a.multiply(b).sum()
    return dot / (norm_a * norm_b)
def make_predictions(movies, ratings_train, ratings_test):
    """
    Using the ratings in ratings_train, predict the ratings for each
    row in ratings_test.
    To predict the rating of user u for movie i: Compute the weighted average
    rating for every other movie that u has rated. Restrict this weighted
    average to movies that have a positive cosine similarity with movie
    i. The weight for movie m corresponds to the cosine similarity between m
    and i.
    If there are no other movies with positive cosine similarity to use in the
    prediction, use the mean rating of the target user in ratings_train as the
    prediction.
    Params:
      movies..........The movies DataFrame.
      ratings_train...The subset of ratings used for making predictions. These are the "historical" data.
      ratings_test....The subset of ratings that need to predicted. These are the "future" data.
    Returns:
      A numpy array containing one predicted rating for each element of ratings_test.
    """
    predictions = []
    for _, row in ratings_test.iterrows():
        # all historical ratings by this user (computed once per test row)
        user_ratings = ratings_train[ratings_train.userId == row.userId]
        target_feats = movies.features[movies.movieId == row.movieId].values[0]
        weighted_sum = 0.0
        weight_total = 0.0
        for _, rated in user_ratings.iterrows():
            feats = movies.features[movies.movieId == rated.movieId].values[0]
            sim = cosine_sim(target_feats, feats)
            # only strictly positive similarities carry weight, as documented
            # (a zero similarity contributed nothing in the old >= 0 test)
            if sim > 0:
                weight_total += sim
                weighted_sum += rated.rating * sim
        if weight_total != 0:
            predictions.append(weighted_sum / weight_total)
        else:
            # no positively-similar movie: fall back to the user's mean rating
            predictions.append(np.mean(user_ratings['rating'].values))
    # return an ndarray, matching the documented contract (was a list)
    return np.array(predictions)
def mean_absolute_error(predictions, ratings_test):
    """DONE.
    Return the mean absolute error of the predictions.
    """
    actual = np.array(ratings_test.rating)
    return np.mean(np.abs(predictions - actual))
def main():
    """Download the MovieLens data, build features, and report test error."""
    download_data()
    data_dir = 'ml-latest-small'
    ratings = pd.read_csv(os.path.join(data_dir, 'ratings.csv'))
    movies = pd.read_csv(os.path.join(data_dir, 'movies.csv'))
    movies = tokenize(movies)
    movies, vocab = featurize(movies)
    print('vocab:')
    print(sorted(vocab.items())[:10])
    ratings_train, ratings_test = train_test_split(ratings)
    print('%d training ratings; %d testing ratings' % (len(ratings_train), len(ratings_test)))
    predictions = make_predictions(movies, ratings_train, ratings_test)
    print('error=%f' % mean_absolute_error(predictions, ratings_test))
    print(predictions[:10])
if __name__ == '__main__':
main() | {
"alphanum_fraction": 0.6616463256,
"author": null,
"avg_line_length": 35.3302752294,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1ee563c4b475b96348e89866ec4cc9b366439042",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a69ec2b0dc636c7f76cb6c40ede7b25af09ffa3f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tina31726/Social-Network-Analysis",
"max_forks_repo_path": "a3/a3.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a69ec2b0dc636c7f76cb6c40ede7b25af09ffa3f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tina31726/Social-Network-Analysis",
"max_issues_repo_path": "a3/a3.py",
"max_line_length": 136,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a69ec2b0dc636c7f76cb6c40ede7b25af09ffa3f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tina31726/Social-Network-Analysis",
"max_stars_repo_path": "a3/a3.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1870,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7702
} |
# Copyright (c) 2009-2022 The Regents of the University of Michigan.
# Part of HOOMD-blue, released under the BSD 3-Clause License.
"""Angle potentials."""
from hoomd.md import _md
from hoomd.md.force import Force
from hoomd.data.typeparam import TypeParameter
from hoomd.data.parameterdicts import TypeParameterDict
import hoomd
import numpy
class Angle(Force):
    """Constructs the angular bond potential.

    Note:
        :py:class:`Angle` is the base class for all angular potentials.
        Users should not instantiate this class directly.
    """

    def __init__(self):
        super().__init__()

    def _attach(self):
        sim = self._simulation
        # warn when the simulation state defines no angles at all
        if sim.state._cpp_sys_def.getAngleData().getNGlobal() == 0:
            sim.device._cpp_msg.warning("No angles are defined.\n")
        # select the CPU or GPU flavour of the C++ mirror class
        suffix = "" if isinstance(sim.device, hoomd.device.CPU) else "GPU"
        cpp_cls = getattr(_md, self._cpp_class_name + suffix)
        self._cpp_obj = cpp_cls(sim.state._cpp_sys_def)
        super()._attach()
class Harmonic(Angle):
    r"""Harmonic angle potential.

    :py:class:`Harmonic` applies a harmonic potential energy to
    every triplet of particles with an angle specified between them.

    .. math::

        V(\theta) = \frac{1}{2} k \left( \theta - \theta_0 \right)^2

    where :math:`\theta` is the angle between the triplet of particles.

    Attributes:
        params (TypeParameter[``angle type``, dict]):
            The parameter of the harmonic bonds for each particle type.
            The dictionary has the following keys:

            * ``k`` (`float`, **required**) - potential constant :math:`k`
              :math:`[\mathrm{energy} \cdot \mathrm{radians}^{-2}]`
            * ``t0`` (`float`, **required**) - rest angle :math:`\theta_0`
              :math:`[\mathrm{radians}]`

    Examples::

        harmonic = angle.Harmonic()
        harmonic.params['polymer'] = dict(k=3.0, t0=0.7851)
        harmonic.params['backbone'] = dict(k=100.0, t0=1.0)
    """
    _cpp_class_name = 'HarmonicAngleForceCompute'

    def __init__(self):
        super().__init__()
        # per-angle-type parameters: spring constant k and rest angle t0
        self._add_typeparam(
            TypeParameter('params', 'angle_types',
                          TypeParameterDict(k=float, t0=float, len_keys=1)))
class CosineSquared(Angle):
    r"""Cosine squared angle potential.

    :py:class:`CosineSquared` applies a cosine squared potential energy
    to every triplet of particles with an angle specified between them.

    .. math::

        V(\theta) = \frac{1}{2} k \left( \cos\theta - \cos\theta_0 \right)^2

    where :math:`\theta` is the angle between the triplet of particles.

    This angle style is also known as g96, since these angles were used in
    the gromos96 force field. They are also the angle type used by the
    coarse-grained MARTINI force field.

    Attributes:
        params (TypeParameter[``angle type``, dict]):
            The parameter of the harmonic bonds for each particle type.
            The dictionary has the following keys:

            * ``k`` (`float`, **required**) - potential constant :math:`k`
              :math:`[\mathrm{energy}]`
            * ``t0`` (`float`, **required**) - rest angle :math:`\theta_0`
              :math:`[\mathrm{radians}]`

    Parameters :math:`k` and :math:`\theta_0` must be set for each type of
    angle in the simulation. Note that the value of :math:`k` for this angle
    potential is not comparable to the value of :math:`k` for harmonic angles,
    as they have different units.

    Examples::

        cosinesq = angle.CosineSquared()
        cosinesq.params['polymer'] = dict(k=3.0, t0=0.7851)
        cosinesq.params['backbone'] = dict(k=100.0, t0=1.0)
    """
    _cpp_class_name = 'CosineSqAngleForceCompute'

    def __init__(self):
        super().__init__()
        # per-angle-type parameters: constant k and rest angle t0
        self._add_typeparam(
            TypeParameter('params', 'angle_types',
                          TypeParameterDict(k=float, t0=float, len_keys=1)))
class Table(Angle):
    """Tabulated bond potential.

    Args:
        width (int): Number of points in the table.

    `Table` computes a user-defined potential and force applied to each angle.

    The torque :math:`\\tau` is:

    .. math::

        \\tau(\\theta) = \\tau_\\mathrm{table}(\\theta)

    and the potential :math:`V(\\theta)` is:

    .. math::

        V(\\theta) = V_\\mathrm{table}(\\theta)

    where :math:`\\theta` is the angle between the vectors
    :math:`\\vec{r}_A - \\vec{r}_B` and :math:`\\vec{r}_C - \\vec{r}_B` for
    particles A,B,C in the angle.

    Provide :math:`\\tau_\\mathrm{table}(\\theta)` and
    :math:`V_\\mathrm{table}(\\theta)` on evenly spaced grid points
    in the range :math:`\\theta \\in [0,\\pi]`. `Table` linearly
    interpolates values when :math:`\\theta` lies between grid points. The
    torque must be specified commensurate with the potential: :math:`\\tau =
    -\\frac{\\partial V}{\\partial \\theta}`.

    Attributes:
        params (`TypeParameter` [``angle type``, `dict`]):
            The potential parameters. The dictionary has the following keys:

            * ``V`` ((*width*,) `numpy.ndarray` of `float`, **required**) -
              the tabulated energy values :math:`[\\mathrm{energy}]`. Must have
              a size equal to `width`.
            * ``tau`` ((*width*,) `numpy.ndarray` of `float`, **required**) -
              the tabulated torque values :math:`[\\mathrm{force} \\cdot
              \\mathrm{length}]`. Must have a size equal to `width`.

        width (int): Number of points in the table.
    """

    def __init__(self, width):
        super().__init__()
        # expose ``width`` as a validated instance parameter
        param_dict = hoomd.data.parameterdicts.ParameterDict(width=int)
        param_dict['width'] = width
        self._param_dict = param_dict
        # per-angle-type tabulated energy (V) and torque (tau) arrays
        validator = hoomd.data.typeconverter.NDArrayValidator
        self._add_typeparam(
            TypeParameter(
                "params", "angle_types",
                TypeParameterDict(V=validator(numpy.float64),
                                  tau=validator(numpy.float64),
                                  len_keys=1)))

    def _attach(self):
        """Create the c++ mirror class."""
        device = self._simulation.device
        cpp_cls = (_md.TableAngleForceCompute
                   if isinstance(device, hoomd.device.CPU)
                   else _md.TableAngleForceComputeGPU)
        self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, self.width)
        Force._attach(self)
| {
"alphanum_fraction": 0.6224290647,
"author": null,
"avg_line_length": 33.9846938776,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4742db2c2633c5311b9eb053787f8fb1f631b912",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "robert-mijakovic/hoomd-blue",
"max_forks_repo_path": "hoomd/md/angle.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "robert-mijakovic/hoomd-blue",
"max_issues_repo_path": "hoomd/md/angle.py",
"max_line_length": 80,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "robert-mijakovic/hoomd-blue",
"max_stars_repo_path": "hoomd/md/angle.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1669,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6661
} |
# -*- coding: utf-8 -*-
"""example_depletion
A case that shows how the depletion is carried out.
Created on Mon Oct 11 21:30:00 2021 @author: Dan Kotlyar
Last updated on Mon Oct 11 21:45:00 2021 @author: Dan Kotlyar
"""
import numpy as np
from pyIsoDep.functions.maindepletionsolver import MainDepletion
from pyIsoDep.functions.generatedata import TransmutationData
from pyIsoDep.functions.read_csv import ReadCsv
from pyIsoDep.functions.postprocessresults import Results
from pyIsoDep.functions.xsinterface import XsInterface
from datetime import datetime
start_time = datetime.now()  # wall-clock start time of the example run

FY_WGT = 1.0  # fission-yield weighting (defined for reference; ReadData below uses wgtFY=1.0 directly)
VOL = 332097.8  # cm^3 (defined for reference; not used later in this script)
# -----------------------------------------------------------------------------
# DATA GENERATION
# -----------------------------------------------------------------------------
# Build one TransmutationData set per operating condition. Each CSV is read
# into a table whose columns are indexed as: 0 = isotope ID, 2 = sig_c,
# 3 = sig_f, 4 = sig_a (cross sections given in barns, hence flagBarns=True).
# NOTE(review): CSV paths are relative — assumes the script is run from the
# directory that contains the data files; confirm before relocating.
xsTable, fields = ReadCsv("./bootstrap.csv")
bootstrap = TransmutationData(libraryFlag=True, wgtFY=1.0)
bootstrap.ReadData(ID=xsTable[:, 0], sig_f=xsTable[:, 3], sig_c=xsTable[:, 2],
                   sig_a=xsTable[:, 4], flagBarns=True)
xsTable, fields = ReadCsv("./tempramp.csv")
tempramp = TransmutationData(libraryFlag=True, wgtFY=1.0)
tempramp.ReadData(ID=xsTable[:, 0], sig_f=xsTable[:, 3], sig_c=xsTable[:, 2],
                  sig_a=xsTable[:, 4], flagBarns=True)
xsTable, fields = ReadCsv("./fullthrust.csv")
fullthrust = TransmutationData(libraryFlag=True, wgtFY=1.0)
fullthrust.ReadData(ID=xsTable[:, 0], sig_f=xsTable[:, 3], sig_c=xsTable[:, 2],
                    sig_a=xsTable[:, 4], flagBarns=True)
# -------------------------------------------------------------------------
# XS Interface (Linear Interpolation/Extrp.)
# -------------------------------------------------------------------------
# 1-D interpolation over a single state variable (the states 500/600/700
# are presumably temperatures — verify against the pyIsoDep docs).
xs = XsInterface(numdepn=1, numpert=3, states=[[500], [600], [700]],
                 xssets=[bootstrap, tempramp, fullthrust])
timepoints, xsTimeSets = xs.setTimeTrace([0, 3.5, 7.0], [525, 550, 575])
# -------------------------------------------------------------------------
# XS Interface (BiLinear Interpolation/Extrp.)
# -------------------------------------------------------------------------
# 2-D interpolation over two state variables; note this rebinds ``xs``,
# ``timepoints`` and ``xsTimeSets``, discarding the 1-D results above.
xs = XsInterface(numdepn=2, numpert=6, states=[[500, 2], [500, 3], [500, 4],
                                               [600, 2], [600, 3], [600, 4]],
                 xssets=[bootstrap, bootstrap, bootstrap, bootstrap, bootstrap,
                         bootstrap])
timepoints, xsTimeSets = xs.setTimeTrace([0, 3.5], [500, 550], [3.0, 3.5])
a = 1  # no-op; handy breakpoint anchor at the end of the example
| {
"alphanum_fraction": 0.5309633028,
"author": null,
"avg_line_length": 36.8450704225,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "aebf01b10fc4b2652ec44d0f5c59d1693c798c24",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d9da8be6eff4ba301f9689ce5c38a5e50856d033",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DanKotlyar/PYTHON-ISOTOPIC-DEPLETION-PACKAGE",
"max_forks_repo_path": "examples/example_xsInterface.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d9da8be6eff4ba301f9689ce5c38a5e50856d033",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DanKotlyar/PYTHON-ISOTOPIC-DEPLETION-PACKAGE",
"max_issues_repo_path": "examples/example_xsInterface.py",
"max_line_length": 80,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "d9da8be6eff4ba301f9689ce5c38a5e50856d033",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DanKotlyar/PYTHON-ISOTOPIC-DEPLETION-PACKAGE",
"max_stars_repo_path": "examples/example_xsInterface.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-04T22:21:18.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-04T22:21:18.000Z",
"num_tokens": 645,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2616
} |
!===================================================================2
! LANCZOS routines for BIGSTICK
!
! versions for 'new' parallelization scheme -- FALL 2011
!
! This code uses LAPACK ROUTINES
!
! LAPACK copyright statements and license
!
!Copyright (c) 1992-2013 The University of Tennessee and The University
! of Tennessee Research Foundation. All rights
! reserved.
!Copyright (c) 2000-2013 The University of California Berkeley. All
! rights reserved.
!Copyright (c) 2006-2013 The University of Colorado Denver. All rights
! reserved.
!Additional copyrights may follow
!Redistribution and use in source and binary forms, with or without
!modification, are permitted provided that the following conditions are
!met:
!
!- Redistributions of source code must retain the above copyright
! notice, this list of conditions and the following disclaimer.
!
!- Redistributions in binary form must reproduce the above copyright
! notice, this list of conditions and the following disclaimer listed
! in this license in the documentation and/or other materials
! provided with the distribution.
!
!- Neither the name of the copyright holders nor the names of its
! contributors may be used to endorse or promote products derived from
! this software without specific prior written permission.
!
!The copyright holders provide no reassurances that the source code
!provided does not infringe any patent, copyright, or any other
!intellectual property rights of third parties. The copyright holders
!disclaim any liability to any recipient for claims brought against
!recipient by any third party for infringement of that parties
!intellectual property rights.
!
!THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
!"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
!LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
!A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
!OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
!SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
!LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
!DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
!THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
!(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
!OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
!===== END OF LACK COPYRIGHT STATEMENT ========================
module lanczos_util
use lanczos_info
use precisions
implicit none
integer startiter ! # of starting iterations already
real(kind=4), allocatable :: alpha(:),beta(:) ! lanczos coefficients
real(kind=egv_prec), allocatable :: eiglvec(:,:),e(:)
logical :: flyeig
!--------------------- CONVERGENCE -----------------------------------------
logical :: finished
integer :: ncheck
real(kind=egv_prec), allocatable :: eold(:) ! old energies
! real :: ediff0,ediff1
!-------------- ALTERNATE CONVERGENCE ---------------------------
! based on wavefunctions
logical :: altconverge
real(kind=egv_prec), allocatable :: eiglvecold(:,:)
contains
!===========================================================================
! SUBROUTINES IN THIS FILE
! lanczos_menu
! initialize_lanczos_vector
! swap_vchar : controls "direction" of mat-vec multiply
! either H * vec1 = vec2 or H * vec2 = vec1
! lanczos_p : main lanczos subroutine
! initialize_final : zeroes out next vector to be calculated
! lanczos_output
! find_lanczos_eigenvalues
! density1b_output
! exactdiag_p
! thick_restart_sub_p
! random_restart_p
! open_lanczosfile
! close_lanczosfile
!================================================
!
! routine to find eigenvalues of lanczos matrix
! if vecflag = .true. then find eigenvectors too
! if nthick > 0 then arrange according to thick restart
! nwindow = # of eigenvalues desired; only needed for targeted thick-restart
!
! MODIFIED 7.6.8 so one can choose a target energy; REMOVED 7.7.4
!
subroutine find_lanczos_eigenvalues(n,vecflag,nthick)
! Diagonalize the small projected (Lanczos) matrix built from the module
! arrays alpha(:) and beta(:).  On return the module arrays hold the result:
! e(1:n) = eigenvalues, and, if vecflag, eiglvec(:,1:n) = eigenvectors.
!
!  n       : actual dimension of the Lanczos matrix
!  vecflag : if .true. also compute eigenvectors ('V' vs 'N' in LAPACK)
!  nthick  : # of thick-restart vectors; if > 0 an extra dense coupling
!            row/column beta(1:nthick) is inserted at position nthick+1
!
! For lanczchar=='tx' (targeted restart) states are re-sorted by proximity
! to Etarget, then the Nkeep closest are sorted into ascending energy.
! subroutine find_lanczos_eigenvalues(n,alpha,beta,e,vec,vecflag,nthick)
use precisions
use lanczos_info,only:Etarget,lanczchar
implicit none
integer(4):: n ! actual dimension
! integer :: lwork
! real(kind = 4) :: alpha(np), beta(np)
! real(kind = egv_prec) :: e(np), vec(np,np)
logical :: vecflag ! flag to get eigenvectors or not
integer :: nthick
real(kind = egv_prec) :: work3(3*niter) ! work(np)
integer i,j,k
! integer(4) :: nwindow, thickshift
real(kind = egv_prec) :: etmp
real(kind=egv_prec) :: ediff(n),ediff0,xswap
integer info
!-------------- diagonal of the projected matrix = alpha(:)
eiglvec(:,:) = 0.0
do i = 1,n
eiglvec(i,i) = real(alpha(i),kind=egv_prec)
enddo ! i
!-------------- put in column of thick-restart matrix elements
if(nthick > 0)then
do i = 1,nthick
eiglvec(i,nthick+1) = real(beta(i),kind=egv_prec)
eiglvec(nthick+1,i) = real(beta(i),kind=egv_prec)
enddo
endif
!-------- what about beta(nthick+1?)
!-------- ordinary tridiagonal couplings past the thick block
do i = nthick+1,n-1
eiglvec(i,i+1) = real(beta(i),kind=egv_prec)
eiglvec(i+1,i) = real(beta(i),kind=egv_prec)
enddo
call clocker('egv','sta')
! LAPACK symmetric eigensolver, chosen to match egv_prec; the matrix is
! stored in eiglvec with leading dimension Niter, workspace 3*niter
if(vecflag)then
if(egv_prec==4)then
call SSYEV( 'V','U', N, eiglvec, Niter, e, WORK3, 3*niter, INFO )
else
call DSYEV( 'V','U', N, eiglvec, Niter, e, WORK3, 3*niter, INFO )
end if
else
if(egv_prec==4)then
call SSYEV( 'N','U', N, eiglvec, Niter, e, WORK3, 3*niter, INFO )
else
call DSYEV( 'N','U', N, eiglvec, Niter, e, WORK3, 3*niter, INFO )
end if
endif
call clocker('egv','end')
!--------- OPTION TO CHOOSE EXCITED STATES --------------------
! added in 7.7.8 based in part upon work by R. Zbikowski
! SORT EIGENVALUES, EIGENVECTORS BASED UPON PROXIMITY TO Etarget
!
if(lanczchar=='tx')then
!................ SET UP DIFFERENCES OF ENERGIES .............
do i = 1,n
ediff(i)= abs(e(i)-etarget)
end do
!........... NOW SIMPLE BUBBLE SORT .......................
! (in fact a selection sort: find the smallest remaining ediff and swap
!  it -- together with e and the matching eigenvector column -- into slot i)
do i = 1,N-1
k = i
ediff0 = ediff(i)
do j = i+1,N
if(ediff(j) < ediff0)then
k=j
ediff0 = ediff(j)
end if
end do
if(k /= i)then
ediff(k)=ediff(i)
ediff(i)=ediff0
xswap =e(k)
e(k) =e(i)
e(i) =xswap
do j = 1,N
xswap = eiglvec(j,k)
eiglvec(j,k)=eiglvec(j,i)
eiglvec(j,i)=xswap
end do
end if
end do
!.......... DO A SECOND SORT JUST ON THE Nkeep "LOWEST"........
! THIS PUTS EVERYTHING INTO A "PRETTY" ORDER
! (re-sort the Nkeep states closest to Etarget into ascending energy;
!  ediff is not kept in sync here, but it is no longer needed)
do i = 1,Nkeep-1
k = i
ediff0 = e(i)
do j = i+1,Nkeep
if(e(j) < ediff0)then
k=j
ediff0 = e(j)
end if
end do
if(k /= i)then
e(k)=e(i)
e(i)=ediff0
do j = 1,N
xswap = eiglvec(j,k)
eiglvec(j,k)=eiglvec(j,i)
eiglvec(j,i)=xswap
end do
end if
end do
end if
return
end subroutine find_lanczos_eigenvalues
!====================================================================
!
! routine to control construction and "improvement" of initial vector
! added in 7.6.3 by CWJ @ SDSU
!
! CALLED BY: lanczos_menu
! SUBROUTINES CALLED:
! br_open_u
! setup_localvectors
! initialize_lanczos_vector
!
subroutine setup_for_lanczos
! Prepare for a Lanczos run: allocate the local vector storage, build the
! pivot (starting) vector, and -- on a fresh start that is neither a
! strength-function nor a mean-field run -- optionally improve the pivot.
use flagger
use mod_reorthog
use io
use bvectorlib_mod
use onebodypot, only: meanie
implicit none
logical :: improve_pivot
if(noisy0) print *, "setup_localvectors"
call setup_localvectors
if(noisy0) print *, "initialize_lanczos_vector"
call initialize_lanczos_vector
! pivot improvement only makes sense when not restarting and when the
! pivot was not dictated by a strength-function or mean-field setup
improve_pivot = (startiter == 0) .and. .not. strengthflag .and. .not. meanie
if(improve_pivot) call dynamic_vector_initializer
return
end subroutine setup_for_lanczos
!====================================================================
!
! routine to initialize Lanczos vector
!
! NOTE: currently very kludgy!
!
! INPUT:
! startiter: where iterations start
! needed for restart option
!
! OUTPUT:
! dnorm0 = if read in pivot, what is its magnitude
! vchar = keeps track of which vector is initial, which final
! 'n' (normal) initial vec1, final vec2
! 'r' (reverse) initial vec2, final vec1
!
! CALLED BY:
! setup_for_lanczos
! SUBROUTINES CALLED:
! readpivot (old version) reads in pivot from file, normalizes
! dnormvec_p : normalizes a vector (or a fragment)
!
subroutine initialize_lanczos_vector
! Build the Lanczos pivot (starting) vector in vec1.
!
! Behavior:
!  * strength-function runs or menu choice 'np': read the pivot from file
!    (readpivot) and return.
!  * fresh start (startiter == 0): fill the local slice v1s:v1e with one of
!    several patterns selected by pivotoption, each scaled by
!    1/sqrt(dimbasis) so the full vector has norm ~ 1.
!  * restart (startiter > 0): re-read the first startiter Lanczos vectors
!    from lvec_file to restore the iteration state.
!
! OUTPUT (via modules):
!  vec1   : the pivot vector (local slice)
!  vchar  : local here; 'n' means initial vector is vec1, final is vec2
!
! BUG FIX: a local declaration "integer startiter" shadowed the module-level
! startiter of lanczos_util and was read here while undefined, so both the
! fresh-start and restart branches tested garbage.  The declaration is
! removed so the host-associated module variable (set by the restart
! machinery) is used instead.
use flagger
use localvectors
! use precisions
use basis
use fragments
use io
! use lanczos_info
use onebodypot
use nodeinfo
use wfn_mod
use bvectorlib_mod
use menu_choices
implicit none
character(1) :: vchar
real(kind=8) :: dnorm, dnorm0,da
real :: rv
integer (kind = basis_prec) :: i
integer (kind=4) :: iter
logical smallflag
integer :: pivotoption ! different choices
pivotoption = 3
vchar = 'n'
if ( strengthflag .or. menu_char=='np') then
call readpivot ! FIXED so it works with fragments
return
else if ( startiter == 0 ) then
dnorm = dsqrt(real(dimbasis,kind=8))
select case (pivotoption)
case (0) ! uniform entries
do i = v1s, v1e
!....... TO PREVENT ROUNDOFF ERRORS AND ALSO TO MINIMIZE
! ACCIDENTAL STOPS, HAVE DECREASING WEIGHT
vec1(i) = real(1.d0 /dnorm,kind=lanc_prec)
end do
case (1) ! uniform entries with a small random perturbation
do i = v1s, v1e
!....... TO PREVENT ROUNDOFF ERRORS AND ALSO TO MINIMIZE
! ACCIDENTAL STOPS, HAVE DECREASING WEIGHT
call random_number(rv)
vec1(i) = real( (1.d0+ 0.1*rv) /dnorm, kind=lanc_prec)
end do
case (2) ! slowly decreasing weights with index
do i = v1s, v1e
!....... TO PREVENT ROUNDOFF ERRORS AND ALSO TO MINIMIZE
! ACCIDENTAL STOPS, HAVE DECREASING WEIGHT
vec1(i) = real(1.d0 /dsqrt(real(i,kind=8)+dnorm ), kind=lanc_prec)
end do
case (3) ! alternating-sign entries (default)
do i = v1s, v1e
!....... TO PREVENT ROUNDOFF ERRORS AND ALSO TO MINIMIZE
! ACCIDENTAL STOPS, HAVE DECREASING WEIGHT
vec1(i) = real( 1.d0 * (-1)**i /dnorm, kind=lanc_prec)
end do
end select
end if
!----------- RESTART OPTION -------------
! NEEDS WORK!
! replay the previously stored Lanczos vectors so the iteration can resume
if ( startiter > 0 ) then
if(iproc==0)rewind(lvec_file)
if(.not. useNewReorthog) call swap_vchar(vchar)
do iter = 1,startiter
if(.not. useNewReorthog) call swap_vchar(vchar)
call read_lanczos_vector_restart_a(vchar,'i',iter,lvec_file)
end do
endif
return
end subroutine initialize_lanczos_vector
!====================================================================
! added in 7.6.3 March 2016
! options to generate initial pivot vector;
! also useful in pointing towards a preconditioner in LOBPCG
!
! OPTIONS:
! 1) PP, NN, + trace of PN
! 2) diagonal parts of PP, NN + trace PN only
! 3) diagonal parts of PP,NN, PN only
! 4) Hartree-Fock like initial vector
!
! SUBROUTINES CALLED:
! masterpnsectortrace (optional)
subroutine dynamic_vector_initializer
! Optionally "improve" the pivot vector before the main Lanczos run,
! either by a few preliminary Lanczos iterations with an approximate
! Hamiltonian (options 2,3) or by reading an eigenvector from a previous
! wavefunction file (option 1).
!
! Menu (read on the root process, then broadcast):
!   0 : no further initializing
!   1 : read pivot from a prior calculation   (added 7.7.4)
!   2 : use PP, NN + trace of PN
!   3 : sector-diagonal parts of PP, NN, PN only
!
! Side effects (module flags): initializing, applypntrace, applyXXonly,
! diagonalsectorsonly steer how lanczos_p applies the Hamiltonian.
! use lanczos_info
use nodeinfo
use bmpi_mod
use io
use mod_reorthog
use wfn_mod
implicit none
integer :: choice_initializing
integer :: ierr
integer :: tmpkeep
integer i,j,n, tidx
real :: e,xj,xt2
nsave = 1 ! could be different, especially if creating vectors
! to construct a preconditioner
! feature disabled: reset every steering flag and log that to auto-input
if(.not.initializing_enabled)then
initializing=.false.
applypntrace =.false.
applyXXonly =.false.
diagonalsectorsonly = .false.
write(autoinputfile,'(" ! Not optimizing initial pivot vector ")')
return
end if
!--------- MENU (root process only; choices broadcast below) ---------
if(iproc==0)then
print*,' '
print*,' You get to dynamically prepare the pivot (initial vector) '
print*,' Choose one of the following options: '
print*,' (0) No further initializing '
print*,' (1) Read in pivot from prior calculation ' ! ADDED 7.7.4
print*,' (2) Use PP, NN, and trace of PN '
print*,' (3) Sector-diagonal parts of PP, NN, and PN only '
if(auto_input)then
read(autoinputfile,*)choice_initializing
write(6,*)' Choice = ',choice_initializing
else
read(5,*)choice_initializing
write(autoinputfile,'(i3," ! pivot preparation option")')choice_initializing
end if
! options > 1 run preliminary iterations, so ask how many (capped at niter)
if(choice_initializing > 1)then
print*,' How many iterations to generate ? (must be <= ',niter,')'
if(auto_input)then
read(autoinputfile,*)initial_maxiter
initial_maxiter = min(initial_maxiter,niter)
write(6,*)initial_maxiter,' iterations for preparing pivot '
else
read(5,*)initial_maxiter
initial_maxiter = min(initial_maxiter,niter)
write(autoinputfile,'(i3," ! # of initial iterations on pivot ")')initial_maxiter
end if
end if
end if
call BMPI_BCAST(choice_initializing,1,0,icomm,ierr)
call BMPI_BCAST(initial_maxiter,1,0,icomm,ierr)
!--------- SET STEERING FLAGS FOR THE CHOSEN OPTION ------------------
select case (choice_initializing)
case (0)
initializing=.false.
applypntrace =.false.
applyXXonly =.false.
diagonalsectorsonly = .false.
return
case (2)
initializing=.true.
applypntrace =.true.
applyXXonly =.true.
diagonalsectorsonly = .false.
call masterpnsectortrace
case (3)
initializing=.true.
applypntrace =.false.
applyXXonly =.false.
diagonalsectorsonly = .true.
! call mastersectortrace
case (1)
initializing=.false.
applypntrace =.false.
applyXXonly =.false.
diagonalsectorsonly = .false.
!............. READ IN PIVOT ............................. ADDED 7.7.4..........
call wfn_ropen_file(oldwfnfile)
call read_wfn_header(oldwfnfile,.false.)
! the old file must describe the same basis
if(dimbasischeck /= dimbasis)then
if(iproc == 0)then
print*,' mismatch in basis dimensions '
print*,' expecting ',dimbasis
print*,' final ',dimbasischeck
endif
stop
endif
call wfn_read_nkeep(oldwfnfile, tmpkeep)
if(iproc==0) print*,' There are ', tmpkeep,' wavefunctions '
! list all stored states so the user can pick one
do i = 1,tmpkeep
! new interface - we say which vec to read, it checks
! KSM: This will be very slow, only need to read head part of each vector
call wfn_readeigenvec(oldwfnfile,frag1, fcomm1, vec1,i,e,xj,xt2)
if(iproc==0)print*,i,e,xj,xt2
enddo
if(iproc == 0) then
tidx = -1
! NOTE(review): the loop condition bounds tidx by tmpkeep (states in the
! old file) but the inner range checks use nkeep -- confirm which bound
! is intended; they differ when the old run kept a different # of states
do while(tidx < 1 .or. tidx > tmpkeep)
print*,' Which do you want as initial state? '
if(auto_input)then
read(autoinputfile,*) tidx
if(tidx < 1 .or. tidx > nkeep) then
print *, "vector selection out of range: ", tidx
stop 1
end if
else
read(5,*) tidx
if(tidx < 1 .or. tidx > nkeep) then
print *, "vector selection out of range: ", tidx, ", please try again"
else
write(autoinputfile,*) tidx
end if
end if
end do
write(logfile,*)' Initial state = ',tidx
end if
call BMPI_BCAST(tidx,1,0,icomm,ierr)
! new interface - we say which vec to read, it seeks and reads
! no need to rewind and read forward
call wfn_readeigenvec(oldwfnfile, frag1, fcomm1, vec1,tidx,e,xj,xt2)
call wfn_close_file(oldwfnfile)
return
end select
!--------- RUN THE PRELIMINARY ITERATIONS (options 2,3) --------------
if(initializing)then
call clocker('piv','sta')
write(logfile,*)' Initializing pivot vector with ',initial_maxiter,' iterations '
write(logfile,*)' Choosing option ',choice_initializing
write(logfile,*)' Flags: applypntrace = ',applypntrace,', applyXXonly = ',applyXXonly, &
', diagonalsectorsonly = ',diagonalsectorsonly
call lanczos_p(0)
initializing=.false. ! return values to defaults
applypntrace =.false.
applyXXonly =.false.
diagonalsectorsonly = .false.
br_histpos = 0 !IMPORTANT -- need to return to zero
if(iproc==0)print*,' Finished initializing pivot vector '
call clocker('piv','end')
call clockout('piv')
end if
return
end subroutine dynamic_vector_initializer
!====================================================================
subroutine swap_vchar(vchar)
! Toggle the mat-vec "direction" flag: 'n' (vec1 -> vec2) becomes 'r'
! (vec2 -> vec1) and vice versa.  The new reorthogonalization scheme
! never swaps buffers, so calling this with it enabled is a fatal error.
use fragments
implicit none
character(1) :: vchar
if(useNewReorthog) then
print *, "swap_vchar: should not call with new reorthog"
stop 1
end if
select case (vchar)
case ('n')
vchar = 'r'
case default
vchar = 'n'
end select
return
end subroutine swap_vchar
subroutine print_vec_head(vec, msg)
! Debugging hook: would echo the first few entries of a Lanczos vector on
! the root process, labeled by msg.  The print itself is currently
! commented out, so the routine is a no-op.
use nodeinfo
use precisions
implicit none
real(kind=lanc_prec) :: vec(*)
character (len=*) :: msg
! only the root process would ever report
if(iproc /= 0) return
! print *, msg, ": ", vec(1), vec(2), vec(3), vec(4), vec(5)
return
end subroutine print_vec_head
!================================================
!
! WRITE OUT RESULTS
!
! CALLS:
! find_lanczos_eigenvalues
! setup4obsmaster
! reset_jumps4obs
! read_lanczos_vector_a
! block_reduce
! applyobsbundled
! clocker
! wfn_writeeigenvec (formerly writeeigenvec_p)
! close_lanczosfile
! Wcounter
! density1b_output
! output_TRDENS
! makespeme
!
subroutine lanczos_output(iter,i_thick,dnorm0)
! Final results stage: diagonalize the Lanczos matrix, reconstruct the
! nkeep lowest eigenvectors, compute J and T for each, and write energies,
! occupations, strengths, wavefunctions, and optional densities.
!
!  iter    : # of Lanczos iterations performed (size of Lanczos matrix)
!  i_thick : thick-restart block size forwarded to find_lanczos_eigenvalues
!  dnorm0  : norm of the original pivot; scales strength-function output
use sporbit
use localvectors
use nodeinfo
use io
use basis
use obs
! use lanczos_info
! use precisions
use fragments
use mod_reorthog
use flagger
use coupledmatrixelements
use system_parameters
use wfn_mod
use pocc_mod
use butil_mod
use btbme_mod
use bvectorlib_mod
use jump_mod
use diagh
use apply_obs_mod
implicit none
integer(4) :: iter,i_thick
! real :: alpha(niter), beta(niter)
! real(kind=egv_prec) :: e(niter), eiglvec(niter,niter)
real (kind =8 )::dnorm0,da ! magnitude for strengths
real(4) :: xj,xt
! NOTE(review): the locals flyeig, ncheck, eold below shadow module-level
! variables of the same names in lanczos_util and appear unused here
logical :: flyeig
integer(4) :: i,j
integer(kind=basis_prec) :: k
integer(kind=basis_prec) :: istate
!--------------------- CONVERGENCE -----------------------------
! logical :: finished
integer :: ncheck
real, allocatable :: eold(:) ! old energies
real :: ediff
logical smallflag,zeroflag
integer :: nread,jvec
integer inode
character(1) :: vchar
integer :: ierr
real(kind = 8) :: dtmp
real(kind = 8) :: tmpvec(nkeep)
! real(kind=4),allocatable :: spoccx(:,:,:) ! single-particle occupations
! spoccx(it,istate,iorb) species(p/n), eigenstate, orbit
integer(4) :: iorb
character :: outchoice !used for steering output options
vchar = 'n'
! solve the small Lanczos eigenproblem; eigenvectors only if J,T wanted
call find_lanczos_eigenvalues(iter,get_JT,i_thick)
call clocker('tt8','end')
call clocker('lan','end')
call clocker('obs','start')
call pocc_init_spoccx() ! allocate and initialize array
call wfn_write_nkeep(nkeep) ! write number of vectors to wfn file
!---------- SET UP ARRAY FOR SINGLE-PARTICLE OCCUPATIONS --
! outchoice: 'd' = energies only, 'b' = both p and n occupations,
!            'p' = protons only, 'n' = neutrons only
outchoice ='d' ! default option
if (spoccflag .and. .not.densityflag)then
if(npeff(1)>0 .and. npeff(2) > 0)outchoice='b'
if(npeff(1)>0 .and. npeff(2) == 0)outchoice='p'
if(npeff(1)==0 .and. npeff(2) > 0)outchoice='n'
end if
!------------ RECONSTRUCT EIGENVECTORS AND COMPUTE J^2, T^2.........
if(get_JT)then
!------------ SET UP FOR ANG MOM................................
call setup4obsmaster('J')
call setup4obsmaster('T')
!.................. ADDED DEC 2010 by CWJ .........................
! IF USING 3-BODY (OR POSSIBLY SKIPPING ZEROES)
! NEED TO RESET ALL THE JUMPS
call reset_jumps4obs
end if
!...................................................................
if ( iproc == 0 ) print*,' '
!---------- STRENGTH FUNCTION: strength of state j is the squared overlap
!           of the pivot with state j, times the pivot's squared norm
if ( strengthflag ) then
if ( iproc == 0 ) then
write(resultfile,*)' Energy Strength '
write(resultfile,*)' ______ ________ '
write(6,*)' Energy Strength '
write(6,*)' ______ ________ '
do j = 1,iter
write(6,88)e(j),eiglvec(1,j)**2*dnorm0*dnorm0
if(writeout)write(resultfile,88)e(j),eiglvec(1,j)**2*dnorm0*dnorm0
enddo
write(resultfile,*)' ______ ________ '
write(resultfile,*)' '
write(6,*)' ______ ________ '
write(6,*)' '
end if
! return
endif
88 format(2f10.5)
!---------------- WRITE OUT SINGLE PARTICLE STATES with occupation report --------
if(spoccflag .and. .not.densityflag) call pocc_write_orbits()
call pocc_write_table_header() ! E Ex ...
allocate(energy(nkeep), xjlist(nkeep),xtlist(nkeep), stat=aerr )
if(aerr /= 0) call memerror("lanczos_output 1");
!----- Construct nkeep eigenvectors of H from Lanczos eigenvectors....
!---- If lanczos vectors stored in core, then construct in core
if(get_JT)then !added 7.6.8
if(storelanczosincoreMPI .or. storelanczosincore1)then
call clocker('eig','sta')
! KSM lvec(bitpos, iter) is the old way of storing old vectors
! It will go away with useNewReorthog
if(useNewReorthog) then
! This applys the same transform as the else clause, but to the
! basis stored in br_histbuf
call br_transform_basis(eiglvec, nkeep, iter)
else
! KSM 17Aug2014 - my interpretation of the following code
! We have a sub basis stored in lvec(:, i)
! We extracted the \alpha and \beta coefficients against this basis
! This represents a projection of H onto this much smaller basis.
! The eigenvectors of the \alpha,\beta matrix can be used to construct
! approximate eigenvectors in the untruncated space by taking the linear
! combination of lvec(:, i) with weights being the component of the eigenvector.
!
!$omp parallel do private(tmpvec,i,j,dtmp), shared(eiglvec, Lvec)
do k = 1,Ldim ! each row (labeled by kl) can be transformed independently
dtmp = 0.d0
do i = 1,nkeep
dtmp = 0.d0
do j = 1,iter
dtmp = dtmp + real(lvec(k,j),kind=8)* real(eiglvec(j,i),kind=8)
end do ! j
tmpvec(i) = dtmp
end do ! i
do i = 1,nkeep
lvec(k,i) =real(tmpvec(i),kind=lanc_prec)
end do
end do ! k
!$omp end parallel do
end if
call clocker('eig','end')
vchar = 'n'
!---- COMPUTE J,T AND WRITE OUT VECTORS-------------------------------
do i = 1,nkeep
! KSM - the idea here is to restore a history vector into vec1
if(useNewReorthog) then
call br_retrieve_hist(i)
! It turns out that we need the vector loaded into both vec1 and vec2
! of course, they are different slices on each node
call br_restore_vec1()
else
vec1 = 0.0
call read_lanczos_vector_a(vchar,'i',i,lvec_file)
if(storelanczosincoreMPI) then
! call block_reduce(dimbasis,vec1)
! each mpi process reads one slice. allreduce is overkill but works
call BMPI_ALLREDUCE(vec1, size(vec1), MPI_SUM, icomm, ierr) ! in place
end if
end if
call br_load_vec2_from_vec1()
energy(i) = real(e(i), kind(energy(1)))
!------------ COMPUTE J2, T2.................................
call clocker('aob','sta')
xj2 = 0.0e0_obs_prec
xt2 = 0.0e0_obs_prec
twoobsflag = .true. ! compute both J^2 and T^2
call applyobsbundled(1)
call clocker('aob','end')
! convert <J^2>, <T^2> to J, T by inverting x(x+1) = <X^2>
xj = real( -0.5 + sqrt(xj2 + 0.25), kind=4)
xt = real( -0.5 + sqrt(xt2 + 0.25), kind=4)
xjlist(i) = xj
xtlist(i) = xt
!!----------------WRITE OUT WFN..............................................
!! KSM: do this before computing occupations because data like xt2 will be overwritten
!! not clear why we are saving xt2 instead of xt, but I'm not changing it.
call clocker('wev','sta')
if ( writeout .and. write_wfn) then
call wfn_writeeigenvec(wfnfile, frag1, vec1, i, real(e(i),kind=4), xj, real(xt2,kind=4))
end if
call clocker('wev','end')
!----------- COMPUTE SINGLE PARTICLE OCCUPATIONS---------- added in 7.3.7
call pocc_compute_spocc(i, .true.) ! true for restore J/T setup
!----------------- WRITE OUT RESULTS.............................
if ( iproc == 0 ) then
select case (outchoice)
case('d')
call pocc_write_ejt(i, e, xj, xt)
case('b')
! write(6,12)i,e(i), e(i) - e(1),xj,xt,(spoccx(1,i,iorb),iorb=1,numorb(1))
call pocc_write_ejt(i, e, xj, xt)
call pocc_write_occvec(6, spoccx, i, 1, " p occ:")
call pocc_write_occvec(6, spoccx, i, 2, " n occ:")
if ( writeout ) then
call pocc_write_occvec(resultfile, spoccx, i, 1, " p occ:")
! NOTE(review): label " n_occ:" differs from the " n occ:" used elsewhere
call pocc_write_occvec(resultfile, spoccx, i, 2, " n_occ:")
end if
case('p')
! write(6,12)i,e(i), e(i) - e(1),xj,xt,(spoccx(1,i,iorb),iorb=1,numorb(1))
call pocc_write_ejt(i, e, xj, xt)
call pocc_write_occvec(6, spoccx, i, 1, " p occ:")
if ( writeout ) then
! write(resultfile,12)i,e(i),e(i) - e(1),xj,xt,(spoccx(1,i,iorb),iorb=1,numorb(1))
call pocc_write_occvec(resultfile, spoccx, i, 1, " p occ:")
end if
case('n')
! write(6,13)i,e(i), e(i) - e(1),xj,xt,(spoccx(2,i,iorb),iorb=1,numorb(2))
call pocc_write_ejt(i, e, xj, xt)
call pocc_write_occvec(6, spoccx, i, 2, " n occ:")
if ( writeout ) then
! write(resultfile,13)i,e(i),e(i) - e(1),xj,xt,(spoccx(1,i,iorb),iorb=1,numorb(2))
! NOTE(review): this second pocc_write_ejt looks like an accidental
! duplicate (writes the E/J/T line twice for outchoice 'n') -- confirm
call pocc_write_ejt(i, e, xj, xt)
call pocc_write_occvec(resultfile, spoccx, i, 2, " n occ:")
end if
end select
end if
end do ! i
!..... else construct by writing to disk (slow).....
else
if(useNewReorthog) then
if(iproc == 0) print *, "useNewReorthog oops"
stop 1
end if
do i = 1, nkeep
do k = 1, dimbasis
vec1(k) = 0.0e0_lanc_prec
end do !k
call clocker('eig','sta')
! accumulate vec1 = sum_j eiglvec(j,i) * (Lanczos vector j)
do jvec = 1,iter
da = real(eiglvec(jvec,i),kind=8)
if(.not.storelanczosincoreMPI)then
call read_lanczos_vector_a(vchar,'f',jvec,lvec_file)
do k = 1, dimbasis
vec1(k) = vec1(k) + real(vec2(k),kind=8)*da
end do !k
! need data on both sides: note that nodes don't have the same slices on both sides
call br_load_vec2_from_vec1()
else
!$omp parallel do private(istate, k) &
!$omp shared(Lstart, Ldim, vec1, Lvec, jvec, da)
do istate = 1, Ldim
k = Lstart + istate - 1
vec1(k) = vec1(k) + real(Lvec(istate,jvec),kind=8)*da
end do !istate
!$omp end parallel do
end if
end do !j
if(storelanczosincoreMPI) then
! call block_reduce(dimbasis,vec1)
! each mpi process reads one slice. allreduce is overkill but works
call BMPI_ALLREDUCE(vec1, size(vec1), MPI_SUM, icomm, ierr) ! in place
end if
call clocker('eig','end')
!------------ COMPUTE J2, T2.................................
call clocker('aob','sta')
xj2 = 0.0e0_obs_prec
xt2 = 0.0e0_obs_prec
twoobsflag = .true. ! compute both J^2 and T^2
call applyobsbundled(1)
call clocker('aob','end')
xj = real( -0.5 + sqrt(xj2 + 0.25), kind=4)
xt = real( -0.5 + sqrt(xt2 + 0.25), kind=4)
!----------- COMPUTE SINGLE PARTICLE OCCUPATIONS---------- added in 7.3.7
! occupation of orbit iorb = expectation value of a number operator,
! built by loading a unit single-particle energy into that orbit
if(spoccflag .and. .not.densityflag)then
do iorb = 1,bmax(numorb(1),numorb(2) )
if(numorb(1) > 0 .and. iorb < numorb(1))then
pspe = 0.0
pspe(iorb) = 1.0
call makespeme(1,'H')
end if
if(numorb(2) > 0 .and. iorb < numorb(2))then
nspe = 0.0
nspe(iorb) = 1.0
call makespeme(2,'H')
end if
xj2 = 0.0e0_obs_prec
xt2 = 0.0e0_obs_prec
call applyspoccbundled(1)
if(np(1) > 0)spoccx(1,i,iorb)= xj2
if(np(2) > 0)spoccx(2,i,iorb)= xt2
end do
end if
!----------------- WRITE OUT RESULTS.............................
! if ( isoflag ) then
if ( iproc == 0 ) then
select case (outchoice)
case('d')
write(6,11)i,e(i), e(i) - e(1),xj,xt
if ( writeout ) write(resultfile,11)i,e(i),e(i) - e(1),xj,xt
case('b')
write(6,121)i,e(i), e(i) - e(1),xj,xt
call pocc_write_occvec(6, spoccx, i, 1, " p occ:")
call pocc_write_occvec(6, spoccx, i, 2, " n occ:")
if ( writeout ) then
write(resultfile,121)i,e(i), e(i) - e(1),xj,xt
call pocc_write_occvec(resultfile, spoccx, i, 1, " p occ:")
call pocc_write_occvec(resultfile, spoccx, i, 2, " n occ:")
end if
case('p')
case('n')
end select
end if
call pocc_write_orbits()
!----------------WRITE OUT WFN..............................................
call clocker('wev','sta')
if ( writeout .and. write_wfn) then
call wfn_writeeigenvec(wfnfile, frag1, vec1, i, real(e(i),kind=4), xj, real(xt2,kind=4))
end if
call clocker('wev','end')
end do
end if
else ! just print out energies for get_JT=.false.
do i = 1,nkeep
write(6,11)i,e(i), e(i) - e(1)
if ( writeout ) write(resultfile,11)i,e(i),e(i) - e(1)
end do
end if
! add blank line for formatting
if(iproc == 0) then
write(6,*) " "
if(writeout) write(resultfile, *) " "
end if
11 format(i5,3x,2f10.5,2x,2f8.3)
12 format(i5,3x,2f10.5,2x,2f8.3,' p occ: ',20f7.3)
121 format(i5,3x,2f10.5,2x,2f8.3)
13 format(i5,3x,2f10.5,2x,2f8.3,' n occ: ',20f7.3)
14 format(46x,' n occ: ',12f7.3)
if(.not.allsamew .and. get_JT)call Wcounter ! added in 7.2.5; computes and prints out truncation components
call close_lanczosfile
if(densityflag)call density1b_output !(e,eiglvec)
if ( trdensout ) then
call clocker('trd','sta')
call output_TRDENS
call clocker('trd','end')
end if
call pocc_cleanup()
return
end subroutine lanczos_output
!================================================
!
! function to force conversion of unconverged xJ to integer J
! that is, odd 2 x J for odd A, and even for even A
!
function closest2J(evenA,xj)
! Round an (unconverged) angular momentum xj to the nearest physical
! twice-J: even 2J for even-A systems, odd 2J for odd-A systems,
! clamped so it never drops below the physical minimum (0 resp. 1).
implicit none
integer closest2J
real xj
logical evenA
if(evenA)then
closest2J = max(0, 2*nint(xj))
else
closest2J = max(1, 2*nint(xj-0.5)+1)
end if
return
end function closest2J
!---- NOTE: density1b_output moved to bdenslib1.f90
!=============================================================================
!
! thick-restart lanczos:
! after some iterations, take nthick lowest eigenvectors
! and reintroduce them as lanczos vectors
! then start up iterations again
!
! NB: in normal runs, up to this point have done iter = nthick+1 iterations
!
! nthick = # of vectors to keep for thick-restart
! iter = # of lanczos iterations in total
! iter_thick = which iteration of thick_restart (starting at 0)
! maxiter = dimension of arrays
! e(:), eiglvec(:,:) : eigenvalues and eigenvectors
! alpha(),beta()
! vchar (no longer in significant use)
!
! ALSO USED but not passed in arguments: niter, dimension of truncated Hamiltonian to diagonalize
! NOT USED here: nkeep, the # of final eigenvectors
!
! CALLS:
! find_lanczos_eigenvalues
! swap_vchar
! read_lanczos_vector_a
! write_lanczos_vector_a
! block_reduce
!
subroutine thick_restart_sub_p(nthick,iter,iter_thick,maxiter,vchar)
! One thick-restart step: diagonalize the current Lanczos matrix, keep the
! nthick lowest Ritz vectors as the new starting Lanczos vectors, and
! rebuild the alpha/beta coefficients for the restarted iteration.
!
!  nthick     : # of vectors to keep for thick-restart
!  iter       : # of Lanczos iterations so far
!  iter_thick : which thick-restart this is (starting at 0)
!  maxiter    : dimension of the coefficient arrays
!  vchar      : mat-vec "direction" flag (legacy storage scheme only)
!
! Three storage paths: new reorthogonalization history (br_*), Lanczos
! vectors in core (lvec), or Lanczos vectors on disk (slowest).
! subroutine thick_restart_sub_p(nthick,iter,iter_thick,maxiter,e,eiglvec,alpha,beta,vchar)
use localvectors
use precisions
use basis
use lanczos_info
use nodeinfo
use fragments
use bmpi_mod
use butil_mod
use bvectorlib_mod
use mod_reorthog
use flagger
implicit none
integer :: nthick ! # of vectors to keep for thick-restart
integer :: iter ! # of lanczos iterations so far
integer :: iter_thick ! which thick_restart iteration this is, starting with 0
integer :: maxiter ! dimension
! real(kind=egv_prec) :: e(maxiter), eiglvec(maxiter,maxiter)
real(kind=8) :: tmpvec(maxiter)
! real(kind=4) :: alpha(maxiter), beta(maxiter)
character(1) :: vchar
real (kind = lanc_prec),pointer :: v1(:), v2(:)
integer nthicktmp
integer(4):: i,j,k
integer(4):: thickshift ! for targeted thick-restart
integer(4):: iclosest
real(kind=8) :: eclosest
logical :: verbosetarget = .true. ! used for debugging excited state thick-restart
integer, parameter :: tmpfile = 58
real (kind = lanc_prec) :: da
integer(kind=basis_prec) :: kl
integer(4) :: file_mode,file_info,ierror
integer(4) :: inode
real(kind = 8) :: dtmp
integer :: ierr
real(kind=8) :: thickconverge
if(iproc==0)print*,' In thick restart niter, nthick = ',niter,nthick
!-------------------SOLVE FOR TRUNCATED EIGENVALUES -----------
! the very first restart has no thick block yet
if(iter_thick == 0)then
nthicktmp = 0
else
nthicktmp = nthick
endif
call find_lanczos_eigenvalues(iter-1,.true.,nthicktmp)
!--------------------CREATE NEW COEFFICIENTS -------------------
! after restart: alpha(i) = Ritz value, beta(i) = residual coupling of
! Ritz vector i to the next Lanczos vector
do i = 1,nthick
alpha(i) = e(i)
beta(i) = beta(iter-1)*eiglvec(iter-1,i) ! correction 6/2011 by WEO
enddo
!------------ CHECK SIZE OF COUPLING -------------------------
! rms of the couplings over the kept states -> convergence diagnostic
thickconverge = 0.d0
do i = 1,nkeep
thickconverge = thickconverge + beta(i)**2/real(nkeep,kind=8)
end do
if(iproc==0)print*,' Alternate thick-restart convergence : ',thickconverge
!.................. REWRITE TO FILE......
if(iproc ==0)then
rewind(coef_file)
print*,' rewriting to file ',coef_file,nthick
do i = 1,nthick
write(coef_file,'(i10,2(2x,f20.15))')-i,alpha(i), beta(i)
end do
end if
if(useNewReorthog) then
! use eigenvectors of tridiagonal alpha-beta matrix to
! mix saved lanczos vectors into approximate eigenstates of H.
! we will use these as the first nthick saved lanczos vectors (the history)
! going forward.
! I understand alpha(i) = e(i) above
! I think the beta issue is related to the last vector produced in
! the lanczos process. We have dropped the last beta and associated vector
! by truncating the alpha-beta matrix at nthick.
if(iproc .eq. 0) then
print *, "New Reorthog Thick Restart"
print *, "iter=", iter, ", nthick=", nthick
print *, "br_histpos=", br_histpos
print *, "nthick_add=", nthick_add
end if
call br_transform_basis(eiglvec, nthick, iter-1)
! init for next iteration.
! load vec1 from the top of the history.
call br_retrieve_hist(iter)
call br_restore_vec1()
! ends with nthick as the most recent vector.
call br_set_histpos(nthick) ! set last written position
call br_add2hist(iter+1) ! br_retrieve_hist left in br_reg, put at new head
return
end if
!................IF STORING LANCZOS IN CORE .....
! in-place transform: row kl of lvec is replaced by its projection onto
! the nthick Ritz vectors (each row is independent -> OpenMP)
if(storelanczosincore1 .or. storelanczosincoreMPI)then
!$omp parallel do private(tmpvec,i,j,dtmp), shared(eiglvec, Lvec,nthick,iter,Ldim)
do kl = 1,Ldim ! each row (labeled by kl) can be transformed independently
dtmp = 0.d0
do i = 1,nthick
dtmp = 0.d0
do j = 1,iter-1
dtmp = dtmp + real(lvec(kl,j),kind=8)* real(eiglvec(j,i),kind=8)
end do ! j
tmpvec(i) = dtmp
end do ! i
do i = 1,nthick
lvec(kl,i) =real(tmpvec(i),kind=lanc_prec)
end do
end do ! kl
!$omp end parallel do
!........ SET VCHAR...
! replay the swaps that would have occurred over nthick iterations so the
! direction flag matches the legacy bookkeeping
vchar = 'n'
do i = 1,nthick
call swap_vchar(vchar)
end do
if(vchar=='n')then
do kl = 1,dimbasis
vec1(kl) = 0.0
end do
end if
if(vchar=='r')then
do kl = 1,dimbasis
vec2(kl) = 0.0
end do
end if
if(useNewReorthog .and. iproc == 0) print *, "read_lanczos_vector in thick_restart_sub_p"
call read_lanczos_vector_a(vchar,'i',iter,tmpfile)
if(storelanczosincoreMPI) then
if(vchar == 'n') call BMPI_ALLREDUCE(vec1, size(vec1), MPI_SUM, icomm, ierr) ! in place
if(vchar == 'r') call BMPI_ALLREDUCE(vec2, size(vec2), MPI_SUM, icomm, ierr) ! in place
end if
return
end if
!................ IF STORING LANCZOS VECTORS ON DISK.................
! build each Ritz vector by streaming the old Lanczos vectors from file;
! on the first pass (i==1) they are also copied to a scratch file so
! later passes can re-read them after lvec_file is overwritten
vchar = 'n'
open(unit=tmpfile,status ='scratch',form='unformatted')
do i = 1,nthick
if(vchar == 'n')then
v1 => vec1
v2 => vec2
else
v1 => vec2
v2 => vec1
end if
do kl = 1,dimbasis
v1(kl) = 0.0
enddo ! j
do j = 1,iter-1
if(i == 1)then
call read_lanczos_vector_a(vchar,'f',j,lvec_file)
call write_lanczos_vector_a(vchar,'f',j,tmpfile)
else
call read_lanczos_vector_a(vchar,'f',j,tmpfile)
endif
da = real(eiglvec(j,i), kind(da))
do kl = 1,dimbasis
v1(kl) = v1(kl) + v2(kl)*da
enddo !kl
enddo !j
if(i == 1)then
call read_lanczos_vector_a(vchar,'f',iter,lvec_file)
call write_lanczos_vector_a(vchar,'f',iter,tmpfile)
endif
call write_lanczos_vector_a(vchar,'i',i,lvec_file)
call swap_vchar(vchar)
enddo ! i
! restore the most recent Lanczos vector for the next iteration
call read_lanczos_vector_a(vchar,'i',iter,tmpfile)
333 format(i4,4f10.6)
if(nproc ==1)then
close(tmpfile)
else
call BMPI_FILE_CLOSE(tmpfile,ierror)
end if
return
end subroutine thick_restart_sub_p
!================================================
!
! caution: not thoroughly tested
!
! CALLS:
! dnormvec_p
! reorthogonalize_a
!
subroutine random_restart_p(iter,vchar)
! Replace the current Lanczos vector with a random one orthogonalized
! against the previous iter vectors (used to restart a stalled run).
! CAUTION: not thoroughly tested.
use localvectors
use fragments
! use precisions
use basis
! use lanczos_info
use nodeinfo
use bmpi_mod
use butil_mod
use bvectorlib_mod
implicit none
character(1) :: vchar
integer(4) :: iter
real(kind=lanc_prec), pointer :: wvec(:)
real(kind=8) :: scale8
real(kind=8) :: vnorm
logical :: tinyflag
real :: rnd
integer(kind=basis_prec) :: ib, ibstart, ibstop
! select the storage buffer (and its fragment slice) for this direction
if(vchar == 'n')then
wvec => vec1
ibstart = basestart(frag1)
ibstop = basestop (frag1)
else
wvec => vec2
ibstart = basestart(frag2)
ibstop = basestop (frag2)
endif
! fill the local slice with zero-mean random entries, pre-scaled by
! 1/sqrt(dimbasis) so the full vector has norm of order unity
scale8 = 1.d0 /dsqrt(real(dimbasis,kind=8))
do ib = ibstart,ibstop
call random_number(rnd)
wvec(ib) = scale8*(rnd-0.5)
end do
call dnormvec_p(vchar,'i',vnorm,tinyflag)
!----------- orthogonalize against previous vectors, then renormalize
call reorthogonalize_a(iter,vchar,vnorm)
call dnormvec_p(vchar,'i',vnorm,tinyflag)
! if the random vector lay (almost) entirely in the span of the previous
! vectors, the renormalized result is effectively zero -- give up
if(tinyflag)then
if(iproc == 0)print*,' Ooops, zero vec in reorthogonalization of restart '
stop
endif
return
end subroutine random_restart_p
!===========================================================================
! Open Lanczos file
! revision 6/2012 by CWJ
! requires further modification when vectors stored in memory
!
! CALLED BY:
! lanczos_master in BLANCZOS.f90
!
! NOTE: in 7.8.1, opening .lvec file turned OFF
!
!===========================================================================
subroutine open_lanczosfile
! Open the formatted .lcoef file that records the Lanczos alpha/beta
! coefficients, on the root process only.  The base name is the output
! file when results are being written, otherwise the default 'lanczosvec'.
! (Since 7.8.1 the binary .lvec vector file is no longer opened here.)
use nodeinfo
use lanczos_info
use precisions
use io
use bmpi_mod
use butil_mod
implicit none
character (len=25) :: filename
integer(4) :: ilast
writetodisk = .true.
if ( iproc /= 0 ) return
if ( writeout ) then
ilast = index(outfile,' ') - 1
open(unit=coef_file,file=outfile(1:ilast)//'.lcoef',status = 'unknown',form='formatted')
else
filename = 'lanczosvec'
ilast = index(filename,' ')-1
open(unit = coef_file,file=filename(1:ilast)//'.lcoef',status='unknown',form ='formatted')
end if
return
end subroutine open_lanczosfile
!===========================================================================
! subroutine close_lanczosfile
! revision 6/2012 by CWJ
!===========================================================================
subroutine close_lanczosfile
! Close the Lanczos vector and coefficient files on the root process.
! When restart is not enabled, the .lvec file is first invalidated by
! rewinding it and writing a 0 record, so a stale file cannot be reused.
use lanczos_info
use io
use nodeinfo
use bmpi_mod
use butil_mod
implicit none
integer(4) :: ierror
if ( .not. writetodisk ) return
if ( iproc == 0 ) then
if ( .not. restart_enabled ) then
rewind(lvec_file)
write(lvec_file)0
end if
close(lvec_file)
close(coef_file)
end if
return
end subroutine close_lanczosfile
!======================================================================
!
! routine to compute distribution of W values in final wavefunctions
! added in 7.2.5 by CWJ SDSU 11/2013
!
subroutine Wcounter
!
!  For each kept eigenstate, computes and prints the fraction of the
!  wavefunction norm residing in each W-truncation subspace.
!  Added in 7.2.5 by CWJ SDSU 11/2013.
!
use W_info
use sporbit
use basis
use sectors
use io
use nodeinfo
use lanczos_info
use localvectors
use flagger
use fragments
! use tribution
use mod_reorthog
use wfn_mod
use bmpi_mod
use butil_mod
use bvectorlib_mod
implicit none

integer(4) :: ierr
integer is,isc,jsc                      ! proton sector / conjugate-sector loop indices
integer(8) :: ip,in                     ! proton / neutron Slater-determinant indices
integer(kind=basis_prec) :: ibasis      ! combined basis index
integer Wp, Wn,W
logical, allocatable :: Wallowed(:)     ! which total-W values occur in the basis
integer :: nWvals,n
integer, allocatable :: Wlist(:)        ! distinct W values, in increasing order
real(8), allocatable :: Wfrac(:)        ! accumulated norm fraction per W value
real(8) :: ftmp,dv
integer istate
integer :: idummy
real(4) :: xj,xt,ei,ef,xtt,xjj
integer :: aerr

if(allsameW)return                      ! no W truncation => nothing to report
allocate(Wallowed(minWtot:maxWtot), stat=aerr )
if(aerr /= 0) then
   call memerror("Wcounter 1")
   stop 5
end if
Wallowed(:) = .false.
!................ FIND POSSIBLE W IN BASIS.................
! Total W of a basis state is the sum of the proton-sector W and the
! W of the conjugate neutron sector.
do is = 1,nsectors(1)
   wp = xsd(1)%sector(is)%Wx
   do isc = 1,xsd(1)%sector(is)%ncsectors
      jsc = xsd(1)%sector(is)%csector(isc)
      wn = xsd(2)%sector(jsc)%Wx
!................ ERROR TRAP..............
      if( Wp + Wn < minWtot .or. Wp+Wn > maxWtot)then
         print*,' wrong ws ',wp,wn, minwtot,maxwtot
         stop
      end if
      Wallowed(wp+wn)=.true.
   end do ! isc
end do ! is
nWvals = 0
!.............. COUNT HOW MANY................................
do w = minWtot,maxWtot
   if(Wallowed(w))nWvals = nWvals+1
end do
! A single W value means a trivial (100%) decomposition; just report.
if(nWvals==1)then
   if(iproc==0)then
      print*,' '
      print*,' Only one value of W in truncation encountered '
      print*,' '
      if(writeout)then
         write(resultfile,*)' '
         write(resultfile,*)' Only one value of W in truncation encountered '
         write(resultfile,*)' '
      end if
   end if
   return
end if
allocate(wlist(nWvals), stat=aerr)
if(aerr /= 0) call memerror("Wcounter 2")
n = 0
!........... CREATE LIST OF Ws............
do w = minWtot,maxWtot
   if(Wallowed(w))then
      n=n+1
      Wlist(n) = W
   end if
end do
allocate(Wfrac(minWtot:maxWtot), stat=aerr )
if(aerr /= 0) call memerror("Wcounter 3")
!................ GO THROUGH WAVEFUNCTIONS AND COUNT UP W.....
! Header row: W values are printed relative to minWtot (i.e. excitation).
if(iproc==0)then
   write(6,*)' '
   write(6,*)' % occupation of W-truncation subspaces '
   write(6,'(a8,10i7)')'State/W=',(wlist(n)-minwtot,n=1,nWvals)
   if(writeout)then
      write(resultfile,*)' '
      write(resultfile,*)' % occupation of W-truncation subspaces '
      write(resultfile,'(a8,10i7)')'State/W=',(wlist(n)-minWtot,n=1,nWvals)
   end if
end if
!...................LOOP OVER WFNS..............................
! When vectors are not held in core, position the wfn file at the start.
if(.not.storelanczosincore1 .and. .not.storelanczosincoreMPI)then
   call wfn_rewind(wfnfile)
   call read_wfn_header(wfnfile,.false.)
   call wfn_read_nkeep(wfnfile, n)  ! dummy reading in nkeep
   ! read(wfnfile)n  ! dummy reading in nkeep
endif
do istate = 1,nkeep
   if(storelanczosincore1 .or. storelanczosincoreMPI)then
      if(useNewReorthog) then
         call br_retrieve_hist(istate)
         ! It turns out that we need the vector loaded into both vec1 and vec2
         ! of course, they are different slices on each node
         call br_restore_vec1()
      else
         vec1 = 0.0 ! all ranks
         call read_lanczos_vector_a('n','i',istate,lvec_file)  ! read in rank=0
         if(storelanczosincoreMPI) then
            ! call block_reduce(dimbasis,vec1)
            ! each mpi process reads one slice. allreduce is overkill but works
            call BMPI_ALLREDUCE(vec1, size(vec1), MPI_SUM, icomm, ierr) ! in place
         end if
      end if
   else
      ! new interface, we say which vector to read and it checks
      call wfn_readeigenvec(wfnfile,frag1, fcomm1, vec1,istate,ei,xj,xtt)
   end if
   wfrac(:) = 0.d0
   ! figure out if this node is the first node in ifragment.
   ! Only "first" nodes accumulate, so each basis slice is counted once.
   if(nodal(iproc)%ifirst) then
      do is = 1,nsectors(1)
         wp = xsd(1)%sector(is)%Wx
         do isc = 1,xsd(1)%sector(is)%ncsectors
            jsc = xsd(1)%sector(is)%csector(isc)
            wn = xsd(2)%sector(jsc)%Wx
            ftmp = 0.d0
            ! Sum |amplitude|^2 over the basis states of this sector pair,
            ! restricted to the slice [v1s, v1e] owned by this process.
            do ip = xsd(1)%sector(is)%xsdstart,xsd(1)%sector(is)%xsdend
               do in = xsd(2)%sector(jsc)%xsdstart,xsd(2)%sector(jsc)%xsdend
                  ibasis = pstart(ip)+nstart(in)
                  if(ibasis .ge. v1s .and. ibasis .le. v1e) then
                     dv = vec1(ibasis)
                     ftmp = ftmp + dv*dv
                  end if
               end do
            end do
            wfrac(wp+wn) = wfrac(wp+wn)+ftmp
         end do ! isc
      end do ! is
   end if
   if(useNewReorthog) then
      ! have to reduce. Note that condition %ifirst above suppresses nodes that
      ! are not "first" in their fragment. This is not very efficient, but it works.
      call BMPI_ALLREDUCE(wfrac(:), SIZE(wfrac), MPI_SUM, icomm, ierr)  ! in place reduce
   end if
   ! Print percentages, ten W columns per row; a second row holds any overflow.
   if(iproc==0)then
      write(6,'(i4,4x,10f7.2)')istate,(Wfrac(wlist(n))*100.,n=1,min(nWvals,10))
      if(writeout)write(resultfile,'(i4,4x,10f7.2)')istate,(Wfrac(wlist(n))*100.,n=1,min(10,nWvals))
      if(nWvals> 10)then
         write(6,'(i4,4x,10f7.2)')istate,(Wfrac(wlist(n))*100.,n=11,nWvals)
         if(writeout)write(resultfile,'(i4,4x,10f7.2)')istate,(Wfrac(wlist(n))*100.,n=11,nWvals)
      end if
   end if
end do  ! istate
return
end subroutine Wcounter
!======================================================================
! counts up # of lanczos iterations so far
! used in restart option
!
! NOTE: not yet fully parallelized
subroutine countlanczositerations
!
!  Counts the Lanczos iterations already written to disk (restart
!  option) by reading vectors from the .lvec file until EOF, then
!  cross-checks the count against the .lcoef coefficient file.
!  The result (startiter, nkeep, thick_restart) is broadcast to all
!  MPI ranks.
!
!  BUG FIX: the original executed RETURN on rank 0 when the coefficient
!  loop finished without hitting EOF, so rank 0 skipped the closing
!  BMPI_BCAST calls while every other rank entered them -- a deadlock
!  in MPI runs (broadcasts are collective and must be called by all
!  ranks of the communicator).  The early return is replaced by a jump
!  to the broadcast section (label 103).
!
!  NOTE: not yet fully parallelized
!
use lanczos_info
use basis
use precisions
use localvectors
use nodeinfo
use flagger
use bmpi_mod
use butil_mod
implicit none

integer niter0
integer(4) :: ierr
integer(4) :: i,j,k
integer(4) :: iunit
integer(4) :: iread
real(kind=lanc_prec) :: v
integer(kind=basis_prec) :: jl,vstart,vstop
real(kind=4) :: a,b

if(iproc==0)then
!........... COUNT VECTORS ON DISK (read until EOF)..................
   startiter = 0
   do i = 1,10000
      read( lvec_file,end=101) ( v, jl = 1, dimbasis )
      startiter = startiter+1
   end do
101 continue
   rewind(lvec_file)
!........... CHECKS THAT THIS AGREES WITH lanczos coefficients file
   k = 0
   thick_restart = .false.
   do i = 1,startiter
      read(coef_file,*,end=102)j,a,b
      if(j < 0)then          ! negative index flags a thick-restart record
         thick_restart = .true.
         nkeep = -j-nthick_add
      end if
      k = k+1
   end do
   if(thick_restart)then
      print*,' Sorry, restart not fully working with thick-restart lanczos '
      stop
      ! print*,' Keeping ',nkeep,' states '
   end if
   go to 103        ! coefficient file consistent; proceed to broadcasts
102 continue
   ! coefficient file ended early; tolerate a deficit of one record
   if(k < startiter -1)then
      print*,' Problem with restarting ',startiter,k
      call BMPI_ABORT(icomm,101,ierr)
      stop
   endif
   startiter = bmin(startiter,k)
103 continue
end if
!.... BROADCAST VARIABLES (collective: executed by ALL ranks)
call BMPI_BCAST(startiter,1,0,icomm,ierr)
call BMPI_BCAST(nkeep,1,0,icomm,ierr)
call BMPI_BCAST(thick_restart,1,0,icomm,ierr)
return
end subroutine countlanczositerations
end module lanczos_util
| {
"alphanum_fraction": 0.5868022562,
"author": null,
"avg_line_length": 30.7414543195,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "5277342d530ed9719025e01d26c3517972190f86",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2019-10-15T18:18:41.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-01-27T18:17:15.000Z",
"max_forks_repo_head_hexsha": "b195fea5bd35438c9dd17858ce9d0a9726cda7ff",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "cwjsdsu/BigstickPublick",
"max_forks_repo_path": "src/blanczoslib.f90",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "b195fea5bd35438c9dd17858ce9d0a9726cda7ff",
"max_issues_repo_issues_event_max_datetime": "2018-01-27T05:49:40.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-01-27T05:49:40.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "cwjsdsu/BigstickPublick",
"max_issues_repo_path": "src/blanczoslib.f90",
"max_line_length": 112,
"max_stars_count": 12,
"max_stars_repo_head_hexsha": "b195fea5bd35438c9dd17858ce9d0a9726cda7ff",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cwjsdsu/BigstickPublick",
"max_stars_repo_path": "src/blanczoslib.f90",
"max_stars_repo_stars_event_max_datetime": "2022-03-07T16:02:34.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-01-26T02:01:47.000Z",
"num_tokens": 15006,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 49463
} |
import datetime
import pandas_datareader.data as web
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.ensemble import GradientBoostingClassifier as GBC
from sklearn.cross_validation import train_test_split
from sklearn.metrics import precision_score
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def download_stock_data(start_date=None, end_date=None, symbol="PETR4.SA"):
    """Download daily quotes for *symbol* from Yahoo Finance.

    Generalized: the date window and ticker symbol, previously
    hard-coded, are now parameters.  Defaults reproduce the original
    behavior (PETR4.SA, 2012-01-01 .. 2016-08-31) so existing callers
    are unaffected.

    Returns the DataFrame produced by pandas_datareader.
    """
    if start_date is None:
        start_date = datetime.datetime(2012, 1, 1)
    if end_date is None:
        end_date = datetime.datetime(2016, 8, 31)
    return web.DataReader(symbol, 'yahoo', start_date, end_date)
def create_new_variables(df):
    """Add one-day return ('Delta') and its sign ('Signal') columns.

    'Delta' is the day-over-day fractional change of 'Adj Close';
    'Signal' is True on up days.  The first row, whose return is
    undefined, is dropped.
    """
    adj_close = df['Adj Close']
    df['Delta'] = adj_close.div(adj_close.shift(1)) - 1
    df['Signal'] = df['Delta'] > 0
    return df.dropna()
def backward_filling(df, backward=30):
    """Append lagged copies of 'Delta' and 'Volume' as feature columns.

    For each lag i in 1..backward, creates a column '<col> -d{i}'
    holding the value of <col> from i rows earlier.  Rows that lack a
    complete set of lags are dropped.

    BUG FIX: the original body immediately overwrote the *backward*
    parameter with 30, so callers could never change the window.
    Also replaces the per-cell DataFrame.ix assignments (the .ix
    indexer was removed in pandas 1.0) with a vectorized shift(),
    which produces the same surviving rows in O(1) pandas calls per
    column instead of O(rows) Python-level writes.
    """
    for column in ['Delta', 'Volume']:
        for i in range(1, backward + 1):
            df["{} -d{}".format(column, i)] = df[column].shift(i)
    # Rows earlier than `backward` have at least one NaN lag and are dropped,
    # matching the original's behavior.
    return df.dropna()
class Backtester():
    """Backtests a labeling strategy on downloaded stock data with a
    gradient-boosting classifier.

    NOTE(review): written for Python 2 (print statement) and legacy
    APIs (`DataFrame.ix`, `sklearn.cross_validation`); will not run
    unmodified on modern pandas/scikit-learn -- confirm target
    environment.
    """

    def __init__(self):
        # Download once and precompute return + 30-day lag features;
        # evaluate() works on a fresh copy each time.
        self.df = download_stock_data()
        self.df = create_new_variables(self.df)
        self.df = backward_filling(self.df, backward=30)

    def forward_filling(self, forward=10, profit_margin=.05, stop_loss=.05):
        """Label each row by looking *forward* days ahead.

        A row gets Label 1 when, within the window, the price both
        rises by at least *profit_margin* AND falls by at least
        *stop_loss* relative to the row's close; otherwise 0.

        NOTE(review): the condition requires BOTH moves to occur --
        confirm this is the intended rule rather than
        "profit target hit before stop loss".
        """
        for row in range(self.ldf.shape[0]-forward):
            # initialize max and min ticks
            max_uptick = 0
            min_downtick = 0
            for i in range(1,forward+1):
                delta = (self.ldf.ix[row+i, 'Adj Close'] / self.ldf.ix[row, 'Adj Close'])-1
                if delta > max_uptick:
                    max_uptick = delta
                if delta < min_downtick:
                    min_downtick = delta
            # evaluate ticks against predefined strategy parameters
            if max_uptick >= profit_margin and min_downtick <= -stop_loss:
                self.ldf.ix[row,'Label'] = 1
            else:
                self.ldf.ix[row,'Label'] = 0
        # The last `forward` rows cannot be labeled; dropna removes them.
        self.ldf = self.ldf.dropna()

    def prep_data(self):
        """Split features/label into stratified 70/30 train/test sets."""
        X = self.ldf.drop('Label', axis=1)
        y = self.ldf['Label']
        return train_test_split(X, y, test_size=0.3, stratify=y)

    def score(self, X_train, X_test, y_train, y_test):
        """Fit a gradient-boosting classifier and return test precision."""
        clf = GBC().fit(X_train, y_train)
        return precision_score(y_test, clf.predict(X_test))

    def evaluate(self, forward, profit_margin, stop_loss):
        """Run one backtest with the given labeling parameters;
        prints and returns the precision score."""
        self.ldf = self.df.copy(deep=True)
        self.forward_filling(forward=forward, profit_margin=profit_margin, stop_loss=stop_loss)
        score = self.score(*self.prep_data())
        print "span: {}, profit_margin: {:.3f}, stop_loss: {:.3f} -- score: {:.3f}".format(
            forward, profit_margin, stop_loss, score)
        return score
| {
"alphanum_fraction": 0.6795516042,
"author": null,
"avg_line_length": 32.746835443,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "14947699a696725d73d94ec543286e4c46bb6f40",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-11-24T06:24:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-11-24T06:24:32.000Z",
"max_forks_repo_head_hexsha": "4ce6a7d8925c51b57bfeb4de988c1516ac6ff165",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lucasosouza/oraclia-research",
"max_forks_repo_path": "ENIAC-experiments/v-previous/stocks_prev.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4ce6a7d8925c51b57bfeb4de988c1516ac6ff165",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lucasosouza/oraclia-research",
"max_issues_repo_path": "ENIAC-experiments/v-previous/stocks_prev.py",
"max_line_length": 89,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "4ce6a7d8925c51b57bfeb4de988c1516ac6ff165",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lucasosouza/oraclia-research",
"max_stars_repo_path": "ENIAC-experiments/v-previous/stocks_prev.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-24T06:22:51.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-24T06:22:51.000Z",
"num_tokens": 700,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2587
} |
/*!@file
* @copyright This code is licensed under the 3-clause BSD license.
* Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
* See LICENSE.txt for details.
*/
#define BOOST_FILESYSTEM_NO_DEPRECATED
#include "boost/filesystem.hpp"
#include "boost/program_options.hpp"
#include "Molassembler/IO.h"
#include "Molassembler/Interpret.h"
#include "Molassembler/Graph.h"
#include "Molassembler/Molecule.h"
#include "Utils/Geometry/AtomCollection.h"
#include "Utils/Bonds/BondOrderCollection.h"
#include "Utils/IO/ChemicalFileFormats/ChemicalFileHandler.h"
#include <iostream>
#include <fstream>
#include <Eigen/Dense>
void readTriples(const std::string& file, Scine::Utils::BondOrderCollection& bondOrders) {
std::ifstream in(file);
std::string line;
unsigned numBondOrders = 0;
unsigned i;
unsigned j;
double value;
if(in.is_open()) {
while(true) {
in >> i >> j >> value;
if(in.eof() || in.fail()) {
break;
}
bondOrders.setOrder(i, j, value);
++numBondOrders;
}
}
std::cout << "Read " << numBondOrders << " bond orders from '" << file << "'\n";
}
/* Read a dense @p rows x @p cols comma-separated matrix from @p file.
 *
 * Improvements over the original:
 * - drops the const_cast/raw-char-pointer walk over line.c_str() in
 *   favor of std::string::find, with identical parsing semantics
 *   (atof stops at the first non-numeric character, so empty or
 *   malformed cells still yield 0.0);
 * - bounds-guards both loops, so a file with more rows or columns
 *   than declared no longer writes out of range in the Eigen matrix.
 * Cells absent from the file are left uninitialized, as before.
 */
Eigen::MatrixXd readCSV(const std::string& file, int rows, int cols) {
  Eigen::MatrixXd res(rows, cols);
  std::ifstream in(file);
  std::string line;
  int row = 0;
  while (row < rows && std::getline(in, line)) {
    int col = 0;
    std::string::size_type start = 0;
    while (col < cols) {
      // atof parses the leading number of the cell starting at `start`.
      res(row, col++) = atof(line.c_str() + start);
      std::string::size_type comma = line.find(',', start);
      if (comma == std::string::npos) {
        break;                       // last cell of the line
      }
      start = comma + 1;
    }
    ++row;
  }
  return res;
}
/* Command-line driver: reads a molecule file, optionally merges in bond
 * orders from a CSV matrix or a triples list, interprets the result
 * into one or more molecules, and writes .dot / .json / .mol outputs.
 */
int main(int argc, char* argv[]) {
  using namespace Scine;
  using namespace Molassembler;

  // Set up option parsing
  boost::program_options::options_description options_description("Recognized options");
  options_description.add_options()
    ("help", "Produce help message")
    ("m", boost::program_options::value<std::string>(), "Molecule data file (.xyz, .mol, etc)")
    ("bo-csv", boost::program_options::value<std::string>(), "Matrix CSV file to read as bond orders")
    ("bo-triples", boost::program_options::value<std::string>(), "Matrix triples list file to read as bond orders")
    ("threshold", boost::program_options::value<double>(), "Manual threshold for bond stereopermutator interpretation")
  ;

  // Parse
  boost::program_options::variables_map options_variables_map;
  boost::program_options::store(
    boost::program_options::parse_command_line(argc, argv, options_description),
    options_variables_map
  );
  boost::program_options::notify(options_variables_map);

  if(options_variables_map.count("m") > 0) {
    boost::filesystem::path filepath {
      options_variables_map["m"].as<std::string>()
    };
    if(!boost::filesystem::exists(filepath)) {
      std::cout << "The molecular file does not exist" << std::endl;
      return 1;
    }

    // This can throw in lots of cases
    auto readData = Utils::ChemicalFileHandler::read(filepath.string());
    const auto& atomCollection = readData.first;
    auto& bondOrders = readData.second;

    // If the molecule file itself carried no bond orders, one of the
    // two auxiliary inputs (--bo-csv or --bo-triples) is mandatory.
    if(bondOrders.empty()) {
      if(options_variables_map.count("bo-csv") == 0 && options_variables_map.count("bo-triples") == 0) {
        std::cout << "The molecule data file did not contain bond order information and you did not supply a file to read as bond orders\n";
        return 1;
      }

      const int N = atomCollection.size();
      bondOrders.resize(N);

      if(options_variables_map.count("bo-csv") == 1) {
        // Dense NxN matrix: only the strict upper triangle is used,
        // and zero entries are skipped.
        auto fileMatrix = readCSV(
          options_variables_map["bo-csv"].as<std::string>(),
          N,
          N
        );

        for(Eigen::Index i = 0; i < N; ++i) {
          for(Eigen::Index j = i + 1; j < N; ++j) {
            if(fileMatrix(i, j) > 0) {
              bondOrders.setOrder(i, j, fileMatrix(i, j));
            }
          }
        }
      } else if(options_variables_map.count("bo-triples") == 1) {
        readTriples(
          options_variables_map["bo-triples"].as<std::string>(),
          bondOrders
        );
      }
    }

    // Discretization threshold for turning fractional bond orders into
    // bonds; 1.4 unless overridden on the command line.
    double defaultThreshold = 1.4;
    if(options_variables_map.count("threshold") == 1) {
      defaultThreshold = options_variables_map["threshold"].as<double>();
    }

    auto interpretation = Interpret::molecules(atomCollection, bondOrders, Interpret::BondDiscretizationOption::RoundToNearest, defaultThreshold);
    auto positions = interpretation.componentMap.apply(atomCollection);

    // Single component: fixed file names; multiple: indexed file names.
    if(interpretation.molecules.size() == 1) {
      std::ofstream graphFile("interpreted.dot");
      graphFile << interpretation.molecules.front().dumpGraphviz();
      graphFile.close();

      IO::write("interpreted.json", interpretation.molecules.front());
      IO::write("interpreted.mol", interpretation.molecules.front(), positions.front().getPositions());
    } else {
      for(unsigned i = 0; i < interpretation.molecules.size(); ++i) {
        const auto& mol = interpretation.molecules[i];
        std::string filebase = "interpreted-" + std::to_string(i);

        std::ofstream graphFile(filebase + ".dot");
        graphFile << mol.dumpGraphviz();
        graphFile.close();

        IO::write(filebase + ".json", mol);
        IO::write(filebase + ".mol", mol, positions.at(i).getPositions());
      }
    }

    for(const auto& mol : interpretation.molecules) {
      std::cout << "Interpreted molecule N=" << mol.graph().N() << ": "
        << mol << "\n\n";
    }
  } else {
    // No molecule file given: print usage.
    std::cout << options_description << std::endl;
  }

  return 0;
}
| {
"alphanum_fraction": 0.6278498772,
"author": null,
"avg_line_length": 29.8534031414,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "e6a2b2a6cbe22c5a4a567a67c051bd9bb27b2bf1",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "dafc656b1aa846b65b1fd1e06f3740ceedcf22db",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "Dom1L/molassembler",
"max_forks_repo_path": "analysis/Interpret.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "dafc656b1aa846b65b1fd1e06f3740ceedcf22db",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "Dom1L/molassembler",
"max_issues_repo_path": "analysis/Interpret.cpp",
"max_line_length": 146,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "dafc656b1aa846b65b1fd1e06f3740ceedcf22db",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "Dom1L/molassembler",
"max_stars_repo_path": "analysis/Interpret.cpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1412,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 5702
} |
from PIL import Image
from OpenGL.GL import *
import numpy as np
class Number:
    """Renders non-negative integers as a row of textured digit quads
    via immediate-mode OpenGL.  Digit textures are loaded lazily on
    first use."""

    # One GL texture id slot per decimal digit; None until created.
    _digit_textures = [None] * 10
    _digit_to_path = [
        './assets/digit_0.png',
        './assets/digit_1.png',
        './assets/digit_2.png',
        './assets/digit_3.png',
        './assets/digit_4.png',
        './assets/digit_5.png',
        './assets/digit_6.png',
        './assets/digit_7.png',
        './assets/digit_8.png',
        './assets/digit_9.png',
    ]

    @staticmethod
    def draw(number, position, height):
        """Draw *number* starting at *position*; y is flipped against
        *height* so the origin is at the top.  Each digit occupies a
        0.4 x 0.4 quad."""
        y = height - position[1] - 1
        x = position[0]
        for offset, ch in enumerate(str(number)):
            digit_idx = ord(ch) - ord('0')
            if Number._digit_textures[digit_idx] is None:
                Number._init_texture(digit_idx)
            glBindTexture(GL_TEXTURE_2D, Number._digit_textures[digit_idx])
            left = x + offset * 0.4
            right = left + 0.4
            top = y + 0.4
            # Two triangles forming the digit quad, emitted in the
            # same vertex order as always.
            vertices = (
                (0, 1, left, y),
                (1, 1, right, y),
                (1, 0, right, top),
                (0, 1, left, y),
                (1, 0, right, top),
                (0, 0, left, top),
            )
            glBegin(GL_TRIANGLES)
            for u, v, vx, vy in vertices:
                glTexCoord2f(u, v)
                glVertex2f(vx, vy)
            glEnd()
        glBindTexture(GL_TEXTURE_2D, 0)

    @staticmethod
    def _init_texture(idx):
        """Load the PNG for digit *idx* and upload it as a GL texture."""
        Number._digit_textures[idx] = glGenTextures(1)
        image = Image.open(Number._digit_to_path[idx])
        imageData = np.array(list(image.getdata()), np.uint8)
        glBindTexture(GL_TEXTURE_2D, Number._digit_textures[idx])
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image.size[0], image.size[1], 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData)
        glBindTexture(GL_TEXTURE_2D, 0)
| {
"alphanum_fraction": 0.5874363328,
"author": null,
"avg_line_length": 31,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c311205fe3b81d18b0a46f8b7229c602df7aeca8",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "52f7078ef238e70cad84d8b9c051d9aba179fd13",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lfscheidegger/adventofcode2018",
"max_forks_repo_path": "day15.2/number.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "52f7078ef238e70cad84d8b9c051d9aba179fd13",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lfscheidegger/adventofcode2018",
"max_issues_repo_path": "day15.2/number.py",
"max_line_length": 118,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "52f7078ef238e70cad84d8b9c051d9aba179fd13",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lfscheidegger/adventofcode2018",
"max_stars_repo_path": "day15.1/number.py",
"max_stars_repo_stars_event_max_datetime": "2018-12-09T23:41:40.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-12-09T23:41:40.000Z",
"num_tokens": 633,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2356
} |
from __builtin__ import sorted
from docopt import docopt
import numpy as np
from representations.representation_factory import create_representation
def main():
    """Command-line driver: evaluates a word representation on an
    analogy task with both the additive (3CosAdd) and multiplicative
    (3CosMul) objectives, printing both accuracies.

    NOTE(review): Python 2 code (print statements).  Two
    representations are built from the same arguments: the raw one for
    word lookups and a row-normalized one for the similarity table.
    """
    args = docopt("""
    Usage:
        analogy_eval.py [options] <representation> <representation_path> <task_path>

    Options:
        --neg NUM    Number of negative samples; subtracts its log from PMI (only applicable to PPMI) [default: 1]
        --w+c        Use ensemble of word and context vectors (not applicable to PPMI)
        --eig NUM    Weighted exponent of the eigenvalue matrix (only applicable to SVD) [default: 0.5]
        --normalize  Use row-normalized word vectors
    """)

    print args['--normalize']
    representation = create_representation(args)
    # Force normalization for the similarity-table representation only.
    args['--normalize'] = True
    print args['--normalize']
    representation_sim = create_representation(args)
    data = read_test_set(representation, args['<task_path>'])
    xi, ix = get_vocab(data)
    accuracy_add, accuracy_mul = evaluate(
        representation, representation_sim, data, xi, ix)
    print args['<representation>'], args['<representation_path>'], '\t%0.3f' % accuracy_add, '\t%0.3f' % accuracy_mul
def read_test_set(representation, path):
    """Load analogy quadruples from *path* (one "a b c d" per line,
    lowercased).

    Quadruples containing any word absent from the representation's
    vocabulary (representation.wi) are skipped; the skip count is
    printed.
    """
    test = []
    unks = 0
    with open(path) as f:
        for line in f:
            analogy = line.strip().lower().split()
            a, b, c, d = analogy
            if a not in representation.wi or b not in representation.wi or c not in representation.wi or d not in representation.wi:
                unks += 1
                continue
            test.append(analogy)
    print 'skipped ' + str(unks) + ' unk sets'
    return test
def get_vocab(data):
    """Collect the vocabulary used by the analogy test set.

    Returns a pair (word -> index dict, sorted word list).
    """
    words = sorted({word for analogy in data for word in analogy})
    index_of = {word: position for position, word in enumerate(words)}
    return index_of, words
def evaluate(representation, representation_sim, data, xi, ix):
    """Score the analogy test set.

    Builds a similarity table from the normalized representation, then
    answers each quadruple with both objectives.  Returns
    (additive accuracy, multiplicative accuracy) over *data*.
    """
    sims = prepare_similarities(representation_sim, ix)
    hits_add = 0.0
    hits_mul = 0.0
    for a, a_, b, b_ in data:
        guess_add, guess_mul = guess(representation, sims, xi, a, a_, b)
        hits_add += guess_add == b_
        hits_mul += guess_mul == b_
    return hits_add / len(data), hits_mul / len(data)
def prepare_similarities(representation, vocab):
    """Build the similarity table: one row per test-vocabulary word,
    one column per word in the full representation.

    Rows are the representation vectors of *vocab* dotted against
    every word vector (representation.m).

    NOTE(review): dense results are rescaled from [-1, 1] to [0, 1]
    while sparse results are only densified, not rescaled -- presumably
    because sparse PPMI scores are already non-negative; confirm the
    asymmetry is intentional.
    """
    vocab_representation = representation.m[
        [representation.wi[w] for w in vocab]]
    # vocab_representation = representation.m[
    #     [representation.wi[w] if w in representation.wi else 0 for w in vocab]]
    sims = vocab_representation.dot(representation.m.T)

    # dummy = None
    # for w in vocab:
    #     if w not in representation.wi:
    #         dummy = representation.represent(w)
    #         break
    # if dummy is not None:
    #     for i, w in enumerate(vocab):
    #         if w not in representation.wi:
    #             vocab_representation[i] = dummy

    if type(sims) is not np.ndarray:
        sims = np.array(sims.todense())
    else:
        sims = (sims + 1) / 2
    return sims
def guess(representation, sims, xi, a, a_, b):
    """Predict the fourth analogy term for a : a_ :: b : ?.

    Scores every candidate with the additive (3CosAdd) and
    multiplicative (3CosMul) objectives; the three query words are
    zeroed out so they cannot be returned.  Returns
    (additive guess, multiplicative guess).
    """
    sim_a = sims[xi[a]]
    sim_a_ = sims[xi[a_]]
    sim_b = sims[xi[b]]

    def _mask_queries(scores):
        # Prevent the query words themselves from winning the argmax.
        for word in (a, a_, b):
            if word in representation.wi:
                scores[representation.wi[word]] = 0

    add_sim = -sim_a + sim_a_ + sim_b
    _mask_queries(add_sim)
    best_add = representation.iw[np.nanargmax(add_sim)]

    mul_sim = sim_a_ * sim_b * np.reciprocal(sim_a + 0.01)
    _mask_queries(mul_sim)
    best_mul = representation.iw[np.nanargmax(mul_sim)]

    return best_add, best_mul
# Script entry point.
if __name__ == '__main__':
    main()
| {
"alphanum_fraction": 0.6087503143,
"author": null,
"avg_line_length": 32.3333333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f66c258aa87f093e88eef1c58582531cd7345567",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2019-12-14T04:56:34.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-10-18T18:55:41.000Z",
"max_forks_repo_head_hexsha": "8a90d6d098ec2108e2dccd447b1c19acef53b59e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "HLTCHKUST/eigenvector-analysis",
"max_forks_repo_path": "hyperwords/analogy_eval.py",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "8a90d6d098ec2108e2dccd447b1c19acef53b59e",
"max_issues_repo_issues_event_max_datetime": "2021-11-18T10:19:26.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-03-28T18:24:08.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "HLTCHKUST/eigenvector-analysis",
"max_issues_repo_path": "hyperwords/analogy_eval.py",
"max_line_length": 133,
"max_stars_count": 14,
"max_stars_repo_head_hexsha": "8a90d6d098ec2108e2dccd447b1c19acef53b59e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "HLTCHKUST/eigenvector-analysis",
"max_stars_repo_path": "hyperwords/analogy_eval.py",
"max_stars_repo_stars_event_max_datetime": "2021-08-15T08:41:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-03-17T20:16:58.000Z",
"num_tokens": 1023,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3977
} |
------------------------------------------------------------------------
-- Brandt and Henglein's subterm relation
------------------------------------------------------------------------
module RecursiveTypes.Subterm where
open import Algebra
open import Data.Fin using (Fin; zero; suc; lift)
open import Data.Nat
open import Data.List using (List; []; _∷_; [_]; _++_)
open import Data.List.Properties
open import Data.List.Relation.Unary.Any using (here; there)
open import Data.List.Relation.Unary.Any.Properties
using (++↔; map-with-∈↔)
open import Data.List.Membership.Propositional
open import Data.List.Membership.Propositional.Properties
open import Data.List.Relation.Binary.BagAndSetEquality as BSEq
open import Data.List.Relation.Binary.Subset.Propositional
open import Data.List.Relation.Binary.Subset.Propositional.Properties
open import Data.Product
open import Data.Sum
open import Function.Base
open import Function.Equality using (_⟨$⟩_)
open import Function.Inverse using (module Inverse)
open import Relation.Binary
import Relation.Binary.Reasoning.Preorder as PreR
import Relation.Binary.Reasoning.Setoid as EqR
import Relation.Binary.PropositionalEquality as P
open import Data.Fin.Substitution
private
open module SetM {A : Set} =
CommutativeMonoid (BSEq.commutativeMonoid set A)
using (_≈_)
open import RecursiveTypes.Syntax
open import RecursiveTypes.Substitution
hiding (subst) renaming (id to idˢ)
open // using (_//_)
------------------------------------------------------------------------
-- Subterms
-- The subterm relation.
infix 4 _⊑_

-- The subterm relation: σ ⊑ τ when σ occurs as a (possibly improper)
-- subterm of τ.  μ-types are entered through their one-step unfolding
-- (the unfold constructor), so the relation follows Brandt/Henglein.
data _⊑_ {n} : Ty n → Ty n → Set where
  refl   : ∀ {τ} → τ ⊑ τ
  unfold : ∀ {σ τ₁ τ₂} (σ⊑ : σ ⊑ unfold[μ τ₁ ⟶ τ₂ ]) → σ ⊑ μ τ₁ ⟶ τ₂
  ⟶ˡ     : ∀ {σ τ₁ τ₂} (σ⊑τ₁ : σ ⊑ τ₁) → σ ⊑ τ₁ ⟶ τ₂
  ⟶ʳ     : ∀ {σ τ₁ τ₂} (σ⊑τ₂ : σ ⊑ τ₂) → σ ⊑ τ₁ ⟶ τ₂
-- Some simple consequences.

-- The unfolding of a μ-type is a subterm of it.
unfold′ : ∀ {n} {τ₁ τ₂ : Ty (suc n)} →
          unfold[μ τ₁ ⟶ τ₂ ] ⊑ μ τ₁ ⟶ τ₂
unfold′ = unfold refl

-- The domain of a function type is a subterm of it.
⟶ˡ′ : ∀ {n} {τ₁ τ₂ : Ty n} → τ₁ ⊑ τ₁ ⟶ τ₂
⟶ˡ′ = ⟶ˡ refl

-- The codomain of a function type is a subterm of it.
⟶ʳ′ : ∀ {n} {τ₁ τ₂ : Ty n} → τ₂ ⊑ τ₁ ⟶ τ₂
⟶ʳ′ = ⟶ʳ refl
-- The subterm relation is transitive; proceeds by induction on the
-- second derivation, pushing the first underneath each constructor.
trans : ∀ {n} {σ τ χ : Ty n} →
        σ ⊑ τ → τ ⊑ χ → σ ⊑ χ
trans σ⊑τ refl        = σ⊑τ
trans σ⊑τ (unfold τ⊑) = unfold (trans σ⊑τ τ⊑)
trans σ⊑τ (⟶ˡ τ⊑δ₁)   = ⟶ˡ (trans σ⊑τ τ⊑δ₁)
trans σ⊑τ (⟶ʳ τ⊑δ₂)   = ⟶ʳ (trans σ⊑τ τ⊑δ₂)
------------------------------------------------------------------------
-- Subterm closure
-- The list τ_∗ contains exactly the subterms of τ (possibly with
-- duplicates).
--
-- The important property is completeness, which ensures that the
-- number of distinct subterms of a given type is always finite.
mutual

  infix 7 _∗ _∗′

  -- τ ∗ lists τ itself followed by its subterm closure.
  _∗ : ∀ {n} → Ty n → List (Ty n)
  τ ∗ = τ ∷ τ ∗′

  -- Subterm closure of the immediate children.  For a μ-type the body's
  -- closure is taken under the substitution of the μ-type itself, so the
  -- list stays closed under unfolding.  Leaves (⊥, ⊤, var) contribute
  -- nothing.
  _∗′ : ∀ {n} → Ty n → List (Ty n)
  (τ₁ ⟶ τ₂)   ∗′ = τ₁ ∗ ++ τ₂ ∗
  (μ τ₁ ⟶ τ₂) ∗′ = (τ₁ ⟶ τ₂ ∷ τ₁ ∗ ++ τ₂ ∗) // sub (μ τ₁ ⟶ τ₂)
  τ           ∗′ = []
------------------------------------------------------------------------
-- Soundness
-- _/_ is monotonous in its left argument: substitution preserves the
-- subterm relation.
/-monoˡ : ∀ {m n σ τ} {ρ : Sub Ty m n} →
          σ ⊑ τ → σ / ρ ⊑ τ / ρ
/-monoˡ refl      = refl
/-monoˡ (⟶ˡ σ⊑τ₁) = ⟶ˡ (/-monoˡ σ⊑τ₁)
/-monoˡ (⟶ʳ σ⊑τ₂) = ⟶ʳ (/-monoˡ σ⊑τ₂)
/-monoˡ {ρ = ρ} (unfold {σ} {τ₁} {τ₂} σ⊑) =
  -- sub-commutes realigns the substituted unfolding so that the
  -- induction hypothesis fits under the unfold constructor.
  unfold $ P.subst (λ χ → σ / ρ ⊑ χ)
                   (sub-commutes (τ₁ ⟶ τ₂))
                   (/-monoˡ σ⊑)

-- Substituting a μ-type into a subterm of its own body yields a
-- subterm of the μ-type (via one unfolding).
sub-⊑-μ : ∀ {n} {σ : Ty (suc n)} {τ₁ τ₂} →
          σ ⊑ τ₁ ⟶ τ₂ → σ / sub (μ τ₁ ⟶ τ₂) ⊑ μ τ₁ ⟶ τ₂
sub-⊑-μ σ⊑τ₁⟶τ₂ = unfold (/-monoˡ σ⊑τ₁⟶τ₂)
-- Soundness: every element of τ ∗ is a subterm of τ.
sound : ∀ {n σ} (τ : Ty n) → σ ∈ τ ∗ → σ ⊑ τ
sound τ (here P.refl) = refl
sound (τ₁ ⟶ τ₂) (there σ∈) =
  -- Split the ++-membership and recurse into the matching child.
  [ ⟶ˡ ∘ sound τ₁ , ⟶ʳ ∘ sound τ₂ ]′
    (Inverse.from (++↔ {xs = τ₁ ∗}) ⟨$⟩ σ∈)
sound (μ τ₁ ⟶ τ₂) (there (here P.refl)) = unfold refl
sound (μ τ₁ ⟶ τ₂) (there (there σ∈))
  -- Peel off the substitution that _∗′ applied to the body's closure.
  with Inverse.from (map-∈↔ (λ σ → σ / sub (μ τ₁ ⟶ τ₂))
                            {xs = τ₁ ∗ ++ τ₂ ∗}) ⟨$⟩ σ∈
... | (χ , χ∈ , P.refl) =
  sub-⊑-μ $ [ ⟶ˡ ∘ sound τ₁ , ⟶ʳ ∘ sound τ₂ ]′
              (Inverse.from (++↔ {xs = τ₁ ∗}) ⟨$⟩ χ∈)
-- Leaves have an empty closure, so membership is impossible.
sound ⊥       (there ())
sound ⊤       (there ())
sound (var x) (there ())
------------------------------------------------------------------------
-- Completeness
-- Set-equality lemma: a suffix shared by both halves of a
-- concatenation can be factored out (uses idempotence of ++ under
-- set equality).
++-lemma : ∀ {A} xs ys {zs : List A} →
           (xs ++ zs) ++ (ys ++ zs) ≈ (xs ++ ys) ++ zs
++-lemma xs ys {zs} = begin
  (xs ++ zs) ++ (ys ++ zs)  ≈⟨ SetM.assoc xs zs (ys ++ zs) ⟩
  xs ++ (zs ++ (ys ++ zs))  ≈⟨ SetM.∙-cong (SetM.refl {x = xs}) (begin
    zs ++ (ys ++ zs)  ≈⟨ SetM.∙-cong (SetM.refl {x = zs}) (SetM.comm ys zs) ⟩
    zs ++ (zs ++ ys)  ≈⟨ SetM.sym $ SetM.assoc zs zs ys ⟩
    (zs ++ zs) ++ ys  ≈⟨ SetM.∙-cong (++-idempotent zs) SetM.refl ⟩
    zs ++ ys          ≈⟨ SetM.comm zs ys ⟩
    ys ++ zs          ∎) ⟩
  xs ++ (ys ++ zs)          ≈⟨ SetM.sym $ SetM.assoc xs ys zs ⟩
  (xs ++ ys) ++ zs          ∎
  where open EqR ([ set ]-Equality _)
open BSEq.⊆-Reasoning
mutual
-- Weakening "commutes" with _∗.
wk-∗-commute : ∀ k {n} (σ : Ty (k + n)) →
σ / wk ↑⋆ k ∗ ⊆ σ ∗ // wk ↑⋆ k
wk-∗-commute k σ (here P.refl) = here P.refl
wk-∗-commute k σ (there •∈•) = there $ wk-∗′-commute k σ •∈•
wk-∗′-commute : ∀ k {n} (σ : Ty (k + n)) →
σ / wk ↑⋆ k ∗′ ⊆ σ ∗′ // wk ↑⋆ k
wk-∗′-commute k (σ₁ ⟶ σ₂) = begin
σ₁ ⟶ σ₂ / wk ↑⋆ k ∗′ ≡⟨ P.refl ⟩
σ₁ / wk ↑⋆ k ∗ ++ σ₂ / wk ↑⋆ k ∗ ⊆⟨ ++⁺ (wk-∗-commute k σ₁)
(wk-∗-commute k σ₂) ⟩
σ₁ ∗ // wk ↑⋆ k ++ σ₂ ∗ // wk ↑⋆ k ≡⟨ P.sym $ map-++-commute
(λ σ → σ / wk ↑⋆ k) (σ₁ ∗) (σ₂ ∗) ⟩
(σ₁ ∗ ++ σ₂ ∗) // wk ↑⋆ k ≡⟨ P.refl ⟩
σ₁ ⟶ σ₂ ∗′ // wk ↑⋆ k ∎
wk-∗′-commute k (μ σ₁ ⟶ σ₂) = begin
(μ σ₁ ⟶ σ₂) / wk ↑⋆ k ∗′ ≡⟨ P.refl ⟩
σ₁ ⟶ σ₂ / wk ↑⋆ (suc k) / sub (σ / wk ↑⋆ k) ∷
(σ₁ / wk ↑⋆ (suc k) ∗ ++ σ₂ / wk ↑⋆ (suc k) ∗)
// sub (σ / wk ↑⋆ k) ⊆⟨ ++⁺ lem₁ lem₂ ⟩
σ₁ ⟶ σ₂ / sub σ / wk ↑⋆ k ∷
(σ₁ ∗ ++ σ₂ ∗) // sub σ // wk ↑⋆ k ≡⟨ P.refl ⟩
μ σ₁ ⟶ σ₂ ∗′ // wk ↑⋆ k ∎
where
σ = μ σ₁ ⟶ σ₂
lem₁ : _ ⊆ _
lem₁ = begin
[ σ₁ ⟶ σ₂ / wk ↑⋆ (suc k) / sub (σ / wk ↑⋆ k) ] ≡⟨ P.cong [_] $ P.sym $
sub-commutes (σ₁ ⟶ σ₂) ⟩
[ σ₁ ⟶ σ₂ / sub σ / wk ↑⋆ k ] ∎
lem₂ : _ ⊆ _
lem₂ = begin
(σ₁ / wk ↑⋆ (suc k) ∗ ++
σ₂ / wk ↑⋆ (suc k) ∗) // sub (σ / wk ↑⋆ k) ⊆⟨ map⁺ _ (++⁺ (wk-∗-commute (suc k) σ₁)
(wk-∗-commute (suc k) σ₂)) ⟩
(σ₁ ∗ // wk ↑⋆ (suc k) ++
σ₂ ∗ // wk ↑⋆ (suc k)) // sub (σ / wk ↑⋆ k) ≡⟨ P.cong (λ σs → σs // sub (σ / wk ↑⋆ k)) $
P.sym $ map-++-commute
(λ σ → σ / wk ↑⋆ suc k) (σ₁ ∗) (σ₂ ∗) ⟩
(σ₁ ∗ ++ σ₂ ∗) // wk ↑⋆ (suc k) // sub (σ / wk ↑⋆ k) ≡⟨ P.sym $ //.sub-commutes (σ₁ ∗ ++ σ₂ ∗) ⟩
(σ₁ ∗ ++ σ₂ ∗) // sub σ // wk ↑⋆ k ∎
wk-∗′-commute k (var x) = begin
var x / wk ↑⋆ k ∗′ ≡⟨ P.cong _∗′ (var-/-wk-↑⋆ k x) ⟩
var (lift k suc x) ∗′ ≡⟨ P.refl ⟩
[] ⊆⟨ (λ ()) ⟩
var x ∗′ // wk ↑⋆ k ∎
wk-∗′-commute k ⊥ = λ ()
wk-∗′-commute k ⊤ = λ ()
-- Substitution "commutes" with _∗.
sub-∗′-commute-var : ∀ k {n} x (τ : Ty n) →
var x / sub τ ↑⋆ k ∗′ ⊆ τ ∗ // wk⋆ k
sub-∗′-commute-var zero zero τ = begin
τ ∗′ ⊆⟨ there ⟩
τ ∗ ≡⟨ P.sym $ //.id-vanishes (τ ∗) ⟩
τ ∗ // wk⋆ zero ∎
sub-∗′-commute-var zero (suc x) τ = begin
var x / idˢ ∗′ ≡⟨ P.cong _∗′ (id-vanishes (var x)) ⟩
var x ∗′ ≡⟨ P.refl ⟩
[] ⊆⟨ (λ ()) ⟩
τ ∗ // wk⋆ zero ∎
sub-∗′-commute-var (suc k) zero τ = begin
[] ⊆⟨ (λ ()) ⟩
τ ∗ // wk⋆ (suc k) ∎
sub-∗′-commute-var (suc k) (suc x) τ = begin
var (suc x) / sub τ ↑⋆ suc k ∗′ ≡⟨ P.cong _∗′ (suc-/-↑ {ρ = sub τ ↑⋆ k} x) ⟩
var x / sub τ ↑⋆ k / wk ∗′ ⊆⟨ wk-∗′-commute zero (var x / sub τ ↑⋆ k) ⟩
var x / sub τ ↑⋆ k ∗′ // wk ⊆⟨ map⁺ _ (sub-∗′-commute-var k x τ) ⟩
τ ∗ // wk⋆ k // wk ≡⟨ P.sym $ //./-weaken (τ ∗) ⟩
τ ∗ // wk⋆ (suc k) ∎
sub-∗-commute : ∀ k {n} σ (τ : Ty n) →
σ / sub τ ↑⋆ k ∗ ⊆ σ ∗ // sub τ ↑⋆ k ++ τ ∗ // wk⋆ k
sub-∗-commute k σ τ (here P.refl) = here P.refl
sub-∗-commute k ⊥ τ •∈• = Inverse.to ++↔ ⟨$⟩ inj₁ •∈•
sub-∗-commute k ⊤ τ •∈• = Inverse.to ++↔ ⟨$⟩ inj₁ •∈•
sub-∗-commute k (var x) τ (there •∈•) = there $ sub-∗′-commute-var k x τ •∈•
sub-∗-commute k (σ₁ ⟶ σ₂) τ {χ} (there •∈•) = there $
χ ∈⟨ •∈• ⟩
(σ₁ / ρ) ∗ ++ (σ₂ / ρ) ∗ ⊆⟨ ++⁺ (sub-∗-commute k σ₁ τ)
(sub-∗-commute k σ₂ τ) ⟩
(σ₁ ∗ // ρ ++ τ ∗ // wk⋆ k) ++
(σ₂ ∗ // ρ ++ τ ∗ // wk⋆ k) ∼⟨ ++-lemma (σ₁ ∗ // ρ) (σ₂ ∗ // ρ) ⟩
(σ₁ ∗ // ρ ++ σ₂ ∗ // ρ) ++
τ ∗ // wk⋆ k ≡⟨ P.cong₂ _++_
(P.sym $ map-++-commute (λ σ → σ / ρ) (σ₁ ∗) (σ₂ ∗))
P.refl ⟩
(σ₁ ∗ ++ σ₂ ∗) // ρ ++ τ ∗ // wk⋆ k ∎
where ρ = sub τ ↑⋆ k
sub-∗-commute k (μ σ₁ ⟶ σ₂) τ (there (here P.refl)) =
there $ here $ P.sym $ sub-commutes (σ₁ ⟶ σ₂)
sub-∗-commute k (μ σ₁ ⟶ σ₂) τ {χ} (there (there •∈•)) = there $ there $
χ ∈⟨ •∈• ⟩
((σ₁ / ρ ↑) ∗ ++ (σ₂ / ρ ↑) ∗) // sub (σ / ρ) ⊆⟨ map⁺ _ (begin
(σ₁ / ρ ↑) ∗ ++ (σ₂ / ρ ↑) ∗ ⊆⟨ ++⁺ (sub-∗-commute (suc k) σ₁ τ)
(sub-∗-commute (suc k) σ₂ τ) ⟩
(σ₁ ∗ // ρ ↑ ++ τ ∗ // wk⋆ (suc k)) ++
(σ₂ ∗ // ρ ↑ ++ τ ∗ // wk⋆ (suc k)) ∼⟨ ++-lemma (σ₁ ∗ // ρ ↑) (σ₂ ∗ // ρ ↑) ⟩
(σ₁ ∗ // ρ ↑ ++ σ₂ ∗ // ρ ↑) ++
τ ∗ // wk⋆ (suc k) ≡⟨ P.cong₂ _++_
(P.sym $ map-++-commute
(λ σ → σ / ρ ↑) (σ₁ ∗) (σ₂ ∗))
P.refl ⟩
(σ₁ ∗ ++ σ₂ ∗) // ρ ↑ ++
τ ∗ // wk⋆ (suc k) ∎) ⟩
((σ₁ ∗ ++ σ₂ ∗) // ρ ↑ ++
τ ∗ // wk⋆ (suc k)) // sub (σ / ρ) ≡⟨ map-++-commute (λ χ → χ / sub (σ / ρ))
((σ₁ ∗ ++ σ₂ ∗) // ρ ↑) _ ⟩
(σ₁ ∗ ++ σ₂ ∗) // ρ ↑ // sub (σ / ρ) ++
τ ∗ // wk⋆ (suc k) // sub (σ / ρ) ≡⟨ P.cong₂ _++_
(P.sym $ //.sub-commutes (σ₁ ∗ ++ σ₂ ∗))
lem ⟩
(σ₁ ∗ ++ σ₂ ∗) // sub σ // ρ ++
τ ∗ // wk⋆ k ∎
where
ρ = sub τ ↑⋆ k
σ = μ σ₁ ⟶ σ₂
lem = ≡R.begin
τ ∗ // wk⋆ (suc k) // sub (σ / ρ) ≡R.≡⟨ P.cong₂ _//_ (//./-weaken (τ ∗)) P.refl ⟩
τ ∗ // wk⋆ k // wk // sub (σ / ρ) ≡R.≡⟨ //.wk-sub-vanishes (τ ∗ // wk⋆ k) ⟩
τ ∗ // wk⋆ k ≡R.∎
where
module ≡R = P.≡-Reasoning
-- The list contains all subterms.
complete : ∀ {n} {σ τ : Ty n} → σ ⊑ τ → σ ∈ τ ∗
complete refl = here P.refl
complete (⟶ˡ σ⊑τ₁) = there (Inverse.to ++↔ ⟨$⟩ inj₁ (complete σ⊑τ₁))
complete (⟶ʳ σ⊑τ₂) = there (Inverse.to (++↔ {P = P._≡_ _} {xs = _ ∗}) ⟨$⟩ inj₂ (complete σ⊑τ₂))
complete (unfold {σ} {τ₁} {τ₂} σ⊑) =
σ ∈⟨ complete σ⊑ ⟩
unfold[μ τ₁ ⟶ τ₂ ] ∗ ⊆⟨ sub-∗-commute zero (τ₁ ⟶ τ₂) τ ⟩
τ₁ ⟶ τ₂ ∗ // sub τ ++ τ ∗ // idˢ ≡⟨ P.cong (_++_ (τ₁ ⟶ τ₂ ∗ // sub τ))
(//.id-vanishes (τ ∗)) ⟩
τ ∗′ ++ τ ∗ ⊆⟨ ++⁺ (there {x = τ} {xs = τ ∗′}) id ⟩
τ ∗ ++ τ ∗ ∼⟨ ++-idempotent (τ ∗) ⟩
τ ∗ ∎
where τ = μ τ₁ ⟶ τ₂
------------------------------------------------------------------------
-- A wrapper function
-- Pairs up subterms with proofs.
subtermsOf : ∀ {n} (τ : Ty n) → List (∃ λ σ → σ ⊑ τ)
subtermsOf τ = map-with-∈ (τ ∗) (-,_ ∘′ sound τ)
-- subtermsOf is complete.
subtermsOf-complete : ∀ {n} {σ τ : Ty n} →
σ ⊑ τ → ∃ λ σ⊑τ → (σ , σ⊑τ) ∈ subtermsOf τ
subtermsOf-complete {σ = σ} {τ} σ⊑τ =
(-, Inverse.to (map-with-∈↔ {f = -,_ ∘′ sound τ}) ⟨$⟩
(σ , complete σ⊑τ , P.refl))
| {
"alphanum_fraction": 0.3758508663,
"author": null,
"avg_line_length": 41.0412698413,
"converted": null,
"ext": "agda",
"file": null,
"hexsha": "3b87b4754f644efb711ebc79a1720ef32acbf912",
"include": null,
"lang": "Agda",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "1b90445566df0d3b4ba6e31bd0bac417b4c0eb0e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nad/codata",
"max_forks_repo_path": "RecursiveTypes/Subterm.agda",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1b90445566df0d3b4ba6e31bd0bac417b4c0eb0e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nad/codata",
"max_issues_repo_path": "RecursiveTypes/Subterm.agda",
"max_line_length": 111,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "1b90445566df0d3b4ba6e31bd0bac417b4c0eb0e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nad/codata",
"max_stars_repo_path": "RecursiveTypes/Subterm.agda",
"max_stars_repo_stars_event_max_datetime": "2021-02-13T14:48:45.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-02-13T14:48:45.000Z",
"num_tokens": 5407,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 12928
} |
#
# Copyright (c) 2020. Asutosh Nayak (nayak.asutosh@ymail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import os
import re
from operator import itemgetter
import pandas as pd
import pickle
import numpy as np
import sklearn.feature_selection
import sklearn.model_selection
import sklearn.preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import mutual_info_classif
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils.class_weight import compute_class_weight
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import sklearn.utils
from tqdm.auto import tqdm
from logger import Logger
from utils import *
class DataGenerator:
    """End-to-end data pipeline for one stock: downloads price history,
    computes technical-indicator features and labels, selects a fixed
    subset of features, and serves (image, label, weight) batches."""

    def __init__(self, company_code, data_path='./stock_history', output_path='./outputs', strategy_type='original',
                 update=False, logger: Logger = None):
        """company_code: ticker symbol appended to BASE_URL for download.
        data_path: CSV path where the raw price history lives (or is saved).
        output_path: directory for the cached feature dataframe.
        strategy_type: 'original' paper labeling or '<short>_<long>_ma' crossover.
        update: True forces re-computation of cached features/labels.
        logger: optional Logger; log() falls back to print when absent.
        """
        self.company_code = company_code
        self.strategy_type = strategy_type
        self.data_path = data_path
        self.logger = logger
        # NOTE(review): an Alpha Vantage API key is hard-coded in this URL;
        # it should come from configuration/environment instead.
        self.BASE_URL = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED" \
                        "&outputsize=full&apikey=KD1I9P1L06Y003R9&datatype=csv&symbol="
        self.output_path = output_path
        # First and last feature columns; .loc[:, start_col:end_col] slices
        # the full feature block out of the dataframe elsewhere in the class.
        self.start_col = 'open'
        self.end_col = 'eom_26'
        self.update = update
        self.download_stock_data()
        # Compute the technical indicators and the labels.
        self.df = self.create_features()
        # Choose which feature columns become the image pixels.
        self.feat_idx = self.feature_selection()
        # NOTE(review): `sparse` was renamed `sparse_output` in
        # scikit-learn 1.2 — confirm the pinned sklearn version.
        self.one_hot_enc = OneHotEncoder(sparse=False, categories='auto')
        self.one_hot_enc.fit(self.df['labels'].values.reshape(-1, 1))
        self.batch_start_date = self.df.head(1).iloc[0]["timestamp"]
        self.test_duration_years = 1
        # NOTE(review): calls append_log directly, so this crashes when
        # logger is None even though self.log() tolerates that case.
        self.logger.append_log("{} has data for {} to {}".format(data_path, self.batch_start_date,
                                                                 self.df.tail(1).iloc[0]['timestamp']))
def log(self, text):
if self.logger:
self.logger.append_log(text)
else:
print(text)
##baixa os dados das ações
def download_stock_data(self):
path_to_company_data = self.data_path
print("path to company data:", path_to_company_data)
parent_path = os.sep.join(path_to_company_data.split(os.sep)[:-1])
if not os.path.exists(parent_path):
os.makedirs(parent_path)
print("Company Directory created", parent_path)
if not os.path.exists(path_to_company_data):
self.log("Downloading " + self.company_code + " data")
download_save(self.BASE_URL + self.company_code, path_to_company_data, self.logger)
else:
self.log("Data for " + self.company_code + " ready to use")
    def calculate_technical_indicators(self, df, col_name, intervals):
        """Append technical-indicator columns to df.

        df: OHLCV dataframe.
        col_name: price column (e.g. 'close') most indicators are based on.
        intervals: iterable of window lengths — one column per indicator
            per interval (e.g. get_SMA adds 'close_sma_<n>' columns, which
            create_label_short_long_ma_crossover later reads).

        All get_* helpers come from utils (imported via *); the
        commented-out indicators were deliberately excluded by the author.
        """
        # get_RSI(df, col_name, intervals) # faster but non-smoothed RSI
        get_RSI_smooth(df, col_name, intervals)  # momentum
        get_williamR(df, col_name, intervals)  # momentum
        get_mfi(df, intervals)  # momentum
        # get_MACD(df, col_name, intervals)  # momentum, ready to use +3
        # get_PPO(df, col_name, intervals)  # momentum, ready to use +1
        get_ROC(df, col_name, intervals)  # momentum
        get_CMF(df, col_name, intervals)  # momentum, volume EMA
        get_CMO(df, col_name, intervals)  # momentum
        get_SMA(df, col_name, intervals)
        get_SMA(df, 'open', intervals)
        get_EMA(df, col_name, intervals)
        get_WMA(df, col_name, intervals)
        get_HMA(df, col_name, intervals)
        get_TRIX(df, col_name, intervals)  # trend
        get_CCI(df, col_name, intervals)  # trend
        get_DPO(df, col_name, intervals)  # Trend oscillator
        get_kst(df, col_name, intervals)  # Trend
        get_DMI(df, col_name, intervals)  # trend
        get_BB_MAV(df, col_name, intervals)  # volatility
        # get_PSI(df, col_name, intervals)  # can't find formula
        get_force_index(df, intervals)  # volume
        get_kdjk_rsv(df, intervals)  # ready to use, +2*len(intervals), 2 rows
        get_EOM(df, col_name, intervals)  # volume momentum
        get_volume_delta(df)  # volume +1
        get_IBR(df)  # ready to use +1
def create_labels(self, df, col_name, window_size=11):
"""
Data is labeled as per the logic in research paper
Label code : BUY => 1, SELL => 0, HOLD => 2
params :
df => Dataframe with data
col_name => name of column which should be used to determine strategy
returns : numpy array with integer codes for labels with
size = total-(window_size)+1
"""
self.log("creating label with original paper strategy")
row_counter = 0
total_rows = len(df)
labels = np.zeros(total_rows)
labels[:] = np.nan
print("Calculating labels")
pbar = tqdm(total=total_rows)
while row_counter < total_rows:
if row_counter >= window_size - 1:
window_begin = row_counter - (window_size - 1)
window_end = row_counter
window_middle = (window_begin + window_end) // 2
min_ = np.inf
min_index = -1
max_ = -np.inf
max_index = -1
for i in range(window_begin, window_end + 1):
price = df.iloc[i][col_name]
if price < min_:
min_ = price
min_index = i
if price > max_:
max_ = price
max_index = i
if max_index == window_middle:
labels[window_middle] = 0
elif min_index == window_middle:
labels[window_middle] = 1
else:
labels[window_middle] = 2
row_counter = row_counter + 1
pbar.update(1)
pbar.close()
return labels
def create_label_short_long_ma_crossover(self, df, col_name, short, long):
"""
if short = 30 and long = 90,
Buy when 30 day MA < 90 day MA
Sell when 30 day MA > 90 day MA
Label code : BUY => 1, SELL => 0, HOLD => 2
params :
df => Dataframe with data
col_name => name of column which should be used to determine strategy
returns : numpy array with integer codes for labels
"""
def detect_crossover(diff_prev, diff):
if diff_prev >= 0 > diff:
# buy
return 1
elif diff_prev <= 0 < diff:
return 0
else:
return 2
get_SMA(df, 'close', [short, long])
labels = np.zeros((len(df)))
labels[:] = np.nan
diff = df['close_sma_' + str(short)] - df['close_sma_' + str(long)]
diff_prev = diff.shift()
df['diff_prev'] = diff_prev
df['diff'] = diff
res = df.apply(lambda row: detect_crossover(row['diff_prev'], row['diff']), axis=1)
print("labels count", np.unique(res, return_counts=True))
df.drop(columns=['diff_prev', 'diff'], inplace=True)
return res
##calcula indicadores e labels
def create_features(self):
if not os.path.exists(os.path.join(self.output_path, "df_" + self.company_code+".csv")) or self.update:
df = pd.read_csv(self.data_path, engine='python')
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.sort_values('timestamp', inplace=True)
df.reset_index(drop=True, inplace=True)
intervals = range(6, 27) # 21
self.calculate_technical_indicators(df, 'close', intervals)
self.log("Saving dataframe...")
df.to_csv(os.path.join(self.output_path, "df_" + self.company_code+".csv"), index=False)
else:
self.log("Technical indicators already calculated. Loading...")
df = pd.read_csv(os.path.join(self.output_path, "df_" + self.company_code+".csv"))
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.sort_values('timestamp', inplace=True)
df.reset_index(drop=True, inplace=True)
prev_len = len(df)
df.dropna(inplace=True)
df.reset_index(drop=True, inplace=True)
if 'labels' not in df.columns or self.update:
if re.match(r"\d+_\d+_ma", self.strategy_type):
short = self.strategy_type.split('_')[0]
long = self.strategy_type.split('_')[1]
df['labels'] = self.create_label_short_long_ma_crossover(df, 'close', short, long)
else:
df['labels'] = self.create_labels(df, 'close')
prev_len = len(df)
df.dropna(inplace=True)
df.reset_index(drop=True, inplace=True)
df.drop(columns=['dividend_amount', 'split_coefficient'], inplace=True)
df.to_csv(os.path.join(self.output_path, "df_" + self.company_code + ".csv"), index=False)
else:
print("labels já calculados")
return df
def feature_selection(self):
##define o intervalo em anos
df_batch = self.df_by_date(None, 10)
list_features = list(df_batch.loc[:, self.start_col:self.end_col].columns)
##normaliza os valores entre 0 e 1
mm_scaler = MinMaxScaler(feature_range=(0, 1)) # or StandardScaler?
##mm_scaler = StandardScaler() # or StandardScaler?
x_train = mm_scaler.fit_transform(df_batch.loc[:, self.start_col:self.end_col].values)
y_train = df_batch['labels'].values
num_features = 225 # should be a perfect square
topk = 350
select_k_best = SelectKBest(f_classif, k=topk)
select_k_best.fit(x_train, y_train)
selected_features_anova = itemgetter(*select_k_best.get_support(indices=True))(list_features)
select_k_best = SelectKBest(mutual_info_classif, k=topk)
select_k_best.fit(x_train, y_train)
selected_features_mic = itemgetter(*select_k_best.get_support(indices=True))(list_features)
common = list(set(selected_features_anova).intersection(selected_features_mic))
self.log("common selected featues:" + str(len(common)) + ", " + str(common))
if len(common) < num_features:
raise Exception(
'number of common features found {} < {} required features. Increase "topK"'.format(len(common),
num_features))
feat_idx = []
for c in common:
feat_idx.append(list_features.index(c))
feat_idx = sorted(feat_idx[0:225])
self.log(str(feat_idx))
return feat_idx
def df_by_date(self, start_date=None, years=5):
if not start_date:
start_date = self.df.head(1).iloc[0]["timestamp"]
end_date = start_date + pd.offsets.DateOffset(years=years)
df_batch = self.df[(self.df["timestamp"] >= start_date) & (self.df["timestamp"] <= end_date)]
return df_batch
def get_data(self, start_date=None, years=5):
df_batch = self.df_by_date(start_date, years)
x = df_batch.loc[:, self.start_col:self.end_col].values
x = x[:, self.feat_idx]
print('x:')
print(start_date)
print(years)
print(df_batch.loc[:, self.start_col:self.end_col].values)
print(x)
mm_scaler = MinMaxScaler(feature_range=(0, 1)) # or StandardScaler?
x = mm_scaler.fit_transform(x)
dim = int(np.sqrt(x.shape[1]))
x = reshape_as_image(x, dim, dim)
##converte array em imagens
x = save_array_as_images(x, dim, dim, './images', 'imagem')
x = np.stack((x,) * 3, axis=-1)
y = df_batch['labels'].values
sample_weights = self.get_sample_weights(y)
y = self.one_hot_enc.transform(y.reshape(-1, 1))
return x, y, df_batch, sample_weights
def get_sample_weights(self, y):
"""
calculate the sample weights based on class weights. Used for models with
imbalanced data and one hot encoding prediction.
params:
y: class labels as integers
"""
y = y.astype(int) # compute_class_weight needs int labels
class_weights = compute_class_weight('balanced', np.unique(y), y)
print("real class weights are {}".format(class_weights), np.unique(y))
print("value_counts", np.unique(y, return_counts=True))
sample_weights = y.copy().astype(float)
for i in np.unique(y):
sample_weights[sample_weights == i] = class_weights[i] # if i == 2 else 0.8 * class_weights[i]
# sample_weights = np.where(sample_weights == i, class_weights[int(i)], y_)
return sample_weights
| {
"alphanum_fraction": 0.607804878,
"author": null,
"avg_line_length": 39.8952095808,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1cc4b42f90fc8deade2c6c5e865b3387bb0b5013",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "bb809773cc11305048270feff879a73d41b85aef",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "julianazk/stock_cnn",
"max_forks_repo_path": "src/data_generator.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "bb809773cc11305048270feff879a73d41b85aef",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "julianazk/stock_cnn",
"max_issues_repo_path": "src/data_generator.py",
"max_line_length": 116,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "bb809773cc11305048270feff879a73d41b85aef",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "julianazk/stock_cnn",
"max_stars_repo_path": "src/data_generator.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3088,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 13325
} |
import logging
import os
import numpy as np
import tensorflow as tf
from cleverhans.attacks import CarliniWagnerL2
from cleverhans.compat import flags
from cleverhans.dataset import MNIST
from cleverhans.loss import CrossEntropy
from cleverhans.utils import grid_visual, AccuracyReport
from cleverhans.utils import set_log_level
from cleverhans.utils_tf import model_eval, tf_model_load
from cleverhans.train import train
from cleverhans.model_zoo.basic_cnn import ModelBasicCNN
VIZ_ENABLED = True
BATCH_SIZE = 128
NB_EPOCHS = 6
SOURCE_SAMPLES = 10
LEARNING_RATE = .001
CW_LEARNING_RATE = .2
ATTACK_ITERATIONS = 100
MODEL_PATH = os.path.join('models', 'mnist')
TARGETED = True
def load_data():
    """Load the full MNIST dataset via the cleverhans wrapper.

    Returns ((x_train, y_train), (x_test, y_test)) pairs of arrays."""
    dataset = MNIST(train_start=0, train_end=60000,
                    test_start=0, test_end=10000)
    train_pair = dataset.get_set('train')
    test_pair = dataset.get_set('test')
    return train_pair, test_pair
def get_cnn_model(nrows, ncols, nchannels, nlabels):
    """Build the basic CNN model and its cross-entropy training loss.

    nrows/ncols/nchannels: input image dimensions.
    nlabels: number of output classes.
    Returns (model, loss).

    NOTE(review): the placeholders x, y and the logits `preds` created
    below are locals that are discarded on return, while train_cnn_model
    later references free variables of the same names — this module looks
    like an unfinished refactor of a single script; confirm before use.
    """
    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, nrows, ncols,
                                          nchannels))
    y = tf.placeholder(tf.float32, shape=(None, nlabels))
    nb_filters = 64
    # Define TF model graph
    model = ModelBasicCNN('model1', nlabels, nb_filters)
    preds = model.get_logits(x)
    loss = CrossEntropy(model, smoothing=0.1)
    print("Defined TensorFlow model graph.")
    return model, loss
def train_cnn_model(model, x_train, y_train):
    """Train (or load) the MNIST CNN and evaluate it on clean test data.

    NOTE(review): the parameters model/x_train/y_train are immediately
    overwritten by load_data()/get_cnn_model() below, and the final
    evaluation references x, y, preds and report, none of which are
    defined in this scope (they are locals of get_cnn_model) — this
    function raises NameError as written. Appears to be a broken split of
    a formerly monolithic script.
    """
    # tf.set_random_seed(1234)
    sess = tf.Session()
    # set_log_level(logging.DEBUG)
    (x_train, y_train), (x_test, y_test) = load_data()
    nrows, ncols, nchannels = x_train.shape[1:4]
    nlabels = y_train.shape[1]
    model, loss = get_cnn_model(nrows, ncols, nchannels, nlabels)
    # Train an MNIST model
    train_params = {
        'nb_epochs': NB_EPOCHS,
        'batch_size': BATCH_SIZE,
        'learning_rate': LEARNING_RATE,
        'filename': os.path.split(MODEL_PATH)[-1]
    }
    rng = np.random.RandomState([2017, 8, 30])
    # check if we've trained before, and if we have, use that pre-trained model
    if os.path.exists(MODEL_PATH + ".meta"):
        print('loading from ', MODEL_PATH + ".meta")
        tf_model_load(sess, MODEL_PATH)
    else:
        print('training a new model ..')
        train(sess, loss, x_train, y_train, args=train_params, rng=rng)
        saver = tf.train.Saver()
        saver.save(sess, MODEL_PATH)
    # Evaluate the accuracy of the MNIST model on legitimate test examples
    eval_params = {'batch_size': BATCH_SIZE}
    # NOTE(review): x, y, preds are undefined here — NameError at runtime.
    accuracy = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)
    assert x_test.shape[0] == 10000 - 0, x_test.shape
    print('Test accuracy on legitimate test examples: {0}'.format(accuracy))
    # NOTE(review): `report` is also undefined in this scope.
    report.clean_train_clean_eval = accuracy
def cw_model(model, x_test, y_test):
    """Run a targeted Carlini-Wagner L2 attack against `model`, attacking
    one sample per class toward every other class, and report the attack
    success rate and mean L2 perturbation.

    NOTE(review): this function references sess, nb_classes, img_rows,
    img_cols, nchannels, x, y, preds and report as free variables that are
    not defined at module level in the visible code — it raises NameError
    as written. Like the rest of this module it appears to be a broken
    split of a single script.
    """
    # Instantiate a CW attack object
    cw = CarliniWagnerL2(model, sess=sess)
    assert SOURCE_SAMPLES == nb_classes
    # One test index per class: the first sample whose argmax label is i.
    idxs = [np.where(np.argmax(y_test, axis=1) == i)[0][0]
            for i in range(nb_classes)]
    # Initialize our array for grid visualization
    grid_shape = (nb_classes, nb_classes, img_rows, img_cols,
                  nchannels)
    grid_viz_data = np.zeros(grid_shape, dtype='f')
    # adversarial inputs: each source sample repeated once per target class
    adv_inputs = np.array(
        [[instance] * nb_classes for instance in x_test[idxs]],
        dtype=np.float32)
    one_hot = np.zeros((nb_classes, nb_classes))
    one_hot[np.arange(nb_classes), np.arange(nb_classes)] = 1
    adv_inputs = adv_inputs.reshape(
        (SOURCE_SAMPLES * nb_classes, img_rows, img_cols, nchannels))
    # Target labels: the full identity matrix repeated per source sample.
    adv_ys = np.array([one_hot] * SOURCE_SAMPLES,
                      dtype=np.float32).reshape((SOURCE_SAMPLES *
                                                 nb_classes, nb_classes))
    yname = "y_target"
    cw_params_batch_size = SOURCE_SAMPLES * nb_classes
    cw_params = {'binary_search_steps': 1,
                 yname: adv_ys,
                 'max_iterations': ATTACK_ITERATIONS,
                 'learning_rate': CW_LEARNING_RATE,
                 'batch_size': cw_params_batch_size,
                 'initial_const': 10}
    # generate only based on adv_inputs. The yname is also passed
    # through the params
    adv = cw.generate_np(adv_inputs,
                         **cw_params)
    eval_params = {'batch_size': np.minimum(nb_classes, SOURCE_SAMPLES)}
    # NOTE(review): sess, x, y, preds are undefined in this scope.
    adv_accuracy = model_eval(
        sess, x, y, preds, adv, adv_ys, args=eval_params)
    # Arrange adversarial examples into a (source, target) grid.
    for j in range(nb_classes):
        for i in range(nb_classes):
            grid_viz_data[i, j] = adv[i * nb_classes + j]
    print(grid_viz_data.shape)
    print('--------------------------------------')
    # Compute the number of adversarial examples that were successfully found
    print('Avg. rate of successful adv. examples {0:.4f}'.format(adv_accuracy))
    report.clean_train_adv_eval = 1. - adv_accuracy
    # Compute the average distortion introduced by the algorithm
    percent_perturbed = np.mean(np.sum((adv - adv_inputs)**2,
                                       axis=(1, 2, 3))**.5)
    print('Avg. L_2 norm of perturbations {0:.4f}'.format(percent_perturbed))
def __test():
    """Driver stub tying the attack pipeline together.

    NOTE(review): references nb_classes, sess and grid_viz_data, none of
    which are defined in this scope — raises NameError as written. The
    session close and grid visualization presumably belonged to the
    original monolithic script this module was split from.
    """
    report = AccuracyReport()
    ###########################################################################
    # Craft adversarial examples using Carlini and Wagner's approach
    ###########################################################################
    nb_adv_per_sample = str(nb_classes - 1)
    print('Crafting ' + str(SOURCE_SAMPLES) + ' * ' + nb_adv_per_sample +
          ' adversarial examples')
    print("This could take some time ...")
    # Close TF session
    sess.close()
    _ = grid_visual(grid_viz_data)
    return report
| {
"alphanum_fraction": 0.6389314276,
"author": null,
"avg_line_length": 36.73125,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1949466bf055f1183ddee54213a041256af1a4aa",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "56dea2a33c7da64bcc577b0c061a38406fdde101",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lihebi/AdvAE",
"max_forks_repo_path": "python/back/mycw-back.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "56dea2a33c7da64bcc577b0c061a38406fdde101",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lihebi/AdvAE",
"max_issues_repo_path": "python/back/mycw-back.py",
"max_line_length": 79,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "56dea2a33c7da64bcc577b0c061a38406fdde101",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lihebi/AdvAE",
"max_stars_repo_path": "python/back/mycw-back.py",
"max_stars_repo_stars_event_max_datetime": "2019-12-17T14:26:08.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-12-17T14:26:08.000Z",
"num_tokens": 1458,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5877
} |
# coding: utf-8
# In[177]:
import os
import pandas as pd
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import matplotlib.pyplot as plt
import hashlib
import sklearn as sk
import os
# In[178]:
path = '/home/catherinej/Downloads'
file = os.path.join(path, 'IrmaMudThicknessComparisons.xlsx')
mud = pd.read_excel(file)
mud
# mud.head()
# mud.info()
# mud.describe()
# In[179]:
mud.hist(bins=50, figsize=(5,5))
plt.show()
# In[180]:
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit()
for train_index, test_index in split.split(mud, mud['Site']):
mud_train_set = mud.loc[train_index]
mud_test_set = mud.loc[test_index]
# In[181]:
mud['Site'].value_counts()
# In[182]:
# mud.plot(kind='scatter', x='Lon', y="Lat", alpha=0.4,
# cmap=plt.get_cmap('jet'), colorbar=True)
# plt.legend()
# In[183]:
corr_matrix = mud.corr()
for key in corr_matrix:
print(key)
corr_matrix['Thickness at Date 2 (cm)'].sort_values(ascending=False)
# In[184]:
attributes = ['Lat', 'Lon', 'Thickness at Date 1 (cm)', 'Thickness at Date 2 (cm)',
]
pd.tools.plotting.scatter_matrix(mud[attributes], figsize=(6,4))
# In[185]:
data = mud.drop('Notes', axis=1)
data.dropna(subset=['Thickness at Date 1 (cm)'])
data.dropna(subset=['Thickness at Date 2 (cm)'])
data
# In[186]:
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy='median')
mud_num = data.drop('Region', axis=1)
mud_num = mud_num.drop('Site', axis=1)
mud_num = mud_num.drop('Station', axis=1)
mud_num = mud_num.dropna(subset=['Date 2'])
mud_num = mud_num.dropna(subset=['Thickness at Date 2 (cm)'])
# In[187]:
mud_num.plot(kind='scatter',x='Lon', y='Lat', alpha=0.1)
# In[188]:
# for t in mud_num['Thickness at Date 1 (cm)']:
mud_num = mud_num.replace('<0.1', 0)
mud_num = mud_num.replace('<1', 0.5)
# In[189]:
mud_num
# In[190]:
mud_num
# mud_num = mud_num.drop('Date 1',axis=1)
# In[191]:
corr_matrix = mud_num.corr()
corr_matrix['Thickness at Date 1 (cm)'].sort_values(ascending=False)
# In[192]:
from pandas.tools.plotting import scatter_matrix
attributes = ['Lat', 'Lon', 'Thickness at Date 1 (cm)']
scatter_matrix(mud_num[attributes], figsize=(12, 9))
mud_num.plot(kind='scatter', x='Thickness at Date 1 (cm)', y='Thickness at Date 2 (cm)', alpha=0.4)
# In[194]:
imputer.statistics_
mud_num.median().values
x = imputer.transform(mud_num)
# In[196]:
mud_num_tr = pd.DataFrame(x, columns=mud_num.columns)
# In[80]:
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
mud_cat = mud['Date 1']
mud_cat_encoded = encoder.fit_transform(mud_cat)
mud_cat_encoded
print(encoder.classes_)
# In[82]:
mud_cat = mud_train_set['Region']
# In[89]:
def encode_text(data_frame):
    """Integer-encode the 'Site' column of data_frame.

    Returns the encoded values as produced by sklearn's LabelEncoder."""
    from sklearn.preprocessing import LabelEncoder
    site_encoder = LabelEncoder()
    return site_encoder.fit_transform(data_frame['Site'])
# In[200]:
mud_cat = encode_text(mud_train_set)
cat = ['Thickness at Date 1 (cm)', 'Thickness at Date 2 (cm)', 'Date 1', 'Date 2']
df1 = mud_test_set.dropna(subset=cat)
df1 = mud_num.drop('Station', axis=1)
# In[201]:
def clean_dataframe(dataframe1, dataframe2):
    """Clean two mud-thickness dataframes in parallel.

    For each frame: drop rows missing any of the thickness/date columns,
    convert the sentinel strings '<0.1' and '<1' to numeric 0 and 0.5,
    and drop the 'Date 1', 'Date 2' and 'Notes' columns.

    Returns the two cleaned frames as a (df1, df2) tuple.

    Fix: removed an unused function-local LabelBinarizer import."""
    cat = ['Thickness at Date 1 (cm)', 'Thickness at Date 2 (cm)', 'Date 1', 'Date 2']
    df1 = dataframe1.dropna(subset=cat)
    df2 = dataframe2.dropna(subset=cat)
    # '<0.1' and '<1' are below-detection sentinels in the spreadsheet.
    df1 = df1.replace('<0.1', 0)
    df1 = df1.replace('<1', .5)
    df2 = df2.replace('<0.1', 0)
    df2 = df2.replace('<1', .5)
    drop_columns = ['Date 1', 'Date 2', 'Notes']
    df1 = df1.drop(drop_columns, axis=1)
    df2 = df2.drop(drop_columns, axis=1)
    return df1, df2
def categorize_text(df1, df2):
    """One-hot encode every object-dtype column of both frames.

    The categorical columns are detected on df1 and the same set is
    applied to df2 so the two frames keep parallel encodings;
    drop_first=True avoids the dummy-variable trap.

    Returns the two encoded frames as a (df1_cat, df2_cat) tuple.

    Fix: deleted the large block of commented-out MultiLabelBinarizer /
    OneHotEncoder experiments that followed the return statement."""
    categories = df1.select_dtypes(include=[object]).columns
    df1_cat = pd.get_dummies(df1, columns=categories, drop_first=True)
    df2_cat = pd.get_dummies(df2, columns=categories, drop_first=True)
    return df1_cat, df2_cat
# train_set = pd.concat([mud_train_set, train_code], axis=1)
train, test = clean_dataframe(mud_train_set, mud_test_set)
print(train)
train_code, test_code = categorize_text(train, test)
print(train_code)
# In[170]:
mud_median = pd.DataFrame.median(train_code['Thickness at Date 1 (cm)'])
# In[171]:
mud_median
# In[198]:
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
help(lin_reg.fit)
# lin_reg.fit(train_code, test_code)
| {
"alphanum_fraction": 0.6833862014,
"author": null,
"avg_line_length": 19.1787072243,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "54f7982bc8fa2f112eb115dd58d77420bdf09aeb",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d16be82102f6dd71e6bbb6085b0e2eaac8108a4b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "l-szczyrba/Irma_Sediment",
"max_forks_repo_path": "Sample_Scripts/mud_machine_learning.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d16be82102f6dd71e6bbb6085b0e2eaac8108a4b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "l-szczyrba/Irma_Sediment",
"max_issues_repo_path": "Sample_Scripts/mud_machine_learning.py",
"max_line_length": 99,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d16be82102f6dd71e6bbb6085b0e2eaac8108a4b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "l-szczyrba/Irma_Sediment",
"max_stars_repo_path": "Sample_Scripts/mud_machine_learning.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1486,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5044
} |
"""
Module of utility methods.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import re
import os
import sys
import time
import pickle
import random
import scipy.sparse
import numpy as np
import pandas as pd
import xgboost as xgb
import lightgbm as lgb
import termcolor
import sklearn.metrics as sm
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import PredefinedSplit
from sklearn.model_selection import GridSearchCV
class Util:
    def __init__(self):
        """Initialize utility bookkeeping state."""
        # Maximum magnitude of the uniform noise added by gen_noise.
        self.noise_limit = 0.000025
        # Stack of start times; end() pops the most recent one.
        self.timer = []
        # Directory list (not modified in the visible portion of this class).
        self.dirs = []
# public
def check_file(self, file):
"""Checks to see if the file exists
file: path of the file.
Returns True if it exists, exits the application if not."""
if os.path.exists(file):
return True
else:
self.exit('cannot read ' + file)
def clean_msg(self, msg):
"""Utility function to clean msg text by removing links, special
characters using simple regex statements."""
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|\
(\w+:\/\/\S+)", " ", msg).split())
def close_writer(self, sw):
"""Closes a file writer.
sw: file writer object."""
sw.close()
def colorize(self, string, color, display):
"""Gives the string the specified color if there is a display.
string: string to colorize.
color: color to give the string.
display: boolean indicating if the application is run on a consolde.
Returns a colorized string if there is a display, string otherwise."""
s = string
if display:
s = termcolor.colored(string, color)
return s
def create_dirs(self, path):
"""Creates all directories in path that doesn't already exist.
path: str, directory to create."""
if not os.path.exists(path):
os.makedirs(path)
def div0(self, num, denom):
"""Divide operation that deals with a 0 value denominator.
num: numerator.
denom: denominator.
Returns 0.0 if the denominator is 0, otherwise returns a float."""
return 0.0 if denom == 0 else float(num) / denom
def end(self, message='', fw=None):
"""Pop a start time and take the time difference from now.
message: message to print."""
unit = 's'
elapsed = time.time() - self.timer.pop()
if elapsed >= 60:
elapsed /= 60
unit = 'm'
s = message + '%.2f' + unit + '\n'
if fw is not None:
fw.write(s % (elapsed))
else:
self.out(s % (elapsed))
    def evaluate(self, data, test_probs):
        """Evaluates the predictions against the true labels.
        data: tuple of (features, labels, ids, feature names).
        test_probs: predictions to evaluate."""
        x, y, ids, feat_names = data
        if y is not None:  # skip evaluation when no ground truth is available
            t1 = self.out('evaluating...')
            auroc, aupr, p, r, mp, mr, t = self.compute_scores(test_probs, y)
            self.time(t1)
            self.print_scores(mp, mr, t, aupr, auroc)
            self.print_median_mean(ids, test_probs, y)
def exit(self, message='Unexpected error occurred!'):
"""Convenience method to fail gracefully.
message: messaage to display to the user as to the error."""
self.out(message)
self.out('exiting...')
exit(0)
def file_len(self, fname):
"""Counts the number of lines in a file.
fname: path of the file.
Returns the number of lines in the specified file."""
lines = 0
f = open(fname, 'r')
lines = len(f.readlines())
f.close()
return lines
def gen_noise(self, pred):
"""Returns a prediction with some noise added to it.
pred: predicion (e.g. value between 0.0 and 1.0).
Returns predictions with noise."""
noise = random.uniform(-self.noise_limit, self.noise_limit)
result = max(0.0, min(1.0, pred + noise))
return result
def get_comments_filename(self, modified):
"""Chooses the correct comments file to read
modified: Boolean indicating to read the modified comments file.
Returns the name of the appropriate comments file."""
filename = 'comments.csv'
if modified:
filename = 'modified.csv'
return filename
    def load(self, filename):
        """Loads a binary pickled object.
        filename: path of the file.
        Returns the loaded object; check_file() exits the application
        when the file is missing, so the guard never falls through."""
        if self.check_file(filename):
            with open(filename, 'rb') as f:
                obj = pickle.load(f)
        return obj
    def load_sparse(self, filename):
        """Loads a sparse matrix object (scipy .npz format).
        filename: path to the sparse matrix object file.
        Returns sparse matrix object."""
        matrix = scipy.sparse.load_npz(filename)
        return matrix
    def mean(self, numbers):
        """Computes the mean for a list of numbers.
        numbers: list of numbers.
        Returns mean as a float (numpy scalar)."""
        return np.mean(numbers)
    def out(self, message='', newline=1):
        """Custom print method to print multiple times on one line.
        message: string to print immediately.
        newline: when 1, prefix the message with a newline.
        Returns the current time so callers can pass it to self.time()."""
        msg = '\n' + message if newline == 1 else message
        sys.stdout.write(msg)
        sys.stdout.flush()  # flush so progress text appears immediately
        return time.time()
    def open_writer(self, name, mode='w'):
        """Opens a file for writing.
        name: path of the file.
        mode: file mode, default 'w'.
        Returns the open file object; close with close_writer()."""
        f = open(name, mode)
        return f
def percent(self, num, denom):
"""Turns fraction into a percent.
num: numerator.
denom: denominator.
Returns float in percent form."""
return self.div0(num, denom) * 100.0
    def plot_features(self, model, classifier, features, fname, save=True):
        """Plots relative feature importance.
        model: fitted model.
        classifier: model type string ('lr', 'rf', 'lgb' or 'xgb').
        features: list of feature names.
        fname: filename of where to store the plot.
        save: boolean of whether the plot should be saved."""
        if classifier == 'lr':
            feat_importance = model.coef_[0]
        elif classifier == 'rf' or classifier == 'lgb':
            feat_importance = model.feature_importances_
        elif classifier == 'xgb':
            # xgb has its own importance plot; relabel its 'fN' ticks with
            # the real feature names, save, and return early.
            try:
                ax = xgb.plot_importance(model._Booster)
                labels = ax.get_yticklabels()
                indices = [int(x.get_text().replace('f', '')) for x in labels]
                yticks = [features[ndx] for ndx in indices]
                ax.set_yticklabels(yticks)
                plt.savefig(fname + '_feats.png', bbox_inches='tight')
                plt.close('all')
            except ValueError:
                self.out('error plotting xgb feature importances...')
            return
        # normalize and rearrange features
        feat_norm = feat_importance / feat_importance.sum()
        sorted_idx = np.argsort(feat_norm)
        pos = np.arange(sorted_idx.shape[0]) + 0.5  # [0.5, 1.5, ...]
        feat_importance_sort = feat_norm[sorted_idx]
        feat_sort = np.asanyarray(features)[sorted_idx]
        # plot relative feature importance
        color = '#7A68A6'
        plt.figure(figsize=(12, 12))
        plt.barh(pos, feat_importance_sort, align='center', color=color)
        plt.yticks(pos, feat_sort)
        plt.xlabel('Relative Importance')
        plt.title('Feature Importance')
        plt.savefig(fname + '_feats.pdf', bbox_inches='tight', format='pdf')
        plt.close('all')
    def plot_pr_curve(self, model, fname, rec, prec, aupr, title='',
                      line='-', save=False, show_legend=False, show_grid=False,
                      more_ticks=False):
        """Plots a precision-recall curve.
        model: name of the model (used in the legend label).
        fname: filename to save the plot.
        rec: recalls from the aupr.
        prec: precisions from the aupr.
        aupr: area under the pr curve.
        title: title of the plot.
        line: shape used to draw the curve.
        save: boolean specifying whether to save the plot."""
        self.set_plot_rc()
        # if ax is None:
        #     fig, ax = plt.subplots()
        # figure(2) is reused across calls so multiple curves stack
        plt.figure(2)
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.0])
        plt.title(title, fontsize=22)
        plt.xlabel('Recall', fontsize=22)
        plt.ylabel('Precision', fontsize=22)
        plt.tick_params(axis='both', labelsize=18)
        # else:
        #     plt.figure(2)
        plt.plot(rec, prec, line, label=model + ' = %0.3f' % aupr)
        if show_legend:
            plt.legend(loc='lower left', prop={'size': 6})
        if show_grid:
            ax = plt.gca()
            ax.grid(b=True, which='major', color='#E5DCDA', linestyle='-')
        if more_ticks:
            plt.yticks(np.arange(0.0, 1.01, 0.1))
            plt.xticks(np.arange(0.0, 1.01, 0.1), rotation=70)
        if save:
            plt.savefig(fname + '.pdf', bbox_inches='tight', format='pdf')
            plt.clf()
            plt.close('all')
def print_stats(self, df, r_df, relation, dset, fw=None):
"""Prints information about a relationship in the data.
df: comments dataframe.
r_df: df containing number of times relationship occurred.
relation: name of relation (e.g. posts).
dset: dataset (e.g. 'val' or 'test')."""
spam = r_df['label'].sum()
out_str = '\n\t[' + dset + '] ' + relation + ': >1: ' + str(len(r_df))
out_str += ', spam: ' + str(spam)
self.write(out_str, fw=fw)
    def pushd(self, dir):
        """Change the working directory to *dir*, remembering the current one.
        dir: directory to enter; restore the previous one with popd()."""
        curd = os.getcwd()
        self.dirs.append(curd)
        os.chdir(dir)
    def popd(self):
        """Return to the directory saved by the matching pushd()."""
        os.chdir(self.dirs.pop())
    def read_csv(self, filename):
        """Safe read for pandas dataframes.
        filename: path to data file.
        Returns dataframe if the file exists, None otherwise."""
        result = None
        if os.path.exists(filename):
            result = pd.read_csv(filename)
        return result
    def save(self, obj, filename):
        """Pickles an object to a binary file.
        obj: object to pickle.
        filename: path of the file."""
        with open(filename, 'wb') as f:
            pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
    def save_sparse(self, matrix, filename):
        """Saves a sparse matrix object to a file (scipy .npz format).
        matrix: sparse matrix object.
        filename: path to the file to save the object to."""
        scipy.sparse.save_npz(filename, matrix)
    def set_noise_limit(self, noise_limit):
        """Setter for noise_limit (max jitter used by gen_noise)."""
        self.noise_limit = noise_limit
    def start(self, message='', fw=None):
        """Pushes a start time onto the timer stack and print a message.
        message: message to print.
        fw: optional file writer for the message; pair with end()."""
        self.write(message=message, fw=fw)
        self.timer.append(time.time())
    def test(self, data, model, fsets=['all']):
        """Tests data using a trained model.
        data: tuple including data to classify.
        model: trained model.
        fsets: feature set names; used to detect the xgb+ngrams case.
        Returns predictions and ids associated with those predictions."""
        # NOTE(review): the mutable default fsets=['all'] is only read,
        # never mutated, so it is harmless here.
        x, y, ids, feat_names = data
        if type(model) == xgb.XGBClassifier and \
                any(x in fsets for x in ['ngrams', 'all']):
            x = x.tocsc()  # bug in xgb, turn on when stacking is on.
        t1 = self.out('testing...')
        if type(model) == lgb.LGBMClassifier:
            ys = model.predict_proba(x, num_iteration=model.best_iteration_)
        else:
            ys = model.predict_proba(x)
        self.time(t1)
        return ys, ids
    def time(self, t):
        """Print the time elapsed since *t* with an s/m/h suffix.
        t: start time, as returned by self.out() or time.time()."""
        elapsed = time.time() - t
        if elapsed < 60:
            suffix = 's'
        elif elapsed < 3600:
            suffix = 'm'
        else:
            suffix = 'h'
        if suffix == 'm':
            elapsed /= 60.0
        elif suffix == 'h':
            elapsed /= 3600.0
        self.out('%.2f%s' % (elapsed, suffix), 0)
    def train(self, data, clf='rf', param_search='single', tune_size=0.15,
              scoring='roc_auc', n_jobs=1, verbose=1):
        """Trains a classifier with the specified training data.
        data: tuple including training data.
        clf: string of {'rf', 'lr', 'xgb', 'lgb'}.
        param_search: 'single' for fixed params, else grid search level.
        tune_size: fraction of training data held out for tuning.
        scoring: GridSearchCV scoring metric.
        Returns trained classifier."""
        x_train, y_train, _, features = data
        if param_search == 'single' or tune_size == 0:
            model, params = self.classifier(clf, param_search='single')
            model.set_params(**params)
        elif tune_size > 0:
            t1 = self.out('tuning...')
            model, params = self.classifier(clf, param_search=param_search)
            train_len = x_train.shape[0]
            # hold out the LAST tune_size fraction as a fixed validation fold
            split_ndx = train_len - int(train_len * tune_size)
            sm_x_train, x_val = x_train[:split_ndx], x_train[split_ndx:]
            sm_train_fold = np.full(sm_x_train.shape[0], -1)  # -1 = never in val
            val_fold = np.full(x_val.shape[0], 0)
            predefined_fold = np.append(sm_train_fold, val_fold)
            ps = PredefinedSplit(predefined_fold)
            cv = ps.split(x_train, y_train)
            m = GridSearchCV(model, params, scoring=scoring, cv=cv,
                             verbose=verbose, n_jobs=n_jobs)
            m.fit(x_train, y_train)
            model = m.best_estimator_
            self.time(t1)
        t1 = self.out('training...')
        if clf == 'lgb':
            # lightgbm gets an eval set for early stopping and explicit
            # categorical feature indices
            cat_feat = ['app', 'device', 'os', 'channel', 'hour']
            cat_feat_ndx = [features.index(x) for x in cat_feat]
            train_len = x_train.shape[0]
            split_ndx = train_len - int(train_len * tune_size)
            sm_x_train, x_val = x_train[:split_ndx], x_train[split_ndx:]
            sm_y_train, y_val = y_train[:split_ndx], y_train[split_ndx:]
            eval_set = (x_val, y_val)
            model = model.fit(sm_x_train, sm_y_train, eval_set=eval_set,
                              early_stopping_rounds=50, eval_metric='auc',
                              categorical_feature=cat_feat_ndx)
        else:
            model = model.fit(x_train, y_train)
        self.time(t1)
        self.out(str(model))
        return model
def write(self, message='', fw=None):
if fw is not None:
fw.write(message)
else:
self.out(message)
def classifier(self, classifier='rf', param_search='single'):
"""
Defines model and parameters to tune.
Parameters
----------
classifier : str, {'rf', 'xgb', 'lr1', 'lr2'}, default: 'rf'
Type of model to define.
param_search : str, {'low', 'med', 'high'}, default: 'low'
Level of parameters to tune.
input_dim : int, default = 0
Number of features input to the model.
Returns
-------
Defined model and dictionary of parameters to tune.
"""
if classifier == 'lr':
clf = LogisticRegression()
high = [{'penalty': ['l1', 'l2'],
'C': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5,
1.0, 2.0, 10.0, 50.0, 100.0, 500.0, 1000.0],
'solver': ['liblinear']},
{'penalty': ['l2'],
'C': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5,
1.0, 2.0, 10.0, 50.0, 100.0, 500.0, 1000.0],
'solver': ['newton-cg']}]
med = [{'penalty': ['l1', 'l2'],
'C': [0.0001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0],
'solver': ['liblinear']},
{'penalty': ['l2'],
'C': [0.0001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0],
'solver': ['newton-cg']}]
low = {'penalty': ['l2'],
'C': [0.0001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0],
'solver': ['liblinear'],
'class_weight': ['balanced']},
single = {'penalty': 'l2', 'C': 1000.0, 'solver': 'liblinear',
'class_weight': 'balanced'}
elif classifier == 'rf':
clf = RandomForestClassifier()
high = {'n_estimators': [10, 100, 1000], 'max_depth': [None, 2, 4]}
med = {'n_estimators': [1000], 'max_depth': [None, 2]}
low = {'n_estimators': [1000], 'max_depth': [None]}
single = {'n_estimators': 100, 'max_depth': 4}
elif classifier == 'lgb':
clf = lgb.LGBMClassifier()
high = {'max_depth': [3, 4, 6],
'n_estimators': [100, 1000],
'learning_rate': [0.3, 0.1, 0.05, 0.01, 0.005, 0.001],
'subsample': [0.8, 0.9, 1.0],
'colsample_bytree': [0.8, 0.9, 1.0]}
med = {'max_depth': [4, 6], 'n_estimators': [10, 100, 1000],
'learning_rate': [0.005, 0.05, 0.1],
'subsample': [0.9, 1.0], 'colsample_bytree': [1.0]}
low = {'max_depth': [4], 'boosting_type': ['gbdt'],
'n_estimators': [1500], 'subsample': [0.7],
'num_leaves': [7], 'colsample_bytree': [0.7, 0.9],
'min_child_samples': [100], 'max_bin': [100],
'learning_rate': [0.1, 0.2], 'min_child_weight': [0.0],
'scale_pos_weight': [500], 'verbose': [-1]}
single = {'max_depth': 4, 'n_estimators': 1500,
'learning_rate': 0.1, 'scale_pos_weight': 500,
'num_leaves': 7, 'min_child_samples': 100,
'subsample': 0.7, 'colsample_bytree': 0.7,
'min_child_weight': 0.0, 'verbose': -1}
# single = {'max_depth': 4, 'n_estimators': 1500, # not adclicks
# 'learning_rate': 0.1, 'scale_pos_weight': 500,
# 'num_leaves': 7, 'min_child_samples': 20,
# 'subsample': 0.7, 'colsample_bytree': 0.7,
# 'min_child_weight': 0.0, 'verbose': -1}
elif classifier == 'xgb':
clf = xgb.XGBClassifier()
high = {'max_depth': [3, 4, 6],
'n_estimators': [100, 1000],
'learning_rate': [0.3, 0.1, 0.05, 0.01, 0.005, 0.001],
'subsample': [0.8, 0.9, 1.0],
'colsample_bytree': [0.8, 0.9, 1.0]}
med = {'max_depth': [4, 6], 'n_estimators': [10, 100, 1000],
'learning_rate': [0.005, 0.05, 0.1],
'subsample': [0.9, 1.0], 'colsample_bytree': [1.0]}
low = {'max_depth': [6], 'n_estimators': [1000],
'learning_rate': [0.05], 'subsample': [0.9],
'colsample_bytree': [1.0]}
single = {'max_depth': 4, 'n_estimators': 100,
'learning_rate': 0.1, 'subsample': 1.0,
'colsample_bytree': 1.0, 'scale_pos_weight': 500}
param_dict = {'high': high, 'med': med, 'low': low, 'single': single}
param_grid = param_dict[param_search]
return (clf, param_grid)
    def compute_scores(self, probs, y):
        """Generates noisy predictions and computes various metrics.
        probs: predictions, shape=(<num_instances>, 2); column 1 is used.
        y: list of true labels.
        Returns auroc, aupr, precisions, recalls, max precision, max recall,
        and the threshold where those max values take place."""
        # jitter breaks ties between identical scores before ranking metrics
        prob_preds_noise = [self.gen_noise(pred) for pred in probs[:, 1]]
        fpr, tpr, tholds = sm.roc_curve(y, prob_preds_noise)
        # NOTE: tholds is rebound here to the PR-curve thresholds
        prec, rec, tholds = sm.precision_recall_curve(y, prob_preds_noise)
        aupr = sm.average_precision_score(y, prob_preds_noise)
        auroc = sm.auc(fpr, tpr)
        max_p, max_r, thold = self.find_max_prec_recall(prec, rec, tholds)
        return auroc, aupr, prec, rec, max_p, max_r, thold
def find_max_prec_recall(self, prec, rec, tholds):
"""Finds the precision and recall scores with the maximum amount of
area and returns their values, including the threshold.
prec: list of precisions from the pr curve.
rec: list of recalls from the pr curve.
tholds: list of thresholds from the pr curve.
Returns max precision and recall scores, including their threshold."""
max_val, max_prec, max_rec, max_thold = -1, -1, -1, -1
if len(tholds) > 1:
for i in range(len(prec)):
val = prec[i] * rec[i]
if val > max_val:
max_val = val
max_thold = tholds[i]
max_prec = prec[i]
max_rec = rec[i]
return max_prec, max_rec, max_thold
    def save_preds(self, probs, ids, fold, pred_f, dset, eval='cc'):
        """Save predictions to a specified file.
        probs: array of binary predictions; shape=(<num_instances>, 2).
        ids: list of identifiers for the data instances.
        fold: fold identifier used in the output filename.
        pred_f: folder to save predictions to.
        dset: dataset (e.g. 'train', 'val', 'test').
        eval: 'tt' additionally writes a gzipped kaggle-style submission."""
        columns = ['com_id', 'ind_pred']
        fname = dset + '_' + fold + '_preds'
        t1 = self.out('saving predictions...')
        preds = list(zip(ids, probs[:, 1]))
        preds_df = pd.DataFrame(preds, columns=columns)
        preds_df.to_csv(pred_f + fname + '.csv', index=None)
        if eval == 'tt':
            preds_df.columns = ['click_id', 'is_attributed']
            preds_df.to_csv(pred_f + fname + '.csv.gz', index=None,
                            compression='gzip')
        self.time(t1)
    def set_plot_rc(self):
        """Corrects for embedded fonts for text in plots (TrueType, type 42)."""
        plt.rc('pdf', fonttype=42)
        plt.rc('ps', fonttype=42)
    def print_median_mean(self, ids, probs, y, fw=None):
        """Prints the median and mean independent predictions for spam and ham.
        ids: comment ids.
        probs: independent predictions; column 1 is used.
        y: labels."""
        preds = list(zip(ids, probs[:, 1], y))
        df = pd.DataFrame(preds, columns=['com_id', 'ind_pred', 'label'])
        spam_med = df[df['label'] == 1]['ind_pred'].median()
        ham_med = df[df['label'] == 0]['ind_pred'].median()
        spam_mean = df[df['label'] == 1]['ind_pred'].mean()
        ham_mean = df[df['label'] == 0]['ind_pred'].mean()
        self.out('-> median spam: %.4f, ham: %.4f' % (spam_med, ham_med))
        self.out('-> mean spam: %.4f, ham: %.4f' % (spam_mean, ham_mean))
    def print_scores(self, max_p, max_r, thold, aupr, auroc, fw=None):
        """Print evaluation metrics to std out.
        max_p: maximum precision in pr curve at thold.
        max_r: maximum recall in pr curve at thold.
        thold: threshold where the maximum area is.
        aupr: area under the pr curve.
        auroc: area under the roc curve."""
        self.out('-> aupr: %.4f, auroc: %.4f' % (aupr, auroc))
        s = '-> max p: %.3f, max r: %.3f, area: %.3f, thold: %.3f'
        self.out(s % (max_p, max_r, max_p * max_r, thold))
| {
"alphanum_fraction": 0.5509617036,
"author": null,
"avg_line_length": 39.4779661017,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "be2993ba8f3a7c04da9a31a3a3a47d7341c97599",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e0bb8e9c843e26e5f4be8a49a960ebf7a0d5bfd5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "snspam/sn_spam",
"max_forks_repo_path": "analysis/util.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e0bb8e9c843e26e5f4be8a49a960ebf7a0d5bfd5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "snspam/sn_spam",
"max_issues_repo_path": "analysis/util.py",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e0bb8e9c843e26e5f4be8a49a960ebf7a0d5bfd5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "snspam/sn_spam",
"max_stars_repo_path": "analysis/util.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5968,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 23292
} |
import wikipedia
from wordcloud import WordCloud, STOPWORDS
import os
from PIL import Image
import numpy as np
#currdir = os.path.dirname(__file__)
def get_wiki(query):
    """Return the full text content of the top Wikipedia hit for *query*."""
    top_title = wikipedia.search(query)[0]
    return wikipedia.page(top_title).content
def create_wordcloud(text, mask=None):
    """Render *text* as a word cloud image saved to wc.png.

    text: source text to visualize.
    mask: optional image array constraining the cloud's shape.

    Bug fix: the original passed a global `mask` that was never defined
    (its setup was removed along with the commented-out currdir line),
    so every call raised NameError. It is now an explicit optional
    parameter defaulting to None (no shape constraint)."""
    stopwords = set(STOPWORDS)
    wc = WordCloud(background_color='white',
                   mask=mask,
                   max_words=200,
                   stopwords=stopwords)
    wc.generate(text)
    wc.to_file("wc.png")
create_wordcloud(get_wiki("python programming language")) | {
"alphanum_fraction": 0.6717948718,
"author": null,
"avg_line_length": 26.5909090909,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ac52d9c6ee61f60343fee15ae0e80eebfdb525a9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fbc7831d48206c0e1fa4ef68bcf7572b7877cc5e",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "William0Friend/undergrad_East_Stroudsburg_University",
"max_forks_repo_path": "research_team/WordCloud_Generator_with_scrapers_and_file_converters/py/wiki_cloud.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "fbc7831d48206c0e1fa4ef68bcf7572b7877cc5e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "William0Friend/undergrad_East_Stroudsburg_University",
"max_issues_repo_path": "research_team/WordCloud_Generator_with_scrapers_and_file_converters/py/wiki_cloud.py",
"max_line_length": 57,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "fbc7831d48206c0e1fa4ef68bcf7572b7877cc5e",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "William0Friend/undergrad_East_Stroudsburg_University",
"max_stars_repo_path": "research_team/WordCloud_Generator_with_scrapers_and_file_converters/py/wiki_cloud.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 129,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 585
} |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 3 15:30:41 2019
@author: autol
"""
#%%
from depends import ScaleX
from matrix_fun import Fill,Frob2,obj1,obj2,svdk,svd_,Prox,Frob1
import numpy as np
import time
from init_matrix import init_A1,init_A2,init_A3,init_A4
from sklearn.model_selection import ParameterGrid
from plotxy import plot_gd_xy,iters_matrix_plot
#%% SoftImpute with svda, svds, als implement calculate details inside the functions
def SoftImpute(X=0, k=2, λ=0,
               method='svda', trace=0, isScale=0,
               n_iters=1, final_svd=1, svdw=0, paras='',
               **kwargs):
    """Complete a partially observed matrix with low-rank SVD methods.

    X: matrix with np.nan marking missing entries.
    k: target rank / number of singular values kept.
    λ: soft-threshold (nuclear-norm regularization) strength.
    method: 'svda' rank-k SVD approximation, 'svds' soft-impute SVD, or
            'als' alternating least squares (after softImpute::simpute.als).
    isScale: standardize X first and invert the scaling at the end.
    n_iters: impute/refit iterations for 'svds' and 'als'.
    svdw: optional (U, d, Vt) warm start for 'als'; 0 means cold start.
    trace, final_svd, paras: kept for interface compatibility (unused).
    Returns dict(dh=iteration history array, finals=last iteration stats,
    method=method, Z=completed matrix).
    """
    time1 = time.time()
    isConv = 0
    n, m = X.shape ; xnas = np.isnan(X) ; nz = n*m - xnas.sum()
    if isScale: sc = ScaleX(ismean=1, isstd=1) ; X = sc.scalef(X);X
    Z = Fill(X.copy(), fill='zero');Z  # fill missing entries with zeros
    ratio = 0 ; dh = []
    Z0 = Z.copy() ; Z1 = 0 ; ratio = 0
    if 'svda' == method:
        print('SVD approximate', kwargs)
        # full_matrices=0: thin SVD, only min(n, m) singular vectors kept
        U, d, Vt = np.linalg.svd(Z, full_matrices=0)
        for i in range(min(k, min(Z.shape))):
            # accumulate one rank-1 term per iteration
            Z1 += d[i] * U[:, i][:, np.newaxis] @ Vt[i][np.newaxis]
            Z[xnas] = Z1[xnas]
            obj = obj1(Z, Z1, xnas, nz)
            ratio = Frob2(Z0, Z1)
            Z0 = Z1.copy()
            dicts = dict(ik=i, obj='%.3e'%obj, ratio='%.3e'%ratio) ; print(dicts)
            dh.append([i, obj, ratio])
            if i > len(d)-2: break
    elif 'svds' == method:
        print('Soft Impute SVD', kwargs)
        svdZ = svdk(Z, k)
        for i in range(n_iters):
            svdZ0 = svdZ
            d = Prox(svdZ[1], λ)  # soft-threshold the singular values
            Z1 = svdZ[0] @ np.diag(d) @ svdZ[2]
            Z[xnas] = Z1[xnas]
            svdZ = svdk(Z, k)
            d1 = Prox(svdZ[1], λ)
            obj = obj2(Z, Z1, xnas, nz, d1, λ)
            ratio = Frob1(svdZ0[0], d, svdZ0[2].T, svdZ[0], d1, svdZ[2].T)
            dicts = dict(ik=i, obj='%.3e'%obj, ratio='%.3e'%ratio) ; print(dicts)
            dh.append([i, obj, ratio])
    elif 'als' == method:  # see softImpute::simpute.als (R reference impl.)
        print('Soft Impute ALS', kwargs)
        if svdw:  # warm start: reuse a previous (u, d, v) decomposition
            Z = X.copy() ; J = k  # must have u,d and v components
            d = svdw[1] ; JD = sum(d > 0)
            print('JD=', JD, 'J=', J)
            if JD >= J:
                U = svdw[0][:, :J] ; V = (svdw[2].T)[:, :J] ; Dsq = d[:J][:, np.newaxis]
            else:
                # bug fix: the original referenced an undefined name `D`
                # in the next two lines; the singular values live in `d`.
                fill = np.repeat(d[JD-1], J-JD)  # pad with last value of d
                Dsq = np.append(d, fill)[:, np.newaxis]
                Ja = J-JD ; U = svdw[0]
                Ua = np.random.normal(size=n*Ja).reshape(n, Ja)
                Ua = Ua - U @ U.T @ Ua  # orthogonalize against existing U
                Ua = svd_(Ua)[0]
                U = np.column_stack((U, Ua))
                V = np.column_stack((svdw[2].T, np.repeat(0, m*Ja).reshape(m, Ja)))
            Z1 = U @ (Dsq*V.T)
            Z[xnas] = Z1[xnas]
            # print('Z=',Z.shape,'Z1=',Z1.shape)
        else:  # cold start: random orthonormal U, zero V
            # k = min(sum(svd(Z)[1]>0)+1,k)
            V = np.zeros((m, k))
            U = np.random.normal(size=n*k).reshape(n, k)
            U = svd_(U)[0]
            # Dsq = D_square = d^2; with A=UD and B=VD, AB = U Dsq Vt
            Dsq = np.repeat(1, k)[:, np.newaxis]
        print('Z=', Z.shape, 'u=', U.shape, 'dsq=', Dsq.shape, 'vt=', V.T.shape)
        for i in range(n_iters):
            U0, Dsq0, V0 = U, Dsq, V
            # U step
            B = U.T @ Z
            if λ > 0: B = B*(Dsq/(Dsq+λ))
            Bsvd = svd_(B.T)
            V = Bsvd[0] ; Dsq = Bsvd[1][:, np.newaxis] ; U = U @ Bsvd[2].T
            Z1 = U @ (Dsq*V.T)
            Z[xnas] = Z1[xnas]
            obj = obj2(Z, Z1, xnas, nz, Dsq, λ)
            # V step
            A = (Z @ V).T
            if λ > 0: A = A*(Dsq/(Dsq+λ))
            Asvd = svd_(A.T)
            U = Asvd[0] ; Dsq = Asvd[1][:, np.newaxis] ; V = V @ Asvd[2]
            Z1 = U @ (Dsq*V.T)
            Z[xnas] = Z1[xnas]
            # End U V steps
            ratio = Frob1(U0, Dsq0, V0, U, Dsq, V)
            # if ratio>1e-05: break
            dicts = dict(ik=i, obj='%.3e'%obj, ratio='%.3e'%ratio) ; print(dicts)
            dh.append([i, obj, ratio])
    if isScale: Z = sc.inverse_scalef(Z);Z
    time2 = time.time()
    print('All Running time: %s Seconds' % (time2-time1))
    return dict(dh=np.stack(dh), finals=dicts, method=method, Z=Z)
#%%
# Parameter grid for the batch experiment below; commented-out entries are
# alternative sweeps left in place for quick re-enabling.
pgrid = dict(
    method=['svds','als'],
    # λ=[0,.5,1],
    # method=['svda','svds'], # 'svds','als'
    # isScale = [0],
    k=[2,5,7],
    # den_X = [.4,.6], #np.random.randint(1,100,3) *1/100,
    n_X = [80,100], # np.random.randint(2,100,3),
    )
pgrid = list(ParameterGrid(pgrid));pgrid
rets=[]
for pg in pgrid:
    pg1 = pg.copy()
    # n_X / den_X size the random test matrix; the rest goes to SoftImpute
    n_X = pg.get('n_X',5) ; den_X = pg.get('den_X',.5)
    X = init_A2(n_X,n_X,den_X,rstat=1212);X
    # a = pg1.pop('isScale')
    # X = init_A3();X
    ret = SoftImpute(X,#λ=0,
                     # isScale = a,
                     n_iters = 13,
                     **pg1)
    Z = ret['Z']
    if np.isnan(Z).any():
        print('Z with nan');continue  # skip runs that failed to complete X
    rets.append(ret)
iters_matrix_plot(rets,pgrid)
#%%
# Single 'svds' run on a small matrix, with convergence plot.
np.set_printoptions(precision=2,suppress=1)
X = init_A2(10,10,.3,546);print(X)
ret = SoftImpute(X,method='svds',n_iters=25,λ=1)
Z= ret['Z'];Z
plot_gd_xy(ret)
#%%
X = init_A1(4,3);print(X)
#%%
# Scaling sanity check on a zero-filled matrix.
X = init_A2(6,6,.3,546);print(X)
X[np.isnan(X)] = 0
sc = ScaleX(ismean=1,isstd=1)
X = sc.scalef(X);X
#%%
X = init_A2(15,15,.6);X
rets = SoftImpute(X,k=13,n_iters=10)
#%%
# Dead code retained from the R reference implementation's final-SVD step.
# if i==n_iters:
#     print("Convergence not achieved by",n_iters,"iterations")
# if λ>0 and final_svd:
#     U = Z @ V
#     sU = svd(U)
#     U = sU[0] ; Dsq = sU[1] ;V = V @ sU[2].T
#     Dsq = Sλ(Dsq,λ)
#     if trace:
#         Z1 = U @ (Dsq*V.T)
#         print("final SVD:", "obj=",obj_(Z,Z1,Dsq).round(5),"\n")
"alphanum_fraction": 0.4930041152,
"author": null,
"avg_line_length": 34.1292134831,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3dbcacbaf3aa15adda77e4270cc72bd5264fc2bb",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-11-01T20:21:01.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-11-01T20:21:01.000Z",
"max_forks_repo_head_hexsha": "768dac67b802a6b98c9a1c7da587c0d6d4af95ac",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "autolordz/gradient-descent-optimization",
"max_forks_repo_path": "matrix_fact.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "768dac67b802a6b98c9a1c7da587c0d6d4af95ac",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "autolordz/gradient-descent-optimization",
"max_issues_repo_path": "matrix_fact.py",
"max_line_length": 124,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "768dac67b802a6b98c9a1c7da587c0d6d4af95ac",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "autolordz/gradient-descent-optimization",
"max_stars_repo_path": "matrix_fact.py",
"max_stars_repo_stars_event_max_datetime": "2020-05-14T13:29:55.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-24T17:24:01.000Z",
"num_tokens": 2156,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6075
} |
import numpy as np
from brl_gym.estimators.bayes_doors_estimator import BayesDoorsEstimator #, LearnableDoorsBF
from brl_gym.envs.mujoco.doors import DoorsEnv
from brl_gym.envs.mujoco.doors_slow import DoorsSlowEnv
from brl_gym.wrapper_envs.explicit_bayes_env import ExplicitBayesEnv
from brl_gym.wrapper_envs.env_sampler import DiscreteEnvSampler
from gym.spaces import Box, Dict
from gym import utils
class ExplicitBayesDoorsEnv(ExplicitBayesEnv, utils.EzPickle):
    """Doors environment whose observation includes the explicit Bayes belief.

    Wraps one of 2**num_doors DoorsEnv instances (one per open/closed door
    configuration) together with a Bayes filter over configurations; each
    observation is a dict with the raw env obs and the belief vector."""
    def __init__(self, reset_params=True,
                 reward_entropy=True, entropy_weight=1.0,
                 doors_slow=False, learnable_bf=False):
        self.num_doors = 4
        self.num_cases = 2**self.num_doors
        # binary strings '0000'..'1111', one per door configuration
        self.cases = ['{{:0{}b}}'.format(self.num_doors).format(x) \
            for x in range(self.num_cases)]
        self.cases_np = [np.array([int(x) for x in case]) for case in self.cases]
        envs = []
        env_class = DoorsEnv if not doors_slow else DoorsSlowEnv
        for case in self.cases_np:
            env = env_class()
            # plain bool: the np.bool alias was removed in numpy >= 1.24
            env.open_doors = case.astype(bool)
            envs += [env]
        if not learnable_bf:
            self.estimator = BayesDoorsEstimator()
        else:
            # NOTE(review): LearnableDoorsBF's import is commented out at the
            # top of the file, so this branch raises NameError if taken.
            self.estimator = LearnableDoorsBF()
        self.env_sampler = DiscreteEnvSampler(envs)
        super(ExplicitBayesDoorsEnv, self).__init__(env, self.estimator)
        self.nominal_env = env
        self.observation_space = Dict(
            {"obs": env.observation_space, "zbel": self.estimator.belief_space})
        self.internal_observation_space = env.observation_space
        self.env = env
        self.reset_params = reset_params
        self.reward_entropy = reward_entropy
        if reward_entropy:
            self.entropy_weight = entropy_weight
        else:
            self.entropy_weight = 0.0
        utils.EzPickle.__init__(self)
    def _update_belief(self,
                       action,
                       obs,
                       **kwargs):
        """Run the Bayes filter one step; returns (belief, kwargs)."""
        # Estimate
        self.estimator.estimate(
            action, obs, **kwargs)
        belief = self.estimator.get_belief()
        return belief, kwargs
    def get_flat_belief(self):
        # bug fix: `self` was missing from the original signature, so any
        # call through an instance raised TypeError.
        return self.estimator.get_flat_belief()
    def step(self, action):
        """Step the wrapped env, update the belief, and return obs + belief."""
        prev_state = self.env.get_state().copy()
        obs, reward, done, info = self.env.step(action)
        info['prev_state'] = prev_state
        info['curr_state'] = self.env.get_state()
        info['done'] = done
        bel, info = self._update_belief(
            action,
            obs,
            **info)
        # normalized belief entropy (log base len(bel), epsilon-smoothed)
        entropy = np.sum(-np.log(bel+1e-5)/np.log(len(bel)) * (bel + 1e-5))
        ent_reward = -(entropy - self.prev_entropy)
        self.prev_entropy = entropy
        # reward += ent_reward * self.entropy_weight
        info['entropy'] = entropy
        # self.color_belief()
        # plain int: the np.int alias was removed in numpy >= 1.24
        info['label'] = self.nominal_env.open_doors.astype(int)
        return {'obs':obs, 'zbel':bel}, reward, done, info
    def reset(self):
        """Sample a new door configuration (if reset_params) and reset belief."""
        if self.reset_params:
            # re-sample until at least one door is open
            while True:
                self.env = self.env_sampler.sample()
                if not np.all(self.env.open_doors == False):
                    break
        obs = self.env.reset()
        self.estimator.reset()
        bel, _ = self._update_belief(action=None, obs=obs)
        entropy = np.sum(-np.log(bel)/np.log(bel.shape[0]) * bel)
        self.prev_entropy = entropy
        # self.color_belief()
        return {'obs':obs, 'zbel':bel}
    def color_belief(self):
        """Shade door geoms by (1 - belief) for visualization."""
        bel = self.estimator.belief
        for i, b in enumerate(bel.ravel()):
            self.env.model.geom_rgba[10+i, -1] = 1 - b
    def set_bayes_filter(self, file):
        """Load Bayes filter weights from *file* into the estimator."""
        self.estimator.set_bayes_filter(file)
class ExplicitBayesDoorsEnvNoEntropyReward(ExplicitBayesDoorsEnv):
    """Variant with reward_entropy disabled (reset_params stays enabled)."""
    def __init__(self):
        super(ExplicitBayesDoorsEnvNoEntropyReward, self).__init__(True, False)
# Instead of the belief, return best estimate
class UPMLEDoorsEnv(ExplicitBayesEnv, utils.EzPickle):
    """Doors environment exposing the MLE parameter estimate (not the full
    belief) in the observation dict, under the 'zparam' key."""
    def __init__(self, reset_params=True, reward_entropy=True):
        self.num_doors = 4
        self.num_cases = 2**self.num_doors
        # binary strings '0000'..'1111', one per door configuration
        self.cases = ['{{:0{}b}}'.format(self.num_doors).format(x) \
            for x in range(self.num_cases)]
        self.cases_np = [np.array([int(x) for x in case]) for case in self.cases]
        envs = []
        for case in self.cases_np:
            env = DoorsEnv()
            # plain bool: the np.bool alias was removed in numpy >= 1.24
            env.open_doors = case.astype(bool)
            envs += [env]
        self.estimator = BayesDoorsEstimator()
        self.env_sampler = DiscreteEnvSampler(envs)
        super(UPMLEDoorsEnv, self).__init__(env, self.estimator)
        self.nominal_env = env
        self.observation_space = Dict(
            {"obs": env.observation_space, "zparam": self.estimator.param_space})
        self.internal_observation_space = env.observation_space
        self.env = env
        self.reset_params = reset_params
        self.reward_entropy = reward_entropy
        utils.EzPickle.__init__(self)
    def _update_belief(self,
                       action,
                       obs,
                       **kwargs):
        """Run the Bayes filter one step; returns (belief, kwargs)."""
        # Estimate
        self.estimator.estimate(
            action, obs, **kwargs)
        belief = self.estimator.get_belief()
        return belief, kwargs
    def step(self, action):
        """Step the wrapped env; observation carries the MLE parameter."""
        prev_state = self.env.get_state().copy()
        obs, reward, done, info = self.env.step(action)
        info['prev_state'] = prev_state
        info['curr_state'] = self.env.get_state()
        bel, info = self._update_belief(
            action,
            obs,
            **info)
        # normalized belief entropy; its decrease shapes the reward
        entropy = np.sum(-np.log(bel+1e-5)/np.log(len(bel)) * (bel + 1e-5))
        ent_reward = -(entropy - self.prev_entropy)
        self.prev_entropy = entropy
        if self.reward_entropy:
            reward += ent_reward
        info['entropy'] = entropy
        param = self.estimator.get_mle()
        return {'obs':obs, 'zparam':param}, reward, done, info
    def reset(self):
        """Sample a new door configuration (if reset_params) and reset belief."""
        if self.reset_params:
            # re-sample until at least one door is open
            while True:
                self.env = self.env_sampler.sample()
                if not np.all(self.env.open_doors == False):
                    break
        obs = self.env.reset()
        self.estimator.reset()
        bel, _ = self._update_belief(action=None, obs=obs)
        entropy = np.sum(-np.log(bel)/np.log(bel.shape[0]) * bel)
        self.prev_entropy = entropy
        param = self.estimator.get_mle()
        return {'obs':obs, 'zparam':param}
class UPMLEDoorsEnvNoEntropyReward(UPMLEDoorsEnv):
    """UPMLE variant with reward_entropy disabled (reset_params enabled)."""
    def __init__(self):
        super(UPMLEDoorsEnvNoEntropyReward, self).__init__(True, False)
class BayesDoorsEntropyEnv(ExplicitBayesDoorsEnv):
    """
    Environment that provides entropy instead of belief as observation
    """
    def __init__(self, reward_entropy=True, reset_params=True, observe_entropy=True):
        super(BayesDoorsEntropyEnv, self).__init__(reward_entropy=reward_entropy, reset_params=reset_params)
        utils.EzPickle.__init__(self)
        entropy_space = Box(np.array([0.0]), np.array([1.0]))
        # bug fix: the original referenced a bare `env`, which is not defined
        # in this scope (NameError); the wrapped environment is self.env,
        # set by the superclass constructor.
        if observe_entropy:
            self.observation_space = Dict(
                {"obs": self.env.observation_space, "zentropy": entropy_space})
        else:
            self.observation_space = self.env.observation_space
        self.observe_entropy = observe_entropy
    def step(self, action):
        """Step and swap the belief entry of the observation for its entropy."""
        obs, reward, done, info = super().step(action)
        info['bel'] = obs['zbel'].copy()  # experts can still read the belief
        del obs['zbel']
        if self.observe_entropy:
            obs['zentropy'] = np.array([info['entropy']])
            return obs, reward, done, info
        else:
            return obs['obs'], reward, done, info
    def reset(self):
        """Reset and return the entropy-bearing observation."""
        obs = super().reset()
        if self.observe_entropy:
            obs['zentropy'] = np.array([self.prev_entropy])
            return obs
        else:
            return obs['obs']
# class BayesDoorsHiddenEntropyEnv(BayesDoorsEntropyEnv):
# """
# Hides entropy. Info has everything experts need
# """
# def __init__(self):
# super(BayesDoorsHiddenEntropyEnv, self).__init__(True, True, observe_entropy=False)
# self.observation_space = env.observation_space
# def step(self, action):
# obs, reward, done, info = super().step(action)
# return obs['obs'], reward, done, info
# def reset(self):
# obs = super().reset()
# return obs['obs']
if __name__ == "__main__":
    # Ad-hoc smoke tests; earlier variants are kept commented out for reference.
    # Test simple experts
    # env = ExplicitBayesDoorsEnv()
    # obs = env.reset()
    # doors = env.env.open_doors
    # simple_expert = SimpleExpert()
    # done = False
    # while not done:
    #     action = simple_expert.action(doors.reshape(1, -1), obs['obs'][:2].reshape(1, -1))
    #     obs, _, done, _ = env.step(action[0])
    #     env.render()

    # # Test expert
    # env = ExplicitBayesDoorsEnv()
    # obs = env.reset()
    # doors = env.env.open_doors
    # expert = Expert()
    # done = False
    # rewards = []
    # while not done:
    #     action = expert.action((obs['obs'].reshape(1, -1), obs['zbel'].reshape(1, -1)))
    #     print('obs', np.around(obs['obs'][:2], 2), 'act', action, 'zbel', obs['zbel'])
    #     obs, r, done, _ = env.step(action.ravel())
    #     env.render()
    #     rewards += [r]
    #     if done:
    #         break
    # print("Length", len(rewards))
    # print(np.sum(rewards))

    # Test upmle env
    # env = UPMLEDoorsEnv()
    # obs = env.reset()
    # for _ in range(100):
    #     o, r, d, info = env.step([0,0,1])
    #     print(o['zparam'], env.estimator.belief)
    # import IPython; IPython.embed()

    # Test entropy-only env: roll out an MLE expert until the episode ends
    # (capped at 300 steps), rendering and printing the belief each step.
    from brl_gym.experts.doors.expert import DoorsExpert as Expert
    env = BayesDoorsEntropyEnv()
    expert = Expert(mle=True)
    o = env.reset()
    print(o)
    info = []
    for _ in range(300):
        # Expert consumes the flat [obs, entropy] vector as a batch of one.
        o = np.concatenate([o['obs'], o['zentropy']], axis=0).reshape(1, -1)
        action = expert.action(o, info).ravel()
        action[-1] = 1  # force the last action component to 1 every step
        o, r, d, info = env.step(action)
        info = [info]  # expert.action expects a sequence of info dicts
        if d:
            break
        env.render()
        print("expert action", action, np.around(info[0]['bel'],1))
    import IPython; IPython.embed()
| {
"alphanum_fraction": 0.58262027,
"author": null,
"avg_line_length": 33.9290322581,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b9fcceb0c093f6c3ef29153b05f631d3ad76d719",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9c0784e9928f12d2ee0528c79a533202d3afb640",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "gilwoolee/brl_gym",
"max_forks_repo_path": "brl_gym/wrapper_envs/wrapper_doors.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9c0784e9928f12d2ee0528c79a533202d3afb640",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "gilwoolee/brl_gym",
"max_issues_repo_path": "brl_gym/wrapper_envs/wrapper_doors.py",
"max_line_length": 108,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "9c0784e9928f12d2ee0528c79a533202d3afb640",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "gilwoolee/brl_gym",
"max_stars_repo_path": "brl_gym/wrapper_envs/wrapper_doors.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-03T08:46:10.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-08-07T05:50:44.000Z",
"num_tokens": 2626,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10518
} |
import os
import sys

# BUG FIX: resolve the source directory from this file's location (the
# original intent, previously commented out) instead of a hard-coded
# absolute path, so the script also works outside the author's machine.
dir_path = os.path.dirname(os.path.realpath(__file__))

import numpy as np
import argparse
import cooler
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf
import torch
from torch.autograd import Variable
from scipy.stats.stats import pearsonr

# Make the local model/ and utils/ packages importable.
model_path = dir_path + "/models"
utils_path = dir_path + "/utils"
sys.path.insert(0, model_path)
sys.path.insert(0, utils_path)
import model
import utils
# Command-line interface: the input matrices may be given either as
# multi-resolution cool files (type 0) or as COO text files (type 1).
ap = argparse.ArgumentParser()
ap.add_argument("--LowRes_matrix_type", help = "0 if it is a cool file and 1 if it is a COO", required = True, type = int)
ap.add_argument("--LowRes_matrix_path", help = "path of low resolution cool file or low resolution COO", required = True)
ap.add_argument("--HighRes_matrix_type", help = "0 if it is a cool file and 1 if it is a COO", required = True, type = int)
ap.add_argument("--HighRes_matrix_path", help = "path of high resolution cool file or high resolution COO", required = True)
ap.add_argument("--resolution", help = "resolution needed if input is cool file", type = int)
ap.add_argument("--chr_num", help = "chromosome number to be used for evaluation", type = int)
ap.add_argument("--genome_type", help = "hg19 or hg38")
ap.add_argument("--network_path", help = "path of ConvNet learned for enhancing data")
ap.add_argument("--result_path", help = "path of folder to save result in")
args = vars(ap.parse_args())

# loading a model to be used to enhance
use_gpu = 0  # set to 1 to run the network on CUDA
# NOTE(review): presumably 40x40 input frames -> 28x28 output; confirm
# against model.Net's constructor.
Net = model.Net(40, 28)
Net.load_state_dict(torch.load(args['network_path']))
Net = Net.float()
if use_gpu:
    Net = Net.cuda()
# first we load low resolution matrix as an enhanced matrix and then reassign
# some parts included in enhanced frames.
if args['LowRes_matrix_type'] == 0:
    # Cool-file input: fetch the per-chromosome matrix and zero out NaNs.
    low_cool = cooler.Cooler(args['LowRes_matrix_path'] + '::/resolutions/' + str(args['resolution']))
    low_chr_mat = low_cool.matrix(balance = False).fetch("chr" + str(args['chr_num'])).astype(float)
    low_chr_mat[np.isnan(low_chr_mat)] = 0
    # Split the chromosome into frames (plus their index positions).
    chr_frames, chr_indices = utils.divide2(low_chr_mat,args['chr_num'])
    # Fetch a second copy to be overwritten in place with enhanced frames.
    enhanced_chr_mat = low_cool.matrix(balance = False).fetch("chr" + str(args['chr_num'])).astype(float)
    enhanced_chr_mat[np.isnan(enhanced_chr_mat)] = 0
    # Disabled baseline ("average" smoothing) kept for reference:
    """
    average_chr_mat = low_cool.matrix(balance = False).fetch("chr" + str(args['chr_num'])).astype(float)
    average_chr_mat[np.isnan(average_chr_mat)] = 0
    """
else:
    # COO input: utils.divide also writes a temporary .npy dense matrix.
    chr_frames, chr_indices = utils.divide(args['LowRes_matrix_path'], args['chr_num'], args['resolution'], args['genome_type'])
    low_chr_mat = np.load(args['LowRes_matrix_path'] + '_npy_form_tmp.npy')
    enhanced_chr_mat = np.load(args['LowRes_matrix_path'] + '_npy_form_tmp.npy')
    # average_chr_mat = np.load(args['LowRes_matrix_path'] + '_npy_form_tmp.npy')

# applying model on frames: batch frames as (N, 1, H, W) and run the ConvNet.
chr_frames = np.stack(chr_frames, axis = 0)
chr_indices = np.stack(chr_indices, axis = 0)
chr_frames = np.expand_dims(chr_frames, axis = 1)
lowres_set = torch.from_numpy(chr_frames).float()
enhanced_set = Net(Variable(lowres_set))
enhanced_set = enhanced_set.data.cpu().numpy()
# Drop the singleton channel dimension: (N, 1, h, w) -> (N, h, w).
enhanced_set = np.reshape(enhanced_set, (enhanced_set.shape[0], enhanced_set.shape[2], enhanced_set.shape[3]))
# using enhanced frames and index file to assign enhanced values.
# Each enhanced frame replaces the interior 28x28 window (offset 6..34)
# of its 40x40 source frame at position (x_pos, y_pos).
for i in range(chr_indices.shape[0]):
    x_pos = chr_indices[i,1]
    y_pos = chr_indices[i,2]
    enhanced_chr_mat[x_pos+6:x_pos+34,y_pos+6:y_pos+34] = enhanced_set[i,:,:]

# since we just enhance upper triangle frames, this is for making a matrix
# symmetric: mirror the strict upper triangle onto the lower triangle.
iu = np.triu_indices(enhanced_chr_mat.shape[0],1)
il = (iu[1],iu[0])
enhanced_chr_mat[il]=enhanced_chr_mat[iu]

# Disabled "average" baseline (6x6 box smoothing over a padded copy):
"""
chr_length = average_chr_mat.shape[0]
low_chr_mat2 = np.zeros((chr_length+6,chr_length+6))
low_chr_mat2[3:chr_length+3,3:chr_length+3] = low_chr_mat
for i1 in range(chr_length):
    for i2 in range(chr_length):
        average_chr_mat[i1,i2] = np.mean(low_chr_mat2[i1:i1+6,i2:i2+6])
"""

# loading high resolution matrix to evaluate result
if args['HighRes_matrix_type'] == 0:
    high_cool = cooler.Cooler(args['HighRes_matrix_path'] + '::/resolutions/' + str(args['resolution']))
    high_chr_mat = high_cool.matrix(balance = False).fetch("chr" + str(args['chr_num'])).astype(float)
    high_chr_mat[np.isnan(high_chr_mat)] = 0
else:
    # Frames are discarded; divide() is called only for its .npy side effect.
    _, _ = utils.divide(args['HighRes_matrix_path'], args['chr_num'], args['resolution'], args['genome_type'])
    high_chr_mat = np.load(args['HighRes_matrix_path'] + '_npy_form_tmp.npy')
def vec_of_dist(matrix, x):
    """Return the x-th superdiagonal of `matrix` as a plain list.

    Element i is matrix[i, i + x]; entries run until the diagonal leaves
    the matrix on the right (matrix.shape[1] - x values in total).
    """
    diagonal = []
    for row in range(matrix.shape[1] - x):
        diagonal.append(matrix[row, row + x])
    return diagonal
highVSlow_corr_list = []
highVSenhanced_corr_list = []
#highVSaverage_corr_list = []
# loops over 100 minor diagonals to compute correlations of those diagonals
# among low, enhanced and high resolution matrices (diagonal index == genomic
# distance in bins).
for dist in range(100):
    low_res_vec = vec_of_dist(low_chr_mat, dist)
    high_res_vec = vec_of_dist(high_chr_mat, dist)
    enhanced_vec = vec_of_dist(enhanced_chr_mat, dist)
    #average_vec = vec_of_dist(average_chr_mat, dist)
    highVSlow_corr_list.append(pearsonr(low_res_vec, high_res_vec)[0])
    highVSenhanced_corr_list.append(pearsonr(high_res_vec, enhanced_vec)[0])
    #highVSaverage_corr_list.append(pearsonr(high_res_vec, average_vec)[0])

# Zoom window (bin indices) for the example heatmaps below.
lb = 1095
ub = 1145
# NOTE(review): plt.show() here opens/blocks on an empty figure before any
# figure is created — looks like a leftover; confirm it is intentional.
plt.show()

# Write all result figures into a single multi-page PDF.
pdf = matplotlib.backends.backend_pdf.PdfPages(os.path.join(args['result_path'],'result.pdf'))
# Page 1: correlation-vs-distance curves.
fig = plt.figure()
plt.plot(highVSlow_corr_list, label = "highVSlow")
plt.plot(highVSenhanced_corr_list, label = "highVSenhanced")
#plt.plot(highVSaverage_corr_list, label = "highVSaverage")
plt.legend(loc='upper right', prop={'size': 5})
pdf.savefig(fig)
# Pages 2-4: low / enhanced / high resolution heatmaps of the zoom window.
fig = plt.figure()
plt.imshow(low_chr_mat[lb:ub,lb:ub])
pdf.savefig(fig)
fig = plt.figure()
plt.imshow(enhanced_chr_mat[lb:ub,lb:ub])
pdf.savefig(fig)
fig = plt.figure()
plt.imshow(high_chr_mat[lb:ub,lb:ub])
pdf.savefig(fig)
pdf.close()
| {
"alphanum_fraction": 0.7412610804,
"author": null,
"avg_line_length": 45.641221374,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b7de74a060b860a9c5dca45449a8295571c64845",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-08-01T16:27:21.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-08-01T16:27:21.000Z",
"max_forks_repo_head_hexsha": "b237ef1d30f3362b58a7180a6e66af03d7fe468b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nedo0shki/HiCPlus-PC",
"max_forks_repo_path": "src/runHiCPlus.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b237ef1d30f3362b58a7180a6e66af03d7fe468b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nedo0shki/HiCPlus-PC",
"max_issues_repo_path": "src/runHiCPlus.py",
"max_line_length": 128,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b237ef1d30f3362b58a7180a6e66af03d7fe468b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nedo0shki/HiCPlus-PC",
"max_stars_repo_path": "src/runHiCPlus.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1611,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5979
} |
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
import math
import matplotlib as mpl
#-------------------------------------------------------------------------------
def strain_stress_divergence_hist():
    """Histogram the stress-divergence errors of several discrete operators.

    Reads a grid file, an IC file containing the analytical stress divergence,
    and one output file per operator (Wachspress, PWL, Weak, WeakWachs);
    computes per-vertex differences from the analytical solution, then plots
    log-scale histograms of the absolute U-component error for vertices above
    20 degrees latitude and saves strain_stress_divergence_hist.png.
    """
    # grid: vertex count and latitudes (radians)
    fileGrid = Dataset("grid.40962.nc","r")
    nVertices = len(fileGrid.dimensions["nVertices"])
    latVertex = fileGrid.variables["latVertex"][:]
    fileGrid.close()

    # ic: analytical reference fields
    fileIC = Dataset("ic_40962.nc","r")
    # NOTE(review): uVelocity/vVelocity are loaded but never used below.
    uVelocity = fileIC.variables["uVelocity"][:]
    vVelocity = fileIC.variables["vVelocity"][:]
    stressDivergenceUAnalytical = fileIC.variables["stressDivergenceUAnalytical"][:]
    stressDivergenceVAnalytical = fileIC.variables["stressDivergenceVAnalytical"][:]
    print("Stress divergence: ",
          np.amin(stressDivergenceUAnalytical), np.amax(stressDivergenceUAnalytical),
          np.amin(stressDivergenceVAnalytical), np.amax(stressDivergenceVAnalytical))
    fileIC.close()

    # Wachspress operator output (first time slice) and its error
    fileWach = Dataset("./output_wachspress_alt_40962/output.2000.nc","r")
    stressDivergenceUWach = fileWach.variables["stressDivergenceU"][0,:]
    stressDivergenceVWach = fileWach.variables["stressDivergenceV"][0,:]
    stressDivergenceUWachDiff = (stressDivergenceUWach - stressDivergenceUAnalytical)
    stressDivergenceVWachDiff = (stressDivergenceVWach - stressDivergenceVAnalytical)
    print("Wachs: ",
          np.amin(stressDivergenceUWachDiff), np.amax(stressDivergenceUWachDiff),
          np.amin(stressDivergenceVWachDiff), np.amax(stressDivergenceVWachDiff))
    fileWach.close()

    # PWL operator
    filePWL = Dataset("./output_pwl_alt_40962/output.2000.nc","r")
    stressDivergenceUPWL = filePWL.variables["stressDivergenceU"][0,:]
    stressDivergenceVPWL = filePWL.variables["stressDivergenceV"][0,:]
    stressDivergenceUPWLDiff = (stressDivergenceUPWL - stressDivergenceUAnalytical)
    stressDivergenceVPWLDiff = (stressDivergenceVPWL - stressDivergenceVAnalytical)
    print("PWL: ",
          np.amin(stressDivergenceUPWLDiff), np.amax(stressDivergenceUPWLDiff),
          np.amin(stressDivergenceVPWLDiff), np.amax(stressDivergenceVPWLDiff))
    filePWL.close()

    # Weak operator
    fileWeak = Dataset("./output_weak_40962/output.2000.nc","r")
    stressDivergenceUWeak = fileWeak.variables["stressDivergenceU"][0,:]
    stressDivergenceVWeak = fileWeak.variables["stressDivergenceV"][0,:]
    stressDivergenceUWeakDiff = (stressDivergenceUWeak - stressDivergenceUAnalytical)
    stressDivergenceVWeakDiff = (stressDivergenceVWeak - stressDivergenceVAnalytical)
    print("Weak: ",
          np.amin(stressDivergenceUWeakDiff), np.amax(stressDivergenceUWeakDiff),
          np.amin(stressDivergenceVWeakDiff), np.amax(stressDivergenceVWeakDiff))
    fileWeak.close()

    # WeakWachs operator
    # NOTE(review): these diffs are printed but not included in the
    # histograms below — confirm that is intentional.
    fileWeakWachs = Dataset("./output_weakwachs_40962/output.2000.nc","r")
    stressDivergenceUWeakWachs = fileWeakWachs.variables["stressDivergenceU"][0,:]
    stressDivergenceVWeakWachs = fileWeakWachs.variables["stressDivergenceV"][0,:]
    stressDivergenceUWeakWachsDiff = (stressDivergenceUWeakWachs - stressDivergenceUAnalytical)
    stressDivergenceVWeakWachsDiff = (stressDivergenceVWeakWachs - stressDivergenceVAnalytical)
    print("WeakWachs: ",
          np.amin(stressDivergenceUWeakWachsDiff), np.amax(stressDivergenceUWeakWachsDiff),
          np.amin(stressDivergenceVWeakWachsDiff), np.amax(stressDivergenceVWeakWachsDiff))
    fileWeakWachs.close()

    # histograms: absolute U-component errors, vertices north of 20 degrees
    stressDivergenceUWachDiffHist = []
    stressDivergenceUPWLDiffHist = []
    stressDivergenceUWeakDiffHist = []
    # NOTE(review): maxValue is accumulated (Wach/PWL only) but never used
    # after the loop.
    maxValue = 0.0
    for iVertex in range(0,nVertices):
        if (latVertex[iVertex] > math.radians(20.0)):
            stressDivergenceUWachDiffHist.append(math.fabs(stressDivergenceUWachDiff[iVertex]))
            stressDivergenceUPWLDiffHist.append(math.fabs(stressDivergenceUPWLDiff[iVertex]))
            stressDivergenceUWeakDiffHist.append(math.fabs(stressDivergenceUWeakDiff[iVertex]))
            maxValue = max(math.fabs(stressDivergenceUWachDiff[iVertex]),maxValue)
            maxValue = max(math.fabs(stressDivergenceUPWLDiff[iVertex]),maxValue)

    # Publication-style figure settings (LaTeX text, small serif font).
    mpl.rc('text', usetex=True)
    mpl.rc('font', family='Times New Roman', size=8)
    mpl.rcParams['axes.linewidth'] = 0.5

    plt.figure(figsize=(3.74016, 3))
    plt.hist(stressDivergenceUWachDiffHist, 50, range=[0.0,1.0], histtype='step', lw=1, color='blue', label='Wachspress')
    plt.hist(stressDivergenceUPWLDiffHist, 50, range=[0.0,1.0], histtype='step', lw=1, color='red', label='PWL')
    plt.hist(stressDivergenceUWeakDiffHist, 50, range=[0.0,1.0], histtype='step', lw=1, color='green', label='Weak')
    plt.yscale('log', nonpositive='clip')
    plt.xlabel("Error")
    plt.ylabel("Frequency")
    plt.legend(["Wachspress","PWL","Weak"], frameon=False, fontsize=8)
    plt.xlim([0,1.0])
    plt.tight_layout(pad=0.5, w_pad=0.5, h_pad=0.5)
    plt.savefig("strain_stress_divergence_hist.png",dpi=400)
#-------------------------------------------------------------------------------
# Script entry point.
if __name__ == "__main__":
    strain_stress_divergence_hist()
| {
"alphanum_fraction": 0.7053917766,
"author": null,
"avg_line_length": 36.0559440559,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e1ba8c7e9085ba06c80c33a208e6cf3eaec6ba4d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3523b2bd017db92dcc84c4df29fa2b2c3473d3a9",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "oksanaguba/E3SM",
"max_forks_repo_path": "components/mpas-seaice/testing_and_setup/testcases/spherical_operators/strain_stress_divergence/strain_stress_divergence_hist.py",
"max_issues_count": 11,
"max_issues_repo_head_hexsha": "3523b2bd017db92dcc84c4df29fa2b2c3473d3a9",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T14:23:55.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-06-04T22:56:10.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "oksanaguba/E3SM",
"max_issues_repo_path": "components/mpas-seaice/testing_and_setup/testcases/spherical_operators/strain_stress_divergence/strain_stress_divergence_hist.py",
"max_line_length": 122,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "3523b2bd017db92dcc84c4df29fa2b2c3473d3a9",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "oksanaguba/E3SM",
"max_stars_repo_path": "components/mpas-seaice/testing_and_setup/testcases/spherical_operators/strain_stress_divergence/strain_stress_divergence_hist.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-03T19:09:42.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-03-03T19:09:42.000Z",
"num_tokens": 1514,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5156
} |
[STATEMENT]
lemma binomial_absorb_comp: "(n - k) * (n choose k) = n * ((n - 1) choose k)"
(is "?lhs = ?rhs")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
proof (cases "n \<le> k")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
2. \<not> n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
n \<le> k
goal (2 subgoals):
1. n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
2. \<not> n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
n \<le> k
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
n \<le> k
goal (1 subgoal):
1. (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(n - k) * (n choose k) = n * (n - 1 choose k)
goal (1 subgoal):
1. \<not> n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> n \<le> k
goal (1 subgoal):
1. \<not> n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> n \<le> k
[PROOF STEP]
have "?rhs = Suc ((n - 1) - k) * (n choose Suc ((n - 1) - k))"
[PROOF STATE]
proof (prove)
using this:
\<not> n \<le> k
goal (1 subgoal):
1. n * (n - 1 choose k) = Suc (n - 1 - k) * (n choose Suc (n - 1 - k))
[PROOF STEP]
using binomial_symmetric[of k "n - 1"] binomial_absorption[of "(n - 1) - k" n]
[PROOF STATE]
proof (prove)
using this:
\<not> n \<le> k
k \<le> n - 1 \<Longrightarrow> n - 1 choose k = n - 1 choose (n - 1 - k)
Suc (n - 1 - k) * (n choose Suc (n - 1 - k)) = n * (n - 1 choose (n - 1 - k))
goal (1 subgoal):
1. n * (n - 1 choose k) = Suc (n - 1 - k) * (n choose Suc (n - 1 - k))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
n * (n - 1 choose k) = Suc (n - 1 - k) * (n choose Suc (n - 1 - k))
goal (1 subgoal):
1. \<not> n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
n * (n - 1 choose k) = Suc (n - 1 - k) * (n choose Suc (n - 1 - k))
goal (1 subgoal):
1. \<not> n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
have "Suc ((n - 1) - k) = n - k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Suc (n - 1 - k) = n - k
[PROOF STEP]
using False
[PROOF STATE]
proof (prove)
using this:
\<not> n \<le> k
goal (1 subgoal):
1. Suc (n - 1 - k) = n - k
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Suc (n - 1 - k) = n - k
goal (1 subgoal):
1. \<not> n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
Suc (n - 1 - k) = n - k
goal (1 subgoal):
1. \<not> n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
have "n choose \<dots> = n choose k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n choose (n - k) = n choose k
[PROOF STEP]
using False
[PROOF STATE]
proof (prove)
using this:
\<not> n \<le> k
goal (1 subgoal):
1. n choose (n - k) = n choose k
[PROOF STEP]
by (intro binomial_symmetric [symmetric]) simp_all
[PROOF STATE]
proof (state)
this:
n choose (n - k) = n choose k
goal (1 subgoal):
1. \<not> n \<le> k \<Longrightarrow> (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
n * (n - 1 choose k) = (n - k) * (n choose k)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
n * (n - 1 choose k) = (n - k) * (n choose k)
goal (1 subgoal):
1. (n - k) * (n choose k) = n * (n - 1 choose k)
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
(n - k) * (n choose k) = n * (n - 1 choose k)
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 23,
"llama_tokens": 1931,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import numpy as np
import matplotlib.pyplot as plt
# For drift: earlier analysis, disabled via the triple-quoted block below.
'''
for test in range(0,330):
    # Removing anomalies.
    if test not in [19,80,282,310]:
        # Loading data.
        data = np.load(f'D:/RLBot/ViliamVadocz/TestBot/data/test_{test:03}.npy')
        # Selection position data.
        pos = data[0]
        # Transforming to start from origin and to be the correct way around (reverse x-axis).
        pos += np.array([-2500, 2300, -17.01])
        pos *= np.array([-1, 1, 1])
        # Plotting trace.
        plt.plot(pos[:,0],pos[:,1])
# Showing plotted paths.
plt.show()
'''

# Distance - time estimation: plot measured time-vs-distance traces for a few
# recorded test runs and overlay a hand-fitted model curve.
for test in [3, 5, 7, 9]:
    # Loading data; row 0 = times, row 1 = distances.
    data = np.load(f'D:/RLBot/ViliamVadocz/TestBot/data/test_{test:02}.npy')
    times = data[0]
    distances = data[1]
    # Plotting trace.
    plt.plot(distances, times)

# Hand-fitted graph: power law up to 2177.25, linear beyond.
x = np.linspace(0, 3000, 100)
y = x**0.55 / 41.53
y[x>2177.25] = 1/2300 * x[x>2177.25] + 0.70337
plt.plot(x, y)
# Showing plotted paths.
plt.show()
| {
"alphanum_fraction": 0.6062618596,
"author": null,
"avg_line_length": 25.7073170732,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "28a23090316a6dcfdf4fce228e4f37602782dc78",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "092abc5bf92e9dab9d07499849d54a33b0b0c4f6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ViliamVadocz/Bots",
"max_forks_repo_path": "Test/TestBot/data_analysis.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "092abc5bf92e9dab9d07499849d54a33b0b0c4f6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ViliamVadocz/Bots",
"max_issues_repo_path": "Test/TestBot/data_analysis.py",
"max_line_length": 94,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "092abc5bf92e9dab9d07499849d54a33b0b0c4f6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ViliamVadocz/Bots",
"max_stars_repo_path": "Test/TestBot/data_analysis.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 335,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1054
} |
// Copyright (C) 2004-2008 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: Douglas Gregor
// Andrew Lumsdaine
#include <boost/graph/use_mpi.hpp>
#include <boost/config.hpp>
#include <boost/throw_exception.hpp>
#include <boost/graph/distributed/mpi_process_group.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/test/minimal.hpp>
#include <vector>
#include <string>
#include <boost/serialization/string.hpp>
#include <boost/serialization/utility.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/graph/parallel/basic_reduce.hpp>
#ifdef BOOST_NO_EXCEPTIONS
// Fallback handler for builds with exceptions disabled: Boost calls this
// instead of `throw`; report the message and abort the program.
// NOTE(review): uses std::cout but <iostream> is not included directly in
// this file — presumably pulled in transitively; confirm.
void
boost::throw_exception(std::exception const& ex)
{
    std::cout << ex.what() << std::endl;
    abort();
}
#endif
using namespace boost;
using boost::graph::distributed::mpi_process_group;

// Two-valued property used by colored_test(); a value-initialized color_t()
// is the first enumerator (red), which the pre-sync ghost check relies on.
enum color_t { red, blue };
// Key addressing one entry of another process's local property map:
// the owning process rank plus an index into that process's local storage.
// Kept trivially-copyable: the MPI/bitwise-serialization traits below
// depend on this exact layout.
struct remote_key
{
  remote_key(int p = -1, std::size_t l = 0) : processor(p), local_key(l) {}

  int processor;          // owning process rank (-1 = unset)
  std::size_t local_key;  // index local to `processor`

  // Boost.Serialization hook (also used by Boost.MPI).
  template<typename Archiver>
  void serialize(Archiver& ar, const unsigned int /*version*/)
  {
    ar & processor & local_key;
  }
};
// Serialization traits: treat remote_key as a plain MPI datatype that can be
// shipped bitwise, without object tracking or versioning overhead.
namespace boost { namespace mpi {
    template<> struct is_mpi_datatype<remote_key> : mpl::true_ { };
} }

BOOST_IS_BITWISE_SERIALIZABLE(remote_key)
BOOST_CLASS_IMPLEMENTATION(remote_key,object_serializable)
BOOST_CLASS_TRACKING(remote_key,track_never)
namespace boost {
  // Hash a remote_key by seeding with the processor rank and folding in the
  // local index, so distinct (rank, index) pairs spread across buckets.
  template<>
  struct hash<remote_key>
  {
    std::size_t operator()(const remote_key& key) const
    {
      std::size_t seed = hash_value(key.processor);
      hash_combine(seed, key.local_key);
      return seed;
    }
  };
}
// Two remote keys are equal iff they name the same slot on the same process.
inline bool operator==(const remote_key& x, const remote_key& y)
{
  if (x.processor != y.processor)
    return false;
  return x.local_key == y.local_key;
}
// Adapter telling distributed_property_map how to split a remote_key into
// its (owning process, local index) pair; modeled as a readable property map.
struct remote_key_to_global
{
  typedef readable_property_map_tag category;
  typedef remote_key key_type;
  typedef std::pair<int, std::size_t> value_type;
  typedef value_type reference;
};

// Free `get` for the adapter: project a key onto (processor, local_key).
inline std::pair<int, std::size_t>
get(remote_key_to_global, const remote_key& key)
{
  return std::make_pair(key.processor, key.local_key);
}
// Reduction policy for the distributed maps below: behaves like basic_reduce
// but advertises itself as a non-default resolver (see the Parallel BGL
// distributed_property_map documentation for the flag's meaning).
template<typename T>
struct my_reduce : boost::parallel::basic_reduce<T> {
  BOOST_STATIC_CONSTANT(bool, non_default_resolver = true);
};
// Exercise a distributed property map of color_t values: each process checks
// its own n entries, reads ghost entries owned by the next process before and
// after a synchronize(), writes the next process's entries, and verifies the
// writes propagated after another synchronize().
void colored_test()
{
  mpi_process_group pg;
  const int n = 500;  // entries per process

  // Even ranks start red, odd ranks start blue.
  color_t my_start_color = process_id(pg) % 2 == 0? ::red : ::blue;
  int next_processor = (process_id(pg) + 1) % num_processes(pg);
  color_t next_start_color = next_processor % 2 == 0? ::red : ::blue;

  // Initial color map: even-numbered processes are all red,
  // odd-numbered processes are all blue.
  std::vector<color_t> color_vec(n, my_start_color);
  typedef iterator_property_map<std::vector<color_t>::iterator,
                                identity_property_map> LocalPropertyMap;
  LocalPropertyMap local_colors(color_vec.begin(), identity_property_map());

  synchronize(pg);

  // Create the distributed property map
  typedef boost::parallel::distributed_property_map<mpi_process_group,
                                                    remote_key_to_global,
                                                    LocalPropertyMap> ColorMap;
  ColorMap colors(pg, remote_key_to_global(), local_colors);
  colors.set_reduce(my_reduce<color_t>());

  if (process_id(pg) == 0) std::cerr << "Checking local colors...";
  // check local processor colors
  for (int i = 0; i < n; ++i) {
    remote_key k(process_id(pg), i);
    BOOST_CHECK(get(colors, k) == my_start_color);
  }

  colors.set_consistency_model(boost::parallel::cm_bidirectional);

  if (process_id(pg) == 0) std::cerr << "OK.\nChecking next processor's default colors...";
  // check next processor's colors: before any sync, remote reads see the
  // default-constructed value color_t() (== red)
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    BOOST_CHECK(get(colors, k) == color_t());
  }

  if (process_id(pg) == 0) std::cerr << "OK.\nSynchronizing...";
  synchronize(pg);

  if (process_id(pg) == 0) std::cerr << "OK.\nChecking next processor's colors...";
  // check next processor's colors: after sync, ghosts hold the real values
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    BOOST_CHECK(get(colors, k) == next_start_color);
  }

  if (process_id(pg) == 0) std::cerr << "OK.\nSynchronizing...";
  synchronize(pg);

  if (process_id(pg) == 0) std::cerr << "OK.\nChanging next processor's colors...";
  // change the next processor's colors (flip red <-> blue)
  color_t next_finish_color = next_processor % 2 == 0? ::blue : ::red;
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    put(colors, k, next_finish_color);
  }

  if (process_id(pg) == 0) std::cerr << "OK.\nSynchronizing...";
  synchronize(pg);

  if (process_id(pg) == 0) std::cerr << "OK.\nChecking changed colors...";
  // check our own colors: the previous process wrote them during sync
  color_t my_finish_color = process_id(pg) % 2 == 0? ::blue : ::red;
  for (int i = 0; i < n; ++i) {
    remote_key k(process_id(pg), i);
    BOOST_CHECK(get(colors, k) == my_finish_color);
  }

  // check our neighbor's colors
  if (process_id(pg) == 0) std::cerr << "OK.\nChecking changed colors on neighbor...";
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    BOOST_CHECK(get(colors, k) == next_finish_color);
  }

  synchronize(pg);
  if (process_id(pg) == 0) std::cerr << "OK.\n";
}
// Same scenario as colored_test() but with bool values, exercising the
// std::vector<bool> proxy-reference path of the distributed property map.
void bool_test()
{
  mpi_process_group pg;
  const int n = 500;  // entries per process

  // Even ranks start false, odd ranks start true.
  bool my_start_value = process_id(pg) % 2;
  int next_processor = (process_id(pg) + 1) % num_processes(pg);
  // NOTE(review): recomputes next_processor inline; equivalent to
  // `next_processor % 2`.
  bool next_start_value = ((process_id(pg) + 1) % num_processes(pg)) % 2;

  // Initial color map: even-numbered processes are false,
  // odd-numbered processes are true
  std::vector<bool> bool_vec(n, my_start_value);
  typedef iterator_property_map<std::vector<bool>::iterator,
                                identity_property_map> LocalPropertyMap;
  LocalPropertyMap local_values(bool_vec.begin(), identity_property_map());

  synchronize(pg);

  // Create the distributed property map
  typedef boost::parallel::distributed_property_map<mpi_process_group,
                                                    remote_key_to_global,
                                                    LocalPropertyMap> ValueMap;
  ValueMap values(pg, remote_key_to_global(), local_values);
  values.set_reduce(my_reduce<bool>());

  if (process_id(pg) == 0) std::cerr << "Checking local values...";
  // check local processor values
  for (int i = 0; i < n; ++i) {
    remote_key k(process_id(pg), i);
    BOOST_CHECK(get(values, k) == my_start_value);
  }

  values.set_consistency_model(boost::parallel::cm_bidirectional);

  if (process_id(pg) == 0) std::cerr << "OK.\nChecking next processor's default values...";
  // check next processor's values: pre-sync ghosts hold the default (false)
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    BOOST_CHECK(get(values, k) == false);
  }

  if (process_id(pg) == 0) std::cerr << "OK.\nSynchronizing...";
  synchronize(pg);

  if (process_id(pg) == 0) std::cerr << "OK.\nChecking next processor's values...";
  // check next processor's values after synchronization
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    BOOST_CHECK(get(values, k) == next_start_value);
  }

  if (process_id(pg) == 0) std::cerr << "OK.\nSynchronizing...";
  synchronize(pg);

  if (process_id(pg) == 0) std::cerr << "OK.\nChanging next processor's values...";
  // change the next processor's values (invert its parity value)
  bool next_finish_value = next_processor % 2 == 0;
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    put(values, k, next_finish_value);
  }

  if (process_id(pg) == 0) std::cerr << "OK.\nSynchronizing...";
  synchronize(pg);

  if (process_id(pg) == 0) std::cerr << "OK.\nChecking changed values...";
  // check our own values: written remotely by the previous process
  bool my_finish_value = process_id(pg) % 2 == 0;
  for (int i = 0; i < n; ++i) {
    remote_key k(process_id(pg), i);
    BOOST_CHECK(get(values, k) == my_finish_value);
  }

  // check our neighbor's values
  if (process_id(pg) == 0) std::cerr << "OK.\nChecking changed values on neighbor...";
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    BOOST_CHECK(get(values, k) == next_finish_value);
  }

  synchronize(pg);
  if (process_id(pg) == 0) std::cerr << "OK.\n";
}
// Exercises a distributed property map over std::string values: local
// defaults, ghost-cell defaults (empty string, or the local value on a
// single-process run), propagation after synchronize(), and remote put()
// resolved through my_reduce<std::string>.
void string_test()
{
  mpi_process_group pg;
  const int n = 500;
  // Each process starts with its own rank rendered as a string; the
  // neighbor is the next rank (wrapping around).
  std::string my_start_string = lexical_cast<std::string>(process_id(pg));
  int next_processor = (process_id(pg) + 1) % num_processes(pg);
  std::string next_start_string = lexical_cast<std::string>(next_processor);
  std::vector<std::string> string_vec(n, my_start_string);
  typedef iterator_property_map<std::vector<std::string>::iterator,
                                identity_property_map> LocalPropertyMap;
  LocalPropertyMap local_strings(string_vec.begin(), identity_property_map());
  synchronize(pg);
  // Create the distributed property map
  typedef boost::parallel::distributed_property_map<mpi_process_group,
                                                    remote_key_to_global,
                                                    LocalPropertyMap> StringMap;
  StringMap strings(pg, remote_key_to_global(), local_strings);
  strings.set_reduce(my_reduce<std::string>());
  if (process_id(pg) == 0) std::cerr << "Checking local strings...";
  // check local processor strings
  for (int i = 0; i < n; ++i) {
    remote_key k(process_id(pg), i);
    BOOST_CHECK(get(strings, k) == my_start_string);
  }
  strings.set_consistency_model(boost::parallel::cm_bidirectional);
  if (process_id(pg) == 0) std::cerr << "OK.\nChecking next processor's default strings...";
  // check next processor's strings: unsynchronized ghost cells are
  // default-constructed (empty), except on a single process where
  // "next" aliases our own local storage.
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    BOOST_CHECK(get(strings, k) == (num_processes(pg) == 1 ? my_start_string : std::string()));
  }
  if (process_id(pg) == 0) std::cerr << "OK.\nSynchronizing...";
  synchronize(pg);
  if (process_id(pg) == 0) std::cerr << "OK.\nChecking next processor's strings...";
  // check next processor's strings (ghost cells now filled by the owner)
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    BOOST_CHECK(get(strings, k) == next_start_string);
  }
  if (process_id(pg) == 0) std::cerr << "OK.\nSynchronizing...";
  synchronize(pg);
  if (process_id(pg) == 0) std::cerr << "OK.\nChanging next processor's strings...";
  // change the next processor's strings via remote put()
  std::string next_finish_string = next_start_string + next_start_string;
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    put(strings, k, next_finish_string);
  }
  if (process_id(pg) == 0) std::cerr << "OK.\nSynchronizing...";
  synchronize(pg);
  if (process_id(pg) == 0) std::cerr << "OK.\nChecking changed strings...";
  // check our own strings (our predecessor wrote them remotely)
  std::string my_finish_string = my_start_string + my_start_string;
  for (int i = 0; i < n; ++i) {
    remote_key k(process_id(pg), i);
    BOOST_CHECK(get(strings, k) == my_finish_string);
  }
  // check our neighbor's strings
  if (process_id(pg) == 0) std::cerr << "OK.\nChecking changed strings on neighbor...";
  for (int i = 0; i < n; ++i) {
    remote_key k(next_processor, i);
    BOOST_CHECK(get(strings, k) == next_finish_string);
  }
  synchronize(pg);
  if (process_id(pg) == 0) std::cerr << "OK.\n";
}
// Boost.Test driver: initializes the MPI environment, then runs the
// distributed property map tests for color enums, bools, and strings.
int test_main(int argc, char** argv)
{
  boost::mpi::environment env(argc, argv);
  colored_test();
  bool_test();
  string_test();
  return 0;
}
| {
"alphanum_fraction": 0.6567745285,
"author": null,
"avg_line_length": 32.4662921348,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "d1aae740eac4572f2947925ff7e6df26d303ddf2",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 9,
"max_forks_repo_forks_event_max_datetime": "2021-01-30T00:24:24.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-09-09T02:38:32.000Z",
"max_forks_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "randolphwong/mcsema",
"max_forks_repo_path": "boost/libs/graph_parallel/test/distributed_property_map_test.cpp",
"max_issues_count": 49,
"max_issues_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720",
"max_issues_repo_issues_event_max_datetime": "2019-05-05T04:59:26.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-02-29T17:59:52.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "randolphwong/mcsema",
"max_issues_repo_path": "boost/libs/graph_parallel/test/distributed_property_map_test.cpp",
"max_line_length": 95,
"max_stars_count": 18,
"max_stars_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "randolphwong/mcsema",
"max_stars_repo_path": "boost/libs/graph_parallel/test/distributed_property_map_test.cpp",
"max_stars_repo_stars_event_max_datetime": "2021-12-31T11:06:25.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-03-04T15:44:24.000Z",
"num_tokens": 3063,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 11558
} |
#include <iostream>
#include <vector>
#include <functional>
#include <cmath>
#include <Eigen/Dense>
#include "Derivative.h"
using Eigen::MatrixXd;
using Eigen::VectorXd;
using Eigen::Derivative;
using std::function;
using std::vector;
typedef function<double(VectorXd)> FuncDV;
typedef function<VectorXd(VectorXd)> FuncVV;
typedef function<MatrixXd(VectorXd)> FuncMV;
// Returns mat + c*I for the smallest c >= 0 (to within 1e-4, found by
// binary search) such that the result passes an Eigen LLT (Cholesky)
// factorization, i.e. an approximately positive-(semi)definite shift of mat.
// `dim` must equal the dimension of the square matrix `mat`.
MatrixXd MakePositiveSemidefinite(MatrixXd mat, int dim){
    // An upper bound for the shift: |most negative eigenvalue| is bounded
    // by -minCoeff() only heuristically; +1 gives slack. NOTE(review):
    // this bound is not rigorous for general matrices -- confirm it is
    // sufficient for the Hessians produced by doIPM.
    double left = 0, right = std::max(-mat.minCoeff(), .0) + 1;
    while(right - left >= 1e-4){
        double mid = (left+right)/2;
        auto test = mat + mid*MatrixXd::Identity(dim, dim);
        // From https://stackoverflow.com/questions/35227131/eigen-check-if-matrix-is-positive-semi-definite
        // LLT succeeds only for (numerically) positive definite input.
        Eigen::LLT<Eigen::MatrixXd> lltOfA(test);
        if(lltOfA.info() == Eigen::NumericalIssue)
            left = mid;
        else
            right = mid;
    }
    return mat + right*MatrixXd::Identity(dim, dim);
}
// Primal-dual interior point iteration for
//   min F(x)  subject to  H(x) >= 0  (componentwise),
// with barrier parameter mu fixed. DelF/LaplaceF are the gradient/Hessian
// of F; DelH is the dimH x dimX Jacobian of H and LaplaceH[i] the Hessian
// of constraint i. Iterates Newton steps on the KKT system until the
// primal step norm drops below 1e-3, then returns the primal point x.
VectorXd doIPM(
    FuncDV F, FuncVV DelF, FuncMV LaplaceF,
    FuncVV H, FuncMV DelH, vector<FuncMV> LaplaceH,
    int dimX, int dimH, VectorXd initX
){
    double mu = 0.0001;
    // NOTE(review): `feasible` and `L` below are defined but never used
    // in this function (candidates for removal or for a line-search).
    std::function<bool(VectorXd)> feasible = [dimH](VectorXd _f){
        for(int lx = 0;lx < dimH;lx++)
            if(_f[lx] < 0)
                return false;
        return true;
    };
    std::function<double(VectorXd, VectorXd, VectorXd)>
        L = [mu, F, H, dimH](VectorXd x, VectorXd y, VectorXd w){
            double barrier = 0;
            for(int lx = 0;lx < dimH;lx++)
                barrier += log(w[lx]);
            return F(x) - mu*barrier - y.transpose()*(H(x) - w);
        };
    // Slacks w start at H(initX); dual y from the least-squares solution
    // of DelH(x)^T y = DelF(x) via the pseudoinverse.
    VectorXd x = initX, y, w = H(initX);
    {
        MatrixXd A = DelH(x).transpose();
        MatrixXd invA = A.completeOrthogonalDecomposition().pseudoInverse();
        y = invA*DelF(x);
    }
    for(;;){
        VectorXd h = H(x), e = VectorXd::Ones(dimH);
        MatrixXd W = w.asDiagonal(), invY = y.asDiagonal().inverse();
        // Hessian of the Lagrangian: LaplaceF - sum_i y_i * LaplaceH_i.
        MatrixXd Hess = LaplaceF(x);
        for(int lx = 0;lx < dimH;lx++)
            Hess -= y[lx]*LaplaceH[lx](x);
        // Find ~H = H + lambda * I
        Hess = MakePositiveSemidefinite(Hess, dimX);
        MatrixXd A = DelH(x);
        // Reduced KKT system in (dx, dy).
        MatrixXd M1(dimX + dimH, dimX + dimH);
        M1 << -Hess, A.transpose(),
              A, W*invY ;
        VectorXd V1(dimX + dimH);
        V1 << DelF(x) - A.transpose()*y,
              - h + mu*invY*e ;
        // NOTE(review): dense .inverse() solve; fine for small dims.
        VectorXd dxy = M1.inverse()*V1;
        VectorXd dx, dy, dw;
        dx = dxy.head(dimX);
        dy = dxy.tail(dimH);
        dw = invY*mu*e - W*e - invY*W*dy;
        // Full (undamped) step -- no line search is performed.
        x += dx, y += dy, w += dw;
        if(dx.norm() <= 0.001)
            break;
    }
    return x;
}
// Solve : min obj_f
//         sub con_hs >= 0
// Wraps symbolic Derivative objects into the plain std::function callbacks
// expected by doIPM: the objective, its gradient/Hessian, the constraint
// vector, its Jacobian, and one Hessian per constraint.
VectorXd Derivative_IPM(Derivative obj_f, vector<Derivative> con_hs, VectorXd start_guess){
    int x_size = start_guess.size(), h_size = con_hs.size();
    vector<Derivative> v1w(x_size);
    vector< vector<Derivative> > v2w(x_size, v1w);
    vector<Derivative> f_gradient = v1w;
    vector< vector<Derivative> > f_hess = v2w;
    vector< vector<Derivative> > hs_gradient(h_size, v1w);
    vector< vector< vector<Derivative> > > hs_hess(h_size, v2w);
    // Prebuild all partial-derivative expressions once, so the lambdas
    // below only evaluate (never re-differentiate) per call.
    for(int lx = 0;lx < x_size;lx++)
        f_gradient[lx] = obj_f.diffPartial(lx);
    for(int lx = 0;lx < x_size;lx++)
        for(int ly = 0;ly < x_size;ly++)
            f_hess[lx][ly] = f_gradient[lx].diffPartial(ly);
    for(int lh = 0;lh < h_size;lh++){
        for(int lx = 0;lx < x_size;lx++)
            hs_gradient[lh][lx] = con_hs[lh].diffPartial(lx);
        for(int lx = 0;lx < x_size;lx++)
            for(int ly = 0;ly < x_size;ly++)
                hs_hess[lh][lx][ly] = hs_gradient[lh][lx].diffPartial(ly);
    }
    // Objective value F(x).
    FuncDV F = [obj_f](VectorXd x){
        return obj_f(x);
    };
    // Gradient of F.
    FuncVV DelF = [f_gradient, x_size](VectorXd x){
        VectorXd ret(x_size);
        for(int lx = 0;lx < x_size;lx++)
            ret[lx] = f_gradient[lx](x);
        return ret;
    };
    // Hessian of F.
    FuncMV LaplaceF = [f_hess, x_size](VectorXd x){
        MatrixXd ret(x_size, x_size);
        for(int lx = 0;lx < x_size;lx++)
            for(int ly = 0;ly < x_size;ly++)
                ret(lx, ly) = f_hess[lx][ly](x);
        return ret;
    };
    // Constraint vector H(x).
    FuncVV H = [con_hs, h_size](VectorXd x){
        VectorXd ret(h_size);
        for(int lx = 0;lx < h_size;lx++)
            ret[lx] = con_hs[lx](x);
        return ret;
    };
    // Jacobian of H (h_size x x_size).
    FuncMV DelH = [hs_gradient, h_size, x_size](VectorXd x){
        MatrixXd A(h_size, x_size);
        for(int lh = 0;lh < h_size;lh++)
            for(int lx = 0;lx < x_size;lx++)
                A(lh, lx) = hs_gradient[lh][lx](x);
        return A;
    };
    // One Hessian evaluator per constraint.
    vector<FuncMV> LaplaceH(h_size);
    for(int lh = 0;lh < h_size;lh++){
        LaplaceH[lh] = [hs_hess, lh, x_size](VectorXd x){
            MatrixXd ret(x_size, x_size);
            for(int lx = 0;lx < x_size;lx++)
                for(int ly = 0;ly < x_size;ly++)
                    ret(lx, ly) = hs_hess[lh][lx][ly](x);
            return ret;
        };
    };
    return doIPM(F, DelF, LaplaceF, H, DelH, LaplaceH, start_guess.size(), con_hs.size(), start_guess);
}
int main(){
    // Reference : http://www.princeton.edu/~rvdb/tex/talks/MLSS_LaPalma/LaPalma3.pdf
    // Solve : min x+y
    //         sub xx + yy >= 1
    //             x >= 0
    //             y >= 0
    Derivative par_x = Derivative::Variable(0), par_y = Derivative::Variable(1);
    Derivative obj_f = par_x + par_y;
    Derivative con_h1 = par_x*par_x + par_y*par_y - 1,
               con_h2 = par_x,
               con_h3 = par_y;
    // Run the solver from several initial guesses and print the solution
    // reached from each one.
    const double starts[][2] = {
        {1, 1}, {1, 2}, {2, 1}, {4, -1}, {-1, 4}, {0.2, 0.7}, {0.5, 0.5}
    };
    VectorXd x(2);
    for(const auto& s : starts){
        x << s[0], s[1];
        std::cout << "x initial as " << x.transpose() << std::endl;
        std::cout << Derivative_IPM(obj_f, {con_h1, con_h2, con_h3}, x).transpose() << std::endl;
    }
    return 0;
}
| {
"alphanum_fraction": 0.5461144321,
"author": null,
"avg_line_length": 30.1545064378,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "55ee171f3727f76c71ad3e290159f3a536dca781",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c45c9bd1fe781831aaf42fe798c7d6c1a3d568ad",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mudream4869/eigen-derivative",
"max_forks_repo_path": "examples/interior-point-method.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c45c9bd1fe781831aaf42fe798c7d6c1a3d568ad",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mudream4869/eigen-derivative",
"max_issues_repo_path": "examples/interior-point-method.cpp",
"max_line_length": 108,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c45c9bd1fe781831aaf42fe798c7d6c1a3d568ad",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mudream4869/eigen-derivative",
"max_stars_repo_path": "examples/interior-point-method.cpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2214,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 7026
} |
[STATEMENT]
lemma reflexive:
fixes P :: pi
shows "P \<sim>\<^sup>s P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<sim>\<^sup>s P
[PROOF STEP]
by(force simp add: substClosed_def intro: Strong_Early_Bisim.reflexive) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Pi_Calculus_Strong_Early_Bisim_Subst",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 100,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import ctypes
import glob
import logging
import logging.config
import os
import shutil
from pathlib import Path
import numpy as np
import pandas as pd
from invoke import task
logging.config.fileConfig("logging.ini")
logger = logging.getLogger(__name__)
DEFAULT_SIM_DATADIR = os.getenv("SIM_DATADIR", "data")
DEFAULT_TEST_DATADIR = os.getenv("TEST_DATADIR", "../../../../sgkit/tests/test_hwe")
@task
def compile(ctx):  # noqa: A001 -- invoke task name shadows the builtin `compile`
    """Build reference implementation C library.

    Runs ``make`` in the current directory; the Makefile is expected to
    produce ``libchwe.so``, which `simulate` later loads via ctypes.
    """
    logger.info("Building reference C library")
    ctx.run("make")
    logger.info("Build complete")
def get_genotype_counts():
    """Generate a deterministic table of genotype counts for testing.

    Returns:
        DataFrame with 200 rows and columns ``n_het``, ``n_hom_1``,
        ``n_hom_2``; produced from a fixed seed so output is reproducible.
    """
    rng = np.random.RandomState(0)
    total, step = 10_000, 50
    # Heterozygote counts 1, 51, 101, ... as a column vector.
    het_counts = np.expand_dims(np.arange(total, step=step) + 1, -1)
    # Homozygote counts are random fractions (0.3-0.7) of each het count.
    hom_fractions = rng.uniform(0.3, 0.7, size=(total // step, 2))
    hom_counts = (hom_fractions * het_counts).astype(int)
    data = np.concatenate((het_counts, hom_counts), axis=1)
    return pd.DataFrame(data, columns=["n_het", "n_hom_1", "n_hom_2"])
@task
def simulate(ctx, sim_datadir=DEFAULT_SIM_DATADIR):
    """Create inputs and outputs for unit tests.

    Loads the reference C implementation (``libchwe.so``, built by the
    `compile` task), evaluates its ``hwep`` p-value for every simulated
    genotype-count row, and writes the table to ``<sim_datadir>/sim_01.csv``.
    """
    logger.info("Generating unit test data")
    # hwep(n_het, n_hom_1, n_hom_2) -> double; restype must be declared,
    # otherwise ctypes would truncate the return value to int.
    libc = ctypes.CDLL("./libchwe.so")
    chwep = libc.hwep
    chwep.restype = ctypes.c_double
    df = get_genotype_counts()
    df["p"] = df.apply(
        lambda r: chwep(int(r["n_het"]), int(r["n_hom_1"]), int(r["n_hom_2"])), axis=1
    )
    output_dir = Path(sim_datadir)
    if not output_dir.exists():
        output_dir.mkdir(parents=True, exist_ok=True)
    path = output_dir / "sim_01.csv"
    df.to_csv(path, index=False)
    logger.info(f"Unit test data written to {path}")
@task
def export(
    ctx,
    sim_datadir=DEFAULT_SIM_DATADIR,
    test_datadir=DEFAULT_TEST_DATADIR,
    clear=True,
    runs=None,
):
    """Copy simulated CSV fixtures into the sgkit test data directory.

    Args:
        sim_datadir: directory containing the generated ``*.csv`` files.
        test_datadir: destination directory (resolved to an absolute path).
        clear: when True, delete the destination first for a clean copy.
        runs: accepted for CLI compatibility but never read here.
    """
    sim_datadir = Path(sim_datadir)
    test_datadir = Path(test_datadir).resolve()
    logger.info(f"Exporting test data to {test_datadir}")
    if clear and test_datadir.exists():
        logger.info(f"Clearing test datadir at {test_datadir}")
        shutil.rmtree(test_datadir)
    test_datadir.mkdir(exist_ok=True)
    for f in glob.glob(str(sim_datadir / "*.csv")):
        src = f
        dst = test_datadir / Path(f).name
        logger.info(f"Copying {src} to {dst}")
        shutil.copy(src, dst)
    logger.info("Export complete")
| {
"alphanum_fraction": 0.6675302245,
"author": null,
"avg_line_length": 28.5925925926,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f4da9666b635695a66392389db215975b020542b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 20,
"max_forks_repo_forks_event_max_datetime": "2022-03-05T03:33:13.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-06-22T13:40:10.000Z",
"max_forks_repo_head_hexsha": "a3a5e961b6b21ff7de235075955c0f797ad5027c",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "timothymillar/sgkit",
"max_forks_repo_path": "validation/gwas/method/hwe/tasks.py",
"max_issues_count": 677,
"max_issues_repo_head_hexsha": "a3a5e961b6b21ff7de235075955c0f797ad5027c",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T16:20:50.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-06-18T15:57:33.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "timothymillar/sgkit",
"max_issues_repo_path": "validation/gwas/method/hwe/tasks.py",
"max_line_length": 87,
"max_stars_count": 74,
"max_stars_repo_head_hexsha": "a3a5e961b6b21ff7de235075955c0f797ad5027c",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "timothymillar/sgkit",
"max_stars_repo_path": "validation/gwas/method/hwe/tasks.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-10T06:42:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-06-16T18:08:24.000Z",
"num_tokens": 635,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2316
} |
(* Extended FIFO specification: realizes the GenericFifo interface on top of
   a plain Fifo by delegating every method to the underlying Fifo and making
   [propagate] a no-op (a plain Fifo needs no propagation step). *)
Require Import Kami.AllNotations.
Require Import StdLibKami.Fifo.Ifc.
Require Import StdLibKami.GenericFifo.Ifc.
Section Spec.
  Context {ifcParams : Fifo.Ifc.Params}.
  (* A plain Fifo paired with a GenericFifo instantiated over the same
     name, element kind, and size parameters. *)
  Class Params := {fifo : @Fifo.Ifc.Ifc ifcParams;
                   genericFifo : @GenericFifo.Ifc.Ifc (GenericFifo.Ifc.Build_Params
                                                         (@Fifo.Ifc.name ifcParams)
                                                         (@Fifo.Ifc.k ifcParams)
                                                         (@Fifo.Ifc.size ifcParams))}.
  (* Class Params := {genericParams : @GenericFifo.Ifc.Ifc (GenericFifo.Ifc.Build_Params *)
  (* (@Fifo.Ifc.name ifcParams) *)
  (* (@Fifo.Ifc.k ifcParams) *)
  (* (@Fifo.Ifc.size ifcParams))}. *)
  Context {params : Params}.
  Local Notation genericParams := (GenericFifo.Ifc.Build_Params
                                     (@Fifo.Ifc.name ifcParams)
                                     (@Fifo.Ifc.k ifcParams)
                                     (@Fifo.Ifc.size ifcParams)).
  Local Open Scope kami_expr.
  Local Open Scope kami_action.
  (* No-op: the underlying Fifo is always up to date. *)
  Local Definition propagate ty: ActionT ty Void :=
    Retv.
  (* The remaining operations simply forward to the plain Fifo. *)
  Local Definition isEmpty ty: ActionT ty Bool :=
    (@Fifo.Ifc.isEmpty ifcParams fifo ty).
  Local Definition isFull ty: ActionT ty Bool :=
    (@Fifo.Ifc.isFull ifcParams fifo ty).
  Local Definition numFree ty: ActionT ty (Bit ((@lgSize genericParams) + 1)) :=
    (@Fifo.Ifc.numFree ifcParams fifo ty).
  Local Definition first ty: ActionT ty (Maybe (@k genericParams)) :=
    (@Fifo.Ifc.first ifcParams fifo ty).
  Local Definition deq ty: ActionT ty (Maybe (@k genericParams)) :=
    (@Fifo.Ifc.deq ifcParams fifo ty).
  Local Definition enq ty (new: ty (@k genericParams)): ActionT ty Bool :=
    (@Fifo.Ifc.enq ifcParams fifo ty new).
  Local Definition flush ty: ActionT ty Void :=
    (@Fifo.Ifc.flush ifcParams fifo ty).
  Local Definition regs : list RegInitT := (@Fifo.Ifc.regs ifcParams fifo).
  (* The packaged GenericFifo interface record. *)
  Definition Extension: Ifc :=
    {|
      Ifc.propagate := propagate;
      Ifc.regs := regs;
      Ifc.regFiles := nil;
      Ifc.isEmpty := isEmpty;
      Ifc.isFull := isFull;
      Ifc.numFree := numFree;
      Ifc.first := first;
      Ifc.deq := deq;
      Ifc.enq := enq;
      Ifc.flush := flush
    |}.
End Spec.
| {
"alphanum_fraction": null,
"author": "sifive",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/coq/sifive-StdLibKami/StdLibKami-01d3dffcec9d8bfc4f864b940974396ffe817314/GenericFifo/ExtendedFifo.v",
"reason": null,
"repo": "StdLibKami",
"save_path": "github-repos/coq/sifive-StdLibKami",
"sha": "01d3dffcec9d8bfc4f864b940974396ffe817314",
"size": null
} |
import os, sys
import random
import itertools
import collections
import ast
import os.path as osp
import math
import multiprocessing
import numpy as np
class AttrDict(dict):
    """Dict subclass exposing keys as attributes: ``d.x`` is ``d['x']``.

    NOTE(review): missing attributes raise KeyError (not AttributeError)
    because lookup delegates directly to ``dict.__getitem__``.
    """
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__
def deep_update(source, target):
    """Recursively merge ``target`` into ``source`` in place and return it.

    Whenever a value in ``target`` is a mapping, the matching key must
    already exist in ``source`` and hold a mapping as well; leaf values
    simply overwrite.
    """
    for key, value in target.items():
        if not isinstance(value, collections.abc.Mapping):
            source[key] = value
            continue
        assert key in source, f'{key} does not exist in {source}'
        assert isinstance(source[key], collections.abc.Mapping), \
            f'Inconsistent types: {type(value)} vs {type(source[key])}'
        source[key] = deep_update(source.get(key, {}), value)
    return source
def config_attr(obj, config):
    """Set every config entry as an attribute on ``obj``.

    Lowercase keys are stored as private attributes (``_``-prefixed).
    String values that parse as numbers are converted to float, and any
    float with an integral value is narrowed to int.
    """
    for k, v in config.items():
        if k.islower():
            k = f'_{k}'
        if isinstance(v, str):
            try:
                v = float(v)
            except ValueError:  # non-numeric string: keep as-is
                pass
        # float.is_integer() is safe for inf/nan, where `v == int(v)`
        # would raise OverflowError/ValueError.
        if isinstance(v, float) and v.is_integer():
            v = int(v)
        setattr(obj, k, v)
def to_int(s):
    """Convert ``s`` to int, accepting float-formatted input like '3.0' or '1e2'."""
    value = float(s)
    return int(value)
def to_array32(x):
    """Convert ``x`` to an ndarray, narrowing 64-bit dtypes to 32-bit.

    Uses ``np.asarray`` so no copy is made when ``x`` is already an
    ndarray of an acceptable dtype. (The previous ``np.array(x,
    copy=False)`` raises under NumPy 2 whenever a copy is unavoidable;
    ``asarray`` keeps the old copy-if-needed semantics.)
    """
    x = np.asarray(x)
    if x.dtype == np.float64:
        x = x.astype(np.float32)
    elif x.dtype == np.int64:
        x = x.astype(np.int32)
    return x
def isscalar(x):
    """Return True if ``x`` is a plain Python int or float (bools count as ints)."""
    return isinstance(x, int) or isinstance(x, float)
def step_str(step):
    """Render a step count compactly: 1500 -> '1.5k', 2000000 -> '2m'."""
    if step >= 1_000_000:
        return f'{step/1_000_000:.3g}m'
    if step >= 1000:
        return f'{step/1000:.3g}k'
    return f'{step}'
def expand_dims_match(x, target):
    """Append trailing singleton axes to ``x`` until it matches target.ndim.

    An efficient single-indexing equivalent of repeatedly calling
    ``np.expand_dims(x, -1)``; returns a view, not a copy.
    """
    assert x.shape == target.shape[:x.ndim], (x.shape, target.shape)
    keep = tuple(slice(None) for _ in range(x.ndim))
    pad = (np.newaxis,) * (target.ndim - x.ndim)
    return x[keep + pad]
def moments(x, axis=None, mask=None):
    """Compute mean and variance of ``x`` along ``axis``, optionally masked.

    Args:
        x: input array; uint8 is widened to int32 so x**2 cannot overflow.
        axis: int, tuple of ints, or None (reduce over all axes).
        mask: optional array whose ndim equals the number of reduced axes;
            entries where mask is 0 are excluded from the statistics.

    Returns:
        (mean, variance); when a mask is given and selects no entries,
        returns the scalars (0, 0).
    """
    if x.dtype == np.uint8:
        x = x.astype(np.int32)
    if mask is None:
        x_mean = np.mean(x, axis=axis)
        x2_mean = np.mean(x**2, axis=axis)
    else:
        # Normalize axis to a tuple so it can be iterated below.
        if axis is None:
            axis = tuple(range(x.ndim))
        else:
            axis = (axis,) if isinstance(axis, int) else tuple(axis)
        assert mask.ndim == len(axis), (mask.shape, axis)
        # compute valid entries in x corresponding to True in mask
        n = np.sum(mask)
        if n == 0:
            return 0, 0
        # the following process is about 5x faster than np.nan*
        # expand mask to match the dimensionality of x
        mask = expand_dims_match(mask, x)
        for i in axis:
            if mask.shape[i] != 1:
                assert mask.shape[i] == x.shape[i], (
                    f'{i}th dimension of mask({mask.shape[i]}) does not match'
                    f'that of x({x.shape[i]})')
            else:
                # size-1 mask dim broadcasts over x: each masked entry
                # covers x.shape[i] elements, so scale the count.
                n *= x.shape[i]
        # compute x_mean and x_std from entries in x corresponding to True in mask
        x_mask = x * mask
        x_mean = np.sum(x_mask, axis=axis) / n
        x2_mean = np.sum(x_mask**2, axis=axis) / n
    # Var(X) = E[X^2] - E[X]^2.
    x_var = x2_mean - x_mean**2
    return x_mean, x_var
def standardize(x, mask=None, axis=None, epsilon=1e-8):
    """Zero-mean, unit-variance normalize ``x``; masked-out entries keep
    their original values.

    Statistics are computed by `moments` (mask-aware); ``epsilon`` guards
    against division by zero.
    """
    if mask is not None:
        mask = expand_dims_match(mask, x)
    mean, var = moments(x, axis=axis, mask=mask)
    std = np.sqrt(var + epsilon)
    normed = (x - mean) / std
    if mask is None:
        return normed
    return np.where(mask == 1, normed, x)
def str2bool(v):
    """Parse common textual booleans; real bools pass through unchanged.

    Raises:
        ValueError: if ``v`` is not a recognized boolean string.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise ValueError('Boolean value expected.')
def eval_str(val):
    """Best-effort literal parse: return ``ast.literal_eval(val)``, or
    ``val`` unchanged when it is not a valid Python literal.

    ``literal_eval`` raises SyntaxError (not only ValueError) on malformed
    input such as ``'foo bar'``, so both are caught.
    """
    try:
        val = ast.literal_eval(val)
    except (ValueError, SyntaxError):
        pass
    return val
def is_main_process():
    """Return True when called from the main process rather than a
    multiprocessing child."""
    name = multiprocessing.current_process().name
    return name == 'MainProcess'
def set_global_seed(seed=42, tf=None):
    """Seed PYTHONHASHSEED, the stdlib RNG, NumPy, and (optionally) a
    TensorFlow module passed as ``tf``."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    if tf is not None:
        tf.random.set_seed(seed)
def timeformat(t):
    """Format a number in scientific notation with two decimal places."""
    return '{:.2e}'.format(t)
def get_and_unpack(x):
    """Transpose a list of tuples-of-lists and concatenate each column.

    Decomposes a list of remote results that each correspond to a tuple
    of lists, e.g.::

        get_and_unpack([(['a'], ['b']), (['a'], ['b'])])
        >>> [['a', 'a'], ['b', 'b']]
    """
    return [list(itertools.chain.from_iterable(column)) for column in zip(*x)]
def squarest_grid_size(n, more_on_width=True):
    """Return the (height, width) of the most nearly square grid for n cells.

    Finds the smallest divisor d of n with d >= ceil(sqrt(n)) and uses it
    as the width; height is n / d. The linear divisor scan is fine for
    typical image-grid sizes.

    Args:
        n: The total number of images.
        more_on_width: If the grid cannot be square, put more cells on width.

    Returns:
        A tuple of (height, width) for the image grid.
    """
    d = math.ceil(np.sqrt(n))
    while n % d != 0:
        d += 1
    height, width = n // d, d
    if more_on_width:
        return height, width
    return width, height
def check_make_dir(path):
    """Ensure the directory for ``path`` exists, creating it if needed.

    If ``path`` has a file extension, its parent directory is created
    instead of the path itself. Uses ``os.makedirs`` so nested missing
    parents are created too (``os.mkdir`` would fail), and a path with no
    directory component (a bare filename) is a no-op.
    """
    _, ext = osp.splitext(path)
    if ext:  # if path is a file path, extract its directory path
        path, _ = osp.split(path)
    if path and not osp.isdir(path):
        os.makedirs(path, exist_ok=True)
def zip_pad(*args):
    """Zip the arguments, broadcasting every non-sequence argument to the
    length of the first list/tuple argument.

    At least one argument must be a list or tuple.
    """
    length = None
    for candidate in args:
        if isinstance(candidate, (list, tuple)):
            length = len(candidate)
            break
    assert length is not None
    broadcast = [
        arg if isinstance(arg, (list, tuple)) else [arg] * length
        for arg in args
    ]
    return list(zip(*broadcast))
def convert_indices(indices, *args):
    """Convert flat (1d) indices into a tuple of per-dimension indices.

    ``args`` give the sizes of the leading dimensions, mirroring
    ``np.unravel_index``; e.g. with shape (2, 2, 2), flat index 5 maps
    to (1, 0, 1).
    """
    per_dim = []
    remainder = indices
    for dim in range(1, len(args)):
        stride = np.prod(args[dim:])
        per_dim.append(remainder // stride)
        remainder = remainder % stride
    per_dim.append(remainder)
    return tuple(per_dim)
def infer_dtype(dtype, precision=None):
    """Map ``dtype`` to its ``precision``-bit counterpart.

    Floating dtypes map to float{16,32,64} and signed integers to
    int{16,32,64}; uint8 and bool are preserved; anything else yields
    None. When ``precision`` is None the dtype is returned unchanged.
    """
    if precision is None:
        return dtype
    if np.issubdtype(dtype, np.floating):
        return {16: np.float16, 32: np.float32, 64: np.float64}[precision]
    if np.issubdtype(dtype, np.signedinteger):
        return {16: np.int16, 32: np.int32, 64: np.int64}[precision]
    if np.issubdtype(dtype, np.uint8):
        return np.uint8
    # The np.bool alias was removed in NumPy 1.24; use the scalar type
    # np.bool_ for both the comparison and the result.
    if dtype == np.bool_:
        return np.bool_
    return None
def convert_dtype(value, precision=32, dtype=None, **kwargs):
    """Convert ``value`` to an ndarray with an inferred or explicit dtype.

    When ``dtype`` is None it is inferred from the value's dtype and the
    requested ``precision`` via `infer_dtype`. Uses ``np.asarray`` instead
    of ``np.array(..., copy=False)``, which raises under NumPy 2 whenever
    a copy is unavoidable.
    """
    value = np.asarray(value, **kwargs)
    if dtype is None:
        dtype = infer_dtype(value.dtype, precision)
    return value.astype(dtype)
def flatten_dict(**kwargs):
    """Flatten a dict of lists into a list of dicts, zipped element-wise.

    Nested dicts are flattened recursively and scalars are broadcast::

        flatten_dict(lr=[1, 2], a=[10, 3])
        >>> [{'lr': 1, 'a': 10}, {'lr': 2, 'a': 3}]
    """
    keys = []
    value_lists = []
    for key, value in kwargs.items():
        keys.append(key)
        if isinstance(value, dict):
            value_lists.append(flatten_dict(**value))
        elif isinstance(value, (int, float)):
            value_lists.append([value])
        else:
            value_lists.append(value)
    return [dict(zip(keys, combo)) for combo in zip(*value_lists)]
def product_flatten_dict(**kwargs):
    """Flatten a dict of lists into a list of dicts via Cartesian product.

    Every combination of values is produced; nested dicts are expanded
    recursively and scalars are treated as single-element lists::

        product_flatten_dict(lr=[1, 2], c=[2, 4])
        >>> [{'lr': 1, 'c': 2}, {'lr': 1, 'c': 4},
             {'lr': 2, 'c': 2}, {'lr': 2, 'c': 4}]
    """
    keys = []
    value_lists = []
    for key, value in kwargs.items():
        keys.append(key)
        if isinstance(value, dict):
            value_lists.append(product_flatten_dict(**value))
        elif isinstance(value, (int, float)):
            value_lists.append([value])
        else:
            value_lists.append(value)
    return [dict(zip(keys, combo)) for combo in itertools.product(*value_lists)]
def batch_dicts(x, func=np.stack):
    """Combine a list of dicts sharing the same keys into one dict whose
    values are the per-key sequences combined by ``func`` (default
    ``np.stack``)."""
    keys = x[0].keys()
    grouped = zip(*(d.values() for d in x))
    return dict(zip(keys, (func(vals) for vals in grouped)))
class Every:
    """Callable gate that fires once every ``period`` steps, starting at
    ``start``."""

    def __init__(self, period, start=0):
        self._period = period
        self._next = start

    def __call__(self, step):
        """Return True when ``step`` has reached the next firing point."""
        if step < self._next:
            return False
        # Advance the threshold past ``step`` (may skip several periods).
        while self._next <= step:
            self._next += self._period
        return True

    def step(self):
        """The step at which the gate last fired."""
        return self._next - self._period
class RunningMeanStd:
    """Tracks running mean/std of streamed data, merged batch-by-batch with
    the parallel-variance (Chan et al.) update formulas.

    A reimplementation of RunningMeanStd from OpenAI's baselines.
    """
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
    def __init__(self, axis, epsilon=1e-8, clip=None, name=None, ndim=None):
        """ Computes running mean and std from data
        A reimplementation of RunningMeanStd from OpenAI's baselines
        Args:
            axis: axis along which we compute mean and std from incoming data.
                If it's None, we only receive at a time a sample without batch dimension
            epsilon: initial pseudo-count (also avoids division by zero).
            clip: if set, normalized output is clipped to [-clip, clip].
            name: optional identifier for this tracker.
            ndim: expected number of dimensions for the stats, useful for debugging
        """
        self.name = name
        # Normalize axis to a tuple (or leave as None for single samples).
        if isinstance(axis, int):
            axis = (axis, )
        elif isinstance(axis, (tuple, list)):
            axis = tuple(axis)
        elif axis is None:
            pass
        else:
            raise ValueError(f'Invalid axis({axis}) of type({type(axis)})')
        if isinstance(axis, tuple):
            # Only leading axes are allowed so that mean/var broadcast
            # against the trailing feature dims during normalize().
            assert axis == tuple(range(len(axis))), \
                f'Axis should only specifies leading axes so that '\
                f'mean and var can be broadcasted automatically when normalizing. '\
                f'But receving axis = {axis}'
        self._axis = axis
        if self._axis is not None:
            # Slice selecting the reduced (batch) dims of incoming shapes.
            self._shape_slice = np.s_[: max(self._axis)+1]
        self._mean = None
        self._var = None
        self._epsilon = epsilon
        # Pseudo-count; stays == epsilon until the first real update.
        self._count = epsilon
        self._clip = clip
        self._ndim = ndim  # expected number of dimensions of the stats (debug aid)

    @property
    def axis(self):
        return self._axis

    def set_rms_stats(self, mean, var, count):
        """Overwrite the running statistics (e.g. when restoring state)."""
        self._mean = mean
        self._var = var
        self._std = np.sqrt(self._var)
        self._count = count

    def get_rms_stats(self):
        """Return the current (mean, var, count) as a named tuple."""
        Stats = collections.namedtuple('RMS', 'mean var count')
        return Stats(self._mean, self._var, self._count)

    def update(self, x, mask=None):
        """Fold a new batch ``x`` (optionally masked) into the statistics."""
        x = x.astype(np.float64)
        if self._axis is None:
            # Single sample: it is its own mean, with zero variance.
            assert mask is None, mask
            batch_mean, batch_var, batch_count = x, np.zeros_like(x), 1
        else:
            batch_mean, batch_var = moments(x, self._axis, mask)
            batch_count = np.prod(x.shape[self._shape_slice]) \
                if mask is None else np.sum(mask)
        if batch_count > 0:
            if self._ndim is not None:
                assert batch_mean.ndim == self._ndim, (batch_mean.shape, self._ndim)
            self.update_from_moments(batch_mean, batch_var, batch_count)

    def update_from_moments(self, batch_mean, batch_var, batch_count):
        """Merge precomputed batch moments via the parallel-variance formula."""
        if self._count == self._epsilon:
            # First real update: lazily initialize stats with the batch shape.
            self._mean = np.zeros_like(batch_mean, 'float64')
            self._var = np.ones_like(batch_var, 'float64')
        assert self._mean.shape == batch_mean.shape
        assert self._var.shape == batch_var.shape

        delta = batch_mean - self._mean
        total_count = self._count + batch_count

        new_mean = self._mean + delta * batch_count / total_count
        # no minus one here to be consistent with np.std
        m_a = self._var * self._count
        m_b = batch_var * batch_count
        M2 = m_a + m_b + delta**2 * self._count * batch_count / total_count
        assert np.all(np.isfinite(M2)), f'M2: {M2}'
        new_var = M2 / total_count

        self._mean = new_mean
        self._var = new_var
        self._std = np.sqrt(self._var)
        self._count = total_count
        assert np.all(self._var > 0), self._var[self._var <= 0]

    def normalize(self, x, zero_center=True, mask=None):
        """Normalize ``x`` with the running stats; masked entries keep
        their original values. Result is float32, optionally clipped."""
        assert not np.isinf(np.std(x)), f'{np.min(x)}\t{np.max(x)}'
        assert self._var is not None, (self._mean, self._var, self._count)
        assert x.ndim == self._var.ndim + (0 if self._axis is None else len(self._axis)), \
            (x.shape, self._var.shape, self._axis)
        if mask is not None:
            assert mask.ndim == len(self._axis), (mask.shape, self._axis)
            old = x.copy()
        if zero_center:
            x -= self._mean
        x /= self._std
        if self._clip:
            x = np.clip(x, -self._clip, self._clip)
        if mask is not None:
            mask = expand_dims_match(mask, x)
            x = np.where(mask, x, old)
        x = x.astype(np.float32)
        return x
class TempStore:
    """Context manager that snapshots some state on entry and restores it on exit.

    `get_fn` reads the current state; `set_fn` writes it back. The snapshot is
    exposed as `self.state` while inside the `with` block.
    """

    def __init__(self, get_fn, set_fn):
        self._snapshot_fn = get_fn
        self._restore_fn = set_fn

    def __enter__(self):
        self.state = self._snapshot_fn()
        return self

    def __exit__(self, *exc_info):
        # Restore unconditionally, even if the body raised.
        self._restore_fn(self.state)
| {
"alphanum_fraction": 0.5442145794,
"author": null,
"avg_line_length": 31.4640657084,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6a2f862a3a901e4cbda79128c786fa5aa5b8712c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-03-13T09:53:14.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-25T09:32:01.000Z",
"max_forks_repo_head_hexsha": "7d42bb2e78bc3e7b7c3ebbcf356a4d1cf12abebf",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "xlnwel/d2rl",
"max_forks_repo_path": "utility/utils.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7d42bb2e78bc3e7b7c3ebbcf356a4d1cf12abebf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "xlnwel/d2rl",
"max_issues_repo_path": "utility/utils.py",
"max_line_length": 95,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "7d42bb2e78bc3e7b7c3ebbcf356a4d1cf12abebf",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "xlnwel/grl",
"max_stars_repo_path": "utility/utils.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-13T09:53:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-09-04T14:50:39.000Z",
"num_tokens": 4466,
"path": null,
"reason": "import numpy,import sympy",
"repo": null,
"save_path": null,
"sha": null,
"size": 15323
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import cv2
import sys
import torch
import numpy as np
import pydicom
import os.path as osp
from copy import deepcopy
import torch.nn.functional as F
sys.path.insert(0, ".")
#from ct_iterator import CTIterator
from detectron2.engine import CTIterator
from detectron2.structures import BoxMode
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
__all__ = ["MediastinumDetector", "build_mediastinum_detector"]
# GPU index used for both weight loading (map_location) and inference.
gpu_id = 1
# Stage-1 2D detector configuration, keyed by stage name.
conf = {"2DStage1": {
    "device": gpu_id,
    "view": "axial",    # CT view the slice iterator walks along
    "nms_thresh": 0.1,  # non-maximum-suppression IoU threshold
    "num_class": 15,
    # Per-class score thresholds (index 0 presumably background — TODO confirm).
    "thresh": [0, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05],
    # NOTE(review): double slash kept from original; harmless on POSIX paths.
    "config_file": "detectron2/config//mask_rcnn_V_57_FPN_1x_3dce.yaml"
}}
class MePredictor:
    """Thin inference wrapper around GeneralizedRCNN.

    Builds the model from a detectron2 config, loads checkpoint weights onto
    the configured device (non-strict), and exposes a no-grad `predict`.
    """

    def __init__(self, cfg):
        self.cfg = cfg
        self.model = GeneralizedRCNN(self.cfg)
        self.model.eval()
        self.model.to(torch.device(cfg.MODEL.DEVICE))
        # Remap checkpoint tensors straight onto the target GPU.
        checkpoint = torch.load(
            cfg.MODEL.WEIGHTS,
            map_location=lambda storage, loc: storage.cuda(gpu_id),
        )
        # strict=False: tolerate missing/unexpected keys in the checkpoint.
        self.model.load_state_dict(checkpoint['model'], strict=False)

    def predict(self, original_image):
        """Run the model on `original_image` without tracking gradients."""
        with torch.no_grad():
            return self.model(original_image)
class MSDetector(object):
    """2D mediastinum structure detector driven by a detectron2 config.

    NOTE(review): `inference` below looks truncated — the result lists
    (boxes, rois, ...) are created but never populated and nothing is
    returned; confirm against the original file.
    """

    def __init__(self, config, gpu_id, show_mask_heatmaps=False):
        # Column index of detection scores in the result tensor.
        self.slot = 8
        self.device = gpu_id
        self.thresh = config['thresh']          # per-class score thresholds
        self.view = config['view']              # CT view to iterate over
        self.nms_thresh = config['nms_thresh']
        self.cfg = self.setup(config['config_file'])
        # get necessary params from model config
        self.pred = MePredictor(self.cfg)
        self.input_size = self.cfg.INPUT.MIN_SIZE_TEST
        self.batch_size = self.cfg.SOLVER.IMS_PER_BATCH
        self.input_channels = self.cfg.INPUT.SLICE_NUM

    def setup(self, config_file):
        """Load and freeze a detectron2 config from `config_file`."""
        cfg = get_cfg()
        cfg.merge_from_file(config_file)
        cfg.freeze()
        return cfg

    def inference(self, image_tensor):
        """
        run detector on all/selected slices of a CT
        :param ct: DxHxW, already on target device, dtype should be torch.uint8
        :param slice_inds: list of slices to run, torch.long
        :return: detection results, Nx7, (x, y, z, w, h, d), torch.tensor
        """
        # Iterate the CT volume in model-sized batches along the chosen view.
        ct_iter = CTIterator(image_tensor, self.input_size, self.device,
                             view=self.view,
                             in_channels=self.input_channels,
                             batch_size=self.batch_size)
        # Accumulators — NOTE(review): never filled below; method appears cut off.
        boxes, rois, slice_ids, scores, labels = [], [], [], [], []
        for i, batch in enumerate(ct_iter):
            '''inputs = []
for i_bach in range(len(batch)):
image = batch[i_bach]
height, width = image.shape[-2:]
inputs.append({"image": image, "height": height, "width": width})'''
            #predictions = self.model(inputs, image_size)
            #import pdb;pdb.set_trace()
            s1 = time.time()
            predictions = self.pred.predict(batch)
            # Wait for the GPU so the timing below reflects real work.
            torch.cuda.synchronize()
            print('pre_time:',time.time()-s1)
# --- Module-level smoke test: build the stage-1 detector and time one pass ---
prop_detector = MSDetector(conf['2DStage1'], gpu_id = gpu_id)
# Voxel spacing in mm — defined but not used below; TODO confirm intended use.
spacing = (0.53125, 0.53125, 1.258)
# Test CT volume stored as an .npz archive under the key 'data'.
image_tensors = torch.tensor(np.load('tests/vovnet/image.npz')['data'])
# NOTE(review): unused in this script — verify whether it should be passed in.
image_size = [512, 512]
s = time.time()
dets_ct = prop_detector.inference(image_tensors)
print('inference time: ', time.time() - s)
| {
"alphanum_fraction": 0.6268328446,
"author": null,
"avg_line_length": 38.2429906542,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5185449cf141576abcd180c76a2fb5e2e6b26686",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ad5fdb3f8daa4a90818b79b15da67e2c7236eb78",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "fangyunfeng/detectron2",
"max_forks_repo_path": "tests/vovnet/test_blade.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ad5fdb3f8daa4a90818b79b15da67e2c7236eb78",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "fangyunfeng/detectron2",
"max_issues_repo_path": "tests/vovnet/test_blade.py",
"max_line_length": 140,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ad5fdb3f8daa4a90818b79b15da67e2c7236eb78",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "fangyunfeng/detectron2",
"max_stars_repo_path": "tests/vovnet/test_blade.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1015,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4092
} |
#ifndef READ_PCAP_HPP
#define READ_PCAP_HPP
#define DETAIL_TIMING
#include <ParallelPcap/Pcap.hpp>
#include <ParallelPcap/Util.hpp>
#include <ParallelPcap/CountDictionary.hpp>
#include <boost/python.hpp>
#include <boost/program_options.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/filesystem.hpp>
#include <boost/serialization/map.hpp>
#include <boost/serialization/vector.hpp>
#include <iostream>
#include <fstream>
#include <chrono>
namespace bp = boost::python;
namespace po = boost::program_options;
namespace ba = boost::archive;
namespace bf = boost::filesystem;
namespace parallel_pcap {
class ReadPcap {
public:
  /**
   * Process every pcap file found in \p inputDir: count ngrams of the sizes
   * in \p ngrams across all files, then write serialized pcaps, per-file
   * integer vectors, and the final dictionary under \p outputDir.
   * Uses the default output-file prefixes "intVector"/"intVectorVector".
   * Processing starts immediately in the constructor body.
   */
  ReadPcap(
    std::string inputDir,
    bp::list &ngrams,
    size_t vocabSize,
    std::string outputDir,
    bool debug
  ) : _ngrams(ngrams), _vocabSize(vocabSize),
      _filePrefixIntVector("intVector"),
      _filePrefixIntVectorVector("intVectorVector"),
      _outputDir(outputDir), _msg(debug) { this->processFiles(inputDir); }

  /// Same as above, but with caller-supplied output-file prefixes.
  ReadPcap(
    std::string inputDir,
    bp::list &ngrams,
    size_t vocabSize,
    std::string filePrefixIntVector,
    std::string filePrefixIntVectorVector,
    std::string outputDir,
    bool debug
  ) : _ngrams(ngrams), _vocabSize(vocabSize),
      _filePrefixIntVector(filePrefixIntVector),
      _filePrefixIntVectorVector(filePrefixIntVectorVector),
      _outputDir(outputDir), _msg(debug) { this->processFiles(inputDir); }

  ~ReadPcap() { }

private:
  /// Vector of files to read
  std::vector<std::string> _files;

  /// This holds the list of ngram sizes that we want to compute
  bp::list _ngrams;

  /// The top vocabSize ngrams are given integer ids. The uncommon ones
  /// are assigned the UNK (unknown) symbol.
  size_t _vocabSize;

  /// Prefix to use for the files that have the vector of int data.
  std::string _filePrefixIntVector;

  /// Prefix to use for the files that have the vector of vector of int data.
  std::string _filePrefixIntVectorVector;

  /// The path to the output directory where files are written.
  std::string _outputDir;

  /// Messenger for printing
  Messenger _msg;

  /// Two-pass pipeline over all pcap files (see definition below).
  void processFiles(std::string &inputfile);

  /// Creates the output subdirectory layout under _outputDir.
  void createDirectories();
};
void ReadPcap::createDirectories()
{
// Add slash to outputDir string if not there
if (this->_outputDir.back() != '/')
this->_outputDir.push_back('/');
// intVector
if (!bf::exists(this->_outputDir + "intVector/"))
bf::create_directory(this->_outputDir + "intVector/");
// intVectorVector
if (!bf::exists(this->_outputDir + "intVectorVector/"))
bf::create_directory(this->_outputDir + "intVectorVector/");
// pcaps
if (!bf::exists(this->_outputDir + "pcaps/"))
bf::create_directory(this->_outputDir + "pcaps/");
// dictionary
if (!bf::exists(this->_outputDir + "dict/"))
bf::create_directory(this->_outputDir + "dict/");
}
/**
 * Two-pass pipeline over every pcap file in \p inputDir.
 * Pass 1: parse each pcap, serialize it to disk, extract string ngrams of
 *         each requested size, and accumulate counts in the dictionary.
 * Pass 2: re-parse each pcap and use the finalized dictionary to write a
 *         flat int vector and a per-packet vector-of-vectors of ints.
 * Finally the dictionary itself is serialized.
 *
 * Fixes vs. original: the inner ngram-loop index no longer shadows the outer
 * file-loop index `i`, and the "Total numer of files" log typo is corrected.
 */
void ReadPcap::processFiles(std::string &inputDir)
{
  auto everythingt1 = std::chrono::high_resolution_clock::now();
  typedef CountDictionary<std::string, StringHashFunction> DictionaryType;

  // Create directories
  this->createDirectories();

  // Read the list of files to be read
  for (bf::directory_iterator itr(inputDir); itr!=bf::directory_iterator(); ++itr)
  {
    this->_files.push_back(itr->path().string());
  }

  // Dictionary accumulating counts for the top _vocabSize ngrams.
  DictionaryType d(this->_vocabSize);

  this->_msg.printMessage("Total number of files " + std::to_string(this->_files.size()));

  /// We run through all the pcap files. In this first pass we
  /// 1) Create a pcap object from each file and save that to disk using
  ///    Boost serialize.
  /// 2) Create a vector of all the string ngrams found in the pcap file.
  /// 3) Feed that vector of string ngrams into the dictionary object to
  ///    iteratively update the dictionary counts for each ngram.
  for (size_t i = 0; i < this->_files.size(); i++)
  {
    std::string message = "First pass: Processing pcap file "
      + this->_files[i]
      + " number " + std::to_string(i + 1)
      + " out of " + std::to_string(this->_files.size());
    this->_msg.printMessage(message);

    auto t1 = std::chrono::high_resolution_clock::now();
    Pcap pcap(this->_files[i]);
    auto t2 = std::chrono::high_resolution_clock::now();
    this->_msg.printDuration("Time to create pcap object:", t1, t2);

    /// Save pcap file on first pass for later use
    bf::path p(this->_files[i]);
    std::string save_path = this->_outputDir + "pcaps/" +
      p.stem().string() + ".bin";
    std::ofstream ofs(save_path);
    ba::text_oarchive ar(ofs);
    ar << pcap;

    // Calculate the ngrams (loop index `n` — was `i`, shadowing the file loop).
    std::vector<std::vector<std::string>> ngramVector;
    typedef std::vector<std::string> OutputType;
    this->_msg.printMessage("Calculating Ngrams");
    for (size_t n = 0; n < bp::len(this->_ngrams); ++n) {
      size_t ngram = bp::extract<size_t>(this->_ngrams[n]);
      t1 = std::chrono::high_resolution_clock::now();
      NgramOperator ngramOperator(ngram);
      pcap.applyOperator<NgramOperator, OutputType>(ngramOperator,
                                                    ngramVector);
      t2 = std::chrono::high_resolution_clock::now();
      this->_msg.printDuration("Time to create ngram: ", t1, t2);
    }

    t1 = std::chrono::high_resolution_clock::now();
    std::vector<std::string> allNgrams = flatten(ngramVector);
    t2 = std::chrono::high_resolution_clock::now();
    this->_msg.printDuration("Time to flatten ngram: ", t1, t2);

    t1 = std::chrono::high_resolution_clock::now();
    d.processTokens(allNgrams);
    t2 = std::chrono::high_resolution_clock::now();
    this->_msg.printDuration("Time for dictionary.processTokens: ", t1, t2);
  }

  /// The dictionary has all the counts for all the ngrams in all the files.
  /// It is time to finalize the mapping string2int and int2string.
  auto t1 = std::chrono::high_resolution_clock::now();
  d.finalize();
  auto t2 = std::chrono::high_resolution_clock::now();
  this->_msg.printDuration("Time for dictionary.finalize: ", t1, t2);

  /// In this second pass we
  /// 1) read in the pcap files again,
  /// 2) translate the pcap file into a single vector of integers, and
  /// 3) also create a vector of vector of integers where the first dimension
  ///    indexes the packet.
  for (size_t i = 0; i < this->_files.size(); i++)
  {
    bf::path p(this->_files[i]);
    std::string message = "2nd pass: Processing pcap file "
      + this->_files[i]
      + " number " + std::to_string(i + 1)
      + " out of " + std::to_string(this->_files.size());
    this->_msg.printMessage(message);

    auto t1 = std::chrono::high_resolution_clock::now();
    Pcap pcap(this->_files[i]);
    auto t2 = std::chrono::high_resolution_clock::now();
    this->_msg.printDuration("Time to create pcap object:", t1, t2);
    this->_msg.printMessage("Num packets: " + std::to_string(pcap.getNumPackets()));

    // Calculate the ngrams (same extraction as pass 1; index `n`, not `i`).
    std::vector<std::vector<std::string>> ngramVector;
    typedef std::vector<std::string> OutputType;
    this->_msg.printMessage("Calculating ngrams");
    for (size_t n = 0; n < bp::len(this->_ngrams); ++n) {
      size_t ngram = bp::extract<size_t>(this->_ngrams[n]);
      t1 = std::chrono::high_resolution_clock::now();
      NgramOperator ngramOperator(ngram);
      pcap.applyOperator<NgramOperator, OutputType>(ngramOperator,
                                                    ngramVector);
      t2 = std::chrono::high_resolution_clock::now();
      this->_msg.printDuration("Time to create ngram: ", t1, t2);
    }

    t1 = std::chrono::high_resolution_clock::now();
    std::vector<std::string> allNgrams = flatten(ngramVector);
    t2 = std::chrono::high_resolution_clock::now();
    this->_msg.printDuration("Time to flatten ngram: ", t1, t2);

    /// We translate the entire vector of strings to a vector of ints and
    /// write that out to disk.
    t1 = std::chrono::high_resolution_clock::now();
    std::vector<size_t> translated = d.translate(allNgrams);
    t2 = std::chrono::high_resolution_clock::now();
    this->_msg.printDuration("Time for dictionary.translate (one file): ", t1, t2);
    std::string path = this->_outputDir + "intVector/" +
      this->_filePrefixIntVector + "_" + p.stem().string() + ".bin";
    writeBinary(translated, path);

    /// Translate the vector of vector of strings into a vector of vector
    /// of ints.
    t1 = std::chrono::high_resolution_clock::now();
    std::vector<std::vector<size_t>> vvtranslated = d.translate(ngramVector);
    t2 = std::chrono::high_resolution_clock::now();
    this->_msg.printDuration("Time for dictionary.translate (vector of vectors): ", t1, t2);
    path = this->_outputDir + "intVectorVector/" +
      this->_filePrefixIntVectorVector + "_" + p.stem().string() + ".bin";
    std::ofstream ofs(path);
    boost::archive::text_oarchive oa(ofs);
    oa << vvtranslated;
  }

  // Save dictionary to disk for later use
  std::string dict_path = this->_outputDir + "dict/dictionary.bin";
  std::ofstream d_ofs(dict_path);
  ba::text_oarchive d_ar(d_ofs);
  d_ar << d;

  auto everythingt2 = std::chrono::high_resolution_clock::now();
  this->_msg.printDuration("Time for everything: ", everythingt1, everythingt2);
}
}
#endif
| {
"alphanum_fraction": 0.6605436398,
"author": null,
"avg_line_length": 35.2734082397,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "2c2f851bd526ca666541a80f66ca7a4c8bcda970",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2021-05-21T11:29:01.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-04-01T15:41:09.000Z",
"max_forks_repo_head_hexsha": "44c0b6719652bb412980fedcb885e6e0b51746ae",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "Isdriai/packet2vec",
"max_forks_repo_path": "ParallelPcap/ParallelPcap/ReadPcap.hpp",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "44c0b6719652bb412980fedcb885e6e0b51746ae",
"max_issues_repo_issues_event_max_datetime": "2022-02-10T02:05:50.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-11-13T19:04:49.000Z",
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "Isdriai/packet2vec",
"max_issues_repo_path": "ParallelPcap/ParallelPcap/ReadPcap.hpp",
"max_line_length": 92,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "44c0b6719652bb412980fedcb885e6e0b51746ae",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "Isdriai/packet2vec",
"max_stars_repo_path": "ParallelPcap/ParallelPcap/ReadPcap.hpp",
"max_stars_repo_stars_event_max_datetime": "2021-05-21T11:29:32.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-05-21T11:29:32.000Z",
"num_tokens": 2434,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 9418
} |
import numpy as np
import matplotlib.pyplot as plt
# 42. Ten pregnant women were given an injection of pitocin to induce labor. Their
# systolic blood pressures immediately before and after the injection were:
before = [134, 122, 132, 130, 128, 140, 118, 127, 125, 142]
after = [140, 130, 135, 126, 134, 138, 124, 126, 132, 144]
# Permutation (shuffling) test: under the null hypothesis both samples come
# from the same distribution, so the before/after labels are exchangeable.
# Test statistic: the mean of the differences.
n_mc = 10000              # number of Monte Carlo permutations
n = len(before)
mu_mc = np.zeros(n_mc)    # null distribution of the statistic
datos = np.array(before+after)  # pooled sample, shuffled in place below
for i in range(n_mc):
    np.random.shuffle(datos)
    new_before = datos[:n]
    new_after = datos[n:]
    mu_mc[i] = np.mean(new_before - new_after)
mu = np.mean(np.array(before) - np.array(after))  # observed statistic
# Two-sided p-value: fraction of permuted statistics more extreme than observed.
p_value = np.count_nonzero(np.abs(mu_mc)>np.abs(mu))/n_mc
if p_value<0.05:
    H = 'SI'
else:
    H = 'NO'
print('Problema 42. p-value: {:.2f}. La droga {} tiene un efecto en la presion'.format(p_value, H))
# Plot the null distribution with the observed value marked in red.
plt.figure()
plt.hist(mu_mc, bins=20)
plt.axvline(mu, color='red')
plt.savefig("tmp_42.png")
# 43. A question of medical importance is whether jogging leads to a
# reduction in one's pulse rate. To test this hypothesis, 8 nonjogging
# volunteers agreed to begin a 1-month jogging program. After the month
# their pulse rates were determined and compared with their earlier
# values. If the data are as follows, can we conclude that jogging has
# had an effect on the pulse rates?
before = [74,86,98,102,78,84,79,70]
after = [70,85,90,110,71,80,69,74]
# Same permutation test as problem 42: under the null hypothesis both samples
# come from the same distribution; test statistic is the mean difference.
n_mc = 10000
n = len(before)
mu_mc = np.zeros(n_mc)
datos = np.array(before+after)  # pooled sample, shuffled in place below
for i in range(n_mc):
    np.random.shuffle(datos)
    new_before = datos[:n]
    new_after = datos[n:]
    mu_mc[i] = np.mean(new_before - new_after)
mu = np.mean(np.array(before) - np.array(after))  # observed statistic
# Two-sided p-value against the permutation null distribution.
p_value = np.count_nonzero(np.abs(mu_mc)>np.abs(mu))/n_mc
if p_value<0.05:
    H = 'SI'
else:
    H = 'NO'
print('Problema 43. p-value: {:.2f}. Correr {} tiene un efecto en el pulso'.format(p_value, H))
# Null distribution with the observed statistic marked in red.
plt.figure()
plt.hist(mu_mc, bins=20)
plt.axvline(mu, color='red')
plt.savefig("tmp_43.png")
# 67. In the nineteen-seventies, the U.S. Veterans Administration
# (Murphy, 1977) conducted an experiment comparing coronary artery bypass
# surgery with medical drug therapy as treatments for coronary artery
# disease. The experiment involved 596 patients, of whom 286 were randomly
# assigned to receive surgery, with the remaining 310 assigned to drug
# therapy. A total of 252 of those receiving surgery, and a total of 270 of
# those receiving drug therapy were still alive three years after. Use these
# data to test the hypothesis that the survival probabilities are equal.
N_cirugia = 286            # patients assigned to surgery
N_droga = 310              # patients assigned to drug therapy
N_sobrevive_cirugia = 252  # surgery survivors at 3 years
N_sobrevive_droga = 270    # drug-therapy survivors at 3 years
# Under the null hypothesis of equal survival, both groups share the pooled
# survival probability p_sobre = (survivors)/(total patients). We simulate
# the surgery group's survivor count under that pooled probability and
# compare it with the observed count.
n_mc = 10000
p_sobre = (N_sobrevive_cirugia + N_sobrevive_droga)/(N_cirugia + N_droga)
n_sobre_mc = np.zeros(n_mc)
for i in range(n_mc):
    # Bernoulli trials: one uniform draw per surgery patient.
    r = np.random.random(N_cirugia)
    n_sobre_mc[i] = np.count_nonzero(r<p_sobre)
# Two-sided p-value: fraction of simulated counts further from the simulated
# mean than the observed count is.
delta = np.abs(np.mean(n_sobre_mc) - N_sobrevive_cirugia)
p_value = np.count_nonzero(np.abs(n_sobre_mc - np.mean(n_sobre_mc))>delta)/n_mc
if p_value<0.05:
    H = 'NO'
else:
    H = 'SI'
print('Problema 67. p-value: {:.2f}. Las probabilidades de supervivencia {} son iguales.'.format(p_value, H))
# Null distribution with the observed survivor count marked in red.
plt.figure()
plt.hist(n_sobre_mc, bins=20)
plt.axvline(N_sobrevive_cirugia, color='red')
plt.savefig("tmp_67.png")
# 69. The following table gives the number of fatal accidents of U.S.
# commercial airline carriers in the 16 years from 1980 to 1995. Do these
# data disprove, at the 5 percent level of significance, the hypothesis
# that the mean number of accidents in a year is greater than or equal to
# 4.5? What is the p-value? (Hint: first formulate a model for the number
# of accidents.)
accidentes = [0, 4, 4, 4, 1, 4, 2, 4, 3, 11, 6, 4, 4, 1, 4, 2]
# Model: yearly accident counts are Poisson with rate lambda (whose mean is
# lambda). We simulate the total count over 16 years under lambda = 4.5 and
# compare against the observed total.
n_mc = 10000
n_acc = len(accidentes)
n_tot_acc_mc = np.zeros(n_mc)
for i in range(n_mc):
    n_tot_acc_mc[i] = np.sum(np.random.poisson(lam=4.5, size=n_acc))
# Two-sided p-value: twice the smaller tail probability.
p_1 = np.count_nonzero(n_tot_acc_mc<np.sum(accidentes))/n_mc
p_2 = np.count_nonzero(n_tot_acc_mc>np.sum(accidentes))/n_mc
p_value = 2.0*np.min([p_1, p_2])
if p_value<0.05:
    H = 'NO'
else:
    H = 'SI'
print('Problema 69. p-value: {:.2f}. El numero promedio de accidentes {} es mayor o igual que 4.5.'.format(p_value, H))
# Null distribution of total accidents with the observed total in red.
plt.figure()
plt.hist(n_tot_acc_mc, bins=20)
plt.axvline(np.sum(accidentes), color='red')
plt.savefig("tmp_69.png")
| {
"alphanum_fraction": 0.7298672969,
"author": null,
"avg_line_length": 33.3393939394,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9bf2a9310772eb38be4bafbc537bed6b96cc2464",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "be016b25f2f49788235fbe91ec577fd16b9ad613",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "aess14/Cursos-Uniandes",
"max_forks_repo_path": "Metodos Computacionales Uniandes/Code/ejercicio_31.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "be016b25f2f49788235fbe91ec577fd16b9ad613",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "aess14/Cursos-Uniandes",
"max_issues_repo_path": "Metodos Computacionales Uniandes/Code/ejercicio_31.py",
"max_line_length": 119,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "be016b25f2f49788235fbe91ec577fd16b9ad613",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "aess14/Cursos-Uniandes",
"max_stars_repo_path": "Metodos Computacionales Uniandes/Code/ejercicio_31.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1717,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5501
} |
import numpy as np
import pandas as pd
import torch
import torch.nn.functional
import tqdm.auto as tqdm
from torch import nn
def _default_age_embedder(num_hidden, num_factors):
return nn.Sequential(
nn.utils.weight_norm(nn.Linear(1, num_hidden)),
nn.LeakyReLU(0.3),
nn.utils.weight_norm(nn.Linear(num_hidden, num_hidden)),
nn.LeakyReLU(0.3),
nn.utils.weight_norm(nn.Linear(num_hidden, num_hidden)),
nn.LeakyReLU(0.3),
nn.utils.weight_norm(nn.Linear(num_hidden, num_hidden)),
nn.LeakyReLU(0.3),
nn.Linear(num_hidden, num_factors),
)
def _default_discriminator(num_embeddings, num_hidden, num_classes):
return nn.Sequential(
nn.Linear(num_embeddings, num_hidden),
nn.LeakyReLU(0.3),
nn.Linear(num_hidden, num_hidden),
nn.LeakyReLU(0.3),
nn.Linear(num_hidden, num_hidden),
nn.LeakyReLU(0.3),
nn.Linear(num_hidden, num_hidden),
nn.LeakyReLU(0.3),
nn.Linear(num_hidden, num_classes),
)
def _default_offsetter(num_embeddings, num_hidden, num_factors):
return nn.Sequential(
nn.utils.weight_norm(nn.Linear(1, num_hidden, bias=True)),
nn.LeakyReLU(0.3),
nn.utils.weight_norm(nn.Linear(num_hidden, num_hidden, bias=True)),
nn.LeakyReLU(0.3),
nn.utils.weight_norm(nn.Linear(num_hidden, num_hidden, bias=True)),
nn.LeakyReLU(0.3),
nn.utils.weight_norm(nn.Linear(num_hidden, num_hidden, bias=True)),
nn.LeakyReLU(0.3),
nn.Linear(num_hidden, num_factors * num_embeddings, bias=True),
)
class TemporalNMF(nn.Module):
def __init__(
    self,
    num_entities,
    num_embeddings,
    num_factors,
    num_hidden,
    num_days,
    num_matrices,
    num_classes,
    ages,
    symmetric=True,
    age_embedder=None,
    discriminator=None,
    offsetter=None,
    nonnegative=True,
    max_norm_embedding=1,
):
    """Temporal NMF model with age-driven factors and per-entity offsets.

    Args:
        num_entities: number of entities (rows of the embedding table).
        num_embeddings: embedding dimensionality per entity.
        num_factors: number of latent factors (doubled internally when
            symmetric=False, for separate left/right factors).
        num_hidden: hidden width of the default MLP submodules.
        num_days: number of timesteps covered by `ages`.
        num_matrices: output channels of `output_map`.
        num_classes: discriminator output classes.
        ages: numpy array indexed as [day, entity]; entries <= 0 are
            excluded from the age mean/std normalization.
        symmetric: whether reconstruction uses one shared factor set.
        age_embedder / discriminator / offsetter: optional replacement
            submodules; defaults are built when None.
        nonnegative: clamp factors with ReLU during reconstruction.
        max_norm_embedding: L1 max-norm for embedding rows (norm_type=1).
    """
    super().__init__()
    self.num_entities = num_entities
    self.num_hidden = num_hidden
    self.num_factors = num_factors
    self.symmetric = symmetric
    # Asymmetric models keep separate left/right factor halves.
    if not symmetric:
        self.num_factors *= 2
    self.num_embeddings = num_embeddings
    self.num_classes = num_classes
    self.num_days = num_days
    # NOTE(review): when symmetric=False the default submodules below are
    # built with the *un-doubled* local `num_factors`, while views elsewhere
    # use the doubled self.num_factors — confirm this is intentional.
    self.age_embedder = (
        _default_age_embedder(num_hidden, num_factors) if age_embedder is None else age_embedder
    )
    self.discriminator = (
        _default_discriminator(num_embeddings, num_hidden, self.num_classes)
        if discriminator is None
        else discriminator
    )
    self.offsetter = (
        _default_offsetter(num_embeddings, num_hidden, num_factors)
        if offsetter is None
        else offsetter
    )
    self.nonnegative = nonnegative
    # L1 max-norm keeps embedding rows bounded; rows are used via .abs().
    self.embeddings = nn.Embedding(
        num_entities, num_embeddings, max_norm=max_norm_embedding, norm_type=1
    )
    nn.init.orthogonal_(self.embeddings.weight, gain=10)
    if symmetric:
        self.output_map = nn.Parameter(torch.ones(self.num_factors, num_matrices))
    else:
        self.output_map = nn.Parameter(torch.ones(self.num_factors // 2, num_matrices))
    # Age normalization stats computed over strictly positive ages only.
    self.ages = torch.from_numpy(ages)
    self.std_age = torch.std(self.ages[self.ages > 0])
    self.mean_age = torch.mean(self.ages[self.ages > 0])
    # NOTE(review): this assignment shadows nn.Module.modules(); the
    # attribute name `modules` hides the base-class method — verify callers.
    self.modules = nn.ModuleList((self.age_embedder, self.discriminator, self.offsetter))
def get_device(self):
    """Device of this module's parameters (assumes all share one device)."""
    first_param = next(self.parameters())
    return first_param.device
def get_model_parameters(self):
    """All parameters except those belonging to the discriminator."""
    selected = []
    for name, params in self.named_parameters():
        if "discriminator" not in name:
            selected.append(params)
    return selected
def get_discriminator_parameters(self):
    """Only the parameters belonging to the discriminator submodule."""
    selected = []
    for name, params in self.named_parameters():
        if "discriminator" in name:
            selected.append(params)
    return selected
def get_age_factors(self, temporal_idxs, entity_idxs):
    """Factors predicted purely from normalized age by the age embedder.

    Args:
        temporal_idxs: 1-D LongTensor of day indices.
        entity_idxs: 1-D LongTensor of entity indices.
    Returns:
        Tensor of shape (num_timesteps, num_entities, num_factors).
    """
    num_entities = len(entity_idxs)
    num_timesteps = len(temporal_idxs)
    # Standardize ages with stats computed over positive ages in __init__.
    ages = (self.ages[temporal_idxs][:, entity_idxs] - self.mean_age) / self.std_age
    # use timesteps as second input
    """
    timesteps = temporal_idxs[:, None].repeat(1, num_entities)
    ages = torch.cat((ages[:, :, None], timesteps[:, :, None]), dim=-1)
    ages = self.pin_transfer(ages)
    factors_by_age = self.age_embedder(ages)
    """
    # Flatten (day, entity) pairs into a batch of scalar ages for the MLP.
    factors_by_age = self.age_embedder(self.pin_transfer(ages.view(-1, 1)))
    factors_by_age = factors_by_age.view(num_timesteps, num_entities, self.num_factors)
    return factors_by_age
def get_basis_functions(self, ages):
    """Evaluate the offsetter's per-embedding basis functions at `ages`.

    Args:
        ages: tensor shaped (num_days, batch_size, 1) of normalized ages.
    Returns:
        Tensor of shape (num_days, batch_size, num_embeddings, num_factors).
    """
    num_days, batch_size = ages.shape[0], ages.shape[1]
    raw = self.offsetter(ages)
    return raw.view(num_days, batch_size, self.num_embeddings, self.num_factors)
def get_embedding_factor_offsets(self, temporal_idxs, entity_idxs):
    """Per-entity factor offsets: embeddings weighting age basis functions.

    Args:
        temporal_idxs: 1-D LongTensor of day indices.
        entity_idxs: 1-D LongTensor of entity indices.
    Returns:
        (embs, offsets): absolute-valued embeddings of shape
        (num_entities, num_embeddings), and offsets of shape
        (num_timesteps, num_entities, num_factors) — the basis functions
        weighted by each entity's embedding and summed over embedding dims.
    """
    num_timesteps = len(temporal_idxs)
    # Standardize ages with the stats computed in __init__.
    ages = (self.ages[temporal_idxs][:, entity_idxs] - self.mean_age) / self.std_age
    # use timesteps as second input
    """
    num_entities = len(entity_idxs)
    timesteps = temporal_idxs[:, None].repeat(1, num_entities)
    ages = torch.cat((ages[:, :, None], timesteps[:, :, None]), dim=-1)
    ages = self.pin_transfer(ages)
    """
    ages = self.pin_transfer(ages[:, :, None])
    # Embeddings are used through their absolute value (soft nonnegativity).
    embs = self.embeddings(self.pin_transfer(entity_idxs)).abs()
    basis_functions = self.get_basis_functions(ages)
    # Weight each basis function by the entity's embedding and reduce over
    # the embedding dimension.
    offsets = basis_functions * embs[None, :, :, None].repeat(num_timesteps, 1, 1, 1)
    offsets = offsets.sum(dim=-2)
    return embs, offsets
def get_discriminator_output(self, embs):
    """Class logits produced by the adversarial discriminator for `embs`."""
    return self.discriminator(embs)
def reconstruction(self, factors):
    """Reconstruct interaction values from factors via an outer product.

    With a (days, entities, num_factors) input (as used in this class) the
    broadcasted product forms entity-by-entity interactions per day, summed
    over factors; the trailing [..., None] adds a singleton channel axis.
    """
    if self.nonnegative:
        # Clamp to enforce nonnegative factorization.
        factors = torch.nn.functional.relu(factors)
    if self.symmetric:
        # Symmetric: same factors on both sides of the product.
        recon = factors[:, :, None] * factors[:, None, :]
    else:
        # Asymmetric: first half = left factors, second half = right factors.
        recon = (
            factors[:, : self.num_factors // 2, None]
            * factors[:, None, self.num_factors // 2 :]
        )
    return recon.sum(dim=-1)[..., None]
def reconstruct_inputs(self, with_offsets=True, iterator=tqdm.trange):
    """Reconstruct every day's interaction matrix for all entities.

    Args:
        with_offsets: include per-entity embedding offsets on top of the
            pure age-driven factors.
        iterator: progress iterator over days (default tqdm.trange).
    Returns:
        Half-precision CPU tensor stacking one reconstruction per day.
    """
    with torch.no_grad():
        all_temporal_idxs = torch.LongTensor(list(range(self.num_days)))
        all_entity_idxs = torch.LongTensor(list(range(self.num_entities)))
        factors_by_age = self.get_age_factors(all_temporal_idxs, all_entity_idxs)
        if with_offsets:
            _, factor_offsets = self.get_embedding_factor_offsets(
                all_temporal_idxs, all_entity_idxs
            )
            factors_by_emb = factors_by_age + factor_offsets
        else:
            factors_by_emb = factors_by_age
        recs = []
        # One day at a time to bound peak memory; results moved to CPU fp16.
        for day in iterator(self.num_days):
            recs.append((self.reconstruction(factors_by_emb[day][None, :, :])).cpu().half())
        recs = torch.cat(recs, dim=0)
    return recs
def pin_transfer(self, tensor):
    """Move `tensor` to the model's device, via pinned memory when on CUDA."""
    target = self.get_device()
    if not str(target).startswith("cuda"):
        # CPU target: nothing to transfer.
        return tensor
    return tensor.pin_memory().to(target, non_blocking=True)
@staticmethod
def nonnegativity_loss(factors_by_age, factors_by_emb):
    """Penalty on negative factor entries: mean magnitude of negatives."""
    emb_penalty = torch.nn.functional.relu(-factors_by_emb).mean()
    age_penalty = torch.nn.functional.relu(-factors_by_age).mean()
    return emb_penalty + age_penalty
@staticmethod
def factor_l1_loss(factors):
    """Mean (over samples) of the L1 norm along the factor dimension."""
    l1_per_sample = factors.abs().sum(dim=-1)
    return l1_per_sample.mean()
def basis_function_l1_loss(self, min_age=0, max_age=60, num_age_samples=100):
    """L1 sparsity penalty on basis functions sampled over an age range."""
    sampled = torch.linspace(
        min_age, max_age, steps=num_age_samples, device=self.get_device()
    )
    # Normalize with the same stats used for real ages.
    normed = (sampled - self.mean_age) / self.std_age
    basis = self.get_basis_functions(normed[:, None, None])
    # Sum over the embedding axis, then average everything else.
    return basis.abs().sum(dim=-2).mean()
def embedding_l1_loss(self):
    """Mean L1 norm of the entity embedding vectors."""
    weight = self.embeddings.weight
    return weight.abs().sum(dim=-1).mean()
def embedding_sparsity_loss(self):
    # Alias for embedding_l1_loss: embedding sparsity is measured as the
    # mean L1 norm of the embedding vectors.  Kept as a separate name,
    # presumably for API/backward compatibility -- confirm with callers.
    return self.embedding_l1_loss()
def forward(self, temporal_idxs, entity_idxs):
    """Full forward pass of the model.

    Returns
    -------
    tuple
        ``(rec_by_age, rec_by_emb, factors_by_age, factors_by_emb,
        factor_offsets, embs)`` -- the age-only and embedding-corrected
        reconstructions plus all intermediate quantities.
    """
    age_factors = self.get_age_factors(temporal_idxs, entity_idxs)
    embs, offsets = self.get_embedding_factor_offsets(temporal_idxs, entity_idxs)
    # Embedding-corrected factors are the age factors plus per-entity offsets.
    emb_factors = age_factors + offsets
    return (
        self.reconstruction(age_factors),
        self.reconstruction(emb_factors),
        age_factors,
        emb_factors,
        offsets,
        embs,
    )
def get_factor_df(self, ids=None, embedding_dim=2, batch_size=128, valid_ages=None):
    """Collect per-day, per-entity factors into a tidy ``pandas.DataFrame``.

    Parameters
    ----------
    ids : array-like, optional
        Per-entity identifiers; when given, a "bee_id" column is added.
    embedding_dim : int or None
        Dimensionality of the embedding columns ("e_*").  If it equals
        ``self.num_embeddings`` the absolute embedding weights are used
        directly; otherwise they are reduced with UMAP.  ``None`` skips
        the embedding columns entirely.
    batch_size : int
        Number of entities processed per forward pass.
    valid_ages : array-like, optional
        Adds a "valid_age" column; assumed indexable as
        ``valid_ages[:, batch_idxs]``, i.e. shaped
        (num_days, num_entities) -- TODO confirm.

    Returns
    -------
    pandas.DataFrame
        One row per (day, entity) with columns "age", "day", factor
        columns "f_*" and optionally "bee_id", "valid_age", "e_*".
        Rows with negative age are dropped.
    """
    if embedding_dim is not None:
        if embedding_dim == self.num_embeddings:
            # No reduction needed: use the absolute embedding weights as-is.
            embs_reduced = np.abs(self.embeddings.weight.data.cpu().numpy())
        else:
            # Lazy import: umap is only needed on this code path.
            from umap import UMAP
            embs_reduced = UMAP(n_components=embedding_dim).fit_transform(
                np.abs(self.embeddings.weight.data.cpu().numpy())
            )
    with torch.no_grad():
        idx = 0
        dfs = []
        all_temporal_idxs = torch.LongTensor(list(range(self.num_days)))
        # Process entities in batches to bound memory usage.
        while idx < self.num_entities:
            batch_idxs = torch.arange(idx, min((idx + batch_size, self.num_entities)))
            (
                _,
                _,
                _,
                factors_by_emb,
                _,
                _,
            ) = self.forward(all_temporal_idxs, batch_idxs)
            idx += batch_size
            # Flatten the (day, entity) grid to one row per combination.
            bee_ages_flat = self.ages[:, batch_idxs].numpy().flatten()
            factors_flat = factors_by_emb.data.cpu().numpy().reshape(-1, self.num_factors)
            day_flat = np.tile(
                np.arange(self.num_days)[:, None], (1, len(batch_idxs))
            ).flatten()
            columns = ["age", "day"] + [f"f_{f}" for f in range(self.num_factors)]
            df_data = np.concatenate(
                (bee_ages_flat[:, None], day_flat[:, None], factors_flat), axis=-1
            )
            if ids is not None:
                columns = ["bee_id"] + columns
                # ids are constant over days, so repeat them once per day.
                ids_flat = np.tile(ids[batch_idxs][None, :], (self.num_days, 1)).flatten()
                df_data = np.concatenate((ids_flat[:, None], df_data), axis=-1)
            if valid_ages is not None:
                columns = ["valid_age"] + columns
                valid_flat = valid_ages[:, batch_idxs].flatten()
                df_data = np.concatenate((valid_flat[:, None], df_data), axis=-1)
            if embedding_dim is not None:
                columns += [f"e_{f}" for f in range(embedding_dim)]
                # Embeddings do not vary by day; tile the batch over days.
                embs_flat = np.tile(
                    embs_reduced[batch_idxs][None, :], (self.num_days, 1)
                ).reshape(-1, embedding_dim)
                df_data = np.concatenate((df_data, embs_flat), axis=-1)
            factor_df = pd.DataFrame(df_data, columns=columns)
            dfs.append(factor_df)
    factor_df = pd.concat(dfs)
    factor_df.reset_index(inplace=True, drop=True)
    # Drop rows with negative age (presumably unknown/invalid -- confirm).
    factor_df = factor_df[factor_df.age >= 0]
    return factor_df
| {
"alphanum_fraction": 0.604494382,
"author": null,
"avg_line_length": 35.0606060606,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "eff4696d57c86ef7fb73bcca4431811ec6b4495e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d5c9808ecd79be10f9b18490438494a77de1bc0f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nebw/temporal_nmf",
"max_forks_repo_path": "temporal_nmf/model.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d5c9808ecd79be10f9b18490438494a77de1bc0f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nebw/temporal_nmf",
"max_issues_repo_path": "temporal_nmf/model.py",
"max_line_length": 100,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "d5c9808ecd79be10f9b18490438494a77de1bc0f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nebw/temporal_nmf",
"max_stars_repo_path": "temporal_nmf/model.py",
"max_stars_repo_stars_event_max_datetime": "2020-11-23T09:04:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-07-31T12:17:24.000Z",
"num_tokens": 2539,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11570
} |
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau, Chris Hughes, Mario Carneiro
-/
import algebra.associated
import linear_algebra.basic
import order.zorn
import order.atoms
import order.compactly_generated
import tactic.abel
import data.nat.choose.sum
import linear_algebra.finsupp
/-!
# Ideals over a ring
This file defines `ideal R`, the type of ideals over a commutative ring `R`.
## Implementation notes
`ideal R` is implemented using `submodule R R`, where `•` is interpreted as `*`.
## TODO
Support one-sided ideals, and ideals over non-commutative rings.
-/
universes u v w
variables {α : Type u} {β : Type v}
open set function
open_locale classical big_operators pointwise
/-- A (left) ideal in a semiring `R` is an additive submonoid `s` such that
`a * b ∈ s` whenever `b ∈ s`. If `R` is a ring, then `s` is an additive subgroup. -/
@[reducible] def ideal (R : Type u) [semiring R] := submodule R R
section semiring
namespace ideal
variables [semiring α] (I : ideal α) {a b : α}
-- Basic membership facts, restated from the underlying `submodule` API.
protected lemma zero_mem : (0 : α) ∈ I := I.zero_mem
protected lemma add_mem : a ∈ I → b ∈ I → a + b ∈ I := I.add_mem
variables (a)
-- Left absorption: here scalar multiplication `•` is ring multiplication `*`.
lemma mul_mem_left : b ∈ I → a * b ∈ I := I.smul_mem a
variables {a}
@[ext] lemma ext {I J : ideal α} (h : ∀ x, x ∈ I ↔ x ∈ J) : I = J :=
submodule.ext h
-- An ideal containing a left-invertible element is the whole ring.
theorem eq_top_of_unit_mem
  (x y : α) (hx : x ∈ I) (h : y * x = 1) : I = ⊤ :=
eq_top_iff.2 $ λ z _, calc
    z = z * (y * x) : by simp [h]
  ... = (z * y) * x : eq.symm $ mul_assoc z y x
  ... ∈ I : I.mul_mem_left _ hx
theorem eq_top_of_is_unit_mem {x} (hx : x ∈ I) (h : is_unit x) : I = ⊤ :=
let ⟨y, hy⟩ := h.exists_left_inv in eq_top_of_unit_mem I x y hx hy
-- An ideal is the whole ring iff it contains `1`.
theorem eq_top_iff_one : I = ⊤ ↔ (1:α) ∈ I :=
⟨by rintro rfl; trivial,
 λ h, eq_top_of_unit_mem _ _ 1 h (by simp)⟩
theorem ne_top_iff_one : I ≠ ⊤ ↔ (1:α) ∉ I :=
not_congr I.eq_top_iff_one
@[simp]
theorem unit_mul_mem_iff_mem {x y : α} (hy : is_unit y) : y * x ∈ I ↔ x ∈ I :=
begin
  refine ⟨λ h, _, λ h, I.mul_mem_left y h⟩,
  obtain ⟨y', hy'⟩ := hy.exists_left_inv,
  have := I.mul_mem_left y' h,
  rwa [← mul_assoc, hy', one_mul] at this,
end
/-- The ideal generated by a subset of a ring -/
def span (s : set α) : ideal α := submodule.span α s
@[simp] lemma submodule_span_eq {s : set α} :
  submodule.span α s = ideal.span s :=
rfl
-- `span` lemmas, transported directly from `submodule.span`.
lemma subset_span {s : set α} : s ⊆ span s := submodule.subset_span
lemma span_le {s : set α} {I} : span s ≤ I ↔ s ⊆ I := submodule.span_le
lemma span_mono {s t : set α} : s ⊆ t → span s ≤ span t := submodule.span_mono
@[simp] lemma span_eq : span (I : set α) = I := submodule.span_eq _
@[simp] lemma span_singleton_one : span ({1} : set α) = ⊤ :=
(eq_top_iff_one _).2 $ subset_span $ mem_singleton _
lemma mem_span_insert {s : set α} {x y} :
  x ∈ span (insert y s) ↔ ∃ a (z ∈ span s), x = a * y + z := submodule.mem_span_insert
lemma mem_span_singleton' {x y : α} :
  x ∈ span ({y} : set α) ↔ ∃ a, a * y = x := submodule.mem_span_singleton
lemma span_insert (x) (s : set α) : span (insert x s) = span ({x} : set α) ⊔ span s :=
submodule.span_insert x s
lemma span_eq_bot {s : set α} : span s = ⊥ ↔ ∀ x ∈ s, (x:α) = 0 := submodule.span_eq_bot
@[simp] lemma span_singleton_eq_bot {x} : span ({x} : set α) = ⊥ ↔ x = 0 :=
submodule.span_singleton_eq_bot
@[simp] lemma span_zero : span (0 : set α) = ⊥ := by rw [←set.singleton_zero, span_singleton_eq_bot]
@[simp] lemma span_one : span (1 : set α) = ⊤ := by rw [←set.singleton_one, span_singleton_one]
-- Generating the whole ring is witnessed by a finite subset.
lemma span_eq_top_iff_finite (s : set α) :
  span s = ⊤ ↔ ∃ s' : finset α, ↑s' ⊆ s ∧ span (s' : set α) = ⊤ :=
begin
  simp_rw eq_top_iff_one,
  exact ⟨submodule.mem_span_finite_of_mem_span, λ ⟨s', h₁, h₂⟩, span_mono h₁ h₂⟩
end
/--
The ideal generated by an arbitrary binary relation.
-/
def of_rel (r : α → α → Prop) : ideal α :=
submodule.span α { x | ∃ (a b) (h : r a b), x + b = a }
/-- An ideal `P` of a ring `R` is prime if `P ≠ R` and `xy ∈ P → x ∈ P ∨ y ∈ P` -/
class is_prime (I : ideal α) : Prop :=
(ne_top' : I ≠ ⊤)
(mem_or_mem' : ∀ {x y : α}, x * y ∈ I → x ∈ I ∨ y ∈ I)
theorem is_prime_iff {I : ideal α} :
  is_prime I ↔ I ≠ ⊤ ∧ ∀ {x y : α}, x * y ∈ I → x ∈ I ∨ y ∈ I :=
⟨λ h, ⟨h.1, h.2⟩, λ h, ⟨h.1, h.2⟩⟩
theorem is_prime.ne_top {I : ideal α} (hI : I.is_prime) : I ≠ ⊤ := hI.1
theorem is_prime.mem_or_mem {I : ideal α} (hI : I.is_prime) :
  ∀ {x y : α}, x * y ∈ I → x ∈ I ∨ y ∈ I := hI.2
theorem is_prime.mem_or_mem_of_mul_eq_zero {I : ideal α} (hI : I.is_prime)
  {x y : α} (h : x * y = 0) : x ∈ I ∨ y ∈ I :=
hI.mem_or_mem (h.symm ▸ I.zero_mem)
-- Prime ideals are radical: a power in `I` forces the base element in `I`.
theorem is_prime.mem_of_pow_mem {I : ideal α} (hI : I.is_prime)
  {r : α} (n : ℕ) (H : r^n ∈ I) : r ∈ I :=
begin
  induction n with n ih,
  { rw pow_zero at H, exact (mt (eq_top_iff_one _).2 hI.1).elim H },
  { rw pow_succ at H, exact or.cases_on (hI.mem_or_mem H) id ih }
end
lemma not_is_prime_iff {I : ideal α} : ¬ I.is_prime ↔ I = ⊤ ∨ ∃ (x ∉ I) (y ∉ I), x * y ∈ I :=
begin
  simp_rw [ideal.is_prime_iff, not_and_distrib, ne.def, not_not, not_forall, not_or_distrib],
  exact or_congr iff.rfl
    ⟨λ ⟨x, y, hxy, hx, hy⟩, ⟨x, hx, y, hy, hxy⟩, λ ⟨x, hx, y, hy, hxy⟩, ⟨x, y, hxy, hx, hy⟩⟩
end
theorem zero_ne_one_of_proper {I : ideal α} (h : I ≠ ⊤) : (0:α) ≠ 1 :=
λ hz, I.ne_top_iff_one.1 h $ hz ▸ I.zero_mem
lemma bot_prime {R : Type*} [ring R] [is_domain R] : (⊥ : ideal R).is_prime :=
⟨λ h, one_ne_zero (by rwa [ideal.eq_top_iff_one, submodule.mem_bot] at h),
 λ x y h, mul_eq_zero.mp (by simpa only [submodule.mem_bot] using h)⟩
/-- An ideal is maximal if it is maximal in the collection of proper ideals. -/
class is_maximal (I : ideal α) : Prop := (out : is_coatom I)
theorem is_maximal_def {I : ideal α} : I.is_maximal ↔ is_coatom I := ⟨λ h, h.1, λ h, ⟨h⟩⟩
theorem is_maximal.ne_top {I : ideal α} (h : I.is_maximal) : I ≠ ⊤ := (is_maximal_def.1 h).1
theorem is_maximal_iff {I : ideal α} : I.is_maximal ↔
  (1:α) ∉ I ∧ ∀ (J : ideal α) x, I ≤ J → x ∉ I → x ∈ J → (1:α) ∈ J :=
is_maximal_def.trans $ and_congr I.ne_top_iff_one $ forall_congr $ λ J,
by rw [lt_iff_le_not_le]; exact
  ⟨λ H x h hx₁ hx₂, J.eq_top_iff_one.1 $
    H ⟨h, not_subset.2 ⟨_, hx₂, hx₁⟩⟩,
   λ H ⟨h₁, h₂⟩, let ⟨x, xJ, xI⟩ := not_subset.1 h₂ in
    J.eq_top_iff_one.2 $ H x h₁ xI xJ⟩
theorem is_maximal.eq_of_le {I J : ideal α}
  (hI : I.is_maximal) (hJ : J ≠ ⊤) (IJ : I ≤ J) : I = J :=
eq_iff_le_not_lt.2 ⟨IJ, λ h, hJ (hI.1.2 _ h)⟩
-- Every proper ideal sits below a coatom, since `⊤` is a compact element.
instance : is_coatomic (ideal α) :=
begin
  apply complete_lattice.coatomic_of_top_compact,
  rw ←span_singleton_one,
  exact submodule.singleton_span_is_compact_element 1,
end
/-- **Krull's theorem**: if `I` is an ideal that is not the whole ring, then it is included in some
maximal ideal. -/
theorem exists_le_maximal (I : ideal α) (hI : I ≠ ⊤) :
  ∃ M : ideal α, M.is_maximal ∧ I ≤ M :=
let ⟨m, hm⟩ := (eq_top_or_exists_le_coatom I).resolve_left hI in ⟨m, ⟨⟨hm.1⟩, hm.2⟩⟩
variables (α)
/-- Krull's theorem: a nontrivial ring has a maximal ideal. -/
theorem exists_maximal [nontrivial α] : ∃ M : ideal α, M.is_maximal :=
let ⟨I, ⟨hI, _⟩⟩ := exists_le_maximal (⊥ : ideal α) bot_ne_top in ⟨I, hI⟩
variables {α}
instance [nontrivial α] : nontrivial (ideal α) :=
begin
  rcases @exists_maximal α _ _ with ⟨M, hM, _⟩,
  exact nontrivial_of_ne M ⊤ hM
end
/-- If P is not properly contained in any maximal ideal then it is not properly contained
in any proper ideal -/
lemma maximal_of_no_maximal {R : Type u} [comm_semiring R] {P : ideal R}
  (hmax : ∀ m : ideal R, P < m → ¬is_maximal m) (J : ideal R) (hPJ : P < J) : J = ⊤ :=
begin
  by_contradiction hnonmax,
  rcases exists_le_maximal J hnonmax with ⟨M, hM1, hM2⟩,
  exact hmax M (lt_of_lt_of_le hPJ hM2) hM1,
end
theorem mem_span_pair {x y z : α} :
  z ∈ span ({x, y} : set α) ↔ ∃ a b, a * x + b * y = z :=
by simp [mem_span_insert, mem_span_singleton', @eq_comm _ _ z]
-- For `x` outside a maximal ideal, some multiple of `x` equals `1` mod the ideal.
theorem is_maximal.exists_inv {I : ideal α}
  (hI : I.is_maximal) {x} (hx : x ∉ I) : ∃ y, ∃ i ∈ I, y * x + i = 1 :=
begin
  cases is_maximal_iff.1 hI with H₁ H₂,
  rcases mem_span_insert.1 (H₂ (span (insert x I)) x
    (set.subset.trans (subset_insert _ _) subset_span)
    hx (subset_span (mem_insert _ _))) with ⟨y, z, hz, hy⟩,
  refine ⟨y, z, _, hy.symm⟩,
  rwa ← span_eq I,
end
section lattice
variables {R : Type u} [semiring R]
-- Membership in lattice operations on ideals (sup, Sup, inf, infi, bot).
lemma mem_sup_left {S T : ideal R} : ∀ {x : R}, x ∈ S → x ∈ S ⊔ T :=
show S ≤ S ⊔ T, from le_sup_left
lemma mem_sup_right {S T : ideal R} : ∀ {x : R}, x ∈ T → x ∈ S ⊔ T :=
show T ≤ S ⊔ T, from le_sup_right
lemma mem_Sup_of_mem {S : set (ideal R)} {s : ideal R}
  (hs : s ∈ S) : ∀ {x : R}, x ∈ s → x ∈ Sup S :=
show s ≤ Sup S, from le_Sup hs
theorem mem_Inf {s : set (ideal R)} {x : R} :
  x ∈ Inf s ↔ ∀ ⦃I⦄, I ∈ s → x ∈ I :=
⟨λ hx I his, hx I ⟨I, infi_pos his⟩, λ H I ⟨J, hij⟩, hij ▸ λ S ⟨hj, hS⟩, hS ▸ H hj⟩
@[simp] lemma mem_inf {I J : ideal R} {x : R} : x ∈ I ⊓ J ↔ x ∈ I ∧ x ∈ J := iff.rfl
@[simp] lemma mem_infi {ι : Sort*} {I : ι → ideal R} {x : R} : x ∈ infi I ↔ ∀ i, x ∈ I i :=
submodule.mem_infi _
@[simp] lemma mem_bot {x : R} : x ∈ (⊥ : ideal R) ↔ x = 0 :=
submodule.mem_bot _
end lattice
section pi
variables (ι : Type v)
/-- `I^n` as an ideal of `R^n`. -/
def pi : ideal (ι → α) :=
{ carrier := { x | ∀ i, x i ∈ I },
  zero_mem' := λ i, I.zero_mem,
  add_mem' := λ a b ha hb i, I.add_mem (ha i) (hb i),
  smul_mem' := λ a b hb i, I.mul_mem_left (a i) (hb i) }
lemma mem_pi (x : ι → α) : x ∈ I.pi ι ↔ ∀ i, x i ∈ I := iff.rfl
end pi
end ideal
end semiring
section comm_semiring
variables {a b : α}
-- A separate namespace definition is needed because the variables were historically in a different
-- order.
namespace ideal
variables [comm_semiring α] (I : ideal α)
@[simp]
theorem mul_unit_mem_iff_mem {x y : α} (hy : is_unit y) : x * y ∈ I ↔ x ∈ I :=
mul_comm y x ▸ unit_mul_mem_iff_mem I hy
-- In a commutative semiring, singleton-span membership is divisibility.
lemma mem_span_singleton {x y : α} :
  x ∈ span ({y} : set α) ↔ y ∣ x :=
mem_span_singleton'.trans $ exists_congr $ λ _, by rw [eq_comm, mul_comm]
lemma span_singleton_le_span_singleton {x y : α} :
  span ({x} : set α) ≤ span ({y} : set α) ↔ y ∣ x :=
span_le.trans $ singleton_subset_iff.trans mem_span_singleton
lemma span_singleton_eq_span_singleton {α : Type u} [comm_ring α] [is_domain α] {x y : α} :
  span ({x} : set α) = span ({y} : set α) ↔ associated x y :=
begin
  rw [←dvd_dvd_iff_associated, le_antisymm_iff, and_comm],
  apply and_congr;
  rw span_singleton_le_span_singleton,
end
-- Multiplying a generator by a unit does not change its span.
lemma span_singleton_mul_right_unit {a : α} (h2 : is_unit a) (x : α) :
  span ({x * a} : set α) = span {x} :=
begin
  apply le_antisymm,
  { rw span_singleton_le_span_singleton, use a},
  { rw span_singleton_le_span_singleton, rw is_unit.mul_right_dvd h2}
end
lemma span_singleton_mul_left_unit {a : α} (h2 : is_unit a) (x : α) :
  span ({a * x} : set α) = span {x} := by rw [mul_comm, span_singleton_mul_right_unit h2]
lemma span_singleton_eq_top {x} : span ({x} : set α) = ⊤ ↔ is_unit x :=
by rw [is_unit_iff_dvd_one, ← span_singleton_le_span_singleton, span_singleton_one,
  eq_top_iff]
theorem span_singleton_prime {p : α} (hp : p ≠ 0) :
  is_prime (span ({p} : set α)) ↔ prime p :=
by simp [is_prime_iff, prime, span_singleton_eq_top, hp, mem_span_singleton]
-- Maximal ideals are prime in a commutative semiring.
theorem is_maximal.is_prime {I : ideal α} (H : I.is_maximal) : I.is_prime :=
⟨H.1.1, λ x y hxy, or_iff_not_imp_left.2 $ λ hx, begin
  let J : ideal α := submodule.span α (insert x ↑I),
  have IJ : I ≤ J := (set.subset.trans (subset_insert _ _) subset_span),
  have xJ : x ∈ J := ideal.subset_span (set.mem_insert x I),
  cases is_maximal_iff.1 H with _ oJ,
  specialize oJ J x IJ hx xJ,
  rcases submodule.mem_span_insert.mp oJ with ⟨a, b, h, oe⟩,
  obtain (F : y * 1 = y * (a • x + b)) := congr_arg (λ g : α, y * g) oe,
  rw [← mul_one y, F, mul_add, mul_comm, smul_eq_mul, mul_assoc],
  refine submodule.add_mem I (I.mul_mem_left a hxy) (submodule.smul_mem I y _),
  rwa submodule.span_eq at h,
end⟩
@[priority 100] -- see Note [lower instance priority]
instance is_maximal.is_prime' (I : ideal α) : ∀ [H : I.is_maximal], I.is_prime :=
is_maximal.is_prime
lemma span_singleton_lt_span_singleton [comm_ring β] [is_domain β] {x y : β} :
  span ({x} : set β) < span ({y} : set β) ↔ dvd_not_unit y x :=
by rw [lt_iff_le_not_le, span_singleton_le_span_singleton, span_singleton_le_span_singleton,
  dvd_and_not_dvd_iff]
lemma factors_decreasing [comm_ring β] [is_domain β]
  (b₁ b₂ : β) (h₁ : b₁ ≠ 0) (h₂ : ¬ is_unit b₂) :
  span ({b₁ * b₂} : set β) < span {b₁} :=
lt_of_le_not_le (ideal.span_le.2 $ singleton_subset_iff.2 $
  ideal.mem_span_singleton.2 ⟨b₂, rfl⟩) $ λ h,
h₂ $ is_unit_of_dvd_one _ $ (mul_dvd_mul_iff_left h₁).1 $
by rwa [mul_one, ← ideal.span_singleton_le_span_singleton]
variables (b)
-- Right absorption, derived from left absorption via commutativity.
lemma mul_mem_right (h : a ∈ I) : a * b ∈ I := mul_comm b a ▸ I.mul_mem_left b h
variables {b}
lemma pow_mem_of_mem (ha : a ∈ I) (n : ℕ) (hn : 0 < n) : a ^ n ∈ I :=
nat.cases_on n (not.elim dec_trivial) (λ m hm, (pow_succ a m).symm ▸ I.mul_mem_right (a^m) ha) hn
theorem is_prime.mul_mem_iff_mem_or_mem {I : ideal α} (hI : I.is_prime) :
  ∀ {x y : α}, x * y ∈ I ↔ x ∈ I ∨ y ∈ I :=
λ x y, ⟨hI.mem_or_mem, by { rintro (h | h), exacts [I.mul_mem_right y h, I.mul_mem_left x h] }⟩
theorem is_prime.pow_mem_iff_mem {I : ideal α} (hI : I.is_prime)
  {r : α} (n : ℕ) (hn : 0 < n) : r ^ n ∈ I ↔ r ∈ I :=
⟨hI.mem_of_pow_mem n, (λ hr, I.pow_mem_of_mem hr n hn)⟩
-- Binomial-expansion argument: a high enough power of a sum lies in the span
-- of the (n+1)-st powers of the summands.
theorem pow_multiset_sum_mem_span_pow (s : multiset α) (n : ℕ) :
  s.sum ^ (s.card * n + 1) ∈ span ((s.map (λ x, x ^ (n + 1))).to_finset : set α) :=
begin
  induction s using multiset.induction_on with a s hs,
  { simp },
  simp only [finset.coe_insert, multiset.map_cons, multiset.to_finset_cons, multiset.sum_cons,
    multiset.card_cons, add_pow],
  refine submodule.sum_mem _ _,
  intros c hc,
  rw mem_span_insert,
  by_cases h : n+1 ≤ c,
  { refine ⟨a ^ (c - (n + 1)) * s.sum ^ ((s.card + 1) * n + 1 - c) *
      (((s.card + 1) * n + 1).choose c), 0, submodule.zero_mem _, _⟩,
    rw mul_comm _ (a ^ (n + 1)),
    simp_rw ← mul_assoc,
    rw [← pow_add, add_zero, add_tsub_cancel_of_le h], },
  { use 0,
    simp_rw [zero_mul, zero_add],
    refine ⟨_,_,rfl⟩,
    replace h : c ≤ n := nat.lt_succ_iff.mp (not_le.mp h),
    have : (s.card + 1) * n + 1 - c = s.card * n + 1 + (n - c),
    { rw [add_mul, one_mul, add_assoc, add_comm n 1, ← add_assoc, add_tsub_assoc_of_le h] },
    rw [this, pow_add],
    simp_rw [mul_assoc, mul_comm (s.sum ^ (s.card * n + 1)), ← mul_assoc],
    exact mul_mem_left _ _ hs }
end
theorem sum_pow_mem_span_pow {ι} (s : finset ι) (f : ι → α) (n : ℕ) :
  (∑ i in s, f i) ^ (s.card * n + 1) ∈ span ((λ i, f i ^ (n + 1)) '' s) :=
begin
  convert pow_multiset_sum_mem_span_pow (s.1.map f) n,
  { rw multiset.card_map, refl },
  rw [multiset.map_map, multiset.to_finset_map, finset.val_to_finset, finset.coe_image]
end
-- If `s` generates the whole ring, so do the `n`-th powers of its elements.
theorem span_pow_eq_top (s : set α)
  (hs : span s = ⊤) (n : ℕ) : span ((λ x, x ^ n) '' s) = ⊤ :=
begin
  rw eq_top_iff_one,
  cases n,
  { obtain rfl | ⟨x, hx⟩ := eq_empty_or_nonempty s,
    { rw [set.image_empty, hs],
      trivial },
    { exact subset_span ⟨_, hx, pow_zero _⟩ } },
  rw [eq_top_iff_one, span, finsupp.mem_span_iff_total] at hs,
  rcases hs with ⟨f, hf⟩,
  change f.support.sum (λ a, f a * a) = 1 at hf,
  have := sum_pow_mem_span_pow f.support (λ a, f a * a) n,
  rw [hf, one_pow] at this,
  refine (span_le).mpr _ this,
  rintros _ hx,
  simp_rw [finset.mem_coe, set.mem_image] at hx,
  rcases hx with ⟨x, hx, rfl⟩,
  have : span ({x ^ (n + 1)} : set α) ≤ span ((λ (x : α), x ^ (n + 1)) '' s),
  { rw [span_le, set.singleton_subset_iff],
    exact subset_span ⟨x, x.prop, rfl⟩ },
  refine this _,
  rw [mul_pow, mem_span_singleton],
  exact ⟨f x ^ (n + 1), mul_comm _ _⟩
end
end ideal
end comm_semiring
section ring
namespace ideal
variables [ring α] (I : ideal α) {a b : α}
-- With negation available, ideals are closed under negation and subtraction.
lemma neg_mem_iff : -a ∈ I ↔ a ∈ I := I.neg_mem_iff
lemma add_mem_iff_left : b ∈ I → (a + b ∈ I ↔ a ∈ I) := I.add_mem_iff_left
lemma add_mem_iff_right : a ∈ I → (a + b ∈ I ↔ b ∈ I) := I.add_mem_iff_right
protected lemma sub_mem : a ∈ I → b ∈ I → a - b ∈ I := I.sub_mem
lemma mem_span_insert' {s : set α} {x y} :
  x ∈ span (insert y s) ↔ ∃a, x + a * y ∈ span s := submodule.mem_span_insert'
end ideal
end ring
section division_ring
variables {K : Type u} [division_ring K] (I : ideal K)
namespace ideal
/-- All ideals in a division ring are trivial. -/
lemma eq_bot_or_top : I = ⊥ ∨ I = ⊤ :=
begin
  rw or_iff_not_imp_right,
  change _ ≠ _ → _,
  rw ideal.ne_top_iff_one,
  intro h1,
  rw eq_bot_iff,
  intros r hr,
  by_cases H : r = 0, {simpa},
  -- a nonzero member is invertible, so `1 ∈ I`, contradicting `h1`
  simpa [H, h1] using I.mul_mem_left r⁻¹ hr,
end
lemma eq_bot_of_prime [h : I.is_prime] : I = ⊥ :=
or_iff_not_imp_right.mp I.eq_bot_or_top h.1
lemma bot_is_maximal : is_maximal (⊥ : ideal K) :=
⟨⟨λ h, absurd ((eq_top_iff_one (⊤ : ideal K)).mp rfl) (by rw ← h; simp),
  λ I hI, or_iff_not_imp_left.mp (eq_bot_or_top I) (ne_of_gt hI)⟩⟩
end ideal
end division_ring
section comm_ring
namespace ideal
-- Congruence of products: `a ≡ b` and `c ≡ d` (mod `I`) give `a*c ≡ b*d`.
theorem mul_sub_mul_mem {R : Type*} [comm_ring R] (I : ideal R) {a b c d : R}
  (h1 : a - b ∈ I) (h2 : c - d ∈ I) : a * c - b * d ∈ I :=
begin
  rw (show a * c - b * d = (a - b) * c + b * (c - d), by {rw [sub_mul, mul_sub], abel}),
  exact I.add_mem (I.mul_mem_right _ h1) (I.mul_mem_left _ h2),
end
end ideal
end comm_ring
namespace ring
variables {R : Type*} [comm_ring R]
-- A subsingleton ring has `0 = 1`, hence cannot be a field.
lemma not_is_field_of_subsingleton {R : Type*} [ring R] [subsingleton R] : ¬ is_field R :=
λ ⟨⟨x, y, hxy⟩, _, _⟩, hxy (subsingleton.elim x y)
lemma exists_not_is_unit_of_not_is_field [nontrivial R] (hf : ¬ is_field R) :
  ∃ x ≠ (0 : R), ¬ is_unit x :=
begin
  have : ¬ _ := λ h, hf ⟨exists_pair_ne R, mul_comm, h⟩,
  simp_rw is_unit_iff_exists_inv,
  push_neg at ⊢ this,
  obtain ⟨x, hx, not_unit⟩ := this,
  exact ⟨x, hx, not_unit⟩
end
-- A nontrivial commutative ring is a non-field iff some ideal is strictly
-- between `⊥` and `⊤`.
lemma not_is_field_iff_exists_ideal_bot_lt_and_lt_top [nontrivial R] :
  ¬ is_field R ↔ ∃ I : ideal R, ⊥ < I ∧ I < ⊤ :=
begin
  split,
  { intro h,
    obtain ⟨x, nz, nu⟩ := exists_not_is_unit_of_not_is_field h,
    use ideal.span {x},
    rw [bot_lt_iff_ne_bot, lt_top_iff_ne_top],
    exact ⟨mt ideal.span_singleton_eq_bot.mp nz, mt ideal.span_singleton_eq_top.mp nu⟩ },
  { rintros ⟨I, bot_lt, lt_top⟩ hf,
    obtain ⟨x, mem, ne_zero⟩ := set_like.exists_of_lt bot_lt,
    rw submodule.mem_bot at ne_zero,
    obtain ⟨y, hy⟩ := hf.mul_inv_cancel ne_zero,
    rw [lt_top_iff_ne_top, ne.def, ideal.eq_top_iff_one, ← hy] at lt_top,
    exact lt_top (I.mul_mem_right _ mem), }
end
lemma not_is_field_iff_exists_prime [nontrivial R] :
  ¬ is_field R ↔ ∃ p : ideal R, p ≠ ⊥ ∧ p.is_prime :=
not_is_field_iff_exists_ideal_bot_lt_and_lt_top.trans
  ⟨λ ⟨I, bot_lt, lt_top⟩, let ⟨p, hp, le_p⟩ := I.exists_le_maximal (lt_top_iff_ne_top.mp lt_top) in
    ⟨p, bot_lt_iff_ne_bot.mp (lt_of_lt_of_le bot_lt le_p), hp.is_prime⟩,
   λ ⟨p, ne_bot, prime⟩, ⟨p, bot_lt_iff_ne_bot.mpr ne_bot, lt_top_iff_ne_top.mpr prime.1⟩⟩
/-- When a ring is not a field, the maximal ideals are nontrivial. -/
lemma ne_bot_of_is_maximal_of_not_is_field [nontrivial R] {M : ideal R} (max : M.is_maximal)
  (not_field : ¬ is_field R) : M ≠ ⊥ :=
begin
  rintros h,
  rw h at max,
  rcases max with ⟨⟨h1, h2⟩⟩,
  obtain ⟨I, hIbot, hItop⟩ := not_is_field_iff_exists_ideal_bot_lt_and_lt_top.mp not_field,
  exact ne_of_lt hItop (h2 I hIbot),
end
end ring
namespace ideal
/-- Maximal ideals in a non-field are nontrivial. -/
variables {R : Type u} [comm_ring R] [nontrivial R]
lemma bot_lt_of_maximal (M : ideal R) [hm : M.is_maximal] (non_field : ¬ is_field R) : ⊥ < M :=
begin
  rcases (ring.not_is_field_iff_exists_ideal_bot_lt_and_lt_top.1 non_field)
    with ⟨I, Ibot, Itop⟩,
  split, { simp },
  intro mle,
  apply @irrefl _ (<) _ (⊤ : ideal R),
  have : M = ⊥ := eq_bot_iff.mpr mle,
  rw this at *,
  -- if `M = ⊥` were maximal, the strictly intermediate ideal `I` would be `⊤`
  rwa hm.1.2 I Ibot at Itop,
end
end ideal
variables {a b : α}
/-- The set of non-invertible elements of a monoid. -/
def nonunits (α : Type u) [monoid α] : set α := { a | ¬is_unit a }
@[simp] theorem mem_nonunits_iff [monoid α] : a ∈ nonunits α ↔ ¬ is_unit a := iff.rfl
-- `nonunits` absorbs multiplication on either side in a commutative monoid.
theorem mul_mem_nonunits_right [comm_monoid α] :
  b ∈ nonunits α → a * b ∈ nonunits α :=
mt is_unit_of_mul_is_unit_right
theorem mul_mem_nonunits_left [comm_monoid α] :
  a ∈ nonunits α → a * b ∈ nonunits α :=
mt is_unit_of_mul_is_unit_left
theorem zero_mem_nonunits [semiring α] : 0 ∈ nonunits α ↔ (0:α) ≠ 1 :=
not_congr is_unit_zero_iff
@[simp] theorem one_not_mem_nonunits [monoid α] : (1:α) ∉ nonunits α :=
not_not_intro is_unit_one
-- Proper ideals consist entirely of nonunits.
theorem coe_subset_nonunits [semiring α] {I : ideal α} (h : I ≠ ⊤) :
  (I : set α) ⊆ nonunits α :=
λ x hx hu, h $ I.eq_top_of_is_unit_mem hx hu
-- Every nonunit lies in some maximal ideal, via Krull's theorem.
lemma exists_max_ideal_of_mem_nonunits [comm_semiring α] (h : a ∈ nonunits α) :
  ∃ I : ideal α, I.is_maximal ∧ a ∈ I :=
begin
  have : ideal.span ({a} : set α) ≠ ⊤,
  { intro H, rw ideal.span_singleton_eq_top at H, contradiction },
  rcases ideal.exists_le_maximal _ this with ⟨I, Imax, H⟩,
  use [I, Imax], apply H, apply ideal.subset_span, exact set.mem_singleton a
end
| {
"alphanum_fraction": null,
"author": "Mel-TunaRoll",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/lean/Mel-TunaRoll-Lean-Mordell-Weil-Mel-Branch/Lean-Mordell-Weil-Mel-Branch-4db36f86423976aacd2c2968c4e45787fcd86b97/src/ring_theory/ideal/basic.lean",
"reason": null,
"repo": "Lean-Mordell-Weil-Mel-Branch",
"save_path": "github-repos/lean/Mel-TunaRoll-Lean-Mordell-Weil-Mel-Branch",
"sha": "4db36f86423976aacd2c2968c4e45787fcd86b97",
"size": null
} |
!c***************************************************************
!c test1: copy image lines between two accessors, converting between a
!c complex*8 line (data1) and a split real/imaginary real*4 line
!c (data2: row 1 = real part, row 2 = imaginary part).
!c
!c   test = 1 : read complex lines from accessor1 until EOF and write
!c              split real/imag lines to accessor2.
!c   test = 2 : read split lines from accessor2 until EOF and write
!c              recombined complex lines to accessor1.
!c
!c NOTE(review): the test=1 loop writes data2(:,i) for i = 1..width1
!c although data2 is allocated with width2 columns (and the test=2 loop
!c fills data1(i) for i = 1..width2 while data1 has width1 elements);
!c this assumes width1 == width2 -- confirm with callers.
subroutine test1(accessor1,accessor2,width1,width2,test)
   implicit none
!c PARAMETER STATEMENTS:
   integer*8 accessor1,accessor2
   integer width1,width2,i,j,k,test,eofFlag
   complex*8, allocatable :: data1(:)
   real*4, allocatable :: data2(:,:)
   allocate(data1(width1))
   allocate(data2(2,width2))
   ! eofFlag is set negative by getLineSequential at end of file
   eofFlag = 0
   if(test .eq. 1) then
      do
         call getLineSequential(accessor1,data1,eofFlag)
         if(eofFlag .lt. 0)then
            write(6,*) 'eof'
            exit
         endif
         ! split each complex sample into (real, imaginary) rows
         do i = 1,width1
            data2(1,i) = real(data1(i))
            data2(2,i) = aimag(data1(i))
         enddo
         call setLineSequential(accessor2,data2)
      enddo
   endif
   if(test .eq. 2) then
      do
         call getLineSequential(accessor2,data2,eofFlag)
         if(eofFlag .lt. 0) exit
         ! recombine (real, imaginary) rows into complex samples
         do i = 1,width2
            data1(i) = cmplx(data2(1,i),data2(2,i))
         enddo
         call setLineSequential(accessor1,data1)
      enddo
   endif
   deallocate(data1)
   deallocate(data2)
end
| {
"alphanum_fraction": 0.4852824185,
"author": null,
"avg_line_length": 24.1730769231,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "4a82640add38096cdbe13f389171f7b03835466e",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 235,
"max_forks_repo_forks_event_max_datetime": "2022-03-18T07:37:24.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-02-10T05:00:53.000Z",
"max_forks_repo_head_hexsha": "1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c",
"max_forks_repo_licenses": [
"ECL-2.0",
"Apache-2.0"
],
"max_forks_repo_name": "vincentschut/isce2",
"max_forks_repo_path": "components/iscesys/ImageApi/test/test1.f90",
"max_issues_count": 276,
"max_issues_repo_head_hexsha": "1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T21:45:55.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-02-10T07:18:28.000Z",
"max_issues_repo_licenses": [
"ECL-2.0",
"Apache-2.0"
],
"max_issues_repo_name": "vincentschut/isce2",
"max_issues_repo_path": "components/iscesys/ImageApi/test/test1.f90",
"max_line_length": 65,
"max_stars_count": 1133,
"max_stars_repo_head_hexsha": "1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c",
"max_stars_repo_licenses": [
"ECL-2.0",
"Apache-2.0"
],
"max_stars_repo_name": "vincentschut/isce2",
"max_stars_repo_path": "components/iscesys/ImageApi/test/test1.f90",
"max_stars_repo_stars_event_max_datetime": "2022-01-07T21:33:08.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-07T21:24:57.000Z",
"num_tokens": 331,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1257
} |
"""
Bit Plane Slicing
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
def bit_plane_slicing(img_file, plane_level):
    """Slice a grayscale image into 8 bit planes and save each plane.

    Parameters
    ----------
    img_file : str
        Path to the image; loaded as 8-bit grayscale.
    plane_level : int
        Bit mask of the first plane to extract (1 = least significant
        bit).  The mask doubles on each of the 8 iterations, so starting
        at 1 extracts planes 1, 2, 4, ..., 128.  The original code
        overwrote this parameter with 1, silently ignoring the caller's
        value; it is now honored.

    Side effects
    ------------
    Writes "<k>_sliced.png" (pixel values 0/1) for each plane and shows
    the last extracted plane with matplotlib.
    """
    img = cv2.imread(img_file, 0)
    transformed_img = np.zeros(shape=img.shape)
    for slice_factor in range(8):
        # Vectorized bit test: 1 where the plane bit is set, else 0.
        # Replaces the per-pixel loop, which indexed img[x, y] with x
        # ranging over the width (transposed) and hid the resulting
        # IndexErrors behind a bare except.
        transformed_img = ((img & plane_level) != 0).astype(np.float64)
        plane_level *= 2
        cv2.imwrite("{}_sliced.png".format(slice_factor), transformed_img)
    plt.imshow(transformed_img, cmap='gray')
    plt.show()
bit_plane_slicing("dollar.tif", plane_level=1)
| {
"alphanum_fraction": 0.6844444444,
"author": null,
"avg_line_length": 24.1071428571,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "233bea65ba7dfe4e62c8a6ffe042d876656620fc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-02-02T07:19:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-02-02T07:19:57.000Z",
"max_forks_repo_head_hexsha": "ce03aedde3c49ae36837dc5843d97451e4e061d1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "kjanjua26/AI-DIP-Lab-Work",
"max_forks_repo_path": "DIP-Labs/Lab05_Bit_Plane_Slicing/Task_03.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ce03aedde3c49ae36837dc5843d97451e4e061d1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "kjanjua26/AI-DIP-Lab-Work",
"max_issues_repo_path": "DIP-Labs/Lab05_Bit_Plane_Slicing/Task_03.py",
"max_line_length": 68,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ce03aedde3c49ae36837dc5843d97451e4e061d1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "kjanjua26/AI-DIP-Lab-Work",
"max_stars_repo_path": "DIP-Labs/Lab05_Bit_Plane_Slicing/Task_03.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 192,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 675
} |
"""
Functions for converting between data formats
"""
import functools
from typing import Optional

import numpy as np
import pandas as pd
from .checks import (
is_flat_dataset,
is_sklearn_dataset,
is_stacked_dataset,
is_timeseries_dataset,
)
from .exceptions import TimekeepCheckError
def convert_timeseries_input(func):
    """Decorator: coerce a timeseries-dataset argument to sklearn format.

    Works both for plain functions (the data is the first positional
    argument) and for methods (the data follows ``self``): the first
    positional argument that is a ``numpy.ndarray`` is converted with
    ``to_sklearn_dataset`` before *func* is called.

    Raises
    ------
    AssertionError
        If neither of the first two positional arguments is an ndarray.
    """
    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def inner(*args, **kwargs):
        dim = 0
        x = args[dim]  # For functions, x should be first argument
        if not isinstance(x, np.ndarray):
            dim = 1
            x = args[dim]  # For methods, arguments are (self, x, ...)
        assert isinstance(x, np.ndarray)
        x = to_sklearn_dataset(x)
        args = [args[i] if i != dim else x for i in range(len(args))]
        return func(*args, **kwargs)

    return inner
def convert_output_to_timeseries(func):
    """Decorator: coerce *func*'s 2-d array output to a timeseries dataset.

    A 3-d result is already a timeseries dataset and is returned
    unchanged; a 2-d result is converted with ``to_timeseries_dataset``.

    Raises
    ------
    TimekeepCheckError
        If the wrapped function returns data with any other number of axes.
    """
    @functools.wraps(func)  # preserve the wrapped function's metadata
    def inner(*args, **kwargs):
        data = func(*args, **kwargs)
        if len(data.shape) == 3:
            return data
        # If it's not 2-dimensional, we can't handle it
        if not len(data.shape) == 2:
            # Report the number of axes; the original message formatted
            # the full shape tuple into "{} axes".
            raise TimekeepCheckError(
                "convert_output_to_timeseries: data has {} axes; "
                "data must have 2 axes".format(len(data.shape))
            )
        return to_timeseries_dataset(data)

    return inner
def timeseries_transformer(cls):
    """
    Augment sklearn.TransformerMixin classes to accept timeseries datasets

    Parameters
    ----------
    cls : TransformerMixin
        The class to augment

    Returns
    -------
    TransformerMixin
        The input class, which now accepts timeseries datasets as input
    """
    # Wrap each estimator entry point so its data argument is converted
    # to sklearn format before the original implementation runs.
    for method_name in ("fit", "transform", "fit_transform"):
        original = getattr(cls, method_name)
        setattr(cls, method_name, convert_timeseries_input(original))
    return cls
# ----- Format conversion ----- #
def to_flat_dataset(data) -> pd.DataFrame:
    """
    Convert a tslearn timeseries or tsfresh stacked dataset
    to a tsfresh flat dataset

    A flat dataset is a DataFrame with columns for 'id' (of timeseries),
    'time' (at which value occurs) and a column for each of the
    timeseries parameters

    Parameters
    ----------
    data
        The data to have its format changed

    Returns
    -------
    pandas.DataFrame
        Data, now as a tsfresh flat dataset

    Raises
    ------
    ValueError
        If data is not a tslearn timeseries dataset,
        tsfresh stacked dataset or tsfresh flat dataset
    """
    # Already flat: return unchanged.
    try:
        is_flat_dataset(data)  # will raise TimekeepCheckError if not
        return data
    except TimekeepCheckError:
        pass
    # Stacked -> flat: pivot each 'kind' into its own value column.
    try:
        is_stacked_dataset(data)  # will raise TimekeepCheckError if not
        # Get the id and time values for one "kind" of values
        # NOTE(review): assumes row label 0 exists (default RangeIndex) — confirm.
        flat_data = data.loc[
            data["kind"] == data.loc[0, "kind"], ["id", "time"]
        ].reset_index(drop=True)
        # Add the values as columns
        for col_name in np.unique(data["kind"]):
            data_subset = data.loc[
                data.loc[:, "kind"] == col_name, ["id", "time", "value"]
            ].rename(columns={"value": col_name})
            flat_data = flat_data.merge(data_subset, on=["id", "time"])
        return flat_data
    except TimekeepCheckError:
        pass
    # Timeseries (N, T, D) -> flat: one row per (id, time) pair.
    try:
        is_timeseries_dataset(data)  # will raise TimekeepCheckError if not
        n, t, d = data.shape
        # data.reshape(n * t, d) is C-order: row r holds (id = r // t,
        # time = r % t), i.e. ids vary slowest. Fix: the id labels must
        # therefore use np.repeat, not np.tile (tile only matched when n == 1).
        id_ = np.repeat(np.arange(n), t)
        time_ = np.tile(np.arange(t), n)
        values_ = data.reshape(n * t, d)
        df = pd.DataFrame({"id": id_, "time": time_})
        for value in range(d):
            df[str(value)] = values_[:, value]
        return df
    except TimekeepCheckError:
        pass
    raise ValueError(
        "Did not recognise data of type {}. Cannot convert to flat dataset".format(
            type(data)
        )
    )
def to_stacked_dataset(data) -> pd.DataFrame:
    """
    Convert a tslearn timeseries or tsfresh flat dataset
    to a tsfresh stacked dataset

    A stacked dataset is a DataFrame with columns for 'id' (of timeseries),
    'time' (at which value occurs), 'kind' (of value),
    and 'value' (of timeseries parameter)

    Parameters
    ----------
    data
        The data to have its format changed

    Returns
    -------
    pandas.DataFrame
        Data, now as a tsfresh stacked dataset

    Raises
    ------
    ValueError
        If data is not a tslearn timeseries dataset,
        tsfresh stacked dataset or tsfresh flat dataset
    """
    # Flat -> stacked: melt each value column into (id, time, kind, value) rows.
    try:
        is_flat_dataset(data)
        d = data.shape[1] - 2  # number of value columns (all but 'id'/'time')
        id_ = np.tile(data["id"].to_numpy(), d)
        time_ = np.tile(data["time"].to_numpy(), d)
        kind_ = np.repeat(
            np.array([col for col in data.columns if col not in ("time", "id")]),
            data.shape[0],
        )
        values_ = (
            data[[col for col in data.columns if col not in ("time", "id")]]
            .to_numpy()
            .flatten("F")  # flatten Fortran (column-major) order
        )
        return pd.DataFrame({"id": id_, "time": time_, "kind": kind_, "value": values_})
    except TimekeepCheckError:
        pass
    # Already stacked: return unchanged.
    try:
        is_stacked_dataset(data)
        return data
    except TimekeepCheckError:
        pass
    # Timeseries (N, T, D) -> stacked.
    try:
        is_timeseries_dataset(data)  # will raise TimekeepCheckError if not
        n, t, d = data.shape
        # data.flatten() is C-order: for flattened index r, id varies
        # slowest (r // (t*d)), then time ((r // d) % t), then kind (r % d).
        # Fix: the previous tile/tile/repeat labels only lined up with the
        # values when n == d == 1.
        id_ = np.repeat(np.arange(n), t * d)
        time_ = np.tile(np.repeat(np.arange(t), d), n)
        kind_ = np.tile(np.arange(d), n * t)
        values_ = data.flatten()
        return pd.DataFrame({"id": id_, "time": time_, "kind": kind_, "value": values_})
    except TimekeepCheckError:
        pass
    raise ValueError(
        "Did not recognise data of type {}. Cannot convert to stacked dataset".format(
            type(data)
        )
    )
def to_timeseries_dataset(
    data, t: Optional[int] = None, d: Optional[int] = None
) -> np.ndarray:
    """
    Convert a tsfresh or scikit-learn dataset to timeseries dataset.

    A timeseries dataset is a numpy.ndarray with shape (N, T, D).

    Parameters
    ----------
    data
        The data to have its format changed
    t : int, optional
        The number of time points
    d : int, optional
        The number of data parameters

    Returns
    -------
    numpy.ndarray
        Data, now as a timeseries dataset

    Raises
    ------
    ValueError
        If data is not a scikit-learn dataset,
        tsfresh stacked dataset or tsfresh flat dataset
    """
    # Already a timeseries dataset: return unchanged.
    try:
        is_timeseries_dataset(data)
    except TimekeepCheckError:
        pass
    else:
        return data
    # Flat dataset -> (N, T, D): one sub-frame per id, placed by time offset.
    try:
        is_flat_dataset(data)
    except TimekeepCheckError:
        pass
    else:
        d = data.shape[1] - 2  # value columns (all except 'id' and 'time')
        times = data["time"]
        # Span of the time axis; missing (id, time) pairs stay NaN below.
        t = np.max(times) - np.min(times) + 1
        unique_ids = np.unique(data["id"])
        n = unique_ids.size
        ts_data = np.full((n, t, d), np.nan)
        for idx in range(n):
            idx_data = data.loc[data["id"] == unique_ids[idx], :]
            # Shift times so the earliest time maps to index 0.
            idx_times = idx_data["time"].to_numpy() - np.min(times)
            idx_data = idx_data.drop(["id", "time"], axis=1).to_numpy()
            ts_data[idx, idx_times] = idx_data
        return ts_data
    # Stacked dataset -> (N, T, D): fill one (id, kind) series at a time.
    try:
        is_stacked_dataset(data)
    except TimekeepCheckError:
        pass
    else:
        unique_kinds = np.unique(data["kind"])
        d = len(unique_kinds)
        times = data["time"]
        t = np.max(times) - np.min(times) + 1
        unique_ids = np.unique(data["id"])
        n = unique_ids.size
        stacked_value_dtype = data["value"].to_numpy().dtype
        # NOTE(review): np.full with np.nan and an integer dtype raises
        # ValueError — this assumes 'value' has a float dtype; confirm upstream.
        ts_data = np.full((n, t, d), np.nan, dtype=stacked_value_dtype)
        for idx in range(n):
            for kind_idx in range(d):
                # Rows belonging to this (id, kind) pair.
                indexes = (data["id"] == unique_ids[idx]) & (
                    data["kind"] == unique_kinds[kind_idx]
                )
                idx_data = data.loc[indexes, "value"].to_numpy()
                idx_times = data.loc[indexes, "time"].to_numpy() - np.min(times)
                ts_data[idx, idx_times, kind_idx] = idx_data
        return ts_data
    # sklearn (N, D') -> (N, T, D); infer whichever of t/d is missing
    # from the total element count.
    try:
        is_sklearn_dataset(data)
    except TimekeepCheckError:
        raise ValueError(
            "Did not recognise data of type {}. Cannot convert to timeseries dataset".format(
                type(data)
            )
        )
    else:
        total_size = data.size
        n = data.shape[0]
        if isinstance(data, pd.DataFrame):
            data = data.to_numpy()
        if t is None and d is None:
            # assume d = 1
            return np.expand_dims(data, axis=2)
        elif t is None:
            t = int(total_size / (n * d))
        elif d is None:
            d = int(total_size / (n * t))
        # Transpose-reshape-transpose unstacks each row into a (t, d) slice
        # (inverse of the flattening done in to_sklearn_dataset).
        return data.T.reshape((d, t, n)).T
def to_sklearn_dataset(data) -> np.ndarray:
    """
    Convert a tslearn timeseries or tsfresh dataset
    to a scikit-learn dataset

    A scikit-learn dataset is a numpy.ndarray with 2 axes,
    shape (N, D) where N is number of data points and D is number
    of dimensions.

    Parameters
    ----------
    data
        The data to have its format changed

    Returns
    -------
    numpy.ndarray
        The data, now as a scikit-learn dataset

    Raises
    ------
    ValueError
        If data is not a tslearn timeseries dataset,
        tsfresh stacked dataset or tsfresh flat dataset
    """
    # Timeseries (N, T, D): collapse the time and parameter axes into one.
    try:
        is_timeseries_dataset(data)
    except TimekeepCheckError:
        pass
    else:
        return data.T.reshape((-1, data.shape[0])).T
    # Either tsfresh layout: go via the timeseries representation.
    for check in (is_stacked_dataset, is_flat_dataset):
        try:
            check(data)
        except TimekeepCheckError:
            continue
        return to_sklearn_dataset(to_timeseries_dataset(data))
    # Already 2-d sklearn data: return unchanged; otherwise reject.
    try:
        is_sklearn_dataset(data)
    except TimekeepCheckError:
        raise ValueError(
            "Did not recognise data of type {}. Cannot convert to sklearn dataset".format(
                type(data)
            )
        )
    return data
| {
"alphanum_fraction": 0.5820647931,
"author": null,
"avg_line_length": 26.7571801567,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7e9d59c6a985e9009db98d31888cb95a74cc361c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "07e727fb84e6e3344562b2eec3c6491d6961b6f3",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "TTitcombe/timekeep",
"max_forks_repo_path": "timekeep/conversion.py",
"max_issues_count": 18,
"max_issues_repo_head_hexsha": "07e727fb84e6e3344562b2eec3c6491d6961b6f3",
"max_issues_repo_issues_event_max_datetime": "2019-12-15T11:19:21.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-10-08T17:28:42.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "TTitcombe/timekeep",
"max_issues_repo_path": "timekeep/conversion.py",
"max_line_length": 93,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "07e727fb84e6e3344562b2eec3c6491d6961b6f3",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "TTitcombe/timekeep",
"max_stars_repo_path": "timekeep/conversion.py",
"max_stars_repo_stars_event_max_datetime": "2019-10-26T19:19:07.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-10-09T20:54:43.000Z",
"num_tokens": 2445,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10248
} |
\chapter{Undocumented Facilities}
Ns is often growing to include new protocols.
Unfortunately the documentation doesn't grow quite as often.
This section lists what remains to be documented,
or what needs to be improved.
(The documentation is in the doc subdirectory of the ns source code
if you want to add to it. :-)
\begin{description}
\item[Interface to the Interpreter]
\begin{itemize}
\item nothing currently
\end{itemize}
\item[Simulator Basics]
\begin{itemize}
\item LANs need to be updated for new wired/wireless support
(Yuri updated this?)
\item wireless support needs to be added
(done)
\item should explicitly list queueing options in the queue mgt chapter?
\end{itemize}
\item[Support]
\begin{itemize}
\item should pick a single list mgt package and document it
\item should document the trace-post-processing utilities in bin
\end{itemize}
\item[Routing]
\begin{itemize}
\item The usage and design of link state and MPLS routing modules
are not documented at all. (Note: link state and MPLS appeared only in
daily snapshots and releases after 09/14/2000.)
\item need to document hierarchical routing/addressing
(Padma has done)
\item need a chapter on supported ad-hoc routing protocols
\end{itemize}
\item[Queueing]
\begin{itemize}
\item CBQ needs documentation (can maybe build off of
\url{ftp://ftp.ee.lbl.gov/papers/cbqsims.ps.Z}?)
\end{itemize}
\item[Transport]
\begin{itemize}
\item need to document MFTP
\item need to document RTP (session-rtp.cc, etc.)
\item need to document multicast building blocks
\item should repair and document snoop and tcp-int
\end{itemize}
\item[Traffic and scenarios] (new section)
\begin{itemize}
\item should add a description of how to drive the simulator
from traces
\item should add discussion of the scenario generator
\item should add discussion of http traffic sources
\end{itemize}
\item[Application]
\begin{itemize}
\item is the non-Haobo http stuff documented? no.
\end{itemize}
\item[Scale]
\begin{itemize}
\item should add discussion of mixed mode (pending)
\end{itemize}
\item[Emulation]
\begin{itemize}
\item nothing currently
\end{itemize}
\item[Other]
\begin{itemize}
\item should document admission control policies?
\item should add a validation chapter and snarf
up the contents of ns-tests.html
\item should snarf up Marc Greis' tutorial
rather than just referring to it?
\end{itemize}
\end{description}
| {
"alphanum_fraction": 0.6546029515,
"author": null,
"avg_line_length": 27.9019607843,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "d2f68dc8f8cae951dbf8425afdb52a7814757f7b",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-09-29T16:06:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-09-29T16:06:57.000Z",
"max_forks_repo_head_hexsha": "f037b796ff10300ffe0422580be5855c37d0b140",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nitishk017/ns2project",
"max_forks_repo_path": "ns-allinone-2.35/ns-2.35/doc/undocumented.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "f037b796ff10300ffe0422580be5855c37d0b140",
"max_issues_repo_issues_event_max_datetime": "2019-01-22T21:41:38.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-01-20T17:35:23.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nitishk017/ns2project",
"max_issues_repo_path": "ns-allinone-2.35/ns-2.35/doc/undocumented.tex",
"max_line_length": 79,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "f037b796ff10300ffe0422580be5855c37d0b140",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nitishk017/ns2project",
"max_stars_repo_path": "ns-allinone-2.35/ns-2.35/doc/undocumented.tex",
"max_stars_repo_stars_event_max_datetime": "2021-04-21T06:39:42.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-21T06:39:42.000Z",
"num_tokens": 682,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2846
} |
#
# Test does not work on some cards.
#
import threading
# Queue was renamed to queue in Python 3; try the old name first.
# Fix: narrowed the bare `except:` so unrelated errors are not masked.
try:
    from Queue import Queue  # Python 2
except ImportError:
    from queue import Queue  # Python 3
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest, CUDATestCase
def newthread(exception_queue):
    """Thread body: select the device, round-trip an array, then close.

    Any exception is reported through *exception_queue* instead of being
    raised, so the parent test thread can collect failures.
    """
    try:
        cuda.select_device(0)
        stream = cuda.stream()
        host_arr = np.arange(100)
        dev_arr = cuda.to_device(host_arr, stream=stream)
        stream.synchronize()
        # Drop device resources before tearing the context down.
        del dev_arr
        del stream
        cuda.close()
    except Exception as e:
        exception_queue.put(e)
class TestSelectDevice(CUDATestCase):
    """Repeatedly select/close the device from fresh threads, expecting no errors."""

    def test_select_device(self):
        errors = Queue()
        # Run the workers one at a time so each thread gets a clean context.
        for _ in range(10):
            worker = threading.Thread(target=newthread, args=(errors,))
            worker.start()
            worker.join()
        collected = []
        while not errors.empty():
            collected.append(errors.get())
        self.assertEqual(collected, [])
if __name__ == '__main__':
unittest.main()
| {
"alphanum_fraction": 0.6234269119,
"author": null,
"avg_line_length": 22.4565217391,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8aa68755a3b62f3243bbf3ee486db1a757b840bd",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-10-09T18:11:50.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-10-09T18:11:50.000Z",
"max_forks_repo_head_hexsha": "8a6d09b15f0090144161158d01550847f15fc1c8",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "ARF1/numba",
"max_forks_repo_path": "numba/cuda/tests/cudadrv/test_select_device.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "8a6d09b15f0090144161158d01550847f15fc1c8",
"max_issues_repo_issues_event_max_datetime": "2019-02-11T13:46:30.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-02-11T13:46:30.000Z",
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "ARF1/numba",
"max_issues_repo_path": "numba/cuda/tests/cudadrv/test_select_device.py",
"max_line_length": 75,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "8a6d09b15f0090144161158d01550847f15fc1c8",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "ARF1/numba",
"max_stars_repo_path": "numba/cuda/tests/cudadrv/test_select_device.py",
"max_stars_repo_stars_event_max_datetime": "2020-10-09T18:11:44.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-09T18:11:44.000Z",
"num_tokens": 229,
"path": null,
"reason": "import numpy,from numba",
"repo": null,
"save_path": null,
"sha": null,
"size": 1033
} |
"""
This code is modified from Hengyuan Hu's repository.
https://github.com/hengyuan-hu/bottom-up-attention-vqa
"""
from __future__ import print_function
import _pickle as cPickle
import os
import json
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=FutureWarning)
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
from konlpy.tag import Mecab, Kkma
from transformers import AutoTokenizer
from ..utils import utils
class Dictionary(object):
    """Bidirectional word <-> index vocabulary used for question tokenization."""

    def __init__(self, word2idx=None, idx2word=None):
        # Fresh containers per instance (avoids shared mutable defaults).
        self.word2idx = {} if word2idx is None else word2idx
        self.idx2word = [] if idx2word is None else idx2word

    @property
    def ntoken(self):
        # Number of distinct words currently in the vocabulary.
        return len(self.word2idx)

    @property
    def padding_idx(self):
        # Padding uses the first index past the vocabulary.
        return len(self.word2idx)

    def tokenize(self, sentence, add_word, sp=None):
        """Convert *sentence* to a list of word indices.

        When *sp* is given it is used as the tokenizer; otherwise a simple
        lowercase/strip-punctuation split is applied. With add_word=True,
        unknown words are added to the vocabulary; otherwise they map to
        the UNK index (padding_idx - 1).
        """
        if sp is not None:
            words = sp(sentence)
        else:
            cleaned = sentence.lower()
            cleaned = cleaned.replace(',', '').replace('?', '').replace('\'s', ' \'s')
            words = cleaned.split()
        if add_word:
            return [self.add_word(w) for w in words]
        # the least frequent word (`bebe`) as UNK for Visual Genome dataset
        unk = self.padding_idx - 1
        return [self.word2idx.get(w, unk) for w in words]

    def dump_to_file(self, path):
        # Persist both mappings together so load_from_file can rebuild us.
        cPickle.dump([self.word2idx, self.idx2word], open(path, 'wb'))
        print('dictionary dumped to %s' % path)

    @classmethod
    def load_from_file(cls, path):
        """Rebuild a Dictionary from a pickle written by dump_to_file."""
        print('loading dictionary from %s' % path)
        word2idx, idx2word = cPickle.load(open(path, 'rb'))
        return cls(word2idx, idx2word)

    def add_word(self, word):
        """Return the index of *word*, inserting it if unseen."""
        if word not in self.word2idx:
            self.word2idx[word] = len(self.idx2word)
            self.idx2word.append(word)
        return self.word2idx[word]

    def __len__(self):
        return len(self.idx2word)
def _create_entry(img, question, answer):
if None!=answer:
answer.pop('image_id')
answer.pop('question_id')
entry = {
'question_id' : question['question_id'],
'image_id' : question['image_id'],
'image' : img,
'question' : question['question'],
'answer' : answer}
return entry
def _load_kvqa(dataroot, name, img_id2val):
    """Load entries
    img_id2val: dict {img_id -> val} val can be used to retrieve image or features
    dataroot: root path of dataset
    name: 'train', 'val', 'test'

    Returns (entries, type2idx, idx2type); the type mappings are None for
    the test split.
    """
    question_path = os.path.join(
        os.path.join(dataroot, 'KVQA_annotations_%s.json' % name))
    # Sort questions by image filename so they pair up with the answers
    # below, which are sorted by question_id (derived from that filename).
    questions = sorted(json.load(open(question_path, encoding='utf-8')), key=lambda x: x['image'])
    idx2type = None
    type2idx = None
    if 'test'!=name[:4]: # train, val
        # Soft answer targets are precomputed and cached as a pickle.
        answer_path = os.path.join(dataroot, 'cache', '%s_target.kvqa.pkl' % name)
        answers = cPickle.load(open(answer_path, 'rb'))
        answers = sorted(answers, key=lambda x: x['question_id'])
        utils.assert_eq(len(questions), len(answers))
        # Answer-type vocabulary is built on the fly from the annotations.
        type2idx = {}
        idx2type = []
        entries = []
        for question, answer in zip(questions, answers):
            # The image filename (sans extension) doubles as question/image id.
            q_id, _ = os.path.splitext(question['image'])
            question['question_id'] = q_id
            question['image_id'] = q_id
            utils.assert_eq(q_id, answer['question_id'])
            img_id = q_id
            image_index = img_id2val[img_id]
            entry = _create_entry(image_index, question, answer)
            entry['answerable'] = int(question['answerable'])
            if question['answer_type'] not in type2idx:
                type2idx[question['answer_type']] = len(idx2type)
                idx2type.append(question['answer_type'])
            entry['answer_type'] = type2idx[question['answer_type']]
            entries.append(entry)
    else: # test
        # Test split ships no answers; entries carry answer=None.
        entries = []
        for question in questions:
            img_id, _ = os.path.splitext(question['image'])
            q_id = img_id
            question['question_id'] = q_id
            question['image_id'] = q_id
            entry = _create_entry(img_id2val[img_id], question, None)
            entries.append(entry)
    return entries, type2idx, idx2type
class KvqaFeatureDataset(Dataset):
    # Torch Dataset serving precomputed image features plus tokenized
    # questions (and soft answer targets for train/val) for KVQA.
    def __init__(self, split, dictionary, max_length=16, dataroot='../data', tokenizer='sp'):
        """Load one KVQA split.

        Args:
            split: one of 'train', 'val', 'test'.
            dictionary: word->index Dictionary; replaced by the HuggingFace
                vocab when tokenizer == 'sp'.
            max_length: fixed token length questions are padded/truncated to.
            dataroot: directory holding annotation, cache and hdf5 files.
            tokenizer: 'sp' (HuggingFace), 'mecab' or 'kkma' (KoNLPy).
        """
        super(KvqaFeatureDataset, self).__init__()
        assert split in ['train', 'val', 'test']
        self.dataroot = dataroot
        self.max_length = max_length
        # Answer vocabulary produced by the preprocessing step.
        ans2label_path = os.path.join(self.dataroot, 'cache', 'trainval_ans2label.kvqa.pkl')
        label2ans_path = os.path.join(self.dataroot, 'cache', 'trainval_label2ans.kvqa.pkl')
        self.ans2label = cPickle.load(open(ans2label_path, 'rb'))
        self.label2ans = cPickle.load(open(label2ans_path, 'rb'))
        self.num_ans_candidates = len(self.ans2label)
        self.dictionary = dictionary
        # Maps image id -> row index into the hdf5 feature arrays.
        self.img_id2idx = cPickle.load(
            open(os.path.join(self.dataroot, '%s_imgid2idx.kvqa.pkl' % split),
                 'rb'))
        h5_path = os.path.join(self.dataroot, '%s_kvqa.hdf5' % split)
        print('loading features from h5 file')
        with h5py.File(h5_path, 'r') as hf:
            self.features = np.array(hf.get('image_features'))
            self.spatials = np.array(hf.get('spatial_features'))
            # pos_boxes[i] appears to hold the (start, end) slice of image
            # i's boxes in the flat feature arrays (see __getitem__).
            self.pos_boxes = np.array(hf.get('pos_boxes'))
        self.entries, self.type2idx, self.idx2type = _load_kvqa(self.dataroot, split, self.img_id2idx)
        if tokenizer == 'sp':
            self.tokenizer = AutoTokenizer.from_pretrained('klue/roberta-base', do_lower_case=False)
            # The HF tokenizer brings its own vocab; override the passed-in one.
            self.dictionary = self.tokenizer.vocab
        elif tokenizer == 'mecab':
            self.tokenizer = Mecab()
        elif tokenizer == 'kkma':
            self.tokenizer = Kkma()
        self.tokenize()
        self.tensorize()
        # Feature / spatial dimensionality exposed to the model.
        self.v_dim = self.features.size(1)
        self.s_dim = self.spatials.size(1)

    def tokenize(self): # max_length=14
        """Tokenizes the questions.

        This will add q_token in each entry of the dataset.
        -1 represent nil, and should be treated as padding_idx in embedding
        """
        for entry in self.entries:
            # Dispatch on tokenizer type by duck-typing its API: KoNLPy
            # taggers expose .morphs, HF tokenizers expose .tokenize,
            # anything else is assumed to be directly callable.
            if hasattr(self.tokenizer, 'morphs'):
                tokens = self.tokenizer.morphs(entry['question'].replace('.', ''))
                tokens = [self.dictionary.word2idx[token] for token in tokens[:self.max_length]]
                if len(tokens) < self.max_length:
                    # Pad the tail up to max_length. (An upstream comment
                    # claimed front-padding, but `tokens + padding` appends.)
                    padding = [self.dictionary.padding_idx] * (self.max_length - len(tokens))
                    tokens = tokens + padding
            elif hasattr(self.tokenizer, 'tokenize'):
                tokens = self.tokenizer.tokenize(entry['question'], add_special_tokens=True)
                tokens = [self.dictionary[token] for token in tokens[:self.max_length]]
                if len(tokens) < self.max_length:
                    # Pad the tail up to max_length with the HF '[PAD]' id.
                    padding = [self.dictionary['[PAD]']] * (self.max_length - len(tokens))
                    tokens = tokens + padding
            else:
                tokens = self.tokenizer(entry['question'])
                tokens = [self.dictionary(token) for token in tokens[:self.max_length]]
                if len(tokens) < self.max_length:
                    # Pad the tail up to max_length.
                    padding = [self.dictionary('[PAD]')] * (self.max_length - len(tokens))
                    tokens = tokens + padding
            utils.assert_eq(len(tokens), self.max_length)
            entry['q_token'] = tokens

    def tensorize(self):
        """Convert features and per-entry tokens/labels to torch tensors."""
        self.features = torch.from_numpy(self.features)
        self.spatials = torch.from_numpy(self.spatials)
        for entry in self.entries:
            question = torch.from_numpy(np.array(entry['q_token']))
            entry['q_token'] = question
            answer = entry['answer']
            if None!=answer:
                labels = np.array(answer['labels'])
                scores = np.array(answer['scores'], dtype=np.float32)
                if len(labels):
                    labels = torch.from_numpy(labels)
                    scores = torch.from_numpy(scores)
                    entry['answer']['labels'] = labels
                    entry['answer']['scores'] = scores
                else:
                    # No ground-truth labels for this question.
                    entry['answer']['labels'] = None
                    entry['answer']['scores'] = None

    def __getitem__(self, index):
        """Return one sample; tuple layout differs between train/val and test."""
        entry = self.entries[index]
        # Slice out only this image's boxes from the flat feature tensors.
        features = self.features[self.pos_boxes[entry['image']][0]:self.pos_boxes[entry['image']][1], :]
        spatials = self.spatials[self.pos_boxes[entry['image']][0]:self.pos_boxes[entry['image']][1], :]
        question = entry['q_token']
        question_id = entry['question_id']
        answer = entry['answer']
        if None != answer:
            labels = answer['labels']
            scores = answer['scores']
            # Soft target vector over all candidate answers.
            target = torch.zeros(self.num_ans_candidates)
            if labels is not None:
                target.scatter_(0, labels, scores)
            return features, spatials, question, target, entry['answerable'], entry['answer_type']
        else:
            # Test split: return question_id (for submissions) instead of a target.
            return features, spatials, question, question_id, 0., -1

    def __len__(self):
        return len(self.entries)
| {
"alphanum_fraction": 0.5893555693,
"author": null,
"avg_line_length": 38.5476190476,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "43e018cdd438294a05f7e1e87dcfc859cbc67df2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "eecc5945e1e9f77f0a85f77fec7573437d10f1d4",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "KiYOUNG2/BAN-KVQA",
"max_forks_repo_path": "solution_vqa/data/dataset.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "eecc5945e1e9f77f0a85f77fec7573437d10f1d4",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "KiYOUNG2/BAN-KVQA",
"max_issues_repo_path": "solution_vqa/data/dataset.py",
"max_line_length": 104,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "eecc5945e1e9f77f0a85f77fec7573437d10f1d4",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "KiYOUNG2/BAN-KVQA",
"max_stars_repo_path": "solution_vqa/data/dataset.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2264,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9714
} |
#encoding=utf8
from __future__ import print_function
import os
import six
import ast
import copy
import numpy as np
import paddle.fluid as fluid
class Placeholder(object):
    """Collects (shape, dtype, lod_level, name) specs and builds a py_reader.

    Fix: the class previously defined ``__init__`` twice — the zero-argument
    version was dead code, silently shadowed by the second definition. They
    are merged here with ``input_shapes`` optional, so both call styles work.
    """

    def __init__(self, input_shapes=None):
        """Create a Placeholder, optionally pre-populated from *input_shapes*.

        Args:
            input_shapes: optional iterable of (shape, dtype[, lod_level[, name]])
                sequences; lod_level defaults to 0 and name to "".
        """
        self.shapes = []
        self.dtypes = []
        self.lod_levels = []
        self.names = []
        if input_shapes is not None:
            for new_holder in input_shapes:
                shape = new_holder[0]
                dtype = new_holder[1]
                lod_level = new_holder[2] if len(new_holder) >= 3 else 0
                name = new_holder[3] if len(new_holder) >= 4 else ""
                self.append_placeholder(shape, dtype, lod_level=lod_level, name=name)

    def append_placeholder(self, shape, dtype, lod_level=0, name=""):
        """Append one placeholder spec to the parallel lists."""
        self.shapes.append(shape)
        self.dtypes.append(dtype)
        self.lod_levels.append(lod_level)
        self.names.append(name)

    def build(self, capacity, reader_name, use_double_buffer=False):
        """Build a fluid py_reader from the accumulated specs.

        Returns:
            [pyreader, list of the variables read from it]
        """
        pyreader = fluid.layers.py_reader(
            capacity=capacity,
            shapes=self.shapes,
            dtypes=self.dtypes,
            lod_levels=self.lod_levels,
            name=reader_name,
            use_double_buffer=use_double_buffer)
        return [pyreader, fluid.layers.read_file(pyreader)]

    def __add__(self, new_holder):
        """Append *new_holder* (shape, dtype[, lod_level[, name]]) and return self.

        Fix: previously returned None, which broke ``p = p + holder``.
        """
        assert isinstance(new_holder, tuple) or isinstance(new_holder, list)
        assert len(new_holder) >= 2
        shape = new_holder[0]
        dtype = new_holder[1]
        lod_level = new_holder[2] if len(new_holder) >= 3 else 0
        name = new_holder[3] if len(new_holder) >= 4 else ""
        self.append_placeholder(shape, dtype, lod_level=lod_level, name=name)
        return self
# Smoke-test entry point when the module is run directly.
if __name__ == "__main__":
    print("hello world!")
| {
"alphanum_fraction": 0.6107561235,
"author": null,
"avg_line_length": 25.7260273973,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e835038ee257d08f64b849855b30a0e9cd3d262b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 720,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T12:21:15.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-02-14T02:12:38.000Z",
"max_forks_repo_head_hexsha": "a95d49323ed504e5a9164586f171f408954fd43a",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "XiaoguangHu01/models",
"max_forks_repo_path": "PaddleNLP/Research/MRQA2019-D-NET/server/ernie_server/pdnlp/toolkit/placeholder.py",
"max_issues_count": 192,
"max_issues_repo_head_hexsha": "a95d49323ed504e5a9164586f171f408954fd43a",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T02:25:48.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-02-14T02:53:34.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "XiaoguangHu01/models",
"max_issues_repo_path": "PaddleNLP/Research/MRQA2019-D-NET/server/ernie_server/pdnlp/toolkit/placeholder.py",
"max_line_length": 85,
"max_stars_count": 1319,
"max_stars_repo_head_hexsha": "a95d49323ed504e5a9164586f171f408954fd43a",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "XiaoguangHu01/models",
"max_stars_repo_path": "PaddleNLP/Research/MRQA2019-D-NET/server/ernie_server/pdnlp/toolkit/placeholder.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T15:42:18.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-02-14T10:42:07.000Z",
"num_tokens": 441,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1878
} |
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as optimize
import sys
from termcolor import colored
def line(x, a, x0):
    """Linear model t = a * x + x0 used for the curve fit below."""
    return x * a + x0
def texsci(number):
    """Format *number* as a siunitx \\num{...} literal in 2-digit scientific notation."""
    mantissa = format(number, ".2e")
    return "\\num{" + mantissa + "}"
if __name__ == "__main__":
    # Data file may be given as the first CLI argument; default to ./data.csv.
    if len(sys.argv) < 2:
        binfile = './data.csv'
    else:
        binfile = sys.argv[1]
    try:
        data = np.loadtxt(binfile, delimiter=',')
    except IOError:
        # Fix: this was a Python 2 `print` statement, a SyntaxError on Python 3.
        print(colored('Warning:', 'red', attrs=['bold']),
              "Data file'", binfile, "' not found. Using data.csv instead.")
        data = np.loadtxt('data.csv', delimiter=',')
    # Column 0: pixel count, column 1: measured calculation time.
    X = data[:, 0]
    T = data[:, 1]
    # Fit t = a*x + x0 with initial guess (1, 0).
    pfit, pconv = optimize.curve_fit(line, X, T, (1, 0))
    Xcont = np.linspace(np.max(X), np.min(X), 100)
    # Top panel: data + fit; bottom panel: residuals.
    fig, (ax1, ax3) = plt.subplots(2,
                                   sharex=True,
                                   gridspec_kw={'height_ratios': [3, 1]}
                                   )
    ax1.plot(X*1e-6, T, 'bx', label='Measured calculation time')
    ax1.plot(Xcont*1e-6,
             line(Xcont, *pfit),
             '-',
             color='black',
             label=r'$t ='+texsci(pfit[0]*1e6)+r'\cdot s / \text{Mpxl} +'+texsci(pfit[1])+'s$')
    ax1.set_ylabel(r'$t$ (s)')
    ax1.legend(loc=2)
    ax3.set_xlabel(r'\#MPixels ')
    ax3.set_ylabel(r'$\Delta t $ (s)')
    ax3.plot(X*1e-6, (T-line(X, *pfit)), 'x', color='black', label='residues')
    plt.tight_layout()
    plt.show()
| {
"alphanum_fraction": 0.5632267442,
"author": null,
"avg_line_length": 31.2727272727,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3ba8dce8b69b4051e259122965c9c3b0d77c7d42",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-01-06T07:37:43.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-08-14T14:31:31.000Z",
"max_forks_repo_head_hexsha": "939724ee877f3eb66572b4c7d0cfa92870d57b03",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "TAdeJong/plasma-analysis",
"max_forks_repo_path": "documentation/linfitplot.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "939724ee877f3eb66572b4c7d0cfa92870d57b03",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "TAdeJong/plasma-analysis",
"max_issues_repo_path": "documentation/linfitplot.py",
"max_line_length": 119,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "939724ee877f3eb66572b4c7d0cfa92870d57b03",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "TAdeJong/plasma-analysis",
"max_stars_repo_path": "documentation/linfitplot.py",
"max_stars_repo_stars_event_max_datetime": "2018-09-03T04:58:20.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-09-03T04:58:20.000Z",
"num_tokens": 421,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1376
} |
from typing import Dict
from rastervision.core.data.raster_transformer.raster_transformer \
import RasterTransformer
import numpy as np # noqa
class ReclassTransformer(RasterTransformer):
    """Reclassifies label raster
    """

    def __init__(self, mapping: Dict[int, int]):
        """Construct a new ReclassTransformer.

        Args:
            mapping: (dict) Remapping dictionary
        """
        self.mapping = mapping

    def transform(self, chip, channel_order=None):
        """Transform a chip.

        Reclassify a label raster using the given mapping. All masks are
        computed against the original values before any writes happen, so
        chained mappings (e.g. 1->2 together with 2->3) do not cascade.

        Args:
            chip: ndarray of shape [height, width, channels] This is assumed to already
                have the channel_order applied to it if channel_order is set. In other
                words, channels should be equal to len(channel_order).

        Returns:
            [height, width, channels] numpy array
        """
        pending = [(chip == src, dst) for (src, dst) in self.mapping.items()]
        for mask, dst in pending:
            chip[mask] = dst
        return chip
| {
"alphanum_fraction": 0.6161016949,
"author": null,
"avg_line_length": 28.0952380952,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0431e7179380216d53c24d89f53b1c684131f9d9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 336,
"max_forks_repo_forks_event_max_datetime": "2022-03-28T06:19:33.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-05-15T16:30:44.000Z",
"max_forks_repo_head_hexsha": "fc181a6f31f085affa1ee12f0204bdbc5a6bf85a",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "monocilindro/raster-vision",
"max_forks_repo_path": "rastervision_core/rastervision/core/data/raster_transformer/reclass_transformer.py",
"max_issues_count": 933,
"max_issues_repo_head_hexsha": "dab675517f904771e2ce8c052494f8a6f1ddc026",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T10:22:59.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-05-09T20:25:02.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "theoway/raster-vision",
"max_issues_repo_path": "rastervision_core/rastervision/core/data/raster_transformer/reclass_transformer.py",
"max_line_length": 87,
"max_stars_count": 1577,
"max_stars_repo_head_hexsha": "dab675517f904771e2ce8c052494f8a6f1ddc026",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "theoway/raster-vision",
"max_stars_repo_path": "rastervision_core/rastervision/core/data/raster_transformer/reclass_transformer.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T02:03:25.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-05-22T14:22:00.000Z",
"num_tokens": 252,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1180
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.