import unittest
import import_ipynb
import pandas as pd
import pandas.testing as pd_testing
import numpy.testing as np_testing
import numpy as np
from sklearn import preprocessing
from sklearn import ensemble
from sklearn import model_selection
from scipy import stats


class Test(unittest.TestCase):
    def setUp(self):
        import Activity8_01
        self.exercises = Activity8_01
        self.file_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter08/agaricus-lepiota.data'
        self.mushrooms = pd.read_csv(self.file_url, header=None)
        self.y_raw = self.mushrooms.iloc[:, 0]
        self.X_raw = self.mushrooms.iloc[:, 1:]
        # Binary target: 1 for poisonous ('p'), 0 otherwise
        self.y = (self.y_raw == 'p') * 1
        self.encoder = preprocessing.OneHotEncoder()
        self.encoder.fit(self.X_raw)
        self.X = self.encoder.transform(self.X_raw).toarray()
        self.rfc = ensemble.RandomForestClassifier(n_estimators=100, random_state=100)
        self.grid = {
            'criterion': ['gini', 'entropy'],
            'max_features': [2, 4, 6, 8, 10, 12, 14]
        }
        self.gscv = model_selection.GridSearchCV(estimator=self.rfc, param_grid=self.grid, cv=5, scoring='accuracy')
        self.gscv.fit(self.X, self.y)
        self.results = pd.DataFrame(self.gscv.cv_results_)
        np.random.seed(100)
        self.max_features = self.X.shape[1]
        self.param_dist = {
            'criterion': ['gini', 'entropy'],
            'max_features': stats.randint(low=1, high=self.max_features)
        }
        self.rscv = model_selection.RandomizedSearchCV(estimator=self.rfc, param_distributions=self.param_dist, n_iter=50, cv=5, scoring='accuracy', random_state=100)
        self.rscv.fit(self.X, self.y)
        # Note: this overwrites the grid-search results; the test below compares
        # against the randomized-search results.
        self.results = pd.DataFrame(self.rscv.cv_results_)

    def test_result(self):
        self.assertEqual(
            self.exercises.results["mean_test_score"].max(),
            self.results["mean_test_score"].max()
        )


if __name__ == '__main__':
    unittest.main()
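# For interactive inspection (illustrative only; assumes fitted gscv/rscv
# objects like the ones built in setUp above are in scope):
# print(gscv.best_score_, gscv.best_params_)   # exhaustive: 2 criteria x 7 max_features values
# print(rscv.best_score_, rscv.best_params_)   # 50 random draws from param_dist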
# --- End of Test_Activity8_01.py (PacktWorkshops/The-Data-Science-Workshop, MIT) ---
#!/usr/bin/env python
# Noise2Void - 2D Example for SEM data
from n2v.models import N2V
import numpy as np
from matplotlib import pyplot as plt
from tifffile import imread
from csbdeep.io import save_tiff_imagej_compatible
# A previously trained model is loaded by creating a new N2V-object without providing a 'config'.
model_name = 'n2v_2D_SEM'
basedir = 'models'
model = N2V(config=None, name=model_name, basedir=basedir)
input_train = imread('data/train.tif')
input_val = imread('data/validation.tif')
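# n_tiles splits the input into tiles during prediction to limit peak memory;
# the denoised tiles are stitched back together afterwards.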
pred_train = model.predict(input_train, axes='YX', n_tiles=(2,1))
pred_val = model.predict(input_val, axes='YX')
#plt.figure(figsize=(16,8))
#plt.subplot(1,2,1)
#plt.imshow(input_train[:1500,:1500],cmap="magma")
#plt.title('Input');
#plt.subplot(1,2,2)
#plt.imshow(pred_train[:1500,:1500],cmap="magma")
#plt.title('Prediction')
#plt.show()
# Let's look at the results
#plt.figure(figsize=(16,8))
#plt.subplot(1,2,1)
#plt.imshow(input_val,cmap="magma")
#plt.title('Input')
#plt.subplot(1,2,2)
#plt.imshow(pred_val,cmap="magma")
#plt.title('Prediction')
#plt.show()
save_tiff_imagej_compatible('models/n2v_2D_SEM/pred_train.tif', pred_train, axes='YX')
save_tiff_imagej_compatible('models/n2v_2D_SEM/pred_validation.tif', pred_val, axes='YX')
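# Optional sanity checks: denoising is shape-preserving, so the predictions
# should match their inputs exactly in shape.
assert pred_train.shape == input_train.shape
assert pred_val.shape == input_val.shape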
# --- End of test_prediction2D_SEM.py (trasse/n2v, BSD-3-Clause) ---
// All content Copyright (C) 2018 Genomics plc
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include "io/read.hpp"
#include "io/readRange.hpp"
#include "io/readDataSet.hpp"
#include "alignment/cigar.hpp"
#include "alignment/cigarItems.hpp"
#include "common.hpp"
using wecall::io::Read;
using wecall::alignment::Cigar;
using wecall::utils::ReferenceSequence;
using wecall::utils::BasePairSequence;
using wecall::utils::QualitySequence;
using wecall::caller::Region;
using wecall::variant::Variant;
using wecall::variant::Breakpoint;
using wecall::io::ReadDataset;
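// Helper: builds a read whose sequence is all 'T' with uniform base quality 60
// and whose CIGAR is a single full-length match ("<seqLength>M") at startPos,
// over an all-'A' reference padded by one base on each side.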
std::shared_ptr< Read > constructHighQualMatchReadWithLength( size_t seqLength, int64_t startPos )
{
std::string seq( seqLength, 'T' );
std::string qual( seqLength, 60 );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >(
Region( "1", startPos - 1, startPos + seqLength + 1 ), std::string( seqLength + 2, 'A' ) );
return std::make_shared< Read >( seq, qual, "testId", Cigar( std::to_string( seqLength ) + "M" ), 0, startPos, 0, 0,
0, 0, 0, refSequence );
}
BOOST_AUTO_TEST_CASE( testReadsStartEndComputationForEmptyList )
{
ReadDataset readDataset( {""}, Region( "1", 0, 10 ) );
const auto startEnd = wecall::io::readsAlignedStartEnd( readDataset.getAllReads( 0 ).at( "" ) );
BOOST_CHECK_EQUAL( startEnd.first, wecall::alignment::noPos );
BOOST_CHECK_EQUAL( startEnd.second, wecall::alignment::noPos );
}
BOOST_AUTO_TEST_CASE( testReadsStartEndComputation )
{
ReadDataset readDataset( {""}, Region( "1", 0, 10 ) );
auto refSequenceLen = 100;
auto refSequence = std::make_shared< ReferenceSequence >( Region( "1", -10, refSequenceLen - 10 ),
std::string( refSequenceLen, 'A' ) );
readDataset.insertRead( "", std::make_shared< Read >( std::string( 8, 'A' ), std::string( 8, 'Q' ), "1",
Cigar( "8M" ), 0, 3, 0, 0, 0, 0, 0, refSequence ) );
readDataset.insertRead( "", std::make_shared< Read >( std::string( 11, 'A' ), std::string( 11, 'Q' ), "2",
Cigar( "11M" ), 0, 0, 0, 0, 0, 0, 0, refSequence ) );
readDataset.insertRead( "", std::make_shared< Read >( std::string( 6, 'A' ), std::string( 6, 'Q' ), "3",
Cigar( "6M" ), 0, 4, 0, 0, 0, 0, 0, refSequence ) );
const auto startEnd = wecall::io::readsAlignedStartEnd( readDataset.getAllReads( 0 ).at( "" ) );
BOOST_CHECK_EQUAL( startEnd.first, 0 );
BOOST_CHECK_EQUAL( startEnd.second, 11 );
}
BOOST_AUTO_TEST_CASE( testMaxAlignedReadsLengthComputationEmptyTrees )
{
ReadDataset readDataset( {"A", "B", "C", "D"}, Region( "1", 0, 10 ) );
const auto reads = readDataset.getAllReads( 0 );
BOOST_CHECK_EQUAL( wecall::io::perSampleMaxAlignedReadLength( reads ), 0 );
BOOST_CHECK_EQUAL( wecall::io::perSampleMaxReadCigarLength( reads ), 0 );
}
BOOST_AUTO_TEST_CASE( testMaxAlignedReadsLengthComputation )
{
ReadDataset readDataset( {"A", "B", "C", "D"}, Region( "1", 0, 100 ) );
readDataset.insertRead( "A", constructHighQualMatchReadWithLength( 8, 0 ) );
readDataset.insertRead( "B", constructHighQualMatchReadWithLength( 11, 0 ) );
readDataset.insertRead( "B", constructHighQualMatchReadWithLength( 6, 4 ) );
readDataset.insertRead( "C", constructHighQualMatchReadWithLength( 80, 20 ) );
readDataset.insertRead( "C", constructHighQualMatchReadWithLength( 82, 17 ) );
const auto reads = readDataset.getAllReads( 0 );
BOOST_CHECK_EQUAL( wecall::io::perSampleMaxAlignedReadLength( reads ), 99 - 17 );
BOOST_CHECK_EQUAL( wecall::io::perSampleMaxReadCigarLength( reads ), 99 - 17 );
}
BOOST_AUTO_TEST_CASE( testMaxReadsLengthComputationEmptyTrees )
{
ReadDataset readDataset( {"A", "B", "C", "D"}, Region( "1", 0, 10 ) );
const auto reads = readDataset.getAllReads( 0 );
BOOST_CHECK_EQUAL( wecall::io::perSampleMaxReadLength( reads ), 0 );
BOOST_CHECK_EQUAL( wecall::io::perSampleMaxReadCigarLength( reads ), 0 );
}
BOOST_AUTO_TEST_CASE( testMaxReadsLengthComputation )
{
ReadDataset readDataset( {"A", "B", "C", "D"}, Region( "1", 0, 100 ) );
readDataset.insertRead( "A", constructHighQualMatchReadWithLength( 8, 0 ) );
readDataset.insertRead( "B", constructHighQualMatchReadWithLength( 11, 0 ) );
readDataset.insertRead( "B", constructHighQualMatchReadWithLength( 6, 4 ) );
readDataset.insertRead( "C", constructHighQualMatchReadWithLength( 80, 20 ) );
readDataset.insertRead( "C", constructHighQualMatchReadWithLength( 82, 17 ) );
const auto reads = readDataset.getAllReads( 0 );
BOOST_CHECK_EQUAL( wecall::io::perSampleMaxReadLength( reads ), 99 - 17 );
BOOST_CHECK_EQUAL( wecall::io::perSampleMaxReadCigarLength( reads ), 99 - 17 );
}
BOOST_AUTO_TEST_CASE( testGetReferencePositionsSimpleCase )
{
auto refSequence =
std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", 0, 10 ), std::string( 10, 'A' ) );
Read read( "WHY", "NOT", "test", Cigar( "3M" ), 0, 0, BAM_FPROPER_PAIR, 100, 200, 0, 200, refSequence );
auto positions = read.getReferencePositions();
auto expected_positions = {0, 1, 2};
BOOST_CHECK_EQUAL_COLLECTIONS( positions.begin(), positions.end(), expected_positions.begin(),
expected_positions.end() );
}
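// With CIGAR 1M10D1I1M, the matched bases map to reference positions 0 and 11,
// the inserted base has no reference position (reported as -1), and the ten
// deleted reference bases contribute no entries.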
BOOST_AUTO_TEST_CASE( testGetReferencePositionsFromRead )
{
auto refSequence =
std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", 0, 10 ), std::string( 10, 'A' ) );
Read read( "WHY", "NOT", "test", Cigar( "1M10D1I1M" ), 0, 0, BAM_FPROPER_PAIR, 100, 200, 0, 200, refSequence );
auto positions = read.getReferencePositions();
auto expected_positions = {0, -1, 11};
BOOST_CHECK_EQUAL_COLLECTIONS( positions.begin(), positions.end(), expected_positions.begin(),
expected_positions.end() );
}
BOOST_AUTO_TEST_CASE( shouldTrimReadOfShortFragmentAtEnd )
{
std::size_t length = 10;
std::string seq( length, 'A' );
std::string qual( length, 'Q' );
std::string strCig = "10M";
int64_t startPos = 0;
int64_t flag = BAM_FPROPER_PAIR;
int64_t insertSize = 9;
auto refSequence =
std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", -10, 10 ), std::string( 20, 'A' ) );
Read read( seq, qual, "Fwd", Cigar( strCig ), 0, startPos, flag, 100, insertSize, 0, 200, refSequence );
read.trimReadOfShortFragment();
auto lengthToTrim = length - int64_to_sizet( insertSize );
std::string expectedQualities =
qual.substr( 0, qual.size() - lengthToTrim ) + std::string( lengthToTrim, constants::minAllowedQualityScore );
BOOST_CHECK_EQUAL( read.getQualities(), expectedQualities );
}
BOOST_AUTO_TEST_CASE( shouldNotTrimReadIfInsertSizeIsGreaterThanReadLength )
{
std::size_t length = 10;
std::string seq( length, 'A' );
std::string qual( length, 'Q' );
std::string strCig = "10M";
int64_t startPos = 0;
int64_t flag = BAM_FPROPER_PAIR + BAM_FREVERSE;
int64_t insertSize = length + 100000000;
int64_t mappingQuality = 100;
int64_t mateStartPos = startPos + insertSize - length;
auto refSequence =
std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", 0, 10 ), std::string( 10, 'A' ) );
Read read( seq, qual, "Rev", Cigar( strCig ), 0, startPos, flag, mappingQuality, insertSize, 0, mateStartPos,
refSequence );
read.trimReadOfShortFragment();
std::string expectedQualities = qual;
BOOST_CHECK_EQUAL( read.getQualities(), expectedQualities );
}
BOOST_AUTO_TEST_CASE( shouldTrimReadOfShortFragmentReverseReadAtStart )
{
std::size_t length = 10;
std::string seq( length, 'A' );
std::string qual( length, 'Q' );
std::string strCig = "10M";
int64_t mappingQuality = 100;
int64_t startPos = 0;
int64_t insertSize = 9;
int64_t mateStartPos = startPos + insertSize - length;
int64_t flag = BAM_FPROPER_PAIR + BAM_FREVERSE;
auto refSequence =
std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", 0, 10 ), std::string( 10, 'A' ) );
Read read( seq, qual, "Rev", Cigar( strCig ), 0, startPos, flag, mappingQuality, insertSize, 0, mateStartPos,
refSequence );
read.trimReadOfShortFragment();
auto lengthToTrim = length - int64_to_sizet( insertSize );
std::string expectedQualities = std::string( lengthToTrim, constants::minAllowedQualityScore ) +
qual.substr( lengthToTrim, qual.size() - lengthToTrim );
BOOST_CHECK_EQUAL( read.getQualities(), expectedQualities );
}
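// Overlap trimming for proper pairs: qualities in the mate-overlap region are
// masked to the minimum allowed score on the second read only; read 1 keeps
// its original qualities (see shouldNotTrimOverlapOfForwardRead1 below).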
BOOST_AUTO_TEST_CASE( shouldTrimOverlapOfForwardRead2 )
{
std::size_t length = 10;
std::string seq( length, 'A' );
std::string qual( length, 'Q' );
std::string strCig = "10M";
int64_t startPos = 0;
int64_t flag = BAM_FPROPER_PAIR;
std::size_t overlapLength = 1;
int64_t insertSize = 2 * static_cast< int64_t >( length ) - static_cast< int64_t >( overlapLength );
auto refSequence =
std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", 0, 10 ), std::string( 10, 'A' ) );
Read read( seq, qual, "Fwd", Cigar( strCig ), 0, startPos, flag, 100, insertSize, 0, 200, refSequence );
read.trimOverlap();
std::string expectedQualities =
qual.substr( 0, qual.size() - overlapLength ) + std::string( overlapLength, constants::minAllowedQualityScore );
BOOST_CHECK_EQUAL( read.getQualities(), expectedQualities );
}
BOOST_AUTO_TEST_CASE( shouldNotTrimOverlapOfForwardRead1 )
{
std::size_t length = 10;
std::string seq( length, 'A' );
std::string qual( length, 'Q' );
std::string strCig = "10M";
int64_t startPos = 0;
int64_t flag = BAM_FPROPER_PAIR + BAM_FREAD1;
std::size_t overlapLength = 1;
int64_t insertSize = 2 * static_cast< int64_t >( length ) - static_cast< int64_t >( overlapLength );
auto refSequence =
std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", 0, 10 ), std::string( 10, 'A' ) );
Read read( seq, qual, "Fwd", Cigar( strCig ), 0, startPos, flag, 100, insertSize, 0, 200, refSequence );
read.trimOverlap();
std::string expectedQualities = qual;
BOOST_CHECK_EQUAL( read.getQualities(), expectedQualities );
}
BOOST_AUTO_TEST_CASE( shouldTrimOverlapOfReverseRead2 )
{
std::size_t length = 10;
std::string seq( length, 'A' );
std::string qual( length, 'Q' );
std::string strCig = "10M";
int64_t startPos = 0;
int64_t flag = BAM_FPROPER_PAIR + BAM_FREVERSE;
std::size_t overlapLength = 1;
int64_t insertSize = 2 * static_cast< int64_t >( length ) - static_cast< int64_t >( overlapLength );
auto refSequence =
std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", 0, 10 ), std::string( 10, 'A' ) );
Read read( seq, qual, "Rev", Cigar( strCig ), 0, startPos, flag, 100, insertSize, 0, 200, refSequence );
read.trimOverlap();
std::string expectedQualities = std::string( overlapLength, constants::minAllowedQualityScore ) +
qual.substr( overlapLength, qual.size() - overlapLength );
BOOST_CHECK_EQUAL( read.getQualities(), expectedQualities );
}
BOOST_AUTO_TEST_CASE( shouldGetWholeReadSpanIfIntervalMatchesReadSpanInRef )
{
Cigar cigar( "2M2I2M2D2M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
BOOST_CHECK_EQUAL( testRead.getAlignedEndPos(), 108L );
wecall::utils::Interval inputRefInterval( startPos, 108L );
wecall::utils::Interval expectedResult( 0L, testRead.getLength() );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldGetWholeReadSpanIfIntervalContainsReadSpanInRef )
{
Cigar cigar( "2M2I2M2D2M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos - 10, testRead.getAlignedEndPos() + 10 );
wecall::utils::Interval expectedResult( 0L, testRead.getLength() );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldGetStartOfReadSpanIfIntervalPrecedesReadSpanInRef )
{
Cigar cigar( "2M2I2M2D2M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos - 10, startPos );
wecall::utils::Interval expectedResult( 0L, 0L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldGetEndOfReadSpanIfIntervalFollowsReadSpanInRef )
{
Cigar cigar( "2M2I2M2D2M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence =
std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", 100, 110 ), std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( testRead.getAlignedEndPos(), testRead.getAlignedEndPos() + 10 );
wecall::utils::Interval expectedResult( testRead.getLength(), testRead.getLength() );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldGetEmptyIntervalIfInputIntervalIsEmptyAndReadFlatAligned )
{
Cigar cigar( "2M2I2M2D2M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 1L, startPos + 1L );
wecall::utils::Interval expectedResult( 1L, 1L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldGetIntervalCorrespondingToInsertionInReadIfInputIntervalIsEmptyAtStartPos )
{
Cigar cigar( "2M2I2M2D2M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 2L, startPos + 2L );
wecall::utils::Interval expectedResult( 2L, 4L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldGetMatchingIntervalIfInputIntervalIsFlatAligned )
{
Cigar cigar( "2M2I2M2D2M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 3L, startPos + 4L );
wecall::utils::Interval expectedResult( 5L, 6L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldGetEmptyIntervalForEmptyRefIntervalCorrespondingToDeletion )
{
Cigar cigar( "2M2I2M2D2M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 5L, startPos + 5L );
wecall::utils::Interval expectedResult( 6L, 6L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldGetEmptyIntervalForNonEmptyRefIntervalCorrespondingToDeletion )
{
Cigar cigar( "2M2I2M2D2M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 4L, startPos + 6L );
wecall::utils::Interval expectedResult( 6L, 6L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
// indels at edges of reads
BOOST_AUTO_TEST_CASE( shouldGetInsertionAtStartOfReadForNonOverlappingInterval )
{
Cigar cigar( "4I4M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 0L, startPos + 0L );
wecall::utils::Interval expectedResult( 0L, 4L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldIgnoreDeletionAtStartOfReadForNonOverlappingInterval )
{
Cigar cigar( "4D4M" );
const auto startPos = 100L;
const std::string seq( 4, 'A' );
const std::string qual( 4, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 0L, startPos + 0L );
wecall::utils::Interval expectedResult( 0L, 0L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldGetInsertionAtEndOfReadForNonOverlappingInterval )
{
Cigar cigar( "4M4I" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( testRead.getAlignedEndPos(), testRead.getAlignedEndPos() );
wecall::utils::Interval expectedResult( 4L, 8L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldIgnoreDeletionAtEndOfReadForNonOverlappingInterval )
{
Cigar cigar( "4M4D" );
const auto startPos = 100L;
const std::string seq( 4, 'A' );
const std::string qual( 4, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( testRead.getAlignedEndPos(), testRead.getAlignedEndPos() );
wecall::utils::Interval expectedResult( 4L, 4L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
// intervals adjacent to indels
BOOST_AUTO_TEST_CASE( shouldGetInsertionOnLeftOfQueryInterval )
{
Cigar cigar( "1M4I3M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 1L, startPos + 2L );
wecall::utils::Interval expectedResult( 1L, 6L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldSkipDeletionOnLeftOfQueryInterval )
{
Cigar cigar( "1M4D3M" );
const auto startPos = 100L;
const std::string seq( 4, 'A' );
const std::string qual( 4, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 5L, startPos + 6L );
wecall::utils::Interval expectedResult( 1L, 2L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldGetInsertionOnRightOfQueryInterval )
{
Cigar cigar( "3M4I1M" );
const auto startPos = 100L;
const std::string seq( 8, 'A' );
const std::string qual( 8, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 2L, startPos + 3L );
wecall::utils::Interval expectedResult( 2L, 7L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldSkipDeletionOnRightOfQueryInterval )
{
Cigar cigar( "3M4D1M" );
const auto startPos = 100L;
const std::string seq( 4, 'A' );
const std::string qual( 4, 'Q' );
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read testRead( seq, qual, "", cigar, 0, startPos, 0, 0, 0, 0, 0, refSequence );
wecall::utils::Interval inputRefInterval( startPos + 2L, startPos + 3L );
wecall::utils::Interval expectedResult( 2L, 3L );
BOOST_CHECK_EQUAL( expectedResult, testRead.getIntervalInRead( inputRefInterval ) );
}
BOOST_AUTO_TEST_CASE( shouldRetrieveSNPsFromRead )
{
const auto referenceSequence = std::make_shared< ReferenceSequence >( Region( "1", 0, 5 ), "AAAAA" );
const Read read( BasePairSequence( "TACG" ), QualitySequence( "QQQQ" ), "", Cigar( "4M" ), 0, 1, 0, 0, 0, 0, 0,
referenceSequence );
const auto variants = read.getVariants();
BOOST_REQUIRE_EQUAL( variants.size(), 3 );
BOOST_CHECK_EQUAL( *variants[0], Variant( referenceSequence, Region( "1", 1, 2 ), "T" ) );
BOOST_CHECK_EQUAL( *variants[1], Variant( referenceSequence, Region( "1", 3, 4 ), "C" ) );
BOOST_CHECK_EQUAL( *variants[2], Variant( referenceSequence, Region( "1", 4, 5 ), "G" ) );
}
BOOST_AUTO_TEST_CASE( shouldRetrieveDeletionFromReadStart )
{
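// Deliberately disabled: the skip flag below short-circuits this test (the
// same pattern is used for the other edge-of-read indel retrieval cases).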
bool skip = true;
if ( skip )
{
return;
}
const auto referenceSequence = std::make_shared< ReferenceSequence >( Region( "1", 0, 10 ), "AAAAAAAAAA" );
const auto startPos = 1;
const Read read( BasePairSequence( "A" ), QualitySequence( {10} ), "", Cigar( "4D1M" ), 0, startPos, 0, 0, 0, 0, 0,
referenceSequence );
const auto variants = read.getVariants();
BOOST_REQUIRE_EQUAL( variants.size(), 1 );
BOOST_CHECK_EQUAL( *variants[0], Variant( referenceSequence, Region( "1", 1, 5 ), "" ) );
}
BOOST_AUTO_TEST_CASE( shouldRetrieveDeletionFromReadEnd )
{
bool skip = true;
if ( skip )
{
return;
}
const auto referenceSequence = std::make_shared< ReferenceSequence >( Region( "1", 0, 10 ), "AAAAAAAAAA" );
const auto startPos = 1;
const Read read( BasePairSequence( "A" ), QualitySequence( {10} ), "", Cigar( "1M4D" ), 0, startPos, 0, 0, 0, 0, 0,
referenceSequence );
const auto variants = read.getVariants();
BOOST_REQUIRE_EQUAL( variants.size(), 1 );
BOOST_CHECK_EQUAL( *variants[0], Variant( referenceSequence, Region( "1", 2, 6 ), "" ) );
}
BOOST_AUTO_TEST_CASE( shouldRetrieveDeletionFromReadMiddle )
{
const auto referenceSequence = std::make_shared< ReferenceSequence >( Region( "1", 0, 10 ), "AAAAAAAAAA" );
const auto start = 1;
const Read read( BasePairSequence( "AA" ), QualitySequence( {10, 10} ), "", Cigar( "1M4D1M" ), 0, start, 0, 0, 0, 0,
0, referenceSequence );
const auto variants = read.getVariants();
BOOST_REQUIRE_EQUAL( variants.size(), 1 );
BOOST_CHECK_EQUAL( *variants[0], Variant( referenceSequence, Region( "1", 2, 6 ), "" ) );
}
BOOST_AUTO_TEST_CASE( shouldRetrieveInsertionFromReadStart )
{
bool skip = true;
if ( skip )
{
return;
}
const auto referenceSequence = std::make_shared< ReferenceSequence >( Region( "1", 0, 10 ), "AAAAAAAAAA" );
const auto startPos = 1;
const Read read( BasePairSequence( "AAAAA" ), QualitySequence( 5, 10 ), "", Cigar( "4I1M" ), 0, startPos, 0, 0, 0,
0, 0, referenceSequence );
const auto variants = read.getVariants();
BOOST_REQUIRE_EQUAL( variants.size(), 1 );
BOOST_CHECK_EQUAL( *variants[0], Variant( referenceSequence, Region( "1", 1, 1 ), "AAAA" ) );
}
BOOST_AUTO_TEST_CASE( shouldRetrieveInsertionFromReadEnd )
{
bool skip = true;
if ( skip )
{
return;
}
const auto referenceSequence = std::make_shared< ReferenceSequence >( Region( "1", 0, 10 ), "AAAAAAAAAA" );
const auto startPos = 1;
const Read read( BasePairSequence( "AAAAA" ), QualitySequence( 5, 10 ), "", Cigar( "1M4I" ), 0, startPos, 0, 0, 0,
0, 0, referenceSequence );
const auto variants = read.getVariants();
BOOST_REQUIRE_EQUAL( variants.size(), 1 );
BOOST_CHECK_EQUAL( *variants[0], Variant( referenceSequence, Region( "1", 2, 2 ), "AAAA" ) );
}
BOOST_AUTO_TEST_CASE( needsTwoCigarItemsToGetBreakpoints )
{
const auto startPos = 1;
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read read( BasePairSequence( "AAAAA" ), QualitySequence( 5, 10 ), "", Cigar( "5S" ), 0, startPos, 0, 0, 0, 0,
0, refSequence );
const auto breakpoints = read.getBreakpoints();
BOOST_REQUIRE_EQUAL( breakpoints.size(), 0 );
}
BOOST_AUTO_TEST_CASE( shouldRetrieveBreakpointFromReadStart )
{
const auto startPos = 1;
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read read( BasePairSequence( "AAAAA" ), QualitySequence( 5, 10 ), "", Cigar( "4S1M" ), 0, startPos, 0, 0, 0,
0, 0, refSequence );
const auto breakpoints = read.getBreakpoints();
BOOST_REQUIRE_EQUAL( breakpoints.size(), 1 );
BOOST_CHECK_EQUAL( *breakpoints[0], Breakpoint( "1", startPos, false, "AAAA" ) );
}
BOOST_AUTO_TEST_CASE( shouldRetrieveBreakpointFromReadEnd )
{
const auto startPos = 1;
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read read( BasePairSequence( "AAAAA" ), QualitySequence( 5, 10 ), "", Cigar( "1M4S" ), 0, startPos, 0, 0, 0,
0, 0, refSequence );
const auto breakpoints = read.getBreakpoints();
BOOST_REQUIRE_EQUAL( breakpoints.size(), 1 );
BOOST_CHECK_EQUAL( *breakpoints[0], Breakpoint( "1", startPos + 1, true, "AAAA" ) );
}
BOOST_AUTO_TEST_CASE( testStoringOfMateRegion )
{
const int32_t tid = 0;
const int64_t startPos = 0;
const int64_t flag = BAM_FPROPER_PAIR;
int64_t mateStartPos = 200;
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const Read read( BasePairSequence( "AAAAA" ), QualitySequence( 5, 10 ), "", Cigar( "1M4S" ), tid, startPos, flag, 0,
0, tid, mateStartPos, refSequence );
const auto mateRegions = read.getBreakpoints()[0]->mateRegions();
// Currently padding by 150 to left + using length of read as proxy for length of mate.
std::set< Region > expected = {Region( "1", read.getMateIntervalInRef() )};
BOOST_CHECK_EQUAL_COLLECTIONS( mateRegions.cbegin(), mateRegions.cend(), expected.cbegin(), expected.cend() );
}
BOOST_AUTO_TEST_CASE( testStoringOfMateRegionGetsNothingIfMateUnmapped )
{
const int32_t tid = 0;
const int64_t startPos = 0;
const int64_t flag = BAM_FPROPER_PAIR + BAM_FMUNMAP;
int64_t mateStartPos = 200;
auto refSequence = std::make_shared< wecall::utils::ReferenceSequence >( Region( "1", startPos - 1, startPos + 9 ),
std::string( 10, 'A' ) );
const auto mateRegions = Read( BasePairSequence( "AAAAA" ), QualitySequence( 5, 10 ), "", Cigar( "1M4S" ), tid,
startPos, flag, 0, 0, tid, mateStartPos, refSequence )
.getBreakpoints()[0]
->mateRegions();
std::set< Region > expected = {};
BOOST_CHECK_EQUAL_COLLECTIONS( mateRegions.cbegin(), mateRegions.cend(), expected.cbegin(), expected.cend() );
}
BOOST_AUTO_TEST_CASE( shouldRetrieveInsertionFromReadMiddle )
{
const auto referenceSequence = std::make_shared< ReferenceSequence >( Region( "1", 0, 10 ), "AAAAAAAAAA" );
const auto start = 1;
const Read read( BasePairSequence( "AAAAAA" ), QualitySequence( 6, 10 ), "", Cigar( "1M4I1M" ), 0, start, 0, 0, 0,
0, 0, referenceSequence );
const auto variants = read.getVariants();
BOOST_REQUIRE_EQUAL( variants.size(), 1 );
BOOST_CHECK_EQUAL( *variants[0], Variant( referenceSequence, Region( "1", 2, 2 ), "AAAA" ) );
}
BOOST_AUTO_TEST_CASE( shouldRetrieveSNPsEitherSideOfInsertion )
{
const auto referenceSequence = std::make_shared< ReferenceSequence >( Region( "1", 0, 10 ), "AAAAAAAAAA" );
const auto start = 1;
const Read read( BasePairSequence( "TAAAAC" ), QualitySequence( 6, 10 ), "", Cigar( "1M4I1M" ), 0, start, 0, 0, 0,
0, 0, referenceSequence );
const auto variants = read.getVariants();
BOOST_REQUIRE_EQUAL( variants.size(), 3 );
BOOST_CHECK_EQUAL( *variants[0], Variant( referenceSequence, Region( "1", 1, 2 ), "T" ) );
BOOST_CHECK_EQUAL( *variants[1], Variant( referenceSequence, Region( "1", 2, 2 ), "AAAA" ) );
BOOST_CHECK_EQUAL( *variants[2], Variant( referenceSequence, Region( "1", 2, 3 ), "C" ) );
}
// --- End of cpp/test/ioTest/io/testRead.cpp (dylex/wecall, MIT) ---
[STATEMENT]
lemma splitting_lemma_left:
assumes ex: "exact_seq ([C,B,A], [g,f])" and f': "f' \<in> hom B A"
and inv: "(\<And>x. x \<in> carrier A \<Longrightarrow> f'(f x) = x)"
and injf: "inj_on f (carrier A)" and surj: "g ` carrier B = carrier C"
obtains H K where "H \<lhd> B" "K \<lhd> B" "H \<inter> K \<subseteq> {one B}" "set_mult B H K = carrier B"
"f \<in> iso A (subgroup_generated B H)" "g \<in> iso (subgroup_generated B K) C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>H K. \<lbrakk>H \<lhd> B; K \<lhd> B; H \<inter> K \<subseteq> {\<one>\<^bsub>B\<^esub>}; H <#>\<^bsub>B\<^esub> K = carrier B; f \<in> Group.iso A (subgroup_generated B H); g \<in> Group.iso (subgroup_generated B K) C\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>H K. \<lbrakk>H \<lhd> B; K \<lhd> B; H \<inter> K \<subseteq> {\<one>\<^bsub>B\<^esub>}; H <#>\<^bsub>B\<^esub> K = carrier B; f \<in> Group.iso A (subgroup_generated B H); g \<in> Group.iso (subgroup_generated B K) C\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
interpret fAB: group_hom A B f
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. group_hom A B f
[PROOF STEP]
using ex
[PROOF STATE]
proof (prove)
using this:
exact_seq ([C, B, A], [g, f])
goal (1 subgoal):
1. group_hom A B f
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>H K. \<lbrakk>H \<lhd> B; K \<lhd> B; H \<inter> K \<subseteq> {\<one>\<^bsub>B\<^esub>}; H <#>\<^bsub>B\<^esub> K = carrier B; f \<in> Group.iso A (subgroup_generated B H); g \<in> Group.iso (subgroup_generated B K) C\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
interpret gBC: group_hom B C g
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. group_hom B C g
[PROOF STEP]
using ex
[PROOF STATE]
proof (prove)
using this:
exact_seq ([C, B, A], [g, f])
goal (1 subgoal):
1. group_hom B C g
[PROOF STEP]
by (simp add: group_hom_def group_hom_axioms_def)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>H K. \<lbrakk>H \<lhd> B; K \<lhd> B; H \<inter> K \<subseteq> {\<one>\<^bsub>B\<^esub>}; H <#>\<^bsub>B\<^esub> K = carrier B; f \<in> Group.iso A (subgroup_generated B H); g \<in> Group.iso (subgroup_generated B K) C\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
have "group A" "group B" "group C" and kerg: "kernel B C g = f ` carrier A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Group.group A &&& Group.group B &&& Group.group C) &&& kernel B C g = f ` carrier A
[PROOF STEP]
using ex
[PROOF STATE]
proof (prove)
using this:
exact_seq ([C, B, A], [g, f])
goal (1 subgoal):
1. (Group.group A &&& Group.group B &&& Group.group C) &&& kernel B C g = f ` carrier A
[PROOF STEP]
by (auto simp: group_hom_def group_hom_axioms_def)
[PROOF STATE]
proof (state)
this:
Group.group A
Group.group B
Group.group C
kernel B C g = f ` carrier A
goal (1 subgoal):
1. (\<And>H K. \<lbrakk>H \<lhd> B; K \<lhd> B; H \<inter> K \<subseteq> {\<one>\<^bsub>B\<^esub>}; H <#>\<^bsub>B\<^esub> K = carrier B; f \<in> Group.iso A (subgroup_generated B H); g \<in> Group.iso (subgroup_generated B K) C\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
have iso: "f' \<circ> f \<in> Group.iso A A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f' \<circ> f \<in> Group.iso A A
[PROOF STEP]
using ex
[PROOF STATE]
proof (prove)
using this:
exact_seq ([C, B, A], [g, f])
goal (1 subgoal):
1. f' \<circ> f \<in> Group.iso A A
[PROOF STEP]
by (auto simp: inv intro: group.iso_eq [OF \<open>group A\<close> id_iso])
[PROOF STATE]
proof (state)
this:
f' \<circ> f \<in> Group.iso A A
goal (1 subgoal):
1. (\<And>H K. \<lbrakk>H \<lhd> B; K \<lhd> B; H \<inter> K \<subseteq> {\<one>\<^bsub>B\<^esub>}; H <#>\<^bsub>B\<^esub> K = carrier B; f \<in> Group.iso A (subgroup_generated B H); g \<in> Group.iso (subgroup_generated B K) C\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
show thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. thesis
[PROOF STEP]
by (metis that splitting_lemma_left_gen [OF ex f' iso injf surj])
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
'''
Created on Apr 10, 2019
@author: chengzi
'''
import os, sys, glob, math
from PIL import Image
import numpy as np
from six.moves import cPickle as pickle
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from DepInvercs_model import DeepInverse

block_size = 33
dtype = torch.float32


def createDir(imgn, dirname, CS_ratio):
    # Output directory sits next to the image directory, suffixed with the CS ratio
    img_path = os.path.dirname(imgn)
    img_path = os.path.abspath(os.path.join(img_path, "..")) + dirname
    img_rec_path = "%s_rec_%s" % (img_path, CS_ratio)
    if not os.path.exists(img_rec_path):
        os.makedirs(img_rec_path)
    return img_rec_path

def psnrISTA(img_rec, img_orig):
    # PSNR for images on the [0, 255] scale
    img_rec = img_rec.astype(np.float32)
    img_orig = img_orig.astype(np.float32)
    mse = np.mean((img_rec - img_orig) ** 2)
    if mse == 0:
        return 100
    PIXEL_MAX = 255.0
    return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))


def psnr(recovered, original):
    # PSNR for images on the [0, 1] scale, computed with torch
    recovered = recovered.astype(np.float32)
    original = original.astype(np.float32)
    recovered = torch.from_numpy(recovered)
    original = torch.from_numpy(original)
    mse = F.mse_loss(recovered, original)
    if mse == 0:
        return 100
    psnr = 10 * np.log10(1 / mse.item())
    return psnr
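# Illustrative cross-check of the two metrics above: they are mathematically
# equivalent when inputs are scaled consistently, since
# 20*log10(255/sqrt(mse_255)) == 10*log10(1/mse_1) when mse_255 == 255**2 * mse_1.
# _a = np.random.rand(33, 33).astype(np.float32)
# _b = np.clip(_a + 0.01 * np.random.randn(33, 33), 0, 1).astype(np.float32)
# assert abs(psnrISTA(_a * 255, _b * 255) - psnr(_a, _b)) < 1e-3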

def RGBrec(model, csinput, device, img_orig, channels_Num, row_new, col_new):
    # [row, col, channels_Num] = img_orig.shape
    row = img_orig.shape[0]
    col = img_orig.shape[1]
    with torch.no_grad():
        csinput = csinput.to(device=device, dtype=dtype)
        img_recch = model(csinput)
        print(img_recch.shape)
        # Use Tensor.cpu() to copy the tensor to host memory first
        img_recch = img_recch.cpu().numpy()
    row_block = int(row_new / block_size)
    col_block = int(col_new / block_size)
    blocknum = int(row_block * col_block)
    RGB_rec = []
    rec_PSNR = 0.0
    img_x = np.zeros([row_new, col_new], dtype=np.float32)
    for channel_no in range(channels_Num):
        begblockid = blocknum * channel_no
        endblockid = blocknum * (channel_no + 1)
        img_rec = img_recch[begblockid:endblockid, :, :, :]
        # Reassemble the channel from its grid of blocks
        count = 0
        for xi in range(row_block):
            for yj in range(col_block):
                img_x[xi * block_size:(xi + 1) * block_size, yj * block_size:(yj + 1) * block_size] = img_rec[count, :, :, :]
                count = count + 1
        imgarrf_x = img_x[:row, :col]  # crop away the padding
        imgf_x = Image.fromarray(np.clip(imgarrf_x * 255, 0, 255).astype(np.uint8))
        # imgf_x.show()
        RGB_rec.append(imgf_x)
        if channels_Num == 3:
            rec_PSNR = rec_PSNR + psnr(imgarrf_x, img_orig[:, :, channel_no])
            # rec_PSNR = rec_PSNR + psnrISTA(imgarrf_x*255, img_orig[:,:,channel_no]*255)
        else:
            rec_PSNR = rec_PSNR + psnr(imgarrf_x, img_orig)
    if channels_Num == 3:
        RGBimg_rec = Image.merge("RGB", (RGB_rec[0], RGB_rec[1], RGB_rec[2]))
        rec_PSNR = rec_PSNR / 3.0
    elif channels_Num == 1:
        RGBimg_rec = RGB_rec[0]
    # RGBimg_rec.show()
    return RGBimg_rec, rec_PSNR

def PRWimgTensor(imgpath, phi):
    img_rgb = Image.open(imgpath)
    # plt.show(img_rgb)
    img = np.array(img_rgb, dtype=np.uint8)
    # img_bsize = sys.getsizeof(img)
    img_bsize = img.nbytes
    print("orig_img size ", img_bsize, img.shape, img.dtype)
    # [row, col, channels_Num] = img.shape
    channels_Num = len(img_rgb.split())
    row = img.shape[0]
    col = img.shape[1]
    # Pad each dimension up to a multiple of block_size
    if np.mod(row, block_size) == 0:
        row_pad = 0
    else:
        row_pad = block_size - np.mod(row, block_size)
    if np.mod(col, block_size) == 0:
        col_pad = 0
    else:
        col_pad = block_size - np.mod(col, block_size)
    row_new = row + row_pad
    col_new = col + col_pad
    row_block = int(row_new / block_size)
    col_block = int(col_new / block_size)
    blocknum = int(row_block * col_block)
    img_ycs = []
    ysize = 0.0
    for channel_no in range(channels_Num):
        # print("channel no ====%d" % (channel_no))
        if channels_Num == 1:
            imgorg = img[:, :]
        else:
            imgorg = img[:, :, channel_no]
        Ipadc = np.concatenate((imgorg, np.zeros([row, col_pad], dtype=np.uint8)), axis=1)
        Ipadc = np.concatenate((Ipadc, np.zeros([row_pad, col + col_pad], dtype=np.uint8)), axis=0)
        Ipadc = Ipadc / 255.0
        # [row_new, col_new] = Ipadc.shape
        img_x = np.zeros([blocknum, 1, block_size, block_size], dtype=np.float32)
        count = 0
        for xi in range(row_block):
            for yj in range(col_block):
                img_x[count] = Ipadc[xi * block_size:(xi + 1) * block_size, yj * block_size:(yj + 1) * block_size]
                count = count + 1
        img_x = torch.from_numpy(img_x)
        X = torch.empty(blocknum, 1, block_size, block_size, dtype=torch.float)
        for i in range(X.shape[0]):
            y = torch.mv(phi, img_x[i].view(-1))  # performs a matrix-vector product
            # You cannot use sys.getsizeof(y) to get the correct memory size of the tensor y
            ysize = ysize + sys.getsizeof(y.storage())
            x_tilde = torch.mv(phi.transpose(0, 1), y)
            x_tilde = x_tilde.view(1, 1, block_size, block_size)  # view as a 1-channel block_size x block_size image
            X[i] = x_tilde
        img_ycs.append(X)
    img_ycs22 = torch.cat(img_ycs)
    return img / 255.0, channels_Num, img_bsize, ysize, img_ycs22, row_new, col_new

def DeepInvertCS(filepaths, fname_phi, fname_sdict, CS_ratio):
    # set up device
    USE_GPU = True
    if USE_GPU and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print('using device:', device)
    # To load the model, use the following code:
    with open(fname_phi, 'rb') as f:
        phi = pickle.load(f)
    print("Phi size = ", phi.shape)
    model = DeepInverse(phi.shape)
    model.load_state_dict(torch.load(fname_sdict))
    model.eval()
    model.to(device)
    print('trained model loaded')
    ImgNum = len(filepaths)
    PSNR_All = np.zeros([ImgNum], dtype=np.float32)
    MCRy = np.zeros([ImgNum], dtype=np.float32)
    img_rec_path = createDir(filepaths[0], '/DVCS', CS_ratio[2:4])
    for img_no in range(ImgNum):
        imgName = filepaths[img_no]
        img_orig, channels_Num, img_bsize, ysize, img_ycs, row_new, col_new = PRWimgTensor(imgName, phi)
        MCRy[img_no] = img_bsize / ysize
        RGBimg_rec, rec_PSNR = RGBrec(model, img_ycs, device, img_orig, channels_Num, row_new, col_new)
        PSNR_All[img_no] = rec_PSNR
        print("Image %s, PSNR= %.6f, mCR= %0.3f" % (imgName, rec_PSNR, MCRy[img_no]))
        img_name = os.path.split(imgName)[-1]
        img_rec_name = "%s/%s" % (img_rec_path, img_name)
        RGBimg_rec.save(img_rec_name)
        # print("Rec_image save to", img_rec_name)
    # -------------------------------------------------
    print("-----------------------")
    output_data = "CS_ratio= %.2f , AvgPSNR is %.2f dB, mCR is %.3f \n" % (float(CS_ratio), np.mean(PSNR_All), np.mean(MCRy))
    print(output_data)
    # plt.subplot(1, 2, 1)
    # plt.imshow(img_orig); plt.title('original')
    # plt.subplot(1, 2, 2)
    # plt.imshow(RGBimg_rec); plt.title('restored')
    # plt.show()

if __name__ == '__main__':
    path_dataset = "/home/chengzi/Desktop/workspace20170624/DeepInvertCS/Test_Image"
    filepaths = glob.glob(path_dataset + '/*.tif')
    # path_dataset = "/media/chengzi/FT-dataset/PRW-v16.04.20/testprw"
    # filepaths = glob.glob(path_dataset + '/*.jpg')
    csrate = '0.01'
    fname_sdict = "dvcs_91imgcs_%s_gray.pt" % csrate[2:4]
    fname_phi = "dvcs_91imgcs_%s_gray-measurement.pickle" % csrate[2:4]
    DeepInvertCS(filepaths, fname_phi, fname_sdict, csrate)
# --- End of testDVCSPRW33.py (TaihuLight/DeepInverse-Pytorch, Apache-2.0) ---
import numpy as np
from glob import glob
from keras.models import Sequential, load_model
from keras.layers import InputLayer, GlobalMaxPool2D, Dense
# from dog_detector import dog_detector,
from dog_detector import path_to_tensor
# from human_detector import face_detector
from extract_bottleneck_features import extract_Resnet50
# load list of dog names
dog_names = [item[20:-1] for item in sorted(glob("../dogImages/train/*/"))]
# Load trained transfer learning top of ResNet50 model for dog classification
Resnet50_model = load_model('saved_models/resnet50')
def Resnet50_predict_breed(img_path):
    '''
    Function takes a path to an image as input
    and returns the dog breed that is predicted by the model.
    '''
    # extract bottleneck features
    tensor = path_to_tensor(img_path)
    # extract_Resnet50 represents ->
    # ResNet50(weights='imagenet', include_top=False, pooling='max').predict(preprocess_input(tensor))
    bottleneck_feature = extract_Resnet50(tensor)
    # adapt dimensions from (1, 2048) to req'd input shape (1, 1, 1, 2048)
    bottleneck_feature = np.expand_dims(np.expand_dims(bottleneck_feature, axis=0), axis=0)
    # obtain predicted vector
    predicted_vector = Resnet50_model.predict(bottleneck_feature)
    # return dog breed that is predicted by the model
    return dog_names[np.argmax(predicted_vector)].split('.')[-1]
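# Example usage (hypothetical image path):
# print(Resnet50_predict_breed('images/sample_dog.jpg'))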
# def determine_dog_breed(img_path):
#     """
#     Function to determine whether the image contains a human, a dog,
#     or neither and returns the dog breed or None
#     Input:
#         img_path = (str), path to image file
#     Output:
#         dog_breed = (str), detected dog breed
#     """
#     # human = face_detector(img_path)
#     # dog = dog_detector(img_path)
#     # if (human == True) or (dog == True):
#     #     dog_breed = Resnet50_predict_breed(img_path)
#     # else:
#     #     dog_breed = None
#     dog_breed = Resnet50_predict_breed(img_path)
#     return dog_breed
# --- End of predict_dog.py (mhoenick/dogapp, FTL/CNRI-Python) ---
import numpy as np
import tensorflow as tf


class VGG16Net(tf.keras.Model):
    def __init__(self, num_classes=3):
        super(VGG16Net, self).__init__()
        # Full VGG16 configuration, kept for reference:
        # self.block_1 = VGGBlock(conv_layers=2, filters=64)
        # self.block_2 = VGGBlock(conv_layers=2, filters=128)
        # self.block_3 = VGGBlock(conv_layers=3, filters=256)
        # self.block_4 = VGGBlock(conv_layers=3, filters=512)
        # self.block_5 = VGGBlock(conv_layers=3, filters=512)
        # self.flatten = tf.keras.layers.Flatten(input_shape=(7, 7, 512))
        # self.dense_1 = tf.keras.layers.Dense(4096, activation='relu')
        # self.dense_2 = tf.keras.layers.Dense(4096, activation='relu')
        # self.dense_3 = tf.keras.layers.Dense(4096, activation='relu')
        # self.classifier = tf.keras.layers.Dense(num_classes, activation='softmax')
        # Reduced configuration actually used:
        self.block_1 = VGGBlock(conv_layers=1, filters=64)
        self.block_2 = VGGBlock(conv_layers=1, filters=128)
        self.block_3 = VGGBlock(conv_layers=1, filters=256)
        self.block_4 = VGGBlock(conv_layers=1, filters=512)
        self.block_5 = VGGBlock(conv_layers=2, filters=512)
        self.flatten = tf.keras.layers.Flatten(input_shape=(7, 7, 512))
        self.dense_1 = tf.keras.layers.Dense(496, activation='relu')
        self.dense_2 = tf.keras.layers.Dense(496, activation='relu')
        self.dense_3 = tf.keras.layers.Dense(496, activation='relu')
        self.classifier = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, inputs):
        print('[+] inputs shape: ', inputs.shape)
        x = self.block_1(inputs)
        print('[+] self.block_1 shape: ', x.shape)
        x = self.block_2(x)
        print('[+] self.block_2 shape: ', x.shape)
        x = self.block_3(x)
        print('[+] self.block_3 shape: ', x.shape)
        x = self.block_4(x)
        print('[+] self.block_4 shape: ', x.shape)
        x = self.block_5(x)
        print('[+] self.block_5 shape: ', x.shape)
        x = self.flatten(x)
        print('[+] self.flatten shape: ', x.shape)
        x = self.dense_1(x)
        print('[+] self.dense_1 shape: ', x.shape)
        x = self.dense_2(x)
        print('[+] self.dense_2 shape: ', x.shape)
        x = self.dense_3(x)
        print('[+] self.dense_3 shape: ', x.shape)
        x = self.classifier(x)
        print('[+] self.classifier shape: ', tf.shape(x))
        print('[+] self.classifier shape: ', x.shape)
        return x

class VGGBlock(tf.keras.Model):
    def __init__(self, conv_layers=2, kernel_size=3, filters=64):
        super(VGGBlock, self).__init__(name='')
        self.conv_layers = conv_layers
        self.kernel_size = kernel_size
        self.filters = filters
        self.layer_id = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
        for i in range(self.conv_layers):
            # --- Option 1 ---
            # Build an attribute assignment such as
            #   self.conv2d_3_64_a = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')
            # and execute it, so each conv layer is registered on the model.
            left_cls = ''.join(["self.conv2d", "_", str(self.kernel_size), "_", str(self.filters), "_", str(self.layer_id[i])])
            right_cls = ''.join(["tf.keras.layers.Conv2D(", str(self.filters), ", ", "(", str(self.kernel_size), ", ", str(self.kernel_size), ")", ", ",
                                 "activation='relu'", ", ", "padding='same'", ")"])
            assignation = ''.join([left_cls, '=', right_cls])
            # print('[**] assignation: ', assignation)
            exec(assignation)
        # print('[**] ', self.conv2d_3_64_a)
        # print('[**] ', self.conv2d_3_64_b)
        self.max_pool2d = tf.keras.layers.MaxPool2D((2, 2), strides=(2, 2), padding='valid')

    def call(self, input_tensor, training=False):
        # print('[**] ', self.conv2d_3_64_a)
        # print('[**] ', self.conv2d_3_64_b)
        for i in range(self.conv_layers):
            # Look up the dynamically created layer by its generated name
            layer = ''.join(["self.conv2d", "_", str(self.kernel_size), "_", str(self.filters), "_", str(self.layer_id[i])])
            if i == 0:
                x = eval(layer)(input_tensor)
            else:
                x = eval(layer)(x)
        x = self.max_pool2d(x)
        return x
# class VGGBlock(tf.keras.Model):
# def __init__(self, conv_layers=2, kernel_size=3, filters=64):
# super(VGGBlock, self).__init__(name='')
# self.conv_layers = conv_layers
# self.kernel_size = kernel_size
# self.filters = filters
# self.layer_id = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
# for i in range(self.conv_layers):
# # --- Option 1 ---
# left_cls = ''.join(["self.conv2d", "_", str(self.kernel_size), "_", str(self.filters), "_", str(self.layer_id[i])])
# # right_cls = ''.join(["tf.keras.layers.Conv2D(", filters, ", ", "(", kernel_size, ", ", kernel_size, ")", ", ",
# # "activation='relu'", ")"])
# # assignation = ''.join([left_cls, '=', right_cls])
# # exec(assignation)
# # --- Option 2 ---
# globals()[left_cls] = tf.keras.layers.Conv2D(self.filters, self.kernel_size, activation='relu', padding='same')
# print('[**] ', globals()["self.conv2d_3_64_a"])
# print('[**] ', globals()["self.conv2d_3_64_b"])
# self.max_pool2d = tf.keras.layers.MaxPool2D((2, 2), strides=(2, 2), padding='valid')
# def call(self, input_tensor, training=False):
# print('[***] ', globals()["self.conv2d_3_64_a"])
# print('[***] ', globals()["self.conv2d_3_64_b"])
# for i in range(self.conv_layers):
# layer = ''.join(["self.conv2d", "_", str(self.kernel_size), "_", str(self.filters), "_", str(self.layer_id[i])])
# if i == 0:
# x = globals()[layer](input_tensor)
# else:
# x = globals()[layer](x)
# x = self.max_pool2d(x)
# return x
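# Minimal usage sketch (added for illustration; not in the original source).
# Assumes TF2 eager execution: push a dummy batch through one block to check
# the downsampling done by the trailing 2x2/stride-2 max-pool.
if __name__ == '__main__':
    block = VGGBlock(conv_layers=2, kernel_size=3, filters=64)
    out = block(tf.zeros((1, 32, 32, 3)))
    print(out.shape)  # (1, 16, 16, 64): spatial dims halved, 64 channels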
|
{"hexsha": "e83cf6410560fe4c1db5de9b08453b26f48ac12f", "size": 6200, "ext": "py", "lang": "Python", "max_stars_repo_path": "part4/networks/vgg.py", "max_stars_repo_name": "willogy-team/insights--tensorflow", "max_stars_repo_head_hexsha": "2d4885c99e7b550e94d679bed1f192f62f7e4139", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "part4/networks/vgg.py", "max_issues_repo_name": "willogy-team/insights--tensorflow", "max_issues_repo_head_hexsha": "2d4885c99e7b550e94d679bed1f192f62f7e4139", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "part4/networks/vgg.py", "max_forks_repo_name": "willogy-team/insights--tensorflow", "max_forks_repo_head_hexsha": "2d4885c99e7b550e94d679bed1f192f62f7e4139", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.6, "max_line_length": 152, "alphanum_fraction": 0.5516129032, "include": true, "reason": "import numpy", "num_tokens": 1664}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 11:19:32 2020
@author: luol2
"""
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
from nltk.stem.porter import PorterStemmer
import nltk
import numpy as np
import json
import copy
import sys
import argparse
import os
np.random.seed(123)
lemmatizer = WordNetLemmatizer()
stemmer = PorterStemmer()
def get_wordnet_pos(treebank_tag):
if treebank_tag.startswith('J'):
return wordnet.ADJ
elif treebank_tag.startswith('V'):
return wordnet.VERB
elif treebank_tag.startswith('N'):
return wordnet.NOUN
elif treebank_tag.startswith('R') or treebank_tag=='IN':
return wordnet.ADV
else:
return wordnet.NOUN
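# Illustrative mappings (added; not from the original source), mirroring the
# branches above:
#   get_wordnet_pos('VBD') -> wordnet.VERB   (past-tense verb)
#   get_wordnet_pos('JJR') -> wordnet.ADJ    (comparative adjective)
#   get_wordnet_pos('IN')  -> wordnet.ADV    (prepositions are mapped to ADV)
#   get_wordnet_pos('XYZ') -> wordnet.NOUN   (default fallback)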
def train_pos(hpo_obo,hpo_vocab,outpath):
fout=open(outpath+'distant_train_pos.conll','w',encoding='utf-8')
for hpoid in hpo_vocab:
if hpoid=='HP:None':
continue
term_name=hpo_obo[hpoid]['name']
temp_out=[]
temp_out.append(hpoid+'\t'+term_name[0])
tokens = term_name[0].split(' ')
token_pos = nltk.pos_tag(tokens)
for token in token_pos:
lemma = lemmatizer.lemmatize(token[0], get_wordnet_pos(token[1]))
stem = stemmer.stem(token[0])
temp_out.append(token[0]+'\t'+lemma+'\t'+stem+'\t'+token[1]+'\tB\tB')
fout.write('\n'.join(temp_out)+'\n\n')
if term_name[0]!=term_name[1]:
temp_out=[]
temp_out.append(hpoid+'\t'+term_name[1])
tokens = term_name[1].split(' ')
token_pos = nltk.pos_tag(tokens)
for token in token_pos:
lemma = lemmatizer.lemmatize(token[0], get_wordnet_pos(token[1]))
stem = stemmer.stem(token[0])
temp_out.append(token[0]+'\t'+lemma+'\t'+stem+'\t'+token[1]+'\tB\tB')
fout.write('\n'.join(temp_out)+'\n\n')
term_synonyms= hpo_obo[hpoid]['synonym']
for term_name in term_synonyms:
temp_out=[]
temp_out.append(hpoid+'\t'+term_name[0])
tokens = term_name[0].split(' ')
token_pos = nltk.pos_tag(tokens)
for token in token_pos:
lemma = lemmatizer.lemmatize(token[0], get_wordnet_pos(token[1]))
stem = stemmer.stem(token[0])
temp_out.append(token[0]+'\t'+lemma+'\t'+stem+'\t'+token[1]+'\tB\tB')
fout.write('\n'.join(temp_out)+'\n\n')
if term_name[0]!=term_name[1]:
temp_out=[]
temp_out.append(hpoid+'\t'+term_name[1])
tokens = term_name[1].split(' ')
token_pos = nltk.pos_tag(tokens)
for token in token_pos:
lemma = lemmatizer.lemmatize(token[0], get_wordnet_pos(token[1]))
stem = stemmer.stem(token[0])
temp_out.append(token[0]+'\t'+lemma+'\t'+stem+'\t'+token[1]+'\tB\tB')
fout.write('\n'.join(temp_out)+'\n\n')
fout.close()
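# Shape of one CoNLL-style block written above (illustrative values only; the
# id and term are placeholders, not taken from the real ontology file):
#   HP:0000001<TAB>abnormal gait
#   abnormal<TAB>abnormal<TAB>abnorm<TAB>JJ<TAB>B<TAB>B
#   gait<TAB>gait<TAB>gait<TAB>NN<TAB>B<TAB>B
# followed by a blank line separating entries.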
def pun_filter(temp_entity):
pun_list1=['.','!',';',':','?','(',')','[',']','{','}']
pun_list2=[',','-','/']
filter_flag=0
if (temp_entity[1].split('\t')[0] in pun_list2) or (temp_entity[-1].split('\t')[0] in pun_list2):
filter_flag=1
for ele in temp_entity[1:]:
token=ele.split('\t')[0]
if token in pun_list1:
filter_flag=1
break
return filter_flag
def pos_filter(temp_entity):
pos_list_l=['PRP']
pos_list=['IN','DT','CC','O','MD','EX','POS','WDT','WP','WP$','WRB','TO','PRP$']
verb_word=['is','are','was','were','had','have','has','be','been','also']
filter_flag=0
token_s=temp_entity[1].split('\t')[0]
token_e=temp_entity[-1].split('\t')[0]
pos_s=temp_entity[1].split('\t')[3]
pos_e=temp_entity[-1].split('\t')[3]
if (token_s in verb_word) or (token_e in verb_word):
filter_flag=1
if (pos_s in pos_list) or (pos_e in pos_list) or (pos_s in pos_list_l):
filter_flag=1
return filter_flag
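# Illustrative example (added; not from the original source). A candidate
# span uses the internal list format: element 0 is the "id<TAB>text" header
# and each later element is "token<TAB>lemma<TAB>stem<TAB>POS<TAB>B<TAB>B".
# cand = ['HP:None\tof the',
#         'of\tof\tof\tIN\tB\tB',
#         'the\tthe\tthe\tDT\tB\tB']
# pun_filter(cand) -> 0 (no punctuation tokens), but
# pos_filter(cand) -> 1 (starts with IN and ends with DT), so it is rejected.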
def train_neg(negfile,num,hpo_dic,outpath):
fin=open(negfile,'r',encoding='utf-8',errors='replace')
fout=open(outpath+'distant_train_neg.conll','w',encoding='utf-8')
all_text = fin.read().split()
fin.close()
    indices = np.random.choice(len(all_text), num*100)
lengths = np.random.randint(1, 10, num*100)
neg_num=0
i=0
while(neg_num<num):
        negative_text=' '.join(all_text[indices[i]:indices[i]+lengths[i]])
tokens = word_tokenize(negative_text.strip().lower().replace('-',' - ').replace('/',' / '))
negative_text=' '.join(tokens)
i+=1
# print(negative_text)
temp_out=[]
if negative_text not in hpo_dic:
temp_out.append('HP:None\t'+negative_text)
token_pos = nltk.pos_tag(tokens)
for token in token_pos:
lemma = lemmatizer.lemmatize(token[0], get_wordnet_pos(token[1]))
stem = stemmer.stem(token[0])
temp_out.append(token[0]+'\t'+lemma+'\t'+stem+'\t'+token[1]+'\tB\tB')
if pun_filter(temp_out)==0 and pos_filter(temp_out)==0:
neg_num+=1
fout.write('\n'.join(temp_out)+'\n\n')
else:
pass
#print('filter:',negative_text)
#print('neg_num:',neg_num)
fout.close()
def combine_pos_neg(outpath):
fin_pos=open(outpath+'distant_train_pos.conll','r',encoding='utf-8')
fin_neg=open(outpath+'distant_train_neg.conll','r',encoding='utf-8')
fout=open(outpath+'distant_train.conll','w',encoding='utf-8')
all_pos=fin_pos.read().rstrip()
all_neg=fin_neg.read().rstrip()
fin_pos.close()
fin_neg.close()
fout.write(all_pos+'\n\n'+all_neg+'\n')
fout.close()
if __name__=="__main__":
parser = argparse.ArgumentParser(description='build distant training corpus, python Build_distant_corpus.py -d dictpath -f fileneg -n number_of_neg -o outpath')
parser.add_argument('--dict', '-d', help="the input path of the ontology dictionary",default='../dict/')
parser.add_argument('--fileneg', '-f', help="the text file used to generate the negatives",default='../mutation_disease.txt')
parser.add_argument('--negnum', '-n', help="the number of negatives ",type=int, default=10000)
parser.add_argument('--output', '-o', help="the output folder of the distantly-supervised training dataset",default='../data/distant_train_data/')
args = parser.parse_args()
if not os.path.exists(args.output):
os.makedirs(args.output)
fin_obo=open(args.dict+'obo.json','r',encoding='utf-8')
hpo_obo=json.load(fin_obo)
fin_obo.close()
fin_label=open(args.dict+'lable.vocab','r',encoding='utf-8')
hpo_vocab=fin_label.read().strip().split('\n')
fin_label.close()
fin_dic=open(args.dict+'noabb_lemma.dic','r',encoding='utf-8')
hpo_dic=fin_dic.read().strip().split('\n')
fin_dic.close()
print('generating training positives........')
train_pos(hpo_obo,hpo_vocab,args.output)
print('done..........')
print('generating training negatives........')
train_neg(args.fileneg,args.negnum,hpo_dic,args.output)
print('done..........')
combine_pos_neg(args.output)
|
{"hexsha": "d03625d1786a013aa95d4a38af0deb79136a993b", "size": 7465, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Build_distant_corpus.py", "max_stars_repo_name": "ncbi-nlp/PhenoTagger", "max_stars_repo_head_hexsha": "e2857068def2580a4c3048682787ce7ae9a8d126", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2020-09-29T21:17:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T14:06:41.000Z", "max_issues_repo_path": "src/Build_distant_corpus.py", "max_issues_repo_name": "ncbi-nlp/PhenoTagger", "max_issues_repo_head_hexsha": "e2857068def2580a4c3048682787ce7ae9a8d126", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-03-09T06:04:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-10T13:20:08.000Z", "max_forks_repo_path": "src/Build_distant_corpus.py", "max_forks_repo_name": "ncbi-nlp/PhenoTagger", "max_forks_repo_head_hexsha": "e2857068def2580a4c3048682787ce7ae9a8d126", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-02-01T19:44:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T04:20:22.000Z", "avg_line_length": 39.4973544974, "max_line_length": 165, "alphanum_fraction": 0.5915606162, "include": true, "reason": "import numpy", "num_tokens": 1997}
|
@testset "999.available-captures-for-rook.jl" begin
board = [
'.' '.' '.' '.' '.' '.' '.' '.'
'.' '.' '.' 'p' '.' '.' '.' '.'
'.' '.' '.' 'R' '.' '.' '.' 'p'
'.' '.' '.' '.' '.' '.' '.' '.'
'.' '.' '.' '.' '.' '.' '.' '.'
'.' '.' '.' 'p' '.' '.' '.' '.'
'.' '.' '.' '.' '.' '.' '.' '.'
'.' '.' '.' '.' '.' '.' '.' '.'
]
@test num_rook_captures(board) == 3
board = [
'.' '.' '.' '.' '.' '.' '.' '.'
'.' 'p' 'p' 'p' 'p' 'p' '.' '.'
'.' 'p' 'p' 'B' 'p' 'p' '.' '.'
'.' 'p' 'B' 'R' 'B' 'p' '.' '.'
'.' 'p' 'p' 'B' 'p' 'p' '.' '.'
'.' 'p' 'p' 'p' 'p' 'p' '.' '.'
'.' '.' '.' '.' '.' '.' '.' '.'
'.' '.' '.' '.' '.' '.' '.' '.'
]
@test num_rook_captures(board) == 0
board = [
'.' '.' '.' '.' '.' '.' '.' '.'
'.' '.' '.' 'p' '.' '.' '.' '.'
'.' '.' '.' 'p' '.' '.' '.' '.'
'p' 'p' '.' 'R' '.' 'p' 'B' '.'
'.' '.' '.' '.' '.' '.' '.' '.'
'.' '.' '.' 'B' '.' '.' '.' '.'
'.' '.' '.' 'p' '.' '.' '.' '.'
'.' '.' '.' '.' '.' '.' '.' '.'
]
@test num_rook_captures(board) == 3
end
|
{"hexsha": "ba795a6974ef8d422af65909be93ff9baa7c118c", "size": 1198, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/problems/999.available-captures-for-rook.jl", "max_stars_repo_name": "jmmshn/LeetCode.jl", "max_stars_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-10-27T18:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T13:27:49.000Z", "max_issues_repo_path": "test/problems/999.available-captures-for-rook.jl", "max_issues_repo_name": "jmmshn/LeetCode.jl", "max_issues_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 57, "max_issues_repo_issues_event_min_datetime": "2020-11-01T07:26:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T11:57:53.000Z", "max_forks_repo_path": "test/problems/999.available-captures-for-rook.jl", "max_forks_repo_name": "jmmshn/LeetCode.jl", "max_forks_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2020-10-30T11:52:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-13T10:35:11.000Z", "avg_line_length": 31.5263157895, "max_line_length": 51, "alphanum_fraction": 0.1485809683, "num_tokens": 396}
|
# Import the necessary packages and modules
import matplotlib.pyplot as plt
import numpy as np
# Prepare the data
x = np.linspace(0, 10, 100)
# Plot the data
plt.plot(x, x, label='linear')
# Add a legend
plt.legend()
# Show the plot
plt.show()
print("done")
|
{"hexsha": "805cfa57f38d572f8bd8d921e1b13dbfb4eaf62f", "size": 262, "ext": "py", "lang": "Python", "max_stars_repo_path": "coding/plot.py", "max_stars_repo_name": "vadim-ivlev/STUDY", "max_stars_repo_head_hexsha": "286675fcdf154ea605f50059c4a60b212b3ba4b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "coding/plot.py", "max_issues_repo_name": "vadim-ivlev/STUDY", "max_issues_repo_head_hexsha": "286675fcdf154ea605f50059c4a60b212b3ba4b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "coding/plot.py", "max_forks_repo_name": "vadim-ivlev/STUDY", "max_forks_repo_head_hexsha": "286675fcdf154ea605f50059c4a60b212b3ba4b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.4117647059, "max_line_length": 43, "alphanum_fraction": 0.7099236641, "include": true, "reason": "import numpy", "num_tokens": 72}
|
#!/usr/bin/env python3
import matplotlib
matplotlib.use('pdf')
import scrublet as scr
import scipy.io
import scipy.sparse
import numpy
import numpy.ma
from PIL import Image, ImageDraw, ImageFont
import os
import sys
import re
import warnings
import traceback
import argparse
#
# Notes:
# o apply umi_cutoff in filter_counts_matrix() that is consistent with the
# umi_cutoff value used in reduce_dimensions.R in order to produce consistent
# numbers of cells (columns).
# o I have the impression that scrublet is vulnerable to internal errors
#   resulting from things like division by zero and sqrt of x < 0.
# o it appears that python issues a runtime warning for some of
#   these errors rather than stopping, so I am raising an exception
#   in these cases
#
def handle_warning(message, category, filename, lineno, file=None, line=None):
print( 'Scrublet: stop on warning \'%s\' in %s at line %s' % ( message, filename, lineno ), file=sys.stderr )
raise ValueError(message)
return( 0 )
def read_col_file(col_file):
col = []
with open(col_file, 'r') as fp:
for line in fp:
col.append(line.rstrip())
return(col)
def filter_counts_matrix(mat_in, outlier_filter, umi_cutoff, col_names_file):
print('run_scrublet.py: filter_counts_matrix: begin')
# start with COO format matrix
if(not scipy.sparse.isspmatrix_coo(mat_in)):
mat_in = mat_in.tocoo()
# read column names
col_in = read_col_file(col_names_file)
# binarize matrix using directly the (non-zero) m.data attribute
# (from snapATAC)
cutoff = numpy.percentile(a=mat_in.data, q=100 - outlier_filter, axis=None)
mat_in.data[mat_in.data > cutoff] = 0
mat_in.data[mat_in.data > 1] = 1
# find cells with no more than umi_cutoff counts
colsum = mat_in.sum(0)[0, :]
keep_col = colsum > umi_cutoff
# subset matrix and column names
mat_out = mat_in.tocsc()[:, keep_col.tolist()[0]]
col_out = [i for (i, v) in zip(col_in, keep_col.tolist()[0]) if v]
print('run_scrublet.py: filter_counts_matrix: end')
return(mat_out, col_out)
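# Worked illustration of the outlier cutoff above (added; example numbers):
# with outlier_filter = 0.1 the cutoff is the 99.9th percentile of the
# non-zero entries, so roughly the top 0.1% of counts are zeroed before
# binarization. E.g. numpy.percentile([1, 2, 3, 1000], 99.9) is ~997, so
# the 1000 is zeroed and the remaining entries all binarize to 1.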
def run_scrublet(sample_name, counts_matrix):
print('run_scrublet.py: run_scrublet: begin')
warnings.showwarning = handle_warning
if(numpy.size(counts_matrix, 0) == 0 or numpy.size(counts_matrix, 1) == 0):
        filename = args.sample_name + "-scrublet_hist.png"
        image = Image.new(mode = "RGB", size = (800,600), color = "white")
        draw = ImageDraw.Draw(image)
        draw.text((50,50), "Scrublet failed. This is generally because there aren't enough cells with sufficient reads.\n", fill = "black")
        # save the placeholder image so downstream steps find the expected file
        image.save(filename)
        return(-1)
if(not scipy.sparse.isspmatrix_csc(counts_matrix)):
counts_matrix = counts_matrix.T.tocsc()
else:
counts_matrix = counts_matrix.T
# count_matrix
# rows: cells
# cols: genes
scrub = scr.Scrublet(counts_matrix)
try:
doublet_scores, predicted_doublets = scrub.scrub_doublets()
scrub.plot_histogram()[0].savefig(args.sample_name + "-scrublet_hist.png")
all_scores = numpy.vstack((doublet_scores, predicted_doublets))
all_scores = numpy.transpose(all_scores)
numpy.savetxt(args.sample_name + "-scrublet_table.csv", all_scores, delimiter=",", fmt='%.8e,%d')
except (ZeroDivisionError, FloatingPointError, ValueError) as eobj:
tb_str = traceback.format_exc()
print('%s' % ( tb_str ), file=sys.stderr)
temp = numpy.array(["NA"] * numpy.size(counts_matrix, 0))
all_scores = numpy.vstack((temp, temp))
all_scores = numpy.transpose(all_scores)
filename = args.sample_name + "-scrublet_hist.png"
image = Image.new(mode = "RGB", size = (800,600), color = "white")
draw = ImageDraw.Draw(image)
draw.text((50,50), "Scrublet failed. This is generally because there aren't enough cells with sufficient reads.\n\nFailure message:\n\n" + tb_str, fill = "black")
image.save(filename)
numpy.savetxt(args.sample_name + "-scrublet_table.csv", all_scores, fmt="%s", delimiter=",")
    except (AttributeError) as eobj:
        tb_str = traceback.format_exc()
        print('%s' % ( tb_str ), file=sys.stderr)
        # scrub_doublets() computed the scores but failed to choose a doublet
        # threshold automatically, so call doublets with an explicit one
        predicted_doublets = scrub.call_doublets(threshold=0.15)
scrub.plot_histogram()[0].savefig(args.sample_name + "-scrublet_hist.png")
all_scores = numpy.vstack((doublet_scores, predicted_doublets))
all_scores = numpy.transpose(all_scores)
numpy.savetxt(args.sample_name + "-scrublet_table.csv", all_scores, delimiter=",", header='doublet_score,doublet')
print('run_scrublet.py: run_scrublet: end')
return( 0 )
if __name__ == '__main__':
parser = argparse.ArgumentParser('Run scrublet.')
parser.add_argument('--sample_name', required=True, help='Sample name (for naming).')
parser.add_argument('--mat_file', required=True, help='input matrix file name.')
parser.add_argument('--umi_cutoff', type=int, required=True, help='umi filter cutoff.')
args = parser.parse_args()
col_names_file = re.sub('[.]gz$', '', args.mat_file)
col_names_file = re.sub('[.]mtx$', '', col_names_file) + '.columns.txt'
counts_matrix = scipy.io.mmread(args.mat_file)
# note: numpy percentile takes percentile value whereas R quantile takes 'quantile' value: differ by factor of 100
outlier_filter = 1.0e-1
counts_matrix, col_names = filter_counts_matrix(counts_matrix, outlier_filter, args.umi_cutoff, col_names_file)
run_scrublet(args.sample_name, counts_matrix)
peak_matrix_cols_out_file = '%s-scrublet_columns.txt' % (args.sample_name)
with open(peak_matrix_cols_out_file, 'w' ) as fp:
fp.write('\n'.join(col_names) + '\n')
|
{"hexsha": "aafe3743af6da37d886e1e2a00dcdd64c2dbb7fc", "size": 5744, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/run_scrublet.py", "max_stars_repo_name": "bbi-lab/bbi-sciatac-analyze", "max_stars_repo_head_hexsha": "f5cfb20ed98373a21fdda152f2a91cfd3347411d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/run_scrublet.py", "max_issues_repo_name": "bbi-lab/bbi-sciatac-analyze", "max_issues_repo_head_hexsha": "f5cfb20ed98373a21fdda152f2a91cfd3347411d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/run_scrublet.py", "max_forks_repo_name": "bbi-lab/bbi-sciatac-analyze", "max_forks_repo_head_hexsha": "f5cfb20ed98373a21fdda152f2a91cfd3347411d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6231884058, "max_line_length": 170, "alphanum_fraction": 0.688718663, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1462}
|
[STATEMENT]
theorem main\<^sub>P\<^sub>K\<^sub>B: \<open>G \<TTurnstile>\<^sub>!\<^sub>K\<^sub>B p \<longleftrightarrow> G \<turnstile>\<^sub>!\<^sub>K\<^sub>B p\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (G \<TTurnstile>\<^sub>!\<^sub>K\<^sub>B p) = (G \<turnstile>\<^sub>!\<^sub>K\<^sub>B p)
[PROOF STEP]
using strong_soundness\<^sub>P\<^sub>K\<^sub>B[of G p] strong_completeness\<^sub>P\<^sub>K\<^sub>B[of G p]
[PROOF STATE]
proof (prove)
using this:
G \<turnstile>\<^sub>!\<^sub>K\<^sub>B p \<Longrightarrow> symmetric; G \<TTurnstile>\<^sub>!\<star> p
G \<TTurnstile>\<^sub>!\<^sub>K\<^sub>B p \<Longrightarrow> G \<turnstile>\<^sub>!\<^sub>K\<^sub>B p
goal (1 subgoal):
1. (G \<TTurnstile>\<^sub>!\<^sub>K\<^sub>B p) = (G \<turnstile>\<^sub>!\<^sub>K\<^sub>B p)
[PROOF STEP]
by fast
|
{"llama_tokens": 348, "file": "Public_Announcement_Logic_PAL", "length": 2}
|
# the inclusion of the tests module is not meant to offer best practices for
# testing in general, but rather to support the `find_packages` example in
# setup.py that excludes installing the "tests" package
from __future__ import print_function
import networkx as nx
from pyhwcomm import Compute, Transfer
from pyhwcomm.machines.blaise import Blaise
from pyhwcomm.scheduler import TrivialScheduler
from pyhwcomm.programs.vecaddstream import VecAddStream
def test_success():
assert True
def test_scheduler():
c = Blaise()
s = TrivialScheduler()
p = s(VecAddStream, c)
c.execute(p)
assert True
def test_blaise():
c = Blaise()
p = nx.DiGraph()
p.add_edge(Compute(c.cpu0), Transfer(1000, c.cpu0, c.gpu0))
elapsed = c.execute(p)
print(elapsed)
assert True
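# Illustrative extra test (added; mirrors the calls above and assumes
# Transfer accepts any (size, src, dst) device pair on this machine model).
def test_roundtrip():
    c = Blaise()
    p = nx.DiGraph()
    up = Transfer(1000, c.cpu0, c.gpu0)
    down = Transfer(1000, c.gpu0, c.cpu0)
    p.add_edge(Compute(c.cpu0), up)
    p.add_edge(up, down)
    elapsed = c.execute(p)
    print(elapsed)
    assert True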
|
{"hexsha": "cb86a9f7cdc84ee45fa5e2c314cf9d6225d8d159", "size": 810, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_simple.py", "max_stars_repo_name": "cwpearson/pyhwcomm", "max_stars_repo_head_hexsha": "7f893552b6a7f4fa5a5cfbc1d9d10e6f51c2c6b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_simple.py", "max_issues_repo_name": "cwpearson/pyhwcomm", "max_issues_repo_head_hexsha": "7f893552b6a7f4fa5a5cfbc1d9d10e6f51c2c6b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-02-28T21:17:13.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-28T21:17:13.000Z", "max_forks_repo_path": "tests/test_simple.py", "max_forks_repo_name": "cwpearson/pyhwcomm", "max_forks_repo_head_hexsha": "7f893552b6a7f4fa5a5cfbc1d9d10e6f51c2c6b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.1428571429, "max_line_length": 76, "alphanum_fraction": 0.7308641975, "include": true, "reason": "import networkx", "num_tokens": 205}
|
# USAGE
# python histogram_with_mask.py
# import the necessary packages
from matplotlib import pyplot as plt
import numpy as np
import cv2
def plot_histogram(image, title, mask=None):
# split the image into its respective channels, then initialize
# the tuple of channel names along with our figure for plotting
chans = cv2.split(image)
colors = ("b", "g", "r")
plt.figure()
plt.title(title)
plt.xlabel("Bins")
plt.ylabel("# of Pixels")
# loop over the image channels
for (chan, color) in zip(chans, colors):
# create a histogram for the current channel and plot it
hist = cv2.calcHist([chan], [0], mask, [256], [0, 256])
plt.plot(hist, color=color)
plt.xlim([0, 256])
# load the beach image and plot a histogram for it
image = cv2.imread("beach.png")
plot_histogram(image, "Histogram for Original Image")
cv2.imshow("Original", image)
# construct a mask for our image; our mask will be *black* for regions
# we want to *ignore* and *white* for regions we want to *examine*
mask = np.zeros(image.shape[:2], dtype="uint8")
cv2.rectangle(mask, (60, 290), (210, 390), 255, -1)
cv2.imshow("Mask", mask)
# display the masked region
masked = cv2.bitwise_and(image, image, mask=mask)
cv2.imshow("Applying the Mask", masked)
# compute a histogram for our image, but we'll only include pixels in
# the masked region
plot_histogram(image, "Histogram for Masked Image", mask=mask)
# show our plots
plt.show()
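# Optional sanity check (added for illustration): the masked histogram's
# total count should equal the number of nonzero (white) pixels in the mask.
hist = cv2.calcHist([cv2.split(image)[0]], [0], mask, [256], [0, 256])
assert int(hist.sum()) == int(cv2.countNonZero(mask))
print("masked pixel count:", int(hist.sum()))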
|
{"hexsha": "e04ace0f492036085f4c051f97aa0595a7310288", "size": 1423, "ext": "py", "lang": "Python", "max_stars_repo_path": "OpenCV 104/Histograms/opencv-image-histograms/histogram_with_mask.py", "max_stars_repo_name": "jjaramillo34/pyimagesearchuniversity_course", "max_stars_repo_head_hexsha": "0a4a26c29a6f8122f6a03d3393ac01ebbc14a391", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-13T16:52:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T16:52:58.000Z", "max_issues_repo_path": "OpenCV 104/Histograms/opencv-image-histograms/histogram_with_mask.py", "max_issues_repo_name": "jjaramillo34/pyimagesearchuniversity_course", "max_issues_repo_head_hexsha": "0a4a26c29a6f8122f6a03d3393ac01ebbc14a391", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OpenCV 104/Histograms/opencv-image-histograms/histogram_with_mask.py", "max_forks_repo_name": "jjaramillo34/pyimagesearchuniversity_course", "max_forks_repo_head_hexsha": "0a4a26c29a6f8122f6a03d3393ac01ebbc14a391", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9347826087, "max_line_length": 70, "alphanum_fraction": 0.7203092059, "include": true, "reason": "import numpy", "num_tokens": 383}
|
import os
import pathlib
import matplotlib.pyplot as plt
import gdal
import tensorflow as tf
import numpy as np
import sys
import globalvars as g
from data_generator import DataGenerator
from model import get_model
from included_vars import data_vars, vars_to_plot, operators
print('Python version: %s' % sys.version)
print('TensorFlow version: %s' % tf.__version__)
print('Keras version: %s' % tf.keras.__version__)
def get_band_identifier(band_data):
desc = band_data.GetDescription()
metadata = band_data.GetMetadata()
d = str(metadata['GRIB_ELEMENT']) + " -- "
d += str(metadata['GRIB_COMMENT']) + " -- "
d += str(desc)
return d
def print_band_identifier(ttl, data = None, used = True):
if used and g.PRINT_USED:
print(ttl)
if g.SHOULD_DISPLAY_BAND_STATS:
print("MAX: " + str(np.max(data)))
print("MIN: " + str(np.min(data)))
elif not used and g.PRINT_UNUSED:
print(ttl + " # UNUSED")
if g.SHOULD_DISPLAY_BAND_STATS:
print("MAX: " + str(np.max(data)))
print("MIN: " + str(np.min(data)))
def get_input_dimensions(nparrays):
size = g.RADIUS * 2 + 1
return size * size * len(nparrays) + 4 # x, y, day, time
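# Worked example (added for illustration): with g.RADIUS = 2 the sampling
# window is a (2*2+1) x (2*2+1) = 5x5 patch, so with e.g. 10 data variables
# the flattened input vector has 5*5*10 + 4 = 254 entries; the +4 covers the
# x, y, day and time scalars appended to each sample.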
def get_output_dimensions(nparrays):
return len(nparrays)
# Loop over all data files
path = os.path.join(pathlib.Path(__file__).parent.absolute(), "data")
finished_files_count = 0
data_by_days = []
for ff in os.listdir(path):
    if g.JUST_ONE_FILE and finished_files_count > 0: break
print("Opening: " + ff)
file_path = os.path.join(path, ff)
# Open the file
grib = gdal.Open(file_path)
present_data_vars = []
# Loop over all data fields of a grib file and load those requested to present_data_fields
for a in range(grib.RasterCount):
a += 1
# Read an specific band
band = grib.GetRasterBand(a)
ttl = get_band_identifier(band)
# Read the band as a Python array
data = band.ReadAsArray()
if ttl in data_vars:
print_band_identifier(ttl, data = data, used = True)
else:
print_band_identifier(ttl, data = data, used = False)
# Show the image
if g.SHOULD_PLOT and ttl in vars_to_plot:
plt.imshow(data, cmap='jet')
plt.title(ttl)
plt.show()
# Add data from this layer to data fields
if ttl in data_vars:
# transform data
if ttl in operators:
op = operators[ttl]
data *= op[1]
data += op[0]
else:
raise SystemError("MISSING OPERATOR FOR: " + str(ttl))
present_data_vars.append((ttl, data))
# Verify that all requested data fields are present and that we don't have any excess fields either
requested_data_vars = data_vars.copy()
for a in present_data_vars:
ttl = a[0]
data = a[1]
if ttl in requested_data_vars:
requested_data_vars.remove(ttl)
else:
raise SystemError("PRESENT_DATA_VARS HAS AN ENTRY THAT WASN'T REQUESTED OR THERE IS A DUPLICATE!")
if len(requested_data_vars) > 0:
raise SystemError("NOT ALL REQUESTED FIELDS WERE PRESENT! MISSING: " + str(requested_data_vars))
# Sort present_data_vars by ttl and the order in data_vars
grib_data = []
for i in range(len(data_vars)):
ttl = data_vars[i]
        for a in present_data_vars:
            if a[0] == ttl:
                grib_data.append(a[1])
                break  # found the matching field; stop scanning
data_by_days.append(grib_data)
finished_files_count += 1
print("--- complete ---")
g.GLOBAL_MAP_DIMENSIONS = data_by_days[0][0].shape
print("GLOBAL MAP DIMENSIONS: " + str(g.GLOBAL_MAP_DIMENSIONS))
g.INPUT_SIZE = get_input_dimensions(data_by_days[0])
print("INPUT SIZE: " + str(g.INPUT_SIZE))
g.OUTPUT_SIZE = get_output_dimensions(data_by_days[0])
print("OUTPUT SIZE: " + str(g.OUTPUT_SIZE))
mirrored_strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(mirrored_strategy.num_replicas_in_sync))
with mirrored_strategy.scope():
model = get_model()
# NOTE !!! EVEN THOUGH BELOW WE USE THE WORD "DAY" WE REALLY MEAN "TICK"
if len(data_by_days) >= 2:
generator = DataGenerator(
data_by_days,
batch_size=g.BATCH_SIZE*mirrored_strategy.num_replicas_in_sync,
len_multiplier=g.EPOCH_LENGHT_MULTIPLIER)
validation_generator = DataGenerator(
data_by_days,
batch_size=g.BATCH_SIZE*mirrored_strategy.num_replicas_in_sync,
len_multiplier=g.VALIDATION_LENGTH_MULTIPLIER)
print("Generator len: " + str(len(generator)))
print(model.summary())
epochs_count = g.EPOCHS
history = model.fit(generator,
epochs=epochs_count,
verbose=g.VERBOSITY,
validation_data=validation_generator)
# LOG STUFF
print(history.history.keys())
# "Accuracy"
plt.plot(history.history['mean_absolute_error'])
plt.plot(history.history['val_mean_absolute_error'])
plt.title('mean_absolute_error')
plt.ylabel('mean_absolute_error')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# "Loss"
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# SAVE MODEL
if g.SHOULD_SAVE_MODEL:
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'models')
tf.keras.models.save_model(model, filename, overwrite=True)
else:
print("NOT ENOUGH GRIB FILES FOR ACTUAL LEARNING!")
|
{"hexsha": "698dcfe4b081b56d4322914dba296633e7e6d3d3", "size": 5800, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "EpicScizor/climatenet", "max_stars_repo_head_hexsha": "2d449c4f9700c5ed06f98396378549a4f1d5e9e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "EpicScizor/climatenet", "max_issues_repo_head_hexsha": "2d449c4f9700c5ed06f98396378549a4f1d5e9e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "EpicScizor/climatenet", "max_forks_repo_head_hexsha": "2d449c4f9700c5ed06f98396378549a4f1d5e9e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.918128655, "max_line_length": 110, "alphanum_fraction": 0.6443103448, "include": true, "reason": "import numpy", "num_tokens": 1407}
|
import mxnet as mx
import numpy as np
from distutils.util import strtobool
from ..processing.generate_anchor import generate_anchors
from ..processing.bbox_transform import iou_pred, nonlinear_pred
from ..processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
def check_equal(lst, errstr='check_equal'):
assert len(set(lst)) <= 1, '%s:%s' % (errstr, lst)
class SampleAnchorsOperator(mx.operator.CustomOp):
"""
RPN OHEM:
input: ['cls_prob', 'bbox_loss', 'label', 'bbox_pred']
hyperparams: basic anchors
output: ['cls_prob', 'bbox_pred', 'cls_mask', 'bbox_mask']
in_shape:
cls_prob: (batch_images, 2, a*h*w)
bbox_loss: (batch_images, a*4, h, w)
label: (batch_images, a*h*w)
bbox_pred: (batch_images, a*4, h, w)
out_shape:
cls_prob_out: (batch_images, 2, a*h*w)
bbox_loss_out: (batch_images, a*4, h, w)
cls_mask: (batch_images, 2, a*h*w)
bbox_mask: (batch_images, a*4, h, w)
procedure:
1. calculate log loss of softmax_prob
2. predict boxes (applying bbox_pred)
3. nms
4. sort [logloss + bbox_loss] and backprop grad
"""
def __init__(self, feature_stride, scales, ratios,
rpn_pre_nms_top_n, rpn_batch_size, nms_threshold,
iou_loss, ignore, transform, np_ratio):
super(SampleAnchorsOperator, self).__init__()
self._feat_stride = feature_stride
self._anchors = generate_anchors(base_size=feature_stride, scales=scales, ratios=ratios)
self._rpn_pre_nms_top_n = rpn_pre_nms_top_n
self._rpn_batch_size = rpn_batch_size
self._nms_threshold = nms_threshold
self._bbox_pred = iou_pred if iou_loss else nonlinear_pred
self._ignore = ignore
self._transform = transform
self._np_ratio = np_ratio
def forward(self, is_train, req, in_data, out_data, aux):
nms = gpu_nms_wrapper(self._nms_threshold, in_data[0].context.device_id)
cls_prob = in_data[0].asnumpy()
bbox_loss = in_data[1].asnumpy()
label = in_data[2].asnumpy()
bbox_deltas = in_data[3].asnumpy()
# get shape and check consistency
batch_size, num_anchor, height, width = bbox_deltas.shape
        num_anchor //= 4  # integer division: keep it usable in shape tuples
check_equal([num_anchor, len(self._anchors)], 'inconsistent anchor shape')
# get anchors
# enumerate all shifts
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
check_equal([shifts.shape[0], height * width], 'inconsistent shift')
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to (K*A, 4) anchors
shifts = shifts.reshape((1, shifts.shape[0], 4)).transpose((1, 0, 2))
anchors = self._anchors.reshape((1, num_anchor, 4)) + shifts
anchors = anchors.reshape((height * width * num_anchor, 4))
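        # Toy illustration (added; not part of the original op): for a 2x2
        # feature map with stride 16 and 3 base anchors, shifts above has
        # shape (4, 4); reshaping to (4, 1, 4) and adding the (1, 3, 4) base
        # anchors broadcasts to (4, 3, 4), i.e. 2*2*3 = 12 anchors after the
        # final reshape.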
# reshape anchors to (b, h*w*a, 4)
if batch_size > 1:
anchors = np.tile(anchors, (batch_size, 1))
anchors = anchors.reshape((batch_size, -1, 4))
# calculate log loss
# cls_prob is now (b, 2, a*h, w)
cls_prob_flat_shape = (cls_prob.shape[0], cls_prob.shape[1], cls_prob.shape[2] * cls_prob.shape[3])
cls_prob = cls_prob.reshape(cls_prob_flat_shape)
# turn cls_prob from (b, 2, a*h*w) to (b*a*h*w, 2)
# label from (b, a*h*w) to (b*a*h*w)
label = label.astype(np.int32).reshape((-1))
cls_prob = cls_prob.transpose((0, 2, 1)).reshape((label.shape[0], -1))
cls_prob = cls_prob[np.arange(label.shape[0]), label]
cls_loss = -1 * np.log(cls_prob + 1e-14)
# softmax has ignore_label; set cls_loss to be 0 (seanlx)
# so that they will not be treated as negative example
cls_loss[label == -1] = 0
# turn label from (b*a*h*w) to (b, h, w, a) to (b, h*w*a)
label = label.reshape((batch_size, num_anchor, height, width))
label = label.transpose((0, 2, 3, 1)).reshape((batch_size, -1))
# turn log loss from (b*a*h*w) to (b, h, w, a) to (b, h*w*a)
cls_loss = cls_loss.reshape((batch_size, num_anchor, height, width))
cls_loss = cls_loss.transpose((0, 2, 3, 1)).reshape((batch_size, -1))
# turn bbox from (b, a*4, h, w) to (b,h,w,a*4) to (b, h*w*a, 4)
# and then reduce the last dimension
bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((batch_size, -1, 4))
bbox_loss = bbox_loss.transpose((0, 2, 3, 1)).reshape((batch_size, -1, 4))
bbox_loss_red = bbox_loss.sum(axis=2)
# # sanity check for label and bbox_loss
# check_equal([len(np.where(label == 1)[1]), len(np.where(bbox_loss_red > 0)[1])])
# # while label == 1, bbox_loss must be > 0
# check_equal([len(np.where(label == 1)[1]), len(np.where(bbox_loss_red[label == 1])[0])])
# convert anchors to proposals
all_loss = cls_loss + bbox_loss_red
# initialize output
cls_mask = np.zeros(cls_prob_flat_shape)
bbox_mask = np.zeros(in_data[1].shape)
for batch_index, label_i in enumerate(label):
anchor = anchors[batch_index]
bbox_delta = bbox_deltas[batch_index]
loss = all_loss[batch_index]
pos_ind = np.where(label_i > 0)[0]
loss[pos_ind] = 0
num_pos = len(pos_ind)
num_neg = int(num_pos * self._np_ratio)
if self._transform:
# proposal only predicts foreground class (rcnn will have background)
proposal = self._bbox_pred(anchor, bbox_delta)
else:
proposal = anchor
# ohem
pre_inds = np.argsort(loss)[::-1][:self._rpn_pre_nms_top_n]
det = np.hstack((proposal[pre_inds, :], loss[pre_inds, np.newaxis])).astype(np.float32)
keep_ind = nms(det) # no need to pad
# select neg examples by bootstrap
start = int(self._ignore * len(keep_ind))
keep_ind = keep_ind[start:start + num_neg]
keep_ind = pre_inds[keep_ind]
# select pos examples
# pos_ind = np.where(label[batch_index] > 0)[0]
keep_ind = np.append(keep_ind, pos_ind)
# check
# print 'selected pos label', len(np.where(label[0, keep_ind] == 1)[0])
# print 'selected pos bbox', len(np.where(bbox_loss_red[0, keep_ind] > 0)[0])
# convert back to spatial index in h*w*a
ind_h, ind_w, ind_a = np.unravel_index(keep_ind, (height, width, num_anchor))
keep_ind = np.ravel_multi_index((ind_a, ind_h, ind_w), (num_anchor, height, width))
# output cls_mask
cls_mask[batch_index, :, keep_ind] = 1
# in_a should be step by 4
bbox_mask_red = np.zeros((1, num_anchor, height, width))
bbox_mask_red[0, ind_a, ind_h, ind_w] = 1
bbox_mask[batch_index] = np.repeat(bbox_mask_red, 4, axis=1)
# bbox_mask[batch_index, 4 * ind_a, ind_h, ind_w] = 1
# bbox_mask[batch_index, 4 * ind_a + 1, ind_h, ind_w] = 1
# bbox_mask[batch_index, 4 * ind_a + 2, ind_h, ind_w] = 1
# bbox_mask[batch_index, 4 * ind_a + 3, ind_h, ind_w] = 1
# pos label check
# label = in_data[2].asnumpy()
# print 'original pos label', np.sum(label[batch_index, keep_ind] == 1)
# output bbox_mask
# pos bbox check
# bbox_loss_o = in_data[1].asnumpy()
# bbox_loss_o_red = bbox_loss_o[:, ::4, :, :] + bbox_loss_o[:, 1::4, :, :] + \
# bbox_loss_o[:, 2::4, :, :] + bbox_loss_o[:, 3::4, :, :]
# print 'original pos bbox', \
# np.sum(bbox_loss_o_red[batch_index, ind_a, ind_h, ind_w] > 0)
# output ['cls_prob_out', 'bbox_loss_out', 'cls_mask', 'bbox_mask']
self.assign(out_data[0], req[0], in_data[0])
self.assign(out_data[1], req[1], in_data[1])
self.assign(out_data[2], req[2], cls_mask.reshape(in_data[0].shape))
self.assign(out_data[3], req[3], bbox_mask)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
# in_grad: ['cls_prob', 'bbox_loss', 'label', 'bbox_pred']
# put cls_mask and bbox_mask to in_grad of cls_prob and bbox_loss
# normalize bbox_loss by total selected box
ncount = mx.nd.sum(out_data[3]) / 4
self.assign(in_grad[0], req[0], out_data[2])
self.assign(in_grad[1], req[1], out_data[3] / ncount)
self.assign(in_grad[2], req[2], 0)
self.assign(in_grad[3], req[3], 0)
@mx.operator.register('sample_anchors')
class SampleAnchorsProp(mx.operator.CustomOpProp):
def __init__(self, feature_stride='16', scales='(8, 16, 32)', ratios='(0.5, 1, 2)',
rpn_pre_nms_top_n='12000', rpn_batch_size='256', nms_threshold='0.7',
iou_loss='False', ignore='0', transform='False', np_ratio='2'):
# iou_loss: different bbox_pred
# ignore: give up top hard examples, e.g. 0.05
# transform: nms on the transformed rois
super(SampleAnchorsProp, self).__init__(need_top_grad=False)
self._feature_stride = int(feature_stride)
self._scales = np.fromstring(scales[1:-1], dtype=float, sep=',')
self._ratios = np.fromstring(ratios[1:-1], dtype=float, sep=',')
self._rpn_pre_nms_top_n = int(rpn_pre_nms_top_n)
self._rpn_batch_size = int(rpn_batch_size)
self._nms_threshold = float(nms_threshold)
self._iou_loss = strtobool(iou_loss)
self._ignore = float(ignore)
self._transform = strtobool(transform)
self._np_ratio = float(np_ratio)
def list_arguments(self):
return ['cls_prob', 'bbox_loss', 'label', 'bbox_pred']
def list_outputs(self):
return ['cls_prob_out', 'bbox_loss_out', 'cls_mask', 'bbox_mask']
def infer_shape(self, in_shape):
cls_prob_shape = in_shape[0]
bbox_loss_shape = in_shape[1]
label_shape = in_shape[2]
bbox_pred_shape = in_shape[3]
# share batch size
batch_sizes = [cls_prob_shape[0], label_shape[0],
bbox_pred_shape[0], bbox_loss_shape[0]]
check_equal(batch_sizes, 'inconsistent batch size')
# share spatial dimension
        spatial_dims = [cls_prob_shape[2] * cls_prob_shape[3], label_shape[1],
                        bbox_pred_shape[1] // 4 * bbox_pred_shape[2] * bbox_pred_shape[3],
                        bbox_loss_shape[1] // 4 * bbox_loss_shape[2] * bbox_loss_shape[3]]
check_equal(spatial_dims, 'inconsistent spatial dimension')
out_shape = [cls_prob_shape, bbox_loss_shape, cls_prob_shape, bbox_loss_shape]
return in_shape, out_shape
def create_operator(self, ctx, shapes, dtypes):
return SampleAnchorsOperator(self._feature_stride, self._scales, self._ratios,
self._rpn_pre_nms_top_n, self._rpn_batch_size, self._nms_threshold,
self._iou_loss, self._ignore, self._transform, self._np_ratio)
|
{"hexsha": "45d6e8d60667e568b468137252cf4abbbdadecc5", "size": 11380, "ext": "py", "lang": "Python", "max_stars_repo_path": "rcnn_dff/symbol/sample_anchors.py", "max_stars_repo_name": "tonysy/mx-rcnn-flow", "max_stars_repo_head_hexsha": "b78c3c964c802bb874d673170d7452e7a573a998", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-01-31T02:47:42.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-05T03:48:54.000Z", "max_issues_repo_path": "rcnn_dff/symbol/sample_anchors.py", "max_issues_repo_name": "tonysy/mx-rcnn-flow", "max_issues_repo_head_hexsha": "b78c3c964c802bb874d673170d7452e7a573a998", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rcnn_dff/symbol/sample_anchors.py", "max_forks_repo_name": "tonysy/mx-rcnn-flow", "max_forks_repo_head_hexsha": "b78c3c964c802bb874d673170d7452e7a573a998", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.2199170124, "max_line_length": 108, "alphanum_fraction": 0.610456942, "include": true, "reason": "import numpy", "num_tokens": 3160}
|
import numpy as np
from phenom.testing import prec_angle_helper as pah
from phenom import HztoMf, m1_m2_M_q
import lal
f_gw_min=1.
f_gw_max=700
df_gw=0.1
f_gw_ref=f_gw_min*2
Npts = int(np.ceil((f_gw_max - f_gw_min)/df_gw))
f_gw_list = lal.CreateREAL8Sequence(Npts)
f_gw_list.data = np.arange(f_gw_min, f_gw_max, df_gw)
# m1=60*lal.MSUN_SI/10.
# m2=10*lal.MSUN_SI/10.
# s1x=0.7393
# s1y=0.1902
# s1z=-0.4953
# s2x=-0.1896
# s2y=5.103e-2
# s2z=-0.2268
# s1x=0.
# s1y=0
# s1z=0
# s2x=0
# s2y=0
# s2z=0
# s1x=1.
# s1y=0
# s1z=0
# s2x=1.
# s2y=0
# s2z=0
m1,m2 =m1_m2_M_q(20., 3)
m1=m1*lal.MSUN_SI
m2=m2*lal.MSUN_SI
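# Worked numbers (added for illustration, assuming q = m1/m2 and
# M = m1 + m2): m1_m2_M_q(20., 3) gives m1 = 20*3/4 = 15 Msun and
# m2 = 20/4 = 5 Msun before the conversion to SI units above.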
s1x=1.
s1y=0
s1z=0
s2x=0.
s2y=0
s2z=0
pv3pars = {
"flist":f_gw_list,
"m1":m1,
"m2":m2,
"s1x":s1x,
"s1y":s1y,
"s1z":s1z,
"s2x":s2x,
"s2y":s2y,
"s2z":s2z,
"fref":f_gw_ref,
"ExpansionOrder":5,
"PN":"3"
}
pv3angles={}
pv3angles['alpha'],pv3angles['beta'],pv3angles['epsilon']= pah.evaluate_phenomPv3_angles(**pv3pars)
pv2pars = {
"flist":f_gw_list,
"m1":m1,
"m2":m2,
"s1x":s1x,
"s1y":s1y,
"s1z":s1z,
"s2x":s2x,
"s2y":s2y,
"s2z":s2z,
"fref":f_gw_ref
}
pv2angles={}
pv2angles['alpha'],pv2angles['beta'],pv2angles['epsilon']= pah.evaluate_phenomPv2_angles(**pv2pars)
# mf_gw_list = HztoMf(f_gw_list.data, (m1+m2)/lal.MSUN_SI)
import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(f_gw_list.data, pv2angles['alpha'], label='pv2')
# plt.plot(f_gw_list.data, pv3angles['alpha'], label='pv3')
# plt.title("alpha")
# plt.legend()
# plt.xscale('log')
# plt.show()
# plt.figure()
# plt.plot(f_gw_list.data, pv2angles['beta'], label='pv2')
# plt.plot(f_gw_list.data, pv3angles['beta'], label='pv3')
# plt.title("beta")
# plt.legend()
# plt.yscale('log')
# plt.xscale('log')
# plt.show()
# plt.figure()
# plt.plot(f_gw_list.data, pv2angles['epsilon'], label='pv2')
# plt.plot(f_gw_list.data, pv3angles['epsilon'], label='pv3')
# plt.title("epsilon")
# plt.legend()
# plt.xscale('log')
# plt.show()
fig, axes = plt.subplots(1,3, figsize=(14,4))
axes[0].plot(f_gw_list.data, pv2angles['alpha'], label='pv2')
axes[0].plot(f_gw_list.data, pv3angles['alpha'], label='pv3')
axes[0].set_title("alpha")
axes[0].set_xscale('log')
axes[1].plot(f_gw_list.data, pv2angles['beta'], label='pv2')
axes[1].plot(f_gw_list.data, pv3angles['beta'], label='pv3')
axes[1].set_title("beta")
axes[1].set_yscale('log')
axes[1].set_xscale('log')
axes[2].plot(f_gw_list.data, pv2angles['epsilon'], label='pv2')
axes[2].plot(f_gw_list.data, pv3angles['epsilon'], label='pv3')
axes[2].set_title("epsilon")
axes[2].set_xscale('log')
axes[2].legend()
plt.show()
|
{"hexsha": "ed6f6b52e720a27a57cd2163eb02df52fd39386b", "size": 2645, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/prec-angles/compare-pv2-pv3.py", "max_stars_repo_name": "LBJ-Wade/phenom_gw_waveform", "max_stars_repo_head_hexsha": "2c705e6ba85510c573d23dca8be4456665d29edf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-12T00:55:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-12T00:55:53.000Z", "max_issues_repo_path": "examples/prec-angles/compare-pv2-pv3.py", "max_issues_repo_name": "LBJ-Wade/phenom_gw_waveform", "max_issues_repo_head_hexsha": "2c705e6ba85510c573d23dca8be4456665d29edf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/prec-angles/compare-pv2-pv3.py", "max_forks_repo_name": "LBJ-Wade/phenom_gw_waveform", "max_forks_repo_head_hexsha": "2c705e6ba85510c573d23dca8be4456665d29edf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-10T22:31:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-10T22:31:49.000Z", "avg_line_length": 19.1666666667, "max_line_length": 99, "alphanum_fraction": 0.6536862004, "include": true, "reason": "import numpy", "num_tokens": 1129}
|
# Goal here is to automate saving out dithered positions (RA, Dec, rotTelPos)
# for non-MAF users. See function documentation below for details.
#
# Humna Awan: humna.awan@rutgers.edu
#
##########################################################################################
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import lsst.sims.maf
import lsst.sims.maf.db as db
import lsst.sims.maf.utils as mafUtils
import lsst.sims.maf.slicers as slicers
import lsst.sims.maf.metrics as metrics
import lsst.sims.maf.metricBundles as metricBundles
import lsst.sims.maf.stackers as stackers
import time
import pandas as pd
import os
import numpy as np
import datetime
__all__= ['save_csv_dithers']
def save_csv_dithers(dbs_path, outDir, db_files_only=None,
rot_rand_seed=42, trans_rand_seed=42,
print_progress=True,
show_diagnostic_plots=False, save_plots=False):
"""
The goal here is to calculate the translational and rotational dithers for
various cadences and save the output as a csv file. These dithers are largely
the same as in DC1/DC2:
- Translational dithers:
- WFD: large random offsets (as large as 1.75 deg) applied after every visit.
- DD: small random offsets (as large as 7 arcmin) applied after every visit.
- Else: no dithers, so `fieldRA`, `fieldDec` are returned.
- Rotational dithers:
- All surveys (WFD, DD, else): random between -90, 90 degrees applied after
every filter change. (Break from DC2: Some visits
dont get dithered since they are forced outside
the rotator range.
See RotStacker info for details.)
Supports OpSim V3/V4 outputs.
Required Inputs
---------------
* dbs_path: str: path to the directory that contains the .db files; could have non-.db files.
* outDir: str: path to the directory where the output should be saved.
Optional Inputs
---------------
* db_files_only: list of str: list of names of the db files to run.
Default: None. Runs over all the files in db_path.
* rot_rand_seed: int: seed for random number generator for rotational dithers.
Default: 42
* trans_rand_seed: int: seed for random number generator for translational dithers.
Default: 42
* print_progress: bool: set to False to not print progress.
Default: True
* show_diagnostic_plots: bool: set to True to show histogram of added dithers.
Default: False
* save_plots: bool: set to True to save the histogram for descDithers in outDir.
Default: False
Saved file format
-----------------
.csv file with four columns:
obsIDcol, 'descDitheredRA', 'descDitheredDec', 'descDitheredRotTelPos'
where
obsIDcol = 'observationId' for V4 outputs and 'obsHistID' for V3 outputs.
Saved filename = descDithers_<database name>.csv
"""
startTime_0 = time.time()
readme = '##############################\n%s'%(datetime.date.isoformat(datetime.date.today()))
readme += '\nRunning with lsst.sims.maf.__version__: %s'%lsst.sims.maf.__version__
readme += '\n\nsave_csv_dithers run:\ndbs_path= %s\n'%dbs_path
    readme += 'outDir: %s\n'%outDir
    readme += 'db_files_only: %s\n'%db_files_only
    readme += 'rot_rand_seed=%s\ntrans_rand_seed=%s\n'%(rot_rand_seed, trans_rand_seed)
    readme += 'print_progress=%s\nshow_diagnostic_plots=%s\n'%(print_progress, show_diagnostic_plots)
dbfiles = [f for f in os.listdir(dbs_path) if f.endswith('db')] # select db files
if print_progress: print('Found files: %s\n'%dbfiles)
if db_files_only is not None:
dbfiles = [f for f in dbfiles if f in db_files_only] # select db files
readme += '\nReading for files: %s\n\n'%dbfiles
if print_progress and db_files_only is not None: print('Running over: %s\n'%dbfiles)
for i, dbfile in enumerate(dbfiles): # loop over all the db files
startTime = time.time()
if (i!=0): readme = ''
readme += '%s'%dbfile
if print_progress: print('Starting: %s\n'%dbfile)
opsdb = db.OpsimDatabase('%s/%s'%(dbs_path, dbfile)) # connect to the database
# specify the column names to get from the db file
colnames = ['proposalId', 'observationId', 'fieldRA', 'fieldDec', 'rotTelPos']
propIDcol, obsIDcol= 'proposalId', 'observationId'
if (opsdb.opsimVersion=='V3'):
# V3 outputs have somewhat different column names
colnames = ['propID', 'obsHistID', 'fieldRA', 'fieldDec', 'rotTelPos']
propIDcol, obsIDcol= 'propID', 'obsHistID'
# get the data
simdata = opsdb.fetchMetricData(colnames=colnames, sqlconstraint=None)
# set up to run the stackers that add columns for translational and rotational dithers.
metric = metrics.PassMetric() # want to access the database; no analysis needed
slicer = slicers.OneDSlicer(sliceColName='night', binsize=1, verbose=print_progress) # essentially accessing all nights
sqlconstraint = None
resultsDb = db.ResultsDb(outDir=outDir)
################################################################################################
# set up metric bundle to run stackers for large translational dithers + rotational dithers
if print_progress: print('Setting up for WFD translational dithers + rot dithers.')
bgroup= {}
stackerList = [stackers.RandomDitherFieldPerVisitStacker(degrees=opsdb.raDecInDeg,
randomSeed=trans_rand_seed),
stackers.RandomRotDitherPerFilterChangeStacker(degrees=opsdb.raDecInDeg,
randomSeed=rot_rand_seed)]
bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint=sqlconstraint,
stackerList=stackerList)
bgroup['WFD'] = metricBundles.MetricBundleGroup({0: bundle}, opsdb, outDir=outDir,
resultsDb=resultsDb, saveEarly=False,
verbose=print_progress)
# run the bundle
bgroup['WFD'].runAll()
# set up the bundle for small translational dithers
if print_progress: print('\nSetting up for DD translational dithers.')
chipSize= 1.75*2/15
chipMaxDither= chipSize/2.
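        # Worked numbers (added for illustration): the focal plane spans
        # 1.75 deg * 2 = 3.5 deg across 15 chips, so chipSize ~ 0.233 deg and
        # chipMaxDither ~ 0.117 deg ~ 7 arcmin -- matching the "as large as
        # 7 arcmin" DD dithers described in the docstring.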
stackerList = [stackers.RandomDitherFieldPerVisitStacker(maxDither= chipMaxDither,
degrees=opsdb.raDecInDeg,
randomSeed=trans_rand_seed)]
bundle = metricBundles.MetricBundle(metric, slicer, sqlconstraint=sqlconstraint,
stackerList=stackerList)
bgroup['DD'] = metricBundles.MetricBundleGroup({0: bundle}, opsdb, outDir=outDir,
resultsDb=resultsDb, saveEarly=False,
verbose=print_progress)
# run the bundle
bgroup['DD'].runAll()
################################################################################################
# access the relevant columns
dithered_RA, dithered_Dec = {}, {}
for key in bgroup:
dithered_RA[key] = bgroup[key].simData['randomDitherFieldPerVisitRa']
dithered_Dec[key] = bgroup[key].simData['randomDitherFieldPerVisitDec']
dithered_rotTelPos = bgroup['WFD'].simData['randomDitherPerFilterChangeRotTelPos']
################################################################################################
# diagnostic plots
if show_diagnostic_plots:
# histograms of dithers
fig, axes = plt.subplots(nrows=1, ncols=3)
for key in bgroup:
# ra
axes[0].hist(dithered_RA[key]-simdata['fieldRA'],
label='%s dithers: delRA'%key, histtype='step', lw=2, bins= 30)
# dec
axes[1].hist(dithered_Dec[key]-simdata['fieldDec'],
label='%s dithers: delDec'%key, histtype='step', lw=2)
# tel pos
axes[2].hist(dithered_rotTelPos-simdata['rotTelPos'],
label='rot dithers: rotTelPos', histtype='step', lw=2)
for ax in axes:
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.set_ylabel('Counts')
axes[0].legend()
axes[1].legend()
if opsdb.raDecInDeg: unitlabel = 'degrees'
else: unitlabel = 'radians'
axes[0].set_xlabel('delRA (%s)'%unitlabel)
axes[1].set_xlabel('delDec (%s)'%unitlabel)
axes[2].set_xlabel('delRotTelPos (%s)'%unitlabel)
plt.title(dbfile)
fig.set_size_inches(20,5)
################################################################################################
# initiate the final arrays as undithered fieldRA, fieldDec as nonWFD, nonDDF should remain unchanged
descDitheredRA = simdata['fieldRA'].copy()
descDitheredDec = simdata['fieldDec'].copy()
descDitheredRot = simdata['rotTelPos'].copy()
# need to find the indices for WFD vs. DD observations since we are adding different
# translational dithers for WFD/DDF visits + none for other surveys
propIds, propTags = opsdb.fetchPropInfo()
# ok work with WFD visits now
ind_WFD = np.where(simdata[propIDcol]==propTags['WFD'])[0]
if print_progress:
tot= len(simdata)
print('Total visits: ', tot)
print('propTags: ', propTags)
print('%s WFD visits out of total %s'%(len(ind_WFD), tot))
descDitheredRA[ind_WFD] = dithered_RA['WFD'][ind_WFD]
descDitheredDec[ind_WFD] = dithered_Dec['WFD'][ind_WFD]
# work with DD visits now
ind_DD = np.where(simdata[propIDcol]==propTags['DD'])[0]
if print_progress:
print('%s DD visits out of total %s'%(len(ind_DD), tot))
descDitheredRA[ind_DD] = dithered_RA['DD'][ind_DD]
descDitheredDec[ind_DD] = dithered_Dec['DD'][ind_DD]
# add rotational dithers to everything
descDitheredRot = dithered_rotTelPos
###############################################################
# diagnostic plots
if show_diagnostic_plots or save_plots:
# histograms of desc dithered positions
fig, axes = plt.subplots(nrows=1, ncols=3)
_, bins, _ = axes[0].hist(descDitheredRA, label='descDitheredRA', histtype='step', lw=2)
axes[0].hist(simdata['fieldRA'], label='fieldRA', histtype='step', lw=2, bins=bins)
_, bins, _ = axes[1].hist(descDitheredDec, label='descDitheredDec', histtype='step', lw=2)
axes[1].hist(simdata['fieldDec'], label='fieldDec', histtype='step', lw=2, bins=bins)
_, bins, _ = axes[2].hist(descDitheredRot, label='descDitheredRot', histtype='step', lw=2)
axes[2].hist(simdata['rotTelPos'], label='rotTelPos', histtype='step', lw=2, bins=bins)
if opsdb.raDecInDeg: xlabel = 'degrees'
else: xlabel = 'radians'
for ax in axes:
ax.legend()
ax.set_xlabel(xlabel)
ax.set_ylabel('Counts')
plt.suptitle(dbfile)
fig.set_size_inches(20,5)
if save_plots:
filename='hist_descDithers_%s.png'%(dbfile.split('.db')[0])
plt.savefig('%s/%s'%(outDir, filename), format= 'png', bbox_inches='tight')
readme += '\nSaved hist for descDithers in %s.'%filename
if print_progress:
print('\nSaved hist plot in %s'%filename)
if show_diagnostic_plots:
plt.show()
else:
plt.close('all')
###############################################################
# save the columns as a csv file.
d= {obsIDcol: simdata[obsIDcol],
'descDitheredRA': descDitheredRA, 'descDitheredDec': descDitheredDec,
'descDitheredRotTelPos': descDitheredRot}
filename= 'descDithers_%s.csv'%(dbfile.split('.db')[0])
pd.DataFrame(d).to_csv('%s/%s'%(outDir, filename), index=False)
readme += '\nSaved the dithers in %s'%filename
readme += '\nTime taken: %.2f (min)\n\n'%((time.time()-startTime)/60.)
if print_progress:
print('\nSaved the dithers in %s'%filename)
print('Time taken: %.2f (min)\n\n'%((time.time()-startTime)/60.))
readme_file= open('%s/readme.txt'%(outDir), 'a')
readme_file.write(readme)
readme_file.close()
# mark the end in the readme.
readme_file= open('%s/readme.txt'%(outDir), 'a')
readme_file.write('All done. Total time taken: %.2f (min)\n\n'%((time.time()-startTime_0)/60.))
readme_file.close()
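# Minimal usage sketch (added; the paths and file name below are placeholders,
# not taken from the original source):
# save_csv_dithers(dbs_path='/path/to/opsim_dbs', outDir='/path/to/output',
#                  db_files_only=['baseline_v1.db'], print_progress=True)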
|
{"hexsha": "706aed88bcdb3790210aef93960416aa9182c3d1", "size": 13924, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/descDithers/save_csv_dithers.py", "max_stars_repo_name": "galdering/ObsStrat", "max_stars_repo_head_hexsha": "1032e5c66ee9d7e6b8d8ddde443670489d32e7fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-07-16T21:36:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T02:47:29.000Z", "max_issues_repo_path": "code/descDithers/save_csv_dithers.py", "max_issues_repo_name": "galdering/ObsStrat", "max_issues_repo_head_hexsha": "1032e5c66ee9d7e6b8d8ddde443670489d32e7fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2018-07-23T13:12:29.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-19T19:50:02.000Z", "max_forks_repo_path": "code/descDithers/save_csv_dithers.py", "max_forks_repo_name": "galdering/ObsStrat", "max_forks_repo_head_hexsha": "1032e5c66ee9d7e6b8d8ddde443670489d32e7fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-10-19T19:41:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-30T00:31:20.000Z", "avg_line_length": 47.6849315068, "max_line_length": 129, "alphanum_fraction": 0.5504883654, "include": true, "reason": "import numpy", "num_tokens": 3275}
|
SUBROUTINE zcorec6 (IFTOLD, IFTNEW, CPOLD, CPNEW,
* IBUFF1, KBUFF1, IBUFF2, KBUFF2, LDUP, ISTAT)
C
C
C     Copy a single record using buffered reads and writes.
C     This allows us to copy as big a record as is in the file
C     (unlimited size).
C     The record can be copied from another file, or can be duplicated
C     in the same file (with a different pathname).
C
C Written by Bill Charley, HEC, 1989.
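C
C     Hypothetical call (unit tables, pathnames, and buffer sizes below
C     are illustrative placeholders, not values from a real application):
C
C        CALL zcorec6 (IFTAB1, IFTAB2, CPATH1, CPATH2,
C     *                IBUF1, 500, IBUF2, 500, .FALSE., ISTAT)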
C
C
INTEGER IFTOLD(*), IFTNEW(*), IBUFF1(*), IBUFF2(*)
INTEGER KBUFF1, KBUFF2, ISTAT
LOGICAL LDUP
CHARACTER CPOLD*(*), CPNEW*(*)
C
CHARACTER CDAT*9, CTIM*4, CUNITS*8, CTYPE*8
CHARACTER CSDATE*9, CSTIME*4, CSPROG*8
INTEGER IBPART(6), IEPART(6), ILPART(6)
LOGICAL LFOUND, LRETAG, L, LTSCOPY
INTEGER INFORN(20)
INTEGER JTYPE
C
COMMON /WORDS/ IWORD(10)
C
C
INCLUDE 'zdsskz.h'
C
INCLUDE 'zdsscz.h'
C
INCLUDE 'zdssnz.h'
C
INCLUDE 'zdsslz.h'
C
INCLUDE 'zdssiz.h'
C
INCLUDE 'zdssmz.h'
C
C
C
IF (MLEVEL.GE.11) THEN
IF (LDUP) THEN
WRITE (MUNIT, 20) IFTOLD(KUNIT), CPOLD,
* CPNEW, KBUFF1, KBUFF2
20 FORMAT (T6,'-----DSS---Debug: Enter zcorec6, duplicate',/,
* T5,'From: Unit:',I5,' Path: ',A,/,
* T5,' To: Path: ',A,/,
* T5,'KBUFF1:',I8,', KBUFF2:',I8)
ELSE
WRITE (MUNIT, 21) IFTOLD(KUNIT), CPOLD,
* IFTNEW(KUNIT), CPNEW, KBUFF1, KBUFF2
21 FORMAT (T6,'-----DSS---Debug: Enter zcorec6',/,
* T5,'From: Unit:',I5,' Path: ',A,/,
* T5,' To: Unit:',I5,' Path: ',A,/,
* T5,'KBUFF1:',I8,', KBUFF2:',I8)
ENDIF
ENDIF
C
C
IF (.NOT. LDUP) THEN
IF (IFTOLD(KSWAP).NE.IFTNEW(KSWAP)) GO TO 950
ENDIF
C
ISTAT = 0
IF ((KBUFF1.LE.0).OR.(KBUFF2.LE.0)) GO TO 900
C
C
C Are we in a read only state?
IF (LDUP) THEN
IF (IFTOLD(KREADO).EQ.1) GO TO 940
CALL zmultu6 ( IFTOLD, .TRUE., .TRUE.)
ELSE
IF (IFTNEW(KREADO).EQ.1) GO TO 940
C Get multiple user access
CALL zmultu6 ( IFTNEW, .TRUE., .TRUE.)
ENDIF
C
C
CALL zreadx6 (IFTOLD, CPOLD, IBUFF1, KBUFF1, NIHEAD,
* IBUFF2, KBUFF2, NCHEAD, IUHEAD, 0, NUHEAD, IDATA,
* 0, NDATA, 2, LFOUND)
IF (IFTOLD(KSTAT).NE.0) GO TO 960
IF (.NOT.LFOUND) THEN
ISTAT = 1
GO TO 800
ENDIF
IF ((NIHEAD.GT.KBUFF1).OR.(NCHEAD.GT.KBUFF2)) GO TO 910
C
C     Save pertinent info from the info block
NLEN = KIQUAL - KILNDA + 1
DO 40 I=1,NLEN
J = I + NPPWRD + KILNDA - 1
INFORN(I) = INFO(J)
40 CONTINUE
DO 45 I=1,NPASS
J = I + NPPWRD + KIPASS - 1
IPASS(I) = INFO(J)
45 CONTINUE
C
IDADD = INFO(NPPWRD+KIADAT)
IHADD = INFO(NPPWRD+KIAUHE)
ITYPE = INFO(NPPWRD+KITYPE)
JTYPE = ITYPE
NLDATA = INFO(NPPWRD+KILNDA)
IPREC = INFO(NPPWRD+KIPREC)
IF ((ITYPE.GE.110).AND.(ITYPE.LT.120)) THEN
C Irregular interval - check for pseudo regular interval
INTL_PSEUDO = IBUFF1(5)
ENDIF
CTAG = ' '
CALL HOLCHR (INFO(NPPWRD+KITAG), 1, NTAGC, CTAG, 1)
C
CALL CHRLNB(CPNEW, N)
IF (MLEVEL.GE.3) WRITE (MUNIT,50) CPNEW(1:N)
50 FORMAT(' -----DSS--- zcorec6; Record: ',A)
C
C
C     If this data is time-series, and we need to use the compression
C     method of the new file, call zrrtsx6, then zsrtsx6 to do this,
C     or if we are copying 15- or 30-minute data.
LTSCOPY = .FALSE.
IF (JTYPE.EQ.100) THEN
IF ((LTSCMP).AND.(INFO(NPPWRD+KIQUAL).EQ.0)) THEN
IF (.NOT. LDUP) THEN
          IF ((IFTNEW(KCOMPN).GT.0).OR.(INFO(NPPWRD+KICOMP).GT.0)) THEN
            LTSCOPY = .TRUE.
          ENDIF
ENDIF
ENDIF
ENDIF
C
IF (LTSCOPY) THEN
CDAT = ' '
NVALS = KBUFF1
CALL zrrtsx (IFTOLD, CPOLD, CDAT, CTIM, NVALS, IBUFF1, IDUM,
* .FALSE., L, CUNITS, CTYPE, IBUFF2, KBUFF2, NUHEAD, IOFSET,
* J, JSTAT)
IF (JSTAT.GE.5) GO TO 920
C
C Reset the record information to match the old record
ISVER = IBVER
ISPREC = IPREC
CSPROG = CPROG(1:8)
CSDATE = CDATE(1:9)
CSTIME = CTIME(1:4)
IBVER = INFO(NPPWRD+KIVER)
IPREC = INFO(NPPWRD+KIPREC)
CALL HOLCHR (INFO(NPPWRD+KIPROG), 1, NPROGC, CPROG, 1)
CALL HOLCHR (INFO(NPPWRD+KIDATE), 1, NDATEC, CDATE, 1)
CALL HOLCHR (INFO(NPPWRD+KITIME), 1, NTIMEC, CTIME, 1)
C
C Get the date and time of the data (and adjust the offset)
CALL zupath (CPOLD, IBPART, IEPART, ILPART, I)
CDAT = CPOLD(IBPART(4):IEPART(4))
CALL DATJUL (CDAT, JUL, IERR)
I = 1
CALL zgintl6 (INTL, CPOLD(IBPART(5):IEPART(5)), N, I)
ITIME = 1
CALL zofset6 (JUL, ITIME, INTL, 2, IOFSET)
CALL JULDAT (JUL, 104, CDAT, N)
N = M2IHM (ITIME, CTIM)
C
IF (LDUP) THEN
CALL zsrtsx (IFTOLD, CPNEW, CDAT, CTIM, NVALS, IBUFF1, IDUM,
* .FALSE., CUNITS, CTYPE, IBUFF2, NUHEAD, 0, 0, B, L, L, N, JSTAT)
ELSE
CALL zsrtsx (IFTNEW, CPNEW, CDAT, CTIM, NVALS, IBUFF1, IDUM,
* .FALSE., CUNITS, CTYPE, IBUFF2, NUHEAD, 0, 0, B, L, L, N, JSTAT)
ENDIF
C
C Reset the record information
IBVER = ISVER
IPREC = ISPREC
CPROG = CSPROG
CDATE = CSDATE
CTIME = CSTIME
C
GO TO 800
ENDIF
C
C
C Check if new record exists
LWRITE = .TRUE.
CALL CHRLNB (CPNEW, NPNEW)
IF (LDUP) THEN
CALL zcheck6 (IFTOLD, CPNEW, NPNEW, JHEAD, JDATA, LFOUND)
C If the pathname was not found by zcheck6 write new pointers
C
IF (.NOT.LFOUND) THEN
CALL znwrit6 (IFTOLD, CPNEW, NPNEW, NIHEAD, NCHEAD, NUHEAD, NDATA)
ELSE
IF (LPROTC) GO TO 930
CALL zowrit6 (IFTOLD, CPNEW, NPNEW, NIHEAD, NCHEAD, NUHEAD,
* NDATA)
ENDIF
IF (IFTOLD(KSTAT).NE.0) GO TO 960
ELSE
CALL zcheck6 (IFTNEW, CPNEW, NPNEW, JHEAD, JDATA, LFOUND)
C If the pathname was not found by zcheck6 write new pointers
C
IF (.NOT.LFOUND) THEN
CALL znwrit6 (IFTNEW, CPNEW, NPNEW, NIHEAD, NCHEAD, NUHEAD, NDATA)
ELSE
IF (LPROTC) GO TO 930
CALL zowrit6 (IFTNEW, CPNEW, NPNEW, NIHEAD, NCHEAD, NUHEAD,
* NDATA)
ENDIF
IF (IFTNEW(KSTAT).NE.0) GO TO 960
ENDIF
C
C Update the information block to contain what the old one had
C
DO 60 I=1,NLEN
J = I + NPPWRD + KILNDA - 1
INFO(J) = INFORN(I)
60 CONTINUE
DO 70 I=1,NPASS
J = I + NPPWRD + KIPASS - 1
INFO(J) = IPASS(I)
70 CONTINUE
C
ISIZE = NPPWRD + NINFO
IF (LDUP) THEN
CALL zptrec6 (IFTOLD, INFO, ISIZE, IPNBIN(JPNBIN+NPPWRD+KBAINF),
* .FALSE.)
C Now store the internal header and the compression headers
C Store the header array
IF (NIHEAD.GT.0)
*CALL zptrec6(IFTOLD, IBUFF1, NIHEAD, INFO(NPPWRD+KIAIHE), .FALSE.)
IF (NCHEAD.GT.0)
*CALL zptrec6(IFTOLD, IBUFF2, NCHEAD, INFO(NPPWRD+KIACHE), .FALSE.)
ELSE
CALL zptrec6 (IFTNEW, INFO, ISIZE, IPNBIN(JPNBIN+NPPWRD+KBAINF),
* .FALSE.)
C Now store the internal header and the compression headers
C Store the header array
IF (NIHEAD.GT.0)
*CALL zptrec6(IFTNEW, IBUFF1, NIHEAD, INFO(NPPWRD+KIAIHE), .FALSE.)
IF (NCHEAD.GT.0)
*CALL zptrec6(IFTNEW, IBUFF2, NCHEAD, INFO(NPPWRD+KIACHE), .FALSE.)
ENDIF
C
C
C Copy the Users Header block
IF (NUHEAD.GT.0) THEN
NH = MIN0(NUHEAD,KBUFF1)
NHTOT = 0
JHADD = INFO(NPPWRD+KIAUHE)
100 CONTINUE
CALL zgtrec6 (IFTOLD, IBUFF1, NH, IHADD, .FALSE.)
IF (LDUP) THEN
CALL zptrec6 (IFTOLD, IBUFF1, NH, JHADD, .FALSE.)
ELSE
CALL zptrec6 (IFTNEW, IBUFF1, NH, JHADD, .FALSE.)
ENDIF
NHTOT = NHTOT + NH
IF (NHTOT.LT.NUHEAD) THEN
IHADD = IHADD + NH
JHADD = JHADD + NH
NH = NUHEAD - NHTOT
NH = MIN0(NH,KBUFF1)
GO TO 100
ENDIF
ENDIF
C
C Copy the data array
IF (NDATA.GT.0) THEN
ND = MIN0(NDATA,KBUFF1)
NDTOT = 0
JDADD = INFO(NPPWRD+KIADAT)
120 CONTINUE
CALL zgtrec6 (IFTOLD, IBUFF1, ND, IDADD, .FALSE.)
C Do we need to swap words on version conversion doubles?
IF (.NOT. LDUP) THEN
IF (IFTOLD(KDSWAP).NE.IFTNEW(KDSWAP)) THEN
IF (JTYPE.EQ.205) THEN
CALL zdswap6(IBUFF1, ND)
IF (MLEVEL.GE.11) WRITE (MUNIT, *)'Swapping PDD'
ELSE IF (JTYPE.EQ.105) THEN
IF (MLEVEL.GE.11) WRITE (MUNIT, *)'Swapping RTD'
IF (INFO(NPPWRD+KIQUAL).EQ.0) THEN
CALL zdswap6(IBUFF1, ND)
ELSE
N = (ND * 2) / 3
CALL zdswap6(IBUFF1, N)
ENDIF
ELSE IF (JTYPE.EQ.115) THEN
IF (MLEVEL.GE.11) WRITE (MUNIT, *)'Swapping ITD'
IMULT = 3
IF (INFO(NPPWRD+KIQUAL).NE.0) IMULT = 4
DO 140, I=1,ND,IMULT
CALL zdswap6(IBUFF1(I+1), 1)
140 CONTINUE
ENDIF
ENDIF
CALL zptrec6 (IFTNEW, IBUFF1, ND, JDADD, .FALSE.)
ELSE
CALL zptrec6 (IFTOLD, IBUFF1, ND, JDADD, .FALSE.)
ENDIF
NDTOT = NDTOT + ND
IF (NDTOT.LT.NDATA) THEN
IDADD = IDADD + ND
JDADD = JDADD + ND
ND = NDATA - NDTOT
ND = MIN0(ND,KBUFF1)
GO TO 120
ENDIF
ENDIF
IF (.NOT. LDUP) THEN
IF (IFTNEW(KSTAT).NE.0) GO TO 960
ENDIF
IF (IFTOLD(KSTAT).NE.0) GO TO 960
C
C
C
800 CONTINUE
C Release multiple user access
IF (LDUP) THEN
CALL zmultu6 ( IFTOLD, .FALSE., .TRUE.)
ELSE
CALL zmultu6 ( IFTNEW, .FALSE., .TRUE.)
ENDIF
LWRITE = .FALSE.
INTL_PSEUDO = 0
IF (MLEVEL.GE.11) WRITE (MUNIT, 820)
820 FORMAT (T6,'-----DSS---Debug: Exit zcorec6')
      CALL FLUSH(MUNIT)
RETURN
C
900 CONTINUE
ISTAT = -1
IF (MLEVEL.GE.1) WRITE (MUNIT, 901) KBUFF1, KBUFF2
901 FORMAT (/,' -----DSS---zcorec6: ERROR; Null length buffer(s)',
* ' supplied for copy',/,' Buffer Sizes:',2I8,/)
GO TO 800
C
910 CONTINUE
ISTAT = -2
IF (MLEVEL.GE.2) WRITE (MUNIT, 911) NIHEAD, KBUFF1, NCHEAD, KBUFF2
911 FORMAT (/,' -----DSS---zcorec6: ERROR; Insufficient buffer(s)',
* ' supplied for copy',/,' Needed:',I9,', Supplied:',I9,
* '; Needed:',I9,', Supplied:',I9)
GO TO 800
C
920 CONTINUE
ISTAT = 1
IF (MLEVEL.GE.2) WRITE (MUNIT, 921) CPOLD, JSTAT
921 FORMAT (/,' -----DSS---zcorec6: ERROR; Unable to Retrieve Data',
* /,' Pathname: ',A,/,' Status:',I5)
GO TO 800
C
930 CONTINUE
ISTAT = 2
IF (MLEVEL.GE.2) WRITE (MUNIT, 931) CPNEW
931 FORMAT (' -----DSS---zcorec6: Write Protection for Existing',
* ' Record (no data written)',/,
* ' Pathname: ',A)
C
C
940 CONTINUE
IF (MLEVEL.GE.1) WRITE (MUNIT, 941) CPNEW
941 FORMAT (' -----DSS---zcorec6: ERROR; File has Read Access Only',
* /,' Pathname: ',A)
ISTAT = -12
GO TO 800
C
950 CONTINUE
ISTAT = -5
WRITE (MUNIT, 951)
951 FORMAT (/' -----DSS---zcorec6: ERROR; Files are of different',
* ' endian type.'/' Can only copy files with the same byte order')
GO TO 800
C
C
960 CONTINUE
IF (LDUP) THEN
ISTAT = IFTOLD(KSTAT)
WRITE (MUNIT, 961) IFTOLD(KSTAT)
ELSE
ISTAT = IFTNEW(KSTAT)
WRITE (MUNIT, 961) IFTNEW(KSTAT)
ENDIF
961 FORMAT (/,' *****DSS*** zcorec6: ERROR - UNABLE TO ',
* ' COPY DATA',/,' Status: ',I8)
GO TO 800
C
END
SUBROUTINE zcorec(ifltabFrom, ifltabTo, cpathFrom, cpathTo,
* IBUFF1, KBUFF1, IBUFF2, KBUFF2, LDUP, ISTAT)
C
INTEGER ifltabFrom(*)
CHARACTER cpathFrom*(*)
INTEGER ifltabTo(*)
CHARACTER cpathTo*(*)
C
INTEGER IBUFF1(*), IBUFF2(*)
LOGICAL LDUP
INTEGER KBUFF1, KBUFF2, ISTAT
C
call zGetVersion(ifltabFrom, iversion1)
IF (LDUP) THEN
iversion2 = iversion1
ELSE
call zGetVersion(ifltabTo, iversion2)
ENDIF
C
if (iversion1.eq.iversion2) then
IF (iversion1.EQ.6) THEN
call zcorec6(ifltabFrom, ifltabTo, cpathFrom, cpathTo,
* IBUFF1, KBUFF1, IBUFF2, KBUFF2, LDUP, ISTAT)
else
if (cpathFrom.eq.cpathTo) then
C ******* FIX ME ****************
call zcopyRecord (ifltabFrom, ifltabTo, cpathFrom, cpathTo,
* ISTAT)
else
call zcopyRecord (ifltabFrom, ifltabTo, cpathFrom, cpathTo,
* ISTAT)
endif
endif
else
call zcopyRecord (ifltabFrom, ifltabTo, cpathFrom, cpathTo,
* ISTAT)
endif
      IF (iversion1.EQ.6) THEN
call zstags6 (IFLTAB, CLINE, ISTAT)
endif
return
end
|
{"hexsha": "af9135897c324db4a221df43537749b56fb6c2cc", "size": 12885, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "heclib/heclib_f/src/zcorec6.f", "max_stars_repo_name": "HydrologicEngineeringCenter/heclib", "max_stars_repo_head_hexsha": "dd3111ee2a8d0c80b88d21bd529991f154fec40a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2021-03-09T17:42:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T21:46:47.000Z", "max_issues_repo_path": "heclib/heclib_f/src/zcorec6.f", "max_issues_repo_name": "HydrologicEngineeringCenter/heclib", "max_issues_repo_head_hexsha": "dd3111ee2a8d0c80b88d21bd529991f154fec40a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2021-06-17T20:01:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T21:33:29.000Z", "max_forks_repo_path": "heclib/heclib_f/src/zcorec6.f", "max_forks_repo_name": "HydrologicEngineeringCenter/heclib", "max_forks_repo_head_hexsha": "dd3111ee2a8d0c80b88d21bd529991f154fec40a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-06-16T17:48:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T16:44:47.000Z", "avg_line_length": 29.4851258581, "max_line_length": 81, "alphanum_fraction": 0.5656965464, "num_tokens": 4799}
|
/*
@copyright Louis Dionne 2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#include <boost/hana/ext/std/type_traits.hpp>
#include <boost/hana/assert.hpp>
#include <boost/hana/integral_constant.hpp>
#include <boost/hana/type.hpp>
using namespace boost::hana;
enum Enumeration { };
struct Structure { };
constexpr auto e = type<Enumeration>;
constexpr auto s = type<Structure>;
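// `type<T>` lifts a C++ type into a hana value, so the traits below can be
// applied to `e` and `s` like ordinary functions.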
int main() {
// We just make sure that they compile. If the forwarding to `std::` is
// well done, it is the job of `std::` to return the right thing.
///////////////////
// Type properties
///////////////////
// Primary type categories
static_assert(!traits::is_void(s), "the traits should be compile-time checkable");
traits::is_null_pointer(s);
traits::is_integral(s);
traits::is_floating_point(s);
traits::is_array(s);
traits::is_enum(s);
traits::is_union(s);
traits::is_class(s);
traits::is_function(s);
traits::is_pointer(s);
traits::is_lvalue_reference(s);
traits::is_rvalue_reference(s);
traits::is_member_object_pointer(s);
traits::is_member_function_pointer(s);
// Composite type categories
traits::is_fundamental(s);
traits::is_arithmetic(s);
traits::is_scalar(s);
traits::is_object(s);
traits::is_compound(s);
traits::is_reference(s);
traits::is_member_pointer(s);
// Type properties
traits::is_const(s);
traits::is_volatile(s);
traits::is_trivial(s);
traits::is_trivially_copyable(s);
traits::is_standard_layout(s);
traits::is_pod(s);
traits::is_literal_type(s);
traits::is_empty(s);
traits::is_polymorphic(s);
traits::is_abstract(s);
traits::is_signed(s);
traits::is_unsigned(s);
// Supported operations
traits::is_constructible(s, s);
traits::is_trivially_constructible(s, s);
traits::is_nothrow_constructible(s, s);
traits::is_default_constructible(s);
traits::is_trivially_default_constructible(s);
traits::is_nothrow_default_constructible(s);
traits::is_copy_constructible(s);
traits::is_trivially_copy_constructible(s);
traits::is_nothrow_copy_constructible(s);
traits::is_move_constructible(s);
traits::is_trivially_move_constructible(s);
traits::is_nothrow_move_constructible(s);
traits::is_assignable(s, s);
traits::is_trivially_assignable(s, s);
traits::is_nothrow_assignable(s, s);
traits::is_copy_assignable(s);
traits::is_trivially_copy_assignable(s);
traits::is_nothrow_copy_assignable(s);
traits::is_move_assignable(s);
traits::is_trivially_move_assignable(s);
traits::is_nothrow_move_assignable(s);
traits::is_destructible(s);
traits::is_trivially_destructible(s);
traits::is_nothrow_destructible(s);
traits::has_virtual_destructor(s);
// Property queries
traits::alignment_of(s);
traits::rank(s);
traits::extent(s);
traits::extent(type<int[2][3]>, uint<1>);
// Type relationships
traits::is_same(s, s);
traits::is_base_of(s, s);
traits::is_convertible(s, s);
///////////////////
// Type modifications
///////////////////
// Const-volatility specifiers
traits::remove_cv(s);
traits::remove_const(s);
traits::remove_volatile(s);
traits::add_cv(s);
traits::add_const(s);
traits::add_volatile(s);
// References
traits::remove_reference(s);
traits::add_lvalue_reference(s);
traits::add_rvalue_reference(s);
// Pointers
traits::remove_pointer(s);
traits::add_pointer(s);
// Sign modifiers
traits::make_signed(type<unsigned>);
traits::make_unsigned(type<signed>);
// Arrays
traits::remove_extent(s);
traits::remove_all_extents(s);
// Miscellaneous transformations
traits::aligned_storage(size_t<1>);
traits::aligned_storage(size_t<1>, size_t<1>);
traits::aligned_union(size_t<0>, s);
traits::decay(s);
traits::common_type(s, s);
traits::underlying_type(e);
using FunctionPointer = void(*)();
traits::result_of(type<FunctionPointer(void)>);
}
|
{"hexsha": "53b7594b318d50252a8772e6d6637616742c5cf6", "size": 4199, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/ext/std/type_traits.cpp", "max_stars_repo_name": "josephwinston/hana", "max_stars_repo_head_hexsha": "a8586ec1812e14e43dfd6867209412aa1d254e1a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/ext/std/type_traits.cpp", "max_issues_repo_name": "josephwinston/hana", "max_issues_repo_head_hexsha": "a8586ec1812e14e43dfd6867209412aa1d254e1a", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/ext/std/type_traits.cpp", "max_forks_repo_name": "josephwinston/hana", "max_forks_repo_head_hexsha": "a8586ec1812e14e43dfd6867209412aa1d254e1a", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.625, "max_line_length": 86, "alphanum_fraction": 0.6742081448, "num_tokens": 1046}
|
[STATEMENT]
lemma uint_0_iff: "uint x = 0 \<longleftrightarrow> x = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (uint x = 0) = (x = 0)
[PROOF STEP]
by (auto simp add: unsigned_word_eqI)
|
{"llama_tokens": 88, "file": null, "length": 1}
|
# Defined in Section 2.1.2
import numpy as np
M = np.array([[0, 2, 1, 1, 1, 1, 1, 2, 1, 3],
[2, 0, 1, 1, 1, 0, 0, 1, 1, 2],
[1, 1, 0, 1, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 1, 0, 1, 0, 1],
[2, 1, 0, 0, 0, 1, 1, 0, 1, 2],
[1, 1, 0, 0, 0, 0, 0, 1, 0, 1],
[3, 2, 1, 1, 1, 1, 1, 2, 1, 0]])
def pmi(M, positive=True):
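    """Return the pointwise mutual information matrix for co-occurrence counts M.

    PMI(i, j) = log( p(i, j) / (p(i) * p(j)) ); with positive=True, negative
    entries are clipped to 0, yielding the PPMI matrix.
    """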
col_totals = M.sum(axis=0)
row_totals = M.sum(axis=1)
total = col_totals.sum()
expected = np.outer(row_totals, col_totals) / total
M = M / expected
# Silence distracting warnings about log(0):
with np.errstate(divide='ignore'):
M = np.log(M)
    M[np.isinf(M)] = 0.0  # map the -inf produced by log(0) back to 0
if positive:
M[M < 0] = 0.0
return M
M_pmi = pmi(M)
np.set_printoptions(precision=2)
print(M_pmi)
U, s, Vh = np.linalg.svd(M_pmi)
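# The first two columns of U give a 2-D embedding for each word, plotted below.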
import matplotlib.pyplot as plt
# plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']
plt.rcParams['font.sans-serif'] = ['SimHei']  # a font that can render the Chinese labels
plt.rcParams['axes.unicode_minus'] = False
words = ["我", "喜欢", "自然", "语言", "处理", "爱", "深度", "学习", "机器", "。"]
for i in range(len(words)):
plt.text(U[i, 0], U[i, 1], words[i])
plt.xlim(0, 0.6)
plt.ylim(-0.5, 0.6)
plt.savefig('svd.pdf')
plt.show()
|
{"hexsha": "71ea22b7c6d945321f19d74485b1fac6444aa98d", "size": 1401, "ext": "py", "lang": "Python", "max_stars_repo_path": "chp2/svd.py", "max_stars_repo_name": "Sheldoer/plm-nlp-code", "max_stars_repo_head_hexsha": "04127d137c8bd40bc1412bee863640b9d909ddf9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chp2/svd.py", "max_issues_repo_name": "Sheldoer/plm-nlp-code", "max_issues_repo_head_hexsha": "04127d137c8bd40bc1412bee863640b9d909ddf9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chp2/svd.py", "max_forks_repo_name": "Sheldoer/plm-nlp-code", "max_forks_repo_head_hexsha": "04127d137c8bd40bc1412bee863640b9d909ddf9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9423076923, "max_line_length": 65, "alphanum_fraction": 0.483226267, "include": true, "reason": "import numpy", "num_tokens": 648}
|
(*
Copyright 2014 Cornell University
This file is part of VPrl (the Verified Nuprl project).
VPrl is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
VPrl is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
  along with VPrl.  If not, see <http://www.gnu.org/licenses/>.
Website: http://nuprl.org/html/verification/
Authors: Abhishek Anand & Vincent Rahli
*)
Require Import continuity.
Require Import continuity2.
Require Import continuity3.
Require Export continuity_defs2.
(* !! MOVE to opid *)
Definition fresh_atom (o : POpid) : FreshFun (get_patom_set o) :=
dsetFresh (patom o).
Definition uexcc {o} (a : get_patom_set o) :=
mkc_nexception (Some a) mkc_axiom.
Definition get_utokensc {o} (t : @CTerm o) : list (get_patom_set o) :=
get_utokens (get_cterm t).
Lemma comp_force_int_app_F_c {o} :
forall lib (F f : @CTerm o) x z,
reduces_toc
lib
(force_int_F_c x F f)
(mkc_integer z)
-> {b : nat
& forall (e : CTerm) b',
b <= b'
-> reduces_toc
lib
(force_int_bound_F_c x b' F f e)
(mkc_integer z)}.
Proof.
introv r.
unfold reduces_toc in r.
rw @get_cterm_force_int_F_c in r.
simpl in r.
apply comp_force_int_app_F in r; exrepnd.
exists b.
introv l.
pose proof (r0 (get_cterm e) b') as h.
repeat (autodimp h hyp).
{ rw @free_vars_cterm; sp. }
unfold reduces_toc.
rw @get_cterm_force_int_bound_F_c; auto.
Qed.
Lemma comp_force_int_app_F3_c {o} :
forall lib a (F f g : @CTerm o) x z b,
!LIn a (get_utokensc F)
-> !LIn a (get_utokensc f)
-> !LIn a (get_utokensc g)
-> agree_upto_bc lib b f g
-> reduces_toc
lib
(force_int_bound_F_c x b F f (uexcc a))
(mkc_integer z)
-> reduces_toc
lib
(force_int_bound_F_c x b F g (uexcc a))
(mkc_integer z).
Proof.
introv ni1 ni2 ni3 agree r.
allunfold @reduces_toc.
allrw @get_cterm_force_int_bound_F_c.
allsimpl.
apply (comp_force_int_app_F3 lib a (get_cterm F) (get_cterm f) (get_cterm g)); auto;
allrw @free_vars_cterm; allsimpl; tcsp; eauto with slow.
Qed.
Lemma comp_force_int_app_F2_c {o} :
forall lib a (F g : @CTerm o) x z b,
!LIn a (get_utokensc F)
-> !LIn a (get_utokensc g)
-> reduces_toc
lib
(force_int_bound_F_c x b F g (uexcc a))
(mkc_integer z)
-> reduces_toc
lib
(force_int_F_c x F g)
(mkc_integer z).
Proof.
introv ni1 ni2 r.
allunfold @reduces_toc.
allrw @get_cterm_force_int_bound_F_c.
allrw @get_cterm_force_int_F_c.
allsimpl.
apply (comp_force_int_app_F2 lib a (get_cterm F) (get_cterm g) x z b); auto;
allrw @free_vars_cterm; allsimpl; tcsp; eauto with slow.
Qed.
(*
F f -> z
=> (* by typing *)
F (\x.let x:=(x + 0) in f(x)) -> z
=> (* by comp_force_int_app_F *)
exists b. forall e.
F (\x.let x:=(let x:=x in if |x|<b then x else e) in f(x)) -> z
=> (* if e cannot get caught, because the 2 functions agree upto b *)
F (\x.let x:=(let x:=x in if |x|<b then x else e) in g(x)) -> z
=> (* comp_force_int_app_F2 *)
F (\x.let x:=(x + 0) in g(x)) -> z
=> (* by typing *)
F g -> z
*)
Lemma continuity_axiom {o} :
forall lib (F : @CTerm o),
member lib F (mkc_fun (mkc_fun mkc_int mkc_int) mkc_int)
-> continuous lib F.
Proof.
introv mT mt.
assert (member lib (mkc_apply F f) mkc_int) as ma.
{ rw @equality_in_fun in mT; repnd.
apply mT; auto. }
(* by typing *)
assert (equality lib f (force_int_f_c nvarx f) (mkc_fun mkc_int mkc_int))
as ea by (apply equality_force_int_f_c; auto).
assert (equality lib (mkc_apply F f) (mkc_apply F (force_int_f_c nvarx f)) mkc_int) as mb.
{ rw @equality_in_fun in mT; repnd.
apply mT; auto. }
apply equality_in_int in mb.
apply equality_of_int_imp_tt in mb.
unfold equality_of_int_tt in mb; exrepnd; GC.
(* 1st step *)
pose proof (comp_force_int_app_F_c lib F f nvarx k) as step1.
autodimp step1 hyp.
{ rw @computes_to_valc_iff_reduces_toc in mb0; repnd; auto. }
destruct step1 as [b step1].
exists b.
introv mg agree.
  (* now pick a fresh atom to instantiate step1 with the right exception,
     to then use comp_force_int_app_F3. *)
pose proof (fresh_atom o (get_utokensc F ++ get_utokensc f ++ get_utokensc g)) as fr.
destruct fr as [a ni].
allrw in_app_iff; allrw not_over_or; repnd.
pose proof (step1 (uexcc a) b) as r1.
autodimp r1 hyp.
(* 2nd step *)
pose proof (comp_force_int_app_F3_c lib a F f g nvarx k b) as step2.
repeat (autodimp step2 hyp).
{ apply agree_upto_c_iff; auto. }
(* 3rd step *)
pose proof (comp_force_int_app_F2_c lib a F g nvarx k b) as step3.
repeat (autodimp step3 hyp).
(* by typing *)
assert (equality lib g (force_int_f_c nvarx g) (mkc_fun mkc_int mkc_int))
as eb by (apply equality_force_int_f_c; auto).
assert (equality lib (mkc_apply F g) (mkc_apply F (force_int_f_c nvarx g)) mkc_int) as mc.
{ rw @equality_in_fun in mT; repnd.
apply mT; auto. }
apply equality_in_int in mc.
apply equality_of_int_imp_tt in mc.
unfold equality_of_int_tt in mc; exrepnd; GC.
assert (computes_to_valc lib (force_int_F_c nvarx F g) (mkc_integer k)) as c.
{ rw @computes_to_valc_iff_reduces_toc; dands; eauto with slow. }
repeat computes_to_eqval.
exists k0; dands; auto.
Qed.
(*
*** Local Variables:
*** coq-load-path: ("." "./close/")
*** End:
*)
|
{"author": "vrahli", "repo": "NuprlInCoq", "sha": "0c3d7723836d3f615ea47f56e58b2ea6173e7d98", "save_path": "github-repos/coq/vrahli-NuprlInCoq", "path": "github-repos/coq/vrahli-NuprlInCoq/NuprlInCoq-0c3d7723836d3f615ea47f56e58b2ea6173e7d98/continuity/continuity_axiom.v"}
|
import numpy as np
from scipy.optimize import curve_fit
from utils import import_qchem
from utils import utils
from core.polymer_chain import Polymer
from core.polymer_chain import RandomChargePolymer
from argparse import ArgumentParser
def run_partial_order_param():
description = "command line interface for running dihedral_model"
parser = ArgumentParser(description=description)
    parser.add_argument('-nf', action='store', type=str, help='input file (uncharged dihedral scan)', required=True)
    parser.add_argument('-cf', action='store', type=str, help='input file (charged dihedral scan)', required=True)
parser.add_argument('-gp', action='store', type=int, default=3600,
help='number of grid points used for dihedral angles')
parser.add_argument('-t', action='store', type=float, required=True, help='temperature in kelvin')
parser.add_argument('-mn', action='store', type=int, required=True, help='monomer_number')
parser.add_argument('-ml', action='store', type=float, default=2.548, help='monomer_length')
parser.add_argument('-ll', action='store', type=float, default=1.480, help='link_length')
parser.add_argument('-la', action='store', type=float, default=15.0, help='link_angle')
parser.add_argument('-sn', action='store', type=int, required=True, help='sample_number')
parser.add_argument('-cs', action='store', type=int, required=True, help='number of charged dihedrals')
parser.add_argument('-o', action='store', type=str, required=True, help='output filename')
parser.add_argument('-od', action='store', type=str, required=True, help='output directory')
args = parser.parse_args()
# import
c_energy, c_dihedral, c_errors = import_qchem.get_energy_dihedral(args.cf)
energy, dihedral, errors = import_qchem.get_energy_dihedral(args.nf)
# fit dihedral potential curve
c_rel_eV_energy = utils.relative_energy(c_energy)
rel_eV_energy = utils.relative_energy(energy)
c_params, c_covar = curve_fit(utils.RB_potential, c_dihedral, c_rel_eV_energy)
params, covar = curve_fit(utils.RB_potential, dihedral, rel_eV_energy)
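    # RB_potential is presumably a Ryckaert-Bellemans-style cosine series for the
    # dihedral energy; curve_fit recovers its coefficients from the scan data.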
# create list of angles and corresponding energies
angles = np.linspace(-179.9, 180.0, args.gp)
c_RB_energy = [utils.RB_potential(angle, *c_params) for angle in angles]
RB_energy = [utils.RB_potential(angle, *params) for angle in angles]
# Boltzmann distribution
c_prob = utils.boltz_dist(args.t, c_RB_energy)
prob = utils.boltz_dist(args.t, RB_energy)
# cumulative probability
c_cum_prob = [sum(c_prob[0:c_prob_i]) for c_prob_i in range(len(c_prob))]
    c_prob_angle = np.array(list(zip(c_cum_prob, angles)))
cum_prob = [sum(prob[0:prob_i]) for prob_i in range(len(prob))]
    prob_angle = np.array(list(zip(cum_prob, angles)))
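    # Each row pairs a cumulative probability with an angle, presumably so the
    # polymer model can draw dihedral angles by inverse-CDF sampling.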
# run dihedral model
poly = RandomChargePolymer(args.mn, args.ml, args.ll, args.la, prob_angle, c_prob_angle, args.sn)
poly.sample_charged_chains(args.cs)
# write files
    for attr, value in poly.__dict__.items():
if attr.startswith('c_ete_stats'):
utils.write_json(value.mean, "{dir}/{name}_m{m}_t{t}_{s}_cs{cs}_{d}_mean.json".format(
dir=args.od, name=args.o, m=args.mn, t=args.t, s=args.sn, cs=args.cs, d=attr))
utils.write_json(value.variance, "{dir}/{name}_m{m}_t{t}_{s}_cs{cs}_{d}_var.json".format(
dir=args.od, name=args.o, m=args.mn, t=args.t, s=args.sn, cs=args.cs, d=attr))
utils.write_json(value.std_error, "{dir}/{name}_m{m}_t{t}_{s}_cs{cs}_{d}_std_error.json".format(
dir=args.od, name=args.o, m=args.mn, t=args.t, s=args.sn, cs=args.cs, d=attr))
if attr.startswith('s_order_param'):
utils.write_json(value.mean, "{dir}/{name}_m{m}_t{t}_{s}_cs{cs}_{d}_mean.json".format(
dir=args.od, name=args.o, m=args.mn, t=args.t, s=args.sn, cs=args.cs, d=attr))
utils.write_json(value.variance, "{dir}/{name}_m{m}_t{t}_{s}_cs{cs}_{d}_var.json".format(
dir=args.od, name=args.o, m=args.mn, t=args.t, s=args.sn, cs=args.cs, d=attr))
utils.write_json(value.std_error, "{dir}/{name}_m{m}_t{t}_{s}_cs{cs}_{d}_std_error.json".format(
dir=args.od, name=args.o, m=args.mn, t=args.t, s=args.sn, cs=args.cs, d=attr))
if __name__ == '__main__':
run_partial_order_param()
|
{"hexsha": "8dd052857a31df0e01d58ec5d5d4ad4faf1e2b5c", "size": 4327, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/run_partial_order_param.py", "max_stars_repo_name": "wood-b/dihedral_model", "max_stars_repo_head_hexsha": "37b7b1648ff2c36f6319401aa50b8400183312f6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/run_partial_order_param.py", "max_issues_repo_name": "wood-b/dihedral_model", "max_issues_repo_head_hexsha": "37b7b1648ff2c36f6319401aa50b8400183312f6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/run_partial_order_param.py", "max_forks_repo_name": "wood-b/dihedral_model", "max_forks_repo_head_hexsha": "37b7b1648ff2c36f6319401aa50b8400183312f6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 58.472972973, "max_line_length": 108, "alphanum_fraction": 0.6873122256, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1180}
|
# -*- coding: utf-8 -*-
import copy
import json
import os
import zipfile
import numpy as np
import sqlalchemy
from decimal import ROUND_HALF_UP, Decimal
from PyAngle import Angle
from numpy import loadtxt, pi
from sqlalchemy import create_engine, event
from sqlalchemy.orm import sessionmaker
from xml.dom.minidom import Document
from ..alignment.align import Align
from ..server import Base, Column, String, Text, ForeignKey, relationship, Float, FLOAT, DECIMAL
from ezdxf.math import Vec2, Matrix44, Vector
from ..stdlib.supstructures import CIPBoxPoints
class Bridge(Base):
__tablename__ = "bridge_tbl"
name = Column('name', String(10), primary_key=True)
_f_ali_name = Column('align_name', String(10), ForeignKey("ei_tbl.name", ondelete='CASCADE', onupdate='CASCADE'))
_f_title_name = Column('title_name', String(120), nullable=True)
RelatedAlign = relationship("Align", foreign_keys=[_f_ali_name], cascade='save-update,delete')
def __init__(self, name: str, al: Align):
self.name = name
self.RelatedAlign = al
self.spanlist = []
self.ciplist = []
def set_title(self, title: str):
self._f_title_name = title
def serialize(self):
return json.dumps({"name": self.name})
def assign_sup(self, inst_name: str, inst: Base, spans: list, st_pk: float, end_pk: float, steps=0.1):
"""
        Assign a superstructure.
Args:
inst_name:
inst:
spans:
st_pk:
end_pk:
            steps: segment step length for the cast-in-place girder
Returns:
"""
sup_inst = inst.copy()
sup_inst.Name = inst_name
sup_inst.start_pk = st_pk
sup_inst.end_pk = end_pk
sup_inst.RelatedAlign = self.RelatedAlign
sup_inst.RelatedBridge = self
for ii, sp in enumerate(spans):
setattr(sup_inst, "RelatedSpan%i" % ii, sp)
        # populate the key points (KP)
npts = int((end_pk - st_pk) / steps) + 1
sideL = ((end_pk - st_pk) - (npts - 3) * steps) * 0.5
for i in range(npts):
if i == 0:
dx = st_pk
elif i == npts - 1:
dx = end_pk
else:
dx = st_pk + sideL + (i - 1) * steps
x0, y0 = sup_inst.RelatedAlign.get_coordinate(dx)
z0 = sup_inst.RelatedAlign.get_elevation(dx)
KP = CIPBoxPoints(i, float(x0), float(y0), float(z0))
KP.SetRelatedCIPBox(sup_inst)
sup_inst.KeyPointsList.append(KP)
#
self.ciplist.append(sup_inst)
pass
class Span(Base):
    # added ORM mapping -- Bill 2020/11/18
__tablename__ = "span_tbl"
name = Column("name", String(17), primary_key=True)
_fAli_name = Column('align_name', String(10), ForeignKey("ei_tbl.name", ondelete='CASCADE', onupdate='CASCADE'))
_fBri_name = Column('bridge_name', String(10),
ForeignKey("bridge_tbl.name", ondelete='CASCADE', onupdate='CASCADE'))
align = relationship("Align", foreign_keys=[_fAli_name], cascade='save-update,delete')
bridge = relationship("Bridge", foreign_keys=[_fBri_name], cascade='save-update,delete')
_fStation = Column('Station', DECIMAL(15, 3))
_fAngle = Column('Angle', DECIMAL(15, 3))
_f_deck_wl = Column('deck_wl', DECIMAL(15, 3), nullable=True)
_f_deck_wr = Column('deck_wr', DECIMAL(15, 3), nullable=True)
_f_back_wl = Column('back_wl', DECIMAL(15, 3), nullable=True)
_f_back_wr = Column('back_wr', DECIMAL(15, 3), nullable=True)
_f_front_wl = Column('front_wl', DECIMAL(15, 3), nullable=True)
_f_front_wr = Column('front_wr', DECIMAL(15, 3), nullable=True)
_fBeam_type = Column('BeamType', String(1), nullable=True)
_fPier_type = Column('PierType', String(1), nullable=True)
_fDeck_type = Column('DeckType', String(2), nullable=True, default="CT")
_fCut_to = Column("cut_to", String(17), nullable=True)
_fCut_by = Column("cut_by", String(17), nullable=True)
_fHPL = Column("HPL", DECIMAL(15, 3), nullable=True)
_fHPR = Column("HPR", DECIMAL(15, 3), nullable=True)
    # added ORM mapping -- Bill 2020/11/18
def __init__(self, align: Align, bridge: Bridge, station: float, ang_deg: float = 90):
"""
        Span line object.
        Args:
            align (Align): alignment the span line belongs to
            bridge (Bridge): bridge the span line belongs to
            station (float): station (chainage) of the span line
            ang_deg (float): skew angle in degrees; 90 means square (no skew),
                counterclockwise positive, default 90.
"""
self.align = align
self.bridge = bridge
self.station = station
self.angle = Angle.from_degrees(ang_deg).to_rad()
self.elevation = align.get_elevation(station)
self.ground_elevation = align.get_ground_elevation(station, 0)
self.width_left, self.width_right = align.get_width(station, self.angle)
self.hp_left, self.hp_right = align.get_cross_slope(station, self.angle)
self.pier = None
self.foundation = None
self.mj = None
self.bearings = []
        # added ORM mapping -- Bill 2020/11/18
result = ("%.3f" % (float(Decimal(station).quantize(Decimal('0.000'), rounding=ROUND_HALF_UP)))).zfill(9)
self.name = align.name + "+" + result
self._fStation = station
self._fAngle = ang_deg
self._fHPL = self.hp_left
self._fHPR = self.hp_right
self._f_deck_wl = self.width_left
self._f_deck_wr = self.width_right
        # added ORM mapping -- Bill 2020/11/18
        self.pier = None  # structure assignment added
def __str__(self):
return self.name
    def __eq__(self, other):
        if isinstance(other, Span):
            return str(self) == str(other)
        return NotImplemented
def __lt__(self, other):
if isinstance(other, Span):
return self.station < other.station
else:
raise Exception("无法与非Span类进行比较.")
def __add__(self, dist: float):
        return Span(self.align, self.bridge, self.station + dist, Angle.from_rad(self.angle).to_degrees())
def __sub__(self, other) -> float:
if isinstance(other, Span):
if other.align == self.align:
return self.station - other.station
else:
print("警告:桩号不在同一条设计线")
return self.station - other.station
raise Exception("无法与其他类型相减.")
def serialize(self):
dict = {
"align": self.align.name,
"bridge": self.bridge.name,
"station": self.station,
"angle": self.angle,
"elevation": self.elevation,
"ground_elevation": self.ground_elevation,
"width_left": self.width_left,
"width_right": self.width_right,
"hp_left": self.hp_left,
"hp_right": self.hp_right,
}
return json.dumps(dict)
def assign_substructure(self, inst_name: str, sub_inst: Base):
"""
        Attach a substructure to this span.
        Args:
            inst_name: primary key
            sub_inst: substructure instance
Returns:
"""
self.pier = sub_inst.copy()
self.pier.Sub_Inst.Name = inst_name
self.pier.Sub_Inst.RelatedSpan = self
for ii, col in enumerate(self.pier.ColumnList):
col.Name = inst_name + "/COL%s" % str(ii + 1).zfill(2)
self.pier.CapBeam_Inst.Name = inst_name + "/CB01"
xyz = self.align.get_coordinate(self.station)
xyz.append(self.align.get_ground_elevation(self.station, 0))
span_cc = Vector(xyz)
uux = Vector(self.align.get_direction(self.station))
uuy = uux.rotate_deg(90.0)
uuz = Vector(0, 0, 1)
trans_matrix = Matrix44.ucs(uux, uuy, uuz, span_cc)
self.pier.transform(trans_matrix)
pass
def assign_found2(self, inst_name: str, fund_inst: Base):
self.foundation = fund_inst.copy()
self.foundation.Found_Inst.Name = inst_name
self.foundation.Found_Inst.RelatedSpan = self
for ii, pile in enumerate(self.foundation.PileList):
pile.Name = inst_name + "/PI%s" % str(ii + 1).zfill(2)
for ii, pc in enumerate(self.foundation.PileCapList):
pc.Name = inst_name + "/PC%s" % str(ii + 1).zfill(2)
xyz = self.align.get_coordinate(self.station)
xyz.append(self.align.get_ground_elevation(self.station, 0))
span_cc = Vector(xyz)
uux = Vector(self.align.get_direction(self.station))
uuy = uux.rotate_deg(90.0)
uuz = Vector(0, 0, 1)
trans_matrix = Matrix44.ucs(uux, uuy, uuz, span_cc)
self.foundation.transform(trans_matrix)
pass
def assign_found(self, inst_name: str, fund_inst: Base,
off_l: float = 0, off_w: float = 0, off_h: float = 0,
angle_deg: float = 0):
"""
        Assign the foundation instance that belongs to this span line.
        Args:
            inst_name: foundation ID.
            fund_inst: foundation instance.
            off_h: vertical offset; the default 0 places the foundation 0.5 m below the ground line.
            off_w: transverse (cross-bridge) offset, default 0.
            off_l: longitudinal (along-bridge) offset, default 0.
            angle_deg: rotation of the foundation relative to the span plane, default 0, counterclockwise positive.
Returns:
"""
self.foundation = copy.deepcopy(fund_inst)
self.foundation.Name = inst_name
self.foundation.align = self.align
self.foundation.bridge = self.bridge
self.foundation.RelatedSpan = self
cc = Vec2(self.align.get_coordinate(self.station))
l_unit = Vec2(self.align.get_direction(self.station))
w_unit = l_unit.rotate_deg(90.0)
delta = off_l * l_unit + off_w * w_unit
new_cc = cc + delta
z0 = self.align.get_ground_elevation(self.station, 0) - 0.5 + off_h
x0 = new_cc.x
y0 = new_cc.y
ref_v = Vec2(self.align.get_direction(self.station))
ref_v = ref_v.rotate_deg(Angle.from_rad(self.angle).to_degrees() + angle_deg - 90.0)
ang2north = ref_v.angle_between(Vec2([0, 1]))
        self.foundation.AngOfNorth = ang2north  # in radians
self.foundation.X = x0
self.foundation.Y = y0
self.foundation.Z = z0
pass
def assign_bearing(self, inst_name: str, br_inst: Base, offset=None):
"""
Args:
inst_name:
br_inst:
offset:
Returns:
"""
if offset is None:
offset = [0, 0]
br_cp = br_inst.copy()
br_cp.Name = inst_name
br_cp.RelatedSpan = self
        bk_supper = None  # rear superstructure
        ft_supper = None  # front superstructure
supper = None
for cip in self.bridge.ciplist:
for ii, sp in enumerate(cip.span_list()):
if sp.name == self.name:
if ii == 0:
ft_supper = cip
elif ii == len(cip.span_list()) - 1:
bk_supper = cip
else:
supper = cip
        xyz = self.align.get_coordinate(self.station)
        # mirror the other assign_* methods: z comes from the ground elevation
        xyz.append(self.align.get_ground_elevation(self.station, 0))
span_cc = Vector(xyz)
def make_happy(self):
pass
# def assign_pier(self, name_inst: str, pier_inst: PierBase):
# """
# 指定属于本处分跨线的桥墩结构实例.
#
# Args:
# name: 桥墩编号.
# pier_inst: 桥墩实例.
#
# Returns:
#
# """
# self.pier = copy.deepcopy(pier_inst)
# self.pier._fName = name_inst
# self.pier.align = self.align
# self.pier.bridge = self.bridge
# self.pier.span = self
# self.pier._fStation = self._fStation
# self.pier._fAngle = self._fAngle
# self.pier._fSlopeLeft = self._fHPL
# self.pier._fSlopeRight = self._fHPR
# pass
class SpanCollection(list):
def __init__(self, align_dict: dict, bridge_dict: dict):
super().__init__()
self.align_dict = align_dict
self.bridge_dict = bridge_dict
def add(self, s: Span = None, align: Align = None, bridge: Bridge = None, station: float = None,
ang_deg: float = None) -> Span:
        if s is not None:
            res = s
        elif align is not None and bridge is not None:
            res = Span(align, bridge, station, ang_deg)
        else:
            raise Exception("Insufficient arguments.")
self.append(res)
self.sort()
return res
def read_csv(self, csv_path, sep=','):
data = loadtxt(csv_path, delimiter=sep, dtype=str)
for line in data:
            self.append(Span(self.align_dict[line[0]],
                             self.bridge_dict[line[1]],
                             float(line[2]),
                             float(line[3])
                             ))
self.sort()
def __getitem__(self, item) -> Span:
return super(SpanCollection, self).__getitem__(item)
class Model(object):
'''
    Basic bridge model: holds alignments, bridges, and spans.
'''
def __init__(self):
self.alignments = {}
self.bridges = {}
self.spans = SpanCollection(self.alignments, self.bridges)
def add_align(self, alignment: Align) -> int:
"""
导入路线数据。
Args:
alignment: 路线对象
Returns:
int: 成功时返回 0,失败返回 -1
"""
try:
self.alignments[alignment.name] = alignment
return 0
except Exception as e:
print(e)
return -1
def add_bridge(self, bri: Bridge) -> int:
"""
        Import bridge data.
        Args:
            bri (Bridge): bridge object
        Returns:
            int: 0 on success, -1 on failure
"""
try:
self.bridges[bri.name] = bri
return 0
except Exception as e:
print(e)
return -1
def add_span(self, spa: Span) -> int:
try:
self.spans.append(spa)
self.spans.sort()
spa.bridge.spanlist.append(spa)
return 0
except Exception as e:
print(e)
return -1
def _project_xml(self) -> Document:
"""
        Generate project.xml.
Returns:
Document : <class 'xml.dom.minidom.Document'>
"""
doc = Document()
pro = doc.createElement('project')
brs = doc.createElement('bridges')
als = doc.createElement('alignments')
for key in self.alignments.keys():
align = self.alignments[key]
al = doc.createElement('alignment')
al.setAttribute("name", align.name)
file = doc.createElement("fileLocation")
file.appendChild(doc.createTextNode(align._work_dir))
al.appendChild(file)
als.appendChild(al)
for key in self.bridges.keys():
bri = self.bridges[key]
br = doc.createElement('bridge')
br.setAttribute("name", bri.name)
brs.appendChild(br)
doc.appendChild(pro)
pro.appendChild(als)
pro.appendChild(brs)
include = doc.createElement("include")
include.appendChild(doc.createTextNode("./spans.xml"))
pro.appendChild(include)
return doc
def _make_span_xml(self) -> Document:
doc = Document()
return doc
def save_srb(self, filename):
CLEANTMPS = True
tmp, ex = os.path.splitext(filename)
file = tmp + '.srb'
z = zipfile.ZipFile(file, 'w', zipfile.ZIP_DEFLATED)
proj_doc = self._project_xml()
fpath = os.path.dirname(filename) + "/project.xml"
with open(fpath, 'wb') as f:
f.write(proj_doc.toprettyxml(indent='\t', encoding='utf-8'))
z.write(fpath)
if CLEANTMPS:
os.remove(fpath)
span_doc = self._make_span_xml()
fpath = os.path.dirname(filename) + "/span.xml"
with open(fpath, 'wb') as f:
f.write(span_doc.toprettyxml(indent='\t', encoding='utf-8'))
z.write(fpath)
if CLEANTMPS:
os.remove(fpath)
z.close()
def save_sql(self, connect):
engine = create_engine(connect, echo=False)
event.listen(engine, "before_cursor_execute", add_own_encoders)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
for al in self.alignments.keys():
session.add(self.alignments[al])
session.commit()
for br in self.bridges.keys():
session.add(self.bridges[br])
session.commit()
for sp in self.spans:
session.add(sp)
if sp.pier is not None:
                session.add(sp.pier.Sub_Inst)  # add the substructure node
                session.add(sp.pier.CapBeam_Inst)  # add the cap beam
                for col in sp.pier.ColumnList:  # add the pier columns
session.add(col)
if sp.foundation is not None:
session.add(sp.foundation.Found_Inst)
for pc in sp.foundation.PileCapList:
session.add(pc)
for pile in sp.foundation.PileList:
session.add(pile)
session.commit()
for br in self.bridges.keys():
for cip in self.bridges[br].ciplist:
session.add(cip)
for kp in cip.KeyPointsList:
session.add(kp)
session.commit()
def add_own_encoders(conn, cursor, query, *args):
# try:
# cursor.connection.encoders[np.float64] = lambda value, encoders: float(value)
pass
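

# Minimal usage sketch (names and values are illustrative only):
#   model = Model()
#   model.add_align(al)                      # al: an Align built elsewhere
#   br = Bridge("B1", al)
#   model.add_bridge(br)
#   model.add_span(Span(al, br, station=100.0, ang_deg=90))
#   model.save_sql("sqlite:///bridge.db")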
|
{"hexsha": "1c055cbe430f74c660c8f820238d7140f74f60d9", "size": 17289, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/srbpy/model/core.py", "max_stars_repo_name": "billhu0228/SmartRoadBridgePy", "max_stars_repo_head_hexsha": "4a5d34028a2612aef846b580733bf6f488110798", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-05T10:46:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-11T11:05:18.000Z", "max_issues_repo_path": "src/srbpy/model/core.py", "max_issues_repo_name": "billhu0228/SmartRoadBridgePy", "max_issues_repo_head_hexsha": "4a5d34028a2612aef846b580733bf6f488110798", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/srbpy/model/core.py", "max_forks_repo_name": "billhu0228/SmartRoadBridgePy", "max_forks_repo_head_hexsha": "4a5d34028a2612aef846b580733bf6f488110798", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-26T07:50:22.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-26T07:50:22.000Z", "avg_line_length": 33.1206896552, "max_line_length": 117, "alphanum_fraction": 0.5680490485, "include": true, "reason": "import numpy,from numpy", "num_tokens": 4571}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 11/3/2020 15:06:33
@author: cmg
"""
import numpy as np
import matplotlib.pyplot as plt
import timeit
import active_subspaces as ss
from astars.stars_sim import Stars_sim
from astars.utils.misc import subspace_dist, find_active
import pandas as pd
###############################################################################
############ Set user-desired file path for storing output data!!! ############
###############################################################################
user_file_path = '/home/ccm/Desktop/'
###############################################################################
###############################################################################
###############################################################################
class nesterov_2:
def __init__(self, dim = 10, sig = 1E-3):
self.dim = dim
self.sig = sig
self.L1 = 2**9
self.var = self.sig**2
self.nickname = 'nest_2'
self.name = 'Example 3: STARS vs FAASTARS With Adaptive Thresholding'
self.fstar = 0
self.maxit = 1000
        self.ntrials = 1  # 50
self.adapt = 2*dim
self.regul = None # maybe - self.sig**2
self.threshold = .9
self.initscl = 1
def __call__(self,x):
temp = np.arange(self.dim,dtype=float)
weights = 2**((-1)**temp*temp)
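        # weights alternate between growing and shrinking scales:
        # 2**0, 2**-1, 2**2, 2**-3, 2**4, ..., so curvature varies widely across coordinates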
y = np.copy(x)
y *= y
ans = np.dot(weights,y) +self.sig*np.random.randn(1)
return ans
class test_weights:
def __init__(self, dim = 10, sig = 1E-3):
self.dim = dim
self.sig = sig
self.L1 = 200
self.var = self.sig**2
self.nickname = 'test_weights'
self.name = 'Example 4: STARS vs FAASTARS With Adaptive Thresholding'
self.fstar = 0
self.maxit = 500
        self.ntrials = 1  # 50
self.adapt = 2*dim
self.regul = None # maybe - self.sig**2
self.threshold = .9
self.initscl = 1
def __call__(self,x):
#temp = np.arange(self.dim,dtype=float)
        weights = np.zeros(self.dim)
weights[0], weights[1], weights[2] = 100, 10, 1
y = np.copy(x)
y *= y
ans = np.dot(weights,y) +self.sig*np.random.randn(1)
return ans
#plotting parameters and definitions
nest = nesterov_2()
wt_fn = test_weights()
params = {'legend.fontsize': 28,'legend.handlelength': 3}
plt.rcParams["figure.figsize"] = (60,40)
plt.rcParams['figure.dpi'] = 80
plt.rcParams['savefig.dpi'] = 100
plt.rcParams['font.size'] = 30
plt.rcParams['figure.titlesize'] = 'xx-large'
plt.rcParams.update(params)
stars_full, sf_ls = 'red', '--'
active_stars_learned, lr_ls = 'black', '-.'
active_stars_ref, rf_ls = 'blue', ':'
# Start the clock!
start = timeit.default_timer()
for f in {wt_fn}:
dim = f.dim
#np.random.seed(9)
#init_pt = f.initscl*np.random.randn(dim)
init_pt = np.ones(dim)
ntrials = f.ntrials
maxit = f.maxit
f_avr = np.zeros(maxit+1)
f2_avr = np.zeros(maxit+1)
f3_avr = np.zeros(maxit+1)
f4_avr = np.zeros(maxit+1)
# STARS
for trial in range(ntrials):
#sim setup
test = Stars_sim(f, init_pt, L1 = f.L1, var = f.var, verbose = False, maxit = maxit)
test.STARS_only = True
test.get_mu_star()
test.get_h()
# STARS steps
while test.iter < test.maxit:
test.step()
#update average of f
f_avr += test.fhist
# FAASTARS (3 scenarios: no extensions, adaptive thresholding, and active subcycling)
for trial in range(ntrials):
#sim setup
test4 = Stars_sim(f, init_pt, L1 =f.L1, var = f.var, verbose = False, maxit = maxit, true_as = None)
test2 = Stars_sim(f, init_pt, L1 = f.L1, var = f.var, verbose = False, maxit = maxit, true_as = None)
test3 = Stars_sim(f, init_pt, L1 = f.L1, var = f.var, verbose = False, maxit = maxit, true_as = None)
test4.get_mu_star()
test4.get_h()
test2.get_mu_star()
test2.get_h()
test3.get_mu_star()
test3.get_h()
test3.lasso = True
test2.lasso = True
        # adapt every f.adapt timesteps using a quadratic model (after initial burn-in)
test4.train_method = 'GQ'
test2.train_method = 'GQ'
test3.train_method = 'GQ'
test4.adapt = f.adapt # Sets retraining steps
test2.adapt = f.adapt
        test3.adapt = f.adapt
#test.update_L1 = True
#test2.update_L1 = True
#test3.update_L1 = True
# Make test2 our adaptive thresholding trial, and test3 our subcycling trial
test2.threshadapt = True
test3.subcycle = True
test3.sub_method = 2
test2.slope_weight = .1
test3.slope_weight = .1
test2.pad_train = 2.0
test2.explore_weight = 2.0
test3.pad_train = 2.0
test3.explore_weight = 2.0
#test4.regul = f.regul
#test2.regul = f.regul
#test3.regul = f.regul
test4.threshold = f.threshold
test2.threshold = f.threshold
test3.threshold = f.threshold
        # run all three variants out to maxit
        while test4.iter < test4.maxit:
test4.step()
test2.step()
test3.step()
#pprint(test.iter,'Iteration')
#if test.iter == 100:
# test.L1 = 2.0
# test2.L1 = 2.0
# test3.L1 = 2.0
if test4.iter % 200 == 0:
print(test2.eigenvals)
f2_avr += test4.fhist
f3_avr += test2.fhist
f4_avr += test3.fhist
f_avr /= ntrials
f2_avr /= ntrials
f3_avr /= ntrials
f4_avr /= ntrials
# Stop the clock!
stop = timeit.default_timer()
# Difference stop-start tells us run time
time = stop - start
print('the time of this experiment was: ', time/3600, 'hours')
plt.semilogy(np.abs(f_avr-f.fstar),lw = 5,label='STARS',color=stars_full, ls=sf_ls)
plt.semilogy(np.abs(f2_avr-f.fstar), lw = 5, label='FAASTARS (No Extensions, $\\tau = 0.9$)',color=active_stars_learned ,ls=lr_ls)
plt.semilogy(np.abs(f3_avr-f.fstar), lw = 5,label = 'FAASTARS (Adaptive Thresholding)',color=active_stars_ref ,ls=rf_ls)
plt.semilogy(np.abs(f4_avr-f.fstar), lw = 5,label = 'FAASTARS (Active Subcycling)',color='orange' ,ls=rf_ls)
plt.title(f.name)
plt.xlabel('$k$, iteration count')
plt.ylabel('$|f(\lambda^{(k)})-f^*|$')
plt.legend()
plt.show()
for i in range(3):
plt.plot(test4.xhist[i,-1000:], label='FAASTARS (No Extensions, $\\tau = 0.9$)')
plt.plot(test2.xhist[i,-1000:], label = 'FAASTARS (Adaptive Thresholding)')
plt.plot(test3.xhist[i,-1000:], label = 'FAASTARS (Active Subcycling)')
plt.title(f.name)
plt.xlabel('$k$, iteration count')
y_str = '$\lambda_'+str(i)+'^{(k)}$'
plt.ylabel(y_str)
plt.legend()
plt.show()
for fa in [f_avr,f2_avr,f3_avr,f4_avr]:
slopes = []
for j in range(80,f2_avr.size):
fsamp = fa[j-20:j]
poly = np.polyfit(np.arange(20),fsamp,1)
slopes.append(poly[0])
slopes = np.array(slopes)
plt.semilogy(np.absolute(slopes))
plt.axhline(-test.sigma*f.dim)
plt.show()
|
{"hexsha": "2d29b8ac010ec05101ba94ba643e85781e16491d", "size": 7578, "ext": "py", "lang": "Python", "max_stars_repo_path": "paper_examples/nesterov_adapt_thresholding.py", "max_stars_repo_name": "variscarey/ASTARS", "max_stars_repo_head_hexsha": "99da27bffe1f843804a85a206686a7f8ae29fa42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-25T21:33:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-25T21:33:20.000Z", "max_issues_repo_path": "paper_examples/nesterov_adapt_thresholding.py", "max_issues_repo_name": "jordanrhall/ASTARS", "max_issues_repo_head_hexsha": "63ee17ba73e21880218db6514e4c6dd37671074d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-14T15:25:40.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-14T15:39:55.000Z", "max_forks_repo_path": "paper_examples/nesterov_adapt_thresholding.py", "max_forks_repo_name": "jordanrhall/ASTARS", "max_forks_repo_head_hexsha": "63ee17ba73e21880218db6514e4c6dd37671074d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.486381323, "max_line_length": 134, "alphanum_fraction": 0.534837688, "include": true, "reason": "import numpy", "num_tokens": 2133}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import numpy as np
from .A2JPoseNet import RegressionModel, ClassificationModel, generate_anchors, shift
from .pose_hrnet import get_pose_net
class HRNetA2JPoseNet(nn.Module):
def __init__(self, cfg):
super(HRNetA2JPoseNet, self).__init__()
        self.Backbone = get_pose_net(cfg, is_train=True)  # HRNet backbone; expects single-channel depth input
channels = sum(cfg.MODEL.EXTRA.STAGE4.NUM_CHANNELS)
self.downsample = nn.Sequential(
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1),
nn.AvgPool2d(kernel_size=2, stride=2)
)
n_anchors_per_px = len(cfg.MODEL.N_ANCHORS_H) * len(cfg.MODEL.N_ANCHORS_W)
self.regressionModel = RegressionModel(channels, num_anchors=n_anchors_per_px, num_classes=cfg.DATASET.NUM_JOINTS * 2)
self.classificationModel = ClassificationModel(channels, num_anchors=n_anchors_per_px, num_classes=cfg.DATASET.NUM_JOINTS * 2)
self.wh = torch.tensor(cfg.MODEL.INPUT_SIZE).view(1,1,1,-1).float()
anchors = generate_anchors(P_h=cfg.MODEL.N_ANCHORS_H, P_w=cfg.MODEL.N_ANCHORS_W)
self.all_anchors = torch.from_numpy(shift(shape=[16,16], stride=16, anchors=anchors)).float() # (w*h*A, 2)
self.trainable_temp = torch.nn.parameter.Parameter(torch.tensor(5.0), requires_grad=cfg.MODEL.TRAINABLE_SOFTMAX) if cfg.MODEL.TRAINABLE_SOFTMAX else 1.0
def forward(self, x):
output_feat, inter_feat = self.Backbone(x)
inter_feat = self.downsample(inter_feat)
output_feat = self.downsample(output_feat)
# cls: b x w*h*n_anchors x n_joints
# reg: B x w*h*n_anchors x n_joints x 2
classification = self.classificationModel(inter_feat)
relative_regression = torch.tanh(self.regressionModel(output_feat))
#print(self.wh)
#print('---relative_regression---\n',relative_regression)
reg = relative_regression * self.wh.to(relative_regression.device)
reg_weight = F.softmax(classification * self.trainable_temp,dim=1)
reg_weight_xy = torch.unsqueeze(reg_weight,3).expand(reg_weight.shape[0], reg_weight.shape[1],reg_weight.shape[2],2)#b x (w*h*A) x n_joints x 2
#print('---regression---\n',reg)
#print('---reg_weight_xy---',reg_weight_xy)
pose_pred = (reg_weight_xy * (reg + self.all_anchors.unsqueeze(1).unsqueeze(0).to(x.device))).sum(1) # b x n_joints x 2
surrounding_anchors_pred = (reg_weight_xy * self.all_anchors.unsqueeze(1).unsqueeze(0).to(x.device)).sum(1)
return pose_pred, surrounding_anchors_pred, classification, reg, self.trainable_temp
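
# Rough usage sketch (assumes cfg follows this repo's config schema and that the
# input is a batch of single-channel depth maps, B x 1 x H x W):
#   model = HRNetA2JPoseNet(cfg)
#   pose_pred, surrounding_pred, cls_logits, reg, temp = model(depth_batch)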
|
{"hexsha": "b345eeb4c47e762153f5a5815d0715dfca347944", "size": 2816, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/models/HRNetA2JPoseNet.py", "max_stars_repo_name": "ZJULiHongxin/two-hand-pose-est", "max_stars_repo_head_hexsha": "e531faacd9cdddcb716b614b832038d079b9663f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/models/HRNetA2JPoseNet.py", "max_issues_repo_name": "ZJULiHongxin/two-hand-pose-est", "max_issues_repo_head_hexsha": "e531faacd9cdddcb716b614b832038d079b9663f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/models/HRNetA2JPoseNet.py", "max_forks_repo_name": "ZJULiHongxin/two-hand-pose-est", "max_forks_repo_head_hexsha": "e531faacd9cdddcb716b614b832038d079b9663f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.1320754717, "max_line_length": 160, "alphanum_fraction": 0.7027698864, "include": true, "reason": "import numpy", "num_tokens": 748}
|
import numpy as np
import pandas as pd
import pytest
from anndata import AnnData
from scipy import sparse
import scanpy as sc
from scanpy.preprocessing._qc import (
top_proportions,
top_segment_proportions,
describe_var,
describe_obs,
)
@pytest.fixture
def anndata():
a = np.random.binomial(100, 0.005, (1000, 1000))
adata = AnnData(
sparse.csr_matrix(a),
obs=pd.DataFrame(index=[f"cell{i}" for i in range(a.shape[0])]),
var=pd.DataFrame(index=[f"gene{i}" for i in range(a.shape[1])]),
)
return adata
@pytest.mark.parametrize(
"a",
[np.ones((100, 100)), sparse.csr_matrix(np.ones((100, 100)))],
ids=["dense", "sparse"],
)
def test_proportions(a):
prop = top_proportions(a, 100)
assert (prop[:, -1] == 1).all()
assert np.array_equal(np.sort(prop, axis=1), prop)
assert np.apply_along_axis(lambda x: len(np.unique(x)) == 1, 0, prop).all()
assert (prop[:, 49] == 0.5).all()
def test_segments_binary():
a = np.concatenate([np.zeros((300, 50)), np.ones((300, 50))], 1)
a = np.apply_along_axis(np.random.permutation, 1, a)
seg = top_segment_proportions(a, [25, 50, 100])
assert (seg[:, 0] == 0.5).all()
assert (top_segment_proportions(a, [25]) == 0.5).all()
assert (seg[:, 1] == 1.0).all()
assert (seg[:, 2] == 1.0).all()
segfull = top_segment_proportions(a, np.arange(100) + 1)
propfull = top_proportions(a, 100)
assert (segfull == propfull).all()
@pytest.mark.parametrize(
"cls", [np.asarray, sparse.csr_matrix, sparse.csc_matrix, sparse.coo_matrix]
)
def test_top_segments(cls):
a = cls(np.ones((300, 100)))
seg = top_segment_proportions(a, [50, 100])
assert (seg[:, 0] == 0.5).all()
assert (seg[:, 1] == 1.0).all()
segfull = top_segment_proportions(a, np.arange(100) + 1)
propfull = top_proportions(a, 100)
assert (segfull == propfull).all()
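# For reference, the quantity these tests exercise can be written in a few
# lines of NumPy. A dense-only sketch (the library versions above also accept
# sparse matrices and are more efficient):
def top_proportions_dense(a, n):
    # sort each row descending, take the top n, cumulate, divide by row totals
    srt = np.sort(a, axis=1)[:, ::-1][:, :n]
    return np.cumsum(srt, axis=1) / a.sum(axis=1, keepdims=True)

_a = np.random.binomial(100, 0.005, (5, 100)).astype(float)
assert np.allclose(top_proportions_dense(_a, 100)[:, -1], 1.0)  # top-100 of 100 genes is everything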
# While many of these are trivial,
# they’re also just making sure the metrics are there
def test_qc_metrics():
adata = AnnData(X=sparse.csr_matrix(np.random.binomial(100, 0.005, (1000, 1000))))
adata.var["mito"] = np.concatenate(
(np.ones(100, dtype=bool), np.zeros(900, dtype=bool))
)
adata.var["negative"] = False
sc.pp.calculate_qc_metrics(adata, qc_vars=["mito", "negative"], inplace=True)
assert (adata.obs["n_genes_by_counts"] < adata.shape[1]).all()
assert (
adata.obs["n_genes_by_counts"] >= adata.obs["log1p_n_genes_by_counts"]
).all()
assert (adata.obs["total_counts"] == np.ravel(adata.X.sum(axis=1))).all()
assert (adata.obs["total_counts"] >= adata.obs["log1p_total_counts"]).all()
assert (
adata.obs["total_counts_mito"] >= adata.obs["log1p_total_counts_mito"]
).all()
assert (adata.obs["total_counts_negative"] == 0).all()
assert (
adata.obs["pct_counts_in_top_50_genes"]
<= adata.obs["pct_counts_in_top_100_genes"]
).all()
for col in filter(lambda x: "negative" not in x, adata.obs.columns):
assert (adata.obs[col] >= 0).all() # Values should be positive or zero
assert (adata.obs[col] != 0).any().all() # Nothing should be all zeros
if col.startswith("pct_counts_in_top"):
assert (adata.obs[col] <= 100).all()
assert (adata.obs[col] >= 0).all()
for col in adata.var.columns:
assert (adata.var[col] >= 0).all()
assert (adata.var["mean_counts"] < np.ravel(adata.X.max(axis=0).todense())).all()
assert (adata.var["mean_counts"] >= adata.var["log1p_mean_counts"]).all()
assert (adata.var["total_counts"] >= adata.var["log1p_total_counts"]).all()
# Should return the same thing if run again
old_obs, old_var = adata.obs.copy(), adata.var.copy()
sc.pp.calculate_qc_metrics(adata, qc_vars=["mito", "negative"], inplace=True)
assert set(adata.obs.columns) == set(old_obs.columns)
assert set(adata.var.columns) == set(old_var.columns)
for col in adata.obs:
assert np.allclose(adata.obs[col], old_obs[col])
for col in adata.var:
assert np.allclose(adata.var[col], old_var[col])
# with log1p=False
adata = AnnData(X=sparse.csr_matrix(np.random.binomial(100, 0.005, (1000, 1000))))
adata.var["mito"] = np.concatenate(
(np.ones(100, dtype=bool), np.zeros(900, dtype=bool))
)
adata.var["negative"] = False
sc.pp.calculate_qc_metrics(
adata, qc_vars=["mito", "negative"], log1p=False, inplace=True
)
assert not np.any(adata.obs.columns.str.startswith("log1p_"))
assert not np.any(adata.var.columns.str.startswith("log1p_"))
def adata_mito():
a = np.random.binomial(100, 0.005, (1000, 1000))
init_var = pd.DataFrame(
dict(mito=np.concatenate((np.ones(100, dtype=bool), np.zeros(900, dtype=bool))))
)
adata_dense = AnnData(X=a, var=init_var.copy())
return adata_dense, init_var
@pytest.mark.parametrize(
"cls", [np.asarray, sparse.csr_matrix, sparse.csc_matrix, sparse.coo_matrix]
)
def test_qc_metrics_format(cls):
adata_dense, init_var = adata_mito()
sc.pp.calculate_qc_metrics(adata_dense, qc_vars=["mito"], inplace=True)
adata = AnnData(X=cls(adata_dense.X), var=init_var.copy())
sc.pp.calculate_qc_metrics(adata, qc_vars=["mito"], inplace=True)
assert np.allclose(adata.obs, adata_dense.obs)
for col in adata.var: # np.allclose doesn't like mix of types
assert np.allclose(adata.var[col], adata_dense.var[col])
def test_qc_metrics_percentage(): # In response to #421
adata_dense, init_var = adata_mito()
sc.pp.calculate_qc_metrics(adata_dense, percent_top=[])
sc.pp.calculate_qc_metrics(adata_dense, percent_top=())
sc.pp.calculate_qc_metrics(adata_dense, percent_top=None)
sc.pp.calculate_qc_metrics(adata_dense, percent_top=[1, 2, 3, 10])
sc.pp.calculate_qc_metrics(adata_dense, percent_top=[1])
with pytest.raises(IndexError):
sc.pp.calculate_qc_metrics(adata_dense, percent_top=[1, 2, 3, -5])
with pytest.raises(IndexError):
sc.pp.calculate_qc_metrics(adata_dense, percent_top=[20, 30, 1001])
def test_layer_raw(anndata):
adata = anndata.copy()
adata.raw = adata.copy()
adata.layers["counts"] = adata.X.copy()
obs_orig, var_orig = sc.pp.calculate_qc_metrics(adata)
sc.pp.log1p(adata) # To be sure they aren't reusing it
obs_layer, var_layer = sc.pp.calculate_qc_metrics(adata, layer="counts")
obs_raw, var_raw = sc.pp.calculate_qc_metrics(adata, use_raw=True)
assert np.allclose(obs_orig, obs_layer)
assert np.allclose(obs_orig, obs_raw)
assert np.allclose(var_orig, var_layer)
assert np.allclose(var_orig, var_raw)
def test_inner_methods(anndata):
adata = anndata.copy()
full_inplace = adata.copy()
partial_inplace = adata.copy()
obs_orig, var_orig = sc.pp.calculate_qc_metrics(adata)
assert np.all(obs_orig == describe_obs(adata))
assert np.all(var_orig == describe_var(adata))
sc.pp.calculate_qc_metrics(full_inplace, inplace=True)
describe_obs(partial_inplace, inplace=True)
describe_var(partial_inplace, inplace=True)
assert np.all(full_inplace.obs == partial_inplace.obs)
assert np.all(full_inplace.var == partial_inplace.var)
assert np.all(partial_inplace.obs[obs_orig.columns] == obs_orig)
assert np.all(partial_inplace.var[var_orig.columns] == var_orig)
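# Putting the API together: the typical call pattern covered by the tests
# above looks like this (a usage sketch, not an additional test; the "MT-"
# gene naming is illustrative):
def _usage_sketch():
    counts = np.random.binomial(100, 0.005, (100, 200)).astype(np.float32)
    ad = AnnData(
        counts,
        var=pd.DataFrame(index=[f"MT-{i}" if i < 10 else f"gene{i}" for i in range(200)]),
    )
    ad.var["mito"] = ad.var_names.str.startswith("MT-")
    sc.pp.calculate_qc_metrics(ad, qc_vars=["mito"], percent_top=[50, 100], inplace=True)
    return ad.obs[["total_counts", "pct_counts_mito", "pct_counts_in_top_50_genes"]]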
|
{"hexsha": "71f6e728e038eff12d9885d3e4f2c51a5522b12d", "size": 7379, "ext": "py", "lang": "Python", "max_stars_repo_path": "scanpy/tests/test_qc_metrics.py", "max_stars_repo_name": "mkmkryu/scanpy2", "max_stars_repo_head_hexsha": "f3db32a142dc31c1b628380db1c969a6d0b9dc3a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1171, "max_stars_repo_stars_event_min_datetime": "2017-01-17T14:01:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:02:57.000Z", "max_issues_repo_path": "scanpy/tests/test_qc_metrics.py", "max_issues_repo_name": "mkmkryu/scanpy2", "max_issues_repo_head_hexsha": "f3db32a142dc31c1b628380db1c969a6d0b9dc3a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1946, "max_issues_repo_issues_event_min_datetime": "2017-01-22T10:19:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:13:03.000Z", "max_forks_repo_path": "scanpy/tests/test_qc_metrics.py", "max_forks_repo_name": "mkmkryu/scanpy2", "max_forks_repo_head_hexsha": "f3db32a142dc31c1b628380db1c969a6d0b9dc3a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 499, "max_forks_repo_forks_event_min_datetime": "2017-01-21T11:39:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T13:57:35.000Z", "avg_line_length": 39.8864864865, "max_line_length": 88, "alphanum_fraction": 0.6723133216, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2114}
|
from tensorflow.keras.preprocessing import image
from .net import MobileNetV2
import numpy as np
class OrangeClassifier():
def __init__(self, model_path):
self.model = MobileNetV2(input_shape=(224, 224, 3), num_classes=2)
self.model.load_weights(model_path)
self.class_map = {0:"ripe orange",1:"unripe orange"}
def preprocess_input(self,x):
x *= (1. / 255)
return x
def predict(self,image_path):
image_to_predict = image.load_img(image_path, target_size=(224, 224))
image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
image_to_predict = np.expand_dims(image_to_predict, axis=0)
image_to_predict = self.preprocess_input(image_to_predict)
prediction = self.model.predict(image_to_predict)
predicted_class = prediction.argmax()
prediction_confidence = prediction.max() * 100
image_class = self.class_map[predicted_class]
return image_class, prediction_confidence
def predictBatch(self,image_paths):
#create an array to store all processed images
images_array = []
#loop over the batch of images sent
for image_path in image_paths:
image_to_predict = image.load_img(image_path, target_size=(224, 224))
image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
image_to_predict = np.expand_dims(image_to_predict, axis=0)
image_to_predict = self.preprocess_input(image_to_predict)
#append the processed images to the array
images_array.append(image_to_predict)
#merge all the images together as one array
images = np.concatenate(images_array)
predictions = self.model.predict(images)
#use axis=1 to compute the argmax and max
predicted_classes = predictions.argmax(axis=1)
prediction_confidence = predictions.max(axis=1) * 100
#create an array to store the names of the classes
predicted_class_names = []
#loop over all the predictions and convert indexes to class names
for predicted_index in predicted_classes:
class_name = self.class_map[predicted_index]
#append the class name to the array
predicted_class_names.append(class_name)
#return the class name list and the confidences
return predicted_class_names,prediction_confidence
class BananaClassifier():
def __init__(self, model_path):
self.model = MobileNetV2(input_shape=(224, 224, 3), num_classes=2)
self.model.load_weights(model_path)
self.class_map = {0:"ripe banana",1:"unripe banana"}
def preprocess_input(self,x):
x *= (1. / 255)
return x
def predict(self,image_path):
image_to_predict = image.load_img(image_path, target_size=(224, 224))
image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
image_to_predict = np.expand_dims(image_to_predict, axis=0)
image_to_predict = self.preprocess_input(image_to_predict)
prediction = self.model.predict(image_to_predict)
predicted_class = prediction.argmax()
prediction_confidence = prediction.max() * 100
image_class = self.class_map[predicted_class]
return image_class, prediction_confidence
def predictBatch(self,image_paths):
#create an array to store all processed images
images_array = []
#loop over the batch of images sent
for image_path in image_paths:
image_to_predict = image.load_img(image_path, target_size=(224, 224))
image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
image_to_predict = np.expand_dims(image_to_predict, axis=0)
image_to_predict = self.preprocess_input(image_to_predict)
#append the processed images to the array
images_array.append(image_to_predict)
#merge all the images together as one array
images = np.concatenate(images_array)
predictions = self.model.predict(images)
#use axis=1 to compute the argmax and max
predicted_classes = predictions.argmax(axis=1)
prediction_confidence = predictions.max(axis=1) * 100
#create an array to store the names of the classes
predicted_class_names = []
#loop over all the predictions and convert indexes to class names
for predicted_index in predicted_classes:
class_name = self.class_map[predicted_index]
#append the class name to the array
predicted_class_names.append(class_name)
#return the class name list and the confidences
return predicted_class_names,prediction_confidence
class AppleClassifier():
def __init__(self, model_path):
self.model = MobileNetV2(input_shape=(224, 224, 3), num_classes=2)
self.model.load_weights(model_path)
self.class_map = {0:"green apple",1:"red apple"}
def preprocess_input(self,x):
x *= (1. / 255)
return x
def predict(self,image_path):
image_to_predict = image.load_img(image_path, target_size=(224, 224))
image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
image_to_predict = np.expand_dims(image_to_predict, axis=0)
image_to_predict = self.preprocess_input(image_to_predict)
prediction = self.model.predict(image_to_predict)
predicted_class = prediction.argmax()
prediction_confidence = prediction.max() * 100
image_class = self.class_map[predicted_class]
return image_class, prediction_confidence
def predictBatch(self,image_paths):
#create an array to store all processed images
images_array = []
#loop over the batch of images sent
for image_path in image_paths:
image_to_predict = image.load_img(image_path, target_size=(224, 224))
image_to_predict = image.img_to_array(image_to_predict, data_format="channels_last")
image_to_predict = np.expand_dims(image_to_predict, axis=0)
image_to_predict = self.preprocess_input(image_to_predict)
#append the processed images to the array
images_array.append(image_to_predict)
#merge all the images together as one array
images = np.concatenate(images_array)
predictions = self.model.predict(images)
#use axis=1 to compute the argmax and max
predicted_classes = predictions.argmax(axis=1)
prediction_confidence = predictions.max(axis=1) * 100
#create an array to store the names of the classes
predicted_class_names = []
#loop over all the predictions and convert indexes to class names
for predicted_index in predicted_classes:
class_name = self.class_map[predicted_index]
#append the class name to the array
predicted_class_names.append(class_name)
#return the class name list and the confidences
return predicted_class_names,prediction_confidence
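# The three classes above differ only in their label maps, so the shared
# behaviour could be factored out. A sketch of one possible refactor (the
# FruitClassifier name and the _load helper are suggestions, not part of the
# library):
class FruitClassifier:
    def __init__(self, model_path, class_map):
        self.model = MobileNetV2(input_shape=(224, 224, 3), num_classes=2)
        self.model.load_weights(model_path)
        self.class_map = class_map

    def _load(self, image_path):
        # identical preprocessing to the classes above: resize, to-array, rescale
        img = image.load_img(image_path, target_size=(224, 224))
        img = image.img_to_array(img, data_format="channels_last")
        return np.expand_dims(img, axis=0) * (1.0 / 255)

    def predict(self, image_path):
        prediction = self.model.predict(self._load(image_path))
        return self.class_map[prediction.argmax()], prediction.max() * 100

class RipeOrangeClassifier(FruitClassifier):  # drop-in equivalent of OrangeClassifier
    def __init__(self, model_path):
        super().__init__(model_path, {0: "ripe orange", 1: "unripe orange"})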
|
{"hexsha": "5a42d9e8f1f674629b5751efe57ef4fa0adfa1ac", "size": 7667, "ext": "py", "lang": "Python", "max_stars_repo_path": "orangelib/build/lib/orangelib/model.py", "max_stars_repo_name": "ayoolaolafenwa/orangelib", "max_stars_repo_head_hexsha": "4fe7d88488482e52d466b8021db3b4e4dc80a484", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-06-23T11:47:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-18T16:00:18.000Z", "max_issues_repo_path": "orangelib/build/lib/orangelib/model.py", "max_issues_repo_name": "ayoolaolafenwa/orangelib", "max_issues_repo_head_hexsha": "4fe7d88488482e52d466b8021db3b4e4dc80a484", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "orangelib/build/lib/orangelib/model.py", "max_forks_repo_name": "ayoolaolafenwa/orangelib", "max_forks_repo_head_hexsha": "4fe7d88488482e52d466b8021db3b4e4dc80a484", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-06-23T11:51:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-21T03:44:48.000Z", "avg_line_length": 35.1697247706, "max_line_length": 97, "alphanum_fraction": 0.6636233207, "include": true, "reason": "import numpy", "num_tokens": 1547}
|
# The abstract types provided by InteratomicPotentials.jl
export AbstractPotential, NonTrainablePotential, TrainablePotential, EmpiricalPotential, MixedPotential
"""
AbstractPotential
The abstract supertype of all interatomic potentials.
"""
abstract type AbstractPotential end
include("types/abstract_potential.jl")
"""
NonTrainablePotential <: AbstractPotential
Abstract type for potentials that aren't trainable.
"""
abstract type NonTrainablePotential <: AbstractPotential end
include("types/non_trainable_potential.jl")
"""
TrainablePotential{P<:NamedTuple,HP<:NamedTuple} <: AbstractPotential
Abstract type for potentials that are trainable.
`P` is a `NamedTuple` of parameter names and `HP` is a `NamedTuple` of hyperparameter names.
"""
abstract type TrainablePotential{P<:NamedTuple,HP<:NamedTuple} <: AbstractPotential end
include("types/trainable_potential.jl")
"""
EmpiricalPotential{P<:NamedTuple,HP<:NamedTuple} <: TrainablePotential{P,HP}
Defines an empirical potential, a heuristic function used to describe the intermolecular potential energy of a configuration of atoms. Various potentials have been found to agree empirically with the experimentally obtained potential energy of a configuration of atoms for particular atoms in particular situations. This package implements a number of such popular potentials.
`P` is a `NamedTuple` of parameter names and `HP` is a `NamedTuple` of hyperparameter names.
"""
abstract type EmpiricalPotential{P<:NamedTuple,HP<:NamedTuple} <: TrainablePotential{P,HP} end
include("types/empirical_potential.jl")
"""
MixedPotential <: AbstractPotential
Abstract type for potentials that are the combination of multiple sub-potentials.
"""
abstract type MixedPotential <: AbstractPotential end
include("types/mixed_potential.jl")
|
{"hexsha": "fc5772556f689057c406e09baf60d9b4c317fc57", "size": 1815, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/types.jl", "max_stars_repo_name": "cesmix-mit/InteratomicPotentials.jl", "max_stars_repo_head_hexsha": "100af9067e69d4e3fa2f4697b4915c93cb08f419", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-10-04T09:43:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T11:15:24.000Z", "max_issues_repo_path": "src/types.jl", "max_issues_repo_name": "cesmix-mit/InteratomicPotentials.jl", "max_issues_repo_head_hexsha": "100af9067e69d4e3fa2f4697b4915c93cb08f419", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2022-01-19T01:22:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T19:09:50.000Z", "max_forks_repo_path": "src/types.jl", "max_forks_repo_name": "cesmix-mit/InteratomicPotentials.jl", "max_forks_repo_head_hexsha": "100af9067e69d4e3fa2f4697b4915c93cb08f419", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-11T00:39:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T20:55:29.000Z", "avg_line_length": 38.6170212766, "max_line_length": 376, "alphanum_fraction": 0.7994490358, "num_tokens": 397}
|
#ifndef HPENFACAG_HPP_INCLUDED
#define HPENFACAG_HPP_INCLUDED
#include <vector>
#include <string>
#include <boost/serialization/list.hpp>
#include <boost/serialization/set.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/deque.hpp>
#include <caffe/util/math_functions.hpp>
#include "arch/AACAgent.hpp"
#include "bib/Seed.hpp"
#include "bib/Utils.hpp"
#include "bib/OrnsteinUhlenbeckNoise.hpp"
#include "bib/MetropolisHasting.hpp"
#include "bib/XMLEngine.hpp"
#include "bib/IniParser.hpp"
#include "bib/OnlineNormalizer.hpp"
#include "nn/MLP.hpp"
#ifdef PARALLEL_INTERACTION
#include <mpi.h>
#include <boost/mpi.hpp>
#endif
#ifndef SAASRG_SAMPLE
#define SAASRG_SAMPLE
typedef struct _sample {
std::vector<double> s;
std::vector<double> goal_achieved;
std::vector<double> goal_achieved_unnormed;
std::vector<double> pure_a;
std::vector<double> a;
std::vector<double> next_s;
std::vector<double> next_goal_achieved_unnormed;
double r;
bool goal_reached;
double prob;
bool artificial;
bool interest;
friend class boost::serialization::access;
template <typename Archive>
void serialize(Archive& ar, const unsigned int) {
ar& BOOST_SERIALIZATION_NVP(s);
ar& BOOST_SERIALIZATION_NVP(goal_achieved);
ar& BOOST_SERIALIZATION_NVP(goal_achieved_unnormed);
ar& BOOST_SERIALIZATION_NVP(pure_a);
ar& BOOST_SERIALIZATION_NVP(a);
ar& BOOST_SERIALIZATION_NVP(next_s);
ar& BOOST_SERIALIZATION_NVP(next_goal_achieved_unnormed);
ar& BOOST_SERIALIZATION_NVP(r);
ar& BOOST_SERIALIZATION_NVP(goal_reached);
ar& BOOST_SERIALIZATION_NVP(prob);
ar& BOOST_SERIALIZATION_NVP(artificial);
ar& BOOST_SERIALIZATION_NVP(interest);
}
} sample;
#endif
template<typename NN = MLP>
class OfflineCaclaAg : public arch::AACAgent<NN, arch::AgentGPUProgOptions> {
public:
typedef NN PolicyImpl;
OfflineCaclaAg(unsigned int _nb_motors, unsigned int _nb_sensors, uint _goal_size, uint _goal_start)
: arch::AACAgent<NN, arch::AgentGPUProgOptions>(_nb_motors, _nb_sensors-_goal_size), nb_sensors(_nb_sensors-_goal_size), empty_action(),
last_state(_nb_sensors-_goal_size, 0.f), last_goal_achieved(_goal_size, 0.f), goal_size(_goal_size), goal_start(_goal_start-_goal_size),
normalizer(nb_sensors) {
LOG_DEBUG("goal size " << goal_size);
LOG_DEBUG("goal start " << goal_start);
}
virtual ~OfflineCaclaAg() {
if(vnn != nullptr)
delete vnn;
delete ann;
delete ann_noblob;
delete hidden_unit_v;
delete hidden_unit_a;
    if(oun != nullptr)
      delete oun;
}
const std::vector<double>& _run(double reward, const std::vector<double>& sensors,
const std::vector<double>& goal_achieved, bool learning, bool as, bool) override {
std::vector<double> normed_sensors(nb_sensors);
normalizer.transform_with_double_clip(normed_sensors, sensors, false);
// protect batch norm from testing data and poor data
vector<double>* next_action = ann_noblob->computeOut(normed_sensors);
if (last_action.get() != nullptr && learning) {
double prob = bib::Proba<double>::truncatedGaussianDensity(*last_action, *last_pure_action, noise);
bool gr = reward >= -0.0000001;
trajectory.push_back( {last_state, last_goal_achieved, last_goal_achieved, *last_pure_action, *last_action, sensors, goal_achieved, reward, gr, prob, false, true});
if (gr)
trajectory_end_points.push_back(trajectory.size());
// auto sa = trajectory.back();
// double nr = dense_reward(sa.goal_achieved_unnormed, sa.s,
// sa.next_goal_achieved_unnormed, sa.next_s,
// sa.s, sa.next_s);
// LOG_DEBUG(reward << " " << nr);
}
last_pure_action.reset(new std::vector<double>(*next_action));
if(learning) {
if(gaussian_policy == 1) {
vector<double>* randomized_action = bib::Proba<double>::multidimentionnalTruncatedGaussian(*next_action, noise);
delete next_action;
next_action = randomized_action;
} else if(gaussian_policy == 2) {
oun->step(*next_action);
} else if(gaussian_policy == 3 && bib::Utils::rand01() < noise2) {
vector<double>* randomized_action = bib::Proba<double>::multidimentionnalTruncatedGaussian(*next_action, noise);
delete next_action;
next_action = randomized_action;
} else if(gaussian_policy == 4) {
vector<double>* randomized_action = bib::Proba<double>::multidimentionnalTruncatedGaussian(*next_action, noise * pow(noise2, noise3 - ((double) step)));
delete next_action;
next_action = randomized_action;
} else if(gaussian_policy == 5) {
if (bib::Utils::rand01() < noise2){
for (uint i = 0; i < next_action->size(); i++)
next_action->at(i) = bib::Utils::randin(-1.f, 1.f);
} else {
vector<double>* randomized_action = bib::Proba<double>::multidimentionnalTruncatedGaussian(*next_action, noise);
delete next_action;
next_action = randomized_action;
}
} else if(bib::Utils::rand01() < noise) { //e-greedy
for (uint i = 0; i < next_action->size(); i++)
next_action->at(i) = bib::Utils::randin(-1.f, 1.f);
}
}
last_action.reset(next_action);
std::copy(sensors.begin(), sensors.end(), last_state.begin());
std::copy(goal_achieved.begin(), goal_achieved.end(), last_goal_achieved.begin());
step++;
return *next_action;
}
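  // The `prob` stored with each transition above is the density of the executed
  // action under a per-dimension Gaussian truncated to the action bounds [-1, 1];
  // it later feeds the importance ratios on relabelled data. Assuming
  // bib::Proba::truncatedGaussianDensity evaluates the usual truncated normal
  // (the exact normalisation lives in bib), the density has the form
  //
  //   \pi(a \mid s) = \prod_i \frac{\varphi((a_i-\mu_i(s))/\sigma)/\sigma}
  //                                {\Phi((1-\mu_i(s))/\sigma)-\Phi((-1-\mu_i(s))/\sigma)}
  //
  // with \varphi / \Phi the standard normal pdf/cdf and \sigma = agent.noise.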
void _unique_invoke(boost::property_tree::ptree* pt, boost::program_options::variables_map* command_args) override {
// bib::Seed::setFixedSeedUTest();
hidden_unit_v = bib::to_array<uint>(pt->get<std::string>("agent.hidden_unit_v"));
hidden_unit_a = bib::to_array<uint>(pt->get<std::string>("agent.hidden_unit_a"));
noise = pt->get<double>("agent.noise");
gaussian_policy = pt->get<uint>("agent.gaussian_policy");
number_fitted_iteration = pt->get<uint>("agent.number_fitted_iteration");
stoch_iter_actor = pt->get<uint>("agent.stoch_iter_actor");
stoch_iter_critic = pt->get<uint>("agent.stoch_iter_critic");
actor_output_layer_type = pt->get<uint>("agent.actor_output_layer_type");
hidden_layer_type = pt->get<uint>("agent.hidden_layer_type");
alpha_a = pt->get<double>("agent.alpha_a");
alpha_v = pt->get<double>("agent.alpha_v");
lambda = pt->get<double>("agent.lambda");
momentum = pt->get<uint>("agent.momentum");
beta_target = pt->get<double>("agent.beta_target");
ignore_poss_ac = pt->get<bool>("agent.ignore_poss_ac");
conserve_beta = pt->get<bool>("agent.conserve_beta");
disable_trust_region = pt->get<bool>("agent.disable_trust_region");
disable_cac = pt->get<bool>("agent.disable_cac");
hindsight_nb_destination = pt->get<uint>("agent.hindsight_nb_destination");
gae = false;
update_each_episode = 1;
if(gaussian_policy == 2) {
double oun_theta = pt->get<double>("agent.noise2");
double oun_dt = pt->get<double>("agent.noise3");
oun = new bib::OrnsteinUhlenbeckNoise<double>(this->nb_motors, noise, oun_theta, oun_dt);
} else if (gaussian_policy == 3 || gaussian_policy == 5){
noise2 = pt->get<double>("agent.noise2");
} else if (gaussian_policy == 4){
noise2 = pt->get<double>("agent.noise2");
noise3 = pt->get<double>("agent.noise3");
}
try {
update_each_episode = pt->get<uint>("agent.update_each_episode");
#ifdef PARALLEL_INTERACTION
if (update_each_episode % (world.size() - 1) != 0) {
LOG_ERROR("update_each_episode must be a multiple of (number of worker - 1)");
exit(1);
}
update_each_episode = update_each_episode / (world.size() - 1);
#endif
} catch(boost::exception const& ) {
}
if(lambda >= 0.)
gae = pt->get<bool>("agent.gae");
#ifdef CAFFE_CPU_ONLY
LOG_INFO("CPU mode");
(void) command_args;
#else
if(command_args->count("gpu") == 0 || command_args->count("cpu") > 0
#ifdef PARALLEL_INTERACTION
|| world.rank() != 0
#endif
)
{
caffe::Caffe::set_mode(caffe::Caffe::Brew::CPU);
LOG_INFO("CPU mode");
} else {
caffe::Caffe::set_mode(caffe::Caffe::Brew::GPU);
caffe::Caffe::SetDevice((*command_args)["gpu"].as<uint>());
LOG_INFO("GPU mode");
}
#endif
LOG_INFO("dimensionality of NN " << nb_sensors << " (in) " << this->nb_motors << " (out).");
ann = new NN(nb_sensors, *hidden_unit_a, this->nb_motors, alpha_a, 1, hidden_layer_type, actor_output_layer_type, 0, true, momentum);
ann_noblob = new NN(*ann, false, ::caffe::Phase::TEST);
#ifdef PARALLEL_INTERACTION
std::vector<double> weights(ann->number_of_parameters(false), 0.f);
if (world.rank() == 0)
ann->copyWeightsTo(weights.data(), false);
broadcast(world, weights, 0);
if (world.rank() != 0)
ann->copyWeightsFrom(weights.data(), false);
#endif
#ifdef PARALLEL_INTERACTION
if (world.rank() == 0)
vnn = new NN(nb_sensors, nb_sensors, *hidden_unit_v, alpha_v, 1, -1, hidden_layer_type, 0, false, momentum);
#else
vnn = new NN(nb_sensors, nb_sensors, *hidden_unit_v, alpha_v, 1, -1, hidden_layer_type, 0, false, momentum);
#endif
bestever_score = std::numeric_limits<double>::lowest();
}
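  // For reference, all the keys parsed above live under an ini-style [agent]
  // section of the configuration read into the property tree. A hypothetical
  // excerpt (values are illustrative only; list format as expected by
  // bib::to_array):
  //
  //   [agent]
  //   noise=0.2
  //   gaussian_policy=1
  //   number_fitted_iteration=1
  //   stoch_iter_actor=10
  //   stoch_iter_critic=1
  //   hidden_unit_v=64:64
  //   hidden_unit_a=64:64
  //   lambda=0.9
  //   beta_target=0.03
  //   hindsight_nb_destination=4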
void _start_episode(const std::vector<double>& sensors, bool) override {
std::copy(sensors.begin(), sensors.end(), last_state.begin());
last_action = nullptr;
last_pure_action = nullptr;
step = 0;
if(gaussian_policy == 2)
oun->reset();
double* weights = new double[ann->number_of_parameters(false)];
ann->copyWeightsTo(weights, false);
ann_noblob->copyWeightsFrom(weights, false);
delete[] weights;
}
void update_critic(const caffe::Blob<double>& all_states, const caffe::Blob<double>& all_next_states,
const caffe::Blob<double>& r_gamma_coef) {
if (trajectory.size() > 0) {
caffe::Blob<double> v_target(trajectory.size(), 1, 1, 1);
//remove trace of old policy
auto iter = [&]() {
auto all_nextV = vnn->computeOutVFBlob(all_next_states, empty_action);
auto all_V = vnn->computeOutVFBlob(all_states, empty_action);
//all_V must be computed after all_nextV to use learn_blob_no_full_forward
//#ifdef CAFFE_CPU_ONLY
caffe::caffe_mul(trajectory.size(), r_gamma_coef.cpu_diff(), all_nextV->cpu_data(), v_target.mutable_cpu_data());
caffe::caffe_add(trajectory.size(), r_gamma_coef.cpu_data(), v_target.cpu_data(), v_target.mutable_cpu_data());
double *pv_target = v_target.mutable_cpu_data();
double min_ = - (1.f/(1.f-this->gamma));
for(int i=0;i<trajectory.size();i++){
if(pv_target[i] > 0.0)
pv_target[i] = 0.f;
else if (pv_target[i] < min_)
pv_target[i] = min_;
}
caffe::caffe_sub(trajectory.size(), v_target.cpu_data(), all_V->cpu_data(), v_target.mutable_cpu_data());
//#else
// switch (caffe::Caffe::mode()) {
// case caffe::Caffe::CPU:
// caffe::caffe_mul(trajectory.size(), r_gamma_coef.cpu_diff(), all_nextV->cpu_data(), v_target.mutable_cpu_data());
// caffe::caffe_add(trajectory.size(), r_gamma_coef.cpu_data(), v_target.cpu_data(), v_target.mutable_cpu_data());
// caffe::caffe_sub(trajectory.size(), v_target.cpu_data(), all_V->cpu_data(), v_target.mutable_cpu_data());
// break;
// case caffe::Caffe::GPU:
// caffe::caffe_gpu_mul(trajectory.size(), r_gamma_coef.gpu_diff(), all_nextV->gpu_data(), v_target.mutable_gpu_data());
// caffe::caffe_gpu_add(trajectory.size(), r_gamma_coef.gpu_data(), v_target.gpu_data(), v_target.mutable_gpu_data());
// caffe::caffe_gpu_sub(trajectory.size(), v_target.gpu_data(), all_V->gpu_data(), v_target.mutable_gpu_data());
// break;
// }
//#endif
// Simple computation for lambda return
// move v_target from GPU to CPU
double* pdiff = v_target.mutable_cpu_diff();
const double* pvtarget = v_target.cpu_data();
int li=trajectory.size() - 1;
double prev_delta = 0.;
int index_ep = trajectory_end_points.size() - 1;
for (auto it = trajectory.rbegin(); it != trajectory.rend(); it++) {
if (index_ep >= 0 && trajectory_end_points[index_ep] - 1 == li){
prev_delta = 0.;
index_ep--;
}
// if(pvtarget[li] + all_V->cpu_data()[li] > 0.0001f || pvtarget[li] + all_V->cpu_data()[li] < -50.0001f)
// LOG_DEBUG("MIGHT BE PROBLEMATIC " << (pvtarget[li]+all_V->cpu_data()[li]) << " " << r_gamma_coef.cpu_diff()[li] << " " << all_nextV->cpu_data()[li] << " " << r_gamma_coef.cpu_data()[li]);
if (it->artificial) {
pdiff[li] = pvtarget[li] * std::min(it->prob, pbar) + prev_delta * std::min(it->prob, cbar);
prev_delta = this->gamma * lambda * pdiff[li];
} else {
pdiff[li] = pvtarget[li] + prev_delta;
prev_delta = this->gamma * lambda * pdiff[li];
}
--li;
}
// ASSERT(pdiff[trajectory.size() -1] == pvtarget[trajectory.size() -1] * std::min(trajectory[trajectory.size() - 1].prob, pbar), "pb lambda");
// move diff to GPU
#ifdef CAFFE_CPU_ONLY
caffe::caffe_add(trajectory.size(), v_target.cpu_diff(), all_V->cpu_data(), v_target.mutable_cpu_data());
#else
switch (caffe::Caffe::mode()) {
case caffe::Caffe::CPU:
caffe::caffe_add(trajectory.size(), v_target.cpu_diff(), all_V->cpu_data(), v_target.mutable_cpu_data());
break;
case caffe::Caffe::GPU:
caffe::caffe_gpu_add(trajectory.size(), v_target.gpu_diff(), all_V->gpu_data(), v_target.mutable_gpu_data());
break;
}
#endif
if (stoch_iter_critic == 1)
vnn->learn_blob_no_full_forward(all_states, empty_action, v_target);
else
vnn->learn_blob(all_states, empty_action, v_target, stoch_iter_critic);
delete all_V;
delete all_nextV;
};
for(uint i=0; i<number_fitted_iteration; i++)
iter();
}
}
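  // In symbols, the backward loop above computes a lambda-return with
  // V-trace-style truncated importance weights on relabelled samples. With
  // \rho_t the stored probability ratio, \bar\rho = pbar, \bar{c} = cbar, and
  // \delta_t the (clipped) TD error r_t + \gamma V(s_{t+1}) - V(s_t):
  //
  //   A_t = \min(\rho_t,\bar\rho)\,\delta_t + \gamma\lambda\,\min(\rho_t,\bar{c})\,A_{t+1}   (artificial samples)
  //   A_t = \delta_t + \gamma\lambda\,A_{t+1}                                                (on-policy samples)
  //
  // and the critic is regressed towards V(s_t) + A_t. The Bellman target is
  // first clipped to [-(1/(1-\gamma)), 0], the value range induced by the
  // sparse rewards in [-1, 0].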
void end_episode(bool learning) override {
// LOG_FILE("policy_exploration", ann->hash());
if(!learning){
return;
}
//learning phase
if (trajectory_end_points.size() == 0 || trajectory_end_points.back() != trajectory.size())
trajectory_end_points.push_back(trajectory.size());
if (episode % update_each_episode != 0)
return;
//
// Remove junk data
//
std::deque<double> varsums(trajectory_end_points.size(), 0.f);
for (int traj = trajectory_end_points.size() - 1 ; traj >= 0 ; traj--) {
int beg = traj == 0 ? 0 : trajectory_end_points[traj-1];
int end = trajectory_end_points[traj];
if (end - beg > 1) {
for (int goal_dim=0; goal_dim < goal_size; goal_dim++) {
std::function<double(const sample&)> get = [goal_dim](const sample& s) {
return s.goal_achieved_unnormed[goal_dim];
};
varsums[traj] += bib::Utils::variance<>(trajectory.cbegin() + beg, trajectory.cbegin() + end, get);
}
}
//goal_achieved hasn't changed at all during the trajectory
if (varsums[traj] <= 1e-8) {
// tag already achieved task where actor won't be update
if (trajectory[beg].r >= -0.0001) {
for (auto it = trajectory.begin() + beg; it != trajectory.begin() + end; it++)
it->interest=false;
}
}
}
#ifdef PARALLEL_INTERACTION
if (world.rank() == 0) {
trajectory_end_points.clear();
varsums.clear();
std::vector<std::deque<sample>> all_traj;
std::vector<std::deque<int>> all_traj_ep;
std::vector<std::deque<double>> all_varsums;
gather(world, trajectory, all_traj, 0);
gather(world, trajectory_end_points, all_traj_ep, 0);
gather(world, varsums, all_varsums, 0);
ASSERT(all_traj.size() == all_traj_ep.size(), "pb");
for (auto it : all_varsums)
for (auto it2 : it)
varsums.push_back(it2);
for (int i=0; i < all_traj.size() ; i++) {
for (auto d2 : all_traj_ep[i])
trajectory_end_points.push_back(trajectory.size() + d2);
trajectory.insert(trajectory.end(), all_traj[i].begin(), all_traj[i].end());
}
} else {
gather(world, trajectory, 0);
gather(world, trajectory_end_points, 0);
gather(world, varsums, 0);
}
if (world.rank() == 0) {
#endif
// LOG_DEBUG("#############");
// for (int i=0;i<trajectory.size(); i++) {
// bib::Logger::PRINT_ELEMENTS(trajectory[i].goal_achieved);
// LOG_DEBUG(trajectory[i].r << " " <<i);
// }
// bib::Logger::PRINT_ELEMENTS(trajectory_end_points);
// LOG_DEBUG("#############");
// LOG_DEBUG("#############");
//
// update norm on batch
//
for (int i=0;i<trajectory.size(); i++) {
normalizer.update_batch_clip_before(trajectory[i].s, goal_size);//ignore fixed goal in update
normalizer.update_batch_clip_before(trajectory[i].goal_achieved);
}
#ifdef PARALLEL_INTERACTION
}
// synchronize normalizer
bib::OnlineNormalizer on(this->nb_sensors);
if (world.rank() == 0)
on.copyFrom(normalizer);
broadcast(world, on, 0);
if (world.rank() != 0)
normalizer.copyFrom(on);
if (world.rank() == 0) {
#endif
if (trajectory.size() == 0) {
LOG_INFO("no data left");
nb_sample_update = 0;
ASSERT(trajectory_end_points.size() == 0, "");
return;
}
// LOG_DEBUG("#############");
// for (int i=0;i<trajectory.size(); i++) {
// bib::Logger::PRINT_ELEMENTS(trajectory[i].goal_achieved);
// LOG_DEBUG(trajectory[i].r << " " <<i);
// // if(trajectory[i].r >= 0 ){
// // bib::Logger::PRINT_ELEMENTS(trajectory[i].goal_achieved, ("HERE "+std::to_string(i)+" ").c_str());
// // }
// }
// // bib::Logger::PRINT_ELEMENTS(trajectory_end_points);
// // exit(1);
//
// perform norm on batch
//
for (int i=0;i<trajectory.size(); i++) {
std::vector<double> normed_sensors(nb_sensors);
std::vector<double> normed_goal_size(goal_size);
std::vector<double> normed_next_s(nb_sensors);
normalizer.transform_with_double_clip(normed_sensors, trajectory[i].s, false);
normalizer.transform_with_double_clip(normed_goal_size, trajectory[i].goal_achieved, false);
normalizer.transform_with_double_clip(normed_next_s, trajectory[i].next_s, false);
std::copy(normed_sensors.begin(), normed_sensors.end(), trajectory[i].s.begin());
std::copy(normed_goal_size.begin(), normed_goal_size.end(), trajectory[i].goal_achieved.begin());
std::copy(normed_next_s.begin(), normed_next_s.end(), trajectory[i].next_s.begin());
}
// LOG_DEBUG("#############");
// for (int i=0;i<trajectory.size(); i++) {
// bib::Logger::PRINT_ELEMENTS(trajectory[i].s);
// bib::Logger::PRINT_ELEMENTS(trajectory[i].goal_achieved);
// LOG_DEBUG(trajectory[i].r << " " <<i);
// }
// exit(1);
//
// data augmentation part
//
int saved_trajsize=trajectory.size();
int saved_trajend_point=trajectory_end_points.size();
for(int i=0;i < saved_trajend_point; i++) {
// don't generate trajectory where goal achieved hasn't changed
if (varsums[i] <= 1e-8)
continue;
int min_index=0;
if(i>0)
min_index=trajectory_end_points[i-1];
if(trajectory_end_points[i]-1 == min_index)
continue;
for(int j=0;j<hindsight_nb_destination;j++) {
uint destination = bib::Seed::unifRandInt(min_index, trajectory_end_points[i]-1);
for(int k=min_index;k<=destination;k++) {
sample sa = trajectory[k];
trajectory.push_back(sa);
trajectory.back().artificial = true;
std::copy(trajectory[destination].goal_achieved.begin(),
trajectory[destination].goal_achieved.end(),
trajectory.back().s.begin() + goal_start);
std::copy(trajectory[destination].goal_achieved.begin(),
trajectory[destination].goal_achieved.end(),
trajectory.back().next_s.begin() + goal_start);
// sparse reward
if ( sparse_reward(sa.goal_achieved_unnormed, trajectory[destination].goal_achieved_unnormed)) {
trajectory.back().r = 0.f;
trajectory.back().goal_reached = true;
trajectory_end_points.push_back(trajectory.size());
}
// --
// dense reward
// trajectory.back().r = dense_reward(sa.goal_achieved_unnormed, trajectory[destination].goal_achieved_unnormed,
// sa.next_goal_achieved_unnormed, trajectory[destination].next_goal_achieved_unnormed,
// sa.s, sa.next_s);
// if ( trajectory.back().r >= -0.0000001 ) {
// trajectory_end_points.push_back(trajectory.size());
// trajectory.back().goal_reached = true;
// }
// --
//should remove junk data after data
}
if (trajectory_end_points.back() != trajectory.size())
trajectory_end_points.push_back(trajectory.size());
}
}
//
// tag artificial junk data
//
for (int traj = trajectory_end_points.size() - 1 ; traj >= saved_trajend_point ; traj--) {
int beg = traj == 0 ? 0 : trajectory_end_points[traj-1];
int end = trajectory_end_points[traj];
double varsum = 0.f;
if (end - beg > 1) {
for (int goal_dim=0; goal_dim < goal_size; goal_dim++) {
std::function<double(const sample&)> get = [goal_dim](const sample& s) {
return s.goal_achieved_unnormed[goal_dim];
};
varsum += bib::Utils::variance<>(trajectory.cbegin() + beg, trajectory.cbegin() + end, get);
}
}
//goal_achieved hasn't changed at all during the trajectory
if (varsum <= 1e-8 && trajectory[beg].r >= -0.0001) {
for (auto it = trajectory.begin() + beg; it != trajectory.begin() + end; it++){
it->interest=false;
}
}
}
//
// compute importance sampling ratio on artificial data
//
int artificial_data_size = trajectory.size() - saved_trajsize;
if (artificial_data_size > 0) {
caffe::Blob<double> all_states(artificial_data_size, nb_sensors, 1, 1);
double* pall_states = all_states.mutable_cpu_data();
int li=0;
for (int i = saved_trajsize; i < trajectory.size(); i++) {
std::copy(trajectory[i].s.begin(), trajectory[i].s.end(), pall_states + li * nb_sensors);
li++;
}
ann->increase_batchsize(artificial_data_size);
auto ac_out = ann->computeOutBlob(all_states);
li=0;
for (int i = saved_trajsize; i < trajectory.size(); i++) {
trajectory[i].prob = bib::Proba<double>::truncatedGaussianDensity(trajectory[i].a, ac_out->cpu_data(), noise, li * this->nb_motors) / trajectory[i].prob;
li++;
}
delete ac_out;
}
// LOG_DEBUG("#############");
// for (int i=0;i<trajectory.size(); i++){
// bib::Logger::PRINT_ELEMENTS(trajectory[i].s, trajectory[i].artificial ? "arti " : "real ");
// bib::Logger::PRINT_ELEMENTS(trajectory[i].goal_achieved);
// LOG_DEBUG(trajectory[i].r << " " <<i);
// }
// LOG_DEBUG("#############");
// LOG_DEBUG("#############");
// LOG_DEBUG("#############");
// exit(1);
if(trajectory.size() > 0)
vnn->increase_batchsize(trajectory.size());
caffe::Blob<double> all_states(trajectory.size(), nb_sensors, 1, 1);
caffe::Blob<double> all_next_states(trajectory.size(), nb_sensors, 1, 1);
//store reward in data and gamma coef in diff
caffe::Blob<double> r_gamma_coef(trajectory.size(), 1, 1, 1);
double* pall_states = all_states.mutable_cpu_data();
double* pall_states_next = all_next_states.mutable_cpu_data();
double* pr_all = r_gamma_coef.mutable_cpu_data();
double* pgamma_coef = r_gamma_coef.mutable_cpu_diff();
int li=0;
for (auto it : trajectory) {
std::copy(it.s.begin(), it.s.end(), pall_states + li * nb_sensors);
std::copy(it.next_s.begin(), it.next_s.end(), pall_states_next + li * nb_sensors);
pr_all[li]=it.r;
pgamma_coef[li]= it.goal_reached ? 0.000f : this->gamma;
li++;
}
update_critic(all_states, all_next_states, r_gamma_coef);
if (trajectory.size() > 0) {
const std::vector<double> disable_back_ac(this->nb_motors, 0.00f);
caffe::Blob<double> deltas(trajectory.size(), 1, 1, 1);
auto all_nextV = vnn->computeOutVFBlob(all_next_states, empty_action);
auto all_mine = vnn->computeOutVFBlob(all_states, empty_action);
//#ifdef CAFFE_CPU_ONLY
caffe::caffe_mul(trajectory.size(), r_gamma_coef.cpu_diff(), all_nextV->cpu_data(), deltas.mutable_cpu_data());
caffe::caffe_add(trajectory.size(), r_gamma_coef.cpu_data(), deltas.cpu_data(), deltas.mutable_cpu_data());
double *pv_target = deltas.mutable_cpu_data();
double min_ = - (1.f/(1.f-this->gamma));
for(int i=0;i<trajectory.size();i++){
if(pv_target[i] > 0.0)
pv_target[i] = 0.f;
else if (pv_target[i] < min_)
pv_target[i] = min_;
}
caffe::caffe_sub(trajectory.size(), deltas.cpu_data(), all_mine->cpu_data(), deltas.mutable_cpu_data());
//#else
// switch (caffe::Caffe::mode()) {
// case caffe::Caffe::CPU:
// caffe::caffe_mul(trajectory.size(), r_gamma_coef.cpu_diff(), all_nextV->cpu_data(), deltas.mutable_cpu_data());
// caffe::caffe_add(trajectory.size(), r_gamma_coef.cpu_data(), deltas.cpu_data(), deltas.mutable_cpu_data());
// caffe::caffe_sub(trajectory.size(), deltas.cpu_data(), all_mine->cpu_data(), deltas.mutable_cpu_data());
// break;
// case caffe::Caffe::GPU:
// caffe::caffe_gpu_mul(trajectory.size(), r_gamma_coef.gpu_diff(), all_nextV->gpu_data(), deltas.mutable_gpu_data());
// caffe::caffe_gpu_add(trajectory.size(), r_gamma_coef.gpu_data(), deltas.gpu_data(), deltas.mutable_gpu_data());
// caffe::caffe_gpu_sub(trajectory.size(), deltas.gpu_data(), all_mine->gpu_data(), deltas.mutable_gpu_data());
// break;
// }
//#endif
if(gae){
// Simple computation for lambda return
// move deltas from GPU to CPU
double * diff = deltas.mutable_cpu_diff();
const double* pdeltas = deltas.cpu_data();
int li=trajectory.size() - 1;
double prev_delta = 0.;
int index_ep = trajectory_end_points.size() - 1;
for (auto it = trajectory.rbegin(); it != trajectory.rend(); it++) {
if (index_ep >= 0 && trajectory_end_points[index_ep] - 1 == li){
prev_delta = 0.;
index_ep--;
}
if(it->artificial) {
diff[li] = pdeltas[li] * std::min(it->prob, pbar) + prev_delta * std::min(it->prob, cbar);
prev_delta = this->gamma * lambda * diff[li];
} else {
diff[li] = pdeltas[li] + prev_delta;
prev_delta = this->gamma * lambda * diff[li];
}
--li;
}
// ASSERT(diff[trajectory.size() -1] == pdeltas[trajectory.size() -1] * std::min(trajectory[trajectory.size() - 1].prob, pbar), "pb lambda");
caffe::caffe_copy(trajectory.size(), deltas.cpu_diff(), deltas.mutable_cpu_data());
}
uint n=0;
posdelta_mean=0.f;
//store target in data, and disable in diff
caffe::Blob<double> target_cac(trajectory.size(), this->nb_motors, 1, 1);
caffe::Blob<double> target_treg(trajectory.size(), this->nb_motors, 1, 1);
caffe::caffe_set(target_cac.count(), static_cast<double>(1.f), target_cac.mutable_cpu_diff());
caffe::caffe_set(target_treg.count(), static_cast<double>(1.f), target_treg.mutable_cpu_diff());
caffe::Blob<double> deltas_blob(trajectory.size(), this->nb_motors, 1, 1);
caffe::caffe_set(deltas_blob.count(), static_cast<double>(1.f), deltas_blob.mutable_cpu_data());
double* pdisable_back_cac = target_cac.mutable_cpu_diff();
double* pdisable_back_treg = target_treg.mutable_cpu_diff();
double* pdeltas_blob = deltas_blob.mutable_cpu_data();
double* ptarget_cac = target_cac.mutable_cpu_data();
double* ptarget_treg = target_treg.mutable_cpu_data();
const double* pdeltas = deltas.cpu_data();
li=0;
//cacla cost
for(auto it = trajectory.begin(); it != trajectory.end() ; ++it) {
std::copy(it->a.begin(), it->a.end(), ptarget_cac + li * this->nb_motors);
if(pdeltas[li] > 0. && it->interest) {
posdelta_mean += pdeltas[li];
n++;
} else {
std::copy(disable_back_ac.begin(), disable_back_ac.end(), pdisable_back_cac + li * this->nb_motors);
}
if(!disable_cac)
std::fill(pdeltas_blob + li * this->nb_motors, pdeltas_blob + (li+1) * this->nb_motors, pdeltas[li]);
li++;
}
//penalty cost
li=0;
int number_non_artificial_sample = 0;
for(auto it = trajectory.begin(); it != trajectory.end() ; ++it) {
std::copy(it->pure_a.begin(), it->pure_a.end(), ptarget_treg + li * this->nb_motors);
if((ignore_poss_ac && pdeltas[li] > 0.) || it->artificial) {
std::copy(disable_back_ac.begin(), disable_back_ac.end(), pdisable_back_treg + li * this->nb_motors);
}
if (! it->artificial)
number_non_artificial_sample++;
li++;
}
ratio_valid_advantage = ((float)n) / ((float) trajectory.size());
posdelta_mean = posdelta_mean / ((float) trajectory.size());
int size_cost_cacla=trajectory.size()*this->nb_motors;
double beta=0.0001f;
mean_beta=0.f;
if(conserve_beta)
beta=conserved_beta;
mean_beta += beta;
if(n > 0) {
ann->increase_batchsize(trajectory.size());
for(uint sia = 0; sia < stoch_iter_actor; sia++){
//learn BN
auto ac_out = ann->computeOutBlob(all_states);
ann->ZeroGradParameters();
number_effective_actor_update = sia;
if(disable_trust_region)
beta=0.f;
else if (sia > 0) {
//compute deter distance(pi, pi_old)
caffe::Blob<double> diff_treg(trajectory.size(), this->nb_motors, 1, 1);
double l2distance = 0.f;
#ifdef CAFFE_CPU_ONLY
caffe::caffe_sub(size_cost_cacla, target_treg.cpu_data(), ac_out->cpu_data(), diff_treg.mutable_cpu_data());
caffe::caffe_mul(size_cost_cacla, target_treg.cpu_diff(), diff_treg.cpu_data(), diff_treg.mutable_cpu_data());
caffe::caffe_mul(size_cost_cacla, diff_treg.cpu_data(), diff_treg.cpu_data(), diff_treg.mutable_cpu_data());
l2distance = caffe::caffe_cpu_asum(size_cost_cacla, diff_treg.cpu_data());
#else
switch (caffe::Caffe::mode()) {
case caffe::Caffe::CPU:
caffe::caffe_sub(size_cost_cacla, target_treg.cpu_data(), ac_out->cpu_data(), diff_treg.mutable_cpu_data());
caffe::caffe_mul(size_cost_cacla, target_treg.cpu_diff(), diff_treg.cpu_data(), diff_treg.mutable_cpu_data());
caffe::caffe_mul(size_cost_cacla, diff_treg.cpu_data(), diff_treg.cpu_data(), diff_treg.mutable_cpu_data());
l2distance = caffe::caffe_cpu_asum(size_cost_cacla, diff_treg.cpu_data());
break;
case caffe::Caffe::GPU:
caffe::caffe_gpu_sub(size_cost_cacla, target_treg.gpu_data(), ac_out->gpu_data(), diff_treg.mutable_gpu_data());
caffe::caffe_gpu_mul(size_cost_cacla, target_treg.gpu_diff(), diff_treg.gpu_data(), diff_treg.mutable_gpu_data());
caffe::caffe_gpu_mul(size_cost_cacla, diff_treg.gpu_data(), diff_treg.gpu_data(), diff_treg.mutable_gpu_data());
caffe::caffe_gpu_asum(size_cost_cacla, diff_treg.gpu_data(), &l2distance);
break;
}
#endif
l2distance = std::sqrt(l2distance/((double) number_non_artificial_sample*this->nb_motors));
if (l2distance < beta_target/1.5)
beta = beta/2.;
else if (l2distance > beta_target*1.5)
beta = beta*2.;
beta=std::max(std::min((double)20.f, beta), (double) 0.01f);
mean_beta += beta;
conserved_l2dist = l2distance;
//LOG_DEBUG(std::setprecision(7) << l2distance << " " << beta << " " << beta_target << " " << sia);
}
const auto actor_actions_blob = ann->getNN()->blob_by_name(MLP::actions_blob_name);
caffe::Blob<double> diff_cac(trajectory.size(), this->nb_motors, 1, 1);
caffe::Blob<double> diff_treg(trajectory.size(), this->nb_motors, 1, 1);
double * ac_diff = nullptr;
#ifdef CAFFE_CPU_ONLY
ac_diff = actor_actions_blob->mutable_cpu_diff();
caffe::caffe_sub(size_cost_cacla, target_cac.cpu_data(), ac_out->cpu_data(), diff_cac.mutable_cpu_data());
caffe::caffe_mul(size_cost_cacla, diff_cac.cpu_data(), deltas_blob.cpu_data(), diff_cac.mutable_cpu_data());
caffe::caffe_mul(size_cost_cacla, target_cac.cpu_diff(), diff_cac.cpu_data(), diff_cac.mutable_cpu_data());
caffe::caffe_sub(size_cost_cacla, target_treg.cpu_data(), ac_out->cpu_data(), diff_treg.mutable_cpu_data());
caffe::caffe_scal(size_cost_cacla, beta, diff_treg.mutable_cpu_data());
caffe::caffe_mul(size_cost_cacla, target_treg.cpu_diff(), diff_treg.cpu_data(), diff_treg.mutable_cpu_data());
caffe::caffe_add(size_cost_cacla, diff_cac.cpu_data(), diff_treg.cpu_data(), ac_diff);
caffe::caffe_scal(size_cost_cacla, (double) -1.f, ac_diff);
#else
switch (caffe::Caffe::mode()) {
case caffe::Caffe::CPU:
ac_diff = actor_actions_blob->mutable_cpu_diff();
caffe::caffe_sub(size_cost_cacla, target_cac.cpu_data(), ac_out->cpu_data(), diff_cac.mutable_cpu_data());
caffe::caffe_mul(size_cost_cacla, diff_cac.cpu_data(), deltas_blob.cpu_data(), diff_cac.mutable_cpu_data());
caffe::caffe_mul(size_cost_cacla, target_cac.cpu_diff(), diff_cac.cpu_data(), diff_cac.mutable_cpu_data());
caffe::caffe_sub(size_cost_cacla, target_treg.cpu_data(), ac_out->cpu_data(), diff_treg.mutable_cpu_data());
caffe::caffe_scal(size_cost_cacla, beta, diff_treg.mutable_cpu_data());
caffe::caffe_mul(size_cost_cacla, target_treg.cpu_diff(), diff_treg.cpu_data(), diff_treg.mutable_cpu_data());
caffe::caffe_add(size_cost_cacla, diff_cac.cpu_data(), diff_treg.cpu_data(), ac_diff);
caffe::caffe_scal(size_cost_cacla, (double) -1.f, ac_diff);
break;
case caffe::Caffe::GPU:
ac_diff = actor_actions_blob->mutable_gpu_diff();
caffe::caffe_gpu_sub(size_cost_cacla, target_cac.gpu_data(), ac_out->gpu_data(), diff_cac.mutable_gpu_data());
caffe::caffe_gpu_mul(size_cost_cacla, diff_cac.gpu_data(), deltas_blob.gpu_data(), diff_cac.mutable_gpu_data());
caffe::caffe_gpu_mul(size_cost_cacla, target_cac.gpu_diff(), diff_cac.gpu_data(), diff_cac.mutable_gpu_data());
caffe::caffe_gpu_sub(size_cost_cacla, target_treg.gpu_data(), ac_out->gpu_data(), diff_treg.mutable_gpu_data());
caffe::caffe_gpu_scal(size_cost_cacla, beta, diff_treg.mutable_gpu_data());
caffe::caffe_gpu_mul(size_cost_cacla, target_treg.gpu_diff(), diff_treg.gpu_data(), diff_treg.mutable_gpu_data());
caffe::caffe_gpu_add(size_cost_cacla, diff_cac.gpu_data(), diff_treg.gpu_data(), ac_diff);
caffe::caffe_gpu_scal(size_cost_cacla, (double) -1.f, ac_diff);
break;
}
#endif
ann->actor_backward();
ann->updateFisher(n);
ann->regularize();
ann->getSolver()->ApplyUpdate();
ann->getSolver()->set_iter(ann->getSolver()->iter() + 1);
delete ac_out;
}
}
conserved_beta = beta;
if (number_effective_actor_update != 0)
mean_beta /= (double) number_effective_actor_update;
delete all_nextV;
delete all_mine;
}
#ifdef PARALLEL_INTERACTION
}
std::vector<double> weights(ann->number_of_parameters(false), 0.f);
if (world.rank() == 0)
ann->copyWeightsTo(weights.data(), false);
broadcast(world, weights, 0);
if (world.rank() != 0)
ann->copyWeightsFrom(weights.data(), false);
#endif
nb_sample_update = trajectory.size();
trajectory.clear();
trajectory_end_points.clear();
}
void end_instance(bool learning) override {
if(learning)
episode++;
}
void save(const std::string& path, bool savebest, bool learning) override {
if(savebest) {
if(!learning && this->sum_weighted_reward >= bestever_score) {
bestever_score = this->sum_weighted_reward;
ann->save(path+".actor");
}
} else {
#ifdef PARALLEL_INTERACTION
if(world.rank() == 0 ) {
#endif
ann->save(path+".actor");
vnn->save(path+".critic");
bib::XMLEngine::save<>(normalizer, "normalizer", path+".normalizer.data");
#ifdef PARALLEL_INTERACTION
}
#endif
}
}
void save_run() override {
ann->save("continue.actor");
vnn->save("continue.critic");
struct algo_state st = {episode};
bib::XMLEngine::save(st, "algo_state", "continue.algo_state.data");
}
void load(const std::string& path) override {
ann->load(path+".actor");
#ifndef PARALLEL_INTERACTION
vnn->load(path+".critic");
#else
if (world.rank() == 0)
vnn->load(path+".critic");
#endif
bib::XMLEngine::load<>(normalizer, "normalizer", path+".normalizer.data");
}
void load_previous_run() override {
ann->load("continue.actor");
vnn->load("continue.critic");
auto p3 = bib::XMLEngine::load<struct algo_state>("algo_state", "continue.algo_state.data");
episode = p3->episode;
delete p3;
}
double criticEval(const std::vector<double>&, const std::vector<double>&) override {
LOG_INFO("not implemented");
return 0;
}
arch::Policy<NN>* getCopyCurrentPolicy() override {
// return new arch::Policy<MLP>(new MLP(*ann) , gaussian_policy ? arch::policy_type::GAUSSIAN : arch::policy_type::GREEDY, noise, decision_each);
return nullptr;
}
uint getGoalSize(){
return goal_size;
}
bool sparse_reward(const std::vector<double>& a, const std::vector<double>& b) {
double sum = 0.f;
for (int i=0;i<a.size();i++){
double diff = a[i] - b[i];
sum += diff*diff;
}
sum = std::sqrt(sum);
return sum < 0.05f;
};
double dense_reward(const std::vector<double>& goal_achieved, const std::vector<double>& desired_goal,
const std::vector<double>& next_goal_achieved, const std::vector<double>& next_desired_goal,
const std::vector<double>& observation, const std::vector<double>& next_observation) {
if (sparse_reward(goal_achieved, desired_goal))
return 0.f;
std::vector<double> mid_goal(3, 0.f);
mid_goal[0] = next_goal_achieved[0] - (next_desired_goal[0] - next_goal_achieved[0] > 0.f ? 1.f : -1.f)*0.04f;
mid_goal[1] = next_goal_achieved[1] - (next_desired_goal[1] - next_goal_achieved[1] > 0.f ? 1.f : -1.f)*0.04f;
mid_goal[2] = next_goal_achieved[2] + 0.07f;
double dist_obj_hand;
{
std::vector<double> diff(3, 0.f);
std::transform(mid_goal.begin(), mid_goal.end(), observation.begin() + 3, diff.begin(), std::minus<double>());
dist_obj_hand = bib::Utils::euclidien_dist_ref(diff, 0.f)*3.f;
}
{
std::vector<double> diff(3, 0.f);
std::transform(mid_goal.begin(), mid_goal.end(), next_observation.begin() + 3, diff.begin(), std::minus<double>());
dist_obj_hand -= bib::Utils::euclidien_dist_ref(diff, 0.f)*3.f;
}
double dist_goal;
{
std::vector<double> diff(3, 0.f);
std::transform(goal_achieved.begin(), goal_achieved.end(), desired_goal.begin(), diff.begin(), std::minus<double>());
dist_goal = bib::Utils::euclidien_dist_ref(diff, 0.f)*3.f;
}
{
std::vector<double> diff(3, 0.f);
std::transform(next_goal_achieved.begin(), next_goal_achieved.end(), next_desired_goal.begin(), diff.begin(), std::minus<double>());
dist_goal -= bib::Utils::euclidien_dist_ref(diff, 0.f)*3.f;
}
if(dist_goal < 0.00005 and dist_goal >= 0.000000001)
dist_goal = 0.f;
double r = dist_obj_hand + 100*dist_goal;
if (r > 0.5)
r = 0.5;
else if (r < -0.5)
r = -0.5;
return -1. + 0.5 + r;
}
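  // In symbols, the shaped reward above is
  //
  //   r = -0.5 + clip( [d_t(hand, m) - d_{t+1}(hand, m)]
  //                    + 100 [d_t(g, g*) - d_{t+1}(g, g*)], -0.5, 0.5 )
  //
  // where m is the mid-goal point offset from the object, g / g* the achieved
  // and desired goals, d(.,.) the Euclidean distance scaled by 3 as in the
  // code, and r = 0 whenever the sparse success condition already holds.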
#ifdef PARALLEL_INTERACTION
int getMPIrank() {
return world.rank();
}
#endif
protected:
#ifndef PARALLEL_INTERACTION
void _display(std::ostream& out) const override {
out << std::setw(12) << std::fixed << std::setprecision(10) << this->sum_weighted_reward/this->gamma << " " << this->sum_reward <<
" " << std::setw(8) << std::fixed << std::setprecision(5) << vnn->error() << " " << noise << " " << nb_sample_update <<
" " << std::setprecision(3) << ratio_valid_advantage << " " << vnn->weight_l1_norm() << " " << ann->weight_l1_norm(true);
}
// Octave helper to plot the dumped learning statistics:
//   clear all; close all; wndw = 10; X=load('0.learning.data'); X=filter(ones(wndw,1)/wndw, 1, X);
//   startx=0; starty=800; width=350; height=350;
//   figure('position',[startx,starty,width,height]); plot(X(:,3), "linewidth", 2); xlabel('learning episode', "fontsize", 16); ylabel('sum rewards', "fontsize", 16); startx+=width;
//   figure('position',[startx,starty,width,height]); plot(X(:,9), "linewidth", 2); xlabel('learning episode', "fontsize", 16); ylabel('beta', "fontsize", 16); startx+=width;
//   figure('position',[startx,starty,width,height]); plot(X(:,8), "linewidth", 2); xlabel('learning episode', "fontsize", 16); ylabel('valid adv', "fontsize", 16); ylim([0, 1]); startx+=width;
//   figure('position',[startx,starty,width,height]); plot(X(:,11), "linewidth", 2); hold on; plot(X(:,12), "linewidth", 2, "color", "red"); legend("critic", "actor"); xlabel('learning episode', "fontsize", 16); ylabel('||\theta||_1', "fontsize", 16); startx+=width;
//   figure('position',[startx,starty,width,height]); plot(X(:,10), "linewidth", 2); xlabel('learning episode', "fontsize", 16); ylabel('||\mu_{old}-\mu||_2', "fontsize", 16); startx+=width;
//   figure('position',[startx,starty,width,height]); plot(X(:,14), "linewidth", 2); xlabel('learning episode', "fontsize", 16); ylabel('effective actor. upd.', "fontsize", 16);
void _dump(std::ostream& out) const override {
out << std::setw(25) << std::fixed << std::setprecision(22) << this->sum_weighted_reward/this->gamma << " " <<
this->sum_reward << " " << std::setw(8) << std::fixed << std::setprecision(5) << vnn->error() << " " <<
nb_sample_update << " " << std::setprecision(3) << ratio_valid_advantage << " " << std::setprecision(10) <<
mean_beta << " " << conserved_l2dist << " " << std::setprecision(3) << vnn->weight_l1_norm() << " " <<
ann->weight_l1_norm(true) << " " << std::setprecision(6) << posdelta_mean << " " << number_effective_actor_update;
}
#else
void _dump(std::ostream& out) const override {
out << std::setw(25) << std::fixed << std::setprecision(22) << this->sum_weighted_reward/this->gamma << " " <<
this->sum_reward << " " << std::setw(8) << std::fixed << std::setprecision(5) << (world.rank() == 0 ? vnn->error() : 0) << " " <<
nb_sample_update << " " << std::setprecision(3) << ratio_valid_advantage << " " << std::setprecision(10) <<
mean_beta << " " << conserved_l2dist << " " << std::setprecision(3) << (world.rank() == 0 ? vnn->weight_l1_norm() : 0) << " " <<
ann->weight_l1_norm(true) << " " << std::setprecision(6) << posdelta_mean << " " << number_effective_actor_update;
}
void _display(std::ostream& out) const override {
out << std::setw(12) << std::fixed << std::setprecision(10) << this->sum_weighted_reward/this->gamma << " " << this->sum_reward <<
" " << std::setw(8) << std::fixed << std::setprecision(5) << (world.rank() == 0 ? vnn->error() : 0) << " " << noise << " " << nb_sample_update <<
" " << std::setprecision(3) << ratio_valid_advantage << " " << (world.rank() == 0 ? vnn->weight_l1_norm() : 0) << " " << ann->weight_l1_norm(true);
}
#endif
private:
uint nb_sensors;
uint episode = 1;
uint step = 0;
double noise, noise2, noise3;
uint gaussian_policy;
bool gae, ignore_poss_ac, conserve_beta, disable_trust_region, disable_cac;
uint number_fitted_iteration, stoch_iter_actor, stoch_iter_critic;
uint actor_output_layer_type, hidden_layer_type, momentum;
double lambda, beta_target;
double conserved_beta = 0.0001f;
double mean_beta = 0.f;
double conserved_l2dist = 0.f;
int number_effective_actor_update = 0;
std::shared_ptr<std::vector<double>> last_action;
std::shared_ptr<std::vector<double>> last_pure_action;
std::vector<double> last_state;
std::vector<double> last_goal_achieved;
double alpha_v, alpha_a;
std::deque<sample> trajectory;
std::deque<int> trajectory_end_points;
NN* ann;
NN* ann_noblob;
NN* vnn = nullptr;
std::vector<uint>* hidden_unit_v;
std::vector<uint>* hidden_unit_a;
caffe::Blob<double> empty_action; //dummy action because C++ cannot accept a null reference
double bestever_score;
int update_each_episode;
bib::OrnsteinUhlenbeckNoise<double>* oun = nullptr;
float ratio_valid_advantage=0;
int nb_sample_update = 0;
double posdelta_mean = 0;
//hindsight
uint goal_size;
uint goal_start;
uint hindsight_nb_destination;
//v trace
double pbar = 1;
double cbar = 1;
bib::OnlineNormalizer normalizer;
#ifdef PARALLEL_INTERACTION
boost::mpi::communicator world;
#endif
struct algo_state {
uint episode;
friend class boost::serialization::access;
template <typename Archive>
void serialize(Archive& ar, const unsigned int) {
ar& BOOST_SERIALIZATION_NVP(episode);
}
};
};
#endif
|
{"hexsha": "037b859947bba1705eaab977f7c2828778b9efa3", "size": 47034, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "agent/cacla/include/HPeNFACAg.hpp", "max_stars_repo_name": "matthieu637/ddrl", "max_stars_repo_head_hexsha": "a454d09a3ac9be5db960ff180b3d075c2f9e4a70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 27.0, "max_stars_repo_stars_event_min_datetime": "2017-11-27T09:32:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-02T13:50:23.000Z", "max_issues_repo_path": "agent/cacla/include/HPeNFACAg.hpp", "max_issues_repo_name": "matthieu637/ddrl", "max_issues_repo_head_hexsha": "a454d09a3ac9be5db960ff180b3d075c2f9e4a70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10.0, "max_issues_repo_issues_event_min_datetime": "2018-10-09T14:39:14.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-10T15:01:00.000Z", "max_forks_repo_path": "agent/cacla/include/HPeNFACAg.hpp", "max_forks_repo_name": "matthieu637/ddrl", "max_forks_repo_head_hexsha": "a454d09a3ac9be5db960ff180b3d075c2f9e4a70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2019-05-16T09:14:15.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-15T14:35:40.000Z", "avg_line_length": 42.372972973, "max_line_length": 1303, "alphanum_fraction": 0.6250797296, "num_tokens": 12430}
|
#!/usr/bin/env python3
import rospy
import numpy as np
from sensor_msgs.msg import PointCloud2, PointField
from sensor_msgs import point_cloud2
from visualization_msgs.msg import Marker, MarkerArray
import open3d as o3d
import matplotlib.pyplot as plt
class Clustering(object):
def __init__(self):
# ROS Subscriber
self.pub_markers = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=1)
self.sub_points = rospy.Subscriber("/velodyne1/velodyne_points", PointCloud2, self.cb_points, queue_size=1)
self.pub_points = rospy.Publisher('clustering_points', PointCloud2, queue_size=1)
self.raw_point = []
self.stamp = None
self.header = None
print("clustering init done")
def cb_points(self,msg):
# PointCloud2 to numpy
cloud_points = []
for p in point_cloud2.read_points(msg, field_names = ("x", "y", "z"), skip_nans=True):
if (p[0]>0) and (p[1]<2) and (p[1]>-2) and (p[2]<1) and (p[2]>-0.5):
cloud_points.append(p)
self.raw_point = np.array(cloud_points)
self.header = msg.header
# # downsample
# pcd = o3d.geometry.PointCloud()
# pcd.points = o3d.utility.Vector3dVector(np.array(cloud_points))
# downpcd = pcd.voxel_down_sample(voxel_size=0.1)
# raw_point = np.asarray(downpcd.points)
#
# print(len(raw_point))
def xyz_to_marker(self, x, y, z, id, color=[1, 0, 0],header=None):
marker = Marker()
if header: marker.header = header
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.pose.orientation.w = 1
marker.pose.position.x = x
marker.pose.position.y = y
marker.pose.position.z = z
marker.id = id
marker.scale.x = 0.2
marker.scale.y = 0.2
marker.scale.z = 0.2
marker.color.a = 1.0
marker.color.r = color[0]
marker.color.g = color[1]
marker.color.b = color[2]
return marker
def xyzrgb_array_to_pointcloud2(self, points, colors, header=None):
'''
Create a sensor_msgs.PointCloud2 from an array
of points.
'''
msg = PointCloud2()
assert points.shape == colors.shape
if header: msg.header = header
if len(points.shape) == 3:
# organized cloud: record height/width, then flatten before packing
msg.height = points.shape[1]
msg.width = points.shape[0]
points = points.reshape(-1, 3)
colors = colors.reshape(-1, 3)
else:
msg.height = 1
msg.width = len(points)
xyzrgb = np.array(np.hstack([points, colors]), dtype=np.float32)
msg.fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('r', 12, PointField.FLOAT32, 1),
PointField('g', 16, PointField.FLOAT32, 1),
PointField('b', 20, PointField.FLOAT32, 1)
]
msg.is_bigendian = False
msg.point_step = 24
msg.row_step = msg.point_step * msg.width
msg.is_dense = True
msg.data = xyzrgb.tobytes()
return msg
def run_clustering(self):
raw_point = self.raw_point
header = self.header
try:
# numpy to open3d.PointCloud
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(raw_point)
with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:
labels = np.array(pcd.cluster_dbscan(eps=0.25, min_points=15, print_progress=True))
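# labels: one cluster id per point; -1 marks noise (Open3D DBSCAN convention)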
max_label = labels.max()
print(f"point cloud has {max_label + 1} clusters")
color_list = plt.get_cmap("tab20").colors
if max_label + 1 > 0:
group = [ [] for i in range(max_label + 1) ]
pub_points = []
pub_colors = []
# find marker xyz
for num,point in enumerate(pcd.points):
(group[labels[num]]).append( np.asarray(point) )
markerArray = MarkerArray()
markerArray.markers = []
id = 0
for group_index in range(max_label+1):
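# keep only large clusters; the 1500-point threshold is an empirical choice here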
if len(group[group_index])>1500:
xyz = np.mean(np.array(group[group_index]), axis=0)
print('center',group_index+1,'at : ',xyz[0],xyz[1],xyz[2])
id +=1
color = list(color_list[group_index])
markerArray.markers.append( self.xyz_to_marker(xyz[0],xyz[1],xyz[2],id=id,color=color,header=header) )
pub_points = np.concatenate((pub_points, np.array(group[group_index]))) if len(pub_points) else np.array(group[group_index])
pub_colors = np.concatenate((pub_colors, np.array([color]*len(group[group_index])))) if len(pub_colors) else np.array([color]*len(group[group_index]))
print(len(markerArray.markers))
self.pub_markers.publish(markerArray)
pub_msg = self.xyzrgb_array_to_pointcloud2( pub_points, pub_colors*255,header=header )
self.pub_points.publish(pub_msg)
except Exception as err:
rospy.logerr('clustering failed: %s', err)
if __name__ == "__main__":
rospy.init_node("clustering")
clu = Clustering()
rate = rospy.Rate(10)  # throttle the processing loop
while not rospy.is_shutdown():
clu.run_clustering()
rate.sleep()
|
{"hexsha": "064aefe7ff0a5195bbcf19f90149b7041d565167", "size": 4992, "ext": "py", "lang": "Python", "max_stars_repo_path": "open3d-ros/catkin_ws/src/open3d_ros/src/clustering_rosbag.py", "max_stars_repo_name": "Yelloooowww/Open3D-ML", "max_stars_repo_head_hexsha": "8029b7e52bb42f4ac73cf20d9da1734d193a1c2c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "open3d-ros/catkin_ws/src/open3d_ros/src/clustering_rosbag.py", "max_issues_repo_name": "Yelloooowww/Open3D-ML", "max_issues_repo_head_hexsha": "8029b7e52bb42f4ac73cf20d9da1734d193a1c2c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "open3d-ros/catkin_ws/src/open3d_ros/src/clustering_rosbag.py", "max_forks_repo_name": "Yelloooowww/Open3D-ML", "max_forks_repo_head_hexsha": "8029b7e52bb42f4ac73cf20d9da1734d193a1c2c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7142857143, "max_line_length": 180, "alphanum_fraction": 0.6987179487, "include": true, "reason": "import numpy", "num_tokens": 1479}
|
subroutine r8mktricub(x,nx,y,ny,z,nz,f,nf2,nf3,
> ibcxmin,bcxmin,ibcxmax,bcxmax,inb1x,
> ibcymin,bcymin,ibcymax,bcymax,inb1y,
> ibczmin,bczmin,ibczmax,bczmax,inb1z,
> ilinx,iliny,ilinz,ier)
c
c setup a tricubic spline; store coefficients in compact form
c (as per suggestion of L. Zakharov, PPPL, Feb. 1999)
C 8 coeffs per (x,y,z) grid point:
C f,fxx,fyy,fzz,fxxyy,fxxzz,fyyzz,fxxyyzz
C
C dmc -- modified Feb 2004 -- rewritten to compute coefficients
C directly rather than by conversion from the non-compact representation
C (to reduce cpu and memory cost)
C
C
C input:
IMPLICIT NONE
INTEGER, PARAMETER :: R8=SELECTED_REAL_KIND(12,100)
integer nx ! length of x vector
integer ny ! length of y vector
integer nz ! length of z vector
REAL*8 x(nx) ! x vector, strict ascending
REAL*8 y(ny) ! y vector, strict ascending
REAL*8 z(nz) ! z vector, strict ascending
c
integer nf2 ! 2nd dim. of f array, nf2.ge.nx
integer nf3 ! 3rd dim. of f array, nf3.ge.ny
c
c input/output:
c
REAL*8 f(8,nf2,nf3,nz) ! data and spline coefficients
c
C on input: f(1,i,j,k) = f(x(i),y(j),z(k))
C on output: f(1,i,j,k) unchanged
C f(2,i,j,k) = d2f/dx2(x(i),y(j),z(k))
C f(3,i,j,k) = d2f/dy2(x(i),y(j),z(k))
C f(4,i,j,k) = d2f/dz2(x(i),y(j),z(k))
C f(5,i,j,k) = d4f/dx2dy2(x(i),y(j),z(k))
C f(6,i,j,k) = d4f/dx2dz2(x(i),y(j),z(k))
C f(7,i,j,k) = d4f/dy2dz2(x(i),y(j),z(k))
C f(8,i,j,k) = d6f/dx2dy2dz2(x(i),y(j),z(k))
C
C there is a rather Hermite-like interpolation formula to go with
C this -- see evtricub.for. Also the bicubic formula is given in
C mkbicubw.for; the tricubic formula is precisely analogous.
C
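C example call (hypothetical 11x11x11 grid; "not a knot" BCs everywhere,
C so the bc arrays are never referenced and dummy scalars suffice):
C
C      real*8 xx(11),yy(11),zz(11),ff(8,11,11,11),zdum
C      integer jlinx,jliny,jlinz,jer
C      call r8mktricub(xx,11,yy,11,zz,11,ff,11,11,
C     >     0,zdum,0,zdum,1,
C     >     0,zdum,0,zdum,1,
C     >     0,zdum,0,zdum,1,
C     >     jlinx,jliny,jlinz,jer)
C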
C boundary condition data
C inputs:
integer inb1x ! 1st dim of xmin & xmax bc arrays
integer inb1y ! 1st dim of ymin & ymax bc arrays
integer inb1z ! 1st dim of zmin & zmax bc arrays
C
integer ibcxmin,ibcxmax ! BC type flag @xmin, xmax
integer ibcymin,ibcymax ! BC type flag @ymin, ymax
integer ibczmin,ibczmax ! BC type flag @zmin, zmax
C
REAL*8 bcxmin(inb1x,nz),bcxmax(inb1x,nz) ! xmin & xmax BC data, ny x nz
REAL*8 bcymin(inb1y,nz),bcymax(inb1y,nz) ! ymin & ymax BC data, nx x nz
REAL*8 bczmin(inb1z,ny),bczmax(inb1z,ny) ! zmin & zmax BC data, nx x ny
c
c where BC data is not required, dummy scalars may be passed.
C the ibc* flags determine whether BC data isneeded.
c
c BC data: bcxmin & bcxmax: BC vs. y,z @xmin,xmax
C bcymin & bcymax: BC vs. x,z @ymin,ymax
C bczmin & bczmax: BC vs. x,y @zmin,zmax
C
c ibcxmin -- indicator for boundary condition at xmin=x(1):
c bcxmin(...) -- boundary condition data
c =-1 -- use periodic boundary condition
c =0 -- use "not a knot"
c =1 -- match slope, specified at x(1),y(iy),z(iz) by bcxmin(iy,iz)
c =2 -- match 2nd derivative, specified at x(1),y(iy),z(iz)
c by bcxmin(iy,iz)
c =3 -- boundary condition is slope=0 (df/dx=0) at x(1), all y(j)
c =4 -- boundary condition is d2f/dx2=0 at x(1), all y(j)
c =5 -- df/dx BC from 1st divided difference
c =6 -- d2f/dx2 BC from 2nd divided difference (parabolic fit)
c =7 -- d3f/dx3 BC from 3rd divided difference (cubic fit)
c ***NOTE bcxmin(...) referenced ONLY if ibcxmin=1 or ibcxmin=2
c
c ibcxmax -- indicator for boundary condition at x(nx):
c bcxmax(...) -- boundary condition data
c (interpretation as with ibcxmin, bcxmin)
c NOTE: if ibcxmin=-1 then the periodic BC applies on both sides
c and ibcxmax, bcxmax are ignored.
c inb1x -- 1st dimension of bcxmin, bcxmax: if ibcxmin or ibcxmax .gt. 0
c this must be .ge. ny.
c
c interpretation of ibcymin,bcymin,ibcymax,bcymax,inb1y
c is same as with ibcxmin,...
c
c interpretation of ibczmin,bczmin,ibczmax,bczmax,inb1z
c is same as with ibcxmin,...
c
c the explicit bdy condition arrays are referenced only if the
c corresponding "ibc" flag values are set to 1 or 2.
c
c output:
integer ilinx ! x vector equal spacing flag
integer iliny ! y vector equal spacing flag
integer ilinz ! z vector equal spacing flag
c
c ilinx -- =1 on output if x(nx) pts are nearly evenly spaced (tol=1e-3)
c iliny -- =1 on output if y(ny) evenly spaced (tol=1e-3)
c ilinz -- =1 on output if z(nz) evenly spaced (tol=1e-3)
c
integer ier ! exit code
c ier -- completion code, 0 for normal
c
C-----------------------------------------------------
c workspace **dynamic allocation**
C f90 dynamic array
C
REAL*8, dimension(:,:,:), allocatable :: fbicub ! bicubic subsection
REAL*8, dimension(:,:), allocatable :: fwk ! work array
REAL*8, dimension(:), allocatable :: bcx1,bcx2,bcy1,bcy2 ! BCs for mkbicub
c
REAL*8, dimension(:,:,:,:), allocatable :: fcorr ! correction spline
REAL*8, dimension(:,:), allocatable :: bcc1,bcc2 ! correction BCs
c
integer iflg,ierx,iery,ierz
integer ix,iy,iz
c
REAL*8 ztol
REAL*8 zbc1,zbc2,hz
integer ibc1,ibc2
c
data ztol/1.0E-3_r8/
c-----------------------------------------------------
c
ier=0
c
iflg=0
c
c check z bdy condition "linearity"
c
if(ibczmin.ne.-1) then
if((ibczmin.eq.1).or.(ibczmin.eq.2)) then
do iy=1,ny
do ix=1,nx
if(bczmin(ix,iy).ne.0.0_r8) iflg=1
enddo
enddo
endif
if((ibczmax.eq.1).or.(ibczmax.eq.2)) then
do iy=1,ny
do ix=1,nx
if(bczmax(ix,iy).ne.0.0_r8) iflg=1
enddo
enddo
endif
endif
c
if(nx.lt.2) then
write(6,'('' ?mktricub: at least 2 x points required.'')')
ier=1
endif
if(ny.lt.2) then
write(6,'('' ?mktricub: need at least 2 y points.'')')
ier=1
endif
if(nz.lt.2) then
write(6,'('' ?mktricub: need at least 2 z points.'')')
ier=1
endif
c
if((ibcxmin.eq.1).or.(ibcxmax.eq.1).or.(ibcxmin.eq.2).or.
> (ibcxmax.eq.2)) then
if(inb1x.lt.ny) then
ier=1
write(6,
>'('' ?mktricub: 1st dim of bcxmin/max arrays .lt. ny'')')
endif
endif
c
if((ibcymin.eq.1).or.(ibcymax.eq.1).or.(ibcymin.eq.2).or.
> (ibcymax.eq.2)) then
if(inb1y.lt.nx) then
ier=1
write(6,
>'('' ?mktricub: 1st dim of bcymin/max arrays .lt. nx'')')
endif
endif
c
if((ibczmin.eq.1).or.(ibczmax.eq.1).or.(ibczmin.eq.2).or.
> (ibczmax.eq.2)) then
if(inb1z.lt.nx) then
ier=1
write(6,
>'('' ?mktricub: 1st dim of bczmin/max arrays .lt. nx'')')
endif
endif
c
call ibc_ck(ibcxmin,'mktricub','xmin',-1,7,ier)
if(ibcxmin.ge.0) call ibc_ck(ibcxmax,'mktricub','xmax',0,7,ier)
c
call ibc_ck(ibcymin,'mktricub','ymin',-1,7,ier)
if(ibcymin.ge.0) call ibc_ck(ibcymax,'mktricub','ymax',0,7,ier)
c
call ibc_ck(ibczmin,'mktricub','zmin',-1,7,ier)
if(ibczmin.ge.0) call ibc_ck(ibczmax,'mktricub','zmax',0,7,ier)
c
c check ilinx & x vector
c
call r8splinck(x,nx,ilinx,ztol,ierx)
if(ierx.ne.0) ier=2
c
if(ier.eq.2) then
write(6,'('' ?mktricub: x axis not strict ascending'')')
endif
c
c check iliny & y vector
c
call r8splinck(y,ny,iliny,ztol,iery)
if(iery.ne.0) ier=3
c
if(ier.eq.3) then
write(6,'('' ?mktricub: y axis not strict ascending'')')
endif
c
c check ilinz & z vector
c
call r8splinck(z,nz,ilinz,ztol,ierz)
if(ierz.ne.0) ier=4
c
if(ier.eq.4) then
write(6,'('' ?mktricub: z axis not strict ascending'')')
endif
c
if(ier.ne.0) return
c
c------------------------------------
c 1. compute (x,y) bicubic splines using mkbicub
c
allocate(fbicub(4,nx,ny))
allocate(bcx1(ny),bcx2(ny),bcy1(nx),bcy2(nx))
bcx1=0.0_r8; bcx2=0.0_r8; bcy1=0.0_r8; bcy2=0.0_r8
c
do iz=1,nz
if(ibcxmin.ne.-1) then
if((ibcxmin.eq.1).or.(ibcxmin.eq.2)) then
bcx1(1:ny)=bcxmin(1:ny,iz)
endif
if((ibcxmax.eq.1).or.(ibcxmax.eq.2)) then
bcx2(1:ny)=bcxmax(1:ny,iz)
endif
endif
if(ibcymin.ne.-1) then
if((ibcymin.eq.1).or.(ibcymin.eq.2)) then
bcy1(1:nx)=bcymin(1:nx,iz)
endif
if((ibcymax.eq.1).or.(ibcymax.eq.2)) then
bcy2(1:nx)=bcymax(1:nx,iz)
endif
endif
c
fbicub(1,1:nx,1:ny) = f(1,1:nx,1:ny,iz)
c
call r8mkbicub(x,nx,y,ny,fbicub,nx,
> ibcxmin,bcx1,ibcxmax,bcx2,
> ibcymin,bcy1,ibcymax,bcy2,
> ilinx,iliny,ier)
if(ier.ne.0) return
c
f(2:3,1:nx,1:ny,iz) = fbicub(2:3,1:nx,1:ny) ! fxx, fyy
f(5,1:nx,1:ny,iz) = fbicub(4,1:nx,1:ny) ! fxxyy
c
enddo
c
deallocate(fbicub,bcx1,bcx2,bcy1,bcy2)
c
c 2. homogeneous spline in z direction; inhomogeneous BC imposed later
c if necessary
c
zbc1=0.0_r8
zbc2=0.0_r8
ibc1=ibczmin
ibc2=ibczmax
if(iflg.eq.1) then
if((ibczmin.eq.1).or.(ibczmin.eq.2)) ibc1=0
if((ibczmax.eq.1).or.(ibczmax.eq.2)) ibc2=0
endif
c
allocate(fwk(2,nz))
c
do iy=1,ny
do ix=1,nx
fwk(1,1:nz) = f(1,ix,iy,1:nz)
call r8mkspline(z,nz,fwk,
> ibc1,zbc1,ibc2,zbc2,ilinz,ier)
if(ier.ne.0) return
f(4,ix,iy,1:nz) = fwk(2,1:nz) ! fzz
fwk(1,1:nz) = f(2,ix,iy,1:nz)
call r8mkspline(z,nz,fwk,
> ibc1,zbc1,ibc2,zbc2,ilinz,ier)
if(ier.ne.0) return
f(6,ix,iy,1:nz) = fwk(2,1:nz) ! fxxzz
fwk(1,1:nz) = f(3,ix,iy,1:nz)
call r8mkspline(z,nz,fwk,
> ibc1,zbc1,ibc2,zbc2,ilinz,ier)
if(ier.ne.0) return
f(7,ix,iy,1:nz) = fwk(2,1:nz) ! fyyzz
fwk(1,1:nz) = f(5,ix,iy,1:nz)
call r8mkspline(z,nz,fwk,
> ibc1,zbc1,ibc2,zbc2,ilinz,ier)
if(ier.ne.0) return
f(8,ix,iy,1:nz) = fwk(2,1:nz) ! fxxyyzz
enddo
enddo
c
deallocate(fwk)
c
if(iflg.eq.1) then
c
c 3. inhomogeneous BC correction
c
allocate(fwk(2,max(nx,ny,nz)))
allocate(bcc1(nx,ny),bcc2(nx,ny))
allocate(fcorr(4,nx,ny,nz))
c
c correction BCs
c
do iy=1,ny
do ix=1,nx
bcc1(ix,iy)=0.0_r8
if(ibczmin.eq.1) then
hz=z(2)-z(1)
bcc1(ix,iy)=(f(1,ix,iy,2)-f(1,ix,iy,1))/hz +
> hz*(-2*f(4,ix,iy,1)-f(4,ix,iy,2))/6
bcc1(ix,iy)=bczmin(ix,iy)-bcc1(ix,iy)
else if(ibczmin.eq.2) then
bcc1(ix,iy)=bczmin(ix,iy)-f(4,ix,iy,1)
endif
enddo
enddo
c
do iy=1,ny
do ix=1,nx
bcc2(ix,iy)=0.0_r8
if(ibczmax.eq.1) then
hz=z(nz)-z(nz-1)
bcc2(ix,iy)=(f(1,ix,iy,nz)-f(1,ix,iy,nz-1))/hz +
> hz*(f(4,ix,iy,nz-1)+2*f(4,ix,iy,nz))/6
bcc2(ix,iy)=bczmax(ix,iy)-bcc2(ix,iy)
else if(ibczmax.eq.2) then
bcc2(ix,iy)=bczmax(ix,iy)-f(4,ix,iy,nz)
endif
enddo
enddo
c
fwk(1,1:nz)=0.0_r8 ! values are all zero, only BC is set...
do iy=1,ny
do ix=1,nx
call r8mkspline(z,nz,fwk,
> ibczmin,bcc1(ix,iy),ibczmax,bcc2(ix,iy),ilinz,ier)
if(ier.ne.0) return
fcorr(1,ix,iy,1:nz)=fwk(2,1:nz) ! fzz-correction
enddo
enddo
c
c higher order corrections
c
zbc1=0.0_r8
zbc2=0.0_r8
c
do iz=1,nz
do iy=1,ny
fwk(1,1:nx)=fcorr(1,1:nx,iy,iz)
call r8mkspline(x,nx,fwk,
> ibcxmin,zbc1,ibcxmax,zbc2,ilinx,ier)
if(ier.ne.0) return
fcorr(2,1:nx,iy,iz)=fwk(2,1:nx) ! fxxzz-correction
enddo
enddo
c
do iz=1,nz
do ix=1,nx
fwk(1,1:ny)=fcorr(1,ix,1:ny,iz)
call r8mkspline(y,ny,fwk,
> ibcymin,zbc1,ibcymax,zbc2,iliny,ier)
if(ier.ne.0) return
fcorr(3,ix,1:ny,iz)=fwk(2,1:ny) ! fyyzz-correction
fwk(1,1:ny)=fcorr(2,ix,1:ny,iz)
call r8mkspline(y,ny,fwk,
> ibcymin,zbc1,ibcymax,zbc2,iliny,ier)
if(ier.ne.0) return
fcorr(4,ix,1:ny,iz)=fwk(2,1:ny) ! fxxyyzz-correction
enddo
enddo
c
c apply correction
c
do iz=1,nz
do iy=1,ny
do ix=1,nx
f(4,ix,iy,iz)=f(4,ix,iy,iz)+fcorr(1,ix,iy,iz)
f(6,ix,iy,iz)=f(6,ix,iy,iz)+fcorr(2,ix,iy,iz)
f(7,ix,iy,iz)=f(7,ix,iy,iz)+fcorr(3,ix,iy,iz)
f(8,ix,iy,iz)=f(8,ix,iy,iz)+fcorr(4,ix,iy,iz)
enddo
enddo
enddo
c
deallocate(fwk,fcorr,bcc1,bcc2)
c
endif
c
c that's all
c
return
end
|
{"hexsha": "a60026c7ef48d7c401b794ac16b37aa6a30c62eb", "size": 13815, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gfile_Bfield/PSPLINE/Pspline/r8mktricub.f", "max_stars_repo_name": "ORNL-Fusion/RFSciDAC-testing", "max_stars_repo_head_hexsha": "c2fa44e00ce8e0af4be6fa662a9e8c94d6c6f60e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2020-05-08T01:47:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T10:35:28.000Z", "max_issues_repo_path": "gfile_Bfield/PSPLINE/Pspline/r8mktricub.f", "max_issues_repo_name": "ORNL-Fusion/RFSciDAC-testing", "max_issues_repo_head_hexsha": "c2fa44e00ce8e0af4be6fa662a9e8c94d6c6f60e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 77, "max_issues_repo_issues_event_min_datetime": "2020-05-08T07:18:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:20:33.000Z", "max_forks_repo_path": "gfile_Bfield/PSPLINE/Pspline/r8mktricub.f", "max_forks_repo_name": "ORNL-Fusion/RFSciDAC-testing", "max_forks_repo_head_hexsha": "c2fa44e00ce8e0af4be6fa662a9e8c94d6c6f60e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-10T13:47:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T12:53:43.000Z", "avg_line_length": 32.8147268409, "max_line_length": 80, "alphanum_fraction": 0.5204487875, "num_tokens": 5137}
|
from __future__ import print_function
# try to connect features that really ought to be connected:
from matplotlib.pyplot import *
from numpy import *
import numpy as np
import shapely.wkb,shapely.geometry
try:
from osgeo import ogr
except ImportError:
import ogr
import sys
import os.path
import six
from numpy.linalg import norm
from . import wkb2shp
from .. import utils
try:
# 2017-07-01: RH - this is most likely not true any more
#if sys.platform == 'darwin':
# print( "MacOS prepared geometries appear to be buggy.")
# raise ImportError("Intential abort on OSX")
from shapely import prepared
except ImportError:
prepared = None
print("Prepared geometries not available - tests will be slow")
import logging
logging.basicConfig(level=logging.INFO)
log=logging.getLogger('join_features')
def progress_printer(str,steps_done=None,steps_total=None):
if steps_done is not None and steps_total is not None:
log.info( "%s -- %d%%"%(str,100.0*steps_done/steps_total) )
elif steps_done:
log.info( "%s -- %d"%(str,steps_done) )
else:
log.info(str)
progress_message = progress_printer
# in a few cases (<0.01% ??) the prepared geometry will report a different answer
# than the real geometry. But the prepared geometries are an order of magnitude
# faster...
trust_prepared = True
def merge_lines(layer=None,segments=None):
""" Given an ogr LineString layer, merge linestrings by matching
endpoints, and return a list of arrays of points.
if layer is given, it should be an ogr LineString layer
if segments is given, it should be a list of numpy arrays, where
each array is [N,2] giving points along a path.
this version only handles *exact* matches between endpoints
"""
# a hash that maps x,y values to feature ids
endpoints = {}
features = {} # map feature id to a list of points
# map old feature ids to new ones:
remapper = {}
progress_message("Reading features")
if layer:
layer.ResetReading()
else:
next_seg = lambda it=enumerate(segments): six.next(it)
while 1:
if layer:
feat = layer.GetNextFeature()
if not feat:
break
fid = feat.GetFID()
geo = feat.GetGeometryRef() # should be a linestring
if geo.GetGeometryName() != 'LINESTRING':
raise Exception("All features must be linestrings")
# read the points into a numpy array:
points = array(shapely.wkb.loads(geo.ExportToWkb()).coords)
else:
try:
fid,points = next_seg()
except StopIteration:
break
features[fid] = points
start_point = tuple(points[0])
end_point = tuple(points[-1])
if start_point == end_point:
continue
if start_point not in endpoints:
endpoints[start_point] = []
endpoints[start_point].append(fid)
if end_point not in endpoints:
endpoints[end_point] = []
endpoints[end_point].append(fid)
remapper[fid] = fid
# check on how many things match up:
# almost every point has exactly two features - perfect!
progress_message("%i possible matched features"%len(endpoints))
# toss out endpoints that don't have exactly two matches:
endpoint_list = []
for k in endpoints:
if len(endpoints[k]) == 2:
endpoint_list.append(endpoints[k])
total_pairs = len(endpoint_list)
pairs_processed = 0
# iterate over the end points, merging all exact matches:
for matched_pair in endpoint_list:
fidA,fidB = [remapper[fid] for fid in matched_pair]
if fidA==fidB:
continue
pairs_processed += 1
if pairs_processed%1000==0:
progress_message("Merge lines exact",pairs_processed,total_pairs)
#
coordsA = features[fidA]
coordsB = features[fidB]
# figure out how they go together, and figure out what point needs
# to be redirected from featB:
# also be sure to skip the repeated point
if all(coordsA[0]==coordsB[0]):
coordsC = concatenate((coordsA[::-1],coordsB[1:]))
redirect = coordsB[-1]
elif all(coordsA[-1]==coordsB[0]):
coordsC = concatenate((coordsA,coordsB[1:]))
redirect = coordsB[-1]
elif all(coordsA[0]==coordsB[-1]):
coordsC = concatenate((coordsB,coordsA[1:]))
redirect = coordsB[0]
elif all(coordsA[-1]==coordsB[-1]):
coordsC = concatenate((coordsA[:-1],coordsB[::-1]))
redirect = coordsB[0]
else:
log.error( "No match:" )
log.error( "%s %s"%( fidA,fidB) )
log.error( "%s"%( coordsA[0]) )
log.error( "%s"%( coordsA[-1]) )
log.error( "%s"%( coordsB[0] ) )
log.error( "%s"%( coordsB[-1] ) )
raise Exception("hash says we have a match, but no good match found")
# replace the geometry of featA
features[fidA] = coordsC
for k in remapper.keys():
if remapper[k] == fidB:
remapper[k] = fidA
# and delete featB
del features[fidB]
progress_message("merge completed")
# cast to list for python 3
return list(features.values())
def tolerant_merge_lines(features,tolerance):
""" expects features to be formatted like the output of merge_lines,
i.e. a list of numpy arrays
"""
NO_MATCH =0
FIRST_FIRST=1
FIRST_LAST =2
LAST_FIRST =3
LAST_LAST =4
INIT_MATCH =5 # dummy value to kick-start the loop
closed_already = [ all(feat[0]==feat[-1]) for feat in features]
def check_match(pntsA,pntsB):
if norm(pntsA[0]-pntsB[0]) <= tolerance:
return FIRST_FIRST
elif norm(pntsA[0]-pntsB[-1]) <= tolerance:
return FIRST_LAST
elif norm(pntsA[-1]-pntsB[0]) <= tolerance:
return LAST_FIRST
elif norm(pntsA[-1]-pntsB[-1]) <= tolerance:
return LAST_LAST
else:
return NO_MATCH
# how to do the matching:
# nested loops? match the i-th feature against each jth other feature
# if they match, merge j onto i, set j-th to None, and start scanning
# again to match more features against i-th
for i in range(len(features)):
if features[i] is None:
continue
if closed_already[i]:
continue
progress_message("Merge lines tolerant",i,len(features))
# once we've tried to match the i-th feature against everybody
# after i, there's no reason to look at it again, so the inner
# loop starts at i+1
match = INIT_MATCH
while match:
match = NO_MATCH
# check each subsequent feature
for j in range(i+1,len(features)):
if features[j] is None:
continue # check next j-th
if closed_already[j]:
continue
match = check_match(features[i],
features[j])
# When merging, drop one point from the merge location
# otherwise if they are very close we'll end up with numerical issues
# related to repeated points.
if match==FIRST_FIRST:
features[i] = concatenate((features[i][::-1],features[j][1:]))
elif match==FIRST_LAST:
features[i] = concatenate((features[j],features[i][1:]))
elif match==LAST_FIRST:
features[i] = concatenate((features[i],features[j][1:]))
elif match==LAST_LAST:
features[i] = concatenate((features[i][:-1],features[j][::-1]))
# if we get a match, we just merged the features and can
# remove the j-th feature.
if match != NO_MATCH:
features[j] = None
# at this point, though, our i-th feature has changed and
# requires that we re-process matches against it, so
# with match set non-zero, escape out of the j-loop
# and the while loop will restart the j-loop.
break
# if we fall out of this loop and didn't have a match, we're done
# with the i-th feature, so let the next iteration of the i-loop
# run
# this just eliminates None elements
features = [f for f in features if f is not None]
# Make an additional loop to see if there are rings that we need to close:
for feat in features:
delta = norm(feat[0] - feat[-1])
if delta > 0.0 and delta <= tolerance:
log.info("tolerant_merge: joining a loop - dist = %f"%delta)
feat[-1] = feat[0]
return features
# how many of the features are closed, and return the one that isn't
# since it will define the exterior ring in the output
# if all the rings are closed, return the ring with the greatest area
# and closed_p=True
def clean_degenerate_rings(point_lists,degen_shpname='degenerate_rings.shp'):
""" Given a list of lists of points - filter out point lists
which represent degenerate rings, writing the invalid rings
to a shapefile degen_shpname, and returning a list of only
the valid rings. Unclosed linestrings are passed through.
set degen_shpname to None to disable that output.
"""
degen_lines = []
valid_lists = []
for i in range(len(point_lists)):
point_list = point_lists[i]
if any(point_list[0]!=point_list[-1]):
valid_lists.append(point_list)
else: # closed - check its area
poly = shapely.geometry.Polygon(point_list)
try:
a=poly.area
valid_lists.append(point_list)
except ValueError:
log.error( "degenerate feature: %s"%i )
degen_line = shapely.geometry.LineString(point_list)
degen_lines.append(degen_line)
if degen_shpname is not None and len(degen_lines)>0:
wkb2shp.wkb2shp(degen_shpname,degen_lines,srs_text='EPSG:26910',overwrite=True)
return valid_lists
def find_exterior_ring(point_lists):
open_strings = []
max_area = 0
max_area_id = None
for i in range(len(point_lists)):
point_list = point_lists[i]
if any(point_list[0]!=point_list[-1]):
open_strings.append(i)
else: # closed - check its area
poly = shapely.geometry.Polygon(point_list)
a = poly.area
if a > max_area:
max_area = a
max_area_id = i
if len(open_strings) > 1:
log.error( "Wanted exactly 0 or 1 open strings, got %i"%len(open_strings) )
for i in open_strings:
log.error(" Open string: %s"%( point_lists[i] ) )
raise Exception("Can't figure out who is the exterior ring")
if len(open_strings) == 1:
log.error("Choosing exterior ring based on it being the only open ring")
log.error( "Endpoints: %s"%( point_lists[open_strings[0]][0],point_lists[open_strings[0]][-1] ) )
return open_strings[0],False
else:
log.info( "No open linestrings, resorting to choosing exterior ring by area" )
return max_area_id,True
def arc_to_close_line(points,n_arc_points=40):
""" Given a list of points, return an arc that closes the linestring,
and faces away from the centroid of the points
"""
# Find the centroid of the original points.
geo = shapely.geometry.Polygon(points)
centroid = array(geo.centroid)
# for now, assume a 180 degree arc:
arc_center = (points[0]+points[-1])/2.0
# the arc will get appended to the linestring, so find the initial vector from
# the last point in the linestring:
start_vector = points[-1] - arc_center
arc_center_to_centroid = centroid - arc_center
# if we are going CCW, then
if cross(arc_center_to_centroid, start_vector) > 0:
arc_dir = +1
else:
arc_dir = -1
# how many steps in the arc? ultimately could be tied to the desired spatial
# resolution, or at least that great (since it will get filtered down to the
# desired resolution, but not filtered up)
angles = linspace(0,arc_dir*pi,n_arc_points)
arc_points = zeros((n_arc_points,2),float64)
# rotate the start vector
for i in range(n_arc_points):
angle = angles[i]
xx = cos(angle)
xy = -sin(angle)
yx = sin(angle)
yy = cos(angle)
new_x = start_vector[0]*xx + start_vector[1]*xy
new_y = start_vector[0]*yx + start_vector[1]*yy
arc_points[i] = arc_center + [new_x,new_y]
return arc_points
def lines_to_polygons(new_features,close_arc=False,single_feature=True,force_orientation=True):
"""
single_feature: False is not yet implemented!
returns a list of Polygons
force_orientation: ensure that interior rings have negative signed area
"""
assert single_feature
### Remove non-polygons - still not smart enough to handle duplicate points
new_features = [f for f in new_features if len(f) > 2]
### Find exterior ring
log.info("Finding exterior ring from %d linestrings"%len(new_features))
new_features = clean_degenerate_rings(new_features)
exterior_id,closed_p = find_exterior_ring(new_features)
if close_arc and not closed_p:
### Add an arc to close the exterior ring:
# really this ought to test whether or not it's necessary
# to add the arc.
closing_arc = arc_to_close_line(new_features[exterior_id])
new_features[exterior_id] = concatenate((new_features[exterior_id],closing_arc))
exterior = new_features[exterior_id]
interiors = [new_features[i] for i in range(len(new_features)) if i!=exterior_id]
### Remove features that are not contained by the exterior ring:
ext_poly = shapely.geometry.Polygon(exterior)
if prepared is not None:
prep_ext_poly = prepared.prep(ext_poly)
else:
prep_ext_poly = None
new_interiors = []
extras = [] # features which were not inside exterior, but otherwise valid
for i in range(len(interiors)):
interior = interiors[i]
if i%300==0:
progress_message("Checking for orphan interior features",i,len(interiors))
if force_orientation and (utils.signed_area(interior) > 0):
interior=interior[::-1]
int_poly = shapely.geometry.Polygon(interior)
# spaghetti logic
if prep_ext_poly is None or prep_ext_poly.contains(int_poly):
if prep_ext_poly and trust_prepared:
new_interiors.append(interior)
else:
if ext_poly.contains(int_poly):
new_interiors.append(interior)
else:
if prep_ext_poly is not None:
log.warning( "A feature got through the prepared query, but the real query says it's outside the exterior")
else:
log.debug("Removing a feature that was outside the exterior ring" )
extras.append(interior)
else:
log.debug("Removing a feature that the fast query said was outside the exterior ring")
extras.append(interior)
# create a single polygon feature from all of the rings:
poly_geom = shapely.geometry.Polygon(exterior,new_interiors)
return [poly_geom],extras
####### Running the actual steps ########
def vector_mag(vectors):
"""
vectors: xy vectors, shape [...,2]
return magnitude (L2 norm)
"""
# equivalent to np.linalg.norm(axis=-1), but older numpy doesn't
# allow axis keyword
return np.sqrt(np.sum(vectors**2,axis=-1))
def process_layer(orig_layer,output_name,tolerance=0.0,
create_polygons=False,close_arc=False,
single_feature=True,
remove_duplicates=True):
"""
remove_duplicates: if true, exactly duplicated nodes along a single path will be removed, i.e.
the linestring A-B-B-C will become A-B-C.
single_feature: only save the biggest feature
"""
if isinstance(orig_layer,str):
ods = ogr.Open(orig_layer)
orig_layer = ods.GetLayer(0)
### The actual geometry processing: ###
### <processing>
new_features = merge_lines(orig_layer)
if remove_duplicates:
log.info("Checking the merged features for duplicate points" )
# possibly important here to have the duplicate test more stringent than
# the tolerant_merge_lines.
# also have to be careful about a string of points closely spaced - don't
# want to remove all of them, just enough to keep the minimal spacing above
# tolerance.
short_tol = 0.5*tolerance
for fi in range(len(new_features)):
pnts = new_features[fi]
valid = ones(len(pnts), dtype=bool)
# go with a slower but safer loop here -
last_valid=0
for i in range(1,len(pnts)):
if vector_mag( pnts[last_valid]-pnts[i] ) < short_tol:
if i==len(pnts)-1:
# special case to avoid moving the last vertex
valid[last_valid] = False
last_valid = i
else:
valid[i] = False
else:
last_valid = i
# print "Ring %d: # invalid=%d / %d"%(i,sum(~valid),len(new_features[i]))
new_features[fi] = new_features[fi][valid,:]
if tolerance > 0.0:
new_features = tolerant_merge_lines(new_features,tolerance)
### </processing>
### <output>
if create_polygons:
if single_feature:
geoms,extras = lines_to_polygons(new_features,close_arc=close_arc)
else:
geoms=[]
unmatched=new_features
while len(unmatched):
one_poly,unmatched=lines_to_polygons(unmatched,close_arc=close_arc,single_feature=True)
geoms.append(one_poly[0])
else:
# Line output
geoms = [shapely.geometry.LineString(pnts) for pnts in new_features]
# Write it all out to a shapefile:
progress_message("Writing output")
wkb2shp.wkb2shp(output_name,geoms,
overwrite=True)
return output_name
|
{"hexsha": "5d3ce39a223d64fed41754ef0add63e1f3d0bb84", "size": 18852, "ext": "py", "lang": "Python", "max_stars_repo_path": "stompy/spatial/join_features.py", "max_stars_repo_name": "oneconcern/stompy", "max_stars_repo_head_hexsha": "d2cb86e7d1a2de698701b8d1b391e27e1ee935c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stompy/spatial/join_features.py", "max_issues_repo_name": "oneconcern/stompy", "max_issues_repo_head_hexsha": "d2cb86e7d1a2de698701b8d1b391e27e1ee935c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stompy/spatial/join_features.py", "max_forks_repo_name": "oneconcern/stompy", "max_forks_repo_head_hexsha": "d2cb86e7d1a2de698701b8d1b391e27e1ee935c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-11-27T21:21:45.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-03T19:20:27.000Z", "avg_line_length": 35.4360902256, "max_line_length": 131, "alphanum_fraction": 0.6066199873, "include": true, "reason": "from numpy", "num_tokens": 4341}
|
mutable struct ILU0Preconditioner{Tv, Ti} <: AbstractExtendablePreconditioner{Tv,Ti}
extmatrix::ExtendableSparseMatrix{Tv,Ti}
xdiag::Array{Tv,1}
idiag::Array{Ti,1}
pattern_timestamp::Float64
end
function ILU0Preconditioner(extmatrix::ExtendableSparseMatrix{Tv,Ti}) where {Tv,Ti}
@assert size(extmatrix,1)==size(extmatrix,2)
flush!(extmatrix)
n=size(extmatrix,1)
xdiag=Array{Tv,1}(undef,n)
idiag=Array{Ti,1}(undef,n)
precon=ILU0Preconditioner{Tv, Ti}(extmatrix,xdiag,idiag,0.0)
update!(precon)
end
ILU0Preconditioner(cscmatrix::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti}=ILU0Preconditioner(ExtendableSparseMatrix(cscmatrix))
function update!(precon::ILU0Preconditioner{Tv,Ti}) where {Tv,Ti}
cscmatrix=precon.extmatrix.cscmatrix
colptr=cscmatrix.colptr
rowval=cscmatrix.rowval
nzval=cscmatrix.nzval
n=cscmatrix.n
xdiag=precon.xdiag
idiag=precon.idiag
# Find main diagonal index and
# copy main diagonal values
if need_symbolic_update(precon)
@inbounds for j=1:n
@inbounds for k=colptr[j]:colptr[j+1]-1
i=rowval[k]
if i==j
idiag[j]=k
break
end
end
end
timestamp!(precon)
end
# seed xdiag with the matrix diagonal, then eliminate column by column,
# inverting the running value of each pivot before it updates later rows
@inbounds for j=1:n
xdiag[j]=nzval[idiag[j]]
end
@inbounds for j=1:n
xdiag[j]=one(Tv)/xdiag[j]
@inbounds for k=idiag[j]+1:colptr[j+1]-1
i=rowval[k]
for l=colptr[i]:colptr[i+1]-1
if rowval[l]==j
xdiag[i]-=nzval[l]*xdiag[j]*nzval[k]
break
end
end
end
end
precon
end
function LinearAlgebra.ldiv!(u::AbstractArray{T,1}, precon::ILU0Preconditioner, v::AbstractArray{T,1}) where T
cscmatrix=precon.extmatrix.cscmatrix
colptr=cscmatrix.colptr
rowval=cscmatrix.rowval
n=cscmatrix.n
nzval=cscmatrix.nzval
xdiag=precon.xdiag
idiag=precon.idiag
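# forward sweep: entries above the diagonal of column j couple u[j] to already-computed unknowns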
@inbounds for j=1:n
x=zero(T)
@inbounds for k=colptr[j]:idiag[j]-1
x+=nzval[k]*u[rowval[k]]
end
u[j]=xdiag[j]*(v[j]-x)
end
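# backward sweep: entries below the diagonal of column j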
@inbounds for j=n:-1:1
x=zero(T)
@inbounds for k=idiag[j]+1:colptr[j+1]-1
x+=u[rowval[k]]*nzval[k]
end
u[j]-=x*xdiag[j]
end
end
|
{"hexsha": "8c45050a9a9d827a3dd28a0c5e79e84ebbc32bb6", "size": 2336, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ilu0.jl", "max_stars_repo_name": "MaximilianJHuber/ExtendableSparse.jl", "max_stars_repo_head_hexsha": "c7e3a00c7901f0d46d884d6dfc6148bcfa73a268", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ilu0.jl", "max_issues_repo_name": "MaximilianJHuber/ExtendableSparse.jl", "max_issues_repo_head_hexsha": "c7e3a00c7901f0d46d884d6dfc6148bcfa73a268", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ilu0.jl", "max_forks_repo_name": "MaximilianJHuber/ExtendableSparse.jl", "max_forks_repo_head_hexsha": "c7e3a00c7901f0d46d884d6dfc6148bcfa73a268", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2471910112, "max_line_length": 121, "alphanum_fraction": 0.5946061644, "num_tokens": 757}
|
using Documenter, EqualizerFilters
makedocs(
sitename="EqualizerFilters.jl",
modules=[EqualizerFilters],
pages = [
"index.md",
"IndividualFilters.md",
"TupleFormat.md",
"SamplingRateSettings.md"
])
deploydocs(repo="github.com/Firionus/EqualizerFilters.jl.git")
|
{"hexsha": "1517c26482ea0c856995d895943b3810f27bdfbe", "size": 279, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "Firionus/EqualizerFilters.jl", "max_stars_repo_head_hexsha": "aa2415bc5981b67bcddcbfac77c46c1249e69a24", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-05T21:51:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-05T21:51:30.000Z", "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "Firionus/EqualizerFilters.jl", "max_issues_repo_head_hexsha": "aa2415bc5981b67bcddcbfac77c46c1249e69a24", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-12-11T21:31:34.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-11T21:39:03.000Z", "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "Firionus/EqualizerFilters.jl", "max_forks_repo_head_hexsha": "aa2415bc5981b67bcddcbfac77c46c1249e69a24", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.9285714286, "max_line_length": 62, "alphanum_fraction": 0.7383512545, "num_tokens": 73}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 07:39:31 2020
@author: lizet
"""
from numpy import array
import os
def rename_old(folder):
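"""Rotate older files: every file in folder.parent whose name contains
folder.stem is renamed, newest first, to '#<name>.1', '#<name>.2', ...
(a temporary 'd-' prefix is used to avoid name collisions)."""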
# olds_files = array([file_name for file_name in name.parent.iterdir() if name.stem in file_name])
olds_files = []
for i in folder.parent.iterdir():
if folder.stem in i.name:
olds_files.append(i)
olds_files = array(olds_files)
modificatios_date = array([os.path.getmtime(file_name)
for file_name in olds_files])
olds_files = olds_files[modificatios_date.argsort()][::-1]
name = folder.name
dump_name = []
number = 1
for file_name in olds_files:
file_name.rename( file_name.parent / f'd-{name}.{number}')
# os.rename(file_name, 'd-{0}.{1}'.format(name, number))
dump_name.append(file_name.parent / f'd-{name}.{number}')
number += 1
number = 1
for file_name in dump_name:
file_name.rename(file_name.parent / f'#{name}.{number}')
number += 1
|
{"hexsha": "29db23313b2f788b829ef3a10087d2d59ff15047", "size": 1069, "ext": "py", "lang": "Python", "max_stars_repo_path": "mscreen/screening/utils.py", "max_stars_repo_name": "e-mayo/mscreen", "max_stars_repo_head_hexsha": "a50f0b2f7104007c730baa51b4ec65c891008c47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-03-06T04:24:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T09:53:07.000Z", "max_issues_repo_path": "mscreen/screening/utils.py", "max_issues_repo_name": "e-mayo/mscreen", "max_issues_repo_head_hexsha": "a50f0b2f7104007c730baa51b4ec65c891008c47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-07T05:37:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-19T15:06:54.000Z", "max_forks_repo_path": "mscreen/screening/utils.py", "max_forks_repo_name": "e-mayo/mscreen", "max_forks_repo_head_hexsha": "a50f0b2f7104007c730baa51b4ec65c891008c47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-11T15:04:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-11T15:04:27.000Z", "avg_line_length": 31.4411764706, "max_line_length": 103, "alphanum_fraction": 0.5977549111, "include": true, "reason": "from numpy", "num_tokens": 268}
|
import numpy as np
def CO2_flux(DelCO2, grid_wind, unit='Pg'):
"""
Returns carbon flux in moles, g or Pg (1e15 g) of carbon (not CO2) per year.
F = A * E * DeltaCO2
where:
A is area in m2
E is the gas transfer coefficient (mol CO2 m-2 yr-1 uatm-1) from Wanninkhof (1992)
DeltaCO2 is the difference in pCO2 in uatm
The gas transfer coefficient is an area of uncertainty. See Takahashi et al (1997, 10.1073/pnas.94.16.8292) for discussion.
"""
E = 1.13e-3 * grid_wind.wind**2 # Gas transfer coefficient from Wanninkhof (1992)
F = np.sum(grid_wind.area * E * DelCO2) # moles of C yr-1
mC = 12.0107 # mass of C
if unit=='g':
return F * mC
elif unit=='Pg':
return F * mC * 1e-15
return F
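# A minimal usage sketch (hypothetical names; any object carrying `wind` and
# `area` arrays on the same grid as DelCO2 works):
# flux_PgC = CO2_flux(DelCO2=pco2_sea - pco2_air, grid_wind=grid, unit='Pg')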
|
{"hexsha": "91fc25318c8e013c0267f6124ae28ce33fab9fef", "size": 809, "ext": "py", "lang": "Python", "max_stars_repo_path": "helpers/calc.py", "max_stars_repo_name": "ollie-bell/ai4er_ocean", "max_stars_repo_head_hexsha": "16aff3c8ca157ef7fc4becfb94d4c081e80bae12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-17T13:38:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-15T13:05:26.000Z", "max_issues_repo_path": "helpers/calc.py", "max_issues_repo_name": "ollie-bell/ai4er_ocean", "max_issues_repo_head_hexsha": "16aff3c8ca157ef7fc4becfb94d4c081e80bae12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-17T15:22:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-27T13:24:23.000Z", "max_forks_repo_path": "helpers/calc.py", "max_forks_repo_name": "ollie-bell/ai4er_ocean", "max_forks_repo_head_hexsha": "16aff3c8ca157ef7fc4becfb94d4c081e80bae12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-03-17T11:05:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-15T02:26:35.000Z", "avg_line_length": 29.962962963, "max_line_length": 127, "alphanum_fraction": 0.5945611867, "include": true, "reason": "import numpy", "num_tokens": 268}
|
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.impute import SimpleImputer
continues = [0, 2, 4, 10, 11, 12] # column indices of the numerical features
categories = [1, 3, 5, 6, 7, 8, 9] # column indices of the categorical features
# convert categorical data to numeric codes
def cate_encode(arrays):
enc = preprocessing.OrdinalEncoder()
if len(arrays) == 1:
result = enc.fit_transform(arrays.T)
else:
result = enc.fit_transform(arrays)
return result
# handle missing values
def imputation(arrays, missing, strategy):
arrays[arrays == missing] = np.nan # replace the '?' marker with NaN
imp = SimpleImputer(missing_values=np.nan, strategy=strategy)
if len(arrays.shape) == 1:
result = imp.fit_transform(arrays.reshape(1, -1))
else:
result = imp.fit_transform(arrays)
return result
# standardization (zero mean, unit variance)
def normal(arrays):
X_normalized = preprocessing.StandardScaler()
return X_normalized.fit_transform(arrays)
# batch processing for numerical data
def continues_process(arrays):
arrays = imputation(arrays, "?", 'mean')
result = normal(arrays)
return result
# batch processing for categorical data:
def category_process(arrays):
arrays = imputation(arrays, '?', "most_frequent")
result = cate_encode(arrays)
return result
# load the data
def load_data(filename):
data = pd.read_csv(filename).values
x = data[:, 0:-2]
y = data[:, -1]
return x, y
# read a file and return processed feature and label arrays
def out_data(filename):
x, y = load_data(filename)
cont = continues_process(x[:, continues])
cate = category_process(x[:, categories])
label = category_process(y)
new_x = np.hstack((cate, cont))
return new_x, label
# write a matrix to a file
def write_data(filename, matrix):
result = ''
try:
np.savetxt(filename, matrix, fmt='%f', delimiter=',')
result = filename
except:
print('文件写入出错')
return result
# full pipeline: process the data and write it out
def write(filename, x_outname, y_outname):
x, y = load_data(filename)
cont = continues_process(x[:, continues]) # take the numerical columns
cate = category_process(x[:, categories]) # take the categorical columns
label = category_process(y) # encode the label column
new_x = np.hstack((cate, cont)) # stack the encoded categorical and numerical data horizontally
write_data(x_outname, new_x) # write the new data to file
write_data(y_outname, label)
return new_x, label
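# A minimal usage sketch (hypothetical file names):
# new_x, label = out_data('adult.csv')
# write('adult.csv', 'x_processed.csv', 'y_processed.csv')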
|
{"hexsha": "1634ccf4c61496bdd127a13b0d6288a9fce10938", "size": 2268, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataProcess.py", "max_stars_repo_name": "wang534602988/data_mine", "max_stars_repo_head_hexsha": "3797dab412ba49f4440c06f98f7d00c2c2824c9e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dataProcess.py", "max_issues_repo_name": "wang534602988/data_mine", "max_issues_repo_head_hexsha": "3797dab412ba49f4440c06f98f7d00c2c2824c9e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataProcess.py", "max_forks_repo_name": "wang534602988/data_mine", "max_forks_repo_head_hexsha": "3797dab412ba49f4440c06f98f7d00c2c2824c9e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-20T12:58:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T12:58:54.000Z", "avg_line_length": 25.4831460674, "max_line_length": 66, "alphanum_fraction": 0.6375661376, "include": true, "reason": "import numpy", "num_tokens": 684}
|
#include "RBGL.hpp"
#include <boost/graph/graph_utility.hpp>
using namespace boost;
typedef adjacency_list<vecS, vecS, undirectedS,
// vertex properties
property<vertex_index_t, int,
property<vertex_centrality_t, double> >,
// edge properties
property<edge_weight_t, double,
property<edge_centrality_t, double> > >
BCGraph;
// Explicit instantiation of template function max_element circumvents
// compiler error in llvm8 libc++
typedef graph_traits<BCGraph>::edge_descriptor Edge;
typedef graph_traits<BCGraph>::edge_iterator EdgeIterator;
typedef property_map < BCGraph, edge_centrality_t >::type EdgeCentralityMap;
typedef typename property_traits<EdgeCentralityMap>::value_type centrality_type;
typedef indirect_cmp<EdgeCentralityMap, std::less<centrality_type> > EdgeCentralityCompare;
EdgeIterator
max_element(EdgeIterator __first, EdgeIterator __last, EdgeCentralityCompare __comp)
{
if (__first != __last)
{
EdgeIterator __i = __first;
while (++__i != __last)
if (__comp(*__first, *__i))
__first = __i;
}
return __first;
}
#include <boost/graph/bc_clustering.hpp>
#include <boost/graph/betweenness_centrality.hpp>
extern "C"
{
SEXP BGL_brandes_betweenness_centrality(SEXP num_verts_in,
SEXP num_edges_in, SEXP R_edges_in, SEXP R_weights_in)
{
BCGraph g;
int NV = Rf_asInteger(num_verts_in);
int NE = Rf_asInteger(num_edges_in);
int* edges_in = INTEGER(R_edges_in);
double* weights_in = REAL(R_weights_in);
for (int i = 0; i < NE ; i++, edges_in += 2, weights_in++)
boost::add_edge(*edges_in, *(edges_in+1), *weights_in, g);
SEXP anslst, bcvlst, enlst, bcelst, rbcvlst, dom;
PROTECT(anslst = Rf_allocVector(VECSXP,5));
PROTECT(bcvlst = Rf_allocMatrix(REALSXP, 1, NV));
PROTECT(enlst = Rf_allocMatrix(INTSXP, 2, NE));
PROTECT(bcelst = Rf_allocMatrix(REALSXP, 1, NE));
PROTECT(rbcvlst = Rf_allocMatrix(REALSXP, 1, NV));
PROTECT(dom = Rf_allocVector(REALSXP, 1));
brandes_betweenness_centrality(g,
centrality_map(get(vertex_centrality, g)).
edge_centrality_map(get(edge_centrality, g)).
weight_map(get(edge_weight, g)));
property_map<BCGraph, vertex_centrality_t>::type
v_map = get(vertex_centrality, g);
property_map<BCGraph, edge_centrality_t>::type
e_map = get(edge_centrality, g);
graph_traits < BCGraph>::vertex_iterator vi, v_end;
graph_traits < BCGraph>::edge_iterator ei, e_end;
int v = 0, e = 0;
for ( tie(vi, v_end) = vertices(g); vi != v_end; vi++ )
REAL(bcvlst)[v++] = v_map[*vi];
for ( v = 0, tie(ei, e_end) = edges(g); ei != e_end ; ei++ )
{
INTEGER(enlst)[v++] = source(*ei, g);
INTEGER(enlst)[v++] = target(*ei, g);
REAL(bcelst)[e++] = e_map[*ei];
}
relative_betweenness_centrality(g, get(vertex_centrality, g));
v_map = get(vertex_centrality, g);
for ( v = 0, tie(vi, v_end) = vertices(g); vi != v_end; vi++ )
REAL(rbcvlst)[v++] = v_map[*vi];
double dominance = central_point_dominance(g,
get(vertex_centrality, g));
REAL(dom)[0] = dominance;
SET_VECTOR_ELT(anslst,0,bcvlst);
SET_VECTOR_ELT(anslst,1,bcelst);
SET_VECTOR_ELT(anslst,2,rbcvlst);
SET_VECTOR_ELT(anslst,3,dom);
SET_VECTOR_ELT(anslst,4,enlst);
UNPROTECT(6);
return(anslst);
}
class clustering_threshold : public bc_clustering_threshold<double>
{
typedef bc_clustering_threshold<double> inherited;
public:
clustering_threshold(double threshold, const BCGraph& g, bool normalize)
: inherited(threshold, g, normalize), iter(1) { }
bool operator()(double max_centrality, Edge e, const BCGraph& g)
{
#if DEBUG
std::cout << "Iter: " << iter << " Max Centrality: "
<< (max_centrality / dividend) << std::endl;
#endif
++iter;
return inherited::operator()(max_centrality, e, g);
}
private:
unsigned int iter;
};
SEXP BGL_betweenness_centrality_clustering (SEXP num_verts_in,
SEXP num_edges_in, SEXP R_edges_in, SEXP R_weights_in,
SEXP R_threshold, SEXP R_normalize)
{
BCGraph g;
int NE = Rf_asInteger(num_edges_in);
int* edges_in = INTEGER(R_edges_in);
double* weights_in = REAL(R_weights_in);
for (int i = 0; i < NE ; i++, edges_in += 2, weights_in++)
boost::add_edge(*edges_in, *(edges_in+1), *weights_in, g);
double threshold = REAL(R_threshold)[0];
bool normalize = LOGICAL(R_normalize)[0];
betweenness_centrality_clustering(g,
clustering_threshold(threshold, g, normalize),
get(edge_centrality, g));
// betweenness_centrality_clustering(g,
// clustering_threshold(threshold, g, normalize));
SEXP anslst, cnt, bcvlst, bcelst;
PROTECT(anslst = Rf_allocVector(VECSXP,3));
PROTECT(cnt = Rf_allocVector(INTSXP, 1));
PROTECT(bcvlst = Rf_allocMatrix(INTSXP, 2, num_edges(g)));
PROTECT(bcelst = Rf_allocMatrix(REALSXP, 1, num_edges(g)));
INTEGER(cnt)[0] = num_edges(g);
property_map < BCGraph, edge_centrality_t >::type
ec = get(edge_centrality, g);
typedef graph_traits<BCGraph>::edge_iterator edge_iterator;
edge_iterator ei, e_end;
#if DEBUG
std::cout << " edge centralities: ";
for ( tie(ei, e_end) = edges(g); ei != e_end; ++ei )
std::cout << " " << ec[*ei];
std::cout << std::endl;
#endif
int i = 0, j = 0;
for ( tie(ei, e_end) = edges(g); ei != e_end; ++ei )
{
INTEGER(bcvlst)[i++] = source(*ei, g);
INTEGER(bcvlst)[i++] = target(*ei, g);
REAL(bcelst)[j++] = ec[*ei];
}
SET_VECTOR_ELT(anslst,0,cnt);
SET_VECTOR_ELT(anslst,1,bcvlst);
SET_VECTOR_ELT(anslst,2,bcelst);
UNPROTECT(4);
return(anslst);
}
}
|
{"hexsha": "277cab40f48677b28ebb80ab533f25583ef6e434", "size": 5916, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/bbc.cpp", "max_stars_repo_name": "HenrikBengtsson/RBGL", "max_stars_repo_head_hexsha": "9e34efd0dcab3babe1cea49b060a643bee79931c", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/bbc.cpp", "max_issues_repo_name": "HenrikBengtsson/RBGL", "max_issues_repo_head_hexsha": "9e34efd0dcab3babe1cea49b060a643bee79931c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2019-09-05T02:26:35.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-30T20:28:53.000Z", "max_forks_repo_path": "src/bbc.cpp", "max_forks_repo_name": "HenrikBengtsson/RBGL", "max_forks_repo_head_hexsha": "9e34efd0dcab3babe1cea49b060a643bee79931c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2018-12-19T10:17:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-14T01:22:29.000Z", "avg_line_length": 30.6528497409, "max_line_length": 91, "alphanum_fraction": 0.6509465855, "num_tokens": 1756}
|
import sys
import dlib
import numpy as np
MAX_DIMENSION = 1024
WIDTH_MARGIN = 0.18
TOP_SHIFT = 0.2
class Edhead(object):
def __init__(self):
self.detector = dlib.get_frontal_face_detector()
self.overlay = dlib.load_rgb_image('A1opZLgQdoL.jpg')
def _preprocess(self, image):
"""Load image and resize if any dimension is greater than MAX_DIMENSION"""
img = dlib.load_rgb_image(image)
print(f"Input image shape {img.shape}")
max_dim = img.shape[0] if img.shape[0] > img.shape[1] else img.shape[1]
if max_dim > MAX_DIMENSION:
img = dlib.resize_image(img, MAX_DIMENSION / max_dim)
print(f"Resized image shape {img.shape}")
return img
def edheadify(self, image, view=False):
"""Edheadify an image
image: Path to a portrait image.
        For best results, check out https://unsplash.com/s/photos/portrait for inspiration.
Output image saved with suffix '_edhead.jpg'.
view: If True, show output image.
"""
        img = self._preprocess(image)
dets, scores, _ = self.detector.run(img, 1, -1)
most_likely = np.argmax(scores)
d = dets[most_likely]
print("Left: {} Top: {} Right: {} Bottom: {}".format(d.left(), d.top(), d.right(), d.bottom()))
face_center = [round((d.top() + d.bottom()) / 2.0), round((d.left() + d.right()) / 2.0)]
face_center[0] = round(face_center[0] / (1 + TOP_SHIFT))
padded_width = round((d.right() - d.left()) * (1 + 2 * WIDTH_MARGIN))
        scale = padded_width / self.overlay.shape[1]
        overlay = dlib.resize_image(self.overlay, scale)
height, width, _ = overlay.shape
overlay_top_left = (max([0, face_center[0] - height // 2]), face_center[1] - width // 2)
img[overlay_top_left[0]: overlay_top_left[0] + height,
overlay_top_left[1]: overlay_top_left[1] + width] = overlay
if view:
win = dlib.image_window()
win.clear_overlay()
win.set_image(img)
dlib.hit_enter_to_continue()
dlib.save_image(img, image + '_edhead.jpg')
if __name__ == "__main__":
edhead = Edhead()
input_image = sys.argv[1]
edhead.edheadify(input_image)
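# Example invocation (hypothetical image name; per edheadify above, the output
# is written next to the input with an '_edhead.jpg' suffix):
#   python edhead.py portrait.jpg   ->   writes portrait.jpg_edhead.jpg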
|
{"hexsha": "b3e7ef1432ccf95c9d827725ace0f9ab9746469b", "size": 2268, "ext": "py", "lang": "Python", "max_stars_repo_path": "edhead.py", "max_stars_repo_name": "ivylee/edhead", "max_stars_repo_head_hexsha": "4ec38a3232841669c1162cb9220d78a81c31bd81", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "edhead.py", "max_issues_repo_name": "ivylee/edhead", "max_issues_repo_head_hexsha": "4ec38a3232841669c1162cb9220d78a81c31bd81", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "edhead.py", "max_forks_repo_name": "ivylee/edhead", "max_forks_repo_head_hexsha": "4ec38a3232841669c1162cb9220d78a81c31bd81", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3529411765, "max_line_length": 103, "alphanum_fraction": 0.60670194, "include": true, "reason": "import numpy", "num_tokens": 600}
|
[STATEMENT]
lemma synth_trans: "\<lbrakk> X \<in> synth G; G \<subseteq> synth H \<rbrakk> \<Longrightarrow> X \<in> synth H"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>X \<in> synth G; G \<subseteq> synth H\<rbrakk> \<Longrightarrow> X \<in> synth H
[PROOF STEP]
by (drule synth_mono, blast)
|
{"llama_tokens": 120, "file": "Key_Agreement_Strong_Adversaries_Message_derivation", "length": 1}
|
import IDONE
import numpy as np
import os
from scipy.optimize import rosen
def test_Rosenbrock(d):
print(f"Testing IDONE on the {d}-dimensional Rosenbrock function with integer constraints.")
print("The known global minimum is f(1,1,...,1)=0")
lb = -5*np.ones(d).astype(int) # Lower bound
ub = 10*np.ones(d).astype(int) # Upper bound
x0 = np.round(np.random.rand(d)*(ub-lb) + lb) # Random initial guess
max_evals = 500 # Maximum number of IDONE iterations
def f(x):
scaling = d*(100*((ub[0]-lb[0]**2)**2)+(ub[0]-1)**2)
result = rosen(x)/scaling
return result
solX, solY, model, logfile = IDONE.IDONE_minimize(f, x0, lb, ub, max_evals)
print("Solution found: ")
print(f"X = {solX}")
print(f"Y = {solY}")
return solX, solY, model, logfile
d = 5 # Change this number to optimize the Rosenbrock function for different numbers of variables
solX, solY, model, logfile = test_Rosenbrock(d)
# Visualise the results
IDONE.plot_results(logfile)
|
{"hexsha": "e18332d3f6a2a9b26e522c9c79152fff825692f4", "size": 956, "ext": "py", "lang": "Python", "max_stars_repo_path": "expensiveoptimbenchmark/solvers/IDONE/demo_Rosenbrock.py", "max_stars_repo_name": "AlgTUDelft/ExpensiveOptimBenchmark", "max_stars_repo_head_hexsha": "642056f8d94c7f953e50c3cd05bbbf9f39ad5c3d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-03-03T15:17:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T14:30:29.000Z", "max_issues_repo_path": "expensiveoptimbenchmark/solvers/IDONE/demo_Rosenbrock.py", "max_issues_repo_name": "WholeG/ExpensiveOptimBenchmark", "max_issues_repo_head_hexsha": "642056f8d94c7f953e50c3cd05bbbf9f39ad5c3d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-06-08T22:24:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:49:58.000Z", "max_forks_repo_path": "expensiveoptimbenchmark/solvers/IDONE/demo_Rosenbrock.py", "max_forks_repo_name": "WholeG/ExpensiveOptimBenchmark", "max_forks_repo_head_hexsha": "642056f8d94c7f953e50c3cd05bbbf9f39ad5c3d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-03-22T12:12:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T15:24:07.000Z", "avg_line_length": 34.1428571429, "max_line_length": 97, "alphanum_fraction": 0.7060669456, "include": true, "reason": "import numpy,from scipy", "num_tokens": 295}
|
import numpy as np
import pcl
import pyrealsense2 as rs
from swagscanner.scanner.d435 import D435
from swagscanner.scanner.kinect import Kinect
import swagscanner.visualization.viewer as viewer
class DepthProcessor():
'''Factory object creator to send processing to either 'fast' or
'slow' DepthProcessor objects
'''
def initialize_processor(self, camera=None, fast=True):
if camera is None:
raise ValueError('Error: must pass a camera to DepthProcessor')
if fast == True:
return DepthProcessorFast(camera)
else:
return DepthProcessorSlow(camera)
def get_pointcloud(self):
'''Use a depth frame to create a pointcloud
'''
pass
def deproject_depth_frame(self):
''' Use a depth frame and camera intrinsics to deproject 2D pixels
& depth values to real world coordinates
'''
pass
def clip_depth(self, point_cloud_array):
        ''' Clip points that fall outside the field of the scan bed.
        The array is clipped after deprojection; this could be sped up by
        clipping immediately after grabbing the depth frame.
TODO: Implement in each camera object instead?
'''
shape = point_cloud_array.shape
point_cloud_array = point_cloud_array[(point_cloud_array[:, 0] > -.2) &
(point_cloud_array[:, 0] < .2) &
(point_cloud_array[:, 1] > -.5) &
(point_cloud_array[:, 1] < .15) &
(point_cloud_array[:, 2] < .44)]
return point_cloud_array
class DepthProcessorFast(DepthProcessor):
'''Uses fast vectorized operations to deproject depth frames
'''
def __init__(self, camera):
self.camera = camera
self.intrinsics = self.camera.depth_intrinsics
def get_pointcloud(self):
'''Use depth frame to create a pointcloud using vectorized
operations for speed
Returns:
a pointcloud object
'''
pointcloud_array = self.deproject_depth_frame()
# pointcloud_array = deproject_depth_frame_slow(depth_frame, depth_intrinsics)
point_cloud = pcl.PointCloud()
point_cloud.from_array(pointcloud_array)
return point_cloud
def deproject_depth_frame(self):
'''Deproject 2D pixels & depth values to real world coordinates
then shape into a (921600, 3) array
[[x,y,d],
[x,y,d],...]
Returns:
pointcloud_array (np.array): (921600, 3) array
'''
width = self.intrinsics['width']
height = self.intrinsics['height']
depth_array = self.camera.get_depth_array()
x_array = np.tile(np.arange(width), height)
y_array = np.repeat(np.arange(height), width)
# perform calculations to convert to real world coordinates
x_array = (x_array - self.intrinsics['ppx']) * \
depth_array * (1/self.intrinsics['fx'])
y_array = (y_array - self.intrinsics['ppy']) * \
depth_array * (1/self.intrinsics['fy'])
point_cloud_array = np.vstack((x_array, y_array, depth_array)).T
point_cloud_array = np.asarray(point_cloud_array, dtype=np.float32)
point_cloud_array = super().clip_depth(point_cloud_array)
print(point_cloud_array.shape)
return point_cloud_array
class DepthProcessorSlow(DepthProcessor):
'''Uses slow nested for loops and librealsense libraries to
deproject depth frames
'''
def __init__(self, camera):
self.camera = camera
self.intrinsics = self.camera.depth_intrinsics
def get_pointcloud(self):
'''Use depth frame to create a pointcloud using built in librealsense
library methods
Returns:
a pointcloud object
'''
pc = rs.pointcloud()
points = pc.calculate(self.camera.get_depth_frame())
v, t = points.get_vertices(), points.get_texture_coordinates()
verts = np.asanyarray(v).view(np.float32).reshape(-1, 3) # xyz
# texcoords = np.asanyarray(t).view(np.float32).reshape(-1, 2) # uv
point_cloud = pcl.PointCloud()
point_cloud.from_array(verts)
return point_cloud
def deproject_depth_frame(self):
'''Slow, naive approach to deproject using librealsense library methods
Returns:
pointcloud_array (np.array): (921600, 3) array
'''
width = self.intrinsics['width']
height = self.intrinsics['height']
depth_frame = self.camera.get_depth_frame()
depth_array = np.empty((width * height, 3))
intrin_object = self.camera.profile.get_stream(
rs.stream.depth).as_video_stream_profile().get_intrinsics()
counter = 0
for i in range(depth_frame.get_height()):
for j in range(depth_frame.get_width()):
depth = depth_frame.get_distance(j, i)
depth_point = np.asarray(rs.rs2_deproject_pixel_to_point(
intrin_object, [j, i], depth), dtype=np.float32)
depth_array[counter] = depth_point
counter += 1
point_cloud_array = depth_array.astype(dtype=np.float32)
return point_cloud_array
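# Usage sketch (a minimal example; assumes a connected RealSense D435 and that
# the D435 wrapper takes no constructor arguments):
#   camera = D435()
#   processor = DepthProcessor().initialize_processor(camera=camera, fast=True)
#   cloud = processor.get_pointcloud()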
|
{"hexsha": "3886542de2c737fa85baf9974e9b8d38080ec71b", "size": 5411, "ext": "py", "lang": "Python", "max_stars_repo_path": "swagscanner/processing/depth.py", "max_stars_repo_name": "seanngpack/swag-scanner", "max_stars_repo_head_hexsha": "8932e4823e7edd570cbb063702ef4be335d12881", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-02-19T18:45:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-28T12:36:44.000Z", "max_issues_repo_path": "swagscanner/processing/depth.py", "max_issues_repo_name": "seanngpack/swag-scanner", "max_issues_repo_head_hexsha": "8932e4823e7edd570cbb063702ef4be335d12881", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-12-31T04:12:58.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-01T15:56:34.000Z", "max_forks_repo_path": "swagscanner/processing/depth.py", "max_forks_repo_name": "seanngpack/swag-scanner", "max_forks_repo_head_hexsha": "8932e4823e7edd570cbb063702ef4be335d12881", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-29T18:04:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-29T18:04:00.000Z", "avg_line_length": 31.4593023256, "max_line_length": 86, "alphanum_fraction": 0.617076326, "include": true, "reason": "import numpy", "num_tokens": 1172}
|
module Altro
import TrajectoryOptimization
import RobotDynamics
using StaticArrays
using BenchmarkTools
using Interpolations
using SolverLogging
using Crayons
using SparseArrays
using LinearAlgebra
using Logging
using Statistics
using TimerOutputs
using ForwardDiff
using FiniteDiff
import Octavian
const TO = TrajectoryOptimization
const RD = RobotDynamics
using TrajectoryOptimization:
num_constraints, get_trajectory
import TrajectoryOptimization: rollout!, get_constraints, get_model, get_objective,
evaluate_constraints!, constraint_jacobians!
import RobotDynamics: discrete_dynamics, dynamics, dynamics!, evaluate, evaluate!
using TrajectoryOptimization:
Problem,
ConstraintList,
AbstractObjective, Objective, #QuadraticObjective,
# SampledTrajectory,
# DynamicsExpansion, # TODO: Move to ALTRO
# ALConstraintSet,
# DynamicsConstraint,
states, controls,
Equality, Inequality, SecondOrderCone,
cost
using RobotDynamics:
AbstractModel, DiscreteDynamics, DiscreteLieDynamics,
QuadratureRule, Implicit, Explicit,
FunctionSignature, InPlace, StaticReturn,
DiffMethod, ForwardAD, FiniteDifference, UserDefined,
AbstractKnotPoint, KnotPoint, StaticKnotPoint,
state_dim, control_dim, output_dim, dims,
state, control, SampledTrajectory
# types
export
ALTROSolverOld,
ALTROSolver,
# iLQRSolverOld,
# AugmentedLagrangianSolver,
SolverStats,
SolverOptions
export
solve!,
benchmark_solve!,
iterations,
set_options!,
max_violation,
status
# modules
export
Problems
const ColonSlice = Base.Slice{Base.OneTo{Int}}
const SparseView{T,I} = SubArray{T, 2, SparseMatrixCSC{T, I}, Tuple{UnitRange{I}, UnitRange{I}}, false}
const VectorView{T,I} = SubArray{T, 1, Vector{T}, Tuple{UnitRange{I}}, true}
# Select the matrix multiplication kernel
const USE_OCTAVIAN = parse(Bool, get(ENV, "ALTRO_USE_OCTAVIAN", "true"))
@static if USE_OCTAVIAN
const matmul! = Octavian.matmul!
else
const matmul! = mul!
end
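# Usage sketch: the kernel is chosen when this module is compiled, so the
# environment variable must be set before the package is loaded (note that a
# precompiled cache may retain an earlier choice):
#   ENV["ALTRO_USE_OCTAVIAN"] = "false"   # fall back to LinearAlgebra.mul!
#   using Altro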
# Include the QDLDL wrapper
include("qdldl.jl")
using .Cqdldl
# High level structs
include("utils.jl")
include("infeasible_model.jl")
include("solvers.jl")
include("solver_opts.jl")
# iLQR Solver
include("ilqr/cost_expansion.jl")
include("ilqr/dynamics_expansion.jl")
include("ilqr/ilqr_solver.jl")
include("ilqr/backwardpass.jl")
include("ilqr/forwardpass.jl")
include("ilqr/ilqr_solve.jl")
# Augmented Lagrangian Solver
include("augmented_lagrangian/alcon.jl")
include("augmented_lagrangian/alconset.jl")
include("augmented_lagrangian/al_objective.jl")
include("augmented_lagrangian/al_solver.jl")
include("augmented_lagrangian/al_solve.jl")
include("direct/sparseblocks.jl")
# Projected Newton Solver
include("direct/pncon.jl")
include("direct/pnconset.jl")
include("direct/pn_solver.jl")
include("direct/pn_solve.jl")
# ALTRO Solver
include("altro/altro_solver.jl")
include("altro/altro_solve.jl")
# Example problems submodule
include("problems.jl")
end # module
|
{"hexsha": "b3a3e448f0f5f73dda5c223f60a33b6481deb72a", "size": 3013, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Altro.jl", "max_stars_repo_name": "bjack205/ALTRO.jl", "max_stars_repo_head_hexsha": "4864df2bb8ab8f629f451304cbaaa8e0017932d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Altro.jl", "max_issues_repo_name": "bjack205/ALTRO.jl", "max_issues_repo_head_hexsha": "4864df2bb8ab8f629f451304cbaaa8e0017932d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Altro.jl", "max_forks_repo_name": "bjack205/ALTRO.jl", "max_forks_repo_head_hexsha": "4864df2bb8ab8f629f451304cbaaa8e0017932d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.104, "max_line_length": 103, "alphanum_fraction": 0.7716561567, "num_tokens": 815}
|
#include <atomic>
#include <chrono>
#include <fstream>
#include <future>
#include <memory>
#include <string>
#include <tuple>
#include <boost/asio/steady_timer.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/version.hpp>
#include <boost/filesystem.hpp>
#include "TileManager.h"
#include "ThreadSafePrinter.hpp"
using tcp = boost::asio::ip::tcp;
using steady_timer = boost::asio::steady_timer;
namespace http = boost::beast::http;
using namespace boost::filesystem;
using TSP = alt::ThreadSafePrinter<alt::MarkPolicy>;
namespace {
//! Name of the directory holding all cached tile images from all tile servers.
const std::string cache = "cache";
/*!
* \brief Compose directory name for a tile with a prefix.
*
* \param[in] head Tile header.
* \param[in] name Prefix (tile server name).
* \return Directory name.
*/
std::string tileDir( const gv::TileHead& head, const std::string& name )
{
return cache + "/" + name + "/" + std::to_string( head.z ) + "/" + std::to_string( head.x ) + "/";
}
/*!
* \brief Compose file name for a tile with a prefix.
*
* \param[in] head Tile header.
* \param[in] name Prefix (tile server name).
* \return File name.
*/
std::string tileFile( const gv::TileHead& head, const std::string& name )
{
return tileDir( head, name ) + std::to_string( head.y ) + ".png";
}
}
namespace gv {
/*!
* \brief Fetch a single tile image either from cache or tile server.
*
 * Session is made a friend of TileManager so it has access to the
 * resulting container and the promise it must set if it fetches the
 * last requested tile. It has a timeout for cases when the tile server
 * is not responsive. In that case the tile will not be fetched and
 * an empty image (zero bytes) is returned.
*/
class TileManager::Session : public std::enable_shared_from_this<TileManager::Session>
{
public:
explicit Session( TileManager*, const TileHead&, int msecTimeout );
~Session();
//! Start the session.
void start();
private:
//! Stop the session.
void stop();
//! Error report during session lifetime.
void error( boost::system::error_code, const std::string& );
//! Fetch an image from the tile server.
void get( const std::string& host, const std::string& port );
//! Timeout occurred.
void onTimeout();
//! Tile server address resolved.
void onResolve( boost::system::error_code, tcp::resolver::results_type );
//! Connected to the tile server.
void onConnect( boost::system::error_code );
//! Request to the tile server sent.
void onWrite( boost::system::error_code, std::size_t );
//! Response from the tile server received.
void onRead( boost::system::error_code, std::size_t );
//! Check if this was the last of the requested tiles.
void checkRemains();
TileManager* tm_; //!< Pointer to the owner TileManager instance.
TileHead tileHead_; //!< Tile header of the tile to fetch.
std::string mirror_; //!< Address of one of the tile server mirrors.
steady_timer timer_; //!< Timer to detect timeout.
tcp::resolver resolver_; //!< Boost.Asio resolver.
tcp::socket socket_; //!< Boost.Asio socket.
boost::beast::flat_buffer buffer_; //!< Boost.Beast buffer.
http::request<http::empty_body> request_; //!< Boost.Beast request container.
http::response<http::string_body> response_; //!< Boost.Beast response container.
std::chrono::time_point<std::chrono::steady_clock> start_; //!< Time of the session start.
int tries_; //!< Number of tries to fetch the tile image.
const int msecTimeout_; //!< Timeout time.
std::atomic<bool> connected_; //!< Indicator of session managing to connect to the tile server.
};
}
namespace gv {
/*!
* By default OpenStreetMap tile server is used.
*/
TileManager::TileManager()
: ioc_()
, work_( make_work_guard( ioc_ ) )
, serverType_( TileServer::OSM )
, tileServer_( TileServerFactory::createTileServer( serverType_ ) )
, activeRequest_( false )
, pendingRequest_( false )
{
const int tNum = 4; // std::thread::hardware_concurrency() - 1;
for ( int i = 0; i < tNum; ++i )
{
threads_.emplace_back( [this]() { ioc_.run(); } );
}
}
TileManager::~TileManager()
{
work_.reset();
ioc_.stop();
for ( auto&& t : threads_ )
{
if ( t.joinable() )
{
t.join();
}
}
}
/*!
 * First, a state check happens in the same manner as in MapGenerator.
 * If the request is to be processed, the first task placed in the queue is
 * the function waiting for promiseReady_ to be set, and then for every
 * requested tile a Session is put in the queue.
*
* \param[in] vec Tile headers.
* \param[in] ts Tile server identifier.
*/
void TileManager::requestTiles( const std::vector<TileHead>& vec, TileServer ts )
{
TSP() << "Request for " << vec.size() << " tiles";
if ( vec.empty() )
{
vecResult_.clear();
sendTiles( vecResult_ );
return;
}
{ // checking states
std::lock_guard<std::mutex> lock( mutexState_ );
if ( activeRequest_ )
{
pendingRequest_ = true;
lastRequest_ = std::move( vec );
lastTileServer_ = ts;
return;
}
else
{
activeRequest_ = true;
}
}
if ( ts != serverType_ )
{
serverType_ = ts;
tileServer_ = TileServerFactory::createTileServer( serverType_ );
}
vecResult_.clear();
remains_ = vec.size();
promiseReady_.reset( new std::promise<void> );
ioc_.post( [this, rem = vec.size()]()
{
promiseReady_->get_future().wait();
TSP() << "All " << rem << " jobs are done!\n"
<< "From cache: " << cacheCount_ << "\n"
<< "From mirrors" << ( mirrorCount_.empty() ? " nothing" : ": " );
for ( const auto& it : mirrorCount_ )
{
TSP() << it.first << ": " << it.second;
}
sendTiles( vecResult_ );
std::lock_guard<std::mutex> lock( mutexState_ );
activeRequest_ = false;
if ( pendingRequest_ )
{
pendingRequest_ = false;
// otherwise mutexState_ will be locked twice which is UB
ioc_.post( [this] { requestTiles( lastRequest_, lastTileServer_ ); } );
            ///\todo this request can arrive later than a new one from outside,
            /// which will make it outdated; some check needs to be added to prevent
            /// processing of outdated requests
}
} );
mirrorCount_.clear();
cacheCount_ = 0;
for ( const auto& head : vec )
{
ioc_.post( [head, this]()
{
std::make_shared<Session>( this, head, 1000 )->start();
} );
}
}
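/*
 * Hypothetical caller sketch. The brace-initialized TileHead field order
 * {z, x, y} is an assumption inferred from tileDir()/tileFile() above:
 *
 *   gv::TileManager tm;
 *   std::vector<gv::TileHead> heads{ {2, 1, 1}, {2, 1, 2} };
 *   tm.requestTiles( heads, gv::TileServer::OSM );
 */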
/*!
* \param[in] head Tile header.
*/
void TileManager::prepareDir( const TileHead& head )
{
create_directories( tileDir( head, tileServer_->serverName() ) );
}
}
namespace gv {
/*!
* \param[in] tm Pointer to the owner TileManager instance.
* \param[in] head Tile header.
* \param[in] msecTimeout Timeout time.
*/
TileManager::Session::Session( TileManager* tm, const TileHead& head, int msecTimeout )
: tm_( tm )
, tileHead_( head )
, timer_( tm_->ioc_ )
, resolver_( tm_->ioc_ )
, socket_( tm_->ioc_ )
, tries_( 3 )
, msecTimeout_( msecTimeout )
, connected_( false )
{
}
TileManager::Session::~Session()
{
stop();
checkRemains();
}
/*!
* First the cache is checked. If the tile image exists it's immediately
* returned and the session will automatically end.
*/
void TileManager::Session::start()
{
const auto tilePath = tileFile( tileHead_, tm_->tileServer_->serverName() );
if ( exists( path( tilePath ) ) )
{
std::ifstream tile( tilePath.c_str(), std::ios::in | std::ios::binary );
std::vector<unsigned char> vec{ std::istreambuf_iterator<char>( tile ), std::istreambuf_iterator<char>() };
tile.close();
{
std::lock_guard<std::mutex> lock( tm_->mutexResult_ );
tm_->vecResult_.emplace_back( tileHead_, std::move( TileData( vec ) ) );
}
++tm_->cacheCount_;
}
else
{
mirror_ = tm_->tileServer_->nextMirror();
get( mirror_, tm_->tileServer_->serverPort() );
}
}
/*!
 * \param[in] host Tile server mirror IP address.
 * \param[in] port Tile server mirror port.
*/
void TileManager::Session::get( const std::string& host, const std::string& port )
{
start_ = std::chrono::steady_clock::now();
request_.version( 11 );
request_.method( http::verb::get );
request_.target( tm_->tileServer_->tileTarget( tileHead_.z, tileHead_.x, tileHead_.y ) );
request_.set( http::field::host, host.c_str() );
request_.set( http::field::user_agent, BOOST_BEAST_VERSION_STRING );
namespace ph = std::placeholders;
resolver_.async_resolve( host.c_str(), port.c_str(),
std::bind( &Session::onResolve, shared_from_this(), ph::_1, ph::_2 ) );
}
/*!
* This is always invoked. It checks if there's a connection to the tile
* server. If there's none the session is forced to stop.
*/
void TileManager::Session::onTimeout()
{
if ( !connected_ )
{
stop();
}
}
/*!
* Gracefully close the socket if it's been opened.
*/
void TileManager::Session::stop()
{
if ( socket_.is_open() )
{
boost::system::error_code ec;
socket_.shutdown( tcp::socket::shutdown_both, ec );
if ( ec && ec != boost::system::errc::not_connected )
{
TSP() << "Session shutdown error: " << ec.message();
}
socket_.close( ec );
if ( ec )
{
TSP() << "Session socket close error: " << ec.message();
}
}
}
/*!
* For some errors try to start the session again if tries are left.
*
* \param[in] ec System error code.
* \param[in] msg Output message.
*/
void TileManager::Session::error( boost::system::error_code ec, const std::string& msg )
{
TSP() << msg << ": " << ec.message() << "\n";
if ( --tries_ > 0 && boost::system::errc::host_unreachable == ec )
{
start();
}
}
/*!
* \param[in] ec System error code.
* \param[in] results Boost.Asio resolver results type.
*/
void TileManager::Session::onResolve( boost::system::error_code ec, tcp::resolver::results_type results )
{
if ( ec )
{
return error( ec, "resolve" );
}
namespace ph = std::placeholders;
timer_.expires_at( std::chrono::steady_clock::now() + std::chrono::milliseconds( msecTimeout_ ) );
timer_.async_wait( std::bind( &TileManager::Session::onTimeout, shared_from_this() ) );
boost::asio::async_connect( socket_, results.begin(), results.end(),
std::bind( &Session::onConnect, shared_from_this(), ph::_1 ) );
}
/*!
* \param[in] ec System error code.
*/
void TileManager::Session::onConnect( boost::system::error_code ec )
{
if ( ec )
{
return error( ec, "connect" );
}
connected_ = true;
timer_.cancel();
namespace ph = std::placeholders;
http::async_write( socket_, request_,
std::bind( &Session::onWrite, shared_from_this(), ph::_1, ph::_2 ) );
}
/*!
* \param[in] ec System error code.
* \param[in] __ Not used.
*/
void TileManager::Session::onWrite( boost::system::error_code ec, std::size_t )
{
if ( ec )
{
return error( ec, "write" );
}
namespace ph = std::placeholders;
http::async_read( socket_, buffer_, response_,
std::bind( &Session::onRead, shared_from_this(), ph::_1, ph::_2 ) );
}
/*!
* \param[in] ec System error code.
* \param[in] bytes_transferred Not used.
*/
void TileManager::Session::onRead( boost::system::error_code ec, std::size_t bytes_transferred )
{
if ( ec )
{
return error( ec, "read" );
}
//auto end = std::chrono::steady_clock::now();
//auto msec = std::chrono::duration_cast< std::chrono::milliseconds >( end - start_ ).count();
//auto head = response_.base();
//TSP() << "Got response in " << msec << " msecs\n"
// << "result = " << head.result() << "\n"
// << "version = " << head.version() << "\n"
// << "reason = " << head.reason() << "\n";
tm_->prepareDir( tileHead_ );
const auto& body = response_.body();
std::ofstream tile( tileFile( tileHead_, tm_->tileServer_->serverName() ), std::ios::out | std::ios::binary );
tile.write( body.c_str(), body.size() );
tile.close();
std::vector<unsigned char> vec( body.begin(), body.end() );
{
std::lock_guard<std::mutex> lock( tm_->mutexResult_ );
tm_->vecResult_.emplace_back( std::move( tileHead_ ), std::move( vec ) );
}
{
std::lock_guard<std::mutex> lock( tm_->mutexCount_ );
auto it = tm_->mirrorCount_.find( mirror_ );
if ( it != tm_->mirrorCount_.end() )
{
++it->second;
}
else
{
tm_->mirrorCount_.emplace( mirror_, 1 );
}
}
}
/*!
* Checks TileManager::remains_ and if it equals zero sets TileManager::promiseReady_.
*/
void TileManager::Session::checkRemains()
{
if ( --tm_->remains_ == 0 )
{
tm_->promiseReady_->set_value();
}
}
}
|
{"hexsha": "d96cabf4d3418f68b79e97f9a611f755504143fe", "size": 13606, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "lib/src/TileManager.cpp", "max_stars_repo_name": "ford442/GlobeViewer", "max_stars_repo_head_hexsha": "f9c990322e5d1c9c4beae29b3fb7ff0fb70ce587", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2018-12-09T20:06:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T02:37:58.000Z", "max_issues_repo_path": "lib/src/TileManager.cpp", "max_issues_repo_name": "ford442/GlobeViewer", "max_issues_repo_head_hexsha": "f9c990322e5d1c9c4beae29b3fb7ff0fb70ce587", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 51.0, "max_issues_repo_issues_event_min_datetime": "2018-09-26T13:05:33.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-18T17:07:09.000Z", "max_forks_repo_path": "lib/src/TileManager.cpp", "max_forks_repo_name": "ford442/GlobeViewer", "max_forks_repo_head_hexsha": "f9c990322e5d1c9c4beae29b3fb7ff0fb70ce587", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2018-12-09T20:06:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T05:51:56.000Z", "avg_line_length": 26.1653846154, "max_line_length": 117, "alphanum_fraction": 0.5966485374, "num_tokens": 3409}
|
# Important to keep cv2 top import
import cv2
import os
import copy
import json
from collections import defaultdict
import numpy as np
import logging
import torch
import torchvision
from detectron2.data.dataset_mapper import SimpleDatasetMapper
import detectron2.utils
from detectron2.utils import comm
import detectron2.data.detection_utils
from detectron2.utils.logger import setup_logger
from detectron2 import model_zoo
from detectron2.engine import DefaultTrainer, DefaultPredictor, default_argument_parser, launch, default_setup, hooks
from detectron2.config import get_cfg, set_global_cfg
from detectron2.data import MetadataCatalog, DatasetCatalog, \
build_detection_test_loader, build_detection_train_loader
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.structures import BoxMode
from detectron2.evaluation import COCOEvaluator, inference_on_dataset, verify_results, PascalVOCDetectionEvaluator, \
LVISEvaluator, DatasetEvaluators
from detectron2.data.datasets import register_coco_instances
import OurPaper.training
from OurPaper.dataset import custom_register_dataset, register_test_class_coco
import dutils.imageutils as diu
import dutils.learnutils as dlu
import dutils.simpleutils as dsu
import dutils.detectronutils as ddu
# register_test_class_coco('TestSmallCOCO', [3, 5])
# register_test_class_coco('TestSmallCOCO_test', [10], 'cocosplit/datasplit/5k.json', 'coco/val2014')
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains a number pre-defined logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can use the cleaner
"SimpleTrainer", or write your own training loop.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
metadata = MetadataCatalog.get(dataset_name)
# Note(): Check original meta classes, also tells us if we have a meta dataset
# Ugly but easier to support than other options
classes_to_eval = []
if hasattr(metadata, 'classes_to_eval'):
classes_to_eval = metadata.classes_to_eval
print(f'Using meta-dataset with classes {classes_to_eval}')
evaluator_type = metadata.evaluator_type
if evaluator_type == "coco":
evaluator_list.append(
COCOEvaluator(dataset_name, cfg, True, output_folder, classes_to_eval=classes_to_eval))
if evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
set_global_cfg(cfg)
default_setup(cfg, args)
# Setup loggers
setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="doublefewshot")
setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name='OurPaper')
return cfg
def main(args):
cfg = setup(args)
if args.print_only:
print(cfg)
return
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if comm.is_main_process():
verify_results(cfg, res)
return res
if 'pure-metric' in args.method:
OurPaper.training.metric_training(cfg, args)
return
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
def _check_and_sanitize_args(args):
"""Perform checks on parsed arguments"""
if args.method == 'pure-metric-finetuned':
assert args.src1
return args
if __name__ == "__main__":
args = default_argument_parser().parse_args()
args = _check_and_sanitize_args(args)
print('Executing training script!')
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
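# Example launch (hypothetical config path; --config-file and --num-gpus come
# from detectron2's default_argument_parser, while flags such as --method and
# --print-only are assumed to be added by this repo's modified parser):
#   python tools/run_train.py --config-file configs/fewshot.yaml --num-gpus 2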
|
{"hexsha": "cd186b4135c9508879f3ccb1c5b9585ec53ceee8", "size": 5073, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/run_train.py", "max_stars_repo_name": "superclass-FSIS/test", "max_stars_repo_head_hexsha": "9bb2844c77704a609291135b75e94a794f235aa0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 45, "max_stars_repo_stars_event_min_datetime": "2021-04-09T12:52:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T09:08:58.000Z", "max_issues_repo_path": "tools/run_train.py", "max_issues_repo_name": "superclass-FSIS/test", "max_issues_repo_head_hexsha": "9bb2844c77704a609291135b75e94a794f235aa0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-06-29T07:47:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T04:21:00.000Z", "max_forks_repo_path": "tools/run_train.py", "max_forks_repo_name": "superclass-FSIS/test", "max_forks_repo_head_hexsha": "9bb2844c77704a609291135b75e94a794f235aa0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2021-05-23T05:48:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T06:42:13.000Z", "avg_line_length": 33.1568627451, "max_line_length": 117, "alphanum_fraction": 0.7092450227, "include": true, "reason": "import numpy", "num_tokens": 1121}
|
import numpy as np
from ..editortool import EditorTool
from ... import util
class ContourTool(EditorTool):
def on_paint(self):
if not self.is_mask:
return self.canvas
output = np.zeros(self.canvas.shape, dtype=np.uint8)
util.draw.contours(output, self.canvas, (*self.color, 255))
return output
|
{"hexsha": "92b9f7492a654220ce7d4c439bbe01f1dfd9d485", "size": 347, "ext": "py", "lang": "Python", "max_stars_repo_path": "segmate/editor/tools/contour.py", "max_stars_repo_name": "justacid/segmate", "max_stars_repo_head_hexsha": "7b66b207ca353805f7ad9c7e003645cd2cbc227a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "segmate/editor/tools/contour.py", "max_issues_repo_name": "justacid/segmate", "max_issues_repo_head_hexsha": "7b66b207ca353805f7ad9c7e003645cd2cbc227a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "segmate/editor/tools/contour.py", "max_forks_repo_name": "justacid/segmate", "max_forks_repo_head_hexsha": "7b66b207ca353805f7ad9c7e003645cd2cbc227a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6875, "max_line_length": 67, "alphanum_fraction": 0.6570605187, "include": true, "reason": "import numpy", "num_tokens": 79}
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
from models.conv import GraphSageConv, ErnieSageV2Conv
class Encoder(nn.Layer):
""" Base class
Chose different type ErnieSage class.
"""
def __init__(self, config):
"""init function
Args:
config (Dict): all configs.
"""
super(Encoder, self).__init__()
self.config = config
        # Don't add ernie to self, otherwise there will be more copies of the ernie weights
# self.ernie = ernie
@classmethod
def factory(cls, config, ernie):
"""Classmethod for ernie sage model.
Args:
config (Dict): all configs.
ernie (nn.Layer): the ernie model.
Raises:
ValueError: Invalid ernie sage model type.
Returns:
Class: real model class.
"""
model_type = config.model_type
if model_type == "ErnieSageV2":
return ErnieSageV2Encoder(config, ernie)
else:
raise ValueError("Invalid ernie sage model type")
def forward(self, *args, **kwargs):
raise NotImplementedError
class ErnieSageV2Encoder(Encoder):
def __init__(self, config, ernie):
""" Ernie sage v2 encoder
Args:
config (Dict): all config.
ernie (nn.Layer): the ernie model.
"""
super(ErnieSageV2Encoder, self).__init__(config)
        # Don't add ernie to self, otherwise there will be more copies of the ernie weights
# self.ernie = ernie
self.convs = nn.LayerList()
initializer = None
fc_lr = self.config.lr / 0.001
erniesage_conv = ErnieSageV2Conv(
ernie,
ernie.config["hidden_size"],
self.config.hidden_size,
learning_rate=fc_lr,
aggr_func="sum")
self.convs.append(erniesage_conv)
for i in range(1, self.config.num_layers):
layer = GraphSageConv(
self.config.hidden_size,
self.config.hidden_size,
learning_rate=fc_lr,
aggr_func="sum")
self.convs.append(layer)
if self.config.final_fc:
self.linear = nn.Linear(
self.config.hidden_size,
self.config.hidden_size,
weight_attr=paddle.ParamAttr(learning_rate=fc_lr))
def take_final_feature(self, feature, index):
"""Gather the final feature.
Args:
feature (Tensor): the total featue tensor.
index (Tensor): the index to gather.
Returns:
Tensor: final result tensor.
"""
feat = paddle.gather(feature, index)
if self.config.final_fc:
feat = self.linear(feat)
if self.config.final_l2_norm:
feat = F.normalize(feat, axis=1)
return feat
def forward(self, graphs, term_ids, inputs):
""" forward train function of the model.
Args:
graphs (Graph List): list of graph tensors.
inputs (Tensor List): list of input tensors.
Returns:
Tensor List: list of final feature tensors.
"""
# term_ids for ErnieSageConv is the raw feature.
feature = term_ids
for i in range(len(graphs), self.config.num_layers):
graphs.append(graphs[0])
for i in range(0, self.config.num_layers):
if i == self.config.num_layers - 1 and i != 0:
act = None
else:
act = "leaky_relu"
feature = self.convs[i](graphs[i], feature, act)
final_feats = [self.take_final_feature(feature, x) for x in inputs]
return final_feats
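# Usage sketch (hypothetical config object; mirrors the factory classmethod above):
#   encoder = Encoder.factory(config, ernie)  # requires config.model_type == "ErnieSageV2"
#   final_feats = encoder(graphs, term_ids, inputs)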
|
{"hexsha": "42b19a492321184467a3cc379a67149436a3c670", "size": 4374, "ext": "py", "lang": "Python", "max_stars_repo_path": "PaddleNLP/examples/text_graph/erniesage/models/encoder.py", "max_stars_repo_name": "weiwei1115/models", "max_stars_repo_head_hexsha": "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-19T07:27:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-23T06:22:57.000Z", "max_issues_repo_path": "PaddleNLP/examples/text_graph/erniesage/models/encoder.py", "max_issues_repo_name": "weiwei1115/models", "max_issues_repo_head_hexsha": "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-22T08:11:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-22T08:11:08.000Z", "max_forks_repo_path": "PaddleNLP/examples/text_graph/erniesage/models/encoder.py", "max_forks_repo_name": "weiwei1115/models", "max_forks_repo_head_hexsha": "e2c96c5f64b1dc8f0d5d9aa121300b87150e11e3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-09T01:50:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-09T01:50:13.000Z", "avg_line_length": 31.6956521739, "max_line_length": 88, "alphanum_fraction": 0.5999085505, "include": true, "reason": "import numpy", "num_tokens": 990}
|
from collections import defaultdict
import jsonlines
import numpy as np
import os
import tagme
import ujson
import pandas as pd
from tqdm import tqdm
pd.options.display.max_colwidth = 500
from bootleg.symbols.constants import *
def copy_candidates(from_alias, to_alias, alias2qids, max_candidates=30, qids_to_add=None):
"""This will copy the candidates from from_alias to to_alias. We assume to_alias does not exist and from_alias does exists.
qids_to_add will be added to the beginning of the candidate list to ensure they are among the top 30"""
if qids_to_add is None:
qids_to_add = []
assert from_alias in alias2qids, f"The from_alias {from_alias} must be in the alias2qids mapping. Use add_new_alias command from a new alias"
assert to_alias not in alias2qids, f"The to_alias {to_alias} must not be in alias2qids."
candidates = alias2qids[from_alias]
# Add the qids to add to candidates. As the user wants these qids, give them the highest score
if len(qids_to_add) > 0:
top_score = candidates[0][1]
new_candidates = [[q, top_score] for q in qids_to_add]
candidates = new_candidates + candidates
if len(candidates) > max_candidates:
print(f"Filtering candidates down to top {max_candidates}")
candidates = candidates[:max_candidates]
alias2qids[to_alias] = candidates
return alias2qids
def add_new_alias(new_alias, alias2qids, qids_to_add, max_candidates=30):
assert new_alias not in alias2qids, f"The new_alias {new_alias} must not be in alias2qids."
# Assign each qid a score of 1.0
candidates = [[q, 1.0] for q in qids_to_add]
if len(candidates) > max_candidates:
print(f"Filtering candidates down to top {max_candidates}")
candidates = candidates[:max_candidates]
alias2qids[new_alias] = candidates
return alias2qids
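# Usage sketch (made-up alias and QIDs; alias2qids maps alias -> [[qid, score], ...]
# as assumed by the two functions above):
#   alias2qids = {"new york": [["Q60", 10.0], ["Q1384", 5.0]]}
#   alias2qids = copy_candidates("new york", "nyc", alias2qids)
#   alias2qids = add_new_alias("big apple", alias2qids, qids_to_add=["Q60"])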
def load_title_map(entity_mapping_dir):
return ujson.load(open(os.path.join(entity_mapping_dir, 'qid2title.json')))
def load_cand_map(entity_mapping_dir, alias_map_file):
return ujson.load(open(os.path.join(entity_mapping_dir, alias_map_file)))
def load_predictions(file):
lines = {}
with jsonlines.open(file) as f:
for line in f:
lines[line['sent_idx_unq']] = line
return lines
def score_predictions(orig_file, pred_file, title_map, cands_map=None, type_symbols=None, kg_symbols=None):
"""Loads a jsonl file and joins with the results from dump_preds"""
if cands_map is None:
cands_map = {}
if type_symbols is None:
type_symbols = []
if kg_symbols is None:
kg_symbols = []
num_lines = sum(1 for line in open(orig_file))
preds = load_predictions(pred_file)
correct = 0
total = 0
rows = []
with jsonlines.open(orig_file) as f:
for line in tqdm(f, total=num_lines):
sent_idx = line['sent_idx_unq']
gold_qids = line['qids']
pred_qids = preds[sent_idx]['qids']
assert len(gold_qids) == len(pred_qids), 'Gold and pred QIDs have different lengths'
correct += np.sum([gold_qid == pred_qid for gold_qid, pred_qid in zip(gold_qids, pred_qids)])
total += len(gold_qids)
# for each alias, append a row in the merged result table
for alias_idx in range(len(gold_qids)):
res = {
'sentence': line['sentence'],
'sent_idx': line['sent_idx_unq'],
'aliases': line['aliases'],
'span': line['spans'][alias_idx],
'slices': line.get('slices', {}),
'alias': line['aliases'][alias_idx],
'alias_idx': alias_idx,
'is_gold_label': line['gold'][alias_idx],
'gold_qid': gold_qids[alias_idx],
'pred_qid': pred_qids[alias_idx],
'gold_title': title_map[gold_qids[alias_idx]],
'pred_title': title_map[pred_qids[alias_idx]],
'all_gold_qids': gold_qids,
'all_pred_qids': pred_qids,
'gold_label_aliases': [al for i, al in enumerate(line['aliases']) if line['gold'][i] is True],
'all_is_gold_labels': line['gold'],
'all_spans': line['spans']
}
slices = []
if 'slices' in line:
for sl_name in line['slices']:
if str(alias_idx) in line['slices'][sl_name] and line['slices'][sl_name][str(alias_idx)] > 0.5:
slices.append(sl_name)
res['slices'] = slices
if len(cands_map) > 0:
res["cands"] = [tuple([title_map[q[0]], preds[sent_idx]["cand_probs"][alias_idx][i]]) for i, q in enumerate(cands_map[line['aliases'][alias_idx]])]
for type_sym in type_symbols:
type_nm = os.path.basename(os.path.splitext(type_sym.type_file)[0])
gold_types = type_sym.get_types(gold_qids[alias_idx])
pred_types = type_sym.get_types(pred_qids[alias_idx])
res[f"{type_nm}_gld"] = gold_types
res[f"{type_nm}_pred"] = pred_types
for kg_sym in kg_symbols:
kg_nm = os.path.basename(os.path.splitext(kg_sym.kg_adj_file)[0])
connected_pairs_gld = []
connected_pairs_pred = []
for alias_idx2 in range(len(gold_qids)):
if kg_sym.is_connected(gold_qids[alias_idx], gold_qids[alias_idx2]):
connected_pairs_gld.append(gold_qids[alias_idx2])
if kg_sym.is_connected(pred_qids[alias_idx], pred_qids[alias_idx2]):
connected_pairs_pred.append(pred_qids[alias_idx2])
res[f"{kg_nm}_gld"] = connected_pairs_gld
res[f"{kg_nm}_pred"] = connected_pairs_gld
rows.append(res)
return pd.DataFrame(rows)
def load_mentions(file):
lines = []
with jsonlines.open(file) as f:
for line in f:
new_line = {
'sentence': line['sentence'],
'aliases': line['aliases'],
'spans': line['spans']
}
lines.append(new_line)
return pd.DataFrame(lines)
def create_error(sent_obj, gold_aliases, gold_qids, gold_spans, found_aliases, found_spans, pred_qids, pred_probs, error):
return {
"sent_idx": sent_obj["sent_idx_unq"],
"sentence": sent_obj["sentence"],
"gold_aliases": gold_aliases,
"gold_qids": gold_qids,
"gold_spans": gold_spans,
"pred_aliases": found_aliases,
"pred_spans": found_spans,
"pred_qids": pred_qids,
"pred_probs": pred_probs,
"error": error
}
def compute_precision_and_recall(orig_label_file, new_label_file, threshold=None):
    # read in the new label file and map by sentence index for fast retrieval
total_mentions = 0
correct_mentions = 0
pred_mentions = 0
new_labels = {}
with jsonlines.open(new_label_file) as f:
for line in f:
new_labels[line['sent_idx_unq']] = line
errors = defaultdict(list)
with jsonlines.open(orig_label_file) as f:
for line in f:
gold_aliases = line['aliases']
gold_spans = line['spans']
gold_qids = line['qids']
pred_vals = new_labels[line['sent_idx_unq']]
pred_aliases = pred_vals['aliases']
pred_spans = pred_vals['spans']
pred_qids = pred_vals['qids']
pred_probs = [round(p, 3) for p in pred_vals['probs']]
if threshold is not None:
new_pred_qids = []
for pred_qid, pred_prob in zip(pred_qids, pred_probs):
if pred_prob < threshold:
new_pred_qids.append('NC')
else:
new_pred_qids.append(pred_qid)
pred_qids = new_pred_qids
total_mentions += len(gold_aliases)
# predicted mentions are only those that aren't nil ('NC')
pred_mentions += sum([pred_qid != 'NC' for pred_qid in pred_qids])
for gold_alias, gold_qid, gold_span in zip(gold_aliases, gold_qids, gold_spans):
gold_span_start, gold_span_end = gold_span
fuzzy_gold_left = [gold_span_start-1,gold_span_end]
fuzzy_gold_right = [gold_span_start+1,gold_span_end]
if gold_span in pred_spans or fuzzy_gold_left in pred_spans or fuzzy_gold_right in pred_spans:
if gold_span in pred_spans:
pred_idx = pred_spans.index(gold_span)
elif fuzzy_gold_left in pred_spans:
pred_idx = pred_spans.index(fuzzy_gold_left)
elif fuzzy_gold_right in pred_spans:
pred_idx = pred_spans.index(fuzzy_gold_right)
if gold_qid == pred_qids[pred_idx]:
correct_mentions += 1
# could not find a label for the mention
elif pred_qids[pred_idx] == 'NC':
errors['missing_mention'].append(create_error(line, gold_aliases, gold_qids,
gold_spans, pred_aliases, pred_spans,
pred_qids, pred_probs, error=gold_alias))
else:
errors['wrong_entity'].append(create_error(line, gold_aliases,
gold_qids, gold_spans, pred_aliases, pred_spans,
pred_qids, pred_probs, error=gold_alias))
else:
errors['missing_mention'].append(create_error(line, gold_aliases, gold_qids,
gold_spans, pred_aliases, pred_spans,
pred_qids, pred_probs, error=gold_alias))
for pred_alias, pred_span, pred_qid in zip(pred_aliases, pred_spans, pred_qids):
if pred_qid == 'NC':
errors['NC'].append(create_error(line, gold_aliases, gold_qids, gold_spans, pred_aliases, pred_spans,
pred_qids, pred_probs, error=''))
pred_span_start, pred_span_end = pred_span
                fuzzy_pred_left = [pred_span_start-1,pred_span_end]
                fuzzy_pred_right = [pred_span_start+1,pred_span_end]
                if pred_span not in gold_spans and fuzzy_pred_left not in gold_spans and fuzzy_pred_right not in gold_spans and pred_qid != 'NC':
errors['extra_mention'].append(create_error(line, gold_aliases, gold_qids, gold_spans, pred_aliases, pred_spans,
pred_qids, pred_probs, error=pred_alias))
print(f'Recall: {round(correct_mentions/total_mentions, 2)} ({correct_mentions}/{total_mentions})')
print(f'Precision: {round(correct_mentions/pred_mentions, 2)} ({correct_mentions}/{pred_mentions})')
return errors
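# Usage sketch (hypothetical file paths; both files follow the jsonl schema read
# above, and the buckets are the keys the function fills in):
#   errors = compute_precision_and_recall('gold.jsonl', 'bootleg_labels.jsonl', threshold=0.5)
#   print({k: len(v) for k, v in errors.items()})  # wrong_entity, missing_mention, extra_mention, NC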
def tagme_annotate(in_file, out_file, threshold=0.1, wpid2qid=None):
with jsonlines.open(in_file) as f_in, jsonlines.open(out_file, 'w') as f_out:
for line in f_in:
aliases = []
spans = []
qids = []
probs = []
text = line['sentence']
text_spans = text.split()
text_span_indices = []
total_len = 0
for i,t in enumerate(text_spans):
text_span_indices.append(total_len)
total_len += len(t)+1
lunch_annotations = tagme.annotate(text)
# as the threshold increases, the precision increases, but the recall decreases
for ann in lunch_annotations.get_annotations(threshold):
mention = ann.mention
qid = wpid2qid[str(ann.entity_id)]
span_start = text_span_indices.index(ann.begin)
try:
span_end = text_span_indices.index(ann.end+1)
except:
span_end = len(text_spans)
aliases.append(mention)
spans.append([span_start, span_end])
qids.append(qid)
probs.append(ann.score)
line['aliases'] = aliases
line['qids'] = qids
line['spans'] = spans
line['probs'] = probs
line[ANCHOR_KEY] = [True for _ in aliases]
f_out.write(line)
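# Usage sketch (requires a TAGME API token set on tagme.GCUBE_TOKEN and a
# Wikipedia-page-id -> QID mapping; the paths are hypothetical):
#   tagme.GCUBE_TOKEN = "<your-token>"
#   wpid2qid = ujson.load(open('wpid2qid.json'))
#   tagme_annotate('test.jsonl', 'test_tagme.jsonl', threshold=0.2, wpid2qid=wpid2qid)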
|
{"hexsha": "425e550ef98ea5edf4de8b3b32a15b4f4e11ee9c", "size": 12904, "ext": "py", "lang": "Python", "max_stars_repo_path": "tutorials/utils.py", "max_stars_repo_name": "mleszczy/bootleg", "max_stars_repo_head_hexsha": "162d74001cdfbbe146753393641d549e0328acb1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-11T18:40:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-11T18:40:09.000Z", "max_issues_repo_path": "tutorials/utils.py", "max_issues_repo_name": "mleszczy/bootleg", "max_issues_repo_head_hexsha": "162d74001cdfbbe146753393641d549e0328acb1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tutorials/utils.py", "max_forks_repo_name": "mleszczy/bootleg", "max_forks_repo_head_hexsha": "162d74001cdfbbe146753393641d549e0328acb1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.6162361624, "max_line_length": 167, "alphanum_fraction": 0.5773403596, "include": true, "reason": "import numpy", "num_tokens": 2860}
|
%ADABOOSTC
%
% [W,V,ALF] = ADABOOSTC(A,CLASSF,N,RULE,VERBOSE);
%
% INPUT
% A Dataset
% CLASSF Untrained weak classifier
% N Number of classifiers to be trained
% RULE Combining rule (default: weighted voting)
% VERBOSE Suppress progress report if 0 (default)
%
% OUTPUT
% W Combined trained classifier
% V Cell array of all classifiers
% Use VC = stacked(V) for combining
% ALF Weights
%
% DESCRIPTION
%
% Computation of a combined classifier according to adaboost.
%
% In total N weighted versions of the training set A are generated
% iteratively and used for training the specified classifier.
% The weights, used as selection probabilities for the objects in the
% training set, are updated according to the AdaBoost rule.
%
% The entire set of generated classifiers is given in V.
% The set of classifier weights according to AdaBoost is returned in ALF.
%
% Various aggregating possibilities can be given in
% the final parameter rule:
% []: WVOTEC, weighted voting.
% VOTEC voting
% MEANC sum rule
% AVERAGEC averaging of coefficients (for linear combiners)
% PRODC product rule
% MAXC maximum rule
% MINC minimum rule
% MEDIANC median rule
%
% REFERENCE
% Ji Zhu, Saharon Rosset, Hui Zou and Trevor Hastie,
% Multiclass Adaboost. A multiclass generalization of the Adaboost
% algorithm, based on a generalization of the exponential loss.
% http://www-stat.stanford.edu/~hastie/Papers/samme.pdf
%
% SEE ALSO (<a href="http://37steps.com/prtools">PRTools Guide</a>)
% MAPPINGS, DATASETS
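%
% EXAMPLE
% A minimal sketch (assumes the PRTools banana-set generator GENDATB and the
% decision stump classifier STUMPC are available on the path):
%
%   a = gendatb([50 50]);       % two-class training set
%   w = adaboostc(a,stumpc,50); % boost 50 decision stumps
%   e = a*w*testc               % estimate the resubstitution error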
% Copyright: R.P.W. Duin, r.p.w.duin@37steps.com
% Faculty EWI, Delft University of Technology
% P.O. Box 5031, 2600 GA Delft, The Netherlands
% (Multiclass correction by Marcin Budka, Bournemouth Univ., UK)
%function [W,V,alf] = adaboostc(a,clasf,n,rule,verbose)
function [out,V,alf] = adaboostc(varargin)
%% INITIALISATION
argin = setdefaults(varargin,[],nmc,100,[],0);
if mapping_task(argin,'definition')
out = define_mapping(argin,'untrained','Adaboost');
%% TRAINING
elseif mapping_task(argin,'training')
[a,clasf,n,rule,verbose] = deal(argin{:});
[m,k,c] = getsize(a);
V = [];
laba = getlab(a);
u = ones(m,1)/m; % initialise object weights
alf = zeros(1,n); % space for classifier weights
isseparable = 0; % check if we can make 0 error
if verbose && k == 2
figure(verbose);
scatterd(a);
end
%% generate n classifiers
for i = 1:n
b = gendatw(a,u,m); % sample training set
b = setprior(b,getprior(a)); % use original priors
w = b*clasf; % train weak classifier
ra = a*w; % test weak classifier
if verbose && k == 2
plotc(w,1); drawnow
end
labc = labeld(ra);
diff = sum(labc~=laba,2)~=0; % objects erroneously classified
erra = sum((diff).*u); % weighted error on original dataset
if (erra==0)
isseparable = 1;
V = w;
break;
end
    if (erra < (1-1/c)) % if the classifier is better than random guessing...
alf(i) = 0.5*(log((1-erra)/erra) + log(c-1));
correct = find(diff==0); % find correctly classified objects
wrong = find(diff==1); % find incorrectly classified objects
u(correct) = u(correct)*exp(-alf(i)); % give them the ...
u(wrong) = u(wrong)*exp(alf(i)); % proper weights
u = u./sum(u); % normalize weights
else
alf(i) = 0;
end
if verbose
disp([erra alf(i) sum(alf)])
end
V = [V w]; % store all classifiers
end
%% combine and return
if isseparable
W = V;
W = setname(W,['Boosted ',getname(V)]);
else
if isempty(rule)
W = wvotec(V,alf); % default is weighted combiner
else
W = traincc(a,V,rule); % otherwise, use user supplied combiner
end
end
if verbose > 0 && k == 2
plotc(W,'r',3)
ee = a*W*testc;
title(['Error: ', num2str(ee)]);
end
out = W;
else
error('Illegal call')
end
return
|
{"author": "marianux", "repo": "ecg-kit", "sha": "c8e3de47c54a9214138143676d2aa546b0540dd2", "save_path": "github-repos/MATLAB/marianux-ecg-kit", "path": "github-repos/MATLAB/marianux-ecg-kit/ecg-kit-c8e3de47c54a9214138143676d2aa546b0540dd2/common/prtools/adaboostc.m"}
|
import unittest
import os
import cPickle as pickle
import skrf as rf
import numpy as npy
from nose.tools import nottest
from nose.plugins.skip import SkipTest
class CalibrationTest(object):
'''
    This is the generic Calibration test case which all Calibration
    subclasses should be able to pass. Subclasses must implement a
    `measure` method and create `self.cal`, `self.wg`, and `self.n_ports`
    in their `setUp`.
'''
def test_accuracy_of_dut_correction(self):
a = self.wg.random(n_ports=self.n_ports, name = 'actual')
m = self.measure(a)
c = self.cal.apply_cal(m)
c.name = 'corrected'
self.assertEqual(c,a)
def test_error_ntwk(self):
a= self.cal.error_ntwk
def test_coefs_ntwks(self):
a= self.cal.coefs_ntwks
def test_caled_ntwks(self):
a= self.cal.caled_ntwks
def test_residual_ntwks(self):
a= self.cal.residual_ntwks
def test_embed_then_apply_cal(self):
a = self.wg.random(n_ports=self.n_ports)
self.assertEqual(self.cal.apply_cal(self.cal.embed(a)),a)
def test_embed_equal_measure(self):
a = self.wg.random(n_ports=self.n_ports)
self.assertEqual(self.cal.embed(a),self.measure(a))
def test_from_coefs(self):
cal_from_coefs = self.cal.from_coefs(self.cal.frequency, self.cal.coefs)
ntwk = self.wg.random(n_ports=self.n_ports)
if cal_from_coefs.apply_cal(self.cal.embed(ntwk))!= ntwk:
raise ValueError
self.assertEqual(cal_from_coefs.apply_cal(self.cal.embed(ntwk)),ntwk)
class OnePortTest(unittest.TestCase, CalibrationTest):
'''
One-port calibration test.
'''
def setUp(self):
self.n_ports = 1
self.wg = rf.RectangularWaveguide(rf.F(75,100,11), a=100*rf.mil,z0=50)
wg = self.wg
wg.frequency = rf.F.from_f([100])
self.E = wg.random(n_ports =2, name = 'E')
ideals = [
wg.short( name='short'),
wg.delay_short( 45.,'deg',name='ew'),
wg.delay_short( 90.,'deg',name='qw'),
wg.match( name='load'),
]
measured = [self.measure(k) for k in ideals]
self.cal = rf.OnePort(
is_reciprocal = True,
ideals = ideals,
measured = measured,
)
def measure(self, ntwk):
out = self.E**ntwk
out.name = ntwk.name
return out
def test_accuracy_of_directivity(self):
self.assertEqual(
self.E.s11,
self.cal.coefs_ntwks['directivity'],
)
def test_accuracy_of_source_match(self):
self.assertEqual(
self.E.s22,
self.cal.coefs_ntwks['source match'],
)
def test_accuracy_of_reflection_tracking(self):
self.assertEqual(
self.E.s21*self.E.s12,
self.cal.coefs_ntwks['reflection tracking'],
)
class SDDLTest(OnePortTest):
def setUp(self):
#raise SkipTest('Doesnt work yet')
self.n_ports = 1
self.wg = rf.RectangularWaveguide(rf.F(75,100,11), a=100*rf.mil,z0=50)
wg = self.wg
wg.frequency = rf.F.from_f([100])
self.E = wg.random(n_ports =2, name = 'E')
#self.E.s[0,:,:] = npy.array([[.1j,1],[1j,1j+2]])
#print self.E.s[0]
ideals = [
wg.short( name='short'),
wg.delay_short( 45.,'deg',name='ew'),
wg.delay_short( 90.,'deg',name='qw'),
wg.load(.2+.2j, name='load'),
]
actuals = [
wg.short( name='short'),
wg.delay_short( 10.,'deg',name='ew'),
wg.delay_short( 33.,'deg',name='qw'),
wg.load(.2+.2j, name='load'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.SDDL(
is_reciprocal = True,
ideals = ideals,
measured = measured,
)
def test_from_coefs(self):
raise SkipTest('not applicable ')
class SDDL2Test(OnePortTest):
def setUp(self):
#raise SkipTest('Doesnt work yet')
self.n_ports = 1
self.wg = rf.RectangularWaveguide(rf.F(75,100,11), a=100*rf.mil,z0=50)
wg = self.wg
wg.frequency = rf.F.from_f([100])
self.E = wg.random(n_ports =2, name = 'E')
#self.E.s[0,:,:] = npy.array([[.1j,1],[1j,1j+2]])
#print self.E.s[0]
ideals = [
wg.short( name='short'),
wg.delay_short( 45.,'deg',name='ew'),
wg.delay_short( 90.,'deg',name='qw'),
wg.load(.2+.2j, name='load'),
]
actuals = [
wg.short( name='short'),
wg.delay_short( 10.,'deg',name='ew'),
wg.delay_short( 80.,'deg',name='qw'),
wg.load(.2+.2j, name='load'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.SDDL2(
is_reciprocal = True,
ideals = ideals,
measured = measured,
)
def test_from_coefs(self):
raise SkipTest('not applicable ')
class EightTermTest(unittest.TestCase, CalibrationTest):
def setUp(self):
self.n_ports = 2
self.wg = rf.RectangularWaveguide(rf.F(75,100,3), a=100*rf.mil,z0=50)
wg= self.wg
wg.frequency = rf.F.from_f([100])
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
wg.thru(name='thru'),
]
measured = [self.measure(k) for k in ideals]
self.cal = rf.EightTerm(
ideals = ideals,
measured = measured,
switch_terms = (self.gamma_f, self.gamma_r)
)
def terminate(self, ntwk):
'''
terminate a measured network with the switch terms
'''
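# Sketch of the convention assumed here: each reflection term is the
# input reflection with the far port terminated by its switch term, and
# each transmission term is divided by (1 - S22*gamma_f) or
# (1 - S11*gamma_r) respectively.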
m = ntwk.copy()
ntwk_flip = ntwk.copy()
ntwk_flip.flip()
m.s[:,0,0] = (ntwk**self.gamma_f).s[:,0,0]
m.s[:,1,1] = (ntwk_flip**self.gamma_r).s[:,0,0]
m.s[:,1,0] = ntwk.s[:,1,0]/(1-ntwk.s[:,1,1]*self.gamma_f.s[:,0,0])
m.s[:,0,1] = ntwk.s[:,0,1]/(1-ntwk.s[:,0,0]*self.gamma_r.s[:,0,0])
return m
def measure(self,ntwk):
out = self.terminate(self.X**ntwk**self.Y)
out.name = ntwk.name
return out
def test_unterminating(self):
a = self.wg.random(n_ports=self.n_ports)
# unterminated measurement
ut = self.X**a**self.Y
#terminated measurement
m = self.measure(a)
self.assertEqual(self.cal.unterminate(m), ut)
def test_forward_directivity_accuracy(self):
self.assertEqual(
self.X.s11,
self.cal.coefs_ntwks['forward directivity'])
def test_forward_source_match_accuracy(self):
self.assertEqual(
self.X.s22 ,
self.cal.coefs_ntwks['forward source match'] )
def test_forward_reflection_tracking_accuracy(self):
self.assertEqual(
self.X.s21 * self.X.s12 ,
self.cal.coefs_ntwks['forward reflection tracking'])
def test_reverse_source_match_accuracy(self):
self.assertEqual(
self.Y.s11 ,
self.cal.coefs_ntwks['reverse source match'] )
def test_reverse_directivity_accuracy(self):
self.assertEqual(
self.Y.s22 ,
self.cal.coefs_ntwks['reverse directivity'] )
def test_reverse_reflection_tracking_accuracy(self):
self.assertEqual(
self.Y.s21 * self.Y.s12 ,
self.cal.coefs_ntwks['reverse reflection tracking'])
def test_k_accuracy(self):
self.assertEqual(
self.X.s21/self.Y.s12 ,
self.cal.coefs_ntwks['k'] )
@nottest
def test_verify_12term(self):
self.assertTrue(self.cal.verify_12term_ntwk.s_mag.max() < 1e-3)
class TRLTest(EightTermTest):
def setUp(self):
self.n_ports = 2
self.wg = rf.RectangularWaveguide(rf.F(75,100,11), a=100*rf.mil,z0=50)
wg= self.wg
wg.frequency = rf.F.from_f([100])
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
# make error networks have s21,s12 >> s11,s22 so that TRL
# can guess at line length
self.X.s[:,0,0] *=1e-1
self.Y.s[:,0,0] *=1e-1
self.X.s[:,1,1] *=1e-1
self.Y.s[:,1,1] *=1e-1
actuals = [
wg.thru( name='thru'),
wg.short(nports=2, name='short'),
wg.line(45,'deg',name='line'),
]
ideals = [
wg.thru( name='thru'),
wg.short(nports=2, name='short'),
wg.line(90,'deg',name='line'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.TRL(
ideals = ideals,
measured = measured,
switch_terms = (self.gamma_f, self.gamma_r)
)
class SOLTTest(unittest.TestCase, CalibrationTest):
'''
This test verifies the accuracy of the SOLT calibration. Generating
measured networks requires different error networks for the forward and
reverse excitation states; these are described as follows:
forward excitation
used for S21 and S11
Mf = Xf ** S ** Yf
reverse excitation
used for S12 and S22
Mr = Xr ** S ** Yr
'''
def setUp(self):
self.n_ports = 2
self.wg = rf.RectangularWaveguide(rf.F(75,100,11), a=100*rf.mil,z0=50)
wg = self.wg
wg.frequency = rf.F.from_f([100])
self.wg = wg
self.Xf = wg.random(n_ports =2, name = 'Xf')
self.Xr = wg.random(n_ports =2, name = 'Xr')
self.Yf = wg.random(n_ports =2, name='Yf')
self.Yr = wg.random(n_ports =2, name='Yr')
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
wg.thru(name='thru'),
]
measured = [ self.measure(k) for k in ideals]
self.cal = rf.SOLT(
ideals = ideals,
measured = measured,
)
def measure(self,ntwk):
m = ntwk.copy()
mf = self.Xf**ntwk**self.Yf
mr = self.Xr**ntwk**self.Yr
m.s[:,1,0] = mf.s[:,1,0]
m.s[:,0,0] = mf.s[:,0,0]
m.s[:,0,1] = mr.s[:,0,1]
m.s[:,1,1] = mr.s[:,1,1]
return m
def test_forward_directivity_accuracy(self):
self.assertEqual(
self.Xf.s11,
self.cal.coefs_ntwks['forward directivity'])
def test_forward_source_match_accuracy(self):
self.assertEqual(
self.Xf.s22 ,
self.cal.coefs_ntwks['forward source match'] )
def test_forward_load_match_accuracy(self):
self.assertEqual(
self.Yf.s11 ,
self.cal.coefs_ntwks['forward load match'])
def test_forward_reflection_tracking_accuracy(self):
self.assertEqual(
self.Xf.s21 * self.Xf.s12 ,
self.cal.coefs_ntwks['forward reflection tracking'])
def test_forward_transmission_tracking_accuracy(self):
self.assertEqual(
self.Xf.s21*self.Yf.s21 ,
self.cal.coefs_ntwks['forward transmission tracking'])
def test_reverse_source_match_accuracy(self):
self.assertEqual(
self.Yr.s11 ,
self.cal.coefs_ntwks['reverse source match'] )
def test_reverse_directivity_accuracy(self):
self.assertEqual(
self.Yr.s22 ,
self.cal.coefs_ntwks['reverse directivity'] )
def test_reverse_load_match_accuracy(self):
self.assertEqual(
self.Xr.s22 ,
self.cal.coefs_ntwks['reverse load match'])
def test_reverse_reflection_tracking_accuracy(self):
self.assertEqual(
self.Yr.s21 * self.Yr.s12 ,
self.cal.coefs_ntwks['reverse reflection tracking'])
def test_reverse_transmission_tracking_accuracy(self):
self.assertEqual(
self.Yr.s12*self.Xr.s12 ,
self.cal.coefs_ntwks['reverse transmission tracking'])
@nottest
def test_convert_12term_2_8term(self):
converted = rf.convert_8term_2_12term(
rf.convert_12term_2_8term(self.cal.coefs))
for k in converted:
print('{}-{}'.format(k,abs(self.cal.coefs[k] - converted[k])))
for k in converted:
self.assertTrue(abs(self.cal.coefs[k] - converted[k])<1e-9)
@nottest
def test_convert_12term_2_8term_correction_accuracy(self):
converted = rf.convert_8term_2_12term(
rf.convert_12term_2_8term(self.cal.coefs))
self.cal._coefs = converted
a = self.wg.random(n_ports=2)
m = self.measure(a)
c = self.cal.apply_cal(m)
self.assertEqual(a,c)
@nottest
def test_verify_12term(self):
self.assertTrue(self.cal.verify_12term_ntwk.s_mag.max() < 1e-3)
class UnknownThruTest(EightTermTest):
def setUp(self):
self.n_ports = 2
self.wg = rf.RectangularWaveguide(rf.F(75,100,2), a=100*rf.mil,z0=50)
wg= self.wg
#wg.frequency = rf.F.from_f([100])
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
actuals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='match'),
wg.impedance_mismatch(50,45)**wg.line(20,'deg',name='line')**wg.impedance_mismatch(45,50)
]
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='match'),
wg.thru(name='thru'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.UnknownThru(
ideals = ideals,
measured = measured,
switch_terms = [self.gamma_f, self.gamma_r]
)
class MRCTest(EightTermTest):
def setUp(self):
self.n_ports = 2
self.wg = rf.RectangularWaveguide(rf.F(75,100,2), a=100*rf.mil,z0=50)
wg= self.wg
#wg.frequency = rf.F.from_f([100])
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
def delay_shorts(d1,d2):
ds1 = wg.delay_short(d1,'deg')
ds2 = wg.delay_short(d2,'deg')
return rf.two_port_reflect(ds1,ds2)
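# two_port_reflect (as assumed here) builds a 2-port with ds1 and ds2
# as the reflects on ports 1 and 2 and no transmission between them.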
actuals = [
wg.short(nports=2, name='short'),
delay_shorts(65,130),
delay_shorts(120,75),
wg.load(.2+.2j,nports=2, name='match'),
wg.impedance_mismatch(50,45)**wg.line(20,'deg',name='line')**wg.impedance_mismatch(45,50)
]
ideals = [
wg.short(nports=2, name='short'),
delay_shorts(45,90),
delay_shorts(90,45),
wg.load(.2+.2j,nports=2, name='match'),
wg.thru(name='thru'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.MRC(
ideals = ideals,
measured = measured,
switch_terms = [self.gamma_f, self.gamma_r]
)
class SOLTTest2(SOLTTest):
'''
This test verifies the accuracy of the SOLT calibration when used
on an error-box (8-term) model.
'''
def setUp(self):
self.n_ports = 2
wg= rf.wr10
wg.frequency = rf.F.from_f([100])
self.wg = wg
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
self.Xf = self.X.copy()
self.Xr = self.X.copy()
self.Yf = self.Y.copy()
self.Yr = self.Y.copy()
Y_term = self.terminate(self.Y)
X_term = self.terminate(self.X)
self.Yf.s[:,0,0] = Y_term.s[:,0,0]
self.Yf.s[:,1,0] = Y_term.s[:,1,0]
self.Xr.s[:,1,1] = X_term.s[:,1,1]
self.Xr.s[:,0,1] = X_term.s[:,0,1]
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
wg.thru(name='thru'),
]
measured = [ self.measure(k) for k in ideals]
self.cal = rf.SOLT(
ideals = ideals,
measured = measured,
)
def terminate(self, ntwk):
'''
terminate a measured network with the switch terms
'''
m = ntwk.copy()
ntwk_flip = ntwk.copy()
ntwk_flip.flip()
m.s[:,0,0] = (ntwk**self.gamma_f).s[:,0,0]
m.s[:,1,1] = (ntwk_flip**self.gamma_r).s[:,0,0]
m.s[:,1,0] = ntwk.s[:,1,0]/(1-ntwk.s[:,1,1]*self.gamma_f.s[:,0,0])
m.s[:,0,1] = ntwk.s[:,0,1]/(1-ntwk.s[:,0,0]*self.gamma_r.s[:,0,0])
return m
def measure(self,ntwk):
m = ntwk.copy()
mf = self.Xf**ntwk**self.Yf
mr = self.Xr**ntwk**self.Yr
m.s[:,1,0] = mf.s[:,1,0]
m.s[:,0,0] = mf.s[:,0,0]
m.s[:,0,1] = mr.s[:,0,1]
m.s[:,1,1] = mr.s[:,1,1]
return m
@nottest
def test_12_2_8term(self):
coefs = rf.calibration.convert_12term_2_8term(self.cal.coefs)
coefs = rf.s_dict_to_ns(coefs, self.cal.frequency).to_dict()
self.assertEqual(coefs['forward switch term'], self.gamma_f)
self.assertEqual(coefs['reverse switch term'], self.gamma_r)
self.assertEqual(coefs['k'], self.X.s21*self.Y.s21)
def test_verify_12term(self):
self.assertTrue(self.cal.verify_12term_ntwk.s_mag.max() < 1e-3)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "3d710c38d1188c51e43215f8ee46c9de77fb76ec", "size": 19323, "ext": "py", "lang": "Python", "max_stars_repo_path": "skrf/calibration/tests/test_calibration.py", "max_stars_repo_name": "sdurant/scikit-rf", "max_stars_repo_head_hexsha": "09161b879c1a52a1bc2e2df89f2656c97136c39b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skrf/calibration/tests/test_calibration.py", "max_issues_repo_name": "sdurant/scikit-rf", "max_issues_repo_head_hexsha": "09161b879c1a52a1bc2e2df89f2656c97136c39b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skrf/calibration/tests/test_calibration.py", "max_forks_repo_name": "sdurant/scikit-rf", "max_forks_repo_head_hexsha": "09161b879c1a52a1bc2e2df89f2656c97136c39b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8336079077, "max_line_length": 101, "alphanum_fraction": 0.5262640377, "include": true, "reason": "import numpy", "num_tokens": 5096}
|
"""Matrix Base Classes
This file contains the base Matrix classes that represent a linear
equation Gm=d, where G is the design matrix of coefficients, m are the
model parameters and d is the data array of observations.
This file contains the following classes:
* DesignMatrix - handles coefficients of an arbitrary linear equation
* ModelMatrix - handles the model parameters
* DataMatrix - handles the data array
"""
import numpy as np
from typing import Union, Tuple
from .equation import Equation, Term
from scipy.sparse import vstack, coo_matrix
class Matrix():
"""
A class used as a base for all matrix like objects.
Sub-classes
-----------
DesignMatrix
The coefficients matrix (G) of a matrix equation like Gm=d.
Array
A matrix-like container, but expects a 1 dimensional input.
...
Attributes
----------
term_map : Union[.equation.Equation, .equation.Term]
An object that handles the mapping of parameter labels to recovered
values.
matrix : scipy.sparse.coo.coo_matrix or np.ndarray
a sparse matrix of coefficients for an arbitrary linear equation
Methods
-------
N/A
"""
def __init__(self,
term_map: Union[Equation, Term, None] = None,
matrix: Union[coo_matrix, np.ndarray, None] = None,
):
if term_map is not None:
self.term_map = term_map
# Create a placeholder coo_matrix for each new instance of Matrix
# ... if one is not passed when Matrix is initialized.
if matrix is None:
matrix = coo_matrix(([0, ], ([2, ], [2, ])))
self.matrix = matrix
def allocate_matrix(self,
alloc_func,
term_map: Union[Equation, Term],
inplace: bool = True,
shape: Union[Tuple[int, int], None] = None,
**kwargs,
) -> coo_matrix:
"""
This function applies an arbitrary mapping function to values in the
term map to return three numpy arrays as a tuple that describe a
triplet matrix COOrdinate system.
...
Parameters
----------
"""
try:
rows, cols, vals = alloc_func(term_map, **kwargs)
except ValueError:
msg = "Allocation function must return three np.ndarrays as a" +\
"tuple of values, row numbers and column numbers"
raise ValueError(msg)
# TODO Figure out why vals sometimes has shape 1, N and not just flat.
tmat = coo_matrix(
(vals.flatten(), (rows.flatten(), cols.flatten())), shape=shape)
if not inplace:
return tmat
self.matrix = tmat
# Getters and Setters using @property decorator.
@property
def matrix(self) -> coo_matrix:
"""
Matrix attribute getter.
Returns
-------
"""
return self._matrix
@matrix.setter
def matrix(self, matrix: coo_matrix):
"""
Matrix attribute setter. Checks to see if the set object is a
coo_matrix.
...
Parameters
----------
term_map : .mappers.ParamMap
An object that handles the mapping of parameter labels to recovered
values.
Raises
------
Raises assertion error if a matrix is is not of type
scipy.sparse.coo.coo_matrix.
"""
assert type(matrix) is coo_matrix or type(matrix) is np.ndarray,\
f"Matrix must be of type {coo_matrix} or {np.ndarray} not {type(matrix)}."
if type(matrix) is np.ndarray:
matrix = coo_matrix(matrix)
self._matrix = matrix
@property
def term_map(self) -> Union[Equation, Term]:
"""
Parameter map attribute getter.
"""
return self._term_map
@term_map.setter
def term_map(self, term_map: Union[Equation, Term]):
"""
Matrix attribute setter. Checks to see if the set object is a
coo_matrix.
...
Parameters
----------
term_map : .mappers.ParamMap
An object that handles the mapping of parameter labels to recovered
values.
Raises
------
Raises assertion error if a ParamMap object is not passed.
"""
assert issubclass(type(term_map), Equation),\
f"Parameter map must be of type {Equation} or type Term."
self._term_map = term_map
class Array(Matrix):
"""
"""
def __init__(self,
term_map: Union[Equation, Term, None] = None,
array: Union[coo_matrix, None] = None,
):
super().__init__(term_map, array)
if array is None:
array = coo_matrix(([0, ], ([2, ], [0, ])))
self.array = array
@property
def array(self): # array is just an alias for matrix
return self._matrix
@array.setter
def array(self, arr: coo_matrix): # perform one additional check on shape
assert arr.shape[0] == 1 or arr.shape[1] == 1,\
f"array must be 1 dimensional not {arr.shape}."
if arr.shape[1] != 1: # ensure that the shape is always like this.
arr = arr.reshape(arr.shape[::-1])
self.matrix = arr
def append(self, arr, inplace: bool = False):
"""
Appends an Array to the current Array or returns a new one.
"""
newarray = vstack((self.array, arr.array))
if not inplace:
try:
return Array(self.term_map, newarray)
except AttributeError:
return Array(array=newarray)
else:
self.array = newarray
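if __name__ == '__main__':
# Minimal usage sketch (illustrative only, not part of the library):
# wrap two small column vectors in Arrays and stack them.
a = Array(array=coo_matrix(np.array([[1.0], [2.0]])))
b = Array(array=coo_matrix(np.array([[3.0]])))
stacked = a.append(b)
print(stacked.array.toarray())  # -> column vector [1, 2, 3]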
|
{"hexsha": "e878f0a35231eceb38a704653e1c8fd704961606", "size": 5879, "ext": "py", "lang": "Python", "max_stars_repo_path": "lininvbox/lininvbox/basetypes.py", "max_stars_repo_name": "uofuseismo/YPMLRecalibration", "max_stars_repo_head_hexsha": "18a4231eb12775cf808d83d38a11cc02664b3e35", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lininvbox/lininvbox/basetypes.py", "max_issues_repo_name": "uofuseismo/YPMLRecalibration", "max_issues_repo_head_hexsha": "18a4231eb12775cf808d83d38a11cc02664b3e35", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lininvbox/lininvbox/basetypes.py", "max_forks_repo_name": "uofuseismo/YPMLRecalibration", "max_forks_repo_head_hexsha": "18a4231eb12775cf808d83d38a11cc02664b3e35", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-02T17:19:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-23T00:14:51.000Z", "avg_line_length": 29.103960396, "max_line_length": 86, "alphanum_fraction": 0.5674434428, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1280}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# List of all csv filenames
GAS_TRAIN_DATA = 'CSV_Files/Gas Data Last Year.csv'
GAS_TEST_DATA = 'CSV_Files/Gas Data Last Month.csv'
GOLD_TRAIN_DATA = 'CSV_Files/Gold Data Last Year.csv'
GOLD_TEST_DATA = 'CSV_Files/Gold Data Last Month.csv'
OIL_TRAIN_DATA = 'CSV_Files/Oil Data Last Year.csv'
OIL_TEST_DATA = 'CSV_Files/Oil Data Last Month.csv'
SILVER_TRAIN_DATA = 'CSV_Files/Silver Data Last Year.csv'
SILVER_TEST_DATA = 'CSV_Files/Silver Data Last Month.csv'
# Data sets for stock we are currently assessing
current_train_data = GOLD_TRAIN_DATA
current_test_data = GOLD_TEST_DATA
# Number of data points to retrieve from csv files (varies with each stock we assess)
NUM_TRAIN_DATA_POINTS = 266
NUM_TEST_DATA_POINTS = 22
LEARNING_RATE = 0.1
NUM_EPOCHS = 100
# Function to load data that we want from csv files and return final and opening prices and volume for each day
def load_stock_data(stock_name, num_data_points):
data = pd.read_csv(stock_name,
skiprows=0,
nrows=num_data_points,
usecols=['Price', 'Open', 'Vol.'])
# Prices of stock at the end of each day
final_prices = data['Price'].astype(str).str.replace(',', '').astype(float)
# Prices of stock at the beginning of each day
opening_prices = data['Open'].astype(str).str.replace(',', '').astype(float)
# Volume of stock exchanged throughout the day
volumes = data['Vol.'].str.strip('MK').astype(float)
return final_prices, opening_prices, volumes
# Function to calculate differences between opening price of the next day and final price of the current day
def calculate_price_differences(final_prices, opening_prices):
price_differences = []
for d_i in range(len(final_prices) - 1):
price_difference = opening_prices[d_i + 1] - final_prices[d_i]
price_differences.append(price_difference)
return price_differences
def calculate_accuracy(expected_values, actual_values):
# A prediction counts as correct when its sign matches the expected
# price difference (both positive or both negative)
num_correct = 0
for a_i in range(len(actual_values)):
if actual_values[a_i] > 0 and expected_values[a_i] > 0:
num_correct += 1
elif actual_values[a_i] < 0 and expected_values[a_i] < 0:
num_correct += 1
return (num_correct / len(actual_values)) * 100
# Training data sets
train_final_prices, train_opening_prices, train_volumes = load_stock_data(current_train_data, NUM_TRAIN_DATA_POINTS)
train_price_differences = calculate_price_differences(train_final_prices, train_opening_prices)
train_volumes = train_volumes[:-1]
# Testing data sets
test_final_prices, test_opening_prices, test_volumes = load_stock_data(current_test_data, NUM_TEST_DATA_POINTS)
test_price_differences = calculate_price_differences(test_final_prices, test_opening_prices)
test_volumes = test_volumes[:-1]
# Build the computational graph for the linear model y = Wx + b
# Used to input volumes
x = tf.placeholder(tf.float32, name='x')
# Variables that our model will change to get actual output as close to expected output as possible
W = tf.Variable([.1], name='W')
b = tf.Variable([.1], name='b')
# How our model outputs the actual values
y = W * x + b
# Used to input expected values for training purposes (shows the model what a "good" outcome is)
y_predicted = tf.placeholder(tf.float32, name='y_predicted')
# Loss function based on the difference between actual and expected outputs
loss = tf.reduce_sum(tf.square(y - y_predicted))
# Optimizer aimed at minimizing loss by changing W and b
optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
# Session is used to actually run the nodes
session = tf.Session()
# Need to initialize global variables
session.run(tf.global_variables_initializer())
for _ in range(NUM_EPOCHS):
# Run the optimizer which will allow it to change the values of W and b to minimize loss
session.run(optimizer, feed_dict={x: train_volumes, y_predicted: train_price_differences})
results = session.run(y, feed_dict={x: test_volumes})
accuracy = calculate_accuracy(test_price_differences, results)
print("Accuracy of model: {0:.2f}%".format(accuracy))
# # Plotting purposes only, not necessary
# plt.figure(1)
# plt.plot(train_volumes, train_price_differences, 'bo')
# plt.title('Price Differences for Given Volumes for the Past Year')
# plt.xlabel('Volumes')
# plt.ylabel('Price differences')
# plt.show()
|
{"hexsha": "beef34fec145150b23fd94bae3127c8c4e802630", "size": 4430, "ext": "py", "lang": "Python", "max_stars_repo_path": "Stock_Prediction_Model.py", "max_stars_repo_name": "aniirudd/Stock-Market-Prediction", "max_stars_repo_head_hexsha": "1af00c7ef50e982d81f683c3c2203d9097a5e489", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Stock_Prediction_Model.py", "max_issues_repo_name": "aniirudd/Stock-Market-Prediction", "max_issues_repo_head_hexsha": "1af00c7ef50e982d81f683c3c2203d9097a5e489", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Stock_Prediction_Model.py", "max_forks_repo_name": "aniirudd/Stock-Market-Prediction", "max_forks_repo_head_hexsha": "1af00c7ef50e982d81f683c3c2203d9097a5e489", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0185185185, "max_line_length": 116, "alphanum_fraction": 0.751241535, "include": true, "reason": "import numpy", "num_tokens": 1054}
|
\documentclass[11pt,a4paper]{book}
\usepackage{graphicx}
\begin{document}
\title{Book: How to Structure a LaTeX Document}
\author{Author1 \and Author2 \and ...}
\date{\today}
\maketitle
\frontmatter
\chapter{Preface}
\mainmatter
\chapter{First chapter}
\section{Section Title 1}
\section{Section Title 2}
\section{Section Title.....}
\chapter{....}
\chapter{Conclusion}
\chapter*{References}
\backmatter
\chapter{Last note}
\end{document}
|
{"hexsha": "7f481da8ea9059eb3aa320466c50966984a7da3b", "size": 490, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ProjectMaterial/LatexEditorProject/tex-templates/book-template.tex", "max_stars_repo_name": "nikosp2196/LatexEditorRefactoring", "max_stars_repo_head_hexsha": "79b97ef212900a6e6970a63ceeccdbfa7e73c025", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ProjectMaterial/LatexEditorProject/tex-templates/book-template.tex", "max_issues_repo_name": "nikosp2196/LatexEditorRefactoring", "max_issues_repo_head_hexsha": "79b97ef212900a6e6970a63ceeccdbfa7e73c025", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ProjectMaterial/LatexEditorProject/tex-templates/book-template.tex", "max_forks_repo_name": "nikosp2196/LatexEditorRefactoring", "max_forks_repo_head_hexsha": "79b97ef212900a6e6970a63ceeccdbfa7e73c025", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.6111111111, "max_line_length": 48, "alphanum_fraction": 0.6734693878, "num_tokens": 134}
|
# Questionable; not wiring this in for now.
"strftime(\"%F/%H\", now())"
strftime{T<:DateTime}(fmt::AbstractString, t::T) =
Libc.strftime(fmt,Dates.datetime2unix(t))
"strftime(\"%F\", Dates.today())"
strftime{T<:Date}(fmt::AbstractString, d::T) =
strftime(fmt,DateTime(d))
"""
[Dates.Date(2016,4,1), Dates.Date(2016,4,2)] |>
strftime("../%F/total.gz") |> first
"""
strftime(fmt::AbstractString) =
i->imap(t->strftime(fmt, t), i)
|
{"hexsha": "5a656b4b0e2f69be081feddb4f510d101d13abe1", "size": 449, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/strftime.jl", "max_stars_repo_name": "closescreen/Many", "max_stars_repo_head_hexsha": "cecec0af8c3a22f716412572df3374224c6210d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/strftime.jl", "max_issues_repo_name": "closescreen/Many", "max_issues_repo_head_hexsha": "cecec0af8c3a22f716412572df3374224c6210d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/strftime.jl", "max_forks_repo_name": "closescreen/Many", "max_forks_repo_head_hexsha": "cecec0af8c3a22f716412572df3374224c6210d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6315789474, "max_line_length": 51, "alphanum_fraction": 0.6146993318, "num_tokens": 148}
|
try:
from lapjv import lapjv
# from scipy.optimize import linear_sum_assignment
segment = False
except ImportError:
print('Module lapjv not found, emulating with the much slower scipy.optimize.linear_sum_assignment')
segment = True
from scipy.optimize import linear_sum_assignment
import random
import numpy as np
import keras.backend as K
from keras.utils import Sequence
class TrainingData(Sequence):
def __init__(self, score, train, id2samples, train_idx, steps=1000, batch_size=32):
super(TrainingData, self).__init__()
# Maximizing the score is the same as minimizing -score.
self.score = -score
self.train = train
self.dims = train.shape[1]
self.steps = steps
self.batch_size = batch_size
self.id2samples = id2samples
self.train_idx = train_idx
t2i = {}
for i,t in enumerate(train_idx): t2i[t] = i
for ts in id2samples.values():
idxs = [t2i[t] for t in ts]
for i in idxs:
for j in idxs:
# Set a large value for matching whales -- eliminates this potential pairing
self.score[i,j] = 10000.0
self.on_epoch_end()
def on_epoch_end(self):
# Skip this on the last epoch.
if self.steps <= 0: return
self.steps -= 1
self.match = []
self.unmatch = []
if segment:
# Using slow scipy. Make small batches.
tmp = []
batch = 512
for start in range(0, self.score.shape[0], batch):
end = min(self.score.shape[0], start + batch)
_, x = linear_sum_assignment(self.score[start:end, start:end])
tmp.append(x + start)
x = np.concatenate(tmp)
else:
# Solve the linear assignment problem
# _,_, x = lapjv(self.score)
# import ipdb; ipdb.set_trace()
x, _, _ = lapjv(self.score)
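# NB: different lapjv bindings disagree on the order of the returned
# values (see the commented-out variant above); adjust when swapping libraries.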
y = np.arange(len(x), dtype=np.int32)
# Compute a derangement for matching whales
for ts in self.id2samples.values():
d = ts.copy()
while True:
random.shuffle(d)
if not np.any(ts == d): break
for ab in zip(ts, d): self.match.append(ab)
if 1:
# Construct unmatched pairs from the LAP solution.
for i,j in zip(x,y):
if i == j:
print(f'i {i} == j {j}')
# print(self.score)
print(x)
print(y)
assert i != j
self.unmatch.append((self.train_idx[i], self.train_idx[j]))
# Force a different choice for an eventual next epoch.
self.score[x,y] = 10000.0
self.score[y,x] = 10000.0
random.shuffle(self.match)
random.shuffle(self.unmatch)
assert len(self.match) == len(self.train) and len(self.unmatch) == len(self.train)
def __len__(self):
return (len(self.match) + len(self.unmatch) + self.batch_size - 1) // self.batch_size
def __getitem__(self, index):
start = self.batch_size * index
end = min(start + self.batch_size, len(self.match) + len(self.unmatch))
size = end - start
assert size > 0
a = np.zeros((size,) + (self.dims,), dtype=K.floatx())
b = np.zeros((size,) + (self.dims,), dtype=K.floatx())
c = np.zeros((size,1), dtype=K.floatx())
j = start//2
for i in range(0, size, 2):
a[i, :] = self.train[self.match[j][0]]
b[i, :] = self.train[self.match[j][1]]
# This is a match
c[i, 0] = 1
a[i+1,:] = self.train[self.unmatch[j][0]]
b[i+1,:] = self.train[self.unmatch[j][1]]
# This is not a match
c[i+1,0] = 0
j += 1
return [a[:,None,],b[:,None,]],c
if __name__ == '__main__':
from utils import load_cache, group_label, shuffle_idxs
train, y_, _, _ = load_cache('../../')
score = np.random.random_sample(size=(len(train), len(train)))
id2samples = group_label(y_)
train_idx, _ = shuffle_idxs(train)
data = TrainingData(score, train, id2samples, train_idx)
import ipdb; ipdb.set_trace()
print(data)
|
{"hexsha": "eb78335c77d1718578ee251ecc3858b45b79dd4b", "size": 4541, "ext": "py", "lang": "Python", "max_stars_repo_path": "api/classification/siamese/datagen.py", "max_stars_repo_name": "xiafanzeng/Raman-Spectroscopy", "max_stars_repo_head_hexsha": "ba0b8b7ad0d9b9487a7602b0a09a41d970f70598", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-03-19T14:09:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-09T09:33:55.000Z", "max_issues_repo_path": "api/classification/siamese/datagen.py", "max_issues_repo_name": "xiafanzeng/Raman-Spectroscopy", "max_issues_repo_head_hexsha": "ba0b8b7ad0d9b9487a7602b0a09a41d970f70598", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "api/classification/siamese/datagen.py", "max_forks_repo_name": "xiafanzeng/Raman-Spectroscopy", "max_forks_repo_head_hexsha": "ba0b8b7ad0d9b9487a7602b0a09a41d970f70598", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-24T07:18:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T07:18:41.000Z", "avg_line_length": 34.6641221374, "max_line_length": 98, "alphanum_fraction": 0.5199295309, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1089}
|
[STATEMENT]
lemma rel_witness_gpv_sel [simp]:
"the_gpv (rel_witness_gpv A C R R' (gpv, gpv')) =
map_spmf (map_generat id id (\<lambda>(rpv, rpv'). (rel_witness_gpv A C R R' \<circ> rel_witness_fun R R' (rpv, rpv'))) \<circ> rel_witness_generat)
(rel_witness_spmf (rel_generat A C (rel_fun (R OO R') (rel_gpv'' A C (R OO R')))) (the_gpv gpv, the_gpv gpv'))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. the_gpv (rel_witness_gpv A C R R' (gpv, gpv')) = map_spmf (map_generat id id (\<lambda>(rpv, rpv'). rel_witness_gpv A C R R' \<circ> rel_witness_fun R R' (rpv, rpv')) \<circ> rel_witness_generat) (rel_witness_spmf (rel_generat A C (R OO R' ===> rel_gpv'' A C (R OO R'))) (the_gpv gpv, the_gpv gpv'))
[PROOF STEP]
unfolding rel_witness_gpv_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. the_gpv (corec_gpv (map_spmf (map_generat id id (\<lambda>(rpv, rpv'). Inr \<circ> rel_witness_fun R R' (rpv, rpv')) \<circ> rel_witness_generat) \<circ> rel_witness_spmf (rel_generat A C (R OO R' ===> rel_gpv'' A C (R OO R'))) \<circ> map_prod the_gpv the_gpv) (gpv, gpv')) = map_spmf (map_generat id id (\<lambda>(rpv, rpv'). corec_gpv (map_spmf (map_generat id id (\<lambda>(rpv, rpv'). Inr \<circ> rel_witness_fun R R' (rpv, rpv')) \<circ> rel_witness_generat) \<circ> rel_witness_spmf (rel_generat A C (R OO R' ===> rel_gpv'' A C (R OO R'))) \<circ> map_prod the_gpv the_gpv) \<circ> rel_witness_fun R R' (rpv, rpv')) \<circ> rel_witness_generat) (rel_witness_spmf (rel_generat A C (R OO R' ===> rel_gpv'' A C (R OO R'))) (the_gpv gpv, the_gpv gpv'))
[PROOF STEP]
by(auto simp add: spmf.map_comp generat.map_comp o_def intro!: map_spmf_cong generat.map_cong)
|
{"llama_tokens": 845, "file": "CryptHOL_Generative_Probabilistic_Value", "length": 2}
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from parl import layers
from mlp_model import ActorModel, CriticModel
from paddle import fluid
from parl.utils import logger
VEL_OBS_DIM = 4 + 15
OBS_DIM = 98 + VEL_OBS_DIM
ACT_DIM = 22
class EnsembleBaseModel(object):
def __init__(self,
model_dirname=None,
stage_name=None,
ensemble_num=12,
use_cuda=False):
self.stage_name = stage_name
self.ensemble_num = ensemble_num
self.actors = []
self.critics1 = []
self.critics2 = []
for i in range(ensemble_num):
self.actors.append(
ActorModel(
OBS_DIM,
VEL_OBS_DIM,
ACT_DIM,
stage_name=stage_name,
model_id=i))
self.critics1.append(
CriticModel(
OBS_DIM,
VEL_OBS_DIM,
ACT_DIM,
stage_name=stage_name,
model_id=i * 2))
self.critics2.append(
CriticModel(
OBS_DIM,
VEL_OBS_DIM,
ACT_DIM,
stage_name=stage_name,
model_id=i * 2 + 1))
self._define_program()
self.place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
self.fluid_executor = fluid.Executor(self.place)
self.fluid_executor.run(self.startup_program)
if model_dirname is not None:
self._load_params(model_dirname)
def _load_params(self, dirname):
logger.info('[{}]: Loading model from {}'.format(
self.stage_name, dirname))
fluid.io.load_params(
executor=self.fluid_executor,
dirname=dirname,
main_program=self.ensemble_predict_program,
filename='model.ckpt')
def _define_program(self):
self.ensemble_predict_program = fluid.Program()
self.startup_program = fluid.Program()
with fluid.program_guard(self.ensemble_predict_program,
self.startup_program):
obs = layers.data(name='obs', shape=[OBS_DIM], dtype='float32')
action = self._ensemble_predict(obs)
self.ensemble_predict_output = [action]
def _ensemble_predict(self, obs):
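# Ensemble selection: every actor proposes an action; each twin-critic
# pair scores all proposals (element-wise min, TD3-style); scores are
# normalized per critic and the action with the best mean score wins.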
actor_outputs = []
for i in range(self.ensemble_num):
actor_outputs.append(self.actors[i].predict(obs))
batch_actions = layers.concat(actor_outputs, axis=0)
batch_obs = layers.expand(obs, expand_times=[self.ensemble_num, 1])
critic_outputs = []
for i in range(self.ensemble_num):
critic1_output = self.critics1[i].predict(batch_obs, batch_actions)
critic1_output = layers.unsqueeze(critic1_output, axes=[1])
critic2_output = self.critics2[i].predict(batch_obs, batch_actions)
critic2_output = layers.unsqueeze(critic2_output, axes=[1])
critic_output = layers.elementwise_min(critic1_output,
critic2_output)
critic_outputs.append(critic_output)
score_matrix = layers.concat(critic_outputs, axis=1)
# Normalize scores given by each critic
sum_critic_score = layers.reduce_sum(
score_matrix, dim=0, keep_dim=True)
sum_critic_score = layers.expand(
sum_critic_score, expand_times=[self.ensemble_num, 1])
norm_score_matrix = score_matrix / sum_critic_score
actions_mean_score = layers.reduce_mean(
norm_score_matrix, dim=1, keep_dim=True)
best_score_id = layers.argmax(actions_mean_score, axis=0)
best_score_id = layers.cast(best_score_id, dtype='int32')
ensemble_predict_action = layers.gather(batch_actions, best_score_id)
ensemble_predict_action = layers.squeeze(
ensemble_predict_action, axes=[0])
return ensemble_predict_action
def pred_batch(self, obs):
feed = {'obs': obs}
action = self.fluid_executor.run(
self.ensemble_predict_program,
feed=feed,
fetch_list=self.ensemble_predict_output)[0]
return action
class SubmitModel(object):
def __init__(self, use_cuda=False):
self.stage0_model = EnsembleBaseModel(
model_dirname='./stage0_saved_models',
stage_name='stage0',
use_cuda=use_cuda)
self.stage1_model = EnsembleBaseModel(
model_dirname='./stage1_saved_models',
stage_name='stage1',
use_cuda=use_cuda)
def pred_batch(self, obs, target_change_times):
batch_obs = np.expand_dims(obs, axis=0).astype('float32')
if target_change_times == 0:
action = self.stage0_model.pred_batch(batch_obs)
else:
action = self.stage1_model.pred_batch(batch_obs)
return action
if __name__ == '__main__':
submit_model = SubmitModel()
|
{"hexsha": "c6b386d62c02c27ce72917a19d1bc29a7cd7f3bc", "size": 5677, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/NeurIPS2019-Learn-to-Move-Challenge/final_submit/submit_model.py", "max_stars_repo_name": "jkren6/PARL", "max_stars_repo_head_hexsha": "7299032f8e1804bb4ada0f087fd485816046fa90", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3172, "max_stars_repo_stars_event_min_datetime": "2018-05-22T02:02:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:14:56.000Z", "max_issues_repo_path": "examples/NeurIPS2019-Learn-to-Move-Challenge/final_submit/submit_model.py", "max_issues_repo_name": "ic7y/PARL", "max_issues_repo_head_hexsha": "e8797bd0d31d81bc81aae8b12792ff922bcb8ea9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 422, "max_issues_repo_issues_event_min_datetime": "2018-05-17T16:58:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T02:03:25.000Z", "max_forks_repo_path": "examples/NeurIPS2019-Learn-to-Move-Challenge/final_submit/submit_model.py", "max_forks_repo_name": "ic7y/PARL", "max_forks_repo_head_hexsha": "e8797bd0d31d81bc81aae8b12792ff922bcb8ea9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 794, "max_forks_repo_forks_event_min_datetime": "2018-05-21T18:33:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T13:38:09.000Z", "avg_line_length": 36.8636363636, "max_line_length": 79, "alphanum_fraction": 0.6166989607, "include": true, "reason": "import numpy", "num_tokens": 1191}
|
#!/usr/bin/env python
"""
Created on Wed Feb 26 16:23:30 2014
@author: Bodangles
"""
import os
import numpy as np
import tables
import glob
import matplotlib.pyplot as plt
import pdb
class BeamSelector(object):
"""This class will take in a numpy array with the first column being the beam numbers
the second coloumn the az angle in degrees and the third column the el angle in degrees.
The forth column is ksys variable for each beam.
Variables
beamnumdict - The dictionary with the beam number as the keys and angles as the values.
beamnumxydict - A dictionary with the beam number as the keys and x, y location the beams as the values.
angledict - beamnumdict with the key value pairs switched.
xydict - beamnumxydict with the key value pairs switched.
"""
def __init__(self,beammat,beamlist=None):
""" This constructor function takes a list of beams and a beam mat from
the hdf5 files from SRI which is a Nx4 table of beams"""
beammat = np.array(beammat)
self.beamnumdict = {}
self.beamnumxydict = {}
self.angledict = {}
self.xydict = {}
self.zenith = False # If this is false elevation is referenced to z=0 if true its referenced to z axis
allbeams = np.array([int(ib) for ib in beammat[:,0]])
if beamlist is None:
beamlist = allbeams
else:
beamlog = np.in1d(allbeams,[] )
for ibeam in beamlist:
beamlog = beamlog+(int(ibeam)==allbeams)
beammat=beammat[beamlog]
self.beammat = beammat
for ibeam,curlist in enumerate(beammat):
curlist[1] = np.mod(curlist[1],360.)# mod the az so it goes from 0 to 360
self.beamnumdict[int(curlist[0])] = {'az':curlist[1],'el':curlist[2]}
self.angledict[(curlist[1],curlist[2])] = int(curlist[0])
xydata = angles2xy(curlist[1],curlist[2])
self.xydict[xydata] = int(curlist[0])
self.beamnumxydict[int(curlist[0])] = np.array(xydata)
def updatebeamlist(self,beamlist,azvec,elvec):
""" This will update the objects beam list with a new one.
Inputs
beamlist - A list of beam numbers
azvec - The azimuth values in degrees
elvec - The elvation values in degrees
"""
assert len(azvec)==len(elvec)
assert len(azvec)==len(beamlist)
azvec = np.mod(azvec,360.0)
for nb,ib in enumerate(beamlist):
#clean up the dictionaries
angtemp = self.beamnumdict[int(ib)]
angtuple = (angtemp['az'],angtemp['el'])
xydata = angles2xy(angtuple[0],angtuple[1])
if xydata in self.xydict.keys(): del self.xydict[xydata]
if angtuple in self.angledict.keys(): del self.angledict[angtuple]
# add the new info to the dictionaries
self.beamnumdict[int(ib)]={'az':azvec[nb],'el':elvec[nb]}
self.angledict[(azvec[nb],elvec[nb])] = int(ib)
xydata = angles2xy(azvec[nb],elvec[nb])
self.xydict[xydata] = int(ib)
self.beamnumxydict[int(ib)] = np.array(xydata)
def updatebeamlistangonly(self,azvec,elvec):
""" This will update the objects beam list with a new one but the beam numbers aren't needed
If the angles are not in the beammat they are not added.
Inputs
azvec - The azimuth values in degrees
elvec - The elvation values in degrees
"""
azvec=np.array(azvec)
elvec= np.array(elvec)
beamlist = self.getbeamnums(azvec,elvec)
beamlist = np.array(beamlist)
blnan = np.isnan(beamlist)
self.updatebeamlist(beamlist[~blnan],azvec[~blnan],elvec[~blnan])
def getbeamnums(self,azvec,elvec):
""" This will update the objects beam list with a new one.
Inputs
azvec - The azimuth values in degrees
elvec - The elvation values in degrees
Outputs
beamlist - A list of beam numbers
"""
assert len(azvec)==len(elvec)
outlist = [np.nan]*len(azvec)
for iang, ang in enumerate(zip(azvec,elvec)):
try:
outlist[iang]=self.angledict[ang]
except:
print('Angle {0} is not in beam mat'.format(iang))
return outlist
def shiftbeams(self,azoff=0.0,eloff=0.0):
""" This shifts all of the beam angles
inputs
azoff - The number of degrees in azimuth it will be shifted.
eloff - The number of degrees in elevation it will be shifted.
"""
beamnums = self.beamnumdict.keys()
(azvec,elvec) = self.azelvecs()
azvec = np.mod(azvec-azoff,360.0);
(xx,yy,zz) = angles2xyz(azvec,elvec)
rotmat = elrotmatrix(eloff)
outmat = rotmat*np.mat([xx,yy,zz])
(azvec,elvec) = xyz2angles(np.array(outmat[0]).flatten(),
np.array(outmat[1]).flatten(),np.array(outmat[2]).flatten())
self.updatebeamlist(beamnums,azvec,elvec)
def switchzenith(self,report=False):
""" This switches the zenith angle and adjusts the el angle accordingly.
Input
report - A bool that will determine whether a print message is output.
"""
beamnums = self.beamnumdict.keys()
(azvec,elvec) = self.azelvecs()
if self.zenith==False:
self.zenith=True
self.updatebeamlist(beamnums,azvec,elvec-90)
else:
self.zenith=False
self.updatebeamlist(beamnums,azvec,elvec+90)
if report:
print "Zenith now: " +str(self.zenith)
def getbeamsdist(self,beamnum,desdist,distdict=None):
""" This will get all of the beams within a specific spatial distance to
a beam. The user can give a distance dictionary which for each beam will
give a distance. """
if distdict is None:
distdict = self.__calcxydist__(beamnum)
#make reverse dict
revdist = {distdict[ikey]:ikey for ikey in distdict.keys()}
alldist = np.array(list(revdist.keys()))
disttup = np.where(alldist<desdist)
distlist = alldist[disttup[0]]
# pdb.set_trace()
outbeamlist = [revdist[idist] for idist in distlist]
return outbeamlist
def getbeamangdist(self,beamnum,desdist):
""" This will get the beam numbers in a specific angular distance. The
distances in az and el will be different."""
cur_azel = self.beamnumdict[beamnum]
outbeamlist = []
for ibeam in self.beamnumdict.keys():
curangls = self.beamnumdict[ibeam]
azmet = np.abs(curangls['az']-cur_azel['az'])<desdist[0]
elmet = np.abs(curangls['el']-cur_azel['el'])<desdist[1]
if azmet and elmet:
outbeamlist.append(ibeam)
return outbeamlist
def getbeammat(self,beamlist=None):
if beamlist is None:
return self.beammat
allbeams = np.array([int(ibeam) for ibeam in self.beammat[:,0]])
indxlist = [np.where(ibeam==allbeams)[0][0] for ibeam in beamlist]
return self.beammat[indxlist,:]
def __calcxydist__(self,beamnum):
beamlist = self.beamnumdict.keys()
xycur = self.beamnumxydict[beamnum]
distdict = {}
for ibeam in beamlist:
ixy = self.beamnumxydict[ibeam]
distdict[ibeam] = np.sqrt(np.sum((xycur-ixy)**2))
return distdict
def azelvecs(self,beamlist=None):
if beamlist is None:
beamlist = self.beamnumdict.keys()
azvec = np.array([self.beamnumdict[ib]['az'] for ib in beamlist])
elvec = np.array([self.beamnumdict[ib]['el'] for ib in beamlist])
return (azvec,elvec)
def xyvecs(self,beamlist=None):
if beamlist is None:
beamlist = self.beamnumdict.keys()
(az,el) = self.azelvecs(beamlist)
(xx,yy) = angles2xy(az,el)
return (xx,yy)
def rotatebeams(self,azoff=0.0,eloff=0.0):
""" Rotates all of the beams by the given az and el offsets."""
self.shiftbeams(azoff,eloff)
def plotbeams(self,beamlist,plotall=True,filename=None,title=None,closefig=True):
"""Plots the location of the beams in yellow and plots the original beams
in blue"""
fig = make_polax(self.zenith)
if plotall:
(azall,elall) = self.azelvecs()
(xx,yy) = angles2xy(azall,elall,self.zenith)
plt.plot(xx,yy,'ko')
plt.plot(xx,yy,'b.')
plt.hold(True)
(azvec,elvec) = self.azelvecs(beamlist)
(xx2,yy2) = angles2xy(azvec,elvec,self.zenith)
plt.plot(xx2,yy2,'ko')
plt.plot(xx2,yy2,'y.')
plt.title(title)
if filename is not None:
plt.savefig(filename)
print('Saved image in ' + filename)
return fig
def printbeamlist(self,filename='outbeams.txt',beamlist=None):
if beamlist is None:
beamlist = self.beamnumdict.keys()
f = open(filename,'w')
for ib in beamlist:
f.write(str(int(ib))+'\n')
f.close()
def printbeamangles(self,filename='outangles.txt',beamlist=None):
if beamlist is None:
beamlist = self.beamnumdict.keys()
(azvec,elvec) = self.azelvecs(beamlist)
f = open(filename,'w')
for ib in range(len(beamlist)):
f.write(str(azvec[ib])+ ' ' +str(elvec[ib])+'\n')
f.close()
def make_polax(zenith=False):
""" This makes the polar axes for the beams"""
if zenith:
minel = 0.0
maxel = 70.0
elspace = 10.0
ellines = np.arange(minel,maxel,elspace)
else:
minel = 30.0
maxel = 90.0
elspace = 10.0
ellines = np.arange(minel,maxel,elspace)
azlines = np.arange(0.0,360.0,30.0)
fig = plt.figure(num=None, figsize=(12, 9), dpi=600, facecolor='w', edgecolor='w')
# plot all of the azlines
elvec = np.linspace(maxel,minel,100)
firstplot = True
for iaz in azlines:
azvec = iaz*np.ones_like(elvec)
(xx,yy) = angles2xy(azvec,elvec,zenith)
plt.plot(xx,yy,'k--')
if firstplot:
plt.hold(True)
firstplot=False
(xt,yt) = angles2xy(azvec[-1],elvec[-1]-5,zenith)
plt.text(xt,yt,str(int(iaz)))
azvec = np.linspace(0.0,360,100)
# plot the el lines
for iel in ellines:
elvec = iel*np.ones_like(azvec)
(xx,yy) = angles2xy(azvec,elvec,zenith)
plt.plot(xx,yy,'k--')
(xt,yt) = angles2xy(315,elvec[-1]-3,zenith)
plt.text(xt,yt,str(int(iel)))
plt.axis([-90,90,-90,90])
frame1 = plt.gca()
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
# plt.axis('off')
return fig
def angles2xy(az,el,zenith=False):
""" This will take az and el angles and move them to a Cartisian space for plotting"""
azt = (az)*np.pi/180.0
if not zenith:
el = 90-el
xout = el*np.sin(azt)
yout = el*np.cos(azt)
return (xout,yout)
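# e.g. angles2xy(90., 60.) -> (30., ~0.): due east, 30 degrees from zenith.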
def xy2angles(x,y):
elout = 90-np.sqrt(x**2+y**2)
azout = (180.0/np.pi)*np.arctan2(x,y)
return (azout,elout)
def angles2xyz(az,el):
elrad = el*np.pi/180.0
azrad = az*np.pi/180.0
x = np.cos(elrad)*np.cos(azrad)
y = np.cos(elrad)*np.sin(azrad)
z = np.sin(elrad)
return (x,y,z)
def xyz2angles(x,y,z):
el = np.arcsin(z)*180.0/np.pi
az = np.arctan2(y,x)*180/np.pi
return(az,el)
def elrotmatrix(theta):
thetar = theta*np.pi/180.0
return np.mat([[np.cos(thetar),0.0,np.sin(thetar)],[0.,1.,0.],[-np.sin(thetar),0.,np.cos(thetar)]])
def rotinel(azvec,elvec,eloff):
(xx,yy,zz) = angles2xyz(azvec,elvec)
rotmat = elrotmatrix(eloff)
outmat = rotmat*np.mat([xx,yy,zz])
(azvec,elvec) = xyz2angles(np.array(outmat[0]).flatten(),
np.array(outmat[1]).flatten(),np.array(outmat[2]).flatten())
return (azvec,elvec)
if __name__ == "__main__":
""" Test function for class requires hdf5 files present for test."""
curfile = 'd0079140.dt0.h5'
filepath, fext =os.path.splitext(curfile)
h5file = tables.open_file(curfile)
#read data
# take only the first record because thats all thats needed
beamcode_order_power = h5file.get_node('/Raw11/RawData/Beamcodes').read()[0]
beamcode_order_iq = h5file.get_node('/Raw11/RawData/RadacHeader/BeamCode').read()[0]
beamcode_list = h5file.get_node('/Setup/BeamcodeMap').read()
h5file.close()
beamselect = BeamSelector(beamcode_list,beamcode_order_power)
testbeamlist = beamselect.getbeamsdist(beamcode_order_power[500],2)
print(testbeamlist)
testbeamlist2 = beamselect.getbeamangdist(beamcode_order_power[500],[4,3])
print(testbeamlist2)
beamselect.plotbeams(testbeamlist2)
plt.show()
|
{"hexsha": "1ec0bbb3296d8c8f38f5acf3e587cd817e9293a6", "size": 12878, "ext": "py", "lang": "Python", "max_stars_repo_path": "beamtools/beamfuncs.py", "max_stars_repo_name": "scienceopen/SimISR", "max_stars_repo_head_hexsha": "f9826c73137113b68dc2b751a735cd96837fceb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-10-06T14:15:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-28T21:29:07.000Z", "max_issues_repo_path": "beamtools/beamfuncs.py", "max_issues_repo_name": "scienceopen/SimISR", "max_issues_repo_head_hexsha": "f9826c73137113b68dc2b751a735cd96837fceb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2016-12-03T23:27:19.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-06T03:04:06.000Z", "max_forks_repo_path": "beamtools/beamfuncs.py", "max_forks_repo_name": "scienceopen/SimISR", "max_forks_repo_head_hexsha": "f9826c73137113b68dc2b751a735cd96837fceb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-11-18T08:00:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-05T13:47:57.000Z", "avg_line_length": 36.7942857143, "max_line_length": 110, "alphanum_fraction": 0.6080136667, "include": true, "reason": "import numpy", "num_tokens": 3676}
|
import numpy as np
import cv2
from glob import glob
from tqdm import tqdm
bird_imgs_train = glob('../Data/bird_or_bicycle/0.0.3/train/bird/*')
bicycle_imgs_train = glob('../Data/bird_or_bicycle/0.0.3/train/bicycle/*')
bird_imgs_test = glob('../Data/bird_or_bicycle/0.0.3/test/bird/*')
bicycle_imgs_test = glob('../Data/bird_or_bicycle/0.0.3/test/bicycle/*')
array = np.empty(shape=(0, 299, 299, 3), dtype=int)
for f in tqdm(bird_imgs_train, desc='bird train'):
img = cv2.imread(f)
img = np.expand_dims(img, axis=0)
array = np.append(array, img, axis=0)
for f in tqdm(bicycle_imgs_train, desc='bicycle train'):
img = cv2.imread(f)
img = np.expand_dims(img, axis=0)
array = np.append(array, img, axis=0)
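# cv2 reads images in BGR order; reversing the channel axis converts to RGB.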
array = array[:, :, :, ::-1].copy()
np.save('bird_bicycle_train.npy', array)
print("Train saved ndarray of shape:", array.shape)
array = np.empty(shape=(0, 299, 299, 3), dtype=int)
for f in tqdm(bird_imgs_test, desc='bird test'):
img = cv2.imread(f)
img = np.expand_dims(img, axis=0)
array = np.append(array, img, axis=0)
for f in tqdm(bicycle_imgs_test, desc='bicycle test'):
img = cv2.imread(f)
img = np.expand_dims(img, axis=0)
array = np.append(array, img, axis=0)
array = array[:, :, :, ::-1].copy()
np.save('bird_bicycle_test.npy', array)
print("Test saved ndarray of shape:", array.shape)
|
{"hexsha": "bd4494b60ca01674a8d51c5545fd51db457d174c", "size": 1360, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scripts/old_scripts/bird_npdata.py", "max_stars_repo_name": "calvinyong/adversarial_examples_capproj", "max_stars_repo_head_hexsha": "972e89e105dec6dc5f61e62f378d06656698b71d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-15T03:51:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-15T03:51:10.000Z", "max_issues_repo_path": "Scripts/old_scripts/bird_npdata.py", "max_issues_repo_name": "calvinyong/adversarial_examples_capproj", "max_issues_repo_head_hexsha": "972e89e105dec6dc5f61e62f378d06656698b71d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scripts/old_scripts/bird_npdata.py", "max_forks_repo_name": "calvinyong/adversarial_examples_capproj", "max_forks_repo_head_hexsha": "972e89e105dec6dc5f61e62f378d06656698b71d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9361702128, "max_line_length": 74, "alphanum_fraction": 0.6735294118, "include": true, "reason": "import numpy", "num_tokens": 411}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import make_scorer, f1_score, classification_report
from sklearn.tree import DecisionTreeClassifier
# adult_income_data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data")
adult_income_data = pd.read_csv("../Data/adult.data")
adult_income_data.columns = [
"age",
"workclass",
"fnlwgt",
"education",
"education-num",
"marital-status",
"occupation",
"relationship",
"race",
"sex",
"capital-gain",
"capital-loss",
"hours-per-week",
"native-country",
"income",
]
print(adult_income_data.columns)
print(adult_income_data.head())
# Drop the fnlwgt (census sampling weight) column, which is not predictive
adult_income_data = adult_income_data.drop("fnlwgt", axis=1)
attributes = adult_income_data.drop("income", axis=1)
labels = adult_income_data.income
attributes = pd.get_dummies(attributes)
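# One-hot encode the categorical columns; each category becomes a 0/1 column.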
print(attributes.columns[0])
scaler = MinMaxScaler()
# print(attributes.head())
# scaler.fit_transform(attributes)
# print(attributes.head())
attributes_train, attributes_test, labels_train, labels_test = train_test_split(
attributes, labels, train_size=0.7, stratify=labels
)
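# stratify=labels keeps the <=50K / >50K class ratio identical in both splits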
print(attributes_train.shape)
print(attributes_test.shape)
print(labels_train.shape)
print(labels_test.shape)
print(pd.Series(labels_train).groupby(labels_train).size() / len(labels_train))  # class balance
tree = DecisionTreeClassifier(max_depth=2)
tree.fit(attributes_train, labels_train)
# Export tree as data
import pydotplus  # pydotplus replaces pydot on Python 3
import sklearn.tree as sklearn_tree
from io import StringIO  # sklearn.externals.six was removed from newer scikit-learn
# dot_data = StringIO()
# sklearn_tree.export_graphviz(tree, out_file=dot_data)
dot_data = sklearn_tree.export_graphviz(tree, out_file=None, filled=True, rounded=True)
print(sklearn_tree.export_text(tree))
# print(type(dot_data))
graph = pydotplus.graph_from_dot_data(dot_data)
# graph = pydot.graph_from_dot_file("tree.dot")
graph.write_png("tree.png")
"""
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import Image
im = np.array(Image(graph.create_png()))
plt.imshow(im)
plt.show()
"""
print(tree.score(attributes_train, labels_train))
print(tree.score(attributes_test, labels_test))
print(tree.get_depth())
params = {
"max_depth": [2, 4, 10, 15, 20, 25, 28, 30],
"min_samples_leaf": [5, 10, 15, 20, 30, 50],
}
grid = GridSearchCV(
DecisionTreeClassifier(), params, scoring=make_scorer(f1_score, pos_label=" >50K")
)
grid.fit(attributes_train, labels_train)
print(grid.best_params_)
print(grid.best_score_)
# print(grid.cv_results_)
print("----")
print(grid.best_estimator_.score(attributes_train, labels_train))
print(grid.best_estimator_.score(attributes_test, labels_test))
# This metrics is to be used for comparison
predicted_labels_train = grid.best_estimator_.predict(attributes_train)
print(classification_report(labels_train, predicted_labels_train))
predicted_labels_test = grid.best_estimator_.predict(attributes_test)
print(classification_report(labels_test, predicted_labels_test))
# plt.bar(grid.best_estimator_.feature_importances_)
important_features_dict = {}
for x, i in enumerate(grid.best_estimator_.feature_importances_):
important_features_dict[x] = i
print(x, i)
important_features_list = sorted(
important_features_dict, key=important_features_dict.get, reverse=True
)
for x in important_features_list:
print(attributes.columns[x], important_features_dict[x])
importances = grid.best_estimator_.feature_importances_
indices = np.argsort(importances)
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.barh(
range(attributes_train.shape[1]), importances[indices], color="r", align="center"
)
# If you want to define your own labels,
# change indices to a list of labels on the following line.
plt.yticks(range(attributes_train.shape[1]), indices)
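# For readable labels, use the feature names instead of the indices, e.g.:
# plt.yticks(range(attributes_train.shape[1]), attributes.columns[indices])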
plt.ylim([-1, attributes_train.shape[1]])
plt.show()
|
{"hexsha": "95e077299ec656b638156232eb00a14c91c04fff", "size": 4086, "ext": "py", "lang": "Python", "max_stars_repo_path": "4_Tree_And_Ensemble_Methods/PythonSklearn/tree_ensemble_methods.py", "max_stars_repo_name": "vladiant/SoftUniMachineLearning2019", "max_stars_repo_head_hexsha": "29f553000ca5e8fb56ca17b25e581018a656217a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-27T07:25:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-04T08:35:07.000Z", "max_issues_repo_path": "4_Tree_And_Ensemble_Methods/PythonSklearn/tree_ensemble_methods.py", "max_issues_repo_name": "vladiant/SoftUniMachineLearning2019", "max_issues_repo_head_hexsha": "29f553000ca5e8fb56ca17b25e581018a656217a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "4_Tree_And_Ensemble_Methods/PythonSklearn/tree_ensemble_methods.py", "max_forks_repo_name": "vladiant/SoftUniMachineLearning2019", "max_forks_repo_head_hexsha": "29f553000ca5e8fb56ca17b25e581018a656217a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1793103448, "max_line_length": 111, "alphanum_fraction": 0.7733724914, "include": true, "reason": "import numpy", "num_tokens": 941}
|
import networkx as nx
import math
from the_traffic_magic import get_pareto_traffic_one
from helper_scratch import chunks, connect_to_db, EdgeNames, graph_open
from dbhelper_scratch import database_commit, get_all_sd_using_this, get_first_last_from_id, add_to_frist_last, add_to_a
c, conn = connect_to_db()
database_commit(conn)
G = nx.karate_club_graph()
# G = graph_open()
print('Done Reading')
nodes = list(G.nodes())
sub_graph = chunks(nodes, math.ceil(len(nodes)/3))  # split nodes into 3 roughly equal chunks
Mega_graph = [G.subgraph(list(each)) for each in sub_graph]  # note: currently unused below
en = EdgeNames(G.edges())
sd_id = 0
total = len(list(G.nodes()))
curr = 0
for node in G.nodes():
path = nx.single_source_shortest_path(G, node)
for each in path:
        if each > node:  # visit each unordered (source, destination) pair only once
now = path[each]
traffic = get_pareto_traffic_one(1, 20)
add_to_frist_last(c, sd_id, (now[0], now[1]), (now[-2], now[-1]))
for o in range(len(now) - 1):
value = now[o:o + 2]
add_to_a(c, en.get_edge_id((value[0], value[1])), sd_id)
en.update_edge_traffic(value[0], value[1], traffic)
sd_id += 1
database_commit(conn)
print(total - curr, ' more nodes.')
curr += 1
print('Node Exploration Done')
with open('a_db.txt', 'w') as a, open('b_db.txt', 'w') as b:
for each in en.edge:
id = en.get_edge_id(each)
list_all = get_all_sd_using_this(c, id)
print(list_all)
# s = ', '.join([str(int(value_1[0])) for value_1 in list_all]) + '\n'
a.write(', '.join([str(int(value_1[0])) for value_1 in list_all]) + '\n')
b.write(str(en.get_traffic_on_edge(each)) + '\n')
print("Done writing to a and b")
denom = en.total_traffic()
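# Gravity model: the estimated demand for a source-destination pair is the
# product of the traffic on its first and last edges, normalised by the total
# traffic (denom).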
with open('gravity_db.txt', 'w') as g:
for i in range(0, int((total*(total-1)/2))):
l = get_first_last_from_id(c, i)
first = l[0][0].split('_')
# first = en.get_traffic_on_edge((int(first[0]), int(first[1])))
last = l[0][1].split('_')
# last = en.get_traffic_on_edge((int(last[0]), int(last[1])))
g.write(str(en.get_traffic_on_edge((int(first[0]), int(first[1])))*en.get_traffic_on_edge((int(last[0]), int(last[1])))/denom) + '\n')
print("Done writing to gravity")
print()
|
{"hexsha": "14638a8380d155452379db3be1b666685e43978d", "size": 2250, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/1_scratch copy.py", "max_stars_repo_name": "UIC-InDeXLab/SignalReconstruction", "max_stars_repo_head_hexsha": "d9d747eb9281c8e2d59d765b6067a2d57d0a3646", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python/1_scratch copy.py", "max_issues_repo_name": "UIC-InDeXLab/SignalReconstruction", "max_issues_repo_head_hexsha": "d9d747eb9281c8e2d59d765b6067a2d57d0a3646", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/1_scratch copy.py", "max_forks_repo_name": "UIC-InDeXLab/SignalReconstruction", "max_forks_repo_head_hexsha": "d9d747eb9281c8e2d59d765b6067a2d57d0a3646", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-10T00:13:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-10T00:13:18.000Z", "avg_line_length": 32.1428571429, "max_line_length": 142, "alphanum_fraction": 0.624, "include": true, "reason": "import networkx", "num_tokens": 644}
|
% !TeX root = ../main.tex
% Add the above to each chapter to make compiling the PDF easier in some editors.
\chapter{Introduction}\label{chapter:introduction}
\section{The Isabelle Proof Assistant}
\section{The B-Tree Data Structure}
Citation test~\parencite{latex}.
\subsection{Definition}
See~\autoref{tab:sample}, \autoref{fig:sample-drawing}, \autoref{fig:sample-plot}, \autoref{fig:sample-listing}.
\subsection{Applications}
\begin{table}[htpb]
\caption[Example table]{An example for a simple table.}\label{tab:sample}
\centering
\begin{tabular}{l l l l}
\toprule
A & B & C & D \\
\midrule
1 & 2 & 1 & 2 \\
2 & 3 & 2 & 3 \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}[htpb]
\centering
% This should probably go into a file in figures/
\begin{tikzpicture}[node distance=3cm]
\node (R0) {$R_1$};
\node (R1) [right of=R0] {$R_2$};
\node (R2) [below of=R1] {$R_4$};
\node (R3) [below of=R0] {$R_3$};
\node (R4) [right of=R1] {$R_5$};
\path[every node]
(R0) edge (R1)
(R0) edge (R3)
(R3) edge (R2)
(R2) edge (R1)
(R1) edge (R4);
\end{tikzpicture}
\caption[Example drawing]{An example for a simple drawing.}\label{fig:sample-drawing}
\end{figure}
\begin{figure}[htpb]
\centering
\pgfplotstableset{col sep=&, row sep=\\}
% This should probably go into a file in data/
\pgfplotstableread{
a & b \\
1 & 1000 \\
2 & 1500 \\
3 & 1600 \\
}\exampleA
\pgfplotstableread{
a & b \\
1 & 1200 \\
2 & 800 \\
3 & 1400 \\
}\exampleB
% This should probably go into a file in figures/
\begin{tikzpicture}
\begin{axis}[
ymin=0,
legend style={legend pos=south east},
grid,
thick,
ylabel=Y,
xlabel=X
]
\addplot table[x=a, y=b]{\exampleA};
\addlegendentry{Example A};
\addplot table[x=a, y=b]{\exampleB};
\addlegendentry{Example B};
\end{axis}
\end{tikzpicture}
\caption[Example plot]{An example for a simple plot.}\label{fig:sample-plot}
\end{figure}
\begin{figure}[htpb]
\centering
\begin{tabular}{c}
\begin{lstlisting}[language=SQL]
SELECT * FROM tbl WHERE tbl.str = "str"
\end{lstlisting}
\end{tabular}
\caption[Example listing]{An example for a source code listing.}\label{fig:sample-listing}
\end{figure}
|
{"hexsha": "185ff2af86f87b9a266685f6be9540ed9a1d560b", "size": 2375, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/examples.tex", "max_stars_repo_name": "nielstron/btrees-thesis", "max_stars_repo_head_hexsha": "14b6d8a4819378140e5a977e5278ae0a48057f6f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/examples.tex", "max_issues_repo_name": "nielstron/btrees-thesis", "max_issues_repo_head_hexsha": "14b6d8a4819378140e5a977e5278ae0a48057f6f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/examples.tex", "max_forks_repo_name": "nielstron/btrees-thesis", "max_forks_repo_head_hexsha": "14b6d8a4819378140e5a977e5278ae0a48057f6f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7395833333, "max_line_length": 112, "alphanum_fraction": 0.6176842105, "num_tokens": 819}
|
#=
Copyright (c) 2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
=#
module ParForTest
using ParallelAccelerator
#ParallelAccelerator.DomainIR.set_debug_level(3)
#ParallelAccelerator.ParallelIR.set_debug_level(3)
#ParallelAccelerator.CGen.set_debug_level(3)
@acc function parfor1(n)
A = Array(Int, n, n)
@par for i in 1:n, j in 1:n
A[i,j] = i * j
end
return sum(A)
end
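# parfor2 below uses reduction clauses: `s(+)` and `m(+)` declare s and m as
# +-reductions, so each thread accumulates a private copy and the partial
# results are combined after the loop. General form (a sketch):
#   @par acc(op) for i in 1:n ... acc = acc op f(i) ... end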
@acc function parfor2(n)
A = Array(Int, n, n)
s::Int = 0
m::Int = 0
@par s(+) m(+) for i in 1:n, j = 1:n
A[i,j] = i * j
s = s + A[i,j]
m = m + 1
end
return s * m
end
@acc function parfor3(n)
A = Array(Int, n, n)
s::Array{Int, 1} = zeros(n)
m::Int = 0
@par s(.+) m(+) for i in 1:n
for j = 1:n
A[j,i] = i * j
end
s = s .+ A[:,i]
m = m + 1
end
return s .* m
end
function test1()
parfor1(10) == @noacc parfor1(10)
end
function test2()
parfor2(10) == @noacc parfor2(10)
end
function test3()
parfor3(10) == @noacc parfor3(10)
end
end
using Base.Test
println("Testing parfor support via @par macro...")
@test ParForTest.test1()
@test ParForTest.test2()
@test ParForTest.test3()
println("Done testing parfor.")
|
{"hexsha": "67760c50291f941839b093b57a69856b9f688f15", "size": 2391, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/parfor.jl", "max_stars_repo_name": "JuliaPackageMirrors/ParallelAccelerator.jl", "max_stars_repo_head_hexsha": "8c38d3aea0d555264c19a39847b8f538596dff04", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/parfor.jl", "max_issues_repo_name": "JuliaPackageMirrors/ParallelAccelerator.jl", "max_issues_repo_head_hexsha": "8c38d3aea0d555264c19a39847b8f538596dff04", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/parfor.jl", "max_forks_repo_name": "JuliaPackageMirrors/ParallelAccelerator.jl", "max_forks_repo_head_hexsha": "8c38d3aea0d555264c19a39847b8f538596dff04", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8023255814, "max_line_length": 76, "alphanum_fraction": 0.721455458, "num_tokens": 666}
|
[STATEMENT]
lemma no_step_none:
"step e s r aa ba = None \<Longrightarrow> \<not> recognises_execution e s r ((aa, ba) # p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. step e s r aa ba = None \<Longrightarrow> \<not> recognises_execution e s r ((aa, ba) # p)
[PROOF STEP]
using recognises_cons_step
[PROOF STATE]
proof (prove)
using this:
recognises_execution ?e ?s ?r (?h # ?t) \<Longrightarrow> step ?e ?s ?r (fst ?h) (snd ?h) \<noteq> None
goal (1 subgoal):
1. step e s r aa ba = None \<Longrightarrow> \<not> recognises_execution e s r ((aa, ba) # p)
[PROOF STEP]
by fastforce
|
{"llama_tokens": 227, "file": "Extended_Finite_State_Machines_EFSM", "length": 2}
|
from __future__ import division, print_function
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import defaultdict
from sklearn import decomposition
import json
import os
import numpy as np
import pandas as pd
class NMF:
"""
Class for NMF model.
This is a wrapper for sklearn.decomposition.NMF,
which can perform data preprocessing, data acquisition, and model training.
"""
def __init__(self,
csv_path: str,
topics: int = 10,
iterations: int = 100,
words: int = 10,
write_output: bool = True,
min_count: int = 10,
max_freq: int = 20):
self.csv_path = csv_path
self.topics = topics
self.iterations = iterations
self.words = words
self.write_output = write_output
self.min_count = min_count
self.max_freq = max_freq
# Set random seed
np.random.seed(42)
# build corpus
self.corpus = self.acquire_data()
self.corpus = self.corpus.values[:, :1]
self.corpus = [x[0] for x in self.corpus]
self.corpus = np.array(self.corpus)
# build term document matrix
self.vec = self.get_tfidf(self.topics, self.words)
self.tdm = self.get_tdm()
# factor term document matrix
        self.feature_names = self.vec.get_feature_names()  # newer scikit-learn renames this to get_feature_names_out()
self.nmf = self.build_nmf()
# get topics by documents and topics by terms
self.documents_by_topics = self.get_documents_by_topics()
self.topics_by_terms = self.nmf.components_
# get topics by documents and topics by terms
self.docs_to_topics = self.get_doc_to_topics()
self.topics_to_words = self.get_topic_to_words()
# write the results
if self.write_output:
self.write_results()
@staticmethod
def write_json(filename, obj):
if not os.path.exists('results'):
os.makedirs('results')
with open(os.path.join('results', filename), 'w') as out:
json.dump(obj, out)
def acquire_data(self) -> pd.DataFrame:
"""
Acquire data from the csv file
"""
raw_data = pd.read_csv(self.csv_path)
return raw_data
def get_tfidf(self, topics, n_words):
"""
        Return a TfidfVectorizer used to build the input term-document matrix (TDM)
"""
return TfidfVectorizer(
input='content',
stop_words='english',
max_df=self.max_freq,
min_df=self.min_count,
max_features=topics * n_words * 1000
)
def get_tdm(self):
"""
Return a TDM to factor
"""
return self.vec.fit_transform(self.corpus)
def build_nmf(self):
"""
Build the NMF model
:return: A sklearn NMF that can be used to factor the TDM
"""
return decomposition.NMF(n_components=self.topics,
random_state=1,
max_iter=self.iterations)
    def get_documents_by_topics(self):
        """Factor the TDM and return row-normalized document-topic weights."""
        np.seterr(divide='ignore', invalid='ignore')
        docs_by_topics = self.nmf.fit_transform(self.tdm)
        # normalize each row so the topic weights of a document sum to one
        normalized = docs_by_topics / np.sum(docs_by_topics, axis=1, keepdims=True)
        return np.nan_to_num(normalized)  # zero out NaNs from all-zero rows
def get_doc_to_topics(self):
"""
Find the distribution of each topic in each document
In our case, the document is the row from the dataframe
"""
doc_to_topics = defaultdict(lambda: defaultdict())
for doc_id, topic_values in enumerate(self.documents_by_topics):
for topic_id, topic_presence_in_doc in enumerate(topic_values):
data = self.corpus[doc_id]
doc_to_topics[data][topic_id] = topic_presence_in_doc
return doc_to_topics
def get_topic_to_words(self):
"""
Find the top words for each topic
"""
topic_to_words = defaultdict(list)
for topic_id, topic in enumerate(self.topics_by_terms):
top_features = topic.argsort()[:-self.words - 1:-1]
topic_to_words[topic_id] = [self.feature_names[i] for i in top_features]
return topic_to_words
def write_results(self):
self.write_json('doc_to_topics.json', self.docs_to_topics)
self.write_json('topic_to_words.json', self.topics_to_words)
# Small test for the class
if __name__ == '__main__':
nmf = NMF(csv_path='../data/tripadvisor_hotel_reviews.csv', write_output=True)
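    # Inspect the fitted model (illustrative):
    # print(nmf.topics_to_words[0])   # top words of topic 0
    # print(len(nmf.docs_to_topics))  # one entry per document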
|
{"hexsha": "f0e1d09ffa54f5616d7ffe3e5b1d1717243bf18e", "size": 4655, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/nmf.py", "max_stars_repo_name": "popescuaaa/nmf", "max_stars_repo_head_hexsha": "c65263198158e1ba6e1b9270b7d7a87a8803fb72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/nmf.py", "max_issues_repo_name": "popescuaaa/nmf", "max_issues_repo_head_hexsha": "c65263198158e1ba6e1b9270b7d7a87a8803fb72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/nmf.py", "max_forks_repo_name": "popescuaaa/nmf", "max_forks_repo_head_hexsha": "c65263198158e1ba6e1b9270b7d7a87a8803fb72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1034482759, "max_line_length": 84, "alphanum_fraction": 0.6092373792, "include": true, "reason": "import numpy", "num_tokens": 1011}
|
!*==dlarrk.f90 processed by SPAG 7.51RB at 20:08 on 3 Mar 2022
!> \brief \b DLARRK computes one eigenvalue of a symmetric tridiagonal matrix T to suitable accuracy.
!
! =========== DOCUMENTATION ===========
!
! Online html documentation available at
! http://www.netlib.org/lapack/explore-html/
!
!> \htmlonly
!> Download DLARRK + dependencies
!> <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/dlarrk.f">
!> [TGZ]</a>
!> <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/dlarrk.f">
!> [ZIP]</a>
!> <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/dlarrk.f">
!> [TXT]</a>
!> \endhtmlonly
!
! Definition:
! ===========
!
! SUBROUTINE DLARRK( N, IW, GL, GU,
! D, E2, PIVMIN, RELTOL, W, WERR, INFO)
!
! .. Scalar Arguments ..
! INTEGER INFO, IW, N
! DOUBLE PRECISION PIVMIN, RELTOL, GL, GU, W, WERR
! ..
! .. Array Arguments ..
! DOUBLE PRECISION D( * ), E2( * )
! ..
!
!
!> \par Purpose:
! =============
!>
!> \verbatim
!>
!> DLARRK computes one eigenvalue of a symmetric tridiagonal
!> matrix T to suitable accuracy. This is an auxiliary code to be
!> called from DSTEMR.
!>
!> To avoid overflow, the matrix must be scaled so that its
!> largest element is no greater than overflow**(1/2) * underflow**(1/4) in absolute value, and for greatest
!> accuracy, it should not be much smaller than that.
!>
!> See W. Kahan "Accurate Eigenvalues of a Symmetric Tridiagonal
!> Matrix", Report CS41, Computer Science Dept., Stanford
!> University, July 21, 1966.
!> \endverbatim
!
! Arguments:
! ==========
!
!> \param[in] N
!> \verbatim
!> N is INTEGER
!> The order of the tridiagonal matrix T. N >= 0.
!> \endverbatim
!>
!> \param[in] IW
!> \verbatim
!> IW is INTEGER
!>          The index of the eigenvalue to be returned.
!> \endverbatim
!>
!> \param[in] GL
!> \verbatim
!>          GL is DOUBLE PRECISION
!>          A lower bound on the eigenvalue.
!> \endverbatim
!>
!> \param[in] GU
!> \verbatim
!>          GU is DOUBLE PRECISION
!>          An upper bound on the eigenvalue.
!> \endverbatim
!>
!> \param[in] D
!> \verbatim
!> D is DOUBLE PRECISION array, dimension (N)
!> The n diagonal elements of the tridiagonal matrix T.
!> \endverbatim
!>
!> \param[in] E2
!> \verbatim
!> E2 is DOUBLE PRECISION array, dimension (N-1)
!> The (n-1) squared off-diagonal elements of the tridiagonal matrix T.
!> \endverbatim
!>
!> \param[in] PIVMIN
!> \verbatim
!> PIVMIN is DOUBLE PRECISION
!> The minimum pivot allowed in the Sturm sequence for T.
!> \endverbatim
!>
!> \param[in] RELTOL
!> \verbatim
!> RELTOL is DOUBLE PRECISION
!> The minimum relative width of an interval. When an interval
!> is narrower than RELTOL times the larger (in
!> magnitude) endpoint, then it is considered to be
!> sufficiently small, i.e., converged. Note: this should
!> always be at least radix*machine epsilon.
!> \endverbatim
!>
!> \param[out] W
!> \verbatim
!> W is DOUBLE PRECISION
!> \endverbatim
!>
!> \param[out] WERR
!> \verbatim
!> WERR is DOUBLE PRECISION
!> The error bound on the corresponding eigenvalue approximation
!> in W.
!> \endverbatim
!>
!> \param[out] INFO
!> \verbatim
!> INFO is INTEGER
!> = 0: Eigenvalue converged
!> = -1: Eigenvalue did NOT converge
!> \endverbatim
!
!> \par Internal Parameters:
! =========================
!>
!> \verbatim
!> FUDGE DOUBLE PRECISION, default = 2
!> A "fudge factor" to widen the Gershgorin intervals.
!> \endverbatim
!
! Authors:
! ========
!
!> \author Univ. of Tennessee
!> \author Univ. of California Berkeley
!> \author Univ. of Colorado Denver
!> \author NAG Ltd.
!
!> \date June 2017
!
!> \ingroup OTHERauxiliary
!
! =====================================================================
SUBROUTINE DLARRK(N,Iw,Gl,Gu,D,E2,Pivmin,Reltol,W,Werr,Info)
IMPLICIT NONE
!*--DLARRK148
!
! -- LAPACK auxiliary routine (version 3.7.1) --
! -- LAPACK is a software package provided by Univ. of Tennessee, --
! -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
! June 2017
!
! .. Scalar Arguments ..
INTEGER Info , Iw , N
DOUBLE PRECISION Pivmin , Reltol , Gl , Gu , W , Werr
! ..
! .. Array Arguments ..
DOUBLE PRECISION D(*) , E2(*)
! ..
!
! =====================================================================
!
! .. Parameters ..
DOUBLE PRECISION FUDGE , HALF , TWO , ZERO
PARAMETER (HALF=0.5D0,TWO=2.0D0,FUDGE=TWO,ZERO=0.0D0)
! ..
! .. Local Scalars ..
INTEGER i , it , itmax , negcnt
DOUBLE PRECISION atoli , eps , left , mid , right , rtoli , tmp1 ,&
& tmp2 , tnorm
! ..
! .. External Functions ..
DOUBLE PRECISION DLAMCH
EXTERNAL DLAMCH
! ..
! .. Intrinsic Functions ..
INTRINSIC ABS , INT , LOG , MAX
! ..
! .. Executable Statements ..
!
! Quick return if possible
!
IF ( N<=0 ) THEN
Info = 0
RETURN
ENDIF
!
! Get machine constants
eps = DLAMCH('P')
tnorm = MAX(ABS(Gl),ABS(Gu))
rtoli = Reltol
atoli = FUDGE*TWO*Pivmin
itmax = INT((LOG(tnorm+Pivmin)-LOG(Pivmin))/LOG(TWO)) + 2
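!     (itmax is roughly the number of bisection halvings needed to shrink an
!      interval of width ~tnorm down to ~PIVMIN, plus a safety margin of 2)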
Info = -1
left = Gl - FUDGE*tnorm*eps*N - FUDGE*TWO*Pivmin
right = Gu + FUDGE*tnorm*eps*N + FUDGE*TWO*Pivmin
it = 0
DO
!
! Check if interval converged or maximum number of iterations reached
!
tmp1 = ABS(right-left)
tmp2 = MAX(ABS(right),ABS(left))
IF ( tmp1<MAX(atoli,Pivmin,rtoli*tmp2) ) THEN
Info = 0
EXIT
ENDIF
IF ( it>itmax ) EXIT
!
! Count number of negative pivots for mid-point
!
it = it + 1
mid = HALF*(left+right)
negcnt = 0
tmp1 = D(1) - mid
IF ( ABS(tmp1)<Pivmin ) tmp1 = -Pivmin
IF ( tmp1<=ZERO ) negcnt = negcnt + 1
!
DO i = 2 , N
tmp1 = D(i) - E2(i-1)/tmp1 - mid
IF ( ABS(tmp1)<Pivmin ) tmp1 = -Pivmin
IF ( tmp1<=ZERO ) negcnt = negcnt + 1
ENDDO
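!
!        If negcnt >= IW, at least IW eigenvalues lie at or below mid,
!        so the IW-th eigenvalue is in the left half-interval.
!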
IF ( negcnt>=Iw ) THEN
right = mid
ELSE
left = mid
ENDIF
ENDDO
!
! Converged or maximum number of iterations reached
!
W = HALF*(left+right)
Werr = HALF*ABS(right-left)
!
! End of DLARRK
!
END SUBROUTINE DLARRK
|
{"hexsha": "a5828f066176146b8f2e8e56ae9a4f616499307e", "size": 6693, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/double/dlarrk.f90", "max_stars_repo_name": "urbanjost/spag_lapack", "max_stars_repo_head_hexsha": "1b082638c0ff5feb1a295a3aa4624c538d14847c", "max_stars_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2022-03-04T13:15:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T17:40:26.000Z", "max_issues_repo_path": "src/double/dlarrk.f90", "max_issues_repo_name": "urbanjost/spag_lapack", "max_issues_repo_head_hexsha": "1b082638c0ff5feb1a295a3aa4624c538d14847c", "max_issues_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/double/dlarrk.f90", "max_forks_repo_name": "urbanjost/spag_lapack", "max_forks_repo_head_hexsha": "1b082638c0ff5feb1a295a3aa4624c538d14847c", "max_forks_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.772, "max_line_length": 111, "alphanum_fraction": 0.5622291947, "num_tokens": 2063}
|
# A 1D model with no-flux walls at both ends.
# In this case, the orientation reduces to +1 or -1.
from ald.rtp.rtpcompiler import AbstractCompiler
from jinja2 import Template
from ald.rtp.rtpkernels import AbstractRTPKernel
import pycuda.gpuarray as gpuarray
import numpy as np
from ald.rtp.rtpsimulator import RTPSimulator
import pycuda.curandom as curand
from ald.core.external_velocity import ZeroVelocity
from ald.core.ic import InitialConfig
from ald.core.particle import AbstractRTP
import pycuda.compiler as compiler
import pycuda
template1DRTP = Template(
"""
extern "C" {
__global__ void draw_runtimes(double *tauR,
curandStatePhilox4_32_10_t *state,
const int N)
{ // for loop allows more particles than threads.
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < N;
tid += blockDim.x * gridDim.x) {
tauR[tid] = {{runtime}};
}
}
// draw +1/-1 uniformly.
__global__ void draw_binary(int *arr,
curandStatePhilox4_32_10_t *state,
const int N)
{ // for loop allows more particles than threads.
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < N;
tid += blockDim.x * gridDim.x)
{
double U = curand_uniform_double(&state[tid]);
if (U <= 0.5)
{
arr[tid] = -1;
}
else {arr[tid] = 1;}
}
}
// evolution of RTPs in 1D.
__global__ void
update(double *__restrict__ x, // position x
int *__restrict__ direction, // +1 or -1
curandStatePhilox4_32_10_t *__restrict__ state, // RNG state
double *__restrict__ tauR, // reorientation time for each active particle
double *__restrict__ tau, // time since last tumble
double U0, // swim speed
double L, // simulation box length in x
double dt, // time step
int N)
{
// for loop allows more particles than threads.
for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < N;
tid += blockDim.x * gridDim.x) {
// need to tumble
if (tau[tid] >= tauR[tid]) {
// tumbles between +1 and -1: randomly
// double U = curand_uniform_double(&state[tid]);
// // only 50% chance of tumbling away
// if (U <= 0.5) {direction[tid] *= -1;}
direction[tid] *= -1; // always changing direction once runtime is reached.
// reset time since last tumble to zero.
tau[tid] = 0.0;
// after tumbling, need to draw a new tumbling time.
tauR[tid] = {{runtime}};
}
// next update the position
    x[tid] += dt * U0 * direction[tid] + dt * {{u}};
// need to update time since last tumble.
tau[tid] += dt;
// x in [-L/2,L/2]
if (x[tid] < -L / 2.0) {
x[tid] = -L/2.0;
} else if (x[tid] > L / 2.0) {
x[tid] = L/2.0;
}
} // end thread loop.
} // end kernel.
}
"""
)
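# The template above leaves two jinja2 placeholders to be filled at render time:
# {{runtime}} (the device-side run-time draw, supplied by the particle class)
# and {{u}} (the external flow velocity, supplied by the flow object).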
class Confined1DRTPKernel(AbstractRTPKernel):
"""1D two walls."""
def __init__(self):
# no additional args
arg_list = ""
super().__init__(arg_list)
def generate_cuda_code(self, cfg, flow, *args, **kwargs):
"""This kernel is hard coded"""
# need to add a new container for the orientation
setattr(
cfg,
"direction",
gpuarray.GPUArray(cfg.N, np.int32),
)
kernel = template1DRTP.render(runtime=cfg.particle.runtime_code,u=flow.ux)
return kernel
def update(self, func, cfg, threads, blocks):
func(
cfg.x,
cfg.direction,
cfg.state,
cfg.tauR,
cfg.tau,
np.float64(cfg.particle.U0),
np.float64(cfg.domain.Lx),
np.float64(cfg.dt),
np.int32(cfg.N),
block=(threads, 1, 1),
grid=(blocks, 1),
)
return None
class RTP1DCompiler(AbstractCompiler):
"""Cuda code compiler for RTPs."""
def __init__(
self,
kernel,
cfg,
flow=ZeroVelocity(),
ic=InitialConfig(),
):
if not isinstance(cfg.particle, AbstractRTP):
raise TypeError()
if not isinstance(kernel, AbstractRTPKernel):
raise TypeError()
super().__init__(kernel, cfg, flow=flow, ic=ic)
def generate_cuda_code(self, cfg, flow):
# combine cuda source codes
# make sure this function can be run multiple times
# do not touch self.cuda_code_base.
# get the base code
code = self.cuda_code_base
# runtime generation kernel
code += self.particle.runtime_device_code
# append bd_code.
code += self.kernel.generate_cuda_code(cfg, flow)
# append initial condition kernel
code += self.ic.cuda_code
return code
def compile(self, log=None):
"""Compile cuda source code"""
module = compiler.SourceModule(self.cuda_code, no_extern_c=True, keep=False)
# get functions from cuda module
self.update = module.get_function("update")
self.initrand = module.get_function("initrand")
self.init_config = module.get_function("init_config")
self.draw_runtimes = module.get_function("draw_runtimes")
self.draw_binary = module.get_function("draw_binary")
class RTP1DSimulator(RTPSimulator):
"""Langevin simulation of RTPs in 2D (channel or freespace.)"""
def __init__(self, cfg, compiler, threadsPerBlock=None, nblocks=None):
# not initialized yet
super().__init__(
cfg, compiler, threadsPerBlock=threadsPerBlock, nblocks=nblocks
)
def initialize(self, cfg):
"""Initialize the particle and simulation configurations."""
        # initialize direction array
super().initialize(cfg)
self.compiler.draw_binary(
cfg.direction,
cfg.state,
np.int32(cfg.N),
block=(self.threadsPerBlock, 1, 1),
grid=(self.nblocks, 1),
)
return None
def update(self, cfg):
"""One step of the Langevin simulation."""
self.compiler.kernel.update(
self.compiler.update, cfg, self.threadsPerBlock, self.nblocks
)
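# A minimal usage sketch (the construction of `cfg` is an assumption here;
# the real ald configuration API may differ):
#
#   kernel = Confined1DRTPKernel()
#   compiler = RTP1DCompiler(kernel, cfg)  # cfg supplies N, dt, particle, domain.Lx
#   compiler.compile()
#   sim = RTP1DSimulator(cfg, compiler)
#   sim.initialize(cfg)
#   for _ in range(nsteps):                # nsteps: hypothetical step count
#       sim.update(cfg)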
|
{"hexsha": "6976b3da1dda543ddede7ea6e73d4092ab2a2014", "size": 6045, "ext": "py", "lang": "Python", "max_stars_repo_path": "ald/rtp/rtp1d.py", "max_stars_repo_name": "zpeng2/ald", "max_stars_repo_head_hexsha": "040ce6176998a9ca024eb9f420e8c6c63ca6af81", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ald/rtp/rtp1d.py", "max_issues_repo_name": "zpeng2/ald", "max_issues_repo_head_hexsha": "040ce6176998a9ca024eb9f420e8c6c63ca6af81", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-29T06:46:22.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-29T06:46:22.000Z", "max_forks_repo_path": "ald/rtp/rtp1d.py", "max_forks_repo_name": "zpeng2/ald", "max_forks_repo_head_hexsha": "040ce6176998a9ca024eb9f420e8c6c63ca6af81", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7783251232, "max_line_length": 84, "alphanum_fraction": 0.6148883375, "include": true, "reason": "import numpy,import pycuda", "num_tokens": 1523}
|
from __future__ import absolute_import
import wx
from wx.lib.pubsub import pub
import wx.lib.layoutf as layoutf
import numpy as np
import threading
import warnings
import psutil
import time
import os
import sys
import pickle
import glob
from astropy.io import fits
from astropy import wcs
from astropy.coordinates import ICRS
from astropy import units
import astropy.visualization
from astropy.stats import sigma_clipped_stats
import matplotlib
matplotlib.interactive(True)
try:
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
except IOError:
# on some linux installations this import needs to be done twice as the first time raises an error:
# IOError: [Errno 2] No such file or directory: '/tmp/matplotlib-parallels/fontList.cache'
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
from matplotlib.widgets import AxesWidget
from matplotlib.patches import Rectangle
from matplotlib import cm
from matplotlib.colors import Normalize
from .file_picker import FilePicker
from .fits_header_dialog import FITSHeaderDialog
from .ztv_lib import send_to_stream, StreamListener, StreamListenerTimeOut
from .ztv_wx_lib import set_textctrl_background_color, validate_textctrl_str
base_dir = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(base_dir, "__about__.py")) as f:
exec(f.read(), about)
class Error(Exception):
pass
def clear_ticks_and_frame_from_axes(axes):
"""
Remove ticks and frame from an axes.
    This is kept as a stand-alone helper so that sub-classes of ImagePanel can
    substitute their own version as needed.
"""
    if axes is None:
        from matplotlib import pyplot as plt  # local import: pyplot is not imported at module level
        axes = plt.gca()
axes.xaxis.set_visible(False)
axes.yaxis.set_visible(False)
for spine in axes.spines.itervalues():
spine.set_visible(False)
class ColorMaps():
def basic(self):
"""
return a selection of useful colormaps that is less overwhelming than ALL of them
"""
return ['gray', 'bone', 'Blues_r', 'Greens_r', 'Oranges_r', 'RdPu_r', 'hot', 'gist_heat',
'rainbow', 'hsv', 'spectral', 'gnuplot', 'jet', 'viridis']
def all_except_reversed(self):
return [a for a in cm.datad if not a.endswith('_r')]
def all(self):
return [a for a in cm.datad]
class PrimaryImagePanel(wx.Panel):
def __init__(self, parent, dpi=None, **kwargs):
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.Size(512,512), **kwargs)
self.ztv_frame = self.GetTopLevelParent()
self.accelerator_table = []
self.center = wx.RealPoint()
self.zoom_rect = None
self.eventID_to_cmap = {wx.NewId(): x for x in self.ztv_frame.available_cmaps}
self.cmap_to_eventID = {self.eventID_to_cmap[x]: x for x in self.eventID_to_cmap}
self.eventID_to_scaling = {wx.NewId(): x for x in self.ztv_frame.available_scalings}
self.scaling_to_eventID = {self.eventID_to_scaling[x]: x for x in self.eventID_to_scaling}
cmap_bitmap_height = 15
cmap_bitmap_width = 100
self.cmap_bitmaps = {}
for cmap in self.ztv_frame.available_cmaps:
temp = cm.ScalarMappable(cmap=cmap)
rgba = temp.to_rgba(np.outer(np.ones(cmap_bitmap_height, dtype=np.uint8),
np.arange(cmap_bitmap_width, dtype=np.uint8)))
self.cmap_bitmaps[cmap] = wx.BitmapFromBufferRGBA(cmap_bitmap_width, cmap_bitmap_height,
np.uint8(np.round(rgba*255)))
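        # Each bitmap is a horizontal gradient rendered through the colormap,
        # used as the icon for that colormap's entry in the popup submenu.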
self.popup_menu_cursor_modes = ['Zoom', 'Pan']
self.available_cursor_modes = {'Zoom':{'set-to-mode':self.set_cursor_to_zoom_mode},
'Pan':{'set-to-mode':self.set_cursor_to_pan_mode}}
self.available_key_presses = {}
self.cursor_mode = 'Zoom'
self.max_doubleclick_sec = 0.5 # needed to trap 'real' single clicks from the first click of a double click
self.popup_menu_needs_rebuild = True
self.popup_menu = None
self.xlim = [-9e9, 9e9]
self.ylim = [-9e9, 9e9]
self.figure = Figure(None, dpi)
self.axes = self.figure.add_axes([0., 0., 1., 1.])
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.Bind(wx.EVT_SIZE, self._onSize)
self.axes_widget = AxesWidget(self.figure.gca())
self.axes_widget.connect_event('motion_notify_event', self.on_motion)
self.axes_widget.connect_event('figure_leave_event', self.on_cursor_leave)
self.axes_widget.connect_event('figure_enter_event', self.on_cursor_enter)
self.axes_widget.connect_event('button_press_event', self.on_button_press)
self.axes_widget.connect_event('button_release_event', self.on_button_release)
self.axes_widget.connect_event('key_press_event', self.on_key_press)
        wx.EVT_RIGHT_DOWN(self.figure.canvas, self.on_right_down)  # supersedes the above button_press_event
pub.subscribe(self.redraw_primary_image, 'redraw-image')
pub.subscribe(self.reset_zoom_and_center, 'reset-zoom-and-center')
pub.subscribe(self.set_zoom_factor, 'set-zoom-factor')
pub.subscribe(self.set_xy_center, 'set-xy-center')
def _append_menu_item(self, menu, wx_id, title, fxn):
if wx_id is None:
wx_id = wx.NewId()
menu.Append(wx_id, title)
wx.EVT_MENU(menu, wx_id, fxn)
return wx_id
def init_popup_menu(self):
self.popup_menu_needs_rebuild = False
if self.popup_menu is not None:
self.popup_menu.Destroy()
menu = wx.Menu()
menu.Append(wx.NewId(), 'Cursor mode:').Enable(False)
self.cursor_mode_to_eventID = {}
cmd_num = 1
for cursor_mode in self.popup_menu_cursor_modes:
fxn = self.available_cursor_modes[cursor_mode]['set-to-mode']
wx_id = wx.NewId()
menu.AppendCheckItem(wx_id, ' ' + cursor_mode + '\tCtrl+' + str(cmd_num))
wx.EVT_MENU(menu, wx_id, fxn)
self.cursor_mode_to_eventID[cursor_mode] = wx_id
self.Bind(wx.EVT_MENU, fxn, id=wx_id)
self.accelerator_table.append((wx.ACCEL_CMD, ord(str(cmd_num)), wx_id))
cmd_num += 1
menu.AppendSeparator()
image_cmap_submenu = wx.Menu()
for cmap in self.ztv_frame.available_cmaps:
menu_item = image_cmap_submenu.AppendCheckItem(self.cmap_to_eventID[cmap], cmap)
wx.EVT_MENU(image_cmap_submenu, self.cmap_to_eventID[cmap], self.on_change_cmap_event)
menu_item.SetBitmap(self.cmap_bitmaps[cmap])
menu.AppendMenu(-1, 'Color Maps', image_cmap_submenu)
wx_id = wx.NewId()
self.menu_item_invert_map = menu.AppendCheckItem(wx_id, 'Invert Color Map')
wx.EVT_MENU(menu, wx_id, self.ztv_frame.invert_cmap)
self.menu_item_invert_map.Check(self.ztv_frame.is_cmap_inverted)
image_scaling_submenu = wx.Menu()
for scaling in self.ztv_frame.available_scalings:
menu_item = image_scaling_submenu.AppendCheckItem(self.scaling_to_eventID[scaling], scaling)
wx.EVT_MENU(image_scaling_submenu, self.scaling_to_eventID[scaling], self.on_change_scaling_event)
menu.AppendMenu(-1, 'Scaling', image_scaling_submenu)
menu.AppendSeparator()
self.popup_menu_cur_fits_header_eventID = wx.NewId()
self._append_menu_item(menu, self.popup_menu_cur_fits_header_eventID, 'FITS Header',
self.on_display_cur_fits_header)
self.popup_menu = menu
self.SetAcceleratorTable(wx.AcceleratorTable(self.accelerator_table))
def on_display_cur_fits_header(self, event):
raw_header_str = self.ztv_frame.cur_fits_hdulist[0].header.tostring()
header_str = (('\n'.join([raw_header_str[i:i+80] for i in np.arange(0, len(raw_header_str), 80)
if raw_header_str[i:i+80] != " "*80])) + '\n')
if hasattr(self, 'cur_fits_header_dialog') and self.cur_fits_header_dialog.is_dialog_still_open:
self.cur_fits_header_dialog.SetTitle(self.ztv_frame.cur_fitsfile_basename)
self.cur_fits_header_dialog.text.SetValue(header_str)
self.cur_fits_header_dialog.last_find_index = 0
self.cur_fits_header_dialog.on_search(None)
else:
self.cur_fits_header_dialog = FITSHeaderDialog(self, header_str, self.ztv_frame.cur_fitsfile_basename)
self.cur_fits_header_dialog.Show()
def set_and_get_xy_limits(self):
canvas_size = self.canvas.GetSize()
num_x_pixels = canvas_size.x
halfsize = (num_x_pixels / 2.0) / self.ztv_frame.zoom_factor
xlim = (self.center.x - halfsize, self.center.x + halfsize)
self.axes.set_xlim(xlim)
num_y_pixels = canvas_size.y
halfsize = (num_y_pixels / 2.0) / self.ztv_frame.zoom_factor
ylim = (self.center.y - halfsize, self.center.y + halfsize)
self.axes.set_ylim(ylim)
self.figure.canvas.draw() # bulk of time in method is spent in this line: TODO: look for ways to make faster
send_change_message = True
if xlim == self.xlim and ylim == self.ylim:
send_change_message = False
self.xlim, self.ylim = xlim, ylim
if send_change_message:
wx.CallAfter(pub.sendMessage, 'primary-xy-limits-changed', msg=None)
return {'xlim':xlim, 'ylim':ylim}
def set_cursor_to_none_mode(self, event):
self.cursor_mode = 'None'
self.ztv_frame.controls_notebook.clear_highlights()
def set_cursor_to_zoom_mode(self, event):
self.cursor_mode = 'Zoom'
self.ztv_frame.controls_notebook.clear_highlights()
def set_cursor_to_pan_mode(self, event):
self.cursor_mode = 'Pan'
self.ztv_frame.controls_notebook.clear_highlights()
def on_key_press(self, event):
# TODO: figure out why keypresses are only recognized after a click in the matplotlib frame.
if event.key == 'right':
self.ztv_frame.set_cur_display_frame_num(1, relative=True)
elif event.key == 'left':
self.ztv_frame.set_cur_display_frame_num(-1, relative=True)
elif event.key in self.available_key_presses:
self.available_key_presses[event.key](event)
def set_xy_center(self, msg):
if self.center.x != msg[0] or self.center.y != msg[1]:
self.center.x = msg[0]
self.center.y = msg[1]
self.set_and_get_xy_limits()
def set_zoom_factor(self, msg):
zoom_factor = msg
old_zoom_factor = self.ztv_frame.zoom_factor
if zoom_factor > 0.0:
self.ztv_frame.zoom_factor = zoom_factor
if old_zoom_factor != self.ztv_frame.zoom_factor:
self.set_and_get_xy_limits()
def reset_zoom_and_center(self, msg=None):
self.center.x = (self.ztv_frame.display_image.shape[1] / 2.) - 0.5
self.center.y = (self.ztv_frame.display_image.shape[0] / 2.) - 0.5
max_zoom_x = self.canvas.GetSize().x / float(self.ztv_frame.display_image.shape[1])
max_zoom_y = self.canvas.GetSize().y / float(self.ztv_frame.display_image.shape[0])
self.ztv_frame.zoom_factor = min(max_zoom_x, max_zoom_y)
self.set_and_get_xy_limits()
def on_change_cmap_event(self, event):
wx.CallAfter(pub.sendMessage, 'set-cmap', msg=(self.ztv_frame._pause_redraw_image,
self.eventID_to_cmap[event.GetId()]))
def on_change_scaling_event(self, event):
wx.CallAfter(pub.sendMessage, 'set-scaling', msg=(self.ztv_frame._pause_redraw_image,
self.eventID_to_scaling[event.GetId()]))
def on_button_press(self, event):
if event.button == 1: # left button
if self.cursor_mode == 'Zoom':
if event.dblclick:
self.center = wx.RealPoint(event.xdata, event.ydata)
self.ztv_frame.zoom_factor /= 2.
self.set_and_get_xy_limits()
else:
self.zoom_start_timestamp = time.time()
self.zoom_rect = Rectangle((event.xdata, event.ydata), 0, 0,
color='orange', fill=False, zorder=100)
self.axes.add_patch(self.zoom_rect)
self.figure.canvas.draw()
elif self.cursor_mode == 'Pan':
self.center = wx.RealPoint(event.xdata, event.ydata)
self.set_and_get_xy_limits()
else:
if (self.available_cursor_modes.has_key(self.cursor_mode) and
self.available_cursor_modes[self.cursor_mode].has_key('on_button_press')):
self.available_cursor_modes[self.cursor_mode]['on_button_press'](event)
def on_motion(self, event):
if event.xdata is None or event.ydata is None:
return
x = int(np.round(event.xdata))
y = int(np.round(event.ydata))
if event.button is not None:
if self.cursor_mode == 'Zoom' and self.zoom_rect is not None:
x0,y0 = self.zoom_rect.get_x(),self.zoom_rect.get_y()
self.zoom_rect.set_bounds(x0, y0, event.xdata - x0, event.ydata - y0)
self.figure.canvas.draw()
else:
if (self.available_cursor_modes.has_key(self.cursor_mode) and
self.available_cursor_modes[self.cursor_mode].has_key('on_motion')):
self.available_cursor_modes[self.cursor_mode]['on_motion'](event)
if ((x >= 0) and (x < self.ztv_frame.display_image.shape[1]) and
(y >= 0) and (y < self.ztv_frame.display_image.shape[0])):
imval = self.ztv_frame.display_image[y, x]
new_status_string = "x,y={},{}".format(x, y)
if self.ztv_frame.image_radec is not None:
c = self.ztv_frame.image_radec[y, x]
new_status_string += " radec={0} {1}".format(c.ra.to_string(units.hour, sep=':', precision=2, pad=True),
c.dec.to_string(sep=':', precision=2, alwayssign=True,
pad=True))
new_status_string += " val={:.5g}".format(imval)
self.ztv_frame.status_bar.SetStatusText(new_status_string, 0)
self.ztv_frame.loupe_image_panel.set_xy_limits((x, y))
# finally, catch for a situation where cursor should be active, but didn't enter, e.g. window launched under cursor
if not hasattr(self, 'saved_cursor') or self.saved_cursor is None:
self.on_cursor_enter(event)
else:
self.ztv_frame.status_bar.SetStatusText("", 0)
self.ztv_frame.loupe_image_panel.set_xy_limits()
def on_button_release(self, event):
if event.button == 1: # left button
if self.cursor_mode == 'Zoom':
# this catches for the first click-release of a double-click
if (time.time() - self.zoom_start_timestamp) > self.max_doubleclick_sec:
# this catches for a long click-and-release without motion
x0,y0 = self.zoom_rect.get_x(),self.zoom_rect.get_y()
x1 = x0 + self.zoom_rect.get_width()
y1 = y0 + self.zoom_rect.get_height()
if hasattr(event, 'xdata') and event.xdata is not None:
x1 = event.xdata
if hasattr(event, 'ydata') and event.ydata is not None:
y1 = event.ydata
if abs(x0 - x1) >= 2 and abs(y0 - y1) >= 2:
self.center = wx.RealPoint((x0 + x1)/2., (y0 + y1)/2.)
panel_size = self.canvas.GetSize()
x_zoom_factor = panel_size.x / abs(x1 - x0)
y_zoom_factor = panel_size.y / abs(y1 - y0)
self.ztv_frame.zoom_factor = min(x_zoom_factor, y_zoom_factor)
self.set_and_get_xy_limits()
if self.zoom_rect in self.axes.patches:
self.axes.patches.remove(self.zoom_rect)
self.zoom_rect = None
self.figure.canvas.draw()
else:
if (self.available_cursor_modes.has_key(self.cursor_mode) and
self.available_cursor_modes[self.cursor_mode].has_key('on_button_release')):
self.available_cursor_modes[self.cursor_mode]['on_button_release'](event)
def on_right_down(self, event):
if self.popup_menu_needs_rebuild or self.popup_menu is None:
self.init_popup_menu()
for cursor_mode in self.cursor_mode_to_eventID:
self.popup_menu.Check(self.cursor_mode_to_eventID[cursor_mode], False)
self.popup_menu.Check(self.cursor_mode_to_eventID[self.cursor_mode], True)
for cmap in self.ztv_frame.available_cmaps:
self.popup_menu.Check(self.cmap_to_eventID[cmap], False)
self.popup_menu.Check(self.cmap_to_eventID[self.ztv_frame.cmap], True)
for scaling in self.ztv_frame.available_scalings:
self.popup_menu.Check(self.scaling_to_eventID[scaling], False)
self.popup_menu.Check(self.scaling_to_eventID[self.ztv_frame.scaling], True)
if self.ztv_frame.cur_fits_hdulist is None:
self.popup_menu.Enable(self.popup_menu_cur_fits_header_eventID, False)
else:
self.popup_menu.Enable(self.popup_menu_cur_fits_header_eventID, True)
self.figure.canvas.PopupMenuXY(self.popup_menu, event.GetX() + 8, event.GetY() + 8)
def on_cursor_leave(self, event):
self.ztv_frame.status_bar.SetStatusText('', 0)
self.ztv_frame.loupe_image_panel.set_xy_limits()
if hasattr(self, 'saved_cursor') and self.saved_cursor is not None:
self.figure.canvas.SetCursor(self.saved_cursor)
self.saved_cursor = None
def on_cursor_enter(self, event):
self.saved_cursor = self.figure.canvas.GetCursor()
self.figure.canvas.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))
def _onSize(self, event):
self._SetSize()
def _SetSize(self):
pixels = tuple(self.GetClientSize())
self.SetSize(pixels)
self.canvas.SetSize(pixels)
self.figure.set_size_inches(float(pixels[0])/self.figure.get_dpi(),
float(pixels[1])/self.figure.get_dpi())
self.set_and_get_xy_limits()
def redraw_primary_image(self, msg=None):
if msg is True or self.ztv_frame._pause_redraw_image:
return
if hasattr(self, 'axes_image'):
if self.axes_image in self.axes.images:
self.axes.images.remove(self.axes_image)
self.axes_image = self.axes.imshow(self.ztv_frame.normalize(self.ztv_frame.display_image),
interpolation='Nearest',
cmap=self.ztv_frame.get_cmap_to_display(), zorder=0)
clear_ticks_and_frame_from_axes(self.axes)
self.set_and_get_xy_limits()
# self.figure.canvas.draw() is not needed here, b/c called from within set_and_get_xy_limits
class OverviewImagePanel(wx.Panel):
def __init__(self, parent, size=wx.Size(128,128), dpi=None, **kwargs):
self.size = size
self.dragging_curview_is_active = False
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, size, 0, **kwargs)
self.ztv_frame = self.GetTopLevelParent()
self.figure = Figure(None, dpi)
self.axes = self.figure.add_axes([0., 0., 1., 1.])
self.curview_rectangle = Rectangle((0, 0), 1, 1, color='orange', fill=False, zorder=100)
self.axes.add_patch(self.curview_rectangle)
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self.overview_zoom_factor = 1.
self._SetSize()
self.set_xy_limits()
self.axes_widget = AxesWidget(self.figure.gca())
self.axes_widget.connect_event('button_press_event', self.on_button_press)
self.axes_widget.connect_event('button_release_event', self.on_button_release)
self.axes_widget.connect_event('motion_notify_event', self.on_motion)
pub.subscribe(self.redraw_overview_image, 'redraw-image')
pub.subscribe(self.redraw_box, 'primary-xy-limits-changed')
def redraw_box(self, msg=None):
xlim = self.ztv_frame.primary_image_panel.xlim
ylim = self.ztv_frame.primary_image_panel.ylim
self.curview_rectangle.set_bounds(xlim[0], ylim[0], xlim[1] - xlim[0], ylim[1] - ylim[0])
self.figure.canvas.draw()
def on_button_press(self, event):
if event.dblclick:
self.ztv_frame.primary_image_panel.reset_zoom_and_center()
else:
if self.curview_rectangle.contains(event)[0]:
self.dragging_curview_is_active = True
self.convert_x_to_xdata = lambda x: (x / self.overview_zoom_factor) + self.xlim[0]
self.convert_y_to_ydata = lambda y: (y / self.overview_zoom_factor) + self.ylim[0]
self.dragging_cursor_xdata0 = self.convert_x_to_xdata(event.x)
self.dragging_cursor_ydata0 = self.convert_y_to_ydata(event.y)
self.dragging_rect_xdata0 = self.ztv_frame.primary_image_panel.center.x
self.dragging_rect_ydata0 = self.ztv_frame.primary_image_panel.center.y
self.convert_dragging_x_to_new_center_x = lambda x: ((self.convert_x_to_xdata(x) -
self.dragging_cursor_xdata0) +
self.dragging_rect_xdata0)
self.convert_dragging_y_to_new_center_y = lambda y: ((self.convert_y_to_ydata(y) -
self.dragging_cursor_ydata0) +
self.dragging_rect_ydata0)
def on_button_release(self, event):
self.dragging_curview_is_active = False
def on_motion(self, event):
if self.dragging_curview_is_active:
new_center_x = self.convert_dragging_x_to_new_center_x(event.x)
new_center_y = self.convert_dragging_y_to_new_center_y(event.y)
new_center_x_constrained = min(max(new_center_x, self.xlim[0]), self.xlim[1])
new_center_y_constrained = min(max(new_center_y, self.ylim[0]), self.ylim[1])
if np.sqrt((new_center_x - new_center_x_constrained) ** 2 +
(new_center_y - new_center_y_constrained) ** 2) >= 100:
new_center_x = self.dragging_rect_xdata0
new_center_y = self.dragging_rect_ydata0
else:
new_center_x = new_center_x_constrained
new_center_y = new_center_y_constrained
self.ztv_frame.primary_image_panel.center.x = new_center_x
self.ztv_frame.primary_image_panel.center.y = new_center_y
self.ztv_frame.primary_image_panel.set_and_get_xy_limits()
def _SetSize(self):
self.SetSize(tuple(self.size))
self.canvas.SetSize(tuple(self.size))
self.figure.set_size_inches(float(self.size[0])/self.figure.get_dpi(),
float(self.size[1])/self.figure.get_dpi())
def set_xy_limits(self):
max_zoom_x = self.size.x / float(self.ztv_frame.display_image.shape[1])
max_zoom_y = self.size.y / float(self.ztv_frame.display_image.shape[0])
self.overview_zoom_factor = min(max_zoom_x, max_zoom_y)
x_cen = (self.ztv_frame.display_image.shape[1] / 2.) - 0.5
y_cen = (self.ztv_frame.display_image.shape[0] / 2.) - 0.5
halfXsize = self.size.x / (self.overview_zoom_factor * 2.)
halfYsize = self.size.y / (self.overview_zoom_factor * 2.)
self.xlim = (x_cen - halfXsize, x_cen + halfXsize)
self.ylim = (y_cen - halfYsize, y_cen + halfYsize)
self.axes.set_xlim(self.xlim)
self.axes.set_ylim(self.ylim)
def redraw_overview_image(self, msg=None):
if msg is True or self.ztv_frame._pause_redraw_image:
return
if hasattr(self, 'axes_image'):
if self.axes_image in self.axes.images:
self.axes.images.remove(self.axes_image)
# note that following is not an actual rebin, but a sub-sampling, which is what matplotlib ultimately
# would do on its own anyway if we gave it the full image. But, matplotlib takes longer. For a 2Kx2K
# image, this saves almost 0.3sec on a ~2014 MacBookProRetina
max_rebin_x = float(self.ztv_frame.display_image.shape[1]) / self.size.x
max_rebin_y = float(self.ztv_frame.display_image.shape[0]) / self.size.y
rebin_factor = max(1, np.int(np.floor(min([max_rebin_x, max_rebin_y]))))
self.axes_image = self.axes.imshow(self.ztv_frame.normalize(self.ztv_frame.display_image)[::rebin_factor,
::rebin_factor],
interpolation='Nearest', vmin=0., vmax=1.,
extent=[0., self.ztv_frame.display_image.shape[1],
self.ztv_frame.display_image.shape[0], 0.],
cmap=self.ztv_frame.get_cmap_to_display(), zorder=0)
clear_ticks_and_frame_from_axes(self.axes)
self.set_xy_limits()
self.figure.canvas.draw()
class LoupeImagePanel(wx.Panel):
def __init__(self, parent, size=wx.Size(128,128), dpi=None, **kwargs):
self.size = size
self.size_npix_xy = wx.Size(11, 11)
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, size, 0, **kwargs)
self.ztv_frame = self.GetTopLevelParent()
self.figure = Figure(None, dpi)
self.axes = self.figure.add_axes([0., 0., 1., 1.])
self.canvas = FigureCanvasWxAgg(self, -1, self.figure)
self._SetSize()
self.set_xy_limits()
pub.subscribe(self.redraw_loupe_image, 'redraw-image')
def _SetSize(self):
self.SetSize(tuple(self.size))
self.canvas.SetSize(tuple(self.size))
self.figure.set_size_inches(float(self.size[0])/self.figure.get_dpi(),
float(self.size[1])/self.figure.get_dpi())
def set_xy_limits(self, center=wx.Point(-9999, -9999)):
self.axes.set_xlim([center[0] - self.size_npix_xy[0]/2.0, center[0] + self.size_npix_xy[0]/2.0])
self.axes.set_ylim([center[1] - self.size_npix_xy[1]/2.0, center[1] + self.size_npix_xy[1]/2.0])
if getattr(self, "crosshair", None) is None:
self.crosshair = self.axes.plot([center[0]], [center[1]], 'gx', zorder=100, markersize=7)
else:
self.crosshair[0].set_data([center[0]], [center[1]])
self.figure.canvas.draw()
def redraw_loupe_image(self, msg=None):
if msg is True or self.ztv_frame._pause_redraw_image:
return
if hasattr(self, 'axes_image'):
if self.axes_image in self.axes.images:
self.axes.images.remove(self.axes_image)
self.axes_image = self.axes.imshow(self.ztv_frame.normalize(self.ztv_frame.display_image),
interpolation='Nearest',
cmap=self.ztv_frame.get_cmap_to_display(), zorder=0)
clear_ticks_and_frame_from_axes(self.axes)
self.figure.canvas.draw() # bulk of time in method is spent in this line: TODO: look for ways to make faster
class ControlsNotebook(wx.Notebook):
# see "Book" Controls -> Notebook example in wxpython demo
def __init__(self, parent):
wx.Notebook.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0)
self.highlight_char = unichr(0x2022)
self._prior_notebook_page_change = (None, None)
self.ztv_frame = self.GetTopLevelParent()
self.ztv_frame.control_panels = [] # list of currently loaded/visible control panels, in order of display
for cur_title, cur_panel in self.ztv_frame.control_panels_to_load:
self.AddPanelAndStoreID(cur_panel(self), cur_title)
self.ztv_frame.primary_image_panel.init_popup_menu()
def AddPanelAndStoreID(self, panel, text, **kwargs):
new_page_image_id = len(self.ztv_frame.control_panels)
setattr(panel, 'ztv_page_id', new_page_image_id)
setattr(panel, 'ztv_display_name', text)
setattr(panel, 'ztv_ref_name', text.lower() + '_panel')
setattr(panel, 'highlight_panel', lambda : self._highlight_page(panel))
setattr(panel, 'select_panel', lambda : self.SetSelection(panel.ztv_page_id))
setattr(self.ztv_frame, text.lower() + '_panel', panel)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.notebook_page_changed)
self.AddPage(panel, text)
self.ztv_frame.control_panels.append(panel)
def notebook_page_changed(self, evt):
oldnew = (evt.GetOldSelection(), evt.GetSelection())
# EVT_NOTEBOOK_PAGE_CHANGED seems to be called 4-5 or more times per actual change, with identical
# OldSelection,Selection; so, need to filter those additional calls out after first one
if oldnew != self._prior_notebook_page_change:
if hasattr(self.ztv_frame.control_panels[oldnew[1]], 'on_activate'):
self.ztv_frame.control_panels[oldnew[1]].on_activate()
self._prior_notebook_page_change = oldnew
evt.Skip()
def clear_highlights(self):
for cur_id in range(len(self.ztv_frame.control_panels)):
if self.GetPageText(cur_id).startswith(self.highlight_char):
self.SetPageText(cur_id, self.GetPageText(cur_id)[1:])
def _highlight_page(self, panel=None):
self.clear_highlights()
if panel is not None:
new_name = self.highlight_char + self.GetPageText(panel.ztv_page_id)
self.SetPageText(panel.ztv_page_id, new_name)
class ZTVFrame(wx.Frame):
def __init__(self, title=None, launch_listen_thread=False, control_panels_to_load=None,
default_data_dir=None, default_autoload_pattern=None):
self.__version__ = version=about["__version__"]
self.ztv_frame_pid = os.getpid() # some add-on control panels will want this to pass to subprocs for knowing when to kill themselves, but NOTE: currently (as of 2015-04-13) on OS X is not working right as process doesn't die fully until uber-python session is killed.
if title is None:
self.base_title = 'ztv'
else:
self.base_title = title
if default_data_dir is None:
default_data_dir = os.getcwd()
if default_autoload_pattern is None:
default_autoload_pattern = os.getcwd()
self.default_data_dir = default_data_dir
self.default_autoload_pattern = default_autoload_pattern
if control_panels_to_load is None:
from .default_panels import control_panels_to_load
self.control_panels_to_load = control_panels_to_load
wx.Frame.__init__(self, None, title=self.base_title, pos=wx.DefaultPosition, size=wx.Size(1024,512),
style = wx.DEFAULT_FRAME_STYLE)
pub.subscribe(self.kill_ztv, 'kill-ztv')
pub.subscribe(self.load_numpy_array, 'load-numpy-array')
pub.subscribe(self.load_fits_file, 'load-fits-file')
pub.subscribe(self.load_default_image, 'load-default-image')
self._pause_redraw_image = False
self.cur_fitsfile_basename = ''
self.cur_fitsfile_path = ''
self.image_process_functions_to_apply = [] # list of tuples of ('NameOrLabelIdentifier', fxn), where fxn must accept the image and return the processed image
self.raw_image = np.zeros([2, 2]) # underlying raw data, can be 2-d [y,x] or 3-d [z,y,x]
self.proc_image = self.raw_image.copy() # raw_image processed with currently selected flat/sky/etc
self.cur_display_frame_num = 0 # ignored if raw_image/proc_image is 2-d, otherwise
# display_image is proc_image[self.cur_display_frame_num,:,:]
self.display_image = self.proc_image.copy() # 2-d array of what is currently displayed on-screen
# display_image is primarily different from proc_image in that proc_image can be 3-d, while
# display_image is always 2-d
# except for the above initialization, display_image should *never* be changed except by self.recalc_display_image
self._display_image_min = None # _display_image_min/max will be recalc'd in a 'safe' way (ignoring Inf/NaN)
self._display_image_max = None # as needed when display_image_min()/max() are called
self.normalized_image = None # this will be display_image clipped to clim and scaled (e.g. Linear/Log)
self._need_to_recalc_normalization = False
self._norm = None
self._scaling = None
self.available_cmaps = ColorMaps().basic()
self.cmap = 'jet' # will go back to gray later
self.is_cmap_inverted = False
self.accelerator_table = [] # keyboard accelerators, e.g. cmd-Q
self.zoom_factor = 2.0
pub.subscribe(self.invert_cmap, 'invert-cmap')
pub.subscribe(self.set_cmap, 'set-cmap')
pub.subscribe(self.set_cmap_inverted, 'set-cmap-inverted')
self.clim = [0.0, 1.0]
pub.subscribe(self.set_clim_to_minmax, 'set-clim-to-minmax')
pub.subscribe(self.set_clim_to_auto, 'set-clim-to-auto')
pub.subscribe(self.set_clim_to_auto_stats_box, 'set-clim-to-auto-stats-box')
pub.subscribe(self.set_clim, 'set-clim')
pub.subscribe(self.set_scaling, 'set-scaling')
pub.subscribe(self.set_norm, 'clim-changed')
pub.subscribe(self.set_norm, 'scaling-changed')
pub.subscribe(self.recalc_proc_image, 'image-process-functions-to-apply-changed')
pub.subscribe(self.set_cur_display_frame_num, 'set-cur-display-frame-num')
pub.subscribe(self.set_window_title, 'set-window-title')
self.scaling = 'Linear'
self.available_scalings = ['Linear', 'Asinh', 'Log', 'PowerDist', 'Sinh', 'Sqrt', 'Squared']
# scalings that require inputs & need additional work to implement:
# 'AsymmetricPercentile', 'ContrastBias', 'HistEq', 'Power'
# don't bother implementing these unless strong case is made they're needed in a way that existing can't satisfy
self.available_value_modes_on_new_image = ['data-min/max', 'auto', 'auto-stats-box', 'constant']
self.min_value_mode_on_new_image = 'data-min/max'
self.max_value_mode_on_new_image = 'data-min/max'
self.main_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.primary_image_panel = PrimaryImagePanel(self)
self.primary_image_panel.SetMinSize(wx.Size(256, 256))
self.main_sizer.Add(self.primary_image_panel, 1, wx.EXPAND | wx.ALL, border=5)
self.controls_sizer = wx.BoxSizer(wx.VERTICAL)
self.controls_sizer.SetMinSize(wx.Size(512, -1))
self.controls_images_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.overview_image_panel = OverviewImagePanel(self)
self.controls_images_sizer.Add(self.overview_image_panel, 0, wx.ALL, border=5)
self.loupe_image_panel = LoupeImagePanel(self)
self.controls_images_sizer.Add(self.loupe_image_panel, 0, wx.BOTTOM|wx.RIGHT|wx.TOP, border=5)
self.frame_number_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.frame_number_fullleft_button = wx.Button(self, -1, unichr(0x21e4), style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, lambda x: self.set_cur_display_frame_num(0), self.frame_number_fullleft_button)
self.frame_number_sizer.Add(self.frame_number_fullleft_button, 0, wx.ALIGN_CENTER_VERTICAL)
self.frame_number_left_button = wx.Button(self, -1, unichr(0x2190), style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, lambda x: self.set_cur_display_frame_num(-1, True), self.frame_number_left_button)
self.frame_number_sizer.Add(self.frame_number_left_button, 0, wx.ALIGN_CENTER_VERTICAL)
textentry_font = wx.Font(14, wx.FONTFAMILY_MODERN, wx.NORMAL, wx.FONTWEIGHT_LIGHT, False)
self.frame_number_textctrl = wx.TextCtrl(self, wx.ID_ANY, '0', wx.DefaultPosition, wx.Size(40, 21),
wx.TE_PROCESS_ENTER|wx.TE_CENTRE)
self.frame_number_textctrl.SetFont(textentry_font)
self.frame_number_sizer.Add(self.frame_number_textctrl, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.frame_number_textctrl.Bind(wx.EVT_TEXT, self.frame_number_textctrl_changed)
self.frame_number_textctrl.Bind(wx.EVT_TEXT_ENTER, self.frame_number_textctrl_entered)
self.frame_number_right_button = wx.Button(self, -1, unichr(0x2192), style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, lambda x: self.set_cur_display_frame_num(1, True), self.frame_number_right_button)
self.frame_number_sizer.Add(self.frame_number_right_button, 0, wx.ALIGN_CENTER_VERTICAL)
self.frame_number_fullright_button = wx.Button(self, -1, unichr(0x21e5), style=wx.BU_EXACTFIT)
self.Bind(wx.EVT_BUTTON, lambda x: self.set_cur_display_frame_num(-1), self.frame_number_fullright_button)
self.frame_number_sizer.Add(self.frame_number_fullright_button, 0, wx.ALIGN_CENTER_VERTICAL)
self.total_frame_numbers_text = wx.StaticText(self, wx.ID_ANY, u"of 9999", wx.DefaultPosition,
wx.DefaultSize, 0 )
self.total_frame_numbers_text.Wrap( -1 )
self.frame_number_sizer.Add(self.total_frame_numbers_text, 0, wx.ALIGN_CENTER_VERTICAL, 0)
self.controls_images_sizer.AddSpacer((0, 0), 1, wx.EXPAND, 0)
self.controls_images_sizer.Add(self.frame_number_sizer, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_BOTTOM, 5)
self.controls_sizer.Add(self.controls_images_sizer, 0, wx.EXPAND, border=5)
self.controls_notebook_sizer = wx.BoxSizer(wx.VERTICAL)
self.controls_notebook = ControlsNotebook(self)
self.controls_notebook_sizer.Add(self.controls_notebook, 1, wx.EXPAND | wx.ALL, border=0)
self.controls_sizer.Add(self.controls_notebook_sizer, 1, wx.EXPAND, border=0)
self.main_sizer.Add(self.controls_sizer, 0, wx.EXPAND, border=5)
self.SetSizer(self.main_sizer)
self.status_bar = self.CreateStatusBar(2)
rw, rh = self.primary_image_panel.GetSize()
sw, sh = self.controls_sizer.GetSize()
fw, fh = self.GetSize()
h = max(512, fh)
w = h + fw - rw - (fh - rh) # (fh - rh) accounts for status bar and window bar
self.SetSize((w, h))
self.Layout()
self.Centre(wx.BOTH)
self.load_default_image()
self.cur_fits_hdulist = None
if launch_listen_thread:
self.command_listener_thread = CommandListenerThread(self)
self.set_cmap((False, 'gray'))
temp_id = wx.NewId()
self.Bind(wx.EVT_MENU, self.kill_ztv, id=temp_id)
self.accelerator_table.append((wx.ACCEL_CMD, ord('Q'), temp_id))
self.accelerator_table.append((wx.ACCEL_CMD, ord('W'), temp_id))
rightarrow_id = wx.NewId()
self.Bind(wx.EVT_MENU, self.on_cmd_right_arrow, id=rightarrow_id)
self.accelerator_table.append((wx.ACCEL_CMD, ord(']'), rightarrow_id))
leftarrow_id = wx.NewId()
self.Bind(wx.EVT_MENU, self.on_cmd_left_arrow, id=leftarrow_id)
self.accelerator_table.append((wx.ACCEL_CMD, ord('['), leftarrow_id))
for n in np.arange(1,10):
new_id = wx.NewId()
self.Bind(wx.EVT_MENU, self.create_on_cmd_alt_number(n), id=new_id)
self.accelerator_table.append((wx.ACCEL_CMD|wx.ACCEL_ALT, ord(str(n)), new_id))
self.SetAcceleratorTable(wx.AcceleratorTable(self.accelerator_table))
self.Show()
def create_on_cmd_alt_number(self, n):
def on_cmd_alt_number(evt):
try:
self.controls_notebook.SetSelection(n - 1)
except:
pass # if this page # doesn't exist...
return on_cmd_alt_number
def kill_ztv(self, msg=None):
self.Close()
def on_cmd_left_arrow(self, evt):
self.controls_notebook.SetSelection((self.controls_notebook.GetSelection() - 1) % len(self.control_panels))
def on_cmd_right_arrow(self, evt):
self.controls_notebook.SetSelection((self.controls_notebook.GetSelection() + 1) % len(self.control_panels))
def get_cmap_to_display(self):
if self.is_cmap_inverted:
if self.cmap.endswith('_r'):
return self.cmap.replace('_r', '')
else:
return self.cmap + '_r'
else:
return self.cmap
    # Any method that calls redraw_image (or, more often, uses wx.CallAfter
    # to send a redraw-image message) should accept a tuple msg whose first
    # element, msg[0], is the boolean pause_redraw_image flag.
    # Routines should test (pause_redraw_image or self._pause_redraw_image)
    # and send the redraw-image message *only* if nothing is asking for a pause.
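    # A minimal handler following this protocol (an illustrative sketch with a
    # made-up attribute 'foo'; set_cmap_inverted below shows the real pattern):
    #   def set_foo(self, msg):
    #       pause_redraw_image, new_foo = msg
    #       self.foo = new_foo
    #       if not (pause_redraw_image or self._pause_redraw_image):
    #           wx.CallAfter(pub.sendMessage, 'redraw-image', msg=False)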
def set_cmap_inverted(self, msg):
"""
msg is tuple of two booleans: (pause_redraw_image, is_cmap_inverted)
"""
old_is_cmap_inverted = self.is_cmap_inverted
pause_redraw_image, self.is_cmap_inverted = msg
if old_is_cmap_inverted != self.is_cmap_inverted:
wx.CallAfter(pub.sendMessage, 'is-cmap-inverted-changed', msg=None)
if not (pause_redraw_image or self._pause_redraw_image):
wx.CallAfter(pub.sendMessage, 'redraw-image', msg=False)
def invert_cmap(self, msg=(False,)):
"""
msg is (pause_redraw_image, )
"""
self.set_cmap_inverted(((msg[0] or self._pause_redraw_image), not self.is_cmap_inverted))
def set_cmap(self, msg):
"""
Verify that requested cmap is in the list (or it's reversed equivalent) and set it
msg is tuple: (pause_redraw_image, new_cmap)
"""
pause_redraw_image, new_cmap = msg
old_cmap = self.cmap
lower_available_cmaps = [a.lower() for a in self.available_cmaps]
if new_cmap.lower() in lower_available_cmaps:
self.cmap = self.available_cmaps[lower_available_cmaps.index(new_cmap.lower())]
self.set_cmap_inverted(((pause_redraw_image or self._pause_redraw_image), False))
elif new_cmap.replace('_r', '').lower() in lower_available_cmaps:
self.cmap = self.available_cmaps[lower_available_cmaps.index(new_cmap.lower().replace('_r', ''))]
self.set_cmap_inverted(((pause_redraw_image or self._pause_redraw_image), True))
elif (new_cmap.lower() + '_r') in lower_available_cmaps:
self.cmap = self.available_cmaps[lower_available_cmaps.index(new_cmap.lower() + '_r')]
self.set_cmap_inverted(((pause_redraw_image or self._pause_redraw_image), True))
else:
sys.stderr.write("unrecognized cmap ({}) requested\n".format(new_cmap))
if self.cmap != old_cmap:
wx.CallAfter(pub.sendMessage, 'cmap-changed', msg=None)
if not (pause_redraw_image or self._pause_redraw_image):
wx.CallAfter(pub.sendMessage, 'redraw-image', msg=False)
def set_clim(self, msg):
"""
msg is tuple: (pause_redraw_image, (clim[0], clim[1]))
"""
pause_redraw_image, clim = msg
old_clim = self.clim
if clim[0] is None:
clim[0] = self.clim[0]
if clim[1] is None:
clim[1] = self.clim[1]
if clim[0] > clim[1]:
self.clim = [clim[1], clim[0]]
self.set_cmap_inverted(((pause_redraw_image or self._pause_redraw_image), not self.is_cmap_inverted))
else:
self.clim = clim
if old_clim != self.clim:
wx.CallAfter(pub.sendMessage, 'clim-changed', msg=((pause_redraw_image or self._pause_redraw_image),))
def set_clim_to_minmax(self, msg=(False,)):
"""
msg is tuple: (pause_redraw_image, )
"""
self.set_clim(((msg[0] or self._pause_redraw_image),
[self.display_image_min(), self.display_image_max()]))
def get_auto_clim_values(self, *args):
"""
Set min/max of display to n_sigma_below and n_sigma_above background
'cheat' for speed by sampling only a subset of pts
"""
finite_mask = np.isfinite(self.display_image)
n_finite_pts = finite_mask.sum()
if n_finite_pts > 0:
n_pts = 1000
# sample ALL points unless the sampled points will be reasonably nicely distributed. e.g.
# n_pts=1000, n_finite_pts=1999 -> all samples would be clumped in one half.
# factor of 5* means that the 'missing' unsampled clump at the end is <=20% of total pts, which seems reasonable
if n_finite_pts < (5*n_pts):
robust_mean, robust_median, robust_stdev = sigma_clipped_stats(self.display_image[finite_mask])
else:
stepsize = n_finite_pts/n_pts
robust_mean, robust_median, robust_stdev = sigma_clipped_stats(self.display_image[finite_mask].ravel()[0::stepsize])
n_sigma_below = 1.0
n_sigma_above = 6.
return (robust_mean - n_sigma_below * robust_stdev, robust_mean + n_sigma_above * robust_stdev)
else:
return (0., 0.) # no valid pixels
def get_auto_stats_box_clim_values(self, *args):
"""
Set min/max of display to n_sigma_below and n_sigma_above background in stats box
'cheat' for speed by sampling only a subset of pts
"""
if (isinstance(self.stats_panel.stats_info, dict) and
self.stats_panel.stats_info.has_key('xrange') and
self.stats_panel.stats_info.has_key('yrange')):
temp_image = self.display_image[min(self.stats_panel.stats_info['yrange']):
max(self.stats_panel.stats_info['yrange']),
min(self.stats_panel.stats_info['xrange']):
max(self.stats_panel.stats_info['xrange'])]
else:
temp_image = self.display_image
finite_mask = np.isfinite(temp_image)
n_finite_pts = finite_mask.sum()
if n_finite_pts > 0:
n_pts = 1000
# sample ALL points unless the sampled points will be reasonably nicely distributed. e.g.
# n_pts=1000, n_finite_pts=1999 -> all samples would be clumped in one half.
# factor of 5* means that the 'missing' unsampled clump at the end is <=20% of total pts, which seems reasonable
if n_finite_pts < (5*n_pts):
robust_mean, robust_median, robust_stdev = sigma_clipped_stats(temp_image[finite_mask])
else:
stepsize = n_finite_pts/n_pts
robust_mean, robust_median, robust_stdev = sigma_clipped_stats(temp_image[finite_mask].ravel()[0::stepsize])
n_sigma_below = 1.0
n_sigma_above = 6.
return (robust_mean - n_sigma_below * robust_stdev, robust_mean + n_sigma_above * robust_stdev)
else:
return (0., 0.) # no valid pixels
def set_clim_to_auto_stats_box(self, msg=(False,)):
"""
msg is tuple: (pause_redraw_image, )
"""
auto_clim = self.get_auto_stats_box_clim_values()
self.set_clim(((msg[0] or self._pause_redraw_image), [auto_clim[0], auto_clim[1]]))
def set_clim_to_auto(self, msg=(False,)):
"""
msg is tuple: (pause_redraw_image, )
"""
auto_clim = self.get_auto_clim_values()
self.set_clim(((msg[0] or self._pause_redraw_image), [auto_clim[0], auto_clim[1]]))
def set_norm(self, msg=(False,)):
"""
msg is tuple: (pause_redraw_image, )
"""
if self._norm is None or self.clim != self._set_norm_old_clim:
self._norm = Normalize(vmin=self.clim[0], vmax=self.clim[1])
self._set_norm_old_clim = self.clim
self._need_to_recalc_normalization = True
if self._scaling is None or self.scaling != self._set_norm_old_scaling:
self._scaling = eval('astropy.visualization.' + self.scaling + 'Stretch()')
self._set_norm_old_scaling = self.scaling
self._need_to_recalc_normalization = True
if not (msg[0] or self._pause_redraw_image):
wx.CallAfter(pub.sendMessage, 'redraw-image', msg=False)
    def normalize(self, im):
        if self._need_to_recalc_normalization or self.normalized_image is None:
            # clip the passed-in image to clim and apply the current scaling
            # (all current call sites pass self.display_image)
            self.normalized_image = self._scaling(self._norm(im))
            self._need_to_recalc_normalization = False
        return self.normalized_image
def set_scaling(self, msg):
"""
msg is (pause_redraw_image, scaling)
"""
pause_redraw_image, scaling = msg
available_scalings_lowercase = [a.lower() for a in self.available_scalings]
if scaling.lower() in available_scalings_lowercase:
self.scaling = self.available_scalings[available_scalings_lowercase.index(scaling.lower())]
wx.CallAfter(pub.sendMessage, 'scaling-changed', msg=((pause_redraw_image or self._pause_redraw_image),))
else:
sys.stderr.write("unrecognized scaling ({}) requested\n".format(scaling))
def frame_number_textctrl_changed(self, evt):
validate_textctrl_str(self.frame_number_textctrl, int, str(self.cur_display_frame_num))
def frame_number_textctrl_entered(self, evt):
if validate_textctrl_str(self.frame_number_textctrl, int, str(self.cur_display_frame_num)):
self.set_cur_display_frame_num(int(self.frame_number_textctrl.GetValue()))
self.frame_number_textctrl.SetSelection(-1, -1)
def set_cur_display_frame_num(self, msg, relative=False):
"""
sets self.cur_display_frame_num to n (with -1 meaning last, -2 second to last, etc)
if relative=True, then increments by n.
Will automatically bound to existing number of frames
To ensure proper error checking & notifications, *all* changes to self.cur_display_frame_num
should come through this method
"""
if not isinstance(msg, int) and len(msg) == 2:
n, flag = msg
if flag == 'relative':
relative = True
elif flag == 'absolute':
relative = False
else:
n = msg
if self.proc_image.ndim == 2:
cur_total_frames = 1
else:
cur_total_frames = self.proc_image.shape[0]
if relative:
n = self.cur_display_frame_num + n
else:
if n < 0:
n = cur_total_frames + n
n = min(max(0, n), cur_total_frames - 1)
self.cur_display_frame_num = n
self.frame_number_textctrl.SetValue("{}".format(n))
set_textctrl_background_color(self.frame_number_textctrl, 'ok')
self.recalc_display_image()
def recalc_proc_image(self, msg=(False,)):
"""
msg is (pause_redraw_image, )
"""
self.proc_image = self.raw_image.copy()
for cur_imageproc_label, cur_imageproc_fxn in self.image_process_functions_to_apply:
self.proc_image = cur_imageproc_fxn(self.proc_image)
self.recalc_display_image(msg=((msg[0] or self._pause_redraw_image),))
wx.CallAfter(pub.sendMessage, 'recalc-proc-image-called',
msg=((msg[0] or self._pause_redraw_image),))
def _recalc_display_image_minmax(self):
finite_mask = np.isfinite(self.display_image)
        if finite_mask.any():
self._display_image_min = self.display_image[finite_mask].min()
self._display_image_max = self.display_image[finite_mask].max()
else:
self._display_image_min = 0.
self._display_image_max = 0.
def display_image_min(self):
if self._display_image_min is None:
self._recalc_display_image_minmax()
return self._display_image_min
def display_image_max(self):
if self._display_image_max is None:
self._recalc_display_image_minmax()
return self._display_image_max
def recalc_display_image(self, msg=(False,)):
if self.proc_image.ndim == 2:
self.display_image = self.proc_image.copy()
elif self.proc_image.ndim == 3:
# clip self.cur_display_frame_num to allowed range
self.display_image = self.proc_image[min(max(0, self.cur_display_frame_num),
self.proc_image.shape[0] - 1), :, :]
else:
raise Error("proc_image must be 2-d or 3-d, was instead {}-d".format(self.proc_image.ndim))
self._display_image_min = None
self._display_image_max = None
new_min, new_max = None, None
if self.min_value_mode_on_new_image == 'data-min/max':
new_min = self.display_image_min()
elif self.min_value_mode_on_new_image == 'auto':
auto_clim_values = self.get_auto_clim_values()
new_min = auto_clim_values[0]
elif self.min_value_mode_on_new_image == 'auto-stats-box':
auto_stats_box_clim_values = self.get_auto_stats_box_clim_values()
new_min = auto_stats_box_clim_values[0]
if self.max_value_mode_on_new_image == 'data-min/max':
new_max = self.display_image_max()
elif self.max_value_mode_on_new_image == 'auto':
if self.min_value_mode_on_new_image != 'auto': # only calculate if didn't already calculate above
auto_clim_values = self.get_auto_clim_values()
new_max = auto_clim_values[1]
elif self.max_value_mode_on_new_image == 'auto-stats-box':
if self.min_value_mode_on_new_image != 'auto-stats-box': # only calculate if didn't already calculate above
auto_stats_box_clim_values = self.get_auto_stats_box_clim_values()
new_max = auto_stats_box_clim_values[1]
self._need_to_recalc_normalization = True
self.set_clim(((msg[0] or self._pause_redraw_image), [new_min, new_max]))
wx.CallAfter(pub.sendMessage, 'recalc-display-image-called',
msg=((msg[0] or self._pause_redraw_image),))
def load_numpy_array(self, msg, is_fits_file=False):
        self._pause_redraw_image = True # pause redrawing during loading so that we don't redraw at every step of the way
image = msg
if not is_fits_file:
self.cur_fits_hdulist = None
if (image.ndim != 2) and (image.ndim != 3):
sys.stderr.write("Only supports numpy arrays of 2-d or 3-d; " +
"tried to load a {}-d numpy array".format(image.ndim))
else:
need_to_reset_zoom_and_center = False
self.cur_display_frame_num = 0
old_2d_shape = self.raw_image.shape[-2:]
new_2d_shape = image.shape[-2:]
if new_2d_shape != old_2d_shape:
need_to_reset_zoom_and_center = True
if (len(image.shape) == 3) and (image.shape[0] == 1):
image = image[0, :, :]
self.raw_image = image
self.image_radec = None
self.cur_fitsfile_basename = ''
self.recalc_proc_image(msg=(self._pause_redraw_image,))
if need_to_reset_zoom_and_center:
self.primary_image_panel.reset_zoom_and_center()
self.SetTitle(self.base_title)
if self.raw_image.ndim == 2:
self.frame_number_sizer.ShowItems(False)
else:
self.frame_number_sizer.ShowItems(True)
self.frame_number_textctrl.SetValue('0')
self.total_frame_numbers_text.SetLabel('of {}'.format(self.raw_image.shape[0]))
self._pause_redraw_image = False
wx.CallAfter(pub.sendMessage, 'redraw-image', msg=(self._pause_redraw_image,))
def load_hdulist_from_fitsfile(self, filename):
"""
The purpose of wrapping fits.open inside this routine is to put
all the warning suppressions, flags, etc in one place.
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
max_n_tries = 5
pause_time_between_tries_sec = 1.
cur_try = 0
not_yet_successful = True
            hdulist = None
            while (cur_try < max_n_tries) and not_yet_successful:
                try:
                    hdulist = fits.open(filename, ignore_missing_end=True)
                    not_yet_successful = False
                except: # I've only seen IOError, but might as well catch all errors and re-try
                    time.sleep(pause_time_between_tries_sec)
                    cur_try += 1
            if hdulist is None:  # don't return an unbound name if every try failed
                raise Error("Could not open file after {} tries: {}".format(max_n_tries, filename))
            return hdulist
def set_window_title(self, msg=None):
new_title = 'ztv'
if len(self.cur_fitsfile_basename) > 0:
sky_subtraction = 'sky-subtraction' in [a[0] for a in self.image_process_functions_to_apply]
flat_division = 'flat-division' in [a[0] for a in self.image_process_functions_to_apply]
new_title += ': '
if flat_division:
new_title += '('
new_title += self.cur_fitsfile_basename
if sky_subtraction:
new_title += ' - ' + os.path.basename(self.source_panel.sky_file_fullname)
if flat_division:
new_title += ') / ' + os.path.basename(self.source_panel.flat_file_fullname)
self.SetTitle(new_title)
def load_fits_file(self, msg):
filename = msg
if isinstance(filename, str) or isinstance(filename, unicode):
if filename.lower().endswith('.fits') or filename.lower().endswith('.fits.gz'):
if os.path.isfile(filename):
# TODO: be more flexible about hdulist where image data is NOT just [0].data
# TODO also, in case of extended fits files need to deal with additional header info
# following try/except handles situation when autoloading files tries to autoload a file
# before it's been fully written to disk.
max_n_tries = 5
pause_time_between_tries_sec = 1.
cur_try = 0
not_yet_successful = True
while (cur_try < max_n_tries) and not_yet_successful:
try:
self.cur_fits_hdulist = self.load_hdulist_from_fitsfile(filename)
self.load_numpy_array(self.cur_fits_hdulist[0].data, is_fits_file=True)
not_yet_successful = False
                    except: # I've only seen ValueError, but might as well catch all errors and re-try
time.sleep(pause_time_between_tries_sec)
cur_try += 1
self.cur_fitsfile_basename = os.path.basename(filename)
self.cur_fitsfile_path = os.path.abspath(os.path.dirname(filename))
self.set_window_title()
if (hasattr(self.primary_image_panel, 'cur_fits_header_dialog') and
self.primary_image_panel.cur_fits_header_dialog.is_dialog_still_open):
raw_header_str = self.cur_fits_hdulist[0].header.tostring()
header_str = (('\n'.join([raw_header_str[i:i+80] for i in np.arange(0, len(raw_header_str), 80)
if raw_header_str[i:i+80] != " "*80])) + '\n')
self.primary_image_panel.cur_fits_header_dialog.SetTitle(self.cur_fitsfile_basename)
self.primary_image_panel.cur_fits_header_dialog.text.SetValue(header_str)
self.primary_image_panel.cur_fits_header_dialog.last_find_index = 0
self.primary_image_panel.cur_fits_header_dialog.on_search(None)
# TODO: better error handling for if WCS not available or partially available
try:
w = wcs.WCS(self.cur_fits_hdulist[0].header)
# TODO: (urgent) need to check ones/arange in following, do I have this reversed?
a = w.all_pix2world(
np.outer(np.ones(self.raw_image.shape[-2]),
np.arange(self.raw_image.shape[-1])),
np.outer(np.arange(self.raw_image.shape[-2]),
np.ones(self.raw_image.shape[-1])),
0)
self.image_radec = ICRS(a[0]*units.degree, a[1]*units.degree)
except: # just ignore radec if anything at all goes wrong.
self.image_radec = None
wx.CallAfter(pub.sendMessage, 'fitsfile-loaded', msg=filename)
else:
raise Error("Cannot find file: {}".format(filename))
else:
raise Error("Requested filename ({}) does not end with .fits, .fits.gz, " +
"or other capitalization of those".format(filename))
else:
raise Error("load_fits_file requires string input, not type: {}".format(type(filename)))
def get_default_image(self):
imsize_x = 256
imsize_y = 256
im = np.sin(np.outer(np.arange(imsize_y), np.ones(imsize_x)) * np.pi / (imsize_y - 1.0))**3
im *= np.sin(np.outer(np.ones(imsize_y), np.arange(imsize_x)) * np.pi / (imsize_x - 1.0))**3
im *= np.angle(np.fft.fft2(np.sin(np.outer(np.arange(imsize_y), np.arange(imsize_x)) * 12*np.pi / min(imsize_x, imsize_y))))
return im
def load_default_image(self, msg=None):
self.load_numpy_array(self.get_default_image())
self.primary_image_panel.reset_zoom_and_center()
class WatchMasterPIDThread(threading.Thread):
def __init__(self, masterPID):
if masterPID > 0: # don't start unless there's a valid process ID
threading.Thread.__init__(self)
self.masterPID = masterPID
self.daemon = True
self.start()
def run(self):
time.sleep(10) # wait after launch before beginning to check for PID
while psutil.pid_exists(self.masterPID):
time.sleep(2)
sys.stderr.write("\n\n----\nlooks like python session that owned this instance of the " +
"ZTV gui is gone, so disposing of the window\n----\n")
wx.CallAfter(pub.sendMessage, 'kill-ztv', msg=None)
class CommandListenerThread(threading.Thread):
def __init__(self, ztv_frame):
"""
CommandListenerThread expects to be passed the main ZTVFrame object. Access to the ZTVFrame must be used
*very* carefully. Essentially view this access as "readonly". It's easy to screw things up with the gui if
CommandListenerThread starts messing with parameters in ZTVFrame. The appropriate way for CommandListenerThread
to send commands to ZTVFrame is with a wx.CallAfter(pub.sendMessage.... call, e.g.:
wx.CallAfter(pub.sendMessage, 'load-default-image', None)
"""
threading.Thread.__init__(self)
self.ztv_frame = ztv_frame
self.daemon = True
self.keep_running = True
self.start()
def run(self):
stream_listener = StreamListener(sys.stdin)
while self.keep_running:
try:
x = stream_listener.read_pickled_message(timeout=1.)
except EOFError: # means we are done here...
return
except StreamListenerTimeOut:
pass
else:
if not isinstance(x, tuple):
raise Error("ListenThread only accepts tuples")
wx.GetApp().ProcessIdle() # give time for any parameter changes to take effect
if (x[0].startswith('get-') and
hasattr(self.ztv_frame, x[0][4:].replace('-', '_')) and
not callable(getattr(self.ztv_frame, x[0][4:].replace('-', '_')))):
# catch the easiest cases where we just want some parameter out of ztv_frame, e.g.:
                        # ztv_frame.cmap is returned by the request message 'get-cmap'
wx.CallAfter(send_to_stream, sys.stdout, (x[0][4:],
getattr(self.ztv_frame, x[0][4:].replace('-', '_'))))
elif x[0] == 'get-xy-center':
wx.CallAfter(send_to_stream, sys.stdout,
(x[0][4:], (self.ztv_frame.primary_image_panel.center.x,
self.ztv_frame.primary_image_panel.center.y)))
# TODO: the following N elif statements accessing/controlling source_panel elements is ripe for some sort of sensible refactoring
elif x[0] == 'set-sky-subtraction-status':
if hasattr(self.ztv_frame, 'source_panel'):
if x[1]:
self.ztv_frame.source_panel.load_sky_subtraction_to_process_stack()
else:
self.ztv_frame.source_panel.unload_sky_subtraction_from_process_stack()
elif x[0] == 'set-sky-subtraction-filename':
if hasattr(self.ztv_frame, 'source_panel'):
self.ztv_frame.source_panel.load_sky_frame(x[1])
elif x[0] == 'get-sky-subtraction-status-and-filename':
if hasattr(self.ztv_frame, 'source_panel'):
sky_subtraction_loaded = False
if 'sky-subtraction' in [a[0] for a in self.ztv_frame.image_process_functions_to_apply]:
sky_subtraction_loaded = True
wx.CallAfter(send_to_stream, sys.stdout,
(x[0][4:], (sky_subtraction_loaded,
self.ztv_frame.source_panel.sky_file_fullname)))
else:
send_to_stream(sys.stdout, (x[0][4:], 'source_panel not available'))
elif x[0] == 'set-flat-division-status':
if hasattr(self.ztv_frame, 'source_panel'):
if x[1]:
self.ztv_frame.source_panel.load_flat_division_to_process_stack()
else:
self.ztv_frame.source_panel.unload_flat_division_from_process_stack()
elif x[0] == 'set-flat-division-filename':
if hasattr(self.ztv_frame, 'source_panel'):
self.ztv_frame.source_panel.load_flat_frame(x[1])
elif x[0] == 'get-flat-division-status-and-filename':
if hasattr(self.ztv_frame, 'source_panel'):
flat_division_loaded = False
if 'flat-division' in [a[0] for a in self.ztv_frame.image_process_functions_to_apply]:
flat_division_loaded = True
wx.CallAfter(send_to_stream, sys.stdout,
(x[0][4:],
(flat_division_loaded,
self.ztv_frame.source_panel.flatfile_file_picker.current_textctrl_GetValue())))
else:
send_to_stream(sys.stdout, (x[0][4:], 'source_panel not available'))
elif x[0] == 'set-autoload-filename-pattern-status':
if hasattr(self.ztv_frame, 'source_panel'):
if x[1]:
self.ztv_frame.source_panel.launch_autoload_filematch_thread()
self.ztv_frame.source_panel.autoload_mode = 'file-match'
else:
self.ztv_frame.source_panel.kill_autoload_filematch_thread()
self.ztv_frame.source_panel.autoload_mode = None
elif x[0] == 'set-autoload-filename-pattern':
if hasattr(self.ztv_frame, 'source_panel'):
self.ztv_frame.source_panel.autoload_curfile_file_picker_on_load(x[1])
elif x[0] == 'get-autoload-status-and-filename-pattern':
if hasattr(self.ztv_frame, 'source_panel'):
wx.CallAfter(send_to_stream, sys.stdout,
(x[0][4:],
(self.ztv_frame.source_panel.autoload_mode == 'file-match',
self.ztv_frame.source_panel.autoload_match_string)))
else:
send_to_stream(sys.stdout, (x[0][4:], 'source_panel not available'))
elif x[0] == 'set-autoload-pausetime':
if hasattr(self.ztv_frame, 'source_panel'):
i = np.abs(np.array(self.ztv_frame.source_panel.autoload_pausetime_choices) -
float(x[1])).argmin()
self.ztv_frame.source_panel.autoload_pausetime = self.ztv_frame.source_panel.autoload_pausetime_choices[i]
self.ztv_frame.source_panel.autoload_pausetime_choice.SetSelection(i)
elif x[0] == 'get-autoload-pausetime':
if hasattr(self.ztv_frame, 'source_panel'):
wx.CallAfter(send_to_stream, sys.stdout,
(x[0][4:], self.ztv_frame.source_panel.autoload_pausetime))
else:
send_to_stream(sys.stdout, (x[0][4:], 'source_panel not available'))
elif x[0] == 'switch-to-control-panel':
name_lower = x[1].lower()
display_names_lower = [a.ztv_display_name.lower() for a in self.ztv_frame.control_panels]
if name_lower in display_names_lower:
self.ztv_frame.control_panels[display_names_lower.index(name_lower)].select_panel()
else:
wx.CallAfter(pub.sendMessage, x[0], msg=(None if len(x) == 1 else x[1]))
class ZTVMain():
def __init__(self, title=None, masterPID=-1, launch_listen_thread=False, control_panels_to_load=None,
default_data_dir=None, default_autoload_pattern=None):
self.__version__ = version=about["__version__"]
WatchMasterPIDThread(masterPID)
app = wx.App(False)
self.frame = ZTVFrame(title=title, launch_listen_thread=launch_listen_thread,
control_panels_to_load=control_panels_to_load,
default_data_dir=default_data_dir,
default_autoload_pattern=default_autoload_pattern)
app.MainLoop()
# TODO: need to figure out why ztvframe_pid is being left alive
if __name__ == '__main__':
ZTVMain()
|
{"hexsha": "bbadb364b95e6aa6d25a733136428cd72b27b623", "size": 72350, "ext": "py", "lang": "Python", "max_stars_repo_path": "CAAPR/CAAPR_AstroMagic/PTS/pts/magic/view/ztv.py", "max_stars_repo_name": "wdobbels/CAAPR", "max_stars_repo_head_hexsha": "50d0b32642a61af614c22f1c6dc3c4a00a1e71a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2016-05-20T21:56:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T21:09:48.000Z", "max_issues_repo_path": "CAAPR/CAAPR_AstroMagic/PTS/pts/magic/view/ztv.py", "max_issues_repo_name": "wdobbels/CAAPR", "max_issues_repo_head_hexsha": "50d0b32642a61af614c22f1c6dc3c4a00a1e71a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-21T16:10:04.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-22T17:21:56.000Z", "max_forks_repo_path": "CAAPR/CAAPR_AstroMagic/PTS/pts/magic/view/ztv.py", "max_forks_repo_name": "wdobbels/CAAPR", "max_forks_repo_head_hexsha": "50d0b32642a61af614c22f1c6dc3c4a00a1e71a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-19T16:17:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-19T16:17:17.000Z", "avg_line_length": 54.1136873598, "max_line_length": 276, "alphanum_fraction": 0.6242017968, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 16360}
|
*DECK DBSPVD
SUBROUTINE DBSPVD (T, K, NDERIV, X, ILEFT, LDVNIK, VNIKX, WORK)
C***BEGIN PROLOGUE DBSPVD
C***PURPOSE Calculate the value and all derivatives of order less than
C NDERIV of all basis functions which do not vanish at X.
C***LIBRARY SLATEC
C***CATEGORY E3, K6
C***TYPE DOUBLE PRECISION (BSPVD-S, DBSPVD-D)
C***KEYWORDS DIFFERENTIATION OF B-SPLINE, EVALUATION OF B-SPLINE
C***AUTHOR Amos, D. E., (SNLA)
C***DESCRIPTION
C
C Written by Carl de Boor and modified by D. E. Amos
C
C Abstract **** a double precision routine ****
C
C DBSPVD is the BSPLVD routine of the reference.
C
C DBSPVD calculates the value and all derivatives of order
C less than NDERIV of all basis functions which do not
C (possibly) vanish at X. ILEFT is input such that
C T(ILEFT) .LE. X .LT. T(ILEFT+1). A call to INTRV(T,N+1,X,
C ILO,ILEFT,MFLAG) will produce the proper ILEFT. The output of
C DBSPVD is a matrix VNIKX(I,J) of dimension at least (K,NDERIV)
C whose columns contain the K nonzero basis functions and
C their NDERIV-1 right derivatives at X, I=1,K, J=1,NDERIV.
C These basis functions have indices ILEFT-K+I, I=1,K,
C K .LE. ILEFT .LE. N. The nonzero part of the I-th basis
C         function lies in (T(I),T(I+K)), I=1,N.
C
C If X=T(ILEFT+1) then VNIKX contains left limiting values
C (left derivatives) at T(ILEFT+1). In particular, ILEFT = N
C produces left limiting values at the right end point
C X=T(N+1). To obtain left limiting values at T(I), I=K+1,N+1,
C set X= next lower distinct knot, call INTRV to get ILEFT,
C set X=T(I), and then call DBSPVD.
C
C Description of Arguments
C Input T,X are double precision
C T - knot vector of length N+K, where
C N = number of B-spline basis functions
C N = sum of knot multiplicities-K
C K - order of the B-spline, K .GE. 1
C NDERIV - number of derivatives = NDERIV-1,
C 1 .LE. NDERIV .LE. K
C X - argument of basis functions,
C T(K) .LE. X .LE. T(N+1)
C ILEFT - largest integer such that
C T(ILEFT) .LE. X .LT. T(ILEFT+1)
C LDVNIK - leading dimension of matrix VNIKX
C
C Output VNIKX,WORK are double precision
C VNIKX - matrix of dimension at least (K,NDERIV) contain-
C ing the nonzero basis functions at X and their
C derivatives columnwise.
C WORK - a work vector of length (K+1)*(K+2)/2
C
C Error Conditions
C Improper input is a fatal error
C
C***REFERENCES Carl de Boor, Package for calculating with B-splines,
C SIAM Journal on Numerical Analysis 14, 3 (June 1977),
C pp. 441-472.
C***ROUTINES CALLED DBSPVN, XERMSG
C***REVISION HISTORY (YYMMDD)
C 800901 DATE WRITTEN
C 890531 Changed all specific intrinsics to generic. (WRB)
C 890831 Modified array declarations. (WRB)
C 890831 REVISION DATE from Version 3.2
C 891214 Prologue converted to Version 4.0 format. (BAB)
C 900315 CALLs to XERROR changed to CALLs to XERMSG. (THJ)
C 920501 Reformatted the REFERENCES section. (WRB)
C***END PROLOGUE DBSPVD
C
      INTEGER I,IDERIV,ILEFT,IPKMD,IWORK,J,JJ,JLOW,JM,JP1MID,K,KMD,
     1 KP1, L, LDUMMY, M, MHIGH, NDERIV
DOUBLE PRECISION FACTOR, FKMD, T, V, VNIKX, WORK, X
C DIMENSION T(ILEFT+K), WORK((K+1)*(K+2)/2)
C A(I,J) = WORK(I+J*(J+1)/2), I=1,J+1 J=1,K-1
C     A(I,K) = WORK(I+K*(K-1)/2)   I=1,K
C WORK(1) AND WORK((K+1)*(K+2)/2) ARE NOT USED.
DIMENSION T(*), VNIKX(LDVNIK,*), WORK(*)
C***FIRST EXECUTABLE STATEMENT DBSPVD
IF(K.LT.1) GO TO 200
IF(NDERIV.LT.1 .OR. NDERIV.GT.K) GO TO 205
IF(LDVNIK.LT.K) GO TO 210
IDERIV = NDERIV
KP1 = K + 1
JJ = KP1 - IDERIV
CALL DBSPVN(T, JJ, K, 1, X, ILEFT, VNIKX, WORK, IWORK)
IF (IDERIV.EQ.1) GO TO 100
MHIGH = IDERIV
DO 20 M=2,MHIGH
JP1MID = 1
DO 10 J=IDERIV,K
VNIKX(J,IDERIV) = VNIKX(JP1MID,1)
JP1MID = JP1MID + 1
10 CONTINUE
IDERIV = IDERIV - 1
JJ = KP1 - IDERIV
CALL DBSPVN(T, JJ, K, 2, X, ILEFT, VNIKX, WORK, IWORK)
20 CONTINUE
C
JM = KP1*(KP1+1)/2
DO 30 L = 1,JM
WORK(L) = 0.0D0
30 CONTINUE
C A(I,I) = WORK(I*(I+3)/2) = 1.0 I = 1,K
L = 2
J = 0
DO 40 I = 1,K
J = J + L
WORK(J) = 1.0D0
L = L + 1
40 CONTINUE
KMD = K
DO 90 M=2,MHIGH
KMD = KMD - 1
FKMD = KMD
I = ILEFT
J = K
JJ = J*(J+1)/2
JM = JJ - J
DO 60 LDUMMY=1,KMD
IPKMD = I + KMD
FACTOR = FKMD/(T(IPKMD)-T(I))
DO 50 L=1,J
WORK(L+JJ) = (WORK(L+JJ)-WORK(L+JM))*FACTOR
50 CONTINUE
I = I - 1
J = J - 1
JJ = JM
JM = JM - J
60 CONTINUE
C
DO 80 I=1,K
V = 0.0D0
JLOW = MAX(I,M)
JJ = JLOW*(JLOW+1)/2
DO 70 J=JLOW,K
V = WORK(I+JJ)*VNIKX(J,M) + V
JJ = JJ + J + 1
70 CONTINUE
VNIKX(I,M) = V
80 CONTINUE
90 CONTINUE
100 RETURN
C
C
200 CONTINUE
CALL XERMSG ('SLATEC', 'DBSPVD', 'K DOES NOT SATISFY K.GE.1', 2,
+ 1)
RETURN
205 CONTINUE
CALL XERMSG ('SLATEC', 'DBSPVD',
+ 'NDERIV DOES NOT SATISFY 1.LE.NDERIV.LE.K', 2, 1)
RETURN
210 CONTINUE
CALL XERMSG ('SLATEC', 'DBSPVD',
+ 'LDVNIK DOES NOT SATISFY LDVNIK.GE.K', 2, 1)
RETURN
END
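C
C A minimal driver sketch (an editorial illustration, not part of the
C original SLATEC file): evaluate the K=4 cubic B-spline basis
C functions and their first derivatives at X=1.5D0 on a simple knot
C vector, using the companion SLATEC routine DINTRV to obtain ILEFT
C exactly as the prologue above describes.
      PROGRAM DBSDRV
      INTEGER I, ILEFT, ILO, J, MFLAG
      DOUBLE PRECISION T(10), VNIKX(4,2), WORK(15), X
C ten knots, order K=4, so N = 10-4 = 6 basis functions
      DATA T/4*0.0D0, 1.0D0, 2.0D0, 4*3.0D0/
      X = 1.5D0
      ILO = 1
C pass N+1 knots to DINTRV so that T(ILEFT) .LE. X .LT. T(ILEFT+1)
      CALL DINTRV(T, 7, X, ILO, ILEFT, MFLAG)
C WORK must hold (K+1)*(K+2)/2 = 15 elements; LDVNIK = K = 4
      CALL DBSPVD(T, 4, 2, X, ILEFT, 4, VNIKX, WORK)
C VNIKX(I,J) holds basis function ILEFT-K+I (J=1) and its first
C derivative (J=2) at X
      DO 10 I = 1,4
         WRITE (*,*) (VNIKX(I,J), J=1,2)
   10 CONTINUE
      END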
|
{"hexsha": "99a8fba2d92aafad37a40294f1cd4070724dcc65", "size": 5744, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "external/SLATEC/src/dbspvd.f", "max_stars_repo_name": "ygeorgi/MESS", "max_stars_repo_head_hexsha": "42db490295b08193dfc37496489467ccd2e5b6ae", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-09T05:03:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-26T10:20:02.000Z", "max_issues_repo_path": "external/SLATEC/src/dbspvd.f", "max_issues_repo_name": "ygeorgi/MESS", "max_issues_repo_head_hexsha": "42db490295b08193dfc37496489467ccd2e5b6ae", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-04-28T17:09:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-01T16:24:43.000Z", "max_forks_repo_path": "external/SLATEC/src/dbspvd.f", "max_forks_repo_name": "ygeorgi/MESS", "max_forks_repo_head_hexsha": "42db490295b08193dfc37496489467ccd2e5b6ae", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-06-22T07:53:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-10T19:59:53.000Z", "avg_line_length": 35.2392638037, "max_line_length": 72, "alphanum_fraction": 0.561281337, "num_tokens": 2086}
|
DOUBLE PRECISION FUNCTION DT_SANO(Ecm)
C***********************************************************************
C This version dated 31.07.96 is written by S. Roesler *
C***********************************************************************
IMPLICIT NONE
DOUBLE PRECISION afra1 , afra2 , Ecm , ecmano , fraano , ONE ,
& rate , sighrd , TINY10 , TINY14 , TWO , ZERO
INTEGER ie , j1 , j2 , NE
SAVE
INCLUDE 'inc/dtflka'
PARAMETER (TINY10=1.0D-10,TINY14=1.0D-14,ZERO=0.0D0,ONE=1.0D0,
& TWO=2.0D0)
PARAMETER (NE=8)
C VDM parameter for photon-nucleus interactions
INCLUDE 'inc/dtvdmp'
C properties of interacting particles
INCLUDE 'inc/dtprta'
DIMENSION ecmano(NE) , fraano(NE) , sighrd(NE)
DATA ecmano/0.200D+02 , 0.500D+02 , 0.100D+03 , 0.200D+03 ,
& 0.500D+03 , 0.100D+04 , 0.200D+04 , 0.500D+04/
C fixed cut (3 GeV/c)
DATA fraano/0.085D+00 , 0.114D+00 , 0.105D+00 , 0.091D+00 ,
& 0.073D+00 , 0.062D+00 , 0.054D+00 , 0.042D+00/
DATA sighrd/4.0099D-04 , 3.3104D-03 , 1.1905D-02 , 3.6435D-02 ,
& 1.3493D-01 , 3.3086D-01 , 7.6255D-01 , 2.1319D+00/
C  running cut (based on obsolete Phojet calculations, bugs..)
C DATA FRAANO /
C & 0.251E+00,0.313E+00,0.279E+00,0.239E+00,0.186E+00,
C & 0.167E+00,0.150E+00,0.131E+00
C & /
C DATA SIGHRD /
C & 6.6569E-04,4.4949E-03,1.4837E-02,4.1466E-02,1.5071E-01,
C & 2.5736E-01,4.5593E-01,8.2550E-01
C & /
DT_SANO = ZERO
IF ( (ISHad(2).NE.1) .OR. (IJProj.NE.7) ) RETURN
j1 = 0
j2 = 0
rate = ONE
IF ( Ecm.GE.ecmano(NE) ) THEN
j1 = NE
j2 = NE
ELSE IF ( Ecm.GT.ecmano(1) ) THEN
DO ie = 2 , NE
IF ( Ecm.LT.ecmano(ie) ) THEN
j1 = ie - 1
j2 = ie
rate = LOG10(Ecm/ecmano(j1))/LOG10(ecmano(j2)/ecmano(j1))
GOTO 100
END IF
END DO
END IF
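C  interpolate the tabulated product fraano*sighrd linearly in
C  log10(sigma) versus log10(Ecm), i.e.
C     sano = 10**( log10(f1*s1) + rate*(log10(f2*s2)-log10(f1*s1)) )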
100 IF ( (j1.GT.0) .AND. (j2.GT.0) ) THEN
afra1 = LOG10(MAX(fraano(j1)*sighrd(j1),TINY14))
afra2 = LOG10(MAX(fraano(j2)*sighrd(j2),TINY14))
DT_SANO = 10.0D0**(afra1+rate*(afra2-afra1))
END IF
END FUNCTION
|
{"hexsha": "5d49f82eb7b802b53c7000596721b221664e5f9e", "size": 2348, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/dpmjet/DT_SANO.f", "max_stars_repo_name": "pzhristov/DPMJET", "max_stars_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-06-15T01:59:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-01T08:39:13.000Z", "max_issues_repo_path": "src/dpmjet/DT_SANO.f", "max_issues_repo_name": "pzhristov/DPMJET", "max_issues_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-15T09:53:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T20:52:28.000Z", "max_forks_repo_path": "src/dpmjet/DT_SANO.f", "max_forks_repo_name": "pzhristov/DPMJET", "max_forks_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-07-05T02:44:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T20:49:05.000Z", "avg_line_length": 34.5294117647, "max_line_length": 72, "alphanum_fraction": 0.4914821124, "num_tokens": 942}
|
#!/usr/bin/python
#coding:utf-8
# ***************************************************************
# Plot the normal (Gaussian) distribution curve
# author: pruce
# email: 1756983926@qq.com
# date: 20180919
# ***************************************************************
import numpy as np
import matplotlib.pyplot as plt
def normDistribution():
    mu, sigma, num_bins = 0, 1, 50
    x = mu + sigma * np.random.randn(1000000)
    # histogram of the normally distributed samples ('density' replaces the
    # 'normed' keyword that was removed from matplotlib)
    n, bins, patches = plt.hist(x, num_bins, density=True, facecolor='black', alpha=0.5)
    # overlay the analytic pdf: exp(-(x-mu)^2/(2*sigma^2)) / (sigma*sqrt(2*pi))
    # (mlab.normpdf is gone from recent matplotlib, so compute it with numpy)
    y = np.exp(-(bins - mu)**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi))
    plt.plot(bins, y, 'r--')
plt.xlabel('Expectation')
plt.ylabel('Probability')
plt.title('$N(0,1)$')
plt.subplots_adjust(left = 0.15)
plt.show()
normDistribution()
|
{"hexsha": "81103028631cb7f3e389a5b89146887745b0d166", "size": 776, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/scripts/statistics/PlotDistribution.py", "max_stars_repo_name": "prucehuang/machine-learning-introduction", "max_stars_repo_head_hexsha": "c543548b9f0f49479bcdf0c8e7b0098c4b7b0cac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/scripts/statistics/PlotDistribution.py", "max_issues_repo_name": "prucehuang/machine-learning-introduction", "max_issues_repo_head_hexsha": "c543548b9f0f49479bcdf0c8e7b0098c4b7b0cac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/scripts/statistics/PlotDistribution.py", "max_forks_repo_name": "prucehuang/machine-learning-introduction", "max_forks_repo_head_hexsha": "c543548b9f0f49479bcdf0c8e7b0098c4b7b0cac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7586206897, "max_line_length": 91, "alphanum_fraction": 0.5309278351, "include": true, "reason": "import numpy", "num_tokens": 232}
|
# -*- coding: utf-8 -*-
import sys
import numpy
from HiddenLayer import HiddenLayer
from LogisticRegression import LogisticRegression
from utils import *
class Dropout(object):
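    """
    Multi-layer perceptron trained with dropout: each training epoch draws
    fresh binary masks that zero hidden activations (assuming
    HiddenLayer.dropout keeps a unit with probability 1 - p), and at
    prediction time the learned weights are scaled by (1 - p_dropout) to
    approximate averaging over the ensemble of thinned sub-networks.
    """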
def __init__(self, input, label,\
n_in, hidden_layer_sizes, n_out,\
rng=None, activation=ReLU):
self.x = input
self.y = label
self.hidden_layers = []
self.n_layers = len(hidden_layer_sizes)
if rng is None:
rng = numpy.random.RandomState(1234)
assert self.n_layers > 0
# construct multi-layer
for i in xrange(self.n_layers):
# layer_size
if i == 0:
input_size = n_in
else:
input_size = hidden_layer_sizes[i-1]
# layer_input
if i == 0:
layer_input = self.x
else:
layer_input = self.hidden_layers[-1].output()
# construct hidden_layer
hidden_layer = HiddenLayer(input=layer_input,
n_in=input_size,
n_out=hidden_layer_sizes[i],
rng=rng,
activation=activation)
self.hidden_layers.append(hidden_layer)
# layer for ouput using Logistic Regression (softmax)
self.log_layer = LogisticRegression(input=self.hidden_layers[-1].output(),
label=self.y,
n_in=hidden_layer_sizes[-1],
n_out=n_out)
def train(self, epochs=5000, dropout=True, p_dropout=0.5, rng=None):
for epoch in xrange(epochs):
dropout_masks = [] # create different masks in each training epoch
# forward hidden_layers
for i in xrange(self.n_layers):
if i == 0:
layer_input = self.x
layer_input = self.hidden_layers[i].forward(input=layer_input)
if dropout == True:
mask = self.hidden_layers[i].dropout(input=layer_input, p=p_dropout, rng=rng)
layer_input *= mask
dropout_masks.append(mask)
# forward & backward log_layer
self.log_layer.train(input=layer_input)
# backward hidden_layers
for i in reversed(xrange(0, self.n_layers)):
if i == self.n_layers-1:
prev_layer = self.log_layer
else:
prev_layer = self.hidden_layers[i+1]
if dropout == True:
self.hidden_layers[i].backward(prev_layer=prev_layer, dropout=True, mask=dropout_masks[i])
else:
self.hidden_layers[i].backward(prev_layer=prev_layer)
    def predict(self, x, dropout=True, p_dropout=0.5):
        layer_input = x
        for i in xrange(self.n_layers):
            if dropout == True:
                # scale the weights at test time, then restore them afterwards
                # so repeated predict() calls don't keep shrinking the trained W
                W_orig = self.hidden_layers[i].W
                self.hidden_layers[i].W = (1 - p_dropout) * W_orig
            layer_input = self.hidden_layers[i].output(input=layer_input)
            if dropout == True:
                self.hidden_layers[i].W = W_orig
        return self.log_layer.predict(layer_input)
def test_dropout(n_epochs=5000, dropout=True, p_dropout=0.5):
x = numpy.array([[0, 0],
[0, 1],
[1, 0],
[1, 1]])
y = numpy.array([[0, 1],
[1, 0],
[1, 0],
[0, 1]])
rng = numpy.random.RandomState(123)
# construct Dropout MLP
classifier = Dropout(input=x, label=y, \
n_in=2, hidden_layer_sizes=[10, 10], n_out=2, \
rng=rng, activation=ReLU)
# train XOR
classifier.train(epochs=n_epochs, dropout=dropout, \
p_dropout=p_dropout, rng=rng)
# test
print classifier.predict(x)
if __name__ == "__main__":
test_dropout()
|
{"hexsha": "ba991169103263e920a5d3f4e9f27408cc5e9d03", "size": 4094, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/Dropout.py", "max_stars_repo_name": "khalane1221/DeepLearning", "max_stars_repo_head_hexsha": "34448a8fff2309bf85bf29dcb2276a6b4d9e29f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2567, "max_stars_repo_stars_event_min_datetime": "2015-01-01T05:21:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T00:55:20.000Z", "max_issues_repo_path": "python/Dropout.py", "max_issues_repo_name": "magiel1234/DeepLearning", "max_issues_repo_head_hexsha": "739dfd1d7919c328e0d3b8129855c2ad71b80036", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2015-02-13T21:19:05.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-24T02:28:05.000Z", "max_forks_repo_path": "python/Dropout.py", "max_forks_repo_name": "magiel1234/DeepLearning", "max_forks_repo_head_hexsha": "739dfd1d7919c328e0d3b8129855c2ad71b80036", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1280, "max_forks_repo_forks_event_min_datetime": "2015-01-03T14:17:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T09:06:09.000Z", "avg_line_length": 28.6293706294, "max_line_length": 110, "alphanum_fraction": 0.5051294577, "include": true, "reason": "import numpy", "num_tokens": 827}
|
[STATEMENT]
lemma shadow_root_delete_get_6 [simp]: "delete\<^sub>S\<^sub>h\<^sub>a\<^sub>d\<^sub>o\<^sub>w\<^sub>R\<^sub>o\<^sub>o\<^sub>t shadow_root_ptr h = Some h' \<Longrightarrow>
get\<^sub>C\<^sub>h\<^sub>a\<^sub>r\<^sub>a\<^sub>c\<^sub>t\<^sub>e\<^sub>r\<^sub>D\<^sub>a\<^sub>t\<^sub>a character_data_ptr h' = get\<^sub>C\<^sub>h\<^sub>a\<^sub>r\<^sub>a\<^sub>c\<^sub>t\<^sub>e\<^sub>r\<^sub>D\<^sub>a\<^sub>t\<^sub>a character_data_ptr h"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. delete\<^sub>S\<^sub>h\<^sub>a\<^sub>d\<^sub>o\<^sub>w\<^sub>R\<^sub>o\<^sub>o\<^sub>t shadow_root_ptr h = Some h' \<Longrightarrow> get character_data_ptr h' = get character_data_ptr h
[PROOF STEP]
by(simp add: get\<^sub>C\<^sub>h\<^sub>a\<^sub>r\<^sub>a\<^sub>c\<^sub>t\<^sub>e\<^sub>r\<^sub>D\<^sub>a\<^sub>t\<^sub>a_def)
|
{"llama_tokens": 359, "file": "Shadow_SC_DOM_classes_ShadowRootClass", "length": 1}
|
# Integer Functions
# TODO: vector types
const generic_integer_types = [Cchar, Cuchar, Cshort, Cushort, Cint, Cuint, Clong, Culong]
# generically typed
for gentype in generic_integer_types
@eval begin
@device_override Base.abs(x::$gentype) = @builtin_ccall("abs", $gentype, ($gentype,), x)
@device_function abs_diff(x::$gentype, y::$gentype) = @builtin_ccall("abs_diff", $gentype, ($gentype, $gentype), x, y)
@device_function add_sat(x::$gentype, y::$gentype) = @builtin_ccall("add_sat", $gentype, ($gentype, $gentype), x, y)
@device_function hadd(x::$gentype, y::$gentype) = @builtin_ccall("hadd", $gentype, ($gentype, $gentype), x, y)
@device_function rhadd(x::$gentype, y::$gentype) = @builtin_ccall("rhadd", $gentype, ($gentype, $gentype), x, y)
@device_override Base.clamp(x::$gentype, minval::$gentype, maxval::$gentype) = @builtin_ccall("clamp", $gentype, ($gentype, $gentype, $gentype), x, minval, maxval)
@device_function clz(x::$gentype) = @builtin_ccall("clz", $gentype, ($gentype,), x)
@device_function ctz(x::$gentype) = @builtin_ccall("ctz", $gentype, ($gentype,), x)
@device_function mad_hi(a::$gentype, b::$gentype, c::$gentype) = @builtin_ccall("mad_hi", $gentype, ($gentype, $gentype, $gentype), a, b, c)
@device_function mad_sat(a::$gentype, b::$gentype, c::$gentype) = @builtin_ccall("mad_sat", $gentype, ($gentype, $gentype, $gentype), a, b, c)
# XXX: these definitions introduce ambiguities
#@device_override Base.max(x::$gentype, y::$gentype) = @builtin_ccall("max", $gentype, ($gentype, $gentype), x, y)
#@device_override Base.min(x::$gentype, y::$gentype) = @builtin_ccall("min", $gentype, ($gentype, $gentype), x, y)
@device_function mul_hi(x::$gentype, y::$gentype) = @builtin_ccall("mul_hi", $gentype, ($gentype, $gentype), x, y)
@device_function rotate(v::$gentype, i::$gentype) = @builtin_ccall("rotate", $gentype, ($gentype, $gentype), v, i)
@device_function sub_sat(x::$gentype, y::$gentype) = @builtin_ccall("sub_sat", $gentype, ($gentype, $gentype), x, y)
@device_function popcount(x::$gentype) = @builtin_ccall("popcount", $gentype, ($gentype,), x)
@device_function mad24(x::$gentype, y::$gentype, z::$gentype) = @builtin_ccall("mad24", $gentype, ($gentype, $gentype, $gentype), x, y, z)
@device_function mul24(x::$gentype, y::$gentype) = @builtin_ccall("mul24", $gentype, ($gentype, $gentype), x, y)
end
end
# specifically typed
@device_function upsample(hi::Cchar, lo::Cuchar) = @builtin_ccall("upsample", Cshort, (Cchar, Cuchar), hi, lo)
@device_function upsample(hi::Cuchar, lo::Cuchar) = @builtin_ccall("upsample", Cushort, (Cuchar, Cuchar), hi, lo)
@device_function upsample(hi::Cshort, lo::Cushort) = @builtin_ccall("upsample", Cint, (Cshort, Cushort), hi, lo)
@device_function upsample(hi::Cushort, lo::Cushort) = @builtin_ccall("upsample", Cuint, (Cushort, Cushort), hi, lo)
@device_function upsample(hi::Cint, lo::Cuint) = @builtin_ccall("upsample", Clong, (Cint, Cuint), hi, lo)
@device_function upsample(hi::Cuint, lo::Cuint) = @builtin_ccall("upsample", Culong, (Cuint, Cuint), hi, lo)
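
# For reference, `upsample(hi, lo)` packs `hi` into the upper half and `lo`
# into the lower half of the doubly-wide result; a plain-Julia sketch of the
# Cchar/Cuchar case (an illustration only, not one of the OpenCL builtins):
#   up(hi::Int8, lo::UInt8) = (Int16(hi) << 8) | Int16(lo)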
|
{"hexsha": "e8c825118de10cdf34059ec375369d7502d4bd70", "size": 2968, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/device/opencl/integer.jl", "max_stars_repo_name": "troels/oneAPI.jl", "max_stars_repo_head_hexsha": "3aa4fe9383b525c5846c1254db981d431d058810", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 104, "max_stars_repo_stars_event_min_datetime": "2020-05-13T20:22:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T18:48:49.000Z", "max_issues_repo_path": "src/device/opencl/integer.jl", "max_issues_repo_name": "JuliaGPU/oneL0.jl", "max_issues_repo_head_hexsha": "9a43fb6fbf0ef20ffba34daef4b7ff44e7ed01c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 132, "max_issues_repo_issues_event_min_datetime": "2020-07-19T00:24:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T20:00:50.000Z", "max_forks_repo_path": "src/device/opencl/integer.jl", "max_forks_repo_name": "JuliaGPU/oneL0.jl", "max_forks_repo_head_hexsha": "9a43fb6fbf0ef20ffba34daef4b7ff44e7ed01c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-11-06T01:19:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T22:26:50.000Z", "avg_line_length": 54.962962963, "max_line_length": 163, "alphanum_fraction": 0.6937331536, "num_tokens": 1012}
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#!/usr/bin/env python
import glob
import os
import numpy as np
import torch
from setuptools import find_packages
from setuptools import setup, Extension
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
requirements = ["torch", "torchvision"]
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "model", "csrc")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"model._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
),
Extension(
'pycocotools._mask',
sources=['pycocotools/maskApi.c', 'pycocotools/_mask.pyx'],
include_dirs = [numpy_include, 'pycocotools'],
            # distutils expects a flat list here; the original passed a dict
            # with a 'gcc' key, which a plain Extension silently ignores.
            extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'],
),
]
return ext_modules
setup(
name="faster_rcnn",
version="0.1",
description="object detection in pytorch",
packages=find_packages(exclude=("configs", "tests",)),
# install_requires=requirements,
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
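# Typical invocation (a hedged note, not part of the original file):
#   python setup.py build_ext --inplace
# CUDA sources are compiled only when torch.cuda.is_available() and
# CUDA_HOME is set, per get_extensions() above.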
|
{"hexsha": "0865f3982d1fbc949b352edfe37f363db17ff180", "size": 2443, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/setup.py", "max_stars_repo_name": "Yoo-Youngjae/One-Shot-Object-Detection", "max_stars_repo_head_hexsha": "c560a3dfb042776854bb928682dbbf545e2cd1bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 376, "max_stars_repo_stars_event_min_datetime": "2019-11-29T02:26:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T04:07:29.000Z", "max_issues_repo_path": "lib/setup.py", "max_issues_repo_name": "Yoo-Youngjae/One-Shot-Object-Detection", "max_issues_repo_head_hexsha": "c560a3dfb042776854bb928682dbbf545e2cd1bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2019-12-03T08:05:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-15T06:33:05.000Z", "max_forks_repo_path": "lib/setup.py", "max_forks_repo_name": "Yoo-Youngjae/One-Shot-Object-Detection", "max_forks_repo_head_hexsha": "c560a3dfb042776854bb928682dbbf545e2cd1bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 78, "max_forks_repo_forks_event_min_datetime": "2019-11-29T05:20:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T12:40:34.000Z", "avg_line_length": 29.0833333333, "max_line_length": 78, "alphanum_fraction": 0.6614817847, "include": true, "reason": "import numpy", "num_tokens": 560}
|
module ProgressiveAligner
push!(LOAD_PATH, dirname(@__FILE__()))
#export DataReader,
# DataWriter,
# ProfileAligner,
# Clustering
include("DataReader.jl")
include("DataWriter.jl")
include("ProfileAligner.jl")
include("Clustering.jl")
end # module
|
{"hexsha": "5fd813c48ab0c199a5658b9dc31a4143b914d8c4", "size": 282, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ProgressiveAligner.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/ProgressiveAligner.jl-6b90eaca-7ca7-5d1d-b675-a9023a889a1d", "max_stars_repo_head_hexsha": "0d08a3d32b98fb31b07c7908b54d6bad96f285b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2015-05-21T19:46:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-01T21:44:26.000Z", "max_issues_repo_path": "src/ProgressiveAligner.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/ProgressiveAligner.jl-6b90eaca-7ca7-5d1d-b675-a9023a889a1d", "max_issues_repo_head_hexsha": "0d08a3d32b98fb31b07c7908b54d6bad96f285b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-09-04T18:27:13.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T08:32:50.000Z", "max_forks_repo_path": "src/ProgressiveAligner.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/ProgressiveAligner.jl-6b90eaca-7ca7-5d1d-b675-a9023a889a1d", "max_forks_repo_head_hexsha": "0d08a3d32b98fb31b07c7908b54d6bad96f285b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2015-10-25T00:05:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T19:06:28.000Z", "avg_line_length": 20.1428571429, "max_line_length": 40, "alphanum_fraction": 0.6773049645, "num_tokens": 78}
|
# Best-effort reconstruction of the imports this legacy module relies on;
# remaining names (PDBEnsemble, AtomMap, mapOntoChainByAlignment, ...) are
# assumed to come from the surrounding ProDy package namespace.
import timeit
from os import getcwd, remove
from os.path import join, normpath, relpath
import numpy as np
from prody import LOGGER, SETTINGS
from prody.utilities import gunzip, sympath, showFigure
def msaeye(msa, unique, turbo):
tic1 = timeit.default_timer()
length = msa.shape[1]
number = msa.shape[0]
# number = 5
array = np.eye(int(number))
seqs = []
for i in range(number):
seqs.append(msa[i,:])
iseq = np.zeros((number, length), dtype=int)
for i in range(0,number-1):
if i == 0:
for k in range(length):
if ord(seqs[i][k])>90:
iseq[i,k]=ord(seqs[i][k])-96 if ord(seqs[i][k])-96 > 0 \
and ord(seqs[i][k])-96 < 26 else 0
else:
iseq[i,k]=ord(seqs[i][k])-64 if ord(seqs[i][k])-64 > 0 \
and ord(seqs[i][k])-64 < 26 else 0
for j in range(i+1,number):
score=0.
ncols=0.
for k in range(length):
if ord(seqs[j][k])>90:
iseq[j,k]=ord(seqs[j][k])-96 if ord(seqs[j][k])-96 > 0 \
and ord(seqs[j][k])-96 < 26 else 0
else:
iseq[j,k]=ord(seqs[j][k])-64 if ord(seqs[j][k])-64 > 0 and ord(seqs[j][k])-64 < 26 else 0
if iseq[i,k] or iseq[j,k]:
ncols += 1
if iseq[i,k]==iseq[j,k]:
score+=1
array[i,j]=float(score)/ncols
array[j,i]=array[i,j]
# print iseq[0]
# print seqs[0]
# raw_input()
else:
for j in range(i+1,number):
score=0.
ncols=0.
for k in range(length):
if iseq[i,k] or iseq[j,k]:
ncols += 1
if iseq[i,k]==iseq[j,k]:
score+=1
array[i,j]= float(score)/ncols#float(sum((iseq[i] == iseq[j])*(iseq[i]*iseq[j]!=0))) / sum(iseq[i]*iseq[j]!=0)
array[j,i]=array[i,j]
toc1 = timeit.default_timer()
elapsed1 = toc1 - tic1
LOGGER.debug('Elapsed: %4.2fs'%elapsed1)
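# Hedged aside (not part of the original module): the same pairwise identity
# matrix can be computed without the inner loops, assuming `iseq` is the
# (number, length) integer-encoded MSA built above:
#   nz = (iseq[:, None, :] != 0) | (iseq[None, :, :] != 0)
#   eq = (iseq[:, None, :] == iseq[None, :, :]) & nz
#   identity = eq.sum(-1) / nz.sum(-1)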
def buildDaliEnsemble(PDBs, record):
daliInfo = record._alignPDB
n_confs = len(PDBs)
ref_pdb_ca = PDBs[0]
ref_chain = list(ref_pdb_ca.getHierView().iterChains())[0]
ref_indices_set = set(range(len(ref_chain)))
ensemble = PDBEnsemble('Dali ensemble - ' + record.getTitle())
ensemble.setAtoms(ref_chain)
ensemble.setCoords(ref_chain)
LOGGER.progress('Building PDB ensemble for {0} conformations from Dali...'
.format(n_confs), n_confs, '_prody_buildDaliEnsemble')
for i, pdb in enumerate(PDBs):
pdb_chain = pdb.getTitle()[:5]
temp_dict = daliInfo[pdb_chain]
sel_pdb_ca = PDBs[i]
map_ref = temp_dict['map_ref']
map_sel = temp_dict['map_sel']
dum_sel = list(ref_indices_set - set(map_ref))
atommap = AtomMap(sel_pdb_ca, indices=map_sel, mapping=map_ref, dummies=dum_sel)
ensemble.addCoordset(atommap, weights=atommap.getFlags('mapped'), degeneracy=True)
LOGGER.update(i, label='_prody_buildDaliEnsemble')
LOGGER.finish()
try:
ensemble.iterpose()
except:
LOGGER.warn('failed to iterpose the ensemble.')
return ensemble
def fetchCATH(filename, ftp_host=None, ftp_path=None, **kwargs):
"""Downloads CATH file via FTP."""
    if ftp_host is None:
        ftp_host = 'orengoftp.biochem.ucl.ac.uk'
    if ftp_path is None:
        ftp_path = '/cath/releases/daily-release/newest/'
from ftplib import FTP
output_folder = kwargs.pop('folder', None)
ftp_fn = filename
try:
ftp = FTP(ftp_host)
except Exception as error:
raise type(error)('FTP connection problem, potential reason: '
'no internet connectivity')
else:
success = 0
failure = 0
filenames = []
ftp.login('')
data = []
try:
ftp.cwd(ftp_path)
ftp.retrbinary('RETR ' + ftp_fn, data.append)
except Exception as error:
if ftp_fn in ftp.nlst():
LOGGER.warn('{0} download failed ({1}). It is '
'possible that you do not have rights to '
'download .gz files in the current network.'
.format(ftp_fn, str(error)))
else:
LOGGER.warn('{0} download failed. {1} does not exist '
'on {2}.'.format(ftp_fn, ftp_fn, ftp_host))
failure += 1
filenames.append(None)
else:
if len(data):
if output_folder is None:
output_folder = getcwd()
filename_full = join(output_folder, ftp_fn)
with open(filename_full, 'w+b') as pdbfile:
write = pdbfile.write
[write(block) for block in data]
filename_full = normpath(relpath(filename_full))
LOGGER.debug('{0} downloaded ({1})'
.format(ftp_fn, sympath(filename_full)))
success += 1
filenames.append(filename_full)
else:
LOGGER.warn('{0} download failed, reason unknown.'
.format(ftp_fn))
failure += 1
filenames.append(None)
ftp.quit()
def buildCATHNameDict(cath_file, iscompressed=True):
    """Returns a dictionary for CATH names with key of CATH ID."""
    if iscompressed:
        gunzip(cath_file, 'cath_b.names.temp')
        cath_file = 'cath_b.names.temp'
    cath_id2name = dict()
    with open(cath_file, 'r') as file_temp:
        for line in file_temp:
            ind_temp = line.find(' ')
            cath_id2name[line[:ind_temp]] = line[ind_temp:].strip()
    if iscompressed:
        remove(cath_file)
    return cath_id2name
def buildPDBChainCATHDict(cath_file, iscompressed=True):
    """Returns a dictionary for CATH info (ID and version) with key of PDB chain."""
    if iscompressed:
        gunzip(cath_file, 'cath_b.all.temp')
        cath_file = 'cath_b.all.temp'
    cath_dict_temp = dict()
    cath_i_dict = dict()
    with open(cath_file, 'r') as file_temp:
        for line in file_temp:
            line = line.strip()
            if line != '':
                line_list = line.split(' ')
                cath_dict_temp[line_list[0]] = line_list[1:]
                key, value = line[0:5], line[5:7]
                if key in cath_i_dict:
                    cath_i_dict[key].append(value)
                else:
                    cath_i_dict[key] = [value]
    pdbChain2CATH = dict()
    for key, values in cath_i_dict.items():
        pdbChain2CATH[key] = []
        for v in values:
            pdbChain2CATH[key].append(cath_dict_temp[key+v])
    if iscompressed:
        remove(cath_file)
    return pdbChain2CATH
# ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/daily-release/newest/
# fetchCATH('cath-b-newest-names.gz')
# cath_id2name = buildCATHNameDict('cath-b-newest-names.gz')
# fetchCATH('cath-b-newest-all.gz')
# pdbChain2CATH = buildPDBChainCATHDict('cath-b-newest-all.gz')
def extend(model, nodes, atoms):
"""Returns mapping indices and an :class:`.AtomMap`."""
try:
n_atoms = model.numAtoms()
is3d = model.is3d()
except AttributeError:
raise ValueError('model must be an NMA instance')
try:
n_nodes = nodes.numAtoms()
i_nodes = nodes.iterAtoms()
except AttributeError:
raise ValueError('nodes must be an Atomic instance')
if n_atoms != n_nodes:
raise ValueError('atom numbers must be the same')
if not nodes in atoms:
raise ValueError('nodes must be a subset of atoms')
atom_indices = []
indices = []
get = HierView(atoms).getResidue
for i, node in enumerate(i_nodes):
res = get(node.getChid() or None, node.getResnum(),
node.getIcode() or None, node.getSegname() or None)
if res is None:
raise ValueError('atoms must contain a residue for all atoms')
atom_indices.append(res._getIndices())
if is3d:
indices.append(list(range(i*3, (i+1)*3)) * len(res))
else:
indices.append([i] * len(res))
atom_indices = np.concatenate(atom_indices)
indices = np.concatenate(indices)
try:
ag = atoms.getAtomGroup()
except AttributeError:
ag = atoms
atommap = AtomMap(ag, atom_indices, atoms.getACSIndex(),
title=str(atoms), intarrays=True)
return indices, atommap
def extendAtomicData(data, nodes, atoms):
"""Extend a coarse grained data obtained for *nodes* to *atoms*.
:arg data: any data array
:type data: :class:`~numpy.ndarray`
:arg nodes: a set of atoms that has been used
as nodes in data generation
:type nodes: :class:`.Atomic`
:arg atoms: atoms to be selected from
:type atoms: :class:`.Atomic`
"""
from collections import Counter
try:
data = np.asarray(data)
except:
raise TypeError('The data must be array-like.')
if not isinstance(nodes, Atomic):
raise TypeError('nodes must be an Atomic instance')
if not isinstance(atoms, Atomic):
raise TypeError('atoms must be an Atomic instance')
nnodes = nodes.numAtoms()
is3d = False
if len(data) != nnodes:
if data.shape[0] == nnodes * 3:
is3d = True
else:
raise ValueError('data and atoms must have the same size')
indices = nodes.getResindices()
if is3d:
indices = np.array([[i*3, i*3+1, i*3+2]
for i in indices]
).reshape(3*len(indices))
data_ext = []
resid_counter = Counter(atoms.getResindices())
for i in indices:
        # dict views are not indexable in Python 3; materialize as a list first
        data_ext.extend(list(resid_counter.values())[i] * [data[i]])
resid_selstr = ' '.join([str(resid) for resid in nodes.getResindices()])
rest = atoms.select('not resid {0}'.format(resid_selstr))
data_ext.extend(np.zeros(rest.numAtoms()))
return data_ext
def refineEnsemble(ens, lower=.5, upper=10.):
"""Refine a PDB ensemble based on RMSD criterions."""
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform
from collections import Counter
### calculate pairwise RMSDs ###
RMSD = ens.getRMSDs(pairwise=True)
# convert the RMSD table to the compressed form
v = squareform(RMSD)
### apply upper threshold ###
Z_upper = linkage(v, method='complete')
labels = fcluster(Z_upper, upper, criterion='distance')
most_common_label = Counter(labels).most_common(1)[0][0]
I = np.where(labels==most_common_label)[0]
### apply lower threshold ###
Z_lower = linkage(v, method='single')
labels = fcluster(Z_lower, lower, criterion='distance')
uniq_labels = np.unique(labels)
clusters = []
for label in uniq_labels:
indices = np.where(labels==label)[0]
clusters.append(indices)
J = np.ones(len(clusters), dtype=int) * -1
rmsd = None
for i, cluster in enumerate(clusters):
if len(cluster) > 0:
# find the conformations with the largest coverage
# (the weight of the ref should be 1)
            # use an ndarray so the elementwise comparison below works
            weights = np.array([ens[j].getWeights().sum() for j in cluster])
            js = np.where(weights == np.max(weights))[0]
# in the case where there are multiple structures with the same weight,
# the one with the smallest rmsd wrt the ens._coords is selected.
if len(js) > 1:
# rmsd is not calulated unless necessary for the sake of efficiency
rmsd = ens.getRMSDs() if rmsd is None else rmsd
j = js[np.argmin(rmsd[js])]
else:
j = js[0]
J[i] = cluster[j]
else:
J[i] = cluster[0]
### refine ensemble ###
K = np.intersect1d(I, J)
reens = ens[K]
return reens
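# Hedged illustration (not part of the original module) of the clustering
# step above: complete-linkage groups conformations whose pairwise RMSD
# stays under `upper`, e.g. for a toy 3x3 RMSD matrix:
#   from scipy.cluster.hierarchy import linkage, fcluster
#   from scipy.spatial.distance import squareform
#   RMSD = np.array([[0., 1., 9.], [1., 0., 9.], [9., 9., 0.]])
#   labels = fcluster(linkage(squareform(RMSD), method='complete'),
#                     10., criterion='distance')   # -> all in one cluster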
def showVarianceBar(mode_ensemble, highlights=None, **kwargs):
from matplotlib.pyplot import figure, gca, annotate, subplots_adjust, plot
from matplotlib.figure import Figure
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import Normalize, NoNorm
from matplotlib import cm, colors
fig = kwargs.pop('figure', None)
if isinstance(fig, Figure):
fig_num = fig.number
elif fig is None or isinstance(fig, (int, str)):
fig_num = fig
else:
raise TypeError('figure can be either an instance of matplotlib.figure.Figure '
'or a figure number.')
if SETTINGS['auto_show']:
if fig_num is None:
figure(figsize=(6, 2))
else:
figure(fig_num)
elif fig_num is not None:
figure(fig_num)
ax = gca()
# adjust layouts
box = ax.get_position()
_, _, _, height = box.bounds
ratio = 2.5
box.y1 = box.y0 + height/ratio
#box.y0 += height/7.
ax.set_position(box)
fract = kwargs.pop('fraction', True)
#defarrow = {'width':1, 'headwidth':2,
# 'facecolor':'black',
# 'headlength': 4}
defarrow = {'arrowstyle': '->'}
arrowprops = kwargs.pop('arrowprops', defarrow)
if fract:
sig = calcSignatureFractVariance(mode_ensemble)
else:
sig = mode_ensemble.getVariances()
variances = sig.getArray().sum(axis=1)
#meanVar = variances.mean()
#stdVar = variances.std()
#variances = (variances - meanVar)/stdVar
maxVar = variances.max()
minVar = variances.min()
cmap = kwargs.pop('cmap', 'jet')
norm = Normalize(vmin=minVar, vmax=maxVar)
cb = ColorbarBase(ax, cmap=cmap, norm=norm,
orientation='horizontal')
if not highlights:
highlights = []
indices = []; labels = []
ens_labels = mode_ensemble.getLabels()
for hl in highlights:
if isinstance(hl, str):
if not ens_labels:
raise TypeError('highlights should be a list of integers because '
'mode_ensemble has no label')
indices.append(ens_labels.index(hl))
labels.append(hl)
else:
try:
index = int(hl)
except:
raise TypeError('highlights should be a list of integers or strings')
indices.append(index)
if ens_labels:
labels.append(ens_labels[index])
else:
labels.append(str(index))
annotations = []
for i, label in zip(indices, labels):
x = norm(variances[i])
an = annotate(label, xy=(x, 1), xytext=(x, ratio), arrowprops=arrowprops)
annotations.append(an)
for i in range(len(variances)):
x = norm(variances[i])
plot([x, x], [0, 1], 'w')
cb.set_label('Variances')
if SETTINGS['auto_show']:
showFigure()
return cb, annotations
def mapChainByChain(atoms, target, **kwargs):
"""This function is similar to :func:`.mapOntoChain` but correspondence
of chains is found by their chain identifiers.
:arg atoms: atoms to be mapped onto *target*
:type atoms: :class:`.Atomic`
:arg target: reference structure for mapping
:type target: :class:`.Atomic`
:arg return_all: whether to return all mappings.
If False, only mappings for the first chain will be returned.
Default is **True**
:arg return_all: bool
:arg correspondence: chain IDs in atoms corresponding to those in ref
Default is to use the same chain IDs as in ref.
:type correspondence: str, list, dict
"""
mappings = []
if isinstance(target, AtomGroup):
chs_ref_ag = target.iterChains()
else:
chs_ref_ag = target.getAtomGroup().iterChains()
id_atm = atoms.getTitle()
id_ref = target.getTitle()
chs_atm = [chain for chain in atoms.getHierView().iterChains()]
chs_ref = [chain for chain in target.getHierView().iterChains()]
corr_input = kwargs.get('correspondence', None)
if isinstance(corr_input, dict):
correspondence = corr_input
elif corr_input is None:
correspondence = {}
elif isinstance(corr_input, str):
correspondence = {}
correspondence[atoms.getTitle()] = corr_input
else:
correspondence = {}
try:
correspondence[id_atm] = corr_input[0]
correspondence[id_ref] = corr_input[1]
except (IndexError, TypeError):
raise TypeError('correspondence should be a dict with keys being titles of atoms and ref, '
'and values are str indicating chID correspondences')
if not id_atm in correspondence:
correspondence[id_atm] = ''.join([chain.getChid() for chain in chs_atm])
if not id_ref in correspondence:
correspondence[id_ref] = ''.join([chain.getChid() for chain in chs_ref_ag])
corr_tar = correspondence[id_atm]
corr_ref = correspondence[id_ref]
for chain in chs_ref:
try:
i = corr_ref.index(chain.getChid())
chid = corr_tar[i]
except ValueError:
continue
for target_chain in chs_atm:
if target_chain.getChid() == chid:
mappings_ = mapOntoChainByAlignment(target_chain, chain, **kwargs)
if len(mappings_):
mappings.append(mappings_[0])
return mappings
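# Hedged usage sketch: map chains when the IDs differ between structures,
# e.g. chains 'AB' in `atoms` correspond to chains 'CD' in `target`:
#   mappings = mapChainByChain(atoms, target,
#                              correspondence={atoms.getTitle(): 'AB',
#                                              target.getTitle(): 'CD'})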
def _extend(self, arr, defval=0):
mask = self.mask#.copy()
if self.is3d():
mask = np.repeat(mask, 3)
n_true = np.sum(mask)
N = len(mask)
if arr.ndim == 1:
whole_array = np.empty(N, dtype=arr.dtype)
whole_array.fill(defval)
whole_array[mask] = arr[:n_true]
elif arr.ndim == 2:
n, m = arr.shape
whole_array = np.empty((N, m), dtype=arr.dtype)
whole_array.fill(defval)
#mask = np.expand_dims(mask, axis=1)
#mask = mask.repeat(m, axis=1)
whole_array[mask] = arr[:n_true, :]
else: # only developers can trigger this case
raise ValueError('arr can only be either 1D or 2D')
return whole_array
|
{"hexsha": "6a761edffb34f070692ecf5e4e83aa0783990663", "size": 20728, "ext": "py", "lang": "Python", "max_stars_repo_path": "prody/utilities/legacy.py", "max_stars_repo_name": "grandevelia/ProDy", "max_stars_repo_head_hexsha": "7c725640a94c16543423c0756388998cb86a97ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "prody/utilities/legacy.py", "max_issues_repo_name": "grandevelia/ProDy", "max_issues_repo_head_hexsha": "7c725640a94c16543423c0756388998cb86a97ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "prody/utilities/legacy.py", "max_forks_repo_name": "grandevelia/ProDy", "max_forks_repo_head_hexsha": "7c725640a94c16543423c0756388998cb86a97ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8140293638, "max_line_length": 126, "alphanum_fraction": 0.5602566577, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4944}
|
function value = r4_besi1e ( x )
%*****************************************************************************80
%
%% R4_BESI1E: exponentially scaled Bessel function I of order 1 of an R4 argument.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 29 September 2011
%
% Author:
%
% Original FORTRAN77 version by Wayne Fullerton.
% MATLAB version by John Burkardt.
%
% Reference:
%
% Wayne Fullerton,
% Portable Special Function Routines,
% in Portability of Numerical Software,
% edited by Wayne Cowell,
% Lecture Notes in Computer Science, Volume 57,
% Springer 1977,
% ISBN: 978-3-540-08446-4,
% LC: QA297.W65.
%
% Parameters:
%
% Input, real X, the argument.
%
% Output, real VALUE, the exponentially scaled Bessel function I
% of order 1 of X.
%
persistent ai1cs
persistent ai12cs
persistent bi1cs
persistent ntai1
persistent ntai12
persistent nti1
persistent xmin
persistent xsml
if ( isempty ( nti1 ) )
ai1cs = [ ...
-0.02846744181881479E+00, ...
-0.01922953231443221E+00, ...
-0.00061151858579437E+00, ...
-0.00002069971253350E+00, ...
0.00000858561914581E+00, ...
0.00000104949824671E+00, ...
-0.00000029183389184E+00, ...
-0.00000001559378146E+00, ...
0.00000001318012367E+00, ...
-0.00000000144842341E+00, ...
-0.00000000029085122E+00, ...
0.00000000012663889E+00, ...
-0.00000000001664947E+00, ...
-0.00000000000166665E+00, ...
0.00000000000124260E+00, ...
-0.00000000000027315E+00, ...
0.00000000000002023E+00, ...
0.00000000000000730E+00, ...
-0.00000000000000333E+00, ...
0.00000000000000071E+00, ...
-0.00000000000000006E+00 ]';
ai12cs = [ ...
0.02857623501828014E+00, ...
-0.00976109749136147E+00, ...
-0.00011058893876263E+00, ...
-0.00000388256480887E+00, ...
-0.00000025122362377E+00, ...
-0.00000002631468847E+00, ...
-0.00000000383538039E+00, ...
-0.00000000055897433E+00, ...
-0.00000000001897495E+00, ...
0.00000000003252602E+00, ...
0.00000000001412580E+00, ...
0.00000000000203564E+00, ...
-0.00000000000071985E+00, ...
-0.00000000000040836E+00, ...
-0.00000000000002101E+00, ...
0.00000000000004273E+00, ...
0.00000000000001041E+00, ...
-0.00000000000000382E+00, ...
-0.00000000000000186E+00, ...
0.00000000000000033E+00, ...
0.00000000000000028E+00, ...
-0.00000000000000003E+00 ]';
bi1cs = [ ...
-0.001971713261099859E+00, ...
0.40734887667546481E+00, ...
0.034838994299959456E+00, ...
0.001545394556300123E+00, ...
0.000041888521098377E+00, ...
0.000000764902676483E+00, ...
0.000000010042493924E+00, ...
0.000000000099322077E+00, ...
0.000000000000766380E+00, ...
0.000000000000004741E+00, ...
0.000000000000000024E+00 ]';
nti1 = r4_inits ( bi1cs, 11, 0.1 * r4_mach ( 3 ) );
ntai1 = r4_inits ( ai1cs, 21, 0.1 * r4_mach ( 3 ) );
ntai12 = r4_inits ( ai12cs, 22, 0.1 * r4_mach ( 3 ) );
xmin = 2.0 * r4_mach ( 1 );
xsml = sqrt ( 8.0 * r4_mach ( 3 ) );
end
y = abs ( x );
if ( x == 0.0 )
value = 0.0;
elseif ( y <= xmin )
value = 0.0;
elseif ( y <= xsml )
value = 0.5 * x;
value = exp ( - y ) * value;
elseif ( y <= 3.0 )
value = x * ( 0.875 ...
+ r4_csevl ( y * y / 4.5 - 1.0, bi1cs, nti1 ) );
value = exp ( - y ) * value;
elseif ( y <= 8.0 )
value = ( 0.375 + r4_csevl ( ( 48.0 / y - 11.0 ) / 5.0, ...
ai1cs, ntai1) ) / sqrt ( y );
if ( x < 0.0 )
value = - value;
end
else
value = ( 0.375 + r4_csevl ( 16.0 / y - 1.0, ai12cs, ntai12 ) ) ...
/ sqrt ( y );
if ( x < 0.0 )
value = - value;
end
end
return
end
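% Hedged usage note (not part of the original file): evaluate the
% exponentially scaled Bessel function at a point, e.g.
%   v = r4_besi1e ( 1.5 );
% requires r4_inits, r4_mach and r4_csevl from the same library.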
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/fn/r4_besi1e.m"}
|
! Rice test
! ROSE's unparser fails an assertion on an empty character string constant.
! It doesn't matter whether single or double quotes are used.
program empty_string_constant
character(*), parameter :: c1 = "" , c3 = '', c4 = "Zung" ! produces assertion failure in testTranslator
character(len = 8) :: c2 = '' ! ditto
end program
|
{"hexsha": "3ebeab8462cec69aea94c5ab09bafc8cea4c274a", "size": 341, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/CompileTests/Fortran_tests/test2011_Rice_empty-string-constant.f90", "max_stars_repo_name": "maurizioabba/rose", "max_stars_repo_head_hexsha": "7597292cf14da292bdb9a4ef573001b6c5b9b6c0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 488, "max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z", "max_issues_repo_path": "tests/CompileTests/Fortran_tests/test2011_Rice_empty-string-constant.f90", "max_issues_repo_name": "sujankh/rose-matlab", "max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 174, "max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z", "max_forks_repo_path": "tests/CompileTests/Fortran_tests/test2011_Rice_empty-string-constant.f90", "max_forks_repo_name": "sujankh/rose-matlab", "max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 146, "max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z", "avg_line_length": 37.8888888889, "max_line_length": 106, "alphanum_fraction": 0.7126099707, "num_tokens": 88}
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <float.h>
#include <iostream>
#include <armadillo>
#include <tuple>
#include "sys.h"
#include "grid.h"
#include "vtk_functions.h"
using namespace std;
typedef struct {
double minMag;
double maxMag;
double range;
} SYS_Image;
// void draw_image(System *sys, Grid *grid) {
// std::cout << "Started drawing image" << endl;
// int w = sys->NGrid;
// int h = sys->NGrid;
// FILE *f;
// unsigned char *img = NULL;
// int filesize = 54 + 3 * w * h;
// //w is your image width, h is image height, both int
// img = (unsigned char *) malloc(3 * w * h);
// memset(img, 0, sizeof(&img));
// for (int i = 0; i < sys->NGrid; i++) {
// for (int j = 0; j < sys->NGrid; j++) {
// int x = i;
// int y = j;
// double r = sys->theImage[i][j].x;
// double g = sys->theImage[i][j].y;
// double b = sys->theImage[i][j].z;
// if (r > 255) r = 255;
// if (g > 255) g = 255;
// if (b > 255) b = 255;
// img[(x + y * w) * 3 + 2] = (unsigned char) (r);
// img[(x + y * w) * 3 + 1] = (unsigned char) (g);
// img[(x + y * w) * 3 + 0] = (unsigned char) (b);
// }
// }
// unsigned char bmpfileheader[14] = {'B', 'M', 0, 0, 0, 0, 0, 0, 0, 0, 54, 0, 0, 0};
// unsigned char bmpinfoheader[40] = {40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 24, 0};
// unsigned char bmppad[3] = {0, 0, 0};
// bmpfileheader[2] = (unsigned char) (filesize);
// bmpfileheader[3] = (unsigned char) (filesize >> 8);
// bmpfileheader[4] = (unsigned char) (filesize >> 16);
// bmpfileheader[5] = (unsigned char) (filesize >> 24);
// bmpinfoheader[4] = (unsigned char) (w);
// bmpinfoheader[5] = (unsigned char) (w >> 8);
// bmpinfoheader[6] = (unsigned char) (w >> 16);
// bmpinfoheader[7] = (unsigned char) (w >> 24);
// bmpinfoheader[8] = (unsigned char) (h);
// bmpinfoheader[9] = (unsigned char) (h >> 8);
// bmpinfoheader[10] = (unsigned char) (h >> 16);
// bmpinfoheader[11] = (unsigned char) (h >> 24);
// f = fopen("magix.bmp", "wb");
// fwrite(bmpfileheader, 1, 14, f);
// fwrite(bmpinfoheader, 1, 40, f);
// for (int i = 0; i < h; i++) {
// fwrite(img + (w * (h - i - 1) * 3), 3, w, f);
// fwrite(bmppad, 1, (4 - (w * 3) % 4) % 4, f);
// }
// fclose(f);
// std::cout << "Image drawn" << endl;
// }
// void set_min_max(System *sys, Grid *grid) {
// int i, j;
// // sys->theImage = (Node **) calloc(sys->NGrid, sizeof(Node *));
// // for (i = 0; i < sys->NGrid; i++)
// // sys->theImage[i] = (Node *) calloc(sys->NGrid, sizeof(Node));
// double *magSorted = (double *) malloc(sizeof(double) * sys->NGrid * sys->NGrid);
// for (j = 0; j < sys->NGrid; j++) {
// for (i = 0; i < sys->NGrid; i++) {
// double mag = sqrtf(sys->BField[j][i].x * sys->BField[j][i].x + sys->BField[j][i].y * sys->BField[j][i].y + sys->BField[j][i].z * sys->BField[j][i].z);
// if (mag > 0)
// magSorted[j * sys->NGrid + i] = log(mag);
// }
// }
// mergesort(magSorted, sys->NGrid * sys->NGrid);
// sys_image->minMag = DBL_MAX;
// sys_image->maxMag = -DBL_MAX;
// for (j = 0; j < sys->NGrid; j++) {
// for (i = 0; i < sys->NGrid; i++) {
// if (magSorted[j * sys->NGrid + i] != DBL_MAX) {
// if (sys_image->minMag > magSorted[j * sys->NGrid + i])
// sys_image->minMag = magSorted[j * sys->NGrid + i];
// if (sys_image->maxMag < magSorted[j * sys->NGrid + i])
// sys_image->maxMag = magSorted[j * sys->NGrid + i];
// }
// }
// }
// sys_image->range = sys_image->maxMag - sys_image->minMag;
// if (sys->debug == 1) {
// printf("\nminMag: %E\n", sys_image->minMag);
// printf("maxMag: %E\n", sys_image->maxMag);
// printf("minMag non log: %E\n", exp(sys_image->minMag));
// printf("maxMag non log: %E\n", exp(sys_image->maxMag));
// }
// }
void write_bmp(System *sys, Grid *grid) {
double red = 0;
double green = 0;
double blue = 0;
Node *x_box = (Node *) calloc(1, sizeof(Node));
Node *y_box = (Node *) calloc(1, sizeof(Node));
Node *point = (Node *) calloc(1, sizeof(Node));
arma::field<arma::rowvec> image(sys->NGrid, sys->NGrid);
arma::rowvec pixel(3);
tuple <double, double, double> geek;
for (int i = 0; i < sys->NGrid; i++) {
for (int j = 0; j < sys->NGrid; j++) {
double lic_val = grid->Hx(j,i) / 255;
double val = 0;
// if (sys->pImage[j][i].y > 0)
// val = ((get_dec_place(sys->pImage[j][i].y, 20)) - sys_image->minMag) / sys_image->range;
if (val < 0.25) {
red = 0;
green = 255 * (val / 0.25);
blue = 255;
} else if ((val >= 0.25) && (val < 0.5)) {
red = 0;
green = 255;
blue = 255 - 255 * (val - 0.25) / 0.25;
} else if ((val >= 0.5) && (val < 0.75)) {
red = 255 * (val - 0.5) / 0.25;
green = 255;
blue = 0;
} else if ((val >= 0.75) && (val < 1.00)) {
red = 255;
green = 255 - 255 * (val - 0.75) / 0.25;
blue = 0;
            } else {
                // val >= 1.0: clamp to the top of the colour ramp
                // (same expression as the previous branch)
red = 255;
green = 255 - 255 * (val - 0.75) / 0.25;
blue = 0;
}
pixel(0) = red * grid->Hy(j,i) * lic_val;
pixel(1) = green * grid->Hy(j,i) * lic_val;
pixel(2) = blue * grid->Hy(j,i) * lic_val;
image(i, j) = pixel;
// if (sys->set_vtk == 1) {
// pixel(0) = red * grid->Hy(j,i) * lic_val;
// pixel(1) = green * grid->Hy(j,i) * lic_val;
// pixel(2) = blue * grid->Hy(j,i) * lic_val;
// image(i, j) = pixel;
// } else {
// pixel(0) = red * lic_val;
// pixel(1) = green * lic_val;
// pixel(2) = blue * lic_val;
// image(i, j) = pixel;
// }
// if (sys->set_vtk == 1) {
// // sys->theImage[i][sys->NGrid - j].x = red * sys->pImage[j][i].y * lic_val;
// // sys->theImage[i][sys->NGrid - j].y = green * sys->pImage[j][i].y * lic_val;
// // sys->theImage[i][sys->NGrid - j].z = blue * sys->pImage[j][i].y * lic_val;
// } else {
// // sys->theImages[i][sys->NGrid - j].x = red * lic_val;
// // sys->theImage[i][sys->NGrid - j].y = green * lic_val;
// // sys->theImage[i][sys->NGrid - j].z = blue * lic_val;
// }
// for (k = 0; k < (int) p.size(); k++) {
// sort(p, x_box, 0, k, (int) p[k].size());
// sort(p, y_box, 1, k, (int) p[k].size());
// point->x = obs_grid(i)(1); // y-coord
// point->y = obs_grid(j)(2); // z-coord
// if (point_in_box(point, x_box, y_box)) {
// sys->theImage[i][sys->NGrid - j].x = 0;
// sys->theImage[i][sys->NGrid - j].y = 0;
// sys->theImage[i][sys->NGrid - j].z = 0;
// }
// }
}
    }
    // Release the scratch allocations made above (the original leaked them).
    free(x_box);
    free(y_box);
    free(point);
// image.save("image.field");
// if (sys->set_vtk == 1)
// vtk_mag_field(sys, image, obs_grid);
// else
// draw_image(sys);
}
|
{"hexsha": "d285ff201f89b63b073fab901ed6d87fcc6d4e92", "size": 7765, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/images.cpp", "max_stars_repo_name": "rubenvanstaden/Magix", "max_stars_repo_head_hexsha": "0b45955d98a57b15b021e3d2e99698972f874a2d", "max_stars_repo_licenses": ["CECILL-B"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/images.cpp", "max_issues_repo_name": "rubenvanstaden/Magix", "max_issues_repo_head_hexsha": "0b45955d98a57b15b021e3d2e99698972f874a2d", "max_issues_repo_licenses": ["CECILL-B"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/images.cpp", "max_forks_repo_name": "rubenvanstaden/Magix", "max_forks_repo_head_hexsha": "0b45955d98a57b15b021e3d2e99698972f874a2d", "max_forks_repo_licenses": ["CECILL-B"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4698275862, "max_line_length": 165, "alphanum_fraction": 0.4405666452, "num_tokens": 2588}
|
"""
Cokriging example from [Forrester 2007] to show
MultiFiMetaModel and MultiFiCoKrigingSurrogate usage
"""
import numpy as np
from openmdao.api import Component, Group, Problem, MultiFiMetaModel, MultiFiCoKrigingSurrogate, KrigingSurrogate
def model_hifi(x):
return ((6*x-2)**2)*np.sin((6*x-2)*2)
def model_lofi(x):
return 0.5*((6*x-2)**2)*np.sin((6*x-2)*2)+(x-0.5)*10. - 5
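# Hedged aside (not in the original): the two fidelities are analytically
# related -- model_hifi(x) - 2*model_lofi(x) == 20 - 20*x -- which is why
# co-kriging can exploit the cheap model so effectively here:
#   xs = np.linspace(0, 1, 5)
#   print(model_hifi(xs) - 2*model_lofi(xs))  # -> [20., 15., 10., 5., 0.]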
class Simulation(Group):
def __init__(self, surrogate, nfi):
super(Simulation, self).__init__()
self.surrogate = surrogate
mm = self.add("mm", MultiFiMetaModel(nfi=nfi))
mm.add_param('x', val=0.)
mm.add_output('f_x', val=(0.,0.), surrogate=surrogate)
if __name__ == "__main__":
# Co-kriging with 2 levels of fidelity
surrogate = MultiFiCoKrigingSurrogate()
pbcok = Problem(Simulation(surrogate, nfi=2))
pbcok.setup(check=False)
doe_e = [0.0, 0.4, 0.6, 1.0]
doe_c = [0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 0.9] + doe_e
pbcok['mm.train:x'] = np.array(doe_e).reshape(len(doe_e),1)
pbcok['mm.train:x_fi2'] = np.array(doe_c).reshape(len(doe_c),1)
pbcok['mm.train:f_x'] = model_hifi(pbcok['mm.train:x'])
pbcok['mm.train:f_x_fi2'] = model_lofi(pbcok['mm.train:x_fi2'])
# train
pbcok.run()
ngrid = 100
pred_cok = []
inputs = np.linspace(0, 1, ngrid)
for x in inputs:
pbcok['mm.x'] = x
pbcok.run()
pred_cok.append(pbcok['mm.f_x'])
pbcok_mu = np.array([float(p[0]) for p in pred_cok])
pbcok_sigma = np.array([float(p[1]) for p in pred_cok])
## "Co-kriging" with 1 level of fidelity a.k.a. kriging
surrogate = MultiFiCoKrigingSurrogate()
## Kriging from openmdao
#surrogate = KrigingSurrogate()
pbk = Problem(Simulation(surrogate, nfi=1))
pbk.setup()
pbk['mm.train:x'] = np.array(doe_e).reshape(len(doe_e),1)
pbk['mm.train:f_x'] = model_hifi(pbk['mm.train:x'])
pbk.run() # train
ngrid = 100
pred_k = []
inputs = np.linspace(0, 1, ngrid)
for x in inputs:
pbk['mm.x'] = x
pbk.run()
pred_k.append(pbk['mm.f_x'])
pbk_mu = np.array([float(p[0]) for p in pred_k])
pbk_sigma = np.array([float(p[1]) for p in pred_k])
check = inputs
actual = model_hifi(inputs)
import pylab as plt
plt.figure(2)
plt.plot(check, actual, 'k', label='True f')
plt.plot(doe_e, model_hifi(np.array(doe_e)),'ok',label="High Fi")
plt.plot(doe_c, model_lofi(np.array(doe_c)),'or',label="Low Fi")
plt.plot(check, pbcok_mu, 'g', label='Co-kriging')
plt.plot(check, pbcok_mu + 2*pbcok_sigma, 'g', alpha=0.5, label='I95%')
plt.plot(check, pbcok_mu - 2*pbcok_sigma, 'g', alpha=0.5)
plt.fill_between(check, pbcok_mu + 2*pbcok_sigma,
pbcok_mu - 2*pbcok_sigma, facecolor='g', alpha=0.2)
plt.plot(check, pbk_mu, 'b', label='Kriging')
plt.plot(check, pbk_mu + 2*pbk_sigma, 'b', alpha=0.5, label='I95%')
plt.plot(check, pbk_mu - 2*pbk_sigma, 'b', alpha=0.5)
plt.fill_between(check, pbk_mu + 2*pbk_sigma,
pbk_mu - 2*pbk_sigma, facecolor='b', alpha=0.2)
plt.legend(loc='best')
plt.show()
# RMSE CoKriging
error = 0.
for a,p in zip(actual, pbcok_mu):
error += (a-p)**2
    error = np.sqrt(error/len(actual))  # take the square root: it is an RMSE
print("RMSE Cokriging = %g" % error)
# RMSE Kriging
error = 0.
for a,p in zip(actual, pbk_mu):
error += (a-p)**2
    error = np.sqrt(error/len(actual))  # take the square root: it is an RMSE
print("RMSE Kriging = %g" % error)
|
{"hexsha": "76d9ead802915bbabc35a6c596a016858208ca96", "size": 3527, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/cokriging_forrester.py", "max_stars_repo_name": "colinxs/OpenMDAO", "max_stars_repo_head_hexsha": "a9a52be29281a23a102c64b577066ee5fc70f4b4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-01-11T20:13:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T03:46:05.000Z", "max_issues_repo_path": "examples/cokriging_forrester.py", "max_issues_repo_name": "colinxs/OpenMDAO", "max_issues_repo_head_hexsha": "a9a52be29281a23a102c64b577066ee5fc70f4b4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2017-10-19T23:14:14.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-22T17:30:57.000Z", "max_forks_repo_path": "examples/cokriging_forrester.py", "max_forks_repo_name": "colinxs/OpenMDAO", "max_forks_repo_head_hexsha": "a9a52be29281a23a102c64b577066ee5fc70f4b4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2018-04-12T22:13:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-07T10:02:59.000Z", "avg_line_length": 30.6695652174, "max_line_length": 113, "alphanum_fraction": 0.6067479444, "include": true, "reason": "import numpy", "num_tokens": 1244}
|
#!/usr/bin/env python
# coding: utf-8
from qiskit.aqua.components.optimizers import COBYLA, ADAM, SPSA
from qiskit.circuit.library import ZZFeatureMap, RealAmplitudes, ZFeatureMap, PauliFeatureMap
from Benchmarking import Benchmark  # normalize_data is redefined locally below
import csv
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import warnings
warnings.filterwarnings("ignore")
best_models = [
{"Featuremap": "ZFeatureMap(4, reps=2)", "Opt": "SPSA(max_trials=50)", "vdepth": 5},
{"Featuremap": "ZFeatureMap(4, reps=2)", "Opt": "SPSA(max_trials=50)", "vdepth": 3},
{"Featuremap": "ZFeatureMap(4, reps=2)", "Opt": "COBYLA(maxiter=50)", "vdepth": 3},
{"Featuremap": "ZFeatureMap(4, reps=2)", "Opt": "SPSA(max_trials=50)", "vdepth": 1},
{"Featuremap": "ZFeatureMap(4, reps=1)", "Opt": "COBYLA(maxiter=50)", "vdepth": 1},
{"Featuremap": "ZZFeatureMap(4, reps=1)", "Opt": "SPSA(max_trials=50)", "vdepth": 5},
{"Featuremap": "ZFeatureMap(4, reps=2)", "Opt": "COBYLA(maxiter=50)", "vdepth": 5},
{"Featuremap": "ZFeatureMap(4, reps=1)", "Opt": "SPSA(max_trials=50)", "vdepth": 3},
{"Featuremap": "ZFeatureMap(4, reps=1)", "Opt": "SPSA(max_trials=50)", "vdepth": 5},
{"Featuremap": "ZFeatureMap(4, reps=1)", "Opt": "COBYLA(maxiter=50)", "vdepth": 3},
]
def normalize_data(dataPath = "../../Data/Processed/iris_csv.csv"):
"""
Normalizes the data
"""
# Reads the data
data = pd.read_csv(dataPath)
data = shuffle(data, random_state=42)
if dataPath.__contains__("iris"):
X, Y = data[['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']].values, data['target'].values
    elif dataPath.__contains__("wine"):
        # 'target' must not appear among the features; the original leaked it into X
        X, Y = data[['alcohol', 'flavanoids', 'color_intensity', 'proline']].values, data['target'].values
# normalize the data
scaler = MinMaxScaler(feature_range=(-2 * np.pi, 2 * np.pi))
X = scaler.fit_transform(X)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=42)
return X_train, X_test, Y_train, Y_test
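# Hedged usage example for the helper above:
#   X_train, X_test, Y_train, Y_test = normalize_data()  # iris by default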
def main():
data = {}
dataPaths = ["../../Data/Processed/iris_csv.csv", "../../Data/Processed/winedata.csv"]
for dataPath in dataPaths:
x_train, x_test, y_train, y_test = normalize_data(dataPath = dataPath)
for model in best_models:
print("FE: {}\tDepth: {}\tOpt: {}".format(model['Featuremap'], model['vdepth'], model['Opt']))
test_benchmark = Benchmark(optimizer=eval(model['Opt']), variational_depth=model['vdepth'], feature_map=eval(model['Featuremap']), X_train=x_train, X_test=x_test, Y_train=y_train, Y_test=y_test)
test_benchmark.run()
data_list = "{} {} vdepth {}".format(model['Featuremap'], model['Opt'], model['vdepth'])
data[data_list] = test_benchmark.get_cost_list()
if dataPath.__contains__("iris"):
w = csv.writer(open("../../Data/Processed/iriscost1.csv", "w"))
else:
w = csv.writer(open("../../Data/Processed/winecost1.csv", "w"))
for key, val in data.items():
w.writerow([key, val])
if __name__ == "__main__":
main()
|
{"hexsha": "bd6b7a6f8b4f9fd10ad96cb6bef1f26557dad097", "size": 3292, "ext": "py", "lang": "Python", "max_stars_repo_path": "Src/Scripts/generalisation.py", "max_stars_repo_name": "0x6f736f646f/variational-quantum-classifier-on-heartattack", "max_stars_repo_head_hexsha": "b37c8b24cf84b1e697ca2115d7d8cbc5fa83d036", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-11-15T17:49:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T19:03:13.000Z", "max_issues_repo_path": "Src/Scripts/generalisation.py", "max_issues_repo_name": "0x6f736f646f/variational-quantum-classifier-on-heartattack", "max_issues_repo_head_hexsha": "b37c8b24cf84b1e697ca2115d7d8cbc5fa83d036", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Src/Scripts/generalisation.py", "max_forks_repo_name": "0x6f736f646f/variational-quantum-classifier-on-heartattack", "max_forks_repo_head_hexsha": "b37c8b24cf84b1e697ca2115d7d8cbc5fa83d036", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-01-24T17:57:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-07T08:04:54.000Z", "avg_line_length": 44.4864864865, "max_line_length": 206, "alphanum_fraction": 0.645200486, "include": true, "reason": "import numpy", "num_tokens": 959}
|
""" Module documentation:
https://medium.com/codingthesmartway-com-blog/the-machine-learning-crash-course-part-2-linear-regression-6a5955792109
__author__ = "R"
__copyright__ = ""
__credits__ = ["Sebastian Eschweiler"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "R"
__email__ = "~"
__status__ = "Production"
TO DO:
Show predictions in Plot
Show Weights
Fit to tensorboard
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
EPOCHS = 200
NAME = "Test_R{}".format(int(time.time()))
# 'logs/' is an assumed log directory; the original passed ''.format(NAME),
# which yields an empty string and ignores NAME entirely.
tensorboard = tf.keras.callbacks.TensorBoard(log_dir='logs/{}'.format(NAME))
def linreg_main():
vec1, vec2 = training_data()
model = model_cfg()
print(model.summary())
trained_model, history = training(model, vec1, vec2)
plotty(trained_model, vec1, vec2, history)
def plotty(trained_model, vec1, vec2, history):
# axes = plt.gca()
# axes.set_xlim([-1,10])
# axes.set_ylim([-1,30])
plt.subplot(2, 1, 1)
plt.xlabel("Epoch Number")
plt.ylabel("Loss Magnitude")
plt.plot(history.history['loss'])
plt.subplot(2, 1, 2)
x = np.linspace(-10, 20, 100)
# plt.title("Graph of f(x)=2x+30")
# plt.plot(x, x * 2 + 30)
plt.scatter(vec1.numpy(), vec2.numpy(), color="blue")
# plt.scatter(20,trained_model.predict([20.0]), color="green")
for i in range(-10, 20):
plt.scatter(i, trained_model.predict([i]), color="red", s=7)
plt.show()
print("The Weights:", trained_model.layers[0].get_weights())
def training_data():
tf_values_x = tf.Variable([-10, -5, 0, 2, 6, 12, 15, 2], dtype=float)
tf_values_y = tf.add(tf.multiply(tf_values_x, 2), 30)
# for i, x in enumerate(values_x):
# print("X: {} Y: {}".format(x, values_y[i]))
return (tf_values_x, tf_values_y)
def model_cfg():
# A Dense layer can be seen as a linear operation in which every input is connected to every output by a weight and a
# bias. The number of inputs is specified by the first parameter units. The number of neurons in the layer is
# determined by the value of the parameter input_shape.
# model = tf.keras.Sequential([
# tf.keras.layers.Dense(units=1, input_shape=[1])
# ])
model2 = tf.keras.Sequential()
model2.add(tf.keras.layers.Dense(units=1, input_shape=[1], name="my_DenseLayer"))
model2.compile(loss="mean_squared_error", optimizer=tf.keras.optimizers.Adam(0.1))
return model2
def test():
print("Hi")
def training(model, vec1, vec2):
history = model.fit(vec1, vec2, epochs=EPOCHS, verbose=0, callbacks=[tensorboard])
# plt.xlabel("Epoch Number")
# plt.ylabel("Loss Magnidute")
# plt.plot(history.history['loss'])
# plt.show()
return (model, history)
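# Entry point (hedged addition): the original module defined linreg_main()
# but never invoked it.
if __name__ == "__main__":
    linreg_main()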
|
{"hexsha": "1e394a383e3f2a35c29d65cf54112f18644563e4", "size": 2946, "ext": "py", "lang": "Python", "max_stars_repo_path": "Networks/TF_Experimental/tf_linreg.py", "max_stars_repo_name": "unigoetheradaw/CTiTN_SS19", "max_stars_repo_head_hexsha": "f9ed90c4ec13a8024b17ff09bfc2ce19a064a879", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Networks/TF_Experimental/tf_linreg.py", "max_issues_repo_name": "unigoetheradaw/CTiTN_SS19", "max_issues_repo_head_hexsha": "f9ed90c4ec13a8024b17ff09bfc2ce19a064a879", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-25T15:38:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:13:33.000Z", "max_forks_repo_path": "Networks/TF_Experimental/tf_linreg.py", "max_forks_repo_name": "unigoetheradaw/CTiTN_SS19", "max_forks_repo_head_hexsha": "f9ed90c4ec13a8024b17ff09bfc2ce19a064a879", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0105263158, "max_line_length": 122, "alphanum_fraction": 0.6479972845, "include": true, "reason": "import numpy", "num_tokens": 795}
|
# -*- coding: utf-8 -*-
"""Auto-Encoder-v0.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/19KI_q17bSNNI3LNAD9R1x-HCSXl3sZsf
"""
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 2.x
import time
import torch
import numpy as np
import pandas as pd
from PIL import Image
import tensorflow as tf
from google.colab import files
import matplotlib.pyplot as plt
from IPython.display import clear_output
from tensorflow.keras.models import Sequential
from tensorflow.keras.applications import vgg19
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.applications.vgg19 import preprocess_input
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D, Input
torch.cuda.empty_cache()
gpu_name = tf.test.gpu_device_name()
cpu_name = '/cpu:0'
print(gpu_name)
from keras.datasets import mnist
(x_train, label_train), (x_test, label_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
# Encoder / decoder stack. Note: the original code aliased
# `decoder = encoder` and `auto_encoder = decoder`, so all layers ended up
# in a single Sequential model; this builds the same model explicitly.
auto_encoder = models.Sequential()
auto_encoder.add(Dense(128, input_shape=(784,), activation='relu'))
auto_encoder.add(Dense( 64, activation='relu'))
auto_encoder.add(Dense( 16, activation='relu'))  # bottleneck (layer index 2)
# Decoder
auto_encoder.add(Dense( 64, activation='relu'))
auto_encoder.add(Dense(128, activation='relu'))
# 'sigmoid' (not 'softmax') is appropriate for per-pixel reconstruction
# with a binary cross-entropy loss
auto_encoder.add(Dense(784, activation='sigmoid'))
noise = np.random.rand(*x_train.shape)
auto_encoder.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
fit_history = auto_encoder.fit(x_train+noise,
x_train,
epochs=20)
fig = plt.figure(figsize=(2, 20))
for i in range(20):
inp = np.array([x_test[i]])+np.random.rand(*x_test[i].shape)
out = auto_encoder(inp)
out = np.array(out).reshape(28, 28)
plt.subplot(20, 2, 2*i+1)
plt.imshow(inp.reshape(28, 28), cmap='gray')
plt.axis('off')
plt.subplot(20, 2, 2*i+2)
plt.imshow(out, cmap='gray')
plt.axis('off')
plt.subplot(121)
epochs_list = [*range(1, 21)]
plt.style.use('ggplot')
plt.title('Training Loss')
plt.plot(epochs_list, fit_history.history['loss'], marker='+', markeredgecolor='k')
plt.xlabel('Epoch')
plt.ylabel('Loss')
T = models.Model(auto_encoder.input,
outputs=[auto_encoder.output,
auto_encoder.get_layer(index=2).output]
)
test_loss, test_accuracy = auto_encoder.evaluate(x_test, x_test)
print("\nTest Results:\nAccuracy:\t%8.3f %%\nLoss val:\t%8.3f"%
(test_accuracy*100, test_loss))
plt.plot(T(x_test)[1][1])
plt.figure(figsize=(10, 10))
for j in range(10):
    # the original branched on a stale `i` with two identical bodies;
    # one subplot per digit class is all that is needed
    plt.subplot(4, 3, j+1)
    for num, label in enumerate(label_test[:200]):
        if label == j:
            plt.plot(T(x_test)[1][num])
data = T(x_test)[1].numpy()
rep_mat = np.zeros([10, 16])
for num in range(10):
print("num %3d"%(num))
z = np.zeros(16)
for index, datum in enumerate(data):
if label_test[index] == num:
z += datum
rep_mat[num] = z/z.sum()
plt.style.use('classic')
plt.axis('off')
plt.matshow(rep_mat, cmap='jet')
plt.show()
auto_encoder.summary()
plot_model(auto_encoder, show_shapes=True)
auto_encoder.save('drive/My Drive/AutoEncoder-v0.h5')
#tf.keras.models.load_model('drive/My Drive/AutoEncoder-v0.h5')
#files.download('drive/My Drive/AutoEncoder-v0.h5')
|
{"hexsha": "34566682229056709cbb605513a702dba1001c86", "size": 3893, "ext": "py", "lang": "Python", "max_stars_repo_path": "Source Code/auto_encoder_v0.py", "max_stars_repo_name": "314arham/denoise-AE", "max_stars_repo_head_hexsha": "5fce72273cce26a60837d43dbb0fcea3fed36078", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Source Code/auto_encoder_v0.py", "max_issues_repo_name": "314arham/denoise-AE", "max_issues_repo_head_hexsha": "5fce72273cce26a60837d43dbb0fcea3fed36078", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Source Code/auto_encoder_v0.py", "max_forks_repo_name": "314arham/denoise-AE", "max_forks_repo_head_hexsha": "5fce72273cce26a60837d43dbb0fcea3fed36078", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4924242424, "max_line_length": 88, "alphanum_fraction": 0.6920113023, "include": true, "reason": "import numpy", "num_tokens": 1040}
|
#ifndef SEARCH_NBEST__
#define SEARCH_NBEST__
#include "search/applied.hh"
#include "search/config.hh"
#include "search/edge.hh"
#include <boost/pool/object_pool.hpp>
#include <cstddef>
#include <queue>
#include <vector>
#include <assert.h>
namespace search {
class NBestList;
class NBestList {
private:
class RevealedRef {
public:
explicit RevealedRef(History history)
: in_(static_cast<NBestList*>(history)), index_(0) {}
private:
friend class NBestList;
NBestList *in_;
std::size_t index_;
};
typedef GenericApplied<RevealedRef> QueueEntry;
public:
NBestList(std::vector<PartialEdge> &existing, util::Pool &entry_pool, std::size_t keep);
Score TopAfterConstructor() const;
const std::vector<Applied> &Extract(util::Pool &pool, std::size_t n);
private:
Score Visit(util::Pool &pool, std::size_t index);
Applied Get(util::Pool &pool, std::size_t index);
void MoveTop(util::Pool &pool);
typedef std::vector<Applied> Revealed;
Revealed revealed_;
typedef std::priority_queue<QueueEntry> Queue;
Queue queue_;
};
class NBest {
public:
typedef std::vector<PartialEdge> Combine;
explicit NBest(const NBestConfig &config) : config_(config) {}
void Add(std::vector<PartialEdge> &existing, PartialEdge addition) const {
existing.push_back(addition);
}
NBestComplete Complete(std::vector<PartialEdge> &partials);
const std::vector<Applied> &Extract(History root);
private:
const NBestConfig config_;
boost::object_pool<NBestList> list_pool_;
util::Pool entry_pool_;
};
} // namespace search
#endif // SEARCH_NBEST__
|
{"hexsha": "cb7651bc2ee0a4b4e3b5e9f3561116a34ad16296", "size": 1699, "ext": "hh", "lang": "C++", "max_stars_repo_path": "moses/search/nbest.hh", "max_stars_repo_name": "anshsarkar/TailBench", "max_stars_repo_head_hexsha": "25845756aee9a892229c25b681051591c94daafd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 114.0, "max_stars_repo_stars_event_min_datetime": "2015-01-11T05:41:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-31T03:47:12.000Z", "max_issues_repo_path": "moses/search/nbest.hh", "max_issues_repo_name": "anshsarkar/TailBench", "max_issues_repo_head_hexsha": "25845756aee9a892229c25b681051591c94daafd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29.0, "max_issues_repo_issues_event_min_datetime": "2015-01-09T01:00:09.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-25T06:04:02.000Z", "max_forks_repo_path": "moses/search/nbest.hh", "max_forks_repo_name": "anshsarkar/TailBench", "max_forks_repo_head_hexsha": "25845756aee9a892229c25b681051591c94daafd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 50.0, "max_forks_repo_forks_event_min_datetime": "2015-02-13T13:48:39.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-07T09:45:11.000Z", "avg_line_length": 20.7195121951, "max_line_length": 92, "alphanum_fraction": 0.6851088876, "num_tokens": 419}
|
# -*- coding: utf-8 -*-
"""
Author: Philip Anfinrud, Brian Mahon, Friedrich Schotte
Date created: 12/8/2016
Date last modified: 10/17/2017
2017-06-02 1.5 Adapted for 3-way injection port
2017-10-06 1.6 Friedrich, Using IOC
2017-10-17 1.7 Brian, Friedrich, refill_1, refill_3
Setup:
Start desktop shortcut "Centris Syringe IOC"
(Target: python cavro_centris_syringe_pump_IOC.py run_IOC
Start in: %LAUECOLLECT%)
"""
__version__ = "1.7"
from time import sleep,time
from logging import debug,info,warn,error
from thread import start_new_thread
from pdb import pm
from tempfile import gettempdir
from cavro_centris_syringe_pump_IOC import volume,port as valve
volume1,volume2,volume3,volume4 = volume
valve1,valve2,valve3,valve4 = valve
# Assign default parameters.
Vol = {1:250,2:250,3:250,4:250} # Volumes of syringes.
Backlash = 100 # Backlash in increments.
V_prime = 25 # Volume needed to purge 2.3 m tubing (49 uL/m).
V_purge = 115 # Volume needed to purge 2.3 m tubing (49 uL/m).
V_inflate = 2 # Volume used to inflate tubing.
V_deflate = 2 # Volume used to deflate tubing.
V_clean = 4.0 # Volume used to advance delivered xtal droplet
V_flush = 4.0 # Volume used to flush collapsible tubing.
V_injectX = 0.2 # Volume used to advance delivered xtal droplet
V_injectM = 0.3 # Volume of mother liquor during inject
V_injectR = 0.2 # Volume desired for xtal delivery
V_droplet = 1 #Volume used to load droplets into cappilary
V_plug = 5 #Volume of fluorinert to remove protien from channels
S_pressure = 250 # Speed used to change pressure.
S_load = 50 # Speed used to load syringes.
S_prime = 20 # Speed used to prime capillaries.
S_flush = 68 # Speed used to flush collapsible tubing.
S_flow = 0.07 # Speed used to flow through collapsible tubing.
S_min = 0.002 # Minimum Speed available.
S_flowIX = 1.0 # Speed used for injection of xtals
S_flowIM = 0.5 #Speed of flow for injection cycle
S_flowRV = 0.75 #Speed of flow for reverse part of injection cycle
S_flowS1 = 0.05 #Speed used for small droplet generation
port = [1,2,3,4]
class PumpController(object):
def write_read(self, command_dict):
"""Writes commands to multiple pumps with pump ids and commands assembled in a dictionary.
Returns a dictionary of pump ids and their respective responses."""
from cavro_centris_syringe_pump_IOC import pump_controller
return pump_controller.write_read(command_dict)
def assign_pids(self):
"""Assigns pump id to each syringe pump according to dictionary; since
pump ids are written to non-volatile memory, need only execute once."""
self.write_read({1: "/1s0ZA1R\r",
2: "/1s0ZA2R\r",
3: "/1s0ZA3R\r",
4: "/1s0ZA4R\r"})
def syringe_setup(self):
"""Specifies the syringe volumes for each pump in the dictionary of
pumps. The command takes effect after power cycling the pumps, and
need only be executed once."""
# U93, U94, U90, U95 -> 50, 100, 250, 500 uL
self.write_read({1: "/1U90R\r",
2: "/1U90R\r",
3: "/1U90R\r",
4: "/1U90R\r"})
def move_abs(self,pid,position,speed=25):
"""Move plunger of pump[pid] to absolute position."""
if 0 <= position <= Vol[pid]:
self.write_read({pid: "".join(["/1J2V",str(speed),",1A",str(position),",1J0R\r"])})
else:
info('Position outside of absolute usable range: 0 <= position <= %r' % Vol[pid])
def move_rel(self,pid,position,speed=25):
"""Move plunger of pump[pid] to relative position."""
current = self.positions()[pid]
if 0 <= current + position <= Vol[pid]:
if position < 0:
position = abs(position)
self.write_read({pid: "".join(["/1J2V",str(speed),",1D",str(position),",1J0R\r"])})
else:
self.write_read({pid: "".join(["/1J2V",str(speed),",1P",str(position),",1J0R\r"])})
else:
            info('Relative move outside of usable range: 0 <= current + position <= %r' % Vol[pid])
    def reset(self, *pid):
        """Performs a soft reset on the pumps whose pid numbers are passed.
        If left blank, all pumps are soft reset."""
        if len(pid) == 0:
            pid = (1,2,3,4)
        for i in pid:
            self.write_read({i: "/1!R\r"})
def abort(self):
"""Terminates all pump motion and resets J to 0."""
self.write_read({pid: "/1TR\r" for pid in port})
self.write_read({pid: "/1J0R\r" for pid in port})
def init(self):
"""Initializes pumps, sets Backlash, loads syringes, and leaves valves set to "O"."""
t0 = time()
self.write_read({pid: "/1TR\r" for pid in port})
info("Executing init...")
info(" emptying syringes...")
self.write_read({1: "".join(["/1Y7,0,0IV",str(S_load),",1K",str(Backlash),"A0,1R\r"]),
2: "".join(["/1Z7,0,0IV",str(S_load),",1K",str(Backlash),"A0,1R\r"]),
3: "".join(["/1Y7,0,0IV",str(S_load),",1K",str(Backlash),"A0,1R\r"]),
4: "".join(["/1Z7,0,0IV",str(S_load),",1K",str(Backlash),"A0,1R\r"])})
while self.busy(1,2,3,4): sleep(0.1)
info(" filling syringes...")
self.write_read({1: "".join(["/1A",str(Vol[1]),",1R\r"]),
2: "".join(["/1A",str(Vol[2]),",1R\r"]),
3: "".join(["/1A",str(Vol[3]),",1R\r"]),
4: "".join(["/1A",str(Vol[4]),",1R\r"])})
while self.busy(1,2,3,4): sleep(0.1)
info(" emptying syringes...")
self.write_read({1: "".join(["/1A0,1R\r"]),
2: "".join(["/1A0,1R\r"]),
3: "".join(["/1A0,1R\r"]),
4: "".join(["/1A0,1R\r"])})
while self.busy(1,2,3,4): sleep(0.1)
info(" syringes are initialized, primed, and ready to load.")
info(" time to init (s): %r" % (time()-t0))
def prime(self):
"""Fills capillaries 1 with fluorinert and 3 with oil."""
t0 = time()
self.write_read({pid: "/1TR\r" for pid in port})
info("Executing purge...")
info(" filling capillary 1 with oil and 3 with mother liquor...")
self.write_read({1: "".join(["/1IV",str(S_load),",1A",str(Vol[1]),",1R\r"]),
3: "".join(["/1IV",str(S_load),",1A",str(Vol[3]),",1R\r"])})
while self.busy(1,3): sleep(0.1)
info(" purging lines...")
self.write_read({2: "/1BR\r"}) #Set pump2 valve to "B".
while self.busy(2): sleep(0.1)
self.write_read({1: "".join(["/1OV",str(S_prime),",1D",str(V_prime),",1R\r"]),
3: "".join(["/1OV",str(S_prime),",1D",str(V_prime),",1R\r"])})
i = -1
while self.busy(1,3):
i += 1
sleep(0.1)
            if i % 20 == 0: info("%r" % self.positions()) # every 2 s
info(" time to purge (s): %r" % (time()-t0))
self.refill()
def test_inject(self):
self.move_rel(3,-0.25,1)
self.move_rel(1,-0.25,1)
def pressure(self):
self.valve(2,port='O')
while self.busy(2): sleep(0.02)
self.write_read({2: "".join(["/1V",str(S_prime),",1P",str(V_prime),",1R\r"])})
while self.busy(2): sleep(0.1)
self.valve(2,port='B')
info("pressure down, valve 2 set to B...")
def pressure_old(self,strokes=-1):
"""Changes pressure using pump4."""
t0 = time()
info("Changing pressure...")
if strokes < 0:
for i in range(abs(strokes)):
self.write_read({2: "".join(["/1IV",str(S_pressure),",1A",str(Vol[4]),",1R\r"])})
while self.busy(2): sleep(0.1)
self.write_read({2: "".join(["/1OV",str(S_pressure),",1A",str(0),",1R\r"])})
while self.busy(2): sleep(0.1)
else:
for i in range(abs(strokes)):
self.write_read({2: "".join(["/1OV",str(S_pressure),",1A",str(Vol[4]),",1R\r"])})
while self.busy(2): sleep(0.1)
self.write_read({2: "".join(["/1IV",str(S_pressure),",1A",str(0),",1R\r"])})
while self.busy(2): sleep(0.1)
info(" time to change pressure (s): %r" % (time()-t0))
def flow(self,S = S_flow, pid = 1):
"""Starts flow."""
info("Executing flow...")
self.write_read({pid: "/1TR\r" for pid in port})
self.write_read({pid: "".join(["/1OV",str(S),",1A0,1R\r"])})
def run_flow(self,Speed = 0.25, pid = 1,N = 5):
for i in range(N):
self.flow(S = Speed, pid= pid)
while self.busy(pid): sleep(0.1)
self.fill(pid)
while self.busy(pid): sleep(0.1)
def injecttestN(self):
"""Assumes flow is active; increase flow from [1], while inject xtals using [4], Then increase flow speed
again, while retracting volume from inject. finish when resume normal flow [1]."""
t0 = time()
#info("Executing inject...")
self.write_read({1: "".join(["/1V",str(S_flowIX),",1F\r"]),
3: "".join(["/1V",str(S_flowIX),",1D",str(V_injectX+0.25),",1R\r"])})
while self.busy(3): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flowIX),",1F\r"]),
3: "".join(["/1V",str(S_flowIM),",1P",str(V_injectM),",1R\r"])})
while self.busy(1,3): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flow*5),",1F\r"])})
sleep (0.05)
self.write_read({1: "".join(["/1V",str(S_flow),",1F\r"])})
#info("time to swap flow source (s): %r" % (t1-t0))
info("time to inject (s): %r" % (time()-t0))
info("%r" % self.positions())
def injecttest(self):
"""Assumes flow is active; increase flow from [1], while inject xtals using [4], Then increase flow speed
again, while retracting volume from inject. finish when resume normal flow [1]."""
t0 = time()
#info("Executing inject...")
self.flush()
sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flowIX),",1F\r"]),
3: "".join(["/1V",str(S_flowIX),",1D",str(V_injectM),",1R\r"])})
while self.busy(1,3): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flowIX),",1F\r"]),
3: "".join(["/1V",str(S_flowIM),",1P",str(V_injectM/2),",1R\r"])})
while self.busy(1,3): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flow),",1F\r"])})
#info("time to swap flow source (s): %r" % (t1-t0))
info("time to inject (s): %r" % (time()-t0))
info("%r" % self.positions())
def inject(self):
"""Assumes flow is active; increase flow from [1], while inject xtals using [4], Then increase flow speed
again, while retracting volume from inject. finish when resume normal flow [1]."""
t0 = time()
#info("Executing inject...")
self.flush()
sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flowIM+0.5),",1F\r"]),
3: "".join(["/1V",str(S_flowIM),",1D",str(V_injectM+0.2),",1R\r"])})
while self.busy(3): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flowIM*2),",1F\r"]),
3: "".join(["/1V",str(S_flowIM),",1P",str(V_injectM),",1R\r"])})
while self.busy(3): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flow),",1F\r"])})
#info("time to swap flow source (s): %r" % (t1-t0))
info("time to inject (s): %r" % (time()-t0))
info("%r" % self.positions())
def reverse(self):
self.write_read({1: "".join(["/1V",str(S_flowIM),",1F\r"]),
3: "".join(["/1V",str(S_flowRV),",1P",str(V_injectX),",1R\r"])})
def injectN(self):
"""inject without flush."""
t0 = time()
#info("Executing inject...")
self.write_read({1: "".join(["/1V",str(S_flowIM/4),",1F\r"]),
3: "".join(["/1V",str(S_flowIM/2),",1D",str(V_injectR),",1R\r"])})
while self.busy(3): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flowIM),",1F\r"]),
3: "".join(["/1V",str(S_flowIM/2),",1D",str(V_injectX),",1R\r"])})
while self.busy(3): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flowIM),",1F\r"]),
3: "".join(["/1V",str(S_flowIM/4),",1P",str(V_injectX/2),",1R\r"])})
while self.busy(3): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flow*4),",1F\r"])})
sleep (0.2)
self.write_read({1: "".join(["/1V",str(S_flow),",1F\r"])})
#info("time to swap flow source (s): %r" % (t1-t0))
info("time to inject (s): %r" % (time()-t0))
info("%r" % self.positions())
def clean(self):
"""injects cleaning solution and pressurizes from pump 4 to remove xtals."""
t0 = time()
info("Executing clean...")
self.abort()
self.valve(2,port = "I")
self.valve(4,port = "O")
while self.busy(2): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flowIX),",1D",str(V_injectR),",1R\r"]),
3: "".join(["/1V",str(S_flowIM),",1P",str(V_injectX),",1R\r"])})
while self.busy(1,3): sleep(0.02)
self.write_read({4: "".join(["/1V",str(S_flush*2),",1D",str(V_clean),",1R\r"])})
while self.busy(4): sleep(0.02)
self.write_read({1: "".join(["/1V",str(S_flowRV),",1D",str(V_injectR),",1R\r"]),
3: "".join(["/1V",str(S_flowIM),",1D",str(V_injectM),",1R\r"])})
while self.busy(1,3): sleep(1.0)
self.valve(2,port = "B")
self.valve(4,port = "I")
while self.busy(2,4): sleep(0.02)
info("Initiating Flush...")
self.write_read({1: "".join(["/1V",str(S_flowIX*5),",1D",str(V_flush),",1R\r"])})
while self.busy(1): sleep(0.02)
info("Clean Sequence Finished, Resume Flow")
#info("time to swap flow source (s): %r" % (t1-t0))
info("time to clean (s): %r" % (time()-t0))
self.flow()
def xtal_grow1(self):
t0 = time()
info(" Initiating protein droplet generation")
self.write_read({1: "".join(["/1V",str(S_flowS1),",1D",str(V_droplet),",1R\r"]),
4: "".join(["/1V",str(S_flowS1),",1D",str(V_droplet),",1R\r"])})
while self.busy(1,4):
sleep(0.1)
info(" Sample loaded")
info("time to load (s): %r" % (time()-t0))
def xtal_grow2(self):
t0 = time()
info(" Initiating protein droplet generation")
self.write_read({1: "".join(["/1V",str(S_flowS1),",1D",str(V_droplet),",1R\r"]),
4: "".join(["/1V",str(S_flowS1),",1D",str(V_droplet),",1R\r"])})
while self.busy(1,4): sleep(0.1)
self.write_read({1: "".join(["/1V",str(S_flowRV),",1P",str(V_injectR),",1R\r"]),
4: "".join(["/1V",str(S_flowRV),",1P",str(V_injectR),",1R\r"])})
info(" Sample loaded")
info("time to load (s): %r" % (time()-t0))
def run(self):
"""executes a refill, flow, inject, flush, flow cycle"""
t0 = time()
info("%r" % self.positions())
info("executing run...")
self.flow(0.2)
sleep(3.0)
        self.inject()
info("injecting xtals...")
sleep(8.0)
self.flush()
info("flushing xtals...")
info("resume flow...")
info("%r" % self.positions())
info("run time (s): %r" % (time()-t0))
def inject_old(self, V = V_injectX):
"""Assumes flow is active; slow flow from [1], inject using [3], then resume normal flow rate through [3]."""
t0 = time()
#info("Executing inject...")
info("%r" % self.positions())
self.write_read({1: "".join(["/1V",str(S_min),",1F\r"]),
4: "".join(["/1V",str(S_flow),",1D",str(V_injectX),",1R\r"])})
sleep(V/S_flow)
self.write_read({1: "".join(["/1V",str(S_flow),",1F\r"]),
4: "/1TR\r"})
#info("time to swap flow source (s): %r" % (t1-t0))
info("time to inject (s): %r" % (time()-t0))
info("%r" % self.positions())
def flush(self, V = V_flush, S = S_flush):
"""Stops flow, washes crystals out of tubing, then resumes flow."""
t0 = time()
info("Executing flush...")
self.write_read({1: "".join(["/1V",str(S),",1F\r"])})
sleep(V/float(S))
self.write_read({1: "".join(["/1V",str(S_min),",1F\r"])})
sleep(2)
self.write_read({1: "".join(["/1V",str(S_flow),",1F\r"])})
info("time to flush (s): %r" % (time()-t0))
def flush_1(self):
"""Stops flow, washes crystals out of tubing, then resumes flow."""
t0 = time()
info("Executing flush...")
#self.write_read({pid: "/1TR\r" for pid in port})
self.write_read({2: "/1OR\r"}) #Set pump2 valve to "O".
while self.busy(2): sleep(0.1)
#info(" filling capillary with water")
self.write_read({4: "".join(["/1V",str(S_flush),",1D",str(V_flush),",1R\r"])})
while self.busy(4): sleep(0.1)
self.write_read({2: "/1BR\r"}) #Set pump2 valve to "B".
while self.busy(2): sleep(0.1)
info("time to flush (s): %r" % (time()-t0))
#self.flow()
#sleep(1)
#self.inject()
def flush_2(self,N = 4):
"""Stops flow, washes crystals out of tubing, then resumes flow."""
t0 = time()
info("Executing flush...")
self.write_read({pid: "/1TR\r" for pid in port})
self.write_read({2: "/1OR\r"}) #Set pump2 valve to "O".
while self.busy(2): sleep(0.1)
info(" pulling back crystals in capillary 3")
self.write_read({1: "".join(["/1V",str(S_flush),",1D",str(V_injectX),",1R\r"]),
3: "".join(["/1V",str(S_flush),",1P",str(V_injectX),",1R\r"])})
while self.busy(1,3): sleep(0.1)
info(" filling capillary with water")
self.write_read({4: "".join(["/1V",str(S_flush),",1D",str(V_flush),",1R\r"])})
while self.busy(4): sleep(0.1)
info(" swishing water back and forth to dislodge/dissolve crystals")
for i in range(N):
self.write_read({1: "".join(["/1V",str(S_flush),",1D",str(V_flush),",1R\r"]),
4: "".join(["/1V",str(S_flush),",1P",str(V_flush),",1R\r"])})
while self.busy(1,4): sleep(0.1)
self.write_read({1: "".join(["/1V",str(S_flush),",1P",str(V_flush),",1R\r"]),
4: "".join(["/1V",str(S_flush),",1D",str(V_flush),",1R\r"])})
while self.busy(1,4): sleep(0.1)
info(" pushing crystals into capillary 2")
self.write_read({1: "".join(["/1V",str(S_flush),",1D",str(V_flush),",1R\r"]),
2: "".join(["/1V",str(S_flush),",1P",str(V_flush),",1R\r"])})
while self.busy(1,2): sleep(0.1)
self.write_read({2: "/1BR\r"}) #Set pump2 valve to "B".
while self.busy(2): sleep(0.1)
info(" pushing back crystals in capillary 3")
self.write_read({1: "".join(["/1V",str(S_flush),",1P",str(V_injectX),",1R\r"]),
3: "".join(["/1V",str(S_flush),",1D",str(V_injectX),",1R\r"])})
while self.busy(1,3): sleep(0.1)
info("time to flush (s): %r" % (time()-t0))
self.flow()
self.inject()
def refillN(self):
"""Loads syringe 1 and 3."""
t0=time()
info("Executing refill...")
self.write_read({pid: "/1TR\r" for pid in port})
self.write_read({3: "".join(["/1IV",str(S_load),",1A",str(Vol[4]),",1OR\r"]),
1: "".join(["/1IV",str(S_load),",1A",str(Vol[1]),",1OR\r"]),
4: "".join(["/1IV",str(S_load),",1A",str(Vol[1]),",1OR\r"])})
i = -1
while self.busy(1,3):
i += 1
sleep(0.1)
            if i % 20 == 0: info("%r" % self.positions()) # every 2 s
self.valve(2,port = "B")
self.valve(4,port = "I")
info(" time to refill (s): %r" % (time()-t0))
info("%r" % self.valve_read())
def degas(self):
"""increases upstream pressure to remove nucleated air bubbles."""
info("Degassing lines...")
self.valve(2, "O")
sleep(0.1)
self.flow(S=1.0)
sleep(3.0)
self.valve(2, "B")
sleep(0.1)
self.flow()
info("degassing complete, continue flow")
def refill_1(self):
"""Loads syringe 1 and restarts flow."""
t0=time()
info("Executing refill of pump 1...")
self.write_read({pid: "/1TR\r" for pid in port})
self.write_read({1: "".join(["/1IV",str(S_load),",1A",str(Vol[4]),",1OR\r"])})
i = -1
while self.busy(1):
i += 1
sleep(0.1)
            if i % 20 == 0: info("%r" % self.positions()) # every 2 s
self.valve(2,port = "B")
self.valve(4,port = "I")
info(" time to refill 1 (s): %r" % (time()-t0))
self.flow()
def refill_3(self):
"""Loads syringe 3."""
t0=time()
info("Executing refill of pump 3...")
self.write_read({3: "/1TR\r"})
self.write_read({3: "".join(["/1IV",str(S_load),",1A",str(Vol[4]),",1OR\r"])})
i = -1
while self.busy(3):
i += 1
sleep(0.1)
            if i % 20 == 0: info("%r" % self.positions()) # every 2 s
self.valve(2,port = "B")
self.valve(4,port = "I")
info(" time to refill 1 (s): %r" % (time()-t0))
def refill_all(self):
"""Loads syringe 1 and restarts flow."""
t0=time()
info("Executing refill...")
self.write_read({pid: "/1TR\r" for pid in port})
self.write_read({3: "".join(["/1IV",str(S_load),",1A",str(Vol[4]),",1OR\r"]),
1: "".join(["/1IV",str(S_load),",1A",str(Vol[1]),",1OR\r"]),
4: "".join(["/1IV",str(S_load),",1A",str(Vol[1]),",1OR\r"])})
i = -1
while self.busy(1,3):
i += 1
sleep(0.1)
            if i % 20 == 0: info("%r" % self.positions()) # every 2 s
self.valve(2,port = "B")
self.valve(4,port = "I")
info(" time to refill (s): %r" % (time()-t0))
self.flow()
def valve(self,pid,port = "I"):
"""Set port of pump[pid] to 'O', 'I', or 'B'."""
if port == 'i':
port = 'I'
elif port == 'o':
port = 'O'
        elif port == 'b':
            port = 'B'
t0 = time()
self.write_read({pid: "".join(["/1",str(port),"R\r"])})
while self.busy(pid): sleep(0.1)
info("time to rotate valve (s): %r" % (time()-t0))
def empty(self):
"""Empty all syringes; switch all ports to B."""
self.write_read({1: "/1IV25,1A0,1R\r", 2: "/1IV25,1A0,1R\r",
3: "/1IV25,1A0,1R\r", 4: "/1IV25,1A0,1R\r"})
while self.busy(1, 2, 3, 4): sleep(0.1)
self.write_read({1: "/1BR\r", 2: "/1BR\r", 3: "/1BR\r", 4: "/1BR\r"})
    def busy(self, *pids):
        """Returns True if any specified pump is busy. The query (?29) returns
        the pump status, whose 4th byte is 1 or 0 (1 is busy)."""
        for pid in pids:
            try:
                if self.write_read({pid: "/1?29\r"})[pid][4] == "1":
                    return True
            except Exception:
                pass
        return False
def positions(self):
"""Queries positions of all pumps. Returns dict of pids to positions."""
reply = self.write_read({pid: "/1?18R\r" for pid in port})
return {pid: float(reply[pid][4:-3]) for pid in reply}
    def valve_read(self,pids = []):
        """Queries valve positions (?20) of all pumps. Returns a list
        containing a dict of pids to responses."""
        reply = []
        reply.append(self.write_read({pid: "/1?20R\r" for pid in port}))
        return reply
def flow_old(self,S = S_flow):
"""Starts flow pfl changes flow speed on the fly."""
#self.write_read({1: "/1TR\r", 2: "/1TR\r"})
temp = self.positions()
info("%r" % temp)
if self.busy(1,2):
self.write_read({1: "".join(["/1V",str(S),",1F\r"]),
2: "".join(["/1V",str(S),",1F\r"])})
else:
V = min(temp[1],Vol[2]-temp[2])
self.write_read({1: "".join(["/1J1V",str(S),",1D",str(V),",1J0R\r"]),
2: "".join(["/1J1V",str(S),",1P",str(V),",1J0R\r"])})
def purge_12(self):
"""Purge bubbles from capillary using pumps (1,2) to displace 75 uL."""
self.write_read({pid: "/1TR\r" for pid in port})
temp = self.positions()
V = min(temp[1], Vol[2]-temp[2])
if V < 78: self.refill()
self.write_read({pid: "/1TR\r" for pid in port})
info(" purging...")
self.write_read({4: "/1OR\r"}) #Reposition #4 valve port before inflating.
while self.busy(4): sleep(0.1)
self.inflate(V_inflate)
while self.busy(2): sleep(0.1)
self.write_read({1: "".join(["/1J1V",str(S_prime),",1D",str(V_purge),",1J0R\r"]),
2: "".join(["/1J1V",str(S_prime),",1P",str(V_purge),",1J0R\r"])})
i = -1
while self.busy(1, 2):
i += 1
sleep(0.1)
            if i % 20 == 0: info("%r" % self.positions()) # every 2 s
self.refill()
def purge_32(self):
"""Purge bubbles from capillary using pumps (3,2) to displace 75 uL."""
self.write_read({pid: "/1TR\r" for pid in port})
temp = self.positions()
V = min(temp[3], Vol[2]-temp[2])
if V < 78: self.refill()
self.write_read({pid: "/1TR\r" for pid in port})
info(" purging...")
self.write_read({4: "/1OR\r"}) #Reposition #4 valve port before inflating.
while self.busy(4): sleep(0.1)
self.inflate(V_inflate)
while self.busy(2): sleep(0.1)
self.write_read({3: "".join(["/1J1V",str(S_prime),",1D",str(V_purge),",1J0R\r"]),
2: "".join(["/1J1V",str(S_prime),",1P",str(V_purge),",1J0R\r"])})
i = -1
while self.busy(2, 3):
i += 1
sleep(0.1)
            if i % 20 == 0: info("%r" % self.positions()) # every 2 s
self.refill()
def run_create_pressure(self,N):
start_new_thread(self.create_low_pressure,(N,))
def run_create_pressure_new(self,N):
start_new_thread(self.create_low_pressure_new,(N,))
def create_low_pressure_new(self,N):
from cavro_centris_syringe_pump_IOC import volume, port
for i in range(N):
port[1].value = 1
while port[1].moving: sleep(0.1)
volume[1].value = 250
while volume[1].moving: sleep(0.1)
port[1].value = 0
while port[1].moving: sleep(0.1)
volume[1].value = 0
while volume[1].moving: sleep(0.1)
    def create_low_pressure(self, N = 2):
        for i in range(N):
            self.valve(2,'I')
            while self.busy(2): sleep(0.1)
            self.move_abs(2,250)
            while self.busy(2): sleep(0.1)
            self.valve(2,'O')
            while self.busy(2): sleep(0.1)
            self.move_abs(2,0)
            while self.busy(2): sleep(0.1)
    def fill(self, pid = 1):
        while self.busy(pid): sleep(0.1)
        self.valve(pid,'I')
        self.move_abs(pid,250)
def prime_old(self):
"""Use after init; primes syringes and tubing (1,2) and (3,4) at S_load flow rate."""
info(" priming...")
self.write_read({pid: "/1TR\r" for pid in port})
self.write_read({4: "/1OR\r"})
while self.busy(4): sleep(0.1)
self.inflate(V_inflate)
while self.busy(2): sleep(0.1)
self.write_read({1: "".join(["/1J1V",str(S_load),",1D225,1J0R\r"]),
2: "".join(["/1J1V",str(S_load),",1P225,1J0R\r"]),
3: "".join(["/1J1V",str(S_load),",1D225,1J0R\r"]),
4: "".join(["/1J1V",str(S_load),",1P225,1JBR\r"])})
i = -1
while self.busy(1, 2, 3, 4):
i += 1
sleep(0.1)
            if i % 20 == 0: info("%r" % self.positions()) # every 2 s
self.refill()
def inflate(self, V = V_inflate):
"""Inflate tubing."""
self.write_read({1: "/1TR\r", 2: "/1TR\r"})
self.write_read({2: "".join(["/1J1V",str(S_flush),",1D",str(V),",1J0R\r"])})
while self.busy(2): sleep(0.1)
def reinject(self,V = V_flush):
"""Solution from pump 2 is rapidly pushed into the collapsible tubing;
then flow is continued."""
t0 = time()
self.write_read({pid: "/1TR\r" for pid in port})
self.write_read({4: "/1OR\r"})
while self.busy(4): sleep(0.1)
self.write_read({2: "".join(["/1V",str(S_flush),",1D",str(V),",1R\r"])})
while self.busy(2): sleep(0.1)
info("time to reinject (s): %r" % (time()-t0))
self.write_read({4: "/1BR\r"})
while self.busy(4): sleep(0.1)
self.flow()
if __name__ == "__main__":
    import logging; logging.basicConfig(filename=gettempdir()+'/syringe_pump_DL.log',level=logging.INFO,format="%(levelname)s: %(message)s")
p = PumpController()
self = p # for debugging
print
print("p.init()")
print("p.flow()")
print("p.inject_new() # V = V_injectX")
print("p.flush() # V = V_flush, S = S_flush")
print("p.refillF()")
print("p.pressure() # strokes = -1")
print("p.positions()")
print("p.valve(2,'O') # pid, 'O', 'I', or 'B'")
print("p.valve_read()")
print("p.move_rel(3,-1,1) # pid,position,speed")
print("p.refill_1()")
print("p.refill_3()")
print("p.abort()")
# p.write_read({4:"/1?20R\r"}) # query valve position
# p.write_read({1: "/1IR\r"}) # Move pump1 valve to Input
# p.write_read({2: "/1V0.3,1F\r"}) # Change speed to 0.3 uL/s
# sum(p.positions().values()[:2]) # Returns sum of first two values
|
{"hexsha": "bd0b3d509fd42180f1b1ecac7af2aeec5bce0e05", "size": 31552, "ext": "py", "lang": "Python", "max_stars_repo_path": "cavro_centris_syringe_pump.py", "max_stars_repo_name": "bopopescu/Lauecollect", "max_stars_repo_head_hexsha": "60ae2b05ea8596ba0decf426e37aeaca0bc8b6be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cavro_centris_syringe_pump.py", "max_issues_repo_name": "bopopescu/Lauecollect", "max_issues_repo_head_hexsha": "60ae2b05ea8596ba0decf426e37aeaca0bc8b6be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-22T21:28:31.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-22T21:39:12.000Z", "max_forks_repo_path": "cavro_centris_syringe_pump.py", "max_forks_repo_name": "bopopescu/Lauecollect", "max_forks_repo_head_hexsha": "60ae2b05ea8596ba0decf426e37aeaca0bc8b6be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-06T15:06:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-20T02:03:22.000Z", "avg_line_length": 43.9442896936, "max_line_length": 140, "alphanum_fraction": 0.5007923428, "include": true, "reason": "from numpy", "num_tokens": 9620}
|
"""A binary to train using a single GPU.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os.path
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.framework.python.framework import checkpoint_utils
def restore_vgg(sess, path):
caffe_weights_fn = path
if sys.version_info[0] >= 3:
caffe_weights = np.load(caffe_weights_fn, encoding = 'latin1').item() # fuck pickles
else:
caffe_weights = np.load(caffe_weights_fn).item()
#caffe_weights = np.load(caffe_weights_fn).item()
var_to_restore = []
vgg_var_list = [l.split("\n")[0] for l in open('meta/net_vars/vgg_var.txt').readlines()]
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
for var in var_list:
if var.op.name in vgg_var_list:
var_to_restore.append(var)
#print(var.op.name)
if len(var.op.name.split("/"))!=3:
print('No load %s' %var.op.name)
continue
scope, dummy, var_type = var.op.name.split("/")
#print('scope: %s, var_type: %s' %(scope, var_type))
if var_type=='kernel':
sess.run(var.assign(caffe_weights[scope]['weights']))
else:
sess.run(var.assign(caffe_weights[scope]['biases']))
def restore_alexnet(sess, path):
print('Weights initialization ...')
caffe_weights_fn = path
if sys.version_info[0] >= 3:
caffe_weights = np.load(caffe_weights_fn, encoding = 'latin1').item() # fuck pickles
else:
caffe_weights = np.load(caffe_weights_fn).item()
var_to_restore = []
nosplit_list = [l.split("\n")[0] for l in open('meta/net_vars/nosplit_var.txt').readlines()]
split_list = [l.split("\n")[0] for l in open('meta/net_vars/split_var.txt').readlines()]
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
for var in var_list:
if len(var.op.name.split("/"))!=3:
#print('No load %s' %var.op.name)
continue
if var.op.name in nosplit_list:
var_to_restore.append(var)
#print(var.op.name)
scope, dummy, var_type = var.op.name.split("/")
#print('scope: %s, var_type: %s' %(scope, var_type))
if var_type=='kernel':
sess.run(var.assign(caffe_weights[scope]['weights']))
else:
sess.run(var.assign(caffe_weights[scope]['biases']))
if var.op.name in split_list:
var_to_restore.append(var)
#print(var.op.name)
scope, dummy, var_type = var.op.name.split("/")
scope_root = scope[:-2]
split = int(scope[-1])
#print('\nscope: %s, var_type: %s' %(scope, var_type))
if var_type=='kernel':
if split==1:
begin = 0
end = int(caffe_weights[scope_root]['weights'].shape[3]/2)
#print('%d -> %d' %(begin, end))
#print(caffe_weights[scope_root]['weights'].shape)
w = caffe_weights[scope_root]['weights'][:,:,:,0:end]
else:
begin = int(caffe_weights[scope_root]['weights'].shape[3]/2)
#print('%d ->' %(begin))
#print(caffe_weights[scope_root]['weights'].shape)
w = caffe_weights[scope_root]['weights'][:,:,:,begin:]
sess.run(var.assign(w))
else:
if split==1:
begin = 0
end = int(caffe_weights[scope_root]['biases'].shape[0]/2)
#print('%d -> %d' %(begin, end))
#print(caffe_weights[scope_root]['biases'].shape)
b = caffe_weights[scope_root]['biases'][begin:end]
else:
begin = int(caffe_weights[scope_root]['biases'].shape[0]/2)
#print('%d ->' %(begin))
#print(caffe_weights[scope_root]['biases'].shape)
b = caffe_weights[scope_root]['biases'][begin:]
sess.run(var.assign(b))
print('Weights initialization Done')
|
{"hexsha": "d10b26bfec967e357506542a5714f98130513c48", "size": 4317, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/init_weights.py", "max_stars_repo_name": "abenbihi/elf", "max_stars_repo_head_hexsha": "fb63b7ca316a4da93e75421abbb05663d1c5fe7e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2019-08-30T06:39:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-22T12:32:32.000Z", "max_issues_repo_path": "tools/init_weights.py", "max_issues_repo_name": "abenbihi/elf", "max_issues_repo_head_hexsha": "fb63b7ca316a4da93e75421abbb05663d1c5fe7e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2019-12-13T02:16:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:31:05.000Z", "max_forks_repo_path": "tools/init_weights.py", "max_forks_repo_name": "abenbihi/elf", "max_forks_repo_head_hexsha": "fb63b7ca316a4da93e75421abbb05663d1c5fe7e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-08-15T16:02:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-24T11:57:10.000Z", "avg_line_length": 41.9126213592, "max_line_length": 96, "alphanum_fraction": 0.5615010424, "include": true, "reason": "import numpy", "num_tokens": 1009}
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def cross_entropy2d(input, target, weight=None, size_average=True):
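    """2D cross-entropy for semantic segmentation: input is (N, C, H, W)
    logits and target is an (N, H, W) map of integer class labels. Sizes
    are reconciled by nearest/bilinear upsampling, then both tensors are
    flattened for F.cross_entropy. The hard-coded class weights assume
    three classes."""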
n, c, h, w = input.size()
nt, ht, wt = target.size()
weights = [5.2406, 1.0, 0.0088]
class_weights = torch.FloatTensor(weights).cuda()
# Handle inconsistent size between input and target
if h > ht and w > wt: # upsample labels
        target = target.unsqueeze(1).float()  # nearest upsampling needs a float tensor
        target = F.upsample(target, size=(h, w), mode="nearest")
        target = target.squeeze(1).long()
elif h < ht and w < wt: # upsample images
input = F.upsample(input, size=(ht, wt), mode="bilinear")
    elif h != ht or w != wt:
raise Exception("Only support upsampling")
input = input.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
target = target.view(-1)
loss = F.cross_entropy(
input, target, weight=class_weights, size_average=size_average, ignore_index=250
)
return loss
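# Minimal usage sketch, assuming a CUDA device is available (the class
# weights above are moved to the GPU): logits are (N, C, H, W) with C = 3
# to match the hard-coded weights, and the target is an (N, H, W) map of
# integer class indices. Runs only when this file is executed directly.
if __name__ == "__main__":
    logits = torch.randn(2, 3, 64, 64).cuda()
    target = torch.randint(0, 3, (2, 64, 64)).cuda()
    print(cross_entropy2d(logits, target).item())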
|
{"hexsha": "fd4871d75100ec0c97334d83d6bfd30241af6717", "size": 987, "ext": "py", "lang": "Python", "max_stars_repo_path": "training/rootnav2/loss/loss.py", "max_stars_repo_name": "robail-yasrab/6-RootNav-2.0", "max_stars_repo_head_hexsha": "3e973c0f7fc34b3938a2294e858d1a0de76e9f0f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2019-07-25T10:15:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T03:28:56.000Z", "max_issues_repo_path": "training/rootnav2/loss/loss.py", "max_issues_repo_name": "rootnav2/RootNav-2.0", "max_issues_repo_head_hexsha": "3e973c0f7fc34b3938a2294e858d1a0de76e9f0f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-08-07T15:56:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T01:28:22.000Z", "max_forks_repo_path": "training/rootnav2/loss/loss.py", "max_forks_repo_name": "rootnav2/RootNav-2.0", "max_forks_repo_head_hexsha": "3e973c0f7fc34b3938a2294e858d1a0de76e9f0f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2019-07-25T10:15:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T09:14:49.000Z", "avg_line_length": 32.9, "max_line_length": 88, "alphanum_fraction": 0.6464032421, "include": true, "reason": "import numpy", "num_tokens": 276}
|
import numpy as np
from typing import Tuple
from numpy.typing import ArrayLike
from liegroups.numpy import SO2, SE2, SO3, SE3
from numpy import sin, cos
def angle_to_se2(a: float, theta: float) -> SE2:
"""Transform a single set of DH parameters into an SE2 matrix
:param a: link length
:param theta: rotation
:returns: SE2 matrix
:rtype: lie.SE2Matrix
"""
# R = SO2.from_angle(theta) # TODO: active or passive (i.e., +/- theta?)
R = SO2.from_angle(theta)
return SE2(R, R.dot(np.array([a, 0.0]))) # TODO: rotate the translation or not?
def skew(x):
"""
Creates a skew symmetric matrix from vector x
"""
X = np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
return X
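# Note: skew(x).dot(y) equals np.cross(x, y);
# e.g. skew([1, 0, 0]).dot([0, 1, 0]) -> [0, 0, 1].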
def trans_axis(t, axis="z") -> SE3:
if axis == "z":
return SE3(SO3.identity(), np.array([0, 0, t]))
if axis == "y":
return SE3(SO3.identity(), np.array([0, t, 0]))
if axis == "x":
return SE3(SO3.identity(), np.array([t, 0, 0]))
raise Exception("Invalid Axis")
def rot_axis(theta, axis="z") -> SE3:
if axis == "z":
return SE3(SO3.rotz(theta), np.array([0, 0, 0]))
if axis == "y":
return SE3(SO3.roty(theta), np.array([0, 0, 0]))
if axis == "x":
return SE3(SO3.rotx(theta), np.array([0, 0, 0]))
raise Exception("Invalid Axis")
def max_min_distance_revolute(r, P, C, N):
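    """Maximum and minimum distances from point P to a circle of radius r
    centred at C with normal N (assumes N is a unit vector).
    Returns (d_max, d_min)."""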
delta = P-C
d_min_s = N.dot(delta)**2 + (np.linalg.norm(np.cross(N, delta)) - r)**2
if d_min_s > 0:
d_min = np.sqrt(d_min_s)
else:
d_min = 0
d_max_s = N.dot(delta)**2 + (np.linalg.norm(np.cross(N, delta)) + r)**2
if d_max_s > 0:
d_max = np.sqrt(d_max_s)
else:
d_max = 0
return d_max, d_min
def best_fit_transform(A: ArrayLike, B: ArrayLike) -> Tuple[ArrayLike, ArrayLike]:
"""
Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions
Input:
A: Nxm numpy array of corresponding points
B: Nxm numpy array of corresponding points
Returns:
R: mxm rotation matrix
t: mx1 translation vector
"""
# try:
assert A.shape == B.shape
# except AssertionError:
# print("A: {:}".format(A))
# print("B: {:}".format(B))
# get number of dimensions
m = A.shape[1]
# translate points to their centroids
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
AA = A - centroid_A
BB = B - centroid_B
# rotation matrix
H = np.dot(AA.T, BB)
U, S, Vt = np.linalg.svd(H)
R = np.dot(Vt.T, U.T)
# translation
#
# special reflection case
# if np.linalg.det(R) < 0:
# print("det(R) < R, reflection detected!, correcting for it ...\n")
# Vt[2, :] *= -1
# R = np.dot(Vt.T, U.T)
t = centroid_B.T - np.dot(R, centroid_A.T)
return R, t
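# Minimal self-check sketch (illustrative): apply a known planar rotation
# and translation to random points, then recover them with
# best_fit_transform. Runs only when this file is executed directly.
if __name__ == "__main__":
    np.random.seed(0)
    A = np.random.randn(20, 3)
    theta = 0.3
    R_true = np.array([[cos(theta), -sin(theta), 0.0],
                       [sin(theta),  cos(theta), 0.0],
                       [0.0,         0.0,        1.0]])
    t_true = np.array([1.0, -2.0, 0.5])
    B = A.dot(R_true.T) + t_true
    R_est, t_est = best_fit_transform(A, B)
    assert np.allclose(R_est, R_true)
    assert np.allclose(t_est, t_true)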
|
{"hexsha": "60adb82750aad0b72acbf4cbcbd16f6d413fce97", "size": 2884, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphik/utils/geometry.py", "max_stars_repo_name": "utiasSTARS/GraphIK", "max_stars_repo_head_hexsha": "c2d05386bf9f9baf8ad146125bfebc3b73fccd14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-08T23:26:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-08T23:26:03.000Z", "max_issues_repo_path": "graphik/utils/geometry.py", "max_issues_repo_name": "utiasSTARS/GraphIK", "max_issues_repo_head_hexsha": "c2d05386bf9f9baf8ad146125bfebc3b73fccd14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphik/utils/geometry.py", "max_forks_repo_name": "utiasSTARS/GraphIK", "max_forks_repo_head_hexsha": "c2d05386bf9f9baf8ad146125bfebc3b73fccd14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5544554455, "max_line_length": 113, "alphanum_fraction": 0.580443828, "include": true, "reason": "import numpy,from numpy", "num_tokens": 905}
|
function funcplus(func1, funcs...)
function (k...)
v = func1(k...)
for func in funcs
v += func(k...)
end
v
end
end
function functimes(func1, funcs...)
function (k...)
v = func1(k...)
for func in funcs
v *= func(k...)
end
v
end
end
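# Minimal usage sketch (illustrative); runs only when this file is
# executed directly. `funcplus` sums and `functimes` multiplies the
# results of the wrapped functions applied to the same arguments.
if abspath(PROGRAM_FILE) == @__FILE__
    h = funcplus(sin, cos)
    @assert h(0.5) == sin(0.5) + cos(0.5)
    g = functimes(x -> x + 1, x -> x - 1)
    @assert g(3) == (3 + 1) * (3 - 1)
end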
|
{"hexsha": "61c6b56a7c7f7ed5da3ab630d11c71a6e8945046", "size": 284, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/function.jl", "max_stars_repo_name": "kyungminlee/Kore.jl", "max_stars_repo_head_hexsha": "0567080ee8caf6005a83dc6e1f0e37a4ddb321af", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/function.jl", "max_issues_repo_name": "kyungminlee/Kore.jl", "max_issues_repo_head_hexsha": "0567080ee8caf6005a83dc6e1f0e37a4ddb321af", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/function.jl", "max_forks_repo_name": "kyungminlee/Kore.jl", "max_forks_repo_head_hexsha": "0567080ee8caf6005a83dc6e1f0e37a4ddb321af", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.2, "max_line_length": 35, "alphanum_fraction": 0.5105633803, "num_tokens": 86}
|
import geopandas
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import cm
import matplotlib.ticker as mtick
from matplotlib.ticker import MaxNLocator
import datetime
import datadotworld as dw
def import_geo_data(filename, index_col = "Date", rename_FIPS = "FIPS"):
# import county level shapefile
map_data = geopandas.read_file(filename = filename,
index_col = index_col)
map_data.rename(columns={"COUNTYFP":rename_FIPS, "State":"state"},
inplace = True)
map_data[rename_FIPS] = map_data["STATEFP"].astype(str) + \
map_data[rename_FIPS].astype(str)
map_data[rename_FIPS] = map_data[rename_FIPS].astype(np.int64)
map_data.set_index("fips_code", inplace=True)
cea_data = map_data.to_crs({"proj": "cea"})
map_data["area (sq. km)"] = cea_data.area / 10 ** 6
return map_data
def import_covid_data(filename, fips_name):
# Load COVID19 county data using datadotworld API
# Data provided by Johns Hopkins, file provided by Associated Press
dataset = dw.load_dataset("associatedpress/johns-hopkins-coronavirus-case-tracker")
covid_data = dataset.dataframes["2_cases_and_deaths_by_county_timeseries"]
covid_data = covid_data[covid_data[fips_name] < 57000]
covid_data[fips_name] = covid_data[fips_name].astype(int)
covid_data.set_index([fips_name, "date"], inplace = True)
covid_data.loc[:, "state_abr"] = ""
for state, abr in state_dict.items():
covid_data.loc[covid_data["state"] == state, "state_abr"] = abr
return covid_data
def create_covid_geo_dataframe(covid_data, map_data):
# create geopandas dataframe with multiindex for date
# original geopandas dataframe had no dates, so copies of the df are
# stacked vertically, with a new copy for each date in the covid_data index
#(dates is a global)
i = 0
for date in dates:
df = covid_data[covid_data.index.get_level_values("date")==date]
counties = df.index.get_level_values("fips_code")
agg_df = map_data.loc[counties]
agg_df["date"] = df.index.get_level_values("date")[0]
if i == 0:
matching_gpd = geopandas.GeoDataFrame(agg_df, crs = map_data.crs)
i += 1
else:
matching_gpd = matching_gpd.append(agg_df, ignore_index = False)
matching_gpd.reset_index(inplace=True)
matching_gpd.set_index(["fips_code","date"], inplace = True)
for key, val in covid_data.items():
matching_gpd[key] = val
matching_gpd["Location"] = matching_gpd["NAME"] + ", " + \
matching_gpd["state_abr"]
return matching_gpd
def create_state_dataframe(covid_data):
states = list(state_dict.keys())
states.remove("District of Columbia")
    state_data = covid_data.reset_index().set_index(["date", "state", "fips_code"]).groupby(["state", "date"]).sum(numeric_only = True)
drop_cols = ["uid", "location_name", "cumulative_cases_per_100_000",
"cumulative_deaths_per_100_000", "new_cases_per_100_000",
"new_deaths_per_100_000",'new_cases_rolling_7_day_avg',
'new_deaths_rolling_7_day_avg']
# These values will be recalculated since the sum of the county values
# would need to be weighted to be meaningful
state_data.drop(drop_cols, axis = 1, inplace = True)
state_data["location_type"] = "state"
for state in states:
state_data.loc[state_data.index.get_level_values("state") == state, "Location"] = state
state_data.loc[state_data.index.get_level_values("state") == state, "state_abr"] = state_dict[state]
return state_data
def create_new_vars(covid_data, moving_average_days):
# covid_data["Population / Sq Km"] = covid_data["total_population"].div(covid_data['area (sq. km)'])
for key in ["cases", "deaths"]:
cap_key = key.title()
covid_data[cap_key + " per Million"] = covid_data["cumulative_" + key].div(covid_data["total_population"]).mul(10 ** 6)
covid_data["Daily " + cap_key + " per Million"] = \
covid_data["cumulative_" + key ].groupby(covid_data.index.names[0])\
.diff(1).div(covid_data["total_population"]).mul(10 ** 6)
covid_data["Daily " + cap_key + " per Million MA"] = covid_data["Daily " + \
cap_key + " per Million"].rolling(moving_average_days).mean()
def create_zero_day_dict(covid_data, start_date):
zero_day_dict = {}
for key in ["Cases", "Deaths"]:
zero_day_dict[key + " per Million"] = {}
zero_day_dict["Daily " + key + " per Million MA"] = {}
day_zero_val = {}
for key in zero_day_dict:
day_zero_val[key] = 2 if "Deaths" in key else 10
entities = sorted(list(set(covid_data.index.get_level_values(0))))
for key in zero_day_dict.keys():
vals = covid_data[key]
thresh_vals = covid_data["Deaths per Million"] if "Deaths" in key else \
covid_data["Cases per Million"]
dz_val = day_zero_val[key]
for entity in entities:
dpc = vals[vals.index.get_level_values(0) == entity][thresh_vals > dz_val]
dpc = dpc[dpc.index.get_level_values("date") > start_date]
zero_day_dict[key][entity] = dpc.copy()
print(entity)
return zero_day_dict, day_zero_val
def plot_zero_day_data(state_name, state, covid_data, zero_day_dict,
day_zero_val, keys, entity_type, entities, pp,
n_largest = 10, bold_entities = None, daily = False):
max_x = 0
fig, a = plt.subplots(2,1, figsize = (48, 32))
for key in keys:
val_key = "Daily " + key + " MA" if daily else key
if len(entities) > 0:
i = 0
j = 0
ax = a[0] if "Cases" in key else a[1]
max_x, max_y = plot_double_lines(ax, zero_day_dict, day_zero_val, val_key, entities, daily)
locs, top_locs = identify_plot_locs(state_name, covid_data, bold_entities)
for entity in entities:
vals = zero_day_dict[val_key][entity]
if len(vals) > 0 and entity != "District of Columbia":
loc = locs[locs.index.get_level_values(entity_type) == entity]["Location"][0]
i, j = plot_lines_and_text(ax, vals, state, state_dict, loc,
top_locs, colors_dict, i, j)
# set plot attributes
if daily:
ax.set_ylim(bottom = 0, top = max_y * 1.08)
else:
ax.set_yscale('log')
                if not np.isnan(max_y):
ax.set_ylim(bottom = np.e ** (np.log(day_zero_val[key])), top = np.e ** (np.log(max_y * 4) ))
vals = ax.get_yticks()
ax.set_yticklabels([int(y) if y >= 1 else round(y,1) for y in vals])
ax.set_ylabel(val_key)
ax.set_xlim(right = max_x + 10)
ax.set_xlabel("Days Since " + key + " Exceeded " + str(day_zero_val[key]))
title = str(end_date)[:10] + "\n7 Day Moving Average" + "\nCOVID-19 in " + state_name if daily else str(end_date)[:10] + "\nCOVID-19 in " + state_name
y_pos = .987 if daily else .95
fig.suptitle(title , y=y_pos, fontsize = 75)
pp.savefig(fig, bbox_inches = "tight")
plt.savefig("statePlots/" + state + " " + val_key + ".png", bbox_inches = "tight")
plt.show()
plt.close()
def plot_double_lines(ax, zero_day_dict, day_zero_val, key, entities, daily):
max_x = max([len(zero_day_dict[key][entity]) for entity in entities])
max_y = max([zero_day_dict[key][entity].max() for entity in entities])
if not daily:
double_lines ={}
for i in [2,3,5]:
double_lines[i] = [day_zero_val[key] * 2 ** (k/i) for k in range(9 * i)]
ax.plot(double_lines[i], label = None,
alpha = .2, color = "k", linewidth = 5)
ax.text(len(double_lines[i]),
double_lines[i][len(double_lines[i])-1],
"X2 every \n" + str(i) + " days", alpha = .2)
max_y2 = max(val[-1] for val in double_lines.values())
max_y = max_y if max_y > max_y2 else max_y2
return max_x, max_y
def identify_plot_locs(state_name, covid_data, bold_entities):
if state_name == "United States":
locs = covid_data
top_locs = covid_data[covid_data["state_abr"].isin(bold_entities)]
else:
locs = covid_data[covid_data["state"] == state_name][["Location", "state_abr", "total_population"]]
top_locs = locs[locs.index.get_level_values("date")==locs.index.get_level_values("date")[0]]
top_locs = top_locs[top_locs["total_population"] >= top_locs["total_population"].nlargest(n_largest).min()]["Location"]
return locs, top_locs
def plot_lines_and_text(ax, vals, state, state_dict, loc, top_locs, colors_dict,
i, j):
def select_color(loc, top_locs, colors_dict, colors, i, j):
val = i if loc in top_locs.values else j
if loc not in colors_dict.keys():
colors_dict[loc] = colors[val % 10]
color = colors_dict[loc]
if loc in top_locs.values: i += 1
else: j += 1
return color, i, j
color, i, j = select_color(loc, top_locs, colors_dict, colors, i, j)
label = state_dict[loc] if state in "U.S.A." else loc[:-4].replace(" ", "\n")
linewidth, ls, fontsize, alpha = (6, "-", 34, 1) if loc in top_locs.values else (2, "--", 24, .6)
ax.plot(vals.values, label = label,
ls = ls, linewidth = linewidth, alpha = alpha, color = color)
ax.text(x = len(vals.values) - 1, y = vals.values[-1], s = label,
fontsize = fontsize, color = color, alpha = alpha)
return i, j
def select_data_within_bounds(data, minx, miny, maxx, maxy):
data = data[data.bounds["maxx"] <= maxx]
data = data[data.bounds["maxy"] <= maxy]
data = data[data.bounds["minx"] >= minx]
data = data[data.bounds["miny"] >= miny]
return data
def plot_map(i, *fargs):
ax.clear()
date = dates[i]
# cmap = cm.get_cmap('YlOrBr', 8)
cmap = cm.get_cmap('Reds', 4)
vmin = 1 if "Deaths" in key else 10
print(key, date)
plt.cm.ScalarMappable(cmap=cmap, norm=cm.colors.LogNorm(vmin=vmin,
vmax =vmax))#round(vmax, len(str(vmax))-1)))
plot_df = val[val.index.get_level_values("date")==date]
plot_df.plot(ax=ax, cax = ax, column=key, vmin=vmin ,vmax = vmax,
cmap = cmap, legend=False, linewidth=.5, edgecolor='lightgrey',
norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax))
ax.set_title(str(date)[:10] + "\n" + "COVID-19 in the U.S.", fontsize = 30)
ax.axis("off")
def init():
# Create colorbar as a legend
cmap = cm.get_cmap('Reds', 4)
vmin = 1 if "Deaths" in key else 10
print(vmin, vmax)
size = "5%"
sm = plt.cm.ScalarMappable(cmap=cmap, norm=cm.colors.LogNorm(vmin=vmin,
vmax =vmax))#round(vmax, len(str(vmax))-1)))
# empty array for the data range
sm._A = []
# add the colorbar to the figure
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size = size, pad = 0.1)
cbar = fig.colorbar(sm, cax=cax, cmap = cmap)
cbar.ax.tick_params(labelsize=18)
vals = list(cbar.ax.get_yticks())
vals.append(vmax)
print(vals)
cbar.ax.yaxis.set_major_formatter(mtick.LogFormatter())
cbar.ax.set_yticklabels([int(x) for x in vals])
cbar.ax.set_ylabel(key, fontsize = 20)
# I maintained this dictionary to use the state abbrevations in the names of
# saved files.
state_dict = {
'Alabama': 'AL', 'Alaska': 'AK', 'Arizona': 'AZ',
'Arkansas': 'AR', 'California': 'CA', 'Colorado': 'CO', 'Connecticut': 'CT',
'Delaware': 'DE', 'District of Columbia': 'DC', 'Florida': 'FL',
'Georgia': 'GA', 'Hawaii': 'HI', 'Idaho': 'ID', 'Illinois': 'IL',
'Indiana': 'IN', 'Iowa': 'IA','Kansas': 'KS', 'Kentucky': 'KY',
'Louisiana': 'LA', 'Maine': 'ME', 'Maryland': 'MD', 'Massachusetts': 'MA',
'Michigan': 'MI', 'Minnesota': 'MN', 'Mississippi': 'MS', 'Missouri': 'MO',
'Montana': 'MT', 'Nebraska': 'NE', 'Nevada': 'NV', 'New Hampshire': 'NH',
'New Jersey': 'NJ', 'New Mexico': 'NM', 'New York': 'NY', 'North Carolina': 'NC',
'North Dakota': 'ND', 'Ohio': 'OH', 'Oklahoma': 'OK',
'Oregon': 'OR', 'Pennsylvania': 'PA', 'Rhode Island': 'RI',
'South Carolina': 'SC', 'South Dakota': 'SD', 'Tennessee': 'TN', 'Texas': 'TX',
'Utah': 'UT', 'Vermont': 'VT', 'Virginia': 'VA',
'Washington': 'WA', 'West Virginia': 'WV', 'Wisconsin': 'WI', 'Wyoming': 'WY'
}
plt.rcParams['axes.ymargin'] = 0
plt.rcParams['axes.xmargin'] = 0
plt.rcParams.update({'font.size': 32})
if "data_processed" not in locals():
fips_name = "fips_code"
covid_file = "COVID19DataAP.csv"
# rename_FIPS matches map_data FIPS with COVID19 FIPS name
map_data = import_geo_data(filename = "countiesWithStatesAndPopulation.shp",
index_col = "Date", rename_FIPS = fips_name)
covid_data = import_covid_data(filename = covid_file, fips_name = fips_name)
# dates is global, will be called in create_covid_geo_dataframe()
# and will be used later
dates = sorted(list(set(covid_data.index.get_level_values("date"))))
covid_data = create_covid_geo_dataframe(covid_data, map_data)
state_data = create_state_dataframe(covid_data)
moving_average_days = 7
create_new_vars(covid_data, moving_average_days)
create_new_vars(state_data, moving_average_days)
start_date = "03-15-2020"
end_date = dates[-1]
county_zero_day_dict, day_zero_val = create_zero_day_dict(covid_data, start_date)
state_zero_day_dict, day_zero_val = create_zero_day_dict(state_data, start_date)
# once data is processed, it is saved in the memory
# the if statement at the top of this block of code instructs the computer
# not to repeat these operations
data_processed = True
keys = ["Cases per Million", "Deaths per Million"]
lines= {}
colors = ["C" + str(i) for i in range(10)]
colors_dict = {}
pp = PdfPages("covidDataByState.pdf")
n_largest = 10
for daily in [True, False]:
if not daily:
for state_name, state in state_dict.items():
state_fips = sorted(list(set(covid_data[covid_data["state_abr"] == state].index.get_level_values("fips_code").copy())))
plot_zero_day_data(state_name, state, covid_data, county_zero_day_dict,
day_zero_val, keys, "fips_code", state_fips, pp,
n_largest, daily = daily)
plot_zero_day_data("United States", "U.S.A", state_data, state_zero_day_dict,
day_zero_val, keys, "state", state_dict.keys(), pp,
bold_entities = ["IA", "MN", "NE", "ND","SD", "WI"],
daily = daily)
pp.close()
if "map_bounded" not in locals():
minx = -125
miny = 25
maxx = -60
maxy = 52
covid_map_data = select_data_within_bounds(covid_data, minx, miny, maxx, maxy)
map_bounded = True
mpl.rcParams['animation.embed_limit'] = 200
plot_dates = sorted(list(set(covid_data[covid_data.index.get_level_values("date") > start_date].index.get_level_values("date"))))
#dates = plot_dates[plot_dates.index("2020-03-17"):]
dates = plot_dates[31*3*-1 -1::3]
keys.append("Daily Cases per Million MA")
keys.append("Daily Deaths per Million MA")
for key in keys:
val = covid_map_data
vmax = val[key][val.index.get_level_values("date").isin(dates)].max()
val[key] = val[key].astype(float)
fig, ax = plt.subplots(figsize=(18,8),
subplot_kw = {'aspect': 'equal'})
plt.rcParams.update({"font.size": 30})
plt.xticks(fontsize = 25)
plt.yticks(fontsize = 25)
frames=[i for i in range(len(dates))]
anim = FuncAnimation(fig, plot_map, frames = frames,
blit = False, init_func = init, interval=300,
fargs = (ax, val, vmax, key))
with open(key.replace("/", "-")+ ".html", "w") as f:
print(anim.to_html5_video(), file=f)
plt.close()
|
{"hexsha": "eb2edf7926038a269011df1fddea95488cc1520d", "size": 16646, "ext": "py", "lang": "Python", "max_stars_repo_path": "Projects/COVID19/createCOVID19StateVisualizationsByCountyAndState.py", "max_stars_repo_name": "hunterluepke/Learn-Python-for-Stats-and-Econ", "max_stars_repo_head_hexsha": "d580a8e27ba937fc8401ac6d0714b6488ac8bbb6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-01-10T18:54:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T20:07:20.000Z", "max_issues_repo_path": "Projects/COVID19/createCOVID19StateVisualizationsByCountyAndState.py", "max_issues_repo_name": "hunterluepke/Learn-Python-for-Stats-and-Econ", "max_issues_repo_head_hexsha": "d580a8e27ba937fc8401ac6d0714b6488ac8bbb6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Projects/COVID19/createCOVID19StateVisualizationsByCountyAndState.py", "max_forks_repo_name": "hunterluepke/Learn-Python-for-Stats-and-Econ", "max_forks_repo_head_hexsha": "d580a8e27ba937fc8401ac6d0714b6488ac8bbb6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-01-24T17:11:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-11T01:53:57.000Z", "avg_line_length": 45.4808743169, "max_line_length": 154, "alphanum_fraction": 0.6212303256, "include": true, "reason": "import numpy", "num_tokens": 4469}
|
import numpy as np
import matplotlib.pyplot as plt
deepsea="/home/fast/onimaru/encode/deepsea/deepsea_pred.txt"
deepshark="/home/fast/onimaru/encode/deepsea/deepshark_Tue_Apr_17_183529_2018.ckpt-57883_prediction.log"
deepsea_dict={}
with open(deepsea, 'r') as fin:
for line in fin:
if not line.startswith("Cell Type"):
#print line
line=line.split()
if len(line)==0:
continue
print(line)
if line[4]=="NA":
continue
sname=line[3].split('.')[0]
AUPRC=float(line[5])
deepsea_dict[sname]=AUPRC
sample_list=[]
deepsea_list=[]
deepshark_list=[]
with open(deepshark, 'r') as fin:
go=False
for line in fin:
if line.startswith("sample"):
go=True
continue
elif go:
line=line.split()
sname=line[0].split("_")[0]
if "Dnase" in sname and sname in deepsea_dict:
sample_list.append(sname)
deepsea_list.append(deepsea_dict[sname])
deepshark_list.append(float(line[2]))
print(sname, deepsea_dict[sname], float(line[2]))
deepsea_list=np.array(deepsea_list)
deepshark_list=np.array(deepshark_list)
log_fold=np.log2(deepshark_list/deepsea_list)
log_fold_neg=log_fold[log_fold<0.00]
print("total num: "+str(len(log_fold))+"\nless performed num:"+str(len(log_fold_neg))+" ("+str(len(log_fold_neg)/float(len(log_fold))*100.0)+"%)")
|
{"hexsha": "c04a01572edb6c0a8ad443fff743f8095f424705", "size": 1505, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepgmap/misc/compare_deepsea_data.py", "max_stars_repo_name": "koonimaru/DeepGMAP", "max_stars_repo_head_hexsha": "7daac354229fc25fba81649b741921345dc5db05", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-06-27T11:45:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-01T15:32:56.000Z", "max_issues_repo_path": "deepgmap/misc/compare_deepsea_data.py", "max_issues_repo_name": "koonimaru/DeepGMAP", "max_issues_repo_head_hexsha": "7daac354229fc25fba81649b741921345dc5db05", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-01-28T21:45:15.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-20T02:40:48.000Z", "max_forks_repo_path": "deepgmap/misc/compare_deepsea_data.py", "max_forks_repo_name": "koonimaru/DeepGMAP", "max_forks_repo_head_hexsha": "7daac354229fc25fba81649b741921345dc5db05", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-10-19T19:43:27.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-19T19:43:27.000Z", "avg_line_length": 31.3541666667, "max_line_length": 146, "alphanum_fraction": 0.6132890365, "include": true, "reason": "import numpy", "num_tokens": 394}
|
import json
import codecs
import numpy as np
import plotly.express as px
import streamlit as st
import os
class Web:
def __init__(self):
self.ruta_menus = os.path.join("scraper_siglas-uc", "outputs", "menus.json")
self.min_rec = 4
self.max_rec = 30
self.step_rec = 2
        # Default values
self.sel_escuelas = list()
self.sel_campus = list()
self.sel_formatos = list()
self.sel_recomend = 10
def cargar(self):
with codecs.open(self.ruta_menus, "rU", encoding = "utf-8") as archivo:
menus = json.load(archivo)
self.escuelas = menus["escuelas"]
self.campus = menus["campus"]
self.formatos = menus["formato"]
del menus
def base_url(self, sigla):
return f"https://catalogo.uc.cl/index.php?tmpl=component&option=com_catalogo&view=programa&sigla={sigla}"
def __datos_a_html(self, datos):
datos["url"] = datos["Sigla"].apply(lambda x: f"<a href={self.base_url(x)} target=\"_blank\">")
datos["Nombre"] = datos["url"] + datos["Nombre"].apply(lambda x: f"{x}</a>")
datos.drop(columns = "url", inplace = True)
datos_html = datos.to_html(escape=False, index=False)
return datos_html
def __crear_grafico(self, datos):
data_media = datos.\
groupby(["Escuela"]).\
agg({"Similitud": "mean"}).\
sort_values("Similitud", ascending = False)
fig = px.bar(
data_media,
labels = {"value": "Porcentaje (%)"},
title = "Media de Similitud por Escuela"
)
fig.update_layout(showlegend = False)
return fig
def mostrar_objeto(self, modelo, tipo = "datos"):
data_show = modelo.datos_modelo.iloc[:self.sel_recomend, :]
if len(self.consulta) == 0:
st.markdown(f"""
            {"Recommendations" if tipo == "datos" else "Visualizations"} will appear here once you enter a query. Give it a try!
""")
elif modelo.datos_modelo["Similitud"].unique().shape[0] == 1:
st.markdown(f"""
            There are no {"recommendations" if tipo == "datos" else "visualizations"} to show. Try a new query or use synonyms.
""")
else:
if tipo == "datos":
if len(self.sel_escuelas) != 0:
data_show = data_show.query("Escuela in @self.sel_escuelas")
if len(self.sel_campus) != 0:
data_show = data_show.query("Campus in @self.sel_campus")
if len(self.sel_formatos) != 0:
data_show = data_show.query("Formato in @self.sel_formatos")
data_show["Similitud"] = np.round(data_show["Similitud"], 1).astype(str) + "%"
data_show = self.__datos_a_html(data_show)
return st.write(data_show, unsafe_allow_html=True)
else:
fig = self.__crear_grafico(data_show)
return st.plotly_chart(fig)
|
{"hexsha": "83a53092089c83461b4291365f9919130c742576", "size": 3090, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/web.py", "max_stars_repo_name": "estebanrucan/recomendador-cursos-uc", "max_stars_repo_head_hexsha": "5ea1f660832e7b21c4b13031c88e2b3e8d1ce9fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app/web.py", "max_issues_repo_name": "estebanrucan/recomendador-cursos-uc", "max_issues_repo_head_hexsha": "5ea1f660832e7b21c4b13031c88e2b3e8d1ce9fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/web.py", "max_forks_repo_name": "estebanrucan/recomendador-cursos-uc", "max_forks_repo_head_hexsha": "5ea1f660832e7b21c4b13031c88e2b3e8d1ce9fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6153846154, "max_line_length": 146, "alphanum_fraction": 0.5734627832, "include": true, "reason": "import numpy", "num_tokens": 773}
|
! { dg-do compile }
! { dg-options "-fimplicit-none" }
!
! PR 41121: [4.5 Regression] compile-time error when building BLAS with -fimplicit-none
!
! Original test case: http://www.netlib.org/blas/dgbmv.f
! Reduced by Joost VandeVondele <jv244@cam.ac.uk>
INTRINSIC MIN
INTEGER :: I,J
print *,MIN(I,J)
END
|
{"hexsha": "77ecf32beeb5db2432c27a9e0414affb9a617c63", "size": 312, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/intrinsic_5.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 488, "max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z", "max_issues_repo_path": "tests/CompileTests/Fortran_tests/gfortranTestSuite/gfortran.dg/intrinsic_5.f90", "max_issues_repo_name": "sujankh/rose-matlab", "max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 174, "max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z", "max_forks_repo_path": "tests/CompileTests/Fortran_tests/gfortranTestSuite/gfortran.dg/intrinsic_5.f90", "max_forks_repo_name": "sujankh/rose-matlab", "max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 146, "max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z", "avg_line_length": 22.2857142857, "max_line_length": 87, "alphanum_fraction": 0.6891025641, "num_tokens": 102}
|
[STATEMENT]
lemma maxr_lg: "\<lbrakk>Suc 0 < x; Suc 0 < y\<rbrakk> \<Longrightarrow> Maxr lgR [x, y] x = lg x y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Suc 0 < x; Suc 0 < y\<rbrakk> \<Longrightarrow> Maxr lgR [x, y] x = lg x y
[PROOF STEP]
apply(auto simp add: lg.simps Maxr.simps)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>xa. \<lbrakk>Suc 0 < x; Suc 0 < y; \<forall>xa\<le>x. \<not> lgR [x, y, xa]; lgR [x, y, xa]\<rbrakk> \<Longrightarrow> Max {ya. ya \<le> x \<and> lgR [x, y, ya]} = 0
[PROOF STEP]
using lgR_ok
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>Suc 0 < ?y; lgR [?x, ?y, ?xa]\<rbrakk> \<Longrightarrow> ?xa \<le> ?x
goal (1 subgoal):
1. \<And>xa. \<lbrakk>Suc 0 < x; Suc 0 < y; \<forall>xa\<le>x. \<not> lgR [x, y, xa]; lgR [x, y, xa]\<rbrakk> \<Longrightarrow> Max {ya. ya \<le> x \<and> lgR [x, y, ya]} = 0
[PROOF STEP]
by blast
|
{"llama_tokens": 450, "file": "Universal_Turing_Machine_UF", "length": 3}
|
from sympl import AdamsBashforth
from climt import (
    Frierson06LongwaveOpticalDepth, GrayLongwaveRadiation,
    SimplePhysics, DryConvectiveAdjustment, SlabSurface,
    get_default_state)
import datetime
from datetime import timedelta
import numpy as np
from stoppingCriteria_fn import *
from analyticFunctions import *
from fluxDivergence_fns import *
def runningModel(maxTau,albedo):
# Initialize components
diagnostic = Frierson06LongwaveOpticalDepth(linear_optical_depth_parameter=.1, longwave_optical_depth_at_equator=maxTau)
radiation = GrayLongwaveRadiation(tendencies_in_diagnostics=True)
surface = SlabSurface()
boundary_layer = SimplePhysics(
use_external_surface_specific_humidity=True)
dry_convection = DryConvectiveAdjustment()
time_stepper = AdamsBashforth([radiation, surface])
timestep = timedelta(hours=1)
# Set up model state.
state = get_default_state([radiation, diagnostic, surface,
boundary_layer, dry_convection])
sw_flux = 200
state['downwelling_shortwave_flux_in_air'][:] = sw_flux
state['ocean_mixed_layer_thickness'][:] = 1.
state['air_temperature'][:] = 250.
diff_acceptable = 5.
    # Create lists recording the 0-D (scalar) time histories.
netEn = [(net_energy_level_in_column(state,diff_acceptable))[0]]
bdry_tempDiff = [surf_airBdry_tempDiff(state)]
olrs = [(np.array(state['upwelling_longwave_flux_in_air']).flatten())[-1]]
surfT = [(np.array(state['surface_temperature']).flatten())[0]]
# Iteration
startTime = datetime.datetime(2020,1,1,0,0,0)
    counter = 0
errorMargin = .5
stop = False
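    # Equilibrium criterion: run until the column's net energy imbalance
    # (net_energy_level_in_column, from stoppingCriteria_fn) drops below
    # errorMargin; presumably expressed in W m-2.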
    while not stop:
        # Update state.
state.update(diagnostic(state))
diagnostics, state = time_stepper(state,timestep)
state.update(diagnostics)
        # Update recorded quantities every 168 steps (one week of hourly steps).
if counter % 168 == 0:
netEn.append((net_energy_level_in_column(state,diff_acceptable))[0])
bdry_tempDiff.append(surf_airBdry_tempDiff(state))
olrs.append((np.array(state['upwelling_longwave_flux_in_air']).flatten())[-1])
surfT.append((np.array(state['surface_temperature']).flatten())[0])
        # Increment the counter and model time, then check the breakout condition.
counter += 1
startTime += timestep
if abs(net_energy_level_in_column(state,diff_acceptable)[0]) < errorMargin:
stop = True
state['eastward_wind'][:] = 10.
    # Calculate output quantities.
timeTaken = startTime - datetime.datetime(2020,1,1,0,0,0)
lwFluxNet, lwFluxUp, lwFluxDown = netFlux(state)
heatRate = heatingRate(state)
airTemperatureProf = (np.array(state['air_temperature'])).flatten()
return state, float(timeTaken.days), olrs, bdry_tempDiff, netEn, surfT, lwFluxNet, lwFluxUp, lwFluxDown, heatRate, airTemperatureProf
|
{"hexsha": "78f97ae7508ed25bf1cb1ac87358cc72fe4d58e7", "size": 2895, "ext": "py", "lang": "Python", "max_stars_repo_path": "RCEModel/modelTimestep.py", "max_stars_repo_name": "Mihir-DG/Modelling-Planetary-Climate", "max_stars_repo_head_hexsha": "4699d55d6ccecc4938f9844dd658e9c40c6d07c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-21T04:52:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T04:52:46.000Z", "max_issues_repo_path": "RCEModel/modelTimestep.py", "max_issues_repo_name": "Mihir-DG/Modelling-Planetary-Climate", "max_issues_repo_head_hexsha": "4699d55d6ccecc4938f9844dd658e9c40c6d07c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "RCEModel/modelTimestep.py", "max_forks_repo_name": "Mihir-DG/Modelling-Planetary-Climate", "max_forks_repo_head_hexsha": "4699d55d6ccecc4938f9844dd658e9c40c6d07c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.7746478873, "max_line_length": 135, "alphanum_fraction": 0.7519861831, "include": true, "reason": "import numpy", "num_tokens": 751}
|
"""
Training a seq2bow encoder-decoder model
========================================
"""
from tmnt.estimator import SeqBowEstimator
import numpy as np
import gluonnlp as nlp
import os
import mxnet as mx
import logging
from sklearn.datasets import fetch_20newsgroups
from tmnt.preprocess.vectorizer import TMNTVectorizer
from tmnt.inference import SeqVEDInferencer
from tmnt.bert_handling import get_bert_datasets
from tmnt.utils.log_utils import logging_config
from mxnet.gluon.data import ArrayDataset
data, y = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'),
return_X_y=True)
train_data = data[:2000]
dev_data = data[-2000:]
train_y = y[:2000]
dev_y = y[-2000:]
model_name = 'bert_12_768_12'
dataset = 'book_corpus_wiki_en_uncased'
batch_size = 32
seq_len = 64
pad = True
vectorizer = TMNTVectorizer(vocab_size=2000)
vectorizer.fit_transform(train_data)
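# The fitted vocabulary doubles as the bag-of-words target space that the
# decoder reconstructs (it is passed to the estimator as bow_vocab below);
# 2000 terms keeps this example lightweight.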
ctx = mx.cpu() ## or mx.gpu(N) if using GPU device=N
supervised = True
use_logging = True
if supervised:
num_classes = int(np.max(y) + 1)
classes = ['class_'+str(i) for i in range(num_classes)]
else:
num_classes = 0
classes = None
if use_logging:
logging_config(folder='.', name='f_seqbow_20news', level='info', console_level='info')
log_method = 'log'
else:
log_method = 'print'
train_y_s = ['class_'+str(y) for y in train_y]
dev_y_s = ['class_'+str(y) for y in dev_y]
tr_ds = ArrayDataset(train_data, train_y_s)
dev_ds = ArrayDataset(dev_data, dev_y_s)
print("Classes = {}".format(classes))
tr_dataset, dev_dataset, num_examples, bert_base, bert_vocab = get_bert_datasets(classes, vectorizer,
tr_ds, dev_ds, batch_size, seq_len,
bert_model_name=model_name,
bert_dataset=dataset,
num_classes=num_classes,
ctx=ctx)
estimator = SeqBowEstimator(bert_base, bert_model_name = model_name, bert_data_name = dataset,
n_labels = num_classes,
bow_vocab = vectorizer.get_vocab(),
optimizer='bertadam',
batch_size=batch_size, ctx=ctx, log_interval=1,
log_method=log_method, gamma=1.0,
lr=2e-5, decoder_lr=0.02, epochs=1)
# this will take quite some time without a GPU!
estimator.fit_with_validation(tr_dataset, dev_dataset, num_examples)
|
{"hexsha": "d344d61a2ab3fa177256f0730e0eda3a7e1908e3", "size": 2785, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/f_seqbow_20news.py", "max_stars_repo_name": "swfarnsworth/tmnt", "max_stars_repo_head_hexsha": "a53c8d62d0ddc6be5fc62013e6801019c345a6f4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/f_seqbow_20news.py", "max_issues_repo_name": "swfarnsworth/tmnt", "max_issues_repo_head_hexsha": "a53c8d62d0ddc6be5fc62013e6801019c345a6f4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/f_seqbow_20news.py", "max_forks_repo_name": "swfarnsworth/tmnt", "max_forks_repo_head_hexsha": "a53c8d62d0ddc6be5fc62013e6801019c345a6f4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.253164557, "max_line_length": 107, "alphanum_fraction": 0.5877917415, "include": true, "reason": "import numpy", "num_tokens": 612}
|
import os
import numpy as np
import random
import tensorflow as tf
import urllib
import zipfile
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print 'Found and verified', filename
else:
print statinfo.st_size
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
def read_data(filename):
  """Return the contents of the first file inside the zip archive."""
  with zipfile.ZipFile(filename) as f:
    return f.read(f.namelist()[0])
text = read_data(filename)
print "Data size", len(text)
valid_size = 1000
valid_text = text[:valid_size]
train_text = text[valid_size:]
train_size = len(train_text)
print train_size, train_text[:64]
print valid_size, valid_text[:64]
n_gram_size=2
def build_n_gram_dataset(text, n_gram_size):
    dictionary = dict()
    text_len = len(text)
    for i in xrange(text_len):
        # Collect the n-gram starting at i, wrapping around the end of text.
        letters = []
        for j in xrange(n_gram_size):
            letter_idx = (i + j) % text_len
            letters.append(text[letter_idx])
        n_gram = ''.join(letters)
        if n_gram not in dictionary:
            dictionary[n_gram] = len(dictionary)
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return dictionary, reverse_dictionary
dictionary, reverse_dictionary = build_n_gram_dataset(text, n_gram_size)
vocabulary_size = len(dictionary)
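# text8 contains only the lowercase letters a-z plus space, so with
# n_gram_size=2 the vocabulary holds at most 27**2 = 729 bigrams.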
def n_gram_to_encoding(n_gram):
id = dictionary[n_gram]
encoding = np.zeros(shape=(vocabulary_size), dtype=np.float)
encoding[id] = 1.0
return encoding
def probs_to_ids(probabilities):
return [c for c in np.argmax(probabilities, 1)]
def logprob(predictions, labels):
"""Log-probability of the true labels in a predicted batch."""
predictions[predictions < 1e-10] = 1e-10
return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0]
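# The perplexities printed during training are np.exp(logprob(...)), i.e. the
# exponentiated mean cross-entropy against the one-hot labels.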
def sample(prediction, bottom_start=0):
"""Turn a (column) prediction into 1-hot encoded samples."""
p = np.zeros(shape=[vocabulary_size], dtype=np.float)
p[sample_distribution(prediction[0], bottom_start)] = 1.0
return p
def sample_distribution(distribution, bottom_start=0):
"""Sample one element from a distribution assumed to be an array of normalized
probabilities.
"""
  r = random.uniform(0, 1)
  s = 0
  # Start at bottom_start so callers can exclude the lowest ids from sampling.
  for i in xrange(bottom_start, len(distribution)):
    s += distribution[i]
    if s >= r:
      return i
  return len(distribution) - 1
def random_distribution():
"""Generate a random column of probabilities."""
b = np.random.uniform(0.0, 1.0, size=[1, vocabulary_size])
return b/np.sum(b, 1)[:,None]
def prob_to_n_gram(probability):
ngram_id = np.argmax(probability)
ngram = reverse_dictionary[ngram_id]
return ngram
def probs_2_n_gram_ids(probabilities):
return [np.argmax(probability) for probability in probabilities]
def probabilities_to_n_grams(probabilities):
return [prob_to_n_gram(x) for x in probabilities]
def n_gram_to_id(ngram):
return dictionary[ngram]
def id_to_n_gram(id):
return reverse_dictionary[id]
#print prob_to_n_gram(n_gram_to_encoding(" a"))
#enc = n_gram_to_encoding(" a")
#print enc
#print probabilities_to_n_grams([n_gram_to_encoding(" a"), n_gram_to_encoding("an")])
batch_size=64
num_unrollings=10
class BatchGenerator(object):
def __init__(self, text, batch_size, num_unrollings, n_gram_size):
self._text = text
self._text_size = len(text)
self._batch_size = batch_size
self._num_unrollings = num_unrollings
self._n_gram_size = n_gram_size
segment = self._text_size / batch_size
self._segment_size = segment
self._cursor = [ offset * segment for offset in xrange(batch_size)]
print self._cursor
self._last_batch = self._next_batch()
def _next_batch(self):
"""Generate a single batch from the current cursor position in the data."""
batch = np.zeros(shape=(self._batch_size, vocabulary_size), dtype=np.float)
for b in xrange(self._batch_size):
letters = []
for i in xrange(self._n_gram_size):
letter_idx = (self._cursor[b] + i) % self._text_size
letter = self._text[letter_idx]
letters.append(letter)
n_gram = ''.join(letters)
n_gram_id = n_gram_to_id(n_gram)
batch[b, n_gram_id] = 1.0
self._cursor[b] = (self._cursor[b] + self._n_gram_size) % self._text_size
return batch
def next(self):
"""Generate the next array of batches from the data. The array consists of
the last batch of the previous array, followed by num_unrollings new ones.
"""
batches = [self._last_batch]
for step in xrange(self._num_unrollings):
batches.append(self._next_batch())
self._last_batch = batches[-1]
return batches
def characters(probabilities):
"""Turn a 1-hot encoding or a probability distribution over the possible
characters back into its (mostl likely) character representation."""
return [id_to_n_gram(c) for c in np.argmax(probabilities, 1)]
def batches2string(batches):
"""Convert a sequence of batches back into their (most likely) string
representation."""
s = [''] * batches[0].shape[0]
for b in batches:
s = [''.join(x) for x in zip(s, probabilities_to_n_grams(b))]
return s
train_batches = BatchGenerator(train_text, batch_size, num_unrollings, n_gram_size)
valid_batches = BatchGenerator(valid_text, 1, 1, 2)
"""
print batches2string(train_batches.next())
print batches2string(train_batches.next())
print batches2string(valid_batches.next())
print batches2string(valid_batches.next())
"""
num_nodes = 64
embedding_size = 64
num_steps = 24001
graph = tf.Graph()
with graph.as_default():
# Parameters:
# Variables saving state across unrollings.
saved_output1 = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_state1 = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_output2 = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
saved_state2 = tf.Variable(tf.zeros([batch_size, num_nodes]), trainable=False)
# Classifier weights and biases.
w = tf.Variable(tf.truncated_normal([num_nodes, vocabulary_size], -0.1, 0.1))
b = tf.Variable(tf.zeros([vocabulary_size]))
# Defining matrices for: input gate, forget gate, memory cell, output gate
m_rows = 4
m_input_index = 0
m_forget_index = 1
m_update_index = 2
m_output_index = 3
m_input_w = tf.Variable(tf.truncated_normal([m_rows, embedding_size, num_nodes], -0.1, 0.1))
m_middle = tf.Variable(tf.truncated_normal([m_rows, num_nodes, num_nodes], -0.1, 0.1))
m_biases = tf.Variable(tf.truncated_normal([m_rows, 1, num_nodes], -0.1, 0.1))
m_saved_output = tf.Variable(tf.zeros([m_rows, batch_size, num_nodes]), trainable=False)
m_input = tf.Variable(tf.zeros([m_rows, batch_size, num_nodes]), trainable=False)
# Variables.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# Dropout
keep_prob = tf.placeholder(tf.float32)
# Definition of the 2nd LSTM layer
m_input_w2 = tf.Variable(tf.truncated_normal([m_rows, embedding_size, num_nodes], -0.1, 0.1))
m_middle_w2 = tf.Variable(tf.truncated_normal([m_rows, num_nodes, num_nodes], -0.1, 0.1))
m_biases2 = tf.Variable(tf.truncated_normal([m_rows, 1, num_nodes], -0.1, 0.1))
m_saved_output2 = tf.Variable(tf.zeros([m_rows, batch_size, num_nodes]), trainable=False)
m_input2 = tf.Variable(tf.zeros([m_rows, batch_size, num_nodes]), trainable=False)
# Definition of the cell computation.
def lstm_cell_improved(i, o, state):
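    # All four gate pre-activations are computed in one batched matmul by
    # stacking the input and previous output m_rows (= 4) times.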
m_input = tf.pack([i for _ in range(m_rows)])
m_saved_output = tf.pack([o for _ in range(m_rows)])
m_input = tf.nn.dropout(m_input, keep_prob)
m_all = tf.batch_matmul(m_input, m_input_w) + tf.batch_matmul(m_saved_output, m_middle) + m_biases
m_all = tf.unpack(m_all)
input_gate = tf.sigmoid(m_all[m_input_index])
forget_gate = tf.sigmoid(m_all[m_forget_index])
update = m_all[m_update_index]
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(m_all[m_output_index])
return output_gate * tf.tanh(state), state
def lstm_cell_2(i, o, state):
"""Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
Note that in this formulation, we omit the various connections between the
previous state and the gates."""
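    # Per-step LSTM equations implemented below:
    #   i_t = sigmoid(x_t W_i + h_{t-1} U_i + b_i)    (input gate)
    #   f_t = sigmoid(x_t W_f + h_{t-1} U_f + b_f)    (forget gate)
    #   c_t = f_t * c_{t-1} + i_t * tanh(x_t W_c + h_{t-1} U_c + b_c)
    #   o_t = sigmoid(x_t W_o + h_{t-1} U_o + b_o)    (output gate)
    #   h_t = o_t * tanh(c_t)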
m_input2 = tf.pack([i for _ in range(m_rows)])
m_saved_output2 = tf.pack([o for _ in range(m_rows)])
m_input2 = tf.nn.dropout(m_input2, keep_prob)
    m_all = tf.batch_matmul(m_input2, m_input_w2) + tf.batch_matmul(m_saved_output2, m_middle_w2) + m_biases2
m_all = tf.unpack(m_all)
input_gate = tf.sigmoid(m_all[m_input_index])
forget_gate = tf.sigmoid(m_all[m_forget_index])
update = m_all[m_update_index]
state = forget_gate * state + input_gate * tf.tanh(update)
output_gate = tf.sigmoid(m_all[m_output_index])
return output_gate * tf.tanh(state), state
# Input data.
train_data = list()
train_labels = list()
for x in xrange(num_unrollings):
train_data.append(
tf.placeholder(tf.int32, shape=[batch_size]))
train_labels.append(
tf.placeholder(tf.float32, shape=[batch_size,vocabulary_size]))
encoded_inputs = list()
for bigram_batch in train_data:
embed = tf.nn.embedding_lookup(embeddings, bigram_batch)
encoded_inputs.append(embed)
train_inputs = encoded_inputs
# Unrolled LSTM loop.
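  # Each step feeds layer 1's output into layer 2; each layer threads its own
  # output and cell state through all num_unrollings steps before the loss.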
outputs = list()
output1 = saved_output1
output2 = saved_output2
state1 = saved_state1
state2 = saved_state2
for i in train_inputs:
output1, state1 = lstm_cell_improved(i, output1, state1)
output2, state2 = lstm_cell_2(output1, output2, state2)
outputs.append(output2)
# State saving across unrollings.
with tf.control_dependencies([saved_output1.assign(output1),
saved_state1.assign(state1),
saved_output2.assign(output2),
saved_state2.assign(state2)]):
# Classifier.
logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits, tf.concat(0, train_labels)))
# Optimizer.
global_step = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
10.0, global_step, num_steps / 2, 0.1, staircase=False)
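  # The learning rate decays smoothly from 10.0 by a factor of 0.1 every
  # num_steps/2 steps (staircase=False), ending near 0.1 at the final step.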
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
optimizer = optimizer.apply_gradients(
zip(gradients, v), global_step=global_step)
# Predictions.
train_prediction = tf.nn.softmax(logits)
# Sampling and validation eval: batch 1, no unrolling.
sample_input = tf.placeholder(tf.int32, shape=[1])
sample_embed = tf.nn.embedding_lookup(embeddings, sample_input)
saved_sample_output1 = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_state1 = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_output2 = tf.Variable(tf.zeros([1, num_nodes]))
saved_sample_state2 = tf.Variable(tf.zeros([1, num_nodes]))
reset_sample_state = tf.group(
saved_sample_output1.assign(tf.zeros([1, num_nodes])),
saved_sample_state1.assign(tf.zeros([1, num_nodes])),
saved_sample_output2.assign(tf.zeros([1, num_nodes])),
saved_sample_state2.assign(tf.zeros([1, num_nodes])))
sample_output1, sample_state1 = lstm_cell_improved(
sample_embed, saved_sample_output1, saved_sample_state1)
sample_output2, sample_state2 = lstm_cell_2(
sample_output1, saved_sample_output2, saved_sample_state2)
with tf.control_dependencies([saved_sample_output1.assign(sample_output1),
saved_sample_state1.assign(sample_state1),
saved_sample_output2.assign(sample_output2),
saved_sample_state2.assign(sample_state2)]):
sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output2, w, b))
summary_frequency = 100
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print 'Initialized'
mean_loss = 0
for step in xrange(num_steps):
batches = train_batches.next()
feed_dict = dict()
# setup inputs
for i in xrange(num_unrollings):
data = probs_to_ids(batches[i])
feed_dict[train_data[i]] = data
# setup outputs
for i in xrange(1, num_unrollings + 1, 1):
feed_dict[train_labels[i-1]] = batches[i]
# setup dropout
feed_dict[keep_prob] = 0.8
_, l, predictions, lr = session.run(
[optimizer, loss, train_prediction, learning_rate], feed_dict=feed_dict)
mean_loss += l
if step % summary_frequency == 0:
if step > 0:
mean_loss = mean_loss / summary_frequency
# The mean loss is an estimate of the loss over the last few batches.
print 'Average loss at step', step, ':', mean_loss, 'learning rate:', lr
mean_loss = 0
labels = np.concatenate(list(batches)[1:])
print 'Minibatch perplexity: %.2f' % float(
np.exp(logprob(predictions, labels)))
if step % (summary_frequency * 10) == 0:
# Generate some samples.
print '=' * 80
for _ in xrange(5):
feed = sample(random_distribution())
sentence = characters([feed])[0]
feed = probs_to_ids([feed])
reset_sample_state.run()
for _ in xrange(79):
prediction = sample_prediction.eval({sample_input: feed, keep_prob: 1.0})
feed = sample(prediction)
sentence += characters([feed])[0]
feed = probs_to_ids([feed])
print sentence
print '=' * 80
# Measure validation set perplexity.
reset_sample_state.run()
valid_logprob = 0
for _ in xrange(valid_size):
b = valid_batches.next()
feed = probs_to_ids(b[0])
predictions = sample_prediction.eval({sample_input: feed, keep_prob: 1.0})
valid_logprob = valid_logprob + logprob(predictions, b[1])
print 'Validation set perplexity: %.2f' % float(np.exp(
valid_logprob / valid_size))
|
{"hexsha": "d1180e2f598bd13f6f6057e8ee3c4d519de75e13", "size": 14939, "ext": "py", "lang": "Python", "max_stars_repo_path": "autocompletion.py", "max_stars_repo_name": "randomrandom/cbow-ml-autocompletion", "max_stars_repo_head_hexsha": "26bb277626dee2b5bbd92aa268c3f1528c66fbae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-01-04T11:00:32.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-08T23:31:00.000Z", "max_issues_repo_path": "autocompletion.py", "max_issues_repo_name": "randomrandom/cbow-ml-autocompletion", "max_issues_repo_head_hexsha": "26bb277626dee2b5bbd92aa268c3f1528c66fbae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autocompletion.py", "max_forks_repo_name": "randomrandom/cbow-ml-autocompletion", "max_forks_repo_head_hexsha": "26bb277626dee2b5bbd92aa268c3f1528c66fbae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.824940048, "max_line_length": 108, "alphanum_fraction": 0.6974362407, "include": true, "reason": "import numpy", "num_tokens": 3867}
|