/**
* @copyright Copyright 2018 The J-PET Framework Authors. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may find a copy of the License in the LICENCE file.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @file JPetMCHitTest.cpp
*/
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE JPetMCHitTest
#include "JPetMCHit/JPetMCHit.h"
#include "JPetBarrelSlot/JPetBarrelSlot.h"
#include "JPetScin/JPetScin.h"
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(FirstSuite)
BOOST_AUTO_TEST_CASE(default_constructor)
{
JPetMCHit hit;
double epsilon = 0.0001;
BOOST_REQUIRE_EQUAL(hit.getMCDecayTreeIndex(), 0u);
BOOST_REQUIRE_EQUAL(hit.getMCVtxIndex(), 0);
BOOST_REQUIRE_CLOSE(hit.getPolarization().X(), 0, epsilon);
BOOST_REQUIRE_CLOSE(hit.getPolarization().Y(), 0, epsilon);
BOOST_REQUIRE_CLOSE(hit.getPolarization().Z(), 0, epsilon);
BOOST_REQUIRE_CLOSE(hit.getMomentum().X(), 0, epsilon);
BOOST_REQUIRE_CLOSE(hit.getMomentum().Y(), 0, epsilon);
BOOST_REQUIRE_CLOSE(hit.getMomentum().Z(), 0, epsilon);
BOOST_REQUIRE_CLOSE(hit.getEnergy(), 0.0f, epsilon);
BOOST_REQUIRE_CLOSE(hit.getQualityOfEnergy(), 0.0f, epsilon);
BOOST_REQUIRE_CLOSE(hit.getTime(), 0.0f, epsilon);
BOOST_REQUIRE_CLOSE(hit.getQualityOfTime(), 0.0f, epsilon);
BOOST_REQUIRE_CLOSE(hit.getPosX(), 0, epsilon);
BOOST_REQUIRE_CLOSE(hit.getPosY(), 0, epsilon);
BOOST_REQUIRE_CLOSE(hit.getPosZ(), 0, epsilon);
BOOST_REQUIRE_EQUAL(hit.isSignalASet(), false);
BOOST_REQUIRE_EQUAL(hit.isSignalBSet(), false);
}
BOOST_AUTO_TEST_CASE(non_default_constructor)
{
TVector3 position(6.0, 7.0, 8.0);
TVector3 polarization(2.0, 3.0, 4.0);
TVector3 momentum(1.0, -13.0, 13.0);
auto MCDecayTreeIndex = 7u;
auto MCVtxIndex = 99u;
auto energy = 5.5;
auto time = 3.3;
double epsilon = 0.0001;
JPetMCHit hit(MCDecayTreeIndex, MCVtxIndex, energy, time, position, polarization, momentum);
BOOST_REQUIRE_CLOSE(hit.getEnergy(), energy, epsilon);
BOOST_REQUIRE_CLOSE(hit.getQualityOfEnergy(), 0.0f, epsilon);
BOOST_REQUIRE_CLOSE(hit.getTime(), time, epsilon);
BOOST_REQUIRE_CLOSE(hit.getQualityOfTime(), 0.0f, epsilon);
BOOST_REQUIRE_CLOSE(hit.getPosX(), position.X(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getPosY(), position.Y(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getPosZ(), position.Z(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getPolarization().X(), polarization.X(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getPolarization().Y(), polarization.Y(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getPolarization().Z(), polarization.Z(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getMomentum().X(), momentum.X(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getMomentum().Y(), momentum.Y(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getMomentum().Z(), momentum.Z(), epsilon);
BOOST_REQUIRE_EQUAL(hit.getMCDecayTreeIndex(), MCDecayTreeIndex);
BOOST_REQUIRE_EQUAL(hit.getMCVtxIndex(), MCVtxIndex);
}
BOOST_AUTO_TEST_CASE(getters_setters_mc)
{
JPetMCHit hit;
TVector3 pol(2.0, 3.0, 4.0);
TVector3 mom(1.0, -13.0, 13.0);
auto MCDecayTreeIndex = 7u;
auto MCVtxIndex = 99u;
double epsilon = 0.0001;
hit.setPolarization(pol.X(), pol.Y(), pol.Z());
hit.setMomentum(mom.X(), mom.Y(), mom.Z());
hit.setMCDecayTreeIndex(MCDecayTreeIndex);
hit.setMCVtxIndex(MCVtxIndex);
BOOST_REQUIRE_CLOSE(hit.getPolarization().X(), pol.X(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getPolarization().Y(), pol.Y(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getPolarization().Z(), pol.Z(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getMomentum().X(), mom.X(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getMomentum().Y(), mom.Y(), epsilon);
BOOST_REQUIRE_CLOSE(hit.getMomentum().Z(), mom.Z(), epsilon);
BOOST_REQUIRE_EQUAL(hit.getMCDecayTreeIndex(), MCDecayTreeIndex);
BOOST_REQUIRE_EQUAL(hit.getMCVtxIndex(), MCVtxIndex);
}
BOOST_AUTO_TEST_SUITE_END()
// Source: tests/MC/JPetMCHit/JPetMCHitTest.cpp, from BlurredChoise/j-pet-framework (Apache-2.0)
import numpy as np
import logging
logger = logging.getLogger('Solve it like a human')
class SolveItLikeAHuman:
"""
    The idea behind this algorithm is to emulate how a human being would solve a sudoku:
    repeatedly fill in every empty cell that has exactly one valid candidate.
"""
    def __is_number_valid_in_grid(self, number, grid, row_position, column_position):
        # True when the number is already present in the 3x3 subgrid containing
        # (row_position, column_position), i.e. placing it there would be invalid
        grid_row = row_position // 3
        grid_column = column_position // 3
        return number in grid[grid_row * 3: 3 * grid_row + 3, grid_column * 3: 3 * grid_column + 3]
def __is_number_valid(self, number, grid, row_position, column_position):
result = True
if number in grid[row_position, :]:
result = False
elif number in grid[:, column_position]:
result = False
elif self.__is_number_valid_in_grid(number, grid, row_position, column_position):
result = False
return result
def __get_matrix_of_possibilities(self, grid):
matrix_of_possibilities = list()
for row_position in range(grid.shape[0]):
for column_position in range(grid.shape[1]):
if grid[row_position, column_position] == 0:
list_of_candidate_numbers = list()
                    for candidate_number in range(1, 10):  # candidate digits 1-9 (0 marks an empty cell)
if self.__is_number_valid(number=candidate_number,
grid=grid,
row_position=row_position,
column_position=column_position):
list_of_candidate_numbers.append(candidate_number)
matrix_of_possibilities.append([(row_position, column_position),
list_of_candidate_numbers,
len(list_of_candidate_numbers)])
return matrix_of_possibilities
def __select_from_matrix(self, matrix_of_possibilities, grid):
        # rows are ragged ([(row, col), candidate_list, count]), so dtype=object is required
        array_matrix_of_possibilities = np.array(matrix_of_possibilities, dtype=object)
is_feasible = True
if array_matrix_of_possibilities[array_matrix_of_possibilities[:, 2] == 1].shape[0] == 0:
is_feasible = False
logger.error(f'It is not possible to fill the sudoku with this method, the grid is: {grid}')
for row_with_single_candidate in array_matrix_of_possibilities[array_matrix_of_possibilities[:, 2] == 1]:
grid[row_with_single_candidate[0][0], row_with_single_candidate[0][1]] = row_with_single_candidate[1][0]
return is_feasible, grid
def run(self, grid):
is_feasible = True
while grid[grid == 0].shape[0] >= 1:
matrix_of_possibilities = self.__get_matrix_of_possibilities(grid)
is_feasible, grid = self.__select_from_matrix(matrix_of_possibilities, grid)
            if not is_feasible:
                logger.error('It was not possible to get a solution with this method')
                break
return is_feasible, grid
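# A minimal usage sketch. The grid is a valid solved sudoku with two cells
# blanked (0 marks an empty cell), so each blank has exactly one candidate
# and the single-candidate strategy implemented above can fill it.
def _demo_solve_it_like_a_human():
    grid = np.array([
        [0, 2, 3, 4, 5, 6, 7, 8, 9],
        [4, 5, 6, 7, 8, 9, 1, 2, 3],
        [7, 8, 9, 1, 2, 3, 4, 5, 6],
        [2, 3, 4, 5, 6, 7, 8, 9, 1],
        [5, 6, 7, 8, 0, 1, 2, 3, 4],
        [8, 9, 1, 2, 3, 4, 5, 6, 7],
        [3, 4, 5, 6, 7, 8, 9, 1, 2],
        [6, 7, 8, 9, 1, 2, 3, 4, 5],
        [9, 1, 2, 3, 4, 5, 6, 7, 8],
    ])
    is_feasible, solved_grid = SolveItLikeAHuman().run(grid)
    return is_feasible, solved_grid  # expected: True and a fully filled grid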
# Source: src/base_solver/solve_it_like_a_human.py, from Mai13/sudoku (CC0-1.0)
using Test
using FlightMechanicsSimulator
using FlightMechanicsUtils
# Stevens, B. L., Lewis, F. L., & Johnson, E. N. (2015). Aircraft control
# and simulation: dynamics, controls design, and autonomous systems. John Wiley
# & Sons. (page 193 table 3.6-2)
trim_test_data = [
# TAS thtl AOA DE thtl_tol AOA_tol DE_tol
# ft/s unit deg deg unit deg deg
130 0.816 45.6 20.1 0.0005 0.05 0.15
140 0.736 40.3 -1.36 0.001 0.05 0.05
150 0.619 34.6 0.173 0.0005 0.05 0.05
170 0.464 27.2 0.621 0.001 0.05 0.05
640 0.23 0.742 -0.871 0.0005 0.015 0.0005
800 0.378 -0.045 -0.943 0.0005 0.001 0.001
200 0.287 19.7 0.723 0.0005 0.05 0.05
260 0.148 11.6 -0.09 0.0005 0.05 0.05
300 0.122 8.49 -0.591 0.0005 0.01 0.005
350 0.107 5.87 -0.539 0.001 0.005 0.005
400 0.108 4.16 -0.591 0.0005 0.005 0.005
440 0.113 3.19 -0.671 0.0005 0.005 0.005
500 0.137 2.14 -0.756 0.001 0.01 0.005
540 0.16 1.63 -0.798 0.0005 0.005 0.005
600 0.2 1.04 -0.846 0.0005 0.01 0.005
700 0.282 0.382 -0.9 0.0005 0.001 0.0005
]
xcg = 0.35
for case in eachrow(trim_test_data)
local x, controls, x_trim, controls_trim, x_dot_trim, outputs_trim, cost
x = [
case[1]*FT2M, #-> vt (m/s)
deg2rad(10.), # -> alpha (rad)
0.0, # -> beta (rad)
0.0, # -> phi (rad)
deg2rad(10.), # -> theta (rad)
0.0, # -> psi (rad)
0.0, # -> P (rad/s)
0.0, # -> Q (rad/s)
0.0, # -> R (rad/s)
0.0, # -> North (m)
0.0, # -> East (m)
0.0, # -> Altitude (m)
50.0, # -> Pow
]
controls = [
0.5, # thtl
0.0, # elev
0.0, # ail
0.0, # rudder
]
# TRIM
dssd, controls_trim, outputs_trim, cost = trim(
SixDOFAeroEuler(x),
controls,
F16(F16Stevens.MASS, F16Stevens.INERTIA, xcg),
F16StevensAtmosphere(x[12]),
LHDownGravity(FlightMechanicsSimulator.F16Stevens.GD*FT2M),
0.0,
0.0,
)
x_trim = get_x(dssd)
x_dot_trim = get_xdot(dssd)
@test isapprox(cost, zeros(6), atol=1e-12)
@test isapprox(controls_trim[1], case[2], atol=case[5]) # THTL
@test isapprox(rad2deg(x_trim[2]), case[3], atol=case[6]) # AOA
@test isapprox(controls_trim[2], case[4], atol=case[7]) # DE
end
# Stevens, B. L., Lewis, F. L., & Johnson, E. N. (2015). Aircraft control
# and simulation: dynamics, controls design, and autonomous systems. John Wiley
# & Sons. (page 195 table 3.6-3)
# NOMINAL (first column)
xcg = 0.35
x = [
502*FT2M, #-> vt (m/s)
deg2rad(10.), # -> alpha (rad)
0.0, # -> beta (rad)
0.0, # -> phi (rad)
deg2rad(10.), # -> theta (rad)
0.0, # -> psi (rad)
0.0, # -> P (rad/s)
0.0, # -> Q (rad/s)
0.0, # -> R (rad/s)
0.0, # -> North (m)
0.0, # -> East (m)
0.0, # -> Altitude (m)
50.0, # -> Pow
]
controls = [
0.5, # thtl
0.0, # elev
0.0, # ail
0.0, # rudder
]
dssd, controls_trim, outputs_trim, cost = trim(
SixDOFAeroEuler(x),
controls,
F16(F16Stevens.MASS, F16Stevens.INERTIA, xcg),
F16StevensAtmosphere(x[12]),
LHDownGravity(FlightMechanicsSimulator.F16Stevens.GD*FT2M),
0.0,
0.0,
)
x_trim = get_x(dssd)
x_dot_trim = get_xdot(dssd)
@test isapprox(cost, zeros(6), atol=1e-12)
@test isapprox(x_trim[2], 0.03691, atol=0.00005) # AOA
@test isapprox(x_trim[3], -4e-9, atol=1e-8) # AOS
@test isapprox(x_trim[4], 0) # PHI
@test isapprox(x_trim[5], 0.03691, atol=0.00005) # THETA
@test isapprox(x_trim[7], 0) # P
@test isapprox(x_trim[8], 0) # Q
@test isapprox(x_trim[9], 0) # R
@test isapprox(controls_trim[1], 0.1385, atol=0.0001) # THTL
@test isapprox(controls_trim[2], -0.7588, atol=0.0002) # DE
@test isapprox(controls_trim[3], -1.2e-7, atol=1e-6) # DA
@test isapprox(controls_trim[4], 6.2e-7, atol=1e-6) # DR
# XCG = 0.3 (second column)
xcg = 0.3
x = [
502*FT2M, #-> vt (m/s)
deg2rad(10.), # -> alpha (rad)
0.0, # -> beta (rad)
0.0, # -> phi (rad)
deg2rad(10.), # -> theta (rad)
0.0, # -> psi (rad)
0.0, # -> P (rad/s)
0.0, # -> Q (rad/s)
0.0, # -> R (rad/s)
0.0, # -> North (m)
0.0, # -> East (m)
0.0, # -> Altitude (m)
50.0, # -> Pow
]
controls = [
0.5, # thtl
0.0, # elev
0.0, # ail
0.0, # rudder
]
dssd, controls_trim, outputs_trim, cost = trim(
SixDOFAeroEuler(x),
controls,
F16(F16Stevens.MASS, F16Stevens.INERTIA, xcg),
F16StevensAtmosphere(x[12]),
LHDownGravity(FlightMechanicsSimulator.F16Stevens.GD*FT2M),
0.0,
0.0,
)
x_trim = get_x(dssd)
x_dot_trim = get_xdot(dssd)
@test isapprox(cost, zeros(6), atol=1e-12)
@test isapprox(x_trim[2], 0.03936, atol=0.00005) # AOA
@test isapprox(x_trim[3], 4.1e-9, atol=1e-8) # AOS
@test isapprox(x_trim[4], 0) # PHI
@test isapprox(x_trim[5], 0.03936, atol=0.00005) # THETA
@test isapprox(x_trim[7], 0) # P
@test isapprox(x_trim[8], 0) # Q
@test isapprox(x_trim[9], 0) # R
@test isapprox(controls_trim[1], 0.1485, atol=0.00005) # THTL
@test isapprox(controls_trim[2], -1.931, atol=0.0001) # DE
@test isapprox(controls_trim[3], -7e-8, atol=1e-6) # DA
@test isapprox(controls_trim[4], 8.3e-7, atol=1e-6) # DR
# XCG = 0.38 (third column)
xcg = 0.38
x = [
502*FT2M, #-> vt (m/s)
deg2rad(10.), # -> alpha (rad)
0.0, # -> beta (rad)
0.0, # -> phi (rad)
deg2rad(10.), # -> theta (rad)
0.0, # -> psi (rad)
0.0, # -> P (rad/s)
0.0, # -> Q (rad/s)
0.0, # -> R (rad/s)
0.0, # -> North (m)
0.0, # -> East (m)
0.0, # -> Altitude (m)
50.0, # -> Pow
]
controls = [
0.5, # thtl
0.0, # elev
0.0, # ail
0.0, # rudder
]
dssd, controls_trim, outputs_trim, cost = trim(
SixDOFAeroEuler(x),
controls,
F16(F16Stevens.MASS, F16Stevens.INERTIA, xcg),
F16StevensAtmosphere(x[12]),
LHDownGravity(FlightMechanicsSimulator.F16Stevens.GD*FT2M),
0.0,
0.0,
)
x_trim = get_x(dssd)
x_dot_trim = get_xdot(dssd)
@test isapprox(cost, zeros(6), atol=1e-12)
@test isapprox(x_trim[2], 0.03544, atol=0.00005) # AOA
@test isapprox(x_trim[3], 3.1e-8, atol=1e-7) # AOS
@test isapprox(x_trim[4], 0) # PHI
@test isapprox(x_trim[5], 0.03544, atol=0.00005) # THETA
@test isapprox(x_trim[7], 0) # P
@test isapprox(x_trim[8], 0) # Q
@test isapprox(x_trim[9], 0) # R
@test isapprox(controls_trim[1], 0.1325, atol=0.0001) # THTL
@test isapprox(controls_trim[2], -0.05590, atol=0.0005) # DE
@test isapprox(controls_trim[3], -5.1e-8, atol=1e-6) # DA
@test isapprox(controls_trim[4], 4.3e-6, atol=1e-5) # DR
# Coordinated turn (fourth column)
xcg = 0.3
x = [
502*FT2M, #-> vt (m/s)
deg2rad(10.), # -> alpha (rad)
0.0, # -> beta (rad)
0.0, # -> phi (rad)
deg2rad(10.), # -> theta (rad)
0.0, # -> psi (rad)
0.0, # -> P (rad/s)
0.0, # -> Q (rad/s)
0.0, # -> R (rad/s)
0.0, # -> North (m)
0.0, # -> East (m)
0.0, # -> Altitude (m)
50.0, # -> Pow
]
controls = [
0.5, # thtl
0.0, # elev
0.0, # ail
0.0, # rudder
]
dssd, controls_trim, outputs_trim, cost = trim(
SixDOFAeroEuler(x),
controls,
F16(F16Stevens.MASS, F16Stevens.INERTIA, xcg),
F16StevensAtmosphere(x[12]),
LHDownGravity(FlightMechanicsSimulator.F16Stevens.GD*FT2M),
0.0,
0.3, # rad/s
)
x_trim = get_x(dssd)
x_dot_trim = get_xdot(dssd)
@test isapprox(cost, zeros(6), atol=1e-12)
@test isapprox(x_trim[2], 0.2485, atol=0.0005) # AOA
@test isapprox(x_trim[3], 4.8e-4, atol=0.00005) # AOS
@test isapprox(x_trim[4], 1.367, atol=0.0005) # PHI
@test isapprox(x_trim[5], 0.05185, atol=0.00005) # THETA
@test isapprox(x_trim[7], -0.01555, atol=0.00001) # P
@test isapprox(x_trim[8], 0.2934, atol=0.00005) # Q
@test isapprox(x_trim[9], 0.06071, atol=0.000005) # R
@test isapprox(controls_trim[1], 0.8499, atol=0.0005) # THTL
@test isapprox(controls_trim[2], -6.256, atol=0.001) # DE
@test isapprox(controls_trim[3], 0.09891, atol=0.00005) # DA
@test isapprox(controls_trim[4], -0.4218, atol=0.0005) # DR
# Source: test/f16/trimmer.jl, from jonniedie/FlightMechanicsSimulator.jl (MIT)
""" Module for image processing core methods
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
from IPython import embed
import numpy as np
from scipy import signal, ndimage
from scipy.optimize import curve_fit
from pypeit import msgs
from pypeit import utils
from pypeit.core import parse
def lacosmic(sciframe, saturation, nonlinear, varframe=None, maxiter=1, grow=1.5,
remove_compact_obj=True, sigclip=5.0, sigfrac=0.3, objlim=5.0):
"""
Identify cosmic rays using the L.A.Cosmic algorithm
U{http://www.astro.yale.edu/dokkum/lacosmic/}
(article : U{http://arxiv.org/abs/astro-ph/0108003})
This routine is mostly courtesy of Malte Tewes
    Args:
        sciframe (`numpy.ndarray`_):
            2D science frame to search for cosmic rays.
        saturation (:obj:`float`):
            Detector saturation level, in the same units as ``sciframe``.
        nonlinear (:obj:`float`):
            Fraction of the saturation level at which the detector response
            becomes non-linear; pixels above ``saturation*nonlinear`` are
            treated as saturated.
        varframe (`numpy.ndarray`_, optional):
            Variance frame. If None, a noise model is constructed from a
            median filter of the science frame.
        maxiter (:obj:`int`, optional):
            Maximum number of detection iterations.
        grow (:obj:`float`, optional):
            Radius (in pixels) by which to grow the final cosmic-ray mask.
        remove_compact_obj (:obj:`bool`, optional):
            Reject candidates consistent with compact bright objects.
        sigclip (:obj:`float`, optional):
            Threshold for identifying a CR.
        sigfrac (:obj:`float`, optional):
            Fraction of ``sigclip`` used as the (lower) threshold for pixels
            neighboring a detected CR.
        objlim (:obj:`float`, optional):
            Minimum contrast between the Laplacian image and the fine-structure
            image required to flag a candidate as a CR.
    Returns:
        `numpy.ndarray`_: boolean mask of cosmic rays (True = CR)
    """
msgs.info("Detecting cosmic rays with the L.A.Cosmic algorithm")
# msgs.work("Include these parameters in the settings files to be adjusted by the user")
# Set the settings
scicopy = sciframe.copy()
    crmask = np.zeros(sciframe.shape, dtype=bool)
sigcliplow = sigclip * sigfrac
# Determine if there are saturated pixels
satpix = np.zeros_like(sciframe)
# satlev = settings_det['saturation']*settings_det['nonlinear']
satlev = saturation*nonlinear
wsat = np.where(sciframe >= satlev)
if wsat[0].size == 0: satpix = None
else:
satpix[wsat] = 1.0
        satpix = satpix.astype(bool)
# Define the kernels
    laplkernel = np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]]) # Laplacian kernel
growkernel = np.ones((3,3))
for i in range(1, maxiter+1):
msgs.info("Convolving image with Laplacian kernel")
# Subsample, convolve, clip negative values, and rebin to original size
subsam = utils.subsample(scicopy)
conved = signal.convolve2d(subsam, laplkernel, mode="same", boundary="symm")
cliped = conved.clip(min=0.0)
lplus = utils.rebin_evlist(cliped, np.array(cliped.shape)/2.0)
msgs.info("Creating noise model")
# Build a custom noise map, and compare this to the laplacian
        m5 = ndimage.median_filter(scicopy, size=5, mode='mirror')
if varframe is None:
noise = np.sqrt(np.abs(m5))
else:
noise = np.sqrt(varframe)
msgs.info("Calculating Laplacian signal to noise ratio")
# Laplacian S/N
s = lplus / (2.0 * noise) # Note that the 2.0 is from the 2x2 subsampling
# Remove the large structures
        sp = s - ndimage.median_filter(s, size=5, mode='mirror')
msgs.info("Selecting candidate cosmic rays")
# Candidate cosmic rays (this will include HII regions)
candidates = sp > sigclip
nbcandidates = np.sum(candidates)
msgs.info("{0:5d} candidate pixels".format(nbcandidates))
# At this stage we use the saturated stars to mask the candidates, if available :
if satpix is not None:
msgs.info("Masking saturated pixels")
candidates = np.logical_and(np.logical_not(satpix), candidates)
nbcandidates = np.sum(candidates)
msgs.info("{0:5d} candidate pixels not part of saturated stars".format(nbcandidates))
msgs.info("Building fine structure image")
# We build the fine structure image :
        m3 = ndimage.median_filter(scicopy, size=3, mode='mirror')
        m37 = ndimage.median_filter(m3, size=7, mode='mirror')
f = m3 - m37
f /= noise
f = f.clip(min=0.01)
msgs.info("Removing suspected compact bright objects")
# Now we have our better selection of cosmics :
if remove_compact_obj:
cosmics = np.logical_and(candidates, sp/f > objlim)
else:
cosmics = candidates
nbcosmics = np.sum(cosmics)
msgs.info("{0:5d} remaining candidate pixels".format(nbcosmics))
        # What follows is a special treatment for neighbors, with more relaxed constraints.
msgs.info("Finding neighboring pixels affected by cosmic rays")
        # We grow these cosmics a first time to determine the immediate neighborhood :
        growcosmics = signal.convolve2d(cosmics.astype(np.float32), growkernel, mode="same", boundary="symm").astype(bool)
# From this grown set, we keep those that have sp > sigmalim
# so obviously not requiring sp/f > objlim, otherwise it would be pointless
growcosmics = np.logical_and(sp > sigclip, growcosmics)
# Now we repeat this procedure, but lower the detection limit to sigmalimlow :
        finalsel = signal.convolve2d(growcosmics.astype(np.float32), growkernel, mode="same", boundary="symm").astype(bool)
finalsel = np.logical_and(sp > sigcliplow, finalsel)
# Unmask saturated pixels:
if satpix is not None:
msgs.info("Masking saturated stars")
finalsel = np.logical_and(np.logical_not(satpix), finalsel)
ncrp = np.sum(finalsel)
msgs.info("{0:5d} pixels detected as cosmics".format(ncrp))
# We find how many cosmics are not yet known :
newmask = np.logical_and(np.logical_not(crmask), finalsel)
nnew = np.sum(newmask)
# We update the mask with the cosmics we have found :
crmask = np.logical_or(crmask, finalsel)
msgs.info("Iteration {0:d} -- {1:d} pixels identified as cosmic rays ({2:d} new)".format(i, ncrp, nnew))
if ncrp == 0:
break
# Additional algorithms (not traditionally implemented by LA cosmic) to
# remove some false positives.
msgs.work("The following algorithm would be better on the rectified, tilts-corrected image")
filt = ndimage.sobel(sciframe, axis=1, mode='constant')
filty = ndimage.sobel(filt/np.sqrt(np.abs(sciframe)), axis=0, mode='constant')
filty[np.where(np.isnan(filty))]=0.0
sigimg = cr_screen(filty)
    sigsmth = ndimage.gaussian_filter(sigimg, 1.5)
    sigsmth[np.where(np.isnan(sigsmth))] = 0.0
    sigmask = np.zeros(sciframe.shape, dtype=bool)
sigmask[np.where(sigsmth>sigclip)] = True
crmask = np.logical_and(crmask, sigmask)
msgs.info("Growing cosmic ray mask by 1 pixel")
    crmask = grow_masked(crmask.astype(float), grow, 1.0)
return crmask.astype(bool)
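# Illustrative sketch of calling lacosmic on a synthetic frame: a flat
# background with a single hot pixel injected as a fake cosmic ray. All of
# the numbers here (background level, spike amplitude, saturation) are
# arbitrary assumptions for the example only.
def _demo_lacosmic():
    rng = np.random.default_rng(42)
    frame = 100. + rng.normal(scale=3., size=(64, 64))
    frame[32, 32] += 500.                    # fake cosmic-ray spike
    crmask = lacosmic(frame, saturation=65535., nonlinear=0.76)
    return crmask                            # boolean mask, True at flagged pixels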
def cr_screen(a, mask_value=0.0, spatial_axis=1):
r"""
Calculate the significance of pixel deviations from the median along
the spatial direction.
No type checking is performed of the input array; however, the
function assumes floating point values.
Args:
a (numpy.ndarray): Input 2D array
mask_value (float): (**Optional**) Values to ignore during the
calculation of the median. Default is 0.0.
spatial_axis (int): (**Optional**) Axis along which to calculate
the median. Default is 1.
Returns:
numpy.ndarray: Returns a map of :math:`|\Delta_{i,j}|/\sigma_j`,
where :math:`\Delta_{i,j}` is the difference between the pixel
value and the median along axis :math:`i` and :math:`\sigma_j`
is robustly determined using the median absolute deviation,
        :math:`\sigma_j = 1.4826\,{\rm MAD}`.
"""
# Check input
if len(a.shape) != 2:
msgs.error('Input array must be two-dimensional.')
if spatial_axis not in [0,1]:
msgs.error('Spatial axis must be 0 or 1.')
# Mask the pixels equal to mask value: should use np.isclose()
_a = np.ma.MaskedArray(a, mask=(a==mask_value))
# Get the median along the spatial axis
meda = np.ma.median(_a, axis=spatial_axis)
# Get a robust measure of the standard deviation using the median
# absolute deviation; 1.4826 factor is the ratio of sigma/MAD
d = np.absolute(_a - meda[:,None])
mada = 1.4826*np.ma.median(d, axis=spatial_axis)
# Return the ratio of the difference to the standard deviation
return np.ma.divide(d, mada[:,None]).filled(mask_value)
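# Illustrative sketch of cr_screen on a synthetic frame: one strongly
# deviant pixel should produce a large |Delta|/sigma value in the output
# significance map. The numbers are arbitrary assumptions.
def _demo_cr_screen():
    rng = np.random.default_rng(0)
    frame = rng.normal(loc=10., scale=1., size=(32, 32))
    frame[10, 20] += 50.                     # deviant pixel
    sigimg = cr_screen(frame)
    return sigimg[10, 20]                    # expected to be roughly 50 sigma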
def grow_masked(img, grow, growval):
    """
    Grow a mask by the provided radius.
    Args:
        img (`numpy.ndarray`_):
            2D array in which values of ``growval`` mark the masked pixels.
        grow (:obj:`float`):
            Radius in pixels by which each masked pixel is grown.
        growval (:obj:`float`):
            Value of the masked pixels.
    Returns:
        `numpy.ndarray`_: Copy of ``img`` with the grown mask.
    """
if not np.any(img == growval):
return img
_img = img.copy()
sz_x, sz_y = img.shape
d = int(1+grow)
rsqr = grow*grow
# Grow any masked values by the specified amount
for x in range(sz_x):
for y in range(sz_y):
if img[x,y] != growval:
continue
mnx = 0 if x-d < 0 else x-d
mxx = x+d+1 if x+d+1 < sz_x else sz_x
mny = 0 if y-d < 0 else y-d
mxy = y+d+1 if y+d+1 < sz_y else sz_y
for i in range(mnx,mxx):
for j in range(mny, mxy):
if (i-x)*(i-x)+(j-y)*(j-y) <= rsqr:
_img[i,j] = growval
return _img
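# Illustrative sketch of grow_masked: a single masked pixel grown by a
# 1.5-pixel radius, matching the growth applied to the final cosmic-ray
# mask in lacosmic above.
def _demo_grow_masked():
    mask = np.zeros((5, 5))
    mask[2, 2] = 1.0                         # single masked pixel
    grown = grow_masked(mask, 1.5, 1.0)
    return grown                             # the full 3x3 block around (2, 2) is now 1.0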
def gain_frame(amp_img, gain):
"""
Generate an image with the gain for each pixel.
Args:
amp_img (`numpy.ndarray`_):
Integer array that identifies which (1-indexed) amplifier
was used to read each pixel.
        gain (array-like):
            List of amplifier gain values in e-/ADU. The gain for amplifier 1
            must be provided by `gain[0]`, the gain for amplifier 2 by
            `gain[1]`, etc.
Returns:
`numpy.ndarray`_: Image with the gain for each pixel.
"""
# TODO: Remove this or actually do it.
# msgs.warn("Should probably be measuring the gain across the amplifier boundary")
# Build the gain image
gain_img = np.zeros_like(amp_img, dtype=float)
for i,_gain in enumerate(gain):
gain_img[amp_img == i+1] = _gain
    # Return the image
return gain_img
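# Illustrative sketch of gain_frame for a hypothetical two-amplifier
# detector: the left half is read by amplifier 1 and the right half by
# amplifier 2. The gain values are assumptions for the example only.
def _demo_gain_frame():
    amp_img = np.ones((4, 8), dtype=int)
    amp_img[:, 4:] = 2                       # right half read by amplifier 2
    return gain_frame(amp_img, [1.2, 1.3])   # e-/ADU for amplifiers 1 and 2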
def rn2_frame(datasec_img, ronoise, units='e-', gain=None, digitization=False):
r"""
Construct a readnoise variance image.
Provided the detector readnoise and gain for each amplifier, this constructs
an image with the combination of the readnoise and digitization (or
quantization) noise expected for a single detector readout. Digitization
noise is a fixed :math:`\sqrt{1/12}` ADU [1]_ [2]_, derived as the second
moment of a uniform distribution between values of -1/2 to 1/2 (i.e., the
variance associated with converting a number of electrons into an ADU
integer quantized by the gain). The digitization noise is typically much
smaller than the readnoise, unless the gain is very large, and, depending on
how it was measured, the digitization noise is most often incorporated in
the documented readnoise of the given instrument. To include the
digitization noise in the variance, you must provide ``gain`` and set
``digitization=True``.
The variance calculation in electrons is :math:`V = {\rm RN}^2 +
\gamma^2/12`, when including the digitization noise, and simply :math:`V =
{\rm RN}^2` otherwise; where RN is the readnoise and :math:`\gamma` is the
gain in e-/ADU. In the rare case one would need the units in ADU, the
returned variance is :math:`V/\gamma^2`.
.. [1] `Newberry (1991, PASP, 103, 122) <https://ui.adsabs.harvard.edu/abs/1991PASP..103..122N/abstract>`_
.. [2] `Merline & Howell (1995, ExA, 6, 163) <https://ui.adsabs.harvard.edu/abs/1995ExA.....6..163M/abstract>`_
Args:
datasec_img (`numpy.ndarray`_):
An integer array indicating the 1-indexed amplifier used to read
each pixel in the main data section of the detector. Values of 0
            are ignored. Amplifier numbers are expected to be sequential and to match
the number of readnoise and gain values provided. The shape of this
image dictates the shape of the output readnoise variance image.
ronoise (:obj:`float`, array-like):
The value of the readnoise for each amplifier in electrons (e-). If
there is only one amplifier, this can be provided as a single float.
units (:obj:`str`, optional):
Units for the output variance. Options are ``'e-'`` for variance in
square electrons (counts) or ``'ADU'`` for square ADU.
gain (:obj:`float`, array-like, optional):
The value of the gain for each amplifier in e-/ADU. If
``digitization`` is False, this is ignored.
digitization (:obj:`bool`, optional):
Include digitization error in the calculation. If True, ``gain``
*must* be provided.
Returns:
`numpy.ndarray`_: The image variance resulting from reading the detector
in the selected units for each pixel. The shape is the same as
``datasec_img``. Pixels where ``datasec_img`` is 0 are set to 0.
"""
# Check units
if units not in ['e-', 'ADU']:
msgs.error(f"Unknown units: {units}. Must be 'e-' or 'ADU'.")
if gain is None and (digitization or units == 'ADU'):
msgs.error('If including digitization error or return units in ADU, must provide gain.')
# Determine the number of amplifiers from the datasec image
_datasec_img = datasec_img.astype(int)
numamplifiers = np.amax(_datasec_img)
if numamplifiers == 0:
msgs.error('Amplifier identification image (datasec_img) does not have any values larger '
'than 0! The image should indicate the 1-indexed integer of the amplifier '
'used to read each pixel.')
# Check the number of RN values
_ronoise = np.atleast_1d(ronoise) if isinstance(ronoise, (list, np.ndarray)) \
else np.array([ronoise])
if len(_ronoise) != numamplifiers:
msgs.error('Must provide a read-noise for each amplifier.')
# Get the amplifier indices
indx = np.logical_not(_datasec_img == 0)
amp = _datasec_img[indx] - 1
# Instantiate the output image. Any pixels without an assigned amplifier
# are given a noise of 0.
var = np.zeros(_datasec_img.shape, dtype=float)
var[indx] = (_ronoise**2)[amp]
if not digitization and units == 'e-':
return var
# Check the number of gain values
_gain = np.atleast_1d(gain) if isinstance(gain, (list, np.ndarray)) else np.array([gain])
if len(_gain) != numamplifiers:
msgs.error('Must provide a gain for each amplifier.')
if digitization:
# Add in the digitization error
var[indx] += (_gain**2/12)[amp]
if units == 'ADU':
# Convert to ADUs
var[indx] /= (_gain**2)[amp]
return var
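# Illustrative sketch of rn2_frame for the same hypothetical two-amplifier
# layout. With a readnoise of 4 e- and a gain of 2 e-/ADU, the digitization
# term adds gain**2/12 ~ 0.33 e-^2 on top of RN**2 = 16 e-^2 for amplifier 1.
def _demo_rn2_frame():
    datasec_img = np.ones((4, 8), dtype=int)
    datasec_img[:, 4:] = 2                   # right half read by amplifier 2
    var = rn2_frame(datasec_img, [4., 4.5])
    var_dig = rn2_frame(datasec_img, [4., 4.5], gain=[2., 2.1], digitization=True)
    return var, var_dig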
def rect_slice_with_mask(image, mask, mask_val=1):
"""
Generate rectangular slices from a mask image.
Args:
image (`numpy.ndarray`_):
Image to mask
mask (`numpy.ndarray`_):
Mask image
mask_val (:obj:`int`, optional):
Value to mask on
Returns:
:obj:`tuple`: The image at mask values and a 2-tuple with the
:obj:`slice` objects that select the masked data.
"""
pix = np.where(mask == mask_val)
slices = (slice(np.min(pix[0]), np.max(pix[0])+1), slice(np.min(pix[1]), np.max(pix[1])+1))
return image[slices], slices
def subtract_overscan(rawframe, datasec_img, oscansec_img, method='savgol', params=[5,65],
var=None):
"""
Subtract overscan.
Args:
rawframe (`numpy.ndarray`_):
Frame from which to subtract overscan. Must be 2d.
datasec_img (`numpy.ndarray`_):
An array the same shape as ``rawframe`` that identifies the pixels
associated with the data on each amplifier; 0 for no data, 1 for
amplifier 1, 2 for amplifier 2, etc.
oscansec_img (:obj:`numpy.ndarray`):
An array the same shape as ``rawframe`` that identifies the pixels
associated with the overscan region on each amplifier; 0 for no
data, 1 for amplifier 1, 2 for amplifier 2, etc.
method (:obj:`str`, optional):
The method used to fit the overscan region. Options are
polynomial, savgol, median.
params (:obj:`list`, optional):
Parameters for the overscan subtraction. For ``method=polynomial``,
set ``params`` to the order, number of pixels, number of repeats;
for ``method=savgol``, set ``params`` to the order and window size;
for ``method=median``, ``params`` are ignored.
var (`numpy.ndarray`_, optional):
Variance in the raw frame. If provided, must have the same shape as
``rawframe`` and used to estimate the error in the overscan
subtraction. The estimated error is the standard error in the
median for the pixels included in the overscan correction. This
estimate is also used for the ``'savgol'`` method as an upper limit.
If None, no variance in the overscan subtraction is calculated, and
the 2nd object in the returned tuple is None.
Returns:
:obj:`tuple`: The input frame with the overscan region subtracted and an
estimate of the variance in the overscan subtraction; both have the same
        shape as the input ``rawframe``. If ``var`` is not provided, the 2nd
returned object is None.
"""
# Check input
if method.lower() not in ['polynomial', 'savgol', 'median']:
msgs.error(f'Unrecognized overscan subtraction method: {method}')
if rawframe.ndim != 2:
msgs.error('Input raw frame must be 2D.')
if datasec_img.shape != rawframe.shape:
msgs.error('Datasec image must have the same shape as the raw frame.')
if oscansec_img.shape != rawframe.shape:
msgs.error('Overscan section image must have the same shape as the raw frame.')
if var is not None and var.shape != rawframe.shape:
msgs.error('Variance image must have the same shape as the raw frame.')
# Copy the data so that the subtraction is not done in place
no_overscan = rawframe.copy()
_var = None if var is None else np.zeros(var.shape, dtype=float)
# Amplifiers
amps = np.unique(datasec_img[datasec_img > 0]).tolist()
# Perform the overscan subtraction for each amplifier
for amp in amps:
# Pull out the overscan data
if np.sum(oscansec_img == amp) == 0:
            msgs.error(f'No overscan region for amplifier {amp}!')
overscan, os_slice = rect_slice_with_mask(rawframe, oscansec_img, amp)
if var is not None:
osvar = var[os_slice]
# Pull out the real data
if np.sum(datasec_img == amp) == 0:
            msgs.error(f'No data region for amplifier {amp}!')
data, data_slice = rect_slice_with_mask(rawframe, datasec_img, amp)
# Shape along at least one axis must match
if not np.any([dd == do for dd, do in zip(data.shape, overscan.shape)]):
            msgs.error('Overscan sections do not match amplifier sections for '
                       'amplifier {0}'.format(amp))
compress_axis = 1 if data.shape[0] == overscan.shape[0] else 0
# Fit/Model the overscan region
osfit = np.median(overscan) if method.lower() == 'median' \
else np.median(overscan, axis=compress_axis)
if var is not None:
# pi/2 coefficient yields asymptotic variance in the median relative
# to the error in the mean
osvar = np.pi/2*(np.sum(osvar)/osvar.size**2 if method.lower() == 'median'
else np.sum(osvar, axis=compress_axis)/osvar.shape[compress_axis]**2)
if method.lower() == 'polynomial':
# TODO: Use np.polynomial.polynomial.polyfit instead?
c = np.polyfit(np.arange(osfit.size), osfit, params[0])
ossub = np.polyval(c, np.arange(osfit.size))
elif method.lower() == 'savgol':
ossub = signal.savgol_filter(osfit, params[1], params[0])
elif method.lower() == 'median':
# Subtract scalar and continue
no_overscan[data_slice] -= osfit
if var is not None:
_var[data_slice] = osvar
continue
# Subtract along the appropriate axis
no_overscan[data_slice] -= (ossub[:, None] if compress_axis == 1 else ossub[None, :])
if var is not None:
_var[data_slice] = (osvar[:,None] if compress_axis == 1 else osvar[None,:])
return no_overscan, _var
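# Illustrative sketch of subtract_overscan for a hypothetical single-amplifier
# frame: 16 data columns plus 4 overscan columns holding the same bias level,
# removed here with the median method. Geometry and levels are assumptions.
def _demo_subtract_overscan():
    rng = np.random.default_rng(1)
    raw = 500. + rng.normal(scale=2., size=(16, 20))
    datasec_img = np.zeros((16, 20), dtype=int)
    oscansec_img = np.zeros((16, 20), dtype=int)
    datasec_img[:, :16] = 1                  # data region of amplifier 1
    oscansec_img[:, 16:] = 1                 # overscan columns of amplifier 1
    nosub, _ = subtract_overscan(raw, datasec_img, oscansec_img, method='median')
    return nosub                             # data region should now be near zero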
def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1, debug=False):
"""
Subtract a sinusoidal pattern from the input rawframe. The algorithm
calculates the frequency of the signal, generates a model, and subtracts
this signal from the data. This sinusoidal pattern noise was first
identified in KCWI, but the source of this pattern noise is not currently
known.
Args:
        rawframe (`numpy.ndarray`_):
            Frame from which to subtract the pattern.
        datasec_img (`numpy.ndarray`_):
            An array the same shape as ``rawframe`` that identifies
            the pixels associated with the data on each amplifier;
            0 for no data, 1 for amplifier 1, 2 for amplifier 2, etc.
        oscansec_img (`numpy.ndarray`_):
            An array the same shape as ``rawframe`` that identifies
            the pixels associated with the overscan region on each
            amplifier; 0 for no data, 1 for amplifier 1, 2 for
            amplifier 2, etc.
frequency (:obj:`float`, :obj:`list`, optional):
The frequency (or list of frequencies - one for each amplifier)
of the sinusoidal pattern. If None, the frequency of each amplifier
will be determined from the overscan region.
        axis (:obj:`int`, optional):
            The axis along which the pattern should be subtracted.
        debug (:obj:`bool`, optional):
            If True, write diagnostic images and open an interactive session.
Returns:
`numpy.ndarray`_: The input frame with the pattern subtracted
"""
msgs.info("Analyzing detector pattern")
# Copy the data so that the subtraction is not done in place
frame_orig = rawframe.copy()
outframe = rawframe.copy()
tmp_oscan = oscansec_img.copy()
tmp_data = datasec_img.copy()
if axis == 0:
frame_orig = rawframe.copy().T
outframe = rawframe.copy().T
tmp_oscan = oscansec_img.copy().T
tmp_data = datasec_img.copy().T
# Amplifiers
amps = np.sort(np.unique(tmp_data[tmp_data > 0])).tolist()
# Estimate the frequency in each amplifier (then average over all amps)
if frequency is None:
frq = np.zeros(len(amps))
for aa, amp in enumerate(amps):
pixs = np.where(tmp_oscan == amp)
#pixs = np.where((tmp_oscan == amp) | (tmp_data == amp))
cmin, cmax = np.min(pixs[0]), np.max(pixs[0])
rmin, rmax = np.min(pixs[1]), np.max(pixs[1])
frame = frame_orig[cmin:cmax, rmin:rmax].astype(np.float64)
frq[aa] = pattern_frequency(frame)
frequency = np.mean(frq)
# Perform the overscan subtraction for each amplifier
for aa, amp in enumerate(amps):
# Get the frequency to use for this amplifier
if isinstance(frequency, list):
# if it's a list, then use a different frequency for each amplifier
use_fr = frequency[aa]
else:
# float
use_fr = frequency
# Extract overscan
overscan, os_slice = rect_slice_with_mask(frame_orig, tmp_oscan, amp)
# Extract overscan+data
oscandata, osd_slice = rect_slice_with_mask(frame_orig, tmp_oscan+tmp_data, amp)
# Subtract the DC offset
overscan -= np.median(overscan, axis=1)[:, np.newaxis]
# Convert frequency to the size of the overscan region
msgs.info("Subtracting detector pattern with frequency = {0:f}".format(use_fr))
use_fr *= (overscan.shape[1]-1)
        # Get a first guess of the amplitude and phase information from the
        # FFT of each overscan row
        fft_os = np.fft.rfft(overscan, axis=1)
        idx = (np.arange(overscan.shape[0]), np.argmax(np.abs(fft_os), axis=1))
        # Convert result to amplitude and phase
        amp_guess = np.abs(fft_os)[idx] * (2.0 / overscan.shape[1])
        phss = np.arctan2(fft_os.imag, fft_os.real)[idx]
        # Use the above as initial guess parameters in chi-squared minimisation
cosfunc = lambda xarr, *p: p[0] * np.cos(2.0 * np.pi * p[1] * xarr + p[2])
xdata, step = np.linspace(0.0, 1.0, overscan.shape[1], retstep=True)
xdata_all = (np.arange(osd_slice[1].start, osd_slice[1].stop) - os_slice[1].start) * step
model_pattern = np.zeros_like(oscandata)
val = np.zeros(overscan.shape[0])
# Get the best estimate of the amplitude
for ii in range(overscan.shape[0]):
try:
                popt, pcov = curve_fit(cosfunc, xdata, overscan[ii, :], p0=[amp_guess[ii], use_fr, phss[ii]],
                                       bounds=([-np.inf, use_fr * 0.99999999, -np.inf], [+np.inf, use_fr * 1.00000001, +np.inf]))
except ValueError:
msgs.warn("Input data invalid for pattern subtraction of row {0:d}/{1:d}".format(ii + 1, overscan.shape[0]))
continue
except RuntimeError:
msgs.warn("Pattern subtraction fit failed for row {0:d}/{1:d}".format(ii + 1, overscan.shape[0]))
continue
val[ii] = popt[0]
model_pattern[ii, :] = cosfunc(xdata_all, *popt)
use_amp = np.median(val)
# Get the best estimate of the phase, and generate a model
for ii in range(overscan.shape[0]):
try:
popt, pcov = curve_fit(cosfunc, xdata, overscan[ii, :], p0=[use_amp, use_fr, phss[ii]],
bounds=([use_amp * 0.99999999, use_fr * 0.99999999, -np.inf],
[use_amp * 1.00000001, use_fr * 1.00000001, +np.inf]))
except ValueError:
msgs.warn("Input data invalid for pattern subtraction of row {0:d}/{1:d}".format(ii + 1, overscan.shape[0]))
continue
except RuntimeError:
msgs.warn("Pattern subtraction fit failed for row {0:d}/{1:d}".format(ii + 1, overscan.shape[0]))
continue
model_pattern[ii, :] = cosfunc(xdata_all, *popt)
outframe[osd_slice] -= model_pattern
if debug:
embed()
import astropy.io.fits as fits
hdu = fits.PrimaryHDU(rawframe)
hdu.writeto("tst_raw.fits", overwrite=True)
hdu = fits.PrimaryHDU(outframe)
hdu.writeto("tst_sub.fits", overwrite=True)
hdu = fits.PrimaryHDU(rawframe - outframe)
hdu.writeto("tst_mod.fits", overwrite=True)
    # Transpose the output frame if the pattern was subtracted along the other axis
if axis == 0:
outframe = outframe.T
# Return the result
return outframe
def pattern_frequency(frame, axis=1):
"""
Using the supplied 2D array, calculate the pattern frequency
along the specified axis.
Args:
frame (`numpy.ndarray`_):
2D array to measure the pattern frequency
        axis (:obj:`int`, optional):
            The axis along which the pattern frequency is measured.
Returns:
:obj:`float`: The frequency of the sinusoidal pattern.
"""
# For axis=0, transpose
arr = frame.copy()
if axis == 0:
arr = frame.T
elif axis != 1:
msgs.error("frame must be a 2D image, and axis must be 0 or 1")
# Subtract the DC offset
arr -= np.median(arr, axis=1)[:, np.newaxis]
# Find significant deviations and ignore those rows
mad = 1.4826*np.median(np.abs(arr))
ww = np.where(arr > 10*mad)
# Create a mask of these rows
msk = np.sort(np.unique(ww[0]))
# Compute the Fourier transform to obtain an estimate of the dominant frequency component
amp = np.fft.rfft(arr, axis=1)
idx = (np.arange(arr.shape[0]), np.argmax(np.abs(amp), axis=1))
# Construct the variables of the sinusoidal waveform
amps = (np.abs(amp))[idx] * (2.0 / arr.shape[1])
phss = np.arctan2(amp.imag, amp.real)[idx]
frqs = idx[1]
    # Use the above as initial guess parameters in chi-squared minimisation
cosfunc = lambda xarr, *p: p[0] * np.cos(2.0 * np.pi * p[1] * xarr + p[2])
xdata = np.linspace(0.0, 1.0, arr.shape[1])
# Calculate the amplitude distribution
amp_dist = np.zeros(arr.shape[0])
frq_dist = np.zeros(arr.shape[0])
    # Loop over all rows to obtain independent estimates that can be averaged
for ii in range(arr.shape[0]):
if ii in msk:
continue
try:
popt, pcov = curve_fit(cosfunc, xdata, arr[ii, :], p0=[amps[ii], frqs[ii], phss[ii]],
bounds=([-np.inf, frqs[ii]-1, -np.inf],
[+np.inf, frqs[ii]+1, +np.inf]))
except ValueError:
msgs.warn(f'Input data invalid for pattern frequency fit of row {ii+1}/{arr.shape[0]}')
continue
except RuntimeError:
msgs.warn(f'Pattern frequency fit failed for row {ii+1}/{arr.shape[0]}')
continue
amp_dist[ii] = popt[0]
frq_dist[ii] = popt[1]
ww = np.where(amp_dist > 0.0)
use_amp = np.median(amp_dist[ww])
use_frq = np.median(frq_dist[ww])
# Calculate the frequency distribution with a prior on the amplitude
frq_dist = np.zeros(arr.shape[0])
for ii in range(arr.shape[0]):
if ii in msk:
continue
try:
popt, pcov = curve_fit(cosfunc, xdata, arr[ii, :], p0=[use_amp, use_frq, phss[ii]],
bounds=([use_amp * 0.99999999, use_frq-1, -np.inf],
[use_amp * 1.00000001, use_frq+1, +np.inf]))
except ValueError:
msgs.warn(f'Input data invalid for pattern frequency fit of row {ii+1}/{arr.shape[0]}')
continue
except RuntimeError:
msgs.warn(f'Pattern frequency fit failed for row {ii+1}/{arr.shape[0]}')
continue
frq_dist[ii] = popt[1]
# Ignore masked values, and return the best estimate of the frequency
ww = np.where(frq_dist > 0.0)
medfrq = np.median(frq_dist[ww])
return medfrq/(arr.shape[1]-1)
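# Illustrative sketch of pattern_frequency: recover the frequency of a pure
# sinusoidal pattern. With ncols columns and k full cycles per row, the
# expected return value is approximately k / (ncols - 1) cycles per pixel.
def _demo_pattern_frequency():
    nrows, ncols, k = 32, 500, 12.0
    xarr = np.linspace(0.0, 1.0, ncols)
    frame = np.tile(5.0 * np.cos(2.0 * np.pi * k * xarr), (nrows, 1))
    return pattern_frequency(frame)          # ~ 12 / 499 cycles per pixel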
# TODO: Provide a replace_pixels method that does this on a pixel by
# pixel basis instead of full columns.
def replace_columns(img, bad_cols, replace_with='mean', copy=False):
"""
Replace bad image columns.
Args:
img (`numpy.ndarray`_):
A 2D array with image values to replace.
        bad_cols (`numpy.ndarray`_):
            Boolean array selecting bad columns in `img`. Must contain one
            element per image column (i.e., have length ``img.shape[1]``).
replace_with (:obj:`str`, optional):
Method to use for the replacements. Can be 'mean' (see
:func:`replace_column_mean`) or 'linear' (see
:func:`replace_column_linear`).
copy (:obj:`bool`, optional):
Copy `img` to a new array before making any
modifications. Otherwise, `img` is modified in-place.
Returns:
`numpy.ndarray`_: The modified image, which is either a new
array or points to the in-place modification of `img` according
to the value of `copy`.
"""
# Check
if img.ndim != 2:
msgs.error('Images must be 2D!')
if bad_cols.size != img.shape[1]:
msgs.error('Bad column array has incorrect length!')
if np.all(bad_cols):
msgs.error('All columns are bad!')
_img = img.copy() if copy else img
if np.sum(bad_cols) == 0:
# No bad columns
return _img
# Find the starting/ending indices of adjacent bad columns
borders = np.zeros(img.shape[1], dtype=int)
borders[bad_cols] = 1
borders = borders - np.roll(borders,1)
if borders[0] == -1:
borders[0] = 0
# Get edge indices and deal with edge cases
lindx = borders == 1
ledges = np.where(lindx)[0] if np.any(lindx) else [0]
rindx = borders == -1
redges = np.where(rindx)[0] if np.any(rindx) else [img.shape[1]]
if ledges[0] > redges[0]:
ledges = np.append([0], ledges)
if ledges[-1] > redges[-1]:
redges = np.append(redges, [img.shape[1]])
# If this is tripped, there's a coding error
assert len(ledges) == len(redges), 'Problem in edge setup'
# Replace the image values
if replace_with == 'mean':
for l,r in zip(ledges, redges):
replace_column_mean(_img, l, r)
elif replace_with == 'linear':
for l,r in zip(ledges, redges):
replace_column_linear(_img, l, r)
else:
msgs.error('Unknown replace_columns method. Must be mean or linear.')
return _img
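# Illustrative sketch of replace_columns: two adjacent bad columns replaced
# by the mean of the bracketing good columns.
def _demo_replace_columns():
    img = np.tile(np.arange(8, dtype=float), (4, 1))   # column j holds value j
    bad_cols = np.zeros(8, dtype=bool)
    bad_cols[3:5] = True                               # columns 3 and 4 are bad
    fixed = replace_columns(img, bad_cols, replace_with='mean', copy=True)
    return fixed              # columns 3 and 4 become 0.5*(col 2 + col 5) = 3.5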
def replace_column_mean(img, left, right):
"""
Replace the column values between left and right indices for all
rows by the mean of the columns just outside the region.
Columns at the end of the image with no left or right reference
column (`left==0` or `right==img.shape[1]`) are just replaced by the
closest valid column.
Args:
img (`numpy.ndarray`_):
Image with values to both use and replace.
left (:obj:`int`):
Inclusive starting column index.
right (:obj:`int`):
Exclusive ending column index.
"""
if left == 0:
img[:,left:right] = img[:,right][:,None]
return
if right == img.shape[1]:
img[:,left:] = img[:,left-1][:,None]
return
img[:,left:right] = 0.5*(img[:,left-1]+img[:,right])[:,None]
def replace_column_linear(img, left, right):
"""
Replace the column values between left and right indices for all
rows by a linear interpolation between the columns just outside the
region.
If possible, extrapolation is used for columns at the end of the
image with no left or right reference column (`left==0` or
`right==img.shape[1]`) using the two most adjacent columns.
Otherwise, this function calls :func:`replace_column_mean`.
Args:
img (`numpy.ndarray`_):
Image with values to both use and replace.
left (:obj:`int`):
Inclusive starting column index.
right (:obj:`int`):
Exclusive ending column index.
"""
    if (left == 0 and right > img.shape[1]-2) or (right == img.shape[1] and left < 2):
# No extrapolation available so revert to mean
return replace_column_mean(img, left, right)
    if left == 0:
        # Extrapolate the line through the two nearest valid columns (right,
        # right+1) down to the columns below ``right``
        img[:,:right] = (img[:,right+1]-img[:,right])[:,None] \
                            * (np.arange(right)-right)[None,:] + img[:,right][:,None]
        return
    if right == img.shape[1]:
        # Extrapolate the line through the two nearest valid columns (left-2,
        # left-1) up to the columns at and above ``left``
        img[:,left:] = (img[:,left-1]-img[:,left-2])[:,None] \
                            * (np.arange(right-left)+1)[None,:] + img[:,left-1][:,None]
        return
# Interpolate
img[:,left:right] = np.divide(img[:,right]-img[:,left-1],right-left+1)[:,None] \
* (np.arange(right-left)+1)[None,:] + img[:,left-1][:,None]
def old_replace_columns(img, bad_cols, replace_with='mean'):
""" Replace bad columns with values from the neighbors
Parameters
----------
img : ndarray
bad_cols: ndarray (bool, 1D, shape[1] of img)
True = bad column
False = ok column
replace_with : str, optional
Option for replacement
mean -- Use the mean of the closest left/right columns
Returns
-------
img2 : ndarray
Copy of the input image with the bad columns replaced
"""
# Prep
img2 = img.copy()
# Find the starting/ends of the bad column sets
tmp = np.zeros(img.shape[1], dtype=int)
tmp[bad_cols] = 1
tmp2 = tmp - np.roll(tmp,1)
# Deal with first column
if bad_cols[0]:
tmp2[0]=1
# Deal with last column
if bad_cols[-1]:
tmp2[-1]=-1
ledges = np.where(tmp2 == 1)[0]
redges = np.where(tmp2 == -1)[0]
# Last column?
if tmp2[-1] == 1:
redges = np.concatenate([redges, np.array([bad_cols.size-1])])
# Loop on em
for kk, ledge in enumerate(ledges):
lval = img[:,redges[kk]+1] if ledge == 0 else img[:,ledge-1]
rval = img[:, redges[kk]]
# First columns?
# Replace
if replace_with == 'mean':
mval = (lval+rval)/2.
for ii in range(ledge, redges[kk]+1):
img2[:,ii] = mval
else:
msgs.error("Bad option to replace_columns")
# Return
return img2
def trim_frame(frame, mask):
"""
Trim the masked regions from a frame.
Args:
frame (`numpy.ndarray`_):
Image to be trimmed
mask (`numpy.ndarray`_):
Boolean image set to True for values that should be trimmed
and False for values to be returned in the output trimmed
image.
Return:
`numpy.ndarray`_: Trimmed image
Raises:
        PypeItError:
Error raised if the trimmed image includes masked values
because the shape of the valid region is odd.
"""
# TODO: Should check for this failure mode earlier
if np.any(mask[np.logical_not(np.all(mask,axis=1)),:][:,np.logical_not(np.all(mask,axis=0))]):
msgs.error('Data section is oddly shaped. Trimming does not exclude all '
'pixels outside the data sections.')
return frame[np.logical_not(np.all(mask,axis=1)),:][:,np.logical_not(np.all(mask,axis=0))]
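# Illustrative sketch of trim_frame: trim a one-pixel masked border, leaving
# the rectangular 4x4 data section in the center.
def _demo_trim_frame():
    frame = np.arange(36, dtype=float).reshape(6, 6)
    mask = np.ones((6, 6), dtype=bool)
    mask[1:5, 1:5] = False                   # valid central data section
    return trim_frame(frame, mask)           # returns the central 4x4 region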
def base_variance(rn_var, darkcurr=None, exptime=None, proc_var=None, count_scale=None):
r"""
Calculate the "base-level" variance in a processed image driven by the
detector properties and the additive noise from the image processing steps.
The full variance model (see :func:`variance_model`), :math:`V`, is:
.. math::
V = s^2\ \left[ {\rm max}(0, C) + D t_{\rm exp} / 3600 +
V_{\rm rn} + V_{\rm proc} \right]
+ \epsilon^2 {\rm max}(0, c)^2
where:
- :math:`c=s\ C` are the rescaled observed sky + object counts,
- :math:`C` is the observed number of sky + object counts,
- :math:`s` is a scale factor derived from the (inverse of the)
flat-field frames (see ``count_scale``),
- :math:`D` is the dark current in electrons per **hour** (see
``darkcurr``),
- :math:`t_{\rm exp}` is the effective exposure time in seconds (see
``exptime``),
- :math:`V_{\rm rn}` is the detector readnoise variance (i.e.,
read-noise squared; see ``rn_var``),
- :math:`V_{\rm proc}` is added variance from image processing (e.g.,
bias subtraction; see ``proc_var``), and
- :math:`\epsilon` is an added error term that imposes a maximum
signal-to-noise on the observed counts.
This function consolidates terms that do not change with the forward
modeling of the sky + object counts. That is, this function calculates
.. math::
V_{\rm base} = s^2\ \left[ D t_{\rm exp} / 3600 + V_{\rm rn} + V_{\rm
proc} \right]
such that the first equation can be re-written as
.. math::
V = s {\rm max}(0,c) + V_{\rm base} + \epsilon^2 {\rm max}(0, c)^2.
.. warning::
- If :math:`s` (``count_scale``) is provided, the variance will be 0
wherever :math:`s \leq 0`.
- Note that dark current is typically given in electrons per second *per
pixel*. If on-chip binning was used for the detector readout, each
binned pixel will have accummulated the expected dark-current (in
e-/s/pixel) multiplied by the number of binned pixels. Beware the
units of ``darkcurr``, both in that it is dark-current per *hour* and
that it is the dark-current expected in the *binned* pixel. For
example, see the calling function
:func:`pypeit.images.rawimage.RawImage.build_ivar`.
Args:
rn_var (`numpy.ndarray`_):
A 2D array with the readnoise variance (i.e., readnoise squared)
from the instrument detector; see :func:`rn2_frame`. This should
include digitization noise and any difference in the readnoise
across the detector due to the use of multiple amplifiers.
            Readnoise should be in e-, meaning this is in electrons squared.
darkcurr (:obj:`float`, `numpy.ndarray`_, optional):
Dark current in electrons per **hour** (as is the convention for the
:class:`~pypeit.images.detector_container.DetectorContainer` object)
if the exposure time is provided, otherwise in electrons. Note that
this is the dark-current in each read pixel, meaning you likely need
to multiply the quoted detector dark-current by the number of pixels
in a bin (e.g., 4 for 2x2 binning) for binned data. If None, set to
0. If a single float, assumed to be constant across the full image.
If an array, the shape must match ``rn_var``.
exptime (:obj:`float`, optional):
Exposure time in seconds. If None, dark current *must* be
in electrons.
proc_var (:obj:`float`, `numpy.ndarray`_, optional):
Additional variance terms to include that are due to the image
processing steps (e.g., bias subtraction). If None, set to 0. If a
single float, assumed to be constant across the full image. If an
array, the shape must match ``rn_var``.
count_scale (:obj:`float`, `numpy.ndarray`_, optional):
A scale factor that *has already been applied* to the provided
counts. For example, if the image has been flat-field corrected,
this is the inverse of the flat-field counts. If None, set to 1.
If a single float, assumed to be constant across the full image. If
            an array, the shape must match ``rn_var``. The variance will be 0
            wherever :math:`s \leq 0`.
Returns:
`numpy.ndarray`_: Base-level variance image computed via the equation
above with the same shape as ``rn_var``.
"""
# Check input
if count_scale is not None and isinstance(count_scale, np.ndarray) \
and count_scale.shape != rn_var.shape:
msgs.error('Count scale and readnoise variance have different shape.')
if proc_var is not None and isinstance(proc_var, np.ndarray) \
and proc_var.shape != rn_var.shape:
msgs.error('Processing variance and readnoise variance have different shape.')
if darkcurr is not None and isinstance(darkcurr, np.ndarray) \
and darkcurr.shape != rn_var.shape:
msgs.error('Dark image and readnoise variance have different shape.')
# Build the variance
# - First term is the read-noise
var = rn_var.copy()
# - Add the processing noise
if proc_var is not None:
var += proc_var
# - Add the dark current
if darkcurr is not None:
var += darkcurr if exptime is None else darkcurr * exptime / 3600
# - Include the rescaling
if count_scale is not None:
_count_scale = count_scale.copy() if isinstance(count_scale, np.ndarray) \
else np.full(var.shape, count_scale, dtype=float)
var *= _count_scale**2
# Done
return var
def variance_model(base, counts=None, count_scale=None, noise_floor=None):
r"""
Calculate the expected variance in an image.
    The full variance model, :math:`V`, is:
.. math::
V = s^2\ \left[ {\rm max}(0, C) + D t_{\rm exp} / 3600 +
V_{\rm rn} + V_{\rm proc} \right]
+ \epsilon^2 {\rm max}(0, c)^2
where:
- :math:`c=s\ C` are the rescaled observed sky + object counts (see
``counts``),
- :math:`C` is the observed number of sky + object counts,
- :math:`s` is a scale factor derived from the (inverse of the)
flat-field frames (see ``count_scale``),
- :math:`D` is the dark current in electrons per **hour**,
- :math:`t_{\rm exp}` is the effective exposure time in seconds,
- :math:`V_{\rm rn}` is the detector readnoise variance (i.e.,
read-noise squared),
- :math:`V_{\rm proc}` is added variance from image processing (e.g.,
bias subtraction), and
- :math:`\epsilon` is an added error term that imposes a maximum
signal-to-noise on the observed counts (see ``noise_floor``).
The function :func:`base_variance` consolidates all terms
that do not change with the forward
modeling of the sky + object counts into a single "base-level" variance
.. math::
V_{\rm base} = s^2\ \left[ D t_{\rm exp} / 3600 + V_{\rm rn} + V_{\rm
proc} \right]
such that the first equation can be re-written as
.. math::
V = s {\rm max}(0,c) + V_{\rm base} + \epsilon^2 {\rm max}(0, c)^2,
which is the quantity returned by this function.
We emphasize that this is a *model* for the per-pixel image variance. In
real data, the as-observed pixel values are used to estimate the Poisson
error in the observed counts. Because of the variance in the image, this
systematically overestimates the variance toward low counts (:math:`\lesssim
2 \sigma_{\rm rn}`), with a bias of approximately :math:`1.4/\sigma_{\rm
rn}` for :math:`C=0` (i.e., about 20% for a readnoise of 2 e-) and less than
10% for :math:`C=1`.
.. note::
If :math:`s` (``count_scale``) is provided, the variance will be 0
wherever :math:`s \leq 0`, modulo the provided ``noise_floor``.
Args:
base (`numpy.ndarray`_):
The "base-level" variance in the data set by the detector properties
and the image processing steps. See :func:`base_variance`;
:math:`V_{\rm base}` in the equations above.
counts (`numpy.ndarray`_, optional):
A 2D array with the number of source-plus-sky counts, possibly
rescaled by a relative throughput; see :math:`c` in the equations
above. Because this is used to calculate the noise floor, this
*must* be provided if ``noise_floor`` is not None. Shape must match
``base``.
count_scale (:obj:`float`, `numpy.ndarray`_, optional):
A scale factor that *has already been applied* to the provided
counts; see :math:`s` in the equations above. For example, if the
image has been flat-field corrected, this is the inverse of the
flat-field counts. If None, no scaling is expected, meaning
``counts`` are exactly the observed detector counts. If a single
float, assumed to be constant across the full image. If an array,
the shape must match ``base``. The variance will be 0 wherever
:math:`s \leq 0`, modulo the provided ``noise_floor``.
noise_floor (:obj:`float`, optional):
A fraction of the counts to add to the variance, which has the
effect of ensuring that the S/N is never greater than
            ``1/noise_floor``; see :math:`\epsilon` in the equations above. If
None, no noise floor is added. If not None, ``counts`` *must* be
provided.
Returns:
`numpy.ndarray`_: Variance image computed via the equation above with
the same shape as ``base``.
"""
# Check input
if noise_floor is not None and noise_floor > 0. and counts is None:
msgs.error('To impose a noise floor, must provide counts.')
if counts is not None and counts.shape != base.shape:
msgs.error('Counts image and base-level variance have different shape.')
if count_scale is not None and isinstance(count_scale, np.ndarray) \
and count_scale.shape != base.shape:
msgs.error('Count scale and base-level variance have different shape.')
# Clip the counts
_counts = None if counts is None else np.clip(counts, 0, None)
# Build the variance
# - Start with the base-level variance
var = base.copy()
# - Add the sky + object counts
if counts is not None:
var += _counts if count_scale is None else count_scale * _counts
# - Add the noise floor
if noise_floor is not None and noise_floor > 0.:
var += (noise_floor * _counts)**2
# Done
return var
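

# Hedged usage sketch (not part of the original module): shows how
# base_variance() and variance_model() compose. The detector values below
# (readnoise, dark current, exposure time, noise floor) are invented for
# illustration only.
if __name__ == '__main__':
    _rn_var = np.full((4, 4), 2.5**2)            # readnoise variance [e-^2]
    _base = base_variance(_rn_var, darkcurr=2.0, exptime=1800.)
    _counts = np.full((4, 4), 1.0e3)             # sky + object counts [e-]
    print(variance_model(_base, counts=_counts, noise_floor=0.01))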
|
{"hexsha": "386436341792000b5b9e87d515d858711f91f334", "size": 49017, "ext": "py", "lang": "Python", "max_stars_repo_path": "pypeit/core/procimg.py", "max_stars_repo_name": "brackham/PypeIt", "max_stars_repo_head_hexsha": "8769f06ae8e8f18d3a55d12b01dd3dde50b98040", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 107, "max_stars_repo_stars_event_min_datetime": "2018-08-06T07:07:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T14:33:42.000Z", "max_issues_repo_path": "pypeit/core/procimg.py", "max_issues_repo_name": "brackham/PypeIt", "max_issues_repo_head_hexsha": "8769f06ae8e8f18d3a55d12b01dd3dde50b98040", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 889, "max_issues_repo_issues_event_min_datetime": "2018-07-26T12:14:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T22:49:42.000Z", "max_forks_repo_path": "pypeit/core/procimg.py", "max_forks_repo_name": "brackham/PypeIt", "max_forks_repo_head_hexsha": "8769f06ae8e8f18d3a55d12b01dd3dde50b98040", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 74, "max_forks_repo_forks_event_min_datetime": "2018-09-25T17:03:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T23:59:24.000Z", "avg_line_length": 41.7165957447, "max_line_length": 129, "alphanum_fraction": 0.6173776445, "include": true, "reason": "import numpy,from scipy,import astropy", "num_tokens": 12540}
|
import numpy as np
def measure_correlation(snapshots, correlation_threshold):
correlated_inputs = get_list_of_correlated_inputs(
snapshots, correlation_threshold)
if len(correlated_inputs) > 0:
print(("Caution!\nCorrelation between input data can affect the "
+ "reliability of the importance measure.\n"
+ "Correlations of more than {} "
+ "were found between {} pair(s) of input variables:\n\t{}\n")
.format(correlation_threshold,
len(correlated_inputs),
"\n\t".join([convert_correlation_list_entry_to_string(entry)
for entry in correlated_inputs])))
else:
print(f"No correlation above {correlation_threshold} was found between the inputs.")
return correlated_inputs
def get_list_of_correlated_inputs(snapshots, correlation_threshold):
return [make_correlation_list_entry(row_nr, col_nr, entry)
for row_nr, row in enumerate(get_covariance_matrix(snapshots))
for col_nr, entry in enumerate(row)
if row_nr > col_nr and abs(entry) >= correlation_threshold]
def get_covariance_matrix(snapshots):
    # NOTE: np.cov returns covariances, not correlation coefficients, so the
    # threshold above is effectively compared against covariance entries; if
    # a true correlation threshold is intended,
    # np.corrcoef(np.transpose(snapshots)) would be the drop-in replacement.
    return np.cov(np.transpose(snapshots))
def make_correlation_list_entry(row_nr, col_nr, entry):
return [str(row_nr), str(col_nr), f"{entry:.3f}"]
def convert_correlation_list_entry_to_string(entry):
return "{},{}: {}".format(*entry)
|
{"hexsha": "414877ad1c4d3dfc00a21e9e7bb74a4deb3ff7c4", "size": 1439, "ext": "py", "lang": "Python", "max_stars_repo_path": "NucleationModel/correlation_measure.py", "max_stars_repo_name": "MFrassek/CommittorEAE", "max_stars_repo_head_hexsha": "88a467e4500bc9ab69834209f4eaec9f2d0d7a61", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NucleationModel/correlation_measure.py", "max_issues_repo_name": "MFrassek/CommittorEAE", "max_issues_repo_head_hexsha": "88a467e4500bc9ab69834209f4eaec9f2d0d7a61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NucleationModel/correlation_measure.py", "max_forks_repo_name": "MFrassek/CommittorEAE", "max_forks_repo_head_hexsha": "88a467e4500bc9ab69834209f4eaec9f2d0d7a61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8684210526, "max_line_length": 92, "alphanum_fraction": 0.6824183461, "include": true, "reason": "import numpy", "num_tokens": 309}
|
%Protein processing II process test case
%
% Author: Jared Jacobs, jmjacobs@stanford.edu
% Author: Jonathan Karr, jkarr@stanford.edu
% Affiliation: Covert Lab, Department of Bioengineering, Stanford University
% Last updated: 8/9/2010
classdef ProteinProcessingII_Test < edu.stanford.covert.cell.sim.ProcessTestCase
methods
function this = ProteinProcessingII_Test(methodName)
this = this@edu.stanford.covert.cell.sim.ProcessTestCase(methodName);
end
function testOneMonomerRequiringNoProcessing(this)
m = this.process;
m.lipoproteinMonomerIndexs = [];
m.secretedMonomerIndexs = [];
m.unprocessedMonomerIndexs = 1;
m.substrates(:) = 0;
m.enzymes(:) = 0;
m.unprocessedMonomers = 1;
m.processedMonomers = 0;
m.signalSequenceMonomers = 0;
m.evolveState();
assertEqual(0, m.unprocessedMonomers);
assertEqual(1, m.processedMonomers);
assertEqual(0, m.signalSequenceMonomers);
assertEqual(zeros(size(m.substrates)), m.substrates);
end
function testOneSecretedMonomer(this)
m = this.process;
m.lipoproteinMonomerIndexs = [];
m.secretedMonomerIndexs = 1;
m.unprocessedMonomerIndexs = [];
m.substrates(:) = 0;
m.substrates(m.substrateIndexs_water) = 1;
m.enzymes(:) = 0;
m.enzymes(m.enzymeIndexs_signalPeptidase) = 1;
m.unprocessedMonomers = 1;
m.processedMonomers = 0;
m.signalSequenceMonomers = 0;
m.evolveState();
assertEqual(0, m.unprocessedMonomers);
assertEqual(1, m.processedMonomers);
assertEqual(1, m.signalSequenceMonomers);
assertEqual(zeros(size(m.substrates)), m.substrates);
assertEqual(1, m.enzymes(m.enzymeIndexs_signalPeptidase));
end
function testOneLipoprotein(this)
m = this.process;
m.lipoproteinMonomerIndexs = 1;
m.secretedMonomerIndexs = [];
m.unprocessedMonomerIndexs = [];
m.substrates(:) = 0;
m.substrates(m.substrateIndexs_water) = 1;
m.substrates(m.substrateIndexs_PG160) = 1;
m.enzymes(:) = 0;
m.enzymes(m.enzymeIndexs_signalPeptidase) = 1;
m.enzymes(m.enzymeIndexs_diacylglycerylTransferase) = 99;
m.unprocessedMonomers = 1;
m.processedMonomers = 0;
m.signalSequenceMonomers = 0;
m.evolveState();
assertEqual(0, m.unprocessedMonomers);
assertEqual(1, m.processedMonomers);
assertEqual(1, m.signalSequenceMonomers);
assertEqual(0, m.substrates(m.substrateIndexs_water));
assertEqual(0, m.substrates(m.substrateIndexs_PG160));
assertEqual(1, m.substrates(m.substrateIndexs_hydrogen));
assertEqual(1, m.substrates(m.substrateIndexs_SNGLYP));
assertEqual(1, m.enzymes(m.enzymeIndexs_signalPeptidase));
assertEqual(99, m.enzymes(m.enzymeIndexs_diacylglycerylTransferase));
end
function testNoProcessingWithoutWater(this)
m = this.process;
m.lipoproteinMonomerIndexs = 1;
m.secretedMonomerIndexs = 2;
m.unprocessedMonomerIndexs = [];
m.substrates(:) = 0;
m.substrates(m.substrateIndexs_PG160) = 1e3;
m.enzymes(:) = 1e3;
m.unprocessedMonomers = [1; 1];
m.processedMonomers = [0; 0];
m.signalSequenceMonomers = [0; 0];
m.evolveState();
assertEqual([1 1], m.unprocessedMonomers');
assertEqual([0 0], m.processedMonomers');
assertEqual([0 0], m.signalSequenceMonomers');
assertEqual(0, m.substrates(m.substrateIndexs_water));
assertEqual(1e3, m.substrates(m.substrateIndexs_PG160));
end
function testNoProcessingWithoutSignalPeptidase(this)
m = this.process;
m.lipoproteinMonomerIndexs = 1;
m.secretedMonomerIndexs = 2;
m.unprocessedMonomerIndexs = [];
m.substrates(:) = 0;
m.substrates(m.substrateIndexs_water) = 1e6;
m.substrates(m.substrateIndexs_PG160) = 1e3;
m.enzymes(:) = 0;
m.enzymes(m.enzymeIndexs_diacylglycerylTransferase) = 1e3;
m.unprocessedMonomers = [1;1];
m.processedMonomers = [0;0];
m.signalSequenceMonomers = [0;0];
m.evolveState();
assertEqual([1 1], m.unprocessedMonomers');
assertEqual([0 0], m.processedMonomers');
assertEqual([0 0], m.signalSequenceMonomers');
assertEqual(1e6, m.substrates(m.substrateIndexs_water));
assertEqual(1e3, m.substrates(m.substrateIndexs_PG160));
end
function testNoLipoproteinProcessingWithoutDiacylglycerylTransferase(this)
m = this.process;
m.lipoproteinMonomerIndexs = 1;
m.secretedMonomerIndexs = [];
m.unprocessedMonomerIndexs = [];
m.substrates(:) = 0;
m.substrates(m.substrateIndexs_water) = 1e6;
m.substrates(m.substrateIndexs_PG160) = 1e3;
m.enzymes(:) = 0;
m.enzymes(m.enzymeIndexs_signalPeptidase) = 1e3;
m.unprocessedMonomers = 1;
m.processedMonomers = 0;
m.signalSequenceMonomers = 0;
m.evolveState();
assertEqual(1, m.unprocessedMonomers);
assertEqual(0, m.processedMonomers);
assertEqual(0, m.signalSequenceMonomers);
assertEqual(1e6, m.substrates(m.substrateIndexs_water));
assertEqual(1e3, m.substrates(m.substrateIndexs_PG160));
end
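        % Hedged note (not part of the original test class): the enzyme-limited
        % tests below derive the expected throughput as
        %     capacity = enzymeCount * specificRate * stepSizeSec
        % and assert that the total number of processed monomers matches that
        % capacity within a small relative tolerance, because the selection of
        % individual monomers for processing is stochastic.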
% Verifies that signal peptidase processes roughly as many monomers as
% its rate will allow when it's the limiting factor, and that the
% monomers chosen for processing are chosen without egregious bias.
function testLimitedSignalPeptidase_secretedProteinsOnly(this)
m = this.process;
m.substrates(:) = 0;
m.substrates(m.substrateIndexs_water) = 1e6;
m.enzymes(:) = 0;
m.enzymes(m.enzymeIndexs_signalPeptidase) = 3;
m.unprocessedMonomers(:) = 0;
m.unprocessedMonomers(m.secretedMonomerIndexs) = 10;
m.processedMonomers(:) = 0;
m.signalSequenceMonomers(:) = 0;
m.evolveState();
n = m.enzymes(m.enzymeIndexs_signalPeptidase) * ...
m.lipoproteinSignalPeptidaseSpecificRate * m.stepSizeSec;
i = m.secretedMonomerIndexs;
assertVectorsAlmostEqual(...
n, sum(m.processedMonomers(i)), 'relative', 0.10);
assertTrue(10 > max(m.processedMonomers(i)));
assertTrue(0 < min(m.processedMonomers(i)));
assertEqual(...
10 * ones(size(i)), ...
m.processedMonomers(i) + m.unprocessedMonomers(i));
assertEqual(m.processedMonomers, m.signalSequenceMonomers);
end
% Verifies that signal peptidase processes roughly as many monomers as
% its rate will allow when it's the limiting factor and there is a mix
% of lipoproteins and secreted proteins, and that the monomers chosen
% for processing are chosen without egregious bias.
function testLimitedSignalPeptidase_lipoproteinsAndSecretedProteins(this)
m = this.process;
m.substrates(:) = 0;
m.substrates(m.substrateIndexs_water) = 1e6;
m.substrates(m.substrateIndexs_PG160) = 1e3;
m.enzymes(:) = 0;
m.enzymes(m.enzymeIndexs_signalPeptidase) = 20;
m.enzymes(m.enzymeIndexs_diacylglycerylTransferase) = 1e4;
m.unprocessedMonomers(:) = 0;
m.unprocessedMonomers(m.secretedMonomerIndexs) = 10;
m.unprocessedMonomers(m.lipoproteinMonomerIndexs) = 10;
m.processedMonomers(:) = 0;
m.signalSequenceMonomers(:) = 0;
m.evolveState();
n = m.enzymes(m.enzymeIndexs_signalPeptidase) * ...
m.lipoproteinSignalPeptidaseSpecificRate * m.stepSizeSec;
i = [m.secretedMonomerIndexs;m.lipoproteinMonomerIndexs];
assertVectorsAlmostEqual(...
n, sum(m.processedMonomers(i)), 'relative', 0.05);
assertTrue(10 > max(m.processedMonomers(i)));
assertTrue(0 < min(m.processedMonomers(i)));
assertEqual(...
10 * ones(size(i)), ...
m.processedMonomers(i) + m.unprocessedMonomers(i));
assertEqual(m.processedMonomers, m.signalSequenceMonomers);
end
% Verifies that diacylglyceryl transferase processes roughly as many
% lipoproteins as its rate will allow when it's the limiting factor.
function testLimitedDiacylglycerylTransferase(this)
m = this.process;
m.substrates(:) = 0;
m.substrates(m.substrateIndexs_water) = 1e6;
m.substrates(m.substrateIndexs_PG160) = 1e3;
m.enzymes(:) = 0;
m.enzymes(m.enzymeIndexs_signalPeptidase) = 1e6;
m.enzymes(m.enzymeIndexs_diacylglycerylTransferase) = 1e3;
m.unprocessedMonomers(:) = 0;
m.unprocessedMonomers(m.lipoproteinMonomerIndexs) = 10;
m.processedMonomers(:) = 0;
m.signalSequenceMonomers(:) = 0;
m.evolveState();
n = m.enzymes(m.enzymeIndexs_diacylglycerylTransferase) * ...
m.lipoproteinDiacylglycerylTransferaseSpecificRate * m.stepSizeSec;
i = m.lipoproteinMonomerIndexs;
assertElementsAlmostEqual(n, sum(m.processedMonomers(i)), 'absolute', 3);
assertTrue(10 > max(m.processedMonomers(i)));
assertTrue(0 < min(m.processedMonomers(i)));
end
function testLimitedPG160(this)
m = this.process;
m.substrates(:) = 0;
m.substrates(m.substrateIndexs_water) = 1e6;
m.substrates(m.substrateIndexs_PG160) = 100;
m.enzymes(:) = 0;
m.enzymes(m.enzymeIndexs_signalPeptidase) = 1e5;
m.enzymes(m.enzymeIndexs_diacylglycerylTransferase) = 1e5;
m.unprocessedMonomers(:) = 0;
m.unprocessedMonomers(m.lipoproteinMonomerIndexs) = 10;
m.processedMonomers(:) = 0;
m.signalSequenceMonomers(:) = 0;
assertTrue(sum(m.unprocessedMonomers(m.lipoproteinMonomerIndexs)) > m.substrates(m.substrateIndexs_PG160));
m.evolveState();
i = m.lipoproteinMonomerIndexs;
assertIn(nnz(m.unprocessedMonomers(m.lipoproteinMonomerIndexs)), [1 Inf]);
assertEqual(100, sum(m.processedMonomers(i)));
end
function testLotsOfEverything(this)
m = this.process;
m.substrates(m.substrateIndexs_water) = 1e6;
m.substrates(m.substrateIndexs_PG160) = 1e4;
m.enzymes(m.enzymeIndexs_signalPeptidase) = 1e3;
m.enzymes(m.enzymeIndexs_diacylglycerylTransferase) = 1e2;
i = [m.secretedMonomerIndexs;m.lipoproteinMonomerIndexs];
m.unprocessedMonomers(i) = randi(100, size(i));
m.processedMonomers(:) = 0;
m.signalSequenceMonomers(:) = 0;
m.evolveState();
bounds = sort(...
[m.enzymes(m.enzymeIndexs_signalPeptidase) * ...
m.lipoproteinSignalPeptidaseSpecificRate * ...
m.stepSizeSec;
m.enzymes(m.enzymeIndexs_diacylglycerylTransferase) * ...
m.lipoproteinDiacylglycerylTransferaseSpecificRate * ...
m.stepSizeSec]);
assertTrue(0.99 * bounds(1) < sum(m.processedMonomers(i)));
assertTrue(1.01 * bounds(2) > sum(m.processedMonomers(i)));
end
function testGeneEssentiality(this)
m = this.process;
m.substrates(m.substrateIndexs_water) = 1e6;
m.substrates(m.substrateIndexs_PG160) = 1e3;
m.enzymes(m.enzymeIndexs_signalPeptidase) = 1e3;
m.enzymes(m.enzymeIndexs_diacylglycerylTransferase) = 1e3;
m.unprocessedMonomers(:) = 1;
m.processedMonomers(:) = 0;
this.helpTestGeneEssentiality({
'MG_086'; %prolipoprotein diacylglyceryl transferase
'MG_210'},... %prolipoprotein signal peptidase, signal peptidase II
@(m, i) any(i.processedMonomers(m.unprocessedMonomerIndexs) < ...
m.processedMonomers(m.unprocessedMonomerIndexs)) && ...
any(i.processedMonomers(m.lipoproteinMonomerIndexs) < ...
m.processedMonomers(m.lipoproteinMonomerIndexs)) && ...
any(i.processedMonomers(m.secretedMonomerIndexs) < ...
m.processedMonomers(m.secretedMonomerIndexs)));
end
end
end
|
{"author": "CovertLab", "repo": "WholeCell", "sha": "6cdee6b355aa0f5ff2953b1ab356eea049108e07", "save_path": "github-repos/MATLAB/CovertLab-WholeCell", "path": "github-repos/MATLAB/CovertLab-WholeCell/WholeCell-6cdee6b355aa0f5ff2953b1ab356eea049108e07/src_test/+edu/+stanford/+covert/+cell/+sim/+process/ProteinProcessingII_Test.m"}
|
[STATEMENT]
lemma linorder_rank_set_sorted_wrt:
assumes "linorder_on B R" "set xs \<subseteq> B" "sorted_wrt R xs" "x \<in> set xs" "distinct xs"
shows "linorder_rank R (set xs) x = index xs x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
define j where "j = index xs x"
[PROOF STATE]
proof (state)
this:
j = index xs x
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
linorder_on B R
set xs \<subseteq> B
Linorder_Relations.sorted_wrt R xs
x \<in> set xs
distinct xs
[PROOF STEP]
have j: "j < length xs"
[PROOF STATE]
proof (prove)
using this:
linorder_on B R
set xs \<subseteq> B
Linorder_Relations.sorted_wrt R xs
x \<in> set xs
distinct xs
goal (1 subgoal):
1. j < length xs
[PROOF STEP]
by (simp add: j_def)
[PROOF STATE]
proof (state)
this:
j < length xs
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
have *: "x = y \<or> ((x, y) \<in> R \<and> (y, x) \<notin> R) \<or> ((y, x) \<in> R \<and> (x, y) \<notin> R)" if "y \<in> set xs" for y
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x = y \<or> (x, y) \<in> R \<and> (y, x) \<notin> R \<or> (y, x) \<in> R \<and> (x, y) \<notin> R
[PROOF STEP]
using linorder_on_cases[OF assms(1), of x y] assms that
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>x \<in> B; y \<in> B\<rbrakk> \<Longrightarrow> x = y \<or> (x, y) \<in> R \<and> (y, x) \<notin> R \<or> (y, x) \<in> R \<and> (x, y) \<notin> R
linorder_on B R
set xs \<subseteq> B
Linorder_Relations.sorted_wrt R xs
x \<in> set xs
distinct xs
y \<in> set xs
goal (1 subgoal):
1. x = y \<or> (x, y) \<in> R \<and> (y, x) \<notin> R \<or> (y, x) \<in> R \<and> (x, y) \<notin> R
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
?y \<in> set xs \<Longrightarrow> x = ?y \<or> (x, ?y) \<in> R \<and> (?y, x) \<notin> R \<or> (?y, x) \<in> R \<and> (x, ?y) \<notin> R
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
linorder_on B R
set xs \<subseteq> B
Linorder_Relations.sorted_wrt R xs
x \<in> set xs
distinct xs
[PROOF STEP]
have "{y\<in>set xs-{x}. (y, x) \<in> R} = {y\<in>set xs-{x}. index xs y < index xs x}"
[PROOF STATE]
proof (prove)
using this:
linorder_on B R
set xs \<subseteq> B
Linorder_Relations.sorted_wrt R xs
x \<in> set xs
distinct xs
goal (1 subgoal):
1. {y \<in> set xs - {x}. (y, x) \<in> R} = {y \<in> set xs - {x}. index xs y < index xs x}
[PROOF STEP]
by (auto simp: sorted_wrt_linorder_index_less_iff[OF assms(1-3)] dest: *)
[PROOF STATE]
proof (state)
this:
{y \<in> set xs - {x}. (y, x) \<in> R} = {y \<in> set xs - {x}. index xs y < index xs x}
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
{y \<in> set xs - {x}. (y, x) \<in> R} = {y \<in> set xs - {x}. index xs y < index xs x}
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
have "\<dots> = {y\<in>set xs. index xs y < j}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {y \<in> set xs - {x}. index xs y < index xs x} = {y \<in> set xs. index xs y < j}
[PROOF STEP]
by (auto simp: j_def)
[PROOF STATE]
proof (state)
this:
{y \<in> set xs - {x}. index xs y < index xs x} = {y \<in> set xs. index xs y < j}
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
{y \<in> set xs - {x}. index xs y < index xs x} = {y \<in> set xs. index xs y < j}
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
have "\<dots> = (\<lambda>i. xs ! i) ` {i. i < j}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {y \<in> set xs. index xs y < j} = (!) xs ` {i. i < j}
[PROOF STEP]
proof safe
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>x. \<lbrakk>x \<in> set xs; index xs x < j\<rbrakk> \<Longrightarrow> x \<in> (!) xs ` {i. i < j}
2. \<And>x i. i < j \<Longrightarrow> xs ! i \<in> set xs
3. \<And>x i. i < j \<Longrightarrow> index xs (xs ! i) < j
[PROOF STEP]
fix y
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>x. \<lbrakk>x \<in> set xs; index xs x < j\<rbrakk> \<Longrightarrow> x \<in> (!) xs ` {i. i < j}
2. \<And>x i. i < j \<Longrightarrow> xs ! i \<in> set xs
3. \<And>x i. i < j \<Longrightarrow> index xs (xs ! i) < j
[PROOF STEP]
assume "y \<in> set xs" "index xs y < j"
[PROOF STATE]
proof (state)
this:
y \<in> set xs
index xs y < j
goal (3 subgoals):
1. \<And>x. \<lbrakk>x \<in> set xs; index xs x < j\<rbrakk> \<Longrightarrow> x \<in> (!) xs ` {i. i < j}
2. \<And>x i. i < j \<Longrightarrow> xs ! i \<in> set xs
3. \<And>x i. i < j \<Longrightarrow> index xs (xs ! i) < j
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
y \<in> set xs
index xs y < j
goal (3 subgoals):
1. \<And>x. \<lbrakk>x \<in> set xs; index xs x < j\<rbrakk> \<Longrightarrow> x \<in> (!) xs ` {i. i < j}
2. \<And>x i. i < j \<Longrightarrow> xs ! i \<in> set xs
3. \<And>x i. i < j \<Longrightarrow> index xs (xs ! i) < j
[PROOF STEP]
from this and j
[PROOF STATE]
proof (chain)
picking this:
y \<in> set xs
index xs y < j
j < length xs
[PROOF STEP]
have "y = xs ! index xs y"
[PROOF STATE]
proof (prove)
using this:
y \<in> set xs
index xs y < j
j < length xs
goal (1 subgoal):
1. y = xs ! index xs y
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
y = xs ! index xs y
goal (3 subgoals):
1. \<And>x. \<lbrakk>x \<in> set xs; index xs x < j\<rbrakk> \<Longrightarrow> x \<in> (!) xs ` {i. i < j}
2. \<And>x i. i < j \<Longrightarrow> xs ! i \<in> set xs
3. \<And>x i. i < j \<Longrightarrow> index xs (xs ! i) < j
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
y \<in> set xs
index xs y < j
y = xs ! index xs y
[PROOF STEP]
show "y \<in> (!) xs ` {i. i < j}"
[PROOF STATE]
proof (prove)
using this:
y \<in> set xs
index xs y < j
y = xs ! index xs y
goal (1 subgoal):
1. y \<in> (!) xs ` {i. i < j}
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
y \<in> (!) xs ` {i. i < j}
goal (2 subgoals):
1. \<And>x i. i < j \<Longrightarrow> xs ! i \<in> set xs
2. \<And>x i. i < j \<Longrightarrow> index xs (xs ! i) < j
[PROOF STEP]
qed (insert assms j, auto simp: index_nth_id)
[PROOF STATE]
proof (state)
this:
{y \<in> set xs. index xs y < j} = (!) xs ` {i. i < j}
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
{y \<in> set xs. index xs y < j} = (!) xs ` {i. i < j}
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
from assms and j
[PROOF STATE]
proof (chain)
picking this:
linorder_on B R
set xs \<subseteq> B
Linorder_Relations.sorted_wrt R xs
x \<in> set xs
distinct xs
j < length xs
[PROOF STEP]
have "card \<dots> = card {i. i < j}"
[PROOF STATE]
proof (prove)
using this:
linorder_on B R
set xs \<subseteq> B
Linorder_Relations.sorted_wrt R xs
x \<in> set xs
distinct xs
j < length xs
goal (1 subgoal):
1. card ((!) xs ` {i. i < j}) = card {i. i < j}
[PROOF STEP]
by (intro card_image) (auto simp: inj_on_def nth_eq_iff_index_eq)
[PROOF STATE]
proof (state)
this:
card ((!) xs ` {i. i < j}) = card {i. i < j}
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
card ((!) xs ` {i. i < j}) = card {i. i < j}
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
have "\<dots> = j"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card {i. i < j} = j
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
card {i. i < j} = j
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
card {y \<in> set xs - {x}. (y, x) \<in> R} = j
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
card {y \<in> set xs - {x}. (y, x) \<in> R} = j
goal (1 subgoal):
1. linorder_rank R (set xs) x = index xs x
[PROOF STEP]
by (simp only: j_def linorder_rank_def)
[PROOF STATE]
proof (state)
this:
linorder_rank R (set xs) x = index xs x
goal:
No subgoals!
[PROOF STEP]
qed
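(* Illustrative instance (not part of the original theory): take R to be the
   relation of \<le> on B = {1, 2, 3} and xs = [1, 2, 3]. For x = 2 the set
   {y \<in> set xs - {x}. (y, x) \<in> R} equals {1}, so
   linorder_rank R (set xs) 2 = 1 = index xs 2, as the lemma asserts. *)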
|
{"llama_tokens": 3662, "file": "Comparison_Sort_Lower_Bound_Linorder_Relations", "length": 38}
|
/* integration/qk.c
*
* Copyright (C) 1996, 1997, 1998, 1999, 2000 Brian Gough
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <config.h>
#include <float.h>
#include <math.h>
#include <gsl/gsl_integration.h>
#include "err.c"
void
gsl_integration_qk (const int n,
const double xgk[], const double wg[], const double wgk[],
double fv1[], double fv2[],
const gsl_function * f, double a, double b,
double *result, double *abserr,
double *resabs, double *resasc)
{
const double center = 0.5 * (a + b);
const double half_length = 0.5 * (b - a);
const double abs_half_length = fabs (half_length);
const double f_center = GSL_FN_EVAL (f, center);
double result_gauss = 0;
double result_kronrod = f_center * wgk[n - 1];
double result_abs = fabs (result_kronrod);
double result_asc = 0;
double mean = 0, err = 0;
int j;
if (n % 2 == 0)
{
result_gauss = f_center * wg[n / 2 - 1];
}
for (j = 0; j < (n - 1) / 2; j++)
{
      const int jtw = j * 2 + 1;  /* jtw = 1,3,5,...: abscissae shared with
                                     the Gauss rule (the Fortran original's
                                     1-based j=1,2,3 gave jtw=2,4,6) */
const double abscissa = half_length * xgk[jtw];
const double fval1 = GSL_FN_EVAL (f, center - abscissa);
const double fval2 = GSL_FN_EVAL (f, center + abscissa);
const double fsum = fval1 + fval2;
fv1[jtw] = fval1;
fv2[jtw] = fval2;
result_gauss += wg[j] * fsum;
result_kronrod += wgk[jtw] * fsum;
result_abs += wgk[jtw] * (fabs (fval1) + fabs (fval2));
}
for (j = 0; j < n / 2; j++)
{
int jtwm1 = j * 2;
const double abscissa = half_length * xgk[jtwm1];
const double fval1 = GSL_FN_EVAL (f, center - abscissa);
const double fval2 = GSL_FN_EVAL (f, center + abscissa);
fv1[jtwm1] = fval1;
fv2[jtwm1] = fval2;
result_kronrod += wgk[jtwm1] * (fval1 + fval2);
result_abs += wgk[jtwm1] * (fabs (fval1) + fabs (fval2));
};
mean = result_kronrod * 0.5;
result_asc = wgk[n - 1] * fabs (f_center - mean);
for (j = 0; j < n - 1; j++)
{
result_asc += wgk[j] * (fabs (fv1[j] - mean) + fabs (fv2[j] - mean));
}
/* scale by the width of the integration region */
err = (result_kronrod - result_gauss) * half_length;
result_kronrod *= half_length;
result_abs *= abs_half_length;
result_asc *= abs_half_length;
*result = result_kronrod;
*resabs = result_abs;
*resasc = result_asc;
*abserr = rescale_error (err, result_abs, result_asc);
}
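
/* Hedged usage sketch (not part of qk.c): gsl_integration_qk() is normally
   reached through fixed-rule wrappers such as gsl_integration_qk15(), which
   supply the abscissa/weight tables and scratch arrays. The integrand and
   interval below are invented for illustration. */
#include <stdio.h>

static double
f_quadratic (double x, void *params)
{
  (void) params;
  return x * x;
}

int
main (void)
{
  gsl_function F;
  double result, abserr, resabs, resasc;

  F.function = &f_quadratic;
  F.params = NULL;

  /* 15-point Gauss-Kronrod rule on [0,1]; the exact answer is 1/3 */
  gsl_integration_qk15 (&F, 0.0, 1.0, &result, &abserr, &resabs, &resasc);
  printf ("result = %.17g +/- %.3e\n", result, abserr);
  return 0;
}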
|
{"hexsha": "4b62a17365495ca9a189a3863dd0ad1fefc79d74", "size": 3172, "ext": "c", "lang": "C", "max_stars_repo_path": "code/em/treba/gsl-1.0/integration/qk.c", "max_stars_repo_name": "ICML14MoMCompare/spectral-learn", "max_stars_repo_head_hexsha": "91e70bc88726ee680ec6e8cbc609977db3fdcff9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14.0, "max_stars_repo_stars_event_min_datetime": "2015-12-18T18:09:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-10T11:31:28.000Z", "max_issues_repo_path": "code/em/treba/gsl-1.0/integration/qk.c", "max_issues_repo_name": "ICML14MoMCompare/spectral-learn", "max_issues_repo_head_hexsha": "91e70bc88726ee680ec6e8cbc609977db3fdcff9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/em/treba/gsl-1.0/integration/qk.c", "max_forks_repo_name": "ICML14MoMCompare/spectral-learn", "max_forks_repo_head_hexsha": "91e70bc88726ee680ec6e8cbc609977db3fdcff9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2015-10-02T01:32:59.000Z", "max_forks_repo_forks_event_max_datetime": "2015-10-02T01:32:59.000Z", "avg_line_length": 30.7961165049, "max_line_length": 78, "alphanum_fraction": 0.6179066835, "num_tokens": 991}
|
"""
2021 Simon Bing, ETHZ, MPI IS
"""
import numpy as np
from absl import flags  # NOTE: unused in this base module
class BaseProcessor(object):
def __init__(self):
self.name = None
def transform(self, x):
raise NotImplementedError
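

# Hedged sketch (not part of the original file): a minimal concrete processor
# illustrating the intended interface; the standardization choice is invented.
class StandardizeProcessor(BaseProcessor):
    def __init__(self):
        super().__init__()
        self.name = 'standardize'

    def transform(self, x):
        # Zero-mean, unit-variance scaling along the sample axis.
        x = np.asarray(x, dtype=float)
        return (x - x.mean(axis=0)) / (x.std(axis=0) + 1e-8)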
|
{"hexsha": "630b8bdc1ca9842989623aa8a26806eb73524765", "size": 221, "ext": "py", "lang": "Python", "max_stars_repo_path": "healthgen/data_access/preprocessing/base_processor.py", "max_stars_repo_name": "simonbing/HealthGen", "max_stars_repo_head_hexsha": "d5886a5a41dc36c6a70dece3dba3c60a90bf1fdd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "healthgen/data_access/preprocessing/base_processor.py", "max_issues_repo_name": "simonbing/HealthGen", "max_issues_repo_head_hexsha": "d5886a5a41dc36c6a70dece3dba3c60a90bf1fdd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "healthgen/data_access/preprocessing/base_processor.py", "max_forks_repo_name": "simonbing/HealthGen", "max_forks_repo_head_hexsha": "d5886a5a41dc36c6a70dece3dba3c60a90bf1fdd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.4166666667, "max_line_length": 33, "alphanum_fraction": 0.6696832579, "include": true, "reason": "import numpy", "num_tokens": 51}
|
/**
* Copyright (c) 2017 Melown Technologies SE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstdlib>
#include <utility>
#include <functional>
#include <map>
#include <numeric>
#include <algorithm>
#include <boost/optional.hpp>
#include <boost/utility/in_place_factory.hpp>
#include <boost/filesystem.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/thread.hpp>
#include <boost/format.hpp>
#include <boost/range/adaptor/reversed.hpp>
#include <gdal/vrtdataset.h>
#include "cpl_minixml.h"
#include "utility/streams.hpp"
#include "utility/buildsys.hpp"
#include "utility/openmp.hpp"
#include "utility/raise.hpp"
#include "utility/duration.hpp"
#include "utility/time.hpp"
#include "service/cmdline.hpp"
#include "utility/enum-io.hpp"
#include "utility/path.hpp"
#include "geo/geodataset.hpp"
#include "geo/gdal.hpp"
#include "gdal-drivers/solid.hpp"
#include "./generatevrtwo.hpp"
#include "./io.hpp"
namespace fs = boost::filesystem;
namespace ba = boost::algorithm;
namespace vrtwo {
namespace {
class NodeIterator {
public:
NodeIterator(::CPLXMLNode *node, const char *name = nullptr)
: node_(node->psChild), name_(name)
{
// go till node with given name is hit
while (node_ && !matches()) {
node_ = node_->psNext;
}
}
operator bool() const { return node_; }
::CPLXMLNode* operator*() { return node_; }
::CPLXMLNode* operator->() { return node_; }
NodeIterator& operator++() {
if (!node_) { return *this; }
// skip current node and find new with the same name
do {
node_ = node_->psNext;
} while (node_ && !matches());
return *this;
}
private:
bool matches() const {
return !name_ || !std::strcmp(name_, node_->pszValue);
}
::CPLXMLNode *node_;
const char *name_;
};
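
// Hedged usage sketch (not part of the original file): NodeIterator walks the
// children of a node that share a given element name, e.g.
//
//     for (NodeIterator ni(root, "VRTRasterBand"); ni; ++ni) {
//         handleBand(*ni);   // handleBand is hypothetical
//     }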
typedef std::shared_ptr< ::CPLXMLNode> XmlNode;
XmlNode xmlNode(const fs::path &path)
{
auto n(::CPLParseXMLFile(path.c_str()));
if (!n) {
LOGTHROW(err1, std::runtime_error)
<< "Cannot parse XML from " << path
<< ": <" << ::CPLGetLastErrorMsg() << ">.";
}
return XmlNode(n, [](::CPLXMLNode *n) { ::CPLDestroyXMLNode(n); });
}
XmlNode xmlNodeFromString(const std::string &data)
{
auto n(::CPLParseXMLString(data.c_str()));
if (!n) {
LOGTHROW(err1, std::runtime_error)
<< "Cannot parse XML from a string \"" << data
<< "\": <" << ::CPLGetLastErrorMsg() << ">.";
}
return XmlNode(n, [](::CPLXMLNode *n) { ::CPLDestroyXMLNode(n); });
}
typedef std::vector<math::Size2> Sizes;
// dataset mask type
UTILITY_GENERATE_ENUM(MaskType,
((none))
((nodata))
((band))
)
struct Setup {
math::Size2 size;
math::Extents2 extents;
Sizes ovrSizes;
Sizes ovrTiled;
int xPlus;
MaskType maskType;
fs::path outputDataset;
Setup() : xPlus(), maskType() {}
};
Setup makeSetup(const geo::GeoDataset::Descriptor &ds
, const Config &config)
{
auto size(ds.size);
auto extents(ds.extents);
auto halve([&]()
{
size.width = int(std::round(size.width / 2.0));
size.height = int(std::round(size.height / 2.0));
});
Setup setup;
setup.extents = extents;
setup.size = size;
// determine mask type
if (ds.maskType & GMF_ALL_VALID) {
setup.maskType = MaskType::none;
} else if (ds.maskType & GMF_NODATA) {
setup.maskType = MaskType::nodata;
} else {
setup.maskType = MaskType::band;
}
halve();
while ((size.width >= config.minOvrSize.width)
|| (size.height >= config.minOvrSize.height))
{
setup.ovrSizes.push_back(size);
if ((size.width == config.minOvrSize.width)
|| (size.height == config.minOvrSize.height))
{
// special case
break;
}
halve();
}
auto makeTiled([&]()
{
const auto &ts(config.tileSize);
for (const auto &size : setup.ovrSizes) {
setup.ovrTiled.emplace_back
((size.width + ts.width - 1) / ts.width
, (size.height + ts.height - 1) / ts.height);
}
});
if (!config.wrapx) {
makeTiled();
return setup;
}
    // add 3 pixels to each side at the bottom level and double on the way up
    // (3 pixels covers the worst case, the lanczos filter)
int add(6);
for (auto &s : boost::adaptors::reverse(setup.ovrSizes)) {
s.width += add;
add *= 2;
}
// set x plus component
setup.xPlus = add / 2;
// calculate pixel width
auto es(math::size(setup.extents));
auto pw(es.width / setup.size.width);
// calculate addition
auto eadd(setup.xPlus * pw);
// apply addition in both dimensions
setup.extents.ll(0) -= eadd;
setup.extents.ur(0) += eadd;
// and finally update size
setup.size.width += add;
makeTiled();
return setup;
}
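
// Hedged worked example (not part of the original file): for a 4096x4096
// dataset with config.minOvrSize = 256x256 and wrapx disabled, the halving
// loop above yields overview sizes 2048, 1024, 512 and 256 (squared); with a
// 1024x1024 tileSize the corresponding ovrTiled grids are 2x2, 1x1, 1x1 and
// 1x1 tiles.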
geo::GeoDataset::Format asVrt(geo::GeoDataset::Format f)
{
f.storageType = geo::GeoDataset::Format::Storage::vrt;
return f;
}
struct Rect {
math::Point2i origin;
math::Size2 size;
Rect(const math::Point2i &origin = math::Point2i()
, const math::Size2 &size = math::Size2())
: origin(origin), size(size)
{}
Rect(const math::Size2 &size) : origin(), size(size) {}
};
typedef boost::optional<Rect> OptionalRect;
struct BandDescriptor {
fs::path filename;
int srcBand;
Rect src;
Rect dst;
geo::GeoDataset::BandProperties bp;
BandDescriptor(const fs::path &filename
, const geo::GeoDataset &ds, int srcBand
, const OptionalRect &srcRect
, const OptionalRect &dstRect)
: filename(filename), srcBand(srcBand)
, src(srcRect ? *srcRect : Rect(ds.size()))
, dst(dstRect ? *dstRect : src)
, bp(ds.bandProperties(srcBand))
{}
void serialize(std::ostream &os, bool mask = false) const;
typedef std::vector<BandDescriptor> list;
};
class VrtDs {
public:
VrtDs(const fs::path &path, const geo::SrsDefinition &srs
, const math::Extents2 &extents, const math::Size2 &size
, const geo::GeoDataset::Format &format
, const geo::GeoDataset::NodataValue &nodata
, MaskType maskType)
: path_(path.string())
, ds_(geo::GeoDataset::create
(path, srs, extents, size, asVrt(format), nodata))
, bandCount_(format.channels.size())
, maskType_(maskType), maskBand_()
{
if (maskType == MaskType::band) {
maskBand_ = ds_.createPerDatasetMask<VRTSourcedRasterBand>();
}
}
void flush() {
// destroy dataset
ds_ = geo::GeoDataset::placeholder();
}
/** NB: band and srcBand are zero-based!
*/
void addSimpleSource(int band, const fs::path &filename
, const geo::GeoDataset &ds
, int srcBand
, const OptionalRect &srcRect = boost::none
, const OptionalRect &dstRect = boost::none);
void addBackground(const fs::path &path, const Color::optional &color
, const boost::optional<fs::path> &localTo
= boost::none);
const geo::GeoDataset& dataset() const { return ds_; }
std::size_t bandCount() const { return bandCount_; };
private:
    // need to use std::string because fs::path is non-moveable
std::string path_;
geo::GeoDataset ds_;
std::size_t bandCount_;
MaskType maskType_;
VRTSourcedRasterBand *maskBand_;
};
void writeSourceFilename(std::ostream &os, const fs::path &filename
, bool shared)
{
os << "<SourceFilename relativeToVRT=\""
<< int(!filename.is_absolute())
<< " shared=" << int(shared)
<< "\">" << filename.string() << "</SourceFilename>\n"
;
}
void writeSourceBand(std::ostream &os, int srcBand, bool mask)
{
os << "<SourceBand>";
if (mask) { os << "mask,"; }
os << (srcBand + 1) << "</SourceBand>\n";
}
void writeRect(std::ostream &os, const char *name, const Rect &r)
{
os << "<" << name << " xOff=\"" << r.origin(0)
<< "\" yOff=\"" << r.origin(1)
<< "\" xSize=\"" << r.size.width
<< "\" ySize=\"" << r.size.height << "\" />";
}
void BandDescriptor::serialize(std::ostream &os, bool mask) const
{
os << "<SimpleSource>\n";
writeSourceFilename(os, filename, true);
writeSourceBand(os, srcBand, mask);
writeRect(os, "SrcRect", src);
writeRect(os, "DstRect", dst);
os << "<SourceProperties RasterXSize=\""<< bp.size.width
<< "\" RasterYSize=\"" << bp.size.height
<< "\" DataType=\"" << bp.dataType
<< "\" BlockXSize=\"" << bp.blockSize.width
<< "\" BlockYSize=\"" << bp.blockSize.height
<< "\" />\n"
;
os << "</SimpleSource>\n"
;
}
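
// Hedged illustration (not part of the original file): for a relative tile
// "0-0.tif" with 1024x1024 bands of Byte data in 256x256 blocks, the
// serializer above emits roughly
//
//     <SimpleSource>
//     <SourceFilename relativeToVRT="1 shared=1">0-0.tif</SourceFilename>
//     <SourceBand>1</SourceBand>
//     <SrcRect xOff="0" yOff="0" xSize="1024" ySize="1024" />
//     <DstRect xOff="0" yOff="0" xSize="1024" ySize="1024" />
//     <SourceProperties RasterXSize="1024" RasterYSize="1024" DataType="Byte"
//         BlockXSize="256" BlockYSize="256" />
//     </SimpleSource>
//
// Note that writeSourceFilename() as written places the shared flag inside
// the quoted relativeToVRT attribute value.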
void VrtDs::addSimpleSource(int band, const fs::path &filename
, const geo::GeoDataset &ds
, int srcBand
, const OptionalRect &srcRect
, const OptionalRect &dstRect)
{
BandDescriptor bd(filename, ds, srcBand, srcRect, dstRect);
// set source
{
std::ostringstream os;
bd.serialize(os);
ds_.setMetadata(band + 1, geo::GeoDataset::Metadata("source", os.str())
, "new_vrt_sources");
}
// only if mask is being used
if (band || !maskBand_) { return; }
// add mask simple source
// serialize as a XML
std::ostringstream os;
bd.serialize(os, true);
// try to create simple source from parsed string
std::unique_ptr< ::VRTSimpleSource> src(new ::VRTSimpleSource());
#if GDAL_VERSION_NUM >= 3000000
std::map<CPLString, GDALDataset*> dsMap;
if (src->XMLInit(xmlNodeFromString(os.str()).get(), nullptr, nullptr
, dsMap) != CE_None)
#elif GDAL_VERSION_NUM >= 2040000
if (src->XMLInit(xmlNodeFromString(os.str()).get(), nullptr, nullptr)
!= CE_None)
#else
if (src->XMLInit(xmlNodeFromString(os.str()).get(), nullptr) != CE_None)
#endif
{
LOGTHROW(err2, std::runtime_error)
<< "Cannot parse VRT source from XML: <"
<< ::CPLGetLastErrorNo() << ", "
<< ::CPLGetLastErrorMsg() << ">.";
}
// add source to mask band
maskBand_->AddSource(src.release());
}
void VrtDs::addBackground(const fs::path &path
, const Color::optional &color
, const boost::optional<fs::path> &localTo)
{
if (!color) { return; }
auto background(*color);
background.resize(bandCount_);
const fs::path fname("bg.solid");
const fs::path bgPath(path / fname);
const fs::path storePath(localTo ? (*localTo / fname) : bgPath);
gdal_drivers::SolidDataset::Config cfg;
cfg.srs = ds_.srs();
cfg.size = ds_.size();
cfg.geoTransform(ds_.geoTransform());
for (std::size_t i(0); i != bandCount_; ++i) {
const auto bp(ds_.bandProperties(i));
gdal_drivers::SolidDataset::Config::Band band;
band.value = background[i];
band.colorInterpretation = bp.colorInterpretation;
band.dataType = bp.dataType;
cfg.bands.push_back(band);
}
// create background dataset
auto bg(geo::GeoDataset::use
(gdal_drivers::SolidDataset::create(bgPath, cfg)));
// map layers
for (std::size_t i(0); i != bandCount_; ++i) {
addSimpleSource(i, storePath, bg, i);
}
}
void addOverview(const fs::path &vrtPath, const fs::path &ovrPath)
{
auto root(xmlNode(vrtPath));
for (NodeIterator ni(root.get(), "VRTRasterBand"); ni; ++ni) {
NodeIterator bandNode(*ni, "band");
if (!bandNode) {
LOG(warn3) << "Cannot find band attribute in VRTRasterBand.";
continue;
}
// get band number
auto band(bandNode->psChild->pszValue);
auto overview(::CPLCreateXMLNode(*ni, ::CXT_Element, "Overview"));
auto sourceFilename(::CPLCreateXMLNode
(overview, ::CXT_Element, "SourceFilename"));
auto relativeToVRT(::CPLCreateXMLNode
(sourceFilename, CXT_Attribute, "relativeToVRT"));
::CPLCreateXMLNode(relativeToVRT, CXT_Text
, (ovrPath.is_absolute() ? "0" : "1"));
::CPLCreateXMLNode(sourceFilename, ::CXT_Text, ovrPath.c_str());
auto sourceBand(::CPLCreateXMLNode
(overview, ::CXT_Element, "SourceBand"));
::CPLCreateXMLNode(sourceBand, ::CXT_Text, band);
}
auto res(::CPLSerializeXMLTreeToFile(root.get(), vrtPath.c_str()));
if (!res) {
LOGTHROW(err3, std::runtime_error)
<< "Cannot save updated VRT file into " << vrtPath << ".";
}
}
fs::path symlinkSource(const Config &config, const fs::path &path
, const fs::path &base)
{
if (config.pathToOriginalDataset
== PathToOriginalDataset::absoluteSymlink)
{
return fs::absolute(path);
}
return utility::lexically_relative(fs::absolute(path)
, fs::absolute(base));
}
Setup buildDatasetBase(const Config &config
, const fs::path &input
, const fs::path &output)
{
if (config.pathToOriginalDataset == PathToOriginalDataset::copy) {
LOGTHROW(err2, std::runtime_error)
<< "Support for dataset copy not implemented yet.";
// TODO: use dataset->driver->CopyFiles to copy files
}
const auto outputDataset(output / "dataset");
fs::path inputDataset("./original");
{
        // use the original file name for datasets that insist on a special name
const auto des(geo::GeoDataset::open(input).descriptor());
if (des.driverName == "SRTMHGT") {
inputDataset = input.filename();
}
}
fs::path inputDatasetSymlink(output / inputDataset);
LOG(info3) << "Creating dataset base in " << outputDataset
<< " from " << inputDatasetSymlink << ".";
// make a symlink, remove newpath beforehand
auto symlink([](const fs::path &oldpath, const fs::path &newpath)
{
LOG(info1) << "Linking " << oldpath << " <- " << newpath << ".";
fs::remove(newpath);
fs::create_symlink(oldpath, newpath);
});
// make symlink to input dataset
symlink(symlinkSource(config, input, output), inputDatasetSymlink);
// make symlinks to "sidecar" files
// FIXME: update for symlink source!
{
const auto dir(input.parent_path());
const auto basename(input.filename().string());
const auto prefix(basename + ".");
        // temporarily open the dataset and grab the list of its files
const auto in(geo::GeoDataset::open(input));
for (const auto &file : in.files()) {
// get file name
const auto name(file.filename().string());
if (ba::starts_with(name, prefix)) {
const auto ext(name.substr(basename.size()));
symlink(symlinkSource(config, dir / name, output)
, utility::addExtension(inputDatasetSymlink, ext));
}
}
}
auto in(geo::GeoDataset::open(inputDatasetSymlink));
const auto ds(in.descriptor());
auto setup(makeSetup(ds, config));
setup.outputDataset = outputDataset;
// remove anything lying in the way of the dataset
boost::system::error_code ec;
fs::remove(outputDataset, ec);
// create virtual output dataset
VrtDs out(outputDataset, in.srs(), setup.extents
, setup.size, in.getFormat()
, (config.nodata ? *config.nodata
: in.rawNodataValue())
, setup.maskType);
// add input bands
auto inSize(in.size());
for (std::size_t i(0); i != in.bandCount(); ++i) {
if (config.wrapx) {
// wrapping in x
// get shift based on pixel overlap
const auto shift(*config.wrapx);
// add center section
Rect centerDst(math::Point2i(setup.xPlus, 0), inSize);
out.addSimpleSource(i, inputDataset, in, i
, boost::none, centerDst);
math::Size2 strip(math::Size2(setup.xPlus, inSize.height));
Rect rightSrc
(math::Point2i(inSize.width - setup.xPlus - shift, 0)
, strip);
Rect leftDst(math::Size2(setup.xPlus, inSize.height));
out.addSimpleSource(i, inputDataset, in, i, rightSrc, leftDst);
Rect leftSrc(math::Point2i(shift, 0)
, math::Size2(setup.xPlus, inSize.height));
Rect rightDst(math::Point2i(inSize.width + setup.xPlus, 0)
, strip);
out.addSimpleSource(i, inputDataset, in, i, leftSrc, rightDst);
} else {
out.addSimpleSource(i, inputDataset, in, i);
}
}
// done
out.flush();
return setup;
}
struct TIDGuard {
TIDGuard(const std::string &id)
: old(dbglog::thread_id())
{
dbglog::thread_id(id);
}
~TIDGuard() { dbglog::thread_id(old); }
const std::string old;
};
class Dataset {
public:
Dataset(const std::string &path)
: path_(path), ds_(geo::GeoDataset::placeholder())
{}
Dataset(const Dataset &d)
: path_(d.path_), ds_(geo::GeoDataset::open(path_))
{}
~Dataset() {}
geo::GeoDataset& ds() { return ds_; }
private:
std::string path_;
geo::GeoDataset ds_;
};
template <typename T>
bool compareValue(const cv::Mat_<T> &block
, const math::Size2 &size
, T value)
{
for (int j(0); j != size.height; ++j) {
for (int i(0); i != size.width; ++i) {
if (block(j, i) != value) { return false; }
}
}
return true;
}
bool compare(const geo::GeoDataset::Block &block, const math::Size2 &size
, ::GDALDataType type, double value)
{
switch (type) {
case ::GDT_Byte:
return compareValue<std::uint8_t>(block.data, size, value);
case ::GDT_UInt16:
return compareValue<std::uint16_t>(block.data, size, value);
case ::GDT_Int16:
return compareValue<std::int16_t>(block.data, size, value);
case ::GDT_UInt32:
case ::GDT_Int32:
// use signed comparison for unsigned int since OpenCV 4 has no
// specialization for unsigned int
return compareValue<std::int32_t>(block.data, size, value);
case ::GDT_Float32:
return compareValue<float>(block.data, size, value);
case ::GDT_Float64:
return compareValue<double>(block.data, size, value);
default:
utility::raise<std::runtime_error>
("Unsupported data type <%s>.", type);
};
throw;
}
bool emptyTile(const Config &config, const geo::GeoDataset &ds)
{
if (config.background) {
// TODO: we are using a background color: need to check content for
// exact color
// get background
int bands(ds.bandCount());
auto background(*config.background);
background.resize(bands);
auto bps(ds.bandProperties());
// process all blocks
for (const auto &bi : ds.getBlocking()) {
for (int i(0); i != bands; ++i) {
// load block in native format
auto block(ds.readBlock(bi.offset, i, true));
if (!compare(block, bi.size, bps[i].dataType, background[i])) {
// not single color
return false;
}
}
}
return true;
}
// no background -> do not store if mask is empty
// fetch optimized mask
auto mask(ds.fetchMask(true));
// no data -> full area is valid
if (!mask.data) { return false; }
// no non-zero count -> empty mask
return !cv::countNonZero(mask);
}
geo::GeoDataset createTmpDataset(const geo::GeoDataset &src
, const math::Extents2 &extents
, const math::Size2 &size
, MaskType maskType)
{
// data format
auto format(src.getFormat());
format.storageType = geo::GeoDataset::Format::Storage::memory;
auto nodata(src.rawNodataValue());
if (maskType == MaskType::band) {
// internal mask type, derive bigger data type and nodata value
const auto ds(src.descriptor());
switch (ds.dataType) {
// 8 bit -> 16 bits
case ::GDT_Byte:
format.channelType = ::GDT_Int16;
nodata = std::numeric_limits<std::int16_t>::lowest();
break;
// 16 bits -> 32 bits
case ::GDT_UInt16:
case ::GDT_Int16:
format.channelType = ::GDT_Int32;
nodata = std::numeric_limits<std::int32_t>::lowest();
break;
// 32 bits -> 64 bits
case ::GDT_UInt32:
case ::GDT_Int32:
case ::GDT_Float32:
format.channelType = ::GDT_Float64;
nodata = std::numeric_limits<double>::lowest();
break;
// 64 bits -> well, 64 bits + nodata value
case ::GDT_Float64:
nodata = std::numeric_limits<double>::lowest();
break;
default:
utility::raise<std::runtime_error>
("Unsupported data type <%s>.", ds.dataType);
}
}
    // create an in-memory temporary dataset
return geo::GeoDataset::create
("MEM", src.srs(), extents, size, format, nodata);
}
void copyWithMask(const geo::GeoDataset &src, geo::GeoDataset &dst)
{
for (const auto &bi : src.getBlocking()) {
// copy all data bands
dst.writeBlock(bi.offset, src.readBlock(bi.offset, true).data);
// copy mask band
dst.writeMaskBlock(bi.offset, src.readBlock(bi.offset, -1, true).data);
}
}
void createOutputDataset(const geo::GeoDataset &original
, const geo::GeoDataset &src
, const fs::path &path
, const geo::Options &createOptions
, MaskType maskType)
{
if (maskType != MaskType::band) {
// we can copy as is
UTILITY_OMP(critical(createOutputDataset))
src.copy(path, "GTiff", createOptions);
return;
}
// we need to create output dataset manually
auto format(original.getFormat());
// use custom format to prevent .tfw and .prj creation...
format.storageType = geo::GeoDataset::Format::Storage::custom;
format.driver = "GTiff";
auto dst(geo::GeoDataset::placeholder());
UTILITY_OMP(critical(createOutputDataset))
dst = geo::GeoDataset::create(path, src.srs(), src.extents()
, src.size(), format, boost::none
, createOptions);
copyWithMask(src, dst);
dst.flush();
}
fs::path createOverview(const Config &config
, const boost::filesystem::path &output
, int ovrIndex
, const fs::path &srcPath
, const fs::path &dir
, const math::Size2 &size
, const math::Size2 &tiled
, std::atomic<int> &progress, int total
, MaskType maskType)
{
auto ovrName(dir / "ovr.vrt");
auto ovrPath(output / ovrName);
const auto &ts(config.tileSize);
LOG(info3)
<< "Creating overview #" << ovrIndex
<< " of " << math::area(tiled) << " tiles in "
<< ovrPath << " from " << srcPath << ".";
    // copy options so that PREDICTOR can be modified if necessary
geo::Options createOptions(config.createOptions);
VrtDs ovr([&]() -> VrtDs
{
auto src(geo::GeoDataset::open(srcPath));
// If create options contain PREDICTOR, check/set its value based on
// original dataset type.
auto &opts(createOptions.options);
auto it(std::find_if( opts.begin(), opts.end()
, [](const geo::Options::Option &op)
{
return op.first == "PREDICTOR";
}));
if (it != opts.end()) {
// find out what the value of predictor should be
auto predictor([&]() -> std::string {
switch (src.descriptor().dataType) {
case ::GDT_Float32:
case ::GDT_Float64:
return "3";
default:
break;
}
return "2";
}());
            if (it->second.empty()) {
                // no value given -> set the predictor to the optimal value
                it->second = predictor;
            } else if (it->second == "1") {
                // predictor explicitly turned off -> leave it alone
            } else if (it->second != predictor) {
LOGTHROW(err2, std::runtime_error)
<< "PREDICTOR value and bandtype mismatch. Use 2 for "
<< "integer and 3 for floating point or leave without "
<< "value to be determined automatically.";
}
}
return VrtDs(ovrPath, src.srs(), src.extents()
, size, src.getFormat(), src.rawNodataValue()
, maskType);
}());
(void) maskType;
auto extents(ovr.dataset().extents());
ovr.addBackground(output / dir, config.background, fs::path());
// compute tile size in real extents
auto tileSize([&]() -> math::Size2f
{
auto es(math::size(extents));
return math::Size2f((es.width * ts.width) / size.width
, (es.height * ts.height) / size.height);
}());
// extent's upper-left corner is origin for tile calculations
math::Point2 origin(ul(extents));
auto tc(math::area(tiled));
// last tile size
math::Size2 lts(size.width - (tiled.width - 1) * ts.width
, size.height - (tiled.height - 1) * ts.height);
// Dataset dataset(srcPath.string());
// use full dataset and disable safe-chunking
geo::GeoDataset::WarpOptions warpOptions;
warpOptions.overview = geo::GeoDataset::Overview();
warpOptions.safeChunks = false;
// UTILITY_OMP(parallel for firstprivate(dataset) schedule(dynamic))
UTILITY_OMP(parallel for schedule(dynamic))
for (int i = 0; i < tc; ++i) {
utility::DurationMeter timer;
math::Point2i tile(i % tiled.width, i / tiled.width);
bool lastX(tile(0) == (tiled.width - 1));
bool lastY(tile(1) == (tiled.height - 1));
math::Size2 pxSize(lastX ? lts.width : ts.width
, lastY ? lts.height : ts.height);
// calculate extents
math::Point2 ul(origin(0) + tileSize.width * tile(0)
, origin(1) - tileSize.height * tile(1));
math::Point2 lr(lastX ? extents.ur(0) : ul(0) + tileSize.width
, lastY ? extents.ll(1): ul(1) - tileSize.height);
math::Extents2 te(ul(0), lr(1), lr(0), ul(1));
TIDGuard tg(str(boost::format("tile:%d-%d-%d")
% ovrIndex % tile(0) % tile(1)));
LOG(info2)
<< std::fixed
<< "Processing tile " << ovrIndex
<< '-' << tile(0) << '-' << tile(1) << " (size: " << pxSize
<< ", extents: " << te << ").";
// try warp
auto src(geo::GeoDataset::open(srcPath));
// auto &src(dataset.ds());
        // store result to file
fs::path tileName(str(boost::format("%d-%d.tif")
% tile(0) % tile(1)));
fs::path tilePath(output / dir / tileName);
auto tmp(createTmpDataset(src, te, pxSize, maskType));
src.warpInto(tmp, config.resampling, warpOptions);
// check result and skip if no need to store
if (emptyTile(config, tmp)) {
auto id(++progress);
LOG(info3)
<< std::fixed
<< "Processed tile #" << id << '/' << total << ' '
<< ovrIndex
<< '-' << tile(0) << '-' << tile(1) << " (size: " << pxSize
<< ", extents: " << te << ") [empty]"
<< "; duration: "
<< utility::formatDuration(timer.duration()) << ".";
continue;
}
// make room for output file
fs::remove(tilePath);
createOutputDataset(src, tmp, tilePath
, createOptions // use modified options
, maskType);
// store result
Rect drect(math::Point2i(tile(0) * ts.width, tile(1) * ts.height)
, pxSize);
UTILITY_OMP(critical(createOverwiew_addSimpleSource))
for (std::size_t b(0), eb(ovr.bandCount()); b != eb; ++b) {
ovr.addSimpleSource(b, tileName, tmp, b
, boost::none, drect);
}
auto id(++progress);
LOG(info3)
<< std::fixed
<< "Processed tile #" << id << '/' << total << ' ' << ovrIndex
<< '-' << tile(0) << '-' << tile(1) << " (size: " << pxSize
<< ", extents: " << te << ") [valid]"
<< "; duration: "
<< utility::formatDuration(timer.duration()) << ".";
}
ovr.flush();
return ovrName;
}
} // namespace
/** Generate virtual geodataset with overviews
*/
void generate(const boost::filesystem::path &input
, const boost::filesystem::path &output
, const Config &config)
{
if (!fs::create_directories(output) && !config.overwrite) {
LOGTHROW(err3, std::runtime_error)
<< "Destination directory already exits. Use --overwrite "
"to force existing output overwrite.";
}
auto setup(buildDatasetBase(config, input, output));
auto total(std::accumulate(setup.ovrTiled.begin(), setup.ovrTiled.end()
, 0, [&](int t, const math::Size2 &tiled)
{
return t + math::area(tiled);
}));
LOG(info3) << "About to generate " << setup.ovrSizes.size()
<< " overviews with " << total << " tiles of size "
<< config.tileSize << ".";
std::atomic<int> progress(0);
// generate overviews
fs::path inputPath(setup.outputDataset);
for (std::size_t i(0); i != setup.ovrSizes.size(); ++i) {
auto dir(str(boost::format("%d") % i));
fs::create_directories(output / dir);
auto path(createOverview
(config, output, i, inputPath, dir, setup.ovrSizes[i]
, setup.ovrTiled[i], progress, total
, setup.maskType));
// add overview (manually by manipulating the XML)
addOverview(setup.outputDataset, path);
// use previous level in the next round
inputPath = output / path;
}
}
} // namespace vrtwo
|
{"hexsha": "fdf9805304abc1419a308eb51dc34ea5ecd20eb5", "size": 33211, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "mapproxy/src/generatevrtwo/generatevrtwo.cpp", "max_stars_repo_name": "melowntech/vts-mapproxy", "max_stars_repo_head_hexsha": "241ba43c1f7dcc226ec0f2089d47e11c699c2587", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 13.0, "max_stars_repo_stars_event_min_datetime": "2019-05-03T06:09:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-10T05:05:45.000Z", "max_issues_repo_path": "mapproxy/src/generatevrtwo/generatevrtwo.cpp", "max_issues_repo_name": "melowntech/vts-mapproxy", "max_issues_repo_head_hexsha": "241ba43c1f7dcc226ec0f2089d47e11c699c2587", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 10.0, "max_issues_repo_issues_event_min_datetime": "2019-04-16T12:43:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T02:44:09.000Z", "max_forks_repo_path": "mapproxy/src/generatevrtwo/generatevrtwo.cpp", "max_forks_repo_name": "melowntech/vts-mapproxy", "max_forks_repo_head_hexsha": "241ba43c1f7dcc226ec0f2089d47e11c699c2587", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-09-25T04:57:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-10T05:05:46.000Z", "avg_line_length": 31.4498106061, "max_line_length": 79, "alphanum_fraction": 0.555809822, "num_tokens": 7991}
|
// kv_dictionary_test_harness.cpp
/**
* Copyright (C) 2014 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the GNU Affero General Public License in all respects for
* all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
#include <algorithm>
#include <vector>
#include <boost/scoped_ptr.hpp>
#include "mongo/db/storage/kv/dictionary/kv_dictionary.h"
#include "mongo/db/storage/kv/dictionary/kv_dictionary_test_harness.h"
#include "mongo/db/storage/kv/slice.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
using boost::scoped_ptr;
TEST( KVDictionary, Simple1 ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<KVDictionary> db( harnessHelper->newKVDictionary() );
{
const Slice hi = Slice::of("hi");
const Slice there = Slice::of("there");
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
WriteUnitOfWork uow( opCtx.get() );
Status status = db->insert( opCtx.get(), hi, there, false );
ASSERT( status.isOK() );
status = db->get( opCtx.get(), hi, value );
ASSERT( status.isOK() );
status = db->remove( opCtx.get(), hi );
ASSERT( status.isOK() );
status = db->get( opCtx.get(), hi, value );
ASSERT( status.code() == ErrorCodes::NoSuchKey );
uow.commit();
}
}
}
TEST( KVDictionary, Simple2 ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<KVDictionary> db( harnessHelper->newKVDictionary() );
const Slice hi = Slice::of("hi");
const Slice there = Slice::of("there");
const Slice apple = Slice::of("apple");
const Slice bears = Slice::of("bears");
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
Status status = db->insert( opCtx.get(), hi, there, false );
ASSERT( status.isOK() );
uow.commit();
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
Status status = db->insert( opCtx.get(), apple, bears, false );
ASSERT( status.isOK() );
uow.commit();
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
Status status = db->get( opCtx.get(), hi, value );
ASSERT( status.isOK() );
ASSERT( value.size() == 6 );
ASSERT( std::string( "there" ) == std::string( value.data() ) );
}
{
Slice value;
Status status = db->get( opCtx.get(), apple, value );
ASSERT( status.isOK() );
ASSERT( value.size() == 6 );
ASSERT( std::string( "bears" ) == std::string( value.data() ) );
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
Status status = db->remove( opCtx.get(), hi );
ASSERT( status.isOK() );
uow.commit();
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
Status status = db->get( opCtx.get(), hi, value );
ASSERT( status.code() == ErrorCodes::NoSuchKey );
}
{
Slice value;
Status status = db->get( opCtx.get(), apple, value );
ASSERT( status.isOK() );
ASSERT( value.size() == 6 );
ASSERT( std::string( "bears" ) == std::string( value.data() ) );
}
{
WriteUnitOfWork uow( opCtx.get() );
Status status = db->remove( opCtx.get(), apple );
ASSERT( status.isOK() );
uow.commit();
}
{
Slice value;
Status status = db->get( opCtx.get(), apple, value );
ASSERT( status.code() == ErrorCodes::NoSuchKey );
}
}
}
TEST( KVDictionary, InsertSerialGetSerial ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<KVDictionary> db( harnessHelper->newKVDictionary() );
const unsigned char nKeys = 100;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
WriteUnitOfWork uow( opCtx.get() );
for (unsigned char i = 0; i < nKeys; i++) {
const Slice slice = Slice::of(i);
Status status = db->insert( opCtx.get(), slice, slice, false );
ASSERT( status.isOK() );
}
uow.commit();
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
for (unsigned char i = 0; i < nKeys; i++) {
Slice value;
Status status = db->get( opCtx.get(), Slice::of(i), value );
ASSERT( status.isOK() );
ASSERT( value.as<unsigned char>() == i );
}
}
}
}
static int _rng(int i) { return std::rand() % i; }
TEST( KVDictionary, InsertRandomGetSerial ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<KVDictionary> db( harnessHelper->newKVDictionary() );
const unsigned char nKeys = 100;
{
std::vector<unsigned char> keys;
for (unsigned char i = 0; i < nKeys; i++) {
keys.push_back(i);
}
std::srand(unsigned(time(0)));
std::random_shuffle(keys.begin(), keys.end(), _rng);
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
WriteUnitOfWork uow( opCtx.get() );
for (unsigned char i = 0; i < nKeys; i++) {
const Slice slice = Slice::of(keys[i]);
Status status = db->insert( opCtx.get(), slice, slice, false );
ASSERT( status.isOK() );
}
uow.commit();
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
for (unsigned char i = 0; i < nKeys; i++) {
Status status = db->get( opCtx.get(), Slice::of(i), value );
ASSERT( status.isOK() );
ASSERT( value.as<unsigned char>() == i );
}
}
}
}
TEST( KVDictionary, InsertRandomCursor ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<KVDictionary> db( harnessHelper->newKVDictionary() );
const unsigned char nKeys = 100;
{
std::vector<unsigned char> keys;
for (unsigned char i = 0; i < nKeys; i++) {
keys.push_back(i);
}
std::srand(unsigned(time(0)));
std::random_shuffle(keys.begin(), keys.end(), _rng);
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
WriteUnitOfWork uow( opCtx.get() );
for (unsigned char i = 0; i < nKeys; i++) {
const Slice slice = Slice::of(keys[i]);
Status status = db->insert( opCtx.get(), slice, slice, false );
ASSERT( status.isOK() );
}
uow.commit();
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
const int direction = 1;
unsigned char i = 0;
for (scoped_ptr<KVDictionary::Cursor> c(db->getCursor(opCtx.get(), direction));
c->ok(); c->advance(opCtx.get()), i++) {
ASSERT( c->currKey().as<unsigned char>() == i );
ASSERT( c->currVal().as<unsigned char>() == i );
}
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
const int direction = -1;
unsigned char i = nKeys - 1;
for (scoped_ptr<KVDictionary::Cursor> c(db->getCursor(opCtx.get(), direction));
c->ok(); c->advance(opCtx.get()), i--) {
ASSERT( c->currKey().as<unsigned char>() == i );
ASSERT( c->currVal().as<unsigned char>() == i );
}
}
}
}
TEST( KVDictionary, InsertDeleteCursor ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<KVDictionary> db( harnessHelper->newKVDictionary() );
const unsigned char nKeys = 100;
std::vector<unsigned char> keys;
for (unsigned char i = 0; i < nKeys; i++) {
keys.push_back(i);
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
WriteUnitOfWork uow( opCtx.get() );
for (unsigned char i = 0; i < nKeys; i++) {
const Slice slice = Slice::of(keys[i]);
Status status = db->insert( opCtx.get(), slice, slice, false );
ASSERT( status.isOK() );
}
uow.commit();
}
}
std::srand(unsigned(time(0)));
std::random_shuffle(keys.begin(), keys.end(), _rng);
std::set<unsigned char> remainingKeys;
std::set<unsigned char> deletedKeys;
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
WriteUnitOfWork uow( opCtx.get() );
for (unsigned char i = 0; i < nKeys; i++) {
unsigned char k = keys[i];
if (i < (nKeys / 2)) {
Status status = db->remove( opCtx.get(), Slice::of(k) );
ASSERT( status.isOK() );
deletedKeys.insert(k);
} else {
remainingKeys.insert(k);
}
}
uow.commit();
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
const int direction = 1;
unsigned char i = 0;
for (scoped_ptr<KVDictionary::Cursor> c(db->getCursor(opCtx.get(), direction));
c->ok(); c->advance(opCtx.get()), i++) {
unsigned char k = c->currKey().as<unsigned char>();
ASSERT( remainingKeys.count(k) == 1 );
ASSERT( deletedKeys.count(k) == 0 );
ASSERT( k == c->currVal().as<unsigned char>() );
remainingKeys.erase(k);
}
ASSERT( remainingKeys.empty() );
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
for (std::set<unsigned char>::const_iterator it = deletedKeys.begin();
it != deletedKeys.end(); it++) {
unsigned char k = *it;
Slice value;
Status status = db->get( opCtx.get(), Slice::of(k), value );
ASSERT( status.code() == ErrorCodes::NoSuchKey );
ASSERT( value.size() == 0 );
}
}
}
}
TEST( KVDictionary, CursorSeekForward ) {
scoped_ptr<HarnessHelper> harnessHelper( newHarnessHelper() );
scoped_ptr<KVDictionary> db( harnessHelper->newKVDictionary() );
const unsigned char nKeys = 101; // odd on purpose: an even number makes the test magic more complicated
std::vector<unsigned char> keys;
for (unsigned char i = 0; i < nKeys; i++) {
keys.push_back(i);
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
Slice value;
WriteUnitOfWork uow( opCtx.get() );
for (unsigned char i = 0; i < nKeys; i += 2) {
const Slice slice = Slice::of(keys[i]);
Status status = db->insert( opCtx.get(), slice, slice, false );
ASSERT( status.isOK() );
}
uow.commit();
}
}
{
scoped_ptr<OperationContext> opCtx( harnessHelper->newOperationContext() );
{
scoped_ptr<KVDictionary::Cursor> cursor( db->getCursor( opCtx.get(), 1 ) );
for (unsigned char i = 0; i < nKeys; i++) {
cursor->seek( opCtx.get(), Slice::of(keys[i]) );
if ( i % 2 == 0 ) {
ASSERT( cursor->currKey().as<unsigned char>() == i );
} else if ( i + 1 < nKeys ) {
ASSERT( cursor->currKey().as<unsigned char>() == i + 1);
}
}
}
{
scoped_ptr<KVDictionary::Cursor> cursor( db->getCursor( opCtx.get(), -1 ) );
for (unsigned char i = 1; i < nKeys; i++) {
cursor->seek(opCtx.get(), Slice::of(keys[i]));
if ( i % 2 == 0 ) {
ASSERT( cursor->currKey().as<unsigned char>() == i );
} else {
ASSERT( cursor->currKey().as<unsigned char>() == i - 1 );
}
}
}
}
}
}
|
{"hexsha": "b94383cda60b97134dce5190f9f7e23e0971907e", "size": 15925, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/mongo/db/storage/kv/dictionary/kv_dictionary_test_harness.cpp", "max_stars_repo_name": "leifwalsh/mongo", "max_stars_repo_head_hexsha": "4cf51324255f76a110246f6d1646dc8cda570141", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mongo/db/storage/kv/dictionary/kv_dictionary_test_harness.cpp", "max_issues_repo_name": "leifwalsh/mongo", "max_issues_repo_head_hexsha": "4cf51324255f76a110246f6d1646dc8cda570141", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mongo/db/storage/kv/dictionary/kv_dictionary_test_harness.cpp", "max_forks_repo_name": "leifwalsh/mongo", "max_forks_repo_head_hexsha": "4cf51324255f76a110246f6d1646dc8cda570141", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6477541371, "max_line_length": 95, "alphanum_fraction": 0.4915541601, "num_tokens": 3370}
|
"""Functions for specific to timelapse datasets."""
import numpy as np
from skimage.util import img_as_ubyte
from skimage.exposure import rescale_intensity
from .tissue import epithelium_watershed, largest_object_mask, segment_hemijunctions
from ..utils import validate_mask
def segment_epithelium_timelapse(
ims_intensities, ims_mask=None, ims_seeds=None
):
"""
Segment a timelapse of a live-imaged epithelium.
Parameters
----------
ims_intensities : 2D+T ndarray (t,y,x)
Each timepoint is a 2D array.
ims_mask : 2D+T ndarray (t,y,x), optional
Each timepoint is a 2D boolean array. True values are pixels to
be included for analysis.
ims_seeds : 2D+T ndarray (t,y,x), optional
Each timepoint is a 2D array with integer region labels.
Returns
-------
ims_labels : 3D numpy array, (t,y,x)
Each timepoint is a 2D array with integer-labeled regions.
"""
ims_mask = validate_mask(ims_intensities, ims_mask)
# Total number of frames
total_t = np.shape(ims_intensities)[0]
# Allocate a (t,y,x) array for the output labels
ims_labels = np.zeros(np.shape(ims_intensities), dtype=np.uint16)
# Loop over frames, rescaling and segmenting each one
for t in range(total_t):
if ims_seeds is not None:
seed = ims_seeds[t]
else:
seed = None
ims_labels[t] = epithelium_watershed(
img_as_ubyte(rescale_intensity(ims_intensities[t])),
mask=ims_mask[t],
im_seeds=seed,
)
return ims_labels
def largest_object_mask_timelapse(
ims_intensities, blurring_sigma=15, threshold="adaptive"
):
"""
Make a mask of the largest bright object in each timelapse timepoint.
Parameters
----------
ims_intensities : 3D ndarray (t,y,x)
Each timepoint is a 2D array.
blurring_sigma: int
Sigma of Gaussian kernel used to blur the images
threshold: int or str "adaptive"
Threshold to separate object from background pixels.
If "adaptive", Otsu's adaptive thresholding is used.
Returns
-------
ims_mask: 3D ndarray (t,y,x)
3D boolean array with the same shape as ims_intensities. Objects
are True on a False background.
"""
ims_mask = np.zeros(ims_intensities.shape, dtype=bool)
for i in range(ims_intensities.shape[0]):
ims_mask[i] = largest_object_mask(ims_intensities[i], blurring_sigma, threshold)
return ims_mask
def segment_hemijunctions_timelapse(
ims_labels, ims_intensities, edge_range=(10, 200), area_range=(20, 2000)
):
"""
Segment all hemijunctions in a timelapse.
Parameters
----------
ims_labels : 3D ndarray (t,y,x)
Each timepoint is a 2D array with region labels.
ims_intensities : 3D ndarray (t,y,x)
Each timepoint is a 2D array.
Returns
-------
ims_labels_refined : 3D ndarray (t,y,x)
Each timepoint is a 2D array with region labels, but cell-cell boundaries
have been updated.
ims_labels_hjs : 3D ndarray (t,y,x)
Each timepoint is a 2D array with hemijunctions labeled such that each one
has the same label as its "sending cell". Each "interface" spans a cell-cell
junction and is composed of two hemijunctions.
"""
ims_labels_refined = np.zeros_like(ims_labels)
ims_labels_hjs = np.zeros_like(ims_labels)
for t in range(ims_labels.shape[0]):
print(f"Segmenting hemijunctions for timepoint {t}.")
ims_labels_refined[t], ims_labels_hjs[t] = segment_hemijunctions(
ims_labels[t], ims_intensities[t], edge_range, area_range
)
return ims_labels_refined, ims_labels_hjs
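# A minimal end-to-end sketch of how these helpers compose, kept as comments so
# the module stays import-only. Illustrative only: random intensities stand in
# for real data, and it assumes this module is importable as below (its relative
# imports must resolve inside the package).
#
#   import numpy as np
#   from functions.segment.timelapse import (
#       largest_object_mask_timelapse,
#       segment_epithelium_timelapse,
#       segment_hemijunctions_timelapse,
#   )
#
#   ims = np.random.rand(5, 128, 128)                  # (t, y, x) intensities
#   masks = largest_object_mask_timelapse(ims)         # boolean tissue masks
#   labels = segment_epithelium_timelapse(ims, masks)  # per-cell integer labels
#   refined, hjs = segment_hemijunctions_timelapse(labels, ims)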
|
{"hexsha": "67ead659c27864df64a853a12b2ad9272b98f00d", "size": 3701, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/functions/segment/timelapse.py", "max_stars_repo_name": "a9w/Fat2_polarizes_WAVE", "max_stars_repo_head_hexsha": "be39ba21245a9b532a70954a38139976a2355a7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/functions/segment/timelapse.py", "max_issues_repo_name": "a9w/Fat2_polarizes_WAVE", "max_issues_repo_head_hexsha": "be39ba21245a9b532a70954a38139976a2355a7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/functions/segment/timelapse.py", "max_forks_repo_name": "a9w/Fat2_polarizes_WAVE", "max_forks_repo_head_hexsha": "be39ba21245a9b532a70954a38139976a2355a7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4649122807, "max_line_length": 88, "alphanum_fraction": 0.6719805458, "include": true, "reason": "import numpy", "num_tokens": 971}
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tensorflow as tf
from addict import Dict
import numpy as np
import pytest
from pytest import approx
from beta.nncf.tensorflow.layers.wrapper import NNCFWrapper
from beta.nncf.tensorflow.sparsity.magnitude.operation import BinaryMask
from beta.nncf.tensorflow.sparsity.magnitude.algorithm import MagnitudeSparsityController
from beta.nncf.tensorflow.sparsity.magnitude.functions import normed_magnitude
from beta.tests.tensorflow.helpers import check_equal, create_compressed_model_and_algo_for_test, \
get_mock_model, get_empty_config, get_basic_conv_test_model
from beta.tests.tensorflow.sparsity.magnitude.test_helpers import get_magnitude_test_model, \
get_basic_magnitude_sparsity_config, ref_mask_2, ref_mask_1
def test_can_create_magnitude_sparse_algo__with_defaults():
model = get_magnitude_test_model()
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = \
{'schedule': 'multistep'}
sparse_model, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
assert isinstance(compression_ctrl, MagnitudeSparsityController)
assert compression_ctrl.scheduler.current_sparsity_level == approx(0.1)
conv_names = [layer.name for layer in model.layers if isinstance(layer, tf.keras.layers.Conv2D)]
wrappers = [layer for layer in sparse_model.layers if isinstance(layer, NNCFWrapper)]
correct_wrappers = [wrapper for wrapper in wrappers if wrapper.name in conv_names]
assert len(conv_names) == len(wrappers)
assert len(conv_names) == len(correct_wrappers)
assert compression_ctrl._threshold == approx(0.24, 0.1) # pylint: disable=protected-access
# pylint: disable=protected-access
assert isinstance(compression_ctrl._weight_importance_fn, type(normed_magnitude))
for i, wrapper in enumerate(wrappers):
ref_mask = tf.ones_like(wrapper.weights[-1]) if i == 0 else ref_mask_2
mask = list(wrapper.ops_weights.values())[0]['mask']
op = list(wrapper.weights_attr_ops['kernel'].values())[0]
tf.assert_equal(mask, ref_mask)
assert isinstance(op, BinaryMask)
def test_compression_controller_state():
model = get_magnitude_test_model()
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = \
{'schedule': 'multistep'}
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
# Test get state
compression_ctrl.scheduler.current_step = 100
compression_ctrl.scheduler.current_epoch = 5
assert compression_ctrl.get_state()['scheduler_state'] == {'current_step': 100, 'current_epoch': 5}
# Test load state
new_state = {'scheduler_state': {'current_step': 500, 'current_epoch': 10}, 'loss_state': {}}
compression_ctrl.load_state(new_state)
assert compression_ctrl.scheduler.current_step == 500
assert compression_ctrl.scheduler.current_epoch == 10
assert compression_ctrl.get_state() == new_state
@pytest.mark.parametrize(
('weight_importance', 'sparsity_level', 'threshold'),
(
('normed_abs', None, 0.219),
('abs', None, 9),
('normed_abs', 0.5, 0.243),
('abs', 0.5, 10),
)
)
def test_magnitude_sparse_algo_sets_threshold(weight_importance, sparsity_level, threshold):
model = get_magnitude_test_model()
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = {'schedule': 'multistep',
'weight_importance': weight_importance}
_, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
if sparsity_level:
compression_ctrl.set_sparsity_level(sparsity_level)
assert compression_ctrl._threshold == pytest.approx(threshold, 0.01) # pylint: disable=protected-access
def test_can_create_magnitude_algo__without_levels():
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = {'schedule': 'multistep', 'multistep_steps': [1]}
_, compression_ctrl = create_compressed_model_and_algo_for_test(get_mock_model(), config)
assert compression_ctrl.scheduler.current_sparsity_level == approx(0.1)
def test_can_not_create_magnitude_algo__with_not_matched_steps_and_levels():
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = {'schedule': 'multistep', 'multistep_sparsity_levels': [0.1],
'multistep_steps': [1, 2]}
with pytest.raises(ValueError):
_, _ = create_compressed_model_and_algo_for_test(get_mock_model(), config)
def test_magnitude_algo_set_binary_mask_on_forward():
config = get_basic_magnitude_sparsity_config()
config['compression']['params'] = {'weight_importance': 'abs'}
sparse_model, compression_ctrl = create_compressed_model_and_algo_for_test(get_magnitude_test_model(), config)
compression_ctrl.set_sparsity_level(0.3)
check_equal(ref_mask_1, sparse_model.layers[1].weights[-1])
check_equal(ref_mask_2, sparse_model.layers[2].weights[-1])
def test_magnitude_algo_binary_masks_are_applied():
input_shape = (1, 5, 5, 1)
model = get_basic_conv_test_model(input_shape=input_shape[1:])
config = get_empty_config(input_sample_sizes=input_shape)
config.update(Dict({'compression': {'algorithm': "magnitude_sparsity"}}))
compressed_model, _ = create_compressed_model_and_algo_for_test(model, config)
conv = compressed_model.layers[1]
op_name = list(conv.ops_weights.keys())[0]
conv.ops_weights[op_name] = {'mask': tf.ones_like(conv.weights[0])}
input_ = tf.ones(input_shape)
ref_output_1 = -4 * tf.ones((1, 4, 4, 2))
output_1 = compressed_model(input_)
tf.assert_equal(output_1, ref_output_1)
np_mask = conv.ops_weights[op_name]['mask'].numpy()
np_mask[0, 1, 0, 0] = 0
np_mask[1, 0, 0, 1] = 0
conv.ops_weights[op_name] = {'mask': tf.constant(np_mask)}
ref_output_2 = - 3 * tf.ones_like(ref_output_1)
output_2 = compressed_model(input_)
tf.assert_equal(output_2, ref_output_2)
np_mask[0, 1, 0, 1] = 0
conv.ops_weights[op_name] = {'mask': tf.constant(np_mask)}
ref_output_3 = ref_output_2.numpy()
ref_output_3[..., 1] = -2 * np.ones_like(ref_output_1[..., 1])
ref_output_3 = tf.constant(ref_output_3)
output_3 = compressed_model(input_)
tf.assert_equal(output_3, ref_output_3)
|
{"hexsha": "960a45c3b295eee79eae1ec7ab5bfcc60af20dfa", "size": 6959, "ext": "py", "lang": "Python", "max_stars_repo_path": "beta/tests/tensorflow/sparsity/magnitude/test_algorithm.py", "max_stars_repo_name": "xiao1228/nncf", "max_stars_repo_head_hexsha": "307262119ee3f50eec2fa4022b2ef96693fd8448", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "beta/tests/tensorflow/sparsity/magnitude/test_algorithm.py", "max_issues_repo_name": "xiao1228/nncf", "max_issues_repo_head_hexsha": "307262119ee3f50eec2fa4022b2ef96693fd8448", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "beta/tests/tensorflow/sparsity/magnitude/test_algorithm.py", "max_forks_repo_name": "xiao1228/nncf", "max_forks_repo_head_hexsha": "307262119ee3f50eec2fa4022b2ef96693fd8448", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1883116883, "max_line_length": 114, "alphanum_fraction": 0.7416295445, "include": true, "reason": "import numpy", "num_tokens": 1660}
|
from numpy import array, full, sqrt, sin, abs
from benchmarks.benchmark import Benchmark
class Schwefel(Benchmark):
"""dim: n"""
def __init__(self, lower=-500, upper=500, dimension=2):
super(Schwefel, self).__init__(lower, upper, dimension)
def get_optimum(self):
return array([full(self.dimension, 420.9687)]), 2.545567497236334e-05
@staticmethod
def eval(sol):
val = 0
for x in sol:
val = val + x * sin(sqrt(abs(x)))
return 418.9829 * len(sol) - val
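# Minimal usage sketch, kept as a comment. It assumes the repo's package layout
# (benchmarks/schwefel.py importable as below); the dimension is illustrative.
#
#   from benchmarks.schwefel import Schwefel
#   f = Schwefel(dimension=2)
#   opt_points, opt_value = f.get_optimum()
#   print(Schwefel.eval(opt_points[0]))  # ~2.5e-05, i.e. opt_value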
|
{"hexsha": "155a2877445ffda9d227bd667a746820a27bc86b", "size": 537, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarks/schwefel.py", "max_stars_repo_name": "buctlab/NIO", "max_stars_repo_head_hexsha": "094e688dd1cd3def7f31cd16ff927d4324651422", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-23T09:12:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T08:43:32.000Z", "max_issues_repo_path": "benchmarks/schwefel.py", "max_issues_repo_name": "buctlab/NIO", "max_issues_repo_head_hexsha": "094e688dd1cd3def7f31cd16ff927d4324651422", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmarks/schwefel.py", "max_forks_repo_name": "buctlab/NIO", "max_forks_repo_head_hexsha": "094e688dd1cd3def7f31cd16ff927d4324651422", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-12-02T08:03:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T18:04:00.000Z", "avg_line_length": 26.85, "max_line_length": 77, "alphanum_fraction": 0.6145251397, "include": true, "reason": "from numpy", "num_tokens": 151}
|
using Decomp
using Base.Test
a = zeros(3,3)
for i=1:100
a[1,1] = rand(-2.:10e-8:2.)
a[2,2] = rand(-2.:10e-8:2.)
a[3,3] = rand(-2.:10e-8:2.)
a[1,2] = rand(-2.:10e-8:2.)
a[1,3] = rand(-2.:10e-8:2.)
a[2,3] = rand(-2.:10e-8:2.)
a[2,1] = a[1,2]
a[3,1] = a[1,3]
a[3,2] = a[2,3]
eigv,eigvec1,eigvec2,eigvec3 = eigen(a)
eigvc,eigvec123 = eig(a)
@test eigv ≈ sort!(eigvc,by=abs,rev=true)
@test eigv[1] * eigvec1*eigvec1' .+ eigv[2] * eigvec2*eigvec2' .+ eigv[3] * eigvec3*eigvec3' ≈ a
end
|
{"hexsha": "d957f38cb89cf46918a69e09b190e5d7770adf68", "size": 507, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "favba/Decomp.jl", "max_stars_repo_head_hexsha": "edc0df4cb2964334a5c02fcb2af60bc027b09c67", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "favba/Decomp.jl", "max_issues_repo_head_hexsha": "edc0df4cb2964334a5c02fcb2af60bc027b09c67", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "favba/Decomp.jl", "max_forks_repo_head_hexsha": "edc0df4cb2964334a5c02fcb2af60bc027b09c67", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.35, "max_line_length": 98, "alphanum_fraction": 0.550295858, "num_tokens": 283}
|
#!/usr/bin/python
'''
Program:
This is a program for doing photometry on an observation data table.
Usage:
photometry.py [option file]
The input table should follow the form in TAT_env.obs_data_titles
Editor:
Jacob975
20181029
#################################
update log
20181029 version alpha 1:
1. The code works
20181205 version alpha 2:
1. Add an option for choosing a method of photometry you like.
'''
from sys import argv
import numpy as np
import time
import photometry_lib
from mysqlio_lib import TAT_auth, save2sql_EP, save2sql_CATA, find_source_match_coords
import TAT_env
from astropy.time import Time
import matplotlib.pyplot as plt
from test_EP import flux2mag
import collections
from input_lib import option_photometry
def take_data_within(start_date, end_date, ra_cntr_str, dec_cntr_str):
#----------------------------------------
times = ['{0}-{1}-{2}T12:00:00'.format(start_date[:4], start_date[4:6], start_date[6:]),
'{0}-{1}-{2}T12:00:00'.format(end_date[:4], end_date[4:6], end_date[6:])]
t = Time(times, format='isot', scale='utc')
start_jd = t.jd[0]
end_jd = t.jd[1]
ra_cntr = float(ra_cntr_str)
dec_cntr = float(dec_cntr_str)
#----------------------------------------
# Query data
cnx = TAT_auth()
cursor = cnx.cursor()
print 'start JD : {0}'.format(start_jd)
print 'end JD : {0}'.format(end_jd)
print "Center at ({0}, {1})".format(ra_cntr, dec_cntr)
print "band: {0}, exptime : {1}".format(band, exptime)
print 'Start ID : {0}, Numbers of aux star : {1}'.format(begin_of_aux, no_of_aux)
# Selected by Coordinate.
cursor.execute('select * from {0} where `JD` between {1} and {2} \
and `RA` between {3} and {4} \
and `DEC` between {5} and {6}'\
.format(TAT_env.obs_data_tb_name,
start_jd,
end_jd,
ra_cntr-0.5,
ra_cntr+0.5,
dec_cntr-0.5,
dec_cntr+0.5
))
data = cursor.fetchall()
data = np.array(data)
# Take the ID of selected images.
if band == 'skip' and exptime == 'skip':
print ('No band and exptime selection.')
return data
elif band == 'skip':
print ('Selected by exptime.')
band_selection = ''
exptime_selection = 'and `EXPTIME` = {0}'.format(exptime)
cursor.execute('select `ID` from {0} where `JD` between {1} and {2}\
{3} {4}'
.format(TAT_env.im_tb_name,
start_jd,
end_jd,
band_selection,
exptime_selection
))
elif exptime == 'skip':
print ('Selected by bands.')
band_selection = 'and `FILTER` = "{0}"'.format(band)
exptime_selection = ''
cursor.execute('select `ID` from {0} where `JD` between {1} and {2}\
{3} {4}'
.format(TAT_env.im_tb_name,
start_jd,
end_jd,
band_selection,
exptime_selection
))
else:
print ('Selected by bands and exptime.')
cursor.execute('select `ID` from {0} where `JD` between {1} and {2}\
and `FILTER` = "{3}"\
and `EXPTIME` = {4}'
.format(TAT_env.im_tb_name,
start_jd,
end_jd,
band,
exptime
))
selected_image_ID = cursor.fetchall()
cursor.close()
cnx.close()
# Selected by Bands and Exposure Time.
selected_image_ID = np.array(selected_image_ID)
ID_index = TAT_env.obs_data_titles.index('FILEID')
selected_data = []
for source in data:
dummy_index = np.where(selected_image_ID == source[ID_index])
if len(dummy_index[0]) >= 1:
selected_data.append(source)
selected_data = np.array(selected_data)
return selected_data
def EP_process(data):
#----------------------------------------
# Load the index of some parameters
bjd_index = TAT_env.obs_data_titles.index('BJD')
inst_mag_index = TAT_env.obs_data_titles.index('INST_MAG')
e_inst_mag_index = TAT_env.obs_data_titles.index('E_INST_MAG')
target_name_index = TAT_env.obs_data_titles.index('NAME')
fileID_index = TAT_env.obs_data_titles.index("FILEID")
#----------------------------------------
# Pick several brightest stars from each frame
# They have to be the same set of stars in different frames.
# Take all the data in the first frame
first_bjd = np.amin(data[:, bjd_index])
first_frame_data = data[data[:,bjd_index] == first_bjd]
# Sort the first frame data by the brightness
first_frame_data = first_frame_data[np.argsort(first_frame_data[:,inst_mag_index])]
# Take the data from all frames.
all_fileIDs = data[:,fileID_index]
fileIDs = [item for item, count in collections.Counter(all_fileIDs).items() if count > 1]
source_list = []
selected_source_name = []
# Find sources found in all frames.
for source in first_frame_data[int(begin_of_aux):]:
if len(source_list) >= int(no_of_aux):
break
if source[target_name_index] == var_star:
#print ("Skipped, it is an var star")
continue
source_data = data[data[:,target_name_index] == source[target_name_index]]
source_fileIDs = source_data[:,fileID_index]
#print ('# of A frames: {0}, # of B frames: {1}'.format(len(source_fileIDs), len(fileIDs)))
if len(source_fileIDs) == len(fileIDs):
#print ("Take it")
source_error = source_data[:, e_inst_mag_index]
source_error[source_error == 0.0] = 1e-4
source_data_lite = np.transpose(np.array([source_data[:, bjd_index],
source_data[:, inst_mag_index],
source_error]))
source_list.append(source_data_lite)
selected_source_name.append(source[target_name_index])
continue
else:
#print ("Abort it")
continue
#----------------------------------------
# Do photometry on Bright Stars only, save the result.
source_data_array = np.array(source_list)
print (np.array(selected_source_name))
print (source_data_array.shape)
stu = photometry_lib.EP(source_data_array[0], source_data_array)
ems, var_ems, m0s, var_m0s = stu.make_airmass_model()
#----------------------------------------
# Pick an image and find its center position.
cnx = TAT_auth()
cursor = cnx.cursor()
print (fileIDs)
cursor.execute('select * from {0} where `ID` = {1}'.format(TAT_env.im_tb_name, fileIDs[0]))
img_data = cursor.fetchall()
cursor.close()
cnx.close()
img_ra_cntr = float(img_data[0][4])
img_dec_cntr = float(img_data[0][5])
# Get all possible target within the region.
observed_targets = find_source_match_coords(img_ra_cntr, img_dec_cntr, margin = TAT_env.pix1*1024./3600.)
# For each observed target, do photometry on it.
for source in observed_targets:
# Take the name of the source
source_name = source[target_name_index]
# Get the data of the source from original dataset.
data2 = data[np.isin(data[:,target_name_index], source_name)]
# Take the ID, time, magnitude, and uncertainties.
observation_data_ID = data2[:,0]
time_array = data2[:, bjd_index]
mag_array = data2[:, inst_mag_index]
err_mag_array = data2[:, e_inst_mag_index]
# Combine and do EP phot.
source_data = np.transpose(np.array([time_array, mag_array, err_mag_array]))
failure, correlated_target, matched = stu.phot(source_data)
if failure:
print 'One event {0} cannot be measured.'.format(source_name)
continue
observation_data_ID = observation_data_ID[matched]
save2sql_EP(correlated_target, observation_data_ID)
return False
# Find the filter corresponding to a given fileID
def find_filter(fileID):
cnx = TAT_auth()
cursor = cnx.cursor()
cursor.execute("select `FILTER` from TAT.{0} where ID='{1}'".format(
TAT_env.im_tb_name,
fileID))
data = cursor.fetchall()
data = np.array(data).flatten()
ans = data[0]
cursor.close()
cnx.close()
return ans
def CATA_process(data):
#----------------------------------------
# Save the index of some parameters
fileID_index = TAT_env.obs_data_titles.index("FILEID")
ID_index = TAT_env.obs_data_titles.index('ID')
#----------------------------------------
# Load data frame by frame
fileID_array = np.unique(data[:,fileID_index])
for fileID in fileID_array:
# Take all extracted sources on that frame.
frame_src_data = data[data[:,fileID_index] == fileID]
_filter = find_filter(fileID)
stu = photometry_lib.CATA(frame_src_data, _filter)
failure = stu.make_airmass_model()
if failure:
print 'Air mass model failed.'
continue
mag, err_mag = stu.phot()
mag_array = np.transpose(np.array([mag, err_mag]))
observation_data_ID = frame_src_data[:,ID_index]
# save the result into database
save2sql_CATA(mag_array, observation_data_ID)
return 0
#--------------------------------------------
# Main code
if __name__ == "__main__":
# Measure time
start_time = time.time()
#----------------------------------------
# Laod argv
stu = option_photometry()
if len(argv) != 2:
print 'Error!'
print 'The number of arguments is wrong.'
print 'Usage: photometry.py [option file]'
print 'You should modify the [option file] before execution.'
stu.create()
exit(1)
options = argv[1]
phot_type,\
start_date,\
end_date,\
ra_cntr,\
dec_cntr,\
band,\
exptime,\
begin_of_aux,\
no_of_aux,\
var_star = stu.load(options)
#----------------------------------------
# Load data
data = take_data_within(start_date, end_date, ra_cntr, dec_cntr)
# Sort data by BJD
bjd_index = TAT_env.obs_data_titles.index('BJD')
BJD = data[:,bjd_index]
BJD_index = np.argsort(BJD)
data = data[BJD_index]
if phot_type == 'EP':
failure = EP_process(data)
elif phot_type == 'CATA':
failure = CATA_process(data)
#---------------------------------------
# Measure time
elapsed_time = time.time() - start_time
print "Exiting Main Program, spending ", elapsed_time, "seconds."
|
{"hexsha": "e7d18330ac1afb829c3e44bcf6d0a45e6b0b802a", "size": 11181, "ext": "py", "lang": "Python", "max_stars_repo_path": "photometry.py", "max_stars_repo_name": "jacob975/TATIRP", "max_stars_repo_head_hexsha": "2d81fa280e039aa931c6f8456632a23ef123282a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "photometry.py", "max_issues_repo_name": "jacob975/TATIRP", "max_issues_repo_head_hexsha": "2d81fa280e039aa931c6f8456632a23ef123282a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-08-22T03:15:22.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-19T17:55:31.000Z", "max_forks_repo_path": "photometry.py", "max_forks_repo_name": "jacob975/TATIRP", "max_forks_repo_head_hexsha": "2d81fa280e039aa931c6f8456632a23ef123282a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5088339223, "max_line_length": 109, "alphanum_fraction": 0.5622931759, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2626}
|
import numpy as np
import codecs
import os
def init(root_training, root_emb):
global emb_dir, train_dir
emb_dir = root_emb
train_dir = root_training
def get_embeddings(what='expression'):
uri_file = '%s/%s.emb.u' % (emb_dir, what)
vector_file = '%s/%s.emb.v' % (emb_dir, what)
header_file = '%s/%s.emb.h' % (emb_dir, what)
label_file = '%s/%s.emb.l' % (emb_dir, what)
# load embeddings
vectors = np.array([line.strip().split(' ') for line in codecs.open(vector_file, 'r', 'utf-8')], np.float32)
uris = np.array([line.strip() for line in codecs.open(uri_file, 'r', 'utf-8')])
lbs = np.array([line.strip() for line in codecs.open(label_file, 'r', 'utf-8').read().split('\n')[:-1]])
try:
heads = np.array([line.strip() for line in codecs.open(header_file, 'r', 'utf-8')])
# header for printing
head_label = heads[0].split()
head_val = heads[1].split()
head_dim = []
for i in range(0, len(head_val)):
for j in range(0, int(head_val[i])):
head_dim.append(head_label[i])
heads_print = [head_label, head_val]
except FileNotFoundError:
head_dim = None
heads_print = None
return vectors, uris, lbs, head_dim, heads_print
def all_training(what='expression'):
return [{
'name': 'pp_concerts',
'playlists': _load_training('concerts/output/list/philharmonie', what)
}, {
'name': 'itema3_concerts',
'playlists': _load_training('concerts/output/list/itema3', what)
}, {
'name': 'web-radio',
'playlists': _load_training('web-radio/output/list', what)
}, {
'name': 'spotify_pl',
'playlists': _load_training('spotify/output/playlists/list', what)
}]
def _load_training(sub, what='expression'):
folder = os.path.join(train_dir, sub, what)
playlists = []
for f in sorted(os.listdir(folder)):
file = '%s/%s' % (folder, f)
data = np.array([line.strip() for line in codecs.open(file, 'r', 'utf-8')])
playlists.append({
'name': file,
'data': data
})
return playlists
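# Minimal usage sketch, kept as a comment; the paths are hypothetical. `init`
# must be called first so the module-level directories are set for the loaders.
#
#   import doremus_data
#   doremus_data.init('/data/training', '/data/embeddings')
#   vectors, uris, labels, head_dim, heads = doremus_data.get_embeddings('expression')
#   datasets = doremus_data.all_training('expression')
#   print(datasets[0]['name'], len(datasets[0]['playlists']))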
|
{"hexsha": "a49fa3cb1476c524a2f4948b255d465496237649", "size": 2168, "ext": "py", "lang": "Python", "max_stars_repo_path": "training/doremus_data.py", "max_stars_repo_name": "DOREMUS-ANR/recommender", "max_stars_repo_head_hexsha": "027e0dcb3639f03204c67777e2e10aac8505a70a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-03-28T15:48:18.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-06T08:50:34.000Z", "max_issues_repo_path": "training/doremus_data.py", "max_issues_repo_name": "DOREMUS-ANR/recommender", "max_issues_repo_head_hexsha": "027e0dcb3639f03204c67777e2e10aac8505a70a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "training/doremus_data.py", "max_forks_repo_name": "DOREMUS-ANR/recommender", "max_forks_repo_head_hexsha": "027e0dcb3639f03204c67777e2e10aac8505a70a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4202898551, "max_line_length": 112, "alphanum_fraction": 0.5936346863, "include": true, "reason": "import numpy", "num_tokens": 588}
|
module rsdft_allgather_module
implicit none
private
public :: d_rsdft_allgatherv_div
integer :: nblock_default=4
integer :: n_opt, n_opt_h
contains
subroutine d_rsdft_allgatherv_div( n, a, ir, id, comm, nblk_in )
implicit none
integer,intent(in) :: n
real(8),intent(inout) :: a(n)
integer,intent(in) :: ir(0:), id(0:)
integer,intent(in) :: comm
integer,intent(in) :: nblk_in
integer :: nblk
logical :: disp_sw
integer :: i0,i1,nprc,mrnk,ierr,p
integer :: nmax,ndat,i,j
integer,allocatable :: irr(:),idd(:),id0(:),id1(:)
real(8),allocatable :: tmp(:)
include 'mpif.h'
call write_border( 1, " d_rsdft_allgatherv_div(start)" )
call check_disp_switch( disp_sw, 0 )
nblk = nblk_in
nprc = size(ir)
call MPI_Comm_rank( comm, mrnk, ierr )
allocate( tmp(nblk*nprc) ); tmp=0.0d0
allocate( irr(0:nprc-1) ); irr=0
allocate( idd(0:nprc-1) ); idd=0
allocate( id0(0:nprc-1) ); id0=0
allocate( id1(0:nprc-1) ); id1=0
id0(:) = id(:)
nmax = maxval(ir)
do i = 1, nmax, nblk
do p=0,nprc-1
id1(p) = min( id0(p)+nblk, id(p)+ir(p) ) - 1
irr(p) = id1(p) - id0(p) + 1
idd(p) = sum(irr(0:p))-irr(p)
end do
call MPI_Allgatherv( a(id0(mrnk)+1),irr(mrnk),MPI_REAL8 &
,tmp,irr,idd,MPI_REAL8,comm,ierr )
do p=0,nprc-1
if ( p /= mrnk ) then
do j=1,irr(p)
a(id0(p)+j)=tmp(idd(p)+j)
end do
end if
id0(p) = id1(p) + 1
end do
end do
deallocate( id1 )
deallocate( id0 )
deallocate( idd )
deallocate( irr )
deallocate( tmp )
call write_border( 1, " d_rsdft_allgatherv_div(end)" )
end subroutine d_rsdft_allgatherv_div
subroutine d_rsdft_allgather( a, b, comm, ierr, nblock_in )
implicit none
real(8),intent(in) :: a(:)
real(8),intent(out) :: b(:)
integer,intent(in) :: comm
integer,intent(out) :: ierr
integer,optional,intent(in) :: nblock_in
integer :: na, nb, nprocs, nblock
integer :: i,i0,i1,mm,p
real(8),allocatable :: c(:)
include 'mpif.h'
call MPI_Comm_size( comm, nprocs, ierr )
na=size(a)
nb=size(b)
if ( present(nblock_in) ) then
nblock = nblock_in
else
nblock = nblock_default
end if
allocate( c(nblock*nprocs) ); c=0.0d0
do i=1,na,nblock
i0 = i
i1 = min(i0+nblock-1,na)
mm = i1-i0+1
call MPI_Allgather(a(i0),mm,MPI_REAL8,c,mm,MPI_REAL8,comm,ierr)
do p=0,nprocs-1
b(p*na+i0:p*na+i1) = c(p*mm+1:p*mm+mm)
end do
end do
deallocate( c )
end subroutine d_rsdft_allgather
subroutine test_allgather
implicit none
integer :: i,j,k,m,mt,n,i0,ierr,myrank,nprocs,npow,p
real(8),allocatable :: a(:), b(:), c(:)
real(8) :: ct,ct0,ct1,ctmin,et,et0,et1,etmin
include 'mpif.h'
!return
call write_border( 0, " test_allgather(start)" )
call MPI_Comm_rank(MPI_COMM_WORLD,myrank,ierr)
call MPI_Comm_size(MPI_COMM_WORLD,nprocs,ierr)
npow=16
n=2**npow
allocate( a(n) ); a(:)=myrank+1
allocate( b(n*nprocs) ); b(:)=0.0d0
allocate( c(n*nprocs) ); c(:)=0.0d0
ctmin=1.d100
etmin=1.d100
do j=npow,0,-1
m = 2**j
mt=0
do i=1,2**(npow-j)
mt=mt+m
end do
b=0.0d0
c=0.0d0
call MPI_Barrier( MPI_COMM_WORLD, ierr )
call cpu_time(ct0) ; et0=mpi_wtime()
do k=1,10
do i=1,2**(npow-j)
i0=(i-1)*m+1
call MPI_Allgather(a(i0),m,MPI_REAL8,b,m,MPI_REAL8,MPI_COMM_WORLD,ierr)
do p=0,nprocs-1
c(p*mt+i0:p*mt+i0+m-1)=b(p*m+1:p*m+m)
end do
end do
end do ! k
call MPI_Barrier( MPI_COMM_WORLD, ierr )
call cpu_time(ct1) ; et1=mpi_wtime()
ct=ct1-ct0
et=et1-et0
ctmin=min(ct,ctmin)
if ( et < etmin ) then
n_opt = m
etmin = et
end if
if ( myrank == 0 ) then
write(*,'(1x,3i12,4f10.5,2x,i8,g15.8)') m,i-1,mt,ct,ctmin,et,etmin,count(c/=0.0d0),sum(c)
!do p=0,nprocs-1
! write(*,*) p, count(nint(c)==p+1)
!end do
end if
end do ! j
deallocate( c )
deallocate( b )
deallocate( a )
call MPI_BCAST(n_opt,1,MPI_INTEGER,0,MPI_COMM_WORLD,ierr)
n_opt_h=n_opt/2
if ( myrank == 0 ) write(*,*) "n_opt, n_opt_h=",n_opt,n_opt_h
call write_border( 0, " test_allgather(end)" )
end subroutine test_allgather
end module rsdft_allgather_module
|
{"hexsha": "4a202b833421672dbeda9f95e9ef12f8170f0c31", "size": 4620, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/rsdft_allgather_module.f90", "max_stars_repo_name": "j-iwata/RSDFT_DEVELOP", "max_stars_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-02T05:03:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T05:03:05.000Z", "max_issues_repo_path": "src/rsdft_allgather_module.f90", "max_issues_repo_name": "j-iwata/RSDFT_DEVELOP", "max_issues_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/rsdft_allgather_module.f90", "max_forks_repo_name": "j-iwata/RSDFT_DEVELOP", "max_forks_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-22T02:44:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-22T02:44:58.000Z", "avg_line_length": 24.972972973, "max_line_length": 99, "alphanum_fraction": 0.5621212121, "num_tokens": 1699}
|
# ADG with two real variables and Covariance inequality
**author:Alessio Benavoli**
<a href="http://www.alessiobenavoli.com"> alessiobenavoli.com </a>
We will learn how to build a PyRational **ADG (Almost Desirable Gambles)** belief model on the outcome of an experiment whose space of possibility is $\mathbb{R}^2$.
```python
%load_ext autoreload
%autoreload 2
from __future__ import absolute_import
from PyRational.models.ADG import ADG
from sympy import symbols, Interval, Piecewise, Eq, exp
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
*PyRational* uses *Sympy* for symbolic mathematics. We need to define in *Sympy* a `symbol` associated to the real variable and its relative domain (we use Sympy `Interval` for the latter).
```python
x1=symbols('x1', real=True)
x2=symbols('x2', real=True)
domain_x=[Interval(-10,10),Interval(-10,10)]
```
```python
model = ADG([x1,x2],domain_x)
model
```
<h4>ADG model</h4><table width="100%" border="3" ><tr> <th bgcolor="#FFCC33" width="30%" style="text-align: left;"> List of Symbols </th><td bgcolor="#f1f1f1" style="text-align: left;">[x1, x2]</th></tr><tr> <th bgcolor="#FFDD33" width="30%" style="text-align: left;"> Domain </th><td bgcolor="#f6f6f6" style="text-align: left;">Ω=Interval(-10, 10) x Interval(-10, 10)</th></tr><tr> <th bgcolor="#FFCC33" width="30%" style="text-align: left;"> List of desirable gambles </th><td bgcolor="#f1f1f1" style="text-align: left;">G=[]</th></tr><tr> <th bgcolor="#FFDD33" width="30%" style="text-align: left;"> Avoiding sure loss? </th><td bgcolor="#f6f6f6" style="text-align: left;"> to be verified </th></tr></table>
We assume that our agent, Alice, finds the following gambles desirable.
```python
G=[]
G.append( x1)
G.append(-x1)
G.append(x1**2-1)
G.append(1-x1**2)
G.append( x2)
G.append(-x2)
G.append(x2**2-1)
G.append(1-x2**2)
```
We add all these gambles to `model` as follows:
```python
model.add_gambleList(G)
model
```
<h4>ADG model</h4><table width="100%" border="3" ><tr> <th bgcolor="#FFCC33" width="30%" style="text-align: left;"> List of Symbols </th><td bgcolor="#f1f1f1" style="text-align: left;">[x1, x2]</th></tr><tr> <th bgcolor="#FFDD33" width="30%" style="text-align: left;"> Domain </th><td bgcolor="#f6f6f6" style="text-align: left;">Ω=Interval(-10, 10) x Interval(-10, 10)</th></tr><tr> <th bgcolor="#FFCC33" width="30%" style="text-align: left;"> List of desirable gambles </th><td bgcolor="#f1f1f1" style="text-align: left;">G=[x1, -x1, x1**2 - 1, -x1**2 + 1, x2, -x2, x2**2 - 1, -x2**2 + 1]</th></tr><tr> <th bgcolor="#FFDD33" width="30%" style="text-align: left;"> Avoiding sure loss? </th><td bgcolor="#f6f6f6" style="text-align: left;"> to be verified </th></tr></table>
```python
model.Gambles
```
[x1, -x1, x1**2 - 1, -x1**2 + 1, x2, -x2, x2**2 - 1, -x2**2 + 1]
Note that $G$ is a list that includes all Alice's desirable gambles. We now `build` the belief model
and check whether it avoids sure loss:
```python
optimoptions={'method_LISP': 'Cutting_plane', #'Cutting_plane', 'discretise'
'SolverLP':'linprog', #'linprog', 'cplex'
'LP_acc_constraints':1e-8,
'SolverNLP':'differential_evolution',
'NLP_alpha_cut':-0.00001,
'num_support_points': 150,
'verbose':False}
model.buildModel(options=optimoptions)
model.check_avs(options=optimoptions)
model
```
/home/benavoli/anaconda3/lib/python3.6/site-packages/scipy/optimize/_linprog_ip.py:1262: OptimizeWarning: Solving system with option 'sym_pos':True failed. It is normal for this to happen occasionally, especially as the solution is approached. However, if you see this frequently, consider setting option 'sym_pos' to False.
OptimizeWarning)
/home/benavoli/anaconda3/lib/python3.6/site-packages/scipy/optimize/_linprog_ip.py:1274: OptimizeWarning: Solving system with option 'sym_pos':False failed. This may happen occasionally, especially as the solution is approached. However, if you see this frequently, your problem may be numerically challenging. If you cannot improve the formulation, consider setting 'lstsq' to True. Consider also setting `presolve` to True, if it is not already.
OptimizeWarning)
Belief Model avoids sure loss
<h4>ADG model</h4><table width="100%" border="3" ><tr> <th bgcolor="#FFCC33" width="30%" style="text-align: left;"> List of Symbols </th><td bgcolor="#f1f1f1" style="text-align: left;">[x1, x2]</th></tr><tr> <th bgcolor="#FFDD33" width="30%" style="text-align: left;"> Domain </th><td bgcolor="#f6f6f6" style="text-align: left;">Ω=Interval(-10, 10) x Interval(-10, 10)</th></tr><tr> <th bgcolor="#FFCC33" width="30%" style="text-align: left;"> List of desirable gambles </th><td bgcolor="#f1f1f1" style="text-align: left;">G=posi(ℒ(Ω)<sup>+</sup> ∪ [x1, x1**2 - 1, x2, x2**2 - 1])</th></tr><tr> <th bgcolor="#FFDD33" width="30%" style="text-align: left;"> Avoiding sure loss? </th><td bgcolor="#f6f6f6" style="text-align: left;"> Yes </th></tr></table>
So Alice is **rational** or, equivalently, her set of desirable gambles is coherent.
## Inference
Assume Alice is interested in computing her maximum buying/minimum selling price for the gamble
$$
f=x_1 x_2
$$
We can do that as follows:
```python
f_range=(None,None)
f=x1*x2
lp=model.lower_prevision(f,f_range,options=optimoptions)
up=model.upper_prevision(f,f_range,options=optimoptions)
print(lp,str("<= E[x_1 x_2] <="), up)
```
-0.9955920104136098 <= E[x_1 x_2] <= 0.9983673733966469
We have obtained the covariance inequality for standardised variables:
$$
|E(X_1X_2)|^2 \leq E(X_1^2) E(X_2^2).
$$
which implies
$$
-1=-\sqrt{E(X_1^2) E(X_2^2)} \leq E(X_1X_2) \leq \sqrt{E(X_1^2) E(X_2^2)}=1.
$$
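As a quick numerical sanity check, independent of PyRational, the inequality can be verified with NumPy on standardised samples (the sampled variables and seed below are purely illustrative):
```python
import numpy as np

rng = np.random.default_rng(0)
z1, z2 = rng.standard_normal((2, 100000))
x1s = (z1 - z1.mean()) / z1.std()            # standardised: mean 0, variance 1
x2s = 0.6 * z1 + 0.8 * z2                    # correlated with x1s by construction
x2s = (x2s - x2s.mean()) / x2s.std()
lhs = np.mean(x1s * x2s) ** 2
rhs = np.mean(x1s ** 2) * np.mean(x2s ** 2)  # equals 1 after standardisation
print(lhs, "<=", rhs, lhs <= rhs)
```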
## Structural judgments (independence)
Under a judgment of 'independence', if Alice finds the gambles in $G_x$ on $x_1$ desirable and the gambles
in $G_y$ on $x_2$ desirable, then she should also find the gambles in $G_x \otimes G_y$ desirable.
```python
Gx=[]
Gx.append( Piecewise((1,True)) ) # constant 1
Gx.append( x1)
Gx.append(-x1)
Gx.append(x1**2-1)
Gx.append(1-x1**2)
Gy=[]
Gy.append( Piecewise((1,True)) )
Gy.append( x2)
Gy.append(-x2)
Gy.append(x2**2-1)
Gy.append(1-x2**2)
Gprod=[a*b for a in Gx for b in Gy]
Gprod
```
[1,
x2,
-x2,
x2**2 - 1,
-x2**2 + 1,
x1,
x1*x2,
-x1*x2,
x1*(x2**2 - 1),
x1*(-x2**2 + 1),
-x1,
-x1*x2,
x1*x2,
-x1*(x2**2 - 1),
-x1*(-x2**2 + 1),
x1**2 - 1,
x2*(x1**2 - 1),
-x2*(x1**2 - 1),
(x1**2 - 1)*(x2**2 - 1),
(x1**2 - 1)*(-x2**2 + 1),
-x1**2 + 1,
x2*(-x1**2 + 1),
-x2*(-x1**2 + 1),
(-x1**2 + 1)*(x2**2 - 1),
(-x1**2 + 1)*(-x2**2 + 1)]
```python
model1 = ADG([x1,x2],domain_x)
model1.add_gambleList(Gprod)
model1.buildModel(options=optimoptions)
model1.check_avs(options=optimoptions)
model1
```
Belief Model avoids sure loss
<h4>ADG model</h4><table width="100%" border="3" ><tr> <th bgcolor="#FFCC33" width="30%" style="text-align: left;"> List of Symbols </th><td bgcolor="#f1f1f1" style="text-align: left;">[x1, x2]</th></tr><tr> <th bgcolor="#FFDD33" width="30%" style="text-align: left;"> Domain </th><td bgcolor="#f6f6f6" style="text-align: left;">Ω=Interval(-10, 10) x Interval(-10, 10)</th></tr><tr> <th bgcolor="#FFCC33" width="30%" style="text-align: left;"> List of desirable gambles </th><td bgcolor="#f1f1f1" style="text-align: left;">G=posi(ℒ(Ω)<sup>+</sup> ∪ [1, x2, x2**2 - 1, x1, x1*x2, x1*(x2**2 - 1), x1*(-x2**2 + 1), x1**2 - 1, x2*(x1**2 - 1), (x1**2 - 1)*(x2**2 - 1), (x1**2 - 1)*(-x2**2 + 1), x2*(-x1**2 + 1), (-x1**2 + 1)*(x2**2 - 1), (-x1**2 + 1)*(-x2**2 + 1)])</th></tr><tr> <th bgcolor="#FFDD33" width="30%" style="text-align: left;"> Avoiding sure loss? </th><td bgcolor="#f6f6f6" style="text-align: left;"> Yes </th></tr></table>
```python
f_range=(None,None)
f=x1*x2
lp=model1.lower_prevision(f,f_range,options=optimoptions)
up=model1.upper_prevision(f,f_range,options=optimoptions)
print(lp,str("<= E[x_1 x_2] <="), up)
```
9.996338666551363e-09 <= E[x_1 x_2] <= -9.998929295651493e-09
This time both bounds are numerically zero, so $E[x_1 x_2]=0$, as independence requires.
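The same fact can be illustrated with a quick Monte Carlo check outside PyRational (sample size and seed are illustrative): for independently drawn standardised samples the product moment factorises and is numerically zero.
```python
import numpy as np

rng = np.random.default_rng(1)
x1s = rng.standard_normal(100000)
x2s = rng.standard_normal(100000)  # drawn independently of x1s
print(np.mean(x1s * x2s))          # ~0, matching E[x1 x2] = E[x1] E[x2] = 0
```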
|
{"hexsha": "95f13e53350607ac6333ea540a3c5138a6ea6bdb", "size": 15607, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "notebooks/Two_Real_Variables_and_Covariance_Inequality.ipynb", "max_stars_repo_name": "PyRational/PyRational", "max_stars_repo_head_hexsha": "dad982bbd7a0b1f9a27f87e0c1d069a922384450", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-11-16T10:12:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-06T11:30:51.000Z", "max_issues_repo_path": "notebooks/Two_Real_Variables_and_Covariance_Inequality.ipynb", "max_issues_repo_name": "PyRational/PyRational", "max_issues_repo_head_hexsha": "dad982bbd7a0b1f9a27f87e0c1d069a922384450", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/Two_Real_Variables_and_Covariance_Inequality.ipynb", "max_forks_repo_name": "PyRational/PyRational", "max_forks_repo_head_hexsha": "dad982bbd7a0b1f9a27f87e0c1d069a922384450", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-10-28T14:43:49.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-28T14:43:49.000Z", "avg_line_length": 35.230248307, "max_line_length": 1015, "alphanum_fraction": 0.5542384827, "converted": true, "num_tokens": 3343}
|
module SE
using DataFrames
using Random
using XLSX
using StructArrays
using StatsBase
using CSV
using Main.JOH
using JuMP
using JSON
""" create a variety of SSIT methods. Accept a parameter to multiply each time
limit by. """
function make_SSIT_methods(m=60; n_threads=6)
[
JOH.Matheur.SSIT.make_SSIT_method(
[.001, .005, .01, .02, .05],
[m*5, m*5, m*5, m*5, m*5],
"even time", n_threads)
JOH.Matheur.SSIT.make_SSIT_method(
[.001, .001, .001, .001, .001],
[m*5,m*5,m*5,m*5,m*5],
"one tolerance", n_threads)
JOH.Matheur.SSIT.make_SSIT_method(
[.001, .001, .005, .005, .005],
[m*5, m*5, m*5, m*5, m*5],
"tight tolerances", n_threads)
JOH.Matheur.SSIT.make_SSIT_method(
[.001, .005, .01, .02, .05],
[m*2, m*4, m*5, m*6, m*8],
"increasing time", n_threads)
JOH.Matheur.SSIT.make_SSIT_method(
[.001, .005, .01, .02, .05],
[m*8, m*6, m*5, m*4, m*2],
"decreasing time", n_threads)
]
end
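# Usage sketch (hypothetical values): build the five SSIT variants with a
# 60-unit base time limit and 6 solver threads.
#   methods = make_SSIT_methods(60; n_threads=6)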
struct MethodProblemResult
method_name
problem_id
lowest_gap
highest_reached_tolerance
total_time
last_phase_time
cplex_obj
true_obj
infeasibility
n_phases
end
MethodProblemResult(method, problem, solution, last_row) = MethodProblemResult(
method.name,
problem.id.id,
last_row[!, :gap],
last_row[!, :rtol],
last_row[!, :elapsed_time],
last_row[!, :solve_time],
last_row[!, :objective],
solution.objective,
solution.infeasibility,
last_row[!, :index])
mutable struct ExperimentResults{T}
problem_ids::Vector{T}
SSIT_phases::Vector{DataFrame}
method_problem_results::Vector{MethodProblemResult}
end
ExperimentResults() = ExperimentResults([], [], [])
function flatten_ssit(df::DataFrame, tolerances)
times = []
gaps = []
objectives = []
n_rows = length(df[!, 1])
for i in 1:n_rows
push!(times, df[i, :][:elapsed_time])
push!(gaps, df[i, :][:gap])
push!(objectives, df[i, :][:objective])
end
flat_df = DataFrame()
for i in 1:n_rows
flat_df[!, Symbol("phase $i time")] = [times[i]]
flat_df[!, Symbol("phase $i gap")] = [gaps[i]]
flat_df[!, Symbol("phase $i obj")] = [objectives[i]]
end
flat_df[!, :termination] = [last(df)[:term_stat]]
flat_df[!, :lowest_gap] = [last(df)[:gap]]
flat_df
end
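# Illustrative output of flatten_ssit: a 2-phase run yields one row with the
# columns "phase 1 time", "phase 1 gap", "phase 1 obj", "phase 2 time",
# "phase 2 gap", "phase 2 obj", plus :termination and :lowest_gap.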
function include_aux_data(df::DataFrame, method, problem_id)
df[!, :method] .= method.name
for field in fieldnames(typeof(problem_id))
val = getfield(problem_id, field)
df[!, Symbol("problem_$(field)")] = [val]
end
df
end
function include_sol_data(df, ssit_phases, model)
    lp = last(ssit_phases, 1)  # last phase row (currently unused)
try
df[!, :objective] = [objective_value(model)]
catch e
df[!, :objective] = [-1]
end
df
end
# Fall back to the `id` field when `p` does not support indexing by key.
_get_id(p) = try p["id"] catch; p.id end
function summarize_ssit(ssit_phases::DataFrame, method, problem, model)
df = include_aux_data(flatten_ssit(ssit_phases, method.tolerances),
method, _get_id(problem))
df = include_sol_data(df, ssit_phases, model)
df
end
function generate_comparison_data(
method::JOH.Matheur.SSIT.SSIT_method,
problems::Vector{T},
mips_model; results_dir="results") where T
results = []
for problem in problems
model = mips_model(problem)
ssit_phases = JOH.Matheur.evaluate(model, method)
result_df = summarize_ssit(ssit_phases, method, problem, model)
CSV.write("$(results_dir)/$(problem.id.id).csv", result_df)
push!(results, result_df)
end
results
end
function generate_comparison_data2(
method::JOH.Matheur.SSIT.SSIT_method,
    problems::Vector{T};
    results_dir="results") where T
results = []
for problem in problems
ssit_phases = JOH.Matheur.evaluate(problem.model, method)
result_df = summarize_ssit(ssit_phases, method, problem, problem.model)
CSV.write("$(results_dir)/$(_get_id(problem.id)).csv", result_df)
push!(results, result_df)
problem.model = nothing
end
results
end
function save_model(m, m_path, s_path)
write_to_file(m, m_path)
try
open(s_path, "w") do f
print(f, json(value.(all_variables(m))))
end
catch e
if !(isa(e, JuMP.OptimizeNotCalled))
rethrow()
end
end
end
function read_solution(s_path::String)
try
open(s_path, "r") do f
JSON.parse(read(f,String))
end
catch e
false
end
end
function log_ssit_run(m::JuMP.Model, method, res_dir::String, optimizer,
getdettime)
JOH.Matheur.set_threads!(m, method.num_threads)
for i in 1:length(method.tolerances)
#create a directory to store this phase's information
phase_dir = joinpath(res_dir, "$(i)")
mkpath(phase_dir)
#generate paths to data files
m_path, s_path, r_path = map(n->joinpath(phase_dir, n),
["model.mps", "start_sol.json", "results.json"])
#save the model and starting solution
save_model(m, m_path, s_path) #TODO: save the method and phase as well
#delete the current model, then replace from the saved record
#this is to make starting from the saved files deterministic
m = nothing
m = read_from_file(m_path)
solution = read_solution(s_path)
set_optimizer(m, optimizer)
#update the model according to the SSIT phase parameters
JOH.Matheur.set_tolerance!(m, method.tolerances[i])
JOH.Matheur.set_time!(m, method.times[i])
JOH.Matheur.set_threads!(m, method.num_threads)
if solution != false
set_start_value.(all_variables(m), solution)
end
# run the optimization, and record the elapsed time
start_time = time()
optimize!(m)
end_time = time()
elapsed_time = end_time - start_time
            #make sure Julia frees the C optimizer's memory
GC.gc() #this doesn't always happen automatically
row = JOH.Matheur.get_DF_row(m, elapsed_time=elapsed_time, index=i,
getdettime=getdettime)
GC.gc() #this doesn't always happen automatically
open(r_path, "w") do f
print(f, json(row))
end
if termination_status(m) == MOI.OPTIMAL || termination_status(m) ==
MOI.INFEASIBLE
break
end
end
end
function log_method_results(
method::JOH.Matheur.SSIT.SSIT_method,
problems::Vector{T},
mips_model, res_dir, optimizer, getdettime) where T <: JOH.Problem
rm(res_dir, force=true, recursive=true)
mkpath(res_dir)
for problem in problems
problem_dir = joinpath(res_dir, "$(problem.id.id)")
mkdir(problem_dir)
log_ssit_run(mips_model(problem), method, problem_dir, optimizer,
getdettime)
end
end
function ba_rep(ba)
join([b == 1 ? "1" : "0" for b in ba], "")
end
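# Example (illustrative): ba_rep([1, 0, 1]) == "101"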
end
|
{"hexsha": "f500b1f0d074bc805bed95247b1346a5a8578237", "size": 6304, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "experiments/SSIT.jl", "max_stars_repo_name": "Dibillilia/JuliaOptHeuristics", "max_stars_repo_head_hexsha": "e7acc803d8037969b98534cccba87b99028a0f5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiments/SSIT.jl", "max_issues_repo_name": "Dibillilia/JuliaOptHeuristics", "max_issues_repo_head_hexsha": "e7acc803d8037969b98534cccba87b99028a0f5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/SSIT.jl", "max_forks_repo_name": "Dibillilia/JuliaOptHeuristics", "max_forks_repo_head_hexsha": "e7acc803d8037969b98534cccba87b99028a0f5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.969581749, "max_line_length": 79, "alphanum_fraction": 0.6992385787, "num_tokens": 1940}
|
import sys
import itertools
sys.path.append('/home/shunan/Code/CNN_Doc2Vec/imdb')
sys.path.append('/home/shunan/Code/CNN_Doc2Vec/Amazon_Doc2Vec')
import imdb_experiments
import amazon_experiments
import os
import cPickle
import subprocess
import numpy as np
from training import train
from training import tools
from scipy.io import loadmat
from encode_amazon import preprocess as amazon_preprocess
from encode_imdb import preprocess as imdb_preprocess
import pdb
DATA_PATH = '/home/shunan/Code/Data/'
MAX_EPOCHS = 5
# Script to do a grid search across the different parameters.
def generate_param_combinations(hyper_params):
    '''Generate and return all combinations of the hyperparameters for the grid search.'''
all_params = sorted(hyper_params)
all_combs = itertools.product(*(hyper_params[name] for name in all_params))
all_combs = list(all_combs)
combinations_list = []
for comb in all_combs:
d = dict(zip(all_params, comb))
combinations_list.append(d)
return combinations_list
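# Example (illustrative):
#   generate_param_combinations({'dim': [300, 600], 'grad-clip': [5.0]})
#   -> [{'dim': 300, 'grad-clip': 5.0}, {'dim': 600, 'grad-clip': 5.0}]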
def get_data(dataset):
'''Get the data that is to be encoded.'''
word_set = set()
dict_f = open(os.path.join(DATA_PATH, 'word2vec/dict.txt'), 'r')
for line in dict_f:
word_set.add(line.strip())
dict_f.close()
if dataset == 'amazon':
# Getting the data.
with open(os.path.join(DATA_PATH, 'amazon_food/train_data.pkl'), 'r') as f:
train_data_all = cPickle.load(f)
train_labels = np.array(train_data_all[1]) - 1
train_data = train_data_all[0]
with open(os.path.join(DATA_PATH, 'amazon_food/test_data.pkl'), 'r') as f:
test_data_all = cPickle.load(f)
test_labels = np.array(test_data_all[1]) - 1
test_data = test_data_all[0]
# binarizing the data
I = train_labels != 3
train_labels_bin = train_labels[I] >= 4
train_vecs_bin = []
for i in range(len(I)):
if I[i]:
train_vecs_bin.append(train_data[i])
I = test_labels != 3
test_labels_bin = test_labels[I] >= 4
test_vecs_bin = []
for i in range(len(I)):
if I[i]:
test_vecs_bin.append(test_data[i])
train_preprocessed = amazon_preprocess(train_vecs_bin, word_set)
test_preprocessed = amazon_preprocess(test_vecs_bin, word_set)
return train_preprocessed, train_labels_bin, test_preprocessed, test_labels_bin
elif dataset == 'imdb':
train_preprocessed, test_preprocessed = [], []
temp = loadmat(os.path.join(DATA_PATH, 'imdb_sentiment/imdb_sentiment.mat'))
# Grabbing the test data first
test_data = temp['test_data']
for sen in test_data:
sen = imdb_preprocess(sen[0][0].strip(), word_set)
test_preprocessed.append(sen)
# Grabbing the training data
train_data = temp['train_data']
train_labels = temp['train_labels']
train_labels = train_labels.reshape([train_labels.shape[0]])
# Only use the data that has labels.
I = train_labels != 0
train_labels_sup = train_labels[I]
train_data_sup = train_data[I]
test_labels = temp['test_labels']
test_labels = test_labels.reshape([test_labels.shape[0]])
for sen in train_data_sup:
sen = imdb_preprocess(sen[0][0].strip(), word_set)
train_preprocessed.append(sen)
test_labels_sup = test_labels >= 7
train_labels_sup = train_labels_sup >= 7
return train_preprocessed, train_labels_sup, test_preprocessed, test_labels_sup
else:
return None
def call_training(param, n_words, dataset, dict_loc, reload_, encoder, save_loc):
'''
Train the skip-thought model as a subprocess.
'''
subprocess_call = ['python', './training/train.py']
for option in param:
subprocess_call.append('--' + option)
subprocess_call.append(str(param[option]))
additional_params = ['--n-words', str(n_words), '--dataset', dataset, '--dictionary', dict_loc, '--encoder',
encoder, '--saveto', save_loc, '--max-epochs', '1']
if reload_:
additional_params.append('--reload')
subprocess_call.extend(additional_params)
subprocess.call(subprocess_call)
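# Resulting invocation (illustrative):
#   python ./training/train.py --dim 300 ... --n-words 38830 --dataset amazon
#   --dictionary <dict_loc> --encoder gru --saveto <save_loc> --max-epochs 1 [--reload]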
def run_grid_search(hyper_params, dataset):
'''
Run the grid search experiments, given the hyper-parameters
'''
all_params = generate_param_combinations(hyper_params)
if dataset == 'amazon':
n_words = 38830
dict_location = '/home/shunan/Code/skip-thoughts/experiments/amazon/word_dicts.pkl'
elif dataset == 'imdb':
n_words = 64526
dict_location = '/home/shunan/Code/skip-thoughts/experiments/imdb/word_dicts.pkl'
exp_info = {
'dataset': dataset,
'param_num': 0,
'epoch_num': 0,
'max_acc_uni': 0,
'max_acc_uni_params': None,
'max_acc_bi': 0,
'max_acc_bi_params': None,
'max_acc_combine': 0,
'max_acc_combine_params': None
}
uni_save_loc = '/home/shunan/Code/skip-thoughts/experiments/{}/model_uni.npz'.format(dataset)
bi_save_loc = '/home/shunan/Code/skip-thoughts/experiments/{}/model_bi.npz'.format(dataset)
dict_path = '/home/shunan/Code/skip-thoughts/experiments/{}/word_dicts.pkl'.format(dataset)
# Getting the data to encode, not for training.
train_data, train_labels, test_data, test_labels = get_data(dataset)
for p in range(len(all_params)):
        # Resume from a previous grid search: the first 5 parameter settings
        # were already evaluated, so skip them and start the rest from scratch.
        if p < 5:
            continue
        load = False
        e = 0
param = all_params[p]
print('Using hyper-parameter setting {} of {}'.format(p + 1, len(all_params)))
exp_info['param_num'] = p
while e < MAX_EPOCHS:
exp_info['epoch_num'] = e
call_training(param, n_words, dataset, dict_location, load, 'gru', uni_save_loc)
print('Training bidirectional model.')
call_training(param, n_words, dataset, dict_location, load, 'bidirectional', bi_save_loc)
if e == 0:
load = True
# Running the classification experiment.
model_uni = tools.load_model(path_to_model=uni_save_loc, path_to_dictionary=dict_path)
model_bi = tools.load_model(path_to_model=bi_save_loc, path_to_dictionary=dict_path)
print('Encoding uni-directional vectors')
uni_train_vectors = tools.encode(model_uni, train_data)
uni_test_vectors = tools.encode(model_uni, test_data)
print('Encoding bi-directional vectors')
bi_train_vectors = tools.encode(model_bi, train_data)
bi_test_vectors = tools.encode(model_bi, test_data)
combine_train_vectors = np.hstack((uni_train_vectors, bi_train_vectors))
combine_test_vectors = np.hstack((uni_test_vectors, bi_test_vectors))
# Training the classifier now.
if dataset == 'amazon':
acc = amazon_experiments.pre_trained_experiments(uni_train_vectors, train_labels, uni_test_vectors,
test_labels, 2)
if acc > exp_info['max_acc_uni']:
exp_info['max_acc_uni'] = acc
exp_info['max_acc_uni_params'] = param
acc = amazon_experiments.pre_trained_experiments(bi_train_vectors, train_labels, bi_test_vectors,
test_labels, 2)
if acc > exp_info['max_acc_bi']:
exp_info['max_acc_bi'] = acc
exp_info['max_acc_bi_params'] = param
acc = amazon_experiments.pre_trained_experiments(combine_train_vectors, train_labels, combine_test_vectors,
test_labels, 2)
if acc > exp_info['max_acc_combine']:
exp_info['max_acc_combine'] = acc
exp_info['max_acc_combine_params'] = param
elif dataset == 'imdb':
acc = imdb_experiments.pre_trained_experiments(uni_train_vectors, train_labels, uni_test_vectors,
test_labels)
if acc > exp_info['max_acc_uni']:
exp_info['max_acc_uni'] = acc
exp_info['max_acc_uni_params'] = param
acc = imdb_experiments.pre_trained_experiments(bi_train_vectors, train_labels, bi_test_vectors,
test_labels)
if acc > exp_info['max_acc_bi']:
exp_info['max_acc_bi'] = acc
exp_info['max_acc_bi_params'] = param
acc = imdb_experiments.pre_trained_experiments(combine_train_vectors, train_labels, combine_test_vectors,
test_labels)
if acc > exp_info['max_acc_combine']:
exp_info['max_acc_combine'] = acc
exp_info['max_acc_combine_params'] = param
# Dump the info
with open('./experiments/{}/accs.pkl'.format(dataset), 'w') as f:
cPickle.dump(exp_info, f)
e += 1
return exp_info
if __name__ == '__main__':
hyper_params = {
'dim': [300, 600],
'decay-c': [0.1, 0.],
'grad-clip': [5., 8.],
'maxlen-w': [20, 30, 50]
}
exp_info = run_grid_search(hyper_params, 'imdb')
with open('./experiments/imdb/accs.pkl', 'w') as f:
cPickle.dump(exp_info, f)
exp_info = run_grid_search(hyper_params, 'amazon')
with open('./experiments/amazon/accs.pkl', 'w') as f:
cPickle.dump(exp_info, f)
|
{"hexsha": "cc33fd32bb8b4230f2d820a057aa9dfc30297dbd", "size": 9972, "ext": "py", "lang": "Python", "max_stars_repo_path": "skip_thought_grid_search.py", "max_stars_repo_name": "zashuna/skip-thoughts", "max_stars_repo_head_hexsha": "dec2c97f47d2ad139f5ae8602faca40c81ac096b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skip_thought_grid_search.py", "max_issues_repo_name": "zashuna/skip-thoughts", "max_issues_repo_head_hexsha": "dec2c97f47d2ad139f5ae8602faca40c81ac096b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skip_thought_grid_search.py", "max_forks_repo_name": "zashuna/skip-thoughts", "max_forks_repo_head_hexsha": "dec2c97f47d2ad139f5ae8602faca40c81ac096b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7290836653, "max_line_length": 123, "alphanum_fraction": 0.6100080225, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2269}
|
[STATEMENT]
lemma new\<^sub>E\<^sub>l\<^sub>e\<^sub>m\<^sub>e\<^sub>n\<^sub>t_get\<^sub>S\<^sub>h\<^sub>a\<^sub>d\<^sub>o\<^sub>w\<^sub>R\<^sub>o\<^sub>o\<^sub>t [simp]:
assumes "new\<^sub>E\<^sub>l\<^sub>e\<^sub>m\<^sub>e\<^sub>n\<^sub>t h = (new_element_ptr, h')"
shows "get\<^sub>S\<^sub>h\<^sub>a\<^sub>d\<^sub>o\<^sub>w\<^sub>R\<^sub>o\<^sub>o\<^sub>t ptr h = get\<^sub>S\<^sub>h\<^sub>a\<^sub>d\<^sub>o\<^sub>w\<^sub>R\<^sub>o\<^sub>o\<^sub>t ptr h'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. get ptr h = get ptr h'
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
new\<^sub>E\<^sub>l\<^sub>e\<^sub>m\<^sub>e\<^sub>n\<^sub>t h = (new_element_ptr, h')
goal (1 subgoal):
1. get ptr h = get ptr h'
[PROOF STEP]
by(auto simp add: new\<^sub>E\<^sub>l\<^sub>e\<^sub>m\<^sub>e\<^sub>n\<^sub>t_def Let_def)
|
{"llama_tokens": 389, "file": "Shadow_DOM_classes_ShadowRootClass", "length": 2}
|
import numpy as np
def scale_convert(self,list_to_convert):
    """Takes a list of values, scales them with NumPy
    log10() and rounds to two decimal places.
    Arguments:
        list_to_convert {list} -- List of int or float values
    Returns:
        list -- List of float values, log10-scaled and
        rounded to two decimal places.
    """
    converted = np.array(list_to_convert)
    converted_log10 = np.log10(converted)
    converted_log10 = [round(i, 2) for i in converted_log10]
    return converted_log10
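# Example (illustrative; `self` is unused, so any placeholder works):
#   scale_convert(None, [10, 100, 1000]) -> [1.0, 2.0, 3.0]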
def convert_thousands(self,values_to_convert, to_convert=True):
    """Takes two inputs and divides a list of numbers by 1000
    Arguments:
        values_to_convert {list} -- List of int or float values
    Keyword Arguments:
        to_convert {bool} -- True/False if the list should be converted
        (default: {True})
    Returns:
        [list] -- List of int values
    """
    if to_convert:
        convert = np.array(values_to_convert)
        converted = convert / 1000
        # returns a list of int values (fractions truncated)
        converted_values = [int(i) for i in converted]
        return converted_values
    else:
        return values_to_convert
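# Example (illustrative): convert_thousands(None, [1500, 2500]) -> [1, 2]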
|
{"hexsha": "033bf7829f7b8326672ab91dd96765723e7d96a2", "size": 1229, "ext": "py", "lang": "Python", "max_stars_repo_path": "HelperFunctions.py", "max_stars_repo_name": "brianRingler/EDA-Tools-", "max_stars_repo_head_hexsha": "1870e786f1cd009f03a51243177e5b22a98bb921", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HelperFunctions.py", "max_issues_repo_name": "brianRingler/EDA-Tools-", "max_issues_repo_head_hexsha": "1870e786f1cd009f03a51243177e5b22a98bb921", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-03T17:46:45.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-03T17:46:45.000Z", "max_forks_repo_path": "HelperFunctions.py", "max_forks_repo_name": "brianRingler/EDA-Tools-", "max_forks_repo_head_hexsha": "1870e786f1cd009f03a51243177e5b22a98bb921", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.725, "max_line_length": 68, "alphanum_fraction": 0.6265256306, "include": true, "reason": "import numpy", "num_tokens": 272}
|
import datetime
import math
from datetime import timedelta

import numpy as np
import tables  # PyTables, used by Image.export_as
from osgeo import gdal, gdalnumeric, ogr, osr
# Alias PIL's Image so it is not shadowed by the Image class defined below.
from PIL import Image as PILImage
from PIL import ImageDraw
def convert_time(time_since_1900):
d = datetime.datetime(1900, 1, 1)
return (str(d+timedelta(hours=time_since_1900)))
def convert_time_reverse(date_time):
d1 = datetime.datetime(1900, 1, 1)
d2 = date_time
d3 = d2-d1
return ((d3.days*24)+(d3.seconds/3600))
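# Example (illustrative): convert_time(24) -> '1900-01-02 00:00:00', and
# convert_time_reverse(datetime.datetime(1900, 1, 2)) -> 24.0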
def kelvin_to_celsius(temperature):
return temperature-273.15
kelvin_to_celsius_vector = np.vectorize(kelvin_to_celsius)
class Grid:
'''
Data describes grid class.
The most important attributes are WGS-84 coordinates of the grid origin.
Then stepsize which is a tuple of x and y stepsize.
And griddata which is data matrix which needs to be fit into the grid.
'''
def __init__ (self, grid_origin, grid_stepsize, grid_size=None, grid_data=None):
'''
Initialization of the grid object that takes in the described 3 parameters.
'''
self._grid_origin = grid_origin
self._grid_stepsize = grid_stepsize
self._grid_size=grid_size
self._grid_data=grid_data
def get_gridorigin(self):
'''
Returns x,y coordinates of the grid origin.
'''
return self._grid_origin
def get_gridstepsize(self):
'''
Returns tuple of x,y grid step size.
'''
return self._grid_stepsize
def get_gridsize(self):
'''
Returns the number of rows and columns that are supposed to be inside the grid.
'''
return self._grid_size
def get_griddata(self):
'''
Returns the ndarray with the data that is fit into the grid.
'''
return self._grid_data
def get_affinetransformation(self):
'''
Returns gdal affine transformation matrix.
'''
return (self._grid_origin[0],self._grid_stepsize[0],0,self._grid_origin[1],0,self._grid_stepsize[1])
def iterate_sm_grids(self, step):
start=self._grid_origin
stop=start+np.multiply(self._grid_size, step)
        i=start[0]
        # Note: this loop relies on exact float equality; `step` must tile the
        # grid extent without rounding error, or the loop will not terminate.
        while (i!=stop[0]):
j=start[1]
while (j!=stop[1]):
yield [i,j]
j+=step[1]
i+=step[0]
def find_index(self,coordinate):
'''
Finds index of the point coordinate if it is within the grid.
Otherwise returns error.
'''
xOffset = math.floor(round(coordinate[0] - self._grid_origin[0], 2)/self._grid_stepsize[0])
yOffset = math.ceil(round(coordinate[1] - self._grid_origin[1], 2)/self._grid_stepsize[1])
return(xOffset,yOffset)
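    # Example (illustrative): with origin (-10.0, 10.0) and step (0.5, -0.5),
    # find_index((-9.0, 9.0)) returns (2, 2).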
class Image:
"""Multi-array image. Typically netCDF data format."""
def __init__ (self, data=None, metadata=None):
"""Initialize the image object.
Typically should have attributes such as data and metadata.
Data is a netCDF4 dataset object and metadata is a dictionary object."""
self._data = data
self._metadata = metadata
def get_dimensions(self):
return list(self._data.dimensions.keys())[::-1]
def get_variables(self):
return list(self._data.variables)
def get_data(self):
return self._data
def set_data(self, data):
self._data=data
def find_index(self, dictionary):
"""Find slice indices given the dictionary with slice dimension name and its' range.
For instance, dictionary {'latitude':[40,50]} would return index to make appropriate slice of dataframe"""
variable=list(dictionary.keys())[0]
if len(dictionary[variable])==2:
return np.where(np.logical_and(self._data.variables[variable][:]>=np.sort(dictionary[variable])[0], self._data.variables[variable][:]<=np.sort(dictionary[variable])[1]))[0]
elif len(dictionary[variable])==1:
return np.array(np.where(self._data.variables[variable][:]==dictionary[variable][0])).flatten()
else:
            raise ValueError('The dictionary should contain a variable name with its value or range.')
def slice (self, attribute, dictionary):
"""Create subImage in a way of slicing original Image by dictionary of attributes"""
dimensions=self._data.variables[attribute].dimensions
indices=[]
for dim in dimensions:
if dim in list(dictionary.keys()):
indices.append(self.find_index({dim:dictionary[dim]}))
else:
indices.append(slice(None))
return self._data.variables[attribute][tuple(indices)].data
def get_statistics(self, attribute, dictionary, kind):
dimensions=self._data.variables[attribute].dimensions
indices=[]
for dim in dimensions:
if dim in list(dictionary.keys()):
indices.append(self.find_index({dim:dictionary[dim]}))
else:
indices.append(slice(None))
if 'longitude' in list(dictionary.keys()):
min_longitude=np.min(dictionary['longitude'])
else:
min_longitude=np.min(self._data.variables['longitude'])
if 'latitude' in list(dictionary.keys()):
max_latitude=np.max(dictionary['latitude'])
else:
max_latitude=np.max(self._data.variables['latitude'])
if kind=='min':
data=self._data.variables[attribute][indices].min(axis=0)
elif kind=='max':
data=self._data.variables[attribute][indices].max(axis=0)
elif kind=='mean':
data=self._data.variables[attribute][indices].mean(axis=0)
elif kind=='sum':
data=self._data.variables[attribute][indices].sum(axis=0)
elif kind=='less_then_0_count':
def less_then_zero(a):
return (a<273.15).astype(int)
pre_data=np.apply_along_axis(less_then_zero,0, self._data.variables[attribute][indices])
data=pre_data.sum(axis=0)
        else:
            # Raise instead of printing: `data` would otherwise be unbound below.
            raise ValueError('This kind of statistical measurement is not yet available.')
return subImage(data,{'affine_transformation':(min_longitude,abs(self._data.variables['longitude'][1]-self._data.variables['longitude'][0]),0,max_latitude,0,-abs(self._data.variables['longitude'][1]-self._data.variables['longitude'][0]))})
def export_as(self, folder, filename, format):
if format=='h5':
create_folder_if_not_exists(folder)
h5file = tables.open_file(folder+filename+'.'+format, "w")
h5file.create_array(h5file.root, 'data', self._data, title='data')
h5file.close()
return (folder+filename)
else:
return ('export to this file format not supported')
class subImage:
'''
Data is a double(x,y)-array image.
Metadata is a dictionary object. One of the metadata keys should be 'affine_transformation'.
It holds affine transformation parameters from ogr.gdal.GetGeoTransform() function.
Typically represented by gdal array data type.
Another recommended metadata key in the dictionary is 'nodata' key referring to which value should be neglected.
'''
def __init__ (self, dataarray=None, metadata=None):
'''
        Initialize the subImage object.
        A numpy array (values in 2D space) must be provided, together with
        metadata in which the 'affine_transformation' and 'nodata' keys are important.
'''
self._data = dataarray
self._metadata = metadata
def get_metadata(self):
'''
Returns metadata dictionary.
'''
return self._metadata
def set_metadata(self, dictionary):
'''
Sets subImage metadata by dictionary.
'''
self._metadata=dictionary
def get_data(self):
'''
Returns 2D matrix of values.
'''
return self._data
def set_data(self,data_matrix):
'''
Sets self data by provided 2D matrix of values.
'''
self._data=data_matrix
def export_as_tif(self,filename):
'''
Export self data as GeoTiff 1-band image.
Output filename should be provided as a parameter.
'''
nrows,ncols=self._data.shape
geotransform = self._metadata['affine_transformation']
output_raster = gdal.GetDriverByName('GTiff').Create(filename, ncols, nrows, 1, gdal.GDT_Float32)
output_raster.SetGeoTransform(geotransform)
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
#srs.ImportFromWkt('GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]')
output_raster.SetProjection(srs.ExportToWkt())
output_raster.GetRasterBand(1).WriteArray(self._data)
output_raster.GetRasterBand(1).SetNoDataValue(-32767)
output_raster.FlushCache()
del output_raster
def clip_by_shape(self, geom_wkt, nodata=-32767):
'''
        Clip the image by a WKT geometry.
'''
rast = self._data
gt=self._metadata['affine_transformation']
poly=ogr.CreateGeometryFromWkt(geom_wkt)
# Convert the layer extent to image pixel coordinates
minX, maxX, minY, maxY = poly.GetEnvelope()
ulX, ulY = world_to_pixel(gt, minX, maxY)
lrX, lrY = world_to_pixel(gt, maxX, minY)
# Calculate the pixel size of the new image
pxWidth = int(lrX - ulX)
pxHeight = int(lrY - ulY)
# If the clipping features extend out-of-bounds and ABOVE the raster...
if gt[3] < maxY:
# In such a case... ulY ends up being negative--can't have that!
iY = ulY
ulY = 0
clip = rast[ulY:lrY, ulX:lrX]
# Create a new geomatrix for the image
gt2 = list(gt)
gt2[0] = minX
gt2[3] = maxY
# Map points to pixels for drawing the boundary on a blank 8-bit,
# black and white, mask image.
        raster_poly = PILImage.new('L', (pxWidth, pxHeight), 1)  # PIL image, not the Image class above
rasterize = ImageDraw.Draw(raster_poly)
def rec(poly_geom):
'''
Recursive drawing of parts of multipolygons over initialized PIL Image object using ImageDraw.Draw method.
'''
if poly_geom.GetGeometryCount()==0:
points=[]
pixels=[]
for p in range(poly_geom.GetPointCount()):
points.append((poly_geom.GetX(p), poly_geom.GetY(p)))
for p in points:
pixels.append(world_to_pixel(gt2, p[0], p[1]))
rasterize.polygon(pixels, 0)
if poly_geom.GetGeometryCount()>=1:
for j in range(poly_geom.GetGeometryCount()):
rec(poly_geom.GetGeometryRef(j))
rec(poly)
mask = image_to_array(raster_poly)
# Clip the image using the mask
try:
clip = gdalnumeric.choose(mask, (clip, nodata))
# If the clipping features extend out-of-bounds and BELOW the raster...
except ValueError:
# We have to cut the clipping features to the raster!
rshp = list(mask.shape)
if mask.shape[-2] != clip.shape[-2]:
rshp[0] = clip.shape[-2]
if mask.shape[-1] != clip.shape[-1]:
rshp[1] = clip.shape[-1]
mask.resize(*rshp, refcheck=False)
clip = gdalnumeric.choose(mask, (clip, nodata))
d={}
d['affine_transformation'],d['ul_x'],d['ul_y'],d['nodata']=gt2,ulX,ulY,-32767
return (clip, d)
def clip_by_shape_bb_buffer(self, envelope, buffer=0):
'''
        Clip the image by the bounding box of a WKT geometry, optionally adding a buffer in pixels.
'''
rast = self._data
gt=self._metadata['affine_transformation']
# Convert the layer extent to image pixel coordinates
minX = custom_floor(envelope[0],gt[1],precision_and_scale(gt[1])[1])
maxX = custom_ceiling(envelope[1],gt[1],precision_and_scale(gt[1])[1])
minY = custom_floor(envelope[2],gt[1],precision_and_scale(gt[1])[1])
maxY = custom_ceiling(envelope[3],gt[1],precision_and_scale(gt[1])[1])
minX-=(buffer*gt[1])
maxX+=(buffer*gt[1])
minY+=(buffer*gt[5])
maxY-=(buffer*gt[5])
ulX, ulY = world_to_pixel(gt, minX, maxY)
lrX, lrY = world_to_pixel(gt, maxX, minY)
# Calculate the pixel size of the new image
pxWidth = int(lrX - ulX)
pxHeight = int(lrY - ulY)
clip = rast[ulY:lrY, ulX:lrX]
# Create a new geomatrix for the image
gt2 = list(gt)
gt2[0] = minX
gt2[3] = maxY
d={}
d['affine_transformation'],d['ul_x'],d['ul_y']=gt2,ulX,ulY
return (clip, d)
def calculate_slope(self):
'''
Calculate slope from self data of DEM image.
'''
x, y = np.gradient(self._data)
slope = np.pi/2. - np.arctan(np.sqrt(x*x + y*y))
return (slope,self._metadata)
def calculate_azimuth(self):
'''
Calculate azimuth from self data of DEM image.
'''
x, y = np.gradient(self._data)
aspect = (np.arctan2(-x, y))*180/np.pi
return (aspect,self._metadata)
def get_min_value(self):
'''
Get self min value excluding self nodata value.
'''
return np.min(self._data[np.where(self._data!=self._metadata['nodata'])])
def get_max_value(self):
'''
Get self max value excluding self nodata value.
'''
return np.max(self._data[np.where(self._data!=self._metadata['nodata'])])
def get_mean_value(self):
'''
Get self mean value excluding self nodata values.
'''
return np.mean(self._data[np.where(self._data!=self._metadata['nodata'])])
def get_median_value(self):
'''
Get self median value excluding self nodata values.
'''
return np.median(self._data[np.where(self._data!=self._metadata['nodata'])])
|
{"hexsha": "8298fda8a840d8a0dc91798780555a634a79ee52", "size": 12382, "ext": "py", "lang": "Python", "max_stars_repo_path": "auxiliary_classes.py", "max_stars_repo_name": "JiriVales/agroclimatic-factors", "max_stars_repo_head_hexsha": "36e2924a0a19f16f6be151d457df9c465bb39a44", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-21T02:44:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-09T23:22:52.000Z", "max_issues_repo_path": "auxiliary_classes.py", "max_issues_repo_name": "JiriVales/agroclimatic-factors", "max_issues_repo_head_hexsha": "36e2924a0a19f16f6be151d457df9c465bb39a44", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "auxiliary_classes.py", "max_forks_repo_name": "JiriVales/agroclimatic-factors", "max_forks_repo_head_hexsha": "36e2924a0a19f16f6be151d457df9c465bb39a44", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1105527638, "max_line_length": 281, "alphanum_fraction": 0.7089323211, "include": true, "reason": "import numpy", "num_tokens": 3368}
|
/*
* BSD 2-Clause License
*
* Copyright (c) 2021, Christoph Neuhauser
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <unordered_map>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/algorithm/string.hpp>
#include "../libs/volk/volk.h"
#include <Utils/Convert.hpp>
#include <Utils/AppSettings.hpp>
#include <Utils/File/Logfile.hpp>
#include <Utils/File/FileUtils.hpp>
#include <Graphics/Vulkan/Utils/Instance.hpp>
#include "Internal/IncluderInterface.hpp"
#include "ShaderManager.hpp"
namespace sgl { namespace vk {
ShaderManagerVk::ShaderManagerVk(Device* device) : device(device) {
shaderCompiler = new shaderc::Compiler;
pathPrefix = sgl::AppSettings::get()->getDataDirectory() + "Shaders/";
indexFiles(pathPrefix);
// Was a file called "GlobalDefinesVulkan.glsl" found? If yes, store its content in the variable globalDefines.
auto it = shaderFileMap.find("GlobalDefinesVulkan.glsl");
if (it != shaderFileMap.end()) {
std::ifstream file(it->second);
if (!file.is_open()) {
Logfile::get()->writeError(
"ShaderManagerVk::ShaderManagerVk: Unexpected error occured while loading "
"\"GlobalDefinesVulkan.glsl\".");
}
globalDefines = std::string((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
}
globalDefinesMvpMatrices =
"layout (set = 1, binding = 0) uniform MatrixBlock {\n"
" mat4 mMatrix; // Model matrix\n"
" mat4 vMatrix; // View matrix\n"
" mat4 pMatrix; // Projection matrix\n"
" mat4 mvpMatrix; // Model-view-projection matrix\n"
"};\n\n";
}
ShaderManagerVk::~ShaderManagerVk() {
if (shaderCompiler) {
delete shaderCompiler;
shaderCompiler = nullptr;
}
}
ShaderStagesPtr ShaderManagerVk::getShaderStages(const std::vector<std::string> &shaderIds, bool dumpTextDebug) {
return createShaderStages(shaderIds, dumpTextDebug);
}
ShaderStagesPtr ShaderManagerVk::getShaderStages(
const std::vector<std::string> &shaderIds, const std::map<std::string, std::string>& customPreprocessorDefines,
bool dumpTextDebug) {
tempPreprocessorDefines = customPreprocessorDefines;
ShaderStagesPtr shaderStages = createShaderStages(shaderIds, dumpTextDebug);
tempPreprocessorDefines.clear();
return shaderStages;
}
ShaderModulePtr ShaderManagerVk::getShaderModule(const std::string& shaderId, ShaderModuleType shaderModuleType) {
ShaderModuleInfo info;
info.filename = shaderId;
info.shaderModuleType = shaderModuleType;
return FileManager<ShaderModule, ShaderModuleInfo>::getAsset(info);
}
ShaderModulePtr ShaderManagerVk::getShaderModule(
const std::string& shaderId, ShaderModuleType shaderModuleType,
const std::map<std::string, std::string>& customPreprocessorDefines) {
tempPreprocessorDefines = customPreprocessorDefines;
ShaderModuleInfo info;
info.filename = shaderId;
info.shaderModuleType = shaderModuleType;
ShaderModulePtr shaderModule = FileManager<ShaderModule, ShaderModuleInfo>::getAsset(info);
tempPreprocessorDefines.clear();
return shaderModule;
}
ShaderModuleType getShaderModuleTypeFromString(const std::string& shaderId) {
std::string shaderIdLower = boost::algorithm::to_lower_copy(shaderId);
ShaderModuleType shaderModuleType = ShaderModuleType::VERTEX;
if (boost::algorithm::ends_with(shaderIdLower.c_str(), "vertex")) {
shaderModuleType = ShaderModuleType::VERTEX;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "fragment")) {
shaderModuleType = ShaderModuleType::FRAGMENT;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "geometry")) {
shaderModuleType = ShaderModuleType::GEOMETRY;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "tesselationevaluation")) {
shaderModuleType = ShaderModuleType::TESSELATION_EVALUATION;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "tesselationcontrol")) {
shaderModuleType = ShaderModuleType::TESSELATION_CONTROL;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "compute")) {
shaderModuleType = ShaderModuleType::COMPUTE;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "raygen")) {
shaderModuleType = ShaderModuleType::RAYGEN;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "anyhit")) {
shaderModuleType = ShaderModuleType::ANY_HIT;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "closesthit")) {
shaderModuleType = ShaderModuleType::CLOSEST_HIT;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "miss")) {
shaderModuleType = ShaderModuleType::MISS;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "intersection")) {
shaderModuleType = ShaderModuleType::INTERSECTION;
} else if (boost::algorithm::ends_with(shaderIdLower.c_str(), "callable")) {
shaderModuleType = ShaderModuleType::CALLABLE;
} else {
if (boost::algorithm::contains(shaderIdLower.c_str(), "vert")) {
shaderModuleType = ShaderModuleType::VERTEX;
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "frag")) {
shaderModuleType = ShaderModuleType::FRAGMENT;
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "geom")) {
shaderModuleType = ShaderModuleType::GEOMETRY;
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "tess")) {
if (boost::algorithm::contains(shaderIdLower.c_str(), "eval")) {
shaderModuleType = ShaderModuleType::TESSELATION_EVALUATION;
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "control")) {
shaderModuleType = ShaderModuleType::TESSELATION_CONTROL;
}
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "comp")) {
shaderModuleType = ShaderModuleType::COMPUTE;
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "raygen")) {
shaderModuleType = ShaderModuleType::RAYGEN;
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "anyhit")) {
shaderModuleType = ShaderModuleType::ANY_HIT;
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "closesthit")) {
shaderModuleType = ShaderModuleType::CLOSEST_HIT;
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "miss")) {
shaderModuleType = ShaderModuleType::MISS;
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "intersection")) {
shaderModuleType = ShaderModuleType::INTERSECTION;
} else if (boost::algorithm::contains(shaderIdLower.c_str(), "callable")) {
shaderModuleType = ShaderModuleType::CALLABLE;
} else {
Logfile::get()->throwError(
std::string() + "ERROR: ShaderManagerVk::createShaderProgram: "
+ "Unknown shader type (id: \"" + shaderId + "\")");
return ShaderModuleType(0);
}
}
return shaderModuleType;
}
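// Illustrative mapping (not exhaustive): "GammaCorrection.Vertex" -> VERTEX,
// "Shadows.ClosestHit" -> CLOSEST_HIT; when no suffix matches, the substring
// fallback still resolves e.g. "MyFragShader" -> FRAGMENT.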
static bool dumpTextDebugStatic = false;
ShaderStagesPtr ShaderManagerVk::createShaderStages(const std::vector<std::string>& shaderIds, bool dumpTextDebug) {
dumpTextDebugStatic = dumpTextDebug;
std::vector<ShaderModulePtr> shaderModules;
for (const std::string &shaderId : shaderIds) {
ShaderModuleType shaderModuleType = getShaderModuleTypeFromString(shaderId);
ShaderModulePtr shaderModule = getShaderModule(shaderId, shaderModuleType);
if (!shaderModule) {
return ShaderStagesPtr();
}
shaderModules.push_back(shaderModule);
}
dumpTextDebugStatic = false;
ShaderStagesPtr shaderProgram(new ShaderStages(device, shaderModules));
return shaderProgram;
}
ShaderModulePtr ShaderManagerVk::loadAsset(ShaderModuleInfo& shaderInfo) {
std::string id = shaderInfo.filename;
std::string shaderString = getShaderString(id);
if (dumpTextDebugStatic) {
std::cout << "Shader dump (" << id << "):" << std::endl;
std::cout << "--------------------------------------------" << std::endl;
std::cout << shaderString << std::endl << std::endl;
}
shaderc::CompileOptions compileOptions;
for (auto& it : preprocessorDefines) {
compileOptions.AddMacroDefinition(it.first, it.second);
}
for (auto& it : tempPreprocessorDefines) {
compileOptions.AddMacroDefinition(it.first, it.second);
}
auto includerInterface = new IncluderInterface();
compileOptions.SetIncluder(std::unique_ptr<shaderc::CompileOptions::IncluderInterface>(includerInterface));
if (device->getInstance()->getInstanceVulkanVersion() < VK_API_VERSION_1_1) {
compileOptions.SetTargetSpirv(shaderc_spirv_version_1_0);
} else if (device->getInstance()->getInstanceVulkanVersion() < VK_API_VERSION_1_2) {
compileOptions.SetTargetSpirv(shaderc_spirv_version_1_3);
} else {
compileOptions.SetTargetSpirv(shaderc_spirv_version_1_5);
}
// Sets the target SPIR-V version. The generated module will use this version
// of SPIR-V. Each target environment determines what versions of SPIR-V
// it can consume. Defaults to the highest version of SPIR-V 1.0 which is
// required to be supported by the target environment. E.g. Default to SPIR-V
// 1.0 for Vulkan 1.0 and SPIR-V 1.3 for Vulkan 1.1.
const std::unordered_map<ShaderModuleType, shaderc_shader_kind> shaderKindLookupTable = {
{ ShaderModuleType::VERTEX, shaderc_vertex_shader },
{ ShaderModuleType::FRAGMENT, shaderc_fragment_shader },
{ ShaderModuleType::COMPUTE, shaderc_compute_shader },
{ ShaderModuleType::GEOMETRY, shaderc_geometry_shader },
{ ShaderModuleType::TESSELATION_CONTROL, shaderc_tess_control_shader },
{ ShaderModuleType::TESSELATION_EVALUATION, shaderc_tess_evaluation_shader },
#if VK_VERSION_1_2 && VK_HEADER_VERSION >= 162
{ ShaderModuleType::RAYGEN, shaderc_raygen_shader },
{ ShaderModuleType::ANY_HIT, shaderc_anyhit_shader },
{ ShaderModuleType::CLOSEST_HIT, shaderc_closesthit_shader },
{ ShaderModuleType::MISS, shaderc_miss_shader },
{ ShaderModuleType::INTERSECTION, shaderc_intersection_shader },
{ ShaderModuleType::CALLABLE, shaderc_callable_shader },
{ ShaderModuleType::TASK, shaderc_task_shader },
{ ShaderModuleType::MESH, shaderc_mesh_shader },
#endif
};
auto it = shaderKindLookupTable.find(shaderInfo.shaderModuleType);
if (it == shaderKindLookupTable.end()) {
sgl::Logfile::get()->writeError("Error in ShaderManagerVk::loadAsset: Invalid shader type.");
return ShaderModulePtr();
}
shaderc_shader_kind shaderKind = it->second;
shaderc::SpvCompilationResult compilationResult = shaderCompiler->CompileGlslToSpv(
shaderString.c_str(), shaderString.size(), shaderKind, id.c_str(), compileOptions);
if (compilationResult.GetNumErrors() != 0 || compilationResult.GetNumWarnings() != 0) {
sgl::Logfile::get()->writeError(compilationResult.GetErrorMessage());
if (compilationResult.GetNumErrors() != 0) {
return ShaderModulePtr();
}
}
std::vector<uint32_t> compilationResultWords(compilationResult.cbegin(), compilationResult.cend());
ShaderModulePtr shaderModule(new ShaderModule(
device, shaderInfo.filename, shaderInfo.shaderModuleType, compilationResultWords));
return shaderModule;
}
std::string ShaderManagerVk::loadHeaderFileString(const std::string &shaderName, std::string &prependContent) {
std::ifstream file(shaderName.c_str());
if (!file.is_open()) {
Logfile::get()->throwError(
std::string() + "Error in loadHeaderFileString: Couldn't open the file \"" + shaderName + "\".");
return "";
}
//std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
std::string fileContent = "#line 1\n";
// Support preprocessor for embedded headers
std::string linestr;
int lineNum = 1;
while (getline(file, linestr)) {
// Remove \r if line ending is \r\n
if (linestr.size() > 0 && linestr.at(linestr.size()-1) == '\r') {
linestr = linestr.substr(0, linestr.size()-1);
}
lineNum++;
if (boost::starts_with(linestr, "#include")) {
std::string includedFileName = getShaderFileName(getHeaderName(linestr));
std::string includedFileContent = loadHeaderFileString(includedFileName, prependContent);
fileContent += includedFileContent + "\n";
fileContent += std::string() + "#line " + toString(lineNum) + "\n";
} else if (boost::starts_with(linestr, "#extension") || boost::starts_with(linestr, "#version")) {
prependContent += linestr + "\n";
fileContent = std::string() + fileContent + "#line " + toString(lineNum) + "\n";
} else {
fileContent += std::string() + linestr + "\n";
}
}
file.close();
return fileContent;
}
std::string ShaderManagerVk::getHeaderName(const std::string &lineString) {
// Filename in quotes?
auto startFilename = lineString.find("\"");
auto endFilename = lineString.find_last_of("\"");
if (startFilename != std::string::npos && endFilename != std::string::npos) {
return lineString.substr(startFilename+1, endFilename-startFilename-1);
} else {
// Filename is user-specified #define directive?
std::vector<std::string> line;
boost::algorithm::split(line, lineString, boost::is_any_of("\t "), boost::token_compress_on);
if (line.size() < 2) {
Logfile::get()->writeError("Error in ShaderManagerVk::getHeaderFilename: Too few tokens.");
return "";
}
auto it = preprocessorDefines.find(line.at(1));
if (it != preprocessorDefines.end()) {
std::string::size_type startFilename = it->second.find('\"');
std::string::size_type endFilename = it->second.find_last_of('\"');
return it->second.substr(startFilename+1, endFilename-startFilename-1);
} else {
Logfile::get()->writeError("Error in ShaderManagerVk::getHeaderFilename: Invalid include directive.");
Logfile::get()->writeError(std::string() + "Line string: " + lineString);
return "";
}
}
}
void ShaderManagerVk::indexFiles(const std::string &file) {
if (FileUtils::get()->isDirectory(file)) {
// Scan content of directory
std::vector<std::string> elements = FileUtils::get()->getFilesInDirectoryVector(file);
for (std::string &childFile : elements) {
indexFiles(childFile);
}
} else if (FileUtils::get()->hasExtension(file.c_str(), ".glsl")) {
// File to index. "fileName" is name without path.
std::string fileName = FileUtils::get()->getPureFilename(file);
shaderFileMap.insert(make_pair(fileName, file));
}
}
std::string ShaderManagerVk::getShaderFileName(const std::string &pureFilename) {
auto it = shaderFileMap.find(pureFilename);
if (it == shaderFileMap.end()) {
sgl::Logfile::get()->writeError(
"Error in ShaderManagerVk::getShaderFileName: Unknown file name \"" + pureFilename + "\".");
return "";
}
return it->second;
}
std::string ShaderManagerVk::getPreprocessorDefines(ShaderModuleType shaderModuleType) {
std::string preprocessorStatements;
for (auto it = preprocessorDefines.begin(); it != preprocessorDefines.end(); it++) {
preprocessorStatements += std::string() + "#define " + it->first + " " + it->second + "\n";
}
if (shaderModuleType == ShaderModuleType::VERTEX || shaderModuleType == ShaderModuleType::GEOMETRY) {
preprocessorStatements += globalDefinesMvpMatrices;
}
preprocessorStatements += globalDefines;
return preprocessorStatements;
}
std::string ShaderManagerVk::getShaderString(const std::string &globalShaderName) {
auto it = effectSources.find(globalShaderName);
if (it != effectSources.end()) {
return it->second;
}
std::string::size_type filenameEnd = globalShaderName.find('.');
std::string pureFilename = globalShaderName.substr(0, filenameEnd);
std::string shaderFilename = getShaderFileName(pureFilename + ".glsl");
std::string shaderInternalId = globalShaderName.substr(filenameEnd + 1);
std::ifstream file(shaderFilename.c_str());
if (!file.is_open()) {
Logfile::get()->throwError(
std::string() + "Error in getShader: Couldn't open the file \"" + shaderFilename + "\".");
}
std::string shaderName;
std::string shaderContent = "#line 1\n";
std::string prependContent;
int lineNum = 1;
std::string linestr;
while (getline(file, linestr)) {
// Remove \r if line ending is \r\n
if (!linestr.empty() && linestr.at(linestr.size()-1) == '\r') {
linestr = linestr.substr(0, linestr.size()-1);
}
lineNum++;
if (boost::starts_with(linestr, "-- ")) {
if (!shaderContent.empty() && !shaderName.empty()) {
shaderContent = prependContent + shaderContent;
effectSources.insert(make_pair(shaderName, shaderContent));
}
shaderName = pureFilename + "." + linestr.substr(3);
ShaderModuleType shaderModuleType = getShaderModuleTypeFromString(shaderName);
shaderContent =
std::string() + getPreprocessorDefines(shaderModuleType) + "#line " + toString(lineNum) + "\n";
prependContent = "";
} else if (boost::starts_with(linestr, "#version") || boost::starts_with(linestr, "#extension")) {
prependContent += linestr + "\n";
shaderContent += "#line " + toString(lineNum) + "\n";
} else if (boost::starts_with(linestr, "#include")) {
std::string includedFileName = getShaderFileName(getHeaderName(linestr));
std::string includedFileContent = loadHeaderFileString(includedFileName, prependContent);
shaderContent += includedFileContent + "\n";
shaderContent += std::string() + "#line " + toString(lineNum) + "\n";
} else {
shaderContent += std::string() + linestr + "\n";
}
}
shaderContent = prependContent + shaderContent;
file.close();
if (!shaderName.empty()) {
effectSources.insert(make_pair(shaderName, shaderContent));
} else {
effectSources.insert(make_pair(pureFilename + ".glsl", shaderContent));
}
it = effectSources.find(globalShaderName);
if (it != effectSources.end()) {
return it->second;
}
Logfile::get()->writeError(std::string() + "Error in getShader: Couldn't find the shader \""
+ globalShaderName + "\".");
return "";
}
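// Illustrative .glsl layout consumed by getShaderString (hypothetical file
// "MyShader.glsl"):
//   -- Vertex
//   #version 450
//   ... vertex stage code ...
//   -- Fragment
//   #version 450
//   ... fragment stage code ...
// Requesting the id "MyShader.Vertex" returns the first section with the
// global preprocessor defines prepended.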
void ShaderManagerVk::invalidateShaderCache() {
assetMap.clear();
effectSources.clear();
}
}}
|
{"hexsha": "ec5ebbca02ad9130f2affe292b2a8813aa946ea6", "size": 20928, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Graphics/Vulkan/Shader/ShaderManager.cpp", "max_stars_repo_name": "chrismile/sgl", "max_stars_repo_head_hexsha": "03748cadbd1661285081c47775213091b665cb86", "max_stars_repo_licenses": ["MIT", "Unlicense", "Apache-2.0", "BSD-3-Clause"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2018-10-20T19:13:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-17T01:45:10.000Z", "max_issues_repo_path": "src/Graphics/Vulkan/Shader/ShaderManager.cpp", "max_issues_repo_name": "chrismile/sgl", "max_issues_repo_head_hexsha": "03748cadbd1661285081c47775213091b665cb86", "max_issues_repo_licenses": ["MIT", "Unlicense", "Apache-2.0", "BSD-3-Clause"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2018-10-20T20:56:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T20:32:46.000Z", "max_forks_repo_path": "src/Graphics/Vulkan/Shader/ShaderManager.cpp", "max_forks_repo_name": "chrismile/sgl", "max_forks_repo_head_hexsha": "03748cadbd1661285081c47775213091b665cb86", "max_forks_repo_licenses": ["MIT", "Unlicense", "Apache-2.0", "BSD-3-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-12-01T13:02:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-24T06:39:46.000Z", "avg_line_length": 45.1034482759, "max_line_length": 119, "alphanum_fraction": 0.6613149847, "num_tokens": 4812}
|
from torch.utils.tensorboard import SummaryWriter
from PIL import Image
import numpy as np
"""
TensorBoard is mainly used to visualize data from the training process, such as
how the loss, gradients, and other quantities change during training.
1. Before use, install the TensorBoard package:
conda install TensorBoard
2. Write code that logs the data you want to visualize.
3. Start the TensorBoard page with the command:
tensorboard --logdir=Pytorch/2-TensorBoard/logs --port=6007
"""
# The key argument of SummaryWriter is the directory where event files are saved
writer = SummaryWriter("logs")
image_path = "../dataset/hymenoptera_data/train/ants_image/67270775_e9fdf77e9d.jpg"
img_PIL = Image.open(image_path)
img_array = np.array(img_PIL)
print(type(img_array))
print(img_array.shape)
# Note: add_image's img_tensor must be a torch.Tensor, numpy.array, or string/blobname,
# and the image layout must match dataformats (here 'HWC'); see the docstring for details.
writer.add_image(tag="train", img_tensor=img_array, global_step=1, dataformats='HWC')
writer.close()
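# Scalars can be logged the same way (illustrative; `loss_value` and `step`
# are hypothetical):
#   writer.add_scalar("train/loss", loss_value, global_step=step)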
|
{"hexsha": "f46286efb17ec3a2b0373c7d119958b96a04711a", "size": 801, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pytorch/2-TensorBoard/2_add_img_tensorboard.py", "max_stars_repo_name": "pengchenyu111/PaperCodeReplication", "max_stars_repo_head_hexsha": "7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Pytorch/2-TensorBoard/2_add_img_tensorboard.py", "max_issues_repo_name": "pengchenyu111/PaperCodeReplication", "max_issues_repo_head_hexsha": "7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pytorch/2-TensorBoard/2_add_img_tensorboard.py", "max_forks_repo_name": "pengchenyu111/PaperCodeReplication", "max_forks_repo_head_hexsha": "7b8681654e25b7d707f4b4d7ebcfb85ffc0fd52a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6071428571, "max_line_length": 93, "alphanum_fraction": 0.7802746567, "include": true, "reason": "import numpy", "num_tokens": 283}
|
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from tkinter import *
from bot import *
import tkinter as tk
from threading import Thread
class BotGUI():
def __init__(self):
self.setup_backend()
self.setup_frontend()
self._updater_thread = Thread(target=self.automatic_update)
self._updater_thread.start()
self._root.mainloop()
###################################################################################################
# function: setup_backend
# purpose: initialize the bot architecture.
#
# description: This method should only be called in the constructor for this class unless
# The backend is purposefully destroyed. It will completely re-create the backend
###################################################################################################
def setup_backend(self):
socket = BotSocket(product=["BTC-USD", "LTC-USD", "ETH-USD", "BCH-USD"], channels=["matches"])
self._bot = Bot("Betty", "LTC-USD", socket)
###################################################################################################
# function: setup_frontend
# purpose: Creates the GUI for the user.
#
# description: This method should only be called in the constructor for this class with no
# exceptions. The GUI consists of:
# start/stop buttons
# portfolio pie chart
# price line chart + checkboxes and radio buttons to show the moving averages
# refresh button for pie chart and line chart
# radio buttons to choose which currency to trade.
###################################################################################################
def setup_frontend(self):
####################
# MAIN-WINDOW SETUP
####################
self._root = Tk()
self._root.title("Betty the trade bot")
#create a top and bottom frame to divide the window into 2 parts. You won't see this division in
#the window, but it helps us lay things out properly.
self._topframe = Frame(self._root)
self._bottomframe = Frame(self._root)
self._topframe.pack(side=TOP)
self._bottomframe.pack(side=BOTTOM)
self._pie_chart_frame = Frame(self._topframe)
self._line_chart_frame = Frame(self._bottomframe)
self._upper_dash_board = Frame(self._topframe)
self._lower_dash_board = Frame(self._bottomframe)
self._pie_chart_frame.pack(side=RIGHT)
self._line_chart_frame.pack(side=RIGHT)
self._upper_dash_board.pack(side=LEFT)
self._lower_dash_board.pack(side=LEFT)
#######################
# WIDGET SETUP
#######################
#create start/stop buttons
self._startButton = Button(self._upper_dash_board, text="Start Bot", bg="green", fg="black", command=self._bot.start)
self._stopButton = Button(self._upper_dash_board, text="Stop Bot" , bg="red" , fg="white", command=self._bot.stop )
self._startButton.grid(row=0, column=0)
self._stopButton.grid( row=0, column=1)
##########################################
# Choose currency to trade (radio buttons)
##########################################
        # Keep a reference on self: a local Tk variable can be garbage-collected,
        # which would break the radio buttons.
        self._currency_var = tk.StringVar()
        self._currency_var.set("LTC-USD")
        myList = ["BTC-USD", "BCH-USD", "LTC-USD", "ETH-USD"]
        tk.Radiobutton(self._upper_dash_board, text=myList[0], padx=20, variable=self._currency_var, value=myList[0], command=lambda: self._bot.set_currency(myList[0])).grid(row=1, column=0)
        tk.Radiobutton(self._upper_dash_board, text=myList[1], padx=20, variable=self._currency_var, value=myList[1], command=lambda: self._bot.set_currency(myList[1])).grid(row=2, column=0)
        tk.Radiobutton(self._upper_dash_board, text=myList[2], padx=20, variable=self._currency_var, value=myList[2], command=lambda: self._bot.set_currency(myList[2])).grid(row=3, column=0)
        tk.Radiobutton(self._upper_dash_board, text=myList[3], padx=20, variable=self._currency_var, value=myList[3], command=lambda: self._bot.set_currency(myList[3])).grid(row=4, column=0)
###############################################################################################################
# Allows user to decide the duration of their investments. This is done by comparing different moving averages.
###############################################################################################################
duration = tk.StringVar()
duration.set("long")
tk.Label(self._upper_dash_board, text="Trade Duration").grid(row=1, column=2)
tk.Radiobutton(self._upper_dash_board, text="Short", variable=duration, value="short", command=lambda: self._bot._trade_hands.set_trade_duration(duration.get())).grid(row=2, column=2)
tk.Radiobutton(self._upper_dash_board, text="Medium",variable=duration, value="medium",command=lambda: self._bot._trade_hands.set_trade_duration(duration.get())).grid(row=3, column=2)
tk.Radiobutton(self._upper_dash_board, text="Long", variable=duration, value="long", command=lambda: self._bot._trade_hands.set_trade_duration(duration.get())).grid(row=4, column=2)
################################################################
# Allows the user to decide how sensitive they want sells to be.
################################################################
self._sell_cushion_slider = Scale(self._upper_dash_board, from_=0, to=1, length=300, tickinterval=0.5, resolution=0.01, orient=HORIZONTAL, command=self._bot._trade_hands.set_sell_cushion)
self._sell_cushion_slider.grid(row=5, column=0, columnspan=3)
self._sell_cushion_slider.set(.3)
#####################################
# show position history in a list box
#####################################
        scrollbar = Scrollbar(self._upper_dash_board, orient=VERTICAL)
        scrollbar.grid(row=0, column=6, rowspan=5)
        self._position_history_box = tk.Listbox(self._upper_dash_board, yscrollcommand=scrollbar.set)
        scrollbar.config(command=self._position_history_box.yview)  # link the scrollbar back to the listbox
        self._position_history_box.grid(row=0, column=3, columnspan=3, rowspan=5)
######################################################
# Choose which averages to show on graph (check boxes)
######################################################
self._average_type = StringVar()
self._average_type.set("simple")
#This should be handled more gracefully eventually.
self._CheckVars = [IntVar(), IntVar(), IntVar(), IntVar()]
self._averages = [(" SMA 30", 30), (" SMA 10", 10), (" SMA 5", 5), (" SMA 1", 1)]
        i = 0
#these widgets are check boxes for showing the individual average sizes.
for string, size in self._averages:
x = tk.Checkbutton(self._lower_dash_board, text = string, variable = self._CheckVars[i], onvalue = 1, offvalue = 0, height=1, width = 6, command= lambda:self.update_line_charts(self._CheckVars, self._averages, self._average_type))
x.pack(side=BOTTOM)
i+=1
########################################################
# Set up the price chart and portfolio/trading chart
########################################################
crypto_history = self._bot._data_center._crypto_history
self._line_chart_figure = Figure(figsize=(20, 3))
self._price_plot = self._line_chart_figure.add_subplot(111)
self._price_plot.set_xlabel("Time")
self._price_plot.set_ylabel("Dollars")
self._price_plot.set_title("Price vs. Time")
self._portfolio_chart_figure = Figure(figsize=(20,3))
self._portfolio_plot = self._portfolio_chart_figure.add_subplot(111)
self._portfolio_plot.set_xlabel("Time")
self._portfolio_plot.set_ylabel("Dollars")
self._portfolio_plot.set_title("Portfolio Value vs. Time")
        #Embed the plot in the window: create a TkAgg canvas for the figure, draw it, and attach a navigation toolbar.
canvas3 = FigureCanvasTkAgg(self._portfolio_chart_figure, master=self._line_chart_frame)
canvas3.show()
canvas3.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
toolbar3 = NavigationToolbar2TkAgg(canvas3, self._line_chart_frame)
toolbar3.update()
canvas3._tkcanvas.pack(side=BOTTOM, fill=BOTH, expand=1)
        #Embed the plot in the window, as above.
canvas = FigureCanvasTkAgg(self._line_chart_figure, master=self._line_chart_frame)
canvas.show()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
toolbar = NavigationToolbar2TkAgg(canvas, self._line_chart_frame)
toolbar.update()
canvas._tkcanvas.pack(side=BOTTOM, fill=BOTH, expand=1)
########################################################
# Set up the pie chart
########################################################
portfolio = self._bot._data_center.get_portfolio()
portfolio_keys = portfolio.keys()
labels = [key for key in portfolio_keys if "USD" in key]
amounts = [portfolio[key]["value"] for key in portfolio_keys if "USD" in key]
colors = ["gold", "green", "blue", "red", "purple"]
explode = [0,0,0,0,0]
self._pie_chart_figure = Figure(figsize=(5, 3.5), dpi=100) #we keep the pie chart figure
self._pie_plot = self._pie_chart_figure.add_subplot(111) #we also keep the sub plot
self._pie_plot.pie(amounts, explode=explode, labels=labels, colors=colors, autopct='%5.2f%%', shadow=True, startangle=140)[0] #plot the pie chart
self._pie_chart_figure.gca().add_artist(matplotlib.patches.Circle((0,0),0.75,color='black', fc='white',linewidth=1.25)) #plot a circle over it to make a donut
self._pie_plot.axis('equal')
        #Embed the plot in the window, as above.
canvas2 = FigureCanvasTkAgg(self._pie_chart_figure, master=self._pie_chart_frame)
canvas2.show()
canvas2.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
toolbar2 = NavigationToolbar2TkAgg(canvas2, self._pie_chart_frame)
toolbar2.update()
canvas2._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
        #This is the refresh button. Pressing it resets the line graphs and the pie chart, but you still have to click the chart for it to update.
self._refresh_button = Button(self._upper_dash_board, text="refresh graphics", bg="blue", fg="white", command= lambda: self.refresh_graphics(self._CheckVars, self._averages, self._average_type))
self._refresh_button.grid(row=0, column=2)
###################################################################################################
# function: automatic_update
# purpose: refresh graphics automatically
#
# description: This method will constantly call the refresh_graphics method while the bot is
    #                 running. It updates the graphs every few seconds.
###################################################################################################
    def automatic_update(self):
        while True:
            # sleep first so that the loop does not busy-wait while the bot is stopped
            time.sleep(5)
            if self._bot._running:
                self.refresh_graphics(self._CheckVars, self._averages, self._average_type)
###################################################################################################
# function: refresh_graphics
# purpose: refresh both the line graph and the pie chart
#
# description: This method is called when the refresh button is clicked, and also should be
# called automatically by another thread causing the plots to update periodically
###################################################################################################
def refresh_graphics(self, CheckVars, Average_list, average_type):
self.update_line_charts(CheckVars, Average_list, average_type)
self.update_pie_chart()
self.update_positions_history()
###################################################################################################
# function: update_positions_history
# purpose: show all past and current holdings
#
# description: This method will check for any trades that have been posted in the trade
# history, but not posted in the listbox
###################################################################################################
def update_positions_history(self):
trade_history = self._bot._data_center._trade_history
current_position = self._bot._trade_hands._long_position
self._position_history_box.delete(0, END)
for past_position in trade_history:
entry = past_position["entry_price"]
exit = past_position["exit_price"]
gain = ((exit-entry)/entry) * 100
msg = "{} {} {}%".format(str(entry), str(exit), str(gain))
self._position_history_box.insert(END, msg)
if current_position != None:
msg = str(current_position["entry_price"])
self._position_history_box.insert(END, msg)
###################################################################################################
    # function: update_line_charts
# purpose: shows new data that was not shown the last time the chart was updated, and
# reacts to the average checkboxes being selected/deselected.
#
# description: This will replot the entire graph, taking into account user preferences of
# averages they wish to see.
###################################################################################################
def update_line_charts(self, CheckVars, Average_list, average_type):
try:
###stuff dealing with the price plot
self._price_plot.clear()
self._portfolio_plot.clear()
ma_collection = self._bot._data_center._ma_collection
crypto_history = self._bot._data_center._crypto_history
portfolio_history = self._bot._data_center._portfolio_history
trade_history = self._bot._data_center._trade_history
for i in range(len(CheckVars)):
if CheckVars[i].get() == 1:
times = [j["time"] for j in ma_collection[Average_list[i][1]]]
#times = matplotlib.dates.date2num(times)
values = [j[average_type.get()] for j in ma_collection[Average_list[i][1]]]
if len(times) != len(values):
print("Could not update graph because x and y dimensions were not the same for the ", Average_list[i][0], ".")
return
self._price_plot.plot_date(times, values)[0]
else:
self._price_plot.plot_date([],[])
times = [i["time"] for i in crypto_history[self._bot.currency()]]
prices = [i["price"] for i in crypto_history[self._bot.currency()]]
if len(times) != len(prices):
print("Could not update graph because x and y dimensions were not the same for the price line")
return
self._prices_line = self._price_plot.plot_date(times, prices)[0]
#plot horizontal sell line
current_position = self._bot._trade_hands._long_position
if current_position != None:
self._price_plot.axhline(y=current_position["high_price"] * (1-self._bot._trade_hands._sell_cushion/100))
self._line_chart_figure.autofmt_xdate()
###stuff dealing with the portfolio plot
portfolio_history = self._bot._data_center._portfolio_history
portfolio_values = [element["total"] for element in portfolio_history if element["total"]!=0]
times = [element["time" ] for element in portfolio_history if element["total"]!=0]
if len(portfolio_values) != len(times):
return
self._portfolio_plot.clear()
self._portfolio_line = self._portfolio_plot.plot_date(times, portfolio_values)
self._portfolio_chart_figure.autofmt_xdate()
trade_history = self._bot._data_center._trade_history
for trade in trade_history:
self._portfolio_plot.axvline(x=trade["entry_time"], color="g")
self._portfolio_plot.axvline(x=trade["exit_time"], color="r")
if current_position != None:
self._portfolio_plot.axvline(x=current_position["entry_time"], color="g")
except:
x_max = crypto_history[self._bot.currency()][-1]
x_min = crypto_history[self._bot.currency()][0]
self._portfolio_plot.set_xlim([x_min, x_max])
self._price_plot.set_xlim([x_min, x_max])
return
###################################################################################################
# function: update_pie_chart
# purpose: re-plots the portfolio pie-chart
#
# description: re-plots the pie-chart by first clearing all data and then plotting again.
###################################################################################################
def update_pie_chart(self):
#----------------------------Setup up pie chart ----------------------------
try:
portfolio = self._bot._data_center._portfolio_history[-1]
except:
return
portfolio_keys = portfolio.keys()
labels = [key for key in portfolio_keys if "USD" in key]
amounts = [portfolio[key]["value"] for key in portfolio_keys if "USD" in key]
colors = ["gold", "green", "blue", "red", "purple"]
explode = [0,0,0,0,0]
self._pie_plot.clear()
self._pie_plot.pie(amounts, explode=explode, labels=labels, colors=colors, autopct='%5.2f%%', shadow=True, startangle=140)[0]
self._pie_chart_figure.gca().add_artist(matplotlib.patches.Circle((0,0),0.75,color='black', fc='white',linewidth=1.25))
self._pie_plot.axis('equal')
def main():
    GUI = BotGUI()

if __name__ == "__main__":
    main()
|
{"hexsha": "ef646fe33eb9c250b8e4cf8290858ed2555b0733", "size": 19843, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Caroline/client_side/GUI.py", "max_stars_repo_name": "smalbadger/TradeBot", "max_stars_repo_head_hexsha": "a6d4b443a6584af3e91b2d9bf0162db2b4c362e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Caroline/client_side/GUI.py", "max_issues_repo_name": "smalbadger/TradeBot", "max_issues_repo_head_hexsha": "a6d4b443a6584af3e91b2d9bf0162db2b4c362e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Caroline/client_side/GUI.py", "max_forks_repo_name": "smalbadger/TradeBot", "max_forks_repo_head_hexsha": "a6d4b443a6584af3e91b2d9bf0162db2b4c362e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 55.7387640449, "max_line_length": 243, "alphanum_fraction": 0.5374691327, "include": true, "reason": "from numpy", "num_tokens": 4036}
|
module tcai2
  use adj_mod
  use tcai1
  implicit none
  integer, private :: nx
contains
  subroutine tcai2_init(aa, nx_in)
    integer :: nx_in
    real, dimension(:), pointer :: aa
    nx = nx_in
    call tcai1_init(aa)
  end subroutine
  function tcai2_lop(adj, add, x, r) result(stat)
    integer :: stat
    logical, intent(in) :: adj, add
    real, dimension(:) :: x, r
    call adjnull(adj, add, x, r)
    call tcai2_lop2(adj, add, x, r)
    stat = 0
  end function
  subroutine tcai2_lop2(adj, add, x, r)
    logical, intent(in) :: adj, add
    real, dimension(:) :: x
    real, dimension(:) :: r
    integer :: stat1
    stat1 = tcai1_lop(adj, .true., x(:nx), r)
  end subroutine
  subroutine tcai2_close()
  end subroutine
end module
|
{"hexsha": "634b37141065faff3332bcdb267ce17e5c84ef00", "size": 569, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "pa1-skeleton/pa1-data/5/sepwww.stanford.edu_sep_prof_geelib_tcai2.f90", "max_stars_repo_name": "yzhong94/cs276-spring-2019", "max_stars_repo_head_hexsha": "a4780a9f88b8c535146040fe11bb513c91c5693b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pa1-skeleton/pa1-data/5/sepwww.stanford.edu_sep_prof_geelib_tcai2.f90", "max_issues_repo_name": "yzhong94/cs276-spring-2019", "max_issues_repo_head_hexsha": "a4780a9f88b8c535146040fe11bb513c91c5693b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pa1-skeleton/pa1-data/5/sepwww.stanford.edu_sep_prof_geelib_tcai2.f90", "max_forks_repo_name": "yzhong94/cs276-spring-2019", "max_forks_repo_head_hexsha": "a4780a9f88b8c535146040fe11bb513c91c5693b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 284.5, "max_line_length": 568, "alphanum_fraction": 0.8277680141, "num_tokens": 138}
|
from functools import cache
from typing import Optional, Union
import numpy as np
import torch
from mtutils.mtutils import BatchedLinear, BatchedSequential, broadcast_xwb
from torch.nn import Module, MSELoss, Tanh
from torch.nn.parameter import Parameter
from torch.nn.utils.clip_grad import clip_grad_norm_
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
def _create_mtmlp(
d_x: int,
d_y: int,
n_hidden: int,
d_hidden: int,
n_task: int,
) -> BatchedSequential:
"""
Generate a multi-task MLP Torch module.
"""
layers = []
if n_hidden == 0: # linear model
layers.append(
BatchedLinear(
in_features=d_x,
out_features=d_y,
external_wb=False,
n_task=n_task,
)
)
else: # fully connected MLP
layers.append(
BatchedLinear(
in_features=d_x,
out_features=d_hidden,
external_wb=False,
n_task=n_task,
)
)
layers.append(Tanh())
for _ in range(n_hidden - 1):
layers.append(
BatchedLinear(
in_features=d_hidden,
out_features=d_hidden,
external_wb=False,
n_task=n_task,
)
)
layers.append(Tanh())
layers.append(
BatchedLinear(
in_features=d_hidden,
out_features=d_y,
external_wb=False,
n_task=n_task,
)
)
net = BatchedSequential(*layers)
return net
def _train_model_mse(
model: Module,
x: torch.tensor,
y: torch.tensor,
n_epoch: int,
initial_lr: float,
final_lr: Optional[float],
wandb_run,
log_identifier: str,
) -> torch.tensor:
    # watch model execution with wandb
wandb_run.watch(model, log="all")
## optimizer
params = list(model.parameters())
optim = torch.optim.Adam(params=params, lr=initial_lr)
if final_lr is not None:
gamma = final_lr / initial_lr # final learning rate will be gamma * initial_lr
lr_decay = gamma ** (1 / n_epoch)
lr_scheduler = ExponentialLR(optimizer=optim, gamma=lr_decay)
else:
lr_scheduler = None
## loss
loss_fn = MSELoss()
regularizer_fn = None
## training loop
train_losses = []
for i in range(n_epoch):
optim.zero_grad()
# loss
pred = model(x)
mse = loss_fn(pred, y)
loss = mse
# regularizer
if regularizer_fn is not None:
raise NotImplementedError
else:
regularizer = torch.tensor(0.0)
# compute gradients and step
loss.backward()
clip_grad_norm_(params, max_norm=10.0)
optim.step()
# adapt lr
if lr_scheduler is not None:
lr_scheduler.step()
# logging
train_losses.append(loss.item())
# TODO: find neater way to log parametric learning curve
n_context = x.shape[-2]
wandb_run.log(
{
f"{log_identifier}/epoch": i,
f"{log_identifier}/loss_n_context_{n_context:03d}": loss,
f"{log_identifier}/mse_n_context_{n_context:03d}": mse,
f"{log_identifier}/regularizer_n_context_{n_context:03d}": regularizer,
}
)
if i % 100 == 0 or i == len(range(n_epoch)) - 1:
print(f"[iter {i:04d}] mse = {mse:.4e} | reg = {regularizer:.4e}")
return torch.tensor(train_losses)
def _mse(model: Module, x: torch.tensor, y: torch.tensor) -> torch.tensor:
"""
Computes predictive MSE of model on data (x, y).
"""
pred = model(x)
mse = MSELoss(reduction="mean")(pred, y)
return mse
class MultiTaskMultiLayerPerceptron(Module):
def __init__(
self,
d_x: int,
d_y: int,
n_hidden: int,
d_hidden: int,
):
super().__init__()
self.d_x, self.d_y, self.n_hidden, self.d_hidden = d_x, d_y, n_hidden, d_hidden
self._mlp = None # will be set in self.adapt
self.eval()
def _reset(self, n_task):
self._mlp = _create_mtmlp(
d_x=self.d_x,
d_y=self.d_y,
n_hidden=self.n_hidden,
d_hidden=self.d_hidden,
n_task=n_task,
)
def forward(self, x: torch.tensor) -> torch.tensor:
assert x.ndim == 3
pred = self._mlp(x=x)
return pred
def adapt(
self,
x: np.ndarray,
y: np.ndarray,
n_epoch: int,
initial_lr: float,
final_lr: float,
wandb_run,
) -> np.ndarray:
self.train()
# check dimensions
assert x.ndim == 3
n_task = x.shape[0]
n_context = x.shape[1]
# reset model
self._reset(n_task=n_task)
# adapt model
if n_context == 0:
epoch_losses = np.array([])
else:
epoch_losses = _train_model_mse(
model=self,
x=torch.tensor(x, dtype=torch.float),
y=torch.tensor(y, dtype=torch.float),
n_epoch=n_epoch,
initial_lr=initial_lr,
final_lr=final_lr,
wandb_run=wandb_run,
log_identifier="adapt",
).numpy()
self.eval()
return epoch_losses
@torch.no_grad()
def mse(self, x: np.ndarray, y: np.ndarray):
if x.size > 0:
mse = _mse(
model=self,
x=torch.tensor(x, dtype=torch.float),
y=torch.tensor(y, dtype=torch.float),
).numpy()
else:
mse = np.nan
return mse
@torch.no_grad()
def predict(self, x: np.ndarray):
return self(torch.tensor(x, dtype=torch.float))
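# --- Hypothetical usage sketch (not part of the original module) ---
# It exercises adapt() and mse() on toy sine data, assuming the mtutils package
# imported above is installed. The wandb run required by adapt() is replaced by
# a minimal stub exposing the watch()/log() methods this module calls.
if __name__ == "__main__":
    class _WandbRunStub:
        def watch(self, *args, **kwargs):
            pass

        def log(self, *args, **kwargs):
            pass

    rng = np.random.default_rng(0)
    x = rng.uniform(-1.0, 1.0, size=(2, 16, 1))  # (n_task, n_context, d_x)
    y = np.sin(np.pi * x)                        # (n_task, n_context, d_y)
    model = MultiTaskMultiLayerPerceptron(d_x=1, d_y=1, n_hidden=2, d_hidden=8)
    model.adapt(
        x=x, y=y, n_epoch=200, initial_lr=1e-2, final_lr=1e-3,
        wandb_run=_WandbRunStub(),
    )
    print("train mse =", model.mse(x, y))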
|
{"hexsha": "2080d2991ac45128808d5e002c8f6b894ed3c4fc", "size": 5997, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/mtmlp/mtmlp.py", "max_stars_repo_name": "michaelvolpp/mtbnn_pyro", "max_stars_repo_head_hexsha": "225360d62cdebeb800833ae8fba1364e4a8601c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mtmlp/mtmlp.py", "max_issues_repo_name": "michaelvolpp/mtbnn_pyro", "max_issues_repo_head_hexsha": "225360d62cdebeb800833ae8fba1364e4a8601c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mtmlp/mtmlp.py", "max_forks_repo_name": "michaelvolpp/mtbnn_pyro", "max_forks_repo_head_hexsha": "225360d62cdebeb800833ae8fba1364e4a8601c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1877729258, "max_line_length": 87, "alphanum_fraction": 0.5427713857, "include": true, "reason": "import numpy", "num_tokens": 1402}
|
# SVR(Support Vector Regression)
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# The StandardScaler class expects its input as a 2D array, so X and y must both be supplied in that shape.
#print(X) # X is already a 2D array
#print(y) # y is a 1D series, not a 2D array
# We need to reshape y here (convert it into a 2D array).
y = y.reshape(len(y), 1) # reshape takes the number of rows and the number of columns as arguments
#print(y)
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
# In the preprocessing part we applied feature scaling only to the independent variables, not to the dependent variable.
# There the outcome was binary (0/1), so feature scaling was unnecessary; here the salaries range from 45K to 1 million dollars,
# so we must bring the features down to a comparable scale.
# Since we are not splitting the dataset, feature scaling is applied to the whole dataset (not to X_train and y_train).
# The model then returns scaled answers, so to visualise the results we must convert back to the original scale (inverse feature scaling) to obtain the predicted salary.
from sklearn.preprocessing import StandardScaler
# StandardScaler is nothing but standardization
# For every value we calculate (X-mean)/standard deviation
sc_X = StandardScaler()
sc_y = StandardScaler()
y = sc_y.fit_transform(y) # We create StandardScaler objects and fit-transform the features: fit learns the statistics, transform scales X and y
X = sc_X.fit_transform(X)
print(X)
print(y) # So here we get the scaled values of X and y-now we can compare the values
# Fitting SVR to the dataset
from sklearn.svm import SVR
regressor = SVR(kernel = 'rbf') # Most common kernels are linear,gaussian,polynomial kernels but here we take the RBF kernel,as this is a non-linear curve so here we choose a non-linear kernel
regressor.fit(X, y) # Training the dataset
# Predicting a new result
#y_pred = regressor.predict(6.5) # wrong: X and y are on different scales, so this would not give an accurate result
y_pred = regressor.predict(sc_X.transform([[6.5]])) # correct: the input must first be put on X's scale
# The prediction comes back scaled, so we convert it back to y's original scale (in terms of 1000s of dollars):
y_pred = sc_y.inverse_transform(regressor.predict(sc_X.transform([[6.5]])))
# The polynomial regression model predicted a salary of 158K and this model gives 170K; both are close and give a decent prediction for level 6.5.
# Visualising the SVR results
plt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color = 'red') # scatter plot of the observations (note: y must be inverted with sc_y, not sc_X)
# X and y hold scaled values, but we want to plot the original values, so we apply inverse_transform again.
plt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor.predict(X))) # plot X against the predictions, converted back to y's scale
plt.title('Truth or Bluff (SVR)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() # Models such as LinearRegression are insensitive to feature scale, which is why we normally skip explicit scaling.
# SVR is not: without explicit feature scaling the fitted curve would not be proper, so we had to scale here.
# Visualising the SVR results (for higher resolution and smoother curve)
X_grid = np.arange(min(sc_X.inverse_transform(X)), max(sc_X.inverse_transform(X)), 0.01) # 0.01 step instead of 0.1 because the data was feature scaled
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color = 'red')
plt.plot(X_grid, sc_y.inverse_transform(regressor.predict(sc_X.transform(X_grid))), color='blue')
plt.title('Truth or Bluff (SVR)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
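# Hypothetical alternative (not in the original script): scikit-learn can manage
# both scalers for us via a pipeline and TransformedTargetRegressor, avoiding the
# manual fit_transform/inverse_transform bookkeeping above.
from sklearn.compose import TransformedTargetRegressor
from sklearn.pipeline import make_pipeline

model = TransformedTargetRegressor(
    regressor=make_pipeline(StandardScaler(), SVR(kernel='rbf')),
    transformer=StandardScaler())
model.fit(dataset.iloc[:, 1:2].values, dataset.iloc[:, 2].values)
print(model.predict([[6.5]]))  # prediction comes back directly in the original salary units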
|
{"hexsha": "f4b3af1ca491a65c8ee0b8747ec1db97caa5abce", "size": 5061, "ext": "py", "lang": "Python", "max_stars_repo_path": "Projects/Python/Machine_Learning_Project/Regression/svr.py", "max_stars_repo_name": "kshivam654/hacktoberfest2020", "max_stars_repo_head_hexsha": "41632803eff3f6cf5a1684fe5ab98f17e59cb765", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 71, "max_stars_repo_stars_event_min_datetime": "2020-10-06T05:53:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-27T03:14:42.000Z", "max_issues_repo_path": "Projects/Python/Machine_Learning_Project/Regression/svr.py", "max_issues_repo_name": "kshivam654/hacktoberfest2020", "max_issues_repo_head_hexsha": "41632803eff3f6cf5a1684fe5ab98f17e59cb765", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 92, "max_issues_repo_issues_event_min_datetime": "2020-10-05T19:18:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-09T04:35:16.000Z", "max_forks_repo_path": "Projects/Python/Machine_Learning_Project/Regression/svr.py", "max_forks_repo_name": "kshivam654/hacktoberfest2020", "max_forks_repo_head_hexsha": "41632803eff3f6cf5a1684fe5ab98f17e59cb765", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 572, "max_forks_repo_forks_event_min_datetime": "2020-10-05T20:11:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-10T16:28:29.000Z", "avg_line_length": 65.7272727273, "max_line_length": 214, "alphanum_fraction": 0.7765263782, "include": true, "reason": "import numpy", "num_tokens": 1218}
|
import numpy as np
def sigmoid(x):
return 1/(1+np.exp(-x))
def relu(x):
return np.maximum(0, x)
def relu_deriv(x):
return np.where(x < 0, 0, 1)
x = np.array([[0,0,1],
[0,1,1],
[1,0,1],
[1,1,1]])
y = np.array([[0],
[1],
[1],
[0]])
np.random.seed(1)
w1 = np.random.random((3, 5))
w2 = np.random.random((5, 1))
lr = 0.1
for i in range(10000):
    # forward pass
    z1 = x.dot(w1)
    a1 = relu(z1)
    z2 = a1.dot(w2)
    a2 = sigmoid(z2)
    # backward pass: propagate the output error through w2
    delta2 = y - a2
    delta1 = delta2.dot(w2.T)
    delta0 = relu_deriv(z1) * delta1
    # gradient step
    w2 += lr * a1.T.dot(delta2)
    w1 += lr * x.T.dot(delta0)
    if i % 1000 == 0:
        print("Error", np.mean(np.abs(delta2)))
|
{"hexsha": "b66042bfbfe66cd23153d76f7f9349e93da8a72d", "size": 745, "ext": "py", "lang": "Python", "max_stars_repo_path": "2nn3.py", "max_stars_repo_name": "kimtg/neural-network-test", "max_stars_repo_head_hexsha": "4072347d785e4e9b6624c45f8341181c18c9f5d3", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2nn3.py", "max_issues_repo_name": "kimtg/neural-network-test", "max_issues_repo_head_hexsha": "4072347d785e4e9b6624c45f8341181c18c9f5d3", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2nn3.py", "max_forks_repo_name": "kimtg/neural-network-test", "max_forks_repo_head_hexsha": "4072347d785e4e9b6624c45f8341181c18c9f5d3", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.3255813953, "max_line_length": 47, "alphanum_fraction": 0.4697986577, "include": true, "reason": "import numpy", "num_tokens": 269}
|
##############################################################################
##
## Gensys solver adapted from phactsolver.m
##
##############################################################################
function gensys(Γ0, Γ1, c, Ψ, Π; clean = true, continuous = true, check_existence = true, check_uniqueness = true)
if clean
        println("Converting to Reduced Form")
redundant = (maxabs(Γ0, 2) .== 0) & (maxabs(Ψ, 2) .== 0)
base = nullspace(Γ1[redundant, :])
try
Γ0 = lufact!(At_mul_B(base, Γ0 * base))
Γ1 = Γ0 \ At_mul_B(base, Γ1 * base)
Ψ = Γ0 \ At_mul_B(base, Ψ)
Π = Γ0 \ At_mul_B(base, Π)
c = Γ0 \ At_mul_B(base, c)
catch
error("Wrong Form. Try running Gensys")
end
else
Γ1 = Γ0 \ Γ1
end
n = size(Γ1, 1)
# Schur Decomposition
Γ1 = schurfact!(Γ1)
if continuous
select = real(Γ1[:values]) .< 0
else
select = abs(Γ1[:values]) .< 1
end
ordschur!(Γ1, select)
n1 = sum(select)
Γ1vectors = Γ1[:vectors]
# Compute G1
G1 = real(
A_mul_Bt(Γ1vectors * Γ1[:Schur] * diagm(vcat(ones(n1), zeros(n - n1))), Γ1vectors))
# Compute impact
u2 = Γ1vectors[:, (n1 + 1):n]
etawt = svdfact!(At_mul_B(u2, Π))
ueta, deta, veta = etawt[:U], etawt[:S], etawt[:V]
impact = real(-Π * veta * (diagm(deta) \ ueta') * At_mul_B(u2, Ψ) + Ψ)
    # check existence (default to true when the check is skipped, so the return value is defined)
    existence = true
    if check_existence
temp = svdfact!(At_mul_B(u2, Ψ))
uz, dz, vz = temp[:U], temp[:S], temp[:V]
existence = vecnorm(uz - ueta * At_mul_B(ueta, uz), 2) < (sqrt(eps()) * 10 * n)
end
    # check uniqueness (default to true when the check is skipped, so the return value is defined)
    uniqueness = true
    if check_uniqueness
u1 = Γ1vectors[:, 1:n1]
temp = svdfact!(At_mul_B(u1, Π))
dont, deta1, veta1 = temp[:U], temp[:S], temp[:V]
uniqueness = vecnorm(veta1 - veta * At_mul_B(veta, veta1), 2) < (sqrt(eps()) * 10 * n)
end
if clean
G1 = base * A_mul_Bt(G1, base)
impact = base * impact
end
return G1, impact, existence, uniqueness
end
|
{"hexsha": "570b102b509527ffdc8f010519b59550aa0f6dab", "size": 2107, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/aiyagari/gensys.jl", "max_stars_repo_name": "sglyon/HJBFiniteDifference.jl", "max_stars_repo_head_hexsha": "f678d57731bcadee0e770493418b86a55fac9197", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/aiyagari/gensys.jl", "max_issues_repo_name": "sglyon/HJBFiniteDifference.jl", "max_issues_repo_head_hexsha": "f678d57731bcadee0e770493418b86a55fac9197", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/aiyagari/gensys.jl", "max_forks_repo_name": "sglyon/HJBFiniteDifference.jl", "max_forks_repo_head_hexsha": "f678d57731bcadee0e770493418b86a55fac9197", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-09-30T13:52:40.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-06T16:49:16.000Z", "avg_line_length": 30.5362318841, "max_line_length": 114, "alphanum_fraction": 0.4983388704, "num_tokens": 730}
|
\documentclass[10pt, a4paper, twoside]{basestyle}
\usepackage[backend=biber,firstinits=true,maxnames=100,style=alphabetic,maxalphanames=4,doi=true,isbn=false,url=false,eprint=true]{biblatex}
\bibliography{bibliography}
\usepackage{tikz}
\usetikzlibrary{cd}
\usepackage[Mathematics]{semtex}
\usepackage{chngcntr}
\counterwithout{equation}{section}
%%%% Shorthands.
%%%% Title and authors.
\title{%
\textdisplay{%
On an Article by Celledoni et al.%
}%
}
\author{Pascal~Leroy (phl)}
\begin{document}
\maketitle
\begin{sloppypar}
\noindent
This document provides clarifications, corrections, and accuracy improvements to the formulæ presented in \cite{Celledoni2007}. It follows the notation
and conventions of that paper. Note that the preprint \cite{Celledoni2007} differs in some of the formulæ from the final publication \cite{Celledoni2008},
but we generally follow the former.
\end{sloppypar}
\section*{Preamble}
We remind the reader of the derivation formulæ for the Jacobian elliptic functions (\cite{NistHMF2010}, section 22.13(i)):
\[
\begin{dcases}
\derivop{u}{\JacobiSN u} &= \JacobiCN u \JacobiDN u \\
\derivop{u}{\JacobiCN u} &= -\JacobiSN u \JacobiDN u \\
\derivop{u}{\JacobiDN u} &= -k^2 \JacobiSN u \JacobiCN u
\end{dcases}
\]
and for the hyperbolic functions (\cite{NistHMF2010}, section 4.34):
\[
\begin{dcases}
\derivop{u}{\HyperbolicTangent u} &= \HyperbolicSecant^2 u \\
\derivop{u}{\HyperbolicSecant u} &= -\HyperbolicSecant u \HyperbolicTangent u
\end{dcases}
\]
\section*{The equations of motion}
We start by writing equation (1) of \cite{Celledoni2007} in coordinates. The coordinates of $\vm$ and $\VectorSymbol{I}$ are defined by:
\[
\vm\DefineAs
\begin{pmatrix}
m_1 \\ m_2 \\ m_3
\end{pmatrix}
\]
and:
\[
\VectorSymbol{I}\DefineAs
\begin{pmatrix}
I_1 & 0 & 0 \\ 0 & I_2 & 0 \\ 0 & 0 & I_3
\end{pmatrix}
\]
with $I_1 \leq I_2 \leq I_3$.
Euler's equation $\TimeDerivative{\vm} = \commutator{\vm}{\VectorSymbol{\gw}}$ can be written in coordinates in the principal axes frame:
\[
\TimeDerivative{\vm} =
\begin{pmatrix}
m_1 \\ m_2 \\ m_3
\end{pmatrix}
\times
\begin{pmatrix}
m_1/I_1 \\ m_2/I_2 \\ m_3/I_3
\end{pmatrix}
\]
thus:
\begin{equation}
\begin{dcases}
\TimeDerivative{m}_1 &= m_2 m_3 \pa{1/I_3 - 1/I_2}\\
\TimeDerivative{m}_2 &= m_3 m_1 \pa{1/I_1 - 1/I_3}\\
\TimeDerivative{m}_3 &= m_1 m_2 \pa{1/I_2 - 1/I_1}
\end{dcases}
\label{eqneuler}
\end{equation}
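As a sanity check (this verification is ours; \cite{Celledoni2007} simply states the result), (\ref{eqneuler}) makes the two invariants of motion used below explicit:
\[
\derivop{t}{\pa{m_1^2 + m_2^2 + m_3^2}} = 2\pa{m_1 \TimeDerivative{m}_1 + m_2 \TimeDerivative{m}_2 + m_3 \TimeDerivative{m}_3} = 0
\]
since each term is proportional to $m_1 m_2 m_3$ and the coefficients $1/I_3 - 1/I_2$, $1/I_1 - 1/I_3$, and $1/I_2 - 1/I_1$ sum to zero; thus $\norm\vm$ is constant. The same cancellation, with the three terms divided by $I_1$, $I_2$, and $I_3$ respectively, shows that the kinetic energy is constant as well.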
\section*{Solution of Euler's equation}
The solution of Euler's equation has three cases depending on the initial value of $\vm$ (more precisely, on the sign of
$\gD_2 = m_1^2 \frac{I_{12}}{I_1} + m_3^2 \frac{I_{32}}{I_3}$, see discussion below). Figure~\ref{figm} illustrates the
possible evolutions of $\vm$. The sphere is the surface $\norm\vm = G$, which is an invariant of motion. The planes are
the surfaces $\gD_2 = 0$ and separate different modes of the motion.
The blue curve is called case (i) in \cite{Celledoni2007}: $\vm$ follows a periodic curve, and when that curve is close to the $m_1$
axis we have a classical case of precession. The red curve is case (ii), and again the motion of $\vm$ is periodic and exhibits
precession when the curve remains close to the $m_3$ axis. The green curve is case (iii): $\vm$ takes an infinite amount
of time to reach the point $\tuple{0, G, 0}$; furthermore, the motion is unstable as any perturbation moves it either to
the blue or the red region where $\vm$ oscillates between points close to $\tuple{0, G, 0}$ and $\tuple{0, -G, 0}$; this is
the Джанибеков effect.
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.45]{Celledoni-m}
\caption{Possible trajectories of $\vm$: the blue and red curves are cases (i) and (ii), respectively, and correspond to motion
with precession. The green curve is the (unstable) case (iii) and any perturbation demonstrates the Джанибеков effect.\label{figm}}
\end{figure}
The solutions may also be visualized by intersecting the sphere $\norm\vm = G$ with ellipsoids defined by the value of the kinetic energy $T$,
which is also a constant of motion. Since $T = \frac{G^2 - \gD_2}{2 I_2 \Radian^2}$, different values of $T$ determine the same modes as above.
\begin{figure}[htb!]
\centering
\includegraphics[scale=0.45]{Celledoni-G-T}
\caption{Possible trajectories of $\vm$: the sphere is identical to that of Figure~\ref{figm}.
The ellipsoids are surfaces of equal kinetic energy and intersect the sphere on the blue, red, and green curves depending on the
value of $T$.\label{figGT}}
\end{figure}
In the rest of this section, we describe our notation and derive (corrected) formulæ for the three cases described above.
\subsection*{Notation}
\cite{Celledoni2007} uses a dimensionless formulation where $\norm\vm = 1$, and absolute values for $I_{jh}$ and $\gD_j$.
We prefer to use a dimensionful formulation where $\norm\vm = G$, and to avoid absolute values. Thus we define:
\begin{align*}
I_{jh} &\DefineAs I_j - I_h &\gD_j &\DefineAs G^2 - 2 T I_j \Radian^2 &B_{jh} &\DefineAs \sqrt{±\frac{I_j \gD_h}{I_{jh}}} \\
k &\DefineAs \sqrt{-\frac{\gD_1 I_{32}}{\gD_3 I_{21}}} &\gl_1 &\DefineAs \sqrt{\frac{\gD_1 I_{32}}{I_1 I_2 I_3}} &\gl_3 &\DefineAs \sqrt{\frac{\gD_3 I_{12}}{I_1 I_2 I_3}}
\end{align*}
With these definitions, $I_{jh} ≥ 0$ if and only if $j ≥ h$, and we will prove later that $\gD_1 ≥ 0$, $\gD_3 ≤ 0$, and that $\gD_2$ can have either sign.
The sign under the radical in the definition of $B_{jh}$ is $+$ if $h = 1$ and $j ≥ h$, or $h = 3$ and $j < h$; it is $-$ otherwise (note that we never use
$B_{j2}$ in the analysis below). At this point it is also useful
to observe that:
\[
B_{31}^2 + B_{13}^2 = \frac{\gD_1 I_3 - \gD_3 I_1}{I_{31}} = G^2
\]
Physically, $I_{jh}$ has the dimension of a moment of inertia $\squareBrackets{L^2 M}$. $G$ has the dimension of an angular
momentum $\squareBrackets{L^2 M T^{-1} A}$. $T$ has the dimension of an energy $\squareBrackets{L^2 M T^{-2}}$.
$\gD_j$ has the same dimension as $G^2$. $B_{jh}$ has the same dimension as
$\sqrt{\gD_h}$, i.e., the same dimension as $G$. $\gl_1$ and $\gl_3$ have the
same dimension as the quotient $\frac{G}{I_j}$, i.e., $\squareBrackets{T^{-1} A}$ which is appropriate for their usage.
\subsection*{Case (i)}
Case (i) of the solution of Euler's equation in section 2.2 of \cite{Celledoni2007} is:
\[
{\vm}_t =
\begin{pmatrix}
\gs B_{13} \JacobiDN\of{\gl t - \gn, k} \\
-B_{21} \JacobiSN\of{\gl t - \gn, k} \\
B_{31} \JacobiCN\of{\gl t - \gn, k}
\end{pmatrix}
\]
If we derive this expression with respect to $t$, inject in into (\ref{eqneuler}), and eliminate the elliptic functions we obtain:
\begin{equation}
\begin{dcases}
-\gs \gl k^2 B_{13} &= -B_{21} B_{31} \pa{1/I_3 - 1/I_2} \\
-\gl B_{21} &= \gs B_{13} B_{31} \pa{1/I_1 - 1/I_3} \\
-\gl B_{31} &= -\gs B_{13} B_{21} \pa{1/I_2 - 1/I_1}
\end{dcases}
\label{solneuleri}
\end{equation}
The last equation of (\ref{solneuleri}) yields the following value for $\gl$:
\begin{align*}
\gl &= \gs \frac{B_{13} B_{21}}{B_{31}} \frac{I_1 - I_2}{I_1 I_2}
= \gs\sqrt{\frac{I_1 \gD_3}{I_{13}} \frac{I_2 \gD_1}{I_{21}} \frac{I_{31}}{I_3 \gD_1}} \frac{I_1 - I_2}{I_1 I_2} \\
&= \gs\sqrt{\frac{-\gD_3}{I_{21} I_1 I_2 I_3}} \pa{I_1 - I_2}
= -\gs\sqrt{\frac{-\gD_3 I_{21}}{I_1 I_2 I_3}}
= -\gs \gl_3
\end{align*}
The sign change when moving $I_1 - I_2$ under the radical is necessary because $I_1 - I_2 < 0$.
It is straightforward to check that this value of $\gl$ also satisfies the other equations of (\ref{solneuleri}). Note that it
differs in sign from the one given by \cite{Celledoni2007}: the sign error is visible in that it does not yield the proper precession
direction.
\subsection*{Case (ii)}
Case (ii) of the solution of Euler's equation in section 2.2 of \cite{Celledoni2007} is:
\[
{\vm}_t =
\begin{pmatrix}
B_{13} \JacobiCN\of{\gl t - \gn, k^{-1}} \\
-B_{23} \JacobiSN\of{\gl t - \gn, k^{-1}} \\
\gs B_{31} \JacobiDN\of{\gl t - \gn, k^{-1}}
\end{pmatrix}
\]
Just as we did above, we derive this expression with respect to $t$, inject in into (\ref{eqneuler}), and eliminate the elliptic functions:
\begin{equation}
\begin{dcases}
-\gl B_{13} &= -\gs B_{23} B_{31} \pa{1/I_3 - 1/I_2} \\
-\gl B_{23} &= \gs B_{13} B_{31} \pa{1/I_1 - 1/I_3} \\
-\gs \gl k^{-2} B_{31} &= -B_{13} B_{23} \pa{1/I_2 - 1/I_1}
\end{dcases}
\label{solneulerii}
\end{equation}
The first equation of (\ref{solneulerii}) yields the following value for $\gl$:
\begin{align*}
\gl &= \gs \frac{B_{23} B_{31}}{B_{13}} \frac{I_2 - I_3}{I_2 I_3}
= \gs\sqrt{\frac{I_2 \gD_3}{I_{23}} \frac{I_3 \gD_1}{I_{31}} \frac{I_{13}}{I_1 \gD_3}} \frac{I_2 - I_3}{I_2 I_3} \\
&= \gs\sqrt{\frac{-\gD_1}{I_{23} I_1 I_2 I_3}} \pa{I_2 - I_3}
= -\gs\sqrt{\frac{-\gD_1 I_{23}}{I_1 I_2 I_3}}
= -\gs \gl_1
\end{align*}
Again, note the change of sign due to the fact that $I_2 - I_3 < 0$. And again, the same value of $\gl$ can be shown to satisfy the other
equations of (\ref{solneulerii}).
\subsection*{Case (iii)}
Case (iii) of the solution of Euler's equation in section 2.2 of \cite{Celledoni2007} is clearly incorrect as it implies that $m_1$ and $m_3$
always have the same sign, whereas it is straightforward to choose initial conditions where they do not (because the separatrix is made of two
planes, see Figure~\ref{figm}). Instead, we introduce an extra parameter $\gs'' = ±1$ and posit a solution of the form:
\[
{\vm}_t =
\begin{pmatrix}
\gs' B_{13} \HyperbolicSecant\of{\gl t - \gn} \\
G \HyperbolicTangent\of{\gl t - \gn} \\
\gs'' B_{31} \HyperbolicSecant\of{\gl t - \gn}
\end{pmatrix}
\]
Deriving this expression and injecting it into (\ref{eqneuler}) yields:
\begin{equation}
\begin{dcases}
-\gs' \gl B_{13} &= \gs'' G B_{31} \pa{1/I_3 - 1/I_2} \\
\gl G &= \gs' \gs'' B_{13} B_{31} \pa{1/I_1 - 1/I_3} \\
-\gs'' \gl B_{31} &= \gs' G B_{13} \pa{1/I_2 - 1/I_1}
\end{dcases}
\label{solneuleriii}
\end{equation}
The second equation of (\ref{solneuleriii}) gives the following value for $\gl$:
\[
\gl = \gs' \gs'' \frac{B_{13} B_{31}}{G} \frac{I_3 - I_1}{I_1 I_3}
= \gs' \gs'' \frac{1}{G} \sqrt{\frac{I_1 \gD_3}{I_{13}} \frac{I_3 \gD_1}{I_{31}}} \frac{I_3 - I_1}{I_1 I_3}
= \gs' \gs'' \frac{1}{G} \sqrt{-\frac{\gD_1 \gD_3}{I_1 I_3}}
\]
In this case it is a bit less obvious that the other equations yield the same value of $\gl$. We detail the derivation for the first equation,
using the fact that ${\gs'}^2 = 1$:
\begin{align*}
\gl &= -\gs' \gs'' G \frac{B_{31}}{B_{13}} \frac{I_2 - I_3}{I_2 I_3}
= -\gs' \gs'' G \sqrt{\frac{I_3 \gD_1}{I_{31}} \frac{I_{13}}{I_1 \gD_3}} \frac{I_2 - I_3}{I_2 I_3} \\
&= -\gs' \gs'' G \sqrt{-\frac{\gD_1}{I_1 I_3 \gD_3}} \frac{I_2 - I_3}{I_2}
= \gs' \gs'' G \sqrt{-\frac{\gD_1}{I_1 I_3 \gD_3}} \pa{\frac{I_3}{I_2} - 1}
\end{align*}
Now note that in case (iii) we have $2 T I_2 \Radian^2 = G^2$ thus $1/I_2 = 2 T \Radian^2/G^2$. $\gl$ can be rewritten as:
\[
\gl = \gs' \gs'' G \sqrt{-\frac{\gD_1}{I_1 I_3 \gD_3}} \pa{\frac{2 T I_3 \Radian^2}{G^2} - 1} = \gs' \gs'' \frac{1}{G} \sqrt{-\frac{\gD_1 \gD_3}{I_1 I_3}}
\]
where we have used the fact that $2 T I_3 \Radian^2 - G^2 = -\gD_3 = 2 T \Radian^2 \pa{I_3 - I_2} ≥ 0$.
We then define:
\[
\gl_2 \DefineAs \frac{1}{G} \sqrt{-\frac{\gD_1 \gD_3}{I_1 I_3}}
\]
It is easy to see that $\gl_2$ is the common value of $\gl_1$ and $\gl_3$ in case (iii), that $\gs'$ and $\gs''$ are free parameters and that:
\[
\gl = \gs' \gs'' \gl_2
\]
Note that $\gl_2$ has the same dimension as the quotient $\frac{\gD_j}{G I_j}$, which has the same dimension as $\frac{G}{I_j}$, namely, $\squareBrackets{T^{-1} A}$.
\subsection*{Phase and initial value}
The phase $\gn$ and the free parameters $\gs$, $\gs'$ and $\gs''$ are determined from the initial value ${\vm}_0$ by setting $t = 0$.
\subsubsection*{Case (i)}
We have:
\[
{\vm}_0 =
\begin{pmatrix}
\gs B_{13} \JacobiDN\of{-\gn, k} \\
-B_{21} \JacobiSN\of{-\gn, k} \\
B_{31} \JacobiCN\of{-\gn, k}
\end{pmatrix}
\]
First, we set $\gs$ to be the sign of $m_{01}$. Then, forming the quotient of the last two coordinates we find:
\[
\frac{m_{02}}{m_{03}} = \frac{B_{21}}{B_{31}}\TrigonometricTangent\of{\JacobiAmplitude\of{\gn, k}}
\]
This equation defines $\gn$ modulo $2 K\of{k}$ because $\JacobiAmplitude\of{\gn + 2 K\of{k}, k} = \JacobiAmplitude\of{\gn, k} + \gp$ (\cite{NistHMF2010}, equation 22.16.2). It comes:
\[
\InverseTrigonometricTangent\of{\frac{m_{02}}{m_{03}} \frac{B_{31}}{B_{21}}} = \JacobiAmplitude\of{\gn, k}
\]
and finally we obtain $\gn$ as:
\[
\gn = F\of{\InverseTrigonometricTangent\of{\frac{m_{02}}{m_{03}} \frac{B_{31}}{B_{21}}}, k}
\]
Any determination of the arc tangent works, because $F\of{\gp + \gf, k} = 2 K\of{k} + F\of{\gf, k}$ (\cite{NistHMF2010}, equation 19.2.10). In practice we use the \texttt{atan2} function.
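Concretely (a sketch of the call in our implementation, assuming an \texttt{atan2} that takes its arguments in the order $y$, $x$): since $\TrigonometricTangent\of{\JacobiAmplitude\of{\gn, k}} = \frac{m_{02} B_{31}}{m_{03} B_{21}}$ and $B_{21}, B_{31} \geq 0$, we may compute
\[
\gn = F\of{\operatorname{atan2}\of{m_{02} B_{31}, m_{03} B_{21}}, k}
\]
so that the determination is consistent with the signs of $m_{02}$ and $m_{03}$.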
\subsubsection*{Case (ii)}
Starting from:
\[
{\vm}_0 =
\begin{pmatrix}
B_{13} \JacobiCN\of{-\gn, k^{-1}} \\
-B_{23} \JacobiSN\of{-\gn, k^{-1}} \\
\gs B_{31} \JacobiDN\of{-\gn, k^{-1}}
\end{pmatrix}
\]
we set $\gs$ to be the sign of $m_{03}$ and form the quotient of the first two coordinates. We obtain:
\[
\frac{m_{02}}{m_{01}} = \frac{B_{23}}{B_{13}} \TrigonometricTangent\of{\JacobiAmplitude\of{\gn, k^{-1}}}
\]
and for $\gn$:
\[
\gn = F\of{\InverseTrigonometricTangent\of{\frac{m_{02}}{m_{01}} \frac{B_{13}}{B_{23}}}, k^{-1}}
\]
The same comments as above apply regarding the computation of the arc tangent.
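Here too, a sketch of the corresponding call (same \texttt{atan2} argument order as in case (i)) is:
\[
\gn = F\of{\operatorname{atan2}\of{m_{02} B_{13}, m_{01} B_{23}}, k^{-1}}
\]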
\subsubsection*{Case (iii)}
The initial value ${\vm}_0$ is:
\[
{\vm}_0 =
\begin{pmatrix}
\gs' B_{13} \HyperbolicSecant\of{-\gn} \\
G \HyperbolicTangent\of{-\gn} \\
\gs'' B_{31} \HyperbolicSecant\of{-\gn}
\end{pmatrix}
\]
$\gs'$ and $\gs''$ are set to be the signs of $m_{01}$ and $m_{03}$, respectively. The second coordinate immediately gives:
\[
\gn = -\InverseHyperbolicTangent\of{\frac{m_{02}}{G}}
\]
\subsection*{Implementation considerations}
Some of the formulæ given by \cite{Celledoni2007} do not lend themselves to an easy implementation or lead to numerical inaccuracies. We
describe in this section the modifications we make to these formulæ in our implementation.
\subsubsection*{The quantity $\gD_j$}
We notice that the computation of $\gD_j$ as written in \cite{Celledoni2007} entails cancellations, so we go back to the definition of $\norm{\vm}$ and of the kinetic energy:
\[
\begin{dcases}
G^2 &= m_1^2 + m_2^2 + m_3^2 \\
2 T \Radian^2 &= \frac{m_1^2}{I_1} + \frac{m_2^2}{I_2} + \frac{m_3^2}{I_3}
\end{dcases}
\]
When, for instance, $j = 2$, this yields:
\begin{align*}
\gD_2 &= m_1^2 \pa{1 - \frac{I_2}{I_1}} + m_3^2 \pa{1 - \frac{I_2}{I_3}} \\
&= m_1^2 \frac{I_{12}}{I_1} + m_3^2 \frac{I_{32}}{I_3}
\end{align*}
and similarly:
\[
\begin{dcases}
\gD_1 &= m_2^2 \frac{I_{21}}{I_2} + m_3^2 \frac{I_{31}}{I_3} \\
\gD_3 &= m_1^2 \frac{I_{13}}{I_1} + m_2^2 \frac{I_{23}}{I_2}
\end{dcases}
\]
It is easy to see that $\gD_1$ and $\gD_3$ are the sums of terms of the same sign, so they can be computed without cancellations. Furthermore,
$\gD_1 \geq 0$ and $\gD_3 \leq 0$. $\gD_2$ can have either sign, which correspond exactly to cases (i) ($\gD_2 < 0$), (ii) ($\gD_2 > 0$) and
(iii) ($\gD_2 = 0$).
\subsubsection*{The elliptic modulus}
For the computation of the elliptic functions and integrals \cite{Celledoni2007} gives the value of the elliptic modulus $k$ but we need the value of
the complementary parameter $m_c = 1 - m$ (see \cite{NistHMF2010}, section 19.1.2 for an overview of the notation). In case (i) we have:
\[
m_c = 1 - k^2 = 1 + \frac{\gD_1 I_{32}}{\gD_3 I_{21}}
\]
This can be rewritten as follows:
\begin{align*}
m_c &= \frac{\gD_3 I_{21} + \gD_1 I_{32}}{\gD_3 I_{21}}
=\frac{\pa{G^2 - 2 T I_3}\pa{I_2 - I_1} + \pa{G^2 - 2 T I_1}\pa{I_3 - I_2}}{\gD_3 I_{21}} \\
&=\frac{G^2\pa{I_3 - I_1} + 2 T I_2\pa{I_1 - I_3}}{\gD_3 I_{21}}
=\frac{\gD_2 I_{31}}{\gD_3 I_{21}}
\end{align*}
Similarly, in case (ii):
\[
m_c = 1 - k^{-2} = 1 + \frac{\gD_3 I_{21}}{\gD_1 I_{32}}
=\frac{\gD_1 I_{32} + \gD_3 I_{21}}{\gD_1 I_{32}}
=\frac{\gD_2 I_{31}}{\gD_1 I_{32}}
\]
In both cases we have $m_c \geq 0$.
\section*{Integration of the rotation matrix}
In order to be compatible with our geometrical libraries, our notation differs from that of \cite{Celledoni2007}.
\subsection*{Notation}
\cite{Celledoni2007} describe the physical space as a three-dimensional vector space where vectors like $\VectorSymbol M$ live. In this vector space,
they pick orthonormal bases like $\curlyBrackets{\VectorSymbol{E}^b_1, \VectorSymbol{E}^b_2, \VectorSymbol{E}^b_3}$ which they identify with the
canonical basis of $\Reals^3$ to obtain a coordinate representation $\vm$ of $\VectorSymbol M$.
They then define active rotations in the physical (vector) space. For instance they explain that $\mathscr P_t$ takes $\VectorSymbol M$ to ${\VectorSymbol E}^b_3$ and transforms the basis $\mathscr B_t$ into the basis $\mathscr B^b$. By contrast, our libraries operate on coordinate
representations, not abstract vectors, and implement passive rotations where the physical space is represented by multiple copies of $\Reals^3$ with
different coordinate systems. Therefore, we view $\mathscr P_t$ as transforming $\VectorSymbol M$ with coordinates $\vm$ in the coordinate system
$\mathscr B^b$ of the body into $\VectorSymbol M$ with coordinates $\ve_3$ in the coordinate system $\mathscr B_t$. Confusingly,
\cite{Celledoni2007} appear to use passive rotations when they write matrices, so their $P_t$ has semantics similar to that of our $\mathscr P_t$.
In what follows (and in our code) we try to use the same symbols as \cite{Celledoni2007} with the understanding that our rotations, written in
script font, are passive, and that the entities denoted by $\mathscr B$ are coordinate systems in multiple copies of $\Reals^3$, not bases in a
single vector space.
\cite{Celledoni2007} decompose the attitude rotation $\mathscr Q_t$ of the body as follows:
\[
\begin{tikzcd}
{\mathscr Q_t: \mathscr B^b} \arrow{r}{\mathscr P_t} & {\mathscr B_t} \arrow{r}{\mathscr Y_t} & {\mathscr B'} \arrow{r}{\mathscr R} & {\mathscr B^s}
\end{tikzcd}
\]
where $\mathscr P_t$ maps $\vm$ onto $\ve_3^b$, $\mathscr Y_t$ is a rotation of angle $\gy\of{t}$ around $\vm$, and ${\mathscr R}$
maps $\ve_3^s$ onto $\vm$, where they assume that $\mathscr Q_{t_0} = \Identity$. This yields the following decomposition for
$\mathscr R$:
\[
\begin{tikzcd}
{\mathscr R: \mathscr B'} \arrow{r}{\mathscr Y_{t_0}^{-1} = \Identity} & {\mathscr B_t} \arrow{r}{\mathscr P_{t_0}^{-1}} & {\mathscr B^b}
\end{tikzcd}
\]
This is not sufficient for our purpose, however, because in practical situations $\mathscr Q_{t_0}$ cannot be chosen. Therefore we
decompose $\mathscr R$ as follows:
\[
\begin{tikzcd}
{\mathscr R: \mathscr B'} \arrow{r}{\mathscr Y_{t_0}^{-1} = \Identity} & {\mathscr B_t} \arrow{r}{\mathscr P_{t_0}^{-1}} & {\mathscr B^b}
\arrow{r}{\mathscr Q_{t_0}} & {\mathscr B^s}
\end{tikzcd}
\]
\cite{Celledoni2007} derive the following expression for $\TimeDerivative{\gy}\of{t}$ (which they write slightly differently):
\begin{align*}
\TimeDerivative{\gy}\of{t} &= \frac{2 T \Radian^2}{G} + \frac{\gD_2}{G I_2}\pa{\frac{1}{1 + \frac{I_{12}I_{23}G^2}{I_2^2 \gD_1 \gD_3}m_2^2}}\\
&= \frac{2 T \Radian^2}{G} + \frac{\gD_2}{G I_2}\pa{\frac{1}{1 - \frac{G^2}{B_{21}^2 B_{23}^2}m_2^2}}
\end{align*}
\subsection*{Case (i)}
In case (i) we have $m_2 = -B_{21} \JacobiSN\of{\gl t - \gn, k}$ and the above expression becomes:
\[
\TimeDerivative{\gy}\of{t} = \frac{2 T \Radian^2}{G} + \frac{\gD_2}{G I_2}\pa{\frac{1}{1 - \frac{G^2}{B_{23}^2}\JacobiSN^2\of{\gl t - \gn, k}}}
\]
This expression can be integrated using formula 110.04 of \cite{ByrdFriedman1954} with $\ga = G/B_{23}$ to yield:
\[
\gy\of{t} = \frac{2 T \Radian^2}{G}t + \frac{\gD_2}{\gl G I_2}\EllipticPi\of{\JacobiAmplitude\of{\gl t - \gn, k}, \frac{G^2}{B_{23}^2}, k}
\]
Note that this differs from the formula given by \cite{Celledoni2007} in the value of $n$.
\subsection*{Case (ii)}
In case (ii) we have $m_2 = -B_{23} \JacobiSN\of{\gl t - \gn, k^{-1}}$ and a computation similar to the one above gives:
\[
\TimeDerivative{\gy}\of{t} = \frac{2 T \Radian^2}{G} + \frac{\gD_2}{G I_2}\pa{\frac{1}{1 - \frac{G^2}{B_{21}^2}\JacobiSN^2\of{\gl t - \gn, k^{-1}}}}
\]
and:
\[
\gy\of{t} = \frac{2 T \Radian^2}{G}t + \frac{\gD_2}{\gl G I_2}\EllipticPi\of{\JacobiAmplitude\of{\gl t - \gn, k^{-1}}, \frac{G^2}{B_{21}^2}, k^{-1}}
\]
\subsection*{Case (iii)}
In case (iii) we have $m_2 = G \HyperbolicTangent\of{\gl t - \gn}$ and:
\[
\TimeDerivative{\gy}\of{t} = \frac{2 T \Radian^2}{G} + \frac{\gD_2}{G I_2}\pa{\frac{1}{1 - \frac{G^4}{B_{21}^2 B_{23}^2} \HyperbolicTangent^2\of{\gl t - \gn}}}
\]
which can be integrated using:
\[
\int{}\frac{1}{1 - n \HyperbolicTangent^2\of{\gl t - \gn}}\diffd{t} =
\frac{t}{1 - n} - \frac{\sqrt{n}}{\pa{1 - n}\gl} \InverseHyperbolicTangent\of{\sqrt{n} \HyperbolicTangent\of{\gl t - \gn}}
\]
\subsection*{Implementation considerations}
The approach in this section relies on the intermediate basis $\set{\vm, \TimeDerivative{\vm}, \commutator{\vm}{\TimeDerivative{\vm}}}$.
Unfortunately, it is not suitable for a practical implementation because it has an essential singularity when $\TimeDerivative{\vm} = \VectorSymbol{0}$:
while the condition $\TimeDerivative{\vm} = \VectorSymbol{0}$ is physically a constant of motion, $\TimeDerivative{\vm}$ itself is not. This means that
any inaccuracies in numerical computations of $\TimeDerivative{\vm}$ (cancellations, underflow) may cause it to switch from $\VectorSymbol{0}$ to
non-$\VectorSymbol{0}$ and back.
When $\TimeDerivative{\vm} = \VectorSymbol{0}$ at $t = 0$, the motion may be computed at all times assuming a constant $\vm$, so the singularity may be
eliminated. But when $\TimeDerivative{\vm} \neq \VectorSymbol{0}$ at $t = 0$ and it later becomes $\VectorSymbol{0}$, there is no way to find a basis at $t$ that
continuously corresponds to the one at $t = 0$.
While it might be possible to deal with the neighbourhoods of the stable zeros ($m_1$ and $m_3$) by handling the relevant regions with distinct
formulæ (e.g., the low-order precession approximations), this is infeasible for $m_2$ where any neighbourhood of the singularity propagates all the
way around the separatrix. The reader is invited to consult Figure~\ref{figm}.
In conclusion, we tried to use this approach but had to abandon it because of the impossibility of handling the singularity.
\section*{Integration of the quaternion}
\cite{Celledoni2007} use a rotation $\mathscr P_t$ to map $\vm$ onto $\VectorSymbol{e_3}$ and obtain the following quaternionic representation for that rotation:
\[
\begin{dcases}
p_1 &= \frac{p_3 m_1 + p_0 m_2}{G + m_3} \\
p_2 &= \frac{p_3 m_2 - p_0 m_1}{G + m_3} \\
p_0^2 + p_3^2 &= \frac{G + m_3}{2 G}
\end{dcases}
\]
and the angle $\gy\of{t}$ for the rotation $\mathscr Y_t$ around $\VectorSymbol{e_3}$:
\[
\TimeDerivative{\gy}\of{t} = \frac{2 T \Radian^2 + G m_3 / I_3}{G + m_3} + 4 G \frac{p_3 \TimeDerivative{p_0} - p_0 \TimeDerivative{p_3}}{G + m_3}
\]
Solving these equations they obtain:
\[
\begin{dcases}
p_0 &= \sqrt\frac{1 + m_3/G}{2} \\
p_1 &= \frac{m_2}{\sqrt{2 G\pa{G + m_3}}} \\
p_2 &= \frac{-m_1}{\sqrt{2 G\pa{G + m_3}}} \\
p_3 &= 0
\end{dcases}
\]
and:
\[
\gy\of{t} = \frac{G}{I_3}t + \frac{G I_{31}}{\gl I_1 I_3}
\pa{\EllipticPi\of{\JacobiAmplitude\of{\gl t - \gn, k}, -\pa{\frac{B_{31}}{B_{13}}}^2, k} + f\of{t}}
\]
They note that these formulæ are only applicable if $m_3 \neq -G$ but go on applying them to case (i) where $m_3 = B_{31} \JacobiCN\of{\gl t - \gn, k}$ can be negative.
One should note at this point that \cite{Celledoni2007} make an error when copying the definition of $f_1\of{u}$ from formula 361.54 of
\cite{ByrdFriedman1954}. This error is corrected in \cite{Celledoni2008}, but the sign of $f\of{s}$ in the definition of $\gy\of{t}$ is
still incorrect and should read:
\[
f\of{s} \DefineAs -B_{31} \frac{B_{13}}{B_{21}} \InverseTrigonometricTangent\of{\frac{B_{21}}{B_{13}} \JacobiSD\of{\gl s - \gn, k}}
\]
\section*{An alternative quaternionic solution}
\cite{Celledoni2007} do not explain how they handle cases (ii) and (iii). It is intriguing to note, though, that the formulæ above work well
for case (ii) where $m_3 = \gs B_{31} \JacobiDN\of{\gl t - \gn, k^{-1}}$. The reason is that, without loss of generality, we can apply to the principal
axes of the body a rotation $\mathscr S$ that maps $m_3$ onto $-m_3$ (there are many possible choices for $\mathscr S$, but for simplicity we pick one that flips the sign of
either $m_1$ or $m_2$). With this rotation, the multiplier $\gs$ disappears from $m_3$ and we have $m_3 \geq 0$ at all times, which ensures that
the denominator $G + m_3$ of the quaternionic coordinates can be safely computed. The key insight here is that the coordinate where the Jacobi function
$\JacobiDN$ appears does not change sign, and is therefore a better choice for rotating $\vm$ to $\VectorSymbol{e_i}$.
Observing that, in case (i), $m_1 = \gs B_{13} \JacobiDN\of{\gl t - \gn, k}$, we will construct a rotation $\mathscr S$ to make $m_1$ positive and then
a rotation $\mathscr P_t$ that maps $\vm$ onto $\VectorSymbol{e_1}$. Similarly, in case (iii) the function $\HyperbolicSecant$ appears in the
expressions of $m_1$ and $m_3$ and is always positive, so we will construct $\mathscr S$ to make both $m_1$ and $m_3$ positive and choose $\mathscr P_t$
to map $\vm$ onto $\VectorSymbol{e_1}$ or $\VectorSymbol{e_3}$. In the rest of this section we detail the calculations used to compute $\mathscr S$,
$\mathscr P_t$ and $\gy\of{t}$.
With the introduction of $\mathscr S$, we are effectively introducing a new basis $\mathscr B^p$ for the ``preferred'' principal axes of the body, and
the rotation diagrams above are modified as follows for $\mathscr Q_t$:
\[
\begin{tikzcd}
{\mathscr Q_t: \mathscr B^b} \arrow{r}{\mathscr S} & {\mathscr B^p}\arrow{r}{\mathscr P_t} & {\mathscr B_t} \arrow{r}{\mathscr Y_t} & {\mathscr B'} \arrow{r}{\mathscr R} & {\mathscr B^s}
\end{tikzcd}
\]
And for $\mathscr R$:
\[
\begin{tikzcd}
{\mathscr R: \mathscr B'} \arrow{r}{\mathscr Y_{t_0}^{-1} = \Identity} & {\mathscr B_t} \arrow{r}{\mathscr P_{t_0}^{-1}} & {\mathscr B^p}
\arrow{r}{\mathscr S^{-1}} & {\mathscr B^b} \arrow{r}{\mathscr Q_{t_0}} & {\mathscr B^s}
\end{tikzcd}
\]
With this choice of $\mathscr S$, the free parameters $\gs$, $\gs'$, and $\gs''$ appearing in the three cases of the resolution of Euler's equation are all $1$.
\subsection*{Integrals}
We start by computing two integrals that are useful to obtain $\gy\of{t}$. They are valid for $0 \le a < 1$. As far as we can tell, the following integral,
which is useful for cases (i) and (ii),
is missing from \cite{ByrdFriedman1954}:
\begin{align}
\int{}\frac{1}{1 + a \JacobiDN\of{u, k}}\diffd{u} &= \nonumber\\
\frac{1}{1 - a^2}&
\begin{aligned}[t]
\;\Biggl[&\EllipticPi\of{\JacobiAmplitude\of{u, k}, \frac{a^2 k^2}{a^2 - 1}, k} - \\
&a\sqrt\frac{1 - a^2}{a^2\pa{k^2 - 1} + 1} \InverseTrigonometricTangent\of{\sqrt\frac{a^2 \pa{k^2 - 1} + 1}{1 - a^2} \JacobiSC\of{u, k}}\Biggr]
\end{aligned}
\label{integraldn}
\end{align}
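Equation (\ref{integraldn}) may be verified by differentiation: both terms on the right-hand side produce the common denominator
$1 + \frac{a^2 k^2}{1 - a^2}\JacobiSN^2\of{u, k}$, and the identity $\JacobiDN^2\of{u, k} = 1 - k^2 \JacobiSN^2\of{u, k}$ then reduces the result to the integrand:
\[
\frac{1 - a \JacobiDN\of{u, k}}{\pa{1 - a^2}\pa{1 + \frac{a^2 k^2}{1 - a^2}\JacobiSN^2\of{u, k}}} = \frac{1}{1 + a \JacobiDN\of{u, k}}
\]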
Also, the following integral is useful for case (iii):
\begin{equation}
\int{}\frac{1}{1 + a \HyperbolicSecant\of{u}}\diffd{u} = u + \frac{2 a}{\sqrt{1 - a^2}}
\InverseTrigonometricTangent\of{\frac{a - 1}{\sqrt{1 - a^2}} \HyperbolicTangent\of{\frac{u}{2}}}
\label{integralsech}
\end{equation}
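The same method applies to equation (\ref{integralsech}): the inverse tangent term differentiates to
\[
-\frac{a \HyperbolicSecant\of{u}}{1 + a \HyperbolicSecant\of{u}}
\]
which, added to the derivative of the leading term $u$, yields the integrand.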
\subsection*{Case (i)}
In case (i), we define $\mathscr P_t$ to map $\vm$ onto $\VectorSymbol{e_1}$. A computation similar to that in \cite{Celledoni2007} yields that rotation
in quaternionic form:
\[
\begin{dcases}
p_2 &= \frac{p_1 m_2 + p_0 m_3}{G + m_1} \\
p_3 &= \frac{p_1 m_3 - p_0 m_2}{G + m_1} \\
p_0^2 + p_1^2 &= \frac{G + m_1}{2 G}
\end{dcases}
\]
and, for the angle $\gy\of{t}$ of the rotation $\mathscr Y_t$ around $\VectorSymbol{e_1}$:
\[
\TimeDerivative{\gy}\of{t} = \frac{2 T \Radian^2 + G m_1 / I_1}{G + m_1} + 4 G \frac{p_1 \TimeDerivative{p_0} - p_0 \TimeDerivative{p_1}}{G + m_1}
\]
We then write $p_0 = c_0 \sqrt{1 + m_1/G}$ and $p_1 = c_1 \sqrt{1 + m_1/G}$ and pick $c_0 = 1/\sqrt{2}$ and $c_1 = 0$. The quaternion simplifies to:
\[
\begin{dcases}
p_0 &= \sqrt\frac{1 + m_1/G}{2} \\
p_1 &= 0 \\
p_2 &= \frac{m_3}{\sqrt{2 G\pa{G + m_1}}} \\
p_3 &= \frac{-m_2}{\sqrt{2 G\pa{G + m_1}}}
\end{dcases}
\]
and the angle to:
\[
\TimeDerivative{\gy}\of{t} = \frac{2 T \Radian^2 + G m_1 / I_1}{G + m_1} = \frac{G^2 - \gD_1 + G m_1}{I_1\pa{G + m_1}} = \frac{G}{I_1} - \frac{\gD_1/I_1}{G + m_1} =
\frac{G}{I_1} - \frac{\gD_1}{G I_1}\frac{1}{1 + m_1/G}
\]
Using equation (\ref{integraldn}) with $a = B_{13}/G$ and simplifying the various coefficients we obtain:
\[
\gy\of{t} = \frac{G}{I_1}t + \frac{G I_{13}}{\gl I_1 I_3}
\EllipticPi\of{\JacobiAmplitude\of{\gl t - \gn, k}, \frac{I_1 I_{32}}{I_3 I_{12}}, k} -
\InverseTrigonometricTangent\of{\sqrt\frac{I_2 I_{31}}{I_3 I_{21}} \JacobiSC\of{\gl t - \gn, k}}
\]
\subsection*{Case (ii)}
In case (ii) $\mathscr P_t$ maps $\vm$ onto $\VectorSymbol{e_3}$ and we follow the computation given in \cite{Celledoni2007}. We repeat their results
here in dimensionful form. The quaternion is:
\[
\begin{dcases}
p_0 &= \sqrt\frac{1 + m_3/G}{2} \\
p_1 &= \frac{m_2}{\sqrt{2 G\pa{G + m_3}}} \\
p_2 &= \frac{-m_1}{\sqrt{2 G\pa{G + m_3}}} \\
p_3 &= 0
\end{dcases}
\]
and the angle:
\[
\TimeDerivative{\gy}\of{t} = \frac{G}{I_3} - \frac{\gD_3}{G I_3}\frac{1}{1 + m_3/G}
\]
Using equation (\ref{integraldn}) with $a = B_{31}/G$ and simplifying the various coefficients we obtain:
\[
\gy\of{t} = \frac{G}{I_3}t + \frac{G I_{31}}{\gl I_1 I_3}
\EllipticPi\of{\JacobiAmplitude\of{\gl t - \gn, k^{-1}}, \frac{I_3 I_{21}}{I_1 I_{23}}, k^{-1}} +
\InverseTrigonometricTangent\of{\sqrt\frac{I_2 I_{31}}{I_1 I_{32}} \JacobiSC\of{\gl t - \gn, k^{-1}}}
\]
\subsection*{Case (iii)}
In case (iii) we start by defining $\mathscr S$ so as to make both $m_1$ and $m_3$ positive. This is always possible, possibly at the cost of flipping the sign of $m_2$. We then
choose $\mathscr P_t$ to map $\vm$ onto $\VectorSymbol{e_1}$ or $\VectorSymbol{e_3}$. Which one we pick is explained below.
Assume that we map $\vm$ onto $\VectorSymbol{e_1}$. Then using equation (\ref{integralsech}) with $a = B_{13}/G$ and simplifying the coefficients we obtain:
\[
\begin{dcases}
\gy\of{t} &= \pa{\frac{G}{I_1} - \frac{\gD_1}{G I_1}}t - \frac{2 B_{13} \gD_1}{\gl G B_{31} I_1}
\InverseTrigonometricTangent\of{\frac{B_{13} - G}{B_{31}} \HyperbolicTangent\of{\frac{\gl t - \gn}{2}}} \\
&= \frac{G}{I_2}t - 2 \InverseTrigonometricTangent\of{\frac{B_{13} - G}{B_{31}} \HyperbolicTangent\of{\frac{\gl t - \gn}{2}}}
\end{dcases}
\]
Because $B_{13} = G$ when $B_{31} = 0$, this formula is only usable if $B_{31} \neq 0$. For safety, we rotate onto $\VectorSymbol{e_1}$
if and only if $B_{13} < B_{31}$.
Conversely, when we map $\vm$ onto $\VectorSymbol{e_3}$, we have $a = B_{31}/G$ and:
\[
\gy\of{t} = \frac{G}{I_2}t + 2 \InverseTrigonometricTangent\of{\frac{B_{31} - G}{B_{13}} \HyperbolicTangent\of{\frac{\gl t - \gn}{2}}}
\]
We use this formula when $B_{13} \geq B_{31}$, which ensures that $B_{13}$ is nonzero.
\printbibliography
\end{document}
|
{"hexsha": "e62c1cc761233ddb8d89eb0e3a2d7da49f14fe3b", "size": 31024, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "documentation/Celledoni.tex", "max_stars_repo_name": "erplsf/Principia", "max_stars_repo_head_hexsha": "1f2a1fc53f8a73c1bc67f12213169e6969c8488f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "documentation/Celledoni.tex", "max_issues_repo_name": "erplsf/Principia", "max_issues_repo_head_hexsha": "1f2a1fc53f8a73c1bc67f12213169e6969c8488f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "documentation/Celledoni.tex", "max_forks_repo_name": "erplsf/Principia", "max_forks_repo_head_hexsha": "1f2a1fc53f8a73c1bc67f12213169e6969c8488f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.0387096774, "max_line_length": 284, "alphanum_fraction": 0.6724149046, "num_tokens": 11974}
|
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE DeriveDataTypeable, DeriveGeneric #-}
-- |
-- Module : Statistics.Distribution.Poisson
-- Copyright : (c) 2009, 2011 Bryan O'Sullivan
-- License : BSD3
--
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : portable
--
-- The Poisson distribution. This is the discrete probability
-- distribution of a number of events occurring in a fixed interval if
-- these events occur with a known average rate, and occur
-- independently from each other within that interval.
module Statistics.Distribution.Poisson
(
PoissonDistribution
-- * Constructors
, poisson
, poissonE
-- * Accessors
, poissonLambda
-- * References
-- $references
) where
import Control.Applicative
import Data.Data (Data, Typeable)
import GHC.Generics (Generic)
import Numeric.SpecFunctions (incompleteGamma,logFactorial)
import Numeric.MathFunctions.Constants (m_neg_inf)
import qualified Statistics.Distribution as D
import qualified Statistics.Distribution.Poisson.Internal as I
import Statistics.Internal
newtype PoissonDistribution = PD {
poissonLambda :: Double
} deriving (Eq, Typeable, Data, Generic)
instance Show PoissonDistribution where
showsPrec i (PD l) = defaultShow1 "poisson" l i
instance Read PoissonDistribution where
readPrec = defaultReadPrecM1 "poisson" poissonE
instance D.Distribution PoissonDistribution where
cumulative (PD lambda) x
| x < 0 = 0
| isInfinite x = 1
| isNaN x = error "Statistics.Distribution.Poisson.cumulative: NaN input"
| otherwise = 1 - incompleteGamma (fromIntegral (floor x + 1 :: Int)) lambda
instance D.DiscreteDistr PoissonDistribution where
probability (PD lambda) x = I.probability lambda (fromIntegral x)
logProbability (PD lambda) i
| i < 0 = m_neg_inf
| otherwise = log lambda * fromIntegral i - logFactorial i - lambda
instance D.Variance PoissonDistribution where
variance = poissonLambda
instance D.Mean PoissonDistribution where
mean = poissonLambda
instance D.MaybeMean PoissonDistribution where
maybeMean = Just . D.mean
instance D.MaybeVariance PoissonDistribution where
maybeStdDev = Just . D.stdDev
instance D.Entropy PoissonDistribution where
entropy (PD lambda) = I.poissonEntropy lambda
instance D.MaybeEntropy PoissonDistribution where
maybeEntropy = Just . D.entropy
-- | Create Poisson distribution.
poisson :: Double -> PoissonDistribution
poisson l = maybe (error $ errMsg l) id $ poissonE l
-- | Create Poisson distribution.
poissonE :: Double -> Maybe PoissonDistribution
poissonE l
| l >= 0 = Just (PD l)
| otherwise = Nothing
errMsg :: Double -> String
errMsg l = "Statistics.Distribution.Poisson.poisson: lambda must be non-negative. Got "
++ show l
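-- Illustrative usage (a sketch added for this write-up, not part of the
-- original API surface):
--
-- >>> let d = poisson 2.5
-- >>> poissonLambda d
-- 2.5
-- >>> D.mean d == D.variance d
-- True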
-- $references
--
-- * Loader, C. (2000) Fast and Accurate Computation of Binomial
-- Probabilities. <http://projects.scipy.org/scipy/raw-attachment/ticket/620/loader2000Fast.pdf>
-- * Adell, J., Lekuona, A., and Yu, Y. (2010) Sharp Bounds on the
-- Entropy of the Poisson Law and Related Quantities
-- <http://arxiv.org/pdf/1001.2897.pdf>
|
{"hexsha": "e07a7b45982bfe5588af271e71068d6fd7feae18", "size": 3223, "ext": "hs", "lang": "Haskell", "max_stars_repo_path": "Statistics/Distribution/Poisson.hs", "max_stars_repo_name": "vmchale/statistics", "max_stars_repo_head_hexsha": "7f19ba0569ff34891c3ec18293a23ffb7eac8edf", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Statistics/Distribution/Poisson.hs", "max_issues_repo_name": "vmchale/statistics", "max_issues_repo_head_hexsha": "7f19ba0569ff34891c3ec18293a23ffb7eac8edf", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Statistics/Distribution/Poisson.hs", "max_forks_repo_name": "vmchale/statistics", "max_forks_repo_head_hexsha": "7f19ba0569ff34891c3ec18293a23ffb7eac8edf", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2912621359, "max_line_length": 98, "alphanum_fraction": 0.7219981384, "num_tokens": 819}
|
import unittest
import pandas as pd
import numpy as np
from src.models.QuantumSLIM.Aggregators.AggregatorFirst import AggregatorFirst
from src.models.QuantumSLIM.Aggregators.AggregatorUnion import AggregatorUnion
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
data1 = [[0, 1, 0, -20, 1], [0, 1, 1, -25, 1], [0, 0, 0, 0, 1]]
self.df1 = pd.DataFrame(data=data1, columns=["a00", "a01", "a02", "energy", "num_occurrences"])
self.log_operation_fn = lambda arr: np.log1p(arr)
self.no_operation_fn = lambda arr: arr
self.exp_operation_fn = lambda arr: np.exp(arr)
data2 = [[0, 1, 0, -2, 1], [0, 1, 1, -1, 1], [0, 0, 0, 0, 1]]
self.df2 = pd.DataFrame(data=data2, columns=["a00", "a01", "a02", "energy", "num_occurrences"])
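    # Reading of the expected values (added note; derived from the sample data
    # above, not from the AggregatorUnion internals):
    # - AggregatorFirst picks the lowest-energy sample: [0, 1, 1] (energy -25).
    # - The plain union over df1 is the per-column mean of the three samples,
    #   giving [0, 2/3, 1/3].
    # - The filter-first union over df2, [0, 2/3, 0], is consistent with
    #   averaging only over items selected by the lowest-energy sample.
    # - The weighted union over df2, [0, 1.5/1.5, 0.5/1.5], is consistent with
    #   per-row weights 1, 0.5 and 0 for the energies -2, -1 and 0.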
def test_aggregator_first_class(self):
agg_first = AggregatorFirst()
res = agg_first.get_aggregated_response(self.df1)
self.assertTrue(res.tolist() == [0, 1, 1])
def test_aggregator_union_class(self):
agg_avg = AggregatorUnion(self.no_operation_fn, is_filter_first=False, is_weighted=False)
res = agg_avg.get_aggregated_response(self.df1)
self.assertTrue(res.tolist() == [0, 2/3, 1/3])
agg_avg_first = AggregatorUnion(self.no_operation_fn, is_filter_first=True, is_weighted=False)
res = agg_avg_first.get_aggregated_response(self.df2)
self.assertTrue(res.tolist() == [0, 2/3, 0])
agg_weighted_avg = AggregatorUnion(self.no_operation_fn, is_filter_first=False, is_weighted=True)
res = agg_weighted_avg.get_aggregated_response(self.df2)
self.assertTrue(res.tolist() == [0, 1.5/1.5, 0.5/1.5])
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "df42c1b7f4f77719453b94307a32a326395dc184", "size": 1745, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/models/QuantumSLIM/aggregators_test.py", "max_stars_repo_name": "tangtang95/qslim-recommender", "max_stars_repo_head_hexsha": "31b3825a171f3010890491f1b9675072a37252aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/models/QuantumSLIM/aggregators_test.py", "max_issues_repo_name": "tangtang95/qslim-recommender", "max_issues_repo_head_hexsha": "31b3825a171f3010890491f1b9675072a37252aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/models/QuantumSLIM/aggregators_test.py", "max_forks_repo_name": "tangtang95/qslim-recommender", "max_forks_repo_head_hexsha": "31b3825a171f3010890491f1b9675072a37252aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7777777778, "max_line_length": 105, "alphanum_fraction": 0.6687679083, "include": true, "reason": "import numpy", "num_tokens": 510}
|
# -*- coding: utf-8 -*-
import itertools
from copy import deepcopy
import networkx as nx
from networkx import MultiGraph
from bg.edge import BGEdge, BGEdge_JSON_SCHEMA_JSON_KEY
from bg.genome import BGGenome, BGGenome_JSON_SCHEMA_JSON_KEY
from bg.kbreak import KBreak
from bg.multicolor import Multicolor
from bg.utils import get_from_dict_with_path, merge_fragment_edge_data, recursive_dict_update
from bg.vertices import BGVertex_JSON_SCHEMA_JSON_KEY, BlockVertex, BGVertex, InfinityVertex, TaggedInfinityVertex, \
TaggedBlockVertex, TaggedVertex
__author__ = "Sergey Aganezov"
__email__ = "aganezov(at)cs.jhu.edu"
__status__ = "production"
class BreakpointGraph(object):
""" Class providing implementation of breakpoint graph data structure and most utilized operations on it.
    :class:`BreakpointGraph` is designed to work with :class:`bg.vertex.BGVertex`, :class:`bg.edge.BGEdge` and :class:`bg.multicolor.Multicolor` class instances, but is not limited to them. Extreme caution is advised when working with unexpected classes.
    Graph information storage and the low-level algorithms are powered by the NetworkX MultiGraph data structure. This class provides a wrapper around it that performs the operations and manipulations most useful from a combinatorial bioinformatics standpoint.
    The class carries the following attribute with information about the graph's structure:
    * :attr:`BreakpointGraph.bg`: instance of NetworkX MultiGraph class
    Main operations:
    * :meth:`BreakpointGraph.add_bgedge`: adds an instance of :class:`bg.edge.BGEdge` to the current :class:`BreakpointGraph`
    * :meth:`BreakpointGraph.add_edge`: adds a new :class:`bg.edge.BGEdge`, constructed from a pair of supplied vertex instances and a :class:`bg.multicolor.Multicolor` object, to the current :class:`BreakpointGraph`
    * :meth:`BreakpointGraph.get_vertex_by_name`: returns a :class:`bg.vertex.BGVertex` instance identified by the provided ``name`` argument
    * :meth:`BreakpointGraph.get_edge_by_two_vertices`: returns the first edge (order is determined by the ``key`` NetworkX MultiGraph edge attribute) between two supplied :class:`bg.vertex.BGVertex` instances
    * :meth:`BreakpointGraph.get_edges_by_vertex`: returns a generator yielding :class:`bg.edge.BGEdge` instances incident to a supplied vertex
    * :meth:`BreakpointGraph.edges_between_two_vertices`: returns a generator yielding :class:`bg.edge.BGEdge` instances located between two supplied vertices
    * :meth:`BreakpointGraph.connected_components_subgraphs`: returns a generator of :class:`BreakpointGraph` objects that represent the connected components of the current :class:`BreakpointGraph`, deep-copying (by default) all of its information
    * :meth:`BreakpointGraph.delete_edge`: deletes an edge between supplied vertices from the perspective of multi-color substitution
    * :meth:`BreakpointGraph.delete_bgedge`: deletes a supplied :class:`bg.edge.BGEdge` instance from the perspective of multi-color substitution
    * :meth:`BreakpointGraph.split_edge`: splits the edge between two supplied vertices that is most similar to a supplied :class:`bg.multicolor.Multicolor`, with respect to the provided guidance
    * :meth:`BreakpointGraph.split_bgedge`: splits a :class:`bg.edge.BGEdge` with respect to the provided guidance
    * :meth:`BreakpointGraph.split_all_edges_between_two_vertices`: splits all edges between two supplied vertices with respect to the provided guidance
    * :meth:`BreakpointGraph.split_all_edges`: splits all edges in the :class:`BreakpointGraph` with respect to the provided guidance
    * :meth:`BreakpointGraph.delete_all_edges_between_two_vertices`: deletes all edges between two given vertices outright from the underlying MultiGraph structure
    * :meth:`BreakpointGraph.merge_all_edges_between_two_vertices`: merges all edges between two given vertices into a single edge containing the combined multi-color information
    * :meth:`BreakpointGraph.merge_all_edges`: merges all edges in the current :class:`BreakpointGraph`
    * :meth:`BreakpointGraph.merge`: merges two :class:`BreakpointGraph` instances with respect to vertices, edges, and multicolors
    * :meth:`BreakpointGraph.update`: updates the current :class:`BreakpointGraph` instance by adding new :class:`bg.edge.BGEdge` instances from a supplied :class:`BreakpointGraph`
"""
    # class-wide variables that are utilized in the JSON deserialization process, when various types of vertices are obtained and processed
    # each deserialized class has a schema resolution dict specified below, and this dict can be updated on the fly to register additional JSON schemas
genomes_json_schemas = {"BGGenomeJSONSchema": BGGenome.BGGenomeJSONSchema}
edges_json_schemas = {"BGEdgeJSONSchema": BGEdge.BGEdgeJSONSchema}
vertices_json_schemas = {"BGVertexJSONSchema": BGVertex.BGVertexJSONSchema,
"BlockVertexJSONSchema": BlockVertex.BlockVertexJSONSchema,
"InfinityVertexJSONSchema": InfinityVertex.InfinityVertexJSONSchema,
"TaggedVertexJSONSchema": TaggedVertex.TaggedVertexJSONSchema,
"TaggedBlockVertexJSONSchema": TaggedBlockVertex.TaggedBlockVertexJSONSchema,
"TaggedInfinityVertexJSONSchema": TaggedInfinityVertex.TaggedInfinityVertexJSONSchema}
def __init__(self, graph=None):
""" Initialization of a :class:`BreakpointGraph` object.
        :param graph: if supplied, :class:`BreakpointGraph` is initialized with it; otherwise a brand new (empty) instance of NetworkX MultiGraph is used.
:type graph: instance of NetworkX MultiGraph is expected.
"""
self.cache = {}
self.cache_valid = {}
if graph is None:
self.bg = MultiGraph()
else:
self.bg = graph
def __edges(self, nbunch=None, keys=False):
""" Iterates over edges in current :class:`BreakpointGraph` instance.
        Returns a generator over the edges in the current :class:`BreakpointGraph` instance, producing :class:`bg.edge.BGEdge` instances that wrap the information in the underlying MultiGraph object.
        :param nbunch: a vertex to iterate over edges outgoing from; if not provided, iteration over all edges is performed.
        :type nbunch: any hashable python object
:param keys: a flag to indicate if information about unique edge's ids has to be returned alongside with edge
:type keys: ``Boolean``
:return: generator over edges in current :class:`BreakpointGraph`
:rtype: ``generator``
"""
for v1, v2, key, data in self.bg.edges(nbunch=nbunch, data=True, keys=True):
bgedge = BGEdge(vertex1=v1, vertex2=v2, multicolor=data["attr_dict"]["multicolor"],
data=data["attr_dict"]["data"])
if not keys:
yield bgedge
else:
yield bgedge, key
def edges(self, nbunch=None, keys=False):
""" Iterates over edges in current :class:`BreakpointGraph` instance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__edges`.
        :param nbunch: a vertex to iterate over edges outgoing from; if not provided, iteration over all edges is performed.
        :type nbunch: any hashable python object
:param keys: a flag to indicate if information about unique edge's ids has to be returned alongside with edge
:type keys: ``Boolean``
:return: generator over edges in current :class:`BreakpointGraph`
:rtype: ``generator``
"""
for entry in self.__edges(nbunch=nbunch, keys=keys):
yield entry
def nodes(self):
""" Iterates over nodes in current :class:`BreakpointGraph` instance.
:return: generator over nodes (vertices) in current :class:`BreakpointGraph` instance.
:rtype: ``generator``
"""
for entry in self.bg.nodes():
yield entry
def add_edge(self, vertex1, vertex2, multicolor, merge=True, data=None):
""" Creates a new :class:`bg.edge.BGEdge` object from supplied information and adds it to current instance of :class:`BreakpointGraph`.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__add_bgedge` method.
:param vertex1: first vertex instance out of two in current :class:`BreakpointGraph`
:type vertex1: any hashable object
:param vertex2: second vertex instance out of two in current :class:`BreakpointGraph`
:type vertex2: any hashable object
:param multicolor: an information about multi-colors of added edge
:type multicolor: :class:`bg.multicolor.Multicolor`
:param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
:type merge: ``Boolean``
:return: ``None``, performs inplace changes
"""
self.__add_bgedge(BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=multicolor, data=data), merge=merge)
def __add_bgedge(self, bgedge, merge=True):
""" Adds supplied :class:`bg.edge.BGEdge` object to current instance of :class:`BreakpointGraph`.
        Checks that the vertices in the supplied :class:`bg.edge.BGEdge` instance are actually present in the current :class:`BreakpointGraph` if the **merge** option is provided. Otherwise a new edge is added to the current :class:`BreakpointGraph`.
        :param bgedge: instance of :class:`bg.edge.BGEdge`, information from which is to be added to the current :class:`BreakpointGraph`
:type bgedge: :class:`bg.edge.BGEdge`
:param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
:type merge: ``Boolean``
:return: ``None``, performs inplace changes
"""
if bgedge.vertex1 in self.bg and bgedge.vertex2 in self.bg[bgedge.vertex1] and merge:
key = min(self.bg[bgedge.vertex1][bgedge.vertex2].keys())
self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["multicolor"] += bgedge.multicolor
self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["data"] = {}
else:
self.bg.add_edge(bgedge.vertex1, bgedge.vertex2, attr_dict={"multicolor": deepcopy(bgedge.multicolor),
"data": bgedge.data})
self.cache_valid["overall_set_of_colors"] = False
def add_bgedge(self, bgedge, merge=True):
""" Adds supplied :class:`bg.edge.BGEdge` object to current instance of :class:`BreakpointGraph`.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__add_bgedge` method.
        :param bgedge: instance of :class:`bg.edge.BGEdge`, information from which is to be added to the current :class:`BreakpointGraph`
:type bgedge: :class:`bg.edge.BGEdge`
:param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
:type merge: ``Boolean``
:return: ``None``, performs inplace changes
"""
self.__add_bgedge(bgedge=bgedge, merge=merge)
def __get_vertex_by_name(self, vertex_name):
""" Obtains a vertex object by supplied label
Returns a :class:`bg.vertex.BGVertex` or its subclass instance
        :param vertex_name: a label by which the vertex is identified.
:type vertex_name: any hashable python object. ``str`` expected.
:return: vertex with supplied label if present in current :class:`BreakpointGraph`, ``None`` otherwise
"""
vertex_class = BGVertex.get_vertex_class_from_vertex_name(vertex_name)
data = vertex_name.split(BlockVertex.NAME_SEPARATOR)
root_name, data = data[0], data[1:]
if issubclass(vertex_class, TaggedVertex):
tags = [entry.split(TaggedVertex.TAG_SEPARATOR) for entry in data]
for tag_entry in tags:
if len(tag_entry) == 1:
tag_entry.append(None)
elif len(tag_entry) > 2:
tag_entry[1:] = [TaggedVertex.TAG_SEPARATOR.join(tag_entry[1:])]
result = vertex_class(root_name)
for tag, value in tags:
if tag == InfinityVertex.NAME_SUFFIX and issubclass(vertex_class, InfinityVertex):
continue
result.add_tag(tag, value)
else:
result = vertex_class(root_name)
if result in self.bg:
adjacencies = self.bg[result]
for key, _ in adjacencies.items():
for ref_key, values in self.bg[key].items():
if ref_key == result:
return ref_key
return list(self.bg[result].keys())[0]
return None
def get_vertex_by_name(self, vertex_name):
""" Obtains a vertex object by supplied label
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__get_vertex_by_name`.
:param vertex_name: a vertex label it is identified by.
:type vertex_name: any hashable python object. ``str`` expected.
:return: vertex with supplied label if present in current :class:`BreakpointGraph`, ``None`` otherwise
:rtype: :class:`bg.vertices.BGVertex` or ``None``
"""
return self.__get_vertex_by_name(vertex_name=vertex_name)
def __get_edge_by_two_vertices(self, vertex1, vertex2, key=None):
""" Returns an instance of :class:`bg.edge.BBGEdge` edge between to supplied vertices (if ``key`` is supplied, returns a :class:`bg.edge.BBGEdge` instance about specified edge).
Checks that both specified vertices are in current :class:`BreakpointGraph` and then depending on ``key`` argument, creates a new :class:`bg.edge.BBGEdge` instance and incorporates respective multi-color information into it.
:param vertex1: first vertex instance out of two in current :class:`BreakpointGraph`
:type vertex1: any hashable object
:param vertex2: second vertex instance out of two in current :class:`BreakpointGraph`
:type vertex2: any hashable object
        :param key: unique identifier of the edge of interest to be retrieved from the current :class:`BreakpointGraph`
:type key: any python object. ``None`` or ``int`` is expected
        :return: edge between the two specified vertices, respecting the ``key`` argument.
:rtype: :class:`bg.edge.BGEdge`
"""
if vertex1 in self.bg and vertex2 in self.bg[vertex1]:
if key is None:
key = min(self.bg[vertex1][vertex2])
return BGEdge(vertex1=vertex1, vertex2=vertex2,
multicolor=self.bg[vertex1][vertex2][key]["attr_dict"]["multicolor"],
data=self.bg[vertex1][vertex2][key]["attr_dict"]["data"])
return None
def get_edge_by_two_vertices(self, vertex1, vertex2, key=None):
""" Returns an instance of :class:`bg.edge.BBGEdge` edge between to supplied vertices (if ``key`` is supplied, returns a :class:`bg.edge.BBGEdge` instance about specified edge).
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__get_edge_by_two_vertices`.
:param vertex1: first vertex instance out of two in current :class:`BreakpointGraph`
:type vertex1: any hashable object
:param vertex2: second vertex instance out of two in current :class:`BreakpointGraph`
:type vertex2: any hashable object
        :param key: unique identifier of the edge of interest to be retrieved from the current :class:`BreakpointGraph`
:type key: any python object. ``None`` or ``int`` is expected
        :return: edge between the two specified vertices, respecting the ``key`` argument.
:rtype: :class:`bg.edge.BGEdge`
"""
return self.__get_edge_by_two_vertices(vertex1=vertex1, vertex2=vertex2, key=key)
def __get_edges_by_vertex(self, vertex, keys=False):
""" Iterates over edges that are incident to supplied vertex argument in current :class:`BreakpointGraph`
Checks that the supplied vertex argument exists in underlying MultiGraph object as a vertex, then iterates over all edges that are incident to it. Wraps each yielded object into :class:`bg.edge.BGEdge` object.
:param vertex: a vertex object in current :class:`BreakpointGraph` object
:type vertex: any hashable object. :class:`bg.vertex.BGVertex` object is expected.
:param keys: a flag to indicate if information about unique edge's ids has to be returned alongside with edge
:type keys: ``Boolean``
        :return: generator over edges (tuples ``edge, edge_id`` if keys are specified) in the current :class:`BreakpointGraph`, wrapped in :class:`bg.edge.BGEdge`
:rtype: ``generator``
"""
if vertex in self.bg:
for vertex2, edges in self.bg[vertex].items():
for key, data in self.bg[vertex][vertex2].items():
bg_edge = BGEdge(vertex1=vertex, vertex2=vertex2, multicolor=data["attr_dict"]["multicolor"],
data=data["attr_dict"]["data"])
if keys:
yield bg_edge, key
else:
yield bg_edge
def get_edges_by_vertex(self, vertex, keys=False):
""" Iterates over edges that are incident to supplied vertex argument in current :class:`BreakpointGraph`
        Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__get_edges_by_vertex` method.
:param vertex: a vertex object in current :class:`BreakpointGraph` object
:type vertex: any hashable object. :class:`bg.vertex.BGVertex` object is expected.
:param keys: a flag to indicate if information about unique edge's ids has to be returned alongside with edge
:type keys: ``Boolean``
        :return: generator over edges (tuples ``edge, edge_id`` if keys are specified) in the current :class:`BreakpointGraph`, wrapped in :class:`bg.edge.BGEdge`
:rtype: ``generator``
"""
for entry in self.__get_edges_by_vertex(vertex=vertex, keys=keys):
yield entry
def __edges_between_two_vertices(self, vertex1, vertex2, keys=False):
""" Iterates over edges between two supplied vertices in current :class:`BreakpointGraph`
Checks that both supplied vertices are present in current breakpoint graph and then yield all edges that are located between two supplied vertices.
If keys option is specified, then not just edges are yielded, but rather pairs (edge, edge_id) are yielded
:param vertex1: a first vertex out of two, edges of interest are incident to
:type vertex1: any hashable object, :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second vertex out of two, edges of interest are incident to
:type vertex2: any hashable object, :class:`bg.vertex.BGVertex` is expected
:param keys: a flag to indicate if information about unique edge's ids has to be returned alongside with edge
:type keys: ``Boolean``
        :return: generator over edges (tuples ``edge, edge_id`` if keys are specified) between two supplied vertices in the current :class:`BreakpointGraph`, wrapped in :class:`bg.edge.BGEdge`
:rtype: ``generator``
"""
for vertex in vertex1, vertex2:
if vertex not in self.bg:
raise ValueError("Supplied vertex ({vertex_name}) is not present in current BreakpointGraph"
"".format(vertex_name=str(vertex.name)))
for bgedge, key in self.__get_edges_by_vertex(vertex=vertex1, keys=True):
if bgedge.vertex2 == vertex2:
if keys:
yield bgedge, key
else:
yield bgedge
def edges_between_two_vertices(self, vertex1, vertex2, keys=False):
""" Iterates over edges between two supplied vertices in current :class:`BreakpointGraph`
        Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__edges_between_two_vertices` method.
:param vertex1: a first vertex out of two, edges of interest are incident to
:type vertex1: any hashable object, :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second vertex out of two, edges of interest are incident to
:type vertex2: any hashable object, :class:`bg.vertex.BGVertex` is expected
:param keys: a flag to indicate if information about unique edge's ids has to be returned alongside with edge
:type keys: ``Boolean``
        :return: generator over edges (tuples ``edge, edge_id`` if keys are specified) between two supplied vertices in the current :class:`BreakpointGraph`, wrapped in :class:`bg.edge.BGEdge`
:rtype: ``generator``
"""
for entry in self.__edges_between_two_vertices(vertex1=vertex1, vertex2=vertex2, keys=keys):
yield entry
def connected_components_subgraphs(self, copy=True):
""" Iterates over connected components in current :class:`BreakpointGraph` object, and yields new instances of :class:`BreakpointGraph` with respective information deep-copied by default (week reference is possible of specified in method call).
:param copy: a flag to signal if graph information has to be deep copied while producing new :class:`BreakpointGraph` instances, of just reference to respective data has to be made.
:type copy: ``Boolean``
:return: generator over connected components in current :class:`BreakpointGraph` wrapping respective connected components into new :class:`BreakpointGraph` objects.
:rtype: ``generator``
"""
for component in nx.connected_components(self.bg):
component = self.bg.subgraph(component)
if copy:
                component = component.copy()
yield BreakpointGraph(component)
def __delete_bgedge(self, bgedge, key=None, keep_vertices=False):
""" Deletes a supplied :class:`bg.edge.BGEdge` from a perspective of multi-color substitution. If unique identifier ``key`` is not provided, most similar (from perspective of :meth:`bg.multicolor.Multicolor.similarity_score` result) edge between respective vertices is chosen for change.
        If no unique identifier for the edge to be changed is specified, the edge to be updated is determined by iterating over all edges between the vertices in the supplied :class:`bg.edge.BGEdge` instance, and the edge with the highest similarity score to the supplied one is chosen.
        Once the edge to be substituted from is determined, the substitution is performed from the perspective of :class:`bg.multicolor.Multicolor` substitution.
        If after the substitution the remaining multicolor of the respective edge is empty, the edge is deleted from the perspective of MultiGraph edge deletion.
:param bgedge: an edge to be deleted from a perspective of multi-color substitution
:type bgedge: :class:`bg.edge.BGEdge`
:param key: unique identifier of existing edges in current :class:`BreakpointGraph` instance to be changed
:type: any python object. ``int`` is expected.
        :return: ``None``, performs inplace changes.
"""
############################################################################################################
#
# determines which edge to delete
# candidate edges setup
#
############################################################################################################
if key is not None:
############################################################################################################
#
            # if an edge-specific key is provided, only the edge with that key can undergo multicolor deletion
# even if that edge is not the most suited to the edge to be deleted
#
############################################################################################################
self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["multicolor"] -= bgedge.multicolor
if len(self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["multicolor"].multicolors) == 0:
############################################################################################################
#
                # since edge deletion corresponds to multicolor substitution, one must make sure
# that no edges with empty multicolor are left in the graph
#
############################################################################################################
self.bg.remove_edge(v=bgedge.vertex1, u=bgedge.vertex2, key=key)
if keep_vertices:
self.bg.add_node(bgedge.vertex1)
self.bg.add_node(bgedge.vertex2)
else:
candidate_data, candidate_id, candidate_score = self.__determine_most_suitable_edge_for_deletion(bgedge)
if candidate_data is not None:
candidate_data["attr_dict"]["multicolor"] -= bgedge.multicolor
if len(self.bg[bgedge.vertex1][bgedge.vertex2][candidate_id]["attr_dict"][
"multicolor"].multicolors) == 0:
self.bg.remove_edge(v=bgedge.vertex1, u=bgedge.vertex2, key=candidate_id)
if keep_vertices:
self.bg.add_node(bgedge.vertex1)
self.bg.add_node(bgedge.vertex2)
self.cache_valid["overall_set_of_colors"] = False
def __determine_most_suitable_edge_for_deletion(self, bgedge):
candidate_id = None
candidate_score = -1
candidate_data = None
for v1, v2, key, data in self.bg.edges(nbunch=bgedge.vertex1, data=True, keys=True):
############################################################################################################
#
# iterate over all edges and determine which edge has a multicolor most related to the provided for deletion edge
#
############################################################################################################
if v2 == bgedge.vertex2:
score = Multicolor.similarity_score(bgedge.multicolor, data["attr_dict"]["multicolor"])
if score > candidate_score:
candidate_id = key
candidate_data = data
candidate_score = score
return candidate_data, candidate_id, candidate_score
def delete_edge(self, vertex1, vertex2, multicolor, key=None):
""" Creates a new :class:`bg.edge.BGEdge` instance from supplied information and deletes it from a perspective of multi-color substitution. If unique identifier ``key`` is not provided, most similar (from perspective of :meth:`bg.multicolor.Multicolor.similarity_score` result) edge between respective vertices is chosen for change.
        Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__delete_bgedge` method.
:param vertex1: a first vertex out of two the edge to be deleted is incident to
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second vertex out of two the edge to be deleted is incident to
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param multicolor: a multi-color to find most suitable edge to be deleted
:type multicolor: :class:`bg.multicolor.Multicolor`
:param key: unique identifier of existing edges in current :class:`BreakpointGraph` instance to be changed
:type: any python object. ``int`` is expected.
        :return: ``None``, performs inplace changes.
"""
self.__delete_bgedge(bgedge=BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=multicolor), key=key)
def delete_bgedge(self, bgedge, key=None):
""" Deletes a supplied :class:`bg.edge.BGEdge` from a perspective of multi-color substitution. If unique identifier ``key`` is not provided, most similar (from perspective of :meth:`bg.multicolor.Multicolor.similarity_score` result) edge between respective vertices is chosen for change.
        Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__delete_bgedge` method.
:param bgedge: an edge to be deleted from a perspective of multi-color substitution
:type bgedge: :class:`bg.edge.BGEdge`
:param key: unique identifier of existing edges in current :class:`BreakpointGraph` instance to be changed
:type: any python object. ``int`` is expected.
        :return: ``None``, performs inplace changes.
"""
self.__delete_bgedge(bgedge=bgedge, key=key)
def __split_bgedge(self, bgedge, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True, key=None):
""" Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance.
        If no unique identifier for the edge to be changed is specified, the edge to be split is determined by iterating over all edges between the vertices in the supplied :class:`bg.edge.BGEdge` instance, and the edge with the highest similarity score to the supplied one is chosen.
        Once the edge to be split is determined, the split is performed from the perspective of a :class:`bg.multicolor.Multicolor` split.
The originally detected edge is deleted, and new edges containing information about multi-colors after splitting, are added to the current :class:`BreakpointGraph`.
:param bgedge: an edge to find most "similar to" among existing edges for a split
:type bgedge: :class:`bg.edge.BGEdge`
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split
:type guidance: iterable where each entry is iterable with colors entries
        :param duplication_splitting: flag (**not** currently implemented) for color-based splitting to take into account the multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
"""
candidate_id = None
candidate_score = 0
candidate_data = None
if key is not None:
new_multicolors = Multicolor.split_colors(
multicolor=self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["multicolor"],
guidance=guidance, sorted_guidance=sorted_guidance,
account_for_color_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance)
self.__delete_bgedge(bgedge=BGEdge(vertex1=bgedge.vertex1, vertex2=bgedge.vertex2,
multicolor=self.bg[bgedge.vertex1][bgedge.vertex2][key]["attr_dict"]["multicolor"]),
key=key)
for multicolor in new_multicolors:
self.__add_bgedge(BGEdge(vertex1=bgedge.vertex1, vertex2=bgedge.vertex2, multicolor=multicolor),
merge=False)
else:
for v1, v2, key, data in self.bg.edges(nbunch=bgedge.vertex1, data=True, keys=True):
if v2 == bgedge.vertex2:
score = Multicolor.similarity_score(bgedge.multicolor, data["attr_dict"]["multicolor"])
if score > candidate_score:
candidate_id = key
candidate_data = data
candidate_score = score
if candidate_data is not None:
new_multicolors = Multicolor.split_colors(multicolor=candidate_data["attr_dict"]["multicolor"],
guidance=guidance, sorted_guidance=sorted_guidance,
account_for_color_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance)
self.__delete_bgedge(bgedge=BGEdge(vertex1=bgedge.vertex1, vertex2=bgedge.vertex2,
multicolor=candidate_data["attr_dict"]["multicolor"]),
key=candidate_id)
for multicolor in new_multicolors:
self.__add_bgedge(BGEdge(vertex1=bgedge.vertex1, vertex2=bgedge.vertex2,
multicolor=multicolor), merge=False)
def split_edge(self, vertex1, vertex2, multicolor, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True, key=None):
""" Splits an edge in current :class:`BreakpointGraph` most similar to supplied data (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param vertex1: a first vertex out of two the edge to be split is incident to
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second vertex out of two the edge to be split is incident to
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param multicolor: a multi-color to find most suitable edge to be split
:type multicolor: :class:`bg.multicolor.Multicolor`
        :param duplication_splitting: flag (**not** currently implemented) for color-based splitting to take into account the multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
"""
self.__split_bgedge(bgedge=BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=multicolor), guidance=guidance,
sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance,
key=key)
def split_bgedge(self, bgedge, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True,
key=None):
""" Splits a :class:`bg.edge.BGEdge` in current :class:`BreakpointGraph` most similar to supplied one (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param bgedge: an edge to find most "similar to" among existing edges for a split
:type bgedge: :class:`bg.edge.BGEdge`
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` object to be split
:type guidance: iterable where each entry is iterable with colors entries
        :param duplication_splitting: flag (**not** currently implemented) for color-based splitting to take into account the multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
"""
self.__split_bgedge(bgedge=bgedge, guidance=guidance, sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance,
key=key)
def __split_all_edges_between_two_vertices(self, vertex1, vertex2, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True):
""" Splits all edges between two supplied vertices in current :class:`BreakpointGraph` instance with respect to the provided guidance.
Iterates over all edges between two supplied vertices and splits each one of them with respect to the guidance.
:param vertex1: a first out of two vertices edges between which are to be split
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second out of two vertices edges between which are to be split
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` objects to be split
:type guidance: iterable where each entry is iterable with colors entries
:return: ``None``, performs inplace changes
"""
edges_to_be_split_keys = [key for v1, v2, key in self.bg.edges(nbunch=vertex1, keys=True) if v2 == vertex2]
for key in edges_to_be_split_keys:
self.__split_bgedge(BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=None), guidance=guidance,
sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance,
key=key)
def split_all_edges_between_two_vertices(self, vertex1, vertex2, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True):
""" Splits all edges between two supplied vertices in current :class:`BreakpointGraph` instance with respect to the provided guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_all_edges_between_two_vertices` method.
:param vertex1: a first out of two vertices edges between which are to be split
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second out of two vertices edges between which are to be split
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` objects to be split
:type guidance: iterable where each entry is iterable with colors entries
:return: ``None``, performs inplace changes
"""
self.__split_all_edges_between_two_vertices(vertex1=vertex1, vertex2=vertex2, guidance=guidance,
sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance)
def split_all_edges(self, guidance=None, sorted_guidance=False, account_for_colors_multiplicity_in_guidance=True):
""" Splits all edge in current :class:`BreakpointGraph` instance with respect to the provided guidance.
        Iterates over all possible distinct pairs of vertices in the current :class:`BreakpointGraph` instance and splits all edges between such pairs with respect to the provided guidance.
:param guidance: a guidance for underlying :class:`bg.multicolor.Multicolor` objects to be split
:type guidance: iterable where each entry is iterable with colors entries
:return: ``None``, performs inplace changes
"""
vertex_pairs = [(edge.vertex1, edge.vertex2) for edge in self.edges()]
for v1, v2 in vertex_pairs:
self.__split_all_edges_between_two_vertices(vertex1=v1, vertex2=v2, guidance=guidance,
sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance)
def __delete_all_bgedges_between_two_vertices(self, vertex1, vertex2):
""" Deletes all edges between two supplied vertices
:param vertex1: a first out of two vertices edges between which are to be deleted
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second out of two vertices edges between which are to be deleted
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:return: ``None``, performs inplace changes
"""
edges_to_be_deleted_with_keys = [(key, data) for v1, v2, key, data in self.bg.edges(nbunch=vertex1, keys=True,
data=True) if v2 == vertex2]
for key, data in edges_to_be_deleted_with_keys:
self.__delete_bgedge(BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=data["attr_dict"]["multicolor"]),
key=key)
def delete_all_edges_between_two_vertices(self, vertex1, vertex2):
""" Deletes all edges between two supplied vertices
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__delete_all_bgedges_between_two_vertices` method.
:param vertex1: a first out of two vertices edges between which are to be deleted
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second out of two vertices edges between which are to be deleted
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:return: ``None``, performs inplace changes
"""
self.__delete_all_bgedges_between_two_vertices(vertex1=vertex1, vertex2=vertex2)
def __merge_all_bgedges_between_two_vertices(self, vertex1, vertex2):
""" Merges all edge between two supplied vertices into a single edge from a perspective of multi-color merging.
:param vertex1: a first out of two vertices edges between which are to be merged together
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second out of two vertices edges between which are to be merged together
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:return: ``None``, performs inplace changes
"""
############################################################################################################
#
# no actual merging is performed, but rather all edges between two vertices are deleted
# and then added with a merge argument set to true
#
############################################################################################################
edges_multicolors = [deepcopy(data["attr_dict"]["multicolor"]) for v1, v2, data in
self.bg.edges(nbunch=vertex1, data=True) if v2 == vertex2]
self.__delete_all_bgedges_between_two_vertices(vertex1=vertex1, vertex2=vertex2)
for multicolor in edges_multicolors:
self.__add_bgedge(BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=multicolor), merge=True)
def merge_all_edges_between_two_vertices(self, vertex1, vertex2):
""" Merges all edge between two supplied vertices into a single edge from a perspective of multi-color merging.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__merge_all_bgedges_between_two_vertices`
:param vertex1: a first out of two vertices edges between which are to be merged together
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second out of two vertices edges between which are to be merged together
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:return: ``None``, performs inplace changes
"""
self.__merge_all_bgedges_between_two_vertices(vertex1=vertex1, vertex2=vertex2)
def merge_all_edges(self):
""" Merges all edges in a current :class`BreakpointGraph` instance between same pairs of vertices into a single edge from a perspective of multi-color merging.
Iterates over all possible pairs of vertices in current :class:`BreakpointGraph` and merges all edges between respective pairs.
:return: ``None``, performs inplace changes
"""
        pairs_of_vertices = [(edge.vertex1, edge.vertex2) for edge in self.edges()]
        for v1, v2 in pairs_of_vertices:
############################################################################################################
#
# we iterate over all pairs of vertices in the given graph and merge edges between them
#
############################################################################################################
self.__merge_all_bgedges_between_two_vertices(vertex1=v1, vertex2=v2)
@classmethod
def merge(cls, breakpoint_graph1, breakpoint_graph2, merge_edges=False):
""" Merges two given instances of :class`BreakpointGraph` into a new one, that gather all available information from both supplied objects.
        Depending on the ``merge_edges`` flag, while the two data structures are being merged, edges between similar vertices can be merged during the creation of the resulting :class:`BreakpointGraph` object.
Accounts for subclassing.
:param breakpoint_graph1: a first out of two :class`BreakpointGraph` instances to gather information from
:type breakpoint_graph1: :class`BreakpointGraph`
:param breakpoint_graph2: a second out of two :class`BreakpointGraph` instances to gather information from
:type breakpoint_graph2: :class`BreakpointGraph`
        :param merge_edges: flag to indicate if edges in the new merged :class:`BreakpointGraph` object have to be merged between the same vertices, or if the splitting from the supplied graphs shall be preserved.
:type merge_edges: ``Boolean``
:return: a new breakpoint graph object that contains all information gathered from both supplied breakpoint graphs
:rtype: :class`BreakpointGraph`
"""
result = cls()
for bgedge in breakpoint_graph1.edges():
result.__add_bgedge(bgedge=bgedge, merge=merge_edges)
for bgedge in breakpoint_graph2.edges():
result.__add_bgedge(bgedge=bgedge, merge=merge_edges)
return result
def __update(self, breakpoint_graph, merge_edges=False):
""" Updates a current :class`BreakpointGraph` object with information from a supplied :class`BreakpointGraph` instance.
        Depending on the ``merge_edges`` flag, while the current :class:`BreakpointGraph` object is being updated, edges between similar vertices can be merged into already existing ones.
:param breakpoint_graph: a breakpoint graph to extract information from, which will be then added to the current
:type breakpoint_graph: :class`BreakpointGraph`
:param merge_edges: flag to indicate if edges to be added to current :class`BreakpointGraph` object are to be merged to already existing ones
:type merge_edges: ``Boolean``
:return: ``None``, performs inplace changes
"""
for bgedge in breakpoint_graph.edges():
self.__add_bgedge(bgedge=deepcopy(bgedge), merge=merge_edges)
def update(self, breakpoint_graph, merge_edges=False):
""" Updates a current :class`BreakpointGraph` object with information from a supplied :class`BreakpointGraph` instance.
Proxoes a call to :meth:`BreakpointGraph._BreakpointGraph__update` method.
:param breakpoint_graph: a breakpoint graph to extract information from, which will be then added to the current
:type breakpoint_graph: :class:`BreakpointGraph`
:param merge_edges: flag to indicate if edges to be added to current :class`BreakpointGraph` object are to be merged to already existing ones
:type merge_edges: ``Boolean``
:return: ``None``, performs inplace changes
"""
self.__update(breakpoint_graph=breakpoint_graph,
merge_edges=merge_edges)
def apply_kbreak(self, kbreak, merge=True):
""" Check validity of supplied k-break and then applies it to current :class:`BreakpointGraph`
Only :class:`bg.kbreak.KBreak` (or its heirs) instances are allowed as ``kbreak`` argument.
KBreak must correspond to the valid kbreak and, since some changes to its internals might have been done since its creation, a validity check in terms of starting/resulting edges is performed.
All vertices in supplied KBreak (except for paired infinity vertices) must be present in current :class:`BreakpointGraph`.
For all supplied pairs of vertices (except for paired infinity vertices), there must be edges between such pairs of vertices, at least one of which must contain a multicolor matching a multicolor of supplied kbreak.
Edges of specified in kbreak multicolor are deleted between supplied pairs of vertices in kbreak.start_edges (except for paired infinity vertices).
New edges of specified in kbreak multicolor are added between all pairs of vertices in kbreak.result_edges (except for paired infinity vertices).
If after the kbreak application there is an infinity vertex, that now has no edges incident to it, it is deleted form the current :class:`BreakpointGraph`.
:param kbreak: a k-break to be applied to the current :class:`BreakpointGraph`
:type kbreak: :class:`bg.kbreak.KBreak`
:param merge: a flag indicating how edges created by the k-break will be added to the current :class:`BreakpointGraph`
:type merge: ``Boolean``
:return: nothing, performs inplace changes
:rtype: ``None``
:raises: ``ValueError``, ``TypeError``
"""
############################################################################################################
#
# k-break must be valid to be applied
#
############################################################################################################
vertices = {}
edge_data = {}
if not isinstance(kbreak, KBreak):
raise TypeError("Only KBreak and derivatives are allowed as kbreak argument")
if not KBreak.valid_kbreak_matchings(kbreak.start_edges, kbreak.result_edges):
raise ValueError("Supplied KBreak is not valid form perspective of starting/resulting sets of vertices")
for vertex1, vertex2 in kbreak.start_edges:
if vertex1.is_infinity_vertex and vertex2.is_infinity_vertex:
############################################################################################################
#
# when we encounter a fully infinity edge (both vertices are infinity vertices)
# we shall not check if they are present in the current graph, because that portion of a kbreak is artificial
#
############################################################################################################
continue
if vertex1 not in self.bg or vertex2 not in self.bg:
raise ValueError("Supplied KBreak targets vertices (`{v1}` and `{v2}`) at least one of which "
"does not exist in current BreakpointGraph"
"".format(v1=vertex1.name, v2=vertex2.name))
for vertex1, vertex2 in kbreak.start_edges:
if vertex1.is_infinity_vertex and vertex2.is_infinity_vertex:
continue
for bgedge in self.__edges_between_two_vertices(vertex1=vertex1, vertex2=vertex2):
############################################################################################################
#
# at least one edge between supplied pair of vertices must contain a multicolor that is specified for the kbreak
#
############################################################################################################
if kbreak.multicolor <= bgedge.multicolor:
break
else:
raise ValueError("Some targeted by kbreak edge with specified multicolor does not exists")
for vertex1, vertex2 in kbreak.start_edges:
if vertex1.is_infinity_vertex and vertex2.is_infinity_vertex:
continue
v1 = self.__get_vertex_by_name(vertex_name=vertex1.name)
vertices[v1] = v1
v2 = self.__get_vertex_by_name(vertex_name=vertex2.name)
vertices[v2] = v2
bgedge = BGEdge(vertex1=v1, vertex2=v2, multicolor=kbreak.multicolor)
candidate_data, candidate_id, candidate_score = self.__determine_most_suitable_edge_for_deletion(
bgedge=bgedge)
data = candidate_data["attr_dict"]["data"]
edge_data[v1] = data
edge_data[v2] = data
self.__delete_bgedge(bgedge=bgedge, keep_vertices=True)
for vertex_set in kbreak.start_edges:
for vertex in vertex_set:
if vertex.is_infinity_vertex and vertex in self.bg:
############################################################################################################
#
# after the first portion of a kbreak is performed one must make sure we don't leave any infinity vertices
# that have no edges incident to them, as an infinity vertex is a special artificial vertex
# and it has meaning only if there are edges going to / from it
#
############################################################################################################
if len(list(self.get_edges_by_vertex(vertex=vertex))) == 0:
self.bg.remove_node(vertex)
for vertex1, vertex2 in kbreak.result_edges:
if vertex1.is_infinity_vertex and vertex2.is_infinity_vertex:
############################################################################################################
#
# if we encounter a pair of infinity vertices in result edges set, we shall not add them
# as at least a part of the kbreak corresponded to a fusion
# and those infinity edges on their own won't have any meaning
#
############################################################################################################
continue
origin = kbreak.data.get("origin", None)
v1 = vertices.get(vertex1, vertex1)
v2 = vertices.get(vertex2, vertex2)
bg_edge = BGEdge(vertex1=v1, vertex2=v2, multicolor=kbreak.multicolor)
if "origin" in bg_edge.data:
bg_edge.data["origin"] = origin
if kbreak.is_a_fusion:
edge1_data = edge_data[v1]
edge2_data = edge_data[v2]
merged_edge_fragment_data = merge_fragment_edge_data(edge1_data["fragment"], edge2_data["fragment"])
result_edge_data = {}
recursive_dict_update(result_edge_data, edge1_data)
recursive_dict_update(result_edge_data, edge2_data)
recursive_dict_update(result_edge_data, {"fragment": merged_edge_fragment_data})
recursive_dict_update(bg_edge.data, result_edge_data)
self.__add_bgedge(bg_edge, merge=merge)
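# Hedged illustration of applying a 2-break with the method above (vertex names
# are hypothetical; the exact KBreak constructor is defined in bg.kbreak):
#
#   two_break = KBreak(start_edges=[(v1, v2), (v3, v4)],
#                      result_edges=[(v1, v3), (v2, v4)],
#                      multicolor=Multicolor(BGGenome("A")))
#   graph.apply_kbreak(two_break, merge=True)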
def to_json(self, schema_info=True):
""" JSON serialization method that account for all information-wise important part of breakpoint graph
"""
genomes = set()
result = {}
result["edges"] = []
for bgedge in self.edges():
genomes |= bgedge.multicolor.colors
result["edges"].append(bgedge.to_json(schema_info=schema_info))
result["vertices"] = [bgvertex.to_json(schema_info=schema_info) for bgvertex in self.nodes()]
result["genomes"] = [bggenome.to_json(schema_info=schema_info) for bggenome in genomes]
return result
@classmethod
def from_json(cls, data, genomes_data=None, genomes_deserialization_required=True, merge=False):
""" A JSON deserialization operation, that recovers a breakpoint graph from its JSON representation
as information about genomes, that are encoded in breakpoint graph might be available somewhere else, but not the
json object, there is an option to provide it and omit encoding information about genomes.
"""
result = cls()
vertices_dict = {}
genomes_dict = genomes_data if genomes_data is not None and not genomes_deserialization_required else None
if genomes_dict is None:
############################################################################################################
#
# if we need to recover genomes information from breakpoint graph json object
# we are happy to do that
#
############################################################################################################
genomes_dict = {}
try:
source = genomes_data if genomes_data is not None and genomes_deserialization_required else data[
"genomes"]
except KeyError as exc:
raise ValueError("Error during breakpoint graph deserialization. No \"genomes\" information found") from exc
for g_dict in source:
############################################################################################################
#
# if explicitly specified in genome json object, it can be decoded using provided schema name,
# of course the decoding breakpoint graph object has to be aware of such a schema
# (it has to be specified in the `genomes_json_schemas` class wide dict)
#
############################################################################################################
schema_name = g_dict.get(BGGenome_JSON_SCHEMA_JSON_KEY, None)
schema_class = None if schema_name is None else cls.genomes_json_schemas.get(schema_name, None)
genomes_dict[g_dict["g_id"]] = BGGenome.from_json(data=g_dict, json_schema_class=schema_class)
if "vertices" not in data:
############################################################################################################
#
# breakpoint graph can not be decoded without having information about vertices explicitly
# as vertices are referenced in edges object, rather than explicitly provided
#
############################################################################################################
raise ValueError(
"Error during breakpoint graph deserialization. \"vertices\" key is not present in json object")
for vertex_dict in data["vertices"]:
############################################################################################################
#
# if explicitly specified in vertex json object, it can be decoded using provided schema name,
# of course the decoding breakpoint graph object has to be aware of such a schema
# (it has to be specified in the `vertices_json_schemas` class wide dict)
#
############################################################################################################
schema_name = vertex_dict.get(BGVertex_JSON_SCHEMA_JSON_KEY, None)
schema_class = None if schema_name is None else cls.vertices_json_schemas.get(schema_name, None)
try:
############################################################################################################
#
# we try to recover a specific vertex class based on its name.
# it does not override the schema-based behaviour
# but provides a correct default schema for a specific vertex type
#
############################################################################################################
vertex_class = BGVertex.get_vertex_class_from_vertex_name(vertex_dict["name"])
except KeyError:
vertex_class = BGVertex
vertices_dict[vertex_dict["v_id"]] = vertex_class.from_json(data=vertex_dict,
json_schema_class=schema_class)
for edge_dict in data["edges"]:
############################################################################################################
#
# if explicitly specified in edge json object, it can be decoded using provided schema name,
# of course the decoding breakpoint graph object has to be aware of such a schema
# (it has to be specified in the `edges_json_schemas` class wide dict)
#
############################################################################################################
schema_name = edge_dict.get(BGEdge_JSON_SCHEMA_JSON_KEY, None)
schema = None if schema_name is None else cls.edges_json_schemas.get(schema_name, None)
edge = BGEdge.from_json(data=edge_dict, json_schema_class=schema)
try:
edge.vertex1 = vertices_dict[edge.vertex1]
edge.vertex2 = vertices_dict[edge.vertex2]
except KeyError:
############################################################################################################
#
# as edge references a pair of vertices, we must be sure respective vertices were decoded
#
############################################################################################################
raise ValueError(
"Error during breakpoint graph deserialization. Deserialized edge references non-present vertex")
if len(edge.multicolor) == 0:
############################################################################################################
#
# edges with empty multicolor are not permitted in breakpoint graphs
#
############################################################################################################
raise ValueError(
"Error during breakpoint graph deserialization. Empty multicolor for deserialized edge")
try:
edge.multicolor = Multicolor(*[genomes_dict[g_id] for g_id in edge.multicolor])
except KeyError:
raise ValueError(
"Error during breakpoint graph deserialization. Deserialized edge references a non-present "
"genome in its multicolor")
result.__add_bgedge(edge, merge=merge)
return result
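# Serialization round-trip sketch for the two methods above (assumes `graph`
# was populated elsewhere):
#
#   json_obj = graph.to_json(schema_info=True)
#   restored = BreakpointGraph.from_json(json_obj)
#
# Per the code above, already-deserialized genomes (a dict keyed by genome id)
# can be supplied directly:
#   restored = BreakpointGraph.from_json(json_obj, genomes_data=genomes_by_id,
#                                        genomes_deserialization_required=False)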
def get_overall_set_of_colors(self):
if "overall_set_of_colors" not in self.cache_valid or not self.cache_valid["overall_set_of_colors"]:
self.cache["overall_set_of_colors"] = {color for bg_edge in self.edges() for color in
bg_edge.multicolor.colors}
self.cache_valid["overall_set_of_colors"] = True
return self.cache["overall_set_of_colors"]
def get_genome_graph(self, color):
result = BreakpointGraph()
mc = Multicolor(color)
for edge in self.edges():
if mc <= edge.multicolor:
result.__add_bgedge(bgedge=BGEdge(vertex1=edge.vertex1, vertex2=edge.vertex2,
multicolor=mc, data=edge.data))
return result
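# e.g. graph.get_genome_graph(BGGenome("A")) keeps only the edges whose
# multicolor contains genome A, each carried over with multicolor {A} and the
# original edge data.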
def get_blocks_order(self):
genome = self.get_overall_set_of_colors().pop()
result = {genome: []}
visited_vertices = set()
for vertex in self.nodes():
if vertex in visited_vertices:
continue
visited_vertices.add(vertex)
chr_type_f, fragment_part_forward = self._traverse_blocks_forward_from_vertex(vertex=vertex,
visited_vertices=visited_vertices)
chr_type_r, fragment_part_reverse = self._traverse_blocks_reverse_from_vertex(vertex=vertex,
visited_vertices=visited_vertices)
if chr_type_f != chr_type_r:
raise Exception("During the gene order sequence traversal we got a conflicted situation. "
"Most probably case for this to happen is to have a genome with non-unique gene content")
if chr_type_f == "$":
fragment = fragment_part_reverse + fragment_part_forward
else:
fragment = fragment_part_forward if len(fragment_part_forward) > len(
fragment_part_reverse) else fragment_part_reverse
result[genome].append((chr_type_f, fragment))
return result
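# Output shape sketch for the method above: {genome: [(chr_type, blocks), ...]},
# where chr_type is "$" for a linear fragment (the traversal ended at an
# irregular/infinity vertex) or "@" for a circular one, and blocks is a list of
# (sign, block_name) pairs, e.g. [("+", "a"), ("-", "b")].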
def _traverse_blocks_from_vertex(self, vertex, visited_vertices, direction):
result = []
current_vertex = vertex
visited_vertices.add(current_vertex)
if current_vertex.is_irregular_vertex:
edge = list(self.get_edges_by_vertex(vertex=current_vertex))[0]
current_vertex = edge.vertex1 if edge.vertex1 != current_vertex else edge.vertex2
visited_vertices.add(current_vertex)
if current_vertex.is_tail_vertex and direction == "forward" or current_vertex.is_head_vertex and direction == "reverse":
result.append(("+", current_vertex.block_name))
current_vertex = current_vertex.mate_vertex
visited_vertices.add(current_vertex)
edge = list(self.get_edges_by_vertex(vertex=current_vertex))[0]
current_vertex = edge.vertex1 if edge.vertex1 != current_vertex else edge.vertex2
while current_vertex not in visited_vertices and current_vertex.is_regular_vertex:
visited_vertices.add(current_vertex)
if direction == "forward":
sign = "+" if current_vertex.is_tail_vertex else "-"
elif direction == "reverse":
sign = "-" if current_vertex.is_tail_vertex else "+"
else:
sign = "*"
result.append((sign, current_vertex.block_name))
current_vertex = current_vertex.mate_vertex
visited_vertices.add(current_vertex)
edge = list(self.get_edges_by_vertex(vertex=current_vertex))[0]
current_vertex = edge.vertex1 if edge.vertex1 != current_vertex else edge.vertex2
visited_vertices.add(current_vertex)
if current_vertex.is_irregular_vertex:
chr_type = "$"
else:
chr_type = "@"
if direction == "reverse":
result = result[::-1]
return chr_type, result
def _traverse_blocks_forward_from_vertex(self, vertex, visited_vertices):
return self._traverse_blocks_from_vertex(vertex=vertex, visited_vertices=visited_vertices, direction="forward")
def _traverse_blocks_reverse_from_vertex(self, vertex, visited_vertices):
return self._traverse_blocks_from_vertex(vertex=vertex, visited_vertices=visited_vertices, direction="reverse")
def _traverse_fragments_forward_from_vertex(self, vertex, visited_vertices):
return self._traverse_fragments_from_vertex(vertex=vertex, visited_vertices=visited_vertices,
direction="forward")
def _traverse_fragments_reverse_from_vertex(self, vertex, visited_vertices):
return self._traverse_fragments_from_vertex(vertex=vertex, visited_vertices=visited_vertices,
direction="reverse")
def has_edge(self, vertex1, vertex2):
return self.bg.has_edge(u=vertex1, v=vertex2)
def get_condensed_edge(self, vertex1, vertex2):
if not self.has_edge(vertex1=vertex1, vertex2=vertex2):
return None
result = BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=Multicolor())
for edge in self.__edges_between_two_vertices(vertex1=vertex1, vertex2=vertex2):
result.multicolor += edge.multicolor
return result
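# e.g. if vertices v1 and v2 are connected by two parallel edges with
# multicolors {A} and {B}, get_condensed_edge(v1, v2) returns a single BGEdge
# with multicolor {A, B}; it returns None when no edge exists between them.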
def get_fragments_orders(self):
genome = self.get_overall_set_of_colors().pop()
result = {genome: []}
visited_vertices = set()
ivs = (v for v in self.nodes() if v.is_irregular_vertex)
rvs = (v for v in self.nodes() if v.is_regular_vertex)
for vertex in itertools.chain(ivs, rvs):
if vertex in visited_vertices:
continue
chr_type_f, fragments_order_part_forward = self._traverse_fragments_forward_from_vertex(vertex=vertex,
visited_vertices=visited_vertices)
chr_type_r, fragments_order_part_reverse = self._traverse_fragments_reverse_from_vertex(vertex=vertex,
visited_vertices=visited_vertices)
if chr_type_f != chr_type_r:
raise Exception("During the fragment order sequence traversal we got a conflicted situation. "
"Most probably case for this to happen is to have a genome with non-unique gene content")
if chr_type_f == "$":
if len(fragments_order_part_forward) == 0:
fragment = fragments_order_part_reverse
elif len(fragments_order_part_reverse) == 0:
fragment = fragments_order_part_forward
else:
coincide = fragments_order_part_reverse[-1][0] == fragments_order_part_forward[0][0]
coincide &= fragments_order_part_reverse[-1][1] == fragments_order_part_forward[0][1]
if coincide:
fragment = fragments_order_part_reverse[:-1] + fragments_order_part_forward
else:
fragment = fragments_order_part_reverse + fragments_order_part_forward
else:
fragment = fragments_order_part_forward if len(fragments_order_part_forward) > len(
fragments_order_part_reverse) else fragments_order_part_reverse
if len(fragment) > 1 and fragment[-1][0] == fragment[0][0] and fragment[-1][1] == fragment[0][1]:
fragment = fragment[:-1]
result[genome].append((chr_type_f, fragment))
return result
def _traverse_fragments_from_vertex(self, vertex, visited_vertices, direction):
result = []
current_vertex = vertex
current_fragment_name = None
current_fragment_orientation = None
if current_vertex.is_tail_vertex and direction == "forward" or current_vertex.is_head_vertex and direction == "reverse":
current_vertex = current_vertex.mate_vertex
elif not (current_vertex.is_irregular_vertex and current_vertex in visited_vertices):
visited_vertices.add(current_vertex)
edge = list(self.get_edges_by_vertex(vertex=current_vertex))[0]
fragment_names = get_from_dict_with_path(source_dict=edge.data, key="name", path=["fragment"])
if not isinstance(fragment_names, list):
fragment_names = [fragment_names]
fragment_orientations = self._get_fragment_to_edge_orientation(current_vertex=current_vertex, edge=edge)
fragment_orientations = self.update_orientation_with_direction(orientation=fragment_orientations,
direction=direction)
for name, orientation in zip(fragment_names, fragment_orientations):
new_encounter = current_fragment_name != name or current_fragment_orientation != orientation
if name not in [None, ""] and orientation not in [None, ""] and new_encounter:
current_fragment_name = name
current_fragment_orientation = orientation
result.append((current_fragment_orientation, current_fragment_name))
current_vertex = edge.vertex1 if edge.vertex1 != current_vertex else edge.vertex2
visited_vertices.add(current_vertex)
if not current_vertex.is_irregular_vertex:
current_vertex = current_vertex.mate_vertex
while current_vertex not in visited_vertices and not current_vertex.is_irregular_vertex:
visited_vertices.add(current_vertex)
edge = list(self.get_edges_by_vertex(vertex=current_vertex))[0]
fragment_names = get_from_dict_with_path(source_dict=edge.data, key="name", path=["fragment"])
if not isinstance(fragment_names, list):
fragment_names = [fragment_names]
fragment_orientations = self._get_fragment_to_edge_orientation(current_vertex=current_vertex, edge=edge)
fragment_orientations = self.update_orientation_with_direction(orientation=fragment_orientations,
direction=direction)
if current_fragment_name == fragment_names[-1]:
fragment_names = fragment_names[::-1]
fragment_orientations = fragment_orientations[::-1]
for name, orientation in zip(fragment_names, fragment_orientations):
initial_state = current_fragment_name is None or current_fragment_orientation is None
new_encounter = current_fragment_name != name or current_fragment_orientation != orientation
new_encounter &= name not in [None, ""] and orientation not in [None, ""]
if initial_state or new_encounter:
current_fragment_name = name
current_fragment_orientation = orientation
if current_fragment_name not in [None, ""] and current_fragment_orientation not in [None, ""]:
result.append((current_fragment_orientation, current_fragment_name))
current_vertex = edge.vertex1 if edge.vertex1 != current_vertex else edge.vertex2
if current_vertex.is_irregular_vertex:
break
visited_vertices.add(current_vertex)
current_vertex = current_vertex.mate_vertex
visited_vertices.add(current_vertex)
if current_vertex.is_irregular_vertex:
chr_type = "$"
else:
chr_type = "@"
if direction == "reverse":
result = result[::-1]
return chr_type, result
@staticmethod
def _get_fragment_to_edge_orientation(current_vertex, edge):
v1, v2 = (edge.vertex1, edge.vertex2) if edge.vertex1 == current_vertex else (edge.vertex2, edge.vertex1)
forward_orientation = get_from_dict_with_path(source_dict=edge.data, key="forward_orientation",
path=["fragment"])
if isinstance(forward_orientation, list):
return ["+" if BreakpointGraph._forward_orientation(v1, v2, orientation) else "-" for orientation in
forward_orientation]
else:
return ["+" if BreakpointGraph._forward_orientation(v1, v2, forward_orientation) else "-"]
@staticmethod
def _forward_orientation(v1, v2, forward_orientation):
if forward_orientation is None:
return True
left_v = v1 not in forward_orientation or forward_orientation[0] == v1
right_v = v2 not in forward_orientation or forward_orientation[1] == v2
return left_v and right_v
@staticmethod
def update_orientation_with_direction(orientation, direction):
result = []
for entry in orientation:
if direction == "forward":
result.append(entry)
else:
result.append("-" if entry == "+" else "+")
return result
class BGConnectedComponentFilter(object):
def __init__(self):
self.name = None
def accept_connected_component(self, cc, breakpoint_graph=None):
return True
class CompleteMultiEdgeConnectedComponentFilter(BGConnectedComponentFilter):
def __init__(self):
super(CompleteMultiEdgeConnectedComponentFilter, self).__init__()
self.name = "Complete ME filter"
def accept_connected_component(self, cc, breakpoint_graph=None):
if len(list(cc.nodes())) != 2:
return True
genomes_cnt = len(breakpoint_graph.get_overall_set_of_colors())
edges_genomes_cnt = len({color for edge in cc.edges() for color in edge.multicolor.colors})
return genomes_cnt != edges_genomes_cnt
class TwoNodeConnectedComponentFilter(BGConnectedComponentFilter):
def __init__(self):
super(TwoNodeConnectedComponentFilter, self).__init__()
self.name = "Two node filter"
def accept_connected_component(self, cc, breakpoint_graph=None):
return len(list(cc.nodes())) != 2
|
{"hexsha": "846bcd8a094780bc3ab9d22a4268afca9f21f2a2", "size": 78102, "ext": "py", "lang": "Python", "max_stars_repo_path": "bg/breakpoint_graph.py", "max_stars_repo_name": "sergey-aganezov-jr/bg", "max_stars_repo_head_hexsha": "1ec758193441e49e7b34e0da09571480f4c24455", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-29T14:26:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-08T05:37:15.000Z", "max_issues_repo_path": "bg/breakpoint_graph.py", "max_issues_repo_name": "sergey-aganezov-jr/bg", "max_issues_repo_head_hexsha": "1ec758193441e49e7b34e0da09571480f4c24455", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-12-25T17:36:50.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-23T17:06:09.000Z", "max_forks_repo_path": "bg/breakpoint_graph.py", "max_forks_repo_name": "sergey-aganezov-jr/bg", "max_forks_repo_head_hexsha": "1ec758193441e49e7b34e0da09571480f4c24455", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 64.0705496308, "max_line_length": 340, "alphanum_fraction": 0.6267445136, "include": true, "reason": "import networkx,from networkx", "num_tokens": 15544}
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from utils import zscore_normalize
import boss_utils
np.random.seed(0)
def encode_dna(s):
if s=='A':
return 0
if s=='C':
return 1
if s=='G':
return 2
if s=='T':
return 3
def encode_data(S):
# S is an N-list of L-strings, L=8, N=65536
S1 = [list(s) for s in S] # N-list of L-lists
S2 = np.array(S1) # (N,L) array of strings, N=A**L
X = np.vectorize(encode_dna)(S2) # (N,L) array of ints (in 0..A-1)
return X
def decode_dna(x):
alpha = ['A', 'C', 'G', 'T']
return alpha[x]
def decode_data(X):
S = np.vectorize(decode_dna)(X)
return S
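# Round-trip example for the helpers above:
#   encode_data(["ACGT"]) -> array([[0, 1, 2, 3]])
#   decode_data inverts the mapping element-wise back to letters.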
def get_8mer_data():
file_name = '/home/kpmurphy/github/pyprobml/data/8mers_crx_ref_r1.csv'
data = pd.read_csv(file_name, sep='\t')
S = data['seq'].values
y = data['val'].values
X = encode_data(S)
y = zscore_normalize(y)
return X, y
Xall, yall = get_8mer_data()
nseq, seq_len = np.shape(Xall)
alpha_size = 4
def oracle(x):
ndx = np.where((Xall==x).all(axis=1))[0][0]
return yall[ndx]
def oracle_batch(X):
return np.apply_along_axis(oracle, 1, X)
plt.figure()
plt.plot(yall)
# Extract training set based on "medium performing" strings
# These could be unlabeled (if we use an RNN feature extractor)
bins = pd.qcut(yall, 100, labels=False, duplicates='drop')
middle_bins = np.where(np.logical_and(bins>=25, bins<=75))
Xtrain = Xall[middle_bins]
ytrain = yall[middle_bins]
ntrain = np.shape(Xtrain)[0]
print("Training set has {} examples from {}".format(ntrain, nseq))
# Pick a small labeled subset for training the GP
perm = np.random.permutation(ntrain)
ninit = 10
perm = perm[:ninit]
Xinit = Xtrain[perm]
yinit = ytrain[perm]
predictor = boss_utils.learn_supervised_model(Xtrain, ytrain)
ypred = predictor.predict(Xall)
plt.figure()
plt.scatter(yall, ypred)
plt.xlabel('True Values')
plt.ylabel('Predictions')
plt.show()
embedder = boss_utils.convert_to_embedder(predictor, seq_len)
Z = embedder.predict(Xtrain)
plt.figure()
plt.scatter(Z[:,0], Z[:,1], c=ytrain)
plt.title('embeddings of training set')
plt.colorbar()
plt.show()
#from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import pairwise_distances
#kernel_matrix = rbf_kernel(Z, gamma=1)
#dist_matrix = pairwise_distances(Z)
#nearest = np.argsort(dist_matrix, axis=1)
sources = np.arange(4)
dist_matrix = pairwise_distances(Z[sources], Z)
nearest = np.argsort(dist_matrix, axis=1)
knn = 100
fig, ax = plt.subplots(2,2)
for i, source in enumerate(sources):
ysource = oracle(Xall[source])
nbrs = nearest[source, 0:knn]
dst = dist_matrix[source, nbrs]
ytargets = oracle_batch(Xall[nbrs])
#plt.figure()
r = i // 2
c = i % 2
ax[r,c].plot(dst, ytargets-ysource, 'o')
ax[r,c].set_title('source {}'.format(source))
plt.show()
def embed_fn(x):
return embedder.predict(x)
n_iter=10
methods = []
methods.append('enum')
methods.append('bayes')
for method in methods:
np.random.seed(0)
ytrace = boss_utils.boss_maximize(method, oracle, Xinit, yinit, embed_fn, n_iter=n_iter)
plt.figure()
plt.plot(ytrace)
plt.title(method)
|
{"hexsha": "195d146eb13e02046c17a38961e43e357948bc83", "size": 3133, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/Old/boss_8mer.py", "max_stars_repo_name": "always-newbie161/pyprobml", "max_stars_repo_head_hexsha": "eb70c84f9618d68235ef9ba7da147c009b2e4a80", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-26T04:36:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-26T04:36:24.000Z", "max_issues_repo_path": "scripts/Old/boss_8mer.py", "max_issues_repo_name": "always-newbie161/pyprobml", "max_issues_repo_head_hexsha": "eb70c84f9618d68235ef9ba7da147c009b2e4a80", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-03-31T20:18:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:52:47.000Z", "max_forks_repo_path": "scripts/Old/boss_8mer.py", "max_forks_repo_name": "always-newbie161/pyprobml", "max_forks_repo_head_hexsha": "eb70c84f9618d68235ef9ba7da147c009b2e4a80", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-21T01:18:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-21T01:18:07.000Z", "avg_line_length": 23.7348484848, "max_line_length": 95, "alphanum_fraction": 0.6951803383, "include": true, "reason": "import numpy", "num_tokens": 965}
|
from scipy import stats
def test_scaling_exponent_estimation(desired_alpha, result, size=0.01):
"""
Test whether the desired alpha lies within some specified confidence
interval of the estimated scaling exponent.
"""
critical_value = stats.norm.ppf(size / 2) # this is negative!
alpha_hat, alpha_se = result.params['alpha'], result.standard_errors['alpha']
lower = alpha_hat + critical_value * alpha_se
upper = alpha_hat - critical_value * alpha_se
assert lower <= desired_alpha <= upper
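# Usage sketch (hypothetical `fit_result`; any object exposing .params and
# .standard_errors mappings with an 'alpha' key works with the helper above):
#   test_scaling_exponent_estimation(desired_alpha=2.5, result=fit_result)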
|
{"hexsha": "f0819af56302cfb55289c9415960e23c451fcea9", "size": 528, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyreto/tests/utilities.py", "max_stars_repo_name": "davidrpugh/Pyreto", "max_stars_repo_head_hexsha": "7a9ca92ea1204c89f0dbffb08d3f08b344a8d7dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2016-03-14T07:22:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-24T18:02:07.000Z", "max_issues_repo_path": "pyreto/tests/utilities.py", "max_issues_repo_name": "davidrpugh/Pyreto", "max_issues_repo_head_hexsha": "7a9ca92ea1204c89f0dbffb08d3f08b344a8d7dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyreto/tests/utilities.py", "max_forks_repo_name": "davidrpugh/Pyreto", "max_forks_repo_head_hexsha": "7a9ca92ea1204c89f0dbffb08d3f08b344a8d7dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-10-15T19:31:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-19T01:21:34.000Z", "avg_line_length": 35.2, "max_line_length": 81, "alphanum_fraction": 0.7253787879, "include": true, "reason": "from scipy", "num_tokens": 118}
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os, sys, shutil
import os.path as osp
import cv2
from collections import OrderedDict
import mocap_utils.general_utils as gnu
import numpy as np
import json
import subprocess as sp
def setup_render_out(out_dir):
if out_dir is not None:
gnu.build_dir(out_dir)
outputFileName = 'scene_%08d.jpg' # Hardcoded in glViewer.py
overlaidImageFolder= osp.join(out_dir, 'overlaid')
gnu.build_dir(overlaidImageFolder)
sideImageFolder= osp.join(out_dir, 'side')
gnu.build_dir(sideImageFolder)
mergedImageFolder= osp.join(out_dir, 'merged')
gnu.build_dir(mergedImageFolder)
res_subdirs = \
[outputFileName, overlaidImageFolder, sideImageFolder, mergedImageFolder]
return res_subdirs
else:
return None
def __get_input_type(args):
input_type = None
image_exts = ('jpg', 'png', 'jpeg', 'bmp')
video_exts = ('mp4', 'avi', 'mov')
extension = osp.splitext(args.input_path)[1][1:]
if extension.lower() in video_exts:
input_type = 'video'
elif osp.isdir(args.input_path):
file_list = os.listdir(args.input_path)
assert len(file_list) > 0, f"{args.input_path} is a blank folder"
extension = osp.splitext(file_list[0])[1][1:]
if extension == 'json':
input_type = 'bbox_dir'
else:
assert extension.lower() in image_exts
input_type = 'image_dir'
elif args.input_path == 'webcam':
input_type = 'webcam'
else:
assert False, "Unknown input path. It should be an image, " + \
"or an image folder, or a video file, or 'webcam'"
return input_type
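# Examples of the mapping above: "clip.mp4" -> 'video'; a directory of .json
# bbox files -> 'bbox_dir'; a directory of images -> 'image_dir'; the literal
# input string "webcam" -> 'webcam'.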
def __video_setup(args):
video_path = args.input_path
video_dir, video_name, video_basename, ext = gnu.analyze_path(video_path)
args.seq_name = video_basename
if args.save_frame:
frame_dir = osp.join(args.out_dir, "frames")
gnu.build_dir(frame_dir)
render_out_dir = osp.join(args.out_dir, "rendered")
gnu.build_dir(render_out_dir)
mocap_out_dir = osp.join(args.out_dir, "mocap")
gnu.build_dir(mocap_out_dir)
def __img_seq_setup(args):
seq_dir_path = args.input_path
args.seq_name = os.path.basename(args.input_path)
render_out_dir = osp.join(args.out_dir, 'rendered')
gnu.build_dir(render_out_dir)
mocap_out_dir = osp.join(args.out_dir, "mocap")
gnu.build_dir(mocap_out_dir)
def setup_input(args):
"""
Input type can be
an image file
a video file
a folder with image files
a folder with bbox (json) files
"webcam"
"""
image_exts = ('jpg', 'png', 'jpeg', 'bmp')
video_exts = ('mp4', 'avi', 'mov')
# get type of input
input_type = __get_input_type(args)
if input_type =='video':
cap = cv2.VideoCapture(args.input_path)
assert cap.isOpened(), f"Failed in opening video: {args.input_path}"
__video_setup(args)
return input_type, cap
elif input_type =='webcam':
cap = cv2.VideoCapture(0) #webcam input
return input_type, cap
elif input_type =='image_dir':
image_list = gnu.get_all_files(args.input_path, image_exts, "relative")
image_list = [ osp.join(args.input_path, image_name) for image_name in image_list ]
__img_seq_setup(args)
return input_type, image_list
elif input_type =='bbox_dir':
__img_seq_setup(args)
json_files = gnu.get_all_files(args.input_path, '.json', "relative")
input_data = list()
for json_file in json_files:
json_path = osp.join(args.input_path, json_file)
image_path, body_bbox_list, hand_bbox_list = load_info_from_json(json_path)
input_data.append(dict(
image_path = image_path,
hand_bbox_list = hand_bbox_list,
body_bbox_list = body_bbox_list
))
return input_type, input_data
else:
assert False, "Unknown input type"
def extract_mesh_from_output(pred_output_list):
pred_mesh_list = list()
for pred_output in pred_output_list:
if pred_output is not None:
if 'left_hand' in pred_output: # hand mocap
for hand_type in pred_output:
if pred_output[hand_type] is not None:
vertices = pred_output[hand_type]['pred_vertices_img']
faces = pred_output[hand_type]['faces'].astype(np.int32)
pred_mesh_list.append(dict(
vertices = vertices,
faces = faces
))
else: # body mocap (includes frank/whole/total mocap)
vertices = pred_output['pred_vertices_img']
faces = pred_output['faces'].astype(np.int32)
pred_mesh_list.append(dict(
vertices = vertices,
faces = faces
))
return pred_mesh_list
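# Output sketch for the function above: a flat list of dicts, one per predicted
# body (or per hand for hand mocap), each of the form
# {"vertices": ..., "faces": int32 array}.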
def load_info_from_json(json_path):
data = gnu.load_json(json_path)
# image path
assert ('image_path' in data), "Path of input image should be specified"
image_path = data['image_path']
assert osp.exists(image_path), f"{image_path} does not exist"
# body bboxes
body_bbox_list = list()
if 'body_bbox_list' in data:
body_bbox_list = data['body_bbox_list']
assert isinstance(body_bbox_list, list)
for b_id, body_bbox in enumerate(body_bbox_list):
if isinstance(body_bbox, list) and len(body_bbox) == 4:
body_bbox_list[b_id] = np.array(body_bbox)
# hand bboxes
hand_bbox_list = list()
if 'hand_bbox_list' in data:
hand_bbox_list = data['hand_bbox_list']
assert isinstance(hand_bbox_list, list)
for hand_bbox in hand_bbox_list:
for hand_type in ['left_hand', 'right_hand']:
if hand_type in hand_bbox:
bbox = hand_bbox[hand_type]
if isinstance(bbox, list) and len(bbox) == 4:
hand_bbox[hand_type] = np.array(bbox)
else:
hand_bbox[hand_type] = None
return image_path, body_bbox_list, hand_bbox_list
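# Minimal sketch of the JSON layout expected by the loader above (each bbox is
# a list of four numbers; keys besides "image_path" are optional):
# {
#   "image_path": "/path/to/image.jpg",
#   "body_bbox_list": [[x, y, w, h]],
#   "hand_bbox_list": [{"left_hand": [x, y, w, h], "right_hand": null}]
# }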
def save_info_to_json(args, image_path, body_bbox_list, hand_bbox_list):
saved_data = dict()
# image_path
saved_data['image_path'] = image_path
# body_bbox_list
saved_body_bbox_list = list()
for body_bbox in body_bbox_list:
if body_bbox is not None:
saved_body_bbox_list.append(body_bbox.tolist())
saved_data['body_bbox_list'] = saved_body_bbox_list
# hand_bbox_list
saved_hand_bbox_list = list()
for hand_bbox in hand_bbox_list:
if hand_bbox is not None:
saved_hand_bbox = dict(
left_hand = None,
right_hand = None)
for hand_type in saved_hand_bbox:
bbox = hand_bbox[hand_type]
if bbox is not None:
saved_hand_bbox[hand_type] = bbox.tolist()
saved_hand_bbox_list.append(saved_hand_bbox)
saved_data['hand_bbox_list'] = saved_hand_bbox_list
# write data to json
img_name = osp.basename(image_path)
record = img_name.split('.')
json_name = f"{'.'.join(record[:-1])}_bbox.json"
json_path = osp.join(args.out_dir, 'bbox', json_name)
gnu.make_subdir(json_path)
gnu.save_json(json_path, saved_data)
print(f"Bbox saved: {json_path}")
def save_pred_to_pkl(
args, demo_type, image_path,
body_bbox_list, hand_bbox_list, pred_output_list):
smpl_type = 'smplx' if args.use_smplx else 'smpl'
assert demo_type in ['hand', 'body', 'frank']
if demo_type in ['hand', 'frank']:
assert smpl_type == 'smplx'
assert len(hand_bbox_list) == len(body_bbox_list)
assert len(body_bbox_list) == len(pred_output_list)
saved_data = dict()
# demo type / smpl type / image / bbox
saved_data = OrderedDict()
saved_data['demo_type'] = demo_type
saved_data['smpl_type'] = smpl_type
saved_data['image_path'] = osp.abspath(image_path)
saved_data['body_bbox_list'] = body_bbox_list
saved_data['hand_bbox_list'] = hand_bbox_list
saved_data['save_mesh'] = args.save_mesh
saved_data['pred_output_list'] = list()
num_subject = len(hand_bbox_list)
for s_id in range(num_subject):
# predict params
pred_output = pred_output_list[s_id]
if pred_output is None:
saved_pred_output = None
else:
saved_pred_output = dict()
if demo_type == 'hand':
for hand_type in ['left_hand', 'right_hand']:
pred_hand = pred_output[hand_type]
saved_pred_output[hand_type] = dict()
saved_data_hand = saved_pred_output[hand_type]
if pred_hand is None:
saved_pred_output[hand_type] = None
else:
for pred_key in pred_hand:
if pred_key.find("vertices")<0 or pred_key == 'faces' :
saved_data_hand[pred_key] = pred_hand[pred_key]
else:
if args.save_mesh:
if pred_key != 'faces':
saved_data_hand[pred_key] = \
pred_hand[pred_key].astype(np.float16)
else:
saved_data_hand[pred_key] = pred_hand[pred_key]
else:
for pred_key in pred_output:
if pred_key.find("vertices")<0 or pred_key == 'faces' :
saved_pred_output[pred_key] = pred_output[pred_key]
else:
if args.save_mesh:
if pred_key != 'faces':
saved_pred_output[pred_key] = \
pred_output[pred_key].astype(np.float16)
else:
saved_pred_output[pred_key] = pred_output[pred_key]
saved_data['pred_output_list'].append(saved_pred_output)
# write data to pkl
img_name = osp.basename(image_path)
record = img_name.split('.')
pkl_name = f"{'.'.join(record[:-1])}_prediction_result.pkl"
pkl_path = osp.join(args.out_dir, 'mocap', pkl_name)
gnu.make_subdir(pkl_path)
gnu.save_pkl(pkl_path, saved_data)
print(f"Prediction saved: {pkl_path}")
def save_res_img(out_dir, image_path, res_img):
out_dir = osp.join(out_dir, "rendered")
img_name = osp.basename(image_path)
img_name = img_name[:-4] + '.jpg' #Always save as jpg
res_img_path = osp.join(out_dir, img_name)
gnu.make_subdir(res_img_path)
cv2.imwrite(res_img_path, res_img)
print(f"Visualization saved: {res_img_path}")
def gen_video_out(out_dir, seq_name):
outVideo_fileName = osp.join(out_dir, seq_name+'.mp4')
print(f">> Generating video in {outVideo_fileName}")
in_dir = osp.abspath(osp.join(out_dir, "rendered"))
out_path = osp.abspath(osp.join(out_dir, seq_name+'.mp4'))
ffmpeg_cmd = f'ffmpeg -y -f image2 -framerate 25 -pattern_type glob -i "{in_dir}/*.jpg" -pix_fmt yuv420p -c:v libx264 -x264opts keyint=25:min-keyint=25:scenecut=-1 -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" {out_path}'
os.system(ffmpeg_cmd)
# print(ffmpeg_cmd.split())
# sp.run(ffmpeg_cmd.split())
# sp.Popen(ffmpeg_cmd.split(), stdout=sp.PIPE, stderr=sp.PIPE)
|
{"hexsha": "96a032bb87aae09809eff0dc3ea5659fbb19a5ff", "size": 11781, "ext": "py", "lang": "Python", "max_stars_repo_path": "mocap_utils/demo_utils.py", "max_stars_repo_name": "Zhora1337/umnik", "max_stars_repo_head_hexsha": "12a0b18d542c46625bd627d7b6a14958eb16c503", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mocap_utils/demo_utils.py", "max_issues_repo_name": "Zhora1337/umnik", "max_issues_repo_head_hexsha": "12a0b18d542c46625bd627d7b6a14958eb16c503", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mocap_utils/demo_utils.py", "max_forks_repo_name": "Zhora1337/umnik", "max_forks_repo_head_hexsha": "12a0b18d542c46625bd627d7b6a14958eb16c503", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.815625, "max_line_length": 220, "alphanum_fraction": 0.606824548, "include": true, "reason": "import numpy", "num_tokens": 2742}
|
import numpy as np
import time
#import rtlsdr
import kid_readout.equipment.rtlkid
import kid_readout.equipment.agilent_33220
import kid_readout.equipment.lockin_controller
lockin = kid_readout.equipment.lockin_controller.lockinController()
print lockin.get_idn()
fg = kid_readout.equipment.agilent_33220.FunctionGenerator()
on_bias = 0.4
off_bias = 1.5
pulse_bias = 0.0
pulse_period = 2e-3
pulse_width = 2e-3-2e-6
hittite_power_level = 10.0
fg.set_pulse(period=pulse_period,width=pulse_width,high_level=on_bias,low_level=pulse_bias)
fg.enable_output(True)
#f_ref = 871.380e6
#f_ref = 870.436e6
f_ref=991.825e6
rtl = kid_readout.equipment.rtlkid.RtlKidReadout()
rtl.rtl.gain = 40.0
rtl.rtl.sample_rate = 256e3
rtl.hittite.set_power(hittite_power_level)
rtl.hittite.on()
rtl.adjust_freq_correction()
error = rtl.measure_freq_error()
if abs(error/1e9) > 5e-6:
print "adjusting freq correction failed!"
atten_turns = eval(raw_input("Enter mmw attenuator turns as a tuple: "))
suffix='_pin_atten'
freq,data = rtl.do_scan(freqs=np.linspace(-8e5,3e5,500)+f_ref,level=hittite_power_level)
peak = freq[data.argmin()]#+1e3
print "peak at",peak
rtl.hittite.set_freq(peak)
rtl.rtl.center_freq = peak + 10e3
rtl.hittite.on()
time.sleep(2)
print "measuring pulses from on state"
d = rtl.rtl.read_samples(2**21)
start_time = time.time()
d = rtl.rtl.read_samples(2**21)
d = d[2048:]
print "measuring on state zbd voltage"
fg.set_pulse(period=pulse_period,width=pulse_period/2,high_level=off_bias,low_level=on_bias)
fg.enable_output(True)
time.sleep(2)
x,y,r,theta = lockin.get_data()
filename = '/home/data2/rtl/%s' % (time.strftime('%Y-%m-%d_%H-%M-%S'))
filename += suffix
np.savez(filename,data=d, time= time.time(), sample_rate=rtl.rtl.sample_rate, gain= rtl.rtl.gain,
center_freq = rtl.rtl.center_freq,sweep_freq = freq, sweep_mag = data, start_time = start_time,
hittite_power_level= hittite_power_level, mmw_atten_turns = atten_turns, pulse_period=pulse_period,
pulse_width=pulse_width,high_level=on_bias,low_level=pulse_bias,zbd_voltage=x)
print "saved on measurement in ", filename
print "measuring pulses from off state"
fg.set_pulse(period=pulse_period,width=pulse_width,high_level=off_bias,low_level=pulse_bias)
fg.enable_output(True)
time.sleep(2)
d = rtl.rtl.read_samples(2**21)
start_time = time.time()
d = rtl.rtl.read_samples(2**21)
d = d[2048:]
print "measuring pulse state zbd voltage"
fg.set_pulse(period=pulse_period,width=pulse_period/2,high_level=off_bias,low_level=pulse_bias)
fg.enable_output(True)
time.sleep(2)
x,y,r,theta = lockin.get_data()
filename = '/home/data2/rtl/%s' % (time.strftime('%Y-%m-%d_%H-%M-%S'))
filename += suffix
np.savez(filename,data=d, time= time.time(), sample_rate=rtl.rtl.sample_rate, gain= rtl.rtl.gain,
center_freq = rtl.rtl.center_freq,sweep_freq = freq, sweep_mag = data, start_time = start_time,
hittite_power_level= hittite_power_level, mmw_atten_turns = atten_turns, pulse_period=pulse_period,
pulse_width=pulse_width,high_level=off_bias,low_level=pulse_bias,zbd_voltage=x)
print "saved baseline measurement in ", filename
|
{"hexsha": "09cb7f5b07469b83b275c4ed98d1c0f5dbf1591d", "size": 3140, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/data_taking_scripts/old_scripts/rtl_time_constant_with_pin_atten.py", "max_stars_repo_name": "danielflanigan/kid_readout", "max_stars_repo_head_hexsha": "07202090d468669200cab78297122880c1c03e87", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "apps/data_taking_scripts/old_scripts/rtl_time_constant_with_pin_atten.py", "max_issues_repo_name": "danielflanigan/kid_readout", "max_issues_repo_head_hexsha": "07202090d468669200cab78297122880c1c03e87", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "apps/data_taking_scripts/old_scripts/rtl_time_constant_with_pin_atten.py", "max_forks_repo_name": "danielflanigan/kid_readout", "max_forks_repo_head_hexsha": "07202090d468669200cab78297122880c1c03e87", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1304347826, "max_line_length": 108, "alphanum_fraction": 0.7694267516, "include": true, "reason": "import numpy", "num_tokens": 939}
|
from typing import List, Dict, Iterable
import hypothesis
import numpy as np
from gl0learn import fit
from hypothesis.strategies import composite
def is_mosek_installed() -> bool:
try:
import mosek
except ModuleNotFoundError:
return False
else:
return True
def is_scipy_installed() -> bool:
try:
import scipy
except ModuleNotFoundError:
return False
else:
return True
def top_n_triu_indicies_by_abs_value(x, n):
"""
Parameters
----------
n: int
Number of indicies to return.
If n is greather than p*(p-1)//2, the number of upper triangluer coordinates, an error is raised
If there are only k non-zero vaues, st k < n. Only k values are returned.
"""
if n <= 0:
raise ValueError(f"Cannot request {n} non-zero items")
p, p1 = x.shape
if p != p1:
raise ValueError(f"x is not a square matrix")
if n > p * (p - 1) // 2:
raise ValueError(f"n is to large for a {p} by {p} matrix")
triu_x = np.abs(np.triu(x, k=1))
if (triu_x == 0).all():
raise ValueError("All triu values of x are 0.")
triu_x_flat = triu_x.flatten()
non_zero_triu_x = triu_x_flat[np.nonzero(triu_x_flat)]
nnz = non_zero_triu_x.size
if np.unique(non_zero_triu_x).size != nnz:
raise NotImplementedError("Not implemented for arrays with duplicate values")
sorted_triu_values = np.sort(triu_x_flat)[::-1]
if sorted_triu_values[n] == 0:
n = np.where(sorted_triu_values == 0)[0][0] - 1
return np.where(triu_x >= sorted_triu_values[n])
return np.where(triu_x > sorted_triu_values[n])
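# Worked example for the helper above:
#   x = np.array([[0, 3, 1],
#                 [0, 0, 2],
#                 [0, 0, 0]])
#   top_n_triu_indicies_by_abs_value(x, 2)  # -> (array([0, 1]), array([1, 2]))
# i.e. the coordinates of the entries with values 3 and 2.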
@composite
def random_penalty(
draw,
l0: hypothesis.strategies.SearchStrategy[bool],
l1: hypothesis.strategies.SearchStrategy[bool],
l2: hypothesis.strategies.SearchStrategy[bool],
) -> List[str]:
penalties = []
if draw(l0):
penalties.append("l0")
if draw(l1):
penalties.append("l1")
if draw(l2):
penalties.append("l2")
return penalties
@composite
def random_penalty_values(
draw,
values_strategies: Dict[str, hypothesis.strategies.SearchStrategy[float]],
penalty_strategies: hypothesis.strategies.SearchStrategy[Iterable[str]],
) -> Dict[str, float]:
penalties = draw(penalty_strategies)
values = {}
for penalty in penalties:
values[penalty] = draw(values_strategies[penalty])
return values
def make_bisect_func(desired_nnz: int, Y: np.ndarray, verbose: bool = True, **kwargs):
def inner_bisect(l0):
fit_gl0learn = fit(Y, l0=l0, **kwargs)
theta_hat = fit_gl0learn.theta
np.fill_diagonal(theta_hat, 0)
nnz = np.count_nonzero(theta_hat) // 2
cost = desired_nnz - nnz
if verbose:
print(f"gl0Learn found solution with {nnz} non-zeros with parameters:")
print(f"\t l0 = {l0})")
print(f"\t cost = {cost}")
return cost
return inner_bisect
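# Intended use with a scalar root finder (sketch; assumes scipy is installed
# and that the nonzero count crosses desired_nnz somewhere on the bracket):
#   from scipy.optimize import bisect
#   f = make_bisect_func(desired_nnz=10, Y=Y)
#   l0_star = bisect(f, 1e-4, 1e2)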
def overlap_covariance_matrix(p: int, seed: int = 0, max_overlaps: int = 1, decay=0.99):
overlaps = {i: 0 for i in range(p)}
cov = np.eye(p)
v = 1
rng = np.random.RandomState(seed=seed)
while len(overlaps) >= 2:
rows = list(overlaps.keys())
row, col = rng.choice(rows, size=2, replace=False)
overlaps[row] += 1
overlaps[col] += 1
cov[row, col] += v
v *= decay
overlaps = {r: o for (r, o) in overlaps.items() if o < max_overlaps}
cov = (cov + cov.T) / 2
return cov
def sample_from_cov(cov: np.ndarray, n: int = 1000, seed: int = 0) -> np.ndarray:
p, p2 = cov.shape
assert p == p2
mu = np.zeros(p)
rng = np.random.default_rng(seed)
x = rng.multivariate_normal(mu, cov=np.linalg.inv(cov), size=n)
return x
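# Combined usage sketch of the two helpers above; note sample_from_cov inverts
# its argument, so `theta` acts as a precision matrix:
#   theta = overlap_covariance_matrix(p=10, seed=0)
#   x = sample_from_cov(theta, n=1000, seed=0)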
|
{"hexsha": "47f6b97a1513efa263edb6ecf87fcc80f755abfd", "size": 3845, "ext": "py", "lang": "Python", "max_stars_repo_path": "pypkg/tests/helper/utils.py", "max_stars_repo_name": "TNonet/gL0Learn", "max_stars_repo_head_hexsha": "cfa94ffd83b294faf94c8c7820f195d6b93c620b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-07T21:33:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T21:33:13.000Z", "max_issues_repo_path": "pypkg/tests/helper/utils.py", "max_issues_repo_name": "TNonet/gL0Learn", "max_issues_repo_head_hexsha": "cfa94ffd83b294faf94c8c7820f195d6b93c620b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pypkg/tests/helper/utils.py", "max_forks_repo_name": "TNonet/gL0Learn", "max_forks_repo_head_hexsha": "cfa94ffd83b294faf94c8c7820f195d6b93c620b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9675324675, "max_line_length": 104, "alphanum_fraction": 0.6241872562, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1057}
|
using MLStyle
using DataFrames
include("MQuery.ConstantNames.jl")
include("MQuery.DynamicInfer.jl")
include("MQuery.Interfaces.jl")
include("MQuery.MacroProcessor.jl")
include("MQuery.Impl.jl")
using Base.Enums
@enum TypeChecking Dynamic Static
df = DataFrame(
Symbol("Type checking") => [
Dynamic, Static, Static, Dynamic, Static, Dynamic, Dynamic, Static
],
:name => [
"Julia", "C#", "F#", "Ruby", "Java", "JavaScript", "Python", "Haskell"
],
:year => [
2012, 2000, 2005, 1995, 1995, 1995, 1990, 1990
]
)
df |>
@where !startswith(_.name, "Java"),
@groupby _."Type checking" => TC, endswith(_.name, "#") => is_sharp,
@having TC === Dynamic || is_sharp,
@select join(_.name, " and ") => result, _.TC => TC
|
{"hexsha": "78e03aa8f9836a8520ba05422a2f1706d246a43b", "size": 793, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "MQuery/MQuery.jl", "max_stars_repo_name": "thautwarm/MLStyle-Playground", "max_stars_repo_head_hexsha": "89095cab786e40476bc6c8f5e7b028ea02b8c4ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-04-08T15:10:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T05:35:58.000Z", "max_issues_repo_path": "MQuery/MQuery.jl", "max_issues_repo_name": "cherichy/MLStyle-Playground", "max_issues_repo_head_hexsha": "c9143c90be7cad98d650fc696185dcbbfdcde00c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-02-18T13:01:38.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-26T01:29:46.000Z", "max_forks_repo_path": "MQuery/MQuery.jl", "max_forks_repo_name": "cherichy/MLStyle-Playground", "max_forks_repo_head_hexsha": "c9143c90be7cad98d650fc696185dcbbfdcde00c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-18T12:51:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-18T12:51:56.000Z", "avg_line_length": 27.3448275862, "max_line_length": 82, "alphanum_fraction": 0.6027742749, "num_tokens": 233}
|
#ifndef OPENGM_PYTHON_INTERFACE
#define OPENGM_PYTHON_INTERFACE 1
#endif
#include <stdexcept>
#include <stddef.h>
#include <string>
#include <boost/python.hpp>
#include <opengm/graphicalmodel/graphicalmodel.hxx>
#include <opengm/inference/inference.hxx>
#include <opengm/inference/lazyflipper.hxx>
#include "../export_typedes.hxx"
#include "nifty_iterator.hxx"
#include "inferencehelpers.hxx"
using namespace boost::python;
namespace lazyflipper{
template<class PARAM>
inline void set
(
PARAM & p,
const size_t maxSubgraphSize
) {
p.maxSubgraphSize_=maxSubgraphSize;
}
}
template<class GM,class ACC>
void export_lazyflipper(){
import_array();
// Py Inference Types
typedef opengm::LazyFlipper<GM, ACC> PyLazyFlipper;
typedef typename PyLazyFlipper::Parameter PyLazyFlipperParameter;
typedef typename PyLazyFlipper::VerboseVisitorType PyLazyFlipperVerboseVisitor;
class_<PyLazyFlipperParameter > ( "LazyFlipperParameter" , init< const size_t > (args("maxSubGraphSize")))
.def(init<>())
.def_readwrite("maxSubgraphSize", &PyLazyFlipperParameter::maxSubgraphSize_)
.def ("set", &layzflipper::set<PyLazyFlipperParameter>,
(
arg("maxSubgraphSize")=2
)
)
;
OPENGM_PYTHON_VERBOSE_VISITOR_EXPORTER(PyLazyFlipperVerboseVisitor,"LazyFlipperVerboseVisitor" );
OPENGM_PYTHON_INFERENCE_EXPORTER(PyLazyFlipper,"LazyFlipper");
}
template void export_lazyflipper<GmAdder,opengm::Minimizer>();
template void export_lazyflipper<GmAdder,opengm::Maximizer>();
template void export_lazyflipper<GmMultiplier,opengm::Minimizer>();
template void export_lazyflipper<GmMultiplier,opengm::Maximizer>();
|
{"hexsha": "ea488bd1f5f2a8384dabb055670da41cf5153aef", "size": 1682, "ext": "cxx", "lang": "C++", "max_stars_repo_path": "src/interfaces/python/opengm/inference/pyLazyflipper.cxx", "max_stars_repo_name": "amueller/opengm", "max_stars_repo_head_hexsha": "bf2d0c611ade9bbf1d2ae537fee0df4cb6553777", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-03-13T20:56:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-13T20:56:48.000Z", "max_issues_repo_path": "src/interfaces/python/opengm/inference/pyLazyflipper.cxx", "max_issues_repo_name": "amueller/opengm", "max_issues_repo_head_hexsha": "bf2d0c611ade9bbf1d2ae537fee0df4cb6553777", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/interfaces/python/opengm/inference/pyLazyflipper.cxx", "max_forks_repo_name": "amueller/opengm", "max_forks_repo_head_hexsha": "bf2d0c611ade9bbf1d2ae537fee0df4cb6553777", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1481481481, "max_line_length": 109, "alphanum_fraction": 0.7627824019, "num_tokens": 445}
|
!
! LBLRTM_Fhdr_netCDF_IO
!
! Module containing routine to read and write LBLRTM Fhdr objects as
! groups to a netCDF format file.
!
!
! CREATION HISTORY:
! Written by: Paul van Delst, 19-Feb-2014
! paul.vandelst@noaa.gov
!
MODULE LBLRTM_Fhdr_netCDF_IO
! -----------------
! Environment setup
! -----------------
! Module usage
USE Type_Kinds , ONLY: FP, IP, DP => Double, Long
USE Message_Handler , ONLY: SUCCESS, FAILURE, INFORMATION, Display_Message
USE String_Utility , ONLY: StrClean
USE LBLRTM_Parameters , ONLY: N_MOL => LBLRTM_MAX_N_MOLECULES
USE LBLRTM_Fhdr_Define, ONLY: LBLRTM_Fhdr_type , &
LBLRTM_Fhdr_IsValid , &
LBLRTM_Fhdr_SetValid, &
LBLRTM_Fhdr_Destroy , &
LBLRTM_Fhdr_Inspect
USE netcdf
! Disable all implicit typing
IMPLICIT NONE
! ------------
! Visibilities
! ------------
PRIVATE
! Procedures
PUBLIC :: LBLRTM_Fhdr_netCDF_ReadGroup
PUBLIC :: LBLRTM_Fhdr_netCDF_WriteGroup
PUBLIC :: LBLRTM_Fhdr_netCDF_IOVersion
! -----------------
! Module parameters
! -----------------
  CHARACTER(*), PARAMETER :: MODULE_VERSION_ID = &
    '$Id$' ! Placeholder: the original revision-control Id string did not survive extraction
  ! Default message string length
INTEGER, PARAMETER :: ML = 1024
! Literal constants
REAL(DP), PARAMETER :: ZERO = 0.0_DP
REAL(DP), PARAMETER :: ONE = 1.0_DP
! Extra parameters not in netCDF(?)
INTEGER, PARAMETER :: MAX_N_GROUPS = 8096
! Global attribute names. Case sensitive
CHARACTER(*), PARAMETER :: RELEASE_GATTNAME = 'Release'
CHARACTER(*), PARAMETER :: VERSION_GATTNAME = 'Version'
CHARACTER(*), PARAMETER :: TITLE_GATTNAME = 'Title'
CHARACTER(*), PARAMETER :: HISTORY_GATTNAME = 'History'
CHARACTER(*), PARAMETER :: COMMENT_GATTNAME = 'Comment'
! Dimension names
CHARACTER(*), PARAMETER :: MOLECULE_DIMNAME = 'n_molecules'
CHARACTER(*), PARAMETER :: ANCILLARY_DIMNAME = 'n_ancillary'
CHARACTER(*), PARAMETER :: UID_STRLEN_DIMNAME = 'uid_strlen'
CHARACTER(*), PARAMETER :: SL_STRLEN_DIMNAME = 'sl_strlen'
! Variable names
CHARACTER(*), PARAMETER :: USER_ID_VARNAME = 'User_ID'
CHARACTER(*), PARAMETER :: COL_SCALE_FACTOR_VARNAME = 'Column_Scale_Factor'
CHARACTER(*), PARAMETER :: AVG_LAYER_PRES_VARNAME = 'Average_Layer_Pressure'
CHARACTER(*), PARAMETER :: AVG_LAYER_TEMP_VARNAME = 'Average_Layer_Temperature'
CHARACTER(*), PARAMETER :: MOL_ID_VARNAME = 'Molecule_Id'
CHARACTER(*), PARAMETER :: MOL_COL_DENS_VARNAME = 'Molecule_Column_Density'
CHARACTER(*), PARAMETER :: BROAD_COL_DENS_VARNAME = 'Broadening_Gas_Column_Density'
CHARACTER(*), PARAMETER :: FREQ_INTERVAL_VARNAME = 'Frequency_Interval'
CHARACTER(*), PARAMETER :: BEGIN_FREQ_VARNAME = 'Begin_Frequency'
CHARACTER(*), PARAMETER :: END_FREQ_VARNAME = 'End_Frequency'
CHARACTER(*), PARAMETER :: BDRY_TEMP_VARNAME = 'Boundary_Temperature'
CHARACTER(*), PARAMETER :: BDRY_EMIS_VARNAME = 'Boundary_Emissivity'
CHARACTER(*), PARAMETER :: N_MOLECULES_VARNAME = 'n_Molecules'
CHARACTER(*), PARAMETER :: N_LAYER_VARNAME = 'n_Layer'
CHARACTER(*), PARAMETER :: OD_LAYER_FLAG_VARNAME = 'OD_Layering_Control_Flag'
CHARACTER(*), PARAMETER :: CALC_DATE_VARNAME = 'Calculation_Date'
CHARACTER(*), PARAMETER :: CALC_TIME_VARNAME = 'Calculation_Time'
CHARACTER(*), PARAMETER :: ANCILLARY_VARNAME = 'ancillary'
! ...The run flags
CHARACTER(*), PARAMETER :: HIRAC_VARNAME = 'hirac'
CHARACTER(*), PARAMETER :: LBLF4_VARNAME = 'lblf4'
CHARACTER(*), PARAMETER :: XSCNT_VARNAME = 'xscnt'
CHARACTER(*), PARAMETER :: AERSL_VARNAME = 'aersl'
CHARACTER(*), PARAMETER :: EMIT_VARNAME = 'emit'
CHARACTER(*), PARAMETER :: SCAN_VARNAME = 'scan'
CHARACTER(*), PARAMETER :: PLOT_VARNAME = 'plot'
CHARACTER(*), PARAMETER :: PATH_VARNAME = 'path'
CHARACTER(*), PARAMETER :: JRAD_VARNAME = 'jrad'
CHARACTER(*), PARAMETER :: TEST_VARNAME = 'test'
CHARACTER(*), PARAMETER :: MERGE_VARNAME = 'merge'
CHARACTER(*), PARAMETER :: SCNID_VARNAME = 'scnid'
CHARACTER(*), PARAMETER :: HWHM_VARNAME = 'hwhm'
CHARACTER(*), PARAMETER :: IDABS_VARNAME = 'idabs'
CHARACTER(*), PARAMETER :: ATM_VARNAME = 'atm'
CHARACTER(*), PARAMETER :: LAYR1_VARNAME = 'layr1'
CHARACTER(*), PARAMETER :: NLAYR_VARNAME = 'nlayr'
! Variable long name attribute
CHARACTER(*), PARAMETER :: LONGNAME_ATTNAME = 'long_name'
CHARACTER(*), PARAMETER :: USER_ID_LONGNAME = 'User ID'
CHARACTER(*), PARAMETER :: COL_SCALE_FACTOR_LONGNAME = 'Column Scale Factor'
CHARACTER(*), PARAMETER :: AVG_LAYER_PRES_LONGNAME = 'Average Layer Pressure'
CHARACTER(*), PARAMETER :: AVG_LAYER_TEMP_LONGNAME = 'Average Layer Temperature'
CHARACTER(*), PARAMETER :: MOL_ID_LONGNAME = 'Molecule Id'
CHARACTER(*), PARAMETER :: MOL_COL_DENS_LONGNAME = 'Molecule Column Density'
CHARACTER(*), PARAMETER :: BROAD_COL_DENS_LONGNAME = 'Broadening Gas Column Density'
CHARACTER(*), PARAMETER :: FREQ_INTERVAL_LONGNAME = 'Frequency Interval'
CHARACTER(*), PARAMETER :: BEGIN_FREQ_LONGNAME = 'Begin Frequency'
CHARACTER(*), PARAMETER :: END_FREQ_LONGNAME = 'End Frequency'
CHARACTER(*), PARAMETER :: BDRY_TEMP_LONGNAME = 'Boundary Temperature'
CHARACTER(*), PARAMETER :: BDRY_EMIS_LONGNAME = 'Boundary Emissivity'
CHARACTER(*), PARAMETER :: N_MOLECULES_LONGNAME = 'Number of molecules'
  CHARACTER(*), PARAMETER :: N_LAYER_LONGNAME       = 'Number of layers'
CHARACTER(*), PARAMETER :: OD_LAYER_FLAG_LONGNAME = 'OD Layering Control Flag'
CHARACTER(*), PARAMETER :: CALC_DATE_LONGNAME = 'Calculation Date'
CHARACTER(*), PARAMETER :: CALC_TIME_LONGNAME = 'Calculation Time'
CHARACTER(*), PARAMETER :: ANCILLARY_LONGNAME = 'ancillary'
! ...The run flags
CHARACTER(*), PARAMETER :: HIRAC_LONGNAME = 'hirac run flag'
CHARACTER(*), PARAMETER :: LBLF4_LONGNAME = 'lblf4 run flag'
CHARACTER(*), PARAMETER :: XSCNT_LONGNAME = 'xscnt run flag'
CHARACTER(*), PARAMETER :: AERSL_LONGNAME = 'aersl run flag'
CHARACTER(*), PARAMETER :: EMIT_LONGNAME = 'emit run flag'
CHARACTER(*), PARAMETER :: SCAN_LONGNAME = 'scan run flag'
CHARACTER(*), PARAMETER :: PLOT_LONGNAME = 'plot run flag'
CHARACTER(*), PARAMETER :: PATH_LONGNAME = 'path run flag'
CHARACTER(*), PARAMETER :: JRAD_LONGNAME = 'jrad run flag'
CHARACTER(*), PARAMETER :: TEST_LONGNAME = 'test run flag'
CHARACTER(*), PARAMETER :: MERGE_LONGNAME = 'merge run flag'
CHARACTER(*), PARAMETER :: SCNID_LONGNAME = 'scnid run flag'
CHARACTER(*), PARAMETER :: HWHM_LONGNAME = 'hwhm run flag'
CHARACTER(*), PARAMETER :: IDABS_LONGNAME = 'idabs run flag'
CHARACTER(*), PARAMETER :: ATM_LONGNAME = 'atm run flag'
CHARACTER(*), PARAMETER :: LAYR1_LONGNAME = 'layr1 run flag'
CHARACTER(*), PARAMETER :: NLAYR_LONGNAME = 'nlayr run flag'
! Variable description attribute
CHARACTER(*), PARAMETER :: DESCRIPTION_ATTNAME = 'description'
CHARACTER(*), PARAMETER :: USER_ID_DESCRIPTION = 'User Identification string'
CHARACTER(*), PARAMETER :: COL_SCALE_FACTOR_DESCRIPTION = 'Column profile amount scaling factor'
CHARACTER(*), PARAMETER :: AVG_LAYER_PRES_DESCRIPTION = 'Average layer pressure'
CHARACTER(*), PARAMETER :: AVG_LAYER_TEMP_DESCRIPTION = 'Average layer temperature'
CHARACTER(*), PARAMETER :: MOL_ID_DESCRIPTION = 'Molecule identification string'
CHARACTER(*), PARAMETER :: MOL_COL_DENS_DESCRIPTION = 'Molecule column density'
CHARACTER(*), PARAMETER :: BROAD_COL_DENS_DESCRIPTION = 'Broadening gas column density'
CHARACTER(*), PARAMETER :: FREQ_INTERVAL_DESCRIPTION = 'Calculation frequency interval'
CHARACTER(*), PARAMETER :: BEGIN_FREQ_DESCRIPTION = 'Calculation begin frequency'
CHARACTER(*), PARAMETER :: END_FREQ_DESCRIPTION = 'Calculation end frequency'
CHARACTER(*), PARAMETER :: BDRY_TEMP_DESCRIPTION = 'Boundary temperature'
CHARACTER(*), PARAMETER :: BDRY_EMIS_DESCRIPTION = 'Boundary emissivity'
  CHARACTER(*), PARAMETER :: N_MOLECULES_DESCRIPTION   = 'Number of gaseous absorbers used in the calculation'
  CHARACTER(*), PARAMETER :: N_LAYER_DESCRIPTION       = 'Number of atmospheric layers'
CHARACTER(*), PARAMETER :: OD_LAYER_FLAG_DESCRIPTION = 'Optical depth layering control flag'
CHARACTER(*), PARAMETER :: CALC_DATE_DESCRIPTION = 'Calculation date'
CHARACTER(*), PARAMETER :: CALC_TIME_DESCRIPTION = 'Calculation time'
CHARACTER(*), PARAMETER :: ANCILLARY_DESCRIPTION = 'ancillary'
! ...The run flags
CHARACTER(*), PARAMETER :: HIRAC_DESCRIPTION = 'LBLRTM control - hirac run flag'
CHARACTER(*), PARAMETER :: LBLF4_DESCRIPTION = 'LBLRTM control - lblf4 run flag'
CHARACTER(*), PARAMETER :: XSCNT_DESCRIPTION = 'LBLRTM control - xscnt run flag'
CHARACTER(*), PARAMETER :: AERSL_DESCRIPTION = 'LBLRTM control - aersl run flag'
CHARACTER(*), PARAMETER :: EMIT_DESCRIPTION = 'LBLRTM control - emit run flag'
CHARACTER(*), PARAMETER :: SCAN_DESCRIPTION = 'LBLRTM control - scan run flag'
CHARACTER(*), PARAMETER :: PLOT_DESCRIPTION = 'LBLRTM control - plot run flag'
CHARACTER(*), PARAMETER :: PATH_DESCRIPTION = 'LBLRTM control - path run flag'
CHARACTER(*), PARAMETER :: JRAD_DESCRIPTION = 'LBLRTM control - jrad run flag'
CHARACTER(*), PARAMETER :: TEST_DESCRIPTION = 'LBLRTM control - test run flag'
CHARACTER(*), PARAMETER :: MERGE_DESCRIPTION = 'LBLRTM control - merge run flag'
CHARACTER(*), PARAMETER :: SCNID_DESCRIPTION = 'LBLRTM control - scnid run flag'
CHARACTER(*), PARAMETER :: HWHM_DESCRIPTION = 'LBLRTM control - hwhm run flag'
CHARACTER(*), PARAMETER :: IDABS_DESCRIPTION = 'LBLRTM control - idabs run flag'
CHARACTER(*), PARAMETER :: ATM_DESCRIPTION = 'LBLRTM control - atm run flag'
CHARACTER(*), PARAMETER :: LAYR1_DESCRIPTION = 'LBLRTM control - layr1 run flag'
CHARACTER(*), PARAMETER :: NLAYR_DESCRIPTION = 'LBLRTM control - nlayr run flag'
! Variable units attribute.
CHARACTER(*), PARAMETER :: UNITS_ATTNAME = 'units'
CHARACTER(*), PARAMETER :: USER_ID_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: COL_SCALE_FACTOR_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: AVG_LAYER_PRES_UNITS = 'hPa'
CHARACTER(*), PARAMETER :: AVG_LAYER_TEMP_UNITS = 'K'
CHARACTER(*), PARAMETER :: MOL_ID_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: MOL_COL_DENS_UNITS = 'mol/cm^2'
CHARACTER(*), PARAMETER :: BROAD_COL_DENS_UNITS = 'mol/cm^2'
CHARACTER(*), PARAMETER :: FREQ_INTERVAL_UNITS = 'cm^-1'
CHARACTER(*), PARAMETER :: BEGIN_FREQ_UNITS = 'cm^-1'
CHARACTER(*), PARAMETER :: END_FREQ_UNITS = 'cm^-1'
CHARACTER(*), PARAMETER :: BDRY_TEMP_UNITS = 'K'
CHARACTER(*), PARAMETER :: BDRY_EMIS_UNITS = 'dimensionless'
CHARACTER(*), PARAMETER :: N_MOLECULES_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: N_LAYER_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: OD_LAYER_FLAG_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: CALC_DATE_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: CALC_TIME_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: ANCILLARY_UNITS = 'N/A'
! ...The run flags
CHARACTER(*), PARAMETER :: HIRAC_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: LBLF4_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: XSCNT_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: AERSL_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: EMIT_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: SCAN_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: PLOT_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: PATH_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: JRAD_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: TEST_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: MERGE_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: SCNID_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: HWHM_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: IDABS_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: ATM_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: LAYR1_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: NLAYR_UNITS = 'N/A'
! Variable _FillValue attribute.
CHARACTER(*), PARAMETER :: FILLVALUE_ATTNAME = '_FillValue'
CHARACTER(*), PARAMETER :: USER_ID_FILLVALUE = NF90_FILL_CHAR
REAL(DP) , PARAMETER :: COL_SCALE_FACTOR_FILLVALUE = 0.0_DP
REAL(FP) , PARAMETER :: AVG_LAYER_PRES_FILLVALUE = 0.0_FP
REAL(FP) , PARAMETER :: AVG_LAYER_TEMP_FILLVALUE = 0.0_FP
CHARACTER(*), PARAMETER :: MOL_ID_FILLVALUE = NF90_FILL_CHAR
REAL(FP) , PARAMETER :: MOL_COL_DENS_FILLVALUE = 0.0_FP
REAL(FP) , PARAMETER :: BROAD_COL_DENS_FILLVALUE = 0.0_FP
REAL(FP) , PARAMETER :: FREQ_INTERVAL_FILLVALUE = 0.0_FP
REAL(DP) , PARAMETER :: BEGIN_FREQ_FILLVALUE = 0.0_DP
REAL(DP) , PARAMETER :: END_FREQ_FILLVALUE = 0.0_DP
REAL(FP) , PARAMETER :: BDRY_TEMP_FILLVALUE = 0.0_FP
REAL(FP) , PARAMETER :: BDRY_EMIS_FILLVALUE = 0.0_FP
INTEGER(IP) , PARAMETER :: N_MOLECULES_FILLVALUE = 0_IP
INTEGER(IP) , PARAMETER :: N_LAYER_FILLVALUE = 0_IP
INTEGER(IP) , PARAMETER :: OD_LAYER_FLAG_FILLVALUE = 0_IP
CHARACTER(*), PARAMETER :: CALC_DATE_FILLVALUE = NF90_FILL_CHAR
CHARACTER(*), PARAMETER :: CALC_TIME_FILLVALUE = NF90_FILL_CHAR
CHARACTER(*), PARAMETER :: ANCILLARY_FILLVALUE = NF90_FILL_CHAR
! ...The run flags
INTEGER(IP), PARAMETER :: HIRAC_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: LBLF4_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: XSCNT_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: AERSL_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: EMIT_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: SCAN_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: PLOT_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: PATH_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: JRAD_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: TEST_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: MERGE_FILLVALUE = 0_IP
REAL(FP) , PARAMETER :: SCNID_FILLVALUE = 0.0_FP
REAL(FP) , PARAMETER :: HWHM_FILLVALUE = 0.0_FP
INTEGER(IP), PARAMETER :: IDABS_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: ATM_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: LAYR1_FILLVALUE = 0_IP
INTEGER(IP), PARAMETER :: NLAYR_FILLVALUE = 0_IP
! Variable netCDF datatypes
INTEGER(Long), PARAMETER :: USER_ID_TYPE = NF90_CHAR
INTEGER(Long), PARAMETER :: COL_SCALE_FACTOR_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: AVG_LAYER_PRES_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: AVG_LAYER_TEMP_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: MOL_ID_TYPE = NF90_CHAR
INTEGER(Long), PARAMETER :: MOL_COL_DENS_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: BROAD_COL_DENS_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: FREQ_INTERVAL_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: BEGIN_FREQ_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: END_FREQ_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: BDRY_TEMP_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: BDRY_EMIS_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: N_MOLECULES_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: N_LAYER_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: OD_LAYER_FLAG_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: CALC_DATE_TYPE = NF90_CHAR
INTEGER(Long), PARAMETER :: CALC_TIME_TYPE = NF90_CHAR
INTEGER(Long), PARAMETER :: ANCILLARY_TYPE = NF90_CHAR
! ...The run flags
INTEGER(Long), PARAMETER :: HIRAC_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: LBLF4_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: XSCNT_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: AERSL_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: EMIT_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: SCAN_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: PLOT_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: PATH_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: JRAD_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: TEST_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: MERGE_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: SCNID_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: HWHM_TYPE = NF90_DOUBLE
INTEGER(Long), PARAMETER :: IDABS_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: ATM_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: LAYR1_TYPE = NF90_INT
INTEGER(Long), PARAMETER :: NLAYR_TYPE = NF90_INT
CONTAINS
!################################################################################
!################################################################################
!## ##
!## ## PUBLIC MODULE ROUTINES ## ##
!## ##
!################################################################################
!################################################################################
!----------------------------------------------------------
! Function to write an LBLRTM File Header object as a group
!----------------------------------------------------------
FUNCTION LBLRTM_Fhdr_netCDF_WriteGroup( &
Fhdr , & ! Input
FileId , & ! Input
GroupName, & ! Optional input
Quiet , & ! Optional input
Debug ) & ! Optional input (Debug output control)
RESULT( err_stat )
! Arguments
TYPE(LBLRTM_Fhdr_type), INTENT(IN) :: Fhdr
INTEGER(Long), INTENT(IN) :: FileId
CHARACTER(*), OPTIONAL, INTENT(IN) :: GroupName
LOGICAL, OPTIONAL, INTENT(IN) :: Quiet
LOGICAL, OPTIONAL, INTENT(IN) :: Debug
! Function result
INTEGER :: err_stat
! Local parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'LBLRTM_Fhdr_netCDF_IO::WriteGroup'
! Local variables
CHARACTER(ML) :: msg
CHARACTER(ML) :: group_name
LOGICAL :: noisy
LOGICAL :: debug_output
INTEGER(Long) :: nf90_stat
INTEGER(Long) :: groupid
INTEGER(Long) :: n_mol_dimid, n_ancillary_dimid
INTEGER(Long) :: uid_strlen_dimid, sl_strlen_dimid
! Setup
err_stat = SUCCESS
! ...Check structure
IF ( .NOT. (LBLRTM_Fhdr_IsValid( Fhdr )) ) THEN
msg = 'LBLRTM Fhdr object is invalid. Nothing to do!'
CALL Write_CleanUp(); RETURN
END IF
! ...Check GroupName argument, defining default.
group_name = 'Fhdr'
IF ( PRESENT(GroupName) ) THEN
group_name = ADJUSTL(GroupName)
END IF
! ...Check Quiet argument
noisy = .TRUE.
IF ( PRESENT(Quiet) ) noisy = .NOT. Quiet
! ...Set debug option
debug_output = .FALSE.
IF ( PRESENT(debug) ) debug_output = debug
IF ( debug_output ) THEN
CALL Display_Message(ROUTINE_NAME,'Entering...',INFORMATION)
noisy = .TRUE.
END IF
! Create a new group for the file header data
nf90_stat = NF90_DEF_GRP( &
fileid, &
group_name, &
groupid )
IF ( nf90_stat /= NF90_NOERR ) THEN
    msg = 'Error creating '//TRIM(group_name)//' group - '//&
          TRIM(NF90_STRERROR( nf90_stat ))
CALL Write_Cleanup(); RETURN
END IF
! Define the dimensions for the group
err_stat = DefineDimensions( &
Fhdr , &
groupid , &
n_mol_dimid , &
n_ancillary_dimid, &
uid_strlen_dimid , &
sl_strlen_dimid )
IF ( err_stat /= SUCCESS ) THEN
    ! err_stat (not nf90_stat) reports this failure, so NF90_STRERROR is not applicable here
    msg = 'Error defining dimensions for the '//TRIM(group_name)//' group'
CALL Write_Cleanup(); RETURN
END IF
! Define the variables for the group
err_stat = DefineVariables( &
groupid , &
n_mol_dimid , &
n_ancillary_dimid, &
uid_strlen_dimid , &
sl_strlen_dimid )
IF ( err_stat /= SUCCESS ) THEN
    ! err_stat (not nf90_stat) reports this failure, so NF90_STRERROR is not applicable here
    msg = 'Error defining variables for the '//TRIM(group_name)//' group'
CALL Write_Cleanup(); RETURN
END IF
! Take netCDF file out of define mode
nf90_stat = NF90_ENDDEF( fileid )
IF ( nf90_stat /= NF90_NOERR ) THEN
msg = 'Error taking file out of define mode to write the '//&
TRIM(group_name)//' group - '//TRIM(NF90_STRERROR( nf90_stat ))
CALL Write_Cleanup(); RETURN
END IF
! Write the variables for the group
err_stat = WriteVariables( Fhdr, groupid )
IF ( err_stat /= SUCCESS ) THEN
    ! err_stat (not nf90_stat) reports this failure, so NF90_STRERROR is not applicable here
    msg = 'Error writing variables for the '//TRIM(group_name)//' group'
CALL Write_Cleanup(); RETURN
END IF
! Put netCDF file back into define mode
nf90_stat = NF90_REDEF( fileid )
IF ( nf90_stat /= NF90_NOERR ) THEN
msg = 'Error putting file back into define mode after writing the '//&
TRIM(group_name)//' group - '//TRIM(NF90_STRERROR( nf90_stat ))
CALL Write_Cleanup(); RETURN
END IF
CONTAINS
SUBROUTINE Write_CleanUp()
nf90_stat = NF90_CLOSE( fileid )
err_stat = FAILURE
CALL Display_Message( ROUTINE_NAME,msg,err_stat )
END SUBROUTINE Write_CleanUp
END FUNCTION LBLRTM_Fhdr_netCDF_WriteGroup
!---------------------------------------------------------
! Function to read an LBLRTM File Header object as a group
!---------------------------------------------------------
FUNCTION LBLRTM_Fhdr_netCDF_ReadGroup( &
Fhdr , & ! Output
FileId , & ! Input
GroupName, & ! Optional input
Quiet , & ! Optional input
Debug ) & ! Optional input (Debug output control)
RESULT( err_stat )
! Arguments
TYPE(LBLRTM_Fhdr_type), INTENT(OUT) :: Fhdr
INTEGER(Long), INTENT(IN) :: FileId
CHARACTER(*), OPTIONAL, INTENT(IN) :: GroupName
LOGICAL, OPTIONAL, INTENT(IN) :: Quiet
LOGICAL, OPTIONAL, INTENT(IN) :: Debug
! Function result
INTEGER :: err_stat
! Local parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'LBLRTM_Fhdr_netCDF_IO::ReadGroup'
! Local variables
CHARACTER(ML) :: msg
CHARACTER(ML) :: group_name
LOGICAL :: noisy
LOGICAL :: debug_output
INTEGER(Long) :: nf90_stat
INTEGER(Long) :: groupid
! Setup
err_stat = SUCCESS
! ...Check GroupName argument, defining default.
group_name = 'Fhdr'
IF ( PRESENT(GroupName) ) THEN
group_name = ADJUSTL(GroupName)
END IF
! ...Check Quiet argument
noisy = .TRUE.
IF ( PRESENT(Quiet) ) noisy = .NOT. Quiet
! ...Set debug option
debug_output = .FALSE.
IF ( PRESENT(debug) ) debug_output = debug
IF ( debug_output ) THEN
CALL Display_Message(ROUTINE_NAME,'Entering...',INFORMATION)
noisy = .TRUE.
END IF
! Get the group id
nf90_stat = NF90_INQ_GRP_NCID(fileid, group_name, groupid)
IF ( nf90_stat /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(group_name)//' group for its group id - '//&
TRIM(NF90_STRERROR( nf90_stat ))
CALL Read_Cleanup(); RETURN
END IF
! Read the variables for the group
err_stat = ReadVariables( Fhdr, groupid )
IF ( err_stat /= SUCCESS ) THEN
    ! err_stat (not nf90_stat) reports this failure, so NF90_STRERROR is not applicable here
    msg = 'Error reading variables for the '//TRIM(group_name)//' group'
CALL Read_Cleanup(); RETURN
END IF
! Tag object as valid
CALL LBLRTM_Fhdr_SetValid(Fhdr)
IF ( debug_output ) CALL LBLRTM_Fhdr_Inspect(fhdr)
CONTAINS
SUBROUTINE Read_CleanUp()
CALL LBLRTM_Fhdr_Destroy(Fhdr)
err_stat = FAILURE
CALL Display_Message( ROUTINE_NAME,msg,err_stat )
END SUBROUTINE Read_CleanUp
END FUNCTION LBLRTM_Fhdr_netCDF_ReadGroup
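  ! Hedged usage sketch (not part of the original module; fhdr and fileid are
  ! assumed caller variables). A caller holding an open netCDF-4 file would
  ! typically drive the two public routines as:
  !   err_stat = LBLRTM_Fhdr_netCDF_WriteGroup( fhdr, fileid, GroupName='Fhdr' )
  !   err_stat = LBLRTM_Fhdr_netCDF_ReadGroup ( fhdr, fileid, GroupName='Fhdr' )
  ! The file must be netCDF-4 format, since classic-format files do not support
  ! groups (NF90_DEF_GRP / NF90_INQ_GRP_NCID).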
!------------------------------------------------
! Subroutine to return module version information
!------------------------------------------------
SUBROUTINE LBLRTM_Fhdr_netCDF_IOVersion( Id )
CHARACTER(*), INTENT(OUT) :: Id
Id = MODULE_VERSION_ID
END SUBROUTINE LBLRTM_Fhdr_netCDF_IOVersion
!################################################################################
!################################################################################
!## ##
!## ## PRIVATE MODULE ROUTINES ## ##
!## ##
!################################################################################
!################################################################################
INCLUDE 'LBLRTM_Fhdr_netCDF_IO.inc'
END MODULE LBLRTM_Fhdr_netCDF_IO
|
{"hexsha": "5d510dccc3bc2ee988b9919d135478408d96ea67", "size": 24785, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/TauProd/LBL/lblrtm/io/netcdf/fhdr/LBLRTM_Fhdr_netCDF_IO.f90", "max_stars_repo_name": "hsbadr/crtm", "max_stars_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-11-19T10:00:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T02:42:18.000Z", "max_issues_repo_path": "src/TauProd/LBL/lblrtm/io/netcdf/fhdr/LBLRTM_Fhdr_netCDF_IO.f90", "max_issues_repo_name": "hsbadr/crtm", "max_issues_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-05T21:04:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T18:23:10.000Z", "max_forks_repo_path": "src/TauProd/LBL/lblrtm/io/netcdf/fhdr/LBLRTM_Fhdr_netCDF_IO.f90", "max_forks_repo_name": "hsbadr/crtm", "max_forks_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-10-29T17:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T08:42:45.000Z", "avg_line_length": 44.738267148, "max_line_length": 118, "alphanum_fraction": 0.6271535203, "num_tokens": 6611}
|
# Import Dependencies
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
import datetime as dt
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br>"
f"/api/v1.0/precipitation<br>"
f"/api/v1.0/stations<br>"
f"/api/v1.0/tobs<br>"
f"/api/v1.0/yyyy-mm-dd<br/>"
f"/api/v1.0/yyyy-mm-dd/yyyy-mm-dd"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of precipitations and dates"""
# Query all precipitations
results = session.query(Measurement.date,Measurement.prcp).all()
session.close()
# Convert list of tuples into normal list
precipitation = []
for date,prcp in results:
prcp_dict = {}
prcp_dict["Date"] = date
prcp_dict["Precipitation"] = prcp
precipitation.append(prcp_dict)
return jsonify(precipitation)
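# Hedged response sketch (illustrative values, not from the original script):
# /api/v1.0/precipitation returns a JSON list of entries shaped like
#   [{"Date": "2016-08-23", "Precipitation": 0.0}, ...]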
@app.route("/api/v1.0/stations")
def stations():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of stations"""
# Query all stations
results = session.query(Station.station).all()
session.close()
# Convert list of tuples into normal list
stations = list(np.ravel(results))
return jsonify(stations)
# Query the temperature observations from the final year of data (one year back from the last data point).
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Query the dates and temperature observations of the most active station for the last year of data."""
query_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)
results = session.query(Measurement.tobs).\
filter(Measurement.date >= query_date).all()
session.close()
# Convert list of tuples into normal list
tobs = list(np.ravel(results))
return jsonify(tobs)
# When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
@app.route("/api/v1.0/<start>")
def start_date(start):
session = Session(engine)
    queryresult = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).all()
session.close()
tobsall = []
for min,avg,max in queryresult:
tobs_dict = {}
tobs_dict["Min"] = min
tobs_dict["Average"] = avg
tobs_dict["Max"] = max
tobsall.append(tobs_dict)
return jsonify(tobsall)
# When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.
@app.route("/api/v1.0/<start>/<end>")
def start_stop_date(start, end):
    session = Session(engine)
    queryresult = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start).filter(Measurement.date <= end).all()
session.close()
tobsall = []
for min,avg,max in queryresult:
tobs_dict = {}
tobs_dict["Min"] = min
tobs_dict["Average"] = avg
tobs_dict["Max"] = max
tobsall.append(tobs_dict)
return jsonify(tobsall)
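# Hedged usage sketch (illustrative dates and default Flask port, both
# assumptions): the date-range routes are queried as
#   http://127.0.0.1:5000/api/v1.0/2016-08-23
#   http://127.0.0.1:5000/api/v1.0/2016-08-23/2017-08-23
# and each returns a JSON list of {"Min": ..., "Average": ..., "Max": ...} dicts.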
if __name__ == '__main__':
app.run(debug=True)
|
{"hexsha": "39d30314d3ff0ba746f6696aabb862f06a7a3119", "size": 4174, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "tylerlvaughn/SQLAlchemy-Challenge", "max_stars_repo_head_hexsha": "0d09acb421f48fc1167602ee62e18867c5f06089", "max_stars_repo_licenses": ["ADSL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "tylerlvaughn/SQLAlchemy-Challenge", "max_issues_repo_head_hexsha": "0d09acb421f48fc1167602ee62e18867c5f06089", "max_issues_repo_licenses": ["ADSL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "tylerlvaughn/SQLAlchemy-Challenge", "max_forks_repo_head_hexsha": "0d09acb421f48fc1167602ee62e18867c5f06089", "max_forks_repo_licenses": ["ADSL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7862068966, "max_line_length": 127, "alphanum_fraction": 0.6238620029, "include": true, "reason": "import numpy", "num_tokens": 959}
|
[STATEMENT]
lemma remove_term_keys:
shows "keys (mapping_of p) - {m} = keys (mapping_of (remove_term m p))" (is "?A = ?B")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. keys (mapping_of p) - {m} = keys (mapping_of (remove_term m p))
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. keys (mapping_of p) - {m} \<subseteq> keys (mapping_of (remove_term m p))
2. keys (mapping_of (remove_term m p)) \<subseteq> keys (mapping_of p) - {m}
[PROOF STEP]
show "?A \<subseteq> ?B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. keys (mapping_of p) - {m} \<subseteq> keys (mapping_of (remove_term m p))
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> keys (mapping_of p) - {m} \<Longrightarrow> x \<in> keys (mapping_of (remove_term m p))
[PROOF STEP]
fix m'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> keys (mapping_of p) - {m} \<Longrightarrow> x \<in> keys (mapping_of (remove_term m p))
[PROOF STEP]
assume "m'\<in>?A"
[PROOF STATE]
proof (state)
this:
m' \<in> keys (mapping_of p) - {m}
goal (1 subgoal):
1. \<And>x. x \<in> keys (mapping_of p) - {m} \<Longrightarrow> x \<in> keys (mapping_of (remove_term m p))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
m' \<in> keys (mapping_of p) - {m}
[PROOF STEP]
show "m' \<in> ?B"
[PROOF STATE]
proof (prove)
using this:
m' \<in> keys (mapping_of p) - {m}
goal (1 subgoal):
1. m' \<in> keys (mapping_of (remove_term m p))
[PROOF STEP]
by (simp add: coeff_keys remove_term_coeff)
[PROOF STATE]
proof (state)
this:
m' \<in> keys (mapping_of (remove_term m p))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
keys (mapping_of p) - {m} \<subseteq> keys (mapping_of (remove_term m p))
goal (1 subgoal):
1. keys (mapping_of (remove_term m p)) \<subseteq> keys (mapping_of p) - {m}
[PROOF STEP]
show "?B \<subseteq> ?A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. keys (mapping_of (remove_term m p)) \<subseteq> keys (mapping_of p) - {m}
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> keys (mapping_of (remove_term m p)) \<Longrightarrow> x \<in> keys (mapping_of p) - {m}
[PROOF STEP]
fix m'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> keys (mapping_of (remove_term m p)) \<Longrightarrow> x \<in> keys (mapping_of p) - {m}
[PROOF STEP]
assume "m'\<in> ?B"
[PROOF STATE]
proof (state)
this:
m' \<in> keys (mapping_of (remove_term m p))
goal (1 subgoal):
1. \<And>x. x \<in> keys (mapping_of (remove_term m p)) \<Longrightarrow> x \<in> keys (mapping_of p) - {m}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
m' \<in> keys (mapping_of (remove_term m p))
[PROOF STEP]
show "m' \<in> ?A"
[PROOF STATE]
proof (prove)
using this:
m' \<in> keys (mapping_of (remove_term m p))
goal (1 subgoal):
1. m' \<in> keys (mapping_of p) - {m}
[PROOF STEP]
by (simp add: coeff_keys remove_term_coeff)
[PROOF STATE]
proof (state)
this:
m' \<in> keys (mapping_of p) - {m}
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
keys (mapping_of (remove_term m p)) \<subseteq> keys (mapping_of p) - {m}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1378, "file": "Polynomials_More_MPoly_Type", "length": 18}
|
setwd("/home/yuanhao/github_repositories/DISC/reproducibility")
utilities_path = "./source/utilities.r"
source(utilities_path)
#### STEP 1
# Here, we use the BONE_MARROW dataset. Detailed information about this dataset can be found at https://raw.githack.com/iyhaoo/DISC/master/reproducibility/data_preparation_and_imputation/data_preprocessing_BONE_MARROW.nb.html.
# We used the raw data after gene selection for cell identification.
gene_bc_mat = readh5_loom("./data/BONE_MARROW/raw.loom")
gene_bc_filt = gene_bc_mat[gene_selection(gene_bc_mat, 10), ]
dim(gene_bc_filt) # 13813, 6939
used_genes = rownames(gene_bc_filt)
output_dir = "./results/BONE_MARROW"
dir.create(output_dir, showWarnings = F, recursive = T)
#### STEP 2
#Following this script (https://github.com/Winnie09/imputationBenchmark/blob/master/data/code/process/07_hca_assign_celltype.R), we use the bulk-sequence data (https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE74246) of 13 normal hematopoietic cell types and 3 acute myeloid leukemia cell types for cell identification, the file is downloaded from https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSE74246&format=file&file=GSE74246%5FRNAseq%5FAll%5FCounts%2Etxt%2Egz.
if(!file.exists("./data/BONE_MARROW/cell_type.rds")){
  library(scran)
  library(SingleCellExperiment)
  # parallel and BiocParallel supply detectCores() and MulticoreParam(),
  # which scran_normalization() uses below
  library(parallel)
  library(BiocParallel)
scran_normalization = function(gene_bc_mat){
# The source of this function is https://github.com/Winnie09/imputationBenchmark/blob/master/data/code/process/06_make_hca_MantonBM6.R
dimnames_gene_bc_mat = dimnames(gene_bc_mat)
dimnames(gene_bc_mat) = list()
sce = SingleCellExperiment(list(counts = gene_bc_mat))
no_cores = max(c(detectCores() - 1, 1))
if(ncol(gene_bc_mat) < 21){
sce = computeSumFactors(sce, BPPARAM = MulticoreParam(workers = no_cores), sizes = c(5, 10, 15, 20))
} else {
sce = computeSumFactors(sce, BPPARAM = MulticoreParam(workers = no_cores))
}
sf = sizeFactors(sce)
dimnames(gene_bc_mat) = dimnames_gene_bc_mat
return(log2(sweep(gene_bc_mat, 2, sf, "/") + 1))
}
scalematrix = function(data){
cm = rowMeans(data)
csd = apply(data, 1, sd)
(data - cm) / csd
}
corfunc = function(m1, m2){
scalematrix(t(m1)) %*% t(scalematrix(t(m2))) / (nrow(m1) - 1)
}
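  # Hedged check (illustrative, not in the original script): corfunc(m1, m2) equals
  # the Pearson correlation between the columns of m1 and the columns of m2, e.g.
  #   m = matrix(rnorm(20), nrow = 5)
  #   all.equal(corfunc(m, m), cor(m))  # TRUE up to floating-point error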
gene_bulk_all = as.matrix(read.table("./data/BONE_MARROW/original_data/GSE74246_RNAseq_All_Counts.txt.gz", header = T, row.names = 1))
gene_bulk_mat = gene_bulk_all[, grep("^X", colnames(gene_bulk_all))]
# Use annotation information
gz_path = "./data/hg19/Homo_sapiens.GRCh37.87.gtf.gz"
annotation_mat = get_map(gz_path)
tgngl = tapply(annotation_mat[, "gene_length"] / 1000, annotation_mat[, "gene_name"], max)
gngl = as.vector(tgngl)
names(gngl) = names(tgngl)
gene_bulk_filt = gene_bulk_mat[row.names(gene_bulk_mat) %in% names(gngl),]
gene_bulk_norm = sweep(gene_bulk_filt / gngl[rownames(gene_bulk_filt)], 2, colSums(gene_bulk_filt) / 1e6, "/")
bulk_data = log2(gene_bulk_norm[rowSums(gene_bulk_norm) > 0,] + 1)
bulk_cell_type = sapply(colnames(bulk_data), function(x){
strsplit(x,"\\.")[[1]][2]
}, USE.NAMES = F)
sc_data = scran_normalization(gene_bc_filt)
rownames(sc_data) = sub(".*:", "", rownames(gene_bc_filt))
used_genes = intersect(rownames(bulk_data), rownames(sc_data))
bulk_filt = bulk_data[used_genes, ]
sc_filt = sc_data[used_genes, ]
# The expression level for each cell type in bulk sequencing
bulk_mean = sapply(unique(bulk_cell_type),function(x) {
rowMeans(bulk_filt[ , bulk_cell_type == x])
})
  # Find the 100 top positive differentially expressed genes for each cell-type pair.
DEG_list = list()
top_number = 100
unique_celltype_pairs = combn(ncol(bulk_mean), 2)
for(ii in seq(ncol(unique_celltype_pairs))){
celltype_1 = colnames(bulk_mean)[unique_celltype_pairs[1, ii]]
celltype_2 = colnames(bulk_mean)[unique_celltype_pairs[2, ii]]
sort_result = sort(bulk_mean[ , celltype_1] - bulk_mean[ , celltype_2], decreasing = FALSE)
DEG_list[[paste(celltype_2, celltype_1, sep = "-")]] = names(sort_result)[seq(top_number)]
DEG_list[[paste(celltype_1, celltype_2, sep = "-")]] = names(sort_result)[seq(from = length(sort_result), to = length(sort_result) - (top_number - 1))]
}
# Calculate the mean expression of these top-gene combinations across cell types (bulk) or cells (single-cell).
expression_mean_function = function(gene_bc_norm, DEG_list){
return(t(sapply(DEG_list, function(x){
colMeans(gene_bc_norm[x, ])
})))
}
bulk_DEG_expression_mean = expression_mean_function(bulk_mean, DEG_list)
sc_DEG_expression_mean = expression_mean_function(sc_filt, DEG_list)
# Calculate the expression variation of these top-gene combinations across cell types (bulk) or cells (single-cell).
expression_variation_function = function(x){
return((x - rowMeans(x)) / apply(x, 1, sd))
}
bulk_DEG_expression_variation = expression_variation_function(bulk_DEG_expression_mean)
sc_DEG_expression_variation = expression_variation_function(sc_DEG_expression_mean)
# Each top-gene combination correspond a cell type.
bulk_DEG_combination_rank = apply(bulk_DEG_expression_variation, 2, rank)
sc_DEG_combination_rank = apply(sc_DEG_expression_variation, 2, rank)
# Cell type identification.
maxcorcut = 0.6
difcorcut = 0
cormat = corfunc(sc_DEG_combination_rank, bulk_DEG_combination_rank)
maxcor = apply(cormat, 1, max)
max2cor = apply(cormat, 1, function(x){
sort(x, decreasing = T)[2]
})
cell_type = colnames(cormat)[apply(cormat, 1, which.max)]
cell_type[maxcor < maxcorcut] = NA
cell_type[maxcor - max2cor < difcorcut] = NA
names(cell_type) = colnames(sc_data)
saveRDS(cell_type, "./data/BONE_MARROW/cell_type.rds")
}else{
cell_type = readRDS("./data/BONE_MARROW/cell_type.rds")
}
print("Cell Type ... OK!")
### Trajectory evaluation
#After cell identification, we evaluate the trajectory performance using monocle, following these scripts (https://github.com/Winnie09/imputationBenchmark/blob/93f27e890a86fdc732257a4036bf38a52faf9f33/trajectory/code/hca/monocle2/01_get_score.R, https://github.com/Winnie09/imputationBenchmark/blob/93f27e890a86fdc732257a4036bf38a52faf9f33/trajectory/code/hca/tscan/01_get_score.R).
### monocle2
library(monocle)
get_cds_monocle2 = function(gene_bc_mat){
# Make a new CDS and use DDRTree for dimension reduction.
pd = new("AnnotatedDataFrame", data = data.frame(row.names = colnames(gene_bc_mat), cell = colnames(gene_bc_mat)))
fd = new("AnnotatedDataFrame", data = data.frame(row.names = rownames(gene_bc_mat), gene_short_name = rownames(gene_bc_mat)))
cds = newCellDataSet(gene_bc_mat, phenoData = pd, featureData = fd, expressionFamily = negbinomial.size())
cds = estimateSizeFactors(cds)
cds = estimateDispersions(cds)
print("Reducing dimension...")
cds = reduceDimension(cds)
return(orderCells(cds))
}
###
cell_level_df = data.frame(level = c(1, 2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5, 5),
immunepath = c(1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0),
monopath = c(1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0),
erypath = c(1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1),
stringsAsFactors = F)
rownames(cell_level_df) = c("HSC", "MPP", "LMPP", "CMP", "CLP", "GMP", "MEP", "Bcell", "CD4Tcell", "CD8Tcell", "NKcell", "Mono", "Ery")
path_name = c("immunepath", "monopath", "erypath")
order_list = list(correct = list(), wrong = list(), cell_type = list())
wrong_order_list = list("1" = c(), "2" = c(), "3" = c(), "4" = c())
for(ii in path_name){
path_celltype = rownames(cell_level_df)[cell_level_df[, ii] == 1]
order_list[["cell_type"]][[ii]] = path_celltype
cell_type_pair = as.matrix(apply(expand.grid(path_celltype, path_celltype), 2, as.character))
cell_type_pair = cell_type_pair[cell_type_pair[, 1] != cell_type_pair[, 2], ]
correct_mat = cell_type_pair[cell_level_df[cell_type_pair[, 1], "level"] < cell_level_df[cell_type_pair[, 2], "level"], ]
wrong_mat = cell_type_pair[cell_level_df[cell_type_pair[, 1], "level"] > cell_level_df[cell_type_pair[, 2], "level"], ]
order_list[["correct"]][[ii]] = apply(correct_mat, 1, paste, collapse = "_")
order_list[["wrong"]][[ii]] = apply(wrong_mat, 1, paste, collapse = "_")
for(jj in seq(max(cell_level_df[, "level"]) - min(cell_level_df[, "level"]))){
this_mask = cell_level_df[cell_type_pair[, 1], "level"] == jj + cell_level_df[cell_type_pair[, 2], "level"]
if(sum(this_mask) >= 1){
if(sum(this_mask) == 1){
wrong_order_list[[as.character(jj)]] = c(wrong_order_list[[as.character(jj)]], paste(cell_type_pair[this_mask, ], collapse = "_"))
}else{
wrong_order_list[[as.character(jj)]] = c(wrong_order_list[[as.character(jj)]], apply(cell_type_pair[this_mask, ], 1, paste, collapse = "_"))
}
}
}
}
type_level = as.character(cell_level_df[, "level"])
names(type_level) = rownames(cell_level_df)
correct_order_all = unique(unlist(order_list[["correct"]]))
wrong_order_all = unique(unlist(order_list[["wrong"]]))
get_score_monocle2 = function(cds, cell_type, correct_order, wrong_order = NULL, wrong_order_list = NULL, output_dir = NULL, type_level = NULL){
if(is.null(wrong_order) + is.null(wrong_order_list) != 1){
stop("One of wrong_order and wrong_order_list should be input.")
}
if(!is.null(wrong_order_list)){
wrong_order = unique(unlist(wrong_order_list))
result_mat = matrix(nrow = 0, ncol = 5, dimnames = list(c(), c("acc", "correct_number", "wrong_number", "pair_number", "distance_sum")))
}else{
result_mat = matrix(nrow = 0, ncol = 4, dimnames = list(c(), c("acc", "correct_number", "wrong_number", "pair_number")))
}
print("Looking for the root state...")
used_cells = as.character(pData(cds)$cell)
if(!is.null(output_dir)){
dir.create(output_dir, recursive = T, showWarnings = F)
p = plot_cell_trajectory(cds, color_by = "State")
ggsave(paste0(output_dir, "/state.pdf"), p)
pData(cds)$CellType = cell_type[used_cells]
p = plot_cell_trajectory(cds, color_by = "CellType")
ggsave(paste0(output_dir, "/celltype.pdf"), p)
if(!is.null(type_level)){
pData(cds)$Level = type_level[cell_type[used_cells]]
p = plot_cell_trajectory(cds, color_by = "Level")
ggsave(paste0(output_dir, "/level.pdf"), p)
}
}
cell_states = as.numeric(as.character(pData(cds)$State))
names(cell_states) = used_cells
unique_states = unique(cell_states)
checkroot = sapply(unique_states, function(x){
cds = orderCells(cds, root_state = x)
return(length(cds@auxOrderingData[[cds@dim_reduce_type]]$root_cell))
})
candidate_root_states = sort(unique_states[checkroot > 0])
for(ii in candidate_root_states){
cds = orderCells(cds, root_state = ii)
this_output_dir = paste0(output_dir, "/rootstate_", ii)
dir.create(this_output_dir, recursive = T, showWarnings = F)
if(!is.null(output_dir)){
p = plot_cell_trajectory(cds, color_by = "Pseudotime")
ggsave(paste0(this_output_dir, "/pseudotime.pdf"), p)
}
all_branch_points = cds@auxOrderingData[[cds@dim_reduce_type]]$branch_points
if(length(all_branch_points) > 0){
for(jj in seq(length(all_branch_points))){
cds_tmp = cds
tryCatch({
cds_reduced = buildBranchCellDataSet(cds_tmp, branch_point = jj)
df = data.frame(pData(cds_reduced),stringsAsFactors = F)[used_cells, ]
if(!is.null(output_dir)){
pData(cds_tmp)$Pseudotime = df[, "Pseudotime"]
pData(cds_tmp)$Branch = df[, "Branch"]
pData(cds_tmp)$State = df[, "State"]
p = plot_cell_trajectory(cds_tmp, color_by = "Pseudotime")
ggsave(paste0(this_output_dir, "/branchpoint_", jj, "_pseudotime.pdf"), p)
p = plot_cell_trajectory(cds_tmp, color_by = "Branch")
ggsave(paste0(this_output_dir, "/branchpoint_", jj, "_branch.pdf"), p)
p = plot_cell_trajectory(cds_tmp, color_by = "State")
ggsave(paste0(this_output_dir, "/branchpoint_", jj, "_state.pdf"), p)
pData(cds_tmp)$CellType = df[, "CellType"]
p = plot_cell_trajectory(cds_tmp, color_by = "CellType")
ggsave(paste0(this_output_dir, "/branchpoint_", jj, "_celltype.pdf"), p)
if(!is.null(type_level)){
pData(cds_tmp)$Level = df[, "Level"]
p = plot_cell_trajectory(cds_tmp, color_by = "Level")
ggsave(paste0(this_output_dir, "/branchpoint_", jj, "_level.pdf"), p)
}
}
df = df[order(df$Pseudotime), ]
score = rowSums(sapply(unique(df$Branch),function(x){
branch_cell = as.character(df[df$Branch == x, 1])
branch_celltype = cell_type[branch_cell]
index_pair = combn(length(branch_cell), 2)
if(min(index_pair[2,] - index_pair[1,]) < 0){
stop("index_pair error")
}
branch_cellorder = sprintf("%s_%s",branch_celltype[index_pair[1, ]], branch_celltype[index_pair[2, ]])
return_c = c(sum(branch_cellorder %in% correct_order), sum(branch_cellorder %in% wrong_order))
if(!is.null(wrong_order_list)){
distance_sum = 0
for(kk in names(wrong_order_list)){
distance_sum = distance_sum + sum(branch_cellorder %in% wrong_order_list[[kk]]) * as.numeric(kk)
}
return_c = c(return_c, distance_sum)
}
return(return_c)
}))
pair_number = sum(score[c(1, 2)])
acc = score[1] / pair_number
if(!is.null(wrong_order_list)){
this_branch_point_results = matrix(c(acc, score[1], score[2], pair_number, score[3]), nrow = 1)
}else{
this_branch_point_results = matrix(c(acc, score[1], score[2], pair_number), nrow = 1)
}
rownames(this_branch_point_results) = paste0("RS_", ii, "_BP_", jj)
result_mat = rbind(result_mat, this_branch_point_results)
}, error = function(e){
cat(ii, " - ", jj, "\n")
print(e)
})
}
}
}
return(result_mat)
}
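# Hedged usage sketch (illustrative; the objects come from the code above and the
# output subdirectory name is an assumption):
#   cds = get_cds_monocle2(as.matrix(gene_bc_filt))
#   score_mat = get_score_monocle2(cds, cell_type, correct_order_all,
#                                  wrong_order_list = wrong_order_list,
#                                  output_dir = file.path(output_dir, "monocle2"),
#                                  type_level = type_level)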
|
{"hexsha": "a9016efee5c23b1c4c5fc86842fff26ea055f908", "size": 14211, "ext": "r", "lang": "R", "max_stars_repo_path": "reproducibility/Down-stream Analysis Improvement/raw_scripts/pseudotemporal_analysis_source.r", "max_stars_repo_name": "iyhaoo/DISC", "max_stars_repo_head_hexsha": "42bcb570bc76ac28bba1681e905efc5189c15e39", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-12-13T06:20:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:48:27.000Z", "max_issues_repo_path": "reproducibility/Down-stream Analysis Improvement/raw_scripts/pseudotemporal_analysis_source.r", "max_issues_repo_name": "xie-lab/DISC", "max_issues_repo_head_hexsha": "f7e79c89fb3840f548cc093184edd53ffb3f57ca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-12-13T11:25:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-21T01:23:55.000Z", "max_forks_repo_path": "reproducibility/Down-stream Analysis Improvement/raw_scripts/pseudotemporal_analysis_source.r", "max_forks_repo_name": "iyhaoo/DISC", "max_forks_repo_head_hexsha": "42bcb570bc76ac28bba1681e905efc5189c15e39", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-12-13T06:20:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-21T07:42:34.000Z", "avg_line_length": 54.2404580153, "max_line_length": 469, "alphanum_fraction": 0.6801069594, "num_tokens": 4129}
|
"""
# @Time : 2021/7/3 8:04 AM
# @Author : hezhiqiang01
# @Email : hezhiqiang01@baidu.com
# @File : naiveAC.py
"""
import argparse
import torch
import gym
import numpy as np
import collections
import torch.nn as nn
from torch.distributions import Categorical
import torch.nn.functional as F
Experience = collections.namedtuple(typename="Experience", field_names=['state', 'action', 'reward', 'done', 'nextState'])
class ExperienceBuffer(object):
def __init__(self, args):
self.buffer = collections.deque(maxlen=args.replay_size)
def __len__(self):
return len(self.buffer)
def append(self, experience):
self.buffer.append(experience)
def sample_trajectory(self):
indices = np.arange(0, self.__len__())
states, actions, rewards, done, next_states = zip(*[self.buffer[idx] for idx in indices])
self.buffer.clear()
return np.array(states), actions, np.array(rewards, dtype=np.float32), done, np.array(next_states)
class Actor(nn.Module):
def __init__(self, input_dim, output_dim):
super(Actor, self).__init__()
self.fc1 = nn.Linear(input_dim, 32)
self.fc2 = nn.Linear(32, output_dim)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = F.softmax(self.fc2(x), dim=1)
return x
class Critic(nn.Module):
def __init__(self, input_dim, output_dim):
super(Critic, self).__init__()
self.fc1 = nn.Linear(input_dim, 32)
self.fc2 = nn.Linear(32, output_dim)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x) # Scalar Value
return x
class Agent(object):
def __init__(self, env, exp_buffer, args):
super(Agent, self).__init__()
self.env = env
self.exp_buffer = exp_buffer
self.args = args
self.actor = None
self.critic = None
self.build_model()
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.args.actor_lr)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.args.critic_lr)
def build_model(self):
obs_dim = self.env.observation_space.shape[0]
action_dim = self.env.action_space.n
self.actor = Actor(input_dim=obs_dim, output_dim=action_dim)
self.critic = Critic(input_dim=obs_dim, output_dim=1)
def choose_action(self, state):
x = torch.unsqueeze(torch.FloatTensor(state), 0)
prob = self.actor(x)
c = Categorical(prob)
action = c.sample()
return action
def store_transition(self, state, action, r, done, state_next):
exp = Experience(state, action, r, done, state_next)
self.exp_buffer.append(exp)
def learn(self):
buffer = self.exp_buffer.sample_trajectory()
states, actions, rewards, done, next_states = buffer
        # Compute discounted returns backwards through the trajectory: a terminal
        # step keeps only its immediate reward, earlier steps bootstrap from the
        # following step's return.
        for i in reversed(range(len(rewards))):
            if not done[i]:
                rewards[i] = self.args.gamma * rewards[i + 1] + rewards[i]
        # Normalize returns; the small epsilon guards against zero variance
        # in degenerate episodes
        r_mean = np.mean(rewards)
        r_std = np.std(rewards)
        rewards = (rewards - r_mean) / (r_std + 1e-8)
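        # Hedged worked example (illustrative, not from the original code): with
        # gamma = 0.99 and a 3-step episode with rewards [1, 1, 1] ending in done,
        # the backward pass above yields returns
        #   G2 = 1, G1 = 1 + 0.99 * 1 = 1.99, G0 = 1 + 0.99 * 1.99 = 2.9701
        # which are then standardized before the actor/critic updates.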
states_tensor = torch.FloatTensor(states)
actions_tensor = torch.FloatTensor(actions)
rewards_tensor = torch.FloatTensor(rewards)
state_v = torch.squeeze(self.critic(states_tensor), 1)
prob = self.actor(states_tensor)
c = Categorical(prob)
self.actor_optimizer.zero_grad()
adv = rewards_tensor - state_v.detach()
actor_loss = torch.sum(-c.log_prob(actions_tensor) * adv)
actor_loss.backward()
self.actor_optimizer.step()
self.critic_optimizer.zero_grad()
critic_loss = F.smooth_l1_loss(state_v, rewards_tensor)
critic_loss.backward()
self.critic_optimizer.step()
def main():
parser = argparse.ArgumentParser(description="the parameter of actor critic")
parser.add_argument('--replay_size', type=int, help="maximum capacity of the buffer", default=2000)
parser.add_argument('--actor_lr', type=float, help='actor learning rate used in the Adam optimizer', default=0.01)
parser.add_argument('--critic_lr', type=float, help='critic learning rate used in the Adam optimizer', default=0.01)
parser.add_argument('--gamma', type=float, help="gamma value used for Bellman approximation", default=0.99)
arg = parser.parse_args()
buffer = ExperienceBuffer(args=arg)
env = gym.make('CartPole-v0')
agent = Agent(env, buffer, arg)
for epoch in range(10000):
state, done = env.reset(), False
episode_r = []
while not done:
action = agent.choose_action(state)
state_next, r, done, info = env.step(action.item())
agent.store_transition(state, action.item(), r, done, state_next)
if not done:
state = state_next
episode_r.append(r)
agent.learn()
print("epoch: {} | len_ep_r: {} | avg_r: {}".format(epoch, len(episode_r), np.sum(episode_r) / len(episode_r)))
env.close()
if __name__ == "__main__":
main()
|
{"hexsha": "10e2c2bdc9c606da02f65559c3db306ce8004cf7", "size": 5210, "ext": "py", "lang": "Python", "max_stars_repo_path": "chap05 Actor Critic/naiveAC.py", "max_stars_repo_name": "18279406017/awesome-reinforcement-learning", "max_stars_repo_head_hexsha": "88644e65f6c18ad74a84eb87e7ce433fa65530bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-19T13:37:16.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-19T13:37:16.000Z", "max_issues_repo_path": "chap05 Actor Critic/naiveAC.py", "max_issues_repo_name": "18279406017/awesome-reinforcement-learning", "max_issues_repo_head_hexsha": "88644e65f6c18ad74a84eb87e7ce433fa65530bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chap05 Actor Critic/naiveAC.py", "max_forks_repo_name": "18279406017/awesome-reinforcement-learning", "max_forks_repo_head_hexsha": "88644e65f6c18ad74a84eb87e7ce433fa65530bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6129032258, "max_line_length": 122, "alphanum_fraction": 0.6355086372, "include": true, "reason": "import numpy", "num_tokens": 1238}
|
import preprocessing
import unittest
import numpy as np
class PreprocessingTest(unittest.TestCase):
def setUp(self):
self.raw_small_image = np.random.uniform(0, 255, (16,17,3)).astype(int)
self.char2ind = {'a': 0,
'b': 1,
'c': 2}
self.ind2char = dict((b,a) for a,b in self.char2ind.items())
def test_randomString(self):
result = preprocessing.randomString(
'aaaaaaaaaa', lenght=5)
self.assertEqual(result, 'aaaaa')
def test_resize_one(self):
result = preprocessing.resize_one(
self.raw_small_image, shape=(32, 32, 3))
self.assertEqual(result.shape, (32, 32, 3))
def test_OHE(self):
result = preprocessing.OHE('abc', self.char2ind)
correct_result = np.array([[1,0,0],[0,1,0],[0,0,1]])
cond = np.array_equal(result, correct_result)
self.assertTrue(cond)
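
if __name__ == "__main__":
    # Hedged convenience entry point (not in the original file): lets the suite
    # be run directly with `python test_preprocessing.py`.
    unittest.main()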
|
{"hexsha": "ecc7fa963fc641e0f40e5795b51bc2de95d327cc", "size": 937, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_preprocessing.py", "max_stars_repo_name": "denkuzin/captcha_solver", "max_stars_repo_head_hexsha": "cea3a3673df2d9c9529811d0ed4ee0a2244166d3", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-25T15:16:48.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-04T18:42:31.000Z", "max_issues_repo_path": "tests/test_preprocessing.py", "max_issues_repo_name": "denkuzin/captcha_solver", "max_issues_repo_head_hexsha": "cea3a3673df2d9c9529811d0ed4ee0a2244166d3", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_preprocessing.py", "max_forks_repo_name": "denkuzin/captcha_solver", "max_forks_repo_head_hexsha": "cea3a3673df2d9c9529811d0ed4ee0a2244166d3", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2258064516, "max_line_length": 79, "alphanum_fraction": 0.5933831377, "include": true, "reason": "import numpy", "num_tokens": 234}
|
#!/usr/bin/env python
"""
Author:
Yixin Li
Email: liyixin@mit.edu
"""
import numpy as np
from of.utils import *
from of.gpu.KernelThinWrapper import KernelThinWrapper
from .gpu import dirname_of_cuda_files
cuda_filename = os.path.join(dirname_of_cuda_files,'rgb_to_lab.cu')
FilesDirs.raise_if_file_does_not_exist(cuda_filename)
with open(cuda_filename,'r') as cuda_file:
_gpu_kernel = cuda_file.read()
include_dirs=[dirname_of_cuda_files]
class _RgbToLab(KernelThinWrapper):
def __init__(self):
        # Name the class explicitly: super(type(self), self) recurses forever
        # if this class is ever subclassed.
        super(_RgbToLab, self).__init__(gpu_kernel=_gpu_kernel,
                                        include_dirs=include_dirs)
self._get_function_from_src_module('rgb_to_lab')
def __call__(self,img_gpu, threads_per_block = 1024, do_input_checks=False):
if do_input_checks:
if not isinstance(img_gpu,gpuarray.GPUArray):
raise TypeError(type(img_gpu))
nPts = img_gpu.shape[0] * img_gpu.shape[1]
num_block = int ( np.ceil(nPts / float(threads_per_block)) )
self._gpu_rgb_to_lab(img_gpu,
np.int32(nPts),
grid=(num_block,1,1),
block=(threads_per_block,1,1))
rgb_to_lab = _RgbToLab()
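# Hedged usage sketch (assumes an initialized pycuda context; the H x W x 3 image
# layout matches the nPts computation above, while the exact element dtype expected
# by rgb_to_lab.cu is an assumption):
#
#   import pycuda.autoinit                      # hypothetical context setup
#   import pycuda.gpuarray as gpuarray
#   img_gpu = gpuarray.to_gpu(img_hwc.astype(np.float32))
#   rgb_to_lab(img_gpu)                         # converts the image in place on the GPU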
|
{"hexsha": "549e223c9731a5fe38116a869eebafd1b0fe1732", "size": 1282, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/superpixels/rgb_to_lab.py", "max_stars_repo_name": "BGU-CS-VIL/fastScsp", "max_stars_repo_head_hexsha": "32e36d2ee2a6636303bd4cbf8b7cc91190af2202", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-03-07T13:00:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T00:38:57.000Z", "max_issues_repo_path": "python/superpixels/rgb_to_lab.py", "max_issues_repo_name": "BGU-CS-VIL/fastScsp", "max_issues_repo_head_hexsha": "32e36d2ee2a6636303bd4cbf8b7cc91190af2202", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/superpixels/rgb_to_lab.py", "max_forks_repo_name": "BGU-CS-VIL/fastScsp", "max_forks_repo_head_hexsha": "32e36d2ee2a6636303bd4cbf8b7cc91190af2202", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2682926829, "max_line_length": 82, "alphanum_fraction": 0.6458658346, "include": true, "reason": "import numpy", "num_tokens": 303}
|
import time
import math
import numpy as np
from pykeops.numpy import LazyTensor, ComplexLazyTensor
M, N, D = 1000, 1000, 3
dtype = "float32"
do_warmup = False
x = np.random.rand(M, 1, D).astype(dtype) + 1j * np.random.rand(M, 1, D).astype(dtype)
y = np.random.rand(1, N, D).astype(dtype) + 1j * np.random.rand(1, N, D).astype(dtype)
a = -1.23
b = 1.54
def view_as_real(x):
    # numpy analogue of torch.view_as_real (torch itself is not imported here):
    # reinterpret a complex array as interleaved real/imaginary floats.
    if np.iscomplexobj(x):
        return x.view(x.real.dtype)
    return x
def fun(x, y, a, b, backend):
if backend == "keops":
x = LazyTensor(x)
y = LazyTensor(y)
conj = ComplexLazyTensor.conj
angle = ComplexLazyTensor.angle
else:
conj = np.conj
angle = np.angle
Kxy = ((x * y) * y.real + x + x.real).sum(axis=2)
return Kxy.sum(axis=0)
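# Hedged note (illustrative, not in the original script): for each y_j, both
# backends evaluate
#   K_j = sum_i sum_d ( x_id * y_jd * Re(y_jd) + x_id + Re(x_id) )
# (sum(axis=2) reduces the feature dimension, sum(axis=0) the i dimension), so
# the two outputs compared below should agree up to single-precision rounding.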
backends = ["numpy", "keops"]
out = []
for backend in backends:
if do_warmup:
fun(x[: min(M, 100), :, :], y[:, : min(N, 100), :], a, b, backend)
fun(x[: min(M, 100), :, :], y[:, : min(N, 100), :], a, b, backend)
start = time.time()
out.append(fun(x, y, a, b, backend).squeeze())
end = time.time()
print("time for " + backend + ":", end - start)
if len(out) > 1:
# print(out[0])
# print(out[1])
    print(
        "relative error:",
        # norm() handles complex arrays directly; viewing complex64 as "float"
        # (float64) would silently reinterpret the bytes and corrupt the result
        (np.linalg.norm(out[0] - out[1]) / np.linalg.norm(out[0])).item(),
    )
|
{"hexsha": "4220802187b671e101890e5f1116f7f24d4873c1", "size": 1426, "ext": "py", "lang": "Python", "max_stars_repo_path": "pykeops/sandbox/test_complex_numpy.py", "max_stars_repo_name": "mdiazmel/keops", "max_stars_repo_head_hexsha": "52a3d2ee80a720639f52898305f85399b7b45a63", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 695, "max_stars_repo_stars_event_min_datetime": "2019-04-29T10:20:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T13:07:24.000Z", "max_issues_repo_path": "pykeops/sandbox/test_complex_numpy.py", "max_issues_repo_name": "mdiazmel/keops", "max_issues_repo_head_hexsha": "52a3d2ee80a720639f52898305f85399b7b45a63", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 213, "max_issues_repo_issues_event_min_datetime": "2019-04-18T09:24:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:27:12.000Z", "max_forks_repo_path": "pykeops/sandbox/test_complex_numpy.py", "max_forks_repo_name": "mdiazmel/keops", "max_forks_repo_head_hexsha": "52a3d2ee80a720639f52898305f85399b7b45a63", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 52, "max_forks_repo_forks_event_min_datetime": "2019-04-18T09:18:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T01:48:33.000Z", "avg_line_length": 23.3770491803, "max_line_length": 86, "alphanum_fraction": 0.5413744741, "include": true, "reason": "import numpy", "num_tokens": 456}
|
from flask import Flask, Response
from flask_socketio import SocketIO, send, emit
from queue import Queue
import base64
import cv2
import numpy as np
from PIL import Image
import io
from os.path import dirname, abspath  # needed for the path below
d = dirname(dirname(abspath(__file__)))
app = Flask(__name__)
app.queue = Queue()
socketio = SocketIO(app)
@socketio.on('connect', namespace='/live')
def test_connect():
print('Client wants to connect.')
    emit('response', {'data': 'OK'}, broadcast=True)
@socketio.on('disconnect', namespace='/live')
def test_disconnect():
print('Client disconnected')
@socketio.on('livevideo', namespace='/live')
def test_live(message):
app.queue.put(message['data'])
    emit('camera_update', {'data': app.queue.get()}, broadcast=True)
# change port and IP
if __name__ == '__main__':
    socketio.run(app, host='0.0.0.0', port=8020, debug=True)
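# Illustrative client sketch (not part of the original file): a hypothetical
# python-socketio client pushing one base64-encoded frame to the '/live'
# namespace; the server address and the frame source are assumptions.
#
#   import socketio
#   sio = socketio.Client()
#   sio.connect('http://localhost:8020', namespaces=['/live'])
#   with open('frame.jpg', 'rb') as f:
#       payload = base64.b64encode(f.read()).decode('ascii')
#   sio.emit('livevideo', {'data': payload}, namespace='/live')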
|
{"hexsha": "b0c8862e6df6caae2f0853b66dcfd9234c8fded0", "size": 833, "ext": "py", "lang": "Python", "max_stars_repo_path": "server.py", "max_stars_repo_name": "DevconX/Tello-Python", "max_stars_repo_head_hexsha": "0e7ef8375e6904a536ff274ec7c868388424327e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2018-06-22T14:00:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-16T23:11:39.000Z", "max_issues_repo_path": "server.py", "max_issues_repo_name": "DevconX/Tello-Python", "max_issues_repo_head_hexsha": "0e7ef8375e6904a536ff274ec7c868388424327e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "server.py", "max_forks_repo_name": "DevconX/Tello-Python", "max_forks_repo_head_hexsha": "0e7ef8375e6904a536ff274ec7c868388424327e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-07-03T23:42:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-09T22:36:20.000Z", "avg_line_length": 26.03125, "max_line_length": 67, "alphanum_fraction": 0.7118847539, "include": true, "reason": "import numpy", "num_tokens": 212}
|
/*=============================================================================
Copyright (c) 1999-2003 Jaakko Jarvi
Copyright (c) 2001-2011 Joel de Guzman
Copyright (c) 2006 Dan Marsden
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#include <boost/fusion/container/map/map.hpp>
#include <boost/detail/lightweight_test.hpp>
#include <boost/fusion/sequence/intrinsic/at.hpp>
struct key1 {};
struct key2 {};
struct key3 {};
namespace test_detail
{
// something to prevent warnings for unused variables
template<class T> void dummy(const T&) {}
// no public default constructor
class foo
{
public:
explicit foo(int v) : val(v) {}
bool operator==(const foo& other) const
{
return val == other.val;
}
private:
foo() {}
int val;
};
// another class without a public default constructor
class no_def_constructor
{
no_def_constructor() {}
public:
no_def_constructor(std::string) {}
};
}
inline void
test()
{
using namespace boost::fusion;
using namespace test_detail;
nil empty;
(void)empty;
map<> empty0;
(void)empty0;
#ifndef NO_CONSTRUCT_FROM_NIL
map<> empty1(empty);
(void)empty1;
#endif
map<pair<key1, int> > t1;
BOOST_TEST(at_c<0>(t1).second == int());
map<pair<key1, float> > t2(5.5f);
BOOST_TEST(at_c<0>(t2).second > 5.4f && at_c<0>(t2).second < 5.6f);
map<pair<key1, foo> > t3(foo(12));
BOOST_TEST(at_c<0>(t3).second == foo(12));
map<pair<key1, double> > t4(t2);
BOOST_TEST(at_c<0>(t4).second > 5.4 && at_c<0>(t4).second < 5.6);
map<pair<key1, int>, pair<key2, float> > t5;
BOOST_TEST(at_c<0>(t5).second == int());
BOOST_TEST(at_c<1>(t5).second == float());
map<pair<key1, int>, pair<key2, float> > t6(12, 5.5f);
BOOST_TEST(at_c<0>(t6).second == 12);
BOOST_TEST(at_c<1>(t6).second > 5.4f && at_c<1>(t6).second < 5.6f);
map<pair<key1, int>, pair<key2, float> > t7(t6);
BOOST_TEST(at_c<0>(t7).second == 12);
BOOST_TEST(at_c<1>(t7).second > 5.4f && at_c<1>(t7).second < 5.6f);
map<pair<key1, long>, pair<key2, double> > t8(t6);
BOOST_TEST(at_c<0>(t8).second == 12);
BOOST_TEST(at_c<1>(t8).second > 5.4f && at_c<1>(t8).second < 5.6f);
dummy
(
map<
pair<key1, no_def_constructor>,
pair<key2, no_def_constructor>,
pair<key3, no_def_constructor> >
(
pair<key1, no_def_constructor>(std::string("Jaba")), // ok, since the default
pair<key2, no_def_constructor>(std::string("Daba")), // constructor is not used
pair<key3, no_def_constructor>(std::string("Doo"))
)
);
dummy(map<pair<key1, int>, pair<key2, double> >());
dummy(map<pair<key1, int>, pair<key2, double> >(1,3.14));
#if defined(FUSION_TEST_FAIL)
dummy(map<pair<key1, double&> >()); // should fail, no defaults for references
dummy(map<pair<key1, const double&> >()); // likewise
#endif
{
double dd = 5;
dummy(map<pair<key1, double&> >(pair<key1, double&>(dd))); // ok
dummy(map<pair<key1, const double&> >(pair<key1, const double&>(dd+3.14))); // ok, but dangerous
}
#if defined(FUSION_TEST_FAIL)
dummy(map<pair<key1, double&> >(dd+3.14)); // should fail,
// temporary to non-const reference
#endif
}
int
main()
{
test();
return boost::report_errors();
}
|
{"hexsha": "9c03b4268adcb094447a74edb42e9de5a2b47552", "size": 3707, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "3rdParty/boost/1.71.0/libs/fusion/test/sequence/map_construction.cpp", "max_stars_repo_name": "rajeev02101987/arangodb", "max_stars_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12278.0, "max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z", "max_issues_repo_path": "3rdParty/boost/1.71.0/libs/fusion/test/sequence/map_construction.cpp", "max_issues_repo_name": "rajeev02101987/arangodb", "max_issues_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9469.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T05:33:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:21.000Z", "max_forks_repo_path": "3rdParty/boost/1.71.0/libs/fusion/test/sequence/map_construction.cpp", "max_forks_repo_name": "rajeev02101987/arangodb", "max_forks_repo_head_hexsha": "817e6c04cb82777d266f3b444494140676da98e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 892.0, "max_forks_repo_forks_event_min_datetime": "2015-01-29T16:26:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T07:44:30.000Z", "avg_line_length": 27.4592592593, "max_line_length": 104, "alphanum_fraction": 0.5673050985, "num_tokens": 1097}
|
"""
ImageSpace: image matrix, inc dimensions, voxel size, vox2world matrix and
inverse, of an image. Used for resampling operations between different
spaces and also for saving images into said space (eg, save PV estimates
into the space of an image)
"""
import copy
from textwrap import dedent
import nibabel
import numpy as np
from nibabel import Nifti1Image, MGHImage
from fsl.data.image import Image as FSLImage
class ImageSpace(object):
"""
Voxel grid of an image, ignoring actual image data.
Args:
img: path to image, nibabel Nifti/MGH or FSL Image object
Attributes:
size: array of voxel counts in each dimension
vox_size: array of voxel size in each dimension
vox2world: 4x4 affine to transform voxel coords -> world
world2vox: inverse of above
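    Example (illustrative sketch; "ref.nii.gz" is a placeholder path):
        spc = ImageSpace("ref.nii.gz")
        print(spc.vox_size)    # voxel dimensions in mm
        print(spc.world2vox)   # inverse of spc.vox2world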
"""
def __init__(self, img):
if isinstance(img, str):
fname = img
img = nibabel.load(img)
else:
assert isinstance(img, (Nifti1Image, MGHImage, FSLImage))
if type(img) is FSLImage:
img = img.nibImage
fname = img.get_filename()
self.fname = fname
self.size = np.array(img.shape[:3], dtype=int)
self.vox2world = img.affine
self.header = img.header
@classmethod
def manual(cls, vox2world, size):
"""Manual constructor"""
spc = cls.__new__(cls)
spc.vox2world = vox2world
spc.size = np.array(size, dtype=int)
spc.fname = None
spc.header = None
return spc
@classmethod
def create_axis_aligned(cls, bbox_corner, size, vox_size):
"""
Create an ImageSpace from bounding box location and voxel size.
Note that the voxels will be axis-aligned (no rotation).
Args:
bbox_corner: 3-vector, location of the furthest corner of the
bounding box, at which the corner of voxel 0 0 0 will lie.
size: 3-vector, number of voxels in each spatial dimension
vox_size: 3-vector, size of voxel in each dimension
Returns
ImageSpace object
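        Example (illustrative sketch): a 10 x 10 x 10 grid of 1 mm voxels
        with its bounding box corner at the world origin:
            spc = ImageSpace.create_axis_aligned([0, 0, 0],
                                                 [10, 10, 10], [1, 1, 1])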
"""
bbox_corner = np.array(bbox_corner)
vox2world = np.identity(4)
vox2world[(0,1,2),(0,1,2)] = vox_size
orig = bbox_corner + (np.array((3 * [0.5])) @ vox2world[0:3,0:3])
vox2world[0:3,3] = orig
return cls.manual(vox2world, size)
@classmethod
def save_like(cls, ref, data, path):
"""Save data into the space of an existing image
Args:
ref: path to image defining space to use
data: ndarray (of appropriate dimensions)
path: path to write to
"""
spc = ImageSpace(ref)
spc.save_image(data, path)
@property
def vox_size(self):
"""Voxel size of image"""
return np.linalg.norm(self.vox2world[:3,:3], ord=2, axis=0)
@property
def fov_size(self):
"""FoV associated with image, in mm"""
return self.size * self.vox_size
@property
def bbox_origin(self):
"""
Origin of the image's bounding box, referenced to first voxel's
corner, not center (ie, -0.5, -0.5, -0.5)
"""
orig = np.array((3 * [-0.5]) + [1])
return (self.vox2world @ orig)[:3]
@property
def world2vox(self):
"""World coordinates to voxels"""
return np.linalg.inv(self.vox2world)
@property
def vox2FSL(self):
"""
Transformation between voxels and FSL coordinates (scaled mm). FLIRT
matrices are given in (src FSL) -> (ref FSL) terms.
See: https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FLIRT/FAQ
"""
if len(self.size) < 3:
raise RuntimeError("Volume has less than 3 dimensions, "
"cannot resolve space")
det = np.linalg.det(self.vox2world[0:3, 0:3])
vox2FSL = np.zeros((4,4))
vox2FSL[range(3), range(3)] = self.vox_size
        # Check the xyzt_units field to find the spatial units and scale to
        # mm. Per the NIfTI-1 convention, the low 3 bits encode space
        # (1 = metres, 2 = mm, 3 = microns); the previous string comparison
        # only matched combined space+time codes by coincidence.
        multi = 1
        if (self.header is not None) and ('xyzt_units' in self.header):
            xyzt = int(self.header['xyzt_units']) & 7
            if xyzt == 1:
                multi = 1000
            elif xyzt == 2:
                multi = 1
            elif xyzt == 3:
                multi = 1e-3
if det > 0:
vox2FSL[0,0] = -self.vox_size[0]
vox2FSL[0,3] = (self.size[0] - 1) * self.vox_size[0]
vox2FSL *= multi
vox2FSL[3,3] = 1
return vox2FSL
@property
def file_name(self):
if self.fname is not None:
return self.fname
else:
return "<ImageSpace not created from file path>"
@property
def FSL2vox(self):
"""Transformation from FSL scaled coordinates to voxels"""
return np.linalg.inv(self.vox2FSL)
@property
def world2FSL(self):
"""Transformation from world coordinates to FSL scaled"""
return self.vox2FSL @ self.world2vox
@property
def FSL2world(self):
"""Transformation from FSL scaled coordinates to world"""
return self.vox2world @ self.FSL2vox
def resize_voxels(self, factor, mode="floor"):
"""
Resize voxels of this grid.
Args:
factor: either a single value, or 3 values in array-like form,
by which to multiply voxel size in each dimension
mode: "floor" or "ceil", whether to round the grid size up or down
if factor does not divide perfectly into the current size
Returns:
new ImageSpace object
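        Example (illustrative sketch): double the voxel size in every
        dimension, roughly halving the grid extent:
            coarse = spc.resize_voxels(2)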
"""
if mode == "floor":
rounder = np.floor
else:
rounder = np.ceil
factor = np.array(factor)
if factor.size == 1:
factor = factor * np.ones(3)
new_size = rounder(self.size / factor).astype(int)
new_vox2world = copy.deepcopy(self.vox2world)
new_vox2world[:3,:3] *= factor[None,:]
bbox_shift = (new_vox2world[:3,:3] @ [0.5, 0.5, 0.5])
new_vox2world[:3,3] = self.bbox_origin + bbox_shift
return ImageSpace.manual(new_vox2world, new_size)
def touch(self, path, dtype=float):
"""Save empty volume at path"""
vol = np.zeros(self.size, dtype)
self.save_image(vol, path)
def resize(self, start, new_size):
"""
Resize the FoV of this space, maintaining axis alignment and voxel
size. Can be used to both crop and expand the grid. For example,
        to expand the grid sized X,Y,Z by 10 voxels split equally both
        before and after each dimension, use (-5,-5,-5) and (X+10, Y+10, Z+10)
Args:
start: sequence of 3 ints, voxel indices by which to shift first
voxel (0,0,0 is origin, negative values can be used to expand
and positive values to crop)
new_size: sequence of 3 ints, length in voxels for each dimension,
starting from the new origin
Returns:
new ImageSpace object
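        Example (illustrative sketch, for a 64 x 64 x 64 grid):
            bigger = spc.resize((-5, -5, -5), (74, 74, 74))  # pad 5 each side
            inner = spc.resize((10, 10, 10), (44, 44, 44))   # crop 10 each side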
"""
start = np.array(start)
new_size = np.array(new_size)
new_size[new_size == 0] = self.size[new_size == 0]
if (start.size != 3) and (new_size.size != 3):
raise RuntimeError("Extents must be 3 elements each")
if np.any(new_size < 0):
raise RuntimeError("new_size must be positive")
new = copy.deepcopy(self)
new_orig = self.vox2world[0:3,3] + (self.vox2world[0:3,0:3] @ start)
new.vox2world[0:3,3] = new_orig
new.size = new_size
new.fname = None
return new
def make_nifti(self, data):
"""Construct nibabel Nifti for this voxel grid with data"""
if not np.all(data.shape[0:3] == self.size):
if data.size == np.prod(self.size):
print("Reshaping data to 3D volume")
data = data.reshape(self.size)
elif not(data.size % np.prod(self.size)):
print("Reshaping data as 4D volume")
data = data.reshape((*self.size, -1))
else:
raise RuntimeError("Data size does not match image size")
if data.dtype is np.dtype(bool):
data = data.astype(np.int8)
nii = nibabel.nifti1.Nifti1Image(data, self.vox2world)
return nii
def save_image(self, data, path):
"""Save 3D or 4D data array at path using this image's voxel grid"""
if not (path.endswith('.nii') or path.endswith('.nii.gz')):
path += '.nii.gz'
nii = self.make_nifti(data)
nibabel.save(nii, path)
def ijk_grid(self, indexing='ij'):
"""
Return a 4D matrix of voxel indices for this space. Default indexing
is 'ij' (matrix convention), 'xy' can also be used - see np.meshgrid
for more info.
Returns:
4D array, size of this space in the first three dimensions, and
stacked I,J,K in the fourth dimension
"""
ijk = np.meshgrid(*[ np.arange(d) for d in self.size ], indexing=indexing)
return np.stack(ijk, axis=-1)
def voxel_centres(self, indexing='ij'):
"""
Return a 4D matrix of voxel centre coordinates for this space. Default
indexing is as for ImageSpace.ijk_grid(), which is 'ij' matrix convention.
See np.meshgrid for more info.
Returns:
4D array, size of this space in the first three dimensions, and
stacked I,J,K in the fourth dimension.
"""
from regtricks.application_helpers import aff_trans
ijk = self.ijk_grid(indexing).reshape(-1,3)
cents = aff_trans(self.vox2world, ijk)
return cents.reshape(*self.size, 3)
def transform(self, reg):
"""
Apply affine transformation to voxel grid of this space.
If the reg is a np.array, it must be in world-world terms, and
if it is a Registration object, the world-world transform will
be used automatically.
Args:
reg: either a 4x4 np.array (in world-world terms) or Registration
Returns:
a transformed copy of this image space
"""
from regtricks import Registration
if isinstance(reg, Registration):
reg = reg.src2ref
if not isinstance(reg, np.ndarray):
raise RuntimeError("argument must be a np.array or Registration")
new_spc = copy.deepcopy(self)
new_spc.vox2world = reg @ new_spc.vox2world
new_spc.fname = None
return new_spc
def __repr__(self):
formatter = "{:8.3f}".format
with np.printoptions(precision=3, formatter={'all': formatter}):
text = dedent(f"""\
ImageSpace with properties:
size: {self.size},
voxel size: {self.vox_size},
field of view: {self.fov_size},
vox2world: {self.vox2world[0,:]}
{self.vox2world[1,:]}
{self.vox2world[2,:]}
{self.vox2world[3,:]}
loaded from: {self.file_name}""")
return text
def __eq__(self, other):
f1 = np.allclose(self.vox2world, other.vox2world)
f2 = np.allclose(self.size, other.size)
return all([f1, f2])
|
{"hexsha": "35d83ecfedaae803957903dd9a4536d98a6ec81e", "size": 11652, "ext": "py", "lang": "Python", "max_stars_repo_path": "regtricks/image_space.py", "max_stars_repo_name": "tomfrankkirk/regtools", "max_stars_repo_head_hexsha": "844fb1f108aa7dd02e8b9d36a2975022bce2c98d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "regtricks/image_space.py", "max_issues_repo_name": "tomfrankkirk/regtools", "max_issues_repo_head_hexsha": "844fb1f108aa7dd02e8b9d36a2975022bce2c98d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "regtricks/image_space.py", "max_forks_repo_name": "tomfrankkirk/regtools", "max_forks_repo_head_hexsha": "844fb1f108aa7dd02e8b9d36a2975022bce2c98d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2386058981, "max_line_length": 82, "alphanum_fraction": 0.5704600069, "include": true, "reason": "import numpy", "num_tokens": 2926}
|
! MODULE: params_obs
!
! This module contains all of the necessary parameters related to the
! observations, and observation operators.
!
! Author: Prof. Stephen G. Penny
! University of Maryland, College Park
! Department of Atmospheric and Oceanic Science
!
! 2016.4.7
MODULE params_obs
USE common, ONLY: r_size
IMPLICIT NONE
PUBLIC
INTEGER,SAVE :: nobs
INTEGER,PARAMETER :: nid_obs=8 !STEVE: sets the dimension of the obs arrays - must be updated depending on choice of obs used.
INTEGER,PARAMETER :: id_u_obs=2819
INTEGER,PARAMETER :: id_v_obs=2820
INTEGER,PARAMETER :: id_t_obs=3073
INTEGER,PARAMETER :: id_s_obs=5521 !(OCEAN)
INTEGER,PARAMETER :: id_ssh_obs=5526 !(OCEAN)
INTEGER,PARAMETER :: id_sst_obs=5525 !(OCEAN)
INTEGER,PARAMETER :: id_sss_obs=5522 !(OCEAN)
INTEGER,PARAMETER :: id_eta_obs=5351 !(OCEAN)
INTEGER,PARAMETER :: id_sic_obs=6282 !(SEAICE)
INTEGER,PARAMETER :: id_x_obs=1111 !(OCEAN) (DRIFTERS) !STEVE: may want to change this depending on type of drifters
INTEGER,PARAMETER :: id_y_obs=2222 !(OCEAN) (DRIFTERS) !STEVE: may want to change this depending on type of drifters
INTEGER,PARAMETER :: id_z_obs=3333 !(OCEAN) (DRIFTERS) !STEVE: may want to change this depending on type of drifters
INTEGER,PARAMETER :: id_hs_obs=2692 !(SIS) snow thickness
INTEGER,PARAMETER :: id_hi_obs=8335 !(SIS) ice thickness
INTEGER,PARAMETER :: id_t1_obs=8915 !(SIS) layer 1 ice temperature
INTEGER,PARAMETER :: id_t2_obs=8925 !(SIS) layer 2 ice temperature
INTEGER,PARAMETER :: id_cn_obs=8333 !(SIS) ice concentration
INTEGER,PARAMETER :: id_ui_obs=8337 !(SIS) ice drift u
INTEGER,PARAMETER :: id_vi_obs=8334 !(SIS) ice drift v
!!------------------------------------------------------------
!! unique ID's for observations in COUPLED SYSTEM
!! STEVE: the following will replace what is above:
!!------------------------------------------------------------
!! atmosphere obs
INTEGER, PARAMETER :: obsid_atm_min = 1000
INTEGER, PARAMETER :: obsid_atm_max = 1999
INTEGER, PARAMETER :: obsid_atm_num = 8
INTEGER, PARAMETER :: obsid_atm_offset = 0
INTEGER, PARAMETER :: obsid_atm_ps = 1100
INTEGER, PARAMETER :: obsid_atm_rain = 1110
INTEGER, PARAMETER :: obsid_atm_t = 1210
INTEGER, PARAMETER :: obsid_atm_tv = 1211
INTEGER, PARAMETER :: obsid_atm_q = 1220
INTEGER, PARAMETER :: obsid_atm_rh = 1221
INTEGER, PARAMETER :: obsid_atm_u = 1250
INTEGER, PARAMETER :: obsid_atm_v = 1251
!! ocean obs
INTEGER, PARAMETER :: obsid_ocn_min = 2000
INTEGER, PARAMETER :: obsid_ocn_max = 2999
INTEGER, PARAMETER :: obsid_ocn_num = 8
INTEGER, PARAMETER :: obsid_ocn_offset = obsid_atm_offset + obsid_atm_num
INTEGER, PARAMETER :: obsid_ocn_ssh = 2100
INTEGER, PARAMETER :: obsid_ocn_eta = 2101
INTEGER, PARAMETER :: obsid_ocn_sst = 2110
INTEGER, PARAMETER :: obsid_ocn_sss = 2120
INTEGER, PARAMETER :: obsid_ocn_t = 2210
INTEGER, PARAMETER :: obsid_ocn_s = 2220
INTEGER, PARAMETER :: obsid_ocn_u = 2250
INTEGER, PARAMETER :: obsid_ocn_v = 2251
!->
INTEGER, PARAMETER :: obsid_ocn_x = 2301
INTEGER, PARAMETER :: obsid_ocn_y = 2302
INTEGER, PARAMETER :: obsid_ocn_z = 2303
!! sea-ice obs
INTEGER, PARAMETER :: obsid_sic_min = 3000
INTEGER, PARAMETER :: obsid_sic_max = 3999
INTEGER, PARAMETER :: obsid_sic_num = 1
INTEGER, PARAMETER :: obsid_sic_offset = obsid_ocn_offset + obsid_ocn_num
INTEGER, PARAMETER :: obsid_sic_con = 3100
!! land obs
INTEGER, PARAMETER :: obsid_lnd_min = 4000
INTEGER, PARAMETER :: obsid_lnd_max = 4999
INTEGER, PARAMETER :: obsid_lnd_num = 1
INTEGER, PARAMETER :: obsid_lnd_offset = obsid_sic_offset + obsid_sic_num
INTEGER, PARAMETER :: obsid_lnd_wat = 4100
!! wave obs
INTEGER, PARAMETER :: obsid_wav_min = 5000
INTEGER, PARAMETER :: obsid_wav_max = 5999
INTEGER, PARAMETER :: obsid_wav_num = 1
INTEGER, PARAMETER :: obsid_wav_offset = obsid_lnd_offset + obsid_lnd_num
INTEGER, PARAMETER :: obsid_wav_hgt = 5100
!! aerosol obs
INTEGER, PARAMETER :: obsid_aer_min = 6000
INTEGER, PARAMETER :: obsid_aer_max = 6999
INTEGER, PARAMETER :: obsid_aer_num = 1
INTEGER, PARAMETER :: obsid_aer_offset = obsid_wav_offset + obsid_wav_num
INTEGER, PARAMETER :: obsid_aer_aod = 6100
!-------------------------------------------------------------------------------
! arrays holding all observation id's and names, for easy iteration
! in loops that want to print stats for obs
!-------------------------------------------------------------------------------
INTEGER, PARAMETER :: obsid_num = 16
INTEGER, PARAMETER, DIMENSION(obsid_num) :: obsid_array = (/&
obsid_atm_ps, obsid_atm_rain, obsid_atm_t, obsid_atm_tv, &
obsid_atm_q, obsid_atm_rh, obsid_atm_u, obsid_atm_v, &
obsid_ocn_ssh, obsid_ocn_eta, obsid_ocn_sst, obsid_ocn_sss, &
obsid_ocn_t, obsid_ocn_s, obsid_ocn_u, obsid_ocn_v/)
CHARACTER (len=10) :: obsid_names(obsid_num) = (/&
"ATM_PS ", "ATM_RAIN", "ATM_T ", "ATM_TV ", &
"ATM_Q ", "ATM_RH ", "ATM_U ", "ATM_V ", &
"OCN_SSH ", "OCN_ETA ", "OCN_SST ", "OCN_SSS ",&
"OCN_T ", "OCN_S ", "OCN_U ", "OCN_V "/)
!-------------------------------------------------------------------------------
! Number of records in obs1 or obs2 formatted observation input binary files.
! ISSUE: make these namelist controllable:
!-------------------------------------------------------------------------------
  INTEGER :: obs1nrec = 6 ! Number of records in the obs1-formatted file (6 without a time record; 7 with one).
  INTEGER :: obs2nrec = 9 ! Number of records in the obs2-formatted file (8 without a time record; 9 with one).
!-------------------------------------------------------------------------------
! Remove all observations above 65ºN due to tripolar grid
!-------------------------------------------------------------------------------
LOGICAL :: DO_REMOVE_65N = .false.
!-------------------------------------------------------------------------------
! Temperature conversion method for computing OMFs
!-------------------------------------------------------------------------------
LOGICAL :: DO_POTTEMP_to_INSITU = .true. ! Conversion to observation space. This is needed if the
! observations aren't converted to potential temperature
! (as is done by most - NCEP, SODA, NASA/GMAO, etc.). But
! unlike that approach, this does not require synthetic salinity
! observations to be constructed from climatologies.
! This approach is theoretically better, but investigation must
! be done to ensure model biases do not cause significant errors.
! (a warning from J. Carton of potential difficulty)
!
! Only one can be true; this one takes priority
!
LOGICAL :: DO_INSITU_to_POTTEMP = .false. ! Technically, this would require matching an observed salinity
! measurement with each observed in situ temperature measurement
! and using it to compute the potential temperature. The opposite
! process is quite a bit easier.
END MODULE params_obs
|
{"hexsha": "28e12617717fe0c7b2d9937292dc78106f7a60b8", "size": 7727, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/obs/params_obs.f90", "max_stars_repo_name": "GEOS-ESM/Ocean-LETKF", "max_stars_repo_head_hexsha": "a7c4bbf86cdbff078212914dcc059d0b1450accf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-12-31T15:40:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T13:44:20.000Z", "max_issues_repo_path": "src/obs/params_obs.f90", "max_issues_repo_name": "GEOS-ESM/Ocean-LETKF", "max_issues_repo_head_hexsha": "a7c4bbf86cdbff078212914dcc059d0b1450accf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/obs/params_obs.f90", "max_forks_repo_name": "GEOS-ESM/Ocean-LETKF", "max_forks_repo_head_hexsha": "a7c4bbf86cdbff078212914dcc059d0b1450accf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-14T18:46:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T18:46:56.000Z", "avg_line_length": 47.6975308642, "max_line_length": 130, "alphanum_fraction": 0.5907855571, "num_tokens": 2020}
|
#
# colormaps.jl --
#
# Implements management of colors and colormaps for using with the PGPlot
# library.
#
module Colormaps
export
RGBVec,
palette
using Colors
using PGPlot.Bindings
import PGPlot.Bindings: pgqcr, pgscr
const DATA_DIR = normpath(joinpath(@__DIR__, "..", "data"))
"""
`RGBVec{T}(r,g,b)` represents an RGB color whose components have type `T`.
When `T` is a floating-point, the color triplet can be manipulated as a
3-element *vector*, that is `α*v + β*v` for any reals `α` and `β` and
colors `u` and `v` of type `RGBVec{<:AbstractFloat}` yields a color
of type `RGBVec{<:AbstractFloat}`. This is useful to interpolate colors
and build colormaps.
Having `T = UInt8` can be used to parse/convert colors.
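Example (illustrative sketch of the interpolation described above):

```julia
u = RGBVec{Float64}(1.0, 0.0, 0.0)  # red
v = RGBVec{Float64}(0.0, 0.0, 1.0)  # blue
mid = 0.5*u + 0.5*v                 # midpoint colour RGBVec(0.5, 0.0, 0.5)
```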
"""
struct RGBVec{T}
r::T
g::T
b::T
end
Base.eltype(::RGBVec{T}) where {T} = T
RGBVec{T}(col::RGBVec{T}) where {T} = col
function RGBVec{T}(col::RGBVec{UInt8}) where {T<:AbstractFloat}
a = one(T)/T(255)
return RGBVec{T}(a*col.r, a*col.g, a*col.b)
end
function RGBVec{UInt8}(col::RGBVec{T}) where {T<:AbstractFloat}
a = T(255)
    # construct an RGBVec{UInt8} (the original mistakenly rebuilt RGBVec{T})
    return RGBVec{UInt8}(round(UInt8, a*clamp(col.r, zero(T), one(T))),
                         round(UInt8, a*clamp(col.g, zero(T), one(T))),
                         round(UInt8, a*clamp(col.b, zero(T), one(T))))
end
RGBVec{T}(rgb::NTuple{3,Real}) where {T} = RGBVec{T}(rgb...)
RGBVec(rgb::NTuple{3,Real}) = RGBVec(rgb...)
RGBVec{T}(col::Colorant) where {T} = RGBVec{T}(RGB(col))
RGBVec(col::Colorant) = RGBVec(RGB(col))
RGBVec{T}(col::RGB) where {T} = RGBVec{T}(col.r, col.g, col.b)
RGBVec(col::RGB) = RGBVec(col.r, col.g, col.b)
RGBVec{T}(col::Symbol) where {T} = RGBVec{T}(String(col))
RGBVec(col::Symbol) = RGBVec(String(col))
RGBVec{T}(col::AbstractString) where {T} = RGBVec{T}(parse(RGB, col))
RGBVec(col::AbstractString) = RGBVec(parse(RGB, col))
Base.convert(::Type{T}, arg::T) where {T<:RGBVec} = arg
Base.convert(::Type{T}, arg::Colorant) where {T<:RGBVec} = T(arg)
Base.convert(::Type{T}, arg::NTuple{3,Real}) where {T<:RGBVec} = T(arg)
Base.convert(::Type{T}, arg::RGBVec) where {T<:RGB} = T(arg)
black(::Type{RGBVec{T}}) where {T} = RGBVec{T}(zero(T), zero(T), zero(T))
white(::Type{RGBVec{T}}) where {T<:AbstractFloat} =
RGBVec{T}(one(T), one(T), one(T))
white(::Type{RGBVec{T}}) where {T<:Unsigned} =
RGBVec{T}(typemax(T), typemax(T), typemax(T))
gray(::Type{RGBVec{T}}, f::T) where {T<:AbstractFloat} = RGBVec{T}(f, f, f)
gray(::Type{RGBVec{T}}, f::Real) where {T<:AbstractFloat} =
gray(RGBVec{T}, T(f))
gray(::Type{RGBVec{T}}, f::Real) where {T<:Unsigned} =
(g = round(T, typemax(T)*clamp(f, oftype(f, 0), oftype(f, 1)));
RGBVec{T}(g, g, g))
background(::Type{T}) where {T<:RGBVec} = pgqcr(T, 0)
foreground(::Type{T}) where {T<:RGBVec} = pgqcr(T, 1)
# Extend arithmetic operators to allow for simple color operations such as
# linear interpolation. This is restricted to floating-point colorants.
Base.:( + )(c1::RGBVec{T1}, c2::RGBVec{T2}) where {T1<:Real,T2<:Real} =
(T = float(promote_type(T1, T2)); RGBVec{T}(c1) + RGBVec{T}(c2))
Base.:( + )(c1::RGBVec{<:AbstractFloat}, c2::RGBVec{<:AbstractFloat}) =
RGBVec(c1.r + c2.r, c1.g + c2.g, c1.b + c2.b)
Base.:( - )(c1::RGBVec{T1}, c2::RGBVec{T2}) where {T1<:Real,T2<:Real} =
(T = float(promote_type(T1, T2)); RGBVec{T}(c1) - RGBVec{T}(c2))
Base.:( - )(c1::RGBVec{<:AbstractFloat}, c2::RGBVec{<:AbstractFloat}) =
RGBVec(c1.r - c2.r, c1.g - c2.g, c1.b - c2.b)
Base.:( * )(col::RGBVec, α::Real) = α*col
Base.:( * )(α::Real, col::RGBVec{T}) where {T<:Real} = α*RGBVec{float(T)}(col)
Base.:( * )(α::Real, col::RGBVec{<:AbstractFloat}) =
RGBVec(α*col.r, α*col.g, α*col.b)
Base.:( / )(col::RGBVec{T}, α::Real) where {T<:Real} = RGBVec{float(T)}(col)/α
Base.:( / )(col::RGBVec{<:AbstractFloat}, α::Real) =
RGBVec(col.r/α, col.g/α, col.b/α)
Base.:( \ )(α::Real, col::RGBVec) = col/α
Base.Tuple(col::RGBVec) = (col.r, col.g, col.b)
Base.clamp(col::RGBVec{T}) where {T<:AbstractFloat} =
RGBVec{T}(clamp(col.r, zero(T), one(T)),
clamp(col.g, zero(T), one(T)),
clamp(col.b, zero(T), one(T)))
function Base.tryparse(::Type{RGBVec{T}}, str::AbstractString) where {T}
cols = split(str, ' ', keepempty=false)
length(cols) == 3 || return nothing
r = tryparse(T, cols[1])
r === nothing && return nothing
g = tryparse(T, cols[2])
g === nothing && return nothing
b = tryparse(T, cols[3])
b === nothing && return nothing
return RGBVec(r,g,b)
end
# Query indexed color as an RGBVec structure.
pgqcr(::Type{RGBVec}, ci::Integer) = RGBVec(pgqcr(ci)...)
pgqcr(::Type{RGBVec{T}}, ci::Integer) where {T<:AbstractFloat} =
    RGBVec{T}(pgqcr(ci)...)
pgqcr(::Type{RGBVec{UInt8}}, ci::Integer) =
    RGBVec{UInt8}(pgqcr(RGBVec, ci))
# Set indexed color with an RGBVec structure.
pgscr(ci::Integer, col::RGBVec{UInt8}) = pgscr(PGInt(ci), RGBVec{PGFloat}(col))
pgscr(ci::Integer, col::RGBVec{<:AbstractFloat}) =
pgscr(PGInt(ci), col.r, col.g, col.b)
"""
```julia
find_file(name) -> path
```
yields the path to a readable graphics file.
"""
find_file(name::AbstractString) = find_file(convert(String, name))
function find_file(name::String)
if isfile(name)
return name
else
path = joinpath(DATA_DIR, name)
if isfile(path)
return path
else
throw_file_not_found(name)
end
end
end
@noinline throw_file_not_found(name::AbstractString) =
throw(ArgumentError(string("file \"", name, "\" not found")))
"""
```julia
load_gist(name) -> lut
```
yields the colormap read in Gist file `name`.
"""
function load_gist(name::AbstractString)
path = find_file(name)
lut = Vector{RGBVec{UInt8}}(undef, 0)
open(path, "r") do io
load_gist!(lut, io)
end
return lut
end
load_gist(io::IO) = load_gist!(Vector{RGBVec{UInt8}}(undef, 0), io::IO)
"""
```julia
load_gist!(lut, name) -> lut
```
overwrites the contents of the colormap `lut` with the contents read in Gist
file `name`.
"""
function load_gist!(lut::AbstractVector{T}, io::IO) where {T<:RGBVec}
resize!(lut, 0)
while !eof(io)
line = readline(io)
rgb = tryparse(T, line)
if rgb !== nothing
push!(lut, rgb)
end
end
return lut
end
"""
```julia
palette(cmap)
```
installs the colormap `cmap` (a name or a look-up table) in the current
plotting device.
The color index range may be specified:
```julia
palette(cmap, cmin, cmax)
```
If `cmin:cmax` is larger than the current index range, an attempt is made to
enlarge it.
Also see [`set_color_ramp`](@ref).
"""
function palette(ident::Union{AbstractString,AbstractVector{RGBVec{UInt8}}},
cmin::Union{Nothing,Integer} = nothing,
cmax::Union{Nothing,Integer} = nothing)
palette(ident, get_color_index_range(cmin, cmax)...)
end
function palette(name::AbstractString, cmin::Int, cmax::Int)
if endswith(name, ".gp")
lut = load_gist(name)
palette(lut, cmin, cmax)
elseif name == "gray" || name == "+gray"
set_color_ramp(cmin, cmax, 0)
elseif name == "-gray"
set_color_ramp(cmin, cmax, 1)
elseif name == "bg-fg"
set_color_ramp(cmin, cmax, 2)
elseif name == "fg-bg"
set_color_ramp(cmin, cmax, 3)
elseif name == "red" || name == "+red"
set_color_ramp(cmin, cmax, black(RGBVec{PGFloat}),
RGBVec{PGFloat}(1,0,0))
elseif name == "-red"
set_color_ramp(cmin, cmax, RGBVec{PGFloat}(1,0,0),
black(RGBVec{PGFloat}))
elseif name == "green" || name == "+green"
set_color_ramp(cmin, cmax, black(RGBVec{PGFloat}),
RGBVec{PGFloat}(0,1,0))
elseif name == "-green"
set_color_ramp(cmin, cmax, RGBVec{PGFloat}(0,1,0),
black(RGBVec{PGFloat}))
elseif name == "blue" || name == "+blue"
set_color_ramp(cmin, cmax, black(RGBVec{PGFloat}),
RGBVec{PGFloat}(0,0,1))
elseif name == "-blue"
set_color_ramp(cmin, cmax, RGBVec{PGFloat}(0,0,1),
black(RGBVec{PGFloat}))
else
throw_unknown_colormap(name)
end
end
function palette(lut::AbstractVector{RGBVec{UInt8}}, cmin::Int, cmax::Int)
length(lut) > 0 || error("no colors!")
f = 1/255
I = axes(lut, 1)
imin, imax = Int(first(I)), Int(last(I))
if cmin != cmax
a = (imax - imin)/(cmax - cmin)
for c in min(cmin,cmax):max(cmin,cmax)
t = (c - cmin)*a + imin
i0 = floor(Int, t)
i1 = min(i0 + 1, imax)
a1 = t - i0
a0 = one(a1) - a1
r = a0*lut[i0].r + a1*lut[i1].r
g = a0*lut[i0].g + a1*lut[i1].g
b = a0*lut[i0].b + a1*lut[i1].b
pgscr(c, f*r, f*g, f*b)
end
    else
        # cmin == cmax: set the single colour index to the middle lut entry
        # (the original referenced an undefined loop variable `c` here)
        i = ((imax + imin + 1) >> 1)
        pgscr(cmin, f*lut[i].r, f*lut[i].g, f*lut[i].b)
end
end
@noinline throw_unknown_colormap(name::AbstractString) =
throw(ArgumentError(string("unknown colormap \"", name, "\"")))
get_color_index_range(::Nothing, ::Nothing) = get_color_index_range()
function get_color_index_range()
cmin, cmax = pgqcir()
return (Int(cmin), Int(cmax))
end
function get_color_index_range(cmin::Union{Nothing,Integer},
cmax::Union{Nothing,Integer})
qmin, qmax = get_color_index_range()
rmin, rmax = qmin, qmax
if cmin !== nothing
rmin = oftype(rmin, cmin)
end
if cmax !== nothing
rmax = oftype(rmax, cmax)
end
if min(rmin, rmax) < qmin || max(rmin, rmax) > qmax
pgscir(min(rmin, rmax), max(rmin, rmax))
end
return (rmin, rmax)
end
"""
```julia
set_color_ramp([cmin::Integer, cmax::Integer,]
[flag=0 | lo::RGBVec, hi::RGBVec])
```
sets the current colormap with a linear ramp of shades of grays or of colors
interpolated between the background and the foreground color or between two
given colors.
Optional arguments `cmin` and `cmax` are to specify the range for the color
indices to set. If unspecified, the full range of indices used for images
(cmap1) is modified. Note that `cmin > cmax` is allowed to reverse the order
of colors.
Optional argument `flag` is an integer. If the least significant bit of `flag`
is set, the colors are reversed; if the second least significant bit of
`flag` is set, the background and foreground colors are interpolated;
otherwise, black and white are interpolated.
Two RGBVec colors, `lo` and `hi`, can be specified instead of `flag` to
interpolate between these two colors.
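Example (illustrative sketch): a ramp from black to pure red over the
current image colour-index range, matching what `palette("red")` does:

```julia
set_color_ramp(black(RGBVec{PGFloat}), RGBVec{PGFloat}(1, 0, 0))
```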
"""
set_color_ramp(flag::Integer = 0) = set_color_ramp(pgqcir()..., flag)
set_color_ramp(cmin::Integer, cmax::Integer, flag::Integer = 0) =
set_color_ramp(Int(cmin), Int(cmax), Int(flag))
function set_color_ramp(cmin::Int, cmax::Int, flag::Int = 0)
    if (flag&2) != 0  # bit 1 set (the original `== 1` could never be true)
# Use background and foreground colors.
col0 = background(RGBVec{PGFloat})
col1 = foreground(RGBVec{PGFloat})
else
# Force black and white.
col0 = black(RGBVec{PGFloat})
col1 = white(RGBVec{PGFloat})
end
if (flag&1) == 1
# Reverse colors.
col0, col1 = col1, col0
end
set_color_ramp(col0, col1, cmin, cmax)
end
set_color_ramp(lo::RGBVec, hi::RGBVec) = set_color_ramp(pgqcir()..., lo, hi)
set_color_ramp(lo::RGBVec, hi::RGBVec, cmin::Integer, cmax::Integer) =
set_color_ramp(cmin, cmax, lo, hi)
set_color_ramp(cmin::Integer, cmax::Integer, lo::RGBVec, hi::RGBVec) =
set_color_ramp(Int(cmin), Int(cmax),
RGBVec{PGFloat}(lo), RGBVec{PGFloat}(hi))
function set_color_ramp(cmin::Int, cmax::Int,
lo::RGBVec{PGFloat}, hi::RGBVec{PGFloat})
lo = clamp(lo)
hi = clamp(hi)
if cmin == cmax
# Set all color indices to the mean level.
col = (lo + hi)/2
for c in min(cmin,cmax):max(cmin,cmax)
pgscr(c, col)
end
else
# Interpolate the given colors.
f = one(PGFloat)/PGFloat(cmax - cmin)
for c in min(cmin,cmax):max(cmin,cmax)
a1 = (c - cmin)*f
a0 = one(a1) - a1
pgscr(c, a0*lo + a1*hi)
end
end
end
function set_standard_colors()
pgscr(0, 0.0,0.0,0.0)
pgscr(1, 1.0,1.0,1.0)
pgscr(2, 1.0,0.0,0.0)
pgscr(3, 0.0,1.0,0.0)
pgscr(4, 0.0,0.0,1.0)
pgscr(5, 0.0,1.0,1.0)
pgscr(6, 1.0,0.0,1.0)
pgscr(7, 1.0,1.0,0.0)
pgscr(8, 1.0,0.5,0.0)
pgscr(9, 0.5,1.0,0.0)
pgscr(10, 0.0,1.0,0.5)
pgscr(11, 0.0,0.5,1.0)
pgscr(12, 0.5,0.0,1.0)
pgscr(13, 1.0,0.0,0.5)
pgscr(14, 0.333,0.333,0.333)
pgscr(15, 0.667,0.667,0.667)
end
end # module
|
{"hexsha": "84f64f9e021b12536a359ebf55585daef9ddb9aa", "size": 12804, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/colormaps.jl", "max_stars_repo_name": "emmt/PGPlot.jl", "max_stars_repo_head_hexsha": "884ddc3624c071e081a804e0d55ae371204c6549", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-18T14:07:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-19T01:20:10.000Z", "max_issues_repo_path": "src/colormaps.jl", "max_issues_repo_name": "emmt/PGPlot.jl", "max_issues_repo_head_hexsha": "884ddc3624c071e081a804e0d55ae371204c6549", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/colormaps.jl", "max_forks_repo_name": "emmt/PGPlot.jl", "max_forks_repo_head_hexsha": "884ddc3624c071e081a804e0d55ae371204c6549", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7788461538, "max_line_length": 79, "alphanum_fraction": 0.6073883162, "num_tokens": 4290}
|
import os
import h5py
import numpy as np
from sklearn.model_selection import train_test_split
from utilsTrain import generator, ensureDir
from modelLib import makeModel
from keras.models import load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger, ReduceLROnPlateau
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
configTF = tf.ConfigProto()
configTF.gpu_options.allow_growth = True
sess = tf.Session(config=configTF)
# setting RNG seeds
tf.set_random_seed(2727)
np.random.seed(2727)
dbPath = '../data/HiSeqV2.h5'
rootModelDir = '../models/'
modelName = 'DilatedCNN2D_002'
modelFolder = os.path.join(rootModelDir, modelName)
weightsFolder = os.path.join(modelFolder, 'weights')
bestModelPath = os.path.join(modelFolder, 'best.hdf5')
ensureDir(weightsFolder)
epochs = 50
epochStart = 0
patience = 10
batchSize = 32
db = h5py.File(dbPath, 'r')
nTotal = db["RNASeq"].shape[0]
X = np.arange(nTotal)
y = db["label"][...]
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y, test_size = 0.25, random_state = 42)
train_generator = generator(db, X_train, batch_size = batchSize)
test_generator = generator(db, X_test, batch_size = batchSize)
if epochStart > 0:
model = load_model(bestModelPath)
else:
model = makeModel(modelName)
model.compile(loss = 'categorical_crossentropy', optimizer = 'adamax', metrics = ['categorical_accuracy'])
check1 = ModelCheckpoint(os.path.join(weightsFolder, modelName +"_{epoch:02d}-loss-{val_loss:.3f}.hdf5"),
monitor='val_loss', save_best_only=True, mode='auto')
check2 = ModelCheckpoint(bestModelPath, monitor='val_loss', save_best_only=True, mode='auto')
check3 = EarlyStopping(monitor='val_loss', min_delta=0.01,
patience=patience*3, verbose=0, mode='auto')
check4 = CSVLogger(os.path.join(modelFolder, modelName +'_trainingLog.csv'),
separator=',', append=True)
check5 = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=int(patience / 1.5),
                           verbose=1, mode='auto', epsilon=0.0001, cooldown=0, min_lr=1e-10)
trained_model=model.fit_generator(train_generator,
steps_per_epoch=(len(X_train) // batchSize),
epochs=epochs,
initial_epoch=epochStart,
validation_data=test_generator,
validation_steps=(len(X_test) // batchSize),
callbacks=[check1, check2, check3, check4, check5],
verbose=1)
db.close()
|
{"hexsha": "80a3fedad662ac9111726e2d07390abf54b45044", "size": 2408, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/train.py", "max_stars_repo_name": "suhailnajeeb/tcga-cancer-predict", "max_stars_repo_head_hexsha": "60e7061311f36f722abeae0ad26ed34c44095843", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-08T08:44:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-08T08:58:06.000Z", "max_issues_repo_path": "code/train.py", "max_issues_repo_name": "suhailnajeeb/tcga-cancer-predict", "max_issues_repo_head_hexsha": "60e7061311f36f722abeae0ad26ed34c44095843", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/train.py", "max_forks_repo_name": "suhailnajeeb/tcga-cancer-predict", "max_forks_repo_head_hexsha": "60e7061311f36f722abeae0ad26ed34c44095843", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2727272727, "max_line_length": 108, "alphanum_fraction": 0.7321428571, "include": true, "reason": "import numpy", "num_tokens": 655}
|
import re
import itertools as it
import numpy as np
import pandas as pd
from string import punctuation
import unicodedata
from sklearn.feature_extraction.text import CountVectorizer
import nltk
from nltk.tokenize import TweetTokenizer
# import tweepy
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
def plot_ts(series,
col,
ma=False,
raw=False,
expanding=False,
ewma=False,
overall=False,
median=False,
title=None,
time_bin="hour",
date_markers=None,
y_label=None,
custom_yaxis=None,
custom_ax=None,
**kwargs):
"""
custom plotting function for our time-series dataframes.
Args:
        series: pd.Series or pd.DataFrame
        col: column to plot when a DataFrame is passed
        ma: window size for a rolling-mean line
        raw: plot the basic values in the frame
        expanding: plot an expanding mean
        ewma: plot an ewma line
        overall: plot an overall mean
        median: plot the overall median
        title: custom title to use
        time_bin: time unit used in the moving-average legend label
date_markers: plots a dot on the signal where a given date is noted.
y_label: custom y-axis label
custom_yaxis: custom axis
custom_ax: passing a custom Axes here will assign this plot to that
axis
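    Example (illustrative sketch; assumes `df` is an hourly-binned frame
    with a "count" column):
        ax = plot_ts(df, "count", raw=True, ewma=24, title="tweets per hour")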
"""
if isinstance(series, pd.DataFrame):
series = series[col]
lw = 0.75
if custom_ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = custom_ax
if y_label is None:
period = series.index.to_period().freqstr
_bin = "day" if period == "D" else "hour"
_y_label = "tweets per {}".format(_bin)
plt.ylabel(_y_label)
else:
if isinstance(y_label, str):
plt.ylabel(y_label)
if date_markers is not None:
def dateindex_to_str(index, include_hour=True):
idx = 16 if include_hour else 10
return [str(date)[0:idx].replace("T", " ")
for date in index.values]
(ax.plot(date_markers, series.loc[date_markers],
"o", markersize=4, color='m', label="point"))
if raw:
series.plot(label="raw", lw=lw, ax=ax)
if ma:
(series.rolling(ma).mean()
.plot(ax=ax, label="{}{} ma".format(ma, time_bin), lw=lw))
    if ewma:
        # exclude bools: `ewma=True` is an int subclass and would otherwise
        # be treated as span=1
        if isinstance(ewma, int) and not isinstance(ewma, bool):
            (series.ewm(span=ewma).mean()
             .plot(ax=ax, label="ewma - span {}".format(ewma), lw=lw))
        else:
            # raw string so "\a" is not parsed as the ASCII bell escape
            (series.ewm(alpha=0.05).mean()
             .plot(ax=ax, label=r"ewma, $\alpha = 0.05$", lw=lw))
if expanding:
series.expanding().mean().plot(ax=ax, label="expanding_mean", lw=lw)
if overall:
(pd.DataFrame(series)
.assign(global_mean=lambda x: x['count']
.mean())["global_mean"]
.plot(ax=ax, label="global_mean", lw=lw))
if median:
(pd.DataFrame(series)
.assign(global_median=lambda x: x['count'].median())["global_median"]
.plot(ax=ax, label="global_median"))
plt.tight_layout()
plt.xlabel("datetime")
if custom_yaxis is not None:
def log_axis(x, pos):
'The two args are the value and tick position'
str_ = '$' + "2^{" + str(x) + "}" + '$'
return str_
formatter = FuncFormatter(log_axis)
ax.yaxis.set_major_formatter(formatter)
if title:
ax.set_title(title)
if custom_ax is not None:
return
else:
return ax
TWEET_TOKENIZER = TweetTokenizer(preserve_case=False,
strip_handles=True, reduce_len=False)
STOPWORDS = (set(nltk.corpus.stopwords.words("english")) |
{"...", '…', '•', '’', "com"} |
set(punctuation))
def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def replace_urls(string, replacement=None):
"""
Replace URLs in `string` with text of `replacement`
"""
if replacement is None:
replacement = "<-URL->"
    pattern = re.compile(r'(https?://)?(\w*[.]\w+)+([/?=&]+\w+)*')
return re.sub(pattern, replacement, string)
def tokenizer(tweet_text, custom_words=None):
text = (replace_urls(tweet_text))
tokens = TWEET_TOKENIZER.tokenize(text)
tokens = (token for token in tokens if token not in punctuation)
tokens = (token for token in tokens if token not in STOPWORDS)
tokens = (token for token in tokens if len(token) >= 3)
if custom_words:
tokens = (token for token in tokens if token not in custom_words)
return list(tokens)
def get_frequent_terms(text_series, stop_words=None, ngram_range=None):
if ngram_range is None:
ngram_range = (1, 3)
count_vectorizer = CountVectorizer(analyzer="word",
tokenizer=tokenizer,
stop_words=stop_words,
ngram_range=ngram_range)
term_freq_matrix = count_vectorizer.fit_transform(text_series)
terms = count_vectorizer.get_feature_names()
term_frequencies = term_freq_matrix.sum(axis=0).tolist()[0]
term_freq_df = (pd.DataFrame(list(zip(terms, term_frequencies)),
columns=["token", "count"])
.set_index("token")
.sort_values("count", ascending=False))
return term_freq_df
def common_words_and_phrases(tweets, _stopwords=None, most_common=50,
ngram_range=None):
if _stopwords is None:
_stopwords = STOPWORDS
tweet_texts = (t.all_text for t in tweets)
return (get_frequent_terms(tweet_texts,
stop_words=_stopwords,
ngram_range=ngram_range)
.head(most_common))
def summarize_tweet_text(tweets, samples=5):
freq_terms = (get_frequent_terms(map(lambda x: x.all_text, tweets),
ngram_range=(2, 3))
.head(50))
terms = list(freq_terms.reset_index()["token"])
# print a few tweets
# print("###########################################################")
print("-----------------start summary-----------------------------")
print("\t----sample tweets ----")
_tweets = [t for t in it.islice(sorted(tweets,
key=lambda x: x.favorite_count,
reverse=True),
samples)]
for tweet in _tweets:
print(f"tweet text:\n \t {tweet.all_text} \n favs: \t {tweet.favorite_count}")
print()
print("\t----sample terms ----")
print(', '.join(terms))
print("----------------- end summary------------------------------")
# print("###########################################################")
def make_normalplot(df, random=True):
if random:
plt.plot(df.index.values,
np.random.normal(size=df.shape[0]),
lw=0.8,
alpha=0.75)
plt.ylim((-5, 5))
plt.title("Generated normal time series with $\sigma$ bands")
else:
plt.plot(df.index.values, df.values, lw=0.8, alpha=0.75)
plt.ylim((-5, 8))
plt.title("Dataframe with bands showing up to 3 sigma")
plt.axhline(y=1, color="red")
plt.axhline(y=-1, color="red")
plt.axhline(y=2, color="orange")
plt.axhline(y=-2, color="orange")
plt.axhline(y=3, color="yellow")
plt.axhline(y=-3, color="yellow")
arrowprops = dict(arrowstyle="-",
color="black",
lw=2)
#textprops = dict(rotation="vertical", fontsize=16)
textprops = dict()
plt.annotate("1 $\sigma$",
xy=(df.index.values[10], 1),
xytext=(df.index.values[10], -1.5),
arrowprops=arrowprops,
**textprops)
plt.annotate("2 $\sigma$",
xy=(df.index.values[750], 2),
xytext=(df.index.values[750], -2.5),
arrowprops=arrowprops,
**textprops
)
plt.annotate("3 $\sigma$",
xy=(df.index.values[1500], 3),
xytext=(df.index.values[1500], -3.5),
arrowprops=arrowprops,
**textprops
)
pop_star_rules = [{"artist": "katy_perry",
"rule": '("katy perry" OR @katyperry) -is:retweet lang:en'},
{"artist": "rihanna",
"rule": '(rihanna OR @rihanna) -is:retweet lang:en'},
{"artist":"lady_gaga",
"rule": '("lady gaga" OR @ladygaga) -is:retweet lang:en'},
{"artist": "ariana_grande",
"rule": '("ariana grande" OR @arianagrande) -is:retweet lang:en'},
{"artist": 'beyonce',
"rule": "(beyonce OR @beyonce) -is:retweet lang:en"},
{"artist": "selena_gomez",
"rule": '("selena gomez" OR @selenagomez) -is:retweet lang:en'}]
spotify_popular_artists_rule = """
(
"Drake" OR @Drake OR
"Ed Sheeran" OR @edsheeran OR
"The Chainsmokers" OR @TheChainsmokers OR
"The Weeknd" OR @theweeknd OR
"Justin Bieber" OR @justinbieber OR
"Calvin Harris" OR @CalvinHarris OR
"Major Lazer" OR @MAJORLAZER OR
"Shawn Mendes" OR @ShawnMendes OR
"Kygo" OR @KygoMusic OR
"Sia" OR @Sia OR
"Maroon 5" OR @maroon5 OR
"Imagine Dragons" OR @Imaginedragons OR
"Twenty One Pilots" OR @twentyonepilots OR
"Kendrick Lamar" OR @kendricklamar OR
"Rihanna" OR @rihanna OR
"David Guetta" OR @davidguetta OR
"Sam Smith" OR @samsmithworld OR
"Luis Fonsi" OR @LuisFonsi OR
"Charlie Puth" OR @charlieputh OR
"Clean Bandit" OR @cleanbandit OR
"Coldplay" OR @coldplay OR
"Jason Derulo" OR @jasonderulo OR
"Post Malone" OR @PostMalone OR
"ZAYN" OR @zaynmalik OR
"Avicii" OR @Avicii OR
"DJ Snake" OR @djsnake OR
"J Balvin" OR @JBALVIN OR
"Jonas Blue" OR @JonasBlue OR
"Adele" OR @Adele OR
"Martin Garrix" OR @MartinGarrix OR
"Bruno Mars" OR @BrunoMars OR
"Zara Larsson" OR @zaralarsson OR
"Fifth Harmony" OR @FifthHarmony OR
"DJ Khaled" OR @djkhaled OR
"Future" OR @1future OR
"Katy Perry" OR @katyperry OR
"Hailee Steinfeld" OR @HaileeSteinfeld OR
"One Direction" OR @onedirection OR
"Alan Walker" OR @IAmAlanWalker OR
"Robin Schulz" OR @robin_schulz OR
"Fetty Wap" OR @fettywap OR
"Alessia Cara" OR @alessiacara OR
"Ellie Goulding" OR @elliegoulding OR
"Cheat Codes" OR @CheatCodesMusic OR
"Mike Posner" OR @MikePosner OR
"Pitbull" OR @pitbull OR
"Meghan Trainor" OR @Meghan_Trainor
)
-is:retweet
lang:en
"""
spotify_charts_rule = """
(
"Post Malone" OR @PostMalone OR
"Lil Pump" OR @lilpump OR
"Camila Cabello" OR @Camila_Cabello OR
"Offset" OR @OffsetYRN OR
"G-Eazy" OR @G_Eazy OR
"A$AP Ferg" OR @burdxkeyz OR
"21 Savage" OR @21savage OR
"Sam Smith" OR @samsmithworld OR
"Migos" OR @Migos OR
"Ed Sheeran" OR @edsheeran OR
"Logic" OR @Logic301 OR
"Khalid" OR @thegreatkhalid OR
"Gucci Mane" OR @gucci1017 OR
"Maroon 5" OR @maroon5 OR
"Bebe Rexha" OR @BebeRexha OR
"Marshmello" OR @marshmellomusic OR
"Hailee Steinfeld" OR @HaileeSteinfeld OR
"Cardi B" OR @iamcardib OR
"Halsey" OR @halsey OR
"Kodak Black" OR @KodakBlack1k OR
"Kendrick Lamar" OR @kendricklamar OR
"Travis Scott" OR @trvisXX OR
"XXXTENTACION" OR @xxxtentacion OR
"French Montana" OR @FrencHMonTanA OR
"Demi Lovato" OR @ddlovato OR
"NAV" OR @beatsbynav OR
"Imagine Dragons" OR @Imaginedragons OR
"Charlie Puth" OR @charlieputh OR
"ZAYN" OR @zaynmalik OR
"Yo Gotti" OR @yogottikom OR
"YBN Nahmir" OR @nahmir205 OR
"Portugal. The Man" OR @portugaltheman OR
"Andy Williams" OR @ventriloquist29 OR
"Tay-K" OR @TAYK47USA OR
"Luis Fonsi" OR @LuisFonsi OR
"Clean Bandit" OR @cleanbandit OR
"Wham!" OR @13WHAM OR
"Playboi Carti" OR @damnbrandont OR
"Childish Gambino" OR @donaldglover OR
"SZA" OR @sza OR
"J Balvin" OR @JBALVIN OR
"Eminem" OR @Eminem OR
"Future" OR @1future OR
"2 Chainz" OR @2chainz OR
"Kesha" OR @KeshaRose OR
"Vince Guaraldi Trio" OR @RefinedPirate OR
"Band Aid" OR @FirstAidKitBand
)
-is:retweet
lang:en
"""
|
{"hexsha": "8f57ee4ba3e4a1b7a8441d6ae2adf2985d30c6d5", "size": 12227, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/timeseries_utils.py", "max_stars_repo_name": "mehdimansouri/pictet", "max_stars_repo_head_hexsha": "449378dd3df2e54968e31de13635b509b1f4572b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/timeseries_utils.py", "max_issues_repo_name": "mehdimansouri/pictet", "max_issues_repo_head_hexsha": "449378dd3df2e54968e31de13635b509b1f4572b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/timeseries_utils.py", "max_forks_repo_name": "mehdimansouri/pictet", "max_forks_repo_head_hexsha": "449378dd3df2e54968e31de13635b509b1f4572b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9242819843, "max_line_length": 86, "alphanum_fraction": 0.5888607181, "include": true, "reason": "import numpy", "num_tokens": 3368}
|
import inspect
import tubular.testing.helpers as h
import tubular
import pandas as pd
import numpy as np
from unittest import mock
from _pytest.mark.structures import ParameterSet
def test_arguments():
"""Test arguments for arguments of tubular.testing.helpers.index_preserved_params."""
expected_arguments = ["df_1", "df_2", "seed"]
arg_spec = inspect.getfullargspec(h.index_preserved_params)
arguments = arg_spec.args
assert len(expected_arguments) == len(
arguments
), f"Incorrect number of arguments -\n Expected: {len(expected_arguments)}\n Actual: {len(arguments)}"
for i, (e, a) in enumerate(zip(expected_arguments, arguments)):
assert e == a, f"Incorrect arg at index {i} -\n Expected: {e}\n Actual: {a}"
default_values = arg_spec.defaults
assert default_values == (
0,
), f"Unexpected default values -\n Expected: {(0, )}\n Actual: {default_values}"
def test__check_dfs_passed_call():
"""Test the call to _check_dfs_passed."""
df1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[7, 8, 9])
df2 = pd.DataFrame({"a": [2, 3, 4], "b": [5, 6, 7]}, index=[7, 8, 9])
with mock.patch.object(tubular.testing.helpers, "_check_dfs_passed") as mocked:
h.index_preserved_params(df1, df2, seed=1)
assert mocked.call_count == 1, "unexpected number of calls to _check_dfs_passed"
call_args = mocked.call_args_list[0]
assert call_args[1] == {}, "unexpected kwargs in _check_dfs_passed call"
assert call_args[0] == (
df1,
df2,
), "unexpected positional args in _check_dfs_passed call"
def test_returned_object():
"""Test the function returns the expected output."""
df1_1 = pd.DataFrame({"a": [1], "b": [4]}, index=[7])
df1_2 = pd.DataFrame({"a": [2], "b": [5]}, index=[8])
df1_3 = pd.DataFrame({"a": [3], "b": [6]}, index=[9])
df2_1 = pd.DataFrame({"c": [10], "d": [13]}, index=[7])
df2_2 = pd.DataFrame({"c": [11], "d": [14]}, index=[8])
df2_3 = pd.DataFrame({"c": [12], "d": [15]}, index=[9])
df1 = pd.concat([df1_1, df1_2, df1_3], axis=0)
df2 = pd.concat([df2_1, df2_2, df2_3], axis=0)
seed_value = 111
np.random.seed(seed_value)
random_index = np.random.randint(low=-99999999, high=100000000, size=df1.shape[0])
start_decreasing_index = np.random.randint(low=-99999999, high=100000000, size=1)[0]
decreasing_index = range(
start_decreasing_index, start_decreasing_index - df1.shape[0], -1
)
start_increasing_index = np.random.randint(low=-99999999, high=100000000, size=1)[0]
increasing_index = range(
start_increasing_index, start_increasing_index + df1.shape[0], 1
)
df1_copy = df1.copy()
df2_copy = df2.copy()
df1_copy.index = random_index
df2_copy.index = random_index
expected_df_pairs = [(df1_copy, df2_copy)]
df1_copy = df1.copy()
df2_copy = df2.copy()
df1_copy.index = decreasing_index
df2_copy.index = decreasing_index
expected_df_pairs.append((df1_copy, df2_copy))
df1_copy = df1.copy()
df2_copy = df2.copy()
df1_copy.index = increasing_index
df2_copy.index = increasing_index
expected_df_pairs.append((df1_copy, df2_copy))
expected_df_pairs.append((df1, df2))
expected_ids = [
"random index",
"decreasing index",
"increasing index",
"original index",
]
results = h.index_preserved_params(df1, df2, seed=seed_value)
assert (
type(results) is list
), "unexpected type for object returned from index_preserved_params"
assert len(results) == len(
expected_df_pairs
), "unexpected len of object returned from index_preserved_params"
for i in range(len(expected_df_pairs)):
assert (
type(results[i]) is ParameterSet
), f"unexpected type for {i}th item in returned list"
h.assert_equal_dispatch(
expected_df_pairs[i],
results[i].values,
f"unexpected values for {i}th item in returned list",
)
assert (
results[i].marks == ()
), f"unexpected marks for {i}th item in returned list"
assert (
results[i].id == expected_ids[i]
), f"unexpected id for {i}th item in returned list"
|
{"hexsha": "fadd0f638192b360a9791e9fb5688c6a03ccce21", "size": 4322, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/testing/helpers/test_index_preserved_params.py", "max_stars_repo_name": "munichpavel/tubular", "max_stars_repo_head_hexsha": "53e277dea2cc869702f2ed49f2b495bf79b92355", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/testing/helpers/test_index_preserved_params.py", "max_issues_repo_name": "munichpavel/tubular", "max_issues_repo_head_hexsha": "53e277dea2cc869702f2ed49f2b495bf79b92355", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/testing/helpers/test_index_preserved_params.py", "max_forks_repo_name": "munichpavel/tubular", "max_forks_repo_head_hexsha": "53e277dea2cc869702f2ed49f2b495bf79b92355", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2237762238, "max_line_length": 108, "alphanum_fraction": 0.6406756131, "include": true, "reason": "import numpy", "num_tokens": 1206}
|
/// @file TwitterSpark.cpp
/// @brief TwitterSpark class implementation.
#include "TwitterSpark.h"
#include <algorithm>
#include <boost/tokenizer.hpp>
/* LOG4CPLUS Headers */
#include <log4cplus/logger.h>
#include <log4cplus/fileappender.h>
#include <log4cplus/layout.h>
#include <log4cplus/ndc.h>
#include <log4cplus/helpers/loglog.h>
#include <syscall.h>
using namespace std;
using namespace log4cplus;
using namespace log4cplus::helpers;
Logger myLogger;
extern "C"
Component *createComponent(
char *componentInstanceName,
char *componentType,
ComponentSystem *componentSystem
)
{
if (!strcmp(componentType, "TwitterSpark")) {
return new TwitterSpark(componentInstanceName, componentSystem);
}
else {
return NULL;
}
}
struct less_than_key
{
inline bool operator() (const pair<string, Json::Value>& struct1, const pair<string, Json::Value>& struct2)
{
return (struct1.first < struct2.first);
}
};
/// Initializes the TwitterSpark component.
void TwitterSpark::init(void){
user = getComponentConfiguration()->getString(const_cast<char*>("User"));
password = getComponentConfiguration()->getString(const_cast<char*>("Password"));
//LoggerInfo("user = %s", user.c_str() );
//LoggerInfo("password = %s", password.c_str() );
std::string tmpStr, tmpStr2;
std::string replyMsg;
/* Set twitter username and password */
twitterObj.setTwitterUsername( user );
twitterObj.setTwitterPassword( password );
myOAuthAccessConsumerKey = getComponentConfiguration()->getString(const_cast<char*>("Consumer_Key"));
myOAuthAccessConsumerSecret = getComponentConfiguration()->getString(const_cast<char*>("Consumer_Secret"));
//LoggerInfo("ckey = %s", myOAuthAccessConsumerKey.c_str() );
//LoggerInfo("csecret = %s", myOAuthAccessConsumerSecret.c_str() );
/* OAuth flow begins */
/* Step 0: Set OAuth-related params. These are obtained by registering your app at twitter.com */
twitterObj.getOAuth().setConsumerKey(myOAuthAccessConsumerKey);
twitterObj.getOAuth().setConsumerSecret(myOAuthAccessConsumerSecret);
myOAuthAccessTokenKey = getComponentConfiguration()->getString(const_cast<char*>("Token_Key"));
myOAuthAccessTokenSecret = getComponentConfiguration()->getString(const_cast<char*>("Token_Secret"));
//LoggerInfo("tkey = %s", myOAuthAccessTokenKey.c_str() );
//LoggerInfo("tsecret = %s", myOAuthAccessTokenSecret.c_str() );
if( myOAuthAccessTokenKey.size() && myOAuthAccessTokenSecret.size() )
{
/* If we already have these keys, then no need to go through auth again */
//LoggerInfo( "Using:\nKey: %s\nSecret: %s", myOAuthAccessTokenKey.c_str(), myOAuthAccessTokenSecret.c_str() );
twitterObj.getOAuth().setOAuthTokenKey( myOAuthAccessTokenKey );
twitterObj.getOAuth().setOAuthTokenSecret( myOAuthAccessTokenSecret );
}
else {
ERR("Invalid token authentication");
}
/* Account credentials verification */
if( twitterObj.accountVerifyCredGet() )
{
twitterObj.getLastWebResponse( replyMsg );
//Json::Value accountJSON;
//Json::Reader().parse(replyMsg, accountJSON);
//printf( "\ntwitterClient:: twitCurl::accountVerifyCredGet web response:\n%s\n", Json::StyledWriter().write(accountJSON).c_str() );
}
else
{
twitterObj.getLastCurlError( replyMsg );
LoggerError( "twitterClient:: twitCurl::accountVerifyCredGet error:\n%s", replyMsg.c_str() );
ERR("Account verification failed");
}
lastIdReplied = "";
firstTime = true;
timePoll = getComponentConfiguration()->getFloat(const_cast<char*>("Time_Polling"));
delay = getComponentConfiguration()->getInt(const_cast<char*>("Delay"));
// Get user's working directory
string logFilename = getGlobalConfiguration()->getUserDir();
// Main thread ID
int threadId = syscall(SYS_gettid);
logFilename.append("/");
//logFilename.append(boost::lexical_cast<string>(threadId));
time_t timestamp = time(0);
char hostname[1024];
size_t len = 1024;
gethostname(hostname, len);
string session = getGlobalConfiguration()->getString(const_cast<char*>("session"));
stringstream nombre;
nombre << timestamp << "_" << hostname << "_" << threadId << "_" << session << "_twitter.log";
logFilename.append(nombre.str());
// Initialize log session
myLogger = Logger::getInstance(LOG4CPLUS_TEXT("TWITTERLOG"));
LogLog::getLogLog()->setInternalDebugging(true);
SharedAppenderPtr append_1(new RollingFileAppender(LOG4CPLUS_TEXT(logFilename)));
append_1->setName(LOG4CPLUS_TEXT("TwitterLog"));
append_1->setLayout( std::auto_ptr<Layout>(new PatternLayout("%D{%y-%m-%d %H:%M:%S} - %m%n")) );
myLogger.addAppender(append_1);
// Parent loggers will not log 'myLogger' messages
myLogger.setAdditivity(false);
LOG4CPLUS_INFO(myLogger,LOG4CPLUS_TEXT("INIT"));
stopWatch.restart();
}
/// Uninitializes the TwitterSpark component.
void TwitterSpark::quit(void){
}
// IFlow<char*> implementation
void TwitterSpark::processData(char *prompt){
string text(prompt);
if(text.empty() || unrepliedMentions.empty())
return;
if(text == "[RESPONSE_NOT_FOUND]") {
lastIdReplied = unrepliedMentions.front().first;
unrepliedMentions.erase(unrepliedMentions.begin());
text = "<error>El avatar no ha respondido a esta pregunta.";
string outputMsg = "Avatar: ";
outputMsg.append(text);
LOG4CPLUS_INFO(myLogger,LOG4CPLUS_TEXT(outputMsg));
return;
}
string outputMsg = "Avatar: ";
outputMsg.append(text);
LOG4CPLUS_INFO(myLogger,LOG4CPLUS_TEXT(outputMsg)); // log the composed "Avatar: ..." line, not just the raw text
string id = unrepliedMentions.front().first;
//LoggerInfo("Got from Rebecca: %s", prompt);
reply(text, id);
}
//IThreadProc implementation
void TwitterSpark::process() {
if(firstTime) {
myFlow->processData("[TWITTER]");
getLastIdReplied();
firstTime = false;
}
//LoggerInfo("process");
if(stopWatch.elapsedTime() >= timePoll) {
getMentions(lastIdReplied);
//LoggerInfo("%d new mentions", unrepliedMentions.size());
while(!unrepliedMentions.empty()) {
sleep(delay);
string text = unrepliedMentions.front().second.get("text", "").asString();
string outputMsg = "User: ";
outputMsg.append(text);
LOG4CPLUS_INFO(myLogger,LOG4CPLUS_TEXT(outputMsg));
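// Strip the leading "@<user>" mention (with or without a trailing space)
// before forwarding the remaining question text to the dialogue flow.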
string::size_type pos = text.find("@" + user + " ");
if(pos != string::npos) {
text = text.erase(pos, user.size() + 2);
}
else {
pos = text.find("@" + user);
if(pos != string::npos) {
text = text.erase(pos, user.size() + 1);
}
}
char * msg = const_cast<char*>(text.c_str());
//LoggerInfo("Sent to Rebecca: %s", msg);
myFlow->processData(msg);
}
stopWatch.restart();
}
else
usleep(200000);
}
bool TwitterSpark::newMentions() {
return !unrepliedMentions.empty();
}
/* Get mentions */
void TwitterSpark::getMentions(string sinceId = "") {
//LoggerInfo("getMentions id %s", sinceId.c_str());
string replyMsg = "";
Json::Value mentions;
Json::Value mensajes;
if( twitterObj.mentionsGet(sinceId) )
{
twitterObj.getLastWebResponse( replyMsg );
Json::Reader().parse(replyMsg, mentions);
//LoggerInfo("%s", Json::StyledWriter().write(mentions).c_str());
if(!mentions.isArray()) {
LoggerError("code: %d\tmessage: %s", mentions["errors"][0u].get("code", "").asInt(), mentions["errors"][0u].get("message", "").asString().c_str());
return;
}
int i = 0;
// For each mention starting from the given id
for(Json::ValueIterator it = mentions.begin(); it != mentions.end(); it++) {
string id = mentions[i].get("id_str", "").asString();
//if(std::find(unrepliedMentions.begin(), unrepliedMentions.end(), id) == unrepliedMentions.end())
vector<pair<string, Json::Value> >::iterator it2;
// If it is not already among the unreplied mentions
for(it2 = unrepliedMentions.begin(); it2 != unrepliedMentions.end(); it2++) {
if(it2->first == id)
break;
}
if(it2 == unrepliedMentions.end()) {
// Add it
mensajes.clear();
mensajes["user"] = mentions[i]["user"].get("screen_name", "").asString();
mensajes["id"] = id;
mensajes["text"] = mentions[i].get("text", "").asString();
//LoggerInfo("unrepliedMentions.push_back %s %s", id.c_str(), mentions[i].get("text", "").asString().c_str());
unrepliedMentions.push_back(pair<string, Json::Value>(id, mensajes));
std::sort(unrepliedMentions.begin(), unrepliedMentions.end(), less_than_key());
}
i++;
}
//printf( "\ntwitterClient:: twitCurl::mentionsGet web response:\n%s\n", Json::StyledWriter().write(mensajes).c_str() );
}
else
{
twitterObj.getLastCurlError( replyMsg );
LoggerWarn( "twitterClient:: twitCurl::mentionsGet error:\n%s", replyMsg.c_str() );
}
}
/* Post a new reply */
void TwitterSpark::reply(string msg, string id) {
//LoggerInfo("reply %s %s", msg.c_str(), id.c_str());
string replyMsg = "";
if(!msg.empty() && !id.empty()) {
uint i;
string user = "";
uint size = unrepliedMentions.size();
for(i = 0; i < size; i++) {
if(unrepliedMentions[i].first == id) {
user = unrepliedMentions[i].second.get("user", "").asString();
unrepliedMentions.erase(unrepliedMentions.begin() + i);
break;
}
}
if(i == size) {
LoggerWarn("No such id in mentions");
}
else {
//LoggerInfo("%s, %s", user.c_str(), id.c_str());
if( twitterObj.statusUpdate( "@" + user + " " + msg, id ) )
{
twitterObj.getLastWebResponse( replyMsg );
Json::Value response;
Json::Reader().parse(replyMsg, response);
if(!response["errors"].isNull())
LoggerWarn("twitterClient:: twitCurl::statusUpdate web response:\n%s", replyMsg.c_str() );
//LoggerInfo("twitterClient:: twitCurl::statusUpdate web response:\n%s", replyMsg.c_str() );
}
else
{
twitterObj.getLastCurlError( replyMsg );
LoggerWarn( "twitterClient:: twitCurl::statusUpdate error:\n%s", replyMsg.c_str() );
}
lastIdReplied = id;
}
}
}
void TwitterSpark::getLastIdReplied() {
string replyMsg = "";
//printf( "\nGetting user timeline\n" );
if( twitterObj.timelineUserGet( true, true, 0 ) )
{
twitterObj.getLastWebResponse( replyMsg );
Json::Value userTimelineJSON;
Json::Reader().parse(replyMsg, userTimelineJSON);
/*if(!userTimelineJSON["errors"].isNull())
LoggerWarn("twitterClient:: twitCurl::statusUpdate web response:\n%s", replyMsg.c_str() );*/
string lastId = "";
uint i = 0;
for(Json::ValueIterator it = userTimelineJSON.begin(); it != userTimelineJSON.end(); it++) {
lastId = userTimelineJSON[i].get("in_reply_to_status_id_str", "").asString();
// TODO: should we also compare the numeric ids and take the largest, just in case? It seems not.
if(! lastId.empty()) {
lastIdReplied = lastId;
break;
}
i++;
}
}
else
{
twitterObj.getLastCurlError( replyMsg );
LoggerWarn("twitterClient:: twitCurl::timelineUserGet error:\n%s", replyMsg.c_str() );
}
//LoggerInfo("TwitterSpark::LastIdReplied = %s", lastIdReplied.c_str());
}
|
{"hexsha": "a515a408d2eea65955c6a6ea921f24ec54826d3a", "size": 11076, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "SPARKS/TwitterSpark/TwitterSpark.cpp", "max_stars_repo_name": "adele-robots/fiona", "max_stars_repo_head_hexsha": "1ef1fb18e620e18b2187e79e4cca31d66d3f1fd2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SPARKS/TwitterSpark/TwitterSpark.cpp", "max_issues_repo_name": "adele-robots/fiona", "max_issues_repo_head_hexsha": "1ef1fb18e620e18b2187e79e4cca31d66d3f1fd2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SPARKS/TwitterSpark/TwitterSpark.cpp", "max_forks_repo_name": "adele-robots/fiona", "max_forks_repo_head_hexsha": "1ef1fb18e620e18b2187e79e4cca31d66d3f1fd2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1976744186, "max_line_length": 150, "alphanum_fraction": 0.6744312026, "num_tokens": 2901}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from attention import MHSATransformerPos
def xy2uv(xyz, eps = 0.001):
x, y, z = torch.unbind(xyz, dim=2)
x = x+eps
y = y+eps
z = z+eps
u = torch.atan2(x, -y)
    v = - torch.atan(z / torch.sqrt(x**2 + y**2))  # minus sign: negative z (below the horizon) maps downward; grid_sample expects (-1, -1) at the top-left
pi = float(np.pi)
u = u / pi
v = (2.0 * v) / pi
u = torch.clamp(u, min=-1, max=1)
v = torch.clamp(v, min=-1, max=1)
    ### output: [batch_size x num_points x 2], values in [-1, +1]
output = torch.stack([u, v], dim=-1)
return output
class gravity_projection(nn.Module):
def __init__(self, lfeats = 1024, use_mhsa = False, use_rnn = False, num_heads = 4, hdim_factor = 2, use_pos_encoding = False, verts_count = 642):
super(gravity_projection, self).__init__()
self.use_mhsa = use_mhsa
self.lfeats = lfeats
self.use_rnn = use_rnn
if(self.use_mhsa):
self.num_heads=num_heads
self.mhsa = MHSATransformerPos(num_layers=1, d_model=self.lfeats, num_heads=num_heads, conv_hidden_dim=2048, maximum_position_encoding = verts_count)
if(self.use_rnn):
self.bi_rnn = nn.LSTM(input_size=self.lfeats,
hidden_size=(self.lfeats//2),
num_layers=2,
dropout=0.5,
batch_first=False,
bidirectional=True)
self.drop_out = nn.Dropout(0.5)
def slice_projection(self, uv_inputs, img_feature):
uv_inputs = uv_inputs.to(img_feature.device)
uv_inputs = uv_inputs.unsqueeze(1)
output = F.grid_sample(img_feature, uv_inputs, align_corners=True)
output = torch.transpose(output.squeeze(2), 1, 2)
return output
def forward(self, img_features, inputs, is_squeezed_h = False, get_vertices = True, return_packed=False):
        uv_inputs = xy2uv(inputs)  # uv coordinates, computed on the mesh (inputs) device
feats = []
for img_feature in img_features:
feats.append( self.slice_projection(uv_inputs, img_feature) )
output = torch.cat(feats, 2)
if(self.use_mhsa):
output = self.mhsa(output)
output = self.drop_out(output)
if(self.use_rnn):
output = output.permute(1, 0, 2)
output,hidden = self.bi_rnn(output)
output = self.drop_out(output)
output = output.permute(1, 0, 2)
        ### NB: prepend the previous-state vertex coordinates
if(get_vertices):
output = torch.cat((inputs,output), 2) #### BxVx(1024+3)
if(return_packed):
output = output.view(-1, output.shape[-1])
return output
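# Minimal usage sketch (hypothetical shapes; not part of the original module):
# proj = gravity_projection(lfeats=1024)
# img_feature = torch.randn(2, 1024, 64, 128)  # B x C x H x W feature map
# verts = torch.randn(2, 642, 3)               # B x V x 3 mesh vertices
# out = proj([img_feature], verts)             # -> B x V x (1024 + 3)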
|
{"hexsha": "cf1da77af59c11b885b755c6467620f94b6d6f39", "size": 3279, "ext": "py", "lang": "Python", "max_stars_repo_path": "gaf.py", "max_stars_repo_name": "crs4/Deep3DLayout", "max_stars_repo_head_hexsha": "881a66cfeac52b043f0aaffef40e3a2aea5c22ec", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-12-07T08:58:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T07:03:28.000Z", "max_issues_repo_path": "gaf.py", "max_issues_repo_name": "crs4/Deep3DLayout", "max_issues_repo_head_hexsha": "881a66cfeac52b043f0aaffef40e3a2aea5c22ec", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gaf.py", "max_forks_repo_name": "crs4/Deep3DLayout", "max_forks_repo_head_hexsha": "881a66cfeac52b043f0aaffef40e3a2aea5c22ec", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-19T16:20:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-19T16:20:09.000Z", "avg_line_length": 31.8349514563, "max_line_length": 161, "alphanum_fraction": 0.51631595, "include": true, "reason": "import numpy", "num_tokens": 782}
|
# Just an example of MC-dropout prediction; adapt it in each script.
import numpy as np
import keras.backend as K
from keras import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.optimizers import SGD
def get_model(base_model, num_class):
    # `base_model` is a pre-trained backbone (e.g. a keras.applications model
    # exposing a "bn" layer); attach a pooling + softmax classification head.
    base_model.summary()
    x = base_model.get_layer("bn").output
    # x = base_model.get_layer("block5_pool").output
    x = GlobalAveragePooling2D()(x)
    predictions = Dense(num_class, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)  # alternative optimizer to 'adam'
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
# Build the backend function on a concrete model instance, e.g.:
# model = get_model(base_model, num_class)
# f = K.function([model.layers[0].input, K.learning_phase()],
#                [model.layers[-1].output])
def predict_with_uncertainty(f, x, n_iter=10):
    # Run the network n_iter times with learning_phase=1 so dropout stays
    # active at inference time (MC dropout); report the sample mean and
    # variance over the stochastic forward passes.
    results = []
    for _ in range(n_iter):
        results.append(f([x, 1])[0])
    result = np.stack(results, axis=0)
    prediction = result.mean(axis=0)
    uncertainty = result.var(axis=0)
    return prediction, uncertainty
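# Usage sketch (hypothetical input shape; it depends on the chosen backbone):
# x = np.random.rand(8, 224, 224, 3).astype("float32")
# mean, var = predict_with_uncertainty(f, x, n_iter=20)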
|
{"hexsha": "20579af9102f3c3203e49d460cf3aabe0b83304f", "size": 1080, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict_with_uncertainty.py", "max_stars_repo_name": "OsciiArt/Cookpad", "max_stars_repo_head_hexsha": "b2245f84db0650d6282c97c98600de825c6ed6e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predict_with_uncertainty.py", "max_issues_repo_name": "OsciiArt/Cookpad", "max_issues_repo_head_hexsha": "b2245f84db0650d6282c97c98600de825c6ed6e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predict_with_uncertainty.py", "max_forks_repo_name": "OsciiArt/Cookpad", "max_forks_repo_head_hexsha": "b2245f84db0650d6282c97c98600de825c6ed6e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4210526316, "max_line_length": 63, "alphanum_fraction": 0.6601851852, "include": true, "reason": "import numpy", "num_tokens": 275}
|
"""
Torn numbers in cpmpy.
From
http://www.comp.nus.edu.sg/~henz/projects/puzzles/digits/torn.html?19
---
The Torn Number from 'Amusements in Mathematics', Dudeney, number 113
I had the other day in my possession a label bearing the number 3025
in large figures. This got accidentally torn in half, so that 30 was
on one piece and 25 on the other. On looking at these pieces I began
to make a calculation, scarcely conscious of what I was doing, when I
discovered this little peculiarity. If we add the 30 and the 25
together and square the sum we get as the result the complete original
number on the label! Now, the puzzle is to find another number,
composed of four figures, all different, which may be divided in the
middle and produce the same result.
---
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys,math
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
from itertools import combinations
def torn_numbers():
x = intvar(0,9,shape=4,name="x")
x3, x2, x1, x0 = x
sumx = intvar(0,9999,name="sumx")
model = Model([AllDifferent(x),
x3 != 0,
sumx == x3 * 10 + x2 + x1 * 10 + x0,
sumx*sumx == x3 * 1000 + x2 * 100 + x1 * 10 + x0
])
num_solutions = 0
ss = CPM_ortools(model)
while ss.solve() is not False:
num_solutions += 1
print("x:",x.value(),"sum:",sumx.value())
get_different_solution(ss,x)
print("number of solutions:", num_solutions)
torn_numbers()
|
{"hexsha": "a2284201b3d7516ec84eccba8fafb21193935aed", "size": 1642, "ext": "py", "lang": "Python", "max_stars_repo_path": "cpmpy/torn_numbers.py", "max_stars_repo_name": "hakank/hakank", "max_stars_repo_head_hexsha": "313e5c0552569863047f6ce9ae48ea0f6ec0c32b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 279, "max_stars_repo_stars_event_min_datetime": "2015-01-10T09:55:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T02:34:03.000Z", "max_issues_repo_path": "cpmpy/torn_numbers.py", "max_issues_repo_name": "hakank/hakank", "max_issues_repo_head_hexsha": "313e5c0552569863047f6ce9ae48ea0f6ec0c32b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2017-10-05T15:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-20T12:06:52.000Z", "max_forks_repo_path": "cpmpy/torn_numbers.py", "max_forks_repo_name": "hakank/hakank", "max_forks_repo_head_hexsha": "313e5c0552569863047f6ce9ae48ea0f6ec0c32b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 83, "max_forks_repo_forks_event_min_datetime": "2015-01-20T03:44:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T23:53:06.000Z", "avg_line_length": 28.8070175439, "max_line_length": 72, "alphanum_fraction": 0.6650426309, "include": true, "reason": "import numpy", "num_tokens": 474}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 4 16:27:43 2020
@author: bernardo
"""
import matplotlib.pyplot as plt
import numpy as np
import csv
import sys
from datetime import datetime, timezone
ts = []
p = []
tmp = []
iaq = []
iaqAcq = []
gRes = []
hum = []
cO2 = []
voc = []
staticIaq = []
if len(sys.argv) > 1:
filename = str(sys.argv[1])
else:
filename = 'bme680_data.csv'
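# CSV column layout (inferred from the reads below): 0 = unix timestamp,
# 2 = pressure, 3 = gas resistance, 4 = IAQ, 5 = IAQ accuracy,
# 6 = temperature, 7 = humidity, 9 = CO2 equivalent, 10 = breath VOC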
with open(filename, 'r') as csvfile:
data = csv.reader(csvfile, delimiter=',')
for row in data:
ts.append(datetime.fromtimestamp(int(row[0]), timezone.utc))
p.append(float(row[2]))
gRes.append(float(row[3]))
iaq.append(float(row[4]))
iaqAcq.append(int(row[5]))
tmp.append(float(row[6]))
hum.append(float(row[7]))
cO2.append(float(row[9]))
voc.append(float(row[10]))
fig, axs = plt.subplots(1, 1, sharex=True)
color = 'tab:red'
axs.set_ylabel('IAQ')
# axs.set_xlabel('time (s)')
axs.tick_params(axis='y', labelcolor=color)
axs.plot(ts, iaq, color=color)
ax3 = axs.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax3.set_ylabel('IAQ (0-3)', color=color)  # the x-axis is shared with axs
ax3.set_ylim(-1, 10)
ax3.plot(ts, iaqAcq, color=color)
# ax3.set_xlabel('Time (s)')
# beautify the x-labels
plt.gcf().autofmt_xdate()
plt.show()
|
{"hexsha": "5647988ac60264a56cfff6929b48f3e2f13dd1b4", "size": 1378, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot_bme680_iaq_csv.py", "max_stars_repo_name": "bernardocarvalho/esp32-bme680", "max_stars_repo_head_hexsha": "83d143aab831ab6444f157c9f170433f384371c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plot_bme680_iaq_csv.py", "max_issues_repo_name": "bernardocarvalho/esp32-bme680", "max_issues_repo_head_hexsha": "83d143aab831ab6444f157c9f170433f384371c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plot_bme680_iaq_csv.py", "max_forks_repo_name": "bernardocarvalho/esp32-bme680", "max_forks_repo_head_hexsha": "83d143aab831ab6444f157c9f170433f384371c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.2258064516, "max_line_length": 83, "alphanum_fraction": 0.6328011611, "include": true, "reason": "import numpy", "num_tokens": 416}
|
Jo Hatcher is a licensed Marriage and Family Therapist (license: MFC #33486).
It is easy to get swept away in the busyness of life and drift from that which is truly meaningful and important to us. When stress and unplanned events happen, we sometimes lose our balance. In my work with women, teens, couples, and children, we celebrate each person's strengths, allowing a more adaptive way of handling whatever comes your way. It is a journey of the heart to really know yourself and your strengths.
Davis offers a wide variety of Counseling and Psychological Services.
|
{"hexsha": "6083cc2881ee563cf1453daa3e1b49df169b8d04", "size": 603, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Jo_Hatcher%2C_LMFT.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Jo_Hatcher%2C_LMFT.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Jo_Hatcher%2C_LMFT.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 75.375, "max_line_length": 423, "alphanum_fraction": 0.7960199005, "num_tokens": 127}
|
\clearpage
\section{Kidney Droplet}
\subsection{All Cells, labeled by \emph{Cell Ontology Class}}
\subsubsection{Table of cell counts in All Cells, per \emph{Cell Ontology Class}}\begin{table}[h]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
\emph{Cell Ontology Class}& Number of cells \\ \midrule
kidney capillary endothelial cell & 392 \\
kidney cell & 45 \\
kidney collecting duct epithelial cell & 443 \\
kidney loop of Henle ascending limb epithelial cell & 471 \\
kidney proximal straight tubule epithelial cell & 1198 \\
leukocyte & 42 \\
macrophage & 139 \\
mesangial cell & 51 \\
\bottomrule
\end{tabular}
\caption{Cell counts for All Cells, per \emph{Cell Ontology Class}.}\label{tab:kidney-droplet-cell-ontology-counts}
\end{table}
\clearpage
\subsubsection{t-SNE plot}
\begin{figure}[h]
\centering
\includegraphics[height=.35\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cell_ontology_class_tsneplot"}.pdf}
\includegraphics[height=.35\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cell_ontology_class_tsneplot_legend"}.pdf}
\caption{Top, t-Distributed stochastic neighbor embedding (tSNE) plot of \emph{Cell Ontology Class} labels in All Cells of Kidney Droplet. Bottom, legend mapping \emph{Cell Ontology Class} (and letter abbreviation) to colors.}
\end{figure}
\clearpage
\subsubsection{Violinplot (1 of 2, \emph{Acta2}--\emph{Pecam1})}
\begin{figure}[h]
\centering
\includegraphics[width=.6\textwidth]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cell_ontology_class_violinplot_1-of-2"}.pdf}
\caption{ Violinplot (1 of 2) showing gene expression enrichment in \emph{Cell Ontology Class} labels in All Cells of Kidney Droplet. A: kidney capillary endothelial cell, B: kidney cell, C: kidney collecting duct epithelial cell, D: kidney loop of Henle ascending limb epithelial cell, E: kidney proximal straight tubule epithelial cell, F: leukocyte, G: macrophage, H: mesangial cell.}
\end{figure}
\clearpage
\subsubsection{Violinplot (2 of 2, \emph{Podxl}--\emph{Wt1})}
\begin{figure}[h]
\centering
\includegraphics[width=.6\textwidth]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cell_ontology_class_violinplot_2-of-2"}.pdf}
\caption{ Violinplot (2 of 2) showing gene expression enrichment in \emph{Cell Ontology Class} labels in All Cells of Kidney Droplet. A: kidney capillary endothelial cell, B: kidney cell, C: kidney collecting duct epithelial cell, D: kidney loop of Henle ascending limb epithelial cell, E: kidney proximal straight tubule epithelial cell, F: leukocyte, G: macrophage, H: mesangial cell.}
\end{figure}
\clearpage
\subsubsection{Dotplot (1 of 2, \emph{Acta2}--\emph{Pecam1})}
\begin{figure}[h]
\centering
\includegraphics[angle=90, height=.6\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cell_ontology_class_dotplot_1-of-2"}.pdf}
\caption{ Dotplot (1 of 2) showing gene expression enrichment in \emph{Cell Ontology Class} labels in All Cells of Kidney Droplet. A: kidney capillary endothelial cell, B: kidney cell, C: kidney collecting duct epithelial cell, D: kidney loop of Henle ascending limb epithelial cell, E: kidney proximal straight tubule epithelial cell, F: leukocyte, G: macrophage, H: mesangial cell.}
\end{figure}
\clearpage
\subsubsection{Dotplot (2 of 2, \emph{Podxl}--\emph{Wt1})}
\begin{figure}[h]
\centering
\includegraphics[angle=90, height=.6\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cell_ontology_class_dotplot_2-of-2"}.pdf}
\caption{ Dotplot (2 of 2) showing gene expression enrichment in \emph{Cell Ontology Class} labels in All Cells of Kidney Droplet. A: kidney capillary endothelial cell, B: kidney cell, C: kidney collecting duct epithelial cell, D: kidney loop of Henle ascending limb epithelial cell, E: kidney proximal straight tubule epithelial cell, F: leukocyte, G: macrophage, H: mesangial cell.}
\end{figure}
\clearpage
\subsection{All Cells, labeled by \emph{Cluster IDs}}
\subsubsection{Table of cell counts in All Cells, per \emph{Cluster IDs}}\begin{table}[h]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
\emph{Cluster IDs}& Number of cells \\ \midrule
0 & 395 \\
1 & 392 \\
2 & 282 \\
3 & 279 \\
4 & 264 \\
5 & 257 \\
6 & 244 \\
7 & 192 \\
8 & 139 \\
9 & 117 \\
10 & 82 \\
11 & 51 \\
12 & 45 \\
13 & 42 \\
\bottomrule
\end{tabular}
\caption{Cell counts for All Cells, per \emph{Cluster IDs}.}\label{tab:kidney-droplet-cluster-id-counts}
\end{table}
\clearpage
\subsubsection{t-SNE plot}
\begin{figure}[h]
\centering
\includegraphics[height=.35\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cluster-ids_tsneplot"}.pdf}
\includegraphics[height=.35\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cluster-ids_tsneplot_legend"}.pdf}
\caption{Top, t-Distributed stochastic neighbor embedding (tSNE) plot of \emph{Cluster IDs} labels in All Cells of Kidney Droplet. Bottom, legend mapping \emph{Cluster IDs} to colors.}
\end{figure}
\clearpage
\subsubsection{Violinplot (1 of 2, \emph{Acta2}--\emph{Pecam1})}
\begin{figure}[h]
\centering
\includegraphics[width=.6\textwidth]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cluster-ids_violinplot_1-of-2"}.pdf}
\caption{ Violinplot (1 of 2) showing gene expression enrichment in \emph{Cluster IDs} labels in All Cells of Kidney Droplet. }
\end{figure}
\clearpage
\subsubsection{Violinplot (2 of 2, \emph{Podxl}--\emph{Wt1})}
\begin{figure}[h]
\centering
\includegraphics[width=.6\textwidth]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cluster-ids_violinplot_2-of-2"}.pdf}
\caption{ Violinplot (2 of 2) showing gene expression enrichment in \emph{Cluster IDs} labels in All Cells of Kidney Droplet. }
\end{figure}
\clearpage
\subsubsection{Dotplot (1 of 2, \emph{Acta2}--\emph{Pecam1})}
\begin{figure}[h]
\centering
\includegraphics[angle=90, height=.6\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cluster-ids_dotplot_1-of-2"}.pdf}
\caption{ Dotplot (1 of 2) showing gene expression enrichment in \emph{Cluster IDs} labels in All Cells of Kidney Droplet. }
\end{figure}
\clearpage
\subsubsection{Dotplot (2 of 2, \emph{Podxl}--\emph{Wt1})}
\begin{figure}[h]
\centering
\includegraphics[angle=90, height=.6\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_cluster-ids_dotplot_2-of-2"}.pdf}
\caption{ Dotplot (2 of 2) showing gene expression enrichment in \emph{Cluster IDs} labels in All Cells of Kidney Droplet. }
\end{figure}
\clearpage
\subsection{All Cells, labeled by \emph{Free Annotation}}
\subsubsection{Table of cell counts in All Cells, per \emph{Free Annotation}}\begin{table}[h]
\centering
\begin{tabular}{@{}ll@{}}
\toprule
\emph{Free Annotation}& Number of cells \\ \midrule
kidney capillary endothelial cell & 392 \\
kidney cell & 45 \\
kidney collecting duct epithelial cell & 443 \\
kidney loop of Henle ascending limb epithelial cell & 471 \\
kidney proximal straight tubule epithelial cell & 1198 \\
leukocyte & 42 \\
macrophage & 139 \\
mesangial cell & 51 \\
\bottomrule
\end{tabular}
\caption{Cell counts for All Cells, per \emph{Free Annotation}.}\label{tab:kidney-droplet-free-annotation-counts}
\end{table}
\clearpage
\subsubsection{t-SNE plot}
\begin{figure}[h]
\centering
\includegraphics[height=.35\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_free_annotation_tsneplot"}.pdf}
\includegraphics[height=.35\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_free_annotation_tsneplot_legend"}.pdf}
\caption{Top, t-Distributed stochastic neighbor embedding (tSNE) plot of \emph{Free Annotation} labels in All Cells of Kidney Droplet. Bottom, legend mapping \emph{Free Annotation} (and letter abbreviation) to colors.}
\end{figure}
\clearpage
\subsubsection{Violinplot (1 of 2, \emph{Acta2}--\emph{Pecam1})}
\begin{figure}[h]
\centering
\includegraphics[width=.6\textwidth]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_free_annotation_violinplot_1-of-2"}.pdf}
\caption{ Violinplot (1 of 2) showing gene expression enrichment in \emph{Free Annotation} labels in All Cells of Kidney Droplet. A: kidney capillary endothelial cell, B: kidney cell, C: kidney collecting duct epithelial cell, D: kidney loop of Henle ascending limb epithelial cell, E: kidney proximal straight tubule epithelial cell, F: leukocyte, G: macrophage, H: mesangial cell.}
\end{figure}
\clearpage
\subsubsection{Violinplot (2 of 2, \emph{Podxl}--\emph{Wt1})}
\begin{figure}[h]
\centering
\includegraphics[width=.6\textwidth]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_free_annotation_violinplot_2-of-2"}.pdf}
\caption{ Violinplot (2 of 2) showing gene expression enrichment in \emph{Free Annotation} labels in All Cells of Kidney Droplet. A: kidney capillary endothelial cell, B: kidney cell, C: kidney collecting duct epithelial cell, D: kidney loop of Henle ascending limb epithelial cell, E: kidney proximal straight tubule epithelial cell, F: leukocyte, G: macrophage, H: mesangial cell.}
\end{figure}
\clearpage
\subsubsection{Dotplot (1 of 2, \emph{Acta2}--\emph{Pecam1})}
\begin{figure}[h]
\centering
\includegraphics[angle=90, height=.6\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_free_annotation_dotplot_1-of-2"}.pdf}
\caption{ Dotplot (1 of 2) showing gene expression enrichment in \emph{Free Annotation} labels in All Cells of Kidney Droplet. A: kidney capillary endothelial cell, B: kidney cell, C: kidney collecting duct epithelial cell, D: kidney loop of Henle ascending limb epithelial cell, E: kidney proximal straight tubule epithelial cell, F: leukocyte, G: macrophage, H: mesangial cell.}
\end{figure}
\clearpage
\subsubsection{Dotplot (2 of 2, \emph{Podxl}--\emph{Wt1})}
\begin{figure}[h]
\centering
\includegraphics[angle=90, height=.6\textheight]{{"../30_tissue_supplement_figures/Kidney/droplet/allcells_free_annotation_dotplot_2-of-2"}.pdf}
\caption{ Dotplot (2 of 2) showing gene expression enrichment in \emph{Free Annotation} labels in All Cells of Kidney Droplet. A: kidney capillary endothelial cell, B: kidney cell, C: kidney collecting duct epithelial cell, D: kidney loop of Henle ascending limb epithelial cell, E: kidney proximal straight tubule epithelial cell, F: leukocyte, G: macrophage, H: mesangial cell.}
\end{figure}
|
{"hexsha": "a04d7ee652a407d8e419729a64775814b2968810", "size": 10316, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "31_tissue_supplement_tex/Kidney_droplet_auto_generated.tex", "max_stars_repo_name": "mjoppich/tabula-muris", "max_stars_repo_head_hexsha": "c1a7b7854b7b9a191141c6f2c4d89179ec41603b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 147, "max_stars_repo_stars_event_min_datetime": "2018-02-21T21:20:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T15:33:10.000Z", "max_issues_repo_path": "31_tissue_supplement_tex/Kidney_droplet_auto_generated.tex", "max_issues_repo_name": "mjoppich/tabula-muris", "max_issues_repo_head_hexsha": "c1a7b7854b7b9a191141c6f2c4d89179ec41603b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 180, "max_issues_repo_issues_event_min_datetime": "2018-02-07T22:23:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-25T21:13:57.000Z", "max_forks_repo_path": "31_tissue_supplement_tex/Kidney_droplet_auto_generated.tex", "max_forks_repo_name": "mjoppich/tabula-muris", "max_forks_repo_head_hexsha": "c1a7b7854b7b9a191141c6f2c4d89179ec41603b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 81, "max_forks_repo_forks_event_min_datetime": "2018-02-07T00:03:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T07:04:22.000Z", "avg_line_length": 38.9283018868, "max_line_length": 388, "alphanum_fraction": 0.7657037611, "num_tokens": 3240}
|
# import the necessary packages
# coding:utf-8
import json
import os
import cv2 as cv
import keras.backend as K
import numpy as np
from keras.applications.inception_resnet_v2 import preprocess_input
from tqdm import tqdm
from config import train_data, test_a_image_folder, img_height, img_width
from model import build_model
from utils import get_best_model
if __name__ == '__main__':
best_model, epoch = get_best_model()
model = build_model()
model.load_weights(best_model)
labels = [folder for folder in os.listdir(train_data) if os.path.isdir(os.path.join(train_data, folder))]
test_images = [f for f in os.listdir(test_a_image_folder) if
os.path.isfile(os.path.join(test_a_image_folder, f)) and f.lower().endswith('.jpg')]
results = []
for image_id in tqdm(test_images):
filename = os.path.join(test_a_image_folder, image_id)
# print('Start processing image: {}'.format(filename))
image = cv.imread(filename)
        image = cv.resize(image, (img_width, img_height), interpolation=cv.INTER_CUBIC)  # dsize is (width, height); interpolation must be passed by keyword
rgb_img = cv.cvtColor(image, cv.COLOR_BGR2RGB)
rgb_img = np.expand_dims(rgb_img, 0).astype(np.float32)
rgb_img = preprocess_input(rgb_img)
preds = model.predict(rgb_img)
prob = np.max(preds)
class_id = int(np.argmax(preds))
# print(labels[class_id])
results.append({'image_id': image_id, 'disease_class': class_id})
with open('eval.json', 'w') as file:
json.dump(results, file, ensure_ascii=False, indent=4)
K.clear_session()
|
{"hexsha": "2bd848af5710a7e3865286131861067dc3c874a1", "size": 1572, "ext": "py", "lang": "Python", "max_stars_repo_path": "eval.py", "max_stars_repo_name": "foamliu/Crop-Disease-Detection", "max_stars_repo_head_hexsha": "fca9baefca48ad2fbdbc62075580fa27e5ed3531", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2018-09-28T02:29:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T16:33:33.000Z", "max_issues_repo_path": "eval.py", "max_issues_repo_name": "Cooper111/Crop-Disease-Detection", "max_issues_repo_head_hexsha": "fca9baefca48ad2fbdbc62075580fa27e5ed3531", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-09-25T05:04:47.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-09T07:14:26.000Z", "max_forks_repo_path": "eval.py", "max_forks_repo_name": "Cooper111/Crop-Disease-Detection", "max_forks_repo_head_hexsha": "fca9baefca48ad2fbdbc62075580fa27e5ed3531", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-09-26T13:16:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-05T23:15:13.000Z", "avg_line_length": 34.9333333333, "max_line_length": 109, "alphanum_fraction": 0.6959287532, "include": true, "reason": "import numpy", "num_tokens": 374}
|
import numpy as np
from bpdb import set_trace
from scipy.constants import c
from sympy import Matrix, symbols
from sympy.utilities.lambdify import lambdify
class Sensors:
def __init__(self, env):
self.env = env
self.define_measurement_models()
def define_measurement_models(self):
self.define_pseudorange_model()
self.define_gps_model()
def define_pseudorange_model(self):
self.evaluate_pseudorange = {}
self.evaluate_pseudorange_jac = {}
self.evaluate_pseudorange_R = {}
x_vec = self.env.dynamics.x_vec
sigma_read_func = lambda agent_name: self.env.agent_configs[
agent_name
].getfloat("sigma_clock_reading")
R_read_func = lambda agent_name_0, agent_name_1: self.env.c ** 2 * (
sigma_read_func(agent_name_0) ** 2 + sigma_read_func(agent_name_1) ** 2
)
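        # Pseudorange measurement model (sketch): for transmitter T and receiver R,
        #   rho_TR = ||x_R - x_T(t - tau)|| + b_R - b_T(t - tau),  tau = d / c,
        # where the clock biases b are in distance units (hence the c**2 factor
        # in the noise above) and the transmitter state is extrapolated back
        # over the light time tau, as coded in the loop below.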
for T in self.env.AGENT_NAMES:
for R in self.env.AGENT_NAMES:
if T == R:
continue
# receiver states
x_R = self.env.dynamics.get_sym_position(R)
b_R = self.env.dynamics.get_sym("b", R)
# transmitter states
x_T = self.env.dynamics.get_sym_position(T)
x_dot_T = self.env.dynamics.get_sym_velocity(T)
b_T = self.env.dynamics.get_sym("b", T)
b_dot_T = self.env.dynamics.get_sym("b_dot", T)
# distance
d = Matrix(x_R - x_T).norm()
# transmit time
tau_dist = d / c
# transmitter states at transmit time
x_T = x_T - x_dot_T * tau_dist
b_T = b_T - b_dot_T * tau_dist
# pseudorange measurement
rho = d + b_R - b_T
h = Matrix([rho])
dh_dx = h.jacobian(x_vec)
# pseudorange noise
R_matrix = R_read_func(T, R)
# lambdify
TR = T + R
self.evaluate_pseudorange[TR] = lambdify(x_vec, np.squeeze(h), "numpy")
self.evaluate_pseudorange_jac[TR] = lambdify(
x_vec, np.squeeze(dh_dx), "numpy"
)
self.evaluate_pseudorange_R[TR] = R_matrix
def define_gps_model(self):
self.evaluate_gps = {}
self.evaluate_gps_R = {}
x_vec = self.env.dynamics.x_vec
for agent_name in self.env.AGENT_NAMES:
x = self.env.dynamics.get_sym_position(agent_name)
h = Matrix([x])
self.evaluate_gps[agent_name] = lambdify(x_vec, np.squeeze(h), "numpy")
if self.env.agent_configs[agent_name].getboolean("gps"):
sigma_gps = self.env.agent_configs[agent_name].getfloat("sigma_gps")
self.evaluate_gps_R[agent_name] = sigma_gps ** 2
|
{"hexsha": "70472caae5474c58c5ec0250390eb31b09901712", "size": 2907, "ext": "py", "lang": "Python", "max_stars_repo_path": "pntddf/sensors.py", "max_stars_repo_name": "COHRINT/pnt-ddf", "max_stars_repo_head_hexsha": "5ea00957e74452ab41a34b00a60b9e4001144280", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pntddf/sensors.py", "max_issues_repo_name": "COHRINT/pnt-ddf", "max_issues_repo_head_hexsha": "5ea00957e74452ab41a34b00a60b9e4001144280", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pntddf/sensors.py", "max_forks_repo_name": "COHRINT/pnt-ddf", "max_forks_repo_head_hexsha": "5ea00957e74452ab41a34b00a60b9e4001144280", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-25T20:54:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-25T20:54:15.000Z", "avg_line_length": 33.4137931034, "max_line_length": 87, "alphanum_fraction": 0.563123495, "include": true, "reason": "import numpy,from scipy,from sympy", "num_tokens": 668}
|
[STATEMENT]
lemma f_make_mono_less:
"\<forall>n. f n < oLimit f \<Longrightarrow> f (make_mono f n) < f (make_mono f (Suc n))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>n. f n < oLimit f \<Longrightarrow> f (make_mono f n) < f (make_mono f (Suc n))
[PROOF STEP]
apply (drule_tac x="make_mono f n" in spec)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f (make_mono f n) < oLimit f \<Longrightarrow> f (make_mono f n) < f (make_mono f (Suc n))
[PROOF STEP]
apply (drule less_oLimitD, clarsimp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>na. f (make_mono f n) < f na \<Longrightarrow> f (make_mono f n) < f (LEAST x. f (make_mono f n) < f x)
[PROOF STEP]
apply (erule LeastI)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 348, "file": "Ordinal_OrdinalInduct", "length": 4}
|
import numpy as np
import matplotlib.pyplot as plt
plt.imshow(np.zeros((100, 100)))
plt.show()
|
{"hexsha": "6264e8c1e70682f46f6dfb3471f2ac8af48a8990", "size": 95, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/plt.py", "max_stars_repo_name": "f-sky/DeepV2D", "max_stars_repo_head_hexsha": "5c1c6f58ee359d045a7efd5161445ea87d83bdbe", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-29T06:49:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-29T06:49:42.000Z", "max_issues_repo_path": "tests/plt.py", "max_issues_repo_name": "f-sky/DeepV2D", "max_issues_repo_head_hexsha": "5c1c6f58ee359d045a7efd5161445ea87d83bdbe", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/plt.py", "max_forks_repo_name": "f-sky/DeepV2D", "max_forks_repo_head_hexsha": "5c1c6f58ee359d045a7efd5161445ea87d83bdbe", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.0, "max_line_length": 32, "alphanum_fraction": 0.7473684211, "include": true, "reason": "import numpy", "num_tokens": 24}
|
"""Tests for SIR model in this repo
* Compares conserved quantities
* Compares model against Penn CHIME w/wo social policies
* Checks logistic policies in extreme limit
"""
from typing import Tuple
from datetime import date
from pytest import fixture
from numpy import zeros
from pandas import DataFrame, Series
from pandas.testing import assert_frame_equal, assert_series_equal
from penn_chime.model.parameters import Parameters, Disposition
from penn_chime.model.sir import (
Sir,
sim_sir,
calculate_dispositions,
calculate_admits,
calculate_census,
)
from models import sir_step, FitFcn, one_minus_logistic_fcn
COLS_TO_COMPARE = [
"susceptible",
"infected",
"recovered",
"hospitalized_new",
"hospitalized",
]
COLUMN_MAP = {
"hospitalized": "hospitalized_new",
"census_hospitalized": "hospitalized",
}
@fixture(name="penn_chime_setup")
def fixture_penn_chime_setup() -> Tuple[Parameters, Sir]:
"""Initializes penn_chime parameters and SIR model
"""
p = Parameters(
current_hospitalized=69,
date_first_hospitalized=date(2020, 3, 7),
doubling_time=None,
hospitalized=Disposition.create(days=7, rate=0.025),
icu=Disposition.create(days=9, rate=0.0075),
infectious_days=14,
market_share=0.15,
n_days=100,
population=3600000,
recovered=0,
relative_contact_rate=0.3,
ventilated=Disposition.create(days=10, rate=0.005),
)
return p, Sir(p)
@fixture(name="penn_chime_raw_df_no_beta")
def fixture_penn_chime_raw_df_no_beta(penn_chime_setup) -> DataFrame:
"""Runs penn_chime SIR model for no social policies
"""
p, simsir = penn_chime_setup
n_days = simsir.raw_df.day.max() - simsir.raw_df.day.min()
policies = [(simsir.beta, n_days)]
raw = sim_sir(
simsir.susceptible,
simsir.infected,
p.recovered,
simsir.gamma,
-simsir.i_day,
policies,
)
calculate_dispositions(raw, simsir.rates, market_share=p.market_share)
calculate_admits(raw, simsir.rates)
calculate_census(raw, simsir.days)
raw_df = DataFrame(raw)
return raw_df
@fixture(name="sir_data")
def fixture_sir_data(penn_chime_setup, penn_chime_raw_df_no_beta):
"""Provides data for local sir module
"""
p, simsir = penn_chime_setup
raw_df = penn_chime_raw_df_no_beta
day0 = raw_df.iloc[0].fillna(0)
pars = {
"beta_i": simsir.beta,
"gamma_i": simsir.gamma,
"initial_susceptible": day0.susceptible,
"initial_infected": day0.infected,
"initial_hospitalized": day0.hospitalized,
"initial_recovered": day0.recovered,
"hospitalization_rate": simsir.rates["hospitalized"] * p.market_share,
}
x = {
"n_iter": raw_df.shape[0],
"length_of_stay": p.dispositions["hospitalized"].days,
}
return x, pars
@fixture(name="sir_data_w_policy")
def fixture_sir_data_w_policy(penn_chime_setup):
"""Provides data for local sir module with implemented policies
"""
p, simsir = penn_chime_setup
raw_df = simsir.raw_df
day0 = raw_df.iloc[0].fillna(0)
pars = {
"beta_i": simsir.beta,
"gamma_i": simsir.gamma,
"initial_susceptible": day0.susceptible,
"initial_infected": day0.infected,
"initial_hospitalized": day0.hospitalized,
"initial_recovered": day0.recovered,
"hospitalization_rate": simsir.rates["hospitalized"] * p.market_share,
}
x = {
"n_iter": raw_df.shape[0],
"length_of_stay": p.dispositions["hospitalized"].days,
}
return x, pars
def test_conserved_n(sir_data):
"""Checks if S + I + R is conserved for local SIR
"""
x, pars = sir_data
n_total = 0
for key in ["susceptible", "infected", "recovered"]:
n_total += pars[f"initial_{key}"]
f = FitFcn(sir_step)
y = f(x, pars)[["susceptible", "infected", "recovered"]].sum(axis=1) - n_total
assert_series_equal(y, Series([0.0] * len(y)))
def test_sir_vs_penn_chime_no_policies(penn_chime_raw_df_no_beta, sir_data):
"""Compares local SIR against penn_chime SIR for no social policies
"""
x, pars = sir_data
f = FitFcn(sir_step)
y = f(x, pars)
assert_frame_equal(
penn_chime_raw_df_no_beta.rename(columns=COLUMN_MAP)[COLS_TO_COMPARE],
y[COLS_TO_COMPARE],
)
def test_sir_vs_penn_chime_w_policies(penn_chime_setup, sir_data_w_policy):
"""Compares local SIR against penn_chime SIR for with social policies
"""
p, sir = penn_chime_setup
x, pars = sir_data_w_policy
policies = sir.gen_policy(p)
def beta_i_fcn(x_iter, **kwargs): # pylint: disable=W0613
out = zeros(len(x_iter))
ii = 0
for beta, n_days in policies:
for _ in range(n_days):
out[ii] = beta
ii += 1
return out
f = FitFcn(sir_step, beta_i_fcn=beta_i_fcn)
y = f(x, pars)
assert_frame_equal(
sir.raw_df.rename(columns=COLUMN_MAP)[COLS_TO_COMPARE], y[COLS_TO_COMPARE],
)
def test_sir_logistic_policy(penn_chime_setup, sir_data_w_policy):
"""Compares local SIR against penn_chime SIR for with social policies
where social distancing policies are no implemented as a logistic function
"""
p, sir = penn_chime_setup
x, pars = sir_data_w_policy
policies = sir.gen_policy(p)
# Set up logistic function to match policies (Sharp decay)
pars["beta_i"] = policies[0][0]
pars["ratio"] = 1 - policies[1][0] / policies[0][0]
pars["x0"] = policies[0][1] - 0.5
pars["decay_width"] = 1.0e7
f = FitFcn(sir_step, beta_i_fcn=one_minus_logistic_fcn)
y = f(x, pars)
assert_frame_equal(
sir.raw_df.rename(columns=COLUMN_MAP)[COLS_TO_COMPARE], y[COLS_TO_COMPARE],
)
|
{"hexsha": "0f6923a9170d6828a2358f14f60bddc04ec10c6e", "size": 5871, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/sir_test.py", "max_stars_repo_name": "ckoerber/covid-19-analysis", "max_stars_repo_head_hexsha": "d6b3324cdac37b20aebc18932ffd9077e9d1b969", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/sir_test.py", "max_issues_repo_name": "ckoerber/covid-19-analysis", "max_issues_repo_head_hexsha": "d6b3324cdac37b20aebc18932ffd9077e9d1b969", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/sir_test.py", "max_forks_repo_name": "ckoerber/covid-19-analysis", "max_forks_repo_head_hexsha": "d6b3324cdac37b20aebc18932ffd9077e9d1b969", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0909090909, "max_line_length": 83, "alphanum_fraction": 0.6695622552, "include": true, "reason": "from numpy", "num_tokens": 1613}
|
import os
from pathlib import Path
import csv
import tensorflow as tf
import sqlite3
import numpy as np
DATA_PATH = Path(__file__).resolve().parents[3] / "parsed_data"
DB_PATH = Path(__file__).resolve().parents[3] / "webserver" / "app.db"
RATING_TRAIN_FILENAME = "ratings_train.csv"
RATING_TEST_FILENAME = "ratings_test.csv"
MOVIE_FILENAME = "movies.csv"
class Dataset:
"""Simple class for datasets."""
def __init__(self, test_fraction=0.3, batch_size=512):
self.test_fraction = test_fraction
self.batch_size = batch_size
self.train = None
self.test = None
self.movies = None
def load_or_generate_data(self, update_to_latest_db=True):
dirname = _load_or_generate_csv_data(self.test_fraction, update_to_latest_db)
self.train = tf.data.experimental.make_csv_dataset(os.path.join(dirname, RATING_TRAIN_FILENAME),batch_size=self.batch_size,num_epochs=1)
self.test = tf.data.experimental.make_csv_dataset(os.path.join(dirname, RATING_TEST_FILENAME),batch_size=self.batch_size,num_epochs=1)
self.movies = tf.data.experimental.make_csv_dataset(os.path.join(dirname, MOVIE_FILENAME),batch_size=self.batch_size,num_epochs=1,shuffle=False)
@property
def unique_user_ids(self):
user_ids = self.train.map(lambda x: x["userid"])
return np.unique(np.concatenate(list(user_ids)))
@property
def unique_movie_ids(self):
movie_ids = self.train.map(lambda x: x["movieid"])
return np.unique(np.concatenate(list(movie_ids)))
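# Usage sketch (assumes the webserver SQLite DB at DB_PATH exists):
#   ds = Dataset(test_fraction=0.3, batch_size=512)
#   ds.load_or_generate_data()
#   for batch in ds.train.take(1):
#       print(batch["userid"], batch["movieid"])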
def _load_or_generate_csv_data(test_fraction, update_to_latest_db):
DATA_PATH.mkdir(parents=True, exist_ok=True)
list_of_dirs = [os.path.join(DATA_PATH, d) for d in os.listdir(DATA_PATH) if os.path.isdir(os.path.join(DATA_PATH, d))]
if len(list_of_dirs) > 0:
latest_dir = max(list_of_dirs, key=os.path.getctime)
if not update_to_latest_db:
print("Loaded latest dataset(without update check)")
return latest_dir
if os.path.getctime(latest_dir) >= os.path.getmtime(DB_PATH):
print("No DB update... Loaded latest dataset")
return latest_dir
print("Generating New dataset...")
db_mtime = os.path.getmtime(DB_PATH)
datadir = os.path.join(DATA_PATH, str(db_mtime))
os.mkdir(datadir)
con = sqlite3.connect(DB_PATH)
with open(os.path.join(datadir,MOVIE_FILENAME), 'w') as f:
cursor = con.execute('select * from movie')
outcsv = csv.writer(f)
outcsv.writerow(x[0] for x in cursor.description)
outcsv.writerows(cursor.fetchall())
tr = open(os.path.join(datadir,RATING_TRAIN_FILENAME), 'w')
te = open(os.path.join(datadir,RATING_TEST_FILENAME), 'w')
tr_outcsv = csv.writer(tr)
te_outcsv = csv.writer(te)
    cursor = con.execute('select * from user_movie_rating')  # UserMovieRating table
tr_outcsv.writerow(x[0] for x in cursor.description)
te_outcsv.writerow(x[0] for x in cursor.description)
for row in cursor.fetchall():
if np.random.random_sample() > test_fraction:
tr_outcsv.writerow(x for x in row)
else:
te_outcsv.writerow(x for x in row)
    tr.close()
    te.close()
    con.close()
    return datadir
|
{"hexsha": "e0248eff98feace23f859ba58717df7adc41fb0f", "size": 3252, "ext": "py", "lang": "Python", "max_stars_repo_path": "recommenders/recommenders/datasets/dataset.py", "max_stars_repo_name": "hojinYang/tfrs-movierec-serving", "max_stars_repo_head_hexsha": "bef4f19a8be99cde510d761082de7602151a7d99", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-11-14T07:03:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T00:56:49.000Z", "max_issues_repo_path": "recommenders/recommenders/datasets/dataset.py", "max_issues_repo_name": "hojinYang/tfrs-movierec-serving", "max_issues_repo_head_hexsha": "bef4f19a8be99cde510d761082de7602151a7d99", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "recommenders/recommenders/datasets/dataset.py", "max_forks_repo_name": "hojinYang/tfrs-movierec-serving", "max_forks_repo_head_hexsha": "bef4f19a8be99cde510d761082de7602151a7d99", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-20T06:00:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T06:00:51.000Z", "avg_line_length": 36.9545454545, "max_line_length": 152, "alphanum_fraction": 0.6888068881, "include": true, "reason": "import numpy", "num_tokens": 759}
|
import argparse
import os
from plyfile import PlyData, PlyElement
import numpy as np
from sklearn.decomposition import PCA
parser = argparse.ArgumentParser()
parser.add_argument("--rootdir", type=str, required=True)
parser.add_argument("--destdir", type=str, required=True)
parser.add_argument("--test", action="store_true")
args = parser.parse_args()
# file lists for the NPM3D training and test splits
train_filenames = ["Lille1_1.ply", "Lille1_2.ply", "Lille2.ply", "Paris.ply",]
test_filenames = ["ajaccio_2.ply", "ajaccio_57.ply", "dijon_9.ply"]
if args.test:
filenames = test_filenames
save_dir = os.path.join(args.destdir,"test_pointclouds")
else:
filenames = train_filenames
save_dir = os.path.join(args.destdir,"train_pointclouds")
os.makedirs(save_dir, exist_ok=True)
for filename in filenames:
if args.test:
fname = os.path.join(args.rootdir, "test_10_classes", filename)
else:
fname = os.path.join(args.rootdir, "training_10_classes", filename)
print(fname)
plydata = PlyData.read(fname)
print(plydata)
x = plydata["vertex"].data["x"].astype(np.float32)
y = plydata["vertex"].data["y"].astype(np.float32)
z = plydata["vertex"].data["z"].astype(np.float32)
reflectance = plydata["vertex"].data["reflectance"].astype(np.float32)
if not args.test:
label = plydata["vertex"].data["class"].astype(np.float32)
if args.test:
pts = np.concatenate([
np.expand_dims(x,1),
np.expand_dims(y,1),
np.expand_dims(z,1),
np.expand_dims(reflectance,1),
], axis=1).astype(np.float32)
np.save(os.path.join(save_dir, os.path.splitext(filename)[0]), pts)
else:
pts = np.concatenate([
np.expand_dims(x,1),
np.expand_dims(y,1),
np.expand_dims(z,1),
np.expand_dims(reflectance,1),
np.expand_dims(label,1),
], axis=1).astype(np.float32)
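        # Split the cloud into ~2.5M-point slabs along its dominant horizontal
        # direction: project xy onto the first principal component, cut once
        # with a histogram, then cut again with bin edges shifted by roughly
        # half a bin so that consecutive slabs overlap.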
        # split the cloud into chunks of roughly 2.5M points along the principal
        # horizontal direction (PCA fitted on a 1/10 subsample of x,y)
        pca = PCA(n_components=1)
        pca.fit(pts[::10, :2])
        pts_new = pca.transform(pts[:, :2])
        hist, edges = np.histogram(pts_new, pts_new.shape[0] // 2500000)
count = 0
for i in range(1,edges.shape[0]):
mask = np.logical_and(pts_new<=edges[i], pts_new>edges[i-1])[:,0]
np.save(os.path.join(save_dir, os.path.splitext(filename)[0]+f"_{count}"), pts[mask])
count+=1
        # second, half-bin-shifted pass so points near the borders of the first
        # pass end up inside a chunk; bin midpoints use true division, not floor
        hist, edges = np.histogram(pts_new, pts_new.shape[0] // 2500000 - 2, range=[(edges[1] + edges[0]) / 2, (edges[-1] + edges[-2]) / 2])
for i in range(1,edges.shape[0]):
mask = np.logical_and(pts_new<=edges[i], pts_new>edges[i-1])[:,0]
np.save(os.path.join(save_dir, os.path.splitext(filename)[0]+f"_{count}"), pts[mask])
count+=1
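# The two histogram passes above tile the cloud twice along the PCA axis, the
# second pass shifted by half a bin so that points near chunk borders fall in
# the interior of some chunk. A minimal sketch of the same binning idea on
# synthetic data (names here are illustrative only, not part of this script):
#
#   coords = np.random.rand(100000, 1)          # projected 1D coordinates
#   n_bins = max(1, coords.shape[0] // 2500)    # ~2500 points per chunk
#   hist, edges = np.histogram(coords, n_bins)
#   chunks = [coords[(coords[:, 0] > edges[i - 1]) & (coords[:, 0] <= edges[i])]
#             for i in range(1, edges.shape[0])]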
|
{"hexsha": "852051c3217a4dfda6f958bbc4424d607e1d203b", "size": 2818, "ext": "py", "lang": "Python", "max_stars_repo_path": "legacy_examples/npm3d/prepare_data.py", "max_stars_repo_name": "StructuralNeurobiologyLab/LightConvPoint", "max_stars_repo_head_hexsha": "3f353f45e9e910fa390a74520dfd478e3e88f104", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 58, "max_stars_repo_stars_event_min_datetime": "2020-04-16T16:55:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T00:36:55.000Z", "max_issues_repo_path": "legacy_examples/npm3d/prepare_data.py", "max_issues_repo_name": "StructuralNeurobiologyLab/LightConvPoint", "max_issues_repo_head_hexsha": "3f353f45e9e910fa390a74520dfd478e3e88f104", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-04-18T13:04:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-13T02:37:44.000Z", "max_forks_repo_path": "legacy_examples/npm3d/prepare_data.py", "max_forks_repo_name": "StructuralNeurobiologyLab/LightConvPoint", "max_forks_repo_head_hexsha": "3f353f45e9e910fa390a74520dfd478e3e88f104", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-04-17T04:07:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T00:38:23.000Z", "avg_line_length": 36.5974025974, "max_line_length": 131, "alphanum_fraction": 0.6124911285, "include": true, "reason": "import numpy", "num_tokens": 748}
|
abstract type AbstractGrid{T, N} <: AbstractArray{T, N} end
"""
struct Grid{T, N, S <: AbstractCoordinateSystem, AT} <: AbstractGrid{T, N}
Collection of `N` axes that define the dimensions of the grid needed to calculate
[`ElectricPotential`](@ref), [`ElectricField`](@ref) or [`WeightingPotential`](@ref).
## Parametric types
* `T`: Tick type (element type) of the axes.
* `N`: Dimension of the grid.
* `S`: Coordinate system (`Cartesian` or `Cylindrical`).
* `AT`: Axes type.
## Fields
* `axes::AT`: Tuple of length `N` containing `DiscreteAxis` for each dimension of the grid.
See also [`DiscreteAxis`](@ref).
"""
struct Grid{T, N, S <: AbstractCoordinateSystem, AT} <: AbstractGrid{T, N}
axes::AT
end
const CartesianGrid{T, N} = Grid{T, N, Cartesian}
const CartesianGrid1D{T} = CartesianGrid{T, 1}
const CartesianGrid2D{T} = CartesianGrid{T, 2}
const CartesianGrid3D{T} = CartesianGrid{T, 3}
const CylindricalGrid{T} = Grid{T, 3, Cylindrical}
#const RadialGrid{T} = Grid{T, 1, Radial}
#const PolarGrid{T} = Grid{T, 2, Polar}
#const SphericalGrid{T} = Grid{T, 3, Spherical}
CylindricalGrid{T}(a) where {T} = Grid{T, 3, Cylindrical, typeof(a)}(a)
CartesianGrid3D{T}(a) where {T} = Grid{T, 3, Cartesian, typeof(a)}(a)
@inline size(grid::Grid{T, N, S}) where {T, N, S} = size.(grid.axes, 1)
@inline length(grid::Grid{T, N, S}) where {T, N, S} = prod(size(grid))
@inline getindex(grid::Grid{T, N, S}, I::Vararg{Int, N}) where {T, N, S} = broadcast(getindex, grid.axes, I)
@inline getindex(grid::Grid{T, N, S}, i::Int) where {T, N, S} = getproperty(grid, :axes)[i]
@inline getindex(grid::Grid{T, N, S}, s::Symbol) where {T, N, S} = getindex(grid, Val{s}())
@inline getproperty(grid::Grid{T, N, S}, s::Symbol) where {T, N, S} = getproperty(grid, Val{s}())
@inline getproperty(grid::Grid{T}, ::Val{:axes}) where {T} = getfield(grid, :axes)
@inline getproperty(grid::CylindricalGrid{T}, ::Val{:axes}) where {T} = getfield(grid, :axes)
@inline getproperty(grid::CylindricalGrid{T}, ::Val{:r}) where {T} = @inbounds grid.axes[1]
@inline getproperty(grid::CylindricalGrid{T}, ::Val{:φ}) where {T} = @inbounds grid.axes[2]
@inline getproperty(grid::CylindricalGrid{T}, ::Val{:z}) where {T} = @inbounds grid.axes[3]
@inline getproperty(grid::CartesianGrid3D{T}, ::Val{:x}) where {T} = @inbounds grid.axes[1]
@inline getproperty(grid::CartesianGrid3D{T}, ::Val{:y}) where {T} = @inbounds grid.axes[2]
@inline getproperty(grid::CartesianGrid3D{T}, ::Val{:z}) where {T} = @inbounds grid.axes[3]
@inline getindex(grid::CylindricalGrid{T}, ::Val{:r}) where {T} = @inbounds grid.axes[1]
@inline getindex(grid::CylindricalGrid{T}, ::Val{:φ}) where {T} = @inbounds grid.axes[2]
@inline getindex(grid::CylindricalGrid{T}, ::Val{:z}) where {T} = @inbounds grid.axes[3]
@inline getindex(grid::CartesianGrid3D{T}, ::Val{:x}) where {T} = @inbounds grid.axes[1]
@inline getindex(grid::CartesianGrid3D{T}, ::Val{:y}) where {T} = @inbounds grid.axes[2]
@inline getindex(grid::CartesianGrid3D{T}, ::Val{:z}) where {T} = @inbounds grid.axes[3]
@inline GridPoint(grid::Grid{T, 3, Cylindrical}, inds::NTuple{3, Int}) where {T} =
CylindricalPoint{T}(broadcast(i -> grid.axes[i].ticks[inds[i]], (1, 2, 3)))
@inline GridPoint(grid::Grid{T, 3, Cartesian}, inds::NTuple{3, Int}) where {T} =
CartesianPoint{T}(broadcast(i -> grid.axes[i].ticks[inds[i]], (1, 2, 3)))
function sizeof(grid::Grid{T, N, S}) where {T, N, S}
return sum( sizeof.(grid.axes) )
end
function print(io::IO, grid::Grid{T, N, S}) where {T, N, S}
print(io, "Grid{$T, $N, $S}", grid.axes)
end
function println(io::IO, grid::Grid{T, N, S}) where {T, N, S}
    println(io, "  Grid{$T, $N, $S}")
    for (i, ax) in enumerate(grid.axes)
        println(io, "    Axis $(i): ", ax)
    end
end
show(io::IO, grid::Grid{T, N, S}) where {T, N, S} = print(io, grid)
show(io::IO, ::MIME"text/plain", grid::Grid{T, N, S}) where {T, N, S} = show(io, grid)
function check_grid(grid::CylindricalGrid{T})::Nothing where {T}
nr::Int, nφ::Int, nz::Int = size(grid)
@assert iseven(nz) "GridError: Field simulation algorithm in cylindrical coordinates needs an even number of grid points in z. This is not the case. #z-ticks = $(nz)."
@assert (iseven(nφ) || (nφ == 1)) "GridError: Field simulation algorithm in cylindrical coordinates needs an even number of grid points in φ or just one point (2D). This is not the case. #φ-ticks = $(nφ)."
return nothing
end
function check_grid(grid::CartesianGrid3D{T})::Nothing where {T}
nx::Int, ny::Int, nz::Int = size(grid)
@assert iseven(nx) "GridError: Field simulation algorithm in cartesian coordinates needs an even number of grid points in x. This is not the case. #x-ticks = $(nx)."
return nothing
end
function get_coordinate_system(grid::Grid{T, N, S}) where {T, N, S}
return S
end
function get_number_of_dimensions(grid::Grid{T, N, S}) where {T, N, S}
return N
end
function Base.eltype(grid::Grid{T, N, S})::DataType where {T, N, S}
return T
end
function get_boundary_types(grid::Grid{T, N, S}) where {T, N, S}
return get_boundary_types.(grid.axes)
end
# Named tuples of ticks, used to sample quantities on differently spaced grids
const CartesianTicksTuple{T} = NamedTuple{(:x,:y,:z), NTuple{3,Vector{T}}}
const CylindricalTicksTuple{T} = NamedTuple{(:r,:φ,:z), NTuple{3,Vector{T}}}
TicksTuple(grid::CartesianGrid3D{T}) where {T} = (x = grid.axes[1].ticks, y = grid.axes[2].ticks, z = grid.axes[3].ticks)
TicksTuple(grid::CylindricalGrid{T}) where {T} = (r = grid.axes[1].ticks, φ = grid.axes[2].ticks, z = grid.axes[3].ticks)
function Grid(nt::NamedTuple)
if nt.coordtype == "cylindrical"
axr::DiscreteAxis = DiscreteAxis(nt.axes.r, unit=u"m")
axφ::DiscreteAxis = DiscreteAxis(nt.axes.phi, unit=u"rad")
axz::DiscreteAxis = DiscreteAxis(nt.axes.z, unit=u"m")
T = typeof(axr.ticks[1])
return CylindricalGrid{T}( (axr, axφ, axz) )
elseif nt.coordtype == "cartesian"
axx::DiscreteAxis = DiscreteAxis(nt.axes.x, unit=u"m")
axy::DiscreteAxis = DiscreteAxis(nt.axes.y, unit=u"m")
axz = DiscreteAxis(nt.axes.z, unit=u"m")
T = typeof(axx.ticks[1])
return CartesianGrid3D{T}( (axx, axy, axz) )
else
error("`coordtype` = $(nt.coordtype) is not valid.")
end
end
Base.convert(T::Type{Grid}, x::NamedTuple) = T(x)
function NamedTuple(grid::CylindricalGrid{T}) where {T}
axr::DiscreteAxis{T} = grid.axes[1]
axφ::DiscreteAxis{T} = grid.axes[2]
axz::DiscreteAxis{T} = grid.axes[3]
return (
coordtype = "cylindrical",
ndims = 3,
axes = (
r = NamedTuple(axr, unit=u"m"),
phi = NamedTuple(axφ, unit=u"rad"),
z = NamedTuple(axz, unit=u"m"),
)
)
end
function NamedTuple(grid::CartesianGrid3D{T}) where {T}
axx::DiscreteAxis{T} = grid.axes[1]
axy::DiscreteAxis{T} = grid.axes[2]
axz::DiscreteAxis{T} = grid.axes[3]
return (
coordtype = "cartesian",
ndims = 3,
axes = (
x = NamedTuple(axx, unit=u"m"),
y = NamedTuple(axy, unit=u"m"),
z = NamedTuple(axz, unit=u"m"),
)
)
end
Base.convert(T::Type{NamedTuple}, x::Grid) = T(x)
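# Round trip between Grid and NamedTuple (a sketch; assumes `grid` is an
# existing CylindricalGrid or CartesianGrid3D):
#
#   nt = NamedTuple(grid)   # coordtype, ndims and unit-annotated axes
#   grid2 = Grid(nt)        # reconstruct the grid from the NamedTuple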
function find_closest_gridpoint(pt::CylindricalPoint{T}, grid::CylindricalGrid{T})::NTuple{3,Int} where {T <: SSDFloat}
return (searchsortednearest(grid.axes[1].ticks, pt.r), searchsortednearest(grid.axes[2].ticks, pt.φ), searchsortednearest(grid.axes[3].ticks, pt.z))
end
function find_closest_gridpoint(pt::CartesianPoint{T}, grid::CylindricalGrid{T})::NTuple{3,Int} where {T <: SSDFloat}
find_closest_gridpoint(CylindricalPoint(pt),grid)
end
function find_closest_gridpoint(pt::CartesianPoint{T}, grid::CartesianGrid3D{T})::NTuple{3,Int} where {T <: SSDFloat}
@inbounds return (searchsortednearest(grid.axes[1].ticks, pt.x), searchsortednearest(grid.axes[2].ticks, pt.y), searchsortednearest(grid.axes[3].ticks, pt.z))
end
function find_closest_gridpoint(pt::CylindricalPoint{T}, grid::CartesianGrid3D{T})::NTuple{3,Int} where {T <: SSDFloat}
find_closest_gridpoint(CartesianPoint(pt),grid)
end
multiplicity(g::Grid) = prod(multiplicities(g))
function multiplicities(g::CylindricalGrid{T}) where {T}
mr = one(T)
mφ = T(2π) / width(g.axes[2].interval)
mz = multiplicity(g.axes[3], Cartesian)
mr, mφ, mz
end
multiplicities(g::CartesianGrid3D) = broadcast(ax -> multiplicity(ax, Cartesian), g.axes)
function voxel_widths(grid::CartesianGrid3D{T}, i1::Int, i2::Int, i3::Int) where {T}
wx::T = grid[1].ticks[i1 + 1] - grid[1].ticks[i1]
wy::T = grid[2].ticks[i2 + 1] - grid[2].ticks[i2]
wz::T = grid[3].ticks[i3 + 1] - grid[3].ticks[i3]
wx, wy, wz
end
function voxel_widths(grid::CylindricalGrid{T}, i1::Int, i2::Int, i3::Int) where {T}
wr::T = grid[1].ticks[i1 + 1] - grid[1].ticks[i1]
wφ::T = (grid[2].ticks[i2 + 1] - grid[2].ticks[i2]) * (grid[1].ticks[i1 + 1] + grid[1].ticks[i1])/2
wz::T = grid[3].ticks[i3 + 1] - grid[3].ticks[i3]
wr, wφ, wz
end
voxel_volume(grid::CylindricalGrid{T}, i1::Int, i2::Int, i3::Int, w1::T, w2::T, w3::T) where {T} =
(grid[2].ticks[i2 + 1] - grid[2].ticks[i2]) * w3 * (grid[1].ticks[i1 + 1]^2 - grid[1].ticks[i1]^2) / 2
voxel_volume(grid::CartesianGrid3D{T}, i1::Int, i2::Int, i3::Int, w1::T, w2::T, w3::T) where {T} =
w1 * w2 * w3
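# The cylindrical voxel volume above follows from integrating r dr dφ dz:
#   V = Δφ * Δz * (r_{i+1}^2 - r_i^2) / 2
# so only w3 (= Δz) of the precomputed widths enters; w1 and w2 are unused there.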
function get_extended_midpoints_grid(grid::Grid{T,3}) where {T}
ticks = broadcast(i -> midpoints(get_extended_ticks(grid.axes[i])), (1,2,3))
axes = broadcast(i -> typeof(grid.axes[i])(grid.axes[i].interval, ticks[i]) , (1,2,3))
typeof(grid)(axes)
end
|
{"hexsha": "9b9012ea0f415ab77eb29d7f9c00ea9678647822", "size": 9849, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Grids/Grids.jl", "max_stars_repo_name": "hervasa2/SolidStateDetectors.jl", "max_stars_repo_head_hexsha": "c640fc84c617fb5dc360aba43550c86e959e47a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2019-07-10T06:21:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T11:45:57.000Z", "max_issues_repo_path": "src/Grids/Grids.jl", "max_issues_repo_name": "hervasa2/SolidStateDetectors.jl", "max_issues_repo_head_hexsha": "c640fc84c617fb5dc360aba43550c86e959e47a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 183, "max_issues_repo_issues_event_min_datetime": "2019-07-05T09:54:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T14:45:25.000Z", "max_forks_repo_path": "src/Grids/Grids.jl", "max_forks_repo_name": "hervasa2/SolidStateDetectors.jl", "max_forks_repo_head_hexsha": "c640fc84c617fb5dc360aba43550c86e959e47a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-08-28T11:44:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-11T10:26:24.000Z", "avg_line_length": 45.5972222222, "max_line_length": 210, "alphanum_fraction": 0.6378312519, "num_tokens": 3203}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.python.platform import googletest
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.ipu import internal_ops
from tensorflow.python.ipu import ipu_compiler
from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_outfeed_queue
from tensorflow.python.ipu import pipelining_ops
from tensorflow.python.ipu import utils
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
class IterationCounterTest(test_util.TensorFlowTestCase):
@test_util.deprecated_graph_mode_only
def testIterationCounter(self):
gradient_accumulation_count = 10
repeat_count = 3
dataset = Dataset.range(gradient_accumulation_count * repeat_count)
dataset = dataset.map(lambda i: math_ops.cast(i, np.int32))
dataset = dataset.batch(batch_size=1, drop_remainder=True)
infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue()
def stage1(x):
with variable_scope.variable_scope("vs", use_resource=True):
c1 = internal_ops.get_current_iteration_counter()
return x, c1
def stage2(x, c1):
with variable_scope.variable_scope("vs", use_resource=True):
c2 = internal_ops.get_current_iteration_counter()
return x, c1, c2
def my_net():
return pipelining_ops.pipeline(
[stage1, stage2],
gradient_accumulation_count=gradient_accumulation_count,
repeat_count=repeat_count,
infeed_queue=infeed_queue,
outfeed_queue=outfeed_queue,
device_mapping=[0, 0])
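    # both pipeline stages are mapped to the same IPU (device_mapping=[0, 0]),
    # so auto-selecting a single IPU below is sufficient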
with ops.device("/device:IPU:0"):
r = ipu_compiler.compile(my_net, inputs=[])
dequeue = outfeed_queue.dequeue()
cfg = IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
with tu.ipu_session() as sess:
sess.run(infeed_queue.initializer)
sess.run(r)
_, c1, c2 = sess.run(dequeue)
      # the iteration counter restarts at zero for every repeat, so the outfeed
      # should hold 0..gradient_accumulation_count-1 repeated repeat_count times
      expected_numpy = np.tile(np.arange(gradient_accumulation_count),
                               reps=repeat_count)
self.assertAllEqual(c1, c2)
self.assertAllEqual(c1, expected_numpy)
if __name__ == "__main__":
googletest.main()
|
{"hexsha": "81e9b299ebaa4bb3e254ecc2c19253154d85278d", "size": 3139, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/python/ipu/tests/iteration_counter_test.py", "max_stars_repo_name": "chenzhengda/tensorflow", "max_stars_repo_head_hexsha": "8debb698097670458b5f21d728bc6f734a7b5a53", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-07-06T17:11:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T06:31:28.000Z", "max_issues_repo_path": "tensorflow/python/ipu/tests/iteration_counter_test.py", "max_issues_repo_name": "chenzhengda/tensorflow", "max_issues_repo_head_hexsha": "8debb698097670458b5f21d728bc6f734a7b5a53", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-10-13T23:25:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T06:54:48.000Z", "max_forks_repo_path": "tensorflow/python/ipu/tests/iteration_counter_test.py", "max_forks_repo_name": "chenzhengda/tensorflow", "max_forks_repo_head_hexsha": "8debb698097670458b5f21d728bc6f734a7b5a53", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-07-08T07:27:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-27T08:54:27.000Z", "avg_line_length": 35.2696629213, "max_line_length": 79, "alphanum_fraction": 0.7288945524, "include": true, "reason": "import numpy", "num_tokens": 695}
|
// All content Copyright (C) 2018 Genomics plc
#ifndef WECALL_REDUCE_HPP
#define WECALL_REDUCE_HPP
#include <iomanip>
#include <boost/program_options.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/bind.hpp>
#include <boost/thread/thread.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/filesystem/path.hpp>
#include "caller/jobReduce.hpp"
#include "common.hpp"
#include "caller/job.hpp"
#include "version/version.hpp"
#include "weCallBase.hpp"
namespace wecall
{
class weCallReduce : public weCallBase
{
public:
weCallReduce();
int processJob( int argc, char * argv[] );
private:
void initOptions();
};
}
#endif
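// A typical entry point constructs the reducer and forwards the CLI arguments
// (a sketch -- the actual main() lives elsewhere in the repository):
//
//   int main( int argc, char * argv[] )
//   {
//       wecall::weCallReduce reducer;
//       return reducer.processJob( argc, argv );
//   }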
|
{"hexsha": "09b6c682f706b8ca945a037786e4adc6ec97f8b3", "size": 651, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "cpp/src/weCallReduce.hpp", "max_stars_repo_name": "dylex/wecall", "max_stars_repo_head_hexsha": "35d24cefa4fba549e737cd99329ae1b17dd0156b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2018-10-08T15:47:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T07:13:05.000Z", "max_issues_repo_path": "cpp/src/weCallReduce.hpp", "max_issues_repo_name": "dylex/wecall", "max_issues_repo_head_hexsha": "35d24cefa4fba549e737cd99329ae1b17dd0156b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2018-11-05T09:16:27.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-09T12:32:56.000Z", "max_forks_repo_path": "cpp/src/weCallReduce.hpp", "max_forks_repo_name": "dylex/wecall", "max_forks_repo_head_hexsha": "35d24cefa4fba549e737cd99329ae1b17dd0156b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-09-03T15:46:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-04T07:28:33.000Z", "avg_line_length": 19.1470588235, "max_line_length": 46, "alphanum_fraction": 0.7434715822, "num_tokens": 159}
|
# Anthony Krivonos
# Nov 9th, 2018
# src/models/price.py
# Imports
import sys
import json
# Pandas
import pandas as pd
# NumPy
import numpy as np
# SciPy
import scipy.optimize as optimize
# Enums
from enums import *
# Math
from math import exp
# PriceModel
from models.price import *
# QuoteModel
from models.quote import *
# Utility
from utility import *
# Mathematics
from mathematics import *
# Matplotlib
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# mpl_finance provides the candlestick_ochl plot used below
# (assumed external dependency; not part of matplotlib itself)
import mpl_finance as mpf
# Abstract: Model storing a portfolio of quotes and their historical prices.
class Portfolio:
def __init__(self, query, quotes, name='Portfolio'):
# Set properties
self.__query = query
self.__quotes = quotes
self.__name = name
self.__symbol_map = {}
self.__total_assets = 0
self.__expected_return = 0
self.__covariance = 0
# Update assets
self.update_assets()
##
#
# MARK: - UPDATERS
#
##
# update_assets:Void
# NOTE: - Updates the total asset count and weights of each quote.
def update_assets(self):
self.__total_assets = 0
self.__symbol_map = {}
for quote in self.__quotes:
self.__total_assets += quote.count
self.__symbol_map[quote.symbol] = quote
if self.__total_assets > 0:
for quote in self.__quotes:
quote.weight = quote.count / self.__total_assets
else:
for quote in self.__quotes:
quote.weight = 0.0
market_data = self.get_market_data_tuple()
self.__expected_return = market_data[1] # Set portfolio return
self.__covariance = market_data[2] # Set portfolio covariance
##
#
# MARK: - CHECKERS
#
##
# is_symbol_in_portfolio:Boolean
# param symbol:String => A string stock symbol.
def is_symbol_in_portfolio(self, symbol):
return symbol in self.__symbol_map
    # get_quote_from_portfolio:Quote
    # param symbol:String => A string stock symbol.
def get_quote_from_portfolio(self, symbol):
return self.__symbol_map[symbol] if self.is_symbol_in_portfolio(symbol) else None
##
#
# MARK: - SETTERS
#
##
    # add_quote:Void
    # param quote:Quote => A quote object to add to the portfolio. Increments the count of an existing quote, otherwise appends it.
def add_quote(self, quote):
for i, q in enumerate(self.__quotes):
if q.symbol == quote.symbol:
self.__quotes[i].count += quote.count
self.update_assets()
return
self.__quotes.append(quote)
self.update_assets()
    # remove_quote:Void
    # param quote_or_symbol:Quote|String => A quote object or symbol string to remove from the portfolio, if it exists.
    def remove_quote(self, quote_or_symbol):
        for i, q in enumerate(self.__quotes):
            if (isinstance(quote_or_symbol, Quote) and q.symbol == quote_or_symbol.symbol) or quote_or_symbol == q.symbol:
                if isinstance(quote_or_symbol, Quote) and quote_or_symbol.count < self.__quotes[i].count:
                    # remove only part of the position
                    self.__quotes[i].count -= quote_or_symbol.count
                else:
                    # remove the entire position
                    self.__quotes.pop(i)
                self.update_assets()
                return
# set_name:Void
# param quotes:[Quote] => A list of quote objects to set.
def set_quotes(self, quotes):
self.__quotes = quotes
self.update_assets()
# set_name:Void
# param name:String => The name of the portfolio.
def set_name(self, name):
self.__name = name
##
#
# MARK: - GETTERS
#
##
# get_quotes:[Quote]
# Returns a list of quote objects in the portfolio.
def get_quotes(self):
return self.__quotes
# get_symbols:[String]
# Returns a list of symbols in the portfolio.
def get_symbols(self):
return list(map(lambda quote: quote.symbol, self.__quotes))
    # get_expected_return:Float
    # Returns a float percentage for the return of this portfolio.
def get_expected_return(self):
return self.__expected_return
    # get_covariance:Float
    # Returns the float covariance of this portfolio.
    # NOTE: - If > 0, the stocks in this portfolio are interrelated. Otherwise, not.
def get_covariance(self):
return self.__covariance
# get_history:[String:[Price]]
# param interval:Span => Time in between each value. (default: DAY)
# param span:Span => Range for the data to be returned. (default: YEAR)
# param bounds:Span => The bounds to be included. (default: REGULAR)
# returns Map of symbols to lists of Price models.
def get_history(self, interval = Span.DAY, span = Span.YEAR, bounds = Bounds.REGULAR):
historicals = {}
        for quote in self.__quotes:
            historicals[quote.symbol] = self.get_symbol_history(quote.symbol, interval, span, bounds)
return historicals
# get_history_tuple:([String:[Float:Price]], [Float])
# param interval:Span => Time in between each value. (default: DAY)
# param span:Span => Range for the data to be returned. (default: YEAR)
# param bounds:Span => The bounds to be included. (default: REGULAR)
# returns Tuple containing: (map of symbols to map of float timestamps to Price models, list of all times in historicals map).
def get_history_tuple(self, interval = Span.DAY, span = Span.YEAR, bounds = Bounds.REGULAR):
historicals = {}
times = {}
time_list = []
for quote in self.__quotes:
hist_map = {}
            hist_array = self.get_symbol_history(quote.symbol, interval, span, bounds)
for price in hist_array:
hist_map[price.time] = price
if price.time not in times:
times[price.time] = True
historicals[quote.symbol] = hist_map
for time in times:
time_list.append(time)
time_list = sorted(time_list)
return (historicals, time_list)
# get_history_tuples:[[(time, open, close, high, low)]]
# param interval:Span => Time in between each value. (default: DAY)
# param span:Span => Range for the data to be returned. (default: YEAR)
# param bounds:Span => The bounds to be included. (default: REGULAR)
# returns List of price tuples with the time, volume, open, close, high, low for each time in the interval.
def get_history_tuples(self, interval = Span.DAY, span = Span.YEAR, bounds = Bounds.REGULAR):
history = self.get_history(interval, span, bounds)
        for symbol in history:
            history[symbol] = [ quote.as_tuple() for quote in history[symbol] ]
return history
# get_symbol_history:[Price]
# param symbol:String => String symbol of the instrument.
# param interval:Span => Time in between each value. (default: DAY)
# param span:Span => Range for the data to be returned. (default: YEAR)
# param bounds:Span => The bounds to be included. (default: REGULAR)
# returns List of Price models with the time, volume, open, close, high, low for each time in the interval.
def get_symbol_history(self, symbol, interval = Span.DAY, span = Span.YEAR, bounds = Bounds.REGULAR):
historicals = self.__query.get_history(symbol, interval, span, bounds)
historicals = historicals['results'][0]['historicals']
historicals = list(map(lambda h: Price(Utility.datetime_to_float(Utility.iso_to_datetime(h['begins_at'])), float(h['open_price']), float(h['close_price']), float(h['high_price']), float(h['low_price'])), historicals))
return historicals
# get_symbol_history_map:[Float:Price]
# param symbol:String => String symbol of the instrument.
# param interval:Span => Time in between each value. (default: DAY)
# param span:Span => Range for the data to be returned. (default: YEAR)
# param bounds:Span => The bounds to be included. (default: REGULAR)
# returns Map of float timestamps to prices for the given symbol.
def get_symbol_history_map(self, symbol, interval = Span.DAY, span = Span.YEAR, bounds = Bounds.REGULAR):
historicals = self.__query.get_history(symbol, interval, span, bounds)
historicals = historicals['results'][0]['historicals']
historicals = list(map(lambda h: Price(Utility.datetime_to_float(Utility.iso_to_datetime(h['begins_at'])), float(h['open_price']), float(h['close_price']), float(h['high_price']), float(h['low_price'])), historicals))
history = {}
for price in historicals:
history[price.time] = price
return history
# get_portfolio_history:[Price]
# param interval:Span => Time in between each value. (default: DAY)
# param span:Span => Range for the data to be returned. (default: YEAR)
# param bounds:Span => The bounds to be included. (default: REGULAR)
# returns Map of Price model symbols to price tuples.
def get_portfolio_history(self, interval = Span.DAY, span = Span.YEAR, bounds = Bounds.REGULAR):
portfolio_history = {}
        for quote in self.__quotes:
portfolio_history[quote.symbol] = quote.price.as_tuple()
return portfolio_history
# get_market_data_tuple:(dataFrame, float, float, [float], [float])
# param interval:Span => Time in between each value. (default: DAY)
# param span:Span => Range for the data to be returned. (default: YEAR)
# param bounds:Span => The bounds to be included. (default: REGULAR)
# returns A tuple containing (dataFrame, float, float, [float], [float]).
def get_market_data_tuple(self, interval = Span.DAY, span = Span.YEAR, bounds = Bounds.REGULAR):
# Create dataFrame with times as rows, symbols as columns, and close prices as data
historicals = self.get_history(interval, span, bounds)
times = []
close_prices = []
weights = []
market_days = 0
        for quote in self.__quotes:
            t = []
            close_prices = []
            for price in historicals[quote.symbol]:
                if len(times) == 0:
                    t.append(price.time)
                close_prices.append(price.close)
            if len(times) == 0:
                times = t
                market_days = len(times)
            historicals[quote.symbol] = close_prices
            weights.append(quote.weight)
df = pd.DataFrame(historicals)
df.index = times
# Calculate the returns for the given data
returns = Math.get_returns(df, df.shift(1))
# Portfolio's return
portfolio_stats = self.get_portfolio_statistics(weights, returns)
portfolio_return = portfolio_stats[0]
portfolio_covariance = portfolio_stats[1]
return (
df,
portfolio_return,
portfolio_covariance,
returns,
weights
)
# get_portfolio_statistics:(float, float)
# param weights:[float] => List of weights per quote, in order.
# param returns:[float] => List of returns per quote, in order.
# returns A tuple containing (portfolio_return, portfolio_covariance).
    def get_portfolio_statistics(self, weights, returns):
        returns_mean = returns.mean()
        returns_cov = returns.cov()
        market_days = len(returns)
        portfolio_return = np.sum(returns_mean * weights) * market_days
        portfolio_covariance = np.sqrt(np.dot(np.transpose(weights), np.dot(returns_cov * market_days, weights)))
        return (portfolio_return, portfolio_covariance)
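    # Note: despite its name, the second component is the portfolio volatility
    # sqrt(w' * Cov * w * market_days), i.e. a standard deviation, while the
    # first is the weighted mean return accumulated over the same market days.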
##
#
# MARK: - PORTFOLIO ANALYSIS
#
##
    # sharpe_optimization:([Quote], float, float)
    # NOTE: - Optimizes according to the Sharpe ratio with the Markowitz model.
    # Returns a tuple with a list of quotes whose weights would produce the optimal portfolio for the given symbols, the optimized return, and the optimized covariance.
def sharpe_optimization(self):
quote_count = len(self.__quotes)
market_data = self.get_market_data_tuple()
returns = market_data[3]
market_days = len(returns)
portfolio_return = market_data[1]
portfolio_covariance = market_data[2]
weights = [ quote.weight for quote in self.__quotes ]
def min_sharpe_function(weights, returns):
cur_stats = self.get_portfolio_statistics(weights, returns)
return -cur_stats[0]/cur_stats[1]
# Optimization
constraints = ({ 'type': 'eq', 'fun': lambda x: np.sum(x) - 1 })
bounds = tuple((0, 1) for x in range(quote_count))
optimized_weights = optimize.minimize(fun=min_sharpe_function, x0=weights, args=returns, method='SLSQP', bounds=bounds, constraints=constraints)['x'].round(3)
optimized_quotes = []
for i, weight in enumerate(optimized_weights):
optimized_quotes.append(Quote(self.__quotes[i], weight*100, weight))
optimized_stats = self.get_portfolio_statistics(optimized_weights, returns)
optimized_return = optimized_stats[0]
optimized_covariance = optimized_stats[1]
return (
optimized_quotes,
optimized_return,
optimized_covariance
)
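    # Example (a sketch; assumes `p` is a constructed Portfolio):
    #
    #   quotes, ret, cov = p.sharpe_optimization()
    #   # `quotes` carry the weights (rounded to 3 decimals) maximizing the
    #   # Sharpe ratio ret/cov subject to sum(w) == 1 and 0 <= w <= 1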
##
#
# MARK: - PLOTTING
#
##
    # plot_historicals:Void
    # param is_candlestick_chart:Boolean => If true, plots a candlestick plot. Else, plots a line plot.
    # param legend_on:Boolean => If true, shows the legend. Else, hides the legend.
def plot_historicals(self, is_candlestick_chart = True, legend_on = True):
# Set Pandas properties
pd.options.display.max_columns = 3000
pd.options.display.max_rows = 3000
        # get_history_tuples returns a dict keyed by symbol; keep its values in
        # quote order for plotting
        historicals_list = list(self.get_history_tuples().values())
        colors = [Utility.get_random_hex() for _ in historicals_list]
fig, ax = plt.subplots(figsize=(8, 5))
fig.subplots_adjust(bottom=0.2)
legend = []
# Plot closes
for i, historicals in enumerate(historicals_list):
if is_candlestick_chart:
mpf.candlestick_ochl(ax, historicals, width=0.1, colorup=colors[i], colordown=colors[i])
else:
closes = list(map(lambda quote: quote[2], historicals))
dates = list(map(lambda quote: quote[0], historicals))
ax.plot(dates, closes, colors[i])
legend.append(mpatches.Patch(color=colors[i], label=self.__quotes[i].symbol))
# Set legend
if legend_on:
plt.legend(handles=legend)
        # Format the date axis once and rotate every tick label
        ax.xaxis.set_major_formatter(mpl.dates.DateFormatter('%Y-%m-%d'))
        ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(10))
        for label in ax.xaxis.get_ticklabels():
            label.set_rotation(45)
        ax.grid(True)
        plt.xlabel('Date')
        plt.ylabel('Price')
        plt.title(self.__name)
        plt.subplots_adjust(left=0.09, bottom=0.20, right=0.94, top=0.90, wspace=0.2, hspace=0)
        plt.show()
|
{"hexsha": "7da6f80d5902c184da615060b8a1a409555eee32", "size": 15611, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/portfolio.py", "max_stars_repo_name": "derricksmith/Quantico", "max_stars_repo_head_hexsha": "e8ff3da3813e0373b67ec489c8b78d70b5565034", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/portfolio.py", "max_issues_repo_name": "derricksmith/Quantico", "max_issues_repo_head_hexsha": "e8ff3da3813e0373b67ec489c8b78d70b5565034", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/portfolio.py", "max_forks_repo_name": "derricksmith/Quantico", "max_forks_repo_head_hexsha": "e8ff3da3813e0373b67ec489c8b78d70b5565034", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3562653563, "max_line_length": 225, "alphanum_fraction": 0.6432003075, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3555}
|
#!/usr/bin/env python
try:
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
except Exception as e:
print(e)
print("Not possible to set gpu allow growth")
import pandas as pd
def getPatterns(path, cv, sort):
    def norm1( data ):
        # L1-normalize each row; rows that sum to zero are left unchanged
        norms = np.abs( data.sum(axis=1) )
        norms[norms==0] = 1
        return data/norms[:,None]
from Gaugi import load
import numpy as np
d = load(path)
# new df
data_df = pd.DataFrame(data=d['data'], columns=d['features'])
# norm considering all rings
#all_rings = ['L2Calo_ring_%i' %iring for iring in range(100)]
#data_df.loc[:, all_rings] = norm1(data_df[all_rings].values)
    # for the new training, we selected 1/2 of the rings in each layer:
    # pre-sample - 8 rings
    # EM1  - 64 rings
    # EM2  - 8 rings
    # EM3  - 8 rings
    # Had1 - 4 rings
    # Had2 - 4 rings
    # Had3 - 4 rings
prefix = 'L2Calo_ring_%i'
    # rings presample
presample = [prefix %iring for iring in range(8//2)]
# EM1 list
sum_rings = 8
em1 = [prefix %iring for iring in range(sum_rings, sum_rings+(64//2))]
# EM2 list
sum_rings = 8+64
em2 = [prefix %iring for iring in range(sum_rings, sum_rings+(8//2))]
# EM3 list
sum_rings = 8+64+8
em3 = [prefix %iring for iring in range(sum_rings, sum_rings+(8//2))]
# HAD1 list
sum_rings = 8+64+8+8
had1 = [prefix %iring for iring in range(sum_rings, sum_rings+(4//2))]
# HAD2 list
sum_rings = 8+64+8+8+4
had2 = [prefix %iring for iring in range(sum_rings, sum_rings+(4//2))]
# HAD3 list
sum_rings = 8+64+8+8+4+4
had3 = [prefix %iring for iring in range(sum_rings, sum_rings+(4//2))]
selection_list = presample+em1+em2+em3+had1+had2+had3
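    # 4 + 32 + 4 + 4 + 2 + 2 + 2 = 50 rings in total, i.e. half of the 100 rings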
#data = norm1(d['data'][:,1:101])
# normalization considering only in half of rings
data = norm1(data_df[selection_list].values)
# normalization considering all rings
#data = data_df[selected_rings].values
target = d['target']
target[target!=1]=-1
splits = [(train_index, val_index) for train_index, val_index in cv.split(data,target)]
x_train = data [ splits[sort][0]]
y_train = target [ splits[sort][0] ]
x_val = data [ splits[sort][1]]
y_val = target [ splits[sort][1] ]
return x_train, x_val, y_train, y_val, splits, []
def getPileup( path ):
from Gaugi import load
return load(path)['data'][:,0]
def getJobConfigId( path ):
from Gaugi import load
return dict(load(path))['id']
import argparse
import sys,os
parser = argparse.ArgumentParser(description = '')
parser.add_argument('-c','--configFile', action='store',
dest='configFile', required = True,
help = "The job config file that will be used to configure the job (sort and init).")
parser.add_argument('-v','--volume', action='store',
dest='volume', required = False, default = None,
help = "The volume output.")
parser.add_argument('-d','--dataFile', action='store',
dest='dataFile', required = True, default = None,
help = "The data/target file used to train the model.")
parser.add_argument('-r','--refFile', action='store',
dest='refFile', required = True, default = None,
help = "The reference file.")
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
try:
job_id = getJobConfigId( args.configFile )
outputFile = args.volume+'/tunedDiscr.jobID_%s'%str(job_id).zfill(4) if args.volume else 'test.jobId_%s'%str(job_id).zfill(4)
targets = [
('tight_cutbased' , 'T0HLTElectronT2CaloTight' ),
('medium_cutbased', 'T0HLTElectronT2CaloMedium' ),
('loose_cutbased' , 'T0HLTElectronT2CaloLoose' ),
('vloose_cutbased', 'T0HLTElectronT2CaloVLoose' ),
]
from saphyra.decorators import Summary, Reference
decorators = [Summary(), Reference(args.refFile, targets)]
from saphyra.callbacks import sp
from saphyra import PatternGenerator
from sklearn.model_selection import StratifiedKFold
from saphyra.applications import BinaryClassificationJob
job = BinaryClassificationJob( PatternGenerator( args.dataFile, getPatterns ),
StratifiedKFold(n_splits=10, random_state=512, shuffle=True),
job = args.configFile,
loss = 'mean_squared_error',
metrics = ['accuracy'],
callbacks = [sp(patience=25, verbose=True, save_the_best=True)],
epochs = 5000,
class_weight = False,
outputFile = outputFile )
job.decorators += decorators
# Run it!
job.run()
# necessary to work on orchestra
from saphyra import lock_as_completed_job
lock_as_completed_job(args.volume if args.volume else '.')
sys.exit(0)
except Exception as e:
print(e)
# necessary to work on orchestra
from saphyra import lock_as_failed_job
lock_as_failed_job(args.volume if args.volume else '.')
sys.exit(1)
|
{"hexsha": "d96362a477fbb0703ce5e057bfd123d215ef4345", "size": 5391, "ext": "py", "lang": "Python", "max_stars_repo_path": "tunings/run_tuning.py", "max_stars_repo_name": "natmourajr/CERN-ATLAS-Qualify", "max_stars_repo_head_hexsha": "9b40106df97df5f75ba09a7acbbc763a9fdbb8b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tunings/run_tuning.py", "max_issues_repo_name": "natmourajr/CERN-ATLAS-Qualify", "max_issues_repo_head_hexsha": "9b40106df97df5f75ba09a7acbbc763a9fdbb8b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tunings/run_tuning.py", "max_forks_repo_name": "natmourajr/CERN-ATLAS-Qualify", "max_forks_repo_head_hexsha": "9b40106df97df5f75ba09a7acbbc763a9fdbb8b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-17T15:29:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T15:29:27.000Z", "avg_line_length": 28.078125, "max_line_length": 127, "alphanum_fraction": 0.6277128548, "include": true, "reason": "import numpy", "num_tokens": 1488}
|
{-# OPTIONS --safe #-}
module Cubical.Algebra.CommAlgebra.FreeCommAlgebra.Properties where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.HLevels
open import Cubical.Foundations.Structure
open import Cubical.Foundations.Function hiding (const)
open import Cubical.Foundations.Isomorphism
open import Cubical.Data.Sigma.Properties using (Σ≡Prop)
open import Cubical.HITs.SetTruncation
open import Cubical.Algebra.CommRing
open import Cubical.Algebra.CommAlgebra.FreeCommAlgebra.Base
open import Cubical.Algebra.Ring using ()
open import Cubical.Algebra.CommAlgebra
open import Cubical.Algebra.CommAlgebra.Instances.Initial
open import Cubical.Algebra.Algebra
open import Cubical.Data.Empty
open import Cubical.Data.Sigma
private
variable
ℓ ℓ' ℓ'' : Level
module Theory {R : CommRing ℓ} {I : Type ℓ'} where
open CommRingStr (snd R)
using (0r; 1r)
renaming (_·_ to _·r_; _+_ to _+r_; ·Comm to ·r-comm; ·Rid to ·r-rid)
module _ (A : CommAlgebra R ℓ'') (φ : I → ⟨ A ⟩) where
open CommAlgebraStr (A .snd)
open AlgebraTheory (CommRing→Ring R) (CommAlgebra→Algebra A)
open Construction using (var; const) renaming (_+_ to _+c_; -_ to -c_; _·_ to _·c_)
imageOf0Works : 0r ⋆ 1a ≡ 0a
imageOf0Works = 0-actsNullifying 1a
imageOf1Works : 1r ⋆ 1a ≡ 1a
imageOf1Works = ⋆-lid 1a
inducedMap : ⟨ R [ I ] ⟩ → ⟨ A ⟩
inducedMap (var x) = φ x
inducedMap (const r) = r ⋆ 1a
inducedMap (P +c Q) = (inducedMap P) + (inducedMap Q)
inducedMap (-c P) = - inducedMap P
inducedMap (Construction.+-assoc P Q S i) = +-assoc (inducedMap P) (inducedMap Q) (inducedMap S) i
inducedMap (Construction.+-rid P i) =
let
eq : (inducedMap P) + (inducedMap (const 0r)) ≡ (inducedMap P)
eq = (inducedMap P) + (inducedMap (const 0r)) ≡⟨ refl ⟩
(inducedMap P) + (0r ⋆ 1a) ≡⟨ cong
(λ u → (inducedMap P) + u)
(imageOf0Works) ⟩
(inducedMap P) + 0a ≡⟨ +-rid _ ⟩
(inducedMap P) ∎
in eq i
inducedMap (Construction.+-rinv P i) =
let eq : (inducedMap P - inducedMap P) ≡ (inducedMap (const 0r))
eq = (inducedMap P - inducedMap P) ≡⟨ +-rinv _ ⟩
0a ≡⟨ sym imageOf0Works ⟩
(inducedMap (const 0r))∎
in eq i
inducedMap (Construction.+-comm P Q i) = +-comm (inducedMap P) (inducedMap Q) i
inducedMap (P ·c Q) = inducedMap P · inducedMap Q
inducedMap (Construction.·-assoc P Q S i) = ·Assoc (inducedMap P) (inducedMap Q) (inducedMap S) i
inducedMap (Construction.·-lid P i) =
let eq = inducedMap (const 1r) · inducedMap P ≡⟨ cong (λ u → u · inducedMap P) imageOf1Works ⟩
1a · inducedMap P ≡⟨ ·Lid (inducedMap P) ⟩
inducedMap P ∎
in eq i
inducedMap (Construction.·-comm P Q i) = ·-comm (inducedMap P) (inducedMap Q) i
inducedMap (Construction.ldist P Q S i) = ·Ldist+ (inducedMap P) (inducedMap Q) (inducedMap S) i
inducedMap (Construction.+HomConst s t i) = ⋆-ldist s t 1a i
inducedMap (Construction.·HomConst s t i) =
let eq = (s ·r t) ⋆ 1a ≡⟨ cong (λ u → u ⋆ 1a) (·r-comm _ _) ⟩
(t ·r s) ⋆ 1a ≡⟨ ⋆-assoc t s 1a ⟩
t ⋆ (s ⋆ 1a) ≡⟨ cong (λ u → t ⋆ u) (sym (·Rid _)) ⟩
t ⋆ ((s ⋆ 1a) · 1a) ≡⟨ ⋆-rassoc t (s ⋆ 1a) 1a ⟩
(s ⋆ 1a) · (t ⋆ 1a) ∎
in eq i
inducedMap (Construction.0-trunc P Q p q i j) =
isSetAlgebra (CommAlgebra→Algebra A) (inducedMap P) (inducedMap Q) (cong _ p) (cong _ q) i j
module _ where
open IsAlgebraHom
inducedHom : AlgebraHom (CommAlgebra→Algebra (R [ I ])) (CommAlgebra→Algebra A)
inducedHom .fst = inducedMap
inducedHom .snd .pres0 = 0-actsNullifying _
inducedHom .snd .pres1 = imageOf1Works
inducedHom .snd .pres+ x y = refl
inducedHom .snd .pres· x y = refl
inducedHom .snd .pres- x = refl
inducedHom .snd .pres⋆ r x =
(r ⋆ 1a) · inducedMap x ≡⟨ ⋆-lassoc r 1a (inducedMap x) ⟩
r ⋆ (1a · inducedMap x) ≡⟨ cong (λ u → r ⋆ u) (·Lid (inducedMap x)) ⟩
r ⋆ inducedMap x ∎
module _ (A : CommAlgebra R ℓ'') where
open CommAlgebraStr (A .snd)
open AlgebraTheory (CommRing→Ring R) (CommAlgebra→Algebra A)
open Construction using (var; const) renaming (_+_ to _+c_; -_ to -c_; _·_ to _·c_)
Hom = CommAlgebraHom (R [ I ]) A
open IsAlgebraHom
evaluateAt : Hom → I → ⟨ A ⟩
evaluateAt φ x = φ .fst (var x)
mapRetrievable : ∀ (φ : I → ⟨ A ⟩)
→ evaluateAt (inducedHom A φ) ≡ φ
mapRetrievable φ = refl
proveEq : ∀ {X : Type ℓ''} (isSetX : isSet X) (f g : ⟨ R [ I ] ⟩ → X)
→ (var-eq : (x : I) → f (var x) ≡ g (var x))
→ (const-eq : (r : ⟨ R ⟩) → f (const r) ≡ g (const r))
→ (+-eq : (x y : ⟨ R [ I ] ⟩) → (eq-x : f x ≡ g x) → (eq-y : f y ≡ g y)
→ f (x +c y) ≡ g (x +c y))
→ (·-eq : (x y : ⟨ R [ I ] ⟩) → (eq-x : f x ≡ g x) → (eq-y : f y ≡ g y)
→ f (x ·c y) ≡ g (x ·c y))
→ (-eq : (x : ⟨ R [ I ] ⟩) → (eq-x : f x ≡ g x)
→ f (-c x) ≡ g (-c x))
→ f ≡ g
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (var x) = var-eq x i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (const x) = const-eq x i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (x +c y) =
+-eq x y
(λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
(λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i y)
i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (-c x) =
-eq x ((λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)) i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (x ·c y) =
·-eq x y
(λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
(λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i y)
i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.+-assoc x y z j) =
let
rec : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
rec x = (λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
a₀₋ : f (x +c (y +c z)) ≡ g (x +c (y +c z))
a₀₋ = +-eq _ _ (rec x) (+-eq _ _ (rec y) (rec z))
a₁₋ : f ((x +c y) +c z) ≡ g ((x +c y) +c z)
a₁₋ = +-eq _ _ (+-eq _ _ (rec x) (rec y)) (rec z)
a₋₀ : f (x +c (y +c z)) ≡ f ((x +c y) +c z)
a₋₀ = cong f (Construction.+-assoc x y z)
a₋₁ : g (x +c (y +c z)) ≡ g ((x +c y) +c z)
a₋₁ = cong g (Construction.+-assoc x y z)
in isSet→isSet' isSetX a₀₋ a₁₋ a₋₀ a₋₁ j i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.+-rid x j) =
let
rec : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
rec x = (λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
a₀₋ : f (x +c (const 0r)) ≡ g (x +c (const 0r))
a₀₋ = +-eq _ _ (rec x) (const-eq 0r)
a₁₋ : f x ≡ g x
a₁₋ = rec x
a₋₀ : f (x +c (const 0r)) ≡ f x
a₋₀ = cong f (Construction.+-rid x)
a₋₁ : g (x +c (const 0r)) ≡ g x
a₋₁ = cong g (Construction.+-rid x)
in isSet→isSet' isSetX a₀₋ a₁₋ a₋₀ a₋₁ j i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.+-rinv x j) =
let
rec : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
rec x = (λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
a₀₋ : f (x +c (-c x)) ≡ g (x +c (-c x))
a₀₋ = +-eq x (-c x) (rec x) (-eq x (rec x))
a₁₋ : f (const 0r) ≡ g (const 0r)
a₁₋ = const-eq 0r
a₋₀ : f (x +c (-c x)) ≡ f (const 0r)
a₋₀ = cong f (Construction.+-rinv x)
a₋₁ : g (x +c (-c x)) ≡ g (const 0r)
a₋₁ = cong g (Construction.+-rinv x)
in isSet→isSet' isSetX a₀₋ a₁₋ a₋₀ a₋₁ j i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.+-comm x y j) =
let
rec : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
rec x = (λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
a₀₋ : f (x +c y) ≡ g (x +c y)
a₀₋ = +-eq x y (rec x) (rec y)
a₁₋ : f (y +c x) ≡ g (y +c x)
a₁₋ = +-eq y x (rec y) (rec x)
a₋₀ : f (x +c y) ≡ f (y +c x)
a₋₀ = cong f (Construction.+-comm x y)
a₋₁ : g (x +c y) ≡ g (y +c x)
a₋₁ = cong g (Construction.+-comm x y)
in isSet→isSet' isSetX a₀₋ a₁₋ a₋₀ a₋₁ j i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.·-assoc x y z j) =
let
rec : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
rec x = (λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
a₀₋ : f (x ·c (y ·c z)) ≡ g (x ·c (y ·c z))
a₀₋ = ·-eq _ _ (rec x) (·-eq _ _ (rec y) (rec z))
a₁₋ : f ((x ·c y) ·c z) ≡ g ((x ·c y) ·c z)
a₁₋ = ·-eq _ _ (·-eq _ _ (rec x) (rec y)) (rec z)
a₋₀ : f (x ·c (y ·c z)) ≡ f ((x ·c y) ·c z)
a₋₀ = cong f (Construction.·-assoc x y z)
a₋₁ : g (x ·c (y ·c z)) ≡ g ((x ·c y) ·c z)
a₋₁ = cong g (Construction.·-assoc x y z)
in isSet→isSet' isSetX a₀₋ a₁₋ a₋₀ a₋₁ j i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.·-lid x j) =
let
rec : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
rec x = (λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
a₀₋ : f ((const 1r) ·c x) ≡ g ((const 1r) ·c x)
a₀₋ = ·-eq _ _ (const-eq 1r) (rec x)
a₁₋ : f x ≡ g x
a₁₋ = rec x
a₋₀ : f ((const 1r) ·c x) ≡ f x
a₋₀ = cong f (Construction.·-lid x)
a₋₁ : g ((const 1r) ·c x) ≡ g x
a₋₁ = cong g (Construction.·-lid x)
in isSet→isSet' isSetX a₀₋ a₁₋ a₋₀ a₋₁ j i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.·-comm x y j) =
let
rec : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
rec x = (λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
a₀₋ : f (x ·c y) ≡ g (x ·c y)
a₀₋ = ·-eq _ _ (rec x) (rec y)
a₁₋ : f (y ·c x) ≡ g (y ·c x)
a₁₋ = ·-eq _ _ (rec y) (rec x)
a₋₀ : f (x ·c y) ≡ f (y ·c x)
a₋₀ = cong f (Construction.·-comm x y)
a₋₁ : g (x ·c y) ≡ g (y ·c x)
a₋₁ = cong g (Construction.·-comm x y)
in isSet→isSet' isSetX a₀₋ a₁₋ a₋₀ a₋₁ j i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.ldist x y z j) =
let
rec : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
rec x = (λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
a₀₋ : f ((x +c y) ·c z) ≡ g ((x +c y) ·c z)
a₀₋ = ·-eq (x +c y) z
(+-eq _ _ (rec x) (rec y))
(rec z)
a₁₋ : f ((x ·c z) +c (y ·c z)) ≡ g ((x ·c z) +c (y ·c z))
a₁₋ = +-eq _ _ (·-eq _ _ (rec x) (rec z)) (·-eq _ _ (rec y) (rec z))
a₋₀ : f ((x +c y) ·c z) ≡ f ((x ·c z) +c (y ·c z))
a₋₀ = cong f (Construction.ldist x y z)
a₋₁ : g ((x +c y) ·c z) ≡ g ((x ·c z) +c (y ·c z))
a₋₁ = cong g (Construction.ldist x y z)
in isSet→isSet' isSetX a₀₋ a₁₋ a₋₀ a₋₁ j i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.+HomConst s t j) =
let
rec : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
rec x = (λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
a₀₋ : f (const (s +r t)) ≡ g (const (s +r t))
a₀₋ = const-eq (s +r t)
a₁₋ : f (const s +c const t) ≡ g (const s +c const t)
a₁₋ = +-eq _ _ (const-eq s) (const-eq t)
a₋₀ : f (const (s +r t)) ≡ f (const s +c const t)
a₋₀ = cong f (Construction.+HomConst s t)
a₋₁ : g (const (s +r t)) ≡ g (const s +c const t)
a₋₁ = cong g (Construction.+HomConst s t)
in isSet→isSet' isSetX a₀₋ a₁₋ a₋₀ a₋₁ j i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.·HomConst s t j) =
let
rec : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
rec x = (λ i → proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x)
a₀₋ : f (const (s ·r t)) ≡ g (const (s ·r t))
a₀₋ = const-eq (s ·r t)
a₁₋ : f (const s ·c const t) ≡ g (const s ·c const t)
a₁₋ = ·-eq _ _ (const-eq s) (const-eq t)
a₋₀ : f (const (s ·r t)) ≡ f (const s ·c const t)
a₋₀ = cong f (Construction.·HomConst s t)
a₋₁ : g (const (s ·r t)) ≡ g (const s ·c const t)
a₋₁ = cong g (Construction.·HomConst s t)
in isSet→isSet' isSetX a₀₋ a₁₋ a₋₀ a₋₁ j i
proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i (Construction.0-trunc x y p q j k) =
let
P : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
P x i = proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x
Q : (x : ⟨ R [ I ] ⟩) → f x ≡ g x
Q x i = proveEq isSetX f g var-eq const-eq +-eq ·-eq -eq i x
in isOfHLevel→isOfHLevelDep 2
(λ z → isProp→isSet (isSetX (f z) (g z))) _ _
(cong P p)
(cong Q q)
(Construction.0-trunc x y p q) j k i
homRetrievable : ∀ (f : Hom)
→ inducedMap A (evaluateAt f) ≡ fst f
homRetrievable f =
proveEq
(isSetAlgebra (CommAlgebra→Algebra A))
(inducedMap A (evaluateAt f))
(λ x → f $a x)
(λ x → refl)
(λ r → r ⋆ 1a ≡⟨ cong (λ u → r ⋆ u) (sym f.pres1) ⟩
r ⋆ (f $a (const 1r)) ≡⟨ sym (f.pres⋆ r _) ⟩
f $a (const r ·c const 1r) ≡⟨ cong (λ u → f $a u) (sym (Construction.·HomConst r 1r)) ⟩
f $a (const (r ·r 1r)) ≡⟨ cong (λ u → f $a (const u)) (·r-rid r) ⟩
f $a (const r) ∎)
(λ x y eq-x eq-y →
ι (x +c y) ≡⟨ refl ⟩
(ι x + ι y) ≡⟨ cong (λ u → u + ι y) eq-x ⟩
((f $a x) + ι y) ≡⟨
cong (λ u → (f $a x) + u) eq-y ⟩
((f $a x) + (f $a y)) ≡⟨ sym (f.pres+ _ _) ⟩ (f $a (x +c y)) ∎)
(λ x y eq-x eq-y →
ι (x ·c y) ≡⟨ refl ⟩
ι x · ι y ≡⟨ cong (λ u → u · ι y) eq-x ⟩
(f $a x) · (ι y) ≡⟨ cong (λ u → (f $a x) · u) eq-y ⟩
(f $a x) · (f $a y) ≡⟨ sym (f.pres· _ _) ⟩
f $a (x ·c y) ∎)
(λ x eq-x →
ι (-c x) ≡⟨ refl ⟩
- ι x ≡⟨ cong (λ u → - u) eq-x ⟩
- (f $a x) ≡⟨ sym (f.pres- x) ⟩
f $a (-c x) ∎)
where
ι = inducedMap A (evaluateAt f)
module f = IsAlgebraHom (f .snd)
evaluateAt : {R : CommRing ℓ} {I : Type ℓ'} (A : CommAlgebra R ℓ'')
(f : CommAlgebraHom (R [ I ]) A)
→ (I → fst A)
evaluateAt A f x = f $a (Construction.var x)
inducedHom : {R : CommRing ℓ} {I : Type ℓ'} (A : CommAlgebra R ℓ'')
(φ : I → fst A )
→ CommAlgebraHom (R [ I ]) A
inducedHom A φ = Theory.inducedHom A φ
homMapIso : {R : CommRing ℓ} {I : Type ℓ} (A : CommAlgebra R ℓ')
→ Iso (CommAlgebraHom (R [ I ]) A) (I → (fst A))
Iso.fun (homMapIso A) = evaluateAt A
Iso.inv (homMapIso A) = inducedHom A
Iso.rightInv (homMapIso A) = λ ϕ → Theory.mapRetrievable A ϕ
Iso.leftInv (homMapIso {R = R} {I = I} A) =
λ f → Σ≡Prop (λ f → isPropIsCommAlgebraHom {M = R [ I ]} {N = A} f)
(Theory.homRetrievable A f)
homMapPath : {R : CommRing ℓ} {I : Type ℓ} (A : CommAlgebra R ℓ')
→ CommAlgebraHom (R [ I ]) A ≡ (I → fst A)
homMapPath A = isoToPath (homMapIso A)
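{-
  homMapIso/homMapPath package the universal property of the free commutative
  algebra: R-algebra homomorphisms out of R [ I ] correspond exactly to maps
  I → ⟨ A ⟩, by evaluating at the variables.
-}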
module _ {R : CommRing ℓ} {A B : CommAlgebra R ℓ''} where
open AlgebraHoms
A′ = CommAlgebra→Algebra A
B′ = CommAlgebra→Algebra B
R′ = (CommRing→Ring R)
ν : AlgebraHom A′ B′ → (⟨ A ⟩ → ⟨ B ⟩)
ν φ = φ .fst
{-
Hom(R[I],A) → (I → A)
↓ ↓
Hom(R[I],B) → (I → B)
-}
naturalR : {I : Type ℓ'} (ψ : CommAlgebraHom A B)
(f : CommAlgebraHom (R [ I ]) A)
→ (fst ψ) ∘ evaluateAt A f ≡ evaluateAt B (ψ ∘a f)
naturalR ψ f = refl
{-
Hom(R[I],A) → (I → A)
↓ ↓
Hom(R[J],A) → (J → A)
-}
naturalL : {I J : Type ℓ'} (φ : J → I)
(f : CommAlgebraHom (R [ I ]) A)
→ (evaluateAt A f) ∘ φ
≡ evaluateAt A (f ∘a (inducedHom (R [ I ]) (λ x → Construction.var (φ x))))
naturalL φ f = refl
module _ {R : CommRing ℓ} where
  {-
    Prove that the FreeCommAlgebra over R on zero generators is
    isomorphic to the initial R-algebra - R itself.
  -}
freeOn⊥ : CommAlgebraEquiv (R [ ⊥ ]) (initialCAlg R)
freeOn⊥ =
equivByInitiality
R (R [ ⊥ ])
      {- Show that R[⊥] has the universal property of the
         initial R-algebra and conclude that those are isomorphic -}
λ B → let to : CommAlgebraHom (R [ ⊥ ]) B → (⊥ → fst B)
to = evaluateAt B
from : (⊥ → fst B) → CommAlgebraHom (R [ ⊥ ]) B
from = inducedHom B
from-to : (x : _) → from (to x) ≡ x
from-to x =
Σ≡Prop (λ f → isPropIsCommAlgebraHom {M = R [ ⊥ ]} {N = B} f)
(Theory.homRetrievable B x)
equiv : CommAlgebraHom (R [ ⊥ ]) B ≃ (⊥ → fst B)
equiv =
isoToEquiv
(iso to from (λ x → isContr→isOfHLevel 1 isContr⊥→A _ _) from-to)
in isOfHLevelRespectEquiv 0 (invEquiv equiv) isContr⊥→A
|
{"hexsha": "e02ddc0e7f46d17ba799a046defa42d5c6f48558", "size": 17635, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Cubical/Algebra/CommAlgebra/FreeCommAlgebra/Properties.agda", "max_stars_repo_name": "howsiyu/cubical", "max_stars_repo_head_hexsha": "1b9c97a2140fe96fe636f4c66beedfd7b8096e8f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Cubical/Algebra/CommAlgebra/FreeCommAlgebra/Properties.agda", "max_issues_repo_name": "howsiyu/cubical", "max_issues_repo_head_hexsha": "1b9c97a2140fe96fe636f4c66beedfd7b8096e8f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Cubical/Algebra/CommAlgebra/FreeCommAlgebra/Properties.agda", "max_forks_repo_name": "howsiyu/cubical", "max_forks_repo_head_hexsha": "1b9c97a2140fe96fe636f4c66beedfd7b8096e8f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4939759036, "max_line_length": 102, "alphanum_fraction": 0.4792174653, "num_tokens": 7165}
|
source("utils/rtools.r");
list.packages = c("stats", "utils", "Rcpp", "stringr", "jsonlite")
install_missing(list.packages)
sourceCpp('utils/parseParams.cpp')
params <- list(
wantedCol="x_OfSpectra",
pthreshold=0.05
);
params$twoStats <- list( # stats comparing 2 test groups
"wilcoxon"=function(x,y) tryCatch(wilcox.test(x,y)$p.value, error=function(cond) return(NaN))
)
params$multiStats <- list( # stats comparing value to test group
"anova"=function(x,y) null_na(summary(aov(x~y))[[1]][1,5])
)
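# Further tests can be registered as named functions returning a p-value,
# e.g. (a sketch, not enabled by default):
#   params$twoStats[["ttest"]] <- function(x,y) tryCatch(t.test(x,y)$p.value, error=function(cond) return(NaN))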
params <- mergeList(parseParams('statTests.r'), params);
if (!('name' %in% names(params))) {
params$name <- readInput("dataset name:");
}
homedir <- fileExists(file.path("Results",params$name), paste("Dataset", params$name, "cannot be found. Please run combinedHM to generate HeatMap data before this program is run."));
setwd(homedir);
dir.create("StatTests", showWarnings = FALSE)
dir.create("StatsHeatMap", showWarnings = FALSE)
fids <- read.csv("fileIDs.csv");
dataset.groupids <- unique(fids$Test_Group) #unique test groups
hms <- list.files(path="HeatMap/Files"); #All heatmaps generated
parsedHM <- str_match(hms, "(.+)\\.csv")[,2]
if (length(dataset.groupids) > 50){
message("Number of test groups exceeds 50. 2d statistics tests will be calculated unless a list of pairs is provided.")
stopQuietly();
}
# json list
jsonData <- list()
# all pairs of files
cnames <- combn(dataset.groupids, 2)
significanceJSON = list()
for(hmid in 1:length(hms)){
hm <- hms[hmid]
hmname <- unlist(strsplit(hm, ".", fixed=TRUE))[1];
f <- read.csv(file.path("HeatMap", "Files", hm));
groups <- list();
significance <- read.csv(file.path("Significance", hmname, "raw.csv"));
significance$statTests = vector(mode="character", length=NROW(significance));
for(x in 1:length(dataset.groupids)){
fnames <- fids$ID[fids$Test_Group==dataset.groupids[x]] # get file ids in this group
colnames <- paste(params$wantedCol, fnames, sep="_");
selected <- f[,colnames];
groups[[x]] <- as.data.frame(selected);
}
colnames <- paste(params$wantedCol, 1:length(fids$ID), sep="_")
ungrouped <- f[,colnames];
colnames(ungrouped) <- fids$Test_Group;
statTables <- list()
for (stat in names(params$twoStats)) {
statTables[[stat]] <- data.frame(matrix(NA, nrow = NROW(f), ncol = NCOL(cnames)));
for(col in 1:NCOL(cnames)) {
pair <- cnames[,col];
colnames(statTables[[stat]])[[col]] <- paste(pair, collapse="_");
for(row in 1:NROW(f)) {
p <- lapply(pair, function(i) as.numeric(groups[[i]][row,]))
statTables[[stat]][row,col] <- params$twoStats[[stat]](p[[1]], p[[2]]);
if (f$Row_Type[row] == 1 && !is.nan(statTables[[stat]][row,col]) && !is.na(statTables[[stat]][row,col]) && statTables[[stat]][row,col] < params$pthreshold) {
significance[significance$Rank_Number==f$Rank_Number[[row]],"statTests"] = paste(significance[significance$Rank_Number==f$Rank_Number[[row]],"statTests"],
"P value of ", formatSig(statTables[[stat]][row,col], 4), " for 2D test ", stat, " between groups ", pair[1], "&", pair[2], "\n", sep="");
}
}
}
statTables[[stat]] <- cbind(f[c('Rank_Number','Protein_Name','Gene_Name')], statTables[[stat]], f['Row_Type'])
write.csv(statTables[[stat]], file=file.path("StatTests", paste(hmname, '_', stat, '.csv', sep='')), row.names=FALSE)
}
multiName <- "MultiDim"
statTables[[multiName]] <- data.frame(matrix(NA, nrow = NROW(f), ncol = length(params$multiStats)));
for (col in 1:length(params$multiStats)) {
stat <- names(params$multiStats)[[col]];
colnames(statTables[[multiName]])[[col]] <- stat;
for(row in 1:NROW(f)) {
statTables[[multiName]][row,col] <- params$multiStats[[stat]](as.numeric(colnames(ungrouped)), as.numeric(ungrouped[row,]));
if (f$Row_Type[row] == 1 && !is.nan(statTables[[multiName]][row,col]) && !is.na(statTables[[multiName]][row,col]) && statTables[[multiName]][row,col] < params$pthreshold) {
significance[significance$Rank_Number==f$Rank_Number[[row]],"statTests"] = paste(significance[significance$Rank_Number==f$Rank_Number[[row]],"statTests"],
"P value of ", formatSig(statTables[[multiName]][row,col], 4), " for MultiDim test ", stat, "\n", sep="");
}
}
}
  write.csv(cbind(f[,names(f)!="Row_Type"], statTables[[multiName]], f['Row_Type']), file=file.path("StatsHeatMap", paste(hmname, '.csv', sep='')), row.names=FALSE) # names(f), not names(mtcars): drop Row_Type so it can be re-appended last
statTables[[multiName]] <- cbind(f[c('Rank_Number','Protein_Name','Gene_Name')], statTables[[multiName]], f['Row_Type'])
write.csv(statTables[[multiName]], file=file.path("StatTests", paste(hmname, '_', multiName, '.csv', sep='')), row.names=FALSE)
jsonData$StatTests[[hmid]] <- list(name=hmname, data=statTables);
# print(statTables);
write.csv(significance, file=file.path("Significance", hmname, "raw.csv"), row.names=FALSE);
significanceJSON[[hmid]] = list(name=hmname, data=significance);
}
write(toJSON(list(Significance=significanceJSON), auto_unbox=TRUE), file=file.path("Raws", "significance.json"));
write(toJSON(jsonData, auto_unbox=TRUE), file=file.path("Raws", "statTests.json"));
|
{"hexsha": "db322fc66b957b37b5ce6302bd207c1e51b38595", "size": 5076, "ext": "r", "lang": "R", "max_stars_repo_path": "statTests.r", "max_stars_repo_name": "UnsignedByte/MassSpec-Data-Visualizer", "max_stars_repo_head_hexsha": "c75d242768d99aa61d87e2bc01462389c83a7028", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-17T21:55:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-17T21:55:57.000Z", "max_issues_repo_path": "statTests.r", "max_issues_repo_name": "UnsignedByte/MassSpec-Data-Visualizer", "max_issues_repo_head_hexsha": "c75d242768d99aa61d87e2bc01462389c83a7028", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2020-06-11T00:18:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-13T05:56:25.000Z", "max_forks_repo_path": "statTests.r", "max_forks_repo_name": "UnsignedByte/MassSpec-Data-Visualizer", "max_forks_repo_head_hexsha": "c75d242768d99aa61d87e2bc01462389c83a7028", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0169491525, "max_line_length": 182, "alphanum_fraction": 0.6784869976, "num_tokens": 1483}
|
import re
import json
from typing import Dict
import numpy as np
import sklearn
from gensim.utils import tokenize
from gensim.models import KeyedVectors
from sklearn.cluster import AgglomerativeClustering
from models.models_tools import filter_data
class BaselineWord2Vec:
def __init__(self, filepath: str, path_to_embeddings: str):
self.path_to_embeddings = path_to_embeddings
self.filepath = filepath
self.data = []
self.transformed_data: list = []
self.transformed_data_ids: list = []
self.vectorizer = None
        self.embedding = []
        self.words: list = []  # populated by build_corpus_from_data
def load_json(self) -> None:
"""
This method loads data from the filepath attribute.
:return: None
"""
with open(self.filepath, encoding="utf8") as f:
data = json.load(f)["datasets"]
for dataset in data:
self.data.append(dataset)
def load_and_prepare(
self,
filepath: str = "",
tags_filters=None,
random_data: int = None,
):
"""
Load the data from the given filepath if not an empty string, else from the filepath attribute.
Builds the corpus of texts and creates the Word2Vec vectorizer.
        :param filepath: an optional parameter, a string indicating from where the data must be loaded
        :param tags_filters: list of tags to include in the representation, defaults to
         ["dataset_name", "keywords", "description"]
        :param random_data: number of random entries picked from the transformed data
        :return: None
"""
if tags_filters is None:
tags_filters = ["dataset_name", "keywords", "description"]
if filepath != "":
self.filepath = filepath
self.load_json()
filtered_data = filter_data(self.filepath, tags_filters, random_data)
self.transformed_data = [
list(tokenize(dataset, deacc=True, lowercase=True))
for dataset in list(filtered_data.values())
]
self.transformed_data_ids = list(filtered_data.keys())
self.vectorize()
def kmean_clustering(
self,
clustering_model: sklearn.cluster = AgglomerativeClustering(n_clusters=10),
) -> Dict:
"""
        Compute clusters given a sklearn clustering model
        :param clustering_model: a clustering model from sklearn.cluster
        :return: a dict mapping dataset ids to cluster ids, computed from the current embedding
"""
clustering_model.fit(self.embedding)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[self.transformed_data_ids[sentence_id]] = cluster_id
return clustered_sentences
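    # Usage sketch (hypothetical file paths): embed the datasets, then group
    # them with the default AgglomerativeClustering model.
    #
    #   model = BaselineWord2Vec("datasets.json", "embeddings.kv")
    #   model.load_and_prepare()
    #   clusters = model.kmean_clustering()  # {dataset_id: cluster_id}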
def build_corpus_from_data(self):
"""
This method organizes all the text contained in the loaded datasets info in a single list of strings, except for
the dataset description. Each string represents one unique dataset.
:return: None
"""
self.words = []
for dataset in self.data:
dataset_as_string = ""
dataset_as_string += " ".join(dataset["metadata"]["keywords"])
dataset_as_string += dataset["author"]
dataset_as_string += dataset["licence"]
dataset_as_string += dataset["geographic_hold"]
            tokens = re.findall(r"(?u)\b[a-zA-Z_][a-zA-Z0-9_]+\b", dataset_as_string)  # findall extracts the word tokens; re.split would return the text between them
self.words.append(tokens)
def vectorize(self):
"""
This method computes the vector forms of each token found in the datasets info.
:return: None
"""
self.vectorizer = KeyedVectors.load_word2vec_format(self.path_to_embeddings)
        self.embedding = [
            np.mean(
                [
                    self.vectorizer[word]
                    if word in self.vectorizer
                    else self.vectorizer["unk"]
                    for word in dataset
                ],
                axis=0,  # average word vectors component-wise: one vector per dataset
            )
            for dataset in self.transformed_data
        ]
def get_k_nearest(self, dataset_index: int, k: int = 5, print_result: bool = True):
"""
This method computes and returns the names of the k-nearest neighbors of the provided dataset with respect to
the cosine similarity.
:param dataset_index: an integer, the index of the dataset from which to compute the similarities
:param k: an integer, the number of "near" datasets to return
:param print_result: a boolean, indicates whether to print out the result or not
:return: an array containing the names of the k nearest datasets from the given dataset
"""
        similarities = []
        target_dataset = np.array(self.embedding[dataset_index])
        a = np.linalg.norm(target_dataset)
        for index, dataset in enumerate(self.embedding):
            if index != dataset_index:
                b = np.linalg.norm(dataset)
                # cosine similarity between the two dataset embeddings
                similarities.append(np.dot(target_dataset, np.array(dataset)) / (a * b))
            else:
                # placeholder keeps list positions aligned with dataset indices
                similarities.append(-np.inf)
        neighbours_indices = np.argsort(similarities)[-k:]
if print_result:
print(
np.array([dataset["dataset_name"] for dataset in self.data])[
neighbours_indices
]
)
return np.array([dataset["dataset_name"] for dataset in self.data])[
neighbours_indices
]
|
{"hexsha": "6bf2ae6ebbca0ba689f56a3c9a810893e9f2fae1", "size": 5648, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/baseline/baseline_Word2Vec.py", "max_stars_repo_name": "datactivist/dataoutai", "max_stars_repo_head_hexsha": "a1db230a31e78d17cd1c79daa8c13a508d493f9f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-19T19:06:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T19:06:18.000Z", "max_issues_repo_path": "models/baseline/baseline_Word2Vec.py", "max_issues_repo_name": "datactivist/dataoutai", "max_issues_repo_head_hexsha": "a1db230a31e78d17cd1c79daa8c13a508d493f9f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-12-01T19:44:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-12T15:48:53.000Z", "max_forks_repo_path": "models/baseline/baseline_Word2Vec.py", "max_forks_repo_name": "datactivist/dataoutai", "max_forks_repo_head_hexsha": "a1db230a31e78d17cd1c79daa8c13a508d493f9f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8641975309, "max_line_length": 120, "alphanum_fraction": 0.6083569405, "include": true, "reason": "import numpy", "num_tokens": 1108}
|
SUBROUTINE PS_USTB ( datain, nparm, plev, outdat, iret )
C************************************************************************
C* PS_USTB *
C* *
C* This subroutine finds the most unstable level of a sounding from *
C* surface up to PLEV. The most unstable level is defined as the level *
C* which has the warmest pseudo wet-bulb potential temperature computed *
C* by lifting the air parcel to saturation then returning it moist *
C* adiabatically to 1000 mb. If plev = -1, the entire sounding is *
C* searched. *
C* *
C* PS_USTB ( DATAIN, NPARM, PLEV, OUTDAT, IRET ) *
C* *
C* Input parameters: *
C* DATAIN(NPARM,*) REAL Input sounding data *
C* NPARM INTEGER Number of parameters *
C* PLEV REAL Pressure level *
C* *
C* Output parameters: *
C* OUTDAT (*) REAL Data at the most unstable level *
C* IRET INTEGER Return code *
C* 0 = normal return *
C** *
C* Log: *
C* T. Lee/GSC 1/00 Created *
C* T. Lee/GSC 4/01 Assigned plev to a temporary variable *
C* T. Piper/SAIC 4/02 Fixed UMR; initialized datout *
C************************************************************************
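C* Example (illustrative): for a sounding with levels at 1000, 850 and	*
C* 700 mb and PLEV = 700., only levels with PRES .ge. 700. are searched	*
C* and the data at the level with the warmest THWC is copied to OUTDAT.	*
C*									*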
INCLUDE 'GEMPRM.PRM'
PARAMETER ( NPMS = 10 )
C*
REAL datain (*), outdat (*)
C*
REAL datlev (NPMS), datout (NPMS)
LOGICAL done
C*
C------------------------------------------------------------------------
iret = 0
eps = RMISSD
ppp = plev
C
C* Find the top and surface level.
C
CALL PC_FTOP ( datain, nparm, nlev, datlev, ier )
ptop = datlev ( 1 )
CALL PC_FLVL ( 0., 1, datain, psfc, level1, level2, lvtyp, ier )
C
IF ( ppp .gt. psfc ) THEN
DO i = 1, NPMS
outdat ( i ) = RMISSD
END DO
RETURN
ELSE IF ( ( ppp .eq. -1. ) .or. ( ppp .le. ptop ) ) THEN
ppp = ptop
END IF
C
C* Loop through the sounding data.
C
done = .false.
lev = 1
DO i = 1, NPMS
datout (i) = RMISSD
END DO
DO WHILE ( .not. done )
CALL PC_GLEV ( lev, datain, nparm, datlev, ier )
CALL PC_COMP ( 5, datlev, datout, ier )
pres = datout ( 1 )
tmpc = datout ( 2 )
dwpc = datout ( 3 )
thwc = PR_THWC ( pres, tmpc, dwpc )
C
IF ( ( thwc .gt. eps ) .and. ( pres .ge. ppp ) ) THEN
eps = thwc
DO i = 1, NPMS
outdat ( i ) = datout ( i )
END DO
END IF
lev = lev + 1
IF ( pres .le. ppp ) done = .true.
END DO
C*
RETURN
END
|
{"hexsha": "75d8add6e8e96b0b67b41a40d39b9da23550ba7a", "size": 2433, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/prmcnvlib/ps/psustb.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/prmcnvlib/ps/psustb.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/prmcnvlib/ps/psustb.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 28.6235294118, "max_line_length": 73, "alphanum_fraction": 0.5281545417, "num_tokens": 874}
|
import logging
from functools import lru_cache
from typing import Optional, Tuple, Any
import numpy as np
from opensfm import features as ft
from opensfm.dataset import DataSetBase
logger = logging.getLogger(__name__)
class FeatureLoader(object):
def clear_cache(self):
self.load_mask.cache_clear()
self.load_points_colors_segmentations_instances.cache_clear()
self._load_all_data_unmasked.cache_clear()
self._load_all_data_masked.cache_clear()
self.load_features_index.cache_clear()
        self.load_words.cache_clear()
@lru_cache(1000)
def load_mask(self, data: DataSetBase, image: str) -> Optional[np.ndarray]:
all_features_data = self._load_all_data_unmasked(data, image)
if not all_features_data:
return None
if (
data.config["features_bake_segmentation"]
and all_features_data.semantic is not None
):
# pyre-fixme [16]: `Optional` has no attribute `segmentation`
segmentations = all_features_data.semantic.segmentation
ignore_values = set(data.segmentation_ignore_values(image))
            return np.array(
                [seg not in ignore_values for seg in segmentations],
                dtype=bool,
            )
else:
return data.load_features_mask(image, all_features_data.points[:, :2])
@lru_cache(1000)
def load_points_colors_segmentations_instances(
self, data: DataSetBase, image: str
) -> Optional[ft.FeaturesData]:
all_features_data = self._load_features_nocache(data, image)
if not all_features_data:
return None
return ft.FeaturesData(
all_features_data.points,
None,
all_features_data.colors,
all_features_data.semantic,
)
def load_all_data(
self, data: DataSetBase, image: str, masked: bool
) -> Optional[ft.FeaturesData]:
if masked:
return self._load_all_data_masked(data, image)
else:
return self._load_all_data_unmasked(data, image)
@lru_cache(20)
def _load_all_data_unmasked(
self, data: DataSetBase, image: str
) -> Optional[ft.FeaturesData]:
return self._load_features_nocache(data, image)
@lru_cache(200)
def _load_all_data_masked(
self, data: DataSetBase, image: str
) -> Optional[ft.FeaturesData]:
features_data = self._load_all_data_unmasked(data, image)
if not features_data:
return features_data
mask = self.load_mask(data, image)
if mask is not None:
return features_data.mask(mask)
return features_data
@lru_cache(200)
def load_features_index(
self, data: DataSetBase, image: str, masked: bool
) -> Optional[Tuple[ft.FeaturesData, Any]]:
features_data = self.load_all_data(data, image, masked)
if not features_data:
return None
return features_data, ft.build_flann_index(
# pyre-fixme [6]: Expected `np.ndarray`
features_data.descriptors,
data.config,
)
@lru_cache(200)
def load_words(self, data: DataSetBase, image: str, masked: bool) -> np.ndarray:
words = data.load_words(image)
if masked:
mask = self.load_mask(data, image)
if mask is not None:
words = words[mask]
return words
def _load_features_nocache(
self, data: DataSetBase, image: str
) -> Optional[ft.FeaturesData]:
features_data = data.load_features(image)
if features_data is None:
logger.error("Could not load features for image {}".format(image))
return None
else:
features_data.points = np.array(features_data.points[:, :3], dtype=float)
return features_data
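# Usage sketch (hypothetical dataset and image name): results are memoized per
# (data, image) pair, so repeated lookups hit the lru_cache instead of disk.
#
#   loader = FeatureLoader()
#   feats = loader.load_all_data(data, "image_001.jpg", masked=True)
#   loader.clear_cache()  # drop memoized entries, e.g. after files change on disk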
|
{"hexsha": "4993359dc58e0ac1040addc00bc6515325e654c3", "size": 3983, "ext": "py", "lang": "Python", "max_stars_repo_path": "opensfm/feature_loading.py", "max_stars_repo_name": "whuaegeanse/OpenSfM", "max_stars_repo_head_hexsha": "1c261fbd1330f9a4483597ceb5cb1098fcdbd97f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-12-07T10:54:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:29:41.000Z", "max_issues_repo_path": "opensfm/feature_loading.py", "max_issues_repo_name": "whuaegeanse/OpenSfM", "max_issues_repo_head_hexsha": "1c261fbd1330f9a4483597ceb5cb1098fcdbd97f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2022-01-24T16:40:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T01:17:58.000Z", "max_forks_repo_path": "extractor/mapping/OpenSfM/opensfm/feature_loading.py", "max_forks_repo_name": "LukasBommes/PV-Hawk", "max_forks_repo_head_hexsha": "af07a5e5690326837d1e9b26bdbb32f5582e89fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-25T14:17:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-25T14:17:22.000Z", "avg_line_length": 34.3362068966, "max_line_length": 85, "alphanum_fraction": 0.6321867939, "include": true, "reason": "import numpy", "num_tokens": 855}
|
#!/bin/env python
import numpy as np
# controls printing array corners
# np.set_printoptions(threshold=np.inf)
zero = np.zeros(10)
one = np.ones(20)
print(zero)
print(one)
# read file into a numpy array
data = np.loadtxt('../data/strlist10k.txt', dtype=str)
print(data)
|
{"hexsha": "dc825129c647abc8738e151719b952e42eb507e6", "size": 277, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python/arrayops.py", "max_stars_repo_name": "ketancmaheshwari/hello-goog", "max_stars_repo_head_hexsha": "fc479ea0017edfc96f3b109eff336fb9954e1f3e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/python/arrayops.py", "max_issues_repo_name": "ketancmaheshwari/hello-goog", "max_issues_repo_head_hexsha": "fc479ea0017edfc96f3b109eff336fb9954e1f3e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python/arrayops.py", "max_forks_repo_name": "ketancmaheshwari/hello-goog", "max_forks_repo_head_hexsha": "fc479ea0017edfc96f3b109eff336fb9954e1f3e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.3888888889, "max_line_length": 59, "alphanum_fraction": 0.7184115523, "include": true, "reason": "import numpy", "num_tokens": 74}
|
"""
===============================================
Creating a timeline with lines, dates, and text
===============================================
How to create a simple timeline using Matplotlib release dates.
Timelines can be created with a collection of dates and text. In this example,
we show how to create a simple timeline using the dates for recent releases
of Matplotlib. First, we'll pull the data from GitHub.
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from datetime import datetime
try:
# Try to fetch a list of Matplotlib releases and their dates
# from https://api.github.com/repos/matplotlib/matplotlib/releases
import urllib.request
import json
url = 'https://api.github.com/repos/matplotlib/matplotlib/releases'
url += '?per_page=100'
data = json.loads(urllib.request.urlopen(url, timeout=.4).read().decode())
dates = []
names = []
for item in data:
if 'rc' not in item['tag_name'] and 'b' not in item['tag_name']:
dates.append(item['published_at'].split("T")[0])
names.append(item['tag_name'])
# Convert date strings (e.g. 2014-10-18) to datetime
dates = [datetime.strptime(d, "%Y-%m-%d") for d in dates]
except Exception:
# In case the above fails, e.g. because of missing internet connection
# use the following lists as fallback.
names = ['v2.2.4', 'v3.0.3', 'v3.0.2', 'v3.0.1', 'v3.0.0', 'v2.2.3',
'v2.2.2', 'v2.2.1', 'v2.2.0', 'v2.1.2', 'v2.1.1', 'v2.1.0',
'v2.0.2', 'v2.0.1', 'v2.0.0', 'v1.5.3', 'v1.5.2', 'v1.5.1',
'v1.5.0', 'v1.4.3', 'v1.4.2', 'v1.4.1', 'v1.4.0']
dates = ['2019-02-26', '2019-02-26', '2018-11-10', '2018-11-10',
'2018-09-18', '2018-08-10', '2018-03-17', '2018-03-16',
'2018-03-06', '2018-01-18', '2017-12-10', '2017-10-07',
'2017-05-10', '2017-05-02', '2017-01-17', '2016-09-09',
'2016-07-03', '2016-01-10', '2015-10-29', '2015-02-16',
'2014-10-26', '2014-10-18', '2014-08-26']
# Convert date strings (e.g. 2014-10-18) to datetime
dates = [datetime.strptime(d, "%Y-%m-%d") for d in dates]
##############################################################################
# Next, we'll create a stem plot with some variation in levels as to
# distinguish even close-by events. We add markers on the baseline for visual
# emphasis on the one-dimensional nature of the time line.
#
# For each event, we add a text label via `~.Axes.annotate`, which is offset
# in units of points from the tip of the event line.
#
# Note that Matplotlib will automatically plot datetime inputs.
# Choose some nice levels
levels = np.tile([-5, 5, -3, 3, -1, 1],
int(np.ceil(len(dates)/6)))[:len(dates)]
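# e.g. with 8 dates this yields [-5, 5, -3, 3, -1, 1, -5, 5]: the six-level
# pattern is tiled and then truncated to len(dates).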
# Create figure and plot a stem plot with the date
fig, ax = plt.subplots(figsize=(8.8, 4), constrained_layout=True)
ax.set(title="Matplotlib release dates")
ax.vlines(dates, 0, levels, color="tab:red") # The vertical stems.
ax.plot(dates, np.zeros_like(dates), "-o",
color="k", markerfacecolor="w") # Baseline and markers on it.
# annotate lines
for d, l, r in zip(dates, levels, names):
ax.annotate(r, xy=(d, l),
xytext=(-3, np.sign(l)*3), textcoords="offset points",
horizontalalignment="right",
verticalalignment="bottom" if l > 0 else "top")
# format xaxis with 4 month intervals
ax.xaxis.set_major_locator(mdates.MonthLocator(interval=4))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%b %Y"))
plt.setp(ax.get_xticklabels(), rotation=30, ha="right")
# remove y axis and spines
ax.yaxis.set_visible(False)
ax.spines[["left", "top", "right"]].set_visible(False)
ax.margins(y=0.1)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.annotate`
# - `matplotlib.axes.Axes.vlines`
# - `matplotlib.axis.Axis.set_major_locator`
# - `matplotlib.axis.Axis.set_major_formatter`
# - `matplotlib.dates.MonthLocator`
# - `matplotlib.dates.DateFormatter`
|
{"hexsha": "087e7320f6b84f1fbc1c695596b5e045a3c012bf", "size": 4243, "ext": "py", "lang": "Python", "max_stars_repo_path": "matplotlib-3.4.3/matplotlib-3.4.3/examples/lines_bars_and_markers/timeline.py", "max_stars_repo_name": "JohnLauFoo/clc_packages_Yu", "max_stars_repo_head_hexsha": "259f01d9b5c02154ce258734d519ae8995cd0991", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-13T17:21:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-13T17:21:44.000Z", "max_issues_repo_path": "matplotlib-3.4.3/matplotlib-3.4.3/examples/lines_bars_and_markers/timeline.py", "max_issues_repo_name": "JohnLauFoo/clc_packages_Yu", "max_issues_repo_head_hexsha": "259f01d9b5c02154ce258734d519ae8995cd0991", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matplotlib-3.4.3/matplotlib-3.4.3/examples/lines_bars_and_markers/timeline.py", "max_forks_repo_name": "JohnLauFoo/clc_packages_Yu", "max_forks_repo_head_hexsha": "259f01d9b5c02154ce258734d519ae8995cd0991", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2252252252, "max_line_length": 78, "alphanum_fraction": 0.6040537356, "include": true, "reason": "import numpy", "num_tokens": 1234}
|
"""
This is the implementation of the User MAD ranking metric.
It proceeds from a user-wise computation and averages the values over the users.
"""
__version__ = '0.3.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo'
__email__ = 'vitowalter.anelli@poliba.it, claudio.pomo@poliba.it'
import math
import typing as t
import numpy as np
import pandas as pd
from elliot.evaluation.metrics.base_metric import BaseMetric
class UserMADranking(BaseMetric):
r"""
User MAD Ranking-based
This class represents the implementation of the User MAD ranking recommendation metric.
For further details, please refer to the `paper <https://link.springer.com/article/10.1007/s11257-020-09285-1>`_
.. math::
\mathrm {MAD}={avg}_{i, j}({MAD}(R^{(i)}, R^{(j)}))
To compute the metric, add it to the config file adopting the following pattern:
.. code:: yaml
complex_metrics:
- metric: UserMADranking
clustering_name: Happiness
clustering_file: ../data/movielens_1m/u_happy.tsv
"""
def __init__(self, recommendations, config, params, eval_objects, additional_data):
"""
Constructor
:param recommendations: list of recommendations in the form {user: [(item1,value1),...]}
:param config: SimpleNameSpace that represents the configuration of the experiment
:param params: Parameters of the model
:param eval_objects: list of objects that may be useful for the computation of the different metrics
"""
super().__init__(recommendations, config, params, eval_objects, additional_data)
self._cutoff = self._evaluation_objects.cutoff
self._relevance = self._evaluation_objects.relevance.discounted_relevance
# self.rel_threshold = self._evaluation_objects.relevance._rel_threshold
self._user_clustering_path = self._additional_data.get("clustering_file", False)
self._user_clustering_name = self._additional_data.get("clustering_name", "")
if self._user_clustering_path:
self._user_clustering = pd.read_csv(self._additional_data["clustering_file"], sep="\t", header=None)
self._n_clusters = self._user_clustering[1].nunique()
self._user_clustering = dict(zip(self._user_clustering[0], self._user_clustering[1]))
else:
self._n_clusters = 1
self._user_clustering = {}
self._sum = np.zeros(self._n_clusters)
self._n_users = np.zeros(self._n_clusters)
def name(self):
"""
Metric Name Getter
:return: returns the public name of the metric
"""
return f"UserMADranking_{self._user_clustering_name}"
def __user_mad(self, user_recommendations, user, cutoff):
"""
        Per-user contribution to User MAD ranking (the user's nDCG)
        :param user_recommendations: list of user recommendations in the form [(item1,value1),...]
        :param user: the user id
        :param cutoff: numerical threshold to limit the recommendation list
        :return: the nDCG value for the specific user
"""
return self.compute_user_ndcg(user_recommendations, user, cutoff)
# @staticmethod
# def compute_discount(k: int) -> float:
# """
# Method to compute logarithmic discount
# :param k:
# :return:
# """
# return 1 / math.log(k + 2) * math.log(2)
def compute_idcg(self, user: int, cutoff: int) -> float:
"""
        Method to compute Ideal Discounted Cumulative Gain
        :param user: the user id
        :param cutoff: numerical threshold to limit the recommendation list
        :return: the IDCG value for the user
"""
gains: t.List = sorted(list(self._relevance.get_user_rel_gains(user).values()))
n: int = min(len(gains), cutoff)
m: int = len(gains)
        return sum(gains[m - r - 1] * self._relevance.logarithmic_ranking_discount(r) for r in range(n))
def compute_user_ndcg(self, user_recommendations: t.List, user: int, cutoff: int) -> float:
"""
        Method to compute normalized Discounted Cumulative Gain
        :param user_recommendations: list of user recommendations in the form [(item1,value1),...]
        :param user: the user id
        :param cutoff: numerical threshold to limit the recommendation list
        :return: the nDCG value
"""
idcg: float = self.compute_idcg(user, cutoff)
dcg: float = sum(
[self._relevance.get_rel(user, x) * self._relevance.logarithmic_ranking_discount(r)
for r, x in enumerate([item for item, _ in user_recommendations]) if r < cutoff])
return dcg / idcg if dcg > 0 else 0
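    # Worked example (illustrative): with gains {a: 3, b: 2}, recommendations
    # [a, c, b] and cutoff 3, DCG = 3*disc(0) + 0*disc(1) + 2*disc(2) while
    # IDCG = 3*disc(0) + 2*disc(1), and nDCG = DCG / IDCG.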
def eval(self):
"""
Evaluation function
:return: the overall averaged value of User MAD ranking
"""
for u, u_r in self._recommendations.items():
if len(self._relevance.get_user_rel(u)):
v = self.__user_mad(u_r, u, self._cutoff)
cluster = self._user_clustering.get(u, None)
if cluster is not None:
self._sum[cluster] += v
self._n_users[cluster] += 1
avg = [self._sum[i]/self._n_users[i] for i in range(self._n_clusters)]
differences = []
for i in range(self._n_clusters):
for j in range(i+1,self._n_clusters):
differences.append(abs(avg[i] - avg[j]))
return np.average(differences)
def get(self):
return [self]
|
{"hexsha": "584827fa8272a47f0228a439163ded9c05dc74fb", "size": 5384, "ext": "py", "lang": "Python", "max_stars_repo_path": "elliot/evaluation/metrics/fairness/MAD/UserMADranking.py", "max_stars_repo_name": "gategill/elliot", "max_stars_repo_head_hexsha": "113763ba6d595976e14ead2e3d460d9705cd882e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 175, "max_stars_repo_stars_event_min_datetime": "2021-03-04T15:46:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T05:56:58.000Z", "max_issues_repo_path": "elliot/evaluation/metrics/fairness/MAD/UserMADranking.py", "max_issues_repo_name": "gategill/elliot", "max_issues_repo_head_hexsha": "113763ba6d595976e14ead2e3d460d9705cd882e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2021-03-06T17:53:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T17:02:07.000Z", "max_forks_repo_path": "elliot/evaluation/metrics/fairness/MAD/UserMADranking.py", "max_forks_repo_name": "gategill/elliot", "max_forks_repo_head_hexsha": "113763ba6d595976e14ead2e3d460d9705cd882e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 39, "max_forks_repo_forks_event_min_datetime": "2021-03-04T15:46:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T15:37:12.000Z", "avg_line_length": 38.4571428571, "max_line_length": 121, "alphanum_fraction": 0.6417161961, "include": true, "reason": "import numpy", "num_tokens": 1279}
|
# stdlib imports
from datetime import timedelta, datetime
import tempfile
import os.path
import io
import urllib
import ftplib
import logging
import shutil
# third party imports
import pytz
import numpy as np
import requests
from openquake.hazardlib.geo.geodetic import geodetic_distance
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
# local imports
from gmprocess.io.fetcher import _get_first_value
from gmprocess.io.geonet.core import read_geonet
from gmprocess.core.streamcollection import StreamCollection
from gmprocess.utils.config import get_config
CATBASE = "https://quakesearch.geonet.org.nz/csv?bbox=163.95996,-49.18170,182.63672,-32.28713&startdate=%s&enddate=%s"
GEOBASE = "ftp://ftp.geonet.org.nz/strong/processed/[YEAR]/[MONTH]/"
TIMEFMT = "%Y-%m-%dT%H:%M:%S"
NZTIMEDELTA = 2 # number of seconds allowed between GeoNet catalog time and
# event timestamp on FTP site
NZCATWINDOW = 5 * 60 # number of seconds to search around in GeoNet EQ catalog
KM2DEG = 1 / 111.0
# default values for this fetcher
# if None specified in constructor, AND no parameters specified in
# config, then use these.
RADIUS = 100 # kilometers
DT = 16 # seconds
DDEPTH = 30 # km
DMAG = 0.3
# NOTE - this class is currently disabled, as GNS is at the time of
# this writing on a path to shutting down their FTP service in favor
# of their FDSN service. To re-enable it, uncomment the line below
# and comment the one inheriting from object.
# class GeoNetFetcher(DataFetcher):
class GeoNetFetcher(object):
# this announces to the world the valid bounds for this fetcher.
BOUNDS = [158.555, 192.656, -51.553, -26.809]
def __init__(
self,
time,
lat,
lon,
depth,
magnitude,
user=None,
password=None,
radius=None,
dt=None,
ddepth=None,
dmag=None,
rawdir=None,
config=None,
drop_non_free=True,
stream_collection=True,
):
"""Create a GeoNetFetcher instance.
Args:
time (datetime):
Origin time.
lat (float):
Origin latitude.
lon (float):
Origin longitude.
depth (float):
Origin depth.
magnitude (float):
Origin magnitude.
user (str):
(Optional) username for site.
password (str):
(Optional) password for site.
radius (float):
Search radius (km).
dt (float):
Search time window (sec).
ddepth (float):
Search depth window (km).
dmag (float):
Search magnitude window (magnitude units).
rawdir (str):
Path to location where raw data will be stored. If not
specified, raw data will be deleted.
config (dict):
Dictionary containing configuration.
If None, retrieve global config.
drop_non_free (bool):
Option to ignore non-free-field (borehole, sensors on
structures, etc.)
stream_collection (bool):
Construct and return a StreamCollection instance?
"""
# what values do we use for search thresholds?
# In order of priority:
# 1) Not-None values passed in constructor
# 2) Configured values
# 3) DEFAULT values at top of the module
if config is None:
config = get_config()
cfg_radius = None
cfg_dt = None
cfg_ddepth = None
cfg_dmag = None
if "fetchers" in config:
if "GeoNetFetcher" in config["fetchers"]:
fetch_cfg = config["fetchers"]["GeoNetFetcher"]
if "radius" in fetch_cfg:
cfg_radius = float(fetch_cfg["radius"])
if "dt" in fetch_cfg:
cfg_dt = float(fetch_cfg["dt"])
if "ddepth" in fetch_cfg:
cfg_ddepth = float(fetch_cfg["ddepth"])
if "dmag" in fetch_cfg:
cfg_dmag = float(fetch_cfg["dmag"])
radius = _get_first_value(radius, cfg_radius, RADIUS)
dt = _get_first_value(dt, cfg_dt, DT)
ddepth = _get_first_value(ddepth, cfg_ddepth, DDEPTH)
dmag = _get_first_value(dmag, cfg_dmag, DMAG)
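        # e.g. for radius: a value passed to the constructor wins over one in
        # config["fetchers"]["GeoNetFetcher"], which in turn wins over the
        # module default RADIUS = 100.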
tz = pytz.UTC
if isinstance(time, UTCDateTime):
time = time.datetime
self.time = tz.localize(time)
self.lat = lat
self.lon = lon
self.radius = radius
self.dt = dt
self.rawdir = rawdir
self.depth = depth
self.magnitude = magnitude
self.ddepth = ddepth
self.dmag = dmag
self.drop_non_free = drop_non_free
self.stream_collection = stream_collection
def getMatchingEvents(self, solve=True):
"""Return a list of dictionaries matching input parameters.
Args:
solve (bool):
If set to True, then this method
should return a list with a maximum of one event.
Returns:
list: List of event dictionaries, with fields:
- time Event time (UTC)
- lat Event latitude
- lon Event longitude
- depth Event depth
- mag Event magnitude
"""
start_time = self.time - timedelta(seconds=3600)
end_time = self.time + timedelta(seconds=3600)
tpl = (start_time.strftime(TIMEFMT), end_time.strftime(TIMEFMT))
url = CATBASE % tpl
req = requests.get(url)
logging.debug("GeoNet search url: %s", str(url))
logging.debug("GeoNet search response code: %s", req.status_code)
data = req.text
f = io.StringIO(data)
df = pd.read_csv(f, parse_dates=["origintime"])
f.close()
# some of the column names have spaces in them
cols = df.columns
newcols = {}
for col in cols:
newcol = col.strip()
newcols[col] = newcol
df = df.rename(columns=newcols)
lats = df["latitude"].to_numpy()
lons = df["longitude"].to_numpy()
etime = pd.Timestamp(self.time)
dtimes = np.abs(df["origintime"] - etime)
distances = geodetic_distance(self.lon, self.lat, lons, lats)
didx = distances <= self.radius
tidx = (dtimes <= np.timedelta64(int(self.dt), "s")).to_numpy()
newdf = df[didx & tidx]
events = []
for idx, row in newdf.iterrows():
eventdict = {
"time": UTCDateTime(row["origintime"]),
"lat": row["latitude"],
"lon": row["longitude"],
"depth": row["depth"],
"mag": row["magnitude"],
}
events.append(eventdict)
if solve and len(events) > 1:
event = self.solveEvents(events)
events = [event]
return events
def retrieveData(self, event_dict):
"""Retrieve data from GeoNet FTP, turn into StreamCollection.
Args:
event (dict):
Best dictionary matching input event, fields as above
in return of getMatchingEvents().
Returns:
StreamCollection: StreamCollection object.
"""
rawdir = self.rawdir
if self.rawdir is None:
rawdir = tempfile.mkdtemp()
else:
if not os.path.isdir(rawdir):
os.makedirs(rawdir)
etime = event_dict["time"]
neturl = GEOBASE.replace("[YEAR]", str(etime.year))
monthstr = etime.strftime("%m_%b")
neturl = neturl.replace("[MONTH]", monthstr)
urlparts = urllib.parse.urlparse(neturl)
ftp = ftplib.FTP(urlparts.netloc)
ftp.login() # anonymous
dirparts = urlparts.path.strip("/").split("/")
for d in dirparts:
try:
ftp.cwd(d)
except ftplib.error_perm as msg:
raise Exception(msg)
# cd to the desired output folder
os.chdir(rawdir)
datafiles = []
# we cannot depend on the time given to us by the GeoNet catalog to
# match the directory name on the FTP site, so we must do a secondary
# matching.
dirlist = ftp.nlst()
fname = _match_closest_time(etime, dirlist)
# create the event folder name from the time we got above
# fname = etime.strftime('%Y-%m-%d_%H%M%S')
try:
ftp.cwd(fname)
except ftplib.error_perm:
msg = 'Could not find an FTP data folder called "%s". Returning.' % (
urllib.parse.urljoin(neturl, fname)
)
raise Exception(msg)
dirlist = ftp.nlst()
for volume in dirlist:
if volume.startswith("Vol1"):
ftp.cwd(volume)
if "data" not in ftp.nlst():
ftp.cwd("..")
continue
ftp.cwd("data")
flist = ftp.nlst()
for ftpfile in flist:
if not ftpfile.endswith("V1A"):
continue
localfile = os.path.join(os.getcwd(), ftpfile)
if localfile in datafiles:
continue
datafiles.append(localfile)
f = open(localfile, "wb")
logging.info(f"Retrieving remote file {ftpfile}...\n")
ftp.retrbinary(f"RETR {ftpfile}", f.write)
f.close()
ftp.cwd("..")
ftp.cwd("..")
ftp.quit()
streams = []
for dfile in datafiles:
logging.info(f"Reading GeoNet file {dfile}...")
try:
tstreams = read_geonet(dfile)
streams += tstreams
except BaseException as e:
                fmt = 'Failed to read GeoNet file "%s" due to error "%s". Continuing.'
                tpl = (dfile, str(e))
                logging.warning(fmt % tpl)
if self.rawdir is None:
shutil.rmtree(rawdir)
if self.stream_collection:
stream_collection = StreamCollection(
streams=streams, drop_non_free=self.drop_non_free
)
return stream_collection
else:
return None
def _match_closest_time(etime, dirlist):
timefmt = "%Y-%m-%d_%H%M%S"
etimes = [np.datetime64(datetime.strptime(dirname, timefmt)) for dirname in dirlist]
etime = np.datetime64(etime)
dtimes = np.abs(etimes - etime)
new_etime = etimes[dtimes.argmin()]
newtime = datetime.strptime(str(new_etime)[0:19], TIMEFMT)
fname = newtime.strftime("%Y-%m-%d_%H%M%S")
return fname
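# Illustrative example (hypothetical values): with etime 2016-11-13 11:02:56
# and dirlist ['2016-11-13_110259', '2016-11-14_000000'], the first entry is
# closest in time, so '2016-11-13_110259' is returned.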
|
{"hexsha": "dcb5feb0b34ccdbb9f9fba66c0528c86bae80f76", "size": 10978, "ext": "py", "lang": "Python", "max_stars_repo_path": "gmprocess/io/geonet/geonet_fetcher.py", "max_stars_repo_name": "smithj382/groundmotion-processing", "max_stars_repo_head_hexsha": "b6c8284dc945deb868e90c6e674b1743a424b4f9", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gmprocess/io/geonet/geonet_fetcher.py", "max_issues_repo_name": "smithj382/groundmotion-processing", "max_issues_repo_head_hexsha": "b6c8284dc945deb868e90c6e674b1743a424b4f9", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gmprocess/io/geonet/geonet_fetcher.py", "max_forks_repo_name": "smithj382/groundmotion-processing", "max_forks_repo_head_hexsha": "b6c8284dc945deb868e90c6e674b1743a424b4f9", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6748466258, "max_line_length": 118, "alphanum_fraction": 0.5552013117, "include": true, "reason": "import numpy", "num_tokens": 2460}
|
#!/usr/bin/env python
# coding:utf-8
import torch.nn as nn
from models.structure_model.graphcnn import HierarchyGCN
from models.structure_model.tree import Tree
import json
import os
import numpy as np
from helper.utils import get_hierarchy_relations
from models.structure_model.weighted_tree_lstm import WeightedHierarchicalTreeLSTMEndtoEnd
MODEL_MODULE = {
'TreeLSTM': WeightedHierarchicalTreeLSTMEndtoEnd,
'GCN': HierarchyGCN
}
class StructureEncoder(nn.Module):
def __init__(self,
config,
label_map,
device,
graph_model_type):
"""
Structure Encoder module
:param config: helper.configure, Configure Object
:param label_map: data_modules.vocab.v2i['label']
:param device: torch.device, config.train.device_setting.device
:param graph_model_type: Str, model_type, ['TreeLSTM', 'GCN']
"""
super(StructureEncoder, self).__init__()
self.label_map = label_map
self.root = Tree(-1)
self.hierarchical_label_dict, self.label_trees = get_hierarchy_relations(os.path.join(config.data.data_dir,
config.data.hierarchy),
self.label_map,
root=self.root,
fortree=True)
hierarchy_prob_file = os.path.join(config.data.data_dir, config.data.prob_json)
        with open(hierarchy_prob_file, 'r') as f:
            self.hierarchy_prob = json.load(f)
self.node_prob_from_parent = np.zeros((len(self.label_map), len(self.label_map)))
self.node_prob_from_child = np.zeros((len(self.label_map), len(self.label_map)))
for p in self.hierarchy_prob.keys():
if p == 'Root':
continue
for c in self.hierarchy_prob[p].keys():
# self.hierarchy_id_prob[self.label_map[p]][self.label_map[c]] = self.hierarchy_prob[p][c]
self.node_prob_from_child[int(self.label_map[p])][int(self.label_map[c])] = 1.0
self.node_prob_from_parent[int(self.label_map[c])][int(self.label_map[p])] = self.hierarchy_prob[p][c]
        # node_prob_from_parent: row is the child, column is its parent (holds P(child|parent))
self.model = MODEL_MODULE[graph_model_type](num_nodes=len(self.label_map),
in_matrix=self.node_prob_from_child,
out_matrix=self.node_prob_from_parent,
in_dim=config.structure_encoder.node.dimension,
dropout=config.structure_encoder.node.dropout,
device=device,
root=self.root,
hierarchical_label_dict=self.hierarchical_label_dict,
label_trees=self.label_trees)
def forward(self, inputs):
return self.model(inputs)
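# Note on the expected prob_json format (inferred from the parsing above): a
# single JSON object mapping each parent label to {child: P(child|parent)},
# e.g. {"Root": {"sports": 0.4, "news": 0.6}, "sports": {"football": 0.7}};
# the "Root" entry is skipped when filling the two probability matrices.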
|
{"hexsha": "4c510a231d9ce1d45c010b8bbb4997e37efea665", "size": 3403, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/structure_model/structure_encoder.py", "max_stars_repo_name": "TownShaw/HiAGM", "max_stars_repo_head_hexsha": "26c20362467ab703cde4eb0352809de622deddf0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 105, "max_stars_repo_stars_event_min_datetime": "2020-06-24T09:22:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T03:10:31.000Z", "max_issues_repo_path": "models/structure_model/structure_encoder.py", "max_issues_repo_name": "TownShaw/HiAGM", "max_issues_repo_head_hexsha": "26c20362467ab703cde4eb0352809de622deddf0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-07-18T05:38:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T03:36:49.000Z", "max_forks_repo_path": "models/structure_model/structure_encoder.py", "max_forks_repo_name": "TownShaw/HiAGM", "max_forks_repo_head_hexsha": "26c20362467ab703cde4eb0352809de622deddf0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2020-06-24T02:43:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T04:12:19.000Z", "avg_line_length": 47.9295774648, "max_line_length": 118, "alphanum_fraction": 0.5430502498, "include": true, "reason": "import numpy", "num_tokens": 595}
|
Agricultural and Environmental Education (AEE) is a major offered by the College of Agricultural and Environmental Sciences as of 2010. This major prepares students to enter a teacher credential program in either science or agricultural and environmental education. Students in AEE take classes on a variety of subjects including animal science, plant science, soil science, environmental horticulture, economics, and environmental science and policy. Students are able to specialize in an area of their interest.
It is very flexible with its graduation requirements. You are not limited to the courses offered on the list given in the catalogue. I would strongly advise checking in with your advisor or the Animal Science Office to make sure your courses are compatible.
If you plan on going into the credentials program for agriculture, you will need 2000 hours of agricultural experience, after high school graduation. However, you do not need these hours to graduate with your bachelor's degree.
|
{"hexsha": "2f10bac2328a702547755103e203a5f6ed22f5c1", "size": 1030, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Agricultural_and_Environmental_Education.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Agricultural_and_Environmental_Education.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Agricultural_and_Environmental_Education.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 171.6666666667, "max_line_length": 550, "alphanum_fraction": 0.8281553398, "num_tokens": 188}
|
(* *********************************************************************)
(* *)
(* The CertiKOS Certified Kit Operating System *)
(* *)
(* The FLINT Group, Yale University *)
(* *)
(* Copyright The FLINT Group, Yale University. All rights reserved. *)
(* This file is distributed under the terms of the Yale University *)
(* Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
(* *********************************************************************)
(* *)
(* Proof of functional correctness *)
(* for the C functions implemented in the TTrapArg layer *)
(* *)
(* Xiongnan (Newman) Wu *)
(* Hao Chen (hao.chen@yale.edu) *)
(* *)
(* Yale University *)
(* *)
(* *********************************************************************)
Require Import Coqlib.
Require Import Maps.
Require Import AST.
Require Import Integers.
Require Import Floats.
Require Import Values.
Require Import MemoryX.
Require Import EventsX.
Require Import Globalenvs.
Require Import Locations.
Require Import Smallstep.
Require Import ClightBigstep.
Require Import Cop.
Require Import compcert.lib.Integers.
Require Import ZArith.Zwf.
Require Import RealParams.
Require Import VCGen.
Require Import liblayers.compcertx.Stencil.
Require Import liblayers.compcertx.MakeProgram.
Require Import liblayers.compat.CompatLayers.
Require Import liblayers.compat.CompatGenSem.
Require Import CompatClightSem.
Require Import PrimSemantics.
Require Import TacticsForTesting.
Require Import XOmega.
Require Import Clight.
Require Import CDataTypes.
Require Import Ctypes.
Require Import CLemmas.
Require Import AbstractDataType.
Require Import TTrapArg.
Require Import TrapGenSpec.
Require Import TTrapArgCSource.
Require Import ObjTrap.
Require Import CommonTactic.
Module TTRAPARGCODE2.
Section WithPrimitives.
Context `{real_params: RealParams}.
Context {memb} `{Hmemx: Mem.MemoryModelX memb}.
Context `{Hmwd: UseMemWithData memb}.
Let mem := mwd (cdata RData).
Context `{Hstencil: Stencil}.
Context `{make_program_ops: !MakeProgramOps Clight.function type Clight.fundef type}.
Context `{Hmake_program: !MakeProgram Clight.function type Clight.fundef type}.
(*Section SYSMMAP.
Let L: compatlayer (cdata RData) := get_curid ↦ gensem get_curid_spec
⊕ uctx_arg2 ↦ gensem uctx_arg2_spec
⊕ uctx_arg3 ↦ gensem uctx_arg3_spec
⊕ uctx_arg4 ↦ gensem uctx_arg4_spec
⊕ pt_read ↦ gensem ptRead_spec
⊕ pt_resv ↦ gensem ptResv_spec
⊕ vmx_set_mmap ↦ gensem vmx_set_mmap_spec
⊕ uctx_set_errno ↦ gensem uctx_set_errno_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section SysMMapBody.
Context `{Hwb: WritableBlockOps}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
(** get_curid *)
Variable bget_curid: block.
Hypothesis hget_curid1 : Genv.find_symbol ge get_curid = Some bget_curid.
Hypothesis hget_curid2 : Genv.find_funct_ptr ge bget_curid = Some (External (EF_external get_curid (signature_of_type Tnil tint cc_default)) Tnil tint cc_default).
(** uctx_arg2 *)
Variable buctx_arg2: block.
Hypothesis huctx_arg21 : Genv.find_symbol ge uctx_arg2 = Some buctx_arg2.
Hypothesis huctx_arg22 : Genv.find_funct_ptr ge buctx_arg2 = Some (External (EF_external uctx_arg2 (signature_of_type Tnil tint cc_default)) Tnil tint cc_default).
(** uctx_arg3 *)
Variable buctx_arg3: block.
Hypothesis huctx_arg31 : Genv.find_symbol ge uctx_arg3 = Some buctx_arg3.
Hypothesis huctx_arg32 : Genv.find_funct_ptr ge buctx_arg3 = Some (External (EF_external uctx_arg3 (signature_of_type Tnil tint cc_default)) Tnil tint cc_default).
(** uctx_arg4 *)
Variable buctx_arg4: block.
Hypothesis huctx_arg41 : Genv.find_symbol ge uctx_arg4 = Some buctx_arg4.
Hypothesis huctx_arg42 : Genv.find_funct_ptr ge buctx_arg4 = Some (External (EF_external uctx_arg4 (signature_of_type Tnil tint cc_default)) Tnil tint cc_default).
(** pt_read *)
Variable bpt_read: block.
Hypothesis hpt_read1 : Genv.find_symbol ge pt_read = Some bpt_read.
Hypothesis hpt_read2 : Genv.find_funct_ptr ge bpt_read = Some (External (EF_external pt_read (signature_of_type (Tcons tint (Tcons tint Tnil)) tint cc_default)) (Tcons tint (Tcons tint Tnil)) tint cc_default).
(** pt_resv *)
Variable bpt_resv: block.
Hypothesis hpt_resv1 : Genv.find_symbol ge pt_resv = Some bpt_resv.
Hypothesis hpt_resv2 : Genv.find_funct_ptr ge bpt_resv = Some (External (EF_external pt_resv (signature_of_type (Tcons tint (Tcons tint (Tcons tint Tnil))) tint cc_default)) (Tcons tint (Tcons tint (Tcons tint Tnil))) tint cc_default).
(** vmx_set_mmap *)
Variable bvmx_set_mmap: block.
Hypothesis hvmx_set_mmap1 : Genv.find_symbol ge vmx_set_mmap = Some bvmx_set_mmap.
Hypothesis hvmx_set_mmap2 : Genv.find_funct_ptr ge bvmx_set_mmap = Some (External (EF_external vmx_set_mmap (signature_of_type (Tcons tint (Tcons tint (Tcons tint Tnil))) tint cc_default)) (Tcons tint (Tcons tint (Tcons tint Tnil))) tint cc_default).
(** uctx_set_errno *)
Variable buctx_set_errno: block.
Hypothesis huctx_set_errno1 : Genv.find_symbol ge uctx_set_errno = Some buctx_set_errno.
Hypothesis huctx_set_errno2 : Genv.find_funct_ptr ge buctx_set_errno = Some (External (EF_external uctx_set_errno (signature_of_type (Tcons tint Tnil) Tvoid cc_default)) (Tcons tint Tnil) Tvoid cc_default).
Lemma sys_mmap_body_correct: forall m d d' env le,
env = PTree.empty _ ->
trap_mmap_spec d = Some d' ->
high_level_invariant d ->
exists le',
exec_stmt ge env le ((m, d): mem) sys_mmap_body E0 le' (m, d') Out_normal.
Proof.
generalize max_unsigned_val; intro muval.
intros.
assert(iflags: ikern d = true /\ pg d = true /\ ihost d = true).
{
functional inversion H0; subst.
functional inversion H3; auto.
functional inversion H3; auto.
functional inversion H3; auto.
}
destruct iflags as [ikern iflags].
destruct iflags as [pg ihost].
destruct H1.
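    (* 4294963200 = 2^32 - 4096, so -4096 and 4294963200 are congruent
       modulo 2^32 and Int.repr maps them to the same 32-bit value;
       Int.eqm_samerepr captures exactly this fact. *)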
assert(negval: Int.repr (-4096) = Int.repr (4294963200)).
{
apply Int.eqm_samerepr.
unfold Int.eqm.
unfold Int.eqmod.
exists (-1).
repeat autounfold.
unfold two_power_nat, shift_nat.
simpl.
reflexivity.
}
functional inversion H0; subst.
unfold hpa0 in *.
functional inversion H2; subst.
functional inversion H3; subst.
functional inversion H4; subst.
unfold andb in H5.
subdestruct.
destruct (Zdivide_dec 4096 (Int.unsigned n0) AuxStateDataType.HPS).
destruct (Zdivide_dec 4096 (Int.unsigned n) AuxStateDataType.HPS).
unfold Z.divide in *.
destruct d0.
destruct d1.
Focus 2.
simpl in *.
discriminate Hdestruct0.
Focus 2.
simpl in *.
discriminate Hdestruct.
exploit (Z.mod_unique_pos (Int.unsigned n0) 4096 x 0).
omega.
omega.
intro n0modval.
exploit (Z.mod_unique_pos (Int.unsigned n) 4096 x0 0).
omega.
omega.
intro nmodval.
destruct (zle_le 1073741824 (Int.unsigned n0) 4026527744).
Focus 2.
simpl in *.
discriminate H5.
unfold sys_mmap_body.
rewrite negval.
assert(0 <= _x1 <= Int.max_unsigned).
{
functional inversion H9; try omega.
functional inversion H; try omega.
subst.
functional inversion H36; try omega.
destruct _x6.
generalize (valid_nps pg); intro.
functional inversion H25.
clear H47.
rewrite <- H49 in a0.
simpl in a0.
omega.
omega.
omega.
}
assert(0 <= _x3 <= Int.max_unsigned).
{
functional inversion H12; try omega.
functional inversion H31; try omega.
}
esplit.
repeat vcgen.
unfold get_curid_spec.
rewrite ikern, pg, ihost.
instantiate (1:= (Int.repr (cid d))).
rewrite Int.unsigned_repr; try omega.
reflexivity.
repeat vcgen.
repeat vcgen.
repeat vcgen.
discharge_cmp.
discharge_unsigned_range.
discharge_unsigned_range.
repeat vcgen.
discharge_cmp.
discharge_cmp.
ptreesolve.
discharge_cmp.
repeat vcgen.
discharge_cmp.
econstructor.
discharge_cmp.
discharge_cmp.
econstructor.
ptreesolve.
discharge_cmp.
repeat vcgen.
repeat ptreesolve.
simpl.
repeat ptreesolve.
repeat vcgen.
repeat ptreesolve.
simpl.
repeat ptreesolve.
repeat vcgen.
repeat vcgen.
discharge_cmp.
omega.
omega.
omega.
repeat vcgenfull.
change (Z.lor (Z.lor 1 4) 2) with 7.
instantiate (1:= (Int.repr _x1)).
rewrite Int.unsigned_repr; try omega.
eassumption.
repeat vcgen.
instantiate (1:= (Int.repr hpa')).
rewrite Int.unsigned_repr; try omega.
reflexivity.
repeat ptreesolve.
discharge_cmp.
repeat ptreesolve.
simpl.
repeat ptreesolve.
simpl.
unfold sem_mod, sem_binarith.
simpl.
discharge_cmp.
discharge_cmp.
repeat ptreesolve.
simpl.
repeat ptreesolve.
repeat vcgen.
repeat vcgen.
repeat ptreesolve.
simpl.
repeat ptreesolve.
repeat vcgen.
repeat vcgenfull.
rewrite <- n0modval.
rewrite Z.add_0_r.
repeat discharge_unsigned_range.
rewrite <- n0modval.
rewrite Z.add_0_r.
repeat discharge_unsigned_range.
unfold hpa0 in *.
functional inversion H2; subst.
functional inversion H3; subst.
functional inversion H4; subst.
unfold andb in H5.
subdestruct.
destruct (Zdivide_dec 4096 (Int.unsigned n0) AuxStateDataType.HPS).
destruct (Zdivide_dec 4096 (Int.unsigned n) AuxStateDataType.HPS).
unfold Z.divide in *.
destruct d0.
destruct d1.
Focus 2.
simpl in *.
discriminate Hdestruct0.
Focus 2.
simpl in *.
discriminate Hdestruct.
exploit (Z.mod_unique_pos (Int.unsigned n0) 4096 x 0).
omega.
omega.
intro n0modval.
exploit (Z.mod_unique_pos (Int.unsigned n) 4096 x0 0).
omega.
omega.
intro nmodval.
destruct (zle_le 1073741824 (Int.unsigned n0) 4026527744).
Focus 2.
simpl in *.
discriminate H5.
unfold sys_mmap_body.
rewrite negval.
assert(0 <= _x1 <= Int.max_unsigned).
{
functional inversion H9; try omega.
functional inversion H27; try omega.
}
subst.
esplit.
repeat vcgen.
unfold get_curid_spec.
rewrite ikern, pg, ihost.
instantiate (1:= (Int.repr (cid d))).
rewrite Int.unsigned_repr; try omega.
reflexivity.
repeat vcgen.
repeat vcgen.
repeat vcgen.
discharge_cmp.
discharge_unsigned_range.
discharge_unsigned_range.
repeat vcgen.
discharge_cmp.
discharge_cmp.
ptreesolve.
discharge_cmp.
repeat vcgen.
discharge_cmp.
econstructor.
discharge_cmp.
discharge_cmp.
econstructor.
ptreesolve.
discharge_cmp.
repeat vcgen.
repeat ptreesolve.
simpl.
repeat ptreesolve.
repeat vcgen.
repeat ptreesolve.
simpl.
repeat ptreesolve.
repeat vcgen.
repeat vcgen.
discharge_cmp.
discharge_unsigned_range.
omega.
omega.
repeat vcgenfull.
repeat ptreesolve.
discharge_cmp.
repeat ptreesolve.
simpl.
repeat ptreesolve.
simpl.
unfold sem_mod, sem_binarith.
simpl.
discharge_cmp.
discharge_cmp.
repeat ptreesolve.
simpl.
repeat ptreesolve.
repeat vcgen.
repeat vcgen.
repeat ptreesolve.
simpl.
repeat ptreesolve.
repeat vcgen.
repeat vcgenfull.
rewrite <- n0modval.
rewrite Z.add_0_r.
repeat discharge_unsigned_range.
rewrite <- n0modval.
rewrite Z.add_0_r.
repeat discharge_unsigned_range.
functional inversion H2; subst.
functional inversion H3; subst.
functional inversion H4; subst.
unfold andb in H5.
subdestruct.
destruct (Zdivide_dec 4096 (Int.unsigned n0) AuxStateDataType.HPS).
destruct (Zdivide_dec 4096 (Int.unsigned n) AuxStateDataType.HPS).
unfold Z.divide in *.
destruct d0.
destruct d1.
Focus 2.
simpl in *.
discriminate Hdestruct0.
Focus 2.
simpl in *.
discriminate Hdestruct.
exploit (Z.mod_unique_pos (Int.unsigned n0) 4096 x 0).
omega.
omega.
intro n0modval.
exploit (Z.mod_unique_pos (Int.unsigned n) 4096 x0 0).
omega.
omega.
intro nmodval.
destruct (zle_le 1073741824 (Int.unsigned n0) 4026527744).
simpl in *.
discriminate H5.
unfold sys_mmap_body.
rewrite negval.
destruct o.
{
esplit.
repeat vcgen.
unfold get_curid_spec.
rewrite ikern, pg, ihost.
instantiate (1:= (Int.repr (cid d))).
rewrite Int.unsigned_repr; try omega.
reflexivity.
repeat vcgen.
repeat vcgen.
repeat vcgen.
discharge_cmp.
discharge_unsigned_range.
discharge_unsigned_range.
repeat vcgen.
discharge_cmp.
discharge_cmp.
ptreesolve.
discharge_cmp.
repeat vcgen.
discharge_cmp.
repeat ptreesolve.
discharge_cmp.
repeat vcgen.
}
{
esplit.
repeat vcgen.
unfold get_curid_spec.
rewrite ikern, pg, ihost.
instantiate (1:= (Int.repr (cid d))).
rewrite Int.unsigned_repr; try omega.
reflexivity.
repeat vcgen.
repeat vcgen.
repeat vcgen.
discharge_cmp.
discharge_unsigned_range.
discharge_unsigned_range.
repeat vcgen.
discharge_cmp.
discharge_cmp.
ptreesolve.
discharge_cmp.
repeat vcgen.
discharge_cmp.
repeat ptreesolve.
discharge_cmp.
repeat vcgen.
repeat vcgen.
repeat vcgen.
repeat ptreesolve.
discharge_cmp.
repeat vcgen.
}
{
destruct (Zdivide_dec 4096 (Int.unsigned n0) AuxStateDataType.HPS).
destruct (Zdivide_dec 4096 (Int.unsigned n) AuxStateDataType.HPS).
simpl in *.
discriminate Hdestruct0.
Focus 2.
simpl in *.
discriminate Hdestruct.
unfold Z.divide in d0.
destruct d0.
exploit (Z.mod_unique_pos (Int.unsigned n0) 4096 x 0).
omega.
omega.
intro n0modval.
assert(nmodneq0: 0 <> Int.unsigned n mod 4096).
{
intro.
symmetry in H.
eapply Z.mod_divide in H.
contradiction.
omega.
}
assert(0 <= Int.unsigned n mod 4096 < 4096).
{
apply Z.mod_bound_pos.
discharge_unsigned_range.
omega.
}
unfold sys_mmap_body.
esplit.
repeat vcgen.
unfold get_curid_spec.
rewrite ikern, pg, ihost.
instantiate (1:= (Int.repr (cid d))).
rewrite Int.unsigned_repr; try omega.
reflexivity.
repeat vcgen.
repeat vcgen.
repeat vcgen.
discharge_cmp.
discharge_unsigned_range.
discharge_unsigned_range.
repeat vcgen.
discharge_cmp.
discharge_cmp.
ptreesolve.
discharge_cmp.
repeat vcgen.
discharge_cmp.
repeat ptreesolve.
discharge_cmp.
repeat vcgen.
}
{
destruct (Zdivide_dec 4096 (Int.unsigned n0) AuxStateDataType.HPS).
simpl in *.
discriminate Hdestruct.
assert(nmodneq0: 0 <> Int.unsigned n0 mod 4096).
{
intro.
symmetry in H.
eapply Z.mod_divide in H.
contradiction.
omega.
}
assert(0 <= Int.unsigned n0 mod 4096 < 4096).
{
apply Z.mod_bound_pos.
discharge_unsigned_range.
omega.
}
unfold sys_mmap_body.
esplit.
repeat vcgen.
unfold get_curid_spec.
rewrite ikern, pg, ihost.
instantiate (1:= (Int.repr (cid d))).
rewrite Int.unsigned_repr; try omega.
reflexivity.
repeat vcgen.
repeat vcgen.
repeat vcgen.
discharge_cmp.
discharge_unsigned_range.
discharge_unsigned_range.
repeat vcgen.
discharge_cmp.
discharge_cmp.
ptreesolve.
discharge_cmp.
repeat vcgen.
discharge_cmp.
repeat ptreesolve.
discharge_cmp.
repeat vcgen.
}
Qed.
End SysMMapBody.
Theorem sys_mmap_code_correct:
spec_le (sys_mmap ↦ trap_mmap_spec_low) (〚sys_mmap ↦ f_sys_mmap 〛L).
Proof.
set (L' := L) in *. unfold L in *.
fbigstep_pre L'.
fbigstep (sys_mmap_body_correct s (Genv.globalenv p) makeglobalenv b0 Hb0fs Hb0fp b1 Hb1fs Hb1fp b2 Hb2fs Hb2fp b3 Hb3fs Hb3fp b4 Hb4fs Hb4fp b5 Hb5fs Hb5fp b6 Hb6fs Hb6fp b7 Hb7fs Hb7fp m'0 labd labd0 (PTree.empty _)
(bind_parameter_temps' (fn_params f_sys_mmap)
nil
(create_undef_temps (fn_temps f_sys_mmap)))) H1.
Qed.
End SYSMMAP.*)
Section PTFRESV.
Let L: compatlayer (cdata RData) := get_curid ↦ gensem get_curid_spec
⊕ pt_resv ↦ gensem ptResv_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section PtfResvBody.
Context `{Hwb: WritableBlockOps}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
(** get_curid *)
Variable bget_curid: block.
Hypothesis hget_curid1 : Genv.find_symbol ge get_curid = Some bget_curid.
Hypothesis hget_curid2 : Genv.find_funct_ptr ge bget_curid = Some (External (EF_external get_curid (signature_of_type Tnil tint cc_default)) Tnil tint cc_default).
(** pt_resv *)
Variable bpt_resv: block.
Hypothesis hpt_resv1 : Genv.find_symbol ge pt_resv = Some bpt_resv.
Hypothesis hpt_resv2 : Genv.find_funct_ptr ge bpt_resv = Some (External (EF_external pt_resv (signature_of_type (Tcons tint (Tcons tint (Tcons tint Tnil))) tint cc_default)) (Tcons tint (Tcons tint (Tcons tint Tnil))) tint cc_default).
Lemma high_inv_curid:
forall d,
high_level_invariant d ->
ikern d = true ->
ihost d = true ->
pg d = true ->
get_curid_spec d = Some (cid d)
/\ 0 <= (cid d) <= Int.max_unsigned.
Proof.
unfold get_curid_spec. intros.
subrewrite'. split; trivial.
destruct H.
generalize max_unsigned_val; intro muval.
omega.
Qed.
Require Import AuxLemma.
Lemma ptInsert0_range:
forall p1 p2 v d d' re n,
ptInsert0_spec p1 p2 v n d = Some (d', re) ->
262144 <= nps d <= 1048576 ->
0 <= re <= Int.max_unsigned.
Proof.
intros. rewrite_omega.
functional inversion H; subst; try omega.
functional inversion H10; try subst; try omega.
Qed.
Lemma ptResv_range:
forall v1 v2 v3 d d' r,
ptResv_spec v1 v2 v3 d = Some (d', r) ->
262144 <= nps d <= 1048576 ->
0 <= r <= Int.max_unsigned.
Proof.
intros. rewrite_omega.
functional inversion H; clear H; [omega|].
eapply ptInsert0_range; eauto.
functional inversion H2; subst; trivial.
Qed.
Lemma ptResv_range':
forall v1 v2 v3 d d' r,
ptResv_spec v1 v2 v3 d = Some (d', r) ->
high_level_invariant d ->
pg d = true ->
0 <= r <= Int.max_unsigned.
Proof.
intros.
eapply ptResv_range; eauto.
inv H0.
eauto.
Qed.
Lemma ptfault_resv_body_correct:
forall m d d' env le vaddr,
env = PTree.empty _ ->
PTree.get _vaddr le = Some (Vint vaddr) ->
ptfault_resv_spec (Int.unsigned vaddr) d = Some d' ->
high_level_invariant d ->
exists le',
exec_stmt ge env le ((m, d): mem) ptfault_resv_body E0 le' (m, d') Out_normal.
Proof.
generalize max_unsigned_val; intro muval.
intros.
unfold ptfault_resv_body.
functional inversion H1; subst.
{
exploit high_inv_curid; eauto.
intros (Hget & Hrange).
exploit ptResv_range'; eauto. intros Hrange'.
esplit.
repeat vcgen.
- rewrite <- (Int.unsigned_repr (cid d)).
reflexivity.
assumption.
- rewrite <- (Int.unsigned_repr (_x0)) in H8;
eassumption.
- apply Hrange.
- apply Hrange.
}
{
exploit high_inv_curid; eauto.
intros (Hget & Hrange).
esplit.
repeat vcgen.
rewrite <- (Int.unsigned_repr (cid d')).
reflexivity.
assumption.
}
Qed.
End PtfResvBody.
Theorem ptfault_resv_code_correct:
spec_le (ptfault_resv ↦ ptf_resv_spec_low) (〚ptfault_resv ↦ f_ptfault_resv 〛L).
Proof.
set (L' := L) in *. unfold L in *.
fbigstep_pre L'.
fbigstep (ptfault_resv_body_correct s (Genv.globalenv p) makeglobalenv b0 Hb0fs Hb0fp b1 Hb1fs Hb1fp
m'0 labd labd0 (PTree.empty _)
(bind_parameter_temps' (fn_params f_ptfault_resv)
(Vint i::nil)
(create_undef_temps (fn_temps f_ptfault_resv)))) H1.
Qed.
End PTFRESV.
Section SYSPROCCREATE.
Let L: compatlayer (cdata RData) := uctx_arg2 ↦ gensem uctx_arg2_spec
⊕ uctx_arg3 ↦ gensem uctx_arg3_spec
⊕ uctx_set_errno ↦ gensem uctx_set_errno_spec
⊕ uctx_set_retval1 ↦ gensem uctx_set_retval1_spec
⊕ get_curid ↦ gensem get_curid_spec
⊕ container_get_nchildren ↦ gensem container_get_nchildren_spec
⊕ container_can_consume ↦ gensem container_can_consume_spec
⊕ proc_create ↦ proc_create_compatsem proc_create_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section SysProcCreateBody.
Context `{Hwb: WritableBlockOps}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
(** uctx_arg2 *)
Variable buctx_arg2: block.
Hypothesis huctx_arg21 : Genv.find_symbol ge uctx_arg2 = Some buctx_arg2.
Hypothesis huctx_arg22 : Genv.find_funct_ptr ge buctx_arg2 = Some (External (EF_external uctx_arg2 (signature_of_type Tnil tint cc_default)) Tnil tint cc_default).
(** uctx_arg3 *)
Variable buctx_arg3: block.
Hypothesis huctx_arg31 : Genv.find_symbol ge uctx_arg3 = Some buctx_arg3.
Hypothesis huctx_arg32 : Genv.find_funct_ptr ge buctx_arg3 = Some (External (EF_external uctx_arg3 (signature_of_type Tnil tint cc_default)) Tnil tint cc_default).
(** uctx_set_errno *)
Variable buctx_set_errno: block.
Hypothesis huctx_set_errno1 : Genv.find_symbol ge uctx_set_errno = Some buctx_set_errno.
Hypothesis huctx_set_errno2 : Genv.find_funct_ptr ge buctx_set_errno = Some (External (EF_external uctx_set_errno (signature_of_type (Tcons tint Tnil) Tvoid cc_default)) (Tcons tint Tnil) Tvoid cc_default).
(** uctx_set_retval1 *)
Variable buctx_set_retval1: block.
Hypothesis huctx_set_retval11 : Genv.find_symbol ge uctx_set_retval1 = Some buctx_set_retval1.
Hypothesis huctx_set_retval12 : Genv.find_funct_ptr ge buctx_set_retval1 = Some (External (EF_external uctx_set_retval1 (signature_of_type (Tcons tint Tnil) Tvoid cc_default)) (Tcons tint Tnil) Tvoid cc_default).
(** get_curid *)
Variable bget_curid: block.
Hypothesis hget_curid1 : Genv.find_symbol ge get_curid = Some bget_curid.
Hypothesis hget_curid2 : Genv.find_funct_ptr ge bget_curid = Some (External (EF_external get_curid (signature_of_type Tnil tint cc_default)) Tnil tint cc_default).
(** container_get_nchildren *)
Variable bget_nchildren: block.
Hypothesis hget_nchildren1 : Genv.find_symbol ge container_get_nchildren = Some bget_nchildren.
Hypothesis hget_nchildren2 :
Genv.find_funct_ptr ge bget_nchildren =
Some (External (EF_external container_get_nchildren
(signature_of_type (Tcons tint Tnil) tint cc_default))
(Tcons tint Tnil) tint cc_default).
(** container_can_consume *)
Variable bcan_consume: block.
Hypothesis hcan_consume1 : Genv.find_symbol ge container_can_consume = Some bcan_consume.
Hypothesis hcan_consume2 : Genv.find_funct_ptr ge bcan_consume = Some (External (EF_external container_can_consume (signature_of_type (Tcons tint (Tcons tint Tnil)) tint cc_default)) (Tcons tint (Tcons tint Tnil)) tint cc_default).
(** proc_create *)
Variable bproc_create: block.
Hypothesis hproc_create1 : Genv.find_symbol ge proc_create = Some bproc_create.
Hypothesis hproc_create2 : Genv.find_funct_ptr ge bproc_create = Some (External (EF_external proc_create (signature_of_type (Tcons (tptr tvoid) (Tcons (tptr tvoid) (Tcons tint Tnil))) tint cc_default)) (Tcons (tptr tvoid) (Tcons (tptr tvoid) (Tcons tint Tnil))) tint cc_default).
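(* [if_simpl] rewrites known (in)equations from the context into the guards
   of [if]-expressions in the goal, in either orientation, until no guard
   matches a hypothesis. *)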
Ltac if_simpl :=
repeat match goal with
| [ H : ?a = _ |- context [if ?a then _ else _] ] => rewrite H
| [ H : _ = ?a |- context [if ?a then _ else _] ] => rewrite <- H
end.
Lemma sys_proc_create_body_correct:
forall m d d' env le,
env = PTree.empty _ ->
trap_proc_create_spec sc m d = Some d' ->
high_level_invariant d ->
exists le',
exec_stmt ge env le ((m, d): mem) sys_proc_create_body E0 le' (m, d') Out_normal.
Proof.
generalize max_unsigned_val; intro muval.
generalize (tptrsize tvoid).
intros.
subst.
destruct H2.
destruct valid_container.
rename H1 into Hspec; unfold trap_proc_create_spec in Hspec.
destruct (uctx_arg3_spec d) eqn:Harg3; try discriminate Hspec.
assert (Herrno: uctx_set_errno_spec 1 d = Some d' \/
exists abd', uctx_set_errno_spec 0 abd' = Some d')
by (subdestruct; eauto); destruct Herrno as [Herrno|Herrno].
(* Case 1: one of the if conditions fails; return error code *)
functional inversion Herrno; subst.
functional inversion Harg3; subst.
specialize (cvalid_max_children _ (proj1 (correct_curid H2))).
unfold sys_proc_create_body.
destruct (zle_le 0 (Int.unsigned n)
(cquota (ZMap.get (cid d) (AC d)) -
cusage (ZMap.get (cid d) (AC d)))) eqn:Hquota.
{
esplit.
d3 vcgen.
repeat vcgen.
unfold get_curid_spec; rewrites.
rewrite Int.unsigned_repr; eauto; omega.
d2 vcgen.
repeat vcgen.
d2 vcgen.
repeat vcgen.
unfold container_can_consume_spec; rewrites.
erewrite (proj1 (correct_curid _)); rewrite Int.unsigned_repr; eauto; omega.
d2 vcgen.
repeat vcgen.
unfold container_get_nchildren_spec; rewrites.
erewrite (proj1 (correct_curid _)); rewrite Int.unsigned_repr; eauto; omega.
destruct (zle_le 0 (cid d * max_children + 1 + max_children) num_id) eqn:Hchild.
{
destruct (zlt (Z.of_nat (length (cchildren (ZMap.get (cid d) (AC d))))) max_children) eqn:Hnc.
{
subdestruct.
rewrite <- Herrno in Hspec.
unfold uctx_set_errno_spec in Hspec; subdestruct.
inv Hspec.
rename H37 into Hspec.
apply f_equal with (f:= PTree.get (ZIndexed.index (cid r0))) in Hspec.
rewrite 2 PTree.gss in Hspec.
inv Hspec.
rename H41 into Hspec.
apply f_equal with (f:= PTree.get 14) in Hspec.
rewrite 2 PTree.gss in Hspec; inv Hspec.
}
{
vcgen.
repeat vcgen.
cases; try omega; vcgen.
repeat vcgen.
}
}
{
vcgen.
repeat vcgen.
cases; try omega; vcgen.
repeat vcgen.
}
}
{
esplit.
d3 vcgen.
repeat vcgen.
unfold get_curid_spec; if_simpl.
rewrite Int.unsigned_repr; eauto; omega.
d2 vcgen.
repeat vcgen.
d2 vcgen.
repeat vcgen.
unfold container_can_consume_spec; rewrites.
erewrite (proj1 (correct_curid _)); rewrite Int.unsigned_repr; eauto; omega.
d2 vcgen.
repeat vcgen.
unfold container_get_nchildren_spec; rewrites.
erewrite (proj1 (correct_curid _)); rewrite Int.unsigned_repr; eauto; omega.
destruct (zle_le 0 (cid d * max_children + 1 + max_children) num_id) eqn:Hchild.
{
destruct (zlt (Z.of_nat (length (cchildren (ZMap.get (cid d) (AC d))))) max_children);
repeat vcgen.
}
{
repeat vcgen.
}
}
(* Case 2: requester has enough available quota to spawn child, and has not exceeded
its maximum number of allowed children *)
destruct Herrno as [d'' Herrno].
assert (Hcon: uctx_set_errno_spec 0 d'' <> uctx_set_errno_spec 1 d).
{
unfold uctx_set_errno_spec.
functional inversion Herrno; functional inversion Harg3; rewrites.
intro Hcon; inv Hcon.
rename H38 into Hcon.
apply f_equal with (f:= PTree.get (ZIndexed.index (cid d''))) in Hcon.
rewrite 2 PTree.gss in Hcon; inv Hcon.
rename H42 into Hcon.
apply f_equal with (f:= PTree.get 14) in Hcon.
rewrite 2 PTree.gss in Hcon; inv Hcon.
}
subdestruct; try solve [contradiction Hcon; rewrite Herrno, Hspec; reflexivity].
unfold ELF_ident in Hdestruct5.
unfold Int.eq in Hdestruct13; subdestruct.
injection Hdestruct5; intros; subst.
rewrite Hdestruct7 in Hdestruct9.
injection Hdestruct9; intros; subst.
clear Hdestruct17.
apply unsigned_inj in e0.
generalize Hdestruct14; intro proc_create_inv.
unfold proc_create_spec in proc_create_inv.
subdestruct.
subst.
destruct a0.
injection proc_create_inv; intros; subst.
unfold sys_proc_create_body.
destruct (correct_curid eq_refl) as [Hused _].
specialize (cvalid_quota _ Hused); specialize (cvalid_usage _ Hused).
esplit.
d3 vcgen.
repeat vcgen.
unfold get_curid_spec; if_simpl.
rewrite Int.unsigned_repr; eauto; omega.
d4 vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
unfold container_can_consume_spec; if_simpl.
rewrite Int.unsigned_repr; eauto; omega.
d2 vcgen.
repeat vcgen.
unfold container_get_nchildren_spec; if_simpl.
rewrite Int.unsigned_repr; eauto; omega.
vcgen.
repeat vcgen.
repeat vcgen.
d2 vcgen.
repeat vcgen.
d2 vcgen.
d4 vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
erewrite stencil_matches_symbols; eauto.
repeat vcgen.
erewrite stencil_matches_symbols; eauto.
repeat vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
repeat vcgen.
unfold proc_create_spec; if_simpl.
rewrite Hdestruct22; rewrite Int.unsigned_repr; eauto; omega.
repeat vcgen.
Grab Existential Variables.
assumption.
assumption.
assumption.
assumption.
Qed.
End SysProcCreateBody.
Theorem sys_proc_create_code_correct:
spec_le (sys_proc_create ↦ trap_proc_create_spec_low) (〚sys_proc_create ↦ f_sys_proc_create 〛L).
Proof.
set (L' := L) in *. unfold L in *.
fbigstep_pre L'.
fbigstep (sys_proc_create_body_correct
s (Genv.globalenv p) makeglobalenv b0 Hb0fs Hb0fp b1 Hb1fs Hb1fp
b2 Hb2fs Hb2fp b3 Hb3fs Hb3fp b4 Hb4fs Hb4fp b5 Hb5fs Hb5fp
b6 Hb6fs Hb6fp b7 Hb7fs Hb7fp m'0 labd labd0 (PTree.empty _)
(bind_parameter_temps' (fn_params f_sys_proc_create) nil
(create_undef_temps (fn_temps f_sys_proc_create)))) H0.
Qed.
End SYSPROCCREATE.
End WithPrimitives.
End TTRAPARGCODE2.
|
{"author": "npe9", "repo": "certikos", "sha": "dd2631a096523a29a2e8a3101d8a224b754ea56a", "save_path": "github-repos/coq/npe9-certikos", "path": "github-repos/coq/npe9-certikos/certikos-dd2631a096523a29a2e8a3101d8a224b754ea56a/mcertikos/trap/TTrapArgCode2.v"}
|
#Outlier Detection
# WARNING: THE DATA SET USED FOR OUTLIER DETECTION MUST BE ENTIRELY FILLED IN (NO MISSING VALUES)
# HERE WE USE THE MEAN METHOD TO FILL IN MISSING VALUES; REPLACE "MEAN" WITH "MEDIAN" OR "KNN" TO USE ANOTHER METHOD
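# A minimal sketch of the "MEAN" filling step referenced above (the raw file
# name "x_train.csv" is assumed; illustration only): replace every NA by its
# column mean, then write the filled file used by the rest of this script.
x_raw <- read.csv("x_train.csv")
for (i in 2:ncol(x_raw)) {
  x_raw[is.na(x_raw[, i]), i] <- mean(x_raw[, i], na.rm = TRUE)
}
write.csv(x_raw, "x_train_mean.csv", row.names = FALSE)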
######################################## PERCENTILE ################################################
#######################TEST###################
x_train = read.csv("x_train_mean.csv")
x_train
#EXAMPLE for the first feature column
#Plot the density function to check whether the distribution looks Gaussian
d <- density(x_train[,2])
plot(d)
#Get the lower bound
lower_bound <- quantile(x_train[,2], 0.025)
lower_bound
#Get the upper bound
upper_bound <- quantile(x_train[,2], 0.975)
upper_bound
#Get the row indices of the outliers
outlier_ind <- which(x_train[,2] < lower_bound | x_train[,2] > upper_bound)
outlier_ind
#Get the number of outliers
nbr <- length(outlier_ind)
nbr
######################SELECTION AND REPLACEMENT##################
#############TRAININGSET################
#FOR ALL THE DATA SET USING MEAN
x_train = read.csv("x_train_mean.csv")
x_train
#Check every column for outliers
for(i in 2:ncol(x_train)){
lower_bound <- quantile(x_train[,i], 0.025)
upper_bound <- quantile(x_train[,i], 0.975)
outlier_ind <- which(x_train[,i] < lower_bound | x_train[,i] > upper_bound)
#replace outliers with NA
if (length(outlier_ind) > 0) {
for (j in 1:length(outlier_ind)){
x_train[outlier_ind[j], i] <- NA
}
}
}
#The data set with NAs in place of outliers
x_train
##############TESTSET##################
x_test = read.csv("x_test_mean.csv")
x_test
#Check every column for outliers
for(i in 2:ncol(x_test)){
lower_bound <- quantile(x_test[,i], 0.025)
upper_bound <- quantile(x_test[,i], 0.975)
outlier_ind <- which(x_test[,i] < lower_bound | x_test[,i] > upper_bound)
#replace outliers with NA
if (length(outlier_ind) > 0) {
for (j in 1:length(outlier_ind)){
x_test[outlier_ind[j], i] <- NA
}
}
}
#The data set with NAs in place of outliers
x_test
############WRITING PROCESS################
write.csv(x_train, "x_train_mean_percentile.csv", row.names = FALSE)
write.csv(x_test, "x_test_mean_percentile.csv", row.names = FALSE)
######################################### HAMPEL ####################################################
#############TEST####################
x_train = read.csv("x_train_mean.csv")
x_train
#EXAMPLE for the first feature column only
#Get the lower bound
lower_bound <- median(x_train[,2]) - 3 * mad(x_train[,2])
lower_bound
#Get the upper bound
upper_bound <- median(x_train[,2]) + 3 * mad(x_train[,2])
upper_bound
#Get the row indices of the outliers
outlier_ind <- which(x_train[,2] < lower_bound | x_train[,2] > upper_bound)
outlier_ind
#Get the number of outliers
nbr <- length(outlier_ind)
nbr
######################SELECTION AND REPLACEMENT##################
#############TRAININGSET##########
x_train = read.csv("x_train_mean.csv")
x_train
#Check every column for outliers
for(i in 2:ncol(x_train)){
lower_bound <- median(x_train[,i]) - 3 * mad(x_train[,i])
upper_bound <- median(x_train[,i]) + 3 * mad(x_train[,i])
outlier_ind <- which(x_train[,i] < lower_bound | x_train[,i] > upper_bound)
#replace outliers with NA
if (length(outlier_ind) > 0) {
for (j in 1:length(outlier_ind)){
x_train[outlier_ind[j], i] <- NA
}
}
}
#The data set with NAs in place of outliers
x_train
###########TESTSET##########
x_test = read.csv("x_test_mean.csv")
x_test
#Check every column for outliers
for(i in 2:ncol(x_test)){
lower_bound <- median(x_test[,i]) - 3 * mad(x_test[,i])
upper_bound <- median(x_test[,i]) + 3 * mad(x_test[,i])
outlier_ind <- which(x_test[,i] < lower_bound | x_test[,i] > upper_bound)
#replace outliers with NA
if (length(outlier_ind) > 0) {
for (j in 1:length(outlier_ind)){
x_test[outlier_ind[j], i] <- NA
}
}
}
#The data set with NAs in place of outliers
x_test
######WRITING PROCESS############
write.csv(x_train, "x_train_mean_hampel.csv", row.names = FALSE)
write.csv(x_test, "x_test_mean_hampel.csv", row.names = FALSE)
########################################### ISOLATION FOREST #######################################################
# IsolationForest Method
install.packages("solitude")
library(solitude)
x_train = read.csv("x_train_mean.csv")
#Synthetic demo: 1000 inliers plus 10% injected outliers (labels kept for reference)
n = 1000
Var1 = c(rnorm(n, 0, 0.5), rnorm(n*0.1, -2, 1))
Var2 = c(rnorm(n, 0, 0.5), rnorm(n*0.1, 2, 1))
is_outlier = c(rep(0, n), rep(1, 0.1*n))
data = data.frame(Var1, Var2)
#sample_size must be the number of rows; length(data) would give the number of columns
iforest <- solitude::isolationForest$new(sample_size = nrow(data))
iforest$fit(data)
#Score the fitted data; higher anomaly_score means easier to isolate, i.e. more
#likely an outlier (solitude's predict() method is assumed here)
scores <- iforest$predict(data)
scores
############################################# DBSCAN ########################################################
install.packages("ggplot2")
install.packages("data.table")
install.packages("dbscan")
library(ggplot2)
library(data.table)
library(dbscan)
x_train <- read.csv("x_train_mean.csv")
#scale each column; sd(y)^as.logical(sd(y)) avoids dividing by zero for constant columns
x_scale <- apply(x_train, 2, function(y) (y - mean(y)) / sd(y) ^ as.logical(sd(y)))
print(sum(is.na(x_train)))
distance_matrix <- as.matrix(dist(x_scale))
pca <- prcomp(distance_matrix)
embedding <- data.table(pca$x[, 1:2])
embedding[, ids := rownames(x_train)]
ggplot(embedding, aes(x = PC1, y = PC2)) +
geom_point(size = 10, colour = "steelblue", alpha = 0.3) +
geom_text(aes(label = ids), check_overlap = TRUE) +
theme_minimal()
embedding[, DClusters := dbscan(x_scale, eps = 0.2, minPts = 2)$cluster]
ggplot(embedding, aes(x = PC1, y = PC2)) +
geom_point(aes(colour = factor(DClusters)), size = 10, alpha = 0.3) +
geom_text(aes(label = ids), check_overlap = TRUE) +
theme_minimal()
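# Hedged follow-up: in the dbscan package, points assigned to cluster 0 are
# noise, so those rows can be read off as outlier candidates.
dbscan_outlier_ind <- which(embedding$DClusters == 0)
dbscan_outlier_ind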
################################### EXPECTATION MAXIMISATION ###############################
install.packages("ggplot2")
install.packages("data.table")
install.packages("mclust")
library(ggplot2)
library(data.table)
library(mclust)
x_train <- read.csv("x_train_mean.csv")
#scale each column; sd(y)^as.logical(sd(y)) avoids dividing by zero for constant columns
x_scale <- apply(x_train, 2, function(y) (y - mean(y)) / sd(y) ^ as.logical(sd(y)))
print(sum(is.na(x_train)))
distance_matrix <- as.matrix(dist(x_scale))
pca <- prcomp(distance_matrix)
embedding <- data.table(pca$x[, 1:2])
embedding[, ids := rownames(x_train)]
ggplot(embedding, aes(x = PC1, y = PC2)) +
geom_point(size = 10, colour = "steelblue", alpha = 0.3) +
geom_text(aes(label = ids), check_overlap = TRUE) +
theme_minimal()
cars_em <- Mclust(scale(x_train), G = 4)
embedding[, EMClusters := cars_em$classification]
ggplot(embedding, aes(x = PC1, y = PC2)) +
geom_point(aes(colour = factor(EMClusters)), size = 10, alpha = 0.3) +
geom_text(aes(label = ids), check_overlap = TRUE) +
theme_minimal()
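# Hedged follow-up: Mclust also reports a per-sample classification
# uncertainty; samples above a threshold (0.9 here is an arbitrary choice)
# can be treated as outlier candidates.
em_uncertain_ind <- which(cars_em$uncertainty > 0.9)
em_uncertain_ind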
################################### PCOutlierDetection ######################################
install.packages("OutlierDetection")
library(OutlierDetection)
x_train <- read.csv("x_train_mean.csv")
#PCA-based detection expects the full feature matrix, so drop the first (presumed ID) column
outdetect <- PCOutlierDetection(x_train[, -1])
outdetect
|
{"hexsha": "170ef4386e73ff75c74096f73d6409e16f4832b8", "size": 6873, "ext": "r", "lang": "R", "max_stars_repo_path": "outlier_detection.r", "max_stars_repo_name": "AugusteLef/MRI", "max_stars_repo_head_hexsha": "58e48e9e7026dd3a9e043a5d5f912202bf5a2b3b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "outlier_detection.r", "max_issues_repo_name": "AugusteLef/MRI", "max_issues_repo_head_hexsha": "58e48e9e7026dd3a9e043a5d5f912202bf5a2b3b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "outlier_detection.r", "max_forks_repo_name": "AugusteLef/MRI", "max_forks_repo_head_hexsha": "58e48e9e7026dd3a9e043a5d5f912202bf5a2b3b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0530612245, "max_line_length": 116, "alphanum_fraction": 0.6155972647, "num_tokens": 1914}
|
#!/usr/bin/env python
import os
import os.path as osp
import numpy as np
import skimage.io
import instance_occlsegm_lib
def main():
root_dir = osp.expanduser('~/.ros/instance_occlsegm')
for save_dir in sorted(os.listdir(root_dir)):
save_dir = osp.join(root_dir, save_dir)
print('-' * 79)
print(save_dir)
for frame_dir in sorted(os.listdir(save_dir)):
frame_dir = osp.join(save_dir, frame_dir)
print(frame_dir)
img_file = osp.join(frame_dir, 'image.jpg')
img = skimage.io.imread(img_file)
depth_file = osp.join(frame_dir, 'depth.npz')
depth = np.load(depth_file)['arr_0']
depth_viz = instance_occlsegm_lib.image.colorize_depth(
depth, min_value=0.4, max_value=0.9
)
# depth_viz_file = osp.join(frame_dir, 'depth_viz.jpg')
# depth_viz = skimage.io.imread(depth_viz_file)
viz = instance_occlsegm_lib.image.tile([img, depth_viz])
instance_occlsegm_lib.io.imshow(viz)
if instance_occlsegm_lib.io.waitkey() == ord('q'):
return
if __name__ == '__main__':
main()
|
{"hexsha": "397d0ab92b9d1c2cbe467312a91e7b0a5a8141fd", "size": 1196, "ext": "py", "lang": "Python", "max_stars_repo_path": "demos/instance_occlsegm/ros/instance_occlsegm/scripts/view_collected_data.py", "max_stars_repo_name": "pazeshun/jsk_apc", "max_stars_repo_head_hexsha": "0ff42000ad5992f8a31e719a5360a39cf4fa1fde", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demos/instance_occlsegm/ros/instance_occlsegm/scripts/view_collected_data.py", "max_issues_repo_name": "pazeshun/jsk_apc", "max_issues_repo_head_hexsha": "0ff42000ad5992f8a31e719a5360a39cf4fa1fde", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-04-11T05:36:23.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-19T12:58:10.000Z", "max_forks_repo_path": "demos/instance_occlsegm/ros/instance_occlsegm/scripts/view_collected_data.py", "max_forks_repo_name": "pazeshun/jsk_apc", "max_forks_repo_head_hexsha": "0ff42000ad5992f8a31e719a5360a39cf4fa1fde", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9, "max_line_length": 68, "alphanum_fraction": 0.6112040134, "include": true, "reason": "import numpy", "num_tokens": 292}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import assume, given, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestIm2Col(hu.HypothesisTestCase):
@given(batch_size=st.integers(1, 3),
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
channels=st.integers(1, 8),
**hu.gcs)
def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
size, channels, gc, dc):
dkernel = (dilation * (kernel - 1) + 1)
assume(size >= dkernel)
NCHW_TO_NHWC = (0, 2, 3, 1)
NHWC_TO_NCHW = (0, 3, 1, 2)
COL_NHWC_TO_NCHW = (4, 2, 3, 0, 1)
N = batch_size
C = channels
H = size
W = size
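        # standard output size for a strided, padded, dilated window:
        # out = floor((in + 2*pad - (dilation*(kernel - 1) + 1)) / stride) + 1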
out_h = int((H + (2 * pad) - dkernel) / stride + 1)
out_w = int((W + (2 * pad) - dkernel) / stride + 1)
im_nchw = np.random.rand(N, C, H, W).astype(np.float32) - 0.5
im_nhwc = im_nchw.transpose(NCHW_TO_NHWC)
op_im2col_nchw = core.CreateOperator(
"Im2Col",
["im_nchw"], ["col_nchw"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NCHW",
device_option=gc)
op_im2col_nhwc = core.CreateOperator(
"Im2Col",
["im_nhwc"], ["col_nhwc"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NHWC",
device_option=gc)
self.ws.create_blob("im_nchw").feed(im_nchw, device_option=gc)
self.ws.create_blob("im_nhwc").feed(im_nhwc, device_option=gc)
self.ws.run(op_im2col_nchw)
self.ws.run(op_im2col_nhwc)
# there is probably a clever way to spell this in np
col_nchw = self.ws.blobs["col_nchw"].fetch()
col_nhwc = self.ws.blobs["col_nhwc"].fetch()
col_nchw_ = col_nchw.reshape(N, C, kernel, kernel, out_h, out_w)
col_nhwc_ = col_nhwc.reshape(N, out_h, out_w, kernel, kernel, C)
for i in range(0, N):
np.testing.assert_allclose(
col_nchw_[i],
col_nhwc_[i].transpose(COL_NHWC_TO_NCHW),
atol=1e-4,
rtol=1e-4)
op_col2im_nchw = core.CreateOperator(
"Col2Im",
["col_nchw", "im_nchw"],
["out_nchw"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NCHW",
device_option=gc)
op_col2im_nhwc = core.CreateOperator(
"Col2Im",
["col_nhwc", "im_nhwc"],
["out_nhwc"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NHWC",
device_option=gc)
self.ws.run(op_col2im_nchw)
self.ws.run(op_col2im_nhwc)
out_nchw = self.ws.blobs["out_nchw"].fetch()
out_nhwc = self.ws.blobs["out_nhwc"].fetch()
np.testing.assert_allclose(
out_nchw,
out_nhwc.transpose(NHWC_TO_NCHW),
atol=1e-4,
rtol=1e-4)
@given(batch_size=st.integers(1, 3),
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
channels=st.integers(1, 8),
order=st.sampled_from(["NCHW"]),
**hu.gcs)
@settings(deadline=10000)
def test_col2im_gradients(self, batch_size, stride, pad, kernel,
dilation, size, channels, order, gc, dc):
assume(size >= dilation * (kernel - 1) + 1)
op = core.CreateOperator(
"Im2Col",
["X"], ["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
device_option=gc)
X = np.random.rand(batch_size, channels, size, size).astype(np.float32)
self.assertGradientChecks(gc, op, [X], 0, [0])
return
|
{"hexsha": "98e9d61b5bd02820fbc4aa4bac41f460cc484745", "size": 4483, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch-frontend/caffe2/python/operator_test/im2col_col2im_test.py", "max_stars_repo_name": "AndreasKaratzas/stonne", "max_stars_repo_head_hexsha": "2915fcc46cc94196303d81abbd1d79a56d6dd4a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2021-06-01T07:37:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T01:42:09.000Z", "max_issues_repo_path": "pytorch-frontend/caffe2/python/operator_test/im2col_col2im_test.py", "max_issues_repo_name": "AndreasKaratzas/stonne", "max_issues_repo_head_hexsha": "2915fcc46cc94196303d81abbd1d79a56d6dd4a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2021-06-01T11:52:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T02:13:08.000Z", "max_forks_repo_path": "pytorch-frontend/caffe2/python/operator_test/im2col_col2im_test.py", "max_forks_repo_name": "AndreasKaratzas/stonne", "max_forks_repo_head_hexsha": "2915fcc46cc94196303d81abbd1d79a56d6dd4a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-07-20T19:34:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T21:07:36.000Z", "avg_line_length": 31.5704225352, "max_line_length": 79, "alphanum_fraction": 0.5427169306, "include": true, "reason": "import numpy", "num_tokens": 1235}
|
from nanograd.tensor import Tensor
from nanograd.device import Device
import nanograd.nn.module as nnn
import nanograd.optim.optimizer as optim
import torch
import torch.nn.functional as F
import torch.optim
import numpy as np
import unittest
x_init = np.random.randn(1, 3).astype(np.float32)
W_init = np.random.randn(3, 3).astype(np.float32)
m_init = np.random.randn(1, 3).astype(np.float32)
def step_nanograd(optim, device, kwargs={}):
net = TinyNet().train()
if device == Device.GPU: net.gpu()
optim = optim([net.x, net.W], **kwargs)
out = net.forward()
out.backward()
optim.step()
return net.x.cpu().data, net.W.cpu().data
def step_pytorch(optim, kwargs={}):
net = TorchNet()
optim = optim([net.x, net.W], **kwargs)
out = net.forward()
out.backward()
optim.step()
return net.x.detach().numpy(), net.W.detach().numpy()
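# Both step helpers start from identical copies of x_init/W_init/m_init,
# take exactly one optimizer step, and the tests below compare the updated
# parameters elementwise.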
class TinyNet(nnn.Module):
def __init__(self):
super().__init__()
self.x = Tensor(x_init.copy(), requires_grad=True)
self.W = Tensor(W_init.copy(), requires_grad=True)
self.m = Tensor(m_init.copy())
def forward(self):
out = (self.x @ self.W).relu()
out = out.log_softmax()
        out = (out * self.m + self.m).sum()
return out
class TorchNet():
def __init__(self):
self.x = torch.tensor(x_init.copy(), requires_grad=True)
self.W = torch.tensor(W_init.copy(), requires_grad=True)
self.m = torch.tensor(m_init.copy())
def forward(self):
out = (self.x @ self.W).relu()
out = F.log_softmax(out, 1)
        out = (out * self.m + self.m).sum()
return out
class TestStepCPU(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestStepCPU, self).__init__(*args, **kwargs)
self.device = Device.CPU
def test_sgd(self):
for mom in [0, 0.9]:
with self.subTest(mom=mom):
kwargs = {'lr': 0.001, 'momentum': mom}
for x, y in zip(step_nanograd(optim.SGD, self.device, kwargs), step_pytorch(torch.optim.SGD, kwargs)):
np.testing.assert_allclose(x, y, atol=1e-5)
def test_adam(self):
for x, y in zip(step_nanograd(optim.Adam, self.device), step_pytorch(torch.optim.Adam)):
np.testing.assert_allclose(x, y, atol=1e-5)
def test_adamw(self):
for wd in [1e-1, 1e-2, 1e-3]:
with self.subTest(wd=wd):
kwargs = {'lr': 1e-3, 'weight_decay': wd}
for x, y in zip(step_nanograd(optim.AdamW, self.device, kwargs), step_pytorch(torch.optim.AdamW, kwargs)):
np.testing.assert_allclose(x, y, atol=1e-5)
class TestStepGPU(TestStepCPU):
def __init__(self, *args, **kwargs):
super(TestStepGPU, self).__init__(*args, **kwargs)
self.device = Device.GPU
|
{"hexsha": "f0fcee3735978d2c8428e5f2e7da396ccb74652f", "size": 2904, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_step.py", "max_stars_repo_name": "PABannier/nanograd", "max_stars_repo_head_hexsha": "5acd355c638885cbfc0fd0f1c4903964e7fb7de9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2020-12-22T19:05:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T02:52:22.000Z", "max_issues_repo_path": "tests/test_step.py", "max_issues_repo_name": "PABannier/nanograd", "max_issues_repo_head_hexsha": "5acd355c638885cbfc0fd0f1c4903964e7fb7de9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-01-26T23:09:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-29T13:30:06.000Z", "max_forks_repo_path": "tests/test_step.py", "max_forks_repo_name": "PABannier/nanograd", "max_forks_repo_head_hexsha": "5acd355c638885cbfc0fd0f1c4903964e7fb7de9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-04T01:47:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-05T04:59:46.000Z", "avg_line_length": 30.5684210526, "max_line_length": 122, "alphanum_fraction": 0.6126033058, "include": true, "reason": "import numpy", "num_tokens": 769}
|
const GR_SUPPORTED_TYPES = Union{
MIME"image/svg", MIME"image/svg+xml", MIME"image/png", MIME"image/jpeg",
MIME"image/tiff", MIME"image/bmp", MIME"application/pdf",
MIME"application/postscript", MIME"application/x-tex"
}
backend_showable(::GRBackend, ::GR_SUPPORTED_TYPES, scene::SceneLike) = true
function gr_save(io, scene, filetype)
fp = tempname() * "." * filetype
touch(fp)
GR.beginprint(fp)
gr_draw(scene)
GR.endprint()
write(io, read(fp))
rm(fp)
end
function backend_show(::GRBackend, io::IO, ::MIME"image/png", scene::Scene)
AbstractPlotting.update!(scene)
gr_save(io, scene, "png")
end
function backend_show(::GRBackend, io::IO, ::MIME"image/jpeg", scene::Scene)
AbstractPlotting.update!(scene)
gr_save(io, scene, "jpeg")
end
function backend_show(::GRBackend, io::IO, ::MIME"image/bmp", scene::Scene)
AbstractPlotting.update!(scene)
gr_save(io, scene, "bmp")
end
function backend_show(::GRBackend, io::IO, ::MIME"image/tiff", scene::Scene)
AbstractPlotting.update!(scene)
gr_save(io, scene, "tiff")
end
function backend_show(::GRBackend, io::IO, ::Union{MIME"image/svg", MIME"image/svg+xml"}, scene::Scene)
AbstractPlotting.update!(scene)
gr_save(io, scene, "svg")
end
function backend_show(::GRBackend, io::IO, ::MIME"application/pdf", scene::Scene)
AbstractPlotting.update!(scene)
gr_save(io, scene, "pdf")
end
function backend_show(::GRBackend, io::IO, ::MIME"application/postscript", scene::Scene)
AbstractPlotting.update!(scene)
gr_save(io, scene, "eps")
end
function backend_show(::GRBackend, io::IO, ::MIME"application/x-tex", scene::Scene)
AbstractPlotting.update!(scene)
fp = tempname() * ".tex"
withenv("GKS_WSTYPE" => "pgf", "GKS_FILEPATH" => fp) do
GR.clearws()
gr_draw(scene)
GR.updatews()
end
write(io, read(fp))
end
function gr_record(f::Function, filename::String, scene::Scene, iter)
ext = uppercase(splitext(filename)[2][2:end])
@assert ext in ("GIF", "MOV", "MP4", "WEBM", "OGG") """
Extension of file is incorrect! Expected one of (\"GIF\", \"MOV\", \"MP4\", \"WEBM\", \"OGG\").
Found $ext.
"""
withenv("GKS_WSTYPE" => uppercase(ext), "GKS_FILEPATH" => filename) do
for i in iter
GR.clearws()
f(i)
gr_draw(scene)
end
end
end
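# Hedged usage sketch (illustration only; `scene` and the Observable `angle`
# are assumed names, not part of this file): record 100 frames into an MP4,
# mutating the observable before each frame is drawn.
function example_record_spin(scene, angle)
    gr_record("spin.mp4", scene, 1:100) do i
        angle[] = 2pi * i / 100
    end
end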
|
{"hexsha": "c24a3ae16b260d08e5a64e1687327f587ea0e403", "size": 2393, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/saving.jl", "max_stars_repo_name": "JuliaPlots/GRMakie.jl", "max_stars_repo_head_hexsha": "a027c79236de47bda98cf7d6592bc83f340c9f08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-11-12T17:14:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-08T14:36:49.000Z", "max_issues_repo_path": "src/saving.jl", "max_issues_repo_name": "JuliaPlots/GRMakie.jl", "max_issues_repo_head_hexsha": "a027c79236de47bda98cf7d6592bc83f340c9f08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-05-23T13:54:34.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-23T13:54:34.000Z", "max_forks_repo_path": "src/saving.jl", "max_forks_repo_name": "JuliaPlots/GRMakie.jl", "max_forks_repo_head_hexsha": "a027c79236de47bda98cf7d6592bc83f340c9f08", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:43:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:43:07.000Z", "avg_line_length": 28.1529411765, "max_line_length": 103, "alphanum_fraction": 0.649394066, "num_tokens": 682}
|
[STATEMENT]
lemma inv_is_iD [elim]:
fixes ip rt
assumes "ip\<in>kD(rt)"
and "the (flag rt ip) = inv"
shows "ip\<in>iD(rt)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ip \<in> iD rt
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
ip \<in> kD rt
the (flag rt ip) = Aodv_Basic.inv
goal (1 subgoal):
1. ip \<in> iD rt
[PROOF STEP]
unfolding iD_def
[PROOF STATE]
proof (prove)
using this:
ip \<in> kD rt
the (flag rt ip) = Aodv_Basic.inv
goal (1 subgoal):
1. ip \<in> {dip. flag rt dip = Some Aodv_Basic.inv}
[PROOF STEP]
by auto
|
{"llama_tokens": 271, "file": "AODV_variants_e_all_abcd_E_Aodv_Data", "length": 3}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Routines to import atmospheric data from text files.
Created on Thu Nov 17 09:57:08 2016
@author: maxwell
"""
__all__ = ['readprof']
import numpy as np
def readprof(fname):
return readprof_full(fname)
def readprof_full(fname):
"""
Read ASCII table of p,t,q,o3 and return the result as an ndarray tuple.
"""
#skip first row which contains metadata, then unpack the remainder of data
p,t,q,o3 = np.loadtxt(fname, skiprows=1, usecols=(0,1,2,3), unpack=True)
if p[1] > p[0]: #pressure increases with index
return p,t,q,o3
else:
return p[::-1],t[::-1],q[::-1],o3[::-1]
def readprof_ozone(fname):
"""
    Read only the ozone profile (with pressure) from file.
"""
p,o3 = np.loadtxt(fname, skiprows=1, usecols=(0,1), unpack=True)
if p[1] > p[0]: #pressure increases with index
return p,o3
else:
return p[::-1],o3[::-1]
def readprof_hr(fname):
"""
    Read heating-rate profiles (IR and shortwave) and heights from file.
"""
z, hrir, hrsw = np.loadtxt(
fname, skiprows=1, usecols=(0,2,1), unpack=True)
#convert to m from km, if the values look like they are in km
if not any(z>100):
z*=1000
if z[1] < z[0]:
return z,hrir,hrsw
else:
return z[::-1], hrir[::-1], hrsw[::-1]
|
{"hexsha": "9549c5aae2e64f7905a132b477aaebd58107b91b", "size": 1328, "ext": "py", "lang": "Python", "max_stars_repo_path": "atmosphere/readprof.py", "max_stars_repo_name": "msmithsm/rce", "max_stars_repo_head_hexsha": "91e6fd2ee93b64a471aa7e0ca62bb4649c1b3b19", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "atmosphere/readprof.py", "max_issues_repo_name": "msmithsm/rce", "max_issues_repo_head_hexsha": "91e6fd2ee93b64a471aa7e0ca62bb4649c1b3b19", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "atmosphere/readprof.py", "max_forks_repo_name": "msmithsm/rce", "max_forks_repo_head_hexsha": "91e6fd2ee93b64a471aa7e0ca62bb4649c1b3b19", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8965517241, "max_line_length": 78, "alphanum_fraction": 0.5828313253, "include": true, "reason": "import numpy", "num_tokens": 398}
|
import numpy as np
from menpo.transform.piecewiseaffine.base import barycentric_vectors
from menpo.image import BooleanImage, MaskedImage
def _pixels_to_check_python(start, end, _):
pixel_locations = []
tri_indices = []
for i, ((s_x, s_y), (e_x, e_y)) in enumerate(zip(start, end)):
for x in range(s_x, e_x):
for y in range(s_y, e_y):
pixel_locations.append((x, y))
tri_indices.append(i)
pixel_locations = np.array(pixel_locations)
tri_indices = np.array(tri_indices)
return pixel_locations, tri_indices
try:
from .tripixel import pixels_to_check
except ImportError:
    # the compiled Cython helper is unavailable; use the pure-Python fallback
    print('Falling back to pure-Python pixel checking')
    pixels_to_check = _pixels_to_check_python
def pixel_locations_and_tri_indices(mesh):
vertex_trilist = mesh.points[mesh.trilist]
start = np.floor(vertex_trilist.min(axis=1)[:, :2])
end = np.ceil(vertex_trilist.max(axis=1)[:, :2])
start = start.astype(int)
end = end.astype(int)
    n_sites = np.prod((end - start), axis=1).sum()
return pixels_to_check(start, end, n_sites)
def alpha_beta(i, ij, ik, points):
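    # Solve p - i = alpha*ij + beta*ik for (alpha, beta) via the normal
    # equations of the 2x2 Gram system (d is the inverse determinant);
    # (1 - alpha - beta, alpha, beta) are then the barycentric coordinates
    # of p with respect to triangle (i, j, k).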
ip = points - i
dot_jj = np.einsum('dt, dt -> t', ij, ij)
dot_kk = np.einsum('dt, dt -> t', ik, ik)
dot_jk = np.einsum('dt, dt -> t', ij, ik)
dot_pj = np.einsum('dt, dt -> t', ip, ij)
dot_pk = np.einsum('dt, dt -> t', ip, ik)
d = 1.0/(dot_jj * dot_kk - dot_jk * dot_jk)
alpha = (dot_kk * dot_pj - dot_jk * dot_pk) * d
beta = (dot_jj * dot_pk - dot_jk * dot_pj) * d
return alpha, beta
def xy_bcoords(mesh, tri_indices, pixel_locations):
i, ij, ik = barycentric_vectors(mesh.points[:, :2], mesh.trilist)
i = i[:, tri_indices]
ij = ij[:, tri_indices]
ik = ik[:, tri_indices]
a, b = alpha_beta(i, ij, ik, pixel_locations.T)
c = 1 - a - b
bcoords = np.array([c, a, b]).T
return bcoords
def tri_containment(bcoords):
alpha, beta, _ = bcoords.T
return np.logical_and(np.logical_and(
alpha >= 0, beta >= 0),
alpha + beta <= 1)
def z_values_for_bcoords(mesh, bcoords, tri_indices):
return mesh.barycentric_coordinate_interpolation(
mesh.points[:, -1][..., None], bcoords, tri_indices)[:, 0]
def pixel_sample_uniform(xy, n_samples):
chosen_mask = np.random.permutation(np.arange(xy.shape[0]))[:n_samples]
return xy[chosen_mask]
def unique_locations(xy, width, height):
    mask = np.zeros([width, height], dtype=bool)
mask[xy[:, 0], xy[:, 1]] = True
return np.vstack(np.nonzero(mask)).T
def location_to_index(xy, width):
return xy[:, 0] * width + xy[:, 1]
def rasterize_barycentric_coordinates(mesh, image_shape):
height, width = int(image_shape[0]), int(image_shape[1])
# 1. Find all pixel-sites that may need to be rendered to
# + the triangle that may partake in rendering
yx, tri_indices = pixel_locations_and_tri_indices(mesh)
# 2. Limit to only pixel sites in the image
out_of_bounds = np.logical_or(
np.any(yx < 0, axis=1),
np.any((np.array([height, width]) - yx) <= 0, axis=1))
in_image = ~out_of_bounds
yx = yx[in_image]
tri_indices = tri_indices[in_image]
# # Optionally limit to subset of pixels
# if n_random_samples is not None:
# # 2. Find the unique pixel sites
# xy_u = unique_locations(yx, width, height)
#
# xy_u = pixel_sample_uniform(xy_u, n_random_samples)
# to_keep = np.in1d(location_to_index(yx, width),
# location_to_index(xy_u, width))
# yx = yx[to_keep]
# tri_indices = tri_indices[to_keep]
bcoords = xy_bcoords(mesh, tri_indices, yx)
# check the mask based on triangle containment
in_tri_mask = tri_containment(bcoords)
# use this mask on the pixels
yx = yx[in_tri_mask]
bcoords = bcoords[in_tri_mask]
tri_indices = tri_indices[in_tri_mask]
# Find the z values for all pixels and calculate the mask
z_values = z_values_for_bcoords(mesh, bcoords, tri_indices)
# argsort z from smallest to biggest - use this to sort all data
sort = np.argsort(z_values)
yx = yx[sort]
bcoords = bcoords[sort]
tri_indices = tri_indices[sort]
# make a unique id per-pixel location
pixel_index = yx[:, 0] * width + yx[:, 1]
# find the first instance of each pixel site by depth
_, z_buffer_mask = np.unique(pixel_index, return_index=True)
# mask the locations one last time
yx = yx[z_buffer_mask]
bcoords = bcoords[z_buffer_mask]
tri_indices = tri_indices[z_buffer_mask]
return yx, bcoords, tri_indices
def rasterize_barycentric_coordinate_images(mesh, image_shape):
h, w = image_shape
yx, bcoords, tri_indices = rasterize_barycentric_coordinates(mesh,
image_shape)
tri_indices_img = np.zeros((1, h, w), dtype=int)
bcoords_img = np.zeros((3, h, w))
    mask = np.zeros((h, w), dtype=bool)
mask[yx[:, 0], yx[:, 1]] = True
tri_indices_img[:, yx[:, 0], yx[:, 1]] = tri_indices
bcoords_img[:, yx[:, 0], yx[:, 1]] = bcoords.T
mask = BooleanImage(mask)
return (MaskedImage(bcoords_img, mask=mask.copy(), copy=False),
MaskedImage(tri_indices_img, mask=mask.copy(), copy=False))
|
{"hexsha": "beefb9549763ebeceaa3f6e7066d824d5ca8191e", "size": 5314, "ext": "py", "lang": "Python", "max_stars_repo_path": "menpo3d/rasterize/cpu.py", "max_stars_repo_name": "nontas/menpo3d", "max_stars_repo_head_hexsha": "f29324b12a147f5b716ae5c3048d2c6b7a298752", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-10-03T19:49:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T10:48:05.000Z", "max_issues_repo_path": "menpo3d/rasterize/cpu.py", "max_issues_repo_name": "nontas/menpo3d", "max_issues_repo_head_hexsha": "f29324b12a147f5b716ae5c3048d2c6b7a298752", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "menpo3d/rasterize/cpu.py", "max_forks_repo_name": "nontas/menpo3d", "max_forks_repo_head_hexsha": "f29324b12a147f5b716ae5c3048d2c6b7a298752", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-21T01:13:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-21T01:13:24.000Z", "avg_line_length": 33.0062111801, "max_line_length": 77, "alphanum_fraction": 0.6441475348, "include": true, "reason": "import numpy", "num_tokens": 1511}
|
import sightlines as los
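# Semantics exercised by these tests: compute_len_in_each_cell takes samples
# (z, r, cell_id) along a sightline on the z-axis, where r is the
# perpendicular offset of the sampled cell, and returns {cell_id: length}.
# Each point of [0, z_last] is attributed to the cell whose sample is
# nearest, with ties split evenly between the tied cells.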
def test_halfway():
short_z_r_list = [(0,0,1), (1,0,10)]
seg_dict = los.compute_len_in_each_cell(short_z_r_list)
assert(len(seg_dict)==2)
assert(seg_dict[1]==0.5)
assert(seg_dict[10]==0.5)
def test_equal_10():
nice_z_r_list = [(0,1,10), (1,1,11), (2,1,12), (3,1,13), (4,1,14),
(5,1,15), (6,1,16), (7,1,17), (8,1,18), (9,1,19)]
seg_dict = los.compute_len_in_each_cell(nice_z_r_list)
assert(len(seg_dict)==10)
assert(seg_dict[10] == 0.5)
assert(seg_dict[11] == 1.0)
assert(seg_dict[12] == 1.0)
assert(seg_dict[13] == 1.0)
assert(seg_dict[14] == 1.0)
assert(seg_dict[15] == 1.0)
assert(seg_dict[16] == 1.0)
assert(seg_dict[17] == 1.0)
assert(seg_dict[18] == 1.0)
assert(seg_dict[19] == 0.5)
def test_middle_tie():
pythagoras_hill = [(0,0,10), (9,3,21), (15,3,22), (24,0,88)]
seg_dict = los.compute_len_in_each_cell(pythagoras_hill)
assert(len(seg_dict)==4)
assert(seg_dict[10] == 5)
assert(seg_dict[21] == 7)
assert(seg_dict[22] == 7)
assert(seg_dict[88] == 5)
def test_4_way_tie():
circular_hill = [(0,0,10), (2,1,21), (3,2,22), (5,2,24), (6,1,25), (8,0,76)]
seg_dict = los.compute_len_in_each_cell(circular_hill)
assert(len(seg_dict)==4)
assert(seg_dict[10]==1.25)
assert(seg_dict[21]==2.75)
assert(seg_dict[25]==2.75)
assert(seg_dict[76]==1.25)
def test_worst_case_for_efficiency():
meano_zeno = [(1,0,1), (2,0,2), (4,0,4), (8,0,8), (16,0,16), (32,0,32), (64,0,64)]
seg_dict = los.compute_len_in_each_cell(meano_zeno)
assert(len(seg_dict)==7)
assert(seg_dict[1]==1.5)
assert(seg_dict[2]==1.5)
assert(seg_dict[4]==3)
assert(seg_dict[8]==6)
assert(seg_dict[16]==12)
assert(seg_dict[32]==24)
assert(seg_dict[64]==16)
def test_no_galaxy():
just_the_origin = [(0,0,0)]
seg_dict = los.compute_len_in_each_cell(just_the_origin)
assert(len(seg_dict)==1)
assert(seg_dict[0]==0)
def test_empty():
seg_dict = los.compute_len_in_each_cell([])
assert(len(seg_dict)==0)
def test_the_thing_I_was_worried_about_before_but_will_probably_be_totally_fine_since_I_redesigned_my_code():
calm_down = [(0,0,0), (9,5,4), (9,3,1)]
seg_dict = los.compute_len_in_each_cell(calm_down)
assert(len(seg_dict)==2)
assert(seg_dict[0]==5)
assert(seg_dict[1]==4)
def test_ends_are_not_closest():
high_ends = [(0,100,100), (1,1,10), (9,1,20), (10,100, 200)]
seg_dict = los.compute_len_in_each_cell(high_ends)
assert(len(seg_dict)==2)
assert(seg_dict[10]==5)
assert(seg_dict[20]==5)
|
{"hexsha": "3ca1a93d6763ce4e88eac78040b4017075531d31", "size": 2677, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_segment_lister.py", "max_stars_repo_name": "astrobenji/sightlines", "max_stars_repo_head_hexsha": "e81464a672dae7105d2507d1753d415a51f5a512", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_segment_lister.py", "max_issues_repo_name": "astrobenji/sightlines", "max_issues_repo_head_hexsha": "e81464a672dae7105d2507d1753d415a51f5a512", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_segment_lister.py", "max_forks_repo_name": "astrobenji/sightlines", "max_forks_repo_head_hexsha": "e81464a672dae7105d2507d1753d415a51f5a512", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4625, "max_line_length": 109, "alphanum_fraction": 0.6290623833, "include": true, "reason": "import numpy", "num_tokens": 961}
|
# python 2/3 compatibility
from __future__ import division, print_function
# global imports
import numpy
import pandas
import json
class InfoMatrices(object):
"""
Class holding information on the compartments in the model.
Attributes
----------
Reaction_Reaction : pandas.DataFrame
Mapping of model-reactions to unique BiGG IDs
(compensation for reaction-duplication for Isozymes).
rows: unique BiGG-reactions ; cols: model-reactions
Protein_Enzyme : pandas.DataFrame
Conversion of Enzyme levels to levels of proteins, constituting them.
(Gives Proteome of Metabolic Proteins, needs to be added to Process_Protein to obtain full proteome)
rows: proteins ; cols: model-enzymes
ProteinWeight : pandas.DataFrame
Molecular weight and number of amino-acids for each protein
rows: proteins ; cols: AAnum and weight
Protein_ProcessMachinery : pandas.DataFrame
Conversion of Process levels to levels of proteins, constituting their machineries.
(Gives Proteome of Process Proteins, needs to be added to Enzyme_Protein to obtain full proteome)
rows: proteins ; cols: model-processes
Process_Protein : pandas.DataFrame
Indication on how much each protein requires of each process
        rows: processes ; cols: proteins
Compartment_Protein : pandas.DataFrame
        Logical matrix indicating which protein is located in which compartment
rows: compartments ; cols: proteins
S : pandas.DataFrame
Model stoichiometry-matrix
rows: metabolites ; cols: reactions
"""
def __init__(self,struct):
BiGGids=[]
for rx in list(struct.ReactionInfo.Elements.keys()):
Bid=struct.ReactionInfo.Elements[rx]['OtherIDs']['ProtoID']
if Bid is not '':
BiGGids.append(Bid)
self.Reaction_Reaction = makeReaction_Reaction(struct,BiGGids)
self.Protein_Enzyme = makeProtein_Enzyme(struct)['Matrix']
self.ProteinWeight = makeProtein_Enzyme(struct)['Weight']
self.Protein_ProcessMachinery = makeProcessMachinery_Protein(struct)
self.Process_Protein = makeProcessRequirements_Protein(struct)
self.Compartment_Protein = makeCompartment_Protein(struct)
self.S = make_S(struct)
def make_S(struct):
    # parameter renamed from `input` to avoid shadowing the built-in input()
    Mets=struct.MetaboliteInfo.toDataFrame()
    InternalMets=sorted(list(Mets[Mets['Type'].apply(json.loads)=='internal']['ID'].apply(json.loads)))
    ExternalMets=sorted(list(Mets[Mets['Type'].apply(json.loads)=='external']['ID'].apply(json.loads)))
    PrecursorMets=sorted(list(Mets[Mets['Type'].apply(json.loads)=='precursor']['ID'].apply(json.loads)))
    Metabolites=ExternalMets + InternalMets + PrecursorMets
    Rxns=struct.ReactionInfo.toDataFrame()
Reactions=sorted(Rxns.index.tolist())
S=pandas.DataFrame(numpy.zeros((len(Metabolites),len(Reactions))),index=Metabolites,columns=Reactions)
for rx in Reactions:
for j in list(json.loads(Rxns.loc[rx]['Reactants']).keys()):
S.loc[j,rx]=-json.loads(Rxns.loc[rx]['Reactants'])[j]
for j in list(json.loads(Rxns.loc[rx]['Products']).keys()):
S.loc[j,rx]=json.loads(Rxns.loc[rx]['Products'])[j]
return(S)
def makeReaction_Reaction(struct,BiGGids):
R_Matrix=pandas.DataFrame(numpy.zeros((len(numpy.unique(BiGGids)),len(list(struct.ReactionInfo.Elements.keys())))),columns=list(struct.ReactionInfo.Elements.keys()),index=numpy.unique(BiGGids))
for rx in list(struct.ReactionInfo.Elements.keys()):
R_Matrix.loc[struct.ReactionInfo.Elements[rx]['OtherIDs']['ProtoID'],rx]=1
return(R_Matrix)
def makeProtein_Enzyme(struct):
P_Matrix=pandas.DataFrame(numpy.zeros((len(numpy.unique(list(struct.ProteinInfo.Elements.keys()))),len(numpy.unique(list(struct.EnzymeInfo.Elements.keys()))))),columns=numpy.unique(list(struct.EnzymeInfo.Elements.keys())),index=numpy.unique(list(struct.ProteinInfo.Elements.keys())))
Pweight=pandas.DataFrame(numpy.zeros((len(numpy.unique(list(struct.ProteinInfo.Elements.keys()))),2)),columns=['AAlength','MolecMass'],index=numpy.unique(list(struct.ProteinInfo.Elements.keys())))
for i in numpy.unique(list(struct.ProteinInfo.Elements.keys())):
Pweight.loc[i,'AAlength']=struct.ProteinInfo.Elements[i]['AAnumber']
Pweight.loc[i,'MolecMass']=struct.ProteinInfo.Elements[i]['Weight']
if len(struct.ProteinInfo.Elements[i]['associatedEnzymes']) >0:
for j in struct.ProteinInfo.Elements[i]['associatedEnzymes']:
P_Matrix.loc[i,j]=struct.EnzymeInfo.Elements[j]['Subunits'][i]['StochFac']
return({'Matrix': P_Matrix , 'Weight': Pweight})
def makeProcessRequirements_Protein(struct):
PM_Matrix=pandas.DataFrame(numpy.zeros((2,len(numpy.unique(list(struct.ProteinInfo.Elements.keys()))))),columns=numpy.unique(list(struct.ProteinInfo.Elements.keys())),index=['P_TA','P_CHP'])
for i in numpy.unique(list(struct.ProteinInfo.Elements.keys())):
t=0
f=0
if 'Translation' in list(struct.ProteinInfo.Elements[i]['ProcessRequirements'].keys()):
t=struct.ProteinInfo.Elements[i]['ProcessRequirements']['Translation']
if 'Folding' in list(struct.ProteinInfo.Elements[i]['ProcessRequirements'].keys()):
f=struct.ProteinInfo.Elements[i]['ProcessRequirements']['Folding']
PM_Matrix.loc['P_TA',i]=t
PM_Matrix.loc['P_CHP',i]=f
return(PM_Matrix)
def makeProcessMachinery_Protein(struct):
M_Matrix=pandas.DataFrame(numpy.zeros((len(numpy.unique(list(struct.ProteinInfo.Elements.keys()))),2)),columns=['P_TA','P_CHP'],index=numpy.unique(list(struct.ProteinInfo.Elements.keys())))
for i in numpy.unique(list(struct.ProteinInfo.Elements.keys())):
if i in list(struct.ProcessInfo.Elements['Translation']['Composition'].keys()):
M_Matrix.loc[i,'P_TA']=struct.ProcessInfo.Elements['Translation']['Composition'][i]
if i in list(struct.ProcessInfo.Elements['Folding']['Composition'].keys()):
M_Matrix.loc[i,'P_CHP']=struct.ProcessInfo.Elements['Folding']['Composition'][i]
return(M_Matrix)
def makeCompartment_Protein(struct):
CP_Matrix=pandas.DataFrame(numpy.zeros((len(numpy.unique(list(struct.CompartmentInfo.Elements.keys()))),len(numpy.unique(list(struct.ProteinInfo.Elements.keys()))))),columns=numpy.unique(list(struct.ProteinInfo.Elements.keys())),index=numpy.unique(list(struct.CompartmentInfo.Elements.keys())))
for i in numpy.unique(list(struct.CompartmentInfo.Elements.keys())):
CP_Matrix.loc[i,struct.CompartmentInfo.Elements[i]['associatedProteins']]=1
return(CP_Matrix)
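# Hedged usage sketch (illustration only, not part of the original API): per
# the docstrings above, the full proteome is the metabolic part plus the
# process-machinery part. enzyme_levels and process_levels are assumed to be
# pandas.Series indexed like the corresponding matrix columns.
def compose_full_proteome(info, enzyme_levels, process_levels):
    metabolic = info.Protein_Enzyme.dot(enzyme_levels)
    machinery = info.Protein_ProcessMachinery.dot(process_levels)
    return metabolic.add(machinery, fill_value=0)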
|
{"hexsha": "2a164f979eeda7291f33c90b6a0ebd9d14ff3237", "size": 6826, "ext": "py", "lang": "Python", "max_stars_repo_path": "simulator/static/python/rbatools/infoMatrices.py", "max_stars_repo_name": "tlubitz/rba", "max_stars_repo_head_hexsha": "073b591ff6047ee8df00288ecfe45094e2b7d195", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "simulator/static/python/rbatools/infoMatrices.py", "max_issues_repo_name": "tlubitz/rba", "max_issues_repo_head_hexsha": "073b591ff6047ee8df00288ecfe45094e2b7d195", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simulator/static/python/rbatools/infoMatrices.py", "max_forks_repo_name": "tlubitz/rba", "max_forks_repo_head_hexsha": "073b591ff6047ee8df00288ecfe45094e2b7d195", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 58.8448275862, "max_line_length": 299, "alphanum_fraction": 0.6952827425, "include": true, "reason": "import numpy", "num_tokens": 1536}
|