text
stringlengths 0
27.1M
| meta
dict |
|---|---|
% Make the command for organizing stories
% Usage: \story{name}{title}{story}{pronouns}{major}{year}
%   #1 name     -- person's name, printed uppercase/bold in the section heading
%   #2 title    -- story title, printed after the name in the heading
%   #3 story    -- body text of the story
%   #4 pronouns -- printed italicized on its own line, prefixed with "-"
%   #5 major    -- printed italicized together with #6
%   #6 year     -- year shown next to the major (presumably class/graduation year -- confirm with usage)
\newcommand{\story}[6]{
    \section*{\uppercase{\textbf{#1}}: #2}
    #3 \\
    \textit{-#4} \\
    \textit{-#5, #6}
}
|
{
"alphanum_fraction": 0.5860465116,
"author": null,
"avg_line_length": 21.5,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "13d9d77f8508aaad28ed3b9b974bc446ea78c5f1",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "95f30dd77eea764e7b72fef63009ee7f506c20ec",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "EarthSquirrel/msu-stories-of-the-pride",
"max_forks_repo_path": "stories/msu-2019-2020/layout.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "95f30dd77eea764e7b72fef63009ee7f506c20ec",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "EarthSquirrel/msu-stories-of-the-pride",
"max_issues_repo_path": "stories/msu-2019-2020/layout.tex",
"max_line_length": 51,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "95f30dd77eea764e7b72fef63009ee7f506c20ec",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "EarthSquirrel/msu-stories-of-the-pride",
"max_stars_repo_path": "stories/msu-2019-2020/layout.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 73,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 215
}
|
	FUNCTION PS_TOTL ( t850, td850, t500 )
C************************************************************************
C* PS_TOTL								*
C*									*
C* This function computes the total totals index:			*
C*									*
C*           TOTL = ( T850 - T500 ) + ( TD850 - T500 )			*
C*									*
C* REAL PS_TOTL ( T850, TD850, T500 )					*
C*									*
C* Input parameters:							*
C*	T850		REAL	850 mb temperature in Celsius		*
C*	TD850		REAL	850 mb dewpoint in Celsius		*
C*	T500		REAL	500 mb temperature in Celsius		*
C*									*
C* Output parameters:							*
C*	PS_TOTL		REAL	Total totals index			*
C**									*
C* Log:									*
C* P. Kocin/GSFC	1980						*
C* M. Goodman/RDS	11/84	Cleaned code				*
C* M. desJardins/GSFC	 3/88	Cleaned code				*
C************************************************************************
	INCLUDE 'GEMPRM.PRM'
	INCLUDE 'ERMISS.FNC'
C------------------------------------------------------------------------------
C*	Return the missing-data flag if any of the inputs is missing.
C
	IF ( ERMISS (t850) .or. ERMISS (td850) .or. ERMISS (t500) ) THEN
	    PS_TOTL = RMISSD
	ELSE
C
C*	    Compute vertical totals ( T850 - T500 ).
C
	    vtot = PS_VTOT ( t850, t500 )
C
C*	    Compute cross totals ( TD850 - T500 ).
C
	    ctot = PS_CTOT ( td850, t500 )
C
C*	    The sum is the total totals.
C
	    PS_TOTL = ctot + vtot
	END IF
C*
	RETURN
	END
|
{
"alphanum_fraction": 0.4537177542,
"author": null,
"avg_line_length": 28.652173913,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "93e2534b53027c694d8ed91ed196519d36dfa9c8",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 27,
"max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z",
"max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "oxelson/gempak",
"max_forks_repo_path": "gempak/source/prmcnvlib/ps/pstotl.f",
"max_issues_count": 60,
"max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "oxelson/gempak",
"max_issues_repo_path": "gempak/source/prmcnvlib/ps/pstotl.f",
"max_line_length": 79,
"max_stars_count": 42,
"max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "oxelson/gempak",
"max_stars_repo_path": "gempak/source/prmcnvlib/ps/pstotl.f",
"max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z",
"num_tokens": 423,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1318
}
|
[STATEMENT]
lemma charpoly_eq: "charpoly A = Cayley_Hamilton.charpoly (from_vec A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Cayley_Hamilton_Compatible.charpoly A = Cayley_Hamilton.charpoly (from_vec A)
[PROOF STEP]
unfolding charpoly_def Cayley_Hamilton.charpoly_def det_sq_matrix_eq[symmetric] X_def C_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Square_Matrix.det (from_vec (mat (monom (1::'a) (Suc 0)) - mat2matofpoly A)) = Square_Matrix.det (diag [:0::'a, 1::'a:] - map_sq_matrix (\<lambda>c. [:c:]) (from_vec A))
[PROOF STEP]
apply (intro arg_cong[where f=Square_Matrix.det])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. from_vec (mat (monom (1::'a) (Suc 0)) - mat2matofpoly A) = diag [:0::'a, 1::'a:] - map_sq_matrix (\<lambda>c. [:c:]) (from_vec A)
[PROOF STEP]
apply transfer'
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>A. (\<lambda>i. ($) ((mat (monom (1::'a) (Suc 0)) - mat2matofpoly A) $ i)) = (\<lambda>i j. (if i = j then [:0::'a, 1::'a:] else 0) - [:A $ i $ j:])
[PROOF STEP]
apply (simp add: fun_eq_iff mat_def mat2matofpoly_def C_def monom_Suc)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Echelon_Form_Cayley_Hamilton_Compatible",
"hexsha": null,
"include": null,
"lang": null,
"length": 5,
"llama_tokens": 525,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
# (c) Tom Gaimann, 2020
# Composition of a frequency-modulated signal
# Ref: https://gist.github.com/fedden/d06cd490fcceab83952619311556044a
import numpy as np
import matplotlib.pyplot as plt

# Font size must be configured before any text objects are created.
# (The original additionally called plt.rc("font", size=15) *after*
# plotting, which had no effect on the already-drawn labels; removed.)
plt.rcParams.update({'font.size': 22})

f_modulator = 4         # frequency of the message wave (Hz)
f_carrier = 40          # frequency of the carrier wave (Hz)
modulation_index = 1.0  # modulation depth

time = np.linspace(0, 1, 1000)  # time axis, 1 s at 1000 samples

carrier = np.cos(2.0 * np.pi * f_carrier * time)
modulator = np.cos(2.0 * np.pi * f_modulator * time) * modulation_index

# FM signal: the carrier phase is offset by the modulator signal.
# Vectorized; replaces the original per-sample Python loop with the
# numerically identical array expression.
product = np.cos(2.0 * np.pi * (f_carrier * time + modulator))

# Message signal
plt.subplot(3, 1, 1)
plt.title("Frequenz Modulation")
plt.plot(modulator, color="green")
plt.ylabel("Amplitude")
plt.xlabel("Nachrichten Signal")

# Carrier signal
plt.subplot(3, 1, 2)
plt.plot(carrier, color="red")
plt.ylabel("Amplitude")
plt.xlabel("Träger Signal")

# Modulated signal
plt.subplot(3, 1, 3)
plt.plot(product, color="purple")
plt.ylabel("Amplitude")
plt.xlabel("Moduliertes Signal")

# Plot settings
plt.subplots_adjust(hspace=1)
fig = plt.gcf()
fig.set_size_inches(16, 9)
fig.savefig("FM.png", dpi=100)
|
{
"alphanum_fraction": 0.7228400342,
"author": null,
"avg_line_length": 25.9777777778,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0abc8d07dfe14b20f5b61edc9524b68b48155455",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6f1ebaf6222f0db892b568bf021b87717d461345",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tomg404/WSeminar19-21",
"max_forks_repo_path": "Endpräsentation/Python-Skripte/FM.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6f1ebaf6222f0db892b568bf021b87717d461345",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tomg404/WSeminar19-21",
"max_issues_repo_path": "Endpräsentation/Python-Skripte/FM.py",
"max_line_length": 71,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6f1ebaf6222f0db892b568bf021b87717d461345",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tomg404/WSeminar19-21",
"max_stars_repo_path": "Endpräsentation/Python-Skripte/FM.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 377,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1169
}
|
from ale_python_interface import ALEInterface
import pygame
from pygame.locals import *
import numpy as np
import os
import scipy.ndimage as ndimage
class AtariEnvironment:
    """
    Environment for playing Atari games using ALE Interface
    """

    def __init__(self, game_filename, **kwargs):
        """
        Create an environment with the provided game

        game_filename -- path to the Atari ROM file loaded into ALE
        """
        pygame.init()
        # 160x210 is the native Atari 2600 frame size (width x height)
        self.screen = pygame.display.set_mode((160,210))
        self.fps_clock = pygame.time.Clock()
        self.show_while_training = True

        # Buffer for grabbing the screen from ALE
        # 100800 == 210 * 160 * 3 RGB bytes (see the reshape in get_reduced_screen)
        self.screen_buffer = np.zeros((100800,), np.uint8)

        # Create the ALE interface and load the game
        self.ale = ALEInterface()
        self.ale.setBool('color_averaging', True)
        self.ale.setFloat('repeat_action_probability', 0.0)
        self.ale.loadROM(game_filename)

        # Grab the set of available moves for this game
        self.move_list = self.ale.getMinimalActionSet()

        self.listeners = []

    def update_screen(self):
        """
        Grab the current screen from ALE and display it via pygame
        """
        self.ale.getScreenRGB(self.screen_buffer)
        # NOTE(review): the pygame display code below is disabled, so this
        # method currently only refreshes self.screen_buffer.
        # if self.show_while_training:
        #     game_screen = self.screen_buffer.reshape((210,160,3))
        #     game_screen = np.transpose(game_screen, (1,0,2))
        #
        #     game_surface = pygame.surfarray.make_surface(game_screen)
        #     self.screen.blit(game_surface, (0,0))
        #     pygame.display.flip()

    def get_reduced_screen(self):
        """
        Convert current screen to 84x84 np array of luminescence values. Scale values
        from 0.0 to 1.0 to work with Tensorflow
        """
        # Reshape the screen buffer to an appropriate shape
        game_screen = self.screen_buffer.reshape((210,160,3))
        # Convert to luminosity (ITU-R BT.601 RGB weights)
        gray_screen = np.dot(game_screen, np.array([0.299, 0.587, 0.114])).astype(np.uint8)
        # Downscale: 210*0.4 = 84 rows, 160*0.525 = 84 columns
        gray_screen = ndimage.zoom(gray_screen, (0.4, 0.525))
        # NOTE(review): the array is returned as uint8 (0..255); the 0.0-1.0
        # scaling claimed by the docstring is not performed here -- confirm
        # whether callers rescale.
        return gray_screen

    def act(self, action):
        """
        Perform an action on the environment

        action -- index into the minimal action set; returns the ALE reward
        """
        ale_action = self.move_list[action]
        return self.ale.act(ale_action)

    def terminal(self):
        """
        Return if the state is a terminal state
        """
        return self.ale.game_over()

    def lives(self):
        """
        How many lives are left
        """
        return self.ale.lives()

    def reset_game(self):
        """
        Reset ALE to the start of a new episode.
        """
        self.ale.reset_game()
|
{
"alphanum_fraction": 0.709851552,
"author": null,
"avg_line_length": 20.5833333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "37ddd0b2cc240505d5cbabbe2f4a5f78c16e4845",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "edffb87221a5f3f18b35556583d798981f78726a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "danathughes/AtariDQN",
"max_forks_repo_path": "environment.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "edffb87221a5f3f18b35556583d798981f78726a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "danathughes/AtariDQN",
"max_issues_repo_path": "environment.py",
"max_line_length": 85,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "edffb87221a5f3f18b35556583d798981f78726a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "danathughes/AtariDQN",
"max_stars_repo_path": "environment.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 608,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2223
}
|
#define BOOST_TEST_MODULE URL
#include <boost/test/unit_test.hpp>
#include "odil/webservices/URL.h"
// A URL must compare equal to itself: operator== is reflexive and
// operator!= is its negation.
BOOST_AUTO_TEST_CASE(Equal)
{
    auto const url = odil::webservices::URL::parse(
        "foo://example.com:8042/over/there?name=ferret#nose");
    BOOST_REQUIRE(url == url);
    BOOST_REQUIRE(!(url != url));
}
// URLs differing in query and fragment must compare unequal.
BOOST_AUTO_TEST_CASE(Different)
{
    auto const url1 = odil::webservices::URL::parse(
        "foo://example.com:8042/over/there?name=ferret#nose");
    auto const url2 = odil::webservices::URL::parse(
        "foo://example.com:8042/over/there?name=goose#wing");
    BOOST_REQUIRE(!(url1 == url2));
    BOOST_REQUIRE(url1 != url2);
}
// Parsing a full URL splits it into the five RFC 3986 components:
// scheme, authority, path, query, fragment.
BOOST_AUTO_TEST_CASE(Parse)
{
    auto const url = odil::webservices::URL::parse(
        "foo://example.com:8042/over/there?name=ferret#nose");
    BOOST_REQUIRE_EQUAL(url.scheme, "foo");
    BOOST_REQUIRE_EQUAL(url.authority, "example.com:8042");
    BOOST_REQUIRE_EQUAL(url.path, "/over/there");
    BOOST_REQUIRE_EQUAL(url.query, "name=ferret");
    BOOST_REQUIRE_EQUAL(url.fragment, "nose");
}
// A URL without a path component parses with path == "" while the
// other components are still extracted.
BOOST_AUTO_TEST_CASE(ParseIncomplete)
{
    auto const url = odil::webservices::URL::parse(
        "foo://example.com:8042?name=ferret#nose");
    BOOST_REQUIRE_EQUAL(url.scheme, "foo");
    BOOST_REQUIRE_EQUAL(url.authority, "example.com:8042");
    BOOST_REQUIRE_EQUAL(url.path, "");
    BOOST_REQUIRE_EQUAL(url.query, "name=ferret");
    BOOST_REQUIRE_EQUAL(url.fragment, "nose");
}
// Converting a component-constructed URL to std::string recomposes the
// original textual form (inverse of parsing).
BOOST_AUTO_TEST_CASE(Recompose)
{
    odil::webservices::URL const url{
        "foo", "example.com:8042", "/over/there", "name=ferret", "nose"};
    std::string const string(url);
    BOOST_REQUIRE_EQUAL(
        string, "foo://example.com:8042/over/there?name=ferret#nose");
}
// parse_query() with no argument splits key=value pairs on the default
// "&" separator.
BOOST_AUTO_TEST_CASE(ParseQueryDefaultSeparator)
{
    odil::webservices::URL const url{"", "", "", "name=ferret&color=purple", ""};
    auto const items = url.parse_query();
    decltype(items) expected{ {"name", "ferret"}, {"color", "purple"}};
    BOOST_REQUIRE(items == expected);
}
// parse_query accepts an alternative pair separator (";" here).
BOOST_AUTO_TEST_CASE(ParseQueryOtherSeparator)
{
    odil::webservices::URL const url{"", "", "", "name=ferret;color=purple", ""};
    auto const items = url.parse_query(";");
    decltype(items) expected{ {"name", "ferret"}, {"color", "purple"}};
    BOOST_REQUIRE(items == expected);
}
|
{
"alphanum_fraction": 0.6772648084,
"author": null,
"avg_line_length": 32.8,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "de8b1aa4d3e7d73d515bb04a1e95c876a8bd4d31",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 23,
"max_forks_repo_forks_event_max_datetime": "2021-09-28T21:59:31.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-04-27T07:14:56.000Z",
"max_forks_repo_head_hexsha": "e6b12df698ce452f9c5d86858e896e9b6d28cdf0",
"max_forks_repo_licenses": [
"CECILL-B"
],
"max_forks_repo_name": "genisysram/odil",
"max_forks_repo_path": "tests/code/webservices/URL.cpp",
"max_issues_count": 74,
"max_issues_repo_head_hexsha": "e6b12df698ce452f9c5d86858e896e9b6d28cdf0",
"max_issues_repo_issues_event_max_datetime": "2021-11-18T16:36:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-01-11T16:04:46.000Z",
"max_issues_repo_licenses": [
"CECILL-B"
],
"max_issues_repo_name": "genisysram/odil",
"max_issues_repo_path": "tests/code/webservices/URL.cpp",
"max_line_length": 81,
"max_stars_count": 72,
"max_stars_repo_head_hexsha": "e6b12df698ce452f9c5d86858e896e9b6d28cdf0",
"max_stars_repo_licenses": [
"CECILL-B"
],
"max_stars_repo_name": "genisysram/odil",
"max_stars_repo_path": "tests/code/webservices/URL.cpp",
"max_stars_repo_stars_event_max_datetime": "2022-03-18T18:10:34.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-02-04T00:41:02.000Z",
"num_tokens": 623,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2296
}
|
"""
    struct GridPortion{Dc,Dp,G} <: Grid{Dc,Dp}
      parent_grid::G
      cell_to_parent_cell::Vector{Int32}
      node_to_parent_node::Vector{Int32}
      cell_to_nodes::Table{Int32,Vector{Int32},Vector{Int32}}
    end

Grid representing the portion of `parent_grid` spanned by the cells listed in
`cell_to_parent_cell`. Nodes are renumbered to the active subset, and
`cell_to_nodes` stores the cell connectivity in the compact numbering.
"""
struct GridPortion{Dc,Dp,G} <: Grid{Dc,Dp}
  parent_grid::G
  cell_to_parent_cell::Vector{Int32}
  node_to_parent_node::Vector{Int32}
  cell_to_nodes::Table{Int32,Vector{Int32},Vector{Int32}}
  @doc """
      GridPortion(parent_grid::Grid{Dc,Dp},cell_to_parent_cell::Vector{Int32}) where {Dc,Dp}
  """
  function GridPortion(parent_grid::Grid,cell_to_parent_cell::AbstractVector{<:Integer})
    Dc = num_cell_dims(parent_grid)
    Dp = num_point_dims(parent_grid)
    parent_cell_to_parent_nodes = get_cell_node_ids(parent_grid)
    nparent_nodes = num_nodes(parent_grid)
    # NOTE(review): parent_node_to_coords is computed but never used below.
    parent_node_to_coords = get_node_coordinates(parent_grid)
    # Detect which parent nodes are touched by the selected cells and build
    # the forward and inverse node renumbering.
    node_to_parent_node, parent_node_to_node = _find_active_nodes(
      parent_cell_to_parent_nodes,cell_to_parent_cell,nparent_nodes)
    # Re-express the cell connectivity in the new compact node numbering.
    cell_to_nodes = _renumber_cell_nodes(
      parent_cell_to_parent_nodes,parent_node_to_node,cell_to_parent_cell)
    new{Dc,Dp,typeof(parent_grid)}(parent_grid,cell_to_parent_cell,node_to_parent_node,cell_to_nodes)
  end
end
# Build a GridPortion from a boolean mask over parent cells.
function GridPortion(parent_grid::Grid,parent_cell_to_mask::AbstractArray{Bool})
  cell_to_parent_cell = findall(collect1d(parent_cell_to_mask))
  GridPortion(parent_grid,cell_to_parent_cell)
end

# Specialization for Bool vectors: findall applies directly, without the
# collect1d copy required in the general AbstractArray case.
function GridPortion(parent_grid::Grid,parent_cell_to_mask::AbstractVector{Bool})
  cell_to_parent_cell = findall(parent_cell_to_mask)
  GridPortion(parent_grid,cell_to_parent_cell)
end
# Forward the orientation/regularity traits to the parent grid type.
function OrientationStyle(::Type{GridPortion{Dc,Dp,G}}) where {Dc,Dp,G}
  OrientationStyle(G)
end

function RegularityStyle(::Type{GridPortion{Dc,Dp,G}}) where {Dc,Dp,G}
  RegularityStyle(G)
end

# Coordinates of the active nodes, lazily indexed out of the parent grid.
function get_node_coordinates(grid::GridPortion)
  parent_node_to_coords = get_node_coordinates(grid.parent_grid)
  lazy_map(Reindex(parent_node_to_coords),grid.node_to_parent_node)
end

# Cell connectivity in the portion's own (compact) node numbering.
function get_cell_node_ids(grid::GridPortion)
  grid.cell_to_nodes
end

# Reference FEs are shared with the parent grid.
function get_reffes(grid::GridPortion)
  get_reffes(grid.parent_grid)
end

# Cell types of the selected cells, lazily indexed out of the parent grid.
function get_cell_type(grid::GridPortion)
  lazy_map(Reindex(get_cell_type(grid.parent_grid)),grid.cell_to_parent_cell)
end
# Helpers

# Mark the parent nodes referenced by the selected cells and return the pair
# (node_to_oldnode, oldnode_to_node): the active nodes in parent numbering,
# and the inverse map, which holds UNSET for parent nodes not used by any
# selected cell.
function _find_active_nodes(oldcell_to_oldnodes,cell_to_oldcell,noldnodes)
  oldnode_is_active = fill(false,noldnodes)
  cache = array_cache(oldcell_to_oldnodes)
  for oldcell in cell_to_oldcell
    oldnodes = getindex!(cache,oldcell_to_oldnodes,oldcell)
    for oldnode in oldnodes
      oldnode_is_active[oldnode] = true
    end
  end
  node_to_oldnode = findall(oldnode_is_active)
  oldnode_to_node = fill(UNSET,noldnodes)
  oldnode_to_node[node_to_oldnode] = 1:length(node_to_oldnode)
  (node_to_oldnode, oldnode_to_node)
end
# Build the compressed cell-to-node Table for the selected cells, translating
# parent node ids into the compact numbering via oldnode_to_node.
function _renumber_cell_nodes(oldcell_to_oldnodes,oldnode_to_node,cell_to_oldcell)
  ncells = length(cell_to_oldcell)
  cell_to_nodes_ptrs = zeros(Int32,ncells+1)
  cache = array_cache(oldcell_to_oldnodes)
  # First pass: count nodes per cell to size the compressed layout.
  for (cell,oldcell) in enumerate(cell_to_oldcell)
    oldnodes = getindex!(cache,oldcell_to_oldnodes,oldcell)
    cell_to_nodes_ptrs[cell+1] = length(oldnodes)
  end
  length_to_ptrs!(cell_to_nodes_ptrs)
  ndata = cell_to_nodes_ptrs[end]-1
  cell_to_nodes_data = zeros(Int32,ndata)
  # Second pass: fill the data array with the renumbered node ids.
  for (cell,oldcell) in enumerate(cell_to_oldcell)
    oldnodes = getindex!(cache,oldcell_to_oldnodes,oldcell)
    a = cell_to_nodes_ptrs[cell]-1
    for (lnode,oldnode) in enumerate(oldnodes)
      node = oldnode_to_node[oldnode]
      # Every referenced node must have been marked active beforehand.
      @assert node > 0
      cell_to_nodes_data[a+lnode] = node
    end
  end
  Table(cell_to_nodes_data,cell_to_nodes_ptrs)
end
|
{
"alphanum_fraction": 0.7887284842,
"author": null,
"avg_line_length": 32.7454545455,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "967aa5c64a7c5b0ab2f683ac2d7cd326933deeb3",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2021-05-24T07:54:42.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-10T06:37:07.000Z",
"max_forks_repo_head_hexsha": "962da3b03647a3017fb2684f88106eae49db9051",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Paulms/Gridap.jl",
"max_forks_repo_path": "src/Geometry/GridPortions.jl",
"max_issues_count": 7,
"max_issues_repo_head_hexsha": "962da3b03647a3017fb2684f88106eae49db9051",
"max_issues_repo_issues_event_max_datetime": "2020-08-25T08:48:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-07-06T04:00:46.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Paulms/Gridap.jl",
"max_issues_repo_path": "src/Geometry/GridPortions.jl",
"max_line_length": 101,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "962da3b03647a3017fb2684f88106eae49db9051",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Paulms/Gridap.jl",
"max_stars_repo_path": "src/Geometry/GridPortions.jl",
"max_stars_repo_stars_event_max_datetime": "2020-08-27T06:32:26.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-08-27T06:32:26.000Z",
"num_tokens": 1013,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3602
}
|
/*
* Server.h
*
* Created on: Nov 19, 2017
* Author: snork
*/
#ifndef SERVER_HPP_
#define SERVER_HPP_
#include <queue>
#include <boost/asio/io_service.hpp>
#include <boost/asio/strand.hpp>
#include <boost/asio/ip/v6_only.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/write.hpp>
#include "log.hpp"
#include "Messages.hpp"
// Broadcast server: accepts TCP clients and pushes each submitted Message to
// every connected session.  All shared state (_acceptor, _sessions,
// _current_message) is only touched from handlers dispatched on _strand.
// NOTE(review): uses std::promise/std::future, std::list, std::shared_ptr and
// assert, but this header includes none of <future>, <list>, <memory>,
// <cassert> -- presumably relies on transitive includes; confirm.
class Server : private Logable<Server>
{
private:
    typedef boost::asio::ip::tcp::acceptor acceptor_t;
    typedef boost::asio::ip::tcp::socket socket_t;
    typedef boost::asio::io_service::strand strand_t;

    // One connected client.
    struct Session { socket_t socket; };
    typedef std::list<std::shared_ptr<Session>> session_list_t;

public:
    Server(boost::asio::io_service & io)
        : ::Logable<Server>("Server"), _acceptor(io), _socket(io), _strand(io)
    {}

    ~Server()
    {
        Server::stop();
        // Improvised pseudo-spinlock:
        // do not die until all the sessions have died.
        enum TheState { OKAY, AGAIN };
    again:
        auto promise = std::promise<TheState>();
        auto future = promise.get_future();
        _strand.dispatch([&, this](){
            if (_sessions.size() == 0)
                promise.set_value(TheState::OKAY);
            else
                promise.set_value(TheState::AGAIN);
        });
        TheState res = future.get();
        if (TheState::AGAIN == res)
            goto again;
    }

    // Bind to iface:port and start accepting clients.  Blocks until the
    // acceptor is listening; rethrows any setup error from the strand.
    void start(int32_t port, const std::string iface="0.0.0.0")
    {
        using namespace boost;
        auto promise = std::promise<void>();
        auto future = promise.get_future();
        _strand.dispatch([&, this](){
            try
            {
                auto ep = asio::ip::tcp::endpoint(asio::ip::address::from_string(iface),port);
                _acceptor.open(ep.protocol());
                // On a v6 endpoint also accept v4 clients (dual stack).
                if (ep.protocol() == asio::ip::tcp::v6())
                    _acceptor.set_option(asio::ip::v6_only(false));
                _acceptor.bind(ep);
                _acceptor.listen();
                _do_accept_ws();
                promise.set_value();
            }
            catch (...)
            {
                promise.set_exception(std::current_exception());
            }
        });
        future.get();
    }

    // Broadcast msg to all sessions.  If a previous message is still being
    // written, the new one is dropped (logged, not queued).
    void send_message(std::shared_ptr<Message> msg)
    {
        _strand.dispatch([this, msg](){
            if (_current_message)
            {
                LOG_INFO << "Клиент не готов, сбрасываю сообщение";
                return;
            }
            else
            {
                _current_message = std::move(msg);
                _current_message_buffers.clear();
                _current_message->fill_buffer_sequnce(_current_message_buffers);
                _do_chain_ws(_sessions.begin());
            }
        });
    }

    // Stop accepting and close every client socket.  Blocks until done.
    void stop()
    {
        using namespace boost;
        auto promise = std::promise<void>();
        auto future = promise.get_future();
        _strand.dispatch([&, this](){
            boost::system::error_code err;
            _acceptor.close(err);
            if (err)
                LOG_WARN << "Что-то нехорошее на отмене приёма новых соединений: " << err << ":" << err.message();
            for (auto sessionPtr : _sessions)
                sessionPtr->socket.close();
            // Original (translated): "If no message is currently queued for
            // sending - just delete all the sockets ourselves."
            // NOTE(review): the condition tests that a message IS in flight,
            // the opposite of the comment; when a write is pending,
            // _do_chain_ws's operation_aborted branch also clears _sessions.
            // Looks like this may have been meant as !_current_message -- confirm.
            if (_current_message)
                _sessions.clear();
            promise.set_value();
        });
        future.get();
    }

private:
    // Post an async accept for the next client.  Must run on _strand.
    void _do_accept_ws()
    {
        using namespace boost;
        assert(_strand.running_in_this_thread());
        _acceptor.async_accept(_socket, _strand.wrap([this](const system::error_code & err){
            if (err == asio::error::operation_aborted)
                return;
            else if (err)
            {
                LOG_ERROR << "Ошибка на приеме соединения: " << err << ":" << err.message();
                _do_accept_ws();
                return;
            }
            auto session = std::make_shared<Session>( Session{std::move(_socket)} );
//            // Start a read so we can catch EOF and politely close connections of clients that disconnected on their own
//            session->socket.async_read_some(asio::null_buffers(), _strand.wrap([this, session](const system::error_code & err, size_t){
//                // it does not really matter what we caught here; close the socket - politely or however it turns out
//                // nobody is supposed to send us anything anyway
//
//                LOG_INFO << "Something arrived from the client. err = " << err << ":" << err.message() << ". Closing and removing the session";
//                session->socket.close();
//                _sessions.remove(session);
//                // remove the socket from the list
//            }));
            LOG_INFO << "Новый клиент: " << session->socket.remote_endpoint();
            _sessions.emplace_front(std::move(session));
            _do_accept_ws();
        }));
    }

    // Write _current_message_buffers to the session at `current`, then move
    // on to the next session; resets _current_message after the last one.
    // Must run on _strand.
    void _do_chain_ws(session_list_t::iterator current)
    {
        using namespace boost;
        assert(_strand.running_in_this_thread());
        if (current == _sessions.end())
        {
            _current_message.reset();
            return;
        }
        asio::async_write((*current)->socket, _current_message_buffers,
            _strand.wrap([this, current](const system::error_code & err, size_t transferred) {
                assert(_strand.running_in_this_thread());
                decltype(current) next;
                if (err == asio::error::operation_aborted)
                {
                    // a rather ugly solution, but on the whole it will do
                    _sessions.clear();
                    return;
                }
                else if (err)
                {
                    // Something went wrong, kick the current client out
                    LOG_ERROR << "Ошибка на передаче данных к клиенту: " << err << ":" << err.message();
                    system::error_code socket_close_err;
                    (*current)->socket.close(socket_close_err);
                    if (socket_close_err)
                        LOG_WARN << "Какая-то ошибка с закрытием сокета : " << socket_close_err
                            << " : " << socket_close_err.message();
                    next = _sessions.erase(current);
                }
                else
                    next = std::next(current, 1);
                // If there was no error, simply proceed to the next session
                _do_chain_ws(next);
            })
        );
    }

    acceptor_t _acceptor;
    socket_t _socket;        // holds the socket being accepted before it is moved into a Session
    strand_t _strand;
    session_list_t _sessions;
    std::shared_ptr<Message> _current_message;
    Message::buff_sequence_t _current_message_buffers;
};
#endif /* SERVER_HPP_ */
|
{
"alphanum_fraction": 0.6614130435,
"author": null,
"avg_line_length": 23.2911392405,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "12d004012a9006abe087092e902f2a7eac71793f",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2018-06-12T11:30:10.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-06-12T11:30:10.000Z",
"max_forks_repo_head_hexsha": "4d9db6f2d55c726e11abbb60fd436ec3eafc2373",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "granum-space/cansat-2018",
"max_forks_repo_path": "research/spectrometer/spectrometer-server/src/Server.hpp",
"max_issues_count": 9,
"max_issues_repo_head_hexsha": "4d9db6f2d55c726e11abbb60fd436ec3eafc2373",
"max_issues_repo_issues_event_max_datetime": "2018-06-17T19:08:52.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-10-31T19:20:05.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "granum-space/cansat-2017-2018",
"max_issues_repo_path": "research/spectrometer/spectrometer-server/src/Server.hpp",
"max_line_length": 128,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "4d9db6f2d55c726e11abbb60fd436ec3eafc2373",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "granum-space/cansat-2017-2018",
"max_stars_repo_path": "research/spectrometer/spectrometer-server/src/Server.hpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1634,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 5520
}
|
import subprocess
import os
import datetime
import time
import numpy as np
"""
'Logger' intended for real-time logging from inside executors to hdfs. May be slow, so use sparingly!
Looks at CONFIG["LOGS"]["hdfs_logfile"] for hdfs logfile path. Example (note the *3* fwd slashes) might be:
hdfs:///tmp/logfile.txt
Can be viewed easily while getting populated with:
hdfs dfs -cat hdfs:///tmp/logfile.txt &> current_contents_of_logfile
and then opening the logfile locally.
WARNING: since HDFS is Write Once Read Many, some messages generated by this logger seem to be lost when parallel
appends occur to the log file...
WARNING: to force all messages to appear in log file, added wait-retry behavior. Will be *very slow*. Use *sparingly*.
WARNING: does not currently like to see ' (or presumably ") in logged messages.
"""
def setup(config):
msg = str(datetime.datetime.now()) + " "*10 + "<<<<<<<<<<<< Beginning hdfs logfile... >>>>>>>>>>"
if config.has_section("LOGS") and config.has_option("LOGS","hdfs_logfile"):
logPath = config["LOGS"]["hdfs_logfile"]
try:
verbose = True if config["LOGS"]["verbose"] == "True" else False
overwrite = bool(config["LOGS"]["hdfs_log_overwrite"])
if overwrite:
cmd = "hdfs dfs -rm " + logPath
hdfs_rm_output = os.system(cmd)
if verbose and int(hdfs_rm_output) != 0:
print("hdfs logger rm said something: " + str(hdfs_rm_output))
cmd = "echo \"" + msg + "\" | hdfs dfs -appendToFile - " + logPath
hdfs_append_output = os.system(cmd)
if verbose and int(hdfs_append_output) != 0:
print("hdfsLog append (first line) encountered a problem: " + str(hdfs_append_output))
except subprocess.CalledProcessError as e:
print("hdfs logger failed to write >> " + msg + " << due to error " + e.output)
def hdfsLog(config, msg):
    """Append a timestamped message to the HDFS log file, retrying on failure.

    Retries up to 10 times with randomised exponential backoff.  Does nothing
    if CONFIG has no LOGS section or no hdfs_logfile option.
    """
    msg = str(datetime.datetime.now()) + " "*10 + msg
    succeeded = False
    maxAttempts = 10
    attempts = 0
    # Random base for the backoff so parallel writers desynchronise
    rand_wait_base = np.random.uniform(1, 3)
    if config.has_section("LOGS") and config.has_option("LOGS", "hdfs_logfile"):
        logPath = config["LOGS"]["hdfs_logfile"]
        try:
            while not succeeded and attempts < maxAttempts:
                verbose = config["LOGS"]["verbose"] == "True"
                cmd = "echo \"" + msg + " (hdfsLog attempt " + str(attempts) + ")" + "\" | hdfs dfs -appendToFile - " + logPath
                hdfs_append_output = os.system(cmd)
                # BUG FIX: previously the retry branch was guarded by
                # `verbose and ...`, so with verbose off a FAILED append fell
                # through to the else branch and was recorded as success.
                # Retry on any non-zero exit status; `verbose` only controls
                # the diagnostic print.
                if int(hdfs_append_output) != 0:
                    attempts += 1
                    if verbose:
                        print(f"hdfsLog append encountered a problem on try # {attempts}: " + str(hdfs_append_output))
                    # Exponential backoff: 1.3^(attempts-1) times the base
                    rand_wait = 1.3**(attempts-1)*rand_wait_base
                    time.sleep(rand_wait/1000)
                else:
                    succeeded = True
        except subprocess.CalledProcessError as e:
            # NOTE(review): os.system never raises CalledProcessError; kept
            # to preserve the interface.
            print("hdfs logger failed to write >> " + msg + " << due to error " + e.output)
|
{
"alphanum_fraction": 0.6116473616,
"author": null,
"avg_line_length": 42,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9d62ecc33682be9b35a63254101c3cc77e5c609f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7f7ba44055da15d13b191180249e656e1bd398c6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "p-b-j/uscb-das-container-public",
"max_forks_repo_path": "das_decennial/hdfs_logger.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "7f7ba44055da15d13b191180249e656e1bd398c6",
"max_issues_repo_issues_event_max_datetime": "2021-11-01T23:33:46.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-10-30T00:48:45.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "p-b-j/uscb-das-container-public",
"max_issues_repo_path": "das_decennial/hdfs_logger.py",
"max_line_length": 128,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "7f7ba44055da15d13b191180249e656e1bd398c6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "p-b-j/uscb-das-container-public",
"max_stars_repo_path": "das_decennial/hdfs_logger.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-13T01:35:31.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-13T01:35:31.000Z",
"num_tokens": 750,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3108
}
|
## Demo de for Loops
## Prof. James Hunter
## from: https://rstudio.cloud/project/1181172
## 28 de maio de 2020
## Baseado em Cap. 21 de Grolemund & Wickham, R for Data Science (O'Reilly)
# Reproducible example data: four columns of N(0, 1) draws
set.seed(42)
df <- tibble(
a = rnorm(10),
b = rnorm(10),
c = rnorm(10),
d = rnorm(10)
)
glimpse(df)
# NOTE(review): this second set.seed(42) has no visible effect — no random
# draws occur below it; presumably kept for symmetry with the demo script.
set.seed(42)
# Pre-allocate the result vector (one slot per column), then fill it with
# the per-column medians
output <- numeric(length = ncol(df))
for(i in seq_along(output)) {
output[i] <- median(df[[i]]) # double [[ ]] because median sees df as a list
}
output
|
{
"alphanum_fraction": 0.604040404,
"author": null,
"avg_line_length": 18.3333333333,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "cd6d9c373ff60ccd20953c746a0350d52abcd33c",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "299a7f0af7e999e59cc53c57cf22618b6eb68092",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jameshunterbr/Sustentare_MAD_2020",
"max_forks_repo_path": "demo_for_loop.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "299a7f0af7e999e59cc53c57cf22618b6eb68092",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jameshunterbr/Sustentare_MAD_2020",
"max_issues_repo_path": "demo_for_loop.r",
"max_line_length": 77,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "299a7f0af7e999e59cc53c57cf22618b6eb68092",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jameshunterbr/Sustentare_MAD_2020",
"max_stars_repo_path": "demo_for_loop.r",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 161,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 495
}
|
@testset "Testing CHSF Descriptor for dc Si" begin
    @info("Testing CHSF Descriptor for dc Si.")
    using DescriptorZoo, JuLIP, Test
    # Cubic diamond-structure silicon cell
    si_cell = bulk(:Si, cubic=true)
    # CHSF descriptor with cutoff 6.5 and n=2, l=2 expansion
    descriptor = chsf(si_cell, 6.5, n=2, l=2)
    # Reference values for the n=2, l=2 case
    expected = [10.3698237,1.4503467,-8.2118063,51.6882200,-53.0113716,69.8233316] #n=2,l=2 case
    # Flatten the first atom's descriptor row and compare to the reference
    actual = vcat(descriptor[1,:]...)
    println(@test actual ≈ expected)
end
|
{
"alphanum_fraction": 0.7017045455,
"author": null,
"avg_line_length": 29.3333333333,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "967c851fed62c1dd43953a1ce847b39e9d2a2648",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fe0baccc36f02ff964b11f2dca5402f9693ed82b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DescriptorZoo/DescriptorZoo.jl",
"max_forks_repo_path": "test/test_chsf.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "fe0baccc36f02ff964b11f2dca5402f9693ed82b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DescriptorZoo/DescriptorZoo.jl",
"max_issues_repo_path": "test/test_chsf.jl",
"max_line_length": 92,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "fe0baccc36f02ff964b11f2dca5402f9693ed82b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DescriptorZoo/DescriptorZoo.jl",
"max_stars_repo_path": "test/test_chsf.jl",
"max_stars_repo_stars_event_max_datetime": "2021-05-05T20:13:19.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-14T10:52:22.000Z",
"num_tokens": 140,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 352
}
|
from pytest_check import check
import fdm
import jax
from jax.config import config
import jax.numpy as np
import fenics
import fenics_adjoint as fa
import ufl
from jaxfenics_adjoint import build_jax_fem_eval
# Use double precision in JAX so its derivatives match FEniCS float64 results
config.update("jax_enable_x64", True)
# Quieten FEniCS output: single-process stdout, errors only
fenics.parameters["std_out_all_processes"] = False
fenics.set_log_level(fenics.LogLevel.ERROR)
# Small fixed mesh and P1 function space shared by all tests below
mesh = fa.UnitSquareMesh(3, 2)
V = fenics.FunctionSpace(mesh, "P", 1)
def solve_fenics(kappa0, kappa1):
    """Solve a nonlinear variational problem on the module-level space V.

    Minimises JJ = 0.5*inner(kappa0*grad(u), grad(u))*dx - kappa1*f*u*dx
    with homogeneous Dirichlet boundary conditions, where f is a fixed
    Gaussian source centred at (0.5, 0.5).

    Parameters: kappa0, kappa1 — fenics_adjoint Constants (differentiable).
    Returns: u — the fenics_adjoint Function solving derivative(JJ, u) == 0.
    """
    f = fa.Expression(
        "10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)", degree=2
    )
    u = fa.Function(V)
    bcs = [fa.DirichletBC(V, fa.Constant(0.0), "on_boundary")]
    inner, grad, dx = ufl.inner, ufl.grad, ufl.dx
    # Energy functional; its Gateaux derivative below gives the residual form
    JJ = 0.5 * inner(kappa0 * grad(u), grad(u)) * dx - kappa1 * f * u * dx
    v = fenics.TestFunction(V)
    F = fenics.derivative(JJ, u, v)
    fa.solve(F == 0, u, bcs=bcs)
    return u
# Template Constants declare the types of solve_fenics's differentiable inputs
templates = (fa.Constant(0.0), fa.Constant(0.0))
# JAX-callable wrapper around the FEniCS solve
jax_solve_eval = build_jax_fem_eval(templates)(solve_fenics)
# multivariate output function
ff = lambda x, y: np.sqrt(np.square(jax_solve_eval(np.sqrt(x ** 3), y))) # noqa: E731
x_input = np.ones(1)
y_input = 1.2 * np.ones(1)
# multivariate output function of the first argument
hh = lambda x: ff(x, y_input) # noqa: E731
# multivariate output function of the second argument
gg = lambda y: ff(x_input, y) # noqa: E731
def test_jacobian():
    """Check jax.jacrev and jax.vjp against finite differences for both
    single-argument restrictions of ff."""
    # (function, evaluation point, PRNG seed for the random cotangent)
    for func, point, seed in ((hh, x_input, 0), (gg, y_input, 1)):
        jac_fdm = fdm.jacobian(func)(point)
        jac_jax = jax.jacrev(func)(point)
        with check:
            assert np.allclose(jac_fdm, jac_jax)
        key = jax.random.PRNGKey(seed)
        cotangent = jax.random.normal(key, shape=(V.dim(),), dtype="float64")
        # v @ J computed by finite differences vs. the reverse-mode VJP
        vjp_fdm = cotangent @ jac_fdm
        vjp_jax = jax.vjp(func, point)[1](cotangent)
        with check:
            assert np.allclose(vjp_fdm, vjp_jax)
# scalar output function (reduces the PDE solution to one float so that
# jax.grad applies directly)
f_scalar = lambda x, y: np.sqrt( # noqa: E731
    np.sum(np.square(jax_solve_eval(np.sqrt(x ** 3), y)))
)
# restriction of f_scalar to its first argument
h_scalar = lambda x: f_scalar(x, y_input) # noqa: E731
def test_grad():
    """Check jax.grad of the scalar-output composition against finite
    differences, for one argument and then for both at once."""
    grad_fdm = fdm.gradient(h_scalar)(x_input)
    grad_jax = jax.grad(h_scalar)(x_input)
    with check:
        assert np.allclose(grad_fdm, grad_jax)
    # Gradient with respect to both arguments simultaneously
    grads_jax = jax.grad(f_scalar, (0, 1))(x_input, y_input)
    grad_fdm_y = fdm.gradient(lambda y: f_scalar(x_input, y))(y_input)  # noqa: E731
    with check:
        assert np.allclose(grad_fdm, grads_jax[0])
    with check:
        assert np.allclose(grad_fdm_y, grads_jax[1])
|
{
"alphanum_fraction": 0.6659582005,
"author": null,
"avg_line_length": 26.6320754717,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "462e34fe362824d72778960c32b6a4dcc00477e3",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "88f3f6b3d6cc51336511945356635a6dd39023e9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ColCarroll/jax-fenics-adjoint",
"max_forks_repo_path": "tests/test_reverse_ad.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "88f3f6b3d6cc51336511945356635a6dd39023e9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ColCarroll/jax-fenics-adjoint",
"max_issues_repo_path": "tests/test_reverse_ad.py",
"max_line_length": 86,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "88f3f6b3d6cc51336511945356635a6dd39023e9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ColCarroll/jax-fenics-adjoint",
"max_stars_repo_path": "tests/test_reverse_ad.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 970,
"path": null,
"reason": "import jax,from jax",
"repo": null,
"save_path": null,
"sha": null,
"size": 2823
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019.4.30
# @Author : FrankEl
# @File : Feature_selection_demo_rt.py
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as scio
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import train_test_split
from calib.FeatureSelection import RT
if __name__ == "__main__":
    # Demonstration of feature selection by randomization test (RT) with a
    # PLS regression model on the corn NIR benchmark data set.
    ncomp = 7  # number of PLS latent variables
    MatPath = './data/corn.mat'
    CornData = scio.loadmat(MatPath)
    # Wavelength axis: 1100-2498 nm in 2 nm steps
    wv = np.linspace(1100, 2498, (2498 - 1100) // 2 + 1, endpoint=True)
    X = CornData["X"]
    Y = CornData["Y"]
    # Building normal pls model
    Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y[:, 0], test_size=0.2)
    plsModel = PLSRegression(n_components=ncomp)
    plsModel.fit(Xtrain, Ytrain)
    T, P, U, Q, W, C, beta = plsModel.x_scores_, plsModel.x_loadings_, plsModel.y_scores_, plsModel.y_loadings_, plsModel.x_weights_, plsModel.y_weights_, plsModel.coef_
    plt.plot(wv, beta[0:])
    plt.xlabel("Wavelength")
    # BUG FIX: the y axis shows regression coefficients, not spectral intensity
    plt.ylabel("Coefficient")
    plt.title("Regression Coefficients")
    plt.savefig("./Image/Image1_RegressionCoefficient_PLS.png")
    plt.close()
    # Prediction result of pls model
    Ytrain_hat = plsModel.predict(Xtrain)
    Ytest_hat = plsModel.predict(Xtest)
    plt.plot([Ytrain.min(), Ytrain.max()], [Ytrain.min(), Ytrain.max()], 'k--', lw=4)
    plt.scatter(Ytrain, Ytrain_hat, marker='*', label='train')
    plt.scatter(Ytest, Ytest_hat, marker='*', label='test')
    # BUG FIX: scatter(Ytrain, Ytrain_hat) puts the reference value on the x
    # axis and the prediction on the y axis — the labels were swapped.
    plt.xlabel("Reference")
    plt.ylabel("Prediction")
    plt.legend()
    plt.title("Prediction of normal pls model")
    plt.savefig("./Image/Image2_PredictionPLS.png")
    plt.close()
    # P value of RT
    rtModel = RT(Xtrain, Ytrain, ncomp)
    rtModel.calcCriteria()
    plt.plot(rtModel.criteria)
    # BUG FIX: the x axis here is the variable index (no wavelength axis is
    # passed to plt.plot) and the y axis is the RT p-value, not intensity.
    plt.xlabel("Variable index")
    plt.ylabel("P value")
    plt.title("P value of RT")
    # BUG FIX: output filename typo "P_vale" -> "P_value"
    plt.savefig("./Image/Image3_P_value.png")
    plt.close()
    # Feature ranking efficiency by stability of RT
    rtModel.evalCriteria(cv=5)
    plt.plot(rtModel.featureR2)
    # Cross-validated R2 as variables are added in ranked order
    plt.xlabel("Number of top-ranked variables")
    plt.ylabel("R2")
    plt.title("R2")
    plt.savefig("./Image/Image4_R2.png")
    plt.close()
    # Prediction results after feature selection by RT
    XtrainNew, XtestNew = rtModel.cutFeature(Xtrain, Xtest)
    plsModelNew = PLSRegression(n_components=ncomp)
    plsModelNew.fit(XtrainNew, Ytrain)
    YtrainNew_hat = plsModelNew.predict(XtrainNew)
    YtestNew_hat = plsModelNew.predict(XtestNew)
    plt.plot([Ytrain.min(), Ytrain.max()], [Ytrain.min(), Ytrain.max()], 'k--', lw=4)
    plt.scatter(Ytrain, YtrainNew_hat, marker='*', label='train')
    plt.scatter(Ytest, YtestNew_hat, marker='*', label='test')
    # Same axis-label fix as above: x is the reference, y the prediction
    plt.xlabel("Reference")
    plt.ylabel("Prediction")
    plt.legend()
    plt.title("Prediction after RT")
    plt.savefig("./Image/Image5_Prediction_RT.png")
    plt.close()
|
{
"alphanum_fraction": 0.6846814603,
"author": null,
"avg_line_length": 35.8205128205,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "741551d00f27797476d86c3a848545a8a5489275",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2020-09-11T06:01:40.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-06-13T07:10:54.000Z",
"max_forks_repo_head_hexsha": "952abcf471b819b6b6dfa23b6d5dd248155f9dbf",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "uncledragon/CalibrationLibrary",
"max_forks_repo_path": "FeatureSelectionDemo_RT.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "952abcf471b819b6b6dfa23b6d5dd248155f9dbf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "uncledragon/CalibrationLibrary",
"max_issues_repo_path": "FeatureSelectionDemo_RT.py",
"max_line_length": 169,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "7840f7f7a43bd92bf825b917a936728f1a1e8a08",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nankaigc/Calibrationlib",
"max_stars_repo_path": "FeatureSelectionDemo_RT.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-17T09:49:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-12-05T06:40:40.000Z",
"num_tokens": 777,
"path": null,
"reason": "import numpy,import scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2794
}
|
      double precision function deter3(r)
      implicit none
      double precision r(3,3)
c
c     return the determinant of a 3x3 matrix
c
c     cofactor (Laplace) expansion along the first row
      deter3 =
     $     r(1,1)*(r(2,2)*r(3,3)-r(2,3)*r(3,2)) -
     $     r(1,2)*(r(2,1)*r(3,3)-r(2,3)*r(3,1)) +
     $     r(1,3)*(r(2,1)*r(3,2)-r(2,2)*r(3,1))
c
      end
|
{
"alphanum_fraction": 0.4716981132,
"author": null,
"avg_line_length": 24.4615384615,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "6b5f41db05944622587a1d57a2cb3bb42b378da7",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8e185a3b8190b5e168f034c0e0c366ca492fd49e",
"max_forks_repo_licenses": [
"ECL-2.0"
],
"max_forks_repo_name": "mattbernst/ECCE",
"max_forks_repo_path": "ecce-v7.0/src/apps/symmetry/deter3.f",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8e185a3b8190b5e168f034c0e0c366ca492fd49e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"ECL-2.0"
],
"max_issues_repo_name": "mattbernst/ECCE",
"max_issues_repo_path": "ecce-v7.0/src/apps/symmetry/deter3.f",
"max_line_length": 50,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8e185a3b8190b5e168f034c0e0c366ca492fd49e",
"max_stars_repo_licenses": [
"ECL-2.0"
],
"max_stars_repo_name": "mattbernst/ECCE",
"max_stars_repo_path": "ecce-v7.0/src/apps/symmetry/deter3.f",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 150,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 318
}
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2017 by Brendt Wohlberg <brendt@ieee.org>
# All rights reserved. BSD 3-clause License.
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""Utility functions"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future.utils import PY2
from builtins import range
from builtins import object
from timeit import default_timer as timer
import os
import sys
import imghdr
import io
import platform
import multiprocessing as mp
import itertools
import collections
import socket
if PY2:
import urllib2 as urlrequest
import urllib2 as urlerror
else:
import urllib.request as urlrequest
import urllib.error as urlerror
import numpy as np
from scipy import misc
import scipy.ndimage.interpolation as sni
import sporco.linalg as sla
__author__ = """Brendt Wohlberg <brendt@ieee.org>"""
# Python 2/3 unicode literal compatibility
if PY2:
    # NOTE(review): codecs appears unused in this branch — confirm before
    # removing, as only part of the file is visible here.
    import codecs
    def u(x):
        """Python 2/3 compatible definition of utf8 literals.

        Under Python 2 the byte string is decoded as UTF-8.
        """
        return x.decode('utf8')
else:
    def u(x):
        """Python 2/3 compatible definition of utf8 literals.

        Under Python 3 string literals are already unicode; identity.
        """
        return x
def _fix_nested_class_lookup(cls, nstnm):
"""Fix name lookup problem that prevents pickling of classes with nested
class definitions. The approach is loosely based on that implemented at
https://git.io/viGqU , simplified and modified to work in both Python 2.7
and Python 3.x.
Parameters
----------
cls : class
Outer class to which fix is to be applied
nstnm : string
Name of nested (inner) class to be renamed
"""
# Check that nstmm is an attribute of cls
if nstnm in cls.__dict__:
# Get the attribute of cls by its name
nst = cls.__dict__[nstnm]
# Check that the attribute is a class
if isinstance(nst, type):
# Get the module in which the outer class is defined
mdl = sys.modules[cls.__module__]
# Construct an extended name by concatenating inner and outer names
extnm = cls.__name__ + nst.__name__
# Allow lookup of the nested class within the module via
# its extended name
setattr(mdl, extnm, nst)
# Change the nested class name to the extended name
nst.__name__ = extnm
return cls
def _fix_dynamic_class_lookup(cls, pstfx):
"""Fix name lookup problem that prevents pickling of dynamically defined
classes.
Parameters
----------
cls : class
Dynamically generated class to which fix is to be applied
pstfx : string
Postfix that can be used to identify dynamically generated classes
that are equivalent by construction
"""
# Extended name for the class that will be added to the module namespace
extnm = '_' + cls.__name__ + '_' + pstfx
# Get the module in which the dynamic class is defined
mdl = sys.modules[cls.__module__]
# Allow lookup of the dynamically generated class within the module via
# its extended name
setattr(mdl, extnm, cls)
# Change the dynamically generated class name to the extended name
if hasattr(cls, '__qualname__'):
cls.__qualname__ = extnm
else:
cls.__name__ = extnm
def ntpl2array(ntpl):
    """
    Convert a :func:`collections.namedtuple` object to a :class:`numpy.ndarray`
    object that can be saved using :func:`numpy.savez`.

    The returned array has three object entries: the stacked field values,
    the field names, and the namedtuple class name, as consumed by
    :func:`array2ntpl`.

    Parameters
    ----------
    ntpl : collections.namedtuple object
      Named tuple object to be converted to ndarray

    Returns
    -------
    arr : ndarray
      Array representation of input named tuple
    """
    # BUG FIX: np.asarray on a ragged (2D-array, tuple, str) sequence raises
    # ValueError on NumPy >= 1.24; construct the object array explicitly.
    arr = np.empty(3, dtype=object)
    arr[0] = np.vstack([col for col in ntpl])
    arr[1] = ntpl._fields
    arr[2] = ntpl.__class__.__name__
    return arr
def array2ntpl(arr):
    """
    Convert a :class:`numpy.ndarray` object constructed by :func:`ntpl2array`
    back to the original :func:`collections.namedtuple` representation.

    Parameters
    ----------
    arr : ndarray
      Array representation of named tuple constructed by :func:`ntpl2array`

    Returns
    -------
    ntpl : collections.namedtuple object
      Named tuple object with the same name and fields as the original named
      typle object provided to :func:`ntpl2array`
    """
    # arr[2] holds the class name, arr[1] the field names, arr[0] the values
    ntcls = collections.namedtuple(arr[2], arr[1])
    values = tuple(arr[0])
    return ntcls(*values)
def transpose_ntpl_list(lst):
    """Transpose a list of named tuple objects (of the same type) into a
    named tuple of lists.

    Parameters
    ----------
    lst : list of collections.namedtuple object
      List of named tuple objects of the same type

    Returns
    -------
    ntpl : collections.namedtuple object or None
      Named tuple object with each entry consisting of a list of the
      corresponding fields of the named tuple objects in list ``lst``,
      or None when ``lst`` is empty
    """
    # BUG FIX: the emptiness check must precede indexing lst[0]; previously
    # an empty list raised IndexError instead of returning None.
    if len(lst) == 0:
        return None
    cls = collections.namedtuple(lst[0].__class__.__name__, lst[0]._fields)
    return cls(*[[lst[k][l] for k in range(len(lst))]
                 for l in range(len(lst[0]))])
def solve_status_str(hdrtxt, fwiter=4, fpothr=2):
    """Construct header and format details for status display of an
    iterative solver.

    Parameters
    ----------
    hdrtxt : tuple of strings
      Tuple of field header strings
    fwiter : int, optional (default 4)
      Number of characters in iteration count integer field
    fpothr : int, optional (default 2)
      Precision of other float field

    Returns
    -------
    hdrstr : string
      Complete header string
    fmtstr : string
      Complete print formatting string for numeric values
    nsep : integer
      Number of characters in separator string
    """
    # Width of every column after the first; depends on float precision
    fwothr = fpothr + 6
    # Number of columns following the iteration counter
    nflds = len(hdrtxt) - 1
    # Header: left-justified iteration label, then one padded label per field
    hdrstr = '%-*s' % (fwiter + 2, hdrtxt[0])
    hdrstr += (('%%-%ds ' % (fwothr + 1)) * nflds) % tuple(hdrtxt[1:])
    # Row format: fixed-width integer followed by fixed-width exponentials
    fmtstr = '%%%dd' % fwiter + (' %%%d.%de' % (fwothr, fpothr)) * nflds
    # Length of a separator rule matching the header width
    nsep = fwiter + (fwothr + 2) * nflds
    return hdrstr, fmtstr, nsep
def tiledict(D, sz=None):
    """Construct an image allowing visualization of dictionary content.

    Atoms are normalised to [0, 1], laid out on an approximately square
    Vr x Vc grid, and separated by 1-pixel gutters of value 1.

    Parameters
    ----------
    D : array_like
      Dictionary matrix/array.
    sz : tuple
      Size of each block in dictionary.

    Returns
    -------
    im : ndarray
      Image tiled with dictionary entries.
    """
    # Handle standard 2D (non-convolutional) dictionary
    if D.ndim == 2:
        D = D.reshape((sz + (D.shape[1],)))
        sz = None
    dsz = D.shape
    # axisM indexes the atoms; 4D arrays carry a channel axis at position 2
    if D.ndim == 4:
        axisM = 3
        szni = 3
    else:
        axisM = 2
        szni = 2
    # Construct dictionary atom size vector if not provided
    if sz is None:
        sz = np.tile(np.array(dsz[0:2]).reshape([2, 1]), (1, D.shape[axisM]))
    else:
        sz = np.array(sum(tuple((x[0:2],) * x[szni] for x in sz), ())).T
    # Compute the maximum atom dimensions
    mxsz = np.amax(sz, 1)
    # Shift and scale values to [0, 1]
    D = D - D.min()
    D = D / D.max()
    # Construct tiled image
    N = dsz[axisM]
    # Grid dimensions: Vr rows, Vc columns, Vr*Vc >= N
    Vr = int(np.floor(np.sqrt(N)))
    Vc = int(np.ceil(N/float(Vr)))
    # Background of ones so the 1-pixel gutters between atoms appear white
    if D.ndim == 4:
        im = np.ones((Vr*mxsz[0] + Vr-1, Vc*mxsz[1] + Vc-1, dsz[2]))
    else:
        im = np.ones((Vr*mxsz[0] + Vr-1, Vc*mxsz[1] + Vc-1))
    k = 0
    for l in range(0, Vr):
        for m in range(0, Vc):
            # Top-left corner of cell (l, m), including gutter offsets
            r = mxsz[0]*l + l
            c = mxsz[1]*m + m
            if D.ndim == 4:
                im[r:(r+sz[0, k]), c:(c+sz[1, k]), :] = D[0:sz[0, k],
                                                          0:sz[1, k], :, k]
            else:
                im[r:(r+sz[0, k]), c:(c+sz[1, k])] = D[0:sz[0, k],
                                                       0:sz[1, k], k]
            k = k + 1
            # Stop once all N atoms have been placed
            if k >= N:
                break
        if k >= N:
            break
    return im
def imageblocks(imgs, blksz):
    """Extract all blocks of specified size from an image or list of images.

    All overlapping blocks (stride 1) are extracted via a zero-copy strided
    view, then stacked along the third axis.

    Parameters
    ----------
    imgs: array_like or tuple of array_like
      Single image or tuple of images from which to extract blocks
    blksz : tuple of two ints
      Size of the blocks

    Returns
    -------
    blks : ndarray
      Array of extracted blocks, shape (nr, nc, number of blocks)
    """
    # See http://stackoverflow.com/questions/16774148 and
    # sklearn.feature_extraction.image.extract_patches_2d
    if not isinstance(imgs, tuple):
        imgs = (imgs,)
    # Empty accumulator with block-shaped leading axes
    blks = np.array([]).reshape(blksz + (0,))
    for im in imgs:
        Nr, Nc = im.shape
        nr, nc = blksz
        # One (nr, nc) window per valid top-left position
        shape = (Nr-nr+1, Nc-nc+1, nr, nc)
        # Byte strides reuse the row stride for both window and within-window
        # axes, yielding overlapping views without copying
        strides = im.itemsize*np.array([Nc, 1, Nc, 1])
        sb = np.lib.stride_tricks.as_strided(np.ascontiguousarray(im),
                                             shape=shape, strides=strides)
        # Materialise the view before reshaping it in place
        sb = np.ascontiguousarray(sb)
        sb.shape = (-1, nr, nc)
        # Move the block index to the last axis
        sb = np.rollaxis(sb, 0, 3)
        blks = np.dstack((blks, sb))
    return blks
def rgb2gray(rgb):
    """Convert an RGB image (or images) to grayscale.

    Uses the ITU-R BT.601 luma weights (0.299, 0.587, 0.114).

    Parameters
    ----------
    rgb : ndarray
      RGB image as Nr x Nc x 3 or Nr x Nc x 3 x K array

    Returns
    -------
    gry : ndarray
      Grayscale image as Nr x Nc or Nr x Nc x K array
    """
    # BUG FIX: the blue weight was 0.144, a transposition of the standard
    # BT.601 coefficient 0.114 (the weights must sum to 1.0)
    w = np.array([0.299, 0.587, 0.114], dtype=rgb.dtype)
    # Align the weight vector with axis 2, with trailing singleton axes for
    # any additional image-stack dimensions
    w = w.reshape((1, 1, 3) + (1,) * (rgb.ndim - 3))
    return np.sum(w * rgb, axis=2)
def complex_randn(*args):
    """Return a complex array of samples drawn from a standard normal
    distribution.

    Parameters
    ----------
    d0, d1, ..., dn : int
      Dimensions of the random array

    Returns
    -------
    a : ndarray
      Random array of shape (d0, d1, ..., dn)
    """
    # Draw real part first, then imaginary, matching numpy's global RNG order
    real_part = np.random.randn(*args)
    imag_part = np.random.randn(*args)
    return real_part + 1j * imag_part
def spnoise(s, frc, smn=0.0, smx=1.0):
    """Return image with salt & pepper noise imposed on it.

    Parameters
    ----------
    s : ndarray
      Input image
    frc : float
      Desired fraction of pixels corrupted by noise
    smn : float, optional (default 0.0)
      Lower value for noise (pepper)
    smx : float, optional (default 1.0)
      Upper value for noise (salt)

    Returns
    -------
    sn : ndarray
      Noisy image
    """
    # Work on a copy so the input image is left untouched
    noisy = s.copy()
    # Uniform field on (-1, 1): values in the lower frc/2 tail become pepper,
    # values in the upper frc/2 tail become salt
    field = np.random.uniform(-1.0, 1.0, s.shape)
    noisy[field < frc - 1.0] = smn
    noisy[field > 1.0 - frc] = smx
    return noisy
def tikhonov_filter(s, lmbda, npd=16):
    r"""Lowpass filter based on Tikhonov regularization.

    Lowpass filter image(s) and return low and high frequency
    components, consisting of the lowpass filtered image and its
    difference with the input image. The lowpass filter is equivalent to
    Tikhonov regularization with `lmbda` as the regularization parameter
    and a discrete gradient as the operator in the regularization term,
    i.e. the lowpass component is the solution to

    .. math::
      \mathrm{argmin}_\mathbf{x} \; (1/2) \left\|\mathbf{x} - \mathbf{s}
      \right\|_2^2 + (\lambda / 2) \sum_i \| G_i \mathbf{x} \|_2^2 \;\;,

    where :math:`\mathbf{s}` is the input image, :math:`\lambda` is the
    regularization parameter, and :math:`G_i` is an operator that
    computes the discrete gradient along image axis :math:`i`. Once the
    lowpass component :math:`\mathbf{x}` has been computed, the highpass
    component is just :math:`\mathbf{s} - \mathbf{x}`.

    Parameters
    ----------
    s : array_like
      Input image or array of images.
    lmbda : float
      Regularization parameter controlling lowpass filtering.
    npd : int, optional (default=16)
      Number of samples to pad at image boundaries.

    Returns
    -------
    sl : array_like
      Lowpass image or array of images.
    sh : array_like
      Highpass image or array of images.
    """
    # Finite-difference gradient kernels for rows and columns
    grv = np.array([-1.0, 1.0]).reshape([2, 1])
    gcv = np.array([-1.0, 1.0]).reshape([1, 2])
    # Frequency responses of the gradient operators on the padded grid
    Gr = sla.fftn(grv, (s.shape[0]+2*npd, s.shape[1]+2*npd), (0, 1))
    Gc = sla.fftn(gcv, (s.shape[0]+2*npd, s.shape[1]+2*npd), (0, 1))
    # Denominator of the closed-form Tikhonov solution in the DFT domain
    A = 1.0 + lmbda*np.conj(Gr)*Gr + lmbda*np.conj(Gc)*Gc
    if s.ndim > 2:
        # Broadcast A over any additional (e.g. channel/stack) axes
        A = A[(slice(None),)*2 + (np.newaxis,)*(s.ndim-2)]
    # Symmetric padding reduces boundary artifacts from the circular FFT
    sp = np.pad(s, ((npd, npd),)*2 + ((0, 0),)*(s.ndim-2), 'symmetric')
    slp = np.real(sla.ifftn(sla.fftn(sp, axes=(0, 1)) / A, axes=(0, 1)))
    # Crop the padding back off to recover the original spatial size
    sl = slp[npd:(slp.shape[0]-npd), npd:(slp.shape[1]-npd)]
    sh = s - sl
    return sl.astype(s.dtype), sh.astype(s.dtype)
def idle_cpu_count(mincpu=1):
    """Estimate number of idle CPUs, for use by multiprocessing code
    needing to determine how many processes can be run without excessive
    load. This function uses :func:`os.getloadavg` which is only available
    under a Unix OS.

    Parameters
    ----------
    mincpu : int
      Minimum number of CPUs to report, independent of actual estimate

    Returns
    -------
    idle : int
      Estimate of number of idle CPUs
    """
    # mp.cpu_count() is available on both Python 2 and 3 (os.cpu_count is
    # Python 3 only), so the previous PY2 version branch was unnecessary
    ncpu = mp.cpu_count()
    # Idle estimate: total CPUs minus the 1-minute load average
    idle = int(ncpu - np.floor(os.getloadavg()[0]))
    return max(mincpu, idle)
def grid_search(fn, grd, fmin=True, nproc=None):
    """Perform a grid search for optimal parameters of a specified
    function. In the simplest case the function returns a float value,
    and a single optimum value and corresponding parameter values are
    identified. If the function returns a tuple of values, each of
    these is taken to define a separate function on the search grid,
    with optimum function values and corresponding parameter values
    being identified for each of them. On all platforms except Windows
    (where ``mp.Pool`` usage has some limitations), the computation
    of the function at the grid points is computed in parallel.

    **Warning:** This function will hang if `fn` makes use of :mod:`pyfftw`
    with multi-threading enabled (the
    `bug <https://github.com/pyFFTW/pyFFTW/issues/135>`_ has been reported).
    When using the FFT functions in :mod:`sporco.linalg`, multi-threading
    can be disabled by including the following code::

      import sporco.linalg
      sporco.linalg.pyfftw_threads = 1

    Parameters
    ----------
    fn : function
      Function to be evaluated. It should take a tuple of parameter values as
      an argument, and return a float value or a tuple of float values.
    grd : tuple of array_like
      A tuple providing an array of sample points for each axis of the grid
      on which the search is to be performed.
    fmin : bool, optional (default True)
      Determine whether optimal function values are selected as minima or
      maxima. If `fmin` is True then minima are selected.
    nproc : int or None, optional (default None)
      Number of processes to run in parallel. If None, the number of
      CPUs of the system is used.

    Returns
    -------
    sprm : ndarray
      Optimal parameter values on each axis. If `fn` is multi-valued,
      `sprm` is a matrix with rows corresponding to parameter values
      and columns corresponding to function values.
    sfvl : float or ndarray
      Optimum function value or values
    fvmx : ndarray
      Function value(s) on search grid
    sidx : tuple of int or tuple of ndarray
      Indices of optimal values on parameter grid
    """
    # Select min or max semantics for the optimum
    if fmin:
        slct = np.argmin
    else:
        slct = np.argmax
    # Cartesian product of the per-axis sample points
    fprm = itertools.product(*grd)
    if platform.system() == 'Windows':
        # Serial evaluation: mp.Pool has limitations on Windows
        fval = list(map(fn, fprm))
    else:
        # Parallel evaluation across nproc worker processes
        if nproc is None:
            nproc = mp.cpu_count()
        pool = mp.Pool(processes=nproc)
        fval = pool.map(fn, fprm)
        pool.close()
        pool.join()
    if isinstance(fval[0], (tuple, list, np.ndarray)):
        # Multi-valued fn: locate an optimum independently for each component
        nfnv = len(fval[0])
        fvmx = np.reshape(fval, [a.size for a in grd] + [nfnv,])
        sidx = np.unravel_index(slct(fvmx.reshape((-1, nfnv)), axis=0),
                                fvmx.shape[0:-1]) + (np.array((range(nfnv))),)
        sprm = np.array([grd[k][sidx[k]] for k in range(len(grd))])
        sfvl = tuple(fvmx[sidx])
    else:
        # Scalar fn: single optimum over the whole grid
        fvmx = np.reshape(fval, [a.size for a in grd])
        sidx = np.unravel_index(slct(fvmx), fvmx.shape)
        sprm = np.array([grd[k][sidx[k]] for k in range(len(grd))])
        sfvl = fvmx[sidx]
    return sprm, sfvl, fvmx, sidx
def convdicts():
    """Access a set of example learned convolutional dictionaries.

    Returns
    -------
    cdd : dict
      A dict associating description strings with dictionaries represented
      as ndarrays

    Examples
    --------
    Print the dict keys to obtain the identifiers of the available
    dictionaries

    >>> from sporco import util
    >>> cd = util.convdicts()
    >>> print(cd.keys())
    ['G:12x12x72', 'G:8x8x16,12x12x32,16x16x48', ...]

    Select a specific example dictionary using the corresponding identifier

    >>> D = cd['G:8x8x96']
    """
    # The example dictionaries ship with the package as an .npz archive
    path = os.path.join(os.path.dirname(__file__), 'data', 'convdict.npz')
    archive = np.load(path)
    # Materialise every stored array into a plain dict
    return {key: archive[key] for key in archive.keys()}
def netgetdata(url, maxtry=3, timeout=10):
    """
    Get content of a file via a URL.

    Timeouts are retried up to `maxtry` times; any other URL error is
    re-raised immediately.

    Parameters
    ----------
    url : string
      URL of the file to be downloaded
    maxtry : int, optional (default 3)
      Maximum number of download retries
    timeout : int, optional (default 10)
      Timeout in seconds for blocking operations

    Returns
    -------
    str : io.BytesIO
      Buffered I/O stream

    Raises
    ------
    urlerror.URLError (urllib2.URLError in Python 2,
    urllib.error.URLError in Python 3)
      If the file cannot be downloaded
    """
    # Pre-seeded error raised by the for/else below if the loop body never
    # runs (i.e. maxtry <= 0); overwritten by the last URLError otherwise
    err = ValueError('maxtry parameter should be greater than zero')
    for ntry in range(maxtry):
        try:
            rspns = urlrequest.urlopen(url, timeout=timeout)
            cntnt = rspns.read()
            # Success: leave the loop, skipping the else clause
            break
        except urlerror.URLError as e:
            err = e
            # Only timeouts are retried; other failures propagate at once
            if not isinstance(e.reason, socket.timeout):
                raise
    else:
        # Loop exhausted without a successful download (or never entered)
        raise err
    return io.BytesIO(cntnt)
def in_ipython():
    """
    Determine whether code is running in an ipython shell.

    Returns
    -------
    ip : bool
      True if running in an ipython shell, False otherwise
    """
    # get_ipython is injected into builtins only inside an ipython session,
    # so a NameError means we are running under plain Python
    # (see https://stackoverflow.com/questions/15411967)
    try:
        shellname = get_ipython().__class__.__name__
    except NameError:
        return False
    return shellname == 'TerminalInteractiveShell'
def in_notebook():
    """
    Determine whether code is running in a Jupyter Notebook shell.

    Returns
    -------
    ip : bool
        True if running in a notebook shell, False otherwise
    """

    try:
        # get_ipython is only defined inside IPython/Jupyter kernels; in a
        # plain interpreter the name lookup raises NameError
        return get_ipython().__class__.__name__ == 'ZMQInteractiveShell'
    except NameError:
        return False
class ExampleImages(object):
    """Access a set of example images.

    Walks a base directory for image files and exposes them by name and
    by "group" (subdirectory). Loading options (scaling, dtype, zoom,
    grayscale conversion) can be set per-object and overridden per-call.
    """

    def __init__(self, scaled=False, dtype=None, zoom=None, gray=False,
                 pth=None):
        """Initialise an ExampleImages object.

        Parameters
        ----------
        scaled : bool, optional (default False)
            Flag indicating whether images should be on the range [0,...,255]
            with np.uint8 dtype (False), or on the range [0,...,1] with
            np.float32 dtype (True)
        dtype : data-type or None, optional (default None)
            Desired data type of images. If `scaled` is True and `dtype` is an
            integer type, the output data type is np.float32
        zoom : float or None, optional (default None)
            Optional support rescaling factor to apply to the images
        gray : bool, optional (default False)
            Flag indicating whether RGB images should be converted to grayscale
        pth : string or None (default None)
            Path to directory containing image files. If the value is None the
            path points to a set of example images that are included with the
            package.
        """

        # Store loading options; these act as defaults for `image`
        self.scaled = scaled
        self.dtype = dtype
        self.zoom = zoom
        self.gray = gray
        # Base path defaults to the package's bundled example-image directory
        if pth is None:
            self.bpth = os.path.join(os.path.dirname(__file__), 'data')
        else:
            self.bpth = pth
        # Flat list of image paths (relative to bpth) and a mapping from
        # subdirectory name ("group") to the images it contains
        self.imglst = []
        self.grpimg = {}
        for dirpath, dirnames, filenames in os.walk(self.bpth):
            # It would be more robust and portable to use
            # pathlib.PurePath.relative_to
            prnpth = dirpath[len(self.bpth)+1:]
            for f in filenames:
                fpth = os.path.join(dirpath, f)
                # NOTE(review): imghdr is deprecated (removed in Python
                # 3.13); a file-extension or Pillow-based check would be
                # more future-proof -- confirm the supported Python range
                if imghdr.what(fpth) is not None:
                    gpth = os.path.join(prnpth, f)
                    self.imglst.append(gpth)
                    if prnpth not in self.grpimg:
                        self.grpimg[prnpth] = []
                    self.grpimg[prnpth].append(gpth)


    def images(self):
        """Get list of available images.

        Returns
        -------
        nlst : list
            A list of names of available images
        """

        return self.imglst


    def groups(self):
        """Get list of available image groups.

        Returns
        -------
        grp : list
            A list of names of available image groups
        """

        return list(self.grpimg.keys())


    def groupimages(self, grp):
        """Get list of available images in specified group.

        Parameters
        ----------
        grp : str
            Name of image group

        Returns
        -------
        nlst : list
            A list of names of available images in the specified group
        """

        return self.grpimg[grp]


    def image(self, fname, group=None, scaled=None, dtype=None, idxexp=None,
              zoom=None, gray=None):
        """Get named image.

        Parameters
        ----------
        fname : string
            Filename of image
        group : string or None, optional (default None)
            Name of image group
        scaled : bool or None, optional (default None)
            Flag indicating whether images should be on the range [0,...,255]
            with np.uint8 dtype (False), or on the range [0,...,1] with
            np.float32 dtype (True). If the value is None, scaling behaviour
            is determined by the `scaling` parameter passed to the object
            initializer, otherwise that selection is overridden.
        dtype : data-type or None, optional (default None)
            Desired data type of images. If `scaled` is True and `dtype` is an
            integer type, the output data type is np.float32. If the value is
            None, the data type is determined by the `dtype` parameter passed to
            the object initializer, otherwise that selection is overridden.
        idxexp : index expression or None, optional (default None)
            An index expression selecting, for example, a cropped region of
            the requested image. This selection is applied *before* any
            `zoom` rescaling so the expression does not need to be modified when
            the zoom factor is changed.
        zoom : float or None, optional (default None)
            Optional rescaling factor to apply to the images. If the value is
            None, support rescaling behaviour is determined by the `zoom`
            parameter passed to the object initializer, otherwise that selection
            is overridden.
        gray : bool or None, optional (default None)
            Flag indicating whether RGB images should be converted to grayscale.
            If the value is None, behaviour is determined by the `gray`
            parameter passed to the object initializer.

        Returns
        -------
        img : ndarray
            Image array

        Raises
        ------
        IOError
            If the image is not accessible
        """

        # Fall back to per-object defaults for any option not given
        if scaled is None:
            scaled = self.scaled
        if dtype is None:
            if self.dtype is None:
                dtype = np.uint8
            else:
                dtype = self.dtype
        # Scaling to [0,1] needs a floating point type; override any
        # integer dtype with float32
        if scaled and np.issubdtype(dtype, np.integer):
            dtype = np.float32
        if zoom is None:
            zoom = self.zoom
        if gray is None:
            gray = self.gray
        # Resolve the image path, optionally within a group subdirectory
        if group is None:
            pth = os.path.join(self.bpth, fname)
        else:
            pth = os.path.join(self.bpth, group, fname)

        try:
            # NOTE(review): scipy.misc.imread was removed in SciPy 1.2;
            # presumably `misc` is scipy.misc from a pinned older SciPy --
            # confirm, or migrate to imageio.imread upstream
            img = np.asarray(misc.imread(pth), dtype=dtype)
        except IOError:
            raise IOError('Could not access image %s in group %s' %
                          (fname, group))

        # In-place division is safe: when scaled is True, dtype has been
        # forced to a float type above
        if scaled:
            img /= 255.0
        # Cropping is applied before zooming (see idxexp docs above)
        if idxexp is not None:
            img = img[idxexp]
        if zoom is not None:
            if img.ndim == 2:
                img = sni.zoom(img, zoom)
            else:
                # Zoom only the first two (spatial) axes of multi-channel
                # images; channel axes keep unit scale
                img = sni.zoom(img, (zoom,)*2 + (1,)*(img.ndim-2))
        if gray:
            img = rgb2gray(img)

        return img
class Timer(object):
    """Timer class supporting multiple independent labelled timers.

    The timer is based on the relative time returned by
    :func:`timeit.default_timer`.
    """

    def __init__(self, labels=None, dfltlbl='main', alllbl='all'):
        """Initialise timer object.

        Parameters
        ----------
        labels : string or list, optional (default None)
            Specify the label(s) of the timer(s) to be initialised to zero.
        dfltlbl : string, optional (default 'main')
            Set the default timer label to be used when methods are
            called without specifying a label
        alllbl : string, optional (default 'all')
            Set the label string that will be used to denote all timer labels
        """

        # t0[lbl] holds the start instant of a running timer (None when
        # the timer is stopped); td[lbl] holds its accumulated time
        self.t0 = {}
        self.td = {}
        # Record default label and string indicating all labels
        self.dfltlbl = dfltlbl
        self.alllbl = alllbl
        # Initialise dictionary entries for labels to be created
        # immediately
        if labels is not None:
            if not isinstance(labels, (list, tuple)):
                labels = [labels,]
            for lbl in labels:
                self.td[lbl] = 0.0
                self.t0[lbl] = None


    def start(self, labels=None):
        """Start specified timer(s).

        Parameters
        ----------
        labels : string or list, optional (default None)
            Specify the label(s) of the timer(s) to be started. If it is
            ``None``, start the default timer with label specified by the
            ``dfltlbl`` parameter of :meth:`__init__`.
        """

        # Default label is self.dfltlbl
        if labels is None:
            labels = self.dfltlbl
        # If label is not a list or tuple, create a singleton list
        # containing it
        if not isinstance(labels, (list, tuple)):
            labels = [labels,]
        # Sample the clock once so all listed timers share the same
        # start instant
        t = timer()
        for lbl in labels:
            # On first call to start for a label, set its accumulator to zero
            if lbl not in self.td:
                self.td[lbl] = 0.0
                self.t0[lbl] = None
            # Record the time at which start was called for this lbl if
            # it isn't already running
            if self.t0[lbl] is None:
                self.t0[lbl] = t


    def stop(self, labels=None):
        """Stop specified timer(s).

        Parameters
        ----------
        labels : string or list, optional (default None)
            Specify the label(s) of the timer(s) to be stopped. If it is
            ``None``, stop the default timer with label specified by the
            ``dfltlbl`` parameter of :meth:`__init__`. If it is equal to
            the string specified by the ``alllbl`` parameter of
            :meth:`__init__`, stop all timers.

        Raises
        ------
        KeyError
            If a specified label does not correspond to an existing timer
        """

        # Sample the clock first so the recorded stop instant does not
        # include the overhead of the loop below
        t = timer()
        # Default label is self.dfltlbl
        if labels is None:
            labels = self.dfltlbl
        # All timers are affected if label is equal to self.alllbl,
        # otherwise only the timer(s) specified by label
        if labels == self.alllbl:
            labels = self.t0.keys()
        elif not isinstance(labels, (list, tuple)):
            labels = [labels,]
        # Iterate over specified label(s)
        for lbl in labels:
            if lbl not in self.t0:
                raise KeyError('Unrecognized timer key %s' % lbl)
            # If self.t0[lbl] is None, the corresponding timer is
            # already stopped, so no action is required
            if self.t0[lbl] is not None:
                # Increment time accumulator from the elapsed time
                # since most recent start call
                self.td[lbl] += t - self.t0[lbl]
                # Set start time to None to indicate timer is not running
                self.t0[lbl] = None


    def reset(self, labels=None):
        """Reset specified timer(s) to a stopped state with zero
        accumulated time.

        Parameters
        ----------
        labels : string or list, optional (default None)
            Specify the label(s) of the timer(s) to be reset. If it is
            ``None``, reset the default timer with label specified by the
            ``dfltlbl`` parameter of :meth:`__init__`. If it is equal to
            the string specified by the ``alllbl`` parameter of
            :meth:`__init__`, reset all timers.

        Raises
        ------
        KeyError
            If a specified label does not correspond to an existing timer
        """

        # Default label is self.dfltlbl
        if labels is None:
            labels = self.dfltlbl
        # All timers are affected if label is equal to self.alllbl,
        # otherwise only the timer(s) specified by label
        if labels == self.alllbl:
            labels = self.t0.keys()
        elif not isinstance(labels, (list, tuple)):
            labels = [labels,]
        # Iterate over specified label(s)
        for lbl in labels:
            if lbl not in self.t0:
                raise KeyError('Unrecognized timer key %s' % lbl)
            # Set start time to None to indicate timer is not running
            self.t0[lbl] = None
            # Set time accumulator to zero
            self.td[lbl] = 0.0


    def elapsed(self, label=None, total=True):
        """Get elapsed time since timer start.

        Parameters
        ----------
        label : string, optional (default None)
            Specify the label of the timer for which the elapsed time is
            required. If it is ``None``, the default timer with label
            specified by the ``dfltlbl`` parameter of :meth:`__init__`
            is selected.
        total : bool, optional (default True)
            If ``True`` return the total elapsed time since the first
            call of :meth:`start` for the selected timer, otherwise
            return the elapsed time since the most recent call of
            :meth:`start` for which there has not been a corresponding
            call to :meth:`stop`.

        Returns
        -------
        dlt : float
            Elapsed time

        Raises
        ------
        KeyError
            If `label` is specified explicitly but does not correspond
            to an existing timer
        """

        # Get current time
        t = timer()
        # Default label is self.dfltlbl
        if label is None:
            label = self.dfltlbl
            # Return 0.0 if the default timer is selected and it has
            # never been started
            if label not in self.t0:
                return 0.0
        # Raise exception if an explicitly requested timer does not exist.
        # (The original code repeated the membership test after an
        # unconditional early return, so this branch was unreachable.)
        if label not in self.t0:
            raise KeyError('Unrecognized timer key %s' % label)
        # If total flag is True return sum of accumulated time from
        # previous start/stop calls and current start call, otherwise
        # return just the time since the current start call
        te = 0.0
        if self.t0[label] is not None:
            te = t - self.t0[label]
        if total:
            te += self.td[label]

        return te


    def labels(self):
        """Get a list of timer labels.

        Returns
        -------
        lbl : list
            List of timer labels
        """

        # Return an actual list, matching the documented return type
        return list(self.t0.keys())


    def __str__(self):
        """Return string representation of object.

        The representation consists of a table with the following columns:

        * Timer label
        * Accumulated time from past start/stop calls
        * Time since current start call, or 'Stopped' if timer is not
          currently running
        """

        # Get current time
        t = timer()
        # Length of label field, calculated from max label length
        lfldln = max([len(lbl) for lbl in self.t0] + [len(self.dfltlbl),]) + 2
        # Header string for table of timers
        s = '%-*s Accum. Current\n' % (lfldln, 'Label')
        s += '-' * (lfldln + 25) + '\n'
        # Construct table of timer details
        for lbl in sorted(self.t0):
            td = self.td[lbl]
            if self.t0[lbl] is None:
                ts = ' Stopped'
            else:
                ts = ' %.2e s' % (t - self.t0[lbl])
            s += '%-*s %.2e s %s\n' % (lfldln, lbl, td, ts)

        return s
class ContextTimer(object):
    """A wrapper class for :class:`Timer` that enables its use as a
    context manager.

    For example, instead of

    >>> t = Timer()
    >>> t.start()
    >>> do_something()
    >>> t.stop()
    >>> elapsed = t.elapsed()

    one can use

    >>> t = Timer()
    >>> with ContextTimer(t):
    ...     do_something()
    >>> elapsed = t.elapsed()
    """

    def __init__(self, timer=None, label=None, action='StartStop'):
        """Initialise context manager timer wrapper.

        Parameters
        ----------
        timer : class:`Timer` object, optional (default None)
            Specify the timer object to be used as a context manager. If
            ``None``, a new class:`Timer` object is constructed.
        label : string, optional (default None)
            Specify the label of the timer to be used. If it is ``None``,
            start the default timer.
        action : string, optional (default 'StartStop')
            Specify actions to be taken on context entry and exit. If
            the value is 'StartStop', start the timer on entry and stop
            on exit; if it is 'StopStart', stop the timer on entry and
            start it on exit.

        Raises
        ------
        ValueError
            If `action` is neither 'StartStop' nor 'StopStart'
        """

        if action != 'StartStop' and action != 'StopStart':
            raise ValueError('Unrecognized action %s' % action)
        self.timer = Timer() if timer is None else timer
        self.label = label
        self.action = action


    def __enter__(self):
        """Apply the entry action ('StartStop' starts the timer,
        'StopStart' stops it) and return this ContextTimer instance."""

        if self.action == 'StartStop':
            self.timer.start(self.label)
        else:
            self.timer.stop(self.label)
        return self


    def __exit__(self, type, value, traceback):
        """Apply the exit action (the inverse of the entry action).
        Return True when the 'with' block raised no exception, otherwise
        False so that the exception propagates.
        """

        if self.action == 'StartStop':
            self.timer.stop(self.label)
        else:
            self.timer.start(self.label)
        # `type` is None on a clean exit and an exception class otherwise
        return type is None


    def elapsed(self, total=True):
        """Return the elapsed time for the wrapped timer.

        Parameters
        ----------
        total : bool, optional (default True)
            If ``True`` return the total elapsed time since the first
            call of :meth:`start` for the selected timer, otherwise
            return the elapsed time since the most recent call of
            :meth:`start` for which there has not been a corresponding
            call to :meth:`stop`.

        Returns
        -------
        dlt : float
            Elapsed time
        """

        return self.timer.elapsed(self.label, total=total)
|
{
"alphanum_fraction": 0.5870279827,
"author": null,
"avg_line_length": 30.5751689189,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bb179cb464cfb0e59f2f01187d18f6f6ef4a59be",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 35,
"max_forks_repo_forks_event_max_datetime": "2022-03-21T09:49:55.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-06-16T12:48:23.000Z",
"max_forks_repo_head_hexsha": "402b8f6c8ee4ba9c86e9da0e2073d900cf8da207",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "sophiaas/alphacsc",
"max_forks_repo_path": "alphacsc/other/sporco/sporco/util.py",
"max_issues_count": 75,
"max_issues_repo_head_hexsha": "402b8f6c8ee4ba9c86e9da0e2073d900cf8da207",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T17:31:58.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-07-15T14:03:40.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "sophiaas/alphacsc",
"max_issues_repo_path": "alphacsc/other/sporco/sporco/util.py",
"max_line_length": 79,
"max_stars_count": 89,
"max_stars_repo_head_hexsha": "402b8f6c8ee4ba9c86e9da0e2073d900cf8da207",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "sophiaas/alphacsc",
"max_stars_repo_path": "alphacsc/other/sporco/sporco/util.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-22T09:52:17.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-05-31T19:20:52.000Z",
"num_tokens": 8923,
"path": null,
"reason": "import numpy,import scipy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 36201
}
|
## Main function: header_analysis(text)
## input file: readme files text data
## output file: json files with categories extracted using header analysis; other text data cannot be extracted
import os
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import string
import collections
from textblob import Word
import json
# Define wordnet groups
# Each readme category maps to a hand-picked list of WordNet synsets (specific
# word senses); readme headers are later scored against these lists by path
# similarity (see find_sim / match_group / label_header below).
# NOTE(review): the numeric synset indices are tied to the WordNet corpus
# version shipped with textblob/nltk; a different corpus version could
# silently select different senses -- confirm the pinned nltk data version.
group = dict()
citation = [Word("citation").synsets[2], Word("reference").synsets[1], Word("cite").synsets[3]]
group.update({"citation":citation})
run = [Word("run").synsets[9],Word("run").synsets[34],Word("execute").synsets[4]]
group.update({"run":run})
install = [Word("installation").synsets[0],Word("install").synsets[0],Word("setup").synsets[1],Word("prepare").synsets[0],Word("preparation").synsets[0],Word("manual").synsets[0],Word("guide").synsets[2],Word("guide").synsets[9]]
group.update({"installation":install})
download = [Word("download").synsets[0]]
group.update({"download":download})
requirement = [Word("requirement").synsets[2],Word("prerequisite").synsets[0],Word("prerequisite").synsets[1],Word("dependency").synsets[0],Word("dependent").synsets[0]]
group.update({"requirement":requirement})
contact = [Word("contact").synsets[9]]
group.update({"contact":contact})
description = [Word("description").synsets[0],Word("description").synsets[1],Word("introduction").synsets[3],Word("introduction").synsets[6],Word("basics").synsets[0],Word("initiation").synsets[1],Word("start").synsets[0],Word("start").synsets[4],Word("started").synsets[0],Word("started").synsets[1],Word("started").synsets[7],Word("started").synsets[8],Word("overview").synsets[0],Word("summary").synsets[0],Word("summary").synsets[2]]
group.update({"description":description})
contributor = [Word("contributor").synsets[0]]
group.update({"contributor":contributor})
documentation = [Word("documentation").synsets[1]]
group.update({"documentation":documentation})
# NOTE: this name shadows the built-in license() helper at module scope
license = [Word("license").synsets[3],Word("license").synsets[0]]
group.update({"license":license})
usage = [Word("usage").synsets[0],Word("example").synsets[0],Word("example").synsets[5],Word("implement").synsets[1],Word("implementation").synsets[1],Word("demo").synsets[1],Word("tutorial").synsets[0],Word("tutorial").synsets[1]]
group.update({"usage":usage})
update = [Word("updating").synsets[0],Word("updating").synsets[3]]
group.update({"update":update})
issues = [Word("issues").synsets[0],Word("errors").synsets[5],Word("problems").synsets[0],Word("problems").synsets[2]]
group.update({"issues":issues})
support = [Word("support").synsets[7],Word("help").synsets[0],Word("help").synsets[9],Word("report").synsets[0],Word("report").synsets[6]]
group.update({"support":support})
def extract_header_content(text):
    """Split readme text into (header, content) pairs.

    Headers may be declared either with setext underlines (``===``/``---``)
    or with atx ``#`` prefixes. Any content preceding the first header is
    kept under the pseudo-header '*extra content'. Rows whose content is
    empty are dropped.

    Parameters
    ----------
    text : str
        Raw readme text.

    Returns
    -------
    pandas.DataFrame
        Frame with 'Header' and 'Content' columns.
    """
    # Detect setext-style headers (a line underlined with ===)
    underline_header = re.findall('.+[\n]={3,}[\n]', text)
    # header declared with ==== and ---
    if len(underline_header) != 0:
        Header = re.findall('(.+[\n]={3,}[\n])|(.+[\n]-{3,}[\n])', text)
        Header = [i[0] for i in Header]
        Header = [re.sub('([\n]={3,}[\n])|([\n]-{3,}[\n])', '', i) for i in Header]
        Header.insert(0, '*extra content')
        # NOTE: the split pattern only covers '=' underlines, so '---'
        # headers remain embedded in the preceding content block
        Content = re.split('.+[\n]={3,}[\n]', text)
    # header declared with ##
    else:
        # Temporarily mangle '#' inside fenced code blocks so they are not
        # mistaken for headers, then undo the mangling afterwards
        a = re.findall('\`\`\`[^\`]+\`\`\`', text, flags=re.DOTALL)
        a_sub = [re.sub('#', '#notes:', i) for i in a]
        for i, j in zip(a, a_sub):
            text = text.replace(i, j)
        Header = re.findall('#{1,5} .*', text)
        Header = [re.sub('#', '', i) for i in Header]
        Header.insert(0, '*extra content')
        Content = re.split('#{1,5} .*', text)
        Content = [re.sub('#notes', '#', i) for i in Content]
    # Strip the leading run of newlines from each content block
    Content = [re.sub("[\n]+", '', i, count=1) for i in Content]
    # Pair headers with content. DataFrame.append was removed in pandas 2.0,
    # so the frame is built from a list of records instead of row-wise appends.
    df = pd.DataFrame(
        [{'Header': i, 'Content': j} for i, j in zip(Header, Content)],
        columns=['Header', 'Content'])
    # Drop sections with empty content
    df['Content'] = df['Content'].replace('', np.nan)
    df = df.dropna(subset=['Content'])
    print('Extracting headers and content.')
    return df
def find_sim(wordlist, wd):
    """Return the maximum WordNet path similarity between synset `wd` and
    any synset in `wordlist`, or 0 when no comparison yields a score."""
    scores = [wd.path_similarity(sense) for sense in wordlist]
    scores = [v for v in scores if v is not None]
    return max(scores) if scores else 0
def match_group(word_syn, group, threshold):
    """Return the group key whose senses best match any sense of a word.

    Parameters
    ----------
    word_syn : list
        WordNet synsets (senses) of the word being classified.
    group : dict
        Mapping from category name to a list of reference synsets.
    threshold : float
        Minimum path similarity for a match to count.

    Returns
    -------
    str
        The best-matching category name, or "" when nothing exceeds
        `threshold`.
    """
    # The original version carried unused `simvalues` and `similarities`
    # locals; they have been removed.
    currmax = 0
    maxgroup = ""
    for sense in word_syn:  # for a given sense of a word
        for key, value in group.items():  # value has all the similar words
            path_sim = find_sim(value, sense)
            # Keep the highest similarity that clears the threshold
            if path_sim > threshold and path_sim > currmax:
                maxgroup = key
                currmax = path_sim
    return maxgroup
def label_header(header):
    """Assign category labels to a header string.

    Each whitespace-separated token with WordNet senses is matched against
    the module-level `group` dict (similarity threshold 0.8); all matches
    are collected in order.
    """
    labels = []
    for token in header.lstrip().split(" "):
        senses = Word(token).synsets
        if len(senses) > 0:
            matched = match_group(senses, group, 0.8)
            if matched != "":
                labels.append(matched)
    return labels
def cleanhtml(text):
    """Strip HTML tags (non-greedy ``<...>`` spans) from `text`."""
    return re.sub('<.*?>', '', text)
def extract_categories_using_headers(text):  # main function
    """Categorize readme sections by matching their headers to WordNet groups.

    Parameters
    ----------
    text : str
        Raw readme text (HTML tags are stripped first).

    Returns
    -------
    tuple
        ``(group_json, str_list)`` where `group_json` maps category names to
        lists of ``{'excerpt', 'confidence', 'technique'}`` dicts and
        `str_list` holds the content of sections that could not be
        categorized.
    """
    text = cleanhtml(text)
    data = extract_header_content(text)
    print('Labeling headers.')
    if data.empty:
        return {}, []
    data['Group'] = data['Header'].apply(lambda row: label_header(row))
    # The pseudo-header holding leading content usually gets no label; tag it
    # 'unknown' so it survives the explode below and can be recovered later.
    # (Uses .at instead of chained `data['Group'].iloc[0] = ...`, which is
    # unreliable under pandas copy-on-write.)
    if len(data['Group'].iloc[0]) == 0:
        data.at[data.index[0], 'Group'] = ['unknown']
    # Explode the per-row label lists into one row per (section, label) pair
    groups = data.apply(lambda x: pd.Series(x['Group']), axis=1).stack().reset_index(level=1, drop=True)
    groups.name = 'Group'
    data = data.drop('Group', axis=1).join(groups)
    # np.NaN was removed in NumPy 2.0; np.nan is the supported spelling
    if data['Group'].iloc[0] == 'unknown':
        data.iat[0, data.columns.get_loc('Group')] = np.nan
    # to json: build the JSON-ready structure from the labelled rows.
    # (Renamed from `group`, which shadowed the module-level synset dict.)
    tagged = data.loc[(data['Group'] != 'None') & pd.notna(data['Group']), ['Content', 'Group']]
    tagged['confidence'] = [[1]] * len(tagged)
    tagged.rename(columns={'Content': 'excerpt'}, inplace=True)
    tagged['technique'] = 'wordnet'
    # 'r' was an undocumented alias of 'records' that pandas 2.0 removed
    group_json = tagged.groupby('Group').apply(lambda x: x.to_dict('records')).to_dict()
    for key in group_json.keys():
        for entry in group_json[key]:
            del entry['Group']
    print('Converting to json files.')
    # strings without tag: sections with no recognized label are returned
    # as plain strings
    str_list = data.loc[data['Group'].isna(), ['Content']].values.squeeze().tolist()
    if type(str_list) != list:
        str_list = [str_list]
    return group_json, str_list
|
{
"alphanum_fraction": 0.6319464649,
"author": null,
"avg_line_length": 40.9166666667,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f4dc9386dbdbffbe8561eb8aef85dab4e8533dd8",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-01-18T14:47:04.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-01-18T14:47:04.000Z",
"max_forks_repo_head_hexsha": "88d76b019f5b272a1d92deaaa0767f71bf685a75",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "liling10822/SOMEF_Extension-",
"max_forks_repo_path": "src/somef/header_analysis.py",
"max_issues_count": 22,
"max_issues_repo_head_hexsha": "a10be879350014f08984ef78665c04c66c4015f9",
"max_issues_repo_issues_event_max_datetime": "2020-11-10T10:18:46.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-10-21T01:03:55.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "AidanKelley/somef",
"max_issues_repo_path": "src/somef/header_analysis.py",
"max_line_length": 437,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "a10be879350014f08984ef78665c04c66c4015f9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "AidanKelley/somef",
"max_stars_repo_path": "src/somef/header_analysis.py",
"max_stars_repo_stars_event_max_datetime": "2020-11-20T22:19:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-20T22:19:56.000Z",
"num_tokens": 1886,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6874
}
|
# Double-entry accounting module: account types, ledger entries, and reports,
# with currency support via Currencies.jl.
module Accounting

# Compatibility shims across Julia versions.
using Compat
import Compat: String
# `currency` is imported explicitly so it can be re-exported below.
import Currencies: currency
using Currencies
using DataStructures

# Imported so methods can be added to Base.push! — presumably done in the
# included files below; TODO confirm.
import Base.push!

# Component implementations, in inclusion order.
include("debitcredit.jl")
include("accounts.jl")
include("entries.jl")
include("ledger.jl")
include("reports.jl")

# Public API.
export Split, Entry, Ledger
export Asset, Liability, Equity, Revenue, Expense, Trading
export asset!, liability!, equity!, revenue!, expense!, trading!
export debit, credit, balances, currency
export register!, transfer!
end # module
|
{
"alphanum_fraction": 0.7759674134,
"author": null,
"avg_line_length": 20.4583333333,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "7ecd6e10998fdf703527c95ee76184dae1da88b8",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ea2fe8dcab52ac022bbd386dc7242b868e6ef30e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "TotalVerb/Accounting.jl",
"max_forks_repo_path": "src/Accounting.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ea2fe8dcab52ac022bbd386dc7242b868e6ef30e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "TotalVerb/Accounting.jl",
"max_issues_repo_path": "src/Accounting.jl",
"max_line_length": 64,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ea2fe8dcab52ac022bbd386dc7242b868e6ef30e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "TotalVerb/Accounting.jl",
"max_stars_repo_path": "src/Accounting.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 115,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 491
}
|
# Exploratory plots of the cleaned mobile-phone dataset.
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd

# Load the cleaned dataset (expected alongside this script).
data = pd.read_csv('mobile_cleaned.csv')
# The original bare `data.head()` expression was a no-op outside a notebook;
# print it so the preview is actually shown when run as a script.
print(data.head())

# Stand-by time vs battery capacity.
ax = sns.scatterplot(x="stand_by_time", y="battery_capacity", data=data)
plt.show()

# Same scatter, coloured by device thickness.
ax = sns.scatterplot(x="stand_by_time", y="battery_capacity", hue="thickness", data=data)
plt.show()

# Distribution of stand-by time. sns.distplot was deprecated in seaborn 0.11
# and subsequently removed; histplot with a KDE overlay is the replacement.
ax = sns.histplot(data["stand_by_time"], kde=True)
plt.show()

# Battery capacity split by whether the phone is liked.
ax = sns.boxplot(x="is_liked", y="battery_capacity", data=data)
plt.show()

# Price split by expandable-memory support.
ax = sns.boxplot(x="expandable_memory", y="price", data=data)
plt.show()
|
{
"alphanum_fraction": 0.7179487179,
"author": null,
"avg_line_length": 22.75,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2f9ca9d66e5db8c758b289fb1815e97a0f20886b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 18,
"max_forks_repo_forks_event_max_datetime": "2021-08-07T05:17:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-03-09T20:34:30.000Z",
"max_forks_repo_head_hexsha": "45571c00d22f339024d96b734728b4e40b6915f0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "raguvarankr/deep_learning",
"max_forks_repo_path": "Plotting/2_Mobile.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "45571c00d22f339024d96b734728b4e40b6915f0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "raguvarankr/deep_learning",
"max_issues_repo_path": "Plotting/2_Mobile.py",
"max_line_length": 93,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "45571c00d22f339024d96b734728b4e40b6915f0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "raguvarankr/deep_learning",
"max_stars_repo_path": "Plotting/2_Mobile.py",
"max_stars_repo_stars_event_max_datetime": "2020-04-14T10:37:58.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-03-09T20:33:24.000Z",
"num_tokens": 144,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 546
}
|
# coding=utf-8
# coding=utf-8
# Copyright 2019 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class to collect cluster click and impression counts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gym import spaces
import numpy as np
from recsim.agents.layers import sufficient_statistics
class ClusterClickStatsLayer(sufficient_statistics.SufficientStatisticsLayer):
  """Track impressions and clicks on a per-cluster basis and pass down to agent.

  This module assumes each document belongs to single cluster and we know the
  number of possible clusters. Every time we increase impression count for a
  cluster if the agent recommends a document from that cluster. We also increase
  click count for a cluster if user responds a click.
  """

  def __init__(self, base_agent_ctor, observation_space, action_space,
               **kwargs):
    """Initializes a ClusterClickStatsLayer object.

    Args:
      base_agent_ctor: a constructor for the base agent.
      observation_space: a gym.spaces object specifying the format of
        observations.
      action_space: A gym.spaces object that specifies the format of actions.
      **kwargs: arguments to pass to the downstream agent at construction time.

    Raises:
      ValueError: if the per-document response space lacks a 'cluster_id' or
        'click' field, or if 'cluster_id' is neither a Box nor a Discrete
        space (or is a Box of more than one element).
    """
    # Validate that each per-document response carries a cluster id
    single_response_space = observation_space.spaces['response'].spaces[0]
    if 'cluster_id' not in single_response_space.spaces:
      raise ValueError('observation_space.spaces[\'response\']'
                       ' must contain \'cluster_id\' key.')
    cluster_id_space = single_response_space.spaces['cluster_id']
    if isinstance(cluster_id_space, spaces.Box):
      if len(cluster_id_space.high) > 1:
        raise ValueError('cluster_id response field must be 0 dimensional.')
      # NOTE(review): for a Box, `high` is an ndarray (length 1 here), so
      # num_clusters ends up as an array rather than an int; the
      # `(num_clusters,)` shapes below and the list replication in _update
      # look like they expect a scalar -- confirm Box-typed cluster_id is
      # actually supported.
      num_clusters = cluster_id_space.high
    elif isinstance(cluster_id_space, spaces.Discrete):
      num_clusters = cluster_id_space.n
    else:
      raise ValueError('cluster_id response field must be either gym.spaces.Box'
                       ' or gym spaces.Discrete')
    self._num_clusters = num_clusters
    if 'click' not in single_response_space.spaces:
      raise ValueError(
          'observation_space.spaces[\'response\'] must contain \'click\' key.')
    # Space describing the statistics this layer appends to observations:
    # one non-negative float count per cluster for impressions and clicks
    suf_stat_space = spaces.Dict({
        'impression_count':
            spaces.Box(
                shape=(num_clusters,), dtype=np.float32, low=0.0, high=np.inf),
        'click_count':
            spaces.Box(
                shape=(num_clusters,), dtype=np.float32, low=0.0, high=np.inf)
    })
    super(ClusterClickStatsLayer,
          self).__init__(base_agent_ctor, observation_space, action_space,
                         suf_stat_space, **kwargs)

  def _create_observation(self):
    """Returns the current per-cluster counts as fresh ndarrays."""
    return {
        'impression_count':
            np.array(self._sufficient_statistics['impression_count']),
        'click_count':
            np.array(self._sufficient_statistics['click_count']),
    }

  def _update(self, observation):
    """Updates user impression/click count given user response on each item."""
    # _sufficient_statistics starts out as None (presumably initialised by
    # the base layer -- TODO confirm); allocate zeroed per-cluster counters
    # on the first update
    if self._sufficient_statistics is None:
      self._sufficient_statistics = {
          'impression_count': [
              0,
          ] * self._num_clusters,
          'click_count': [
              0,
          ] * self._num_clusters
      }
    if observation['response'] is not None:
      # Every recommended document counts as an impression for its cluster;
      # clicked documents additionally count as a click
      for response in observation['response']:
        cluster_id = int(response['cluster_id'])
        self._sufficient_statistics['impression_count'][cluster_id] += 1
        if response['click']:
          self._sufficient_statistics['click_count'][cluster_id] += 1
|
{
"alphanum_fraction": 0.6945978391,
"author": null,
"avg_line_length": 40.8333333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f5b9198d4584b9df0b176523564a3c576c9e9a0d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 108,
"max_forks_repo_forks_event_max_datetime": "2022-03-30T09:39:05.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-09-27T18:58:58.000Z",
"max_forks_repo_head_hexsha": "55e50e4be736d222ffe8c2477ed1981b40f91605",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "isabella232/recsim",
"max_forks_repo_path": "recsim/agents/layers/cluster_click_statistics.py",
"max_issues_count": 22,
"max_issues_repo_head_hexsha": "9098a8fac9aad62a880011ee575a3db4e7d80ee2",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T18:29:56.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-10-21T13:40:41.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "MontrealAI/recsim",
"max_issues_repo_path": "recsim/agents/layers/cluster_click_statistics.py",
"max_line_length": 80,
"max_stars_count": 625,
"max_stars_repo_head_hexsha": "9098a8fac9aad62a880011ee575a3db4e7d80ee2",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "MontrealAI/recsim",
"max_stars_repo_path": "recsim/agents/layers/cluster_click_statistics.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T10:27:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-09-25T00:45:42.000Z",
"num_tokens": 887,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4165
}
|
[STATEMENT]
lemma mult_L_omega_below:
"(x * L)\<^sup>\<omega> \<le> x * L"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x * L)\<^sup>\<omega> \<le> x * L
[PROOF STEP]
by (metis mult_right_isotone n_L_below_L omega_slide)
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Correctness_Algebras_N_Omega_Algebras",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 107,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
#include <iostream>
#include <stdexcept>
#include <boost/lexical_cast.hpp>
#include "Tudat/Astrodynamics/BasicAstrodynamics/physicalConstants.h"
#include "Tudat/Astrodynamics/BasicAstrodynamics/timeConversions.h"
#include "Tudat/External/SpiceInterface/spiceInterface.h"
#include "Tudat/External/SpiceInterface/spiceRotationalEphemeris.h"
namespace tudat
{
namespace ephemerides
{
//! Function to calculate the rotation quaternion from target frame to original frame.
Eigen::Quaterniond SpiceRotationalEphemeris::getRotationToBaseFrame(
        const double secondsSinceEpoch, const double julianDayAtEpoch )
{
    // Express the requested epoch in seconds since J2000; when the caller's
    // reference Julian day differs from J2000, shift by the day offset
    // converted to seconds.
    double ephemerisTime = secondsSinceEpoch;
    if ( julianDayAtEpoch != basic_astrodynamics::JULIAN_DAY_ON_J2000 )
    {
        const double referenceDayOffset =
                basic_astrodynamics::JULIAN_DAY_ON_J2000 - julianDayAtEpoch;
        ephemerisTime -= referenceDayOffset * physical_constants::JULIAN_DAY;
    }

    // Delegate the actual frame transformation to the Spice interface wrapper.
    return spice_interface::computeRotationQuaternionBetweenFrames(
                targetFrameOrientation_, baseFrameOrientation_, ephemerisTime );
}
//! Function to calculate the derivative of the rotation matrix from target frame to original
//! frame.
Eigen::Matrix3d SpiceRotationalEphemeris::getDerivativeOfRotationToBaseFrame(
        const double secondsSinceEpoch, const double julianDayAtEpoch )
{
    // Express the requested epoch in seconds since J2000 (no shift needed
    // when the caller's reference Julian day already is J2000).
    const double ephemerisTime =
            ( julianDayAtEpoch == basic_astrodynamics::JULIAN_DAY_ON_J2000 ) ?
                secondsSinceEpoch :
                secondsSinceEpoch - ( basic_astrodynamics::JULIAN_DAY_ON_J2000 - julianDayAtEpoch )
                * physical_constants::JULIAN_DAY;

    // Delegate to the Spice interface wrapper for the rotation matrix derivative.
    return spice_interface::computeRotationMatrixDerivativeBetweenFrames(
                targetFrameOrientation_, baseFrameOrientation_, ephemerisTime );
}
//! Function to calculate the full rotational state at given time
void SpiceRotationalEphemeris::getFullRotationalQuantitiesToTargetFrame(
        Eigen::Quaterniond& currentRotationToLocalFrame,
        Eigen::Matrix3d& currentRotationToLocalFrameDerivative,
        Eigen::Vector3d& currentAngularVelocityVectorInGlobalFrame,
        const double secondsSinceEpoch, const double julianDayAtEpoch)
{
    // Set number of seconds since J2000, shifting by the reference Julian day
    // offset (converted to seconds) when it is not already J2000.
    double ephemerisTime = secondsSinceEpoch;
    if ( julianDayAtEpoch != basic_astrodynamics::JULIAN_DAY_ON_J2000 )
    {
        ephemerisTime -= ( basic_astrodynamics::JULIAN_DAY_ON_J2000 - julianDayAtEpoch )
                * physical_constants::JULIAN_DAY;
    }
    // Calculate rotation (and its time derivative) directly from spice.
    // NOTE: the frame arguments are passed here in ( base, target ) order --
    // the reverse of the two getter functions above -- because this function
    // returns the rotation *to* the target (local) frame.
    std::pair< Eigen::Quaterniond, Eigen::Matrix3d > fullRotation =
            spice_interface::computeRotationQuaternionAndRotationMatrixDerivativeBetweenFrames(
                baseFrameOrientation_, targetFrameOrientation_, ephemerisTime );
    currentRotationToLocalFrame = fullRotation.first;
    currentRotationToLocalFrameDerivative = fullRotation.second;
    // Calculate angular velocity vector, expressed in the base frame, from the
    // rotation matrix (to local frame) and the transpose of its derivative.
    currentAngularVelocityVectorInGlobalFrame = getRotationalVelocityVectorInBaseFrameFromMatrices(
                Eigen::Matrix3d( currentRotationToLocalFrame ), currentRotationToLocalFrameDerivative.transpose( ) );
}
} // namespace ephemerides
} // namespace tudat
|
{
"alphanum_fraction": 0.7455485353,
"author": null,
"avg_line_length": 41.4523809524,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "5ee05354fed2f0e9cca7226fb924665c4682367d",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-05-30T03:42:22.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-05-30T03:42:22.000Z",
"max_forks_repo_head_hexsha": "b94ce35fb7c8fa44ae83238e296a979dfa3adfe8",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "JPelamatti/ThesisTUDAT",
"max_forks_repo_path": "Tudat/External/SpiceInterface/spiceRotationalEphemeris.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b94ce35fb7c8fa44ae83238e296a979dfa3adfe8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "JPelamatti/ThesisTUDAT",
"max_issues_repo_path": "Tudat/External/SpiceInterface/spiceRotationalEphemeris.cpp",
"max_line_length": 118,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b94ce35fb7c8fa44ae83238e296a979dfa3adfe8",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "JPelamatti/ThesisTUDAT",
"max_stars_repo_path": "Tudat/External/SpiceInterface/spiceRotationalEphemeris.cpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 793,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3482
}
|
#!/usr/bin/env python3
"""
Fast approximations for various trig functions
"""
from approx.cheby import cheby_poly, cheby_fit
from utils import utils
import math
from matplotlib import pyplot as plt
import numpy as np
from typing import Callable, Union, Optional, Tuple
PI = math.pi
HALF_PI = 0.5 * PI
TWO_PI = 2.0 * PI
#
# Utility functions
#
@np.vectorize
def _quadrant_1_4_wrapper(x: float, f: Callable, is_sine: bool) -> float:
    """Extend an approximation ``f`` valid near x = 0 to the full circle.

    ``f`` is expected to be accurate in quadrants 1 & 4 (small positive and
    small negative arguments), as the Maclaurin and Chebyshev series in this
    module are.  Other quadrants are mapped onto that range via the symmetry
    identities sin(x) = sin(pi - x) and cos(x) = -cos(pi - x).
    """
    # Reduce to [0, 2*pi).  Using the (always non-negative) Python modulo
    # instead of abs() keeps odd functions correct for negative inputs: a
    # negative x lands in quadrant 4, where f(x - 2*pi) restores the sign.
    # (The old abs() reduction returned +sin(|x|) for negative x.)
    x = x % TWO_PI
    quadrant = int(math.floor(x / HALF_PI))
    return {
        0: f(x),
        # Quadrant 2: reflect about pi/2; cosine additionally flips sign.
        1: f(math.pi - x) if is_sine else -f(math.pi - x),
        # Quadrant 3: both sine and cosine are negated half a period away.
        2: -f(x - math.pi),
        # Quadrant 4: evaluate f at the small negative equivalent angle.
        3: f(x - TWO_PI),
    }[quadrant]
@np.vectorize
def _quadrant_1_2_wrapper(x: float, f: Callable, is_sine: bool) -> float:
    """Extend an approximation ``f`` valid on [0, pi] to the full circle.

    Intended for half-period approximations such as Bhaskara's sine formula.
    Only the sine case is implemented.
    """
    # Reduce to [0, 2*pi).  The non-negative modulo (rather than abs) keeps
    # odd functions correct for negative inputs: a negative x maps into the
    # second half-period, where the sign is flipped below.
    x = x % TWO_PI
    quadrant_3_4 = bool(int(math.floor(x / PI)))
    if is_sine:
        # sin(x) = -sin(x - pi) for x in [pi, 2*pi)
        return -f(x - PI) if quadrant_3_4 else f(x)
    else:
        raise NotImplementedError
@np.vectorize
def _quadrant_1_wrapper(x: float, f: Callable, is_sine: bool) -> float:
    """Extend an approximation ``f`` valid on [0, pi/2] to the full circle.

    Only the cosine case is implemented; the sine branch raises before its
    mapping is ever reached.
    """
    # NOTE(review): abs() is only safe here for even (cosine-like) reductions;
    # an odd f would need its sign restored for negative x.  The sine branch
    # raises NotImplementedError, so this is currently moot.
    x = abs(x) % TWO_PI
    quadrant = int(math.floor(x / HALF_PI))
    #return quadrant # DEBUG
    if is_sine:
        raise NotImplementedError
        # NOTE(review): everything below up to the else-branch is unreachable
        # dead code, kept as a sketch of the intended sine mapping.
        # TESTME
        return {
            0: f(x),
            1: f(HALF_PI - x),
            2: -f(x - PI),
            3: -f(PI - x),
        }[quadrant]
    else:
        # Reflect each quadrant back onto [0, pi/2]; cosine is negative in
        # quadrants 2 and 3.
        return {
            0: f(x),
            1: -f(PI - x),
            2: -f(x - PI),
            3: f(TWO_PI - x),
        }[quadrant]
def _sin_from_cos(f_cos: Callable, x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """Build a sine from a cosine approximation using sin(x) = cos(x - pi/2)."""
    shifted_arg = x - HALF_PI
    return f_cos(shifted_arg)
def _cos_from_sin(f_sin: Callable, x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """Build a cosine from a sine approximation using cos(x) = sin(x + pi/2)."""
    shifted_arg = x + HALF_PI
    return f_sin(shifted_arg)
#
# Small-angle approximations
#
def cos_small_angle(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """Second-order small-angle approximation: cos(x) ~= 1 - x^2 / 2."""
    half_x_squared = 0.5 * np.square(x)
    return 1.0 - half_x_squared
def sin_small_angle(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """First-order small-angle approximation: sin(x) ~= x."""
    return x
#
# Taylor/Maclaurin approximations
#
# (not great, but just for comparison)
def sin_maclaurin_3s(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 3rd-order Maclaurin series for sin; accurate near x = 0 only.
    return x - (x ** 3) / 6
def cos_maclaurin_4s(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 4th-order Maclaurin series for cos; accurate near x = 0 only.
    return 1.0 - (x ** 2) / 2 + (x ** 4) / 24
def sin_maclaurin_5s(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 5th-order Maclaurin series for sin; accurate near x = 0 only.
    return x - (x ** 3) / 6 + (x ** 5) / 120
def cos_maclaurin_6s(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 6th-order Maclaurin series for cos; accurate near x = 0 only.
    return 1.0 - (x ** 2) / 2 + (x ** 4) / 24 - (x ** 6) / 720
def sin_maclaurin_3(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Full-circle version of sin_maclaurin_3s via quadrant reduction.
    return _quadrant_1_4_wrapper(x, sin_maclaurin_3s, True)
def cos_maclaurin_4(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Full-circle version of cos_maclaurin_4s via quadrant reduction.
    return _quadrant_1_4_wrapper(x, cos_maclaurin_4s, False)
def sin_maclaurin_5(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Full-circle version of sin_maclaurin_5s via quadrant reduction.
    return _quadrant_1_4_wrapper(x, sin_maclaurin_5s, True)
def cos_maclaurin_6(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Full-circle version of cos_maclaurin_6s via quadrant reduction.
    return _quadrant_1_4_wrapper(x, cos_maclaurin_6s, False)
#
# Bhaskara I's sine approximation formula
#
def sin_bhaskara_positive(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Bhaskara I's rational sine approximation; expects x in [0, pi]
    # (the formula itself works in degrees internally).
    x = np.rad2deg(x)
    return (4 * x * (180 - x)) / (40500 - x * (180 - x))
def sin_bhaskara(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Full-circle sine via half-period reduction of the Bhaskara formula.
    return _quadrant_1_2_wrapper(x, sin_bhaskara_positive, True)
#
# Chebyshev approximations (see cheby.py)
#
def cos_cheb2_evenodd_q1(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 2nd-order Chebyshev fit of cos on [0, pi/2]; argument rescaled to [-1, 1].
    x = utils.scale(x, (0, HALF_PI), (-1, 1))
    return \
        0.705741045518510246026267 + \
        -0.513625166679106959222167 * x + \
        -0.207092688525927492992906 * (x ** 2)
def cos_cheb2_evenodd(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Full-circle cosine from the quadrant-1 fit above.
    return _quadrant_1_wrapper(x, cos_cheb2_evenodd_q1, is_sine=False)
def cos_cheb3_evenodd_q1(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 3rd-order Chebyshev fit of cos on [0, pi/2]; argument rescaled to [-1, 1].
    x = utils.scale(x, (0, HALF_PI), (-1, 1))
    return \
        0.705741045518510246026267 + \
        -0.554821269382182347129628 * x + \
        -0.207092688525927492992906 * (x ** 2) + \
        0.054928136937433862108104 * (x ** 3)
def cos_cheb3_evenodd(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Full-circle cosine from the quadrant-1 fit above.
    return _quadrant_1_wrapper(x, cos_cheb3_evenodd_q1, is_sine=False)
def cos_cheb4_evenodd_q1(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 4th-order Chebyshev fit of cos on [0, pi/2]; argument rescaled to [-1, 1].
    x = utils.scale(x, (0, HALF_PI), (-1, 1))
    return \
        0.707099715356600544424737 + \
        -0.554821269382182347129628 * x + \
        -0.217962047230650046714118 * (x ** 2) + \
        0.054928136937433862108104 * (x ** 3) + \
        0.010869358704722560660105 * (x ** 4)
def cos_cheb4_evenodd(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Full-circle cosine from the quadrant-1 fit above.
    return _quadrant_1_wrapper(x, cos_cheb4_evenodd_q1, is_sine=False)
def cos_cheb5_evenodd_q1(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 5th-order Chebyshev fit of cos on [0, pi/2]; argument rescaled to [-1, 1].
    x = utils.scale(x, (0, HALF_PI), (-1, 1))
    return \
        0.707099715356600544424737 + \
        -0.555357584854211272507030 * x + \
        -0.217962047230650046714118 * (x ** 2) + \
        0.057073398825549619128861 * (x ** 3) + \
        0.010869358704722560660105 * (x ** 4) + \
        -0.001716209510492606527335 * (x ** 5)
def cos_cheb5_evenodd(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Full-circle cosine from the quadrant-1 fit above.
    return _quadrant_1_wrapper(x, cos_cheb5_evenodd_q1, is_sine=False)
def sin_cheb3s(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 3rd-order odd Chebyshev fit of sin; the rescaling maps [-pi/2, pi/2]
    # onto [-1, 1].  Valid in quadrants 1 & 4 only.
    x = x * 2 / math.pi
    return \
        1.547863507573323804678012 * x + \
        -0.552287106348768208619049 * (x ** 3)
def sin_cheb5s(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 5th-order odd Chebyshev fit of sin (same rescaling as sin_cheb3s).
    x = x * 2 / math.pi
    return \
        1.570317078806098720633599 * x + \
        -0.642101391279867650396795 * (x ** 3) + \
        0.071851427944879600606676 * (x ** 5)
def cos_cheb2s(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 2nd-order even Chebyshev fit of cos (same rescaling).
    x = x * 2 / math.pi
    return \
        0.971404474038641829736207 + \
        -0.998806516540814204319076 * (x ** 2)
def cos_cheb4s(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 4th-order even Chebyshev fit of cos (same rescaling).
    x = x * 2 / math.pi
    return \
        0.999396553656189623460193 + \
        -1.222743153481196776155571 * (x ** 2) + \
        0.223936636940382599592070 * (x ** 4)
def sin_cheb2(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Even-order sine: derived from the cosine fit by a quarter-period shift.
    return _sin_from_cos(cos_cheb2, x)
def sin_cheb3(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Odd-order sine: quadrant-reduced version of the raw series.
    return _quadrant_1_4_wrapper(x, sin_cheb3s, True)
def sin_cheb4(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Even-order sine: derived from the cosine fit by a quarter-period shift.
    return _sin_from_cos(cos_cheb4, x)
def sin_cheb5(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Odd-order sine: quadrant-reduced version of the raw series.
    return _quadrant_1_4_wrapper(x, sin_cheb5s, True)
def cos_cheb2(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Even-order cosine: quadrant-reduced version of the raw series.
    return _quadrant_1_4_wrapper(x, cos_cheb2s, False)
def cos_cheb3(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Odd-order cosine: derived from the sine fit by a quarter-period shift.
    return _cos_from_sin(sin_cheb3, x)
def cos_cheb4(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Even-order cosine: quadrant-reduced version of the raw series.
    return _quadrant_1_4_wrapper(x, cos_cheb4s, False)
def cos_cheb5(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # Odd-order cosine: derived from the sine fit by a quarter-period shift.
    return _cos_from_sin(sin_cheb5, x)
def cos_cheb4_branchfree(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """Branch-free even 4th-order polynomial fit of cos over one full period."""
    # Fold the argument into [0, 2*pi), then map it onto [-1, 1]; the fit is
    # even about x = pi, so no quadrant dispatch is needed.
    u = x % TWO_PI
    u = u / math.pi - 1
    c0 = -0.969474842882559806334086
    c2 = 4.364528972635080883435421
    c4 = -2.422793242109031908171346
    return c0 + c2 * (u ** 2) + c4 * (u ** 4)
def cos_cheb6_branchfree(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 6th-order branch-free full-period cosine fit; the remapping below makes
    # the fit even about x = pi, so no quadrant dispatch is needed.
    x = x % TWO_PI
    x = x / math.pi - 1
    return \
        -0.998566776961081203900505 + \
        4.888183786048466927809386 * (x ** 2) + \
        -3.819206077878061655894726 * (x ** 4) + \
        0.930941890512686387459951 * (x ** 6)
def cos_cheb8_branchfree(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 8th-order branch-free full-period cosine fit (same remapping).
    x = x % TWO_PI
    x = x / math.pi - 1
    return \
        -0.999959031778776208376769 + \
        4.932735940214705294692976 * (x ** 2) + \
        -4.041966848709254378491096 * (x ** 4) + \
        1.287359123842594543773998 * (x ** 6) + \
        -0.178208616664954105912599 * (x ** 8)
def sinc_lobe_cheb2(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    """2nd-order even Chebyshev fit of the sinc main lobe on [-1, 1]."""
    a0 = 0.924409333585792136744885
    a2 = -0.990956771464502783608452
    return a0 + a2 * (x ** 2)
def sinc_lobe_cheb4(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 4th-order even Chebyshev fit of the sinc main lobe on [-1, 1].
    return \
        0.995315379142853617899789 + \
        -1.558205135920994299780773 * (x ** 2) + \
        0.567248364456491516172321 * (x ** 4)
def sinc_lobe_cheb6(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 6th-order even Chebyshev fit of the sinc main lobe on [-1, 1].
    return \
        0.999833206861104328844192 + \
        -1.639526034849507984958450 * (x ** 2) + \
        0.784104094932527750927420 * (x ** 4) + \
        -0.144570486984024165755258 * (x ** 6)
def sinc_lobe_cheb8(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
    # 8th-order even Chebyshev fit of the sinc main lobe on [-1, 1].
    return \
        0.999996152743521826700146 + \
        -1.644740303086868360438189 * (x ** 2) + \
        0.810175436119329850370718 * (x ** 4) + \
        -0.186284632882907530415650 * (x ** 6) + \
        0.020857072949441682330196 * (x ** 8)
#
# Main functions
#
def _plot_func(f: Callable, name: str, range: Optional[Tuple[float, float]]=None, f_actual: Optional[Callable]=None, plot_x_as_pi=False):
    """Print accuracy/harmonic statistics for approximation ``f`` and plot it.

    NOTE(review): the parameter ``range`` shadows the builtin; renaming it
    would break callers that pass it by keyword, so it is left as-is.
    """
    if range is not None:
        x = np.linspace(range[0], range[1], num=256)
    else:
        # Default: one full period with the endpoint excluded, so the FFT
        # below sees an integer number of cycles.
        x = np.linspace(0, TWO_PI, num=1024, endpoint=False)
    f_zero = f(0) # Falcon kick!
    f_half_pi = f(HALF_PI)
    f_pi = f(PI)
    # Guess the reference function from sample values when not supplied:
    # sinc-like if f(0)=1 and f(pi)=0, sine-like if increasing from 0.
    if f_actual is None:
        if math.isclose(f_zero, 1) and math.isclose(f_pi, 0):
            f_actual = np.sinc
        elif f_zero < f_half_pi:
            f_actual = np.sin
        else:
            f_actual = np.cos
    y_approx = f(x)
    y_actual = f_actual(x)
    err = np.abs(y_approx - y_actual)
    # TODO: plot derivative & its error, 4th derivative & its error
    # Single-sided, length-normalized magnitude spectrum of the approximation.
    ffty = np.fft.fft(y_approx)
    ffty /= len(ffty)
    ffty = np.abs(ffty)
    ffty = ffty[:len(ffty) // 2]
    ffty_dB = utils.to_dB(ffty, -200)
    # NOTE(review): harmonic magnitudes are summed linearly rather than in
    # RMS fashion, so 'THD' here is an approximation -- confirm intended
    # definition before relying on the absolute numbers.
    thdn = sum(ffty[2:])
    thdn_dB = 20*np.log10(thdn) - ffty_dB[1]
    max_harmonic_dB = np.amax(ffty_dB[2:]) - ffty_dB[1]
    max_err = np.amax(err)
    # Full-period runs get spectral columns as well; ranged runs only error.
    if range is None:
        print('%20s %12g %12f %12g %12g %20g %12g' % (
            name,
            max_err,
            20 * np.log10(max_err),
            abs(f(0) - f_actual(0)),
            abs(f(HALF_PI) - f_actual(HALF_PI)),
            max_harmonic_dB,
            thdn_dB,
        ))
    else:
        print('%20s %12g %12f %12g %12g' % (
            name,
            max_err,
            20 * np.log10(max_err),
            abs(f(0) - f_actual(0)),
            abs(f(HALF_PI) - f_actual(HALF_PI))
        ))
    if plot_x_as_pi:
        x /= PI
    xlabel = 'x / pi' if plot_x_as_pi else 'x'
    fig = plt.figure()
    fig.suptitle(name)
    # Full-period runs get a third subplot with the spectrum.
    num_plots = 3 if range is None else 2
    plt.subplot(num_plots, 1, 1)
    plt.plot(x, y_actual, label='Exact')
    plt.plot(x, y_approx, label=name)
    plt.legend()
    plt.grid()
    plt.xlabel(xlabel)
    plt.subplot(num_plots, 1, 2)
    plt.plot(x, err, 'r')
    plt.grid()
    plt.ylabel('Error')
    plt.xlabel(xlabel)
    # TODO: others
    if range is None:
        plt.subplot(num_plots, 1, num_plots)
        plt.plot(ffty_dB[:64])
        plt.grid()
        plt.ylabel('FFT')
        plt.ylim([-160, 0])
def plot(args):
    # Alias kept for the project's command-line dispatch; simply runs main().
    main(args)
def main(args):
    """Print the accuracy comparison table and plot every approximation."""
    # Header for the per-function statistics printed by _plot_func.
    print('%20s %12s %12s %12s %12s %20s %12s' % (
        'function', 'Max err', 'Max err (dB)', 'err f(0)', 'err f(pi/2)', 'max harmonic (dB)', 'THD (dB)'))
    # Small-angle approximations are only meaningful near zero, so evaluate
    # them on [-pi/4, pi/4] instead of a full period.
    for f_name, f in [
        ('cos_small_angle', cos_small_angle),
        ('sin_small_angle', sin_small_angle),
    ]:
        _plot_func(f, name=f_name, range=[-PI/4, PI/4], plot_x_as_pi=True)
    # Full-period approximations.
    for f_name, f in [
        ('sin_maclaurin_3', sin_maclaurin_3),
        ('cos_maclaurin_4', cos_maclaurin_4),
        ('sin_maclaurin_5', sin_maclaurin_5),
        ('cos_maclaurin_6', cos_maclaurin_6),
        ('sin_bhaskara', sin_bhaskara),
        ('cos_cheb2', cos_cheb2),
        ('sin_cheb3', sin_cheb3),
        ('cos_cheb4', cos_cheb4),
        ('sin_cheb5', sin_cheb5),
        ('cos_cheb2_evenodd', cos_cheb2_evenodd),
        ('cos_cheb3_evenodd', cos_cheb3_evenodd),
        ('cos_cheb4_evenodd', cos_cheb4_evenodd),
        ('cos_cheb5_evenodd', cos_cheb5_evenodd),
        ('cos_cheb4_branchfree', cos_cheb4_branchfree),
        ('cos_cheb6_branchfree', cos_cheb6_branchfree),
        ('cos_cheb8_branchfree', cos_cheb8_branchfree),
    ]:
        _plot_func(f, name=f_name, plot_x_as_pi=True)
    # Sinc-lobe fits are only defined on [-1, 1].
    _plot_func(sinc_lobe_cheb4, name='sinc_lobe_cheb4', range=[-1, 1], f_actual=np.sinc)
    _plot_func(sinc_lobe_cheb6, name='sinc_lobe_cheb6', range=[-1, 1], f_actual=np.sinc)
    plt.show()
|
{
"alphanum_fraction": 0.6502163115,
"author": null,
"avg_line_length": 26.3333333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7df05a214fd3eaddf7f68d73e359e9bcae40579f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9f677ce72b0a50c7240ace880603d8e955f270a8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Joeltronics/audioexperiments",
"max_forks_repo_path": "approx/trig.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9f677ce72b0a50c7240ace880603d8e955f270a8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Joeltronics/audioexperiments",
"max_issues_repo_path": "approx/trig.py",
"max_line_length": 138,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "9f677ce72b0a50c7240ace880603d8e955f270a8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Joeltronics/audioexperiments",
"max_stars_repo_path": "approx/trig.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-13T03:05:03.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-13T03:05:03.000Z",
"num_tokens": 4335,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 12482
}
|
[STATEMENT]
lemma map2set_finite[relator_props]:
assumes "finite_map_rel (\<langle>Rk,Id\<rangle>R)"
shows "finite_set_rel (\<langle>Rk\<rangle>map2set_rel R)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite_set_rel (\<langle>Rk\<rangle>map2set_rel R)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
finite_map_rel (\<langle>Rk, unit_rel\<rangle>R)
goal (1 subgoal):
1. finite_set_rel (\<langle>Rk\<rangle>map2set_rel R)
[PROOF STEP]
unfolding map2set_rel_def finite_set_rel_def finite_map_rel_def
[PROOF STATE]
proof (prove)
using this:
Range (\<langle>Rk, unit_rel\<rangle>R) \<subseteq> Collect (finite \<circ> dom)
goal (1 subgoal):
1. Range (\<langle>Rk, unit_rel\<rangle>R O {(m, dom m) |m. True}) \<subseteq> Collect finite
[PROOF STEP]
by auto
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Collections_GenCF_Gen_Gen_Map2Set",
"hexsha": null,
"include": null,
"lang": null,
"length": 3,
"llama_tokens": 319,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
"""
Copyright (c) 2016 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import sys
import traceback
from datetime import datetime, timedelta
from multiprocessing.dummy import Pool, Manager
from shapely.geometry import box
import numpy as np
import pytz
from nexustiles.nexustiles import NexusTileService, NexusTileServiceException
from webservice.NexusHandler import NexusHandler, nexus_handler
from webservice.webmodel import NexusResults, NexusProcessingException
SENTINEL = 'STOP'
@nexus_handler
class DailyDifferenceAverageImpl(NexusHandler):
    # Webservice handler (Python 2 codebase): computes, per day, the average
    # difference between two datasets over a bounding box.
    # Registration metadata consumed by the @nexus_handler framework.
    name = "Daily Difference Average"
    path = "/dailydifferenceaverage"
    description = "Subtracts data in box in Dataset 1 from Dataset 2, then averages the difference per day."
    params = {
        "ds1": {
            "name": "Dataset 1",
            "type": "string",
            "description": "The first Dataset shortname to use in calculation"
        },
        "ds2": {
            "name": "Dataset 2",
            "type": "string",
            "description": "The second Dataset shortname to use in calculation"
        },
        "minLon": {
            "name": "Minimum Longitude",
            "type": "float",
            "description": "Minimum (Western) bounding box Longitude"
        },
        "minLat": {
            "name": "Minimum Latitude",
            "type": "float",
            "description": "Minimum (Southern) bounding box Latitude"
        },
        "maxLon": {
            "name": "Maximum Longitude",
            "type": "float",
            "description": "Maximum (Eastern) bounding box Longitude"
        },
        "maxLat": {
            "name": "Maximum Latitude",
            "type": "float",
            "description": "Maximum (Northern) bounding box Latitude"
        },
        "startTime": {
            "name": "Start Time",
            "type": "long integer",
            "description": "Starting time in milliseconds since midnight Jan. 1st, 1970 UTC"
        },
        "endTime": {
            "name": "End Time",
            "type": "long integer",
            "description": "Ending time in milliseconds since midnight Jan. 1st, 1970 UTC"
        }
    }
    singleton = True
    def __init__(self):
        # Cassandra access is not needed for this handler; only Solr metadata.
        NexusHandler.__init__(self, skipCassandra=True)
    def calc(self, request, **args):
        """Entry point invoked by the webservice for each request."""
        min_lat, max_lat, min_lon, max_lon = request.get_min_lat(), request.get_max_lat(), request.get_min_lon(), request.get_max_lon()
        dataset1 = request.get_argument("ds1", None)
        dataset2 = request.get_argument("ds2", None)
        start_time = request.get_start_time()
        end_time = request.get_end_time()
        # 'simple' is a debug mode that renders a PNG instead of returning
        # a structured NexusResults payload.
        simple = request.get_argument("simple", None) is not None
        averagebyday = self.get_daily_difference_average_for_box(min_lat, max_lat, min_lon, max_lon, dataset1, dataset2,
                                                                 start_time, end_time)
        # Sort chronologically; worker results may arrive out of order.
        averagebyday = sorted(averagebyday, key=lambda dayavg: dayavg[0])
        if simple:
            # Deferred import: matplotlib is only needed for the debug path.
            import matplotlib.pyplot as plt
            from matplotlib.dates import date2num
            times = [date2num(self.date_from_ms(dayavg[0])) for dayavg in averagebyday]
            means = [dayavg[1] for dayavg in averagebyday]
            plt.plot_date(times, means, ls='solid')
            plt.xlabel('Date')
            plt.xticks(rotation=70)
            plt.ylabel(u'Difference from 5-Day mean (\u00B0C)')
            plt.title('Sea Surface Temperature (SST) Anomalies')
            plt.grid(True)
            plt.tight_layout()
            plt.savefig("test.png")
            return averagebyday, None, None
        else:
            result = NexusResults(
                results=[[{'time': dayms, 'mean': avg, 'ds': 0}] for dayms, avg in averagebyday],
                stats={},
                meta=self.get_meta())
            result.extendMeta(min_lat, max_lat, min_lon, max_lon, "", start_time, end_time)
            result.meta()['label'] = u'Difference from 5-Day mean (\u00B0C)'
            return result
    def date_from_ms(self, dayms):
        """Convert epoch milliseconds to a naive UTC datetime."""
        base_datetime = datetime(1970, 1, 1)
        # timedelta(days, seconds, microseconds, milliseconds):
        # dayms is interpreted as milliseconds here.
        delta = timedelta(0, 0, 0, dayms)
        return base_datetime + delta
    def get_meta(self):
        # NOTE(review): "anomolies" is a typo in the user-facing description;
        # correcting it would change the API response, so it is left as-is.
        meta = {
            "title": "Sea Surface Temperature (SST) Anomalies",
            "description": "SST anomolies are departures from the 5-day pixel mean",
            "units": u'\u00B0C',
        }
        return meta
    def get_daily_difference_average_for_box(self, min_lat, max_lat, min_lon, max_lon, dataset1, dataset2,
                                             start_time,
                                             end_time):
        """Return [(day_epoch_seconds, mean(ds1 - ds2))] for each day in range.

        Runs serially when maxprocesses == 1, otherwise fans the per-day work
        out to a thread pool via queues.
        """
        daysinrange = self._tile_service.find_days_in_range_asc(min_lat, max_lat, min_lon, max_lon, dataset1,
                                                                start_time, end_time)
        maxprocesses = int(self.algorithm_config.get("multiprocessing", "maxprocesses"))
        if maxprocesses == 1:
            calculator = DailyDifferenceAverageCalculator()
            averagebyday = []
            for dayinseconds in daysinrange:
                result = calculator.calc_average_diff_on_day(min_lat, max_lat, min_lon, max_lon, dataset1, dataset2,
                                                             dayinseconds)
                averagebyday.append((result[0], result[1]))
        else:
            # Create a task to calc average difference for each day
            manager = Manager()
            work_queue = manager.Queue()
            done_queue = manager.Queue()
            for dayinseconds in daysinrange:
                work_queue.put(
                    ('calc_average_diff_on_day', min_lat, max_lat, min_lon, max_lon, dataset1, dataset2, dayinseconds))
            # One sentinel per worker so every worker loop terminates.
            [work_queue.put(SENTINEL) for _ in xrange(0, maxprocesses)]
            # Start new processes to handle the work
            pool = Pool(maxprocesses)
            [pool.apply_async(pool_worker, (work_queue, done_queue)) for _ in xrange(0, maxprocesses)]
            pool.close()
            # Collect the results as [(day (in ms), average difference for that day)]
            averagebyday = []
            for i in xrange(0, len(daysinrange)):
                result = done_queue.get()
                if result[0] == 'error':
                    print >> sys.stderr, result[1]
                    raise NexusProcessingException(reason="Error calculating average by day.")
                rdata = result
                averagebyday.append((rdata[0], rdata[1]))
            pool.terminate()
            manager.shutdown()
        return averagebyday
class DailyDifferenceAverageCalculator(object):
    # Performs the per-day difference computation; instantiated inside each
    # pool worker so every worker gets its own tile-service connection.
    def __init__(self):
        self.__tile_service = NexusTileService()
    def calc_average_diff_on_day(self, min_lat, max_lat, min_lon, max_lon, dataset1, dataset2, timeinseconds):
        """Return (day_epoch_seconds, mean(ds1 - ds2)) over the bounding box.

        dataset2 is matched by day-of-year (climatology-style lookup) rather
        than by exact timestamp.
        """
        day_of_year = datetime.fromtimestamp(timeinseconds, pytz.utc).timetuple().tm_yday
        ds1_nexus_tiles = self.__tile_service.find_all_tiles_in_box_at_time(min_lat, max_lat, min_lon, max_lon,
                                                                            dataset1,
                                                                            timeinseconds)
        # Initialize list of differences
        differences = []
        # For each ds1tile
        for ds1_tile in ds1_nexus_tiles:
            # Get tile for ds2 using bbox from ds1_tile and day ms
            try:
                ds2_tile = self.__tile_service.find_tile_by_polygon_and_most_recent_day_of_year(
                    box(ds1_tile.bbox.min_lon, ds1_tile.bbox.min_lat, ds1_tile.bbox.max_lon, ds1_tile.bbox.max_lat),
                    dataset2, day_of_year)[0]
                # Subtract ds2 tile from ds1 tile
                diff = np.subtract(ds1_tile.data, ds2_tile.data)
            except NexusTileServiceException:
                # This happens when there is data in ds1tile but all NaNs in ds2tile because the
                # Solr query being used filters out results where stats_count = 0.
                # Technically, this should never happen if ds2 is a climatology generated in part from ds1
                # and it is probably a data error
                # For now, just treat ds2 as an array of all masked data (which essentially discards the ds1 data)
                ds2_tile = np.ma.masked_all(ds1_tile.data.shape)
                diff = np.subtract(ds1_tile.data, ds2_tile)
            # Put results in list of differences
            differences.append(np.ma.array(diff).ravel())
        # Average List of differences
        diffaverage = np.ma.mean(differences).item()
        # Return Average by day
        return int(timeinseconds), diffaverage
def pool_worker(work_queue, done_queue):
    """Worker loop for the multiprocessing pool.

    Pulls ``(method_name, *args)`` work items off ``work_queue`` until the
    SENTINEL value is seen, dispatches each to a
    DailyDifferenceAverageCalculator instance, and pushes results onto
    ``done_queue``.  On any failure an ``('error', traceback_string)`` tuple
    is enqueued so the parent can detect and report it.
    """
    try:
        calculator = DailyDifferenceAverageCalculator()
        for work in iter(work_queue.get, SENTINEL):
            scifunction = work[0]
            args = work[1:]
            result = calculator.__getattribute__(scifunction)(*args)
            done_queue.put(result)
    except Exception:
        # traceback.format_exc() takes no exception argument -- the previous
        # code passed the exception object, which was silently (mis)used as
        # the 'limit' parameter.
        e_str = traceback.format_exc()
        done_queue.put(('error', e_str))
|
{
"alphanum_fraction": 0.5854899838,
"author": null,
"avg_line_length": 39.9783549784,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1518c9efe394881abddc34ca953bb6c067d0cc25",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 5,
"max_forks_repo_forks_event_max_datetime": "2017-11-14T21:45:22.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-08-31T13:47:29.000Z",
"max_forks_repo_head_hexsha": "f25a89e85eba098da9c6db1ff3d408dae8a6b310",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "dataplumber/nexus",
"max_forks_repo_path": "analysis/webservice/algorithms/DailyDifferenceAverage.py",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "ff98fa346303431542b8391cc2a1bf7561d1bd03",
"max_issues_repo_issues_event_max_datetime": "2021-06-01T21:45:52.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-04-27T21:22:17.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "lewismc/incubator-sdap-nexus",
"max_issues_repo_path": "analysis/webservice/algorithms/DailyDifferenceAverage.py",
"max_line_length": 135,
"max_stars_count": 23,
"max_stars_repo_head_hexsha": "f25a89e85eba098da9c6db1ff3d408dae8a6b310",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "dataplumber/nexus",
"max_stars_repo_path": "analysis/webservice/algorithms/DailyDifferenceAverage.py",
"max_stars_repo_stars_event_max_datetime": "2020-02-17T08:18:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-08-09T22:45:14.000Z",
"num_tokens": 2008,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9235
}
|
import numpy as np
from scipy.stats.distributions import norm
def generate_logistic():
    """Write simulated clustered binary-response data to gee_logistic_1.csv.

    Each row is: cluster id, binary outcome, then p covariate values.
    """
    # Number of clusters
    nclust = 100
    # Regression coefficients
    beta = np.array([1, -2, 1], dtype=np.float64)
    # Covariate correlations
    r = 0.4
    # Cluster effects of covariates
    rx = 0.5
    # Within-cluster outcome dependence
    re = 0.3
    p = len(beta)
    # 'with' guarantees the file is closed even if generation fails.
    with open("gee_logistic_1.csv", "w") as OUT:
        for i in range(nclust):
            n = np.random.randint(3, 6)  # Cluster size
            x = np.random.normal(size=(n, p))
            # Shared cluster effect plus independent noise for covariates.
            x = rx*np.random.normal() + np.sqrt(1-rx**2)*x
            # Induce correlation between covariates 1 and 2.
            x[:, 2] = r*x[:, 1] + np.sqrt(1-r**2)*x[:, 2]
            pr = 1/(1+np.exp(-np.dot(x, beta)))
            # Gaussian copula: the shared 're' term induces within-cluster
            # dependence of the binary outcomes.
            z = re*np.random.normal() +\
                np.sqrt(1-re**2)*np.random.normal(size=n)
            u = norm.cdf(z)
            y = 1*(u < pr)
            for j in range(n):
                OUT.write("%d, %d," % (i, y[j]))
                OUT.write(",".join(["%.3f" % b for b in x[j, :]]) + "\n")
def generate_linear():
    """Write simulated clustered Gaussian-response data to gee_linear_1.csv.

    Each row is: cluster id, outcome, then p covariate values.
    """
    # Number of clusters
    nclust = 100
    # Regression coefficients
    beta = np.array([1, -2, 1], dtype=np.float64)
    # Within cluster covariate correlations
    r = 0.4
    # Between cluster covariate effects
    rx = 0.5
    # Within-cluster outcome dependence
    # re = 0.3
    p = len(beta)
    # 'with' guarantees the file is closed even if generation fails.
    with open("gee_linear_1.csv", "w") as OUT:
        for i in range(nclust):
            n = np.random.randint(3, 6)  # Cluster size
            x = np.random.normal(size=(n, p))
            x = rx*np.random.normal() + np.sqrt(1-rx**2)*x
            x[:, 2] = r*x[:, 1] + np.sqrt(1-r**2)*x[:, 2]
            # TODO: should `e` be used somewhere?
            # e = np.sqrt(1-re**2)*np.random.normal(size=n) + re*np.random.normal()
            y = np.dot(x, beta) + np.random.normal(size=n)
            for j in range(n):
                # NOTE(review): y is continuous but written with %d, which
                # truncates toward zero -- confirm intended (the nested-linear
                # generator writes y with %.3f).  Kept to preserve the
                # existing fixture format.
                OUT.write("%d, %d," % (i, y[j]))
                OUT.write(",".join(["%.3f" % b for b in x[j, :]]) + "\n")
def generate_nested_linear():
    """Write nested-cluster Gaussian data to gee_nested_linear_1.csv.

    Each cluster has 10 observations split into 2 subclusters of 5; the
    variance components v1 (cluster), v2 (subcluster) and v3 (error) build
    the nested covariance structure.
    """
    # Number of clusters (clusters have 10 values, partitioned into 2
    # subclusters of size 5).
    nclust = 200
    # Regression coefficients
    beta = np.array([1, -2, 1], dtype=np.float64)
    # Top level cluster variance component
    v1 = 1
    # Subcluster variance component
    v2 = 0.5
    # Error variance component
    v3 = 1.5
    p = len(beta)
    # 'with' guarantees the file is closed even if generation fails.
    with open("gee_nested_linear_1.csv", "w") as OUT:
        for i in range(nclust):
            x = np.random.normal(size=(10, p))
            y = np.dot(x, beta)
            # Shared cluster effect, then one effect per subcluster of 5.
            y += np.sqrt(v1)*np.random.normal()
            y[0:5] += np.sqrt(v2)*np.random.normal()
            y[5:10] += np.sqrt(v2)*np.random.normal()
            y += np.sqrt(v3)*np.random.normal(size=10)
            for j in range(10):
                OUT.write("%d, %.3f," % (i, y[j]))
                OUT.write(",".join(["%.3f" % b for b in x[j, :]]) + "\n")
def generate_ordinal():
    """Write simulated clustered ordinal data (4 levels) to gee_ordinal_1.csv."""
    # Regression coefficients
    beta = np.zeros(5, dtype=np.float64)
    beta[2] = 1
    beta[4] = -1
    # Within-cluster dependence of the latent Gaussian terms.
    rz = 0.5
    # 'with' guarantees the file is closed even if generation fails.
    with open("gee_ordinal_1.csv", "w") as OUT:
        for i in range(200):
            n = np.random.randint(3, 6)  # Cluster size
            x = np.random.normal(size=(n, 5))
            # Random cluster-level shift applied to every covariate.
            for j in range(5):
                x[:, j] += np.random.normal()
            pr = np.dot(x, beta)
            # Cut points 1, 0, -0.5 define the cumulative probabilities.
            pr = np.array([1, 0, -0.5]) + pr[:, None]
            pr = 1 / (1 + np.exp(-pr))
            # Gaussian copula for within-cluster dependence.
            z = rz*np.random.normal() +\
                np.sqrt(1-rz**2)*np.random.normal(size=n)
            u = norm.cdf(z)
            # Category = number of thresholds exceeded (0..3).
            y = (u[:, None] > pr).sum(1)
            for j in range(n):
                OUT.write("%d, %d," % (i, y[j]))
                OUT.write(",".join(["%.3f" % b for b in x[j, :]]) + "\n")
def generate_nominal():
    """Write simulated clustered nominal data (3 categories) to gee_nominal_1.csv."""
    # Regression coefficients for the first two categories (third is the
    # multinomial-logit reference category).
    beta1 = np.r_[0.5, 0.5]
    beta2 = np.r_[-1, -0.5]
    p = len(beta1)
    # Within-cluster dependence of the latent Gaussian terms.
    rz = 0.5
    # 'with' guarantees the file is closed even if generation fails.
    with open("gee_nominal_1.csv", "w") as OUT:
        for i in range(200):
            n = np.random.randint(3, 6)  # Cluster size
            x = np.random.normal(size=(n, p))
            # First covariate is an intercept column.
            x[:, 0] = 1
            for j in range(1, x.shape[1]):
                x[:, j] += np.random.normal()
            # Multinomial-logit category probabilities.
            pr1 = np.exp(np.dot(x, beta1))[:, None]
            pr2 = np.exp(np.dot(x, beta2))[:, None]
            den = 1 + pr1 + pr2
            pr = np.hstack((pr1/den, pr2/den, 1/den))
            cpr = np.cumsum(pr, 1)
            # Gaussian copula for within-cluster dependence.
            z = rz*np.random.normal() +\
                np.sqrt(1-rz**2)*np.random.normal(size=n)
            u = norm.cdf(z)
            # Category index via inverse-CDF sampling on the cumulative probs.
            y = (u[:, None] > cpr).sum(1)
            for j in range(n):
                OUT.write("%d, %d," % (i, y[j]))
                OUT.write(",".join(["%.3f" % b for b in x[j, :]]) + "\n")
def generate_poisson():
    """Simulate clustered Poisson counts and write gee_poisson_1.csv.

    Each output row is: cluster id, count y, then 5 covariates.  The
    rate is exp(x . beta) with a log link.
    """
    # Regression coefficients; only covariates 2 and 4 have an effect.
    beta = np.zeros(5, dtype=np.float64)
    beta[2] = 0.5
    beta[4] = -0.5

    nclust = 100

    # Context manager guarantees the file is closed even on error.
    with open("gee_poisson_1.csv", "w") as OUT:
        for i in range(nclust):
            n = np.random.randint(3, 6)  # Cluster size
            x = np.random.normal(size=(n, 5))
            # One shared shift per column induces within-cluster
            # covariate correlation.
            for j in range(5):
                x[:, j] += np.random.normal()
            lp = np.dot(x, beta)
            E = np.exp(lp)
            # np.random.poisson accepts an array of rates directly,
            # replacing the former per-element list comprehension.
            y = np.random.poisson(E)
            for j in range(n):
                OUT.write("%d, %d," % (i, y[j]))
                OUT.write(",".join(["%.3f" % b for b in x[j, :]]) + "\n")
|
{
"alphanum_fraction": 0.4978149344,
"author": null,
"avg_line_length": 23.3911111111,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e0c11beb5ede3740657dcdd1cf0993e165a343c3",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2608,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T07:38:30.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-02T21:32:31.000Z",
"max_forks_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "EkremBayar/bayar",
"max_forks_repo_path": "venv/Lib/site-packages/statsmodels/genmod/tests/results/gee_generate_tests.py",
"max_issues_count": 6137,
"max_issues_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T22:53:17.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-01-01T00:33:45.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "EkremBayar/bayar",
"max_issues_repo_path": "venv/Lib/site-packages/statsmodels/genmod/tests/results/gee_generate_tests.py",
"max_line_length": 79,
"max_stars_count": 6931,
"max_stars_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "EkremBayar/bayar",
"max_stars_repo_path": "venv/Lib/site-packages/statsmodels/genmod/tests/results/gee_generate_tests.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T17:03:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-01T11:41:55.000Z",
"num_tokens": 1698,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5263
}
|
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#############################################################################
#
# Author: Alex T. GILLET
#
# Copyright: A. Gillet TSRI 2003
#
#############################################################################
#
# $Header: /opt/cvs/DejaVu2/Arrows.py,v 1.1.1.1 2014/06/19 19:41:02 sanner Exp $
#
# $Id: Arrows.py,v 1.1.1.1 2014/06/19 19:41:02 sanner Exp $
#
##
# class to draw a arrow in DejaVu2
##
from opengltk.OpenGL import GL
import numpy.oldnumeric as Numeric
from numpy.oldnumeric import array
from IndexedGeom import IndexedGeom
import datamodel, viewerConst
from viewerFns import checkKeywords
from Materials import Materials
from colorTool import glMaterialWithCheck, resetMaterialMemory
class Arrows(IndexedGeom):
    """Indexed geometry drawing a set of line arrows.

    Each face in the face set is a sequence of vertex indices; an arrow
    is drawn between every consecutive pair of indices.  The arrow shape
    comes from a small unit-length vertex template built by
    _arrowTemplate() and scaled/rotated per segment in arrowdraw().
    """

    keywords = IndexedGeom.keywords

    def __init__(self, name=None, check=1, **kw):
        # Validate keyword arguments only in debug mode and only when
        # check is truthy.
        if __debug__:
            if check:
                apply( checkKeywords, (name,self.keywords), kw)

        if not kw.get('shape'):
            kw['shape'] = (0,3)    # default shape for sphere set

        apply( IndexedGeom.__init__, (self, name, 0), kw)

        self.culling = GL.GL_BACK
        self.inheritCulling = 0

        # Vertex array must be 2-dimensional (npoints x 3).
        assert len(self.vertexSet.vertices.ashape)==2

        self.frontPolyMode = GL.GL_FILL
        self.inheritFrontPolyMode = viewerConst.NO
        self.lighting = viewerConst.YES

        self.realFMat = Materials()  # used in RedoDisplayList to build
        self.realBMat = Materials()  # used in RedoDisplayList to build
                                     # material taking getFrom into account

        self.radius = 0.2
        self.oneRadius = viewerConst.YES

        # Build the arrow-head template with 4 points around the shaft.
        self._arrowTemplate(4)

        # Call Set explicitly on this class (not a subclass override).
        Arrows.Set(self)

        self._modified = False

    def Set(self, check=1, redo=1, updateOwnGui=True, **kw):
        """set data for this object:
        check=1 : verify that all the keywords present can be handle by this func
        redo=1 : append self to viewer.objectsNeedingRedo
        updateOwnGui=True : allow to update owngui at the end this func
        """
        redoFlags = apply( IndexedGeom.Set, (self, check, 0), kw )

        v=kw.get('vertices')
        if v:
            # New vertices force a display-list rebuild and disable the
            # single-radius shortcut.
            redoFlags |= self._redoFlags['redoDisplayListFlag']
            self.oneRadius = viewerConst.NO

        return self.redoNow(redo, updateOwnGui, redoFlags)

    def Draw(self):
        """Issue the OpenGL calls for all arrows.

        Returns 0 when there are no vertices to draw, 1 otherwise.
        """
        if len(self.vertexSet.vertices) == 0:
            return 0

        if self.inheritMaterial:
            # Materials come from the parent geometry; nothing local.
            fp = None
            bp = None
            face = None
        else:
            # Build the effective front material (realFMat) from the
            # per-property bindings of the declared front material.
            mat = self.materials[GL.GL_FRONT]
            rmat = self.realFMat
            bind = [10,10,10,10]
            for pInd in range(4):
                bind[pInd], rmat.prop[pInd] = mat.GetProperty(pInd)
            rmat.prop[4] = mat.prop[4]
            rmat.prop[5] = mat.prop[5]
            rmat.binding[:4] = bind
            # NOTE(review): self-assignment below looks like a no-op;
            # possibly meant to copy bindings from `mat` -- confirm.
            rmat.binding[4:] = rmat.binding[4:]
            fp = rmat

            if fp:
                if self.frontAndBack:
                    # One material is applied to both faces.
                    face = GL.GL_FRONT_AND_BACK
                    bp = None
                else:
                    # Build the effective back material separately.
                    face = GL.GL_FRONT
                    mat = self.materials[GL.GL_BACK]
                    rmat = self.realBMat
                    bind = [10,10,10,10]
                    for pInd in range(4):
                        bind[pInd], rmat.prop[pInd]=mat.GetProperty(pInd)
                    rmat.prop[4] = mat.prop[4]
                    rmat.prop[5] = mat.prop[5]
                    rmat.binding[:4] = bind
                    # NOTE(review): self-assignment, see front material.
                    rmat.binding[4:] = rmat.binding[4:]
                    bp = rmat

        c = self.vertexSet.vertices.array
        #if self.oneRadius == viewerConst.NO:
            #radii = self.vertexSet.radii.array

        # Each drawn segment gets its own OpenGL pick name.
        pickName = 0
        for i in xrange(len(self.faceSet.faces.array)):
            # Draw an arrow between each pair of consecutive indices.
            for j in xrange(len(self.faceSet.faces.array[i])-1):
                vi1 = self.faceSet.faces.array[i][j]
                vi2 = self.faceSet.faces.array[i][j+1]
                #print vi1,vi2
                if fp:
                    # Gather per-end front material properties for this
                    # segment, depending on each property's binding.
                    fpp1 = [None,None,None,None,None]
                    fpp2 = [None,None,None,None,None]
                    for m in (0,1,2,3,4):
                        if fp.binding[m] == viewerConst.PER_VERTEX:
                            fpp1[m] = fp.prop[m][vi2]
                            # to make sure array is contiguous
                            fpp1[m] = array(fpp1[m],copy=1)
                            fpp2[m] = fp.prop[m][vi1]
                            fpp2[m] = array(fpp2[m],copy=1)
                        elif fp.binding[m] == viewerConst.PER_PART:
                            fpp2[m]= fpp1[m] = fp.prop[m][i]
                            fpp1[m] = array(fpp1[m],copy=1)
                            fpp2[m] = array(fpp2[m],copy=1)
                else:
                    fpp1 = fpp2 = None
                if bp and not self.frontAndBack:
                    # Same gathering for the back material.
                    bpp1 = [None,None,None,None,None]
                    bpp2 = [None,None,None,None,None]
                    for m in (0,1,2,3,4):
                        if bp.binding[m] == viewerConst.PER_VERTEX:
                            bpp1[m] = bp.prop[m][vi2]
                            bpp1[m] = array(bpp1[m],copy=1)
                            bpp2[m] = bp.prop[m][vi1]
                            bpp2[m] = array(bpp2[m],copy=1)
                        elif bp.binding[m] == viewerConst.PER_PART:
                            bpp2[m] = bpp1[m] = bp.prop[m][i]
                            bpp1[m] = array(bpp1[m],copy=1)
                else:
                    bpp1 = bpp2 = None

                GL.glPushName(pickName)
                self.arrowdraw(c[vi1], c[vi2],fpp1, bpp1, fpp2, bpp2,face)
                GL.glPopName()
                pickName = pickName +1
        return 1

    def _arrowTemplate(self, npoly):
        """Build the unit arrow template in self.v.

        self.v has npoly+2 rows: row 0 is the tail (origin), row 1 the
        tip at z=1, and rows 2.. are npoly points on a circle of radius
        0.1 at z=0.75 forming the arrow head.
        """
        assert (npoly >1)
        import numpy.oldnumeric as Numeric, math
        self.npoly = npoly
        self.v = Numeric.zeros( ((npoly+2),3), 'f')
        a = -math.pi              # starting angle
        d = 2*math.pi / npoly     # increment
        # coord of 1st point of arrow
        self.v[0][0] = 0.
        self.v[0][1] = 0.
        self.v[0][2] = 0.
        # coord of 2st point of arrow
        self.v[1][0] = 0.
        self.v[1][1] = 0.
        self.v[1][2] = 1.
        # coord of the others points
        for i in range(npoly):
            h = i+2
            self.v[h][0] = math.cos(a)/10.
            self.v[h][1] = math.sin(a)/10.
            self.v[h][2] = 0.75
            a=a+d

    def arrowdraw(self, x, y, colxf=None, colxb=None,
                  colyf=None, colyb=None,face=None):
        # draw a cylinder going from x to y
        # col for materials
        # face can be GL_FRONT_AND_BACK or something else
        # NOTE(review): colyf/colyb are accepted but never used here.

        # determine scale and rotation of template
        import math
        sz=0.0
        for i in (0,1,2): sz=sz+(x[i]-y[i])*(x[i]-y[i])
        if sz <= 0.0: return
        sz = math.sqrt(sz)

        # Rotation about X to tilt the template toward y.
        rx = -180.0*math.acos((y[2]-x[2])/sz)/math.pi
        dx = y[0]-x[0]
        dy = y[1]-x[1]
        if math.fabs(dx) < 0.00001 and math.fabs(dy) < 0.00001:
            rz = 0.0
        else:
            # Rotation about Z from the XY projection of the segment.
            rz = -180.0*math.atan2(dx,dy)/math.pi

        GL.glPushMatrix()
        GL.glTranslatef(float(x[0]),float(x[1]),float(x[2]))
        if rz<=180.0 and rz >=-180.0: GL.glRotatef(float(rz), 0., 0., 1.)
        GL.glRotatef(float(rx), 1., 0., 0.)

        # draw arrow
        GL.glBegin(GL.GL_LINES)
        if colxf:
            # Apply front material properties for this segment end.
            for m in (0,1,2,3,4):
                if colxf[m] is not None:
                    glMaterialWithCheck( face, viewerConst.propConst[m],
                                         colxf[m] )
        if colxb and face!=GL.GL_FRONT_AND_BACK:
            # Apply back material properties when faces are separate.
            for m in (0,1,2,3,4):
                if colxb[m] is not None:
                    glMaterialWithCheck( GL.GL_BACK,
                                         viewerConst.propConst[m],
                                         colxb[m] )
        # Shaft: template tail to tip, scaled by segment length.
        GL.glVertex3f(float(self.v[0][0]), float(self.v[0][1]), float(self.v[0][2]*sz))
        GL.glVertex3f(float(self.v[1][0]), float(self.v[1][1]), float(self.v[1][2]*sz))
        # Head: one line from the tip to each template circle point.
        for i in range(self.npoly):
            h = i+2
            vx = self.v[h]
            GL.glVertex3f(float(self.v[1][0]), float(self.v[1][1]),
                          float(self.v[1][2]*sz))
            GL.glVertex3f(float(vx[0]*sz), float(vx[1]*sz), float(vx[2]*sz))
        GL.glEnd()
        GL.glPopMatrix()
#############################################################################
#
# Author: Yong Zhao
#
# Copyright: Y.Zhao, 2004
#
#############################################################################
from DejaVu2.Cylinders import Cylinders
#from opengltk.OpenGL import GL
from math import sqrt
#import numpy.oldnumeric as Numeric
from warnings import warn
from mglutil.util.defaultPalettes import ChooseColor
import types
import numpy
class Axis(Cylinders):
    """This class displays an axis in a viewer.

    The axis is built from 5 vertices (see calculateVertices) joined by
    4 cylinder segments whose radii taper to 0 at both ends, which
    produces a shaft with an arrow head.
    """

    keywords = Cylinders.keywords + ['point1', 'point2', 'radius', 'color', 'point',
                                     'unitVector', 'length' ]

    def __init__(self, name=None, check=1, **kw):
        """constructor: two ways of building the axis:
        1) by specifing point and unit vector, axis goes through 'point', with center at
        the point, unit vector gives the direction of axis
        2) by two points, axis goes through the center (average) of the two points,
        axis pointing to the direction of ( point1--> point2)
        if both definitions are specified 2) will overwrite 1) """
        apply(Cylinders.__init__, (self, name), {})
        self.length = None
        self.point1 = None
        self.point2 = None
        #print 'kw: ', kw
        # Fill in defaults only for keywords the caller did not supply.
        if kw.get('faces') == None:
            # 4 segments chaining the 5 vertices from calculateVertices.
            kw['faces'] = [ [0,1], [1,2], [2,3], [3,4] ]
        if kw.get('quality') == None:
            kw['quality']=20
        if kw.get('inheritMaterial') == None:
            kw['inheritMaterial'] = 0
        if kw.get('culling') == None:
            kw['culling'] = GL.GL_NONE
        apply(self.Set, (check,), kw)

    def Set(self, check=1, redo=1, **kw):
        # Translate axis-level keywords (point/unitVector/point1/point2/
        # length/radius/color) into Cylinders keywords, then forward the
        # rest unchanged to Cylinders.Set.
        cylkw = {}
        updateverts = False
        point = kw.get('point', None)
        if point != None:
            kw.pop('point')
        unitVector = kw.get('unitVector', None)
        if unitVector != None:
            assert len(unitVector) == 3
            kw.pop('unitVector')
        if point is not None and unitVector is not None:
            # Center the axis on `point`, oriented along unitVector.
            p=numpy.array(point, 'f')
            v=numpy.array(unitVector, 'f')
            self.point1=p - 0.5 * v
            self.point2=p + 0.5 * v
            updateverts = True
        point1 = kw.get('point1')
        if point1 is not None:
            assert len(point1) == 3
            self.point1 = numpy.array(point1, 'f')
            updateverts = True
            kw.pop('point1')
        point2 = kw.get('point2')
        if point2 is not None:
            assert len(point2) == 3
            self.point2 = numpy.array(point2, 'f')
            updateverts=True
            kw.pop('point2')
        length = kw.get('length', None)
        if length is not None and length != self.length:
            # NOTE(review): 'length' is only popped when it differs from
            # the stored value; an unchanged 'length' keyword falls
            # through to Cylinders.Set -- confirm that is intended.
            updateverts=True
            kw.pop('length')
            self.length = length
        if updateverts:
            cylkw['vertices'] = self.calculateVertices(self.length)
        radius = kw.get('radius', None)
        if radius is not None:
            kw.pop('radius')
            # Taper to 0 at both ends; the head base is twice as wide.
            cylkw ['radii'] = [0.0, radius, radius, 2*radius, 0.0]
        color = kw.get('color')
        if color is not None:
            self.color = color
            cylkw['materials'] = [self.getColor(color),]
            kw.pop('color')
        cylkw.update(kw)
        return apply(Cylinders.Set , (self, check, redo), cylkw)

    def getColor(self, color):
        """returns the color of axis, color can be string (e.g. 'red' ) or
        tuple (e.g. (1., 0., 0.) )
        The default color is white (1,1,1)
        """
        if type(color) == types.TupleType and len(color)==3:
            return color
        if type(color) == types.StringType:
            if color in ChooseColor.keys():
                return ChooseColor[color]
        # Unrecognized input falls back to white.
        return ChooseColor['white']

    def calculateVertices(self, length, center=None):
        """ tail -> head

        Returns the 5 axis vertices [d_tail, tail, d_head, head, summit]
        as plain lists, or None when point1 and point2 nearly coincide.
        The `center` parameter is currently ignored (recomputed below).
        """
        p1=self.point1
        p2=self.point2
        #if not center:
        center = (p1+p2) /2.0
        vector = p2-p1
        distance = sqrt( (p2[0]-p1[0])**2 +(p2[1]-p1[1])**2 +(p2[2]-p1[2])**2 )
        if distance < 0.000001:
            warn("The two points specified are too close.")
            return
        # Half-length of the visible shaft along the axis direction.
        half = vector * (length / distance/2.0)
        head= center + half
        tail= center - half
        # the arrow's length is 10% of shaft
        summit = center + vector * \
                 (length / distance /2.0 * 1.2)
        ##
        ##                             |\
        ##       |-----------------|   | \
        ##       |-----------------|   | /   <- summit
        ##                             |/
        ##       d_tail,            d_head
        ##       tail               head      summit
        # d_tail/d_head are nudged slightly past tail/head so the
        # zero-radius end caps do not coincide with the shaft ends.
        d_half = vector * ((length+0.0001) / distance/2.0)
        d_tail= center - d_half
        d_head= center + d_half
        head=head.tolist()
        tail=tail.tolist()
        d_head=d_head.tolist()
        d_tail=d_tail.tolist()
        summit = summit.tolist()
        return [d_tail, tail, d_head, head, summit]
#from DejaVu2.Arrows import Axis
#xx = Axis('axis', point=[0,0,0 ], unitVector=[1,0,0], length=20., radius=0.3, color = 'green')
#self.GUI.VIEWER.AddObject(xx)
class AxisOld:
    """This class displays an axis in a viewer.

    Legacy implementation kept for reference; superseded by the
    Cylinders-derived Axis class above.  It owns a Cylinders geometry
    (self.shaft) and re-Sets it on every display()/configure() call.
    """

    def __init__(self, point1=None, point2=None, point=None, unitVector=None,
                 length = 1.0, viewer=None,
                 radius = 1.0, color='white', name=None):
        """constructor: two ways of building the axis:
        1) by specifing point and unit vector, axis goes through 'point', with center at
        the point, unit vector gives the direction of axis
        2) by two points, axis goes throw the center (average) of the two points,
        axis pointing to the direction of ( point1--> point2)
        if both definitions are specified 2) will overwrite 1)
        """
        if point is not None and unitVector is not None:
            p=Numeric.array(point, 'f')
            v=Numeric.array(unitVector, 'f')
            self.point1=p - 0.5 * v
            self.point2=p + 0.5 * v
        if point1 is not None and point2 is not None:
            # Two explicit points take precedence over point/unitVector.
            self.point1=Numeric.array(point1,'f')
            self.point2=Numeric.array(point2,'f')
        self.length=length
        self.radius=radius
        self.viewer = viewer
        self.color=self.getColor(color)
        if name is None:
            name='Axis'
        self.shaft = Cylinders(name=name, quality=20, materials = [self.color],
                               inheritMaterial=0,
                               culling=GL.GL_NONE)
        if self.viewer:
            self.viewer.AddObject(self.shaft)
        self.display()

    def getColor(self, color):
        """returns the color of axis, color can be string (e.g. 'red' ) or
        tuple (e.g. (1., 0., 0.) )
        The default color is white (1,1,1)
        """
        if type(color) == types.TupleType and len(color)==3:
            return color
        if type(color) == types.StringType:
            if color in ChooseColor.keys():
                return ChooseColor[color]
        # Unrecognized input falls back to white.
        return ChooseColor['white']

    def display(self):
        """Rebuild the shaft geometry from the current axis settings."""
        ##                             |\
        ##       |-----------------|   | \
        ##       |-----------------|   | /   <- summit
        ##                             |/
        ##       v1,v2              v3,v4
        # v = [v1, v2, v3, v4, summit]
        v = self.calculateVertices()
        # faces = [ [v1,v2],[v2,v3], [v3,v4], [v4,v5],[v5,summit] ]
        faces = [ [0,1], [1,2], [2,3], [3,4] ]
        # radii: taper to 0 at both ends, head base twice the shaft.
        r=self.radius
        radii=( 0.0, r, r, 2*r, 0.0)
        self.shaft.Set( vertices=v,
                        faces=faces,
                        radii=radii,
                        materials = [self.color],
                        )

##         self.shaft = Cylinders("shaft", vertices=v,
##                           faces=faces,
##                           radii=radii,
##                           quality=20, materials = [(1,0,0)],
##                           inheritMaterial=0,
##                           cull =GL.GL_NONE)
##         self.shaft = Cylinders("shaft", vertices=v[:3],
##                           faces=faces[:2],
##                           radii=radii[:2],
##                           quality=20, materials = [(1,0,0)],
##                           inheritMaterial=0,
##                           cull =GL.GL_NONE)
##         self.arrow = Cylinders("arrow", vertices=v[2:],
##                           #faces=faces[2:],
##                           faces = [ [0,1], [1,2] ],
##                           radii=radii[2:],
##                           quality=20, materials = [(1,0,0)],
##                           inheritMaterial=0,
##                           cull =GL.GL_NONE)

        if self.viewer:
            # Re-Set with viewer-specific attributes and trigger a redraw.
            self.shaft.Set(culling=GL.GL_NONE,
                           backPolyMode=GL.GL_FILL,
                           vertices=v,
                           faces=faces,
                           radii=radii,
                           materials = [self.color],
                           )
##             self.viewer.AddObject(self.shaft)
            self.viewer.Redraw()

    def calculateVertices(self, center=None):
        """ tail -> head

        Returns the 5 axis vertices [d_tail, tail, d_head, head, summit]
        as plain lists, or None when point1 and point2 nearly coincide.
        The `center` parameter is currently ignored (recomputed below).
        """
        p1=self.point1
        p2=self.point2
        length = self.length
        #if not center:
        center = (p1+p2) /2.0
        vector = p2-p1
        distance = sqrt( (p2[0]-p1[0])**2 +(p2[1]-p1[1])**2 +(p2[2]-p1[2])**2 )
        if distance < 0.000001:
            warn("The two points specified are too close.")
            return
        half = vector * (length / distance/2.0)
        head= center + half
        tail= center - half
        # the arrow's length is 10% of shaft
        summit = center + vector * \
                 (length / distance /2.0 * 1.2)
        ##
        ##                             |\
        ##       |-----------------|   | \
        ##       |-----------------|   | /   <- summit
        ##                             |/
        ##       d_tail,            d_head
        ##       tail               head      summit
        # d_tail/d_head are nudged slightly past tail/head so the
        # zero-radius end caps do not coincide with the shaft ends.
        d_half = vector * ((length+0.0001) / distance/2.0)
        d_tail= center - d_half
        d_head= center + d_half
        head=head.tolist()
        tail=tail.tolist()
        d_head=d_head.tolist()
        d_tail=d_tail.tolist()
        summit = summit.tolist()
        return [d_tail, tail, d_head, head, summit]

    def configure(self, length=None, radius=None, point1=None, point2=None,
                  viewer=None , color =None):
        """ change the configuration of axis"""
        update=False
        if length is not None and length != self.length:
            self.length=length
            update=True
        if point1 is not None and point1 != self.point1:
            self.point1 = point1
            update=True
        if point2 is not None and point2 != self.point2:
            self.point2 = point2
            update=True
        if radius is not None and radius != self.radius:
            self.radius = radius
            update=True
        if viewer is not None and viewer != self.viewer:
            self.viewer = viewer
            update=True
        if color is not None and color != self.color:
            self.color = color
            update=True
        if update:
            self.display()
        ## Fixme:
        ## after changing point1, point2, some parts of the axis is not upated.

    def getShaft(self):
        # Accessor for the underlying Cylinders geometry.
        return self.shaft
# fixme unit tests to be added
# example of usage
"""
from DejaVu2.Arrows import AxisOld as Axis
xx= Axis(point=[0,0,0 ], unitVector=[1,0,0], length=60., viewer=self.GUI.VIEWER, radius=0.3, color='red', name='X axis')
yy= Axis(point=[0,0,0 ], unitVector=[0,1,0], length=60., viewer=self.GUI.VIEWER, radius=0.3, color='green', name='Y axis')
zz= Axis(point=[0,0,0 ], unitVector=[0,0,1], length=60., viewer=self.GUI.VIEWER, radius=0.3, color='blue', name='Z axis')
"""
|
{
"alphanum_fraction": 0.4826805415,
"author": null,
"avg_line_length": 33.641815235,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7d018f3ec5c34978661a295e5ab4f4aae70a40cc",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6900497629f601c4b6c0c37da26de58ffa221988",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "bio-hpc/metascreener",
"max_forks_repo_path": "MetaScreener/external_sw/mgltools/MGLToolsPckgs/DejaVu2/Arrows.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6900497629f601c4b6c0c37da26de58ffa221988",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "bio-hpc/metascreener",
"max_issues_repo_path": "MetaScreener/external_sw/mgltools/MGLToolsPckgs/DejaVu2/Arrows.py",
"max_line_length": 123,
"max_stars_count": 8,
"max_stars_repo_head_hexsha": "6900497629f601c4b6c0c37da26de58ffa221988",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "bio-hpc/metascreener",
"max_stars_repo_path": "MetaScreener/external_sw/mgltools/MGLToolsPckgs/DejaVu2/Arrows.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-14T11:30:03.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-14T21:30:01.000Z",
"num_tokens": 5274,
"path": null,
"reason": "import numpy,from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 20757
}
|
import http.client
import json
import numpy
from pymongo import MongoClient
import datetime
import pprint
from bson.objectid import ObjectId
# Ad-hoc exploration script: print one cached Alpha Vantage API
# response stored in MongoDB.  The original fetch-and-store code is
# kept below, commented out, as history.
# NOTE(review): credentials (and, below, an API key) are hard-coded;
# move them to environment variables or a secrets store.
client = MongoClient('mongodb://trading:secret@127.0.0.1:27017/')
trading_db = client['trading-db']
api_response_collection = trading_db['api-response-collection']
# post_id = trading_collection.insert_one(post).inserted_id
# print(post_id)
# pprint.pprint(trading_collection.find_one({'_id': ObjectId(post_id)}))
# print(trading_collection.drop())
# conn = http.client.HTTPSConnection("rapidapi.p.rapidapi.com")
# headers = {
#     'x-rapidapi-host': "alpha-vantage.p.rapidapi.com",
#     'x-rapidapi-key': "c1a14f833cmsh62627ab4f191309p1d4987jsne74b5fe0a94f"
#     }
# conn.request("GET", "/query?interval=60min&function=TIME_SERIES_INTRADAY&symbol=TSLA&datatype=json&output_size=compact", headers=headers)
# res = conn.getresponse()
# data = res.read()
# data_json = json.loads(data)
# response_id = api_response_collection.insert(data_json, check_keys=False)
# Print the first stored document (arbitrary order without a sort).
pprint.pprint(api_response_collection.find_one())
# print(response_id)
# time_series = data_json['Time Series (60min)']
# # print(data_json['Time Series (60min)'])
# close_list = []
# for element in time_series:
#     print(time_series[element])
#     close_list.append(float(time_series[element]['4. close']))
# print(close_list)
# # close_np_array = numpy.array(close_list)
# # output = talib.SMA(close_np_array)
|
{
"alphanum_fraction": 0.7533193571,
"author": null,
"avg_line_length": 29.8125,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a83e03f5939eeb1d77027c937cc52417732f87ac",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6e960fcf6752d65c396a89e10185536ccec3cc52",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "FabiCorp/trade-ts",
"max_forks_repo_path": "test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6e960fcf6752d65c396a89e10185536ccec3cc52",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "FabiCorp/trade-ts",
"max_issues_repo_path": "test.py",
"max_line_length": 139,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6e960fcf6752d65c396a89e10185536ccec3cc52",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "FabiCorp/trade-ts",
"max_stars_repo_path": "test.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 377,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1431
}
|
# FIXME: Import order causes error:
# ImportError: dlopen: cannot load any more object with static TL
# https://github.com/pytorch/pytorch/issues/2083
import torch
import numpy as np
import skimage.data
from torchfcn.models.fcn32s import get_upsampling_weight
def test_get_upsampling_weight():
    """Upsample a sample image 2x with a ConvTranspose2d initialized by
    get_upsampling_weight and check the output is ~twice the input size.

    Returns the (source, upsampled) image pair for visual inspection.
    """
    src = skimage.data.coffee()

    # HWC uint8 image -> 1xCxHxW float tensor wrapped in a Variable.
    batch = torch.from_numpy(
        src.transpose(2, 0, 1)[np.newaxis, :, :, :]).float()
    batch = torch.autograd.Variable(batch)

    in_channels = 3
    out_channels = 3
    kernel_size = 4
    deconv = torch.nn.ConvTranspose2d(
        in_channels, out_channels, kernel_size, stride=2, bias=False)
    deconv.weight.data = get_upsampling_weight(
        in_channels, out_channels, kernel_size)

    out = deconv(batch).data.numpy()[0].transpose(1, 2, 0)
    dst = out.astype(np.uint8)

    # stride=2 should roughly double each spatial dimension (allow a
    # small border introduced by the kernel).
    assert abs(src.shape[0] * 2 - dst.shape[0]) <= 2
    assert abs(src.shape[1] * 2 - dst.shape[1]) <= 2
    return src, dst
if __name__ == '__main__':
    # Visual check: show the original and the 2x-upsampled image side
    # by side with their shapes in the titles.
    import matplotlib.pyplot as plt
    src, dst = test_get_upsampling_weight()
    plt.subplot(121)
    plt.imshow(src)
    plt.title('x1: {}'.format(src.shape))
    plt.subplot(122)
    plt.imshow(dst)
    plt.title('x2: {}'.format(dst.shape))
    plt.show()
|
{
"alphanum_fraction": 0.646917534,
"author": null,
"avg_line_length": 24.0192307692,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c4f92588858e2f1837cb4d179c2a3c0ce2e06d50",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 577,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T12:15:21.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-03-14T01:12:13.000Z",
"max_forks_repo_head_hexsha": "5907fb7765bdcc0cff8b2819d6548a277f2e2231",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "linesd/pytorch-fcn",
"max_forks_repo_path": "tests/models_tests/test_fcn32s.py",
"max_issues_count": 151,
"max_issues_repo_head_hexsha": "5907fb7765bdcc0cff8b2819d6548a277f2e2231",
"max_issues_repo_issues_event_max_datetime": "2022-03-27T13:47:52.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-02-21T03:06:49.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "linesd/pytorch-fcn",
"max_issues_repo_path": "tests/models_tests/test_fcn32s.py",
"max_line_length": 69,
"max_stars_count": 1736,
"max_stars_repo_head_hexsha": "5907fb7765bdcc0cff8b2819d6548a277f2e2231",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "linesd/pytorch-fcn",
"max_stars_repo_path": "tests/models_tests/test_fcn32s.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T16:17:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-03-03T04:06:34.000Z",
"num_tokens": 356,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1249
}
|
program test1
C     Compiler-analysis test case exercising the logical operators
C     .AND., .OR. and .NOT. inside IF/ELSE blocks within DO loops.
C     NOTE(review): N is neither declared nor initialized; the first
C     two loop bounds depend on its (undefined) value.  This may be
C     intentional for the analysis being validated -- confirm before
C     changing.
      integer I
      real X(100)
C AND expression
      DO 100 I =1,N
         IF ((I.LT.1).AND.(I.LE.50)) THEN
         ELSE
            X(I) = 1
         ENDIF
 100  CONTINUE
C OR expression
      DO 200 I =1,N
         IF ((1.LE.I).OR.(I.LE.50)) THEN
         ELSE
            X(I) = 1
         ENDIF
 200  CONTINUE
C NOT expression
      DO 300 I =2,10
         IF (.NOT.(I.GT.1)) THEN
         ELSE
            X(I) = 0
         ENDIF
 300  CONTINUE
      end
|
{
"alphanum_fraction": 0.3496732026,
"author": null,
"avg_line_length": 13.3043478261,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "0c85d4caf9503b369f273bd463baf89d3b839554",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 12,
"max_forks_repo_forks_event_max_datetime": "2022-02-18T02:01:51.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-03-26T08:05:38.000Z",
"max_forks_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DVSR1966/par4all",
"max_forks_repo_path": "packages/PIPS/validation/Expressions/expressions.f",
"max_issues_count": 7,
"max_issues_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996",
"max_issues_repo_issues_event_max_datetime": "2019-03-11T16:01:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-05-29T09:29:00.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DVSR1966/par4all",
"max_issues_repo_path": "packages/PIPS/validation/Expressions/expressions.f",
"max_line_length": 77,
"max_stars_count": 51,
"max_stars_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DVSR1966/par4all",
"max_stars_repo_path": "packages/PIPS/validation/Expressions/expressions.f",
"max_stars_repo_stars_event_max_datetime": "2022-02-18T02:01:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-31T01:51:39.000Z",
"num_tokens": 174,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 612
}
|
# -*- coding: utf-8 -*-
"""
Test of the population propagator
"""
from aloe import step
from aloe import world
import numpy
from quantarhei.testing.feature import FeatureFileGenerator
from quantarhei.testing.feature import match_number
from quantarhei import TimeAxis
from quantarhei import PopulationPropagator
feature = """
#
# Population progator `propagate` method and the propagation matrix
# from `get_PropagationMatrix` method must aggree
#
#
#
Feature: Result of propagate method and the propagation matrix agree
We check that the propagate method gives the same populations as the
propagation matrix
@poppropagator
Scenario Outline: Propagator for a simple relaxation matrix
"""
example_1 = """
Examples:
| time_step | nsteps | time_step_2 | nsteps_2 | t_up | t_down |
| 0.1 | 10000 | 10.0 | 50 | 100.0 | 30.0 |
| 0.1 | 10000 | 10.0 | 50 | 200.0 | 100.0 |
| 0.1 | 10000 | 10.0 | 50 | 100.0 | 100.0 |
| 0.1 | 10000 | 10.0 | 50 | 60.0 | 30.0 |
"""
@step(r'a propagation time interval from zero with time step '+match_number+r' fs and '
      +match_number+r' steps')
def time_interval_prop(self, time_step, nsteps):
    """Matching the following

    #begin_feature
    Given a propagation time interval from zero with time step <time_step> fs and <nsteps> steps
    #end_feature

    """
    # Regex captures arrive as strings; convert before building the
    # fine-grained propagation TimeAxis shared via aloe's `world`.
    world.time = TimeAxis(0.0, int(nsteps), float(time_step))
    print("Setting time")
@step(r'a subset time with time step '+match_number+r' fs and '
      +match_number+r' steps')
def time_interval_sub(self, time_step, nsteps):
    """Matching the following

    #begin_feature
    And a subset time with time step <time_step_2> fs and <nsteps_2> steps
    #end_feature

    """
    # Coarser time axis on which the propagation matrix is sampled.
    world.subtime = TimeAxis(0.0, int(nsteps), float(time_step))
    print("Setting subtime")
@step(r'a relaxation matrix with uphill time '+match_number+r' fs and downhill time '
      +match_number+r' fs')
def relaxation_matrix(self, uphill, downhill):
    """Matching the following

    #begin_feature
    And a relaxation matrix with uphill time <t_up> fs and downhill time <t_down> fs
    #end_feature

    """
    # Rates are reciprocals of the relaxation times (in fs).
    up_rate = 1.0/float(uphill)
    down_rate = 1.0/float(downhill)
    # 2x2 rate matrix: columns are source states, rows target states;
    # each column sums to zero so total population is conserved.
    world.KK = numpy.array([[-up_rate, down_rate],
                            [up_rate, -down_rate]], dtype=numpy.float64)
@step(r' I calculate density matrix propagation and propagation matrix')
def calculation_of_propagation(self):
    """Matching the following

    #begin_feature
    When I calculate density matrix propagation and propagation matrix
    #end_feature

    """
    prop = PopulationPropagator(world.time, rate_matrix=world.KK)

    # All population starts in state 0.
    pop_ini = numpy.array([1.0, 0.0])

    # Populations propagated on the fine time axis.
    pop_t = prop.propagate(pop_ini)

    # Propagation matrix sampled on the coarser subset axis; applying
    # U[:,:,i] to the initial populations gives the populations at
    # subset step i.
    sta = world.subtime
    U = prop.get_PropagationMatrix(sta)

    pop_sub = numpy.zeros((2,sta.length))
    for i in range(sta.length):
        pop_sub[:,i] = numpy.dot(U[:,:,i],pop_ini)

    world.pop_t = pop_t
    world.pop_sub = pop_sub
@step(r' density matrix propagation and propagation matrix aggree')
def test_of_agreement(self):
    """Matching the following

    #begin_feature
    Then density matrix propagation and propagation matrix aggree
    #end_feature

    """
    pop_t = world.pop_t
    pop_sub = world.pop_sub

    Ns = world.subtime.length
    dt = world.time.step
    ds = world.subtime.step
    # N = number of fine steps per coarse step; assumes ds is an
    # (approximate) integer multiple of dt.
    N = round(ds/dt)

    # Populations from the propagation matrix must match those from
    # propagate() at the corresponding fine-axis indices.
    for i in range(Ns):
        numpy.testing.assert_allclose(pop_sub[:,i],pop_t[i*N,:])
if __name__ == '__main__':
    """
    Feature file: poppropagator_1.feature
    """
    # Running this module directly regenerates the feature file from
    # the `feature` text, the Examples table and the step docstrings
    # (the #begin_feature/#end_feature sections above).
    gen = FeatureFileGenerator(feature+example_1)

    gen.add_Given(time_interval_prop)
    gen.add_Given(time_interval_sub)
    gen.add_Given(relaxation_matrix)
    gen.add_When(calculation_of_propagation)
    gen.add_Then(test_of_agreement)

    gen.generate_feature_file("poppropagator_1.feature")
|
{
"alphanum_fraction": 0.6383029722,
"author": null,
"avg_line_length": 25.5950920245,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d47cc3620f2fe0188bb2a2071c2da94f8c6402d9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 21,
"max_forks_repo_forks_event_max_datetime": "2022-03-30T03:16:35.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-08-30T09:09:28.000Z",
"max_forks_repo_head_hexsha": "d822bc2db86152c418e330a9152e7866869776f7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "slamavl/quantarhei",
"max_forks_repo_path": "tests/bdd/features/qm/propagators/poppropagator_steps.py",
"max_issues_count": 61,
"max_issues_repo_head_hexsha": "d822bc2db86152c418e330a9152e7866869776f7",
"max_issues_repo_issues_event_max_datetime": "2021-11-10T13:53:06.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-09-19T10:45:56.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "slamavl/quantarhei",
"max_issues_repo_path": "tests/bdd/features/qm/propagators/poppropagator_steps.py",
"max_line_length": 96,
"max_stars_count": 14,
"max_stars_repo_head_hexsha": "d822bc2db86152c418e330a9152e7866869776f7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "slamavl/quantarhei",
"max_stars_repo_path": "tests/bdd/features/qm/propagators/poppropagator_steps.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-09T11:40:52.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-10-16T13:26:05.000Z",
"num_tokens": 1120,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4172
}
|
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.quantization import QuantStub, DeQuantStub
import torchvision
import unittest
import os
from neural_compressor.adaptor import FRAMEWORKS
from neural_compressor.model import MODELS
from neural_compressor.adaptor.pytorch import PyTorchVersionMode
import neural_compressor.adaptor.pytorch as nc_torch
from neural_compressor.experimental import Quantization, common
from neural_compressor.utils.pytorch import load
from neural_compressor.utils.utility import recover
import shutil
import copy
import numpy as np
import yaml
# Optional Intel PyTorch extension; TEST_IPEX gates IPEX-specific tests.
try:
    import intel_pytorch_extension as ipex
    TEST_IPEX = True
except:
    TEST_IPEX = False

# FX-based quantization requires PyTorch >= 1.8.
PT_VERSION = nc_torch.get_torch_version()
if PT_VERSION >= PyTorchVersionMode.PT18.value:
    FX_MODE = True
else:
    FX_MODE = False
class TestPytorchModel(unittest.TestCase):
    """Tests for the neural_compressor PyTorch model wrapper (MODELS['pytorch']).

    NOTE(review): ``model`` and ``lpot_model`` are *class* attributes shared
    by every test method, and several tests mutate the underlying weights
    (test_get_weight fills fc.bias with 0.1; test_update_weights zeroes it).
    The assertions therefore appear to depend on unittest's default
    alphabetical execution order -- confirm before adding/renaming tests.
    """
    framework = "pytorch"
    # Shared ResNet-18 instance and its neural_compressor wrapper.
    model = torchvision.models.quantization.resnet18()
    lpot_model = MODELS['pytorch'](model)
    def test_get_all_weight_name(self):
        # The wrapped ResNet-18 exposes 62 named weight tensors.
        assert len(list(self.lpot_model.get_all_weight_names())) == 62
    def test_get_weight(self):
        # Overwrite two known parameters directly, then read them back
        # through the wrapper API to check get_weight sees the same storage.
        for name, param in self.model.named_parameters():
            if name == "layer4.1.conv2.weight":
                param.data.fill_(0.0)
            if name == "fc.bias":
                param.data.fill_(0.1)
        assert int(torch.sum(self.lpot_model.get_weight("layer4.1.conv2.weight"))) == 0
        # fc.bias has 1000 entries of 0.1 -> sum is 100.
        assert torch.allclose(
            torch.sum(
                torch.tensor(self.lpot_model.get_weight("fc.bias"))),
            torch.tensor(100.))
    def test_get_input(self):
        # The registered forward pre-hook should capture the exact tensor
        # passed into the next forward call.
        model = MODELS['pytorch'](torchvision.models.quantization.resnet18())
        model.model.eval().fuse_model()
        model.register_forward_pre_hook()
        rand_input = torch.rand(100, 3, 256, 256).float()
        model.model(rand_input)
        assert torch.equal(model.get_inputs('x'), rand_input)
        model.remove_hooks()
    def test_update_weights(self):
        # update_weights must replace the tensor visible via get_weight.
        self.lpot_model.update_weights('fc.bias', torch.zeros([1000]))
        assert int(torch.sum(self.lpot_model.get_weight("fc.bias"))) == 0
    def test_gradient(self):
        # Reading a gradient before any exists must raise.
        with self.assertRaises(AssertionError):
            self.lpot_model.get_gradient('fc.bias')
        shape = None
        # Seed a gradient on fc.bias so update/get have something to act on.
        for name, tensor in self.lpot_model._model.named_parameters():
            if name == 'fc.bias':
                shape = tensor.shape
                tensor.grad = torch.randn(shape)
                break
        new_grad = torch.zeros(shape)
        self.lpot_model.update_gradient('fc.bias', new_grad)
        assert torch.equal(torch.tensor(self.lpot_model.get_gradient('fc.bias')), torch.zeros(shape))
        # get_gradient also accepts a plain tensor and returns its .grad.
        rand_input = torch.rand(100, 3, 256, 256).float()
        rand_input.grad = torch.ones_like(rand_input)
        assert torch.equal(torch.tensor(self.lpot_model.get_gradient(rand_input)),
                           torch.ones_like(rand_input))
    def test_report_sparsity(self):
        # Sparsity > 0 presumably relies on an earlier test having zeroed
        # layer4.1.conv2.weight on the shared model -- see class note.
        df, total_sparsity = self.lpot_model.report_sparsity()
        self.assertTrue(total_sparsity > 0)
        self.assertTrue(len(df) == 22)
# Standard unittest entry point: run every test in this module.
if __name__ == "__main__":
    unittest.main()
|
{
"alphanum_fraction": 0.6811955168,
"author": null,
"avg_line_length": 34.5376344086,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4fbfe5cb60f25aab866c919596b8ef36b5610e69",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 33,
"max_forks_repo_forks_event_max_datetime": "2022-03-25T08:30:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-09-15T07:27:25.000Z",
"max_forks_repo_head_hexsha": "16a4a12045fcb468da4d33769aff2c1a5e2ba6ba",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "intel/neural-compressor",
"max_forks_repo_path": "test/test_model_pytorch.py",
"max_issues_count": 40,
"max_issues_repo_head_hexsha": "130eefa3586b38df6c0ff78cc8807ae273f6a63f",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T08:34:04.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-09-14T02:26:12.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "intel/lp-opt-tool",
"max_issues_repo_path": "test/test_model_pytorch.py",
"max_line_length": 101,
"max_stars_count": 172,
"max_stars_repo_head_hexsha": "aaad4c357a86914ffa583753c9a26d949838a2a5",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "huggingface/neural-compressor",
"max_stars_repo_path": "test/test_model_pytorch.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T06:49:53.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-09-14T18:34:17.000Z",
"num_tokens": 728,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3212
}
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d.axes3d import Axes3D
def load_data(filename):
    """Load a two-column comma-separated data file.

    Each non-empty line must look like ``x,y``.  Blank lines (including a
    trailing newline at EOF) are skipped; the original version raised
    ValueError on them.  Values are converted to float as they are read,
    instead of collecting strings and converting with ``astype`` afterwards.

    Args:
        filename: path to the data file.

    Returns:
        tuple(np.ndarray, np.ndarray): first and second column as float arrays.
    """
    xs, ys = [], []
    with open(filename) as fp:
        for line in fp:
            line = line.strip()
            if not line:
                continue  # tolerate blank/trailing lines
            a, b = line.split(',')
            xs.append(float(a))
            ys.append(float(b))
    return np.array(xs), np.array(ys)
def plot_data(X, y, Xlabel, ylabel):
    """Render a scatter plot of y against X (red 'x' markers) and display it.

    (Translated from the original Chinese docstring: draw the data as a
    scatter diagram.)
    """
    fig = plt.figure()
    axes = fig.gca()
    axes.plot(X, y, 'rx', markersize=10)
    axes.set_xlabel(Xlabel)
    axes.set_ylabel(ylabel)
    plt.show()
def gradient_descent(X, y, theta, alpha, iterations):
    """Run full-batch gradient descent for linear least squares.

    Args:
        X: (m, n) design matrix (bias column already included).
        y: (m,) target vector.
        theta: (n, 1) initial parameter column vector.
        alpha: learning rate.
        iterations: number of update steps.

    Returns:
        np.ndarray: fitted (n, 1) parameter vector.

    Note: the original version also computed a per-iteration cost history
    (J_history) that was never returned or read; that dead work is removed.
    """
    m = len(y)
    col_y = np.asarray(y).reshape(-1, 1)  # ensure column shape for the residual
    for _ in range(iterations):
        grad = X.T @ (X @ theta - col_y) / m
        theta = theta - alpha * grad
    return theta
def compute_cost(X, y, theta) -> float:
    """Least-squares cost J(theta) = (1/2m) * ||X @ theta - y||^2.

    The original was annotated ``-> float`` (and its parameters as
    ``np.matrix``, although callers pass ndarrays) but actually returned a
    1x1 ndarray when theta is a column vector; this version returns a
    genuine Python float in all cases.

    Args:
        X: (m, n) design matrix.
        y: (m,) target vector.
        theta: (n, 1) or (n,) parameter vector.

    Returns:
        float: the mean squared-error cost.
    """
    m = len(y)
    residual = (X @ theta).T - y  # row of prediction errors (broadcasts for both theta shapes)
    return float(residual @ residual.T / (2 * m))
if __name__ == '__main__':
    # Part 1: visualize the data.
    # Read the training data from file.
    X, y = load_data('./ex1data1.txt')
    plot_data(X, y, 'Profit in $10,000s', 'Population of City in 10,000s')
    input('next step')
    # Part 2: cost function and gradient descent.
    # Reshape the data into column form.
    # NOTE(review): this assignment is dead -- it is overwritten on the next
    # line (and X.T of a 1-D array is a no-op anyway).
    train = X.T
    # Prepend a bias column of ones.
    train = np.concatenate((np.ones((X.shape[0], 1)), X[:, None]), axis=1)
    theta = np.zeros((2, 1))
    iterations = 1500
    alpha = 0.01
    J = compute_cost(train, y, theta)
    print('预期损失32.07')  # expected cost ~32.07
    print(J)
    J = compute_cost(train, y, np.array([-1, 2]).T)
    print('预期损失54.24')  # expected cost ~54.24
    print(J)
    theta = gradient_descent(train, y, theta, alpha, iterations)
    print('预期theta为-3.6303 1.1664')  # expected theta ~(-3.6303, 1.1664)
    print('theta', theta.flatten())
    # Plot the fitted regression line over the training data.
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    ax.scatter(X, y, s=75, marker='x',
               c='b', alpha=0.5, label='Admitted')
    ax.plot(X, train@theta, '-')
    plt.legend(['Linear regression', 'Training data'])
    plt.show()
    # Predict profits for two population sizes (inputs are in 10,000s).
    predict1 = np.array([1, 3.5])@theta
    predict2 = np.array([1, 7])@theta
    print('对于35,000 和 70,000人口,做出预测如下: ')
    print(predict1, predict2)
    input('next step')
    # Part 3: visualize the cost surface J(theta0, theta1).
    theta1 = np.linspace(-10, 10, 100)
    theta2 = np.linspace(-1, 4, 100)
    # Cost evaluated over the parameter grid, reused by both plots below.
    J_all = np.zeros((len(theta1), len(theta2)))
    for i, m in enumerate(theta1):
        for j, n in enumerate(theta2):
            theta = np.array([m, n]).T
            J_all[i][j] = compute_cost(train, y, theta)
    # Build the mesh for the x and y axes.
    T1, T2 = np.meshgrid(theta1, theta2)
    pic = plt.figure(2)
    # NOTE(review): gca(projection='3d') is deprecated in newer matplotlib;
    # fig.add_subplot(projection='3d') is the replacement.
    ax = pic.gca(projection='3d')
    # J_all must be transposed to match the meshgrid orientation.
    surf = ax.plot_surface(T1, T2, J_all.T, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    ax.view_init(elev=15, azim=-118)
    plt.show()
    input('next step')
    plt.close()
    # Same cost data as a contour plot with log-spaced levels.
    plt.figure(3)
    cs = plt.contour(T1, T2, J_all.T,
                     np.logspace(-2, 3, 20),
                     colors=('r', 'g', 'b', (1, 1, 0), '#afeeee', '0.5'))
    plt.show()
|
{
"alphanum_fraction": 0.5781862007,
"author": null,
"avg_line_length": 28.1271186441,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f647c95da42357808bb2dc85d02bd8647b959baf",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2018-02-20T15:52:50.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-12-01T17:28:00.000Z",
"max_forks_repo_head_hexsha": "cda2927b4f64ff6c2f2d044428d7e6cc156af255",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Rouzip/Stanford_Machine_Learning",
"max_forks_repo_path": "python/ex1/ex1.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "cda2927b4f64ff6c2f2d044428d7e6cc156af255",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Rouzip/Stanford_Machine_Learning",
"max_issues_repo_path": "python/ex1/ex1.py",
"max_line_length": 74,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "cda2927b4f64ff6c2f2d044428d7e6cc156af255",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Rouzip/Stanford_Machine_Learning",
"max_stars_repo_path": "python/ex1/ex1.py",
"max_stars_repo_stars_event_max_datetime": "2018-03-14T12:05:59.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-12-14T06:35:04.000Z",
"num_tokens": 1147,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3319
}
|
#ifndef BEAST_TEST_STRING_ISTREAM_HPP
#define BEAST_TEST_STRING_ISTREAM_HPP
#include <beast/core/async_result.hpp>
#include <beast/core/bind_handler.hpp>
#include <beast/core/error.hpp>
#include <beast/websocket/teardown.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/throw_exception.hpp>
#include <string>
namespace beast {
namespace test {
/// In-memory stream for tests: synchronous and asynchronous reads serve the
/// bytes of a stored string, writes succeed but discard their data, and the
/// websocket teardown hooks complete immediately without error.
class string_istream
{
    std::string s_;                  // backing bytes handed out by reads
    boost::asio::const_buffer cb_;   // read cursor into s_ (sync path only)
    boost::asio::io_service& ios_;   // service used to post async completions
    std::size_t read_max_;           // per-call byte cap for sync read_some

public:
    // read_max caps how many bytes one sync read_some call may return.
    // The parentheses around std::numeric_limits<...>::max defeat the
    // Windows max() macro.
    string_istream(boost::asio::io_service& ios,
        std::string s, std::size_t read_max =
            (std::numeric_limits<std::size_t>::max)())
        : s_(std::move(s))
        , cb_(boost::asio::buffer(s_))
        , ios_(ios)
        , read_max_(read_max)
    {
    }

    boost::asio::io_service&
    get_io_service()
    {
        return ios_;
    }

    // Throwing overload: delegates to the error_code overload and throws
    // system_error on failure (e.g. eof once the string is exhausted).
    template<class MutableBufferSequence>
    std::size_t
    read_some(MutableBufferSequence const& buffers)
    {
        error_code ec;
        auto const n = read_some(buffers, ec);
        if(ec)
            BOOST_THROW_EXCEPTION(system_error{ec});
        return n;
    }

    // Copies up to read_max_ bytes from the cursor cb_ into buffers and
    // advances the cursor; sets asio::error::eof when nothing remains.
    template<class MutableBufferSequence>
    std::size_t
    read_some(MutableBufferSequence const& buffers,
        error_code& ec)
    {
        auto const n = boost::asio::buffer_copy(
            buffers, cb_, read_max_);
        if(n > 0)
        {
            ec.assign(0, ec.category());
            cb_ = cb_ + n;
        }
        else
        {
            ec = boost::asio::error::eof;
        }
        return n;
    }

    // Completes on the io_service with data copied from s_.
    // NOTE(review): this path erases from s_ directly and ignores both cb_
    // and read_max_, while the sync path advances cb_ -- mixing sync and
    // async reads on one object desynchronizes the two cursors.  Presumably
    // each test uses only one mode; confirm before relying on both.
    template<class MutableBufferSequence, class ReadHandler>
    async_return_type<
        ReadHandler, void(error_code, std::size_t)>
    async_read_some(MutableBufferSequence const& buffers,
        ReadHandler&& handler)
    {
        auto const n = boost::asio::buffer_copy(
            buffers, boost::asio::buffer(s_));
        error_code ec;
        if(n > 0)
            s_.erase(0, n);
        else
            ec = boost::asio::error::eof;
        async_completion<ReadHandler,
            void(error_code, std::size_t)> init{handler};
        ios_.post(bind_handler(
            init.completion_handler, ec, n));
        return init.result.get();
    }

    // Throwing write overload; the error_code overload below never fails.
    template<class ConstBufferSequence>
    std::size_t
    write_some(ConstBufferSequence const& buffers)
    {
        error_code ec;
        auto const n = write_some(buffers, ec);
        if(ec)
            BOOST_THROW_EXCEPTION(system_error{ec});
        return n;
    }

    // Discards the data and reports the full buffer size as written.
    template<class ConstBufferSequence>
    std::size_t
    write_some(ConstBufferSequence const& buffers,
        error_code& ec)
    {
        ec.assign(0, ec.category());
        return boost::asio::buffer_size(buffers);
    }

    // Async write: posts an immediate completion claiming all bytes written.
    template<class ConstBuffeSequence, class WriteHandler>
    async_return_type<
        WriteHandler, void(error_code, std::size_t)>
    async_write_some(ConstBuffeSequence const& buffers,
        WriteHandler&& handler)
    {
        async_completion<WriteHandler,
            void(error_code, std::size_t)> init{handler};
        ios_.post(bind_handler(init.completion_handler,
            error_code{}, boost::asio::buffer_size(buffers)));
        return init.result.get();
    }

    // websocket teardown hooks: succeed immediately (nothing to tear down).
    friend
    void
    teardown(websocket::teardown_tag,
        string_istream&,
        boost::system::error_code& ec)
    {
        ec.assign(0, ec.category());
    }

    template<class TeardownHandler>
    friend
    void
    async_teardown(websocket::teardown_tag,
        string_istream& stream,
        TeardownHandler&& handler)
    {
        stream.get_io_service().post(
            bind_handler(std::move(handler),
                error_code{}));
    }
};
}
}
#endif
|
{
"alphanum_fraction": 0.6051309177,
"author": null,
"avg_line_length": 24.0828025478,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "230fffac93761e6c189654c15aaee5ac3ba55cfc",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "97f133aa87b17c760b90f2358d6ba10bc7ad9d1f",
"max_forks_repo_licenses": [
"ISC"
],
"max_forks_repo_name": "dfm-official/dfm",
"max_forks_repo_path": "src/beast/extras/beast/test/string_istream.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "97f133aa87b17c760b90f2358d6ba10bc7ad9d1f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"ISC"
],
"max_issues_repo_name": "dfm-official/dfm",
"max_issues_repo_path": "src/beast/extras/beast/test/string_istream.hpp",
"max_line_length": 62,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "97f133aa87b17c760b90f2358d6ba10bc7ad9d1f",
"max_stars_repo_licenses": [
"ISC"
],
"max_stars_repo_name": "dfm-official/dfm",
"max_stars_repo_path": "src/beast/extras/beast/test/string_istream.hpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 886,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3781
}
|
[STATEMENT]
lemma row_empty:"row [] i = []"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. row [] i = []
[PROOF STEP]
unfolding row_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map (\<lambda>w. w ! i) [] = []
[PROOF STEP]
by auto
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Matrix_Tensor_Matrix_Tensor",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 108,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
#%%
import tensorflow as tf
import numpy as np
# Scratchpad exploring element-wise comparison ops on a small 1-D tensor.
t = tf.constant([0,1,2,1])
# %%
# Boolean mask: True where the element equals 1.
tf.equal(t, 1)
# %%
# Same mask converted to 0/1 integers.
tf.cast(tf.equal(t, 1), tf.int32)
# %%
# Indices of the elements that are NOT equal to 1.
indices = tf.where(tf.not_equal(t, 1))
|
{
"alphanum_fraction": 0.6055555556,
"author": null,
"avg_line_length": 13.8461538462,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c275beb45a8f9428157cedf201cce21328b73ab1",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "hth945/pytest",
"max_forks_repo_path": "tf/za/equal/test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "hth945/pytest",
"max_issues_repo_path": "tf/za/equal/test.py",
"max_line_length": 38,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "hth945/pytest",
"max_stars_repo_path": "tf/za/equal/test.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 65,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 180
}
|
#!/usr/bin/env python3
# coding=utf-8
import matplotlib.pyplot as plt
import numpy as np
def f(x, y):
    """Height function: a polynomial bump damped by a Gaussian envelope."""
    polynomial = 1 - x / 2 + x ** 5 + y ** 3
    envelope = np.exp(-x ** 2 - y ** 2)
    return polynomial * envelope
# Evaluate f on an n x n grid over [-3, 3] x [-3, 3].
n = 100
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
X, Y = np.meshgrid(x, y)
# use plt.contourf to fill contours
# X, Y and value for (X,Y) point
# plt.contourf(X, Y, f(X, Y), 8, alpha=.75, cmap=plt.cm.hot)
# use plt.contour to add contour lines
# Draw 8 contour levels of the height function.
C = plt.contour(X, Y, f(X, Y), 8)
# C = plt.contour(X, Y, f(X, Y), 8, colors='black')
# plt.clabel(C, inline=True, fontsize=10)
# plt.xticks(())
# plt.yticks(())
plt.show()
|
{
"alphanum_fraction": 0.5930599369,
"author": null,
"avg_line_length": 21.1333333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7c34a7b0c2c3e77ab6acf4293c99a3a5aef01eaf",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c87578d3c04b7345a99fef7390c8ea12c6f2c716",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "qianhk/FeiPython",
"max_forks_repo_path": "Python3Test/matplotlib/countourf_mountain.py",
"max_issues_count": 15,
"max_issues_repo_head_hexsha": "c87578d3c04b7345a99fef7390c8ea12c6f2c716",
"max_issues_repo_issues_event_max_datetime": "2022-03-02T02:55:54.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-11-18T06:09:50.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "qianhk/FeiPython",
"max_issues_repo_path": "Python3Test/matplotlib/countourf_mountain.py",
"max_line_length": 67,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c87578d3c04b7345a99fef7390c8ea12c6f2c716",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "qianhk/FeiPython",
"max_stars_repo_path": "Python3Test/matplotlib/countourf_mountain.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 233,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 634
}
|
[STATEMENT]
lemma locally_empty [iff]: "locally P {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. locally P {}
[PROOF STEP]
by (simp add: locally_def openin_subtopology)
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 69,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import division
from __future__ import print_function
import os
import shutil
import struct
import tempfile
import unittest
import numpy as np
from scipy.io import wavfile
import prep_wavs
class PrepWavsTest(unittest.TestCase):
  """Unit tests for the wav-preparation helpers in prep_wavs."""

  def setUp(self):
    # Fresh scratch directory per test; removed again in tearDown.
    self._tmpDir = tempfile.mkdtemp()

  def tearDown(self):
    if os.path.isdir(self._tmpDir):
      shutil.rmtree(self._tmpDir)

  def _write_wav(self, basename, fs, samples):
    """Write a 16-bit wav into the scratch dir and return its path."""
    path = os.path.join(self._tmpDir, basename)
    wavfile.write(path, fs, samples)
    return path

  def testReadAsFloats(self):
    wav_path = self._write_wav('test.wav', 44100, np.zeros([100], dtype=np.int16))
    fs, signal = prep_wavs.read_as_floats(wav_path)
    self.assertEqual(44100, fs)
    self.assertEqual(100, len(signal))
    self.assertEqual(np.float32, signal.dtype)
    self.assertEqual(0.0, signal[0])
    self.assertEqual(0.0, signal[-1])

  def testReadAndResampleAsFloats(self):
    # 22.05 kHz input resampled to 44.1 kHz doubles the sample count;
    # int16 value 100 maps to 100/32768 in float.
    wav_path = self._write_wav('test.wav', 22050, 100 * np.ones([100], dtype=np.int16))
    signal = prep_wavs.read_and_resample_as_floats(wav_path, 44100)
    self.assertEqual(100 * 2, len(signal))
    self.assertEqual(np.float32, signal.dtype)
    self.assertAlmostEqual(100 / 32768, signal[0])
    self.assertAlmostEqual(100 / 32768, signal[-1])

  def testLoadAndNormalizeWaveform(self):
    # Resampled length (200) is padded/truncated to a multiple of 32 -> 192.
    wav_path = self._write_wav('test.wav', 22050, 100 * np.ones([100], dtype=np.int16))
    signal = prep_wavs.load_and_normalize_waveform(wav_path, 44100, 32)
    self.assertEqual(192, len(signal))
    self.assertEqual(np.float32, signal.dtype)
    self.assertAlmostEqual(100 / 32768, signal[0])
    self.assertAlmostEqual(100 / 32768, signal[-1])

  def testConvert(self):
    in_wav_path = self._write_wav('test_in.wav', 22050, 100 * np.ones([100], dtype=np.int16))
    out_dat_path = os.path.join(self._tmpDir, 'test_out.dat')
    length = prep_wavs.convert(in_wav_path, 44100, 32, out_dat_path)
    self.assertEqual(192, length)
    self.assertTrue(os.path.isfile(out_dat_path))
    # Output is raw little-endian float32: 4 bytes per sample.
    self.assertEqual(192 * 4, os.stat(out_dat_path).st_size)
    with open(out_dat_path, 'rb') as f:
      data = struct.unpack('=192f', f.read())
    self.assertAlmostEqual(100 / 32768, data[0])
    self.assertAlmostEqual(100 / 32768, data[-1])
# Run the tests when executed directly.
if __name__ == '__main__':
  unittest.main()
|
{
"alphanum_fraction": 0.7043478261,
"author": null,
"avg_line_length": 35.1764705882,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "544167e99d7adf89cea4802d0e0a80df0062e541",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2019-05-19T04:12:05.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-04-09T10:06:30.000Z",
"max_forks_repo_head_hexsha": "d7f643d113829c62a03b383c81c728c4b43896f2",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "code945/tfjs-models",
"max_forks_repo_path": "speech-commands/training/browser-fft/prep_wavs_test.py",
"max_issues_count": 131,
"max_issues_repo_head_hexsha": "d7f643d113829c62a03b383c81c728c4b43896f2",
"max_issues_repo_issues_event_max_datetime": "2021-08-02T21:22:44.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-04-20T21:19:47.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "code945/tfjs-models",
"max_issues_repo_path": "speech-commands/training/browser-fft/prep_wavs_test.py",
"max_line_length": 80,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "d7f643d113829c62a03b383c81c728c4b43896f2",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "code945/tfjs-models",
"max_stars_repo_path": "speech-commands/training/browser-fft/prep_wavs_test.py",
"max_stars_repo_stars_event_max_datetime": "2020-04-04T06:56:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-10-24T18:20:14.000Z",
"num_tokens": 778,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2990
}
|
[STATEMENT]
lemma kc_8x7_hd: "hd kc8x7 = (1,1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hd kc8x7 = (1, 1)
[PROOF STEP]
by eval
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Knights_Tour_KnightsTour",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 77,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
import numpy as np
import itertools
from affordable.affordable import Affordable, get_action
ACTIONS = ('np', 'up', 'dn', 'lf', 'rt', 'rs')


class Shaman(Affordable):
    """An affordable that draws on a 2-D canvas by moving a cursor.

    The cursor starts at the canvas centre.  Each movement action shifts it
    one cell (clamped at the borders) and marks the visited cell with 1.0;
    'rs' wipes the canvas and recentres the cursor, 'np' is a no-op.
    """

    # Shaman names whose concrete action is looked up as an attribute on
    # the object returned by get_action().
    _NAMED = ('shaman_hl', 'shaman_hr', 'shaman_rl', 'shaman_rr')

    def __init__(self, ctx, name, width, height):
        super(Shaman, self).__init__(ctx, name)
        self.width = width
        self.height = height
        self._recenter()

    def _recenter(self):
        # Shared by __init__, reset and rs (the original duplicated this
        # blank-canvas/centre-cursor logic in all three places).
        self.canvas = np.zeros([self.height, self.width])
        self.x = self.width // 2
        self.y = self.height // 2

    def available_actions(self):
        # NOTE(review): itertools.product over a single iterable merely wraps
        # each action name in a 1-tuple; a second iterable may have been
        # intended -- confirm against callers before changing.
        return list(itertools.product(ACTIONS))

    def reset(self):
        super(Shaman, self).reset()
        self._recenter()

    def act(self, action):
        """Resolve this shaman's named sub-action, then execute each step."""
        nm = self.name()
        if nm in self._NAMED:
            # One attribute lookup replaces the original four-way if/elif
            # chain; behavior is identical for these four names.
            action = getattr(get_action(self.ctx, action=action), nm)
        steps = {'up': self.up, 'dn': self.dn, 'lf': self.lf,
                 'rt': self.rt, 'rs': self.rs}
        for a in action:
            handler = steps.get(a)  # 'np' (and unknown codes) do nothing
            if handler is not None:
                handler()

    def up(self):
        """Move one cell up (clamped at the top edge) and mark the cell."""
        self.y = max(self.y - 1, 0)
        self.canvas[self.y, self.x] = 1.0

    def dn(self):
        """Move one cell down (clamped at the bottom edge) and mark the cell."""
        self.y = min(self.y + 1, self.height - 1)
        self.canvas[self.y, self.x] = 1.0

    def lf(self):
        """Move one cell left (clamped at the left edge) and mark the cell."""
        self.x = max(self.x - 1, 0)
        self.canvas[self.y, self.x] = 1.0

    def rt(self):
        """Move one cell right (clamped at the right edge) and mark the cell."""
        self.x = min(self.x + 1, self.width - 1)
        self.canvas[self.y, self.x] = 1.0

    def rs(self):
        """Clear the canvas and recentre the cursor."""
        self._recenter()
|
{
"alphanum_fraction": 0.4997671169,
"author": null,
"avg_line_length": 25.5595238095,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d229da0da73a79e606221ff0ee0e15f818d67d03",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "66ff00b57b197788113b51af97bf176451206c75",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mountain/numbers",
"max_forks_repo_path": "src/numx/shaman.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "66ff00b57b197788113b51af97bf176451206c75",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mountain/numbers",
"max_issues_repo_path": "src/numx/shaman.py",
"max_line_length": 66,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "66ff00b57b197788113b51af97bf176451206c75",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mountain/numbers",
"max_stars_repo_path": "src/numx/shaman.py",
"max_stars_repo_stars_event_max_datetime": "2021-08-13T23:38:14.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-12-14T07:09:38.000Z",
"num_tokens": 575,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2147
}
|
module SurfaceCouplingTests
using Test
using Gridap
import Gridap: ∇
using LinearAlgebra: tr, ⋅
# Analytical functions
# Manufactured solution: quadratic velocity u and linear pressure p, with
# source terms s (solid), f (fluid) and divergence data g derived from them
# so the coupled problem has a known exact solution.
u(x) = VectorValue( x[1]^2 + 2*x[2]^2, -x[1]^2 )
∇u(x) = TensorValue( 2*x[1], 4*x[2], -2*x[1], zero(x[1]) )
Δu(x) = VectorValue( 6, -2 )
p(x) = x[1] + 3*x[2]
∇p(x) = VectorValue(1,3)
s(x) = -Δu(x)
f(x) = -Δu(x) + ∇p(x)
g(x) = tr(∇u(x))
# Register the hand-written gradients so Gridap's ∇ operator uses them.
∇(::typeof(u)) = ∇u
∇(::typeof(p)) = ∇p
# Geometry + Integration
# Square domain (-1,1) x (-1,1) discretized with an n x n Cartesian mesh.
n = 20
mesh = (n,n)
domain = 2 .* (0,1,0,1) .- 1
order = 1
model = CartesianDiscreteModel(domain, mesh)
labels = get_face_labeling(model)
add_tag_from_tags!(labels,"dirichlet",[1,2,5])
add_tag_from_tags!(labels,"neumann",[6,7,8])
Ω = Triangulation(model)
# A cell is "solid" when its barycenter lies inside the disc of radius R
# centered at the origin; the remaining cells form the fluid part.
const R = 0.4
function is_in(coords)
  n = length(coords)
  x = (1/n)*sum(coords)
  d = x[1]^2 + x[2]^2 - R^2
  d < 0
end
cell_to_coords = get_cell_coordinates(Ω)
cell_to_is_solid = lazy_map(is_in,cell_to_coords)
cell_to_is_fluid = lazy_map(!,cell_to_is_solid)
Ωs = Triangulation(model,cell_to_is_solid)
Ωf = Triangulation(model,cell_to_is_fluid)
# Neumann boundary and the interface between the fluid and solid parts.
Λ = BoundaryTriangulation(model,labels,tags="neumann")
Γ = InterfaceTriangulation(Ωf,Ωs)
n_Λ = get_normal_vector(Λ)
n_Γ = get_normal_vector(Γ)
order = 2
degree = 2*order
dΩ = Measure(Ω,degree)
dΩs = Measure(Ωs,degree)
dΩf = Measure(Ωf,degree)
dΛ = Measure(Λ,degree)
dΓ = Measure(Γ,degree)
# FE Spaces
# Continuous vector-valued field on the whole domain; discontinuous scalar
# pressure restricted to the fluid part.
reffe_u = ReferenceFE(lagrangian,VectorValue{2,Float64},order)
reffe_p = ReferenceFE(lagrangian,Float64,order-1,space=:P)
V = TestFESpace(Ω,reffe_u,conformity=:H1,labels=labels,dirichlet_tags="dirichlet")
Q = TestFESpace(Ωf,reffe_p,conformity=:L2)
U = TrialFESpace(V,u)
P = Q
Y = MultiFieldFESpace([V,Q])
X = MultiFieldFESpace([U,P])
#uh, ph = FEFunction(X,rand(num_free_dofs(X)))
#vh, qh = FEFunction(Y,rand(num_free_dofs(Y)))
#writevtk(Ω,"trian",cellfields=["uh"=>uh,"ph"=>ph,"vh"=>vh,"qh"=>qh])
# Weak form
# Diffusion term on the solid; diffusion + pressure/divergence coupling on
# the fluid.  The last term of l imposes the pressure load on the interface Γ.
a((u,p),(v,q)) =
  ∫( ∇(v)⊙∇(u) )*dΩs +
  ∫( ∇(v)⊙∇(u) - (∇⋅v)*p + q*(∇⋅u) )*dΩf
l((v,q)) =
  ∫( v⋅s )*dΩs +
  ∫( v⋅f + q*g )*dΩf +
  ∫( v⋅(n_Λ⋅∇u) - (n_Λ⋅v)*p )*dΛ +
  ∫( - (n_Γ.⁺⋅v.⁺)*p )*dΓ
# FE problem
op = AffineFEOperator(a,l,X,Y)
uh, ph = solve(op)
# Visualization
eu = u - uh
ep = p - ph
#writevtk(Ω,"trian_Ω",cellfields=["uh"=>uh,"ph"=>ph,"eu"=>eu,"ep"=>ep])
#writevtk(Ωs,"trian_Ωs",cellfields=["uh"=>uh,"ph"=>ph,"eu"=>eu,"ep"=>ep])
#writevtk(Γ,"trian_Γ",cellfields=["uh+"=>uh.⁺,"p"=>p,"n+"=>n_Γ.⁺])
#writevtk(Λ,"trian_Λ",cellfields=["uh"=>uh,"ph"=>ph,"eu"=>eu,"ep"=>ep,"n"=>n_Λ])
#writevtk(Ωf,"trian_Ωf",cellfields=["uh"=>uh,"ph"=>ph,"eu"=>eu,"ep"=>ep])
# Errors
# The exact solution lies in the FE space, so the discrete errors should be
# at machine-precision level; the tolerance below reflects that.
eu_l2 = sqrt(sum(∫( eu⋅eu )*dΩ))
eu_h1 = sqrt(sum(∫( eu⋅eu + ∇(eu)⊙∇(eu) )*dΩ))
ep_l2 = sqrt(sum(∫( ep*ep )*dΩf))
tol = 1.0e-9
@test eu_l2 < tol
@test eu_h1 < tol
@test ep_l2 < tol
end # module
|
{
"alphanum_fraction": 0.626691042,
"author": null,
"avg_line_length": 21.7063492063,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "490f958fa331db20280e7f274490e73f0792b228",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 61,
"max_forks_repo_forks_event_max_datetime": "2022-03-28T02:56:24.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-12-30T23:35:37.000Z",
"max_forks_repo_head_hexsha": "1fb3dc9abf8c47685637901bd14a74e4355a9492",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "aerappa/Gridap.jl",
"max_forks_repo_path": "test/GridapTests/SurfaceCouplingTests.jl",
"max_issues_count": 576,
"max_issues_repo_head_hexsha": "1fb3dc9abf8c47685637901bd14a74e4355a9492",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T08:21:35.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-05-16T20:50:22.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "aerappa/Gridap.jl",
"max_issues_repo_path": "test/GridapTests/SurfaceCouplingTests.jl",
"max_line_length": 82,
"max_stars_count": 390,
"max_stars_repo_head_hexsha": "1fb3dc9abf8c47685637901bd14a74e4355a9492",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "aerappa/Gridap.jl",
"max_stars_repo_path": "test/GridapTests/SurfaceCouplingTests.jl",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T01:59:17.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-05-16T17:38:02.000Z",
"num_tokens": 1258,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2735
}
|
In Chestnut Park there is a roundhouse that serves as a mini community center for the neighborhood. Sadly it is often the target of vandalism.
|
{
"alphanum_fraction": 0.8,
"author": null,
"avg_line_length": 36.25,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "6bbe95273dd8147aed24e1f43b3eb4c962f282c3",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "voflo/Search",
"max_forks_repo_path": "lab/davisWiki/Chestnut_Park_Roundhouse.f",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "voflo/Search",
"max_issues_repo_path": "lab/davisWiki/Chestnut_Park_Roundhouse.f",
"max_line_length": 142,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "voflo/Search",
"max_stars_repo_path": "lab/davisWiki/Chestnut_Park_Roundhouse.f",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 30,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 145
}
|
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
"""
This module contains the main function to interact with tsfresh: extract features
"""
from __future__ import absolute_import, division
import logging
import warnings
from functools import partial
from multiprocessing import Pool
import numpy as np
import pandas as pd
from builtins import str
from multiprocessing import Pool
from functools import partial
from six.moves.queue import Queue
import logging
import pandas as pd
import numpy as np
from tqdm import tqdm
from tsfresh.feature_extraction.settings import ComprehensiveFCParameters, get_aggregate_functions, get_apply_functions
from tsfresh import defaults
from tsfresh.utilities import dataframe_functions, profiling
_logger = logging.getLogger(__name__)
def extract_features(timeseries_container, default_fc_parameters=None,
                     kind_to_fc_parameters=None,
                     column_id=None, column_sort=None, column_kind=None, column_value=None,
                     parallelization=None, chunksize=defaults.CHUNKSIZE,
                     n_processes=defaults.N_PROCESSES, show_warnings=defaults.SHOW_WARNINGS,
                     disable_progressbar=defaults.DISABLE_PROGRESSBAR,
                     impute_function=defaults.IMPUTE_FUNCTION,
                     profile=defaults.PROFILING,
                     profiling_filename=defaults.PROFILING_FILENAME,
                     profiling_sorting=defaults.PROFILING_SORTING):
    """
    Extract features from

    * a :class:`pandas.DataFrame` containing the different time series

    or

    * a dictionary of :class:`pandas.DataFrame` each containing one type of time series

    In both cases a :class:`pandas.DataFrame` with the calculated features will be returned.

    For a list of all the calculated time series features, please see the
    :class:`~tsfresh.feature_extraction.settings.ComprehensiveFCParameters` class,
    which is used to control which features with which parameters are calculated.

    For a detailed explanation of the different parameters and data formats please see :ref:`data-formats-label`.

    Examples
    ========

    >>> from tsfresh.examples import load_robot_execution_failures
    >>> from tsfresh import extract_features
    >>> df, _ = load_robot_execution_failures()
    >>> X = extract_features(df, column_id='id', column_sort='time')

    :param timeseries_container: The pandas.DataFrame with the time series to compute the features for, or a
            dictionary of pandas.DataFrames.
    :type timeseries_container: pandas.DataFrame or dict
    :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
           which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
           more information.
    :type default_fc_parameters: dict
    :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters
            object (which is the value), will be used instead of the default_fc_parameters.
    :type kind_to_fc_parameters: dict
    :param column_id: The name of the id column to group by.
    :type column_id: str
    :param column_sort: The name of the sort column.
    :type column_sort: str
    :param column_kind: The name of the column keeping record on the kind of the value.
    :type column_kind: str
    :param column_value: The name for the column keeping the value itself.
    :type column_value: str
    :param parallelization: Either ``'per_sample'`` or ``'per_kind'`` , see
                            :func:`~tsfresh.feature_extraction.extraction._extract_features_parallel_per_sample`,
                            :func:`~tsfresh.feature_extraction.extraction._extract_features_parallel_per_kind` and
                            :ref:`parallelization-label` for details.
                            Choosing None makes the algorithm look for the best parallelization technique by applying
                            some general assumptions.
    :type parallelization: str
    :param chunksize: The size of one chunk for the parallelisation
    :type chunksize: None or int
    :param n_processes: The number of processes to use for parallelisation.
    :type n_processes: int
    :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
    :type show_warnings: bool
    :param disable_progressbar: Do not show a progressbar while doing the calculation.
    :type disable_progressbar: bool
    :param impute_function: None, if no imputing should happen or the function to call for imputing.
    :type impute_function: None or function
    :param profile: Turn on profiling during feature extraction
    :type profile: bool
    :param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package for
           more information)
    :type profiling_sorting: basestring
    :param profiling_filename: Where to save the profiling results.
    :type profiling_filename: basestring
    :return: The (maybe imputed) DataFrame containing extracted features.
    :rtype: pandas.DataFrame
    """
    # NOTE: no logging.basicConfig() here. This is library code; configuring
    # handlers on the root logger is the application's responsibility (see the
    # "logging from libraries" guidance in the Python logging HOWTO). The
    # previous in-function `import logging` + `logging.basicConfig()` call
    # silently mutated the global logging configuration of every caller.

    # Always use the standardized way of storing the data.
    # See the function normalize_input_to_internal_representation for more information.
    kind_to_df_map, column_id, column_value = \
        dataframe_functions.normalize_input_to_internal_representation(df_or_dict=timeseries_container,
                                                                       column_id=column_id,
                                                                       column_sort=column_sort,
                                                                       column_kind=column_kind,
                                                                       column_value=column_value)
    # Use the standard setting if the user did not supply ones himself.
    if default_fc_parameters is None:
        default_fc_parameters = ComprehensiveFCParameters()
    # Choose the parallelization according to a rule-of-thumb: splitting per
    # sample only pays off when there are few kinds relative to the processes.
    if parallelization is None:
        parallelization = 'per_sample' if n_processes / 2 > len(kind_to_df_map) else 'per_kind'
    _logger.info('Parallelizing feature calculation {}'.format(parallelization))
    # If requested, do profiling (advanced feature)
    if profile:
        profiler = profiling.start_profiling()
    # Dispatch to the chosen parallelization strategy.
    if parallelization == 'per_kind':
        calculation_function = _extract_features_parallel_per_kind
    elif parallelization == 'per_sample':
        calculation_function = _extract_features_parallel_per_sample
    else:
        raise ValueError("Argument parallelization must be one of: 'per_kind', 'per_sample'")
    result = calculation_function(kind_to_df_map,
                                  default_fc_parameters=default_fc_parameters,
                                  kind_to_fc_parameters=kind_to_fc_parameters,
                                  column_id=column_id,
                                  column_value=column_value,
                                  chunksize=chunksize,
                                  n_processes=n_processes,
                                  show_warnings=show_warnings,
                                  disable_progressbar=disable_progressbar,
                                  impute_function=impute_function
                                  )
    # Turn off profiling if it was turned on
    if profile:
        profiling.end_profiling(profiler, filename=profiling_filename,
                                sorting=profiling_sorting)
    return result
def _extract_features_parallel_per_kind(kind_to_df_map,
                                        column_id, column_value,
                                        default_fc_parameters,
                                        kind_to_fc_parameters=None,
                                        chunksize=defaults.CHUNKSIZE,
                                        n_processes=defaults.N_PROCESSES, show_warnings=defaults.SHOW_WARNINGS,
                                        disable_progressbar=defaults.DISABLE_PROGRESSBAR,
                                        impute_function=defaults.IMPUTE_FUNCTION):
    """
    Parallelize the feature extraction per kind.

    :param kind_to_df_map: The time series to compute the features for in our internal format
    :type kind_to_df_map: dict of pandas.DataFrame
    :param column_id: The name of the id column to group by.
    :type column_id: str
    :param column_value: The name for the column keeping the value itself.
    :type column_value: str
    :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
           which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
           more information.
    :type default_fc_parameters: dict
    :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters
            object (which is the value), will be used instead of the default_fc_parameters.
    :type kind_to_fc_parameters: dict
    :param chunksize: The size of one chunk for the parallelisation
    :type chunksize: None or int
    :param n_processes: The number of processes to use for parallelisation.
    :type n_processes: int
    :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
    :type show_warnings: bool
    :param disable_progressbar: Do not show a progressbar while doing the calculation.
    :type disable_progressbar: bool
    :param impute_function: None, if no imputing should happen or the function to call for imputing.
    :type impute_function: None or function
    :return: The (maybe imputed) DataFrame containing extracted features.
    :rtype: pandas.DataFrame
    """
    partial_extract_features_for_one_time_series = partial(_extract_features_for_one_time_series,
                                                           column_id=column_id,
                                                           column_value=column_value,
                                                           default_fc_parameters=default_fc_parameters,
                                                           kind_to_fc_parameters=kind_to_fc_parameters,
                                                           show_warnings=show_warnings)
    pool = Pool(n_processes)
    # Wrap the whole pool usage in try/finally so worker processes are always
    # reaped, even when one of the feature calculators raises (the original
    # code leaked the pool on any exception).
    try:
        if not chunksize:
            chunksize = _calculate_best_chunksize(kind_to_df_map, n_processes)
        total_number_of_expected_results = len(kind_to_df_map)
        # One task per kind; tqdm only wraps the lazy iterator for progress.
        extracted_features = tqdm(pool.imap_unordered(partial_extract_features_for_one_time_series, kind_to_df_map.items(),
                                                      chunksize=chunksize), total=total_number_of_expected_results,
                                  desc="Feature Extraction", disable=disable_progressbar)
        pool.close()
        # Concatenate all partial results (this consumes the iterator).
        result = pd.concat(extracted_features, axis=1, join='outer').astype(np.float64)
        # Impute the result if requested
        if impute_function is not None:
            impute_function(result)
    finally:
        # close() is idempotent; join() waits for the workers to exit.
        pool.close()
        pool.join()
    return result
def _extract_features_parallel_per_sample(kind_to_df_map,
                                          column_id, column_value,
                                          default_fc_parameters,
                                          kind_to_fc_parameters=None,
                                          chunksize=defaults.CHUNKSIZE,
                                          n_processes=defaults.N_PROCESSES, show_warnings=defaults.SHOW_WARNINGS,
                                          disable_progressbar=defaults.DISABLE_PROGRESSBAR,
                                          impute_function=defaults.IMPUTE_FUNCTION):
    """
    Parallelize the feature extraction per kind and per sample.

    As the splitting of the dataframes per kind along column_id is quite costly, we settled for an async map in this
    function. The result objects are temporarily stored in a fifo queue from which they can be retrieved in order
    of submission.

    :param kind_to_df_map: The time series to compute the features for in our internal format
    :type kind_to_df_map: dict of pandas.DataFrame
    :param column_id: The name of the id column to group by.
    :type column_id: str
    :param column_value: The name for the column keeping the value itself.
    :type column_value: str
    :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
           which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
           more information.
    :type default_fc_parameters: dict
    :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters
            object (which is the value), will be used instead of the default_fc_parameters.
    :type kind_to_fc_parameters: dict
    :param chunksize: The size of one chunk for the parallelisation
    :type chunksize: None or int
    :param n_processes: The number of processes to use for parallelisation.
    :type n_processes: int
    :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
    :type show_warnings: bool
    :param disable_progressbar: Do not show a progressbar while doing the calculation.
    :type disable_progressbar: bool
    :param impute_function: None, if no imputing should happen or the function to call for imputing.
    :type impute_function: None or function
    :return: The (maybe imputed) DataFrame containing extracted features.
    :rtype: pandas.DataFrame
    """
    partial_extract_features_for_one_time_series = partial(_extract_features_for_one_time_series,
                                                           column_id=column_id,
                                                           column_value=column_value,
                                                           default_fc_parameters=default_fc_parameters,
                                                           kind_to_fc_parameters=kind_to_fc_parameters,
                                                           show_warnings=show_warnings)
    pool = Pool(n_processes)
    total_number_of_expected_results = 0
    # Submit map jobs per kind per sample; one imap_unordered iterator per
    # kind is queued so results can later be drained in submission order.
    results_fifo = Queue()
    for kind, df_kind in kind_to_df_map.items():
        df_grouped_by_id = df_kind.groupby(column_id)
        total_number_of_expected_results += len(df_grouped_by_id)
        # NOTE(review): once computed for the first kind, this chunksize is
        # reused for every subsequent kind regardless of its group count —
        # confirm this is intended.
        if not chunksize:
            chunksize = _calculate_best_chunksize(df_grouped_by_id, n_processes)
        results_fifo.put(
            pool.imap_unordered(
                partial_extract_features_for_one_time_series,
                [(kind, df_group) for _, df_group in df_grouped_by_id],
                chunksize=chunksize
            )
        )
    pool.close()
    # Wait for the jobs to complete and concatenate the partial results
    # NOTE(review): `dfs_per_kind` is never used below — candidate for removal.
    dfs_per_kind = []
    # Do this all with a progress bar
    with tqdm(total=total_number_of_expected_results, desc="Feature Extraction",
              disable=disable_progressbar) as progress_bar:
        # We need some sort of measure, when a new result is there. So we wrap the
        # map_results into another iterable which updates the progress bar each time,
        # a new result is there
        def iterable_with_tqdm_update(queue, progress_bar):
            # Generator: yields each per-sample result while ticking the bar.
            for element in queue:
                progress_bar.update(1)
                yield element
        result = pd.DataFrame()
        while not results_fifo.empty():
            map_result = results_fifo.get()
            dfs_kind = iterable_with_tqdm_update(map_result, progress_bar)
            # Rows = samples of one kind; consuming the generator blocks until
            # the pool has produced all results for that kind.
            df_tmp = pd.concat(dfs_kind, axis=0).astype(np.float64)
            # Impute the result if requested
            if impute_function is not None:
                impute_function(df_tmp)
            # Columns of different kinds are appended side by side.
            result = pd.concat([result, df_tmp], axis=1).astype(np.float64)
    pool.join()
    return result
def _calculate_best_chunksize(iterable_list, n_processes):
"""
Helper function to calculate the best chunksize for a given number of elements to calculate.
The formula is more or less an empirical result.
:param iterable_list: A list which defines how many calculations there need to be.
:param n_processes: The number of processes that will be used in the calculation.
:return: The chunksize which should be used.
TODO: Investigate which is the best chunk size for different settings.
"""
chunksize, extra = divmod(len(iterable_list), n_processes * 5)
if extra:
chunksize += 1
return chunksize
def _extract_features_for_one_time_series(prefix_and_dataframe, column_id, column_value,
                                          default_fc_parameters,
                                          kind_to_fc_parameters=None,
                                          show_warnings=defaults.SHOW_WARNINGS):
    """
    Extract time series features for a given data frame based on the passed settings.

    This is an internal function, please use the extract_features function.

    The `dataframe` is expected to have at least two columns: column_id and column_value. The data is grouped together
    by their column_id value and the time series features are calculated independently for each of the groups.
    As a result, the function returns a :class:`pandas.DataFrame` with the ids as an index and as many columns as there
    were features calculated.

    To distinguish the features from others, all the calculated columns are given the prefix passed in by column_prefix.

    For example, if you pass in a `dataframe` of shape

        +====+=======+=====+
        | id | value | ... |
        +====+=======+=====+
        | A  | 1     | ... |
        +----+-------+-----+
        | A  | 2     | ... |
        +----+-------+-----+
        | A  | 3     | ... |
        +----+-------+-----+
        | B  | 1     | ... |
        +----+-------+-----+
        | B  | 2     | ... |
        +----+-------+-----+
        | B  | 3     | ... |
        +----+-------+-----+

    with `column_id="id"`, `column_value="value"` and `column_prefix="prefix"` the resulting :class:`pandas.DataFrame`
    will have shape

        +=======+==================+==================+=====+==================+
        | Index | prefix_feature_1 | prefix_feature_2 | ... | prefix_feature_N |
        +=======+==================+==================+=====+==================+
        | A     | ...              | ...              | ... | ...              |
        +-------+------------------+------------------+-----+------------------+
        | B     | ...              | ...              | ... | ...              |
        +-------+------------------+------------------+-----+------------------+

    where N is the number of features that were calculated. Which features are calculated is controlled by the
    passed settings instance (see :class:`~tsfresh.feature_extraction.settings.ComprehensiveFCParameters` for a list of
    all possible features to calculate).

    The parameter `dataframe` is not allowed to have any NaN value in it. It is possible to have different numbers
    of values for different ids.

    :param prefix_and_dataframe: Tuple of column_prefix and dataframe
        column_prefix is the string that each extracted feature will be prefixed with (for better separation)
        dataframe with at least the columns column_id and column_value to extract the time
        series features for.
    :type prefix_and_dataframe: (str, DataFrame)
    :param column_id: The name of the column with the ids.
    :type column_id: str
    :param column_value: The name of the column with the values.
    :type column_value: str
    :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
           which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
           more information.
    :type default_fc_parameters: dict
    :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters
            object (which is the value), will be used instead of the default_fc_parameters.
    :type kind_to_fc_parameters: dict
    :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
    :type show_warnings: bool
    :return: A dataframe with the extracted features as the columns (prefixed with column_prefix) and as many
        rows as their are unique values in the id column.
    """
    if kind_to_fc_parameters is None:
        kind_to_fc_parameters = {}
    column_prefix, dataframe = prefix_and_dataframe
    column_prefix = str(column_prefix)
    # Ensure features are calculated on float64.
    # NOTE(review): this writes back into the caller's dataframe — confirm the
    # callers always pass a private copy/group.
    dataframe[column_value] = dataframe[column_value].astype(np.float64)
    # If there are no special settings for this column_prefix, use the default ones.
    if column_prefix in kind_to_fc_parameters:
        fc_parameters = kind_to_fc_parameters[column_prefix]
    else:
        fc_parameters = default_fc_parameters
    with warnings.catch_warnings():
        if not show_warnings:
            warnings.simplefilter("ignore")
        else:
            warnings.simplefilter("default")
        # Calculate the aggregation functions (one scalar per id and feature).
        column_name_to_aggregate_function = get_aggregate_functions(fc_parameters, column_prefix)
        if column_name_to_aggregate_function:
            extracted_features = dataframe.groupby(column_id)[column_value].aggregate(column_name_to_aggregate_function)
        else:
            extracted_features = pd.DataFrame(index=dataframe[column_id].unique())
        # Calculate the apply functions (feature calculators returning several values).
        apply_functions = get_apply_functions(fc_parameters, column_prefix)
        if apply_functions:
            list_of_extracted_feature_dataframes = [extracted_features]
            for apply_function, kwargs in apply_functions:
                current_result = dataframe.groupby(column_id)[column_value].apply(apply_function, **kwargs).unstack()
                if len(current_result) > 0:
                    list_of_extracted_feature_dataframes.append(current_result)
            # The list always contains at least ``extracted_features`` itself, so
            # concatenate unconditionally. The former ``join_axes`` argument was
            # removed from ``pd.concat`` in pandas 1.0; reindexing the outer join
            # to the aggregate result's index is the documented replacement.
            extracted_features = pd.concat(list_of_extracted_feature_dataframes,
                                           axis=1).reindex(extracted_features.index)
    return extracted_features
|
{
"alphanum_fraction": 0.6451168596,
"author": null,
"avg_line_length": 45.2884990253,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "dc8f3c38bfddb59edd690015b328f2f4b1823871",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "83865d67b7a931a9eff4ba6fd4d033b2219225f1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "davidcrowland/layer_vb_tagging",
"max_forks_repo_path": "extra/tsfresh_examples/tsfresh/feature_extraction/extraction.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "83865d67b7a931a9eff4ba6fd4d033b2219225f1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "davidcrowland/layer_vb_tagging",
"max_issues_repo_path": "extra/tsfresh_examples/tsfresh/feature_extraction/extraction.py",
"max_line_length": 120,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "83865d67b7a931a9eff4ba6fd4d033b2219225f1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "davidcrowland/layer_vb_tagging",
"max_stars_repo_path": "extra/tsfresh_examples/tsfresh/feature_extraction/extraction.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4516,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 23233
}
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from copy import deepcopy
from graphviz import Digraph
from torch import nn
from .. import dynamic_graph
logger = logging.getLogger(__name__)
# Default Graphviz styling (colors, font, spacing) shared by the graph, node
# and edge attribute declarations in ``draw_dot`` below.
graph_theme = {
    "background_color": "#FFFFFF",
    "fill_color": "#E8E8E8",
    "outline_color": "#000000",
    "font_color": "#000000",
    "font_name": "Times",
    "font_size": "10",
    "margin": "0,0",
    "padding": "1.0,1.0",
}
def to_networkx(context):
    """Convert the context's internal graph into a ``networkx.DiGraph``.

    Each node keeps its ``type`` and ``id`` attributes and gets its scope
    list collapsed into a single '/'-joined string.
    """
    import networkx as nx

    source_graph = context.graph
    nx_graph = nx.DiGraph()
    for name, attrs in source_graph.nodes.items():
        nx_graph.add_node(name,
                          type=attrs['type'],
                          id=attrs['id'],
                          scope='/'.join(attrs['scope']))
    for src, dst in source_graph.edges:
        nx_graph.add_edge(src, dst)
    return nx_graph
def dump_graph(context, path):
    """Serialize the context's graph to ``path`` in Graphviz .dot format."""
    import networkx as nx
    nx.drawing.nx_pydot.write_dot(to_networkx(context), path)
def draw_dot(context):
    """Render the context's graph as a ``graphviz.Digraph`` styled with
    ``graph_theme``; one box per node, one edge per successor link."""
    graph = context.graph
    # Font settings are identical for graph, node and edge declarations.
    font_attrs = {
        "fontsize": graph_theme["font_size"],
        "fontcolor": graph_theme["font_color"],
        "fontname": graph_theme["font_name"],
    }
    dot = Digraph()
    dot.attr("graph",
             bgcolor=graph_theme["background_color"],
             color=graph_theme["outline_color"],
             **font_attrs,
             margin=graph_theme["margin"],
             # rankdir="LR",
             pad=graph_theme["padding"])
    dot.attr("node", shape="box",
             style="filled", margin="0,0",
             fillcolor=graph_theme["fill_color"],
             color=graph_theme["outline_color"],
             **font_attrs)
    dot.attr("edge", style="solid",
             color=graph_theme["outline_color"],
             **font_attrs)
    for node in graph.nodes:
        dot.node(graph.nodes[node]['name'])
        for child in graph.successors(node):
            dot.edge(node, child)
    return dot
def build_graph(module: nn.Module, context_name, input_args=None, reset_context=False):
    """Trace ``module`` inside the dynamic-graph context named ``context_name``.

    Runs one forward pass while the tracing context is active — the module's
    ``dummy_forward_fn`` if it has one, otherwise ``module(*input_args)`` —
    and then restores the module's original state dict.

    :param module: the model to trace.
    :param context_name: name of the tracing context to create or reuse.
    :param input_args: positional inputs for the forward pass; only used when
        the module has no ``dummy_forward_fn``.
    :param reset_context: if True, obtain the context via
        ``dynamic_graph.reset_context`` instead of ``get_context``.
    :return: the tracing context object.
    """
    logger.info("Building graph: {}".format(context_name))
    # Snapshot the weights before the forward pass and restore them after —
    # presumably to undo any state mutated during tracing (e.g. batch-norm
    # running statistics); NOTE(review): confirm that is the intent.
    sd = deepcopy(module.state_dict())
    if reset_context:
        ctx = dynamic_graph.reset_context(context_name)
    else:
        ctx = dynamic_graph.get_context(context_name)
    with dynamic_graph.context(context_name):
        if hasattr(module, "dummy_forward_fn"):
            module.dummy_forward_fn(module)
        else:
            module(*input_args)
    module.load_state_dict(sd)
    return ctx
|
{
"alphanum_fraction": 0.6524064171,
"author": null,
"avg_line_length": 31.79,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "704e9e9cecd582ae1edf47a94f0ec092c0ba8214",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-07-13T03:16:46.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-13T03:16:46.000Z",
"max_forks_repo_head_hexsha": "a0bb39424151a98e1ca80c4aa5c865636d401785",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "aalborov/openvino_training_extensions",
"max_forks_repo_path": "pytorch_toolkit/nncf/nncf/dynamic_graph/utils.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a0bb39424151a98e1ca80c4aa5c865636d401785",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "aalborov/openvino_training_extensions",
"max_issues_repo_path": "pytorch_toolkit/nncf/nncf/dynamic_graph/utils.py",
"max_line_length": 98,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "a0bb39424151a98e1ca80c4aa5c865636d401785",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "aalborov/openvino_training_extensions",
"max_stars_repo_path": "pytorch_toolkit/nncf/nncf/dynamic_graph/utils.py",
"max_stars_repo_stars_event_max_datetime": "2020-01-13T02:55:06.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-01-13T02:55:06.000Z",
"num_tokens": 711,
"path": null,
"reason": "import networkx",
"repo": null,
"save_path": null,
"sha": null,
"size": 3179
}
|
//
// lager - library for functional interactive c++ programs
// Copyright (C) 2017 Juan Pedro Bolivar Puente
//
// This file is part of lager.
//
// lager is free software: you can redistribute it and/or modify
// it under the terms of the MIT License, as detailed in the LICENSE
// file located at the root of this source code distribution,
// or here: <https://github.com/arximboldi/lager/blob/master/LICENSE>
//
#pragma once
#include <boost/preprocessor/punctuation/remove_parens.hpp>
#include <boost/preprocessor/seq/for_each.hpp>
// Expansion step for BOOST_PP_SEQ_FOR_EACH_R: emits `&& a.<member> == b.<member>`
// for one element of the member sequence; chained after the leading `true`
// in the generated operator== bodies below.
#define LAGER_DERIVE_IMPL_EQ_ITER__(r__, data__, elem__) &&a.elem__ == b.elem__

// Generates member-wise operator== and operator!= for the non-template type
// `name__`, placed inside namespace `ns__`. `members__` is a Boost.PP
// sequence of member names, e.g. (foo)(bar). The final `//` line only
// terminates the backslash continuation.
#define LAGER_DERIVE_IMPL_EQ(r__, ns__, name__, members__)                     \
    namespace ns__ {                                                           \
    inline bool operator==(name__ const& a, name__ const& b)                   \
    {                                                                          \
        return true BOOST_PP_SEQ_FOR_EACH_R(                                   \
            r__, LAGER_DERIVE_IMPL_EQ_ITER__, _, members__);                   \
    }                                                                          \
    inline bool operator!=(name__ const& a, name__ const& b)                   \
    {                                                                          \
        return !(a == b);                                                      \
    }                                                                          \
    }                                                                          \
    //

// Same as LAGER_DERIVE_IMPL_EQ but for a class template: `tpl__` is the
// parenthesised template parameter list and `name__` the parenthesised type
// name (parentheses protect embedded commas); both are unwrapped with
// BOOST_PP_REMOVE_PARENS.
#define LAGER_DERIVE_TEMPLATE_IMPL_EQ(r__, ns__, tpl__, name__, members__)     \
    namespace ns__ {                                                           \
    template <BOOST_PP_REMOVE_PARENS(tpl__)>                                   \
    inline bool operator==(BOOST_PP_REMOVE_PARENS(name__) const& a,            \
                           BOOST_PP_REMOVE_PARENS(name__) const& b)            \
    {                                                                          \
        return true BOOST_PP_SEQ_FOR_EACH_R(                                   \
            r__, LAGER_DERIVE_IMPL_EQ_ITER__, _, members__);                   \
    }                                                                          \
    template <BOOST_PP_REMOVE_PARENS(tpl__)>                                   \
    inline bool operator!=(BOOST_PP_REMOVE_PARENS(name__) const& a,            \
                           BOOST_PP_REMOVE_PARENS(name__) const& b)            \
    {                                                                          \
        return !(a == b);                                                      \
    }                                                                          \
    }                                                                          \
    //

// Variant meant to expand *inside* a class body: generates friend
// operator== / operator!= definitions (usable for nested or private types).
#define LAGER_DERIVE_NESTED_IMPL_EQ(r__, name__, members__)                    \
    friend bool operator==(name__ const& a, name__ const& b)                   \
    {                                                                          \
        return true BOOST_PP_SEQ_FOR_EACH_R(                                   \
            r__, LAGER_DERIVE_IMPL_EQ_ITER__, _, members__);                   \
    }                                                                          \
    friend bool operator!=(name__ const& a, name__ const& b)                   \
    {                                                                          \
        return !(a == b);                                                      \
    }                                                                          \
    //
|
{
"alphanum_fraction": 0.3507121741,
"author": null,
"avg_line_length": 59.0634920635,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "77330a380776ff1d856f2fe39bd980bdaa7e1b3b",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fe7f16fe4a48999e411682f07481d687b808b5a5",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "gomez-addams/lager",
"max_forks_repo_path": "lager/extra/derive/eq.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "fe7f16fe4a48999e411682f07481d687b808b5a5",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "gomez-addams/lager",
"max_issues_repo_path": "lager/extra/derive/eq.hpp",
"max_line_length": 80,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "fe7f16fe4a48999e411682f07481d687b808b5a5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "gomez-addams/lager",
"max_stars_repo_path": "lager/extra/derive/eq.hpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 573,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3721
}
|
export TimeDelay, ps2μm!, μm2ps!

# Conversion between time delay (ps) and stage displacement (μm) using the
# factor 149.896225 μm/ps. NOTE(review): this is very close to c/2 ≈
# 149.896229 μm/ps (presumably a double-pass delay line) but not exactly —
# confirm whether the last digit is intentional. Note the asymmetry:
# `ps2μm` rounds to the nearest whole μm, `μm2ps` does not round.
ps2μm(t::Real) = round(149.896225 * t)
μm2ps(d::Real) = d / 149.896225
# Metaprogramming loop: generates the in-place variants `ps2μm!` and `μm2ps!`
# that convert every element of a vector in place and return `nothing`.
# `Symbol(f, !)` appends "!" to the function name (string(!) == "!").
for f in (:ps2μm, :μm2ps)
    @eval begin
        # `VecI` is a project-defined vector type — TODO confirm its definition.
        function $(Symbol(f, !))(arr::VecI)
            @simd for i in eachindex(arr)
                @inbounds arr[i] = $f(arr[i])
            end
            return nothing
        end
    end
end
# A time-delay axis whose values are stored internally in micrometres of
# stage displacement (converted from ps with `ps2μm` when necessary).
struct TimeDelay <: TimeAxis
    dat::Vector{Float64} # unit: μm

    # Construct from an in-memory vector `src`. `unit` declares the unit of
    # the *input*: "ps" values are converted with `ps2μm`, "μm" values are
    # copied unchanged; any other string raises an error.
    function TimeDelay(src::VecI, unit::String="ps")
        dat = similar(src)
        if unit ≡ "ps"
            @simd for i in eachindex(dat)
                @inbounds dat[i] = ps2μm(src[i])
            end
        elseif unit ≡ "μm"
            @simd for i in eachindex(dat)
                @inbounds dat[i] = src[i]
            end
        else
            error("TimeDelay(..., unit = $unit) is not allowed.")
        end
        return new(dat)
    end

    # Construct from a delimited text file `s` read with `readdlm`
    # (DelimitedFiles); `Itdy` is the 1-based column index holding the delay
    # values. NOTE(review): this constructor defaults to unit "μm" whereas
    # the vector constructor defaults to "ps" — confirm the asymmetry is
    # intentional.
    function TimeDelay(s::String, unit::String="μm"; Itdy::Int=1)
        raw = readdlm(s)
        dat = Vector{Float64}(undef, size(raw, 1))
        if unit ≡ "μm"
            @simd for i in eachindex(dat)
                @inbounds dat[i] = raw[i, Itdy]
            end
        elseif unit ≡ "ps"
            @simd for i in eachindex(dat)
                @inbounds dat[i] = ps2μm(raw[i, Itdy])
            end
        else
            error("TimeDelay(..., unit = $unit) is not allowed.")
        end
        return new(dat)
    end
end
|
{
"alphanum_fraction": 0.4925690021,
"author": null,
"avg_line_length": 26.6603773585,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "bab9d9d73e84c86414303f65bdac7f2531f6228a",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a7f3d255e65c32075f1e9240e86bee5d921ce9a0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "brianyjtai1994/Spectroscopy.jl",
"max_forks_repo_path": "src/TimeDelay.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a7f3d255e65c32075f1e9240e86bee5d921ce9a0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "brianyjtai1994/Spectroscopy.jl",
"max_issues_repo_path": "src/TimeDelay.jl",
"max_line_length": 65,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a7f3d255e65c32075f1e9240e86bee5d921ce9a0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "brianyjtai1994/Spectroscopy.jl",
"max_stars_repo_path": "src/TimeDelay.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 414,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1413
}
|
from __future__ import print_function, division
import os
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from mypath import Path
from torchvision import transforms
from dataloaders import custom_transforms as tr
import pandas as pd
class LiverSegmentation(Dataset):
    """
    LITS dataset

    Liver-segmentation view of the LiTS dataset. __getitem__ builds a
    sample dict {'image': RGB image, 'label': liver mask} from the paths
    listed in lits_df.csv and passes it through the transform pipeline.
    """
    # Binary segmentation: background vs. liver.
    NUM_CLASSES = 2
    def __init__(self,
                 args,
                 base_dir=Path.db_root_dir('lits_liver'),
                 split='train',
                 study_num=0
                 ):
        """
        :param args: namespace providing at least `base_size` (used by the
            resize transforms)
        :param base_dir: path to lits dataset directory
        :param split: 'train', 'val' or 'vis' (str), or a list of those
        :param study_num: unused in this class — TODO confirm it can be removed
        """
        super().__init__()
        self._base_dir = base_dir
        self.root = os.path.join(self._base_dir,'dataset_6')
        self.df = pd.read_csv(os.path.join(self._base_dir,"lits_df.csv"))
        # NOTE(review): keeping only rows where 'liver_mask_empty' == True
        # looks inverted if the goal is to train on slices that contain
        # liver — verify the CSV column semantics against the data prep code.
        if split != "val":
            self.df = self.df[self.df['liver_mask_empty'] == True]
        # NOTE(review): studies 111-120 fall in neither split here, while
        # TumorSegmentation uses >= 111 for test — confirm the gap is intended.
        self.train_df = self.df[self.df['study_number']<111]
        self.test_df = self.df[self.df['study_number']>=121]
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.args = args
    def __len__(self):
        # Implicitly returns None if self.split holds no recognized value.
        for split in self.split:
            if split == "train":
                return len(self.train_df)
            elif split == 'val' or split == 'vis':
                return len(self.test_df)
    def __getitem__(self, index):
        # The first recognized entry in self.split decides which dataframe
        # and which transform pipeline are used.
        for split in self.split:
            if split == "train":
                imgpath = os.path.join(self.root, os.path.basename(self.train_df.iloc[index]['filepath']))
                _img = Image.open(imgpath).convert('RGB')
                maskpath = os.path.join(self.root, os.path.basename(self.train_df.iloc[index]['liver_maskpath']))
                _target = Image.open(maskpath).convert('L')
                sample = {'image': _img, 'label': _target}
                return self.transform_tr(sample)
            elif split == 'val' or split == 'vis':
                imgpath = os.path.join(self.root, os.path.basename(self.test_df.iloc[index]['filepath']))
                _img = Image.open(imgpath).convert('RGB')
                maskpath = os.path.join(self.root, os.path.basename(self.test_df.iloc[index]['liver_maskpath']))
                _target = Image.open(maskpath).convert('L')
                sample = {'image': _img, 'label': _target}
                return self.transform_val(sample)
    def transform_tr(self, sample):
        # Training transforms: fixed resize, ImageNet-style normalization,
        # conversion to tensors. Augmentations exist but are disabled.
        composed_transforms = transforms.Compose([
            # tr.RandomRotate(30.0),
            # tr.RandomHorizontalFlip(),
            # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
            tr.FixedResize(size=self.args.base_size),
            # tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])
        return composed_transforms(sample)
    def transform_val(self, sample):
        # Validation transforms: same as training minus any augmentation.
        composed_transforms = transforms.Compose([
            # tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.FixedResize(size=self.args.base_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])
        return composed_transforms(sample)
    def __str__(self):
        return 'LITS_liver(split=' + str(self.split) + ')'
class TumorSegmentation(Dataset):
    """
    LITS dataset

    Tumor-segmentation view of the LiTS dataset. __getitem__ builds a
    sample dict {'image': RGB image, 'label': tumor mask} from the paths
    listed in lits_df.csv and passes it through the transform pipeline.
    """
    # Binary segmentation: background vs. tumor.
    NUM_CLASSES = 2
    def __init__(self,
                 args,
                 base_dir=Path.db_root_dir('lits_tumor'),
                 split='train',
                 ):
        """
        :param args: namespace providing at least `base_size` (used by the
            resize transforms)
        :param base_dir: path to lits dataset directory
        :param split: 'train', 'val' or 'vis' (str), or a list of those
        """
        super().__init__()
        self._base_dir = base_dir
        self.root = os.path.join(self._base_dir,'dataset_6')
        self.df = pd.read_csv(os.path.join(self._base_dir,"lits_df.csv"))
        # NOTE(review): keeping only rows where 'tumor_mask_empty' == True
        # looks inverted if the goal is to train on slices that contain
        # tumor — verify the CSV column semantics against the data prep code.
        if split != "val":
            self.df = self.df[self.df['tumor_mask_empty'] == True]
        # Studies below 111 train, 111 and above test.
        self.train_df = self.df[self.df['study_number']<111]
        self.test_df = self.df[self.df['study_number']>=111]
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.args = args
    def __len__(self):
        # Implicitly returns None if self.split holds no recognized value.
        for split in self.split:
            if split == "train":
                return len(self.train_df)
            elif split == 'val' or split == 'vis':
                return len(self.test_df)
    def __getitem__(self, index):
        # The first recognized entry in self.split decides which dataframe
        # and which transform pipeline are used.
        for split in self.split:
            if split == "train":
                imgpath = os.path.join(self.root, os.path.basename(self.train_df.iloc[index]['filepath']))
                _img = Image.open(imgpath).convert('RGB')
                maskpath = os.path.join(self.root, os.path.basename(self.train_df.iloc[index]['tumor_maskpath']))
                _target = Image.open(maskpath).convert('L')
                sample = {'image': _img, 'label': _target}
                return self.transform_tr(sample)
            elif split == 'val' or split == 'vis':
                imgpath = os.path.join(self.root, os.path.basename(self.test_df.iloc[index]['filepath']))
                _img = Image.open(imgpath).convert('RGB')
                maskpath = os.path.join(self.root, os.path.basename(self.test_df.iloc[index]['tumor_maskpath']))
                _target = Image.open(maskpath).convert('L')
                sample = {'image': _img, 'label': _target}
                return self.transform_val(sample)
    def transform_tr(self, sample):
        # Training transforms: fixed resize, ImageNet-style normalization,
        # conversion to tensors. Augmentations exist but are disabled.
        composed_transforms = transforms.Compose([
            # tr.RandomRotate(30.0),
            # tr.RandomHorizontalFlip(),
            # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
            tr.FixedResize(size=self.args.base_size),
            # tr.RandomGaussianBlur(),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])
        return composed_transforms(sample)
    def transform_val(self, sample):
        # Validation transforms: same as training minus any augmentation.
        composed_transforms = transforms.Compose([
            # tr.FixScaleCrop(crop_size=self.args.crop_size),
            tr.FixedResize(size=self.args.base_size),
            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            tr.ToTensor()])
        return composed_transforms(sample)
    def __str__(self):
        # Bug fix: previously reported itself as 'LITS_liver' (copy-paste
        # from LiverSegmentation).
        return 'LITS_tumor(split=' + str(self.split) + ')'
|
{
"alphanum_fraction": 0.5657058389,
"author": null,
"avg_line_length": 37.5833333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9b46de617bc2f1042e8f5de1e2319be5b5cb4e4e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b8cdb3b3490e66495566bca9b322eae3b724b8ce",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "HyeongminMoon/pytorch-deeplab-xception",
"max_forks_repo_path": "dataloaders/datasets/lits.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b8cdb3b3490e66495566bca9b322eae3b724b8ce",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "HyeongminMoon/pytorch-deeplab-xception",
"max_issues_repo_path": "dataloaders/datasets/lits.py",
"max_line_length": 113,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "b8cdb3b3490e66495566bca9b322eae3b724b8ce",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "HyeongminMoon/pytorch-deeplab-xception",
"max_stars_repo_path": "dataloaders/datasets/lits.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-17T06:21:40.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-17T06:21:40.000Z",
"num_tokens": 1529,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6765
}
|
[STATEMENT]
lemma inv_2 :
"(\<tau> \<Turnstile> Person .allInstances@pre()->includes\<^sub>S\<^sub>e\<^sub>t(self)) \<Longrightarrow>
(\<tau> \<Turnstile> inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e(self)) = ((\<tau> \<Turnstile> (self .boss@pre \<doteq> null)) \<or>
(\<tau> \<Turnstile> (self .boss@pre <> null) \<and>
(\<tau> \<Turnstile> (self .boss@pre .salary@pre \<le>\<^sub>i\<^sub>n\<^sub>t self .salary@pre)) \<and>
(\<tau> \<Turnstile> (inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e(self .boss@pre)))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<tau> \<Turnstile> Person .allInstances@pre()->includes\<^sub>S\<^sub>e\<^sub>t(self) \<Longrightarrow> (\<tau> \<Turnstile> inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e self) = (\<tau> \<Turnstile> self.boss@pre \<doteq> null \<or> \<tau> \<Turnstile> self.boss@pre <> null \<and> \<tau> \<Turnstile> self.boss@pre.salary@pre \<le>\<^sub>i\<^sub>n\<^sub>t self.salary@pre \<and> \<tau> \<Turnstile> inv\<^sub>P\<^sub>e\<^sub>r\<^sub>s\<^sub>o\<^sub>n\<^sub>_\<^sub>l\<^sub>a\<^sub>b\<^sub>e\<^sub>l\<^sub>A\<^sub>T\<^sub>p\<^sub>r\<^sub>e (self.boss@pre))
[PROOF STEP]
oops
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Featherweight_OCL_examples_Employee_Model_Analysis_Analysis_OCL",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 649,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
import sys
import argparse
from time import time
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
import re
from scipy import stats
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
import spacy
import en_core_web_sm
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from joblib import dump, load
def parse_args(args):
    '''
    Set up command line parsing for the required positional arguments.

    Parameters
    ----------
    args: list of str that correspond to the various positional and optional command line
        arguments (typically sys.argv[1:])

    Returns
    -------
    A dict wherein the keys are the various argument fields (e.g. 'messages_filepath' or '--help')
    and the values are the arguments passed to each of those fields
    '''
    # Describe the script's purpose for `--help` output
    cli = argparse.ArgumentParser(description='Loads up clean data from a SQLite3 database, \
        runs it through a machine learning pipeline, and classifies the messages from the database \
        as belonging to some subset of 36 potential disaster-related categories.')
    cli.add_argument('database_filepath', type=str, help='Relative filepath for loading \
        the SQLite3 database result. There should be a *.db filename at the end of it. \
        Typically this is of the form "../../data/database_name.db"')
    cli.add_argument('model_filepath', type=str, help='Relative filepath for saving the pickled model \
        after it is trained and ready to be used for predictions. This should be of the format \
        "../../models/date-trained_model-name.pkl"')
    parsed = cli.parse_args(args)
    return vars(parsed)
def load_data(database_filepath):
    '''
    Load the categorized-messages table from the identified sqlite3 database file.

    Parameters
    ----------
    database_filepath: str. Filepath of sqlite3 database file.
        The database should contain only a single table called "categorized_messages".

    Returns
    -------
    3-tuple of the form (features, labels, category_names) where category_names
    refers to the unique labels of every possible predicted category
    '''
    engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql_table('categorized_messages', engine)

    # 'original', 'genre' and 'id' are not needed for modeling
    df.drop(columns=['original', 'id', 'genre'], inplace=True)

    # Columns permitted to act as model features; every other column in the
    # table is treated as a prediction label
    possible_feature_columns = [
        'message',
        'translated',
        'entity_PERSON',
        'entity_NORP',
        'entity_FAC',
        'entity_ORG',
        'entity_GPE',
        'entity_LOC',
        'entity_PRODUCT',
        'entity_EVENT',
        'entity_LANGUAGE',
        'entity_DATE',
        'entity_TIME',
        'entity_MONEY',
    ]

    # Boolean mask over the dataframe's columns: True = feature, False = label
    is_feature = df.columns.isin(possible_feature_columns)
    features = df.loc[:, is_feature]
    labels = df.loc[:, ~is_feature]
    return features, labels, labels.columns
def tokenize(text, lemma=True, use_spacy_full=False, use_spacy_lemma_only=True):
    '''
    Performs various preprocessing steps on a single piece of text. Specifically, this function:

    1. Strips all leading and trailing whitespace
    2. Makes everything lowercase
    3. Removes punctuation
    4. Tokenizes the text into individual words
    5. Removes common English stopwords
    6. If enabled, lemmatizes the remaining words

    Parameters
    ----------
    text: string representing a single message
    lemma: bool. Indicates if lemmatization should be done
    use_spacy_full: bool. If True, performs a full corpus analysis (POS, lemmas of all types, etc.)
        using the spacy package instead of nltk lemmatization
    use_spacy_lemma_only: bool. If True, only performs verb-based lemmatization. Faster than full spacy
        corpus analysis by about 88x.

    Returns
    -------
    List of processed strings from a single message
    '''
    # Strip leading/trailing whitespace and make everything lowercase
    text = text.strip().lower()

    # Retain only parts of text that are non-punctuation
    text = re.sub(r"[^a-zA-Z0-9]", " ", text)

    # Tokenize into individual words
    words = word_tokenize(text)

    # Remove common English stopwords.
    # Perf fix: build the stopword set ONCE. The previous version called
    # stopwords.words("english") inside the comprehension's condition, which
    # rebuilt the full stopword list for every single token (O(n*m) with a
    # linear membership scan each time).
    stop_words = set(stopwords.words("english"))
    words = [w for w in words if w not in stop_words]

    # Lemmatize to root words, if option is enabled
    if lemma and not use_spacy_full and not use_spacy_lemma_only:
        # Perf: construct the lemmatizer once instead of once per word
        wnl = WordNetLemmatizer()
        words = [wnl.lemmatize(w, pos='v') for w in words]
    elif lemma and use_spacy_full:
        nlp = en_core_web_sm.load()
        doc = nlp(text)
        words = [token.lemma_ for token in doc if not token.is_stop]
    elif lemma and use_spacy_lemma_only:
        # NOTE(review): spacy.lemmatizer and the LEMMA_* exports were removed
        # in spaCy 3.x; this branch requires spaCy 2.x — confirm the pinned
        # version in the project requirements.
        from spacy.lemmatizer import Lemmatizer
        from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES
        lemmatizer = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)
        words = [lemmatizer(w, u"VERB")[0] for w in words]

    return words
def build_model(features_train, labels_train):
    '''
    Builds the modeling pipeline that can then be trained and tested.

    The pipeline applies tf-idf vectorization (with the module's `tokenize`
    function) to the 'message' column, passes all other feature columns
    through untouched, and feeds the result to a RandomForestClassifier.
    Hyperparameters are tuned with 5-fold GridSearchCV scored by weighted F1.

    Parameters
    ----------
    features_train: pandas DataFrame or numpy array containing training features data. Used
        to perform hyperparameter tuning on the pipeline
    labels_train: pandas DataFrame or numpy array containing training labels data. Used
        to perform hyperparameter tuning on the pipeline

    Returns
    -------
    scikit-learn Pipeline that includes a tf-idf step and a RandomForestClassifier,
    trained to have the optimal hyperparameters. NOTE: this model should be fit
    on the full training data before being used for predictions, so that it is as
    accurate as possible.
    '''
    pipeline = Pipeline([
        ('text_analysis', ColumnTransformer(transformers=[('tf-idf',
                                                           TfidfVectorizer(
                                                               tokenizer=tokenize),
                                                           'message')],
                                            remainder='passthrough',
                                            verbose=True)),
        ('classifier', RandomForestClassifier())
    ],
        verbose=True)

    # Setup grid search to optimize hyperparameters based on notebook explorations earlier
    grid_parameters = {
        'classifier__n_estimators': [10, 50, 100],
        'classifier__min_samples_leaf': [1, 5, 10]
    }

    # NOTE(review): the `iid` parameter was deprecated in scikit-learn 0.22
    # and removed in 0.24, so this call only works on an older sklearn —
    # confirm the pinned version or drop the argument.
    cv = GridSearchCV(pipeline, grid_parameters, cv = 5,
                      scoring='f1_weighted', error_score=0.0,
                      iid=False, verbose=1, n_jobs=1,
                      return_train_score=True)
    cv.fit(features_train, labels_train)

    # Report the full CV table, best configuration first
    tuning_results = pd.DataFrame(cv.cv_results_).sort_values('rank_test_score')
    print("Hyperparameter Tuning Results:\n")
    print(tuning_results)
    print("\n\n")

    return cv.best_estimator_
def evaluate_model(model, features_test, labels_test, category_names):
    '''
    Predicts the labels for the test set and then generates a classification report
    outlining how well the model performed with its predictions. The report is returned
    as a pandas DataFrame and printed to stdout.

    Parameters
    ----------
    model: trained scikit-learn Pipeline object that contains a classifier of some sort
    features_test: pandas DataFrame/Series or numpy array. All of the feature data set aside for testing
    labels_test: pandas DataFrame/Series or numpy array. All of the label/target data set aside for testing
    category_names: list of str. Names of all possible labels, ordered as they are in labels_test.
        These are used to provide meaningful column names in the output report

    Returns
    -------
    pandas DataFrame representing the classification results. The metric assumed most relevant
    for an imbalanced multi-label prediction of this sort is 'f1-score, weighted avg'
    '''
    labels_pred = model.predict(features_test)
    # output_dict=True gives a nested dict keyed by label name, which maps
    # cleanly onto a DataFrame for display and further analysis
    class_report_dict = classification_report(labels_test, labels_pred,
                                              target_names=category_names,
                                              digits=2, output_dict=True)
    class_report = pd.DataFrame.from_dict(class_report_dict)

    print("Fully-trained model evaluation results: \n")
    print(class_report)
    print("\n\n")

    return class_report
def save_model(model, model_filepath):
    '''
    Saves model as a PKL (pickle) file to disk for later use, using joblib's
    `dump` (better suited than plain pickle for objects with large numpy
    arrays, such as fitted sklearn estimators).

    Code from https://scikit-learn.org/stable/modules/model_persistence.html

    Parameters
    ----------
    model: trained scikit-learn Pipeline object that contains a classifier of some sort
    model_filepath: str. Filepath (including filename.pkl) where you want the model stored.
        Typical values are of the format "../../models/date-trained_model-name.pkl"

    Returns
    -------
    Nothing, just saves model to disk.
    '''
    dump(model, model_filepath)
def main():
    '''
    Runs through each stage of the modeling process:
        1. Data loading
        2. Model building
        3. Model fitting/training
        4. Model testing
        5. Saving the model for future use

    Expects exactly two command-line arguments (database filepath, model
    output filepath); prints usage guidance otherwise.
    '''
    # len(sys.argv) == 3 means script name + the two required arguments
    if len(sys.argv) == 3:
        args = parse_args(sys.argv[1:])
        database_filepath = args['database_filepath']
        model_filepath = args['model_filepath']
        print('Loading data...\n    DATABASE: {}'.format(database_filepath))
        X, Y, category_names = load_data(database_filepath)
        # Hold out 20% of the data for final evaluation
        X_train, X_test, Y_train, Y_test = train_test_split(
            X, Y, test_size=0.2)
        print('Building model...')
        # build_model runs its own CV hyperparameter search on the training
        # data and returns the best estimator found
        model = build_model(X_train, Y_train)
        # Fitting one more time on the full training dataset for max accuracy
        # post-tuning
        print('Training model...')
        time0 = time()
        model.fit(X_train, Y_train)
        print(f"Final model trained in {(time()-time0) / 60} minutes")
        print('Evaluating model...')
        evaluate_model(model, X_test, Y_test, category_names)
        print('Saving model...\n    MODEL: {}'.format(model_filepath))
        save_model(model, model_filepath)
        print('Trained model saved!')
    else:
        print('Please provide the filepath of the disaster messages database '
              'as the first argument and the filepath of the pickle file to '
              'save the model to as the second argument. \n\nExample: python '
              'train_classifier.py ../../data/DisasterTweets.db ../../models/01-01-2019_ModelName.pkl')
|
{
"alphanum_fraction": 0.6506719504,
"author": null,
"avg_line_length": 34.754491018,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "41daeff2358eedaeadd8ee50f4eed4d572fd2a40",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "00b952770a711c9aad2f4e18f4b854e0ef64807b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "emigre459/disaster-prone-nlp",
"max_forks_repo_path": "src/models/train_classifier.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "00b952770a711c9aad2f4e18f4b854e0ef64807b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "emigre459/disaster-prone-nlp",
"max_issues_repo_path": "src/models/train_classifier.py",
"max_line_length": 106,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "00b952770a711c9aad2f4e18f4b854e0ef64807b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "emigre459/disaster-prone-nlp",
"max_stars_repo_path": "src/models/train_classifier.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2419,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11608
}
|
from __future__ import division
import numpy as np
from skimage.color import rgb2gray
from skimage import transform
from skimage import segmentation
from skimage import morphology
from skimage import transform
from scipy.interpolate import interp1d
from scipy import stats
from scipy import sparse
from scipy.misc import toimage
from scipy.spatial.distance import pdist, squareform
from collections import Counter
from typing import *
def distance_point_line(point: Tuple[float, float], line: Tuple[float, float, float]) -> float:
    '''Euclidean distance between a point and a line in implicit form.

    Parameters
    ----------
    point: tuple of float
        Coordinates (x, y) of the point (arrays also broadcast elementwise)
    line: tuple of float
        Parameters (a, b, c) of the line ax + by + c = 0

    Returns
    -------
    distance: float
        Distance |a*x + b*y + c| / sqrt(a^2 + b^2)
    '''
    px, py = point
    a, b, c = line
    numerator = np.abs(a * px + b * py + c)
    denominator = np.sqrt(a**2 + b**2)
    return numerator / denominator
def linear_kernel(D: Union[np.ndarray, float], w: float) -> np.ndarray:
    '''Triangular (linear) kernel evaluated at distance D from its center.

    Parameters
    ----------
    D: ndarray or float
        distance from the center (max) of the kernel
    w: float
        half width of the kernel (distance from the maximum to zero);
        for |D| > w the kernel evaluates to 0

    Returns
    -------
    K: ndarray or float
        Array containing at K[i,j] the kernel value at D[i,j],
        with 0 <= K[i,j] <= 1 by construction
    '''
    scaled = np.abs(D / float(w))
    inside = scaled <= 1
    # Outside the support the boolean mask zeroes the contribution
    return inside * (1 - scaled)
def calculate_projection(angle: float, q: float, X_coords: np.ndarray, Y_coords: np.ndarray, slice_width: float) -> np.ndarray:
    '''Flattened array of per-pixel contributions to one slice's projection.

    Parameters
    ----------
    angle: float
        The slope angle of the line the slice is centered upon
    q: float
        The intercept of the line (y = tan(angle)*x + q) the slice is centered upon
    X_coords: 2-D array
        contains x at pixel (y, x)
    Y_coords: 2-D array
        contains y at pixel (y, x)
    slice_width: float
        the width of a slice

    Returns
    -------
    design_matrix_row: 1-D array

    Notes
    -----
    This is one row of the Design/Projection matrix: the raveled form of a
    matrix whose (r, c) entry is the contribution of pixel (r, c) to the
    slice's projected value. Contributions fall off as a linear
    (triangular) kernel of the distance to the line y = tan(angle)*x + q,
    vanishing at one slice_width (so the kernel's total support is twice
    the slice width). This kernel is used because it is robust under
    summation: adding the pixel contributions of all slices recovers the
    original grayscale image mask.
    '''
    # Line in implicit form a*x + b*y + c = 0
    line_params = (np.tan(angle), -1., q)
    # Distance of every pixel from the slice's center line
    pixel_distances = distance_point_line((X_coords, Y_coords), line_params)
    # Triangular-kernel weighting of each pixel, returned raveled
    return linear_kernel(pixel_distances, slice_width).ravel()
def find_extremities(image: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
    '''Returns the first and last point encountered by slicing
    the mask `image` with angle `angle`

    Parameters
    ----------
    image: 2-D array
        the convex_hull of the mask (treated as a binary/grayscale mask)
    angle: float
        (in radians) the angle of the slicing with respect to x (counter clockwise)

    Returns
    -------
    first_point, last_point: arrays
        first and last point touched rotating counterclockwise (clockwise if
        looking at the image); returned as (x, y) coordinates in the
        ORIGINAL (unrotated) image frame

    Notes
    -----
    Given a slicing angle and a mask it returns the first and
    last point encountered by the slicing. We use a rotation
    trick to do this in the fastest and most robust way: rotate the image
    by -angle, take the lexicographic min/max of the on-pixels, then map
    those two points back through the inverse transform.

    Timing: 2.75 ms
    '''
    # To rotate around the center of the image
    # we compose translation to the center and rotation
    # we retranslate to bring back in position
    shift_y, shift_x = (np.array(image.shape) - 1) / 2.
    tf_rotate = transform.SimilarityTransform(rotation=-angle)
    tf_shift = transform.SimilarityTransform(translation=[-shift_x, -shift_y])
    tf_shift_inv = transform.SimilarityTransform(translation=[shift_x, shift_y])
    # we only use the inverse so we assign this and not the direct transformation
    tf_composed_inv = (tf_shift + (tf_rotate + tf_shift_inv)).inverse
    # Rotate the image and find the max and min point
    image_rotated = transform.warp(image, inverse_map=tf_composed_inv, order=1) # order might be changed to 3
    # the line above returns a float after transformation even if input was bool
    # NOTE(review): 0.7 is an arbitrary threshold to re-binarize after
    # interpolation — confirm it is appropriate for grayscale (non-binary) input
    pos_cordinates = list(zip(*np.where(image_rotated > 0.7))) # we threshold because of the reason above
    # min/max over (y, x) tuples: lexicographic, so effectively the extreme rows
    y_min, x_min = min(pos_cordinates)
    y_max, x_max = max(pos_cordinates)
    # Apply the inverse transformation to return to the original coordinates
    return tf_composed_inv([x_min, y_min])[0], tf_composed_inv([x_max, y_max])[0]
def slicing_parameters(angle: float, extremity_points: Tuple[np.ndarray, np.ndarray]) -> Tuple[float, Tuple[float, float]]:
    '''Calculates all the parameters necessary for slicing.

    Parameters
    ----------
    angle: float
        angle in radians
    extremity_points: tuple of 1-D array
        (first_point, last_point), each an (x, y) pair as returned
        by find_extremities()

    Returns
    -------
    m, qs: floats
        Parameters for the two tangent lines: y = m*x + qs[0] and y = m*x + qs[1]

    TO DO: Cases here r // x and r // y
    '''
    slope = np.tan(angle)
    first, last = extremity_points
    # Intercept of each tangent line: q = y - m*x
    q_first = first[1] - slope * first[0]
    q_last = last[1] - slope * last[0]
    return slope, (q_first, q_last)
def sum_project(image: np.ndarray, angle: float, slice_width: float, notation_inverse: bool=True) -> np.ndarray:
    '''Utility to get the projection of a grayscale `image` in a direction `angle`

    Parameters
    ----------
    image: 2-D array
        Grayscale image, representing a mask or a signal
    angle: float
        In radians, the angle of slicing
    slice_width: float
        The width of a slice (pixels)
    notation_inverse: bool
        If True, the angle is meant from the point of view of the image
        (origin bottom-left) and is converted to image coordinates below

    Returns
    -------
    projection: 2-D array, shape (n_slices, 1)
        per-slice kernel-weighted sums of the pixel values

    TODO: add support for a precomputed projection matrix
    '''
    img_shape = image.shape
    # Build two arrays containing respectivelly the x and the y coordinates of every pixel
    Y_coords, X_coords = np.mgrid[0:img_shape[0], 0:img_shape[1]]
    raveled_img = image.ravel()
    if notation_inverse:
        angle = np.pi - angle
    # when the q2 - q1 is an integer exactly n slides the loss of precision of np.tan will cause an extra slice to be cut
    _tan = lambda x: np.round(np.tan(x), decimals=4)
    # Calculate the q parameters of the two lines passing by the extremities
    # (the two corners of the image, half a pixel outside the grid, that the
    # outermost projection lines must pass through; which pair of corners
    # depends on whether the slope is "steep" within the half-period)
    if (angle % np.pi) > np.pi / 2.:
        x0 = np.max(X_coords) + 0.5
        y0 = np.max(Y_coords) + 0.5
        q1 = y0 - _tan(angle) * x0
        x0 = 0 - 0.5
        y0 = 0 - 0.5
        q2 = y0 - _tan(angle) * x0
        # Calculate the distance between two proj lines passing from the corners of the image
        dist = distance_point_line((x0, y0), (_tan(angle), -1, q1))
    else:
        # q = y0 - m * x0]
        x0 = np.max(X_coords) + 0.5
        y0 = 0 - 0.5
        q2 = y0 - _tan(angle) * x0
        x0 = 0 - 0.5
        y0 = np.max(Y_coords) + 0.5
        q1 = y0 - _tan(angle) * x0
        # Calculate the distance between two proj lines passing from the corners of the image
        dist = distance_point_line((x0, y0), (_tan(angle), -1, q2))
    number_of_slices = dist / slice_width
    # Calculate the different q parameters of the lines around which the slices are centered on
    # The naive qs are of lines start from the tangent and finishes on the tangent
    q_step = np.abs(q2 - q1) / number_of_slices
    if q1 < q2:
        naive_qs = np.arange(q1, q2 + q_step, q_step)
    else:
        naive_qs = np.arange(q1, q2 - q_step, -q_step)
    # The above is not correct: we want the line in the middle of the one generated above
    # adjusted_qs = (naive_qs[1:] + naive_qs[:-1]) /2.
    # NOTE(review): the midpoint adjustment is commented out, so the slice
    # center lines start ON the tangent rather than half a step inside —
    # confirm this is the intended behavior
    adjusted_qs = naive_qs
    # Inititalize
    proj_matrix = []
    # Iterate over the slices (parameter q)
    for q in adjusted_qs:
        proj_matrix.append(calculate_projection(angle, q, X_coords, Y_coords, slice_width))
    # Transform the list of arrays in a 2-D array
    proj_matrix = np.array(proj_matrix)
    # Calculate the pojection as a dot product and return
    return np.dot(proj_matrix, raveled_img[:, None])
def create_connectivity(mask: np.ndarray, kind: str="queen") -> np.ndarray:
    """ Create a connectivity matrix of the pixels in a image

    Only ``mask.shape[0]`` is used (the image is assumed square of side N);
    the pixel values themselves do not affect the result.

    Args
    ----
    mask: np.2darray
        Square image of side N
    kind: str default 'queen'
        The kind of connectivity to apply. Can be: 'rook' (4-connected),
        'bishop' (diagonals only), 'queen' or 'king' (8-connected, as in chess)

    Returns
    -------
    connectivity_matrix: np.ndarray
        A dense connectivity matrix (N**2, N**2) where N is the side of mask,
        with 1.0 where two pixels are neighbours. Note the O(N**4) memory
        cost: suitable for small images only.

    Raises
    ------
    ValueError
        If `kind` is not one of 'rook', 'bishop', 'queen', 'king'.
        (Previously an unknown kind crashed later with a NameError on the
        unbound neighbour-offset variable.)
    """
    ll = mask.shape[0]
    # Add a 1 pixel margin all around the image so neighbour offsets never
    # wrap around or fall outside the grid (removed again as the last step)
    ll += 2
    cstm_connectivity = np.zeros((ll**2, ll**2)) # initialize empty
    pixel_ixs = np.arange(ll**2)

    # Compute the flat indexes of the real (non-border) pixels
    real_pixel_bool = np.ones((ll, ll), dtype=bool)
    real_pixel_bool[0, :] = False
    real_pixel_bool[-1, :] = False
    real_pixel_bool[:, 0] = False
    real_pixel_bool[:, -1] = False
    real_pixel_bool = real_pixel_bool.flat[:]
    real_pixel_ixs = pixel_ixs[real_pixel_bool]

    # Neighbour rule: relative flat-index offsets for each connectivity kind
    if kind == "rook":
        neig_relative_ix_pos = np.array([+1, -1, -ll, ll])
    elif kind == "bishop":
        neig_relative_ix_pos = np.array([-ll + 1, ll + 1, -ll - 1, ll - 1])
    elif kind in ["queen", "king"]:
        neig_relative_ix_pos = np.array([+1, -1, -ll, ll, -ll + 1, ll + 1, -ll - 1, ll - 1])
    else:
        raise ValueError("Unknown connectivity kind: {!r}; expected "
                         "'rook', 'bishop', 'queen' or 'king'.".format(kind))

    # Set True where there is connectivity. Broadcasting does the same as:
    # for i in real_pixel_ixs:
    #     cstm_connectivity[i, neig_relative_ix_pos + i] = True
    cstm_connectivity[real_pixel_ixs[:, None], real_pixel_ixs[:, None] + neig_relative_ix_pos] = True
    # Remove connectivity entries corresponding to the dummy 1 pixel edges
    cstm_connectivity = cstm_connectivity[real_pixel_ixs[:, None], real_pixel_ixs[None, :]]
    return cstm_connectivity
def place_inside_mask(values: np.ndarray, mask_bw: np.ndarray) -> np.ndarray:
    """Scatter `values` into the True/1 positions of `mask_bw`, following C-contiguous order
    Args
    ----
    values: np.ndarray (1d, float)
        the values to fill in the mask
    mask_bw: np.ndarray (2d, binary or boolean)
        the mask to fill values in
    Returns
    -------
    filled: np.ndarray 2d
        2d array with the values substituted in the right place of the mask
    """
    # assert np.allclose(mask.sum(), len(values))
    filled = np.zeros(mask_bw.shape)
    inside = mask_bw.flat[:].astype(bool)  # boolean selector over the flattened image
    filled.flat[inside] = values
    return filled
def build_Design_Matrix(angles: np.ndarray, widths: List[float],
                        mask_g: np.ndarray, mask_thrs: float=0.2,
                        notation_inverse: bool=True, return_projlen: bool=True,
                        return_sparse: bool=False) -> np.ndarray:
    '''Builds the regression design matrix (Projection Matrix).

    Parameters
    ----------
    angles: np.ndarray
        the angles of the slicing
    widths: list of float
        (number of pixels) real width of a slice for every cutting angle
    mask_g: 2-D array of floats
        greyscale mask representing the shape of the tissue slice,
        works well also if entries are only 1s and 0s
    mask_thrs: float
        value to threshold mask_g
    notation_inverse: bool, default=True
        the angles are meant from the point of view of the image
    return_projlen: bool, default=True
        whether to return the information about the number of rows for each angle
    return_sparse: bool, default=False
        whether to return a scipy.sparse.csr_matrix

    Returns
    -------
    design_matrix: 2-d array
        (followed by `projlen`, the rows contributed per angle, if return_projlen)

    Notes
    -----
    it returns the design matrix for the mask-constrained regression problem
    the correct angles would be the ones looking at the image flipped (origin at left-top, positive y is down)
    but if notation_inverse == True it assumes that angles are with respect to an origin at the left-bottom
    Assumptions: The image is reliable and the widths are reliable.
    This has to be adjusted beforehand
    '''
    if notation_inverse:
        angles = (np.pi - angles) % (2 * np.pi)
    img_shape = mask_g.shape
    # Build two arrays containing respectively the x and the y coordinates of every pixel
    Y_coords, X_coords = np.mgrid[0:img_shape[0], 0:img_shape[1]]
    # Prepare the raveled image for the multiplication below
    raveled_img = mask_g.ravel()
    # Perf fix: the convex hull depends only on the mask, not on the angle —
    # compute it once instead of once per angle as before. Note: threshold was 0.05 before.
    conv_hull = morphology.convex_hull_image(mask_g > mask_thrs)
    # Initialize a list to which the rows of the design matrix will be appended
    Z = []
    projlen = []
    # Iterate over the angles of projections
    for n_a, angle in enumerate(angles):
        # Calculate q1 and q2 (the intercepts of the two tangent lines)
        first_point, last_point, q1, q2, extension = all_slicing_parameters(conv_hull, angle)
        # Calculate the number of slices fitting between the first and last line
        number_of_slices = extension / widths[n_a]
        # Calculate the different q parameters of the lines around which the slices are centered on
        # The naive qs are of lines that start from the tangent and finish on the tangent
        q_step = np.abs(q2 - q1) / number_of_slices
        if q1 < q2:
            naive_qs = np.arange(q1, q2 + q_step, q_step)
        else:
            naive_qs = np.arange(q1, q2 - q_step, -q_step)
        # This is not correct and we want the line in the middle of the one generated above
        # adjusted_qs = (naive_qs[1:] + naive_qs[:-1]) /2.
        adjusted_qs = naive_qs
        projlen.append(len(adjusted_qs))
        # Iterate over the slices (over q)
        for q in adjusted_qs:
            # Calculate the row of the design matrix and append it
            row = calculate_projection(angle, q, X_coords, Y_coords, widths[n_a]) * raveled_img
            Z.append(sparse.csr_matrix(row) if return_sparse else row)
    # transform the list of arrays in a 2-D array (sparse or dense)
    stacked = sparse.vstack(Z) if return_sparse else np.array(Z)
    if return_projlen:
        return stacked, projlen
    return stacked
def all_slicing_parameters(convex_hull_mask: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray, float, float, float]:
    """Compute the tangent-line intercepts and extension of a convex hull along a slicing angle.

    Returns (first_point, last_point, q_first, q_last, extension) where the points are
    (x, y) pixels of the hull touching the two tangent lines of slope tan(angle),
    the q's are their intercepts, and extension is the distance between the lines.
    """
    # Don't round the slope: tan can reach ~1e16, which avoids nan and keeps precision
    slope = np.tan(angle)
    rows, cols = np.where(convex_hull_mask)
    # Intercept of the line with the given slope passing through each hull pixel
    intercepts = rows - slope * cols
    q_hi = np.max(intercepts)
    q_lo = np.min(intercepts)
    ix_hi, ix_lo = intercepts.argmax(), intercepts.argmin()
    point_hi = (cols[ix_hi], rows[ix_hi])
    point_lo = (cols[ix_lo], rows[ix_lo])
    low_line = (-slope, 1., -q_lo)
    extension = distance_point_line(point_hi, low_line)  # this function is nan-proof when using np.tan
    # Which tangent counts as "first" depends on which half-circle the angle lies in
    if 0.5 * np.pi < (angle % (2 * np.pi)) <= 1.5 * np.pi:
        return point_hi, point_lo, q_hi, q_lo, extension
    return point_lo, point_hi, q_lo, q_hi, extension
def prepare_design_symmetry(D: np.ndarray) -> np.ndarray:
    """Append to the design matrix the strips symmetric to the ones in the input
    Args
    ----
    D: np.ndarray
        The design matrix as it comes from build_Design_Matrix
    Returns
    -------
    New design matrix with appended the symmetric angles
    """
    # Each row is a flattened (side, side) image; build the column order that
    # corresponds to flipping that image left-right, assuming symmetry
    n_cols = D.shape[1]
    side = int(np.sqrt(n_cols))
    mirrored_cols = np.arange(n_cols).reshape(side, side)[:, ::-1].ravel()
    return np.append(D, D[:, mirrored_cols], 0)
def prepare_design_masked(D: np.ndarray, mask_bw: np.ndarray) -> np.ndarray:
    """Design matrix restricted to the pixels inside the mask
    Args
    ----
    D: np.ndarray
        The design matrix as it comes from build_Design_Matrix
    mask_bw: np.ndarray (binary or boolean)
        The image used as mask. Entries should be only 1/0 or True/False
    Returns
    -------
    New design matrix where only the columns of pixels inside the mask are kept
    """
    keep = mask_bw.flat[:].astype(bool)  # boolean column filter in C-contiguous pixel order
    return np.copy(D[:, keep])
def prepare_design_symmetry_masked(D: np.ndarray, mask_bw: np.ndarray) -> np.ndarray:
    """Design matrix with symmetric strips appended, restricted to the pixels inside the mask
    Args
    ----
    D: np.ndarray
        The design matrix as it comes from build_Design_Matrix
    mask_bw: np.ndarray (binary or boolean)
        The image used as mask. Entries should be only 1/0 or True/False
    Returns
    -------
    New design matrix with appended the symmetric angles, where only the pixels in the mask are considered
    """
    # First duplicate the rows for the mirrored image, then drop the out-of-mask columns
    symmetric = prepare_design_symmetry(D)
    keep = mask_bw.flat[:].astype(bool)
    return symmetric[:, keep]
def prepare_observations(projections: List[np.ndarray], xs: List[np.ndarray],
                         first_points: List[int], projs_len: List[int],
                         interpolation: str="linear", verbose: bool=False) -> np.ndarray:
    """Prepare the observation vector `b`
    Args
    ----------
    projections: list of 1-D array
        a list of projections. Projections are first selected so that the first value is the first reliable section and
        the last the last reliable section
    xs: list of 1-D array
        Contains arrays indicating which are the indexes of the 'projections' input.
        `projections` are usually filtered and trimmed, so an ix array is kept to keep track of it.
        Its values[i] usually gets filtered and some samples are missing.
        e.g. [ array([20,21,22,24,25,27,30,...]), array([10,11,12,15,16,17,18,...]), array([3,4,7,9,10,11,12,...]) ]
    first_points: list of int
        for every proj-angle it indicates how shifted it is from the theoretical one
    projs_len: list of int
        the expected number of slices that should be taken in account starting from `list_of_points[i]`
    interpolation: str
        kind of interpolation, one of "linear", "cubic", "mixed"
    verbose: bool
        prints min(xi), max(xi), max(xi)-min(xi)+1, n_points, len(xi), len(p)
    Returns
    -------
    final_proj: 1-D array
        The projections ready to be given as an input of a regression problem
    Raises
    ------
    ValueError
        If `interpolation` is not one of the supported kinds.
    """
    # Bug fix: an unknown interpolation kind previously fell through the if/elif
    # chain and crashed with UnboundLocalError; validate it up front instead.
    if interpolation not in ("linear", "cubic", "mixed"):
        raise ValueError(f"Unknown interpolation {interpolation!r}; expected 'linear', 'cubic' or 'mixed'")
    final_proj = np.array([])
    for projection, xi, first_point, n_points in zip(projections, xs, first_points, projs_len):
        full_x = np.arange(first_point, first_point + n_points)
        p = projection.copy()
        # take the points between first point and last point
        bool_ix = (first_point <= xi) & (xi < first_point + n_points)
        xi = xi[bool_ix]
        p = p[bool_ix]
        # Deal with some special cases if they occur (e.g. samples at the extremities did not work)
        if first_point not in xi:
            xi = np.r_[first_point, xi]
            p = np.r_[0, p]
        if (first_point + n_points - 1) not in xi:
            xi = np.r_[xi, first_point + n_points - 1]
            p = np.r_[p, 0]
        if verbose:
            print(np.min(xi), np.max(xi), np.max(xi) - np.min(xi) + 1, n_points, len(xi), len(p))
        # Perform the interpolation for the missing ixs
        if interpolation == "linear":
            f1 = interp1d(xi, p, kind='linear', fill_value=0, bounds_error=False)
            interpolated = f1(full_x)
        elif interpolation == "cubic":
            f3 = interp1d(xi, p, kind='cubic', fill_value=0, bounds_error=False)
            interpolated = np.clip(f3(full_x), a_min=0, a_max=1.2 * max(p))
        else:  # "mixed": weighted blend of cubic, linear, and observed-only terms
            f1 = interp1d(xi, p, kind='linear', fill_value=0, bounds_error=False)
            f3 = interp1d(xi, p, kind='cubic', fill_value=0, bounds_error=False)
            intp1 = np.clip(f1(full_x), a_min=0, a_max=1.2 * max(p))
            intp3 = np.clip(f3(full_x), a_min=0, a_max=1.2 * max(p))
            # keep only the samples that were actually observed
            # (np.isin replaces the previous O(n^2) per-element membership scan)
            intp0 = intp1 * np.isin(full_x, xi)
            interpolated = 0.15 * intp3 + 0.35 * intp1 + 0.5 * intp0
        final_proj = np.r_[final_proj, interpolated]  # This is just appending to the previous projections
    return final_proj
def prepare_observations_symmetry(projections: List[np.ndarray], xs: List[np.ndarray],
                                  first_points: List[int], projs_len: List[int],
                                  interpolation: str="linear", verbose: bool=False) -> np.ndarray:
    """Prepare the observation vector `b` assuming symmetry.

    The observations built by `prepare_observations` are duplicated, so the projection
    at each symmetrical angle is assumed identical to the measured one.
    Args
    ----------
    projections: list of 1-D array
        a list of projections, selected so that the first value is the first reliable section and
        the last the last reliable section
    xs: list of 1-D array
        arrays indicating which are the indexes of the 'projections' input
        (projections are usually filtered and trimmed, so an ix array keeps track of it)
    first_points: list of int
        for every proj-angle it indicates how shifted it is from the theoretical one
    projs_len: list of int
        the expected number of slices that should be taken in account starting from `list_of_points[i]`
    interpolation: str
        kind of interpolation, one of "linear", "cubic", "mixed"
    verbose: bool
        prints min(xi), max(xi), max(xi)-min(xi)+1, n_points, len(xi), len(p)
    Returns
    -------
    final_proj: 1-D array
        The (duplicated) projections ready to be given as an input of a regression problem
    """
    base = prepare_observations(projections, xs, first_points, projs_len, interpolation, verbose)
    return np.concatenate([base, base])
def prepare_regression(projections: List[np.ndarray], xs: List[np.ndarray], design_matrix: np.ndarray,
                       first_points: List[int], projs_len: List[int], verbose: bool=True) -> Tuple[np.ndarray, np.ndarray]:
    '''
    Prepare Design matrix and observation vector
    Parameters
    ----------
    projections: list of 1-D array
        a list of projections, selected so that the first value is the first reliable section and
        the last the last reliable section
    xs: list of 1-D array
        arrays indicating which are the indexes of the 'projections' input
        (projections are usually filtered and trimmed, so an ix array keeps track of it)
    design_matrix: 2-D array
        as calculated by the function build_Design_Matrix
    first_points: list of int
        for every proj-angle it indicates how shifted it is from the theoretical one
    projs_len: list of int
        the expected number of slices that should be taken in account starting from `list_of_points[i]`
    verbose: bool
        prints min(xi), max(xi), max(xi)-min(xi)+1, n_points, len(xi), len(p)
    Returns
    -------
    D: 2-D array
        The design matrix ready to be given as an input of a regression problem
    final_proj: 1-D array
        The projections ready to be given as an input of a regression problem
    Notes
    -----
    The function includes a mixed cubic-linear-zero interpolation to fill in the missing values.
    This is necessary because if one instead omits the equations for the missing projection
    (as it would be intuitive) the regularized problem will have fewer constraints and the
    respective pixels will be set to zero or very low numbers.
    Input image given to Design Matrix function has to be symmetrical.
    '''
    # Copy so the caller's design matrix is never aliased by the returned one
    observations = prepare_observations(projections, xs, first_points, projs_len, verbose=verbose)
    return design_matrix.copy(), observations
def prepare_regression_symmetry(projections: List[np.ndarray], xs: List[np.ndarray], design_matrix: np.ndarray, first_points: List[int]=[0,0,0], projs_len: List[int]=[100,100,100], verbose: bool=True) -> Tuple[np.ndarray, np.ndarray]:
    '''Reconstruction setup exploiting the symmetry of the input image.
    Parameters
    ----------
    projections: list of 1-D array
        a list of projections, selected so that the first value is the first reliable section and
        the last the last reliable section
    xs: list of 1-D array
        arrays indicating which are the indexes of the 'projections' input
        (projections are usually filtered and trimmed, so an ix array keeps track of it)
    design_matrix: 2-D array
        as calculated by the function build_Design_Matrix
    first_points: list of int
        for every proj-angle it indicates how shifted it is from the theoretical one
    projs_len: list of int
        the expected number of slices that should be taken in account starting from `list_of_points[i]`
    verbose: bool
        prints min(xi), max(xi), max(xi)-min(xi)+1, n_points, len(xi), len(p)
    Returns
    -------
    D: 2-D array
        The design matrix ready to be given as an input of a regression problem
    final_proj: 1-D array
        The projections ready to be given as an input of a regression problem
    Notes
    -----
    The function includes a mixed cubic-linear-zero interpolation to fill in the missing values.
    This is necessary because if one instead omits the equations for the missing projection
    (as it would be intuitive) the regularized problem will have fewer constraints and the
    respective pixels will be set to zero or very low numbers.
    Input image given to Design Matrix function has to be symmetrical.
    '''
    # Append the mirrored strips to a copy of the design matrix and duplicate the observations
    symmetric_design = prepare_design_symmetry(design_matrix.copy())
    observations = prepare_observations_symmetry(projections, xs, first_points, projs_len, verbose=verbose)
    return symmetric_design, observations
def prepare_regression_symmetry_masked(projections: List[np.ndarray], xs: List[np.ndarray],
                                       design_matrix: np.ndarray, mask: np.ndarray,
                                       first_points: List[int], projs_len: List[int],
                                       verbose: bool=True) -> Tuple[np.ndarray, np.ndarray]:
    '''
    Currently the best and fastest algorithm for reconstruction, exploiting the symmetry of the input image.
    Parameters
    ----------
    projections: list of 1-D array
        a list of projections, selected so that the first value is the first reliable section and
        the last the last reliable section
    xs: list of 1-D array
        arrays indicating which are the indexes of the 'projections' input
        (projections are usually filtered and trimmed, so an ix array keeps track of it)
    design_matrix: 2-D array
        as calculated by the function build_Design_Matrix
    mask: 2-D boolean array:
        a boolean mask indicating which pixels to reconstruct
    first_points: list of int
        for every proj-angle it indicates how shifted it is from the theoretical one
    projs_len: list of int
        the expected number of slices that should be taken in account starting from `list_of_points[i]`
    verbose: bool
        prints min(xi), max(xi), max(xi)-min(xi)+1, n_points, len(xi), len(p)
    Returns
    -------
    D: 2-D array
        The design matrix ready to be given as an input of a regression problem
    final_proj: 1-D array
        The projections ready to be given as an input of a regression problem
    Notes
    -----
    The function includes a mixed cubic-linear-zero interpolation to fill in the missing values.
    This is necessary because if one instead omits the equations for the missing projection
    (as it would be intuitive) the regularized problem will have fewer constraints and the
    respective pixels will be set to zero or very low numbers.
    Input image given to Design Matrix function has to be symmetrical.
    '''
    # Bug fix: the original read `prepare_design_symmetry_masked(D, mask)` where `D`
    # was undefined (the parameter is `design_matrix`), raising NameError on every call.
    D = prepare_design_symmetry_masked(design_matrix, mask)
    final_proj = prepare_observations_symmetry(projections, xs, first_points, projs_len, verbose=verbose)
    return D, final_proj
# From the images Simone took I calculated the pixel/mm ratio
# using ImageJ and the known distance of the mold
#
# 46.35 pixels/mm
# for the file
# plateMean_finalmask.png
#
# Using this pixel ratio I estimate the brain slice size
# MaxDiameter = 8.3 mm
# minDiameter = 4.9 mm
|
{
"alphanum_fraction": 0.6544163164,
"author": null,
"avg_line_length": 40.924,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "914cd6091f00d72eddb08cfd2c26ba03e597821b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-08-20T14:12:00.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-08-20T14:12:00.000Z",
"max_forks_repo_head_hexsha": "773d0116632cb610c3f3b8c34d66e2577e4209c4",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "lamanno-epfl/tomographer",
"max_forks_repo_path": "tomography/core.py",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "773d0116632cb610c3f3b8c34d66e2577e4209c4",
"max_issues_repo_issues_event_max_datetime": "2022-02-11T00:28:13.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-02-06T03:14:51.000Z",
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "lamanno-epfl/tomographer",
"max_issues_repo_path": "tomography/core.py",
"max_line_length": 234,
"max_stars_count": 12,
"max_stars_repo_head_hexsha": "773d0116632cb610c3f3b8c34d66e2577e4209c4",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "lamanno-epfl/tomographer",
"max_stars_repo_path": "tomography/core.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-10T18:03:01.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-08-04T08:07:04.000Z",
"num_tokens": 7762,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 30693
}
|
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import Nn
from utils.sth import sth
from utils.tf2_utils import get_TensorSpecs, gaussian_clip_rsample, gaussian_likelihood_sum, gaussian_entropy
from Algorithms.tf2algos.base.on_policy import On_Policy
class PG(On_Policy):
    """REINFORCE (vanilla policy gradient) on whitened discounted returns.

    Supports continuous action spaces (Gaussian policy with a state-independent,
    trainable log-std) and discrete ones (categorical policy over logits).
    """

    def __init__(self,
                 s_dim,
                 visual_sources,
                 visual_resolution,
                 a_dim_or_list,
                 is_continuous,
                 lr=5.0e-4,
                 epoch=5,
                 hidden_units={
                     'actor_continuous': [32, 32],
                     'actor_discrete': [32, 32]
                 },
                 **kwargs):
        super().__init__(
            s_dim=s_dim,
            visual_sources=visual_sources,
            visual_resolution=visual_resolution,
            a_dim_or_list=a_dim_or_list,
            is_continuous=is_continuous,
            **kwargs)
        self.epoch = epoch
        self.TensorSpecs = get_TensorSpecs([self.s_dim], self.visual_dim, [self.a_counts], [1])
        self.visual_net = Nn.VisualNet('visual_net', self.visual_dim)
        if self.is_continuous:
            # Bug fix: was `visual_net=self.actor_visual_net`, an attribute never
            # assigned anywhere in this class (the net above is `self.visual_net`),
            # which raised AttributeError on construction.
            self.net = Nn.actor_mu(self.s_dim, self.visual_dim, self.a_counts, 'pg_net', hidden_units['actor_continuous'], visual_net=self.visual_net)
            # State-independent log standard deviation, optimized jointly with the net
            self.log_std = tf.Variable(initial_value=-0.5 * np.ones(self.a_counts, dtype=np.float32), trainable=True)
            self.net.tv += [self.log_std]
        else:
            self.net = Nn.actor_discrete(self.s_dim, self.visual_dim, self.a_counts, 'pg_net', hidden_units['actor_discrete'], visual_net=self.visual_net)
        # Learning rate decays polynomially (linearly) from `lr` towards ~0 over max_episode
        self.lr = tf.keras.optimizers.schedules.PolynomialDecay(lr, self.max_episode, 1e-10, power=1.0)
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr(self.episode))

    def show_logo(self):
        # Startup ASCII-art banner (string kept byte-identical)
        self.recorder.logger.info('''
    xxxxxxxx       xxxxxx
       xx  xx     xxx  xx
       x    x     xx    x
       x    xx    xx
       xxxxxx     x   xxxxx
       x          xx     xx
       x           x     x
       x           xx   xx
      xxxxx         xxxxxx
                        xx
 ''')

    def choose_action(self, s, visual_s, evaluation=False):
        """Sample actions for a batch of observations.

        Returns raw continuous actions, or discrete action indices converted
        from the flat categorical sample via sth.int2action_index.
        """
        a = self._get_action(s, visual_s, evaluation).numpy()
        return a if self.is_continuous else sth.int2action_index(a, self.a_dim_or_list)

    @tf.function
    def _get_action(self, s, visual_s, evaluation):
        # Forward pass through the policy net, then sample from the resulting distribution
        s, visual_s = self.cast(s, visual_s)
        with tf.device(self.device):
            if self.is_continuous:
                mu = self.net(s, visual_s)
                sample_op, _ = gaussian_clip_rsample(mu, self.log_std)
            else:
                logits = self.net(s, visual_s)
                norm_dist = tfp.distributions.Categorical(logits)
                sample_op = norm_dist.sample()
        return sample_op

    def calculate_statistics(self):
        """Compute per-step discounted returns and whiten them (zero mean, unit std).

        NOTE(review): np.std(a) can be 0 for a constant-return episode, producing
        inf/nan here — confirm upstream data makes this impossible.
        """
        self.data['total_reward'] = sth.discounted_sum(self.data.r.values, 1, 0, self.data.done.values)
        a = np.asarray(sth.discounted_sum(self.data.r.values, self.gamma, 0, self.data.done.values))
        a -= np.mean(a)
        a /= np.std(a)
        self.data['discounted_reward'] = list(a)

    def get_sample_data(self, index):
        """Slice one minibatch [index, index + batch_size) out of the episode buffer."""
        i_data = self.data.iloc[index:index + self.batch_size]
        s = np.vstack(i_data.s.values).astype(np.float32)
        visual_s = np.vstack(i_data.visual_s.values).astype(np.float32)
        a = np.vstack(i_data.a.values).astype(np.float32)
        dc_r = np.vstack(i_data.discounted_reward.values).reshape(-1, 1).astype(np.float32)
        return s, visual_s, a, dc_r

    def learn(self, **kwargs):
        """Run `epoch` passes of minibatch policy-gradient updates over the stored episode."""
        assert self.batch_size <= self.data.shape[0], "batch_size must less than the length of an episode"
        self.episode = kwargs['episode']
        self.calculate_statistics()
        for _ in range(self.epoch):
            for index in range(0, self.data.shape[0], self.batch_size):
                s, visual_s, a, dc_r = map(tf.convert_to_tensor, self.get_sample_data(index))
                loss, entropy = self.train.get_concrete_function(
                    *self.TensorSpecs)(s, visual_s, a, dc_r)
            self.write_training_summaries(self.episode, dict([
                ['LOSS/loss', loss],
                ['Statistics/entropy', entropy],
                ['LEARNING_RATE/lr', self.lr(self.episode)]
            ]))
        self.clear()

    @tf.function(experimental_relax_shapes=True)
    def train(self, s, visual_s, a, dc_r):
        """One REINFORCE gradient step on a minibatch; returns (loss, entropy)."""
        s, visual_s, a, dc_r = self.cast(s, visual_s, a, dc_r)
        with tf.device(self.device):
            with tf.GradientTape() as tape:
                if self.is_continuous:
                    mu = self.net(s, visual_s)
                    log_act_prob = gaussian_likelihood_sum(mu, a, self.log_std)
                    entropy = gaussian_entropy(self.log_std)
                else:
                    logits = self.net(s, visual_s)
                    logp_all = tf.nn.log_softmax(logits)
                    # `a` is one-hot here, so this selects log pi(a|s) per sample
                    log_act_prob = tf.reduce_sum(tf.multiply(logp_all, a), axis=1, keepdims=True)
                    entropy = -tf.reduce_mean(tf.reduce_sum(tf.exp(logp_all) * logp_all, axis=1, keepdims=True))
                # Bug fix: REINFORCE maximizes E[log pi * R]; the optimizer minimizes,
                # so the loss must be the negated objective (the minus was missing).
                loss = -tf.reduce_mean(log_act_prob * dc_r)
            # The original duplicated identical gradient code in both if/else branches; collapsed.
            loss_grads = tape.gradient(loss, self.net.tv)
            self.optimizer.apply_gradients(zip(loss_grads, self.net.tv))
            self.global_step.assign_add(1)
            return loss, entropy
|
{
"alphanum_fraction": 0.5804853042,
"author": null,
"avg_line_length": 44,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e6e9da3e405efe066aa78c605f9c79088f82ec4c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-07-11T17:11:04.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-07-11T17:11:04.000Z",
"max_forks_repo_head_hexsha": "8fcabbbb4aa0322f8aa983be3b63a7163b914272",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "rigo93acosta/RLs",
"max_forks_repo_path": "Algorithms/tf2algos/pg.py",
"max_issues_count": 7,
"max_issues_repo_head_hexsha": "8fcabbbb4aa0322f8aa983be3b63a7163b914272",
"max_issues_repo_issues_event_max_datetime": "2022-02-10T01:26:53.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-09-26T00:43:02.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "rigo93acosta/RLs",
"max_issues_repo_path": "Algorithms/tf2algos/pg.py",
"max_line_length": 160,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "334df1e8afbfff3544413ade46fb12f03556014b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Abluceli/HRG-SAC",
"max_stars_repo_path": "Algorithms/tf2algos/pg.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-11T03:15:25.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-05-20T07:10:17.000Z",
"num_tokens": 1548,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5852
}
|
'''
Author: Tobi and Gundram
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals
from random import shuffle
from util.STR2CTC import get_charmap_lp, get_charmap_lp_inv
import os
import time
import numpy as np
import matplotlib.pyplot as plt
# Goes down to 10%
INPUT_PATH_TRAIN = './private/lists/lp_only_train.lst'
INPUT_PATH_VAL = './private/lists/lp_only_val.lst'

cm, nClasses = get_charmap_lp()
# Additional NaC Channel
nClasses += 1
nEpochs = 15
batchSize = 4
learningRate = 0.001
momentum = 0.9
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 48
# Depending on the size the image is cropped or zero padded
imgW = 256
channels = 1
nHiddenLSTM1 = 256

os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
# Bug fix: use floor division — these counts are used as loop bounds, and in
# Python 3 plain `/` yields a float (TypeError in range()); `//` is identical in Python 2.
stepsPerEpocheTrain = len(trainList) // batchSize
valList = read_image_list(INPUT_PATH_VAL)
stepsPerEpocheVal = len(valList) // batchSize
def inference(images, seqLen):
    """Build the CNN + BLSTM + logit graph used for CTC training/decoding.

    Args:
        images: float tensor of shape (batchSize, imgH, imgW, channels).
        seqLen: int tensor of shape (batchSize,), the unpadded horizontal
            extent (in pixels) of each image.

    Returns:
        logits3d: time-major float tensor (timeSteps, batchSize, nClasses),
            as required by TensorFlow's CTC ops.
        seqLenAfterConv: int32 tensor (batchSize,), seqLen rescaled to match
            the horizontal downsampling of the conv/pool layers.
    """
    with tf.variable_scope('conv1') as scope:
        # 6x5 kernel, stride 4 vertically / 3 horizontally
        kernel = tf.Variable(tf.truncated_normal([6, 5, channels, 32], stddev=5e-2), name='weights')
        ##Weight Decay?
        # weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
        # tf.add_to_collection('losses', weight_decay)
        conv = tf.nn.conv2d(images, kernel, [1, 4, 3, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        # _activation_summary(conv1)
        norm1 = tf.nn.local_response_normalization(conv1, name='norm1')
        # Track the sequence-length shrink caused by the horizontal stride of 3 (~ *0.33)
        seqFloat = tf.to_float(seqLen)
        seqL2 = tf.ceil(seqFloat * 0.33)
    with tf.variable_scope('conv2') as scope:
        kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2), name='weights')
        ##Weight Decay?
        # weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
        # tf.add_to_collection('losses', weight_decay)
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        # _activation_summary(conv2)
        # norm2
        # norm2 = tf.nn.local_response_normalization(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
        # Pooling: 4x vertically, 2x horizontally
        pool2 = tf.nn.max_pool(conv2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
        # Horizontal pooling stride of 2 halves the sequence length again
        seqL3 = tf.ceil(seqL2 * 0.5)
    with tf.variable_scope('conv3') as scope:
        kernel = tf.Variable(tf.truncated_normal([5, 3, 64, 128], stddev=5e-2), name='weights')
        ##Weight Decay?
        # weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
        # tf.add_to_collection('losses', weight_decay)
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name=scope.name)
        # Vertical-only pooling; NOTE(review): name 'pool2' duplicates the previous scope's op name
        pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME', name='pool2')
        # NO POOLING HERE -> CTC needs an appropriate length.
        seqLenAfterConv = tf.to_int32(seqL3)
    with tf.variable_scope('RNN_Prep') as scope:
        # (#batch Y X Z) --> (X #batch Y Z)
        rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
        # (X #batch Y Z) --> (X #batch Y*Z)
        shape = rnnIn.get_shape()
        steps = shape[0]
        rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
        # (X #batch Y*Z) --> (X*#batch Y*Z)
        shape = rnnIn.get_shape()
        rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
        # (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
        rnnIn = tf.split(0, steps, rnnIn)
    with tf.variable_scope('BLSTM1') as scope:
        # One bidirectional LSTM layer over the time (width) axis
        forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
        backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
        outputs, _, _ = bidirectional_rnn(forwardH1, backwardH1, rnnIn, dtype=tf.float32)
        # Separate the concatenated forward/backward outputs into axis 1 of size 2
        fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
        # outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
        # Merge the two directions by summing them
        outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
    with tf.variable_scope('LOGIT') as scope:
        # Shared linear projection from the LSTM output to per-class logits (He-style init)
        weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
                                                         stddev=np.sqrt(2.0 / nHiddenLSTM1)))
        biasesClasses = tf.Variable(tf.zeros([nClasses]))
        logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
        # Stack the per-timestep logits into a single time-major 3-D tensor
        logits3d = tf.pack(logitsFin)
    return logits3d, seqLenAfterConv
def loss(logits3d, tgt, seqLenAfterConv):
    """Mean per-example CTC loss for one batch.

    :param logits3d: time-major logits tensor as produced by ``inference``.
    :param tgt: ``tf.SparseTensor`` holding the target label sequences.
    :param seqLenAfterConv: per-example sequence lengths after the conv stack.
    :return: scalar tensor, the batch-mean CTC loss.
    """
    # NOTE(review): the module-level code below rebinds the name `loss` to the
    # tensor this returns; the function is only ever called once.
    return tf.reduce_mean(ctc.ctc_loss(logits3d, tgt, seqLenAfterConv))
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
    #### Graph input
    # Fixed-size image batch: (batch, height, width, channels).
    inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, channels))
    # Components of the sparse CTC target tensor, fed separately per batch.
    targetIxs = tf.placeholder(tf.int64)
    targetVals = tf.placeholder(tf.int32)
    targetShape = tf.placeholder(tf.int64)
    targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
    # FIX: `shape=(batchSize)` is just an int, not a tuple -- spell the 1-D
    # shape explicitly so the intended rank is unambiguous.
    seqLengths = tf.placeholder(tf.int32, shape=(batchSize,))
    logits3d, seqAfterConv = inference(inputX, seqLengths)
    # NOTE(review): this rebinds `loss`, shadowing the function of the same
    # name; kept because the training loop below fetches this tensor as `loss`.
    loss = loss(logits3d, targetY, seqAfterConv)
    optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
    # pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
    pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
    # Label error rate: summed (unnormalized) edit distance divided by the
    # total number of target labels in the batch.
    edist = tf.edit_distance(pred, targetY, normalize=False)
    tgtLens = tf.to_float(tf.size(targetY.values))
    err = tf.reduce_sum(edist) / tgtLens
    saver = tf.train.Saver()
# Run the training loop: for each epoch, one pass over the shuffled training
# list followed by one pass over the validation list, then save a checkpoint.
with tf.Session(graph=graph) as session:
    # writer = tf.train.SummaryWriter('./log', session.graph)
    print('Initializing')
    tf.global_variables_initializer().run()
    # Optional: restore from an earlier checkpoint and run a one-off
    # validation preview with per-sample decoding (left disabled).
    # ckpt = tf.train.get_checkpoint_state("./private/models/lp2/")
    # if ckpt and ckpt.model_checkpoint_path:
    #     saver.restore(session, ckpt.model_checkpoint_path)
    #     print(ckpt)
    # workList = valList[:]
    # errV = 0
    # lossV = 0
    # timeVS = time.time()
    # cmInv = get_charmap_lp_inv()
    # for bStep in range(stepsPerEpocheVal):
    #     bList, workList = workList[:batchSize], workList[batchSize:]
    #     batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
    #                                                                                                     imgW,
    #                                                                                                     mvn=True)
    #     feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
    #                 targetShape: batchTargetShape, seqLengths: batchSeqLengths}
    #     lossB, aErr, p = session.run([loss, err, pred], feed_dict=feedDict)
    #     print(aErr)
    #     res = []
    #     for idx in p.values:
    #         res.append(cmInv[idx])
    #     print(res)
    #     # print(p)
    #     plt.imshow(batchInputs[0,:,:,0], cmap=plt.cm.gray)
    #     plt.show()
    #
    #     lossV += lossB
    #     errV += aErr
    # print('Val: CTC-loss ', lossV)
    # errVal = errV / stepsPerEpocheVal
    # print('Val: CER ', errVal)
    # print('Val time ', time.time() - timeVS)
    for epoch in range(nEpochs):
        # Fresh shuffled copy of the training file list each epoch.
        workList = trainList[:]
        shuffle(workList)
        print('Epoch', epoch + 1, '...')
        lossT = 0
        errT = 0
        timeTS = time.time()
        for bStep in range(stepsPerEpocheTrain):
            # Pop the next batchSize entries off the work list.
            bList, workList = workList[:batchSize], workList[batchSize:]
            batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
                                                                                                             imgW,
                                                                                                             mvn=True)
            feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
                        targetShape: batchTargetShape, seqLengths: batchSeqLengths}
            # One optimizer step; also fetch loss and label error rate.
            _, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
            # _, lossB, aErr, sET, sLT = session.run([optimizer, loss, err, err_train, loss_train], feed_dict=feedDict)
            lossT += lossB
            # writer.add_summary(sET, epoch * stepsPerEpocheTrain + bStep)
            # writer.add_summary(sLT, epoch * stepsPerEpocheTrain + bStep)
            errT += aErr
        print('Train: CTC-loss ', lossT)
        cerT = errT / stepsPerEpocheTrain
        print('Train: CER ', cerT)
        print('Train time ', time.time() - timeTS)
        # Validation pass: same batching, but no optimizer step.
        workList = valList[:]
        errV = 0
        lossV = 0
        timeVS = time.time()
        for bStep in range(stepsPerEpocheVal):
            bList, workList = workList[:batchSize], workList[batchSize:]
            batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
                                                                                                             imgW,
                                                                                                             mvn=True)
            feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
                        targetShape: batchTargetShape, seqLengths: batchSeqLengths}
            lossB, aErr = session.run([loss, err], feed_dict=feedDict)
            # lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
            # writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
            # writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
            lossV += lossB
            errV += aErr
        print('Val: CTC-loss ', lossV)
        errVal = errV / stepsPerEpocheVal
        print('Val: CER ', errVal)
        print('Val time ', time.time() - timeVS)
        # Write a checkpoint.
        checkpoint_file = os.path.join('./private/models/lp6/', 'checkpoint')
        saver.save(session, checkpoint_file, global_step=epoch)
# Defining graph
# Initializing
# Epoch 1 ...
# Train: CTC-loss 129009.017706
# Train: CER 0.635904513293
# Train time 4908.49444389
# Val: CTC-loss 1641.79976816
# Val: CER 0.0801813207567
# Val time 244.049314976
# Epoch 2 ...
# Train: CTC-loss 16020.608585
# Train: CER 0.0717145665077
# Train time 7330.24510384
# Val: CTC-loss 1204.36847229
# Val: CER 0.0566576011727
# Val time 245.118979931
# Epoch 3 ...
# Train: CTC-loss 12435.9589674
# Train: CER 0.0558677665295
# Train time 7285.28540993
# Val: CTC-loss 1003.13010596
# Val: CER 0.0471066227357
# Val time 242.016130924
# Epoch 4 ...
# Train: CTC-loss 11060.2886085
# Train: CER 0.0499579166048
# Train time 7326.90888286
# Val: CTC-loss 969.390615069
# Val: CER 0.0463109914263
# Val time 245.883394003
# Epoch 5 ...
# Train: CTC-loss 10113.6315179
# Train: CER 0.0457048515265
# Train time 7260.16503906
# Val: CTC-loss 964.054605111
# Val: CER 0.0448569302758
# Val time 245.195471048
# Epoch 6 ...
# Train: CTC-loss 9361.70014321
# Train: CER 0.042607394019
# Train time 7276.95676613
# Val: CTC-loss 942.684666969
# Val: CER 0.0438320938696
# Val time 239.865092039
# Epoch 7 ...
# Train: CTC-loss 8693.04606334
# Train: CER 0.0398572982518
# Train time 6516.15737796
# Val: CTC-loss 930.6919411
# Val: CER 0.0422663276643
# Val time 220.383415222
# Epoch 8 ...
# Train: CTC-loss 8161.14864806
# Train: CER 0.0377375896172
# Train time 6433.12666297
# Val: CTC-loss 933.970610965
# Val: CER 0.0425528454781
# Val time 192.637362957
# Epoch 9 ...
# Train: CTC-loss 7658.31400694
# Train: CER 0.0357702803461
# Train time 5611.53865314
# Val: CTC-loss 944.544853458
# Val: CER 0.0428228211651
# Val time 152.010342121
# Epoch 10 ...
# Train: CTC-loss 7171.84027007
# Train: CER 0.0337837695306
# Train time 5177.82906294
# Val: CTC-loss 941.78110862
# Val: CER 0.0423581593285
# Val time 184.70659399
# Epoch 11 ...
# Train: CTC-loss 6820.79927806
# Train: CER 0.0323584240315
# Train time 5460.32187796
# Val: CTC-loss 987.756852884
# Val: CER 0.044123320813
# Val time 153.327903986
# Epoch 12 ...
# Train: CTC-loss 6330.18515219
# Train: CER 0.030294881605
# Train time 5040.84565091
# Val: CTC-loss 971.562253463
# Val: CER 0.0413985775958
# Val time 167.768498898
# Epoch 13 ...
# Train: CTC-loss 5951.8420738
# Train: CER 0.0285477739336
# Train time 5047.84928107
# Val: CTC-loss 1012.34960045
# Val: CER 0.0429205714911
# Val time 167.878767014
# Epoch 14 ...
# Train: CTC-loss 5679.90946481
# Train: CER 0.0276907928977
# Train time 5026.46480107
# Val: CTC-loss 1040.27236869
# Val: CER 0.0416939370632
# Val time 166.396095991
# Epoch 15 ...
# Train: CTC-loss 5316.91454479
# Train: CER 0.0258935857246
# Train time 5059.73199415
# Val: CTC-loss 1048.74418164
# Val: CER 0.0438409063319
# Val time 166.044019938
|
{
"alphanum_fraction": 0.6295971979,
"author": null,
"avg_line_length": 41.1531531532,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f3590afe3559ebb4206f52c53f81fa875960f255",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e68e213f490c3fbf3365878db3aa8661617adb90",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "gundramleifert/exp_tf",
"max_forks_repo_path": "models/lp/bdlstm_lp_v6.py",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "e68e213f490c3fbf3365878db3aa8661617adb90",
"max_issues_repo_issues_event_max_datetime": "2017-03-14T08:35:55.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-12-16T09:40:24.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "gundramleifert/exp_tf",
"max_issues_repo_path": "models/lp/bdlstm_lp_v6.py",
"max_line_length": 119,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e68e213f490c3fbf3365878db3aa8661617adb90",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "gundramleifert/exp_tf",
"max_stars_repo_path": "models/lp/bdlstm_lp_v6.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4240,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 13704
}
|
# Coefficients calculated with https://github.com/simonbyrne/Remez.jl
@inline function approx_sin8(x::Union{T,Vec{<:Any,T},VecUnroll{<:Any,<:Any,T}}) where {T <: Real}
    # Degree-8 minimax polynomial in x² approximating
    # (xʳ = sqrt(x); sin((xʳ*π)/2)/xʳ); multiplying by x at the end gives the
    # sine approximation. Coefficients from Remez.jl (see comment above).
    x² = x * x
    c0 = T(2.22144146907918312350794048535203995923494010677251491220479906920966593121882)
    c1 = T(-0.9135311874994298224944388934705417261765270518848695099428083902179199377101094)
    c2 = T(0.1127023928584587596980569269678174942915399051122642981118394498722218063783927)
    c3 = T(-0.006621000193853498898990183110992108352486751535892362800909323879419896057043918)
    c4 = T(0.0002268980994233557245363541171760472387529757765245978583128895641498725296271051)
    c5 = T(-5.089532691384021959110856232473979525292167742059549332987900223626864039349914e-06)
    c6 = T(8.049906344315649609313027324977744156866597923196983008950128144505665619892402e-08)
    c7 = T(-9.453796623737636858301034347145347814693537235132105505794304057287442064404052e-10)
    c8 = T(8.320735422342537824261297491878000532726851750329165722059039816086266315937799e-12)
    # Horner evaluation, one fused multiply-add per coefficient (same operation
    # sequence as the nested form, written sequentially for readability).
    acc = vfmadd(c8, x², c7)
    acc = vfmadd(acc, x², c6)
    acc = vfmadd(acc, x², c5)
    acc = vfmadd(acc, x², c4)
    acc = vfmadd(acc, x², c3)
    acc = vfmadd(acc, x², c2)
    acc = vfmadd(acc, x², c1)
    acc = vfmadd(acc, x², c0)
    acc * x
end
# @inline function approx_sin12(x::Union{T,Vec{<:Any,T}}) where {T <: Real}
# # poly(x) ≈ (xʳ = sqrt(x); sin((xʳ*π)/2)/xʳ)
# x² = x * x
# c0 = T(1.570796326794896619231321691639751442087433306473273974291471596002143089408967)
# c1 = T(-0.6459640975062462536557565638714840878228221616991079162636960728776200926827234)
# c2 = T(0.07969262624616704512050554673779356754386556916433562280307069303594093234088455)
# c3 = T(-0.004681754135318688100685379129717344612020387136900677528796079362820120586116841)
# c4 = T(0.0001604411847873598218714490487175497697128038098258443135990371062725913378293313)
# c5 = T(-3.598843235212085330760986854968071769532101571696000087022804157171548649851493e-06)
# c6 = T(5.692172921967922014173914534767998233640265149949183614622512228877789263783426e-08)
# c7 = T(-6.688035109809916561166255796732305220204238240380007953680372148880958984800929e-10)
# c8 = T(6.066935730769290440108765932783579488881358689623800108082605751157383050893581e-12)
# c9 = T(-4.377065417731331420103035981625834800685920271728367262547966148291799276629881e-14)
# c10 = T(2.571418016198708615875917881136145309875324227233510421118020948686662677979356e-16)
# c11 = T(-1.253592449512705798908955136513569509617634496103293074276456663908804526347008e-18)
# c12 = T(5.044383456268885650704416950405914330732446213362030631794638992715099861013542e-21)
# p = vfmadd(vfmadd(vfmadd(
# vfmadd(vfmadd(vfmadd(
# vfmadd(vfmadd(vfmadd(
# vfmadd(vfmadd(vfmadd(
# c12, x², c11), x², c10),
# x², c9), x², c8), x², c7),
# x², c6), x², c5), x², c4),
# x², c3), x², c2), x², c1), x², c0)
# p * x
# end
# Largest float strictly below 2 in each precision, used when folding random
# bits into the open unit interval.
# NOTE(review): these literals appear to equal prevfloat(2.0f0) / prevfloat(2.0)
# exactly -- confirm before relying on that identity.
@inline suboneopenconst(::Type{Float32}) = 1.9999999f0
@inline suboneopenconst(::Type{Float64}) = 1.9999999999999998
# Turn raw random bits `u` into a (sin, cos) pair via a single polynomial
# evaluation of `approx_sin8` on an unrolled pair of inputs.
# Assumes `floatbitmask(u, T)` yields a float in [1, 2) built from the low
# mantissa bits of `u` -- TODO confirm against floatbitmask's definition.
@inline function randsincos(u, ::Type{T}) where {T}
    # return SLEEFPirates.sincos(mask(u, T))
    r = floatbitmask(u, T)
    ooc = oneopenconst(T)
    # sininput and cosinput are complementary offsets of r so that one
    # polynomial evaluation yields both sin and cos approximations.
    sininput = vsub(r, ooc)
    cosinput = vfnmadd(ooc, r, suboneopenconst(T))
    # Evaluate both lanes at once via VecUnroll, then unpack the pair.
    sc = data(approx_sin8(VecUnroll((sininput, cosinput))))
    # Random signs taken from the top bits of u (bit 63 for s, bit 62 for c
    # after the shift) -- presumably to restore full-circle coverage; verify.
    s = copysign( getfield(sc,1), reinterpret(T, u))
    c = copysign( getfield(sc,2), reinterpret(T, u << 1))
    s, c
end
# @inline function log12_7(x) # each extra coef cuts max error by about 6.5
# c0 = -3.245537891437475818527978529229908008038541532632077901681793316955253799627853
# c1 = 7.133969761783261596308839380142764345357825207927592180328308957632143143761325
# c2 = -7.494130176731051024066811749217581442825262641998555203442250312457063219780548
# c3 = 5.781439120377507449162563050101602606694402138734039600998751617511391254917479
# c4 = -2.985021102358628224549723815823770715482850258619037789336465429092098006063079
# c5 = 0.9780948488102108131759607721708499684604503249092972679349980253346585174787013
# c6 = -0.1839396423223307845519460189847030764334420832555490882187076088746804014578904
# c7 = 0.01512535916800840093163249616452966347663616377265149854210868241945849663079766
# vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(c7, x, c6), x, c5), x, c4), x, c3), x, c2), x, c1), x, c0)
# end
# # @inline function log12_8(x)
# # c0 = -3.425393083666334067790108512545303457895869147629969342312722110773152768787745
# # c1 = 8.154804072298105118614242101352098813998371180480781803351886784650194633423925
# # c2 = -10.00713525074289150060544073624417642119577303442852729267703418853804728270245
# # c3 = 9.285983673564252215780879208501085478907382239737051965242871871829356007506347
# # c4 = -6.013432610610964086674378270192946254591450512746857671796983783112863051993776
# # c5 = 2.638773868907455181856795850097739510981561473919572777387590013143564751620262
# # c6 = -0.7483574222662208128858646256056442436988142359531261888282163941214837199890769
# # c7 = 0.1238457029355639890633309483614995366456841215797361229753033357177669422002932
# # c8 = -0.009088908104546665130978765013488165959525861872199676157403513022150601871065725
# # vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(c8, x, c7), x, c6), x, c5), x, c4), x, c3), x, c2), x, c1), x, c0)
# # end
# @inline function log12_9(x)
# c0 = -3.585298173957385989978703196069874884252520373044145691918937267709490632531495
# c1 = 9.175512833125597682830796241005729674265545403887738180485766461445305540243307
# c2 = -12.88095295153181316823685136857741572991739006443899617826355360043323954685944
# c3 = 13.97023726212960122584115270603499243664357604160987633843608122694259681596965
# c4 = -10.88490936111543295028953131289531668167628753782395329546272132276910097503374
# c5 = 5.991068536274933212572646135712744769742009519680374067042945152617478421745127
# c6 = -2.274948730892808615344167059260983651564370908737772545840737790860551065669143
# c7 = 0.5675102681955644633507049003267428212647262631800341022730034095800899973034826
# c8 = -0.0837672704777144218198482121635843275577262111753625917138697703746721702699812
# c9 = 0.005547594786690080999583023292516375924224904367437611790985685156038375417640469
# vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(vfmadd(c9, x, c8), x, c7), x, c6), x, c5), x, c4), x, c3), x, c2), x, c1), x, c0)
# end
# @inline function log12_16(x)
# c0 = -4.37978460046975823625062481303177042964655415404536866782931434738284915486727
# c1 = 16.31871050954055333956396222100764137104164259191322821181471405491328356924757
# c2 = -43.09849769930020991569153712762384436449878854640277987997630267083267961690192
# c3 = 94.07576064264437747981356599549555160319548024066792684545068563206644131393042
# c4 = -160.2802910490630628148404579429898607373312993988526153520977924333905240690591
# c5 = 214.2893836574384742952624060466181446104100940691092327871064519814161128088174
# c6 = -227.1155113171482030403928820851457771576391114870681368560167920657167422599979
# c7 = 192.2033142217916816132668362174888403726852676823685986312496157227978471900725
# c8 = -130.2749105774541912642407178793062852794871781993373055603363370412151378757816
# c9 = 70.61291078124372952080517981043356347605704563698401990419283029704191724347773
# c10 = -30.40547369086585240971017001009032050577317283007958566465348067875074495409737
# c11 = 10.26725226239003763659962692748550577872238983137251963601200333368657729640871
# c12 = -2.660799790256174577955863634381340009240338534864386807650530364213169177140881
# c13 = 0.5109263929851040905835535339027051835170022363218384968860233384732631558306201
# c14 = -0.0684866374459784970521584206556922390960261357021144271918860042253780684140548
# c15 = 0.005721132779491026341043740576624751586837718238326317641335022781958996821344917
# c16 = -0.0002242388100013828616223303469764320113892766876681769892427408765524456008665568
# end
# @inline function log12_5_5(x)
# n0 = -6.109900562053389599719353325364255967820553763613149427966199577373249418751788
# n1 = -47.21033890517859950528344185143337802180242634918153480624598815718145572048678
# n2 = -23.72899742183309520410030528859144026073452004231769448510028309291765826686717
# n3 = 52.7756604934258744605387726053489962661521090040028567523770922726855897228027
# n4 = 23.016711670393619896797584282375610751405740717936585553335237271483200924596
# n5 = 1.256864725247499354645915543743422456498233774923938832688724623689696616505147
# d0 = 1.0
# d1 = 17.89075270582490509520473828435679777182649663126891381927673222776567771555027
# d2 = 50.90705741116772592703914300524183279313383357917248215823723397522708772511337
# d3 = 35.99672550568958966594388544740561745244861045550134035742275301908396910863221
# d4 = 6.325336279410182109220032402097269110224428178264585067800799249731808867726456
# d5 = 0.1767766952966368811002110905496910114147496513674936569539785785207122898660294
# n = vfmadd(n5, x, n4)
# d = vfmadd(d5, x, d4)
# # n = vfmadd(n, x, n4)
# # d = vfmadd(d, x, d4)
# n = vfmadd(n, x, n3)
# d = vfmadd(d, x, d3)
# n = vfmadd(n, x, n2)
# d = vfmadd(d, x, d2)
# n = vfmadd(n, x, n1)
# d = vfmadd(d, x, d1)
# n = vfmadd(n, x, n0)
# d = vfmadd(d, x, d0)
# n / d
# end
# unlikely to do anything, but avoids bias when more than 12 of the leading bits are 0
# @inline function shift_excess_zeros(u::AbstractSIMD{W,UInt64}, lz::AbstractSIMD{W,UInt64}) where {W}
@inline function shift_excess_zeros(u, lz)
    # Inputs with more than 11 leading zero bits (`lz` precomputed by the
    # caller) get the excess zeros shifted out; others pass through untouched.
    excess = reinterpret(Int64, lz) - 11
    shifted = u << reinterpret(UInt64, excess)
    ifelse(excess > 0, shifted, u)
end
# @inline function shift_excess_zerosv2(u::AbstractSIMD{W,UInt64}, lz::AbstractSIMD{W,UInt64}) where {W}
# lzsub = reinterpret(Int64, lz) - 11
# ( u << reinterpret(UInt64, max( vzero(Vec{W,Int64}), lzsub ) ))
# end
# @inline function nlog01v2(u::AbstractSIMD{W,UInt64}, ::Type{Float64}) where {W}
# lz = leading_zeros( u )
# # f = mask(u, Float64) # shift by lz
# f = mask(shift_excess_zeros(u, lz), Float64) # shift by lz
# # l2h = log12_9(f)
# l2h = log12_5_5(f)
# l2 = l2h - (lz + 1)
# -0.6931471805599453 * l2
# end
# Polynomial core of a base-2 logarithm: given a reduced argument `v` and an
# exponent-like correction `e`, combines a degree-13 odd polynomial in `v`
# with a compensated (error-free) accumulation of e + v*2.8853900817779268.
# NOTE(review): 2.8853900817779268 ≈ 4/ln(2)... presumably 2/ln(2)·? -- the
# exact constant's role follows from the caller's argument reduction; confirm
# there. The statement order implements compensated summation; do not reorder.
@inline function log2_3q(v, e)
    T = eltype(v)
    m1 = v * v
    # Horner chain in v² for the odd-polynomial correction term.
    fma1 = muladd(m1, T(0.22119417504560815), T(0.22007686931522777))
    fma2 = muladd(fma1, m1, T(0.26237080574885147))
    fma3 = muladd(fma2, m1, T(0.32059774779444955))
    fma4 = muladd(fma3, m1, T(0.41219859454853247))
    fma5 = muladd(fma4, m1, T(0.5770780162997059))
    fma6 = muladd(fma5, m1, T(0.9617966939260809))
    m2 = v * T(2.8853900817779268)
    # vfmsub(v, c, v*c) recovers the exact rounding error of the product m2.
    fma7 = VectorizationBase.vfmsub(v, T(2.8853900817779268), m2)
    # Two-sum style compensation: a1 is the rounded sum, a3 the residual.
    a1 = e + m2
    s1 = e - a1
    a2 = m2 + s1
    a3 = fma7 + a2
    m3 = v * m1
    a4 = a1 + a3
    muladd(fma6, m3, a4)
end
# Negative natural log of a uniform variate built from random bits `u`
# (the standard exponential transform -log(U), U in (0,1]).
@inline function nlog01(u, ::Type{T}) where {T}
    lz = reinterpret(Base.uinttype(T), leading_zeros( u ))
    # Normalize away excess leading zeros before masking to a float, to avoid
    # bias when more than 11 leading bits are zero (see shift_excess_zeros).
    f = floatbitmask(shift_excess_zeros(u, lz), T) # shift by lz
    # Argument reduction: map f into a small symmetric interval around 0
    # via (f - 4/3) / (f + 4/3).
    f = ( f - T(1.3333333333333333) ) / ( f + T(1.3333333333333333) )
    # l2h = log12_9(f)
    # The constant is log2(2/3), folded together with the leading-zero count
    # into the exponent term of the log2 kernel.
    l2 = log2_3q(f, T(-0.5849625007211561814537389439478165087598144076924810604557526545410982277943579) - lz)
    # Scale by -ln(2) to convert -log2 to -ln.
    T(-0.6931471805599453) * l2
end
# TODO: Add support for Float32
# @inline function nlog01(u::AbstractSIMD{W,UInt64}, ::Type{Float32}) where {W}
# -log(mask(u, Float32) - oneopenconst(Float32))
# end
|
{
"alphanum_fraction": 0.7663337847,
"author": null,
"avg_line_length": 56.2666666667,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "120ca3bd5354b15f11e31dc14e568f9337ad9f86",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 5,
"max_forks_repo_forks_event_max_datetime": "2020-12-12T07:12:48.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-04-05T03:09:40.000Z",
"max_forks_repo_head_hexsha": "7d39e80e782adc505087e6902921a6f608ef64c8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "chriselrod/VectorizedRNG.jl",
"max_forks_repo_path": "src/special_approximations.jl",
"max_issues_count": 9,
"max_issues_repo_head_hexsha": "7d39e80e782adc505087e6902921a6f608ef64c8",
"max_issues_repo_issues_event_max_datetime": "2022-02-21T00:30:06.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-19T00:11:28.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "JuliaSIMD/VectorizedRNG.jl",
"max_issues_repo_path": "src/special_approximations.jl",
"max_line_length": 143,
"max_stars_count": 11,
"max_stars_repo_head_hexsha": "7d39e80e782adc505087e6902921a6f608ef64c8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "chriselrod/VectorizedRNG.jl",
"max_stars_repo_path": "src/special_approximations.jl",
"max_stars_repo_stars_event_max_datetime": "2020-12-20T22:29:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-05T03:09:29.000Z",
"num_tokens": 4691,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 11816
}
|
import re
from collections import OrderedDict
import pytest
import numpy as np
from tests.test_commons.base import mixin_suite
import plums.commons.data as data
import plums.commons.data.mixin
from plums.commons.data.taxonomy import Label, Taxonomy
@pytest.fixture(params=('ordered-dict', 'tile-collection'))
def tiles(request):
    """Yield a single-tile mapping, parametrized as OrderedDict or TileCollection."""
    wrapped = data.TileWrapper(np.zeros((5, 5, 3)),
                               filename='somefile.png',
                               some_property='some_value')
    if request.param == 'ordered-dict':
        return OrderedDict((('tile', wrapped), ))
    return data.TileCollection(('tile', wrapped))
class TestBase:
    """Sanity checks for the data package's structural ABCs and mix-in helpers."""

    def test_array_interfaced(self):
        import numpy as np

        # A real numpy array satisfies the ArrayInterfaced ABC.
        array = np.arange(10)
        assert isinstance(array, data.ArrayInterfaced)

        # Any object exposing __array_interface__ does too, even a bare stub.
        class Stub(object):
            @property
            def __array_interface__(self):
                return None

        stub = Stub()
        assert isinstance(stub, data.ArrayInterfaced)

    def test_array(self):
        import numpy as np

        # _Array is stricter than ArrayInterfaced: numpy arrays pass...
        array = np.arange(10)
        assert isinstance(array, data.base._Array)

        # ...but a bare __array_interface__ stub does not.
        class Stub(object):
            @property
            def __array_interface__(self):
                return None

        stub = Stub()
        assert not isinstance(stub, data.base._Array)

    def test_geo_interfaced(self):
        import geojson

        # A parsed geojson geometry satisfies GeoInterfaced and validates.
        point = geojson.loads('{"type":"Point", "coordinates":[0, 5, 6]}')
        assert isinstance(point, data.GeoInterfaced)
        assert point.is_valid

        # Any object exposing __geo_interface__ also satisfies the ABC.
        class Stub(object):
            @property
            def __geo_interface__(self):
                return None

        stub = Stub()
        assert isinstance(stub, data.GeoInterfaced)

    def test_property_container(self):
        # A bare PropertyContainer must construct and pass the shared suite.
        container = plums.commons.data.mixin.PropertyContainer()
        mixin_suite(container)  # Base validity tests

    def test_id_mixin(self):
        # A bare IdentifiedMixIn must construct and pass the shared suite.
        identified = plums.commons.data.mixin.IdentifiedMixIn()
        mixin_suite(identified)  # Base validity tests
class TestRecord:
def test_record(self):
import geojson
taxonomy = Taxonomy(Label('road vehicle', children=(Label('car'), )))
incomplete_taxonomy = Taxonomy(Label('road vehicle'))
with pytest.raises(ValueError, match='Expected at least 1 label'):
r = data.Record([0, 2, 3], [])
r = data.Record([0, 1, 2], ['car', 'road vehicle'], some_property='some property', another_property=45)
mixin_suite(r) # Base validity tests
assert hasattr(r, 'id')
assert re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\Z', r.id)
# Check label fetch w/o taxonomy
assert all(isinstance(n, Label) for n in r.labels)
with pytest.raises(ValueError, match='No taxonomy exists for this record but a max_depth was provided'):
r.get_labels(max_depth=1)
assert all(isinstance(n, Label) for n in r.get_labels())
# Check label fetch w/ incomplete taxonomy
r.taxonomy = incomplete_taxonomy
with pytest.raises(KeyError):
assert all(isinstance(n, Label) for n in r.labels)
# Check label fetch w/ taxonomy
r.taxonomy = taxonomy
assert all(isinstance(n, Label) for n in r.labels)
assert r.labels[0].parent.id == taxonomy.road_vehicle.root.id
assert r.labels[1].id == taxonomy.road_vehicle.root.id
assert hasattr(r, 'some_property')
assert hasattr(r, 'another_property')
assert r.is_valid
assert r.type == 'Point'
assert geojson.dumps(r, sort_keys=True) == '{"geometry": {"coordinates": [0, 1, 2], "type": "Point"}, ' \
'"properties": {"another_property": 45, ' \
'"category": ["car", "road vehicle"], ' \
'"confidence": null, "some_property": "some property"}, ' \
'"type": "Feature"}'
assert geojson.dumps(r.to_geojson(style='export-service'), sort_keys=True) \
== '{"geometry": {"coordinates": [0, 1, 2], "type": "Point"}, ' \
'"properties": {"another_property": 45, ' \
'"score": null, ' \
'"some_property": "some property", ' \
'"tags": "car,road vehicle"}, ' \
'"type": "Feature"}'
r = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['car', 'road vehicle'],
some_property='some property', another_property=45)
assert hasattr(r, 'some_property')
assert hasattr(r, 'another_property')
assert r.is_valid
assert r.type == 'Polygon'
assert geojson.dumps(r, sort_keys=True) == '{"geometry": {"coordinates": [[[0, 0], [0, 1], [1, 1], [0, 0]]], ' \
'"type": "Polygon"}, ' \
'"properties": {"another_property": 45, ' \
'"category": ["car", "road vehicle"], ' \
'"confidence": null, "some_property": "some property"}, ' \
'"type": "Feature"}'
assert geojson.dumps(r.to_geojson(style='export-service'), sort_keys=True) \
== '{"geometry": {"coordinates": [[[0, 0], [0, 1], [1, 1], [0, 0]]], ' \
'"type": "Polygon"}, ' \
'"properties": {"another_property": 45, ' \
'"score": null, ' \
'"some_property": "some property", ' \
'"tags": "car,road vehicle"}, ' \
'"type": "Feature"}'
def test_record_collection_with_taxonomy_init(self): # noqa: R701
import geojson
taxonomy = Taxonomy(Label('road vehicle'), Label('car'))
invalid_taxonomy = Taxonomy(Label('road vehicle', children=(Label('car'), )), Label('other'))
different_taxonomy = Taxonomy(Label('road vehicle'), Label('other'))
incomplete_taxonomy = Taxonomy(Label('road vehicle'))
r = data.Record([0, 1, 2], ['car', 'road vehicle'], some_property='some property', another_property=45)
with pytest.raises(ValueError, match='Expected at most'):
rc = data.RecordCollection(r, taxonomy=incomplete_taxonomy)
with pytest.raises(ValueError, match='are not part of the taxonomy'):
rc = data.RecordCollection(r, taxonomy=different_taxonomy)
with pytest.raises(ValueError, match='Some labels are part of the same true-root subtree'):
rc = data.RecordCollection(r, taxonomy=invalid_taxonomy)
rc = data.RecordCollection(r, taxonomy=taxonomy)
mixin_suite(rc) # Base validity tests
assert hasattr(rc, 'id')
assert re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\Z', rc.id)
assert len(rc) == 1
assert rc[0] == r
r2 = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['car', 'truck'],
some_property='some property', another_property=45)
with pytest.raises(ValueError, match='are not part of the taxonomy'):
rc.append(r2)
rc.taxonomy = Taxonomy(Label('road vehicle', children=(Label('truck'), )), Label('car'))
rc.append(r2)
assert rc.get()[0].labels == (Label('car'), Label('road vehicle'))
assert rc.get()[1].labels == (Label('car'), Label('truck'))
assert tuple(label.labels for label in rc.get()[0:]) == (rc.get()[0].labels,
rc.get()[1].labels)
assert rc.get(max_depth=1)[0].labels == (Label('car'), Label('road vehicle'))
assert rc.get(max_depth=1)[1].labels == (Label('car'), Label('road vehicle'))
assert tuple(label.labels for label in rc.get(max_depth=1)[0:]) == (rc.get(max_depth=1)[0].labels,
rc.get(max_depth=1)[1].labels)
assert rc.get(max_depth={'road vehicle': 0})[0].labels == (Label('car'), Label('road vehicle'))
assert rc.get(max_depth={'road vehicle': 0})[1].labels == (Label('car'), Label('road vehicle'))
assert tuple(label.labels for label in rc.get(max_depth={'road vehicle': 0})[0:]) \
== (rc.get(max_depth={'road vehicle': 0})[0].labels,
rc.get(max_depth={'road vehicle': 0})[1].labels)
assert rc.get(max_depth={'car': 0})[0].labels == (Label('car'), Label('road vehicle'))
assert rc.get(max_depth={'car': 0})[1].labels == (Label('car'), Label('truck'))
assert tuple(label.labels for label in rc.get(max_depth={'car': 0})[0:]) \
== (rc.get(max_depth={'car': 0})[0].labels,
rc.get(max_depth={'car': 0})[1].labels)
assert len(rc) == 2
assert rc[1] == r2
assert geojson.dumps(rc, sort_keys=True) == '{"features": [' \
'{"geometry": {"coordinates": [0, 1, 2], "type": "Point"}, ' \
'"properties": {"another_property": 45, ' \
'"category": ["car", "road vehicle"], ' \
'"confidence": null, "some_property": "some property"}, ' \
'"type": "Feature"}, ' \
'{"geometry": {"coordinates": [[[0, 0], [0, 1], [1, 1], [0, 0]]], '\
'"type": "Polygon"}, ' \
'"properties": {"another_property": 45, ' \
'"category": ["car", "truck"], ' \
'"confidence": null, ' \
'"some_property": "some property"}, "type": "Feature"}], ' \
'"type": "FeatureCollection"}'
with pytest.raises(ValueError, match='are not part of'):
rc[1] = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['car', 'trucks'],
some_property='some property', another_property=45)
with pytest.raises(ValueError, match='Some labels are part of the same true-root subtree'):
rc[1] = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['road vehicle', 'truck'],
some_property='some property', another_property=45)
rc[1] = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['car'],
some_property='some property', another_property=45)
assert rc[1].labels == (Label('car'), )
def test_record_collection_with_taxonomy_add(self): # noqa: R701
import geojson
taxonomy = Taxonomy(Label('road vehicle'), Label('car'))
invalid_taxonomy = Taxonomy(Label('road vehicle', children=(Label('car'), )), Label('other'))
different_taxonomy = Taxonomy(Label('road vehicle'), Label('other'))
incomplete_taxonomy = Taxonomy(Label('road vehicle'))
r = data.Record([0, 1, 2], ['car', 'road vehicle'], some_property='some property', another_property=45)
rc = data.RecordCollection(r)
mixin_suite(rc) # Base validity tests
assert hasattr(rc, 'id')
assert re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\Z', rc.id)
with pytest.raises(ValueError, match='Expected at most'):
rc.taxonomy = incomplete_taxonomy
with pytest.raises(ValueError, match='are not part of the taxonomy'):
rc.taxonomy = different_taxonomy
with pytest.raises(ValueError, match='Some labels are part of the same true-root subtree'):
rc.taxonomy = invalid_taxonomy
rc.taxonomy = taxonomy
assert len(rc) == 1
assert rc[0] == r
r2 = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['car', 'truck'],
some_property='some property', another_property=45)
with pytest.raises(ValueError, match='are not part of the taxonomy'):
rc.append(r2)
rc.taxonomy = Taxonomy(Label('road vehicle', children=(Label('truck'), )), Label('car'))
rc.append(r2)
assert rc.get()[0].labels == (Label('car'), Label('road vehicle'))
assert rc.get()[1].labels == (Label('car'), Label('truck'))
assert tuple(label.labels for label in rc.get()[0:]) == (rc.get()[0].labels,
rc.get()[1].labels)
assert rc.get(max_depth=1)[0].labels == (Label('car'), Label('road vehicle'))
assert rc.get(max_depth=1)[1].labels == (Label('car'), Label('road vehicle'))
assert tuple(label.labels for label in rc.get(max_depth=1)[0:]) == (rc.get(max_depth=1)[0].labels,
rc.get(max_depth=1)[1].labels)
assert rc.get(max_depth={'road vehicle': 0})[0].labels == (Label('car'), Label('road vehicle'))
assert rc.get(max_depth={'road vehicle': 0})[1].labels == (Label('car'), Label('road vehicle'))
assert tuple(label.labels for label in rc.get(max_depth={'road vehicle': 0})[0:]) \
== (rc.get(max_depth={'road vehicle': 0})[0].labels,
rc.get(max_depth={'road vehicle': 0})[1].labels)
assert rc.get(max_depth={'car': 0})[0].labels == (Label('car'), Label('road vehicle'))
assert rc.get(max_depth={'car': 0})[1].labels == (Label('car'), Label('truck'))
assert tuple(label.labels for label in rc.get(max_depth={'car': 0})[0:]) \
== (rc.get(max_depth={'car': 0})[0].labels,
rc.get(max_depth={'car': 0})[1].labels)
assert len(rc) == 2
assert rc[1] == r2
assert geojson.dumps(rc, sort_keys=True) == '{"features": [' \
'{"geometry": {"coordinates": [0, 1, 2], "type": "Point"}, ' \
'"properties": {"another_property": 45, ' \
'"category": ["car", "road vehicle"], ' \
'"confidence": null, "some_property": "some property"}, ' \
'"type": "Feature"}, ' \
'{"geometry": {"coordinates": [[[0, 0], [0, 1], [1, 1], [0, 0]]], '\
'"type": "Polygon"}, ' \
'"properties": {"another_property": 45, ' \
'"category": ["car", "truck"], ' \
'"confidence": null, ' \
'"some_property": "some property"}, "type": "Feature"}], ' \
'"type": "FeatureCollection"}'
with pytest.raises(ValueError, match='are not part of'):
rc[1] = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['car', 'trucks'],
some_property='some property', another_property=45)
with pytest.raises(ValueError, match='Some labels are part of the same true-root subtree'):
rc[1] = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['road vehicle', 'truck'],
some_property='some property', another_property=45)
rc[1] = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['car'],
some_property='some property', another_property=45)
assert rc[1].labels == (Label('car'), )
def test_record_collection_without_taxonomy(self):
    """A RecordCollection built without an explicit taxonomy grows one lazily.

    Observed behavior: every label carried by an inserted or assigned record
    is registered in ``rc.taxonomy`` as a direct child of the taxonomy root,
    so no label is ever rejected for being unknown.
    """
    import geojson
    r = data.Record([0, 1, 2], ['car', 'road vehicle'], some_property='some property', another_property=45)
    rc = data.RecordCollection(r)
    mixin_suite(rc) # Base validity tests
    # Collections expose a UUID4 string identifier.
    assert hasattr(rc, 'id')
    assert re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\Z', rc.id)
    assert len(rc) == 1
    assert rc[0] == r
    r2 = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['car', 'truck'],
                     some_property='some property', another_property=45)
    rc.append(r2)
    assert len(rc) == 2
    assert rc[1] == r2
    # Serialization follows the GeoJSON FeatureCollection layout, with record
    # properties (labels, confidence, extra kwargs) stored per feature.
    assert geojson.dumps(rc, sort_keys=True) == '{"features": [' \
                                                '{"geometry": {"coordinates": [0, 1, 2], "type": "Point"}, ' \
                                                '"properties": {"another_property": 45, ' \
                                                '"category": ["car", "road vehicle"], ' \
                                                '"confidence": null, "some_property": "some property"}, ' \
                                                '"type": "Feature"}, ' \
                                                '{"geometry": {"coordinates": [[[0, 0], [0, 1], [1, 1], [0, 0]]], '\
                                                '"type": "Polygon"}, ' \
                                                '"properties": {"another_property": 45, ' \
                                                '"category": ["car", "truck"], ' \
                                                '"confidence": null, ' \
                                                '"some_property": "some property"}, "type": "Feature"}], ' \
                                                '"type": "FeatureCollection"}'
    # Labels seen so far were registered on the fly, all rooted at the top.
    assert 'car' in rc.taxonomy
    assert rc.taxonomy['car'].parent == rc.taxonomy.root
    assert 'truck' in rc.taxonomy
    assert rc.taxonomy['truck'].parent == rc.taxonomy.root
    assert 'road vehicle' in rc.taxonomy
    assert rc.taxonomy['road vehicle'].parent == rc.taxonomy.root
    # Assigning a record with a brand-new label extends the taxonomy too.
    rc[1] = data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['trucks'],
                        some_property='some property', another_property=45)
    assert rc[1].labels == (Label('trucks'), )
    assert 'trucks' in rc.taxonomy
    assert rc.taxonomy['trucks'].parent == rc.taxonomy.root
    # NOTE(review): trailing re-construction — its result is never asserted
    # against; looks like leftover setup from an earlier edit. Confirm intent.
    rc = data.RecordCollection(r)
class TestTile:
    """Tests for the Tile ABC, the TileWrapper adapter and TileCollection."""

    def test_tile(self):
        """Any object exposing ``__array_interface__`` qualifies as a Tile."""
        import PIL.Image
        import numpy as np
        im = PIL.Image.fromarray(np.arange(600).astype(np.uint8).reshape(10, 20, 3))
        # PIL images expose __array_interface__, so they are recognised as tiles.
        assert isinstance(im, data.Tile)
        with pytest.raises(TypeError, match='Tile expect an object which exposes the __array_interface__'):
            t = data.Tile(0) # noqa: F841

    def test_tile_wrapper(self):
        """TileWrapper validates its backing array and proxies size and metadata."""
        tw = data.TileWrapper(np.zeros((5, 5, 3)), filename='somefile.png', some_property='some_value')
        mixin_suite(tw) # Base validity tests
        # Exposing __array_interface__ alone is not enough: a shape is required.
        class Dummy(object):
            @property
            def __array_interface__(self):
                return None
        with pytest.raises(TypeError, match='TileWrapper expect a ndarray-like object, got:'):
            tw = data.TileWrapper(Dummy(), filename='somefile.png', some_property='some_value')
        # Minimal ndarray-like stub: just `shape` and `__array_interface__`.
        class Dummy(object):
            def __init__(self, *shape):
                self._shape = shape

            @property
            def shape(self):
                return self._shape

            @property
            def __array_interface__(self):
                return None
        # Rank check: images must be exactly 3-dimensional.
        with pytest.raises(ValueError, match='TileWrapper expect a 3-dim ndarray, got:'):
            tw = data.TileWrapper(Dummy(1, 5, 6, 3), filename='somefile.png', some_property='some_value')
        # HWC layout checks: channel-first or ambiguous shapes are refused.
        with pytest.raises(ValueError, match='TileWrapper expect a HWC formatted image'):
            tw = data.TileWrapper(Dummy(3, 6, 5), filename='somefile.png', some_property='some_value')
        with pytest.raises(ValueError, match='TileWrapper expect a HWC formatted image'):
            tw = data.TileWrapper(Dummy(3, 6, 3), filename='somefile.png', some_property='some_value')
        with pytest.raises(ValueError, match='TileWrapper expect a HWC formatted image'):
            tw = data.TileWrapper(Dummy(3, 3, 3), filename='somefile.png', some_property='some_value')
        tw = data.TileWrapper(Dummy(5, 10, 3), filename='somefile.png', some_property='some_value')
        assert hasattr(tw, 'filename')
        # `size` is reported as (width, height), i.e. shape[0:2] reversed.
        assert tw.size == (10, 5)
        assert tw.width == 10
        assert tw.height == 5
        assert tw.some_property == 'some_value'
        assert tw.__array_interface__ is None
        # `info` and `properties` are two views over the same metadata mapping
        # (a key written through `info` is readable through `properties`).
        tw.info['some_other_property'] = 56
        assert tw.properties['some_other_property'] == 56

    def test_tile_collection(self): # noqa: R701
        """TileCollection accepts anonymous tiles, (name, tile) pairs and kwargs."""
        import PIL.Image
        import numpy as np
        im = PIL.Image.fromarray(np.arange(600).astype(np.uint8).reshape(10, 20, 3))
        tw = data.TileWrapper(np.zeros((5, 5, 3)), filename='somefile.png', some_property='some_value')
        # Anonymous tiles get auto-generated names: tile_<position>.
        tc = data.TileCollection(im, tw)
        assert tc.iloc[0] is im
        assert tc['tile_0'] is im
        assert tc.iloc[1] is tw
        assert tc['tile_1'] is tw
        # Explicit (name, tile) pairs.
        tc = data.TileCollection(('first', im), ('second', tw))
        assert tc.iloc[0] is im
        assert tc['first'] is im
        assert tc.iloc[1] is tw
        assert tc['second'] is tw
        # Anonymous and named tiles can be mixed.
        tc = data.TileCollection(im, ('second', tw))
        assert tc.iloc[0] is im
        assert tc['tile_0'] is im
        assert tc.iloc[1] is tw
        assert tc['second'] is tw
        import sys
        # Keyword construction relies on ordered **kwargs (PEP 468, Python >= 3.6).
        if sys.version_info[1] >= 6:
            tc = data.TileCollection(first=im, second=tw)
            assert tc.iloc[0] is im
            assert tc['first'] is im
            assert tc.iloc[1] is tw
            assert tc['second'] is tw
            tc = data.TileCollection(('first', im), second=tw)
            assert tc.iloc[0] is im
            assert tc['first'] is im
            assert tc.iloc[1] is tw
            assert tc['second'] is tw
            tc = data.TileCollection(im, second=tw)
            assert tc.iloc[0] is im
            assert tc['tile_0'] is im
            assert tc.iloc[1] is tw
            assert tc['second'] is tw
            with pytest.raises(TypeError, match='Expected each tiles to expose the __array_interface__ attribute'):
                _ = data.TileCollection(im, ('second', tw), false=0)
        else:
            with pytest.raises(ValueError, match='Ordered keyword argument were introduced in Python'):
                _ = data.TileCollection(first=im, second=tw)
        # Non-tile positional entries are rejected regardless of Python version.
        with pytest.raises(TypeError, match='Expected each tiles to expose the __array_interface__ attribute'):
            _ = data.TileCollection(('first', im), 0, ('second', tw))
        with pytest.raises(TypeError, match='Expected each tiles to expose the __array_interface__ attribute'):
            _ = data.TileCollection(('first', im), ('false', 0), ('second', tw))
class TestMask:
    """Tests for Mask, VectorMask, RasterMask and MaskCollection."""

    def test_mask(self):
        """Base Mask stores its name as an attribute and extra kwargs as properties."""
        m = data.mask.Mask(name='some_name', some_property='some_value')
        mixin_suite(m) # Base validity tests
        assert hasattr(m, 'id')
        assert re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\Z', m.id)
        # `name` lives on the instance itself, not in the properties mapping.
        assert 'name' in m.__dict__
        assert 'name' not in m.properties
        assert m.name == 'some_name'
        assert m.some_property == 'some_value'
        assert m.mask

    def test_vector_mask(self):
        """VectorMask takes polygon coordinates plus a name and free-form properties."""
        vm = data.VectorMask([[[0, 0], [0, 1], [1, 1], [0, 0]]], 'data',
                             some_property='some_value', another_property=45)
        mixin_suite(vm) # Base validity tests
        assert hasattr(vm, 'id')
        assert re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\Z', vm.id)
        assert vm.name == 'data'
        assert vm.some_property == 'some_value'
        assert vm.another_property == 45
        assert vm.mask

    def test_raster_mask(self):
        """RasterMask validates its ndarray-like input and proxies its geometry."""
        rm = data.RasterMask(np.zeros((5, 5, 3)), 'data', some_property='some_value')
        mixin_suite(rm) # Base validity tests
        # __array_interface__ alone is not enough: a shape is required.
        class Dummy(object):
            @property
            def __array_interface__(self):
                return None
        with pytest.raises(TypeError, match='RasterMask expect a ndarray-like object, got:'):
            rm = data.RasterMask(Dummy(), 'data', some_property='some_value')
        # Minimal ndarray-like stub: just `shape` and `__array_interface__`.
        class Dummy(object):
            def __init__(self, *shape):
                self._shape = shape

            @property
            def shape(self):
                return self._shape

            @property
            def __array_interface__(self):
                return None
        # A 2-dim (H, W) mask is accepted.
        rm = data.RasterMask(Dummy(5, 10), 'data', some_property='some_value')
        assert hasattr(rm, 'id')
        assert re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\Z', rm.id)
        assert rm.name == 'data'
        # `size` is reported as (width, height), i.e. shape reversed.
        assert rm.size == (10, 5)
        assert rm.width == 10
        assert rm.height == 5
        assert rm.some_property == 'some_value'
        assert rm.__array_interface__ is None
        # Properties behave as a plain mutable mapping.
        rm.properties['some_other_property'] = 56
        assert rm.properties['some_other_property'] == 56

    def test_mask_collection(self):
        """MaskCollection supports both positional and by-name lookup."""
        class Dummy(object):
            def __init__(self, *shape):
                self._shape = shape

            @property
            def shape(self):
                return self._shape

            @property
            def __array_interface__(self):
                return None
        rm = data.RasterMask(Dummy(5, 10), 'raster-data', some_property='some_value')
        vm = data.VectorMask([[[0, 0], [0, 1], [1, 1], [0, 0]]], 'vector-data',
                             some_property='some_value', another_property=45)
        mc = data.MaskCollection(rm, vm)
        mixin_suite(mc) # Base validity tests
        # Integer indexing preserves insertion order; masks are also keyed by name.
        assert mc[0] == rm
        assert mc[1] == vm
        assert mc['vector-data'] == vm
        assert mc['raster-data'] == rm
class TestData:
    """Tests for Annotation and DataPoint (including the deprecated interface)."""

    def test_annotation(self):
        """Annotation wraps a record collection (+ masks) and proxies __geo_interface__."""
        import geojson

        class Dummy(object):
            @property
            def __geo_interface__(self):
                return None
        r = data.Record([0, 1, 2], ['car', 'road vehicle'], some_property='some property', another_property=45)
        rc = data.RecordCollection(r, data.Record([[[0, 0], [0, 1], [1, 1], [0, 0]]], ['car', 'road vehicle'],
                                                  some_property='some property', another_property=45))
        vm = data.VectorMask([[[0, 0], [0, 1], [1, 1], [0, 0]]], 'vector-data',
                             some_property='some_value', another_property=45)
        mc = data.MaskCollection(vm)
        with pytest.raises(TypeError, match='Expected "record_collection" to expose the __geo_interface__ attribute'):
            a = data.Annotation(0, mc, some_property='some_value')
        # Any object exposing __geo_interface__ is accepted and proxied as-is.
        a = data.Annotation(Dummy(), mc, some_property='some_value')
        assert a.__geo_interface__ is None
        a = data.Annotation(rc, mc, some_property='some_value')
        mixin_suite(a) # Base validity tests
        assert hasattr(a, 'id')
        assert re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\Z', a.id)
        assert a.some_property == 'some_value'
        # Indexing an annotation delegates to its record collection.
        assert a[0] == r
        # GeoJSON serialization is that of the wrapped record collection.
        assert geojson.dumps(a, sort_keys=True) == '{"features": [' \
                                                   '{"geometry": {"coordinates": [0, 1, 2], "type": "Point"}, ' \
                                                   '"properties": {"another_property": 45, ' \
                                                   '"category": ["car", "road vehicle"], ' \
                                                   '"confidence": null, "some_property": "some property"}, ' \
                                                   '"type": "Feature"}, ' \
                                                   '{"geometry": {"coordinates": [[[0, 0], [0, 1], [1, 1], [0, 0]]], ' \
                                                   '"type": "Polygon"}, ' \
                                                   '"properties": {"another_property": 45, ' \
                                                   '"category": ["car", "road vehicle"], ' \
                                                   '"confidence": null, ' \
                                                   '"some_property": "some property"}, "type": "Feature"}], ' \
                                                   '"type": "FeatureCollection"}'

    def test_deprecated_data_point(self):
        # Old deprecated interface
        """Passing a bare Tile (instead of a tiles mapping) warns but still works."""
        with pytest.deprecated_call():
            dp = data.DataPoint(data.TileWrapper(np.zeros((5, 5, 3)),
                                                 filename='somefile.png',
                                                 some_property='some_value'),
                                data.Annotation(data.RecordCollection()))
        mixin_suite(dp) # Base validity tests
        # The single-tile accessor is itself deprecated.
        with pytest.deprecated_call():
            assert isinstance(dp.tile, data.Tile)

        class DummyGeo(object):
            @property
            def __geo_interface__(self):
                return 0

        class DummyArray(object):
            @property
            def __array_interface__(self):
                return 1

        class DummyTile(data.Tile):
            @property
            def __array_interface__(self):
                return 1
        dp = data.DataPoint(DummyTile(DummyArray()), DummyGeo(), some_property='some_value')
        assert hasattr(dp, 'id')
        assert re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\Z', dp.id)
        assert dp.some_property == 'some_value'
        with pytest.raises(TypeError, match='Expected an ordered dictionary like object as tiles'):
            dp = data.DataPoint(0, DummyGeo(), some_property='some_value')
        with pytest.raises(TypeError, match='Expected "annotation" to expose the __geo_interface__ attribute'):
            dp = data.DataPoint(DummyTile(DummyArray()), 0, some_property='some_value')

    def test_data_point(self, tiles):
        """Current interface: a tiles mapping (fixture) plus a geo-exposing annotation."""
        dp = data.DataPoint(tiles,
                            data.Annotation(data.RecordCollection()))
        mixin_suite(dp) # Base validity tests

        class DummyGeo(object):
            @property
            def __geo_interface__(self):
                return 0
        dp = data.DataPoint(tiles, DummyGeo(), some_property='some_value')
        assert hasattr(dp, 'id')
        assert re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\Z', dp.id)
        assert dp.some_property == 'some_value'
        with pytest.raises(TypeError, match='Expected "annotation" to expose the __geo_interface__ attribute'):
            dp = data.DataPoint(tiles, 0, some_property='some_value')
        # A plain dict is rejected: tiles must be an ordered-mapping-like object.
        with pytest.raises(TypeError, match='Expected an ordered dictionary like object as tiles'):
            dp = data.DataPoint({}, DummyGeo(), some_property='some_value')
|
{
"alphanum_fraction": 0.510905695,
"author": null,
"avg_line_length": 44.8070921986,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9cb30215bfae015104ef534836513138d9e228a7",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-03-09T03:48:12.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-02-03T12:37:53.000Z",
"max_forks_repo_head_hexsha": "a6be79e4c30c7abcbade5581f052a4e8035a2057",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "alexandreMayerowitz/playground-plums",
"max_forks_repo_path": "tests/test_commons/test_data.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a6be79e4c30c7abcbade5581f052a4e8035a2057",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "alexandreMayerowitz/playground-plums",
"max_issues_repo_path": "tests/test_commons/test_data.py",
"max_line_length": 120,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a6be79e4c30c7abcbade5581f052a4e8035a2057",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "alexandreMayerowitz/playground-plums",
"max_stars_repo_path": "tests/test_commons/test_data.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 7361,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 31589
}
|
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, recall_score, precision_score
from emissions.data import load_data, clean_data
def scoring_table(search,
                  X_test,
                  y_test):
    """Summarize a fitted grid search as a train/val/test metric table.

    Parameters
    ----------
    search : fitted GridSearchCV-like object
        Must expose ``cv_results_``, ``best_index_`` and ``best_estimator_``.
    X_test, y_test : array-like
        Held-out features and labels, scored with the best estimator.

    Returns
    -------
    pandas.DataFrame
        Rows: accuracy / recall / precision. Columns: train, val, test.
        All values rounded to 3 decimals.
    """
    cv_results = search.cv_results_
    best = search.best_index_
    metrics = ('accuracy', 'recall', 'precision')
    # Cross-validation means for the winning parameter set; sklearn stores the
    # validation split under the 'test' prefix.
    table = pd.DataFrame({
        column: {metric: cv_results['mean_%s_%s' % (split, metric)][best]
                 for metric in metrics}
        for column, split in (('train', 'train'), ('val', 'test'))
    })
    # Score the untouched test set with the refitted best estimator.
    predictions = search.best_estimator_.predict(X_test)
    for metric, scorer in zip(metrics,
                              (accuracy_score, recall_score, precision_score)):
        table.loc[metric, 'test'] = scorer(y_test, predictions)
    return table.round(3)
def plot_learning_curve(model, X_train, y_train, scoring='recall'):
    """Plot train and validation learning curves for ``model``.

    Uses a 5-fold ShuffleSplit (20% validation, fixed seed) over 20 training
    sizes from 5% to 100% of the data. Each curve is the mean score with a
    +/- one standard deviation band.
    """
    splitter = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
    sizes, train_scores, val_scores = learning_curve(model,
                                                     X_train,
                                                     y_train,
                                                     train_sizes=np.linspace(0.05, 1, 20),
                                                     cv=splitter,
                                                     scoring=scoring,
                                                     n_jobs=-1
                                                     )
    # Draw each curve with its dispersion band (train first, then validation,
    # so the default color cycle matches the original plot).
    for label, scores in (('Train', train_scores), ('Val', val_scores)):
        mean = scores.mean(axis=1)
        std = scores.std(axis=1)
        plt.plot(sizes, mean, label=label)
        plt.fill_between(sizes, mean - std, mean + std, alpha=0.1)
    plt.legend()
    plt.ylabel('score')
    plt.xlabel('train sizes')
    # Zoom the y-axis for recall, whose useful range sits near the top.
    if scoring == 'recall':
        plt.ylim(0.6, 1)
def make_transform_get(df, make_threshhold="0.01"):
    """Return the sorted list of make labels frequent enough to keep.

    Makes whose share of ``df['MAKE']`` falls below ``make_threshhold`` are
    the candidates for relabelling as 'other' by the caller; this function
    returns the complement (the makes to keep). Note: the original docstring
    claimed the opposite ("makes to be converted to 'other'").

    Parameters
    ----------
    df : pandas.DataFrame
        Cleaned training data with a 'MAKE' column.
    make_threshhold : str or float, default "0.01"
        Minimum share (fraction of rows) a make must reach to be kept.
        Accepts a numeric string (legacy call sites) or a plain number.

    Returns
    -------
    list
        Alphabetically sorted make labels whose share is >= the threshold.
    """
    # Convert once instead of on every comparison as before.
    threshold = float(make_threshhold)
    # Normalized value counts give each make's share of the rows.
    share = df['MAKE'].value_counts(normalize=True)
    return sorted(share[share >= threshold].index)
# Smoke-test entry point: load and clean the emissions dataset, then report
# which make labels survive the default frequency threshold.
if __name__=="__main__":
    df = load_data()
    df = clean_data(df)
    print('Makes to keep:', make_transform_get(df))
|
{
"alphanum_fraction": 0.5924391507,
"author": null,
"avg_line_length": 45.9761904762,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "03dee00fc59d03738cfc359a31f7fad262e4e525",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-05-28T11:06:38.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-28T11:06:38.000Z",
"max_forks_repo_head_hexsha": "51c610867666f91446fbe228660c865cf5869e99",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Guli-Y/wimlds_emissions",
"max_forks_repo_path": "emissions/utils.py",
"max_issues_count": 26,
"max_issues_repo_head_hexsha": "51c610867666f91446fbe228660c865cf5869e99",
"max_issues_repo_issues_event_max_datetime": "2021-06-11T06:53:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-05-06T13:59:45.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Guli-Y/wimlds_emissions",
"max_issues_repo_path": "emissions/utils.py",
"max_line_length": 190,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "51c610867666f91446fbe228660c865cf5869e99",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Guli-Y/wimlds_emissions",
"max_stars_repo_path": "emissions/utils.py",
"max_stars_repo_stars_event_max_datetime": "2021-04-14T18:32:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-14T18:32:56.000Z",
"num_tokens": 804,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3862
}
|
"""
Created on Sun Apr 15 00:39:35 2018
@author: Hrid
Source: https://github.com/hridkamolbiswas/Principal-Component-Analysis-PCA-on-image-dataset/blob/master/pca.py
"""
import numpy as np
from numpy import linalg as LA
import os, os.path
# np.set_printoptions(threshold=np.nan)
# import cv2
from matplotlib.image import imread
from PIL import Image
import glob
# import tensorflow as tf
from matplotlib import pyplot as plt
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
# Accumulator for flattened image features (one row per image).
data = np.empty((0, 2048)) # 2048 is the size of the feature vector/number of pixels after resizing the image
arr = []
# NOTE(review): only the first image is actually processed — the loop ends
# with an unconditional `break`, and the PCA/visualization steps below are
# commented out. This file is exploratory scratch code.
for filename in glob.glob("four_dataset/*.jpg"):
    # im = cv2.imread(filename, 0)
    img = imread(filename)
    print(type(img))
    print(img.shape)
    X = pd.DataFrame(img)
    print(" Printing X..")
    print(type(X))
    print(X.shape)
    print(X[0])
    # Normalize data by subtracting mean and scaling
    X_norm = normalize(X)
    print(" Printing [X_norm] ..")
    print(X_norm.shape)
    print(X_norm[0])
    print(" ---- ")
    # pca = PCA(n_components=2)
    # # Run PCA on normalized image data
    # lower_dimension_data = pca.fit_transform(X_norm)
    # # Lower dimension data is 5000x353 instead of 5000x1024
    # print(" Printing [lower_dimension_data] ..")
    # print(lower_dimension_data.shape)
    # print(" ---- ")
    #
    # # Run PCA on normalized image data
    # lower_dimension_data = pca.fit_transform(X_norm)
    #
    # # Project lower dimension data onto original features
    # approximation = pca.inverse_transform(lower_dimension_data)
    #
    # # Reshape approximation and X_norm to 5000x32x32 to display images
    # # approximation = approximation.reshape(-1, 32, 32)
    # # X_norm = X_norm.reshape(-1, 32, 32)
    #
    # # Rotate pictures
    # for i in range(0, X_norm.shape[0]):
    #     X_norm[i,] = X_norm[i,].T
    #     approximation[i,] = approximation[i, ].T
    #
    # # Display images
    # fig4, axarr = plt.subplots(3, 2, figsize=(8, 8))
    # axarr[0, 0].imshow(X_norm[0, ], cmap='gray')
    # axarr[0, 0].set_title('Original Image')
    # axarr[0, 0].axis('off')
    # axarr[0, 1].imshow(approximation[0,], cmap='gray')
    # axarr[0, 1].set_title('99% Variation')
    # axarr[0, 1].axis('off')
    # # axarr[1, 0].imshow(X_norm[1,], cmap='gray')
    # # axarr[1, 0].set_title('Original Image')
    # # axarr[1, 0].axis('off')
    # # axarr[1, 1].imshow(approximation[1,], cmap='gray')
    # # axarr[1, 1].set_title('99% Variation')
    # # axarr[1, 1].axis('off')
    # # axarr[2, 0].imshow(X_norm[2,], cmap='gray')
    # # axarr[2, 0].set_title('Original Image')
    # # axarr[2, 0].axis('off')
    # # axarr[2, 1].imshow(approximation[2,], cmap='gray')
    # # axarr[2, 1].set_title('99% variation')
    # # axarr[2, 1].axis('off')
    # plt.show()
    break
# # print('size:',im.shape)
# resized = img.reshape(-1,32,32) # ERROR: ValueError: cannot reshape array of size 784 into shape (32,32)
# # resized = cv2.reshape(img, (32, 64))
# im_ravel = resized.ravel()
# arr = np.append(data, [im_ravel], axis=0)
# data = arr
# final_data = arr
# mu = np.mean(final_data, axis=0)
#
# plt.figure(1)
#
# k = 1677
# for i in range(0, 4):
# img1 = final_data[k, :]
# ir = np.reshape(img1, (32, 64))
# ir = np.uint8(ir)
# plt.subplot(2, 2, i + 1)
# plt.imshow(ir, cmap='gray')
# k = k + 1
# print('k=== ', k)
# plt.suptitle('sample image from training dataset')
# plt.show()
#
# data = final_data - mu
# covariance = np.cov(data.T)
# values, vector = LA.eig(covariance)
#
# pov = np.cumsum(np.divide(values, sum(values)))
# plt.figure
# plt.plot(pov)
# plt.title('percentage of variance explained')
#
# vsort = vector[:, 0:301]
# scores = np.dot(data, vsort)
# projection = np.dot(scores, vsort.T) + mu
#
# % matplotlib
# qt
# plt.figure(2)
# k = 1677
# for i in range(0, 4):
# img1_train = projection[k, :]
# ir_train = np.reshape(img1_train, (32, 64))
# ir = np.uint8(ir_train)
# plt.subplot(2, 2, i + 1)
# plt.imshow(ir_train, cmap='gray')
# k = k + 1
# print('k=== ', k)
# plt.suptitle('Image construction using PCA')
# plt.show()
|
{
"alphanum_fraction": 0.6211870419,
"author": null,
"avg_line_length": 29.9929078014,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "46fd7f493f4819b104420beaf3586980c519f100",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-01-07T14:25:54.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-01-07T14:25:54.000Z",
"max_forks_repo_head_hexsha": "db38972bcceac7b95808132457c4de9170546c9d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "ardihikaru/mlsp",
"max_forks_repo_path": "hw3/references/old/coba3.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "db38972bcceac7b95808132457c4de9170546c9d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "ardihikaru/mlsp",
"max_issues_repo_path": "hw3/references/old/coba3.py",
"max_line_length": 111,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "db38972bcceac7b95808132457c4de9170546c9d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "ardihikaru/mlsp",
"max_stars_repo_path": "hw3/references/old/coba3.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1338,
"path": null,
"reason": "import numpy,from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4229
}
|
"""Implements ETL processing for COVID-19 datasets.
It performs the following actions:
1. pull updated datasets from https://github.com/datadista/datasets
1.1. SOURCE environment variable points to a local repository path
2. read .csv data files into pandas dataframes
3. export data to JSONStat format
4. push JSON files to gist
"""
"""Import configuration."""
from config import etl_cfg
from datetime import datetime, timedelta
from etlstat.extractor.extractor import csv
from git import GitCommandError, Repo
import json
from numpy import arange
from pyjstat import pyjstat
def transform(df, variable):
    """Filter a per-CCAA frame down to Cantabria and rename its metric column.

    Mutates ``df`` in place: keeps only rows with INE code 6 (Cantabria),
    resets the index, drops the now-redundant region columns and renames
    'total' to ``variable``. Returns the same (mutated) frame.
    """
    # Cantabria is INE code 6; discard every other autonomous community.
    df.drop(df[df.cod_ine != 6].index, inplace=True)
    df.reset_index(drop=True, inplace=True)
    # Region identifiers are redundant once only Cantabria remains.
    df.drop(columns=['cod_ine', 'CCAA'], inplace=True)
    df.rename(columns={'total': variable}, inplace=True)
    return df
def deacumulate(df, variable1, variable2):
    """Derive daily values from a cumulative series.

    Writes ``df[variable2]`` as the row-to-row difference of
    ``df[variable1]`` (first row is NaN, as in the original loop) and
    returns the mutated frame.

    Improvements over the original Python loop: vectorized with
    ``Series.diff`` and no longer requires a 0..n-1 RangeIndex; also
    creates the column consistently even for frames with <= 1 row.
    """
    df[variable2] = df[variable1].diff()
    return df
def to_json(df, id_vars, value_vars):
    """Export dataframe to JSON-Stat dataset.

    id_vars (list): index columns
    value_vars (list): numeric variables (metrics)

    Returns the JSON-stat document as a string. The metric columns are
    melted into a single 'Variables' dimension, which is also declared in
    the dataset's 'role' metadata.
    """
    molten = df.melt(
        id_vars=id_vars,
        value_vars=value_vars,
        var_name='Variables')
    # Bug fix: the original appended 'Variables' to the caller's id_vars
    # list, mutating the argument. Build the sort key on a copy instead.
    sort_columns = list(id_vars) + ['Variables']
    molten = molten.sort_values(by=sort_columns)
    dataset = pyjstat.Dataset.read(molten, source=etl_cfg.metadata.source)
    metric = {'metric': ['Variables']}
    dataset.setdefault('role', metric)
    return dataset.write(output='jsonstat')
def write_to_file(json_data, file_name):
    """Write a JSON/JSON-stat string to ``file_name``, replacing any content.

    Uses a context manager so the handle is closed even if the write raises
    (the original left the file open on failure).
    """
    with open(file_name, 'w') as file:
        file.write(json_data)
def normalize_ccaa(df, variable):
    """Rename columns, drop the INE code and the national 'Total' row.

    Returns a new frame (the input is left untouched) with columns
    'fecha', 'ccaa' and ``variable``.

    Bug removed: the original ended with ``df_new.set_index('fecha', 'ccaa')``,
    which (a) passed 'ccaa' as set_index's ``drop`` argument and (b) discarded
    the returned frame, so it was a misuse with no effect. The no-op call has
    been deleted; behavior is unchanged.
    """
    df_new = df.rename(
        columns={'CCAA': 'ccaa', 'total': variable})
    df_new.drop('cod_ine', axis=1, inplace=True)
    # The source files carry a national aggregate row labelled 'Total'.
    df_new.drop(df_new[df_new.ccaa == 'Total'].index, inplace=True)
    return df_new
def delay_date(df):
    """Shift every date in the 'fecha' column back by one day, in place.

    Dates are '%Y-%m-%d' strings; each is replaced by the previous day in
    the same format. Returns the mutated frame.

    Improvement: vectorized with ``Series.map`` instead of a positional
    ``df.loc[i, ...]`` loop, so it no longer requires a 0..n-1 RangeIndex.
    """
    df['fecha'] = df['fecha'].map(
        lambda day: (datetime.strptime(day, '%Y-%m-%d')
                     - timedelta(days=1)).strftime('%Y-%m-%d'))
    return df
"""First step: pull data from Github repository."""
repo = Repo(etl_cfg.input.source)
o = repo.remotes.origin
o.pull()
"""Second step: load .csv data files into dataframes."""
data = csv(etl_cfg.input.dir_path, sep=',')
"""Third step: ETL processing."""
# Estaciones de servicio
eess = data[etl_cfg.input.files.eess]
eess.rename(
columns = {
'Horario': 'horario',
'Provincia': 'provincia',
'Municipio': 'municipio',
'Código\nPostal': 'codigo_postal',
'Dirección': 'direccion',
'Margen': 'margen',
'Rótulo': 'rotulo'
}, inplace=True)
eess['id'] = arange(len(eess))
eess['Latitud'] = eess['Latitud'].str.replace(',','.')
eess['Longitud'] = eess['Longitud'].str.replace(',','.')
json_file = to_json(
eess,
['id'],
['horario', 'provincia', 'municipio',
'codigo_postal', 'direccion', 'Latitud', 'Longitud',
'margen', 'rotulo'])
write_to_file(json_file, etl_cfg.output.path + 'eess_horario_flexible_habitual.json-stat')
# Food service points (puntos de restauración).
restauracion = data[etl_cfg.input.files.restauracion]
# Normalize headers to snake_case.
restauracion.rename(
    columns = {
        'NOMBRE': 'nombre',
        'Tipo': 'tipo',
        'Direccion': 'direccion',
        'Municipio': 'municipio',
        'Provincia': 'provincia',
        'Comentarios': 'comentario',
        'Horario': 'horario',
        'Telefono': 'telefono',
        'Bocata_Bebida_Caliente': 'bocadillo_bebida_caliente',
        'Comida_Preparada': 'comida_preparada',
        'Ducha': 'ducha'
    }, inplace=True)
# Surrogate row id used as the JSON-stat index dimension.
restauracion['id'] = arange(len(restauracion))
json_file = to_json(
    restauracion,
    ['id'],
    ['nombre', 'tipo', 'direccion', 'municipio',
     'provincia', 'Latitud', 'Longitud', 'comentario',
     'horario', 'telefono', 'bocadillo_bebida_caliente',
     'comida_preparada', 'ducha'])
write_to_file(json_file, etl_cfg.output.path + 'puntos_restauracion.json-stat')
# Tourist accommodations (alojamientos turísticos, BOE 2020 4194).
alojamientos = data[etl_cfg.input.files.alojamientos]
alojamientos.rename(
    columns={'CCAA': 'ccaa', 'lat': 'Latitud', 'long': 'Longitud'},
    inplace=True)
# Data fix: 'Santander' is a city; its province is 'Cantabria'.
alojamientos.loc[
    (alojamientos.provincia == 'Santander'), 'provincia'] = 'Cantabria'
alojamientos['id'] = arange(len(alojamientos))
json_file = to_json(
    alojamientos,
    ['id'],
    ['ccaa', 'provincia', 'localidad', 'nombre', 'Latitud', 'Longitud'])
write_to_file(json_file, etl_cfg.output.path + 'alojamientos_turisticos.json-stat')
# National accumulated data, broken down by autonomous community (CCAA).
# Each source file holds one cumulative metric; dates are shifted back one
# day by delay_date (presumably because figures refer to the previous day —
# TODO confirm against the upstream dataset docs).
ccaa_altas = data[etl_cfg.input.files.altas]
ccaa_altas = normalize_ccaa(ccaa_altas, 'altas')
ccaa_altas = delay_date(ccaa_altas)
ccaa_casos = data[etl_cfg.input.files.casos]
ccaa_casos = normalize_ccaa(ccaa_casos, 'casos')
ccaa_casos = delay_date(ccaa_casos)
ccaa_fallecidos = data[etl_cfg.input.files.fallecidos]
ccaa_fallecidos = normalize_ccaa(ccaa_fallecidos, 'fallecidos')
ccaa_fallecidos = delay_date(ccaa_fallecidos)
ccaa_hospital = data[etl_cfg.input.files.hospital]
ccaa_hospital = normalize_ccaa(ccaa_hospital, 'hospital')
ccaa_hospital = delay_date(ccaa_hospital)
ccaa_uci = data[etl_cfg.input.files.uci]
ccaa_uci = normalize_ccaa(ccaa_uci, 'uci')
ccaa_uci = delay_date(ccaa_uci)
# Left-join the five metrics on (date, region); 'casos' drives the row set.
todos_ccaa = ccaa_casos.merge(ccaa_altas, how='left', on=['fecha', 'ccaa'])
todos_ccaa = todos_ccaa.merge(ccaa_fallecidos, how='left', on=['fecha', 'ccaa'])
todos_ccaa = todos_ccaa.merge(ccaa_hospital, how='left', on=['fecha', 'ccaa'])
todos_ccaa = todos_ccaa.merge(ccaa_uci, how='left', on=['fecha', 'ccaa'])
json_file = to_json(
    todos_ccaa,
    ['fecha', 'ccaa'],
    ['casos', 'altas', 'fallecidos', 'hospital', 'uci'])
write_to_file(json_file, etl_cfg.output.path + 'todos_ccaa_acumulado.json-stat')
# Latest figures per CCAA: keep only the most recent date, then drop it.
last_date = todos_ccaa['fecha'].max()
casos_ccaa_last = todos_ccaa[['fecha', 'ccaa', 'casos']].copy()
casos_ccaa_last.drop(casos_ccaa_last[casos_ccaa_last.fecha != last_date].index, inplace=True)
casos_ccaa_last.drop('fecha', axis=1, inplace=True)
json_file = to_json(casos_ccaa_last, ['ccaa'], ['casos'])
write_to_file(json_file, etl_cfg.output.path + 'casos_ccaa_1_dato.json-stat')
# National daily accumulated data.
# Source columns: fecha,casos,altas,fallecimientos,ingresos_uci,hospitalizados
nacional = data[etl_cfg.input.files.nacional]
nacional = delay_date(nacional)
# NOTE: the original called `nacional.set_index('fecha')` here and discarded
# the result (set_index is not in-place by default) — the no-op was removed.
nacional.rename(columns={
    'casos_total': 'casos-acumulado',
    'altas': 'altas-acumulado',
    'fallecimientos': 'fallecidos-acumulado',
    'ingresos_uci': 'uci-acumulado',
    'hospitalizados': 'hospital-acumulado'},
    inplace=True)
# Derive the non-cumulative daily values from each cumulative series.
# Consistency fix: reuse the deacumulate() helper defined above instead of
# an inlined copy of its per-column loop (identical result and column order).
for metric in ('casos', 'altas', 'fallecidos', 'uci', 'hospital'):
    nacional = deacumulate(nacional, metric + '-acumulado', metric)
# Cumulative snapshot: keep the *-acumulado columns, renamed back to plain
# metric names for the exported dataset.
nacional_acumulado = nacional[[
    'fecha',
    'casos-acumulado',
    'altas-acumulado',
    'fallecidos-acumulado',
    'uci-acumulado',
    'hospital-acumulado']].copy()
nacional_acumulado.rename(
    columns={
        'casos-acumulado': 'casos',
        'altas-acumulado': 'altas',
        'fallecidos-acumulado': 'fallecidos',
        'uci-acumulado': 'uci',
        'hospital-acumulado': 'hospital'},
    inplace=True)
json_file = to_json(
    nacional_acumulado,
    ['fecha'],
    ['casos', 'altas', 'fallecidos', 'uci', 'hospital'])
write_to_file(json_file, etl_cfg.output.path + 'todos_nacional_acumulado.json-stat')
# Daily variation rate (percentage):
# T(d) = 100 * ((Casos(d) - Casos(d-1))/Casos(d-1))
casos_nacional_tasa = nacional_acumulado[['fecha', 'casos']].copy()
casos_nacional_tasa.reset_index(drop=True, inplace=True)
for i in range(1, len(casos_nacional_tasa)):
    # Guard against division by zero on days with no previous cases.
    if casos_nacional_tasa.loc[i-1, 'casos'] > 0:
        casos_nacional_tasa.loc[i, 'variacion'] = 100 * (( \
            casos_nacional_tasa.loc[i, 'casos'] - casos_nacional_tasa.loc[i-1, 'casos']) / \
            casos_nacional_tasa.loc[i-1, 'casos'])
    else:
        casos_nacional_tasa.loc[i, 'variacion'] = None
casos_nacional_tasa.drop('casos', axis=1, inplace=True)
json_file = to_json(casos_nacional_tasa, ['fecha'], ['variacion'])
write_to_file(json_file, etl_cfg.output.path + 'casos_nacional_variacion.json-stat')
# Daily (non-cumulative) values for all five metrics.
nacional_diario = nacional[[
    'fecha',
    'casos',
    'altas',
    'fallecidos',
    'uci',
    'hospital']].copy()
json_file = to_json(
    nacional_diario,
    ['fecha'],
    ['casos', 'altas', 'fallecidos', 'uci', 'hospital'])
write_to_file(json_file, etl_cfg.output.path + 'todos_nacional_diario.json-stat')
# Latest national figures: the last row of the time series, exported as one
# single-value JSON-stat file per metric.
nacional_last = nacional.tail(1)
# altas (recoveries)
altas_nacional_last = nacional_last[['fecha', 'altas-acumulado']].copy()
altas_nacional_last.rename(columns={'altas-acumulado': 'altas'}, inplace=True)
json_file = to_json(altas_nacional_last, ['fecha'], ['altas'])
write_to_file(json_file, etl_cfg.output.path + 'altas_nacional_1_dato.json-stat')
# casos (cases)
casos_nacional_last = nacional_last[['fecha', 'casos-acumulado']].copy()
casos_nacional_last.rename(columns={'casos-acumulado': 'casos'}, inplace=True)
json_file = to_json(casos_nacional_last, ['fecha'], ['casos'])
write_to_file(json_file, etl_cfg.output.path + 'casos_nacional_1_dato.json-stat')
# fallecidos (deaths)
fallecidos_nacional_last = nacional_last[['fecha', 'fallecidos-acumulado']].copy()
fallecidos_nacional_last.rename(columns={'fallecidos-acumulado': 'fallecidos'}, inplace=True)
json_file = to_json(fallecidos_nacional_last, ['fecha'], ['fallecidos'])
write_to_file(json_file, etl_cfg.output.path + 'fallecidos_nacional_1_dato.json-stat')
# hospital (hospitalized)
hospital_nacional_last = nacional_last[['fecha', 'hospital-acumulado']].copy()
hospital_nacional_last.rename(columns={'hospital-acumulado': 'hospital'}, inplace=True)
json_file = to_json(hospital_nacional_last, ['fecha'], ['hospital'])
write_to_file(json_file, etl_cfg.output.path + 'hospital_nacional_1_dato.json-stat')
# uci (ICU admissions)
uci_nacional_last = nacional_last[['fecha', 'uci-acumulado']].copy()
uci_nacional_last.rename(columns={'uci-acumulado': 'uci'}, inplace=True)
json_file = to_json(uci_nacional_last, ['fecha'], ['uci'])
write_to_file(json_file, etl_cfg.output.path + 'uci_nacional_1_dato.json-stat')
# Series diarias: un fichero por variable con metadatos de unidad.
# BUG corregido: la clave del diccionario 'unit' era siempre 'fallecidos'
# (copia y pega); debe coincidir con la variable de cada fichero.
for metric in ('altas', 'casos', 'fallecidos', 'hospital', 'uci'):
    serie = nacional[['fecha', metric]]
    json_file = to_json(serie, ['fecha'], [metric])
    json_obj = json.loads(json_file)
    json_obj['dimension']['Variables']['category']['unit'] = \
        {metric: {'decimals': 0, 'label': 'Número de personas'}}
    json_file = json.dumps(json_obj)
    write_to_file(
        json_file,
        etl_cfg.output.path + metric + '_nacional_diario.json-stat')
# Datos nacionales por rango de edad y sexo: nos quedamos con la última
# fecha disponible, descartando los totales agregados.
nacional_edad = data[etl_cfg.input.files.nacional_edad]
nacional_edad.drop(nacional_edad[nacional_edad.rango_edad == 'Total'].index, inplace=True)
nacional_edad.drop(nacional_edad[nacional_edad.sexo == 'ambos'].index, inplace=True)
last_date = nacional_edad.fecha.max()
nacional_edad.drop(nacional_edad[nacional_edad.fecha != last_date].index, inplace=True)
nacional_edad.drop('fecha', axis=1, inplace=True)
nacional_edad.rename(columns={
    'casos_confirmados': 'casos',
    'hospitalizados': 'hospital',
    'ingresos_uci': 'uci'
}, inplace=True)
# Un fichero por variable, desglosado por rango de edad y sexo
for metric in ('casos', 'hospital', 'uci', 'fallecidos'):
    subset = nacional_edad[['rango_edad', 'sexo', metric]].copy()
    json_file = to_json(subset, ['rango_edad', 'sexo'], [metric])
    write_to_file(
        json_file,
        etl_cfg.output.path + metric + '_nacional_edad_sexo.json-stat')
# Casos en Cantabria
# fecha,cod_ine,CCAA,total
casos = data[etl_cfg.input.files.casos]
casos = delay_date(casos)
casos = transform(casos, 'casos-acumulado')
# cifra más reciente
casos_last = casos.tail(1).copy()  # copy: evita SettingWithCopyWarning al renombrar
casos_last.rename(columns={'casos-acumulado': 'casos'}, inplace=True)
json_file = to_json(casos_last, ['fecha'], ['casos'])
write_to_file(json_file, etl_cfg.output.path + 'casos_cantabria_1_dato.json-stat')
casos = deacumulate(casos, 'casos-acumulado', 'casos')
# acumulado
casos_acumulado = casos[['fecha', 'casos-acumulado']].copy()
casos_acumulado.rename(columns={'casos-acumulado': 'casos'}, inplace=True)
json_file = to_json(casos_acumulado, ['fecha'], ['casos'])
write_to_file(json_file, etl_cfg.output.path + 'casos_cantabria_acumulado.json-stat')
# diario
casos_diario = casos[['fecha', 'casos']].copy()
json_file = to_json(casos_diario, ['fecha'], ['casos'])
json_obj = json.loads(json_file)
# BUG corregido: la clave de 'unit' era 'fallecidos' (copia y pega);
# debe coincidir con la variable del fichero ('casos').
json_obj['dimension']['Variables']['category']['unit'] = \
    {'casos': {'decimals': 0, 'label': 'Número de personas'}}
json_file = json.dumps(json_obj)
write_to_file(json_file, etl_cfg.output.path + 'casos_cantabria_diario.json-stat')
# tasa de variación diaria (porcentaje)
# T(d) = 100 * ((Casos(d) - Casos(d-1))/Casos(d-1))
casos_tasa = casos_acumulado[['fecha', 'casos']].copy()
# Se descartan las 10 primeras filas — presumiblemente para saltar el
# arranque de la serie, donde el acumulado es casi cero y la tasa es
# muy ruidosa; TODO confirmar.
casos_tasa.drop(casos_tasa[casos_tasa.index < 10].index, inplace=True)
casos_tasa.reset_index(drop=True, inplace=True)
# Cálculo fila a fila de la variación porcentual diaria. La fila 0 queda
# sin valor de 'variacion' (no tiene día anterior) y, si el acumulado del
# día anterior es 0, el valor del día se deja a None.
for i in range(1, len(casos_tasa)):
    if casos_tasa.loc[i-1, 'casos'] > 0:
        casos_tasa.loc[i, 'variacion'] = 100 * (( \
            casos_tasa.loc[i, 'casos'] - casos_tasa.loc[i-1, 'casos']) / \
            casos_tasa.loc[i-1, 'casos'])
    else:
        casos_tasa.loc[i, 'variacion'] = None
# Solo se exporta la columna de variación
casos_tasa.drop('casos', axis=1, inplace=True)
json_file = to_json(casos_tasa, ['fecha'], ['variacion'])
write_to_file(json_file, etl_cfg.output.path + 'casos_cantabria_variacion.json-stat')
# Altas en Cantabria
# fecha,cod_ine,CCAA,total
altas = data[etl_cfg.input.files.altas]
altas = delay_date(altas)
altas = transform(altas, 'altas-acumulado')
# cifra más reciente
altas_last = altas.tail(1).copy()  # copy: evita SettingWithCopyWarning al renombrar
altas_last.rename(columns={'altas-acumulado': 'altas'}, inplace=True)
json_file = to_json(altas_last, ['fecha'], ['altas'])
write_to_file(json_file, etl_cfg.output.path + 'altas_cantabria_1_dato.json-stat')
altas = deacumulate(altas, 'altas-acumulado', 'altas')
# acumulado
altas_acumulado = altas[['fecha', 'altas-acumulado']].copy()
altas_acumulado.rename(columns={'altas-acumulado': 'altas'}, inplace=True)
json_file = to_json(altas_acumulado, ['fecha'], ['altas'])
write_to_file(json_file, etl_cfg.output.path + 'altas_cantabria_acumulado.json-stat')
# diario
altas_diario = altas[['fecha', 'altas']].copy()
json_file = to_json(altas_diario, ['fecha'], ['altas'])
json_obj = json.loads(json_file)
# BUG corregido: la clave de 'unit' era 'fallecidos' (copia y pega);
# debe coincidir con la variable del fichero ('altas').
json_obj['dimension']['Variables']['category']['unit'] = \
    {'altas': {'decimals': 0, 'label': 'Número de personas'}}
json_file = json.dumps(json_obj)
write_to_file(json_file, etl_cfg.output.path + 'altas_cantabria_diario.json-stat')
# Ingresados en UCI en Cantabria
# fecha,cod_ine,CCAA,total
uci = data[etl_cfg.input.files.uci]
uci = delay_date(uci)
uci = transform(uci, 'uci-acumulado')
# cifra más reciente
uci_last = uci.tail(1).copy()  # copy: evita SettingWithCopyWarning al renombrar
uci_last.rename(columns={'uci-acumulado': 'uci'}, inplace=True)
json_file = to_json(uci_last, ['fecha'], ['uci'])
write_to_file(json_file, etl_cfg.output.path + 'uci_cantabria_1_dato.json-stat')
uci = deacumulate(uci, 'uci-acumulado', 'uci')
# acumulado
uci_acumulado = uci[['fecha', 'uci-acumulado']].copy()
uci_acumulado.rename(columns={'uci-acumulado': 'uci'}, inplace=True)
json_file = to_json(uci_acumulado, ['fecha'], ['uci'])
write_to_file(json_file, etl_cfg.output.path + 'uci_cantabria_acumulado.json-stat')
# diario
uci_diario = uci[['fecha', 'uci']].copy()
json_file = to_json(uci_diario, ['fecha'], ['uci'])
json_obj = json.loads(json_file)
# BUG corregido: la clave de 'unit' era 'fallecidos' (copia y pega);
# debe coincidir con la variable del fichero ('uci').
json_obj['dimension']['Variables']['category']['unit'] = \
    {'uci': {'decimals': 0, 'label': 'Número de personas'}}
json_file = json.dumps(json_obj)
write_to_file(json_file, etl_cfg.output.path + 'uci_cantabria_diario.json-stat')
# Fallecidos en Cantabria
# fecha,cod_ine,CCAA,total
fallecidos = data[etl_cfg.input.files.fallecidos]
fallecidos = delay_date(fallecidos)
fallecidos = transform(fallecidos, 'fallecidos-acumulado')
# cifra más reciente
fallecidos_last = fallecidos.tail(1).copy()  # copy: evita SettingWithCopyWarning al renombrar
fallecidos_last.rename(columns={'fallecidos-acumulado': 'fallecidos'}, inplace=True)
json_file = to_json(fallecidos_last, ['fecha'], ['fallecidos'])
write_to_file(json_file, etl_cfg.output.path + 'fallecidos_cantabria_1_dato.json-stat')
fallecidos = deacumulate(fallecidos, 'fallecidos-acumulado', 'fallecidos')
# acumulado (este fichero sí lleva metadatos de unidad)
fallecidos_acumulado = fallecidos[['fecha', 'fallecidos-acumulado']].copy()
fallecidos_acumulado.rename(columns={'fallecidos-acumulado': 'fallecidos'}, inplace=True)
json_file = to_json(fallecidos_acumulado, ['fecha'], ['fallecidos'])
json_obj = json.loads(json_file)
json_obj['dimension']['Variables']['category']['unit'] = \
    {'fallecidos': {'decimals': 0, 'label': 'Número de personas acumulado'}}
json_file = json.dumps(json_obj)
write_to_file(json_file, etl_cfg.output.path + 'fallecidos_cantabria_acumulado.json-stat')
# diario
fallecidos_diario = fallecidos[['fecha', 'fallecidos']].copy()
json_file = to_json(fallecidos_diario, ['fecha'], ['fallecidos'])
json_obj = json.loads(json_file)
json_obj['dimension']['Variables']['category']['unit'] = \
    {'fallecidos': {'decimals': 0, 'label': 'Número de personas'}}
json_file = json.dumps(json_obj)
write_to_file(json_file, etl_cfg.output.path + 'fallecidos_cantabria_diario.json-stat')
# Serie acumulada conjunta de Cantabria: casos, altas, fallecidos y UCI
todas_acumulado = casos.merge(altas, how='left', on='fecha') \
    .merge(fallecidos, how='left', on='fecha') \
    .merge(uci, how='left', on='fecha')
# Solo interesan las columnas acumuladas: se descartan las diarias y se
# renombran las acumuladas a los nombres simples de variable.
todas_acumulado.drop(
    columns=['casos', 'altas', 'fallecidos', 'uci'], inplace=True)
todas_acumulado.rename(columns={
    'casos-acumulado': 'casos',
    'altas-acumulado': 'altas',
    'fallecidos-acumulado': 'fallecidos',
    'uci-acumulado': 'uci'}, inplace=True)
json_file = to_json(
    todas_acumulado,
    ['fecha'],
    ['casos', 'altas', 'fallecidos', 'uci'])
json_obj = json.loads(json_file)
json_obj['dimension']['Variables']['category']['unit'] = etl_cfg.metadata.todos_cantabria
json_file = json.dumps(json_obj)
write_to_file(json_file, etl_cfg.output.path + 'todos_cantabria.json-stat')
# Comparativa de casos acumulados: Cantabria frente a España.
# Tras el merge, las columnas comunes quedan con sufijos _x (España)
# e _y (Cantabria); se descarta todo salvo los acumulados de casos.
espana = data[etl_cfg.input.files.nacional]
espana = delay_date(espana)
cant_esp = espana.merge(casos, how='left', on='fecha')
cant_esp.drop(columns=[
    'altas-acumulado', 'fallecidos-acumulado', 'uci-acumulado',
    'hospital-acumulado', 'casos_x', 'altas', 'fallecidos', 'uci',
    'hospital', 'casos_y'], inplace=True)
cant_esp.rename(columns={
    'casos-acumulado_x': 'casos-espana',
    'casos-acumulado_y': 'casos-cantabria'}, inplace=True)
json_file = to_json(cant_esp, ['fecha'], ['casos-espana', 'casos-cantabria'])
json_obj = json.loads(json_file)
json_obj['dimension']['Variables']['category']['unit'] = etl_cfg.metadata.casos_cantabria_espana
json_file = json.dumps(json_obj)
write_to_file(json_file, etl_cfg.output.path + 'casos_cantabria_espana.json-stat')
"""Fourth step: push JSON-Stat files to repository."""
repo = Repo(etl_cfg.output.repository)
repo.git.add('--all')
try:
repo.git.commit('-m', '"Automatic update"')
origin = repo.remote(name='origin')
origin.push()
except GitCommandError:
pass
print("Proceso terminado con éxito")
|
{
"alphanum_fraction": 0.7173302929,
"author": null,
"avg_line_length": 41.2368896926,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c67b6e020a9cb88d7614a276a83a85e858749641",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8cb9d81ec10ce9e4905097cfd61351066d3340d6",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "okaberocks/covid19-ccaa",
"max_forks_repo_path": "etl/main.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8cb9d81ec10ce9e4905097cfd61351066d3340d6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "okaberocks/covid19-ccaa",
"max_issues_repo_path": "etl/main.py",
"max_line_length": 96,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "8cb9d81ec10ce9e4905097cfd61351066d3340d6",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "okaberocks/covid19-ccaa",
"max_stars_repo_path": "etl/main.py",
"max_stars_repo_stars_event_max_datetime": "2020-04-13T12:07:03.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-04-13T06:06:54.000Z",
"num_tokens": 6781,
"path": null,
"reason": "from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 22804
}
|
[STATEMENT]
lemma oth_class_taut_3_g[PLM]:
"[(\<phi> \<^bold>\<equiv> \<psi>) \<^bold>\<equiv> (\<psi> \<^bold>\<equiv> \<phi>) in v]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. [\<phi> \<^bold>\<equiv> \<psi> \<^bold>\<equiv> (\<psi> \<^bold>\<equiv> \<phi>) in v]
[PROOF STEP]
by PLM_solver
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "PLM_TAO_9_PLM",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 131,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
[STATEMENT]
lemma ad_equiv_list_comm: "ad_equiv_list X xs ys \<Longrightarrow> ad_equiv_list X ys xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ad_equiv_list X xs ys \<Longrightarrow> ad_equiv_list X ys xs
[PROOF STEP]
by (auto simp: ad_equiv_list_def) (smt (verit, del_insts) ad_equiv_pair_comm in_set_zip prod.sel(1) prod.sel(2))
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Eval_FO_Ailamazyan",
"hexsha": null,
"include": null,
"lang": null,
"length": 1,
"llama_tokens": 140,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/060_callback.core.ipynb (unless otherwise specified).
__all__ = ['TransformScheduler', 'ShowGraph', 'ShowGraphCallback2', 'SaveModel', 'get_lds_kernel_window',
'prepare_LDS_weights', 'WeightedPerSampleLoss', 'BatchSubsampler']
# Cell
from fastai.callback.all import *
from ..imports import *
from ..utils import *
from ..data.preprocessing import *
from ..data.transforms import *
from ..models.layers import *
# Cell
import torch.multiprocessing
# Use the 'file_system' strategy for sharing tensors between worker
# processes; presumably chosen to avoid exhausting file descriptors with
# the default sharing strategy — TODO confirm.
torch.multiprocessing.set_sharing_strategy('file_system')
# Cell
class TransformScheduler(Callback):
    "A callback to schedule batch transforms during training based on a function (sched_lin, sched_exp, sched_cos (default), etc)"
    def __init__(self, schedule_func:callable, show_plot:bool=False):
        # schedule_func: maps training progress (pct in [0, 1]) to a multiplier.
        # show_plot: if True, plot the multiplier schedule after fit.
        self.schedule_func,self.show_plot = schedule_func,show_plot
        self.mult = []
    def before_fit(self):
        "Precompute per-batch multipliers and scale the tfms' initial magnitude."
        # BUG FIX: rebuild `self.mult` on every fit. It was previously only
        # initialized in __init__, so calling fit() more than once kept
        # appending multipliers and desynchronized `train_iter` from the
        # intended schedule.
        self.mult = [self.schedule_func(pct)
                     for pct in np.linspace(0, 1, len(self.dls.train) * self.n_epoch)]
        # get initial magnitude values and update initial value
        self.mag = []
        self.mag_tfms = []
        for t in self.dls.after_batch:
            if hasattr(t, 'magnitude'):
                self.mag.append(t.magnitude)
                # guard against an empty schedule (zero training batches)
                if self.mult: t.magnitude *= self.mult[0]
                self.mag_tfms.append(t)
    def after_batch(self):
        "Set the scheduled transform magnitudes for the next training batch."
        if self.training and len(self.mag_tfms)>0 and self.train_iter < len(self.mult):
            # set values for next batch
            for t,m in zip(self.mag_tfms, self.mag):
                t.magnitude = m * self.mult[self.train_iter]
    def after_fit(self):
        "Optionally plot the schedule, then restore the original magnitudes."
        if self.show_plot and self.mult != [] and len(self.mag_tfms)>0:
            print()
            plt.plot(self.mult)
            plt.title('Scheduled tfms')
            plt.show()
            print()
            self.show_plot = False
        # set values to initial values
        for t,m in zip(self.mag_tfms, self.mag): t.magnitude = m
    def __repr__(self):
        return f'{self.__class__.__name__}({self.schedule_func})'
# Cell
class ShowGraph(Callback):
    "(Modified) Update a graph of training and validation loss"
    # Run late (after most callbacks); skip during validation batches.
    order,run_valid=65,False
    names = ['train', 'valid']
    def __init__(self, plot_metrics:bool=True, final_losses:bool=True):
        # plot_metrics: also call learn.plot_metrics after fit.
        # final_losses: forwarded to learn.plot_metrics.
        store_attr("plot_metrics,final_losses")
    def before_fit(self):
        # Disable during lr_find and get_preds runs.
        self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds")
        if not(self.run): return
        self.nb_batches = []
        # Split the recorder columns into loss columns and metric columns.
        self.learn.recorder.loss_idxs = [i for i,n in enumerate(self.learn.recorder.metric_names[1:-1]) if 'loss' in n]
        _metrics_info = [(i,n) for i,n in enumerate(self.learn.recorder.metric_names[1:-1]) if 'loss' not in n]
        if len(_metrics_info) > 0:
            self.metrics_idxs, self.metrics_names = list(zip(*_metrics_info))
        else:
            self.metrics_idxs, self.metrics_names = None, None
    # Record the batch index at which each training phase ended.
    def after_train(self): self.nb_batches.append(self.train_iter - 1)
    def after_epoch(self):
        "Plot validation loss in the pbar graph"
        if not self.nb_batches: return
        rec = self.learn.recorder
        # Remember where this fit's losses start in the recorder history.
        if self.epoch == 0:
            self.rec_start = len(rec.losses)
        iters = range_of(rec.losses)
        # The last 'loss' column of the recorder is taken as validation loss.
        val_losses = np.stack(rec.values)[:, self.learn.recorder.loss_idxs[-1]].tolist()
        x_bounds = (0, len(rec.losses) - 1)
        # y-limits: the first epoch uses the full history; later epochs
        # ignore losses recorded before this fit started.
        if self.epoch == 0:
            y_min = min((min(rec.losses), min(val_losses)))
            y_max = max((max(rec.losses), max(val_losses)))
        else:
            y_min = min((min(rec.losses[self.rec_start-1:]), min(val_losses)))
            y_max = max((max(rec.losses[self.rec_start-1:]), max(val_losses)))
        margin = (y_max - y_min) * .05
        y_bounds = (y_min - margin, y_max + margin)
        self.update_graph([(iters, rec.losses), (self.nb_batches, val_losses)], x_bounds, y_bounds)
    def after_fit(self):
        # Close the live figure and optionally plot the recorded metrics.
        if hasattr(self, 'graph_ax'):
            plt.close(self.graph_ax.figure)
        if self.plot_metrics: self.learn.plot_metrics(final_losses=self.final_losses)
    def update_graph(self, graphs, x_bounds=None, y_bounds=None, figsize=(6,4)):
        # Lazily create the figure and a notebook display handle on first use.
        if not hasattr(self, 'graph_fig'):
            self.graph_fig, self.graph_ax = plt.subplots(1, figsize=figsize)
            self.graph_out = display(self.graph_ax.figure, display_id=True)
        self.graph_ax.clear()
        # Pad `names` so every series passed in gets a legend entry.
        if len(self.names) < len(graphs): self.names += [''] * (len(graphs) - len(self.names))
        for g,n in zip(graphs,self.names):
            self.graph_ax.plot(*g, label=n)
        self.graph_ax.legend(loc='upper right')
        self.graph_ax.grid(color='gainsboro', linewidth=.5)
        if x_bounds is not None: self.graph_ax.set_xlim(*x_bounds)
        if y_bounds is not None: self.graph_ax.set_ylim(*y_bounds)
        self.graph_ax.set_title(f'Losses\nepoch: {self.epoch +1}/{self.n_epoch}')
        self.graph_out.update(self.graph_ax.figure)
# Backwards-compatible alias for the older callback name.
ShowGraphCallback2 = ShowGraph
# Cell
class SaveModel(TrackerCallback):
    "A `TrackerCallback` that saves the model's best during training and loads it at the end with a verbose option."
    _only_train_loop,order = True,TrackerCallback.order+1
    def __init__(self, monitor='valid_loss', comp=None, min_delta=0., fname='model', every_epoch=False, at_end=False,
                 with_opt=False, reset_on_fit=True, verbose=False):
        # monitor/comp/min_delta/reset_on_fit are handled by TrackerCallback.
        super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
        assert not (every_epoch and at_end), "every_epoch and at_end cannot both be set to True"
        # keep track of file path for loggers
        self.last_saved_path = None
        store_attr('fname,every_epoch,at_end,with_opt,verbose')
    # Save the model under `name`, remembering the path for loggers.
    def _save(self, name): self.last_saved_path = self.learn.save(name, with_opt=self.with_opt)
    def after_epoch(self):
        "Compare the value monitored to its best score and save if best."
        if self.every_epoch:
            # every_epoch may be an int N (save every N epochs) or True
            # (epoch % True == 0 for every epoch, so save each one).
            if (self.epoch%self.every_epoch) == 0: self._save(f'{self.fname}_{self.epoch}')
        else: #every improvement
            super().after_epoch()
            if self.new_best:
                pv(f'Better model found at epoch {self.epoch} with {self.monitor} value: {self.best}.', self.verbose)
                self._save(f'{self.fname}')
    def after_fit(self, **kwargs):
        "Load the best model."
        # at_end: save the final weights instead of reloading the best ones;
        # every_epoch: keep the per-epoch snapshots and load nothing.
        if self.at_end: self._save(f'{self.fname}')
        elif not self.every_epoch: self.learn.load(f'{self.fname}', with_opt=self.with_opt)
# Cell
from scipy.ndimage import gaussian_filter1d
from scipy.signal.windows import triang
from scipy.ndimage import convolve1d
def get_lds_kernel_window(lds_kernel="gaussian", lds_ks=9, lds_sigma=1):
    r"""Function to determine the label distribution smoothing kernel window

    lds_kernel (str): LDS kernel type ('gaussian', 'triang' or 'laplace')
    lds_ks (int): LDS kernel size (should be an odd number).
    lds_sigma (float): LDS gaussian/laplace kernel sigma

    Returns a 1D array of length `lds_ks` whose peak value is 1.
    """
    assert lds_kernel in ['gaussian', 'triang', 'laplace']
    half_ks = (lds_ks - 1) // 2
    if lds_kernel == 'gaussian':
        # Smooth a unit impulse and normalize so the peak is 1. The filter
        # is evaluated once (the previous code ran it twice).
        base_kernel = [0.] * half_ks + [1.] + [0.] * half_ks
        smoothed = gaussian_filter1d(base_kernel, sigma=lds_sigma)
        kernel_window = smoothed / smoothed.max()
    elif lds_kernel == 'triang':
        kernel_window = triang(lds_ks)
    else:
        # Laplace kernel evaluated once over the window, peak-normalized.
        def laplace(x): return np.exp(-abs(x) / lds_sigma) / (2. * lds_sigma)
        values = np.array([laplace(x) for x in np.arange(-half_ks, half_ks + 1)])
        kernel_window = values / values.max()
    return kernel_window
def prepare_LDS_weights(labels, n_bins=None, label_range=None, reweight='inv', lds_kernel='gaussian', lds_ks=9, lds_sigma=1,
                        max_rel_weight=None, show_plot=True):
    r"""Compute per-sample weights via Label Distribution Smoothing (LDS).

    Bins `labels` into a histogram, smooths the bin counts with the chosen
    kernel, and returns inverse-frequency (or inverse-sqrt-frequency)
    weights normalized so they sum to `len(labels)`.

    Args:
        labels: np.ndarray of target values.
        n_bins: number of histogram bins. If None, labels are cast to int and
            `max - min` bins are used.
        label_range: optional (min, max) range passed to np.histogram.
        reweight: 'inv' (1/freq) or 'sqrt_inv' (1/sqrt(freq)).
        lds_kernel, lds_ks, lds_sigma: smoothing kernel configuration
            (see `get_lds_kernel_window`).
        max_rel_weight: if set, clip weights to min(weights) * max_rel_weight.
        show_plot: plot the original vs. smoothed label distribution.

    Returns:
        torch.Tensor of weights with the same shape as `labels`.
    """
    assert reweight in {'inv', 'sqrt_inv'}
    labels_shape = labels.shape
    if n_bins is None:
        # Default binning: cast to int and use one bin per integer step.
        labels = labels.astype(int)
        n_bins = np.max(labels) - np.min(labels)
    num_per_label, bin_edges = np.histogram(labels, bins=n_bins, range=label_range)
    # Map each label to its (1-based) histogram bin index.
    new_labels = np.searchsorted(bin_edges, labels, side='left')
    new_labels[new_labels == 0] = 1  # values at the left edge fall in bin 1
    if reweight == 'sqrt_inv':
        num_per_label = np.sqrt(num_per_label)
    lds_kernel_window = get_lds_kernel_window(lds_kernel=lds_kernel, lds_ks=lds_ks, lds_sigma=lds_sigma)
    smoothed_value = convolve1d(num_per_label, weights=lds_kernel_window, mode='constant')
    if show_plot:
        plt.bar(bin_edges[:-1], num_per_label / num_per_label.sum(), width=(bin_edges[1]-bin_edges[0]), color='lime', edgecolor='black', label='original')
        plt.plot(bin_edges[:-1], smoothed_value / smoothed_value.sum(), color='red', label='smoothed')
        plt.title(f"Label distribution by bin (reweight={reweight})")
        plt.legend(loc='best')
        plt.show()
    # Per-sample smoothed frequency, reshaped back to the label shape.
    num_per_label = smoothed_value[new_labels.flatten() - 1].reshape(*labels_shape)
    # Inverse-frequency weights. Empty bins get weight 0; the divide-by-zero
    # warning the old code emitted before patching the infs is silenced.
    with np.errstate(divide='ignore'):
        weights = 1 / num_per_label
    weights[num_per_label == 0] = 0
    if max_rel_weight is not None:
        # NOTE(review): if any weight is 0, np.min(weights) is 0 and this
        # clips everything to 0 — confirm callers never hit empty bins here.
        weights = np.clip(weights, None, np.min(weights) * max_rel_weight)
    # Normalize so weights sum to the number of samples.
    weights = weights / weights.sum() * len(labels)
    return torch.Tensor(weights)
# Cell
class WeightedPerSampleLoss(Callback):
    order = 65
    r"""Loss wrapper than applies a weight per sample during training
    Weights are not applied to the validation loss.
    Args:
        instance_weights: weights that will be applied. Weights will be normalized to 1.
                          You can pass weights for the entire dataset or just for the training set.
    """
    def __init__(self, instance_weights):
        store_attr()
    def before_fit(self):
        # Swap the learner's loss for a per-instance wrapper, remembering the
        # original loss (and its reduction mode) to restore in `after_fit`.
        self.old_loss = self.learn.loss_func
        self.reduction = getattr(self.learn.loss_func, 'reduction', None)
        self.learn.loss_func = _PerInstanceLoss(crit=self.learn.loss_func)
        # If weights were given for the training set only, pad the validation
        # samples with zeros so indexing by dataset position still works.
        if len(self.instance_weights) == len(self.learn.dls.train.dataset):
            self.instance_weights = torch.cat([self.instance_weights, torch.zeros(len(self.learn.dls.valid.dataset))])
        assert len(self.instance_weights) == len(self.learn.dls.train.dataset) + len(self.learn.dls.valid.dataset)
        # Normalize so the weights average to 1 across all samples.
        self.instance_weights = self.instance_weights / torch.sum(self.instance_weights) * len(self.instance_weights)
        self.instance_weights = torch.as_tensor(self.instance_weights, device=self.learn.dls.device)
    def before_batch(self):
        self.learn.loss_func.training = self.training
        if self.training:
            # input_idxs holds the dataset positions of the current batch.
            input_idxs = self.learn.dls.train.input_idxs
            self.learn.loss_func.weights = self.instance_weights[input_idxs]
    def after_fit(self):
        # Restore the original loss function and its reduction mode.
        self.learn.loss_func = self.old_loss
        if self.reduction is not None: self.learn.loss_func.reduction = self.reduction
class _PerInstanceLoss(Module):
    "Criterion wrapper applying per-sample weights while in training mode."
    def __init__(self, crit):
        self.crit = crit
        self.crit.reduction = 'none'  # keep per-sample losses; reduce manually
        self.weights = None
        self.training = False
    def forward(self, input, target):
        per_sample = self.crit(input, target)
        if self.training:
            per_sample = per_sample * self.weights
        return per_sample.mean()
# Cell
class BatchSubsampler(Callback):
    """ Callback that selects a percentage of samples and/ or sequence steps with replacement from each training batch

    Args:
    ====
    sample_pct: percentage of random samples (or instances) that will be drawn. If 1. the output batch will contain the same number of samples
        as the input batch.
    step_pct: percentage of random sequence steps that will be drawn. If 1. the output batch will contain the same number of sequence steps
        as the input batch. If used with models that don't use a pooling layer, this must be set to 1 to keep the same dimensions.
        With CNNs, this value may be different.
    same_seq_len: If True, it ensures that the output has the same shape as the input, even if the step_pct chosen is < 1. Defaults to True.
    update_y: used with step_pct. If True, it applies the same random indices to y. It can only be used with sequential targets.
    """
    def __init__(self, sample_pct:Optional[float]=None, step_pct:Optional[float]=None, same_seq_len:bool=True, update_y:bool=False):
        store_attr()
    def before_fit(self):
        # Disable during get_preds / inference runs.
        self.run = not hasattr(self, "gather_preds")
        if not(self.run): return
    def before_batch(self):
        # Only subsample training batches.
        if not self.training: return
        if self.sample_pct is not None:
            B = self.x.shape[0]  # batch size
            # A tuple means "draw the percentage uniformly from this range".
            if isinstance(self.sample_pct, tuple):
                sample_pct = np.random.rand() * (self.sample_pct[1] - self.sample_pct[0]) + self.sample_pct[0]
            else:
                sample_pct = self.sample_pct
            # Draw instances with replacement and subset both x and y.
            idxs = np.random.choice(B, round(B * sample_pct), True)
            self.learn.xb = tuple(xbi[idxs] for xbi in self.learn.xb)
            self.learn.yb = tuple(ybi[idxs] for ybi in self.learn.yb)
        if self.step_pct is not None:
            S = self.x.shape[-1]  # sequence steps are taken from the last axis
            if isinstance(self.step_pct, tuple):
                step_pct = np.random.rand() * (self.step_pct[1] - self.step_pct[0]) + self.step_pct[0]
            else:
                step_pct = self.step_pct
            # NOTE(review): this tests the configured `self.step_pct`, not the
            # locally drawn `step_pct` — confirm this is intended for tuples.
            if self.step_pct != 1 and self.same_seq_len:
                # Tile the drawn steps so the output keeps the input seq_len.
                idxs = np.sort(np.tile(np.random.choice(S, round(S * step_pct), True), math.ceil(1 / step_pct))[:S])
            else:
                idxs = np.sort(np.random.choice(S, round(S * step_pct), True))
            self.learn.xb = tuple(xbi[...,idxs] for xbi in self.learn.xb)
            if self.update_y:
                self.learn.yb = tuple(ybi[...,idxs] for ybi in self.learn.yb)
|
{
"alphanum_fraction": 0.6537878241,
"author": null,
"avg_line_length": 45.4,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f020c7b39f4a7ccce9d69cfc80ed31f16daa4255",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "df8eb53c22701e633b796f5f61b5197d6a2a0872",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "admariner/tsai",
"max_forks_repo_path": "tsai/callback/core.py",
"max_issues_count": 31,
"max_issues_repo_head_hexsha": "0e9a4537452a72392900667a713ce759f19f88ea",
"max_issues_repo_issues_event_max_datetime": "2021-12-29T02:59:49.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-12-01T23:08:51.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "ansari1375/tsai",
"max_issues_repo_path": "tsai/callback/core.py",
"max_line_length": 154,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "0e9a4537452a72392900667a713ce759f19f88ea",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "ansari1375/tsai",
"max_stars_repo_path": "tsai/callback/core.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-02T18:21:27.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-02T18:21:27.000Z",
"num_tokens": 3343,
"path": null,
"reason": "from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 13847
}
|
#!/usr/bin/env python
# coding: utf-8
# Notebook-exported script: loads a '<filen>.ASC' data file, plots two of
# its columns, and writes columns 0-1 out in .chi format via the local
# peakpo `utils` module. Requires an IPython kernel (uses line magics).
# In[1]:
# IPython line magic: list the working directory (raises NameError if run
# outside IPython, where `get_ipython` is undefined).
get_ipython().run_line_magic('ls', '')
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
# In[4]:
# Base filename (without extension) of the data file to convert.
filen = 'feooh'
# In[5]:
# Load the text data; column 0 is presumably the x-axis (e.g. two-theta)
# and columns 1-2 intensity channels — TODO confirm against the .ASC file.
data = np.loadtxt(filen+'.ASC')
# In[6]:
plt.plot(data[:,0], data[:,1])
# In[7]:
plt.plot(data[:,0], data[:,2])
# In[8]:
# Make the local peakpo package importable.
import sys
sys.path.append('../../peakpo/')
import utils
# In[9]:
# Write columns 0 and 1 to '<filen>.chi'.
utils.writechi(filen+'.chi', data[:,0], data[:,1])
# In[ ]:
|
{
"alphanum_fraction": 0.5483870968,
"author": null,
"avg_line_length": 7.6229508197,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "23bfaaaf61eca6895507c8ef362d468b69273875",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2019-06-16T08:09:38.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-05-16T13:32:08.000Z",
"max_forks_repo_head_hexsha": "7a929f735621dfa05bd40e7d64208757161fa43e",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "SHDShim/peakpo-v7",
"max_forks_repo_path": "jnb-tools/5_xrdfile_conversion/ASC_to_CHI.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7a929f735621dfa05bd40e7d64208757161fa43e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "SHDShim/peakpo-v7",
"max_issues_repo_path": "jnb-tools/5_xrdfile_conversion/ASC_to_CHI.py",
"max_line_length": 50,
"max_stars_count": 15,
"max_stars_repo_head_hexsha": "75bdc12da18d70a946a24dde9b12c859150ca0b6",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "SHDShim/PeakPo",
"max_stars_repo_path": "jnb-tools/5_xrdfile_conversion/ASC_to_CHI.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-26T08:20:16.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-09-02T13:55:35.000Z",
"num_tokens": 148,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 465
}
|
import numpy as np
from sklearn import __version__
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from .base import BaseFeatureLibrary
from .weak_pde_library import WeakPDELibrary
class GeneralizedLibrary(BaseFeatureLibrary):
    """Put multiple libraries into one library. All settings
    provided to individual libraries will be applied. Note that this class
    allows one to specifically choose which input variables are used for
    each library, and take tensor products of any pair of libraries. Tensored
    libraries inherit the same input variables specified for the individual
    libraries.
    Parameters
    ----------
    libraries : list of libraries
        Library instances to be applied to the input matrix.
    tensor_array : 2D list of booleans, optional, (default None)
        Default is to not tensor any of the libraries together. Shape
        equal to the # of tensor libraries and the # feature libraries.
        Indicates which pairs of libraries to tensor product together and
        add to the overall library. For instance if you have 5 libraries,
        and want to do two tensor products, you could use the list
        [[1, 0, 0, 1, 0], [0, 1, 0, 1, 1]] to indicate that you want two
        tensored libraries from tensoring libraries 0 and 3 and libraries
        1, 3, and 4.
    inputs_per_library : 2D np.ndarray, optional (default None)
        Shape should be equal to # feature libraries by # variable input.
        Can be used to specify a subset of the variables to use to generate
        a feature library. If number of feature libraries > 1, then can be
        used to generate a large number of libraries, each using their own
        subsets of the input variables. Note that this must be specified for
        all the individual feature libraries.
    library_ensemble : boolean, optional (default False)
        Whether or not to use library bagging (regress on subset of the
        candidate terms in the library).
    ensemble_indices : integer array, optional (default [0])
        The indices to use for ensembling the library. For instance, if
        ensemble_indices = [0], it chops off the first column of the library.
    Attributes
    ----------
    libraries_ : list of libraries
        Library instances to be applied to the input matrix.
    tensor_array_ : 2D list of booleans (default None)
        Indicates which pairs of libraries to tensor product together and
        add to the overall library. For instance if you have 5 libraries,
        and want to do two tensor products, you could use the list
        [[1, 0, 0, 1, 0], [0, 1, 0, 1, 1]] to indicate that you want two
        tensored libraries from tensoring libraries 0 and 3 and libraries
        1, 3, and 4. Shape equal to # of tensor libraries to make
        by the # feature libraries.
    inputs_per_library_ : 2D np.ndarray, (default None)
        Default is that all inputs are used for every library.
        Can be used to specify a subset of the variables to use to generate
        a feature library. If number of feature libraries > 1, then can be
        used to generate a large number of libraries, each using their own
        subsets of the input variables. Note that this must be specified for
        all the individual feature libraries. The shape is equal to
        # feature libraries, # variable inputs.
    n_input_features_ : int
        The total number of input features.
        WARNING: This is deprecated in scikit-learn version 1.0 and higher so
        we check the sklearn.__version__ and switch to n_features_in if needed.
    n_output_features_ : int
        The total number of output features. The number of output features
        is the sum of the numbers of output features for each of the
        concatenated libraries.
    Examples
    --------
    >>> import numpy as np
    >>> from pysindy.feature_library import FourierLibrary, CustomLibrary
    >>> from pysindy.feature_library import GeneralizedLibrary
    >>> x = np.array([[0.,-1],[1.,0.],[2.,-1.]])
    >>> functions = [lambda x : np.exp(x), lambda x,y : np.sin(x+y)]
    >>> lib_custom = CustomLibrary(library_functions=functions)
    >>> lib_fourier = FourierLibrary()
    >>> lib_generalized = GeneralizedLibrary([lib_custom, lib_fourier])
    >>> lib_generalized.fit(x)
    >>> lib_generalized.transform(x)
    """
    def __init__(
        self,
        libraries: list,
        tensor_array=None,
        inputs_per_library=None,
        library_ensemble=False,
        ensemble_indices=[0],
    ):
        super(GeneralizedLibrary, self).__init__(
            library_ensemble=library_ensemble, ensemble_indices=ensemble_indices
        )
        if len(libraries) > 0:
            self.libraries_ = libraries
            # Disallow mixing weak-form and standard libraries: their feature
            # columns live in different (integrated vs pointwise) domains.
            weak_libraries = False
            nonweak_libraries = False
            for lib in self.libraries_:
                if isinstance(lib, WeakPDELibrary):
                    weak_libraries = True
                else:
                    nonweak_libraries = True
            if weak_libraries and nonweak_libraries:
                raise ValueError(
                    "At least one of the libraries is a weak form library, "
                    "and at least one of the libraries is not, which will "
                    "result in a nonsensical optimization problem. Please use "
                    "all weak form libraries or no weak form libraries."
                )
        else:
            raise ValueError(
                "Empty or nonsensical library list passed to this library."
            )
        # Shape/value validation of inputs_per_library. The upper-bound check
        # (indices < n_features) needs the data and is deferred to fit().
        if inputs_per_library is not None:
            if inputs_per_library.ndim != 2:
                raise ValueError("Input libraries array should form a 2D numpy array.")
            if inputs_per_library.shape[0] != len(libraries):
                raise ValueError(
                    "If specifying different inputs for each library, then "
                    "first dimension of inputs_per_library must be equal to "
                    "the number of libraries being used."
                )
            if np.any(inputs_per_library < 0):
                raise ValueError(
                    "The inputs_per_library parameter must be a numpy array "
                    "of integers with values between 0 and "
                    "len(input_variables) - 1."
                )
        # Validate tensor_array: 2D, one column per library, 0/1 entries, and
        # every row must select at least two libraries to tensor.
        if tensor_array is not None:
            if np.asarray(tensor_array).ndim != 2:
                raise ValueError("Tensor product array should be 2D list.")
            if np.asarray(tensor_array).shape[-1] != len(libraries):
                raise ValueError(
                    "If specifying tensor products between libraries, then "
                    "last dimension of tensor_array must be equal to the "
                    "number of libraries being used."
                )
            if np.any(np.ravel(tensor_array) > 1) or np.any(np.ravel(tensor_array) < 0):
                raise ValueError(
                    "The tensor_array parameter must be a numpy array "
                    "of booleans, so values must be either 0 or 1."
                )
            for i in range(len(tensor_array)):
                if np.sum(tensor_array[i]) < 2:
                    raise ValueError(
                        "If specifying libraries to tensor together, must "
                        "specify at least two libraries (there should be at "
                        "least two entries with value 1 in the tensor_array)."
                    )
        self.tensor_array_ = tensor_array
        self.inputs_per_library_ = inputs_per_library
        # Until fit() runs, libraries_full_ is just the untensored list.
        self.libraries_full_ = self.libraries_
    def fit(self, x, y=None):
        """
        Compute number of output features.
        Parameters
        ----------
        x : array-like, shape (n_samples, n_features)
            The data.
        Returns
        -------
        self : instance
        """
        _, n_features = check_array(x).shape
        # sklearn 1.0 renamed n_input_features_ to n_features_in_; see the
        # WARNING in the class docstring.
        if float(__version__[:3]) >= 1.0:
            self.n_features_in_ = n_features
        else:
            self.n_input_features_ = n_features
        # If parameter is not set, use all the inputs
        if self.inputs_per_library_ is None:
            temp_inputs = np.tile(range(n_features), len(self.libraries_))
            self.inputs_per_library_ = np.reshape(
                temp_inputs, (len(self.libraries_), n_features)
            )
        else:
            # Check that the numbers in inputs_per_library are sensible
            if np.any(self.inputs_per_library_ >= n_features):
                raise ValueError(
                    "Each row in inputs_per_library must consist of integers "
                    "between 0 and the number of total input features - 1. "
                )
        # First fit all libraries separately below, with subset of the inputs
        fitted_libs = [
            lib.fit(x[:, np.unique(self.inputs_per_library_[i, :])], y)
            for i, lib in enumerate(self.libraries_)
        ]
        # Next, tensor some libraries and append them to the list
        if self.tensor_array_ is not None:
            num_tensor_prods = np.shape(self.tensor_array_)[0]
            for i in range(num_tensor_prods):
                lib_inds = np.ravel(np.where(self.tensor_array_[i]))
                library_subset = np.asarray(fitted_libs)[lib_inds]
                library_full = library_subset[0]
                n_output_features = library_subset[0].n_output_features_
                # '*' between libraries builds a tensored library —
                # NOTE(review): assumed implemented by the base library's
                # __mul__; confirm in BaseFeatureLibrary.
                for j in range(1, len(library_subset)):
                    library_full = library_full * library_subset[j]
                    n_output_features = (
                        n_output_features * library_subset[j].n_output_features_
                    )
                library_full._set_inputs_per_library(
                    self.inputs_per_library_[lib_inds, :]
                )
                library_full.fit(x, y)
                fitted_libs.append(library_full)
        # Calculate the sum of output features
        self.n_output_features_ = sum([lib.n_output_features_ for lib in fitted_libs])
        # Save fitted libs
        self.libraries_full_ = fitted_libs
        return self
    def transform(self, x):
        """Transform data with libs provided below.
        Parameters
        ----------
        x : array-like, shape [n_samples, n_features]
            The data to transform, row by row.
        Returns
        -------
        xp : np.ndarray, shape [n_samples, NP]
            The matrix of features, where NP is the number of features
            generated from applying the custom functions to the inputs.
        """
        for lib in self.libraries_full_:
            check_is_fitted(lib)
        n_samples, n_features = x.shape
        # Weak-form libraries emit K * num_trajectories rows (one per test
        # function per trajectory) rather than one row per sample.
        if isinstance(self.libraries_[0], WeakPDELibrary):
            n_samples = self.libraries_[0].K * self.libraries_[0].num_trajectories
        if float(__version__[:3]) >= 1.0:
            n_input_features = self.n_features_in_
        else:
            n_input_features = self.n_input_features_
        if n_features != n_input_features:
            raise ValueError("x shape does not match training shape")
        # preallocate matrix
        xp = np.zeros((n_samples, self.n_output_features_))
        current_feat = 0
        for i, lib in enumerate(self.libraries_full_):
            # retrieve num output features from lib
            lib_n_output_features = lib.n_output_features_
            start_feature_index = current_feat
            end_feature_index = start_feature_index + lib_n_output_features
            # Libraries beyond the first inputs_per_library_.shape[0] entries
            # are the tensored ones appended in fit(); they take the full
            # input and subsample internally.
            if i < self.inputs_per_library_.shape[0]:
                xp[:, start_feature_index:end_feature_index] = lib.transform(
                    x[:, np.unique(self.inputs_per_library_[i, :])]
                )
            else:
                xp[:, start_feature_index:end_feature_index] = lib.transform(x)
            current_feat += lib_n_output_features
        # If library bagging, return xp missing the terms at ensemble_indices
        return self._ensemble(xp)
    def get_feature_names(self, input_features=None):
        """Return feature names for output features.
        Parameters
        ----------
        input_features : list of string, length n_features, optional
            String names for input features if available. By default,
            "x0", "x1", ... "xn_features" is used.
        Returns
        -------
        output_feature_names : list of string, length n_output_features
        """
        feature_names = list()
        for i, lib in enumerate(self.libraries_full_):
            if i < self.inputs_per_library_.shape[0]:
                if input_features is None:
                    input_features_i = [
                        "x%d" % k for k in np.unique(self.inputs_per_library_[i, :])
                    ]
                else:
                    input_features_i = np.asarray(input_features)[
                        np.unique(self.inputs_per_library_[i, :])
                    ].tolist()
            else:
                # Tensor libraries need all the inputs and then internally
                # handle the subsampling of the input variables
                if input_features is None:
                    input_features_i = [
                        "x%d" % k for k in range(self.inputs_per_library_.shape[1])
                    ]
                else:
                    input_features_i = input_features
            feature_names += lib.get_feature_names(input_features_i)
        return feature_names
|
{
"alphanum_fraction": 0.6037197042,
"author": null,
"avg_line_length": 42.1512345679,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "533cb58418a2fea55821acc4b715168104a8e734",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2020-01-07T20:30:04.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-12-05T13:08:41.000Z",
"max_forks_repo_head_hexsha": "04a8374b8a579f98e26f639444316f77bdc78bed",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "briandesilva/pysindy",
"max_forks_repo_path": "pysindy/feature_library/generalized_library.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "04a8374b8a579f98e26f639444316f77bdc78bed",
"max_issues_repo_issues_event_max_datetime": "2020-01-13T17:18:22.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-12-17T01:59:55.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "briandesilva/pysindy",
"max_issues_repo_path": "pysindy/feature_library/generalized_library.py",
"max_line_length": 88,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "04a8374b8a579f98e26f639444316f77bdc78bed",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "briandesilva/pysindy",
"max_stars_repo_path": "pysindy/feature_library/generalized_library.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2799,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 13657
}
|
import time
import numpy as np
from matplotlib import rc
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
from pydrake.all import PiecewisePolynomial
from qsim_old.simulator import QuasistaticSimulator
from qsim_old.problem_definition_pinch import problem_definition
from plotting import PlotForceDistance, PlotLeftFingerPosition
#%%
# Quasistatic simulator for the 2D gripper/ball pinch problem, with
# visualization turned on.
q_sim = QuasistaticSimulator(problem_definition, is_quasi_dynamic=True,
                             visualize=True)
#%% new "impedance robot" formulation, which I think is correct.
# define actuated trajectory
# r: ball radius, h: simulation time step — NOTE(review): names assumed from
# usage; confirm against problem_definition_pinch.
r = problem_definition['r']
h = problem_definition['h']
# Initial configuration; q0[2:] are the actuated DOFs (see qa_knots[0] below).
q0 = np.array([0, r, -1.06*r, 1.06*r, 0])
# Knots of the commanded (actuated) trajectory: close fingers, then press down.
qa_knots = np.zeros((4, 3))
qa_knots[0] = q0[2:]
qa_knots[1] = [-0.9*r, 0.9*r, 0]
qa_knots[2] = [-0.9*r, 0.9*r, -0.03]
qa_knots[3] = [-0.9*r, 0.9*r, -0.03]
n_steps = 35
t_knots = [0, 8*h, (8 + 15)*h, n_steps * h]
q_traj = PiecewisePolynomial.FirstOrderHold(t_knots, qa_knots.T)
# Times (for plot annotation) at which the contact mode changes.
t_contact_mode_change = [0.03, 0.13, 0.23]
q = q0.copy()
q_sim.update_visualizer(q)
print(q0)
input("start?")
# Per-step logs: normal impulses, friction impulses, configurations, commands.
lambda_n_log = []
lambda_f_log = []
q_log = [q0.copy()]
qa_cmd_log = []
for i in range(n_steps):
    # Command the actuated DOFs at the end of this step and solve one LCP step.
    q_a_cmd = q_traj.value((i + 1) * h).squeeze()
    dq_a, dq_u, lambda_n, lambda_f, result = q_sim.step_lcp(q, q_a_cmd)
    # Update q.
    q += np.hstack([dq_u, dq_a])
    q_sim.update_visualizer(q)
    # logging.
    lambda_n_log.append(lambda_n)
    lambda_f_log.append(lambda_f)
    q_log.append(q.copy())
    qa_cmd_log.append(q_a_cmd)
    # Slow playback (10x real step length) so the visualization is watchable.
    time.sleep(h * 10)
q_log = np.array(q_log)
qa_cmd_log = np.array(qa_cmd_log)
lambda_n_log = np.array(lambda_n_log)
lambda_f_log = np.array(lambda_f_log)
# %% compute data for plots
"""
lambda_n_log[i] is the impulse over [h*i, h*(i+1)]
"""
n_c = problem_definition['n_c']
calc_phi = problem_definition['calc_phi']
Jf_u = problem_definition['Jf_u']
Jf_a = problem_definition['Jf_a']
Jn_u = problem_definition['Jn_u']
Jn_a = problem_definition['Jn_a']
t_sim1 = np.arange(n_steps + 1) * h
t_sim = np.arange(n_steps) * h
friction_log = np.zeros((n_steps, n_c))
contact_velocity_log = np.zeros((n_c, n_steps))
# Signed distances for every logged configuration.
phi_log = np.array([calc_phi(q) for q in q_log])
# Net friction per contact: each contact has two opposing friction components
# at columns (2*i, 2*i + 1) of lambda_f_log.
for i in range(n_c):
    idx = i * 2
    friction_log[:, i] = lambda_f_log[:, idx] - lambda_f_log[:, idx + 1]
# Tangential/normal contact velocities via finite differences of q.
Jf = np.hstack((Jf_u, Jf_a))
Jn = np.hstack((Jn_u, Jn_a))
dq = (q_log[1:] - q_log[:-1])
v_tangent = (Jf.dot(dq.T / h)).T
v_normal = (dq / h).dot(Jn.T)
#%%
PlotForceDistance(t_sim, phi_log, lambda_n_log, friction_log,
                  t_contact_mode_change,
                  figsize=(6, 4),
                  save_name="contact_force_distance_lcp.pdf")
PlotLeftFingerPosition(t_sim1, q_log, qa_cmd_log, t_contact_mode_change,
                       fig_size=(6, 3),
                       save_name="xy_cmd_vs_xy_true_lcp.pdf")
|
{
"alphanum_fraction": 0.6697247706,
"author": null,
"avg_line_length": 27.5145631068,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ef9aad3b7e5c23e12d64e61f97fcec3560a5298d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7c6f99cc7237dd922f6eb0b54c580303e86b5223",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pangtao22/quasistatic_simulator",
"max_forks_repo_path": "examples/gripper_ball_pinch_2d/run_quasistatic_sim.py",
"max_issues_count": 5,
"max_issues_repo_head_hexsha": "7c6f99cc7237dd922f6eb0b54c580303e86b5223",
"max_issues_repo_issues_event_max_datetime": "2022-02-07T18:06:07.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-08-16T22:27:54.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pangtao22/quasistatic_simulator",
"max_issues_repo_path": "examples/gripper_ball_pinch_2d/run_quasistatic_sim.py",
"max_line_length": 72,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "7c6f99cc7237dd922f6eb0b54c580303e86b5223",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pangtao22/quasistatic_simulator",
"max_stars_repo_path": "examples/gripper_ball_pinch_2d/run_quasistatic_sim.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-23T17:26:16.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-07-15T03:58:55.000Z",
"num_tokens": 906,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2834
}
|
import os
import subprocess
import shutil
import numpy as np
import pandas as pd
from d3m.primitives.schema_discovery import profiler
from d3m.primitives.data_transformation import column_parser, extract_columns_by_semantic_types, grouping_field_compose
from kf_d3m_primitives.ts_forecasting.nbeats.nbeats import NBEATSPrimitive
from kf_d3m_primitives.ts_forecasting.nbeats.nbeats_pipeline import NBEATSPipeline
import utils as test_utils
class PreProcessPipeline():
    """Minimal d3m preprocessing chain for the NBEATS tests: profile the
    dataframe, parse typed columns, optionally compose grouping fields, then
    split out attribute and target columns."""
    def __init__(self, group_compose = False):
        # Each primitive's Hyperparams class is pulled from its metadata so
        # selected defaults can be overridden below.
        profiler_hp = profiler.Common.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        parser_hp = column_parser.Common.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        extract_hp = extract_columns_by_semantic_types.Common.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        group_hp = grouping_field_compose.Common.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
        self.prof = profiler.Common(hyperparams = profiler_hp.defaults())
        # Parse only these semantic types into concrete values.
        self.parse = column_parser.Common(
            hyperparams = parser_hp(
                parser_hp.defaults(),
                parse_semantic_types = [
                    "http://schema.org/Boolean",
                    "http://schema.org/Integer",
                    "http://schema.org/Float",
                    "https://metadata.datadrivendiscovery.org/types/FloatVector",
                    "http://schema.org/DateTime",
                ]
            )
        )
        self.group = grouping_field_compose.Common(hyperparams = group_hp.defaults())
        # Attributes extractor also keeps GroupingKey columns.
        self.extract_attr = extract_columns_by_semantic_types.Common(
            hyperparams = extract_hp(
                extract_hp.defaults(),
                semantic_types = [
                    "https://metadata.datadrivendiscovery.org/types/Attribute",
                    "https://metadata.datadrivendiscovery.org/types/GroupingKey"
                ]
            )
        )
        self.extract_targ = extract_columns_by_semantic_types.Common(
            hyperparams = extract_hp(
                extract_hp.defaults(),
                semantic_types = ["https://metadata.datadrivendiscovery.org/types/TrueTarget"]
            )
        )
        # Whether produce() inserts the grouping_field_compose step.
        self.group_compose = group_compose
    def fit(self, df):
        """Fit the profiler on a training dataframe (the chain's only
        stateful step)."""
        self.prof.set_training_data(inputs = df)
        self.prof.fit()
    def produce(self, df):
        """Run the chain on df; returns (attributes, targets) dataframes."""
        df = self.prof.produce(inputs = df).value
        df = self.parse.produce(inputs = df).value
        if self.group_compose:
            df = self.group.produce(inputs = df).value
        return (
            self.extract_attr.produce(inputs = df).value,
            self.extract_targ.produce(inputs = df).value
        )
# strptime-style format of the raw time column in each seed dataset; used by
# pd.to_datetime when sorting rows chronologically.
datetime_format_strs = {
    '56_sunspots_MIN_METADATA': '%Y',
    '56_sunspots_monthly_MIN_METADATA': '%Y-%m',
    'LL1_736_population_spawn_MIN_METADATA': '%j',
    'LL1_736_stock_market_MIN_METADATA': '%m/%d/%Y',
    'LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA': '%j',
    'LL1_terra_canopy_height_long_form_s4_80_MIN_METADATA': '%j',
    'LL1_PHEM_Monthly_Malnutrition_MIN_METADATA': '%Y-%m-%d',
    'LL1_PHEM_weeklyData_malnutrition_MIN_METADATA': '%Y-%m-%d'
}
# Frequency string the NBEATS primitive is expected to infer (checked against
# nbeats._freq).
freqs = {
    '56_sunspots_MIN_METADATA': '12M',
    '56_sunspots_monthly_MIN_METADATA': 'M',
    'LL1_736_population_spawn_MIN_METADATA': 'D',
    'LL1_736_stock_market_MIN_METADATA': 'D',
    'LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA': 'D',
    'LL1_terra_canopy_height_long_form_s4_80_MIN_METADATA': 'D',
    'LL1_PHEM_Monthly_Malnutrition_MIN_METADATA': 'M',
    'LL1_PHEM_weeklyData_malnutrition_MIN_METADATA': 'W'
}
# (full-fit, split-fit) minimum prediction lengths per dataset; the tests add
# a margin of 5 on top of the selected entry.
min_pred_lengths = {
    '56_sunspots_MIN_METADATA': (21, 40),
    '56_sunspots_monthly_MIN_METADATA': (38, 321),
    'LL1_736_population_spawn_MIN_METADATA': (60, 100),
    'LL1_736_stock_market_MIN_METADATA': (34, 50),
    'LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA': (10, 10),
    'LL1_terra_canopy_height_long_form_s4_80_MIN_METADATA': (20, 20),
    'LL1_PHEM_Monthly_Malnutrition_MIN_METADATA': (10, 15),
    'LL1_PHEM_weeklyData_malnutrition_MIN_METADATA': (10, 15)
}
# Expected grouping-key column indices when grouping_field_compose is NOT
# used (checked against nbeats._grouping_columns).
grouping_cols = {
    '56_sunspots_MIN_METADATA': [],
    '56_sunspots_monthly_MIN_METADATA': [],
    'LL1_736_population_spawn_MIN_METADATA': [0,1],
    'LL1_736_stock_market_MIN_METADATA': [0],
    'LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA': [0,1],
    'LL1_terra_canopy_height_long_form_s4_80_MIN_METADATA': [0,1],
    'LL1_PHEM_Monthly_Malnutrition_MIN_METADATA': [0,1,2],
    'LL1_PHEM_weeklyData_malnutrition_MIN_METADATA': [0,1,2],
}
def _test_set_training_data(dataset_name, target_col, group_compose = False, split_train = False):
    """Load a seed dataset, preprocess it, fit an NBEATS primitive, and
    sanity-check the inferred grouping columns and frequency.

    Parameters
    ----------
    dataset_name : str
        Seed dataset directory name under /datasets/seed_datasets_current.
    target_col : int
        Index of the target column in the learningData resource.
    group_compose : bool
        Whether preprocessing inserts the grouping_field_compose primitive.
    split_train : bool
        If True, fit on a 90% temporal split; otherwise fit on all rows.

    Returns
    -------
    tuple
        (nbeats, preprocess, train_inputs, val_inputs, all_inputs)
    """
    dataset = test_utils.load_dataset(f'/datasets/seed_datasets_current/{dataset_name}/TRAIN/dataset_TRAIN')
    df = test_utils.get_dataframe(dataset, 'learningData', target_col)
    time_col = df.metadata.list_columns_with_semantic_types(
        (
            "https://metadata.datadrivendiscovery.org/types/Time",
            "http://schema.org/DateTime",
        )
    )[0]
    # Sort rows chronologically, but restore the original (string) time
    # values afterwards so downstream primitives parse them themselves; the
    # restoring assignment realigns on the preserved index.
    original_times = df.iloc[:, time_col]
    df.iloc[:, time_col] = pd.to_datetime(
        df.iloc[:, time_col],
        format = datetime_format_strs[dataset_name]
    )
    df = df.sort_values(by = df.columns[time_col])
    df.iloc[:, time_col] = original_times
    # 90/10 temporal train/validation split.
    train_split = int(0.9 * df.shape[0])
    train = df.iloc[:train_split, :].reset_index(drop=True)
    val = df.iloc[train_split:, :].reset_index(drop=True)
    df = df.reset_index(drop=True)
    preprocess = PreProcessPipeline(group_compose=group_compose)
    preprocess.fit(train)
    train_inputs, train_outputs = preprocess.produce(train)
    val_inputs, _ = preprocess.produce(val)
    all_inputs, all_outputs = preprocess.produce(df)
    # Build the NBEATS primitive; prediction length is the dataset minimum
    # (split vs full variant) plus a small margin.
    # (The duplicated nbeats_hp lookup that was here has been removed.)
    nbeats_hp = NBEATSPrimitive.metadata.query()['primitive_code']['class_type_arguments']['Hyperparams']
    pred_length_idx = 1 if split_train else 0
    nbeats = NBEATSPrimitive(
        hyperparams = nbeats_hp(
            nbeats_hp.defaults(),
            epochs = 1,
            steps_per_epoch = 1,
            num_estimators = 1,
            prediction_length = min_pred_lengths[dataset_name][pred_length_idx] + 5,
            nan_padding = False
        )
    )
    # Start from a clean weights directory so runs do not interfere.
    if os.path.isdir(nbeats.hyperparams['weights_dir']):
        shutil.rmtree(nbeats.hyperparams['weights_dir'])
    if split_train:
        nbeats.set_training_data(inputs=train_inputs, outputs=train_outputs)
    else:
        nbeats.set_training_data(inputs=all_inputs, outputs=all_outputs)
    # With group_compose, the composed key is appended as the last column.
    if group_compose:
        assert nbeats._grouping_columns == [train_inputs.shape[1]-1]
    else:
        assert grouping_cols[dataset_name] == nbeats._grouping_columns
    assert freqs[dataset_name] == nbeats._freq
    nbeats.fit()
    return nbeats, preprocess, train_inputs, val_inputs, all_inputs
def _check_interpretable(df, tolerance = 2e-3):
assert df.shape[1] == 3
forecast = df.iloc[:, 0].dropna().values
trend = df.iloc[:, 1].dropna().values
seasonality = df.iloc[:, 2].dropna().values
#print(max(abs(forecast - trend - seasonality)))
assert (abs(forecast - trend - seasonality) < tolerance).all()
def _test_produce_train_data(nbeats, train_inputs, val_inputs, all_inputs):
    """Produce predictions for the train/val/full splits, check each has one
    row per input row, and verify interpretability on the full split."""
    predictions = []
    for split in (train_inputs, val_inputs, all_inputs):
        preds = nbeats.produce(inputs = split).value
        assert preds.shape[0] == split.shape[0]
        predictions.append(preds)
    _check_interpretable(predictions[-1])
def _test_produce_test_data(nbeats, inputs_test):
    """Produce predictions for the test split and check they are complete
    (no NaNs), one row per input row, and interpretable."""
    predictions = nbeats.produce(inputs = inputs_test).value
    assert not predictions.isna().any().any()
    assert predictions.shape[0] == inputs_test.shape[0]
    _check_interpretable(predictions)
def _test_ts(dataset_name, target_col, group_compose = False, split_train = False):
    """End-to-end check: fit NBEATS on the TRAIN split of `dataset_name`,
    then produce and validate predictions on its TEST split."""
    fitted = _test_set_training_data(
        dataset_name,
        target_col,
        group_compose=group_compose,
        split_train=split_train,
    )
    nbeats, preprocess = fitted[0], fitted[1]
    # Train-split production check is currently disabled:
    #_test_produce_train_data(nbeats, fitted[2], fitted[3], fitted[4])
    test_dataset = test_utils.load_dataset(f'/datasets/seed_datasets_current/{dataset_name}/TEST/dataset_TEST/')
    test_df = test_utils.get_dataframe(test_dataset, 'learningData', target_col)
    inputs_test, _ = preprocess.produce(test_df)
    _test_produce_test_data(nbeats, inputs_test)
def _test_serialize(dataset, group_compose = False):
    """Round-trip an NBEATS pipeline: write it, fit+serialize, deserialize
    and score, then remove every artifact it created."""
    weights_dir = "/scratch_dir/nbeats"
    # Ensure a fresh weights directory for this run.
    if os.path.isdir(weights_dir):
        shutil.rmtree(weights_dir)
    config = dict(
        epochs = 1,
        steps_per_epoch = 1,
        num_estimators = 1,
        prediction_length = min_pred_lengths[dataset][0],
        group_compose = group_compose,
        weights_dir = weights_dir,
    )
    pipeline = NBEATSPipeline(**config)
    pipeline.write_pipeline()
    pipeline.fit_serialize(dataset)
    pipeline.deserialize_score(dataset)
    pipeline.delete_pipeline()
    pipeline.delete_serialized_pipeline()
# def test_fit_produce_dataset_sunspots():
# _test_ts('56_sunspots_MIN_METADATA', 4)
# def test_fit_produce_dataset_sunspots_monthly():
# _test_ts('56_sunspots_monthly_MIN_METADATA', 2)
# def test_fit_produce_dataset_stock():
# _test_ts('LL1_736_stock_market_MIN_METADATA', 3)
# def test_fit_produce_dataset_pop_spawn():
# _test_ts('LL1_736_population_spawn_MIN_METADATA', 4)
def test_fit_produce_dataset_terra():
    # Fit/produce smoke test on the terra leaf-angle dataset (target col 4).
    _test_ts('LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA', 4)
def test_fit_produce_dataset_terra_80():
    # Fit/produce smoke test on the terra canopy-height dataset (target col 4).
    _test_ts('LL1_terra_canopy_height_long_form_s4_80_MIN_METADATA', 4)
def test_fit_produce_dataset_phem_monthly():
    # Fit/produce smoke test on the monthly malnutrition dataset (target col 5).
    _test_ts('LL1_PHEM_Monthly_Malnutrition_MIN_METADATA', 5)
def test_fit_produce_dataset_phem_weekly():
    # Fit/produce smoke test on the weekly malnutrition dataset (target col 5).
    _test_ts('LL1_PHEM_weeklyData_malnutrition_MIN_METADATA', 5)
# def test_fit_produce_split_dataset_sunspots():
# _test_ts('56_sunspots_MIN_METADATA', 4, split_train=True)
# def test_fit_produce_split_dataset_sunspots_monthly():
# _test_ts('56_sunspots_monthly_MIN_METADATA', 2, split_train=True)
# def test_fit_produce_split_dataset_stock():
# _test_ts('LL1_736_stock_market_MIN_METADATA', 3, split_train=True)
# def test_fit_produce_split_dataset_pop_spawn():
# _test_ts('LL1_736_population_spawn_MIN_METADATA', 4, group_compose=True, split_train=True)
# def test_fit_produce_split_dataset_terra():
# _test_ts('LL1_terra_leaf_angle_mean_long_form_s4_MIN_METADATA', 4, group_compose = True, split_train=True)
# def test_fit_produce_split_dataset_phem_monthly():
# _test_ts('LL1_PHEM_Monthly_Malnutrition_MIN_METADATA', 5, group_compose = True, split_train=True)
# def test_fit_produce_split_dataset_phem_weekly():
# _test_ts('LL1_PHEM_weeklyData_malnutrition_MIN_METADATA', 5, group_compose = True, split_train=True)
# def test_serialization_dataset_sunspots():
# _test_serialize('56_sunspots_MIN_METADATA')
# def test_serialization_dataset_sunspots_monthly():
# _test_serialize('56_sunspots_monthly_MIN_METADATA')
# def test_serialization_dataset_pop_spawn():
# _test_serialize('LL1_736_population_spawn_MIN_METADATA')
# def test_serialization_dataset_stock():
# _test_serialize('LL1_736_stock_market_MIN_METADATA')
|
{
"alphanum_fraction": 0.7142980264,
"author": null,
"avg_line_length": 41.4392857143,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "02f01c5b74c41596d19a61de253e6f358ded0391",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5530da1b8efba7de8cec6890401c5d4091acd45a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "cdbethune/d3m-primitives",
"max_forks_repo_path": "tests/test_nbeats.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5530da1b8efba7de8cec6890401c5d4091acd45a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "cdbethune/d3m-primitives",
"max_issues_repo_path": "tests/test_nbeats.py",
"max_line_length": 135,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5530da1b8efba7de8cec6890401c5d4091acd45a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cdbethune/d3m-primitives",
"max_stars_repo_path": "tests/test_nbeats.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2928,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11603
}
|
import onnx
import numpy as np
from mqbench.utils.logger import logger
from .utils import ONNXGraph
# Op types MQBench exports as fake-quantize nodes. The pass below reads the
# (scale, zero_point) inputs of such nodes (input[1], input[2]) when lowering
# the surrounding graph to QLinear* ops.
FAKE_QUANTIZE_OP = ['FakeQuantizeLearnablePerchannelAffine', 'FixedPerChannelAffine', 'FakeQuantizeDSQPerchannel',
                    'LearnablePerTensorAffine', 'FixedPerTensorAffine', 'FakeQuantizeDSQPertensor']
class ONNXQNNPass(object):
    def __init__(self, onnx_model_path):
        # Wrap the serialized model in an ONNXGraph helper, which provides
        # the node/initializer lookup and graph-edit methods used below.
        self.onnx_model = ONNXGraph(onnx_model_path)
@property
def qlinear_op_type(self):
return ['QuantizeLinear', 'QLinearConv', 'QLinearAdd', 'QLinearGemm', 'QLinearGlobalAveragePool',
'QLinearAveragePool', 'QLinearConcat']
@staticmethod
def attribute_to_kwarg(attribute):
'''
Convert attribute to kwarg format for use with onnx.helper.make_node.
:parameter attribute: attribute in AttributeProto format.
:return: attribute in {key: value} format.
'''
if (attribute.type == 0):
raise ValueError('attribute {} does not have type specified.'.format(attribute.name))
# Based on attribute type definitions from AttributeProto
# definition in https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
if (attribute.type == 1):
value = attribute.f
elif (attribute.type == 2):
value = attribute.i
elif (attribute.type == 3):
value = attribute.s
elif (attribute.type == 4):
value = attribute.t
elif (attribute.type == 5):
value = attribute.g
elif (attribute.type == 6):
value = attribute.floats
elif (attribute.type == 7):
value = attribute.ints
elif (attribute.type == 8):
value = attribute.strings
elif (attribute.type == 9):
value = attribute.tensors
elif (attribute.type == 10):
value = attribute.graphs
else:
raise ValueError('attribute {} has unsupported type {}.'.format(attribute.name, attribute.type))
return {attribute.name: value}
def quantize_weight(self, weight_name, scale_name, zero_point_name):
weight = self.onnx_model.get_initializer(weight_name)
scale = self.onnx_model.get_initializer(scale_name)
zero_point = self.onnx_model.get_initializer(zero_point_name)
return ((weight / scale).round() + zero_point).astype(np.uint8)
def quantize_bias(self, bias, x_scale, w_scale):
x_scale = self.onnx_model.get_initializer(x_scale)
w_scale = self.onnx_model.get_initializer(w_scale)
bias = self.onnx_model.get_initializer(bias)
return (bias / (x_scale * w_scale)).astype(np.int32)
    @property
    def node_without_qparams(self):
        # Op types that pass quantized tensors through unchanged and thus
        # carry no (scale, zero_point) of their own.
        return ['Flatten']
    def replace_conv_gemm(self, node, idx, is_conv):
        """Rewrite a Conv/Gemm node — whose data input and weight are produced
        by fake-quantize nodes — into a QLinearConv/QLinearGemm node inserted
        at graph position `idx`, statically quantizing the weight (and bias,
        if present) initializers in place.

        :parameter node: the Conv or Gemm NodeProto to replace.
        :parameter idx: insertion index for the new node.
        :parameter is_conv: True -> emit QLinearConv, False -> QLinearGemm.
        """
        # Input scale
        qlinear_conv_inputs = []
        input_fake_quant_node = self.onnx_model.get_tensor_producer(node.input[0])
        assert input_fake_quant_node.op_type in FAKE_QUANTIZE_OP
        x_scale, x_zero_point = input_fake_quant_node.input[1], input_fake_quant_node.input[2]
        # Output scale
        qlinear_conv_output = node.output
        y_scale, y_zero_point = self.get_node_output_qparams(node)
        # Weight scale: quantize the weight initializer in place and reuse
        # its name as the QLinear input.
        weight_fake_quant_node = self.onnx_model.get_tensor_producer(node.input[1])
        w_scale, w_zero_point = weight_fake_quant_node.input[1], weight_fake_quant_node.input[2]
        weight_name = weight_fake_quant_node.input[0]
        W = self.quantize_weight(weight_name, w_scale, w_zero_point)
        self.onnx_model.set_initializer(weight_name, W)
        qlinear_conv_inputs.extend([node.input[0], x_scale, x_zero_point,
                                    weight_name, w_scale, w_zero_point,
                                    y_scale, y_zero_point])
        # Bias (optional third input): quantized to int32 in place.
        if len(node.input) == 3:
            bias_name = node.input[2]
            B = self.quantize_bias(bias_name, x_scale, w_scale)
            self.onnx_model.set_initializer(bias_name, B)
            qlinear_conv_inputs.append(bias_name)
        # Carry over all of the original node's attributes.
        kwargs = {}
        for attribute in node.attribute:
            kwargs.update(ONNXQNNPass.attribute_to_kwarg(attribute))
        node_type = "QLinearConv" if is_conv else "QLinearGemm"
        qlinear_conv_node = onnx.helper.make_node(node_type,
                                                  qlinear_conv_inputs,
                                                  qlinear_conv_output,
                                                  node.name + '_quantized',
                                                  **kwargs)
        # Drop the original node and the now-redundant weight fake-quant
        # node, insert the QLinear node, then rebuild graph topology.
        self.onnx_model.remove_node_purely(node)
        self.onnx_model.remove_node_purely(weight_fake_quant_node)
        self.onnx_model.insert_node_purely(qlinear_conv_node, idx)
        self.onnx_model.topologize_graph()
def replace_add_to_qlinearadd(self, node, idx):
        """Rewrite a float Add node as a com.microsoft QLinearAdd node.

        Both operands must be produced by fake-quantize nodes; their scale /
        zero-point tensor names plus the output qparams become the QLinearAdd
        inputs.

        Args:
            node: the Add NodeProto to replace.
            idx: position in graph.node at which to insert the new node.
        """
        # First input
        qlinear_add_input = []
        qlinear_add_output = node.output
        first_input_node = self.onnx_model.get_tensor_producer(node.input[0])
        assert first_input_node.op_type in FAKE_QUANTIZE_OP
        first_input_quantized = first_input_node.output[0]
        first_scale = first_input_node.input[1]
        first_zero_point = first_input_node.input[2]
        # Second input
        second_input_node = self.onnx_model.get_tensor_producer(node.input[1])
        assert second_input_node.op_type in FAKE_QUANTIZE_OP
        second_input_quantized = second_input_node.output[0]
        second_scale = second_input_node.input[1]
        second_zero_point = second_input_node.input[2]
        # Output qparams come from the downstream fake-quantize node.
        output_scale, output_zero_point = self.get_node_output_qparams(node)
        qlinear_add_input.extend([first_input_quantized, first_scale, first_zero_point,
                                  second_input_quantized, second_scale, second_zero_point,
                                  output_scale, output_zero_point])
        # Preserve original attributes; QLinearAdd is an onnxruntime
        # contrib op, hence the com.microsoft domain.
        kwargs = {}
        for attribute in node.attribute:
            kwargs.update(ONNXQNNPass.attribute_to_kwarg(attribute))
        qlinear_add_node = onnx.helper.make_node("QLinearAdd",
                                                 qlinear_add_input,
                                                 qlinear_add_output,
                                                 node.name + '_quantized',
                                                 domain='com.microsoft',
                                                 **kwargs)
        self.onnx_model.insert_node_purely(qlinear_add_node, idx)
        self.onnx_model.remove_node_purely(node)
        self.onnx_model.topologize_graph()
def replace_pool_to_qlinearpool(self, node, idx, is_global):
        """Rewrite an (Global)AveragePool node as its QLinear contrib variant.

        Args:
            node: the pooling NodeProto to replace.
            idx: position in graph.node at which to insert the new node.
            is_global: True -> QLinearGlobalAveragePool,
                False -> QLinearAveragePool.
        """
        qlinear_pool_input = []
        # The producer of the pool input must be a fake-quantize node; its
        # inputs 1/2 hold the input scale and zero-point tensor names.
        prev_node = self.onnx_model.get_tensor_producer(node.input[0])
        assert prev_node.op_type in FAKE_QUANTIZE_OP
        x_scale, x_zero_point = prev_node.input[1], prev_node.input[2]
        y_scale, y_zero_point = self.get_node_output_qparams(node)
        qlinear_pool_input.extend([node.input[0], x_scale, x_zero_point,
                                   y_scale, y_zero_point])
        # Carry over original attributes (kernel_shape, strides, ...).
        kwargs = {}
        for attribute in node.attribute:
            kwargs.update(ONNXQNNPass.attribute_to_kwarg(attribute))
        qlinear_add_output = node.output
        node_type = "QLinearGlobalAveragePool" if is_global else "QLinearAveragePool"
        qlinear_pool_node = onnx.helper.make_node(node_type,
                                                  qlinear_pool_input,
                                                  qlinear_add_output,
                                                  node.name + '_quantized',
                                                  domain='com.microsoft',
                                                  **kwargs)
        self.onnx_model.insert_node_purely(qlinear_pool_node, idx)
        self.onnx_model.remove_node_purely(node)
        self.onnx_model.topologize_graph()
def get_node_output_qparams(self, node):
        """Follow `node`'s output downstream to the next fake-quantize node
        and return its (scale, zero_point) input tensor names."""
        consumer = self.onnx_model.get_tensor_consumer(node.output[0])[0]
        # Skip ops that carry qparams through unchanged (e.g. Flatten).
        while consumer.op_type not in FAKE_QUANTIZE_OP:
            assert consumer.op_type in self.node_without_qparams
            consumer = self.onnx_model.get_tensor_consumer(consumer.output[0])[0]
        scale_name, zero_point_name = consumer.input[1], consumer.input[2]
        return scale_name, zero_point_name
def replace_op_pass(self):
        """Walk the graph and swap each supported float op for its QLinear
        counterpart (Conv / Add / AveragePool / GlobalAveragePool).

        Gemm, Concat and LeakyRelu are recognised but intentionally left
        untouched for now.
        """
        for idx, node in enumerate(self.onnx_model.graph.node):
            op_type = node.op_type
            if op_type == 'Conv':
                self.replace_conv_gemm(node, idx, is_conv=True)
            elif op_type == 'Gemm':
                # onnxruntime and tvm is not supported yet.
                # self.replace_conv_gemm(node, idx, is_conv=False)
                pass
            elif op_type == 'Add':
                self.replace_add_to_qlinearadd(node, idx)
            elif op_type == 'GlobalAveragePool':
                self.replace_pool_to_qlinearpool(node, idx, is_global=True)
            elif op_type == 'AveragePool':
                self.replace_pool_to_qlinearpool(node, idx, is_global=False)
            elif op_type in ('Concat', 'LeakyRelu'):
                # TODO: not handled yet.
                pass
def replace_qlinear_layer_pass(self):
        """Resolve every remaining fake-quantize node.

        Three cases per fake-quantize node:
          * both neighbours are QLinear ops -> drop the node and rewire its
            consumers to read its input tensor directly;
          * only the producer is a QLinear op -> replace by DequantizeLinear;
          * otherwise (graph-input side) -> replace by QuantizeLinear.
        """
        # Replace FakeQuantize
        for node in self.onnx_model.graph.node:
            if node.op_type in FAKE_QUANTIZE_OP:
                # 'INPUT_TOKEN' / 'OUTPUT_TOKEN' are sentinel strings returned
                # at the graph boundary instead of a NodeProto.
                prev_node = self.onnx_model.get_tensor_producer(node.input[0])
                next_node = self.onnx_model.get_tensor_consumer(node.output[0])[0]
                if prev_node != 'INPUT_TOKEN' and prev_node.op_type in self.qlinear_op_type and \
                    next_node != 'OUTPUT_TOKEN' and next_node.op_type in self.qlinear_op_type:
                    # Redundant between two QLinear ops: remove and rewire
                    # every consumer input that referenced its output.
                    self.onnx_model.remove_node_purely(node)
                    for _next_node in self.onnx_model.get_tensor_consumer(node.output[0]):
                        assert _next_node.op_type in self.qlinear_op_type
                        for idx, _input_name in enumerate(_next_node.input):
                            if _input_name == node.output[0]:
                                _next_node.input[idx] = node.input[0]
                    self.onnx_model.topologize_graph()
                elif prev_node != 'INPUT_TOKEN' and prev_node.op_type in self.qlinear_op_type:
                    # Quantized producer, float consumer: dequantize here.
                    dequantize_linear_node = onnx.helper.make_node("DequantizeLinear",
                                                                   node.input[0:3],
                                                                   node.output,
                                                                   node.name + '_dequantized')
                    self.onnx_model.insert_node_purely(dequantize_linear_node)
                    self.onnx_model.remove_node_purely(node)
                    self.onnx_model.topologize_graph()
                else:
                    # Float producer (or graph input): quantize here.
                    quantize_linear_node = onnx.helper.make_node("QuantizeLinear",
                                                                 node.input[0:3],
                                                                 node.output,
                                                                 node.name + '_quantized')
                    self.onnx_model.insert_node_purely(quantize_linear_node)
                    self.onnx_model.remove_node_purely(node)
                    self.onnx_model.topologize_graph()
def merge_relu_pass(self):
        """Fold Relu / Clip activations into the following fake-quantize node.

        A Relu is absorbed by forcing the fake-quantize zero point to 0.  A
        Clip (ReLU6-style) additionally caps the scale at 6/255 so that the
        uint8 range covers at most [0, 6].
        """
        for node in self.onnx_model.graph.node:
            if node.op_type == 'Relu':
                next_node = self.onnx_model.get_tensor_consumer(node.output[0])[0]
                assert next_node.op_type in FAKE_QUANTIZE_OP
                # Input idx2 is zero point.
                self.onnx_model.set_initializer(next_node.input[2], np.array([0], dtype=np.uint8), raw=False)
                self.onnx_model.remove_node_purely(node)
                # Bypass the removed Relu: its consumer now reads Relu's input.
                next_node.input[0] = node.input[0]
            if node.op_type == 'Clip':
                next_node = self.onnx_model.get_tensor_consumer(node.output[0])[0]
                assert next_node.op_type in FAKE_QUANTIZE_OP
                # Input idx2 is zero point.
                # NOTE(review): builtin min() below presumes the scale
                # initializer is a scalar / 1-element array; a multi-element
                # per-channel scale would raise -- confirm scales are per-tensor.
                scale = self.onnx_model.get_initializer(next_node.input[1])
                scale = min(scale, 6.0 / 255)
                self.onnx_model.set_initializer(next_node.input[1], np.array([scale], dtype=np.float32), raw=False)
                self.onnx_model.set_initializer(next_node.input[2], np.array([0], dtype=np.uint8), raw=False)
                self.onnx_model.remove_node_purely(node)
                next_node.input[0] = node.input[0]
        self.onnx_model.topologize_graph()
def format_qlinear_dtype_pass(self):
        """Normalize fake-quantize qparam dtypes for QNN deployment.

        Re-stores every per-tensor scale as float32 and every zero point as a
        1-element uint8 array, shifting zero points by -qmin so symmetric
        (e.g. [-128, 127]) and asymmetric ([0, 255]) schemes share one layout.
        """
        for node in self.onnx_model.graph.node:
            if node.op_type in FAKE_QUANTIZE_OP:
                scale, zero_point, qmin, qmax = node.input[1], node.input[2], node.input[3], node.input[4]
                qmin = self.onnx_model.get_constant(qmin)
                qmax = self.onnx_model.get_constant(qmax)
                # Exactly 256 quantization levels are required downstream.
                assert qmax - qmin == 2 ** 8 - 1, "Only 8 bit quantization support deploy to QNN."
                scale_proto = self.onnx_model.initializer[scale][0]
                # Only rewrite scales stored as raw data with a single element.
                if scale_proto.raw_data != b'' and scale_proto.dims[0] == 1:
                    scale_data = self.onnx_model.get_initializer(scale)
                    self.onnx_model.set_initializer(scale, scale_data.astype(np.float32), raw=False)
                zero_point_proto = self.onnx_model.initializer[zero_point][0]
                zero_point_data = self.onnx_model.get_initializer(zero_point)
                # Align sym and asym scheme.
                zero_point_data = (zero_point_data - qmin).reshape((1,))
                self.onnx_model.set_initializer(zero_point, zero_point_data.astype(np.uint8), raw=False)
def run(self):
        """Run every rewrite pass in order, then validate and save the
        quantized model to 'onnx_quantized_model.onnx'."""
        rewrite_passes = (self.format_qlinear_dtype_pass,
                          self.merge_relu_pass,
                          self.replace_op_pass,
                          self.replace_qlinear_layer_pass)
        for rewrite_pass in rewrite_passes:
            rewrite_pass()
        self.onnx_model.optimize_model()
        # QLinearAdd / QLinearAveragePool live in the com.microsoft domain.
        self.onnx_model.set_opset_version('com.microsoft', 1)
        try:
            onnx.checker.check_model(self.onnx_model.model)
        except onnx.checker.ValidationError as e:
            # Validation failure is reported but does not abort the save.
            logger.critical('The model is invalid: %s' % e)
        self.onnx_model.save_onnx_model('onnx_quantized_model.onnx')
|
{
"alphanum_fraction": 0.5992568637,
"author": null,
"avg_line_length": 52.0896057348,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "451f501e6fa864748ec4c9b5d31dbb6af3f0886b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3f8321ec9ab9fd05d99c21700a901b1ff6a90a1e",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "415905716/MQBench",
"max_forks_repo_path": "mqbench/deploy/deploy_onnx_qnn.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3f8321ec9ab9fd05d99c21700a901b1ff6a90a1e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "415905716/MQBench",
"max_issues_repo_path": "mqbench/deploy/deploy_onnx_qnn.py",
"max_line_length": 115,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3f8321ec9ab9fd05d99c21700a901b1ff6a90a1e",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "415905716/MQBench",
"max_stars_repo_path": "mqbench/deploy/deploy_onnx_qnn.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3054,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 14533
}
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
def prepare_country_stats(_bli, _gdp):
    """ Prepare stats to be used in regression.

    Joins the OECD Better Life Index data with the IMF GDP-per-capita data
    into one frame indexed by country.  (The original stub returned None,
    which crashed the caller's column lookups.)

    Args:
        _bli: DataFrame with 'Country', 'INEQUALITY', 'Indicator', 'Value'
            columns (assumed per the 2017 OECD export -- TODO confirm).
        _gdp: DataFrame with 'Country' and '2015' columns (assumed per the
            2015 IMF export -- TODO confirm).
    Returns:
        DataFrame indexed by country, sorted ascending by GDP per capita,
        with 'GDP per capita' and 'Life satisfaction' columns.
    """
    # Keep only the total (non-split) BLI rows, one value per indicator.
    bli = _bli[_bli["INEQUALITY"] == "TOT"]
    bli = bli.pivot(index="Country", columns="Indicator", values="Value")
    gdp = _gdp.rename(columns={"2015": "GDP per capita"})
    gdp = gdp.set_index("Country")
    country_stats = bli.merge(gdp, left_index=True, right_index=True)
    country_stats = country_stats.sort_values(by="GDP per capita")
    return country_stats[["GDP per capita", "Life satisfaction"]]
def run_model():
    """ Method to run linear model against BLI and GDP data.

    Loads the two CSVs, merges them via prepare_country_stats, plots the
    scatter, fits a linear regression of life satisfaction on GDP per
    capita, and returns the prediction for Cyprus's GDP.

    Returns:
        The model's prediction for x_new (also printed), as returned by
        LinearRegression.predict.
    """
    # Stats grabbed from http://stats.oecd.org/index.aspx?DataSetCode=BLI
    # (Organization for Economic Co-op and Development)
    better_life_index = pd.read_csv("./better-life-index-2017.csv", thousands=',')
    # GDP stats grabbed from www.imf.org
    # (International Monetary Fund)
    gdp_per_capita = pd.read_csv(
        "./gdp-per-capita-2015.csv",
        thousands=',',
        delimiter='\t',
        encoding='latin1',
        na_values="n/a"
    )
    # NOTE(review): prepare_country_stats must return the merged frame with
    # 'GDP per capita' and 'Life satisfaction' columns for the code below.
    country_stats = prepare_country_stats(better_life_index, gdp_per_capita)
    x_gdp = np.c_[country_stats["GDP per capita"]]
    y_ls = np.c_[country_stats["Life satisfaction"]]
    # create plot to visualize data
    country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction')
    plt.show()
    # BUG FIX: `import sklearn` alone does not import the `linear_model`
    # submodule, so `sklearn.linear_model` raised AttributeError.
    from sklearn import linear_model
    lin_reg_model = linear_model.LinearRegression()
    lin_reg_model.fit(x_gdp, y_ls)
    x_new = [[22587]]  # Cyprus GDP per capita
    prediction = lin_reg_model.predict(x_new)
    print(prediction)
    return prediction
if __name__ == "__main__":
    # Only execute when run as a script, not when imported as a module.
    run_model()
|
{
"alphanum_fraction": 0.6864988558,
"author": null,
"avg_line_length": 30.488372093,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4faf26c22d0067e1e98ae49e4291a68da1872f3d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fee546f44fe1682b6ce7a8dd95d2879a920fe32e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dannypaz/handsonml",
"max_forks_repo_path": "chapter1/linear-model.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "fee546f44fe1682b6ce7a8dd95d2879a920fe32e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dannypaz/handsonml",
"max_issues_repo_path": "chapter1/linear-model.py",
"max_line_length": 82,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "fee546f44fe1682b6ce7a8dd95d2879a920fe32e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dannypaz/handsonml",
"max_stars_repo_path": "chapter1/linear-model.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 321,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1311
}
|
import ctypes
import os
import numpy as np
'''
Author - Daniel J. Whiting
Date modified - 10/08/2017
'''
class HRMTimeAPI():
    """ctypes wrapper around the Windows sensL HRM-TDC time-tagging DLL.

    NOTE(review): this class is written for Python 2 (print statement and
    integer division below) and requires the sensL driver installation.
    """
    def __init__(self):
        # Load DLL into memory
        SENSL = r'C:\Program Files (x86)\sensL\HRM-TDC\HRM_TDC DRIVERS'
        os.environ['PATH'] = ';'.join([SENSL, os.environ['PATH']])
        self.dll = ctypes.WinDLL(os.path.join(SENSL, 'HRMTimeAPI.dll'))
        # Set up connection to device
        self.dll.HRM_RefreshConnectedModuleList()
        ModuleCount = self.dll.HRM_GetConnectedModuleCount()
        if ModuleCount == 1:
            handle_array_type = ctypes.c_void_p*1 # Array of 1 void
            HandleArray = handle_array_type()
            self.dll.HRM_GetConnectedModuleList.restype = handle_array_type
            HandleArray = self.dll.HRM_GetConnectedModuleList(HandleArray)
            self.ModuleHandle = HandleArray[0]
        else:
            # Exactly one connected module is supported; otherwise the count
            # is only reported and self.ModuleHandle is never set.
            print 'Number of modules present =', ModuleCount
    def TimeTags2Mem(self,ncounts=10000000,recordinglength=1000,esr=0x000555,microlsb=0,algorithm='ReSync'):
        """Stream up to `ncounts` time tags into memory and decode them.

        Returns an (ntags, 2) array: column 0 is the channel, column 1 the
        decoded time (ps for 'ReSync', time differences for 'FreeRuning').
        Silently does nothing if ncounts exceeds the 10M buffer limit.
        """
        # change esr to account for 3 and 4 channels
        if ncounts<=10000000:
            # Set resync mode
            self.dll.HRM_SetFrequencySelectionRegister(self.ModuleHandle,0xFFFF)
            bufsize = 8*ncounts # memory size of buffer in bytes
            buftype = ctypes.c_uint32*(2*ncounts)
            buf = buftype()
            #buf = ctypes.create_string_buffer(bufsize)
            buf_p = ctypes.pointer(buf) # pointer to memory buffer
            recordedbytes = ctypes.c_int() # location for storing actual number of bytes recorded
            recordedbytes_p = ctypes.pointer(recordedbytes) # pointer to location for storing actual number of bytes recorded
            self.dll.HRM_StreamTimeTags2Mem(self.ModuleHandle,buf_p,bufsize,recordinglength,esr,microlsb,recordedbytes_p)
            # Each tag occupies 8 bytes (two uint32 words).
            ntags = recordedbytes.value / 8
            # NOTE(review): dtype=int reinterprets pairs of uint32 words;
            # the platform width of `int` matters here -- confirm on the
            # deployment machine.
            a = np.frombuffer(buf,dtype=int,count=ntags*2)
            # Even words: bits 0-1 are the channel, the rest the micro time.
            CHANNEL = np.bitwise_and(a[::2],3)
            MICRO = a[::2]>>2
            MACRO = a[1::2]
            if algorithm == 'ReSync':
                # Align macro counter to the first micro timestamp, then bin
                # tags into 4 us frames and correct frame-boundary rollovers.
                MACROoffset = (MACRO[0] - MICRO[0]*26.9851/25000).astype(int)
                FrameNo = (MACRO-MACROoffset)/160
                Remainder = np.remainder(MACRO-MACROoffset,160)
                FrameNo[(Remainder<60) & (MICRO > 100000)] += -1
                FrameNo[(Remainder>110) & (MICRO < 40000)] += 1
                #Time = FrameNo*4000000. + MICRO*26.9851 # technically correct but use below for integer number of 26.9851ps
                Time = FrameNo*4000001.373 + MICRO*26.9851
                # Sort time tags into time order and output to array
                sorter = np.argsort(Time)
                data = np.zeros((len(CHANNEL),2))
                data[:,0] = CHANNEL[sorter]
                data[:,1] = Time[sorter]
            if algorithm == 'FreeRuning':
                # NOT CURRENTLY TESTED
                # works provided the time between consecutive time tags is not greater than 21.5 seconds
                dMACRO = MACRO[1:]-MACRO[:-1]
                sel = MACRO[:-1]>MACRO[1:]
                dMACRO[sel] += 0x100000000
                rMICRO = 143248136.6016 # rollover time in ps
                nMICRO = (dMACRO*5000)/rMICRO # *5000 to convert dMACRO into ps
                dMICRO = MICRO[1:]-MICRO[:-1]
                sel = MICRO[:-1]>MICRO[1:]
                dMICRO[sel] += 0x510000
                dTIME = nMICRO*rMICRO + dMICRO*26.9851
                data = np.zeros((len(CHANNEL),2))
                data[:,0] = CHANNEL
                data[:,1] = dTIME
            # NOTE(review): `data` is unbound if `algorithm` is neither
            # 'ReSync' nor 'FreeRuning' -- a NameError would follow.
            return data
    def TimeTags2CSV(self,Filename,StreamTime=1000,ESR=0x0055):
        """Stream time tags to <Filename>.raw for StreamTime ms, then convert
        the raw file to <Filename>.csv via the driver's converter."""
        # Set resync mode
        self.dll.HRM_SetFrequencySelectionRegister(self.ModuleHandle,0xFFFF)
        # Set maximum number of time tags to record (if set to zero time tagging continues for StreamTime)
        #self.dll.HRM_SetMemoryCountRegister(self.ModuleHandle, 1000) # currently does nothing
        # Collect FIFO time tag data in resync mode
        self.dll.HRM_StreamTimeTags2File(self.ModuleHandle,Filename+'.raw',StreamTime,ESR,0) # integration time in ms
        # Try-Except to ignore the wrong calling convention error
        try:
            self.dll.HRM_ConvertRAWtoCSV(2,0,Filename+'.raw',Filename+'.csv')
        except:
            None
if __name__ == "__main__":
    # Smoke test: connect to the module and run one default acquisition.
    HRMTime = HRMTimeAPI()
    HRMTime.TimeTags2Mem()
|
{
"alphanum_fraction": 0.7091716749,
"author": null,
"avg_line_length": 37.8910891089,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0bde6dc9bdcc9cca75b7be730e0f40ffea2f3e4b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "513c305a6adc6a1552015088660f44576ad6b010",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DanWhiting/sensl-hrm-tdc",
"max_forks_repo_path": "sensl.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "513c305a6adc6a1552015088660f44576ad6b010",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DanWhiting/sensl-hrm-tdc",
"max_issues_repo_path": "sensl.py",
"max_line_length": 116,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "513c305a6adc6a1552015088660f44576ad6b010",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DanWhiting/sensl-hrm-tdc",
"max_stars_repo_path": "sensl.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1221,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3827
}
|
{-# OPTIONS --without-K --safe #-}
open import Relation.Binary.Core
module Definitions
{a ℓ} {A : Set a} -- The underlying set
(_≈_ : Rel A ℓ) -- The underlying equality
where
open import Algebra.Core
open import Data.Product
open import Algebra.Definitions
-- Left alternativity: (x ∙ x) ∙ y ≈ x ∙ (x ∙ y).
-- (The previous right-hand side x ∙ (y ∙ y) was not the left alternative law.)
Alternativeˡ : Op₂ A → Set _
Alternativeˡ _∙_ = ∀ x y → ((x ∙ x) ∙ y) ≈ (x ∙ (x ∙ y))
-- Right alternativity: x(yy) ≈ (xy)y.
Alternativeʳ : Op₂ A → Set _
Alternativeʳ _∙_ = ∀ x y → (x ∙ (y ∙ y)) ≈ ((x ∙ y) ∙ y)
-- A magma is alternative when it is both left and right alternative.
Alternative : Op₂ A → Set _
Alternative _∙_ = (Alternativeˡ _∙_ ) × ( Alternativeʳ _∙_)
-- Flexibility: (xy)x ≈ x(yx).
Flexible : Op₂ A → Set _
Flexible _∙_ = ∀ x y → ((x ∙ y) ∙ x) ≈ (x ∙ (y ∙ x))
-- Medial (entropic) law: (xy)(uz) ≈ (xu)(yz).
Medial : Op₂ A → Set _
Medial _∙_ = ∀ x y u z → ((x ∙ y) ∙ (u ∙ z)) ≈ ((x ∙ u) ∙ (y ∙ z))
-- Left semimedial law: (xx)(yz) ≈ (xy)(xz).
LeftSemimedial : Op₂ A → Set _
LeftSemimedial _∙_ = ∀ x y z → ((x ∙ x) ∙ (y ∙ z)) ≈ ((x ∙ y) ∙ (x ∙ z))
-- Right semimedial law: (yz)(xx) ≈ (yx)(zx).
RightSemimedial : Op₂ A → Set _
RightSemimedial _∙_ = ∀ x y z → ((y ∙ z) ∙ (x ∙ x)) ≈ ((y ∙ x) ∙ (z ∙ x))
Semimedial : Op₂ A → Set _
Semimedial _∙_ = (LeftSemimedial _∙_) × (RightSemimedial _∙_)
-- NOTE(review): these quantify the solution x (resp. y) universally, which
-- forces a ∙ x ≈ b for EVERY x; the Latin-square property presumably needs
-- an existential (Σ) over the solution -- confirm intended meaning.
LatinSquare₁ : Op₂ A → Set _
LatinSquare₁ _*_ = ∀ a b x → (a * x) ≈ b
LatinSquare₂ : Op₂ A → Set _
LatinSquare₂ _*_ = ∀ a b y → (y * a) ≈ b
LatinSquare : Op₂ A → Set _
LatinSquare _*_ = (LatinSquare₁ _*_) × (LatinSquare₂ _*_)
-- NOTE(review): the usual left Bol identity is x(y(xz)) ≈ (x(yx))z; the
-- right-hand side here is (x(yz))z -- verify against the intended source.
LeftBol : Op₂ A → Set _
LeftBol _∙_ = ∀ x y z → (x ∙ (y ∙ (x ∙ z))) ≈ ((x ∙ (y ∙ z)) ∙ z )
-- Right Bol identity: ((zx)y)x ≈ z((xy)x).
RightBol : Op₂ A → Set _
RightBol _∙_ = ∀ x y z → (((z ∙ x) ∙ y) ∙ x) ≈ (z ∙ ((x ∙ y) ∙ x))
-- The four (pairwise related) Moufang identities.
MoufangIdentity₁ : Op₂ A → Set _
MoufangIdentity₁ _∙_ = ∀ x y z → (z ∙ (x ∙ (z ∙ y))) ≈ (((z ∙ x) ∙ z) ∙ y)
MoufangIdentity₂ : Op₂ A → Set _
MoufangIdentity₂ _∙_ = ∀ x y z → (x ∙ (z ∙ (y ∙ z))) ≈ (((x ∙ z) ∙ y) ∙ z)
MoufangIdentity₃ : Op₂ A → Set _
MoufangIdentity₃ _∙_ = ∀ x y z → ((z ∙ x) ∙ (y ∙ z)) ≈ ((z ∙ (x ∙ y)) ∙ z)
MoufangIdentity₄ : Op₂ A → Set _
MoufangIdentity₄ _∙_ = ∀ x y z → ((z ∙ x) ∙ (y ∙ z)) ≈ (z ∙ ((x ∙ y) ∙ z))
-- Jordan identity: (x²y)x = x²(yx), i.e. ((x ∙ x) ∙ y) ∙ x ≈ (x ∙ x) ∙ (y ∙ x).
-- (The previous body equated a term with itself, making the property trivially
-- inhabited.)  NOTE(review): the trailing ':' in the name looks like a typo,
-- but is kept so any existing references continue to resolve.
JordanIdentity: : Op₂ A → Set _
JordanIdentity: _∙_ = ∀ x y → (((x ∙ x) ∙ y) ∙ x) ≈ ((x ∙ x) ∙ (y ∙ x))
-- x = xyx
-- NOTE(review): "Pesudo" is a typo for "Pseudo"; the name is kept unchanged
-- so existing references keep compiling.
PesudoInverse₁ : Op₂ A → Set _
PesudoInverse₁ _∙_ = ∀ x y → ((x ∙ y) ∙ x) ≈ x
-- y = yxy
PseudoInverse₂ : Op₂ A → Set _
PseudoInverse₂ _∙_ = ∀ x y → ((y ∙ x) ∙ y) ≈ y
-- Both pseudo-inverse laws together.
PseudoInverse : Op₂ A → Set _
PseudoInverse ∙ = (PesudoInverse₁ ∙) × (PseudoInverse₂ ∙)
-- JacobiIdentity is (x ∙ (y ∙ z)) + ((y ∙ (z ∙ x)) + (z ∙ (x ∙ y))) = 0
-- Using the antisymmetry property Jacobi identity may be rewritten as a modification of the associative property
JacobiIdentity : Op₂ A → Op₂ A → Set _
JacobiIdentity _∙_ _-_ = ∀ x y z → (x ∙ (y ∙ z)) ≈ ((y ∙ (z ∙ x)) - (z ∙ (x ∙ y)))
|
{
"alphanum_fraction": 0.5373831776,
"author": null,
"avg_line_length": 30.2117647059,
"converted": null,
"ext": "agda",
"file": null,
"hexsha": "28d229bb6f889abfe6628d37172beb8284304026",
"include": null,
"lang": "Agda",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "72030f78934877ad67bf4e36e74e43845cabbf55",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Akshobhya1234/Agda-Algebra",
"max_forks_repo_path": "src/Definitions.agda",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "72030f78934877ad67bf4e36e74e43845cabbf55",
"max_issues_repo_issues_event_max_datetime": "2022-01-31T18:19:52.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-01-02T20:50:34.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Akshobhya1234/Agda-Algebra",
"max_issues_repo_path": "src/Definitions.agda",
"max_line_length": 113,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "72030f78934877ad67bf4e36e74e43845cabbf55",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Akshobhya1234/Agda-Algebra",
"max_stars_repo_path": "src/Definitions.agda",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1175,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2568
}
|
import math
import re
from collections import namedtuple
from functools import lru_cache
import numpy as np
from m2cgen.ast import TOTAL_NUMBER_OF_EXPRESSIONS
# Pair of (generated variable name, expression result) -- a reusable record
# type for callers that cache computed subexpressions.
CachedResult = namedtuple('CachedResult', ['var_name', 'expr_result'])
def get_file_content(path):
    """Return the entire contents of the file at `path`, decoded as UTF-8."""
    with path.open(encoding="utf-8") as file_handle:
        return file_handle.read()
@lru_cache(maxsize=1 << math.ceil(math.log(TOTAL_NUMBER_OF_EXPRESSIONS, 2)))
def _get_handler_name(expr_tpe):
    """Map an AST expression class to its interpreter handler method name.

    Cached, sized to the next power of two above the expression-type count.
    """
    snake_name = _normalize_expr_name(expr_tpe.__name__)
    return f"interpret_{snake_name}"
def _normalize_expr_name(name):
return re.sub("(?!^)([A-Z]+)", r"_\1", name).lower()
def format_float(value):
    """Render `value` positionally with the shortest unique representation,
    trimming redundant trailing zeros (but keeping one digit after '.')."""
    rendered = np.format_float_positional(value, unique=True, trim="0")
    return rendered
|
{
"alphanum_fraction": 0.7585692996,
"author": null,
"avg_line_length": 23.9642857143,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f6664a2662985423d036cb14338da501360ae7bf",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 201,
"max_forks_repo_forks_event_max_datetime": "2022-03-12T09:45:46.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-02-13T19:06:44.000Z",
"max_forks_repo_head_hexsha": "3157e0cbd5bd1ee7e044a992223c60224e2b7709",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Symmetry-International/m2cgen",
"max_forks_repo_path": "m2cgen/interpreters/utils.py",
"max_issues_count": 380,
"max_issues_repo_head_hexsha": "3157e0cbd5bd1ee7e044a992223c60224e2b7709",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T20:59:20.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-01-17T15:59:29.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Symmetry-International/m2cgen",
"max_issues_repo_path": "m2cgen/interpreters/utils.py",
"max_line_length": 76,
"max_stars_count": 2161,
"max_stars_repo_head_hexsha": "3157e0cbd5bd1ee7e044a992223c60224e2b7709",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Symmetry-International/m2cgen",
"max_stars_repo_path": "m2cgen/interpreters/utils.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T13:24:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-01-13T02:37:56.000Z",
"num_tokens": 169,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 671
}
|
using Revise
using DataFrames, CSV, JDF
using WeakRefStrings
# BUG FIX: the original script mutated `a` before it was defined (the CSV
# read came after the column assignments), raising UndefVarError on a fresh
# session.  Load the data first, then add the derived columns.
@time a = CSV.read("c:/data/feature_matrix_cleaned.csv");
# Derived columns used to exercise string / categorical serialization.
a[!, :stringarr] = StringArray(rand(["a", "a", "b"], size(a,1)))
# NOTE(review): `categorical` comes from CategoricalArrays (re-exported by
# older DataFrames versions) -- confirm it is in scope before running.
a[!, :cate] = categorical(a[!, :stringarr])
@time savejdf("c:/data/feature_matrix_cleaned.csv.jdf", a)
a = nothing
@time a = loadjdf("c:/data/feature_matrix_cleaned.csv.jdf")
# Narrow column types (e.g. Float64 -> Float32) and save a smaller copy.
type_compress!(a, compress_float=true)
@time savejdf("c:/data/feature_matrix_cleaned.csv.compressed.jdf", a)
using BenchmarkTools
@benchmark a = loadjdf("c:/data/feature_matrix_cleaned.csv.jdf")
|
{
"alphanum_fraction": 0.7029360967,
"author": null,
"avg_line_length": 28.95,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "f72aef969da8366f4bc9cbe7fca0f04803bbce64",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 10,
"max_forks_repo_forks_event_max_datetime": "2022-01-27T12:47:09.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-09-19T11:08:44.000Z",
"max_forks_repo_head_hexsha": "195039f53ad708fd76976ac966ec66fddc7759a2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "xiaodaigh/JDF",
"max_forks_repo_path": "test/misc/mortgage-risk-featuretools.jl",
"max_issues_count": 40,
"max_issues_repo_head_hexsha": "195039f53ad708fd76976ac966ec66fddc7759a2",
"max_issues_repo_issues_event_max_datetime": "2022-02-28T23:07:51.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-09-18T22:18:17.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "xiaodaigh/JDF",
"max_issues_repo_path": "test/misc/mortgage-risk-featuretools.jl",
"max_line_length": 70,
"max_stars_count": 66,
"max_stars_repo_head_hexsha": "195039f53ad708fd76976ac966ec66fddc7759a2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "xiaodaigh/JDF",
"max_stars_repo_path": "test/misc/mortgage-risk-featuretools.jl",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T18:41:15.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-09-13T17:29:09.000Z",
"num_tokens": 174,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 579
}
|
"""
Training/testing/inference script for COVID-Net CT models for COVID-19 detection in CT images.
"""
import math
import os
import sys
import time
import cv2
import json
import shutil
import numpy as np
from math import ceil
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from dataset import COVIDxCTDataset
from data_utils import auto_body_crop
from utils import parse_args
import pickle as pickle
import socket
from multiprocessing import Process, Queue, Value, Manager
from ctypes import c_char_p
import bingrad_common_updated
import util_gps
# Dict keys
TRAIN_OP_KEY = 'train_op'
TF_SUMMARY_KEY = 'tf_summaries'
LOSS_KEY = 'loss'
# Tensor names
IMAGE_INPUT_TENSOR = 'Placeholder:0'
LABEL_INPUT_TENSOR = 'Placeholder_1:0'
CLASS_PRED_TENSOR = 'ArgMax:0'
CLASS_PROB_TENSOR = 'softmax_tensor:0'
TRAINING_PH_TENSOR = 'is_training:0'
LOSS_TENSOR = 'add:0'
# Names for train checkpoints
CKPT_NAME = 'model.ckpt'
MODEL_NAME = 'COVID-Net_CT'
# Output directory for storing runs
OUTPUT_DIR = 'output'
# Class names ordered by class index
CLASS_NAMES = ('Normal', 'Pneumonia', 'COVID-19')
# TCP_IP = '127.0.0.1'
# port = 17000
# num_iters = 260
# s = 0
# MAX_WORKERS = 1
# global_var_vals = None
# Parameter-server networking state.  The None globals below are shared with
# the per-worker handler processes and populated elsewhere at runtime.
TCP_IP = '0.0.0.0'
port = 16000
# Pickled bytes of the aggregated variable values sent back to workers.
global_var_vals = None
# Shared flag signalling that a round of aggregation has finished.
done_flag = None
# Queue of per-round worker acknowledgements.
ack_q = None
# Queues carrying worker gradient / scaler payloads to the aggregator.
gradients_q = None
scaler_q = None
# Initial-broadcast state -- NOTE(review): not used in the code visible here.
initial_flag = None
initial_var = None
initial_ack_q = None
def safe_recv(size, server_socket):
    """Receive exactly `size` bytes from `server_socket`.

    Repeatedly calls recv() until `size` bytes have accumulated; each call
    requests only the remaining byte count.

    Args:
        size: exact number of bytes to read.
        server_socket: connected socket-like object exposing recv(n).
    Returns:
        bytes object of length `size`.
    Raises:
        ConnectionError: if the peer closes the connection early (recv()
            returns b'' before `size` bytes have arrived).
    """
    data = bytearray()
    while len(data) < size:
        chunk = server_socket.recv(size - len(data))
        # BUG FIX: the original swallowed every exception with a bare except
        # and never checked for b'', so a closed socket made it spin forever
        # printing "Error".  A closed peer now raises instead.
        if not chunk:
            raise ConnectionError(
                "socket closed after %d of %d bytes" % (len(data), size))
        data.extend(chunk)
    return bytes(data)
def handleWorker(port, gradients_q, scaler_q, done_flag, global_var_vals, global_var_scalers,ack_q, n, initial_flag, initial_var, initial_ack_q):
    """Per-worker server loop (intended to run in its own process).

    Accepts one worker connection on `port`; each round it receives the
    worker's pickled ternarized gradients and scalers, forwards them to the
    aggregator via the queues, busy-waits on `done_flag`, then sends the
    aggregated variable values and scalers back.  Runs n + 1 rounds.

    Args:
        port: TCP port to bind for this worker connection.
        gradients_q: queue receiving the worker's gradient payloads.
        scaler_q: queue receiving the worker's scaler payloads.
        done_flag: shared value set nonzero when aggregated data is ready.
        global_var_vals: shared object whose .value holds pickled variables.
        global_var_scalers: shared object whose .value holds pickled scalers.
        ack_q: queue used to acknowledge each completed round.
        n: number of training iterations; the loop runs n + 1 times.
        initial_flag, initial_var, initial_ack_q: unused in this function --
            NOTE(review): presumably reserved for an initial-weight broadcast.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("Connecting to port : ", port)
    s.bind((TCP_IP, port))
    s.listen(1)
    conn, addr = s.accept()
    print('Connection address:', addr)
    k = 0
    while 1:
        # Fixed-width pickled length header (17 bytes), then the gradients.
        size_2 = safe_recv(17, conn)
        size_2 = pickle.loads(size_2)
        data_2 = safe_recv(size_2, conn)
        lps_ternarized_gradients = pickle.loads(data_2)
        # Fixed-width pickled length header (15 bytes), then the scalers.
        scaler_size = safe_recv(15, conn)
        scaler_size = pickle.loads(scaler_size)
        scaler_data = safe_recv(scaler_size, conn)
        scaler_data = pickle.loads(scaler_data)
        gradients_q.put(lps_ternarized_gradients)
        scaler_q.put(scaler_data)
        # Busy-wait until the aggregator publishes the new global values.
        while (done_flag.value == 0):
            pass
        # Send back length-prefixed aggregated variables, then scalers.
        size_grad = len(global_var_vals.value)
        size_grad = pickle.dumps(size_grad, pickle.HIGHEST_PROTOCOL)
        conn.sendall(size_grad)
        conn.sendall(global_var_vals.value)
        size_scaler = len(global_var_scalers.value)
        size_scaler = pickle.dumps(size_scaler, pickle.HIGHEST_PROTOCOL)
        conn.sendall(size_scaler)
        conn.sendall(global_var_scalers.value)
        ack_q.put(1)
        k = k + 1
        if (k == (n + 1)):
            print("Working: Breaking from loop")
            break
    conn.close()
    s.close()
def get_placeholder_for_ternarized_gradient():
    """Return the fixed list of 1-D float placeholders for ternarized grads.

    Each placeholder corresponds (positionally) to one trainable variable's
    packed/ternarized gradient; the shapes are hard-coded for this specific
    COVID-Net CT model topology.  NOTE(review): presumably each length is
    the packed size of the matching entry in build_grad_shape() -- confirm
    if the model architecture changes.
    """
    placeholder_list = [tf.placeholder('float', shape=(27,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(28,)),
                        tf.placeholder('float', shape=(27,)),
                        tf.placeholder('float', shape=(15,)),
                        tf.placeholder('float', shape=(9,)),
                        tf.placeholder('float', shape=(7,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(883,)),
                        tf.placeholder('float', shape=(1977,)),
                        tf.placeholder('float', shape=(1027,)),
                        tf.placeholder('float', shape=(352,)),
                        tf.placeholder('float', shape=(9,)),
                        tf.placeholder('float', shape=(27,)),
                        tf.placeholder('float', shape=(8101,)),
                        tf.placeholder('float', shape=(460,)),
                        tf.placeholder('float', shape=(10,)),
                        tf.placeholder('float', shape=(2129,)),
                        tf.placeholder('float', shape=(15,)),
                        tf.placeholder('float', shape=(26,)),
                        tf.placeholder('float', shape=(15,)),
                        tf.placeholder('float', shape=(703,)),
                        tf.placeholder('float', shape=(26,)),
                        tf.placeholder('float', shape=(47,)),
                        tf.placeholder('float', shape=(7801,)),
                        tf.placeholder('float', shape=(4489,)),
                        tf.placeholder('float', shape=(27,)),
                        tf.placeholder('float', shape=(676,)),
                        tf.placeholder('float', shape=(104,)),
                        tf.placeholder('float', shape=(47,)),
                        tf.placeholder('float', shape=(46,)),
                        tf.placeholder('float', shape=(22,)),
                        tf.placeholder('float', shape=(34,)),
                        tf.placeholder('float', shape=(34,)),
                        tf.placeholder('float', shape=(12329,)),
                        tf.placeholder('float', shape=(217,)),
                        tf.placeholder('float', shape=(22,)),
                        tf.placeholder('float', shape=(10,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(12,)),
                        tf.placeholder('float', shape=(27,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(7,)),
                        tf.placeholder('float', shape=(13597,)),
                        tf.placeholder('float', shape=(17785,)),
                        tf.placeholder('float', shape=(769,)),
                        tf.placeholder('float', shape=(6969,)),
                        tf.placeholder('float', shape=(16,)),
                        tf.placeholder('float', shape=(310,)),
                        tf.placeholder('float', shape=(10,)),
                        tf.placeholder('float', shape=(2129,)),
                        tf.placeholder('float', shape=(39,)),
                        tf.placeholder('float', shape=(39,)),
                        tf.placeholder('float', shape=(16,)),
                        tf.placeholder('float', shape=(163,)),
                        tf.placeholder('float', shape=(9,)),
                        tf.placeholder('float', shape=(17,)),
                        tf.placeholder('float', shape=(1429,)),
                        tf.placeholder('float', shape=(23,)),
                        tf.placeholder('float', shape=(6701,)),
                        tf.placeholder('float', shape=(6701,)),
                        tf.placeholder('float', shape=(104,)),
                        tf.placeholder('float', shape=(40,)),
                        tf.placeholder('float', shape=(18,)),
                        tf.placeholder('float', shape=(39,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(18541,)),
                        tf.placeholder('float', shape=(1189,)),
                        tf.placeholder('float', shape=(541,)),
                        tf.placeholder('float', shape=(12,)),
                        tf.placeholder('float', shape=(6969,)),
                        tf.placeholder('float', shape=(7,)),
                        tf.placeholder('float', shape=(22,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(673,)),
                        tf.placeholder('float', shape=(8113,)),
                        tf.placeholder('float', shape=(14,)),
                        tf.placeholder('float', shape=(9,)),
                        tf.placeholder('float', shape=(7,)),
                        tf.placeholder('float', shape=(39,)),
                        tf.placeholder('float', shape=(40,)),
                        tf.placeholder('float', shape=(27,)),
                        tf.placeholder('float', shape=(757,)),
                        tf.placeholder('float', shape=(7237,)),
                        tf.placeholder('float', shape=(9,)),
                        tf.placeholder('float', shape=(577,)),
                        tf.placeholder('float', shape=(595,)),
                        tf.placeholder('float', shape=(2689,)),
                        tf.placeholder('float', shape=(1369,)),
                        tf.placeholder('float', shape=(1,)),
                        tf.placeholder('float', shape=(6241,)),
                        tf.placeholder('float', shape=(673,)),
                        tf.placeholder('float', shape=(27605,)),
                        tf.placeholder('float', shape=(1405,)),
                        tf.placeholder('float', shape=(6969,)),
                        tf.placeholder('float', shape=(8425,)),
                        tf.placeholder('float', shape=(27,)),
                        tf.placeholder('float', shape=(6969,)),
                        tf.placeholder('float', shape=(46,)),
                        tf.placeholder('float', shape=(14,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(673,)),
                        tf.placeholder('float', shape=(28,)),
                        tf.placeholder('float', shape=(2653,)),
                        tf.placeholder('float', shape=(104,)),
                        tf.placeholder('float', shape=(14,)),
                        tf.placeholder('float', shape=(14,)),
                        tf.placeholder('float', shape=(28,)),
                        tf.placeholder('float', shape=(39,)),
                        tf.placeholder('float', shape=(39,)),
                        tf.placeholder('float', shape=(17,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(11537,)),
                        tf.placeholder('float', shape=(28,)),
                        tf.placeholder('float', shape=(2281,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(29,)),
                        tf.placeholder('float', shape=(39,)),
                        tf.placeholder('float', shape=(27,)),
                        tf.placeholder('float', shape=(18,)),
                        tf.placeholder('float', shape=(5897,)),
                        tf.placeholder('float', shape=(703,)),
                        tf.placeholder('float', shape=(27,)),
                        tf.placeholder('float', shape=(26,)),
                        tf.placeholder('float', shape=(3193,)),
                        tf.placeholder('float', shape=(23,)),
                        tf.placeholder('float', shape=(21,)),
                        tf.placeholder('float', shape=(406,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(7237,)),
                        tf.placeholder('float', shape=(433,)),
                        tf.placeholder('float', shape=(730,)),
                        tf.placeholder('float', shape=(925,)),
                        tf.placeholder('float', shape=(39,)),
                        tf.placeholder('float', shape=(68,)),
                        tf.placeholder('float', shape=(26,)),
                        tf.placeholder('float', shape=(21,)),
                        tf.placeholder('float', shape=(9,)),
                        tf.placeholder('float', shape=(22,)),
                        tf.placeholder('float', shape=(10185,)),
                        tf.placeholder('float', shape=(15,)),
                        tf.placeholder('float', shape=(10,)),
                        tf.placeholder('float', shape=(18,)),
                        tf.placeholder('float', shape=(3953,)),
                        tf.placeholder('float', shape=(757,)),
                        tf.placeholder('float', shape=(145,)),
                        tf.placeholder('float', shape=(2521,)),
                        tf.placeholder('float', shape=(11089,)),
                        tf.placeholder('float', shape=(18,)),
                        tf.placeholder('float', shape=(1243,)),
                        tf.placeholder('float', shape=(1977,)),
                        tf.placeholder('float', shape=(244,)),
                        tf.placeholder('float', shape=(505,)),
                        tf.placeholder('float', shape=(39,)),
                        tf.placeholder('float', shape=(39,)),
                        tf.placeholder('float', shape=(16069,)),
                        tf.placeholder('float', shape=(2433,)),
                        tf.placeholder('float', shape=(104,)),
                        tf.placeholder('float', shape=(5361,)),
                        tf.placeholder('float', shape=(24841,)),
                        tf.placeholder('float', shape=(27,)),
                        tf.placeholder('float', shape=(22,)),
                        tf.placeholder('float', shape=(29,)),
                        tf.placeholder('float', shape=(104,)),
                        tf.placeholder('float', shape=(4557,)),
                        tf.placeholder('float', shape=(15657,)),
                        tf.placeholder('float', shape=(22,)),
                        tf.placeholder('float', shape=(104,))]
    return placeholder_list
def build_grad_shape():
    """Return the static shape of every trainable gradient tensor, in the
    fixed order used by the gradient-server wire protocol.

    NOTE(review): this table is machine-generated for one specific network
    checkpoint; it must stay in exact one-to-one correspondence (same order,
    same shapes) with the placeholder lists used to encode/decode gradients.
    Do not edit entries by hand — regenerate instead.
    """
    return [tf.TensorShape([104]),
            tf.TensorShape([268]),
            tf.TensorShape([108]),
            tf.TensorShape([104]),
            tf.TensorShape([56]),
            tf.TensorShape([32]),
            tf.TensorShape([24]),
            tf.TensorShape([268]),
            tf.TensorShape([7, 7, 3, 24]),
            tf.TensorShape([1, 1, 152, 52]),
            tf.TensorShape([3, 3, 152, 3]),
            tf.TensorShape([3, 3, 52, 3]),
            tf.TensorShape([32]),
            tf.TensorShape([104]),
            tf.TensorShape([1, 1, 300, 108]),
            tf.TensorShape([3, 3, 68, 3]),
            tf.TensorShape([36]),
            tf.TensorShape([1, 1, 56, 152]),
            tf.TensorShape([56]),
            tf.TensorShape([100]),
            tf.TensorShape([56]),
            tf.TensorShape([3, 3, 104, 3]),
            tf.TensorShape([100]),
            tf.TensorShape([184]),
            tf.TensorShape([1, 1, 312, 100]),
            tf.TensorShape([1, 1, 264, 68]),
            tf.TensorShape([104]),
            tf.TensorShape([3, 3, 100, 3]),
            tf.TensorShape([412]),
            tf.TensorShape([184]),
            tf.TensorShape([180]),
            tf.TensorShape([84]),
            tf.TensorShape([132]),
            tf.TensorShape([132]),
            tf.TensorShape([1, 1, 268, 184]),
            tf.TensorShape([3, 3, 32, 3]),
            tf.TensorShape([84]),
            tf.TensorShape([36]),
            tf.TensorShape([268]),
            tf.TensorShape([44]),
            tf.TensorShape([104]),
            tf.TensorShape([268]),
            tf.TensorShape([24]),
            tf.TensorShape([1, 1, 132, 412]),
            tf.TensorShape([1, 1, 456, 156]),
            tf.TensorShape([1, 1, 96, 32]),
            tf.TensorShape([1, 1, 104, 268]),
            tf.TensorShape([60]),
            tf.TensorShape([412, 3]),
            tf.TensorShape([36]),
            tf.TensorShape([1, 1, 56, 152]),
            tf.TensorShape([152]),
            tf.TensorShape([152]),
            tf.TensorShape([60]),
            tf.TensorShape([3, 3, 24, 3]),
            tf.TensorShape([32]),
            tf.TensorShape([64]),
            tf.TensorShape([1, 1, 84, 68]),
            tf.TensorShape([88]),
            tf.TensorShape([1, 1, 100, 268]),
            tf.TensorShape([1, 1, 268, 100]),
            tf.TensorShape([412]),
            tf.TensorShape([156]),
            tf.TensorShape([68]),
            tf.TensorShape([152]),
            tf.TensorShape([268]),
            tf.TensorShape([1, 1, 180, 412]),
            tf.TensorShape([1, 1, 108, 44]),
            tf.TensorShape([3, 3, 80, 3]),
            tf.TensorShape([44]),
            tf.TensorShape([1, 1, 104, 268]),
            tf.TensorShape([24]),
            tf.TensorShape([84]),
            tf.TensorShape([268]),
            tf.TensorShape([1, 1, 32, 84]),
            tf.TensorShape([1, 1, 312, 104]),
            tf.TensorShape([52]),
            tf.TensorShape([32]),
            tf.TensorShape([24]),
            tf.TensorShape([152]),
            tf.TensorShape([156]),
            tf.TensorShape([104]),
            tf.TensorShape([3, 3, 112, 3]),
            tf.TensorShape([1, 1, 108, 268]),
            tf.TensorShape([32]),
            tf.TensorShape([1, 1, 72, 32]),
            tf.TensorShape([3, 3, 88, 3]),
            tf.TensorShape([1, 1, 192, 56]),
            tf.TensorShape([1, 1, 36, 152]),
            tf.TensorShape([3]),
            tf.TensorShape([1, 1, 240, 104]),
            tf.TensorShape([1, 1, 32, 84]),
            tf.TensorShape([1, 1, 268, 412]),
            tf.TensorShape([1, 1, 156, 36]),
            tf.TensorShape([1, 1, 268, 104]),
            tf.TensorShape([1, 1, 324, 104]),
            tf.TensorShape([104]),
            tf.TensorShape([1, 1, 104, 268]),
            tf.TensorShape([180]),
            tf.TensorShape([52]),
            tf.TensorShape([268]),
            tf.TensorShape([1, 1, 84, 32]),
            tf.TensorShape([108]),
            tf.TensorShape([1, 1, 204, 52]),
            tf.TensorShape([412]),
            tf.TensorShape([52]),
            tf.TensorShape([52]),
            tf.TensorShape([108]),
            tf.TensorShape([152]),
            tf.TensorShape([152]),
            tf.TensorShape([64]),
            tf.TensorShape([268]),
            tf.TensorShape([268]),
            tf.TensorShape([1, 1, 412, 112]),
            tf.TensorShape([108]),
            tf.TensorShape([1, 1, 152, 60]),
            tf.TensorShape([268]),
            tf.TensorShape([112]),
            tf.TensorShape([152]),
            tf.TensorShape([104]),
            tf.TensorShape([68]),
            tf.TensorShape([1, 1, 268, 88]),
            tf.TensorShape([3, 3, 104, 3]),
            tf.TensorShape([104]),
            tf.TensorShape([100]),
            tf.TensorShape([1, 1, 84, 152]),
            tf.TensorShape([88]),
            tf.TensorShape([80]),
            tf.TensorShape([3, 3, 60, 3]),
            tf.TensorShape([268]),
            tf.TensorShape([1, 1, 268, 108]),
            tf.TensorShape([3, 3, 64, 3]),
            tf.TensorShape([3, 3, 108, 3]),
            tf.TensorShape([1, 1, 44, 84]),
            tf.TensorShape([152]),
            tf.TensorShape([268]),
            tf.TensorShape([100]),
            tf.TensorShape([80]),
            tf.TensorShape([32]),
            tf.TensorShape([84]),
            tf.TensorShape([1, 1, 152, 268]),
            tf.TensorShape([56]),
            tf.TensorShape([36]),
            tf.TensorShape([68]),
            tf.TensorShape([1, 1, 152, 104]),
            tf.TensorShape([1, 1, 84, 36]),
            tf.TensorShape([1, 1, 24, 24]),
            tf.TensorShape([1, 1, 180, 56]),
            tf.TensorShape([1, 1, 336, 132]),
            tf.TensorShape([68]),
            tf.TensorShape([3, 3, 184, 3]),
            tf.TensorShape([1, 1, 52, 152]),
            tf.TensorShape([3, 3, 36, 3]),
            tf.TensorShape([1, 1, 24, 84]),
            tf.TensorShape([152]),
            tf.TensorShape([152]),
            tf.TensorShape([1, 1, 156, 412]),
            tf.TensorShape([1, 1, 152, 64]),
            tf.TensorShape([412]),
            tf.TensorShape([1, 1, 268, 80]),
            tf.TensorShape([1, 1, 552, 180]),
            tf.TensorShape([104]),
            tf.TensorShape([84]),
            tf.TensorShape([112]),
            tf.TensorShape([412]),
            tf.TensorShape([1, 1, 68, 268]),
            tf.TensorShape([1, 1, 412, 152]),
            tf.TensorShape([84]),
            tf.TensorShape([412])]
def get_placeholder_for_main_gradient():
    """Return one float placeholder per trainable gradient tensor.

    NOTE(review): the shapes here are machine-generated and must match
    `build_grad_shape()` entry-for-entry (same order, same dimensions);
    `trainval` feeds the averaged dense gradients through these placeholders.
    Do not edit entries by hand — regenerate instead.
    """
    return [tf.placeholder('float', shape=(104,)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(108,)),
            tf.placeholder('float', shape=(104,)),
            tf.placeholder('float', shape=(56,)),
            tf.placeholder('float', shape=(32,)),
            tf.placeholder('float', shape=(24,)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(7, 7, 3, 24)),
            tf.placeholder('float', shape=(1, 1, 152, 52)),
            tf.placeholder('float', shape=(3, 3, 152, 3)),
            tf.placeholder('float', shape=(3, 3, 52, 3)),
            tf.placeholder('float', shape=(32,)),
            tf.placeholder('float', shape=(104,)),
            tf.placeholder('float', shape=(1, 1, 300, 108)),
            tf.placeholder('float', shape=(3, 3, 68, 3)),
            tf.placeholder('float', shape=(36,)),
            tf.placeholder('float', shape=(1, 1, 56, 152)),
            tf.placeholder('float', shape=(56,)),
            tf.placeholder('float', shape=(100,)),
            tf.placeholder('float', shape=(56,)),
            tf.placeholder('float', shape=(3, 3, 104, 3)),
            tf.placeholder('float', shape=(100,)),
            tf.placeholder('float', shape=(184,)),
            tf.placeholder('float', shape=(1, 1, 312, 100)),
            tf.placeholder('float', shape=(1, 1, 264, 68)),
            tf.placeholder('float', shape=(104,)),
            tf.placeholder('float', shape=(3, 3, 100, 3)),
            tf.placeholder('float', shape=(412,)),
            tf.placeholder('float', shape=(184,)),
            tf.placeholder('float', shape=(180,)),
            tf.placeholder('float', shape=(84,)),
            tf.placeholder('float', shape=(132,)),
            tf.placeholder('float', shape=(132,)),
            tf.placeholder('float', shape=(1, 1, 268, 184)),
            tf.placeholder('float', shape=(3, 3, 32, 3)),
            tf.placeholder('float', shape=(84,)),
            tf.placeholder('float', shape=(36,)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(44,)),
            tf.placeholder('float', shape=(104,)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(24,)),
            tf.placeholder('float', shape=(1, 1, 132, 412)),
            tf.placeholder('float', shape=(1, 1, 456, 156)),
            tf.placeholder('float', shape=(1, 1, 96, 32)),
            tf.placeholder('float', shape=(1, 1, 104, 268)),
            tf.placeholder('float', shape=(60,)),
            tf.placeholder('float', shape=(412, 3)),
            tf.placeholder('float', shape=(36,)),
            tf.placeholder('float', shape=(1, 1, 56, 152)),
            tf.placeholder('float', shape=(152,)),
            tf.placeholder('float', shape=(152,)),
            tf.placeholder('float', shape=(60,)),
            tf.placeholder('float', shape=(3, 3, 24, 3)),
            tf.placeholder('float', shape=(32,)),
            tf.placeholder('float', shape=(64,)),
            tf.placeholder('float', shape=(1, 1, 84, 68)),
            tf.placeholder('float', shape=(88,)),
            tf.placeholder('float', shape=(1, 1, 100, 268)),
            tf.placeholder('float', shape=(1, 1, 268, 100)),
            tf.placeholder('float', shape=(412,)),
            tf.placeholder('float', shape=(156,)),
            tf.placeholder('float', shape=(68,)),
            tf.placeholder('float', shape=(152,)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(1, 1, 180, 412)),
            tf.placeholder('float', shape=(1, 1, 108, 44)),
            tf.placeholder('float', shape=(3, 3, 80, 3)),
            tf.placeholder('float', shape=(44,)),
            tf.placeholder('float', shape=(1, 1, 104, 268)),
            tf.placeholder('float', shape=(24,)),
            tf.placeholder('float', shape=(84,)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(1, 1, 32, 84)),
            tf.placeholder('float', shape=(1, 1, 312, 104)),
            tf.placeholder('float', shape=(52,)),
            tf.placeholder('float', shape=(32,)),
            tf.placeholder('float', shape=(24,)),
            tf.placeholder('float', shape=(152,)),
            tf.placeholder('float', shape=(156,)),
            tf.placeholder('float', shape=(104,)),
            tf.placeholder('float', shape=(3, 3, 112, 3)),
            tf.placeholder('float', shape=(1, 1, 108, 268)),
            tf.placeholder('float', shape=(32,)),
            tf.placeholder('float', shape=(1, 1, 72, 32)),
            tf.placeholder('float', shape=(3, 3, 88, 3)),
            tf.placeholder('float', shape=(1, 1, 192, 56)),
            tf.placeholder('float', shape=(1, 1, 36, 152)),
            tf.placeholder('float', shape=(3,)),
            tf.placeholder('float', shape=(1, 1, 240, 104)),
            tf.placeholder('float', shape=(1, 1, 32, 84)),
            tf.placeholder('float', shape=(1, 1, 268, 412)),
            tf.placeholder('float', shape=(1, 1, 156, 36)),
            tf.placeholder('float', shape=(1, 1, 268, 104)),
            tf.placeholder('float', shape=(1, 1, 324, 104)),
            tf.placeholder('float', shape=(104,)),
            tf.placeholder('float', shape=(1, 1, 104, 268)),
            tf.placeholder('float', shape=(180,)),
            tf.placeholder('float', shape=(52,)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(1, 1, 84, 32)),
            tf.placeholder('float', shape=(108,)),
            tf.placeholder('float', shape=(1, 1, 204, 52)),
            tf.placeholder('float', shape=(412,)),
            tf.placeholder('float', shape=(52,)),
            tf.placeholder('float', shape=(52,)),
            tf.placeholder('float', shape=(108,)),
            tf.placeholder('float', shape=(152,)),
            tf.placeholder('float', shape=(152,)),
            tf.placeholder('float', shape=(64,)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(1, 1, 412, 112)),
            tf.placeholder('float', shape=(108,)),
            tf.placeholder('float', shape=(1, 1, 152, 60)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(112,)),
            tf.placeholder('float', shape=(152,)),
            tf.placeholder('float', shape=(104,)),
            tf.placeholder('float', shape=(68,)),
            tf.placeholder('float', shape=(1, 1, 268, 88)),
            tf.placeholder('float', shape=(3, 3, 104, 3)),
            tf.placeholder('float', shape=(104,)),
            tf.placeholder('float', shape=(100,)),
            tf.placeholder('float', shape=(1, 1, 84, 152)),
            tf.placeholder('float', shape=(88,)),
            tf.placeholder('float', shape=(80,)),
            tf.placeholder('float', shape=(3, 3, 60, 3)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(1, 1, 268, 108)),
            tf.placeholder('float', shape=(3, 3, 64, 3)),
            tf.placeholder('float', shape=(3, 3, 108, 3)),
            tf.placeholder('float', shape=(1, 1, 44, 84)),
            tf.placeholder('float', shape=(152,)),
            tf.placeholder('float', shape=(268,)),
            tf.placeholder('float', shape=(100,)),
            tf.placeholder('float', shape=(80,)),
            tf.placeholder('float', shape=(32,)),
            tf.placeholder('float', shape=(84,)),
            tf.placeholder('float', shape=(1, 1, 152, 268)),
            tf.placeholder('float', shape=(56,)),
            tf.placeholder('float', shape=(36,)),
            tf.placeholder('float', shape=(68,)),
            tf.placeholder('float', shape=(1, 1, 152, 104)),
            tf.placeholder('float', shape=(1, 1, 84, 36)),
            tf.placeholder('float', shape=(1, 1, 24, 24)),
            tf.placeholder('float', shape=(1, 1, 180, 56)),
            tf.placeholder('float', shape=(1, 1, 336, 132)),
            tf.placeholder('float', shape=(68,)),
            tf.placeholder('float', shape=(3, 3, 184, 3)),
            tf.placeholder('float', shape=(1, 1, 52, 152)),
            tf.placeholder('float', shape=(3, 3, 36, 3)),
            tf.placeholder('float', shape=(1, 1, 24, 84)),
            tf.placeholder('float', shape=(152,)),
            tf.placeholder('float', shape=(152,)),
            tf.placeholder('float', shape=(1, 1, 156, 412)),
            tf.placeholder('float', shape=(1, 1, 152, 64)),
            tf.placeholder('float', shape=(412,)),
            tf.placeholder('float', shape=(1, 1, 268, 80)),
            tf.placeholder('float', shape=(1, 1, 552, 180)),
            tf.placeholder('float', shape=(104,)),
            tf.placeholder('float', shape=(84,)),
            tf.placeholder('float', shape=(112,)),
            tf.placeholder('float', shape=(412,)),
            tf.placeholder('float', shape=(1, 1, 68, 268)),
            tf.placeholder('float', shape=(1, 1, 412, 152)),
            tf.placeholder('float', shape=(84,)),
            tf.placeholder('float', shape=(412,))]
def create_session():
    """Build a tf.Session whose GPU memory allocation grows on demand
    instead of grabbing all device memory up front."""
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    return tf.Session(config=session_config)
def load_graph(meta_file):
    """Create a fresh Graph and Session, then import the saved meta graph.

    meta_file: path to a TF1 ``.meta`` checkpoint description.
    Returns the tuple ``(graph, session, saver)``.
    """
    graph = tf.Graph()
    with graph.as_default():
        # The session must be created under this graph's context so that
        # the imported ops live in it.
        sess = create_session()
        print('Loading meta graph from ' + meta_file)
        saver = tf.train.import_meta_graph(meta_file, clear_devices=True)
    return graph, sess, saver
class COVIDNetCTRunner:
    """Primary training/testing/inference class.

    Gradient-server role: restores a model graph from a meta file, then builds
    the TF ops needed to (a) decode ternarized gradients received from workers
    and (b) re-encode the averaged gradient for broadcast back to workers.
    """
    def __init__(self, meta_file):
        # Path to the TF1 .meta checkpoint description to restore.
        self.meta_file = meta_file
        # Load graph/checkpoint and add optimizer
        self.graph, self.sess, self.saver = load_graph(self.meta_file)
        with self.graph.as_default():
            # self.train_op = self._add_optimizer(lr, momentum, fc_only)
            # self.grads = self.get_gradients(lr, momentum)
            # self.only_gradients = [g for g, _ in self.grads]
            # Placeholders for the encoded (ternary) gradients coming from a worker.
            self.ternarized_placeholder = get_placeholder_for_ternarized_gradient()
            # One scaler per gradient tensor (169 trainable tensors in this model).
            self.scaler_placeholder = tf.placeholder('float', shape=(169,))
            # Decode op: ternary gradients + scalers -> dense gradients with the
            # shapes listed in build_grad_shape().
            self.local_worker_gradients_2 = bingrad_common_updated.decode_from_ternary_gradients_2(
                self.ternarized_placeholder, self.scaler_placeholder, build_grad_shape())
            # Placeholders for feeding the averaged dense gradients back in.
            self.main_gradient_placeholder = get_placeholder_for_main_gradient()
            # Re-quantization pipeline: scalers -> stochastic ternarization -> encoding.
            self.scalers_2 = bingrad_common_updated.gradient_binarizing_scalers_2(self.main_gradient_placeholder, 0)
            self.mean_scalers_2 = bingrad_common_updated.max_scalers_2([self.scalers_2])
            self.ternarized_gradient_2 = bingrad_common_updated.stochastical_binarize_gradients_2(
                self.main_gradient_placeholder,
                self.mean_scalers_2[0])
            self.ternarized_gradient_2, grad_shape_2 = bingrad_common_updated.encode_to_ternary_gradients_2(
                self.ternarized_gradient_2,
                get_shape=True)
            # Initialize
            self.sess.run(tf.global_variables_initializer())
    def trainval(self, max_workers):
        """Run training with intermittent validation"""
        # Aggregation loop: collect one gradient from each of `max_workers`
        # workers, average, re-quantize, publish, then wait for acks.
        # NOTE(review): loops forever; shutdown is expected to come from
        # killing the process — confirm with the deployment scripts.
        with self.graph.as_default():
            global global_var_vals
            global done_flag
            global gradients_q
            global scaler_q
            global ack_q
            global global_var_scalers
            while 1:
                gradients_list = []
                for i in range(max_workers):
                    # Blocking reads: one ternary gradient bundle + its scalers
                    # per worker, pushed by the per-worker handler processes.
                    tern_grads = gradients_q.get()
                    mean_scaler = scaler_q.get()
                    feed_dict_2 = {}
                    for j, tern_grad in enumerate(tern_grads):
                        feed_dict_2[self.ternarized_placeholder[j]] = tern_grads[j]
                    feed_dict_2[self.scaler_placeholder] = mean_scaler
                    # Decode this worker's gradients back to dense form.
                    recv_grads = self.sess.run(self.local_worker_gradients_2, feed_dict=feed_dict_2)
                    gradients_list.append(recv_grads)
                print("Gradients received")
                # Average the dense gradients across all workers.
                new_gradients_list = util_gps.get_avg_gradient(gradients_list, max_workers)
                feed_dict_2 = {}
                for j, grad in enumerate(new_gradients_list):
                    feed_dict_2[self.main_gradient_placeholder[j]] = grad
                # Re-quantize the averaged gradient for broadcast.
                updated_quantized_gradient, updated_mean_scaler = self.sess.run(
                    [self.ternarized_gradient_2, self.mean_scalers_2], feed_dict=feed_dict_2)
                # Publish pickled payloads through the shared Manager values,
                # then raise done_flag so handler processes forward them.
                updated_mean_scaler = pickle.dumps(updated_mean_scaler[0], pickle.HIGHEST_PROTOCOL)
                global_var_scalers.value = updated_mean_scaler
                updated_quantized_gradient = pickle.dumps(updated_quantized_gradient, pickle.HIGHEST_PROTOCOL)
                global_var_vals.value = updated_quantized_gradient
                done_flag.value = 1
                # Wait for every worker to acknowledge before the next round.
                for i in range(max_workers):
                    val = ack_q.get()
                # NOTE(review): the original indentation was ambiguous here;
                # resetting the flag after all acks is assumed — confirm.
                done_flag.value = 0
def main():
    """Gradient-server entry point: spawn one handler process per worker,
    then run the aggregation loop in this process."""
    # Silence everything below error-level in TF's logger.
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    mode, args = parse_args(sys.argv[1:])
    global global_var_vals
    global done_flag
    global gradients_q
    global scaler_q
    global ack_q
    global global_var_scalers
    global initial_var
    global initial_flag
    global initial_ack_q
    # Queues that carry gradients / scalers / acknowledgements between the
    # handler processes and the aggregation loop.
    gradients_q = Queue()
    scaler_q = Queue()
    ack_q = Queue()
    initial_ack_q = Queue()
    # Manager-backed shared values used to broadcast the aggregated state.
    manager = Manager()
    global_var_vals = manager.Value(c_char_p, "")
    global_var_scalers = manager.Value(c_char_p, "")
    initial_var = manager.Value(c_char_p, "")
    done_flag = manager.Value('i', 0)
    initial_flag = manager.Value('i', 0)
    process_list = []
    for worker_idx in range(args.gs_max_workers):
        # Each worker gets its own port, offset from the base `port`.
        worker_port = port + worker_idx + 1
        handler = Process(target=handleWorker, args=(
            worker_port, gradients_q, scaler_q, done_flag, global_var_vals, global_var_scalers, ack_q, 100002,
            initial_flag, initial_var, initial_ack_q))
        handler.start()
        process_list.append(handler)
    # Restore the model from its meta graph and start aggregating.
    meta_file = os.path.join(args.model_dir, args.meta_name)
    runner = COVIDNetCTRunner(meta_file)
    runner.trainval(args.gs_max_workers)
# Script entry point: start the gradient server.
if __name__ == '__main__':
    main()
|
{
"alphanum_fraction": 0.5053922545,
"author": null,
"avg_line_length": 44.825255102,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8436c63da97756ea6240395eb6227111708b4f8c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b4b7bcfa5ace165520507f489dc74da7b695e2f0",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "sabuj7177/CovidProject",
"max_forks_repo_path": "covidnetct/GS.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b4b7bcfa5ace165520507f489dc74da7b695e2f0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "sabuj7177/CovidProject",
"max_issues_repo_path": "covidnetct/GS.py",
"max_line_length": 145,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b4b7bcfa5ace165520507f489dc74da7b695e2f0",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "sabuj7177/CovidProject",
"max_stars_repo_path": "covidnetct/GS.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 8172,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 35143
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path
from optparse import OptionParser
import results
import pylab
import loader
import time
import torch
import numpy as np
from descriptors import raw_gray_descriptor, hardnet_descriptor, hog_descriptor
# parameters according to the paper --
class kcf_params:
    """KCF tracker hyper-parameters; values follow the original paper."""
    # extra search area surrounding the target, as a fraction of its size
    padding = 1.0
    # spatial bandwidth of the desired response (proportional to target size)
    output_sigma_factor = 1.0 / 16.0
    # gaussian kernel bandwidth
    sigma = 0.2
    # ridge-regression regularization term
    lambda_value = 1e-2
    # linear interpolation factor for model adaptation
    interpolation_factor = 0.075
def get_subwindow(image, box):
    """Extract the padded sub-window of `image` centred on the target.

    box layout: [center_x, center_y, w, h, padded_w, padded_h].
    Indices falling outside the image are clamped to the border, which
    replicates edge pixels.
    """
    half_w = pylab.floor(box[4] / 2)
    half_h = pylab.floor(box[5] / 2)
    xs = (pylab.floor(box[0]) + pylab.arange(box[4], dtype=int) - half_w).astype(int)
    ys = (pylab.floor(box[1]) + pylab.arange(box[5], dtype=int) - half_h).astype(int)
    # Clamp out-of-bounds indices into the valid range (border replication).
    xs = pylab.clip(xs, 0, image.shape[1] - 1)
    ys = pylab.clip(ys, 0, image.shape[0] - 1)
    return image[pylab.ix_(ys, xs, range(image.shape[2]))]
def apply_cos_window(channels):
    """Weight each channel by a lazily built, module-cached 2-D Hann window.

    Input values are shifted by -0.5 before weighting. The window is sized
    from the first call's channel dimensions and reused afterwards.
    """
    global cos_window
    if cos_window is None:
        hann_rows = pylab.hanning(channels.shape[1])
        hann_cols = pylab.hanning(channels.shape[2])
        cos_window = pylab.outer(hann_rows, hann_cols)
    return pylab.multiply(channels[:] - 0.5, cos_window)
def dense_gauss_kernel(sigma, x, y=None):
    """Gaussian-correlation kernel between patches x and y for all cyclic
    shifts, computed densely via the FFT (KCF / CSK formulation).

    If y is None the autocorrelation kernel k(x, x) is returned.
    """
    xf = pylab.fft2(x)
    x_vec = x.flatten()
    xx = pylab.dot(x_vec.transpose(), x_vec)  # squared norm of x
    if y is None:
        # Autocorrelation: reuse x's transform and norm.
        yf = xf
        yy = xx
    else:
        yf = pylab.fft2(y)
        y_vec = y.flatten()
        yy = pylab.dot(y_vec.transpose(), y_vec)
    # Cross-correlation of x and y, back in the spatial domain.
    cross = pylab.ifft2(pylab.multiply(xf, pylab.conj(yf)))
    # Centre the circular-correlation output.
    row_shift, col_shift = pylab.floor(pylab.array(x.shape) / 2).astype(int)
    cross = pylab.roll(pylab.roll(cross, row_shift, axis=0), col_shift, axis=1)
    xy = pylab.real(cross)
    # Gaussian of the (clamped non-negative) squared distance, size-normalized.
    return pylab.exp(-1 / (sigma ** 2) * pylab.maximum(0, (xx + yy - 2 * xy) / x.size))
def crop(channels):
    """Identity crop: return `channels` unchanged.

    Cropping to a small central window was experimented with (see the
    preserved variants below) but is currently disabled. The previous
    version also computed unused half-width/half-height locals via `/ 2`,
    which would have produced float indices under Python 3 had the
    commented code been re-enabled; the dead computation is removed.
    """
    # Disabled experiments:
    # return channels[:, halfW - halfExtend:halfW + halfExtend, halfH - halfExtend:halfH + halfExtend]
    # return channels[:, 14:-14, 14:-14]
    return channels
def distance_matrix_vector(anchor, positive):
    """Pairwise L2 distance matrix between two batches of descriptors.

    anchor: (n, d) tensor, positive: (m, d) tensor -> (n, m) distances,
    with a small epsilon inside the sqrt for numerical stability.
    """
    sq_anchor = (anchor * anchor).sum(dim=1).unsqueeze(-1)
    sq_positive = (positive * positive).sum(dim=1).unsqueeze(-1)
    eps = 1e-6
    cross = torch.bmm(anchor.unsqueeze(0), torch.t(positive).unsqueeze(0)).squeeze(0)
    sq_dists = sq_anchor.repeat(1, positive.size(0)) + torch.t(sq_positive.repeat(1, anchor.size(0))) - 2.0 * cross
    return torch.sqrt(sq_dists + eps)
def track(descriptor):
    """Track the object through the loaded sequence with a multi-channel KCF.

    descriptor: module exposing initialize(use_gpu), describe(patch) and
    update_roi(roi, moved_by). Reads the module-global `options`; frames come
    from the `loader` module and results go to the `results` module.

    Fix: the window-centre indices were computed with true division
    (`shape // 2` was `shape / 2`), which yields float indices and a
    TypeError under Python 3; floor division is identical for ints under
    Python 2, so behavior there is unchanged.
    """
    global options
    desc_channel_count = descriptor.initialize(options.use_gpu)
    roi = loader.track_bounding_box_from_first_frame()
    # Convert [x, y, w, h] into [center_x, center_y, w, h, padded_w, padded_h].
    roi = [roi[0] + roi[2] / 2, roi[1] + roi[3] / 2, roi[2], roi[3], roi[2] * (1 + kcf_params.padding),
           roi[3] * (1 + kcf_params.padding)]
    # Desired gaussian response bandwidth, proportional to target size.
    output_sigma = pylab.sqrt(pylab.prod([roi[3], roi[2]])) * kcf_params.output_sigma_factor
    global cos_window
    cos_window = None
    # Per-channel model: template patch, dual coefficients, last response map.
    template = [None for i in range(desc_channel_count)]
    alpha_f = [None for i in range(desc_channel_count)]
    response = [None for i in range(desc_channel_count)]
    yf = None
    track_time = 0
    full_track_time = time.time()
    while loader.has_next_frame():
        im = loader.next_frame()
        if (loader.frame_number() % 10) == 0:
            print("Processing frame {}".format(loader.frame_number()))
        start_time = time.time()
        is_first_frame = loader.frame_number() == 0
        cropped = get_subwindow(im, roi)
        channels = descriptor.describe(cropped)
        subwindow = apply_cos_window(channels)
        subwindow = crop(subwindow)
        dmv = None
        if is_first_frame:
            # Build the desired gaussian response (peaked at the window
            # centre) and keep its FFT for the regression update below.
            grid_y = pylab.arange(subwindow.shape[1]) - pylab.floor(subwindow.shape[1] / 2)
            grid_x = pylab.arange(subwindow.shape[2]) - pylab.floor(subwindow.shape[2] / 2)
            rs, cs = pylab.meshgrid(grid_x, grid_y)
            y = pylab.exp(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))
            yf = pylab.fft2(y)
        else:
            for i in range(0, subwindow.shape[0]):
                channel = subwindow[i, :, :]
                # Classifier response at all cyclic shifts (Eq. 9).
                k = dense_gauss_kernel(kcf_params.sigma, channel, template[i])
                kf = pylab.fft2(k)
                alphaf_kf = pylab.multiply(alpha_f[i], kf)
                response[i] = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9
            # Descriptor of the window centre; integer (floor) division keeps
            # these valid indices under Python 3 (fixes `shape / 2`).
            anchor = torch.tensor(channels[:, channels.shape[1] // 2, channels.shape[2] // 2]).unsqueeze(0)
            points = torch.tensor(response).view(channels.shape[0], -1).t()
            dmv = distance_matrix_vector(anchor, points).view(channels.shape[1], channels.shape[2])
            # The new target position is the location most distant in
            # descriptor space from the centre anchor.
            argmax = np.array(dmv).argmax()
            tmp = pylab.unravel_index(argmax, subwindow.shape[1:])
            moved_by = [float(tmp[0]) - float(subwindow.shape[1]) / 2,
                        float(tmp[1]) - float(subwindow.shape[2]) / 2]
            roi = descriptor.update_roi(roi, moved_by)
        # Re-extract the (possibly moved) window and update the model.
        cropped = get_subwindow(im, roi)
        channels = descriptor.describe(cropped)
        subwindow = apply_cos_window(channels)
        subwindow = crop(subwindow)
        for i in range(0, subwindow.shape[0]):
            channel = subwindow[i, :, :]
            k = dense_gauss_kernel(kcf_params.sigma, channel)
            new_alpha_f = pylab.divide(yf, (pylab.fft2(k) + kcf_params.lambda_value))  # Eq. 7
            new_template = channel
            if is_first_frame:
                alpha_f[i] = new_alpha_f
                template[i] = new_template
            else:
                # Linear interpolation for model adaptation.
                f = kcf_params.interpolation_factor
                alpha_f[i] = (1 - f) * alpha_f[i] + f * new_alpha_f
                template[i] = (1 - f) * template[i] + f * new_template
        track_time += time.time() - start_time
        results.log_tracked(im, roi, False, template[0], dmv)
    # end of "for each image in video"
    results.log_meta("speed.frames_tracked", loader.frame_number())
    results.log_meta("speed.track_no_io_time", str(track_time) + "s")
    results.log_meta("speed.track_no_io_fps", loader.frame_number() / track_time)
    results.log_meta("speed.track_no_init_time", str(time.time() - full_track_time) + "s")
    results.show_precision()
    return
def parse_arguments():
    """Parse command-line options for the tracker.

    Returns the options object. Exits with a parser error when the required
    input path is missing or does not exist on disk.
    """
    parser = OptionParser()
    parser.description = "This program will track objects in image sequences"
    parser.add_option("-i", "--input", dest="input_path",
                      metavar="PATH", type="string", default=None,
                      help="path to a folder with dataset")
    parser.add_option("-o", "--output", dest="output_path",
                      metavar="PATH", type="string", default=None,
                      help="path to a folder to which output images should be stored. If none is supplied, default will be created")
    parser.add_option("--note", dest="note",
                      type="string", default=None,
                      help="optional note that will get passed to output data")
    parser.add_option("-g", "--use-gpu", dest="use_gpu",
                      action="store_true",
                      help="try to run on gpu, where applies")
    parser.add_option("-d", "--descriptor", dest="descriptor",
                      action="store", type="string", default="raw",
                      help="Set descriptor to run with")
    (options, args) = parser.parse_args()
    if not options.input_path:
        parser.error("'input' option is required to run this program")
    if not os.path.exists(options.input_path):
        # Fixed: previously referenced the non-existent options.video_path,
        # which raised AttributeError instead of reporting a clean error.
        parser.error("Could not find the input data set in %s" % options.input_path)
    return options
def main():
    """CLI entry point: parse options, pick a descriptor module, log the run
    configuration and hand control to track()."""
    global options
    run_time = time.time()
    options = parse_arguments()
    loader.load(options.input_path, options.output_path)
    # Map user-supplied descriptor names onto descriptor modules.
    descriptor_map = {
        "raw": raw_gray_descriptor,
        "gray": raw_gray_descriptor,
        "grey": raw_gray_descriptor,
        "hardnet": hardnet_descriptor,
        "hog": hog_descriptor,
    }
    key = options.descriptor.lower()
    if key not in descriptor_map:
        raise Exception("Unknown descriptor '{}'".format(options.descriptor))
    descriptor = descriptor_map[key]
    # Record the run configuration alongside the results.
    results.log_meta("descriptor", descriptor.get_name())
    results.log_meta("dataset", options.input_path)
    if options.note is not None:
        results.log_meta("note", options.note)
    results.log_meta("use_gpu", "true" if options.use_gpu else "false")
    results.log_meta("tracker.padding", kcf_params.padding)
    results.log_meta("tracker.interpolation_factor", kcf_params.interpolation_factor)
    results.log_meta("tracker.lambda", kcf_params.lambda_value)
    results.log_meta("tracker.sigma", kcf_params.sigma)
    results.log_meta("tracker.output_sigma_factor", kcf_params.output_sigma_factor)
    track(descriptor)
    run_time = time.time() - run_time
    results.log_meta("speed.total_run_time", str(run_time) + "s")
    print("Finished in {}s".format(run_time))
    return
# Script entry point: run the tracker CLI.
if __name__ == "__main__":
    main()
|
{
"alphanum_fraction": 0.6112907968,
"author": null,
"avg_line_length": 34.586440678,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "00b70dc05c33afccffe11dc7089aa01f72b6b5b1",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2018-07-30T08:42:41.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-07-30T08:42:41.000Z",
"max_forks_repo_head_hexsha": "e6a2ba7718107c0c7e9f36db8c211e222d64c34f",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "RealJohnSmith/circulant_matrix_tracker",
"max_forks_repo_path": "circulant_matrix_tracker.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e6a2ba7718107c0c7e9f36db8c211e222d64c34f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "RealJohnSmith/circulant_matrix_tracker",
"max_issues_repo_path": "circulant_matrix_tracker.py",
"max_line_length": 132,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e6a2ba7718107c0c7e9f36db8c211e222d64c34f",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "RealJohnSmith/circulant_matrix_tracker",
"max_stars_repo_path": "circulant_matrix_tracker.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2570,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10203
}
|
################################################
## STEP 4. Number of genes vs UMIs filter
#################################################
# This filter removes cells that deviate from the expected relationship between the number of genes (the number of
# genes in a cell with at least one count) and the number of UMIs/molecules (the total number of counts in a cell).
############ NEED TO CHECK CONFIG SCHEMA
# OLD SCHEMA
# "numGenesVsNumUmis": {
# "filterSettings": {
# "regressionType": "gam",
# "smoothing": 13,
# "upperCutoff": 4.8,
# "lowerCutoff": 2.1,
# "stringency": 2.1,
# "binStep": 0.05
# },
# "enabled": true
# }
# PROPOSAL SCHEMA
# "numGenesVsNumUmis": {
# "filterSettings": {
# "regressionType": "gam",
# "regressionTypeSettings": {
# "gam": {
# "p.level": 0.001
# }
# }
# },
# "enabled": true
# "auto": true
# }
#' @description Filters the seurat object based on the relationship between the
#' number of detected genes and the number of UMIs per cell: cells that deviate
#' from the main trend of that relationship are removed.
#' @param config list containing the following information
#' - enabled: true/false. Referring to apply or not the filter.
#' - auto: true/false. 'True' indicates that the filter settings need to be derived
#' from the data (it requires computing a default p.level, see below).
#' - filterSettings: slot with thresholds
#' - regressionType: String. Regression to be used: {gam}
#' - regressionTypeSettings: list with the config settings for all the regression type options
#' - gam: for the gam option there is only one element:
#' - p.level: which refers to the confidence level for deviation from the main trend
#' @export return a list with the filtered seurat object by numGenesVsNumUmis, the config and the plot values
numGenesVsNumUmis <- function(scdata, config){
    # Check whether the filter is enabled; if not, hand back the data untouched.
    # NOTE(review): this early return yields the bare seurat object, whereas the
    # enabled path returns list(data, config, plotData) — confirm callers accept both.
    if (!as.logical(toupper(config$enabled)))
        return(scdata)
    # For now we can read p.level directly; when more regression methods are
    # added this lookup will need to be generalized.
    p.level <- config$filterSettings$regressionTypeSettings[[config$filterSettings$regressionType]]$p.level
    # In auto mode, derive a sensible p.level from the data. The default follows
    # the "gene.vs.molecule.cell.filter" function of the pagoda2 package.
    if (as.logical(toupper(config$auto)))
        p.level <- min(0.001, 1/ncol(scdata))
    # NOTE(review): always TRUE here because of the early return above, so the
    # else branch below is unreachable (and would leave df/pb undefined anyway).
    if (as.logical(toupper(config$enabled))){
        # For now only "gam" is supported; the fit itself is a robust linear
        # model (MASS::rlm) of genes on molecules.
        if (config$filterSettings$regressionType=="gam"){
            # Molecules vs genes per cell, stored by Seurat in nCount_RNA and
            # nFeature_RNA respectively.
            df <- data.frame(molecules = scdata$nCount_RNA, genes = scdata$nFeature_RNA)
            # log10 scale, following the mock-up plot.
            df <- log10(df)
            # Name the rows so outlier cells can be identified later.
            rownames(df) <- colnames(scdata)
            df <- df[order(df$molecules, decreasing = FALSE), ]
            m <- MASS::rlm(genes ~ molecules, data = df)
            # Prediction interval at confidence 1 - p.level.
            suppressWarnings(pb <- data.frame(predict(m, interval = "prediction",
                                                      level = 1 - p.level, type = "response")))
            # Outliers: cells below the lower band or above the upper band.
            outliers <- rownames(df)[df$genes > pb$upr | df$genes < pb$lwr]
            # Keep only the non-outlier cells.
            scdata.filtered <- subset(scdata, cells = colnames(scdata)[!colnames(scdata)%in%outliers])
        }
    }else{
        scdata.filtered <- scdata
    }
    # Record the p.level actually used (it may have been auto-derived).
    config$filterSettings$regressionTypeSettings[[config$filterSettings$regressionType]]$p.level <- p.level
    # The result object must conform to: {data, config, plotData : {plot1, plot2}}
    result <- list(
        data = scdata.filtered,
        config = config,
        plotData = list(
            # Scatter plot composed of:
            #   x-axis: log10 UMIs, y-axis: log10 genes,
            #   plus the upper/lower prediction-interval bands.
            # Q: should the excluded cells be flagged on the R side, or can
            # that be done in the UI?
            featuresvsUMIsscatterplot = list(log10_UMIs = df$molecules, log10_genes = df$genes, upper_cutoff = pb$upr,
                                            lower_cutoff = pb$lwr)
        )
    )
    return(result)
}
|
{
"alphanum_fraction": 0.607617896,
"author": null,
"avg_line_length": 43.147826087,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "5118f7b6c3a97ac7c37e5745cec5bc5e0dc8bfeb",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-11-10T23:17:30.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-10T23:17:30.000Z",
"max_forks_repo_head_hexsha": "cbac0d5aae262afa6afdd2ee74b8ef7c58e745f6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "biomage-ltd/data-ingest",
"max_forks_repo_path": "src/QC_helpers/numGenesVsNumUmis.r",
"max_issues_count": 10,
"max_issues_repo_head_hexsha": "cbac0d5aae262afa6afdd2ee74b8ef7c58e745f6",
"max_issues_repo_issues_event_max_datetime": "2021-06-22T15:46:46.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-01-07T11:34:57.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "biomage-ltd/data-ingest",
"max_issues_repo_path": "src/QC_helpers/numGenesVsNumUmis.r",
"max_line_length": 155,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "cbac0d5aae262afa6afdd2ee74b8ef7c58e745f6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "biomage-ltd/data-ingest",
"max_stars_repo_path": "src/QC_helpers/numGenesVsNumUmis.r",
"max_stars_repo_stars_event_max_datetime": "2021-02-10T20:50:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-23T17:41:10.000Z",
"num_tokens": 1181,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 4962
}
|
(* Auto-generated LFind driver: loads the goal under test and extracts a set
   of concrete example terms to OCaml so candidate lemmas can be evaluated
   on real data. The exact example values are part of the benchmark. *)
Load LFindLoad.
From lfind Require Import LFind.
From adtind Require Import goal11.
Require Import Extraction.
(* Map Coq's nat and list constructors onto the OCaml names LFind expects. *)
Extract Inductive nat => nat [ "(O)" "S" ].
Extract Inductive list => list [ "Nil" "Cons" ].
(* Example inputs: lists of Peano naturals of varying length and content. *)
Definition lfind_example_1 := ( Cons (Succ (Succ Zero)) (Cons Zero Nil)).
Definition lfind_example_2 := ( Cons (Succ (Succ Zero)) Nil).
Definition lfind_example_3 := ( Cons (Succ Zero) (Cons Zero (Cons Zero (Cons (Succ (Succ (Succ (Succ (Succ Zero))))) Nil)))).
Definition lfind_example_4 := ( Cons Zero (Cons Zero (Cons (Succ Zero) (Cons Zero Nil)))).
Definition lfind_example_5 := ( Cons Zero Nil).
Definition lfind_example_6 := ( Cons Zero (Cons (Succ Zero) Nil)).
Definition lfind_example_7 := ( Cons (Succ Zero) (Cons Zero Nil)).
Definition lfind_example_8 := ( Cons (Succ (Succ Zero)) (Cons (Succ (Succ Zero)) Nil)).
Definition lfind_example_9 := ( Cons Zero (Cons (Succ (Succ Zero)) Nil)).
Definition lfind_example_10 := ( Cons Zero (Cons Zero Nil)).
Definition lfind_example_11 := ( Cons (Succ Zero) (Cons (Succ Zero) (Cons (Succ (Succ (Succ (Succ Zero)))) (Cons (Succ (Succ (Succ (Succ Zero)))) Nil)))).
Definition lfind_example_12 := ( Cons Zero (Cons (Succ (Succ (Succ (Succ (Succ Zero))))) Nil)).
Definition lfind_example_13 := ( Cons (Succ Zero) Nil).
Definition lfind_example_14 := ( Nil).
(* Emit all examples as a single OCaml module for the lemma-finder run. *)
Extraction "/home/yousef/lemmafinder/benchmark/_lfind_clam_lf_goal11_theorem0_57_lem3/lfind_extraction.ml" lfind_example_1 lfind_example_2 lfind_example_3 lfind_example_4 lfind_example_5 lfind_example_6 lfind_example_7 lfind_example_8 lfind_example_9 lfind_example_10 lfind_example_11 lfind_example_12 lfind_example_13 lfind_example_14 .
|
{
"alphanum_fraction": null,
"author": "yalhessi",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal11_theorem0_57_lem3/lfind_extraction.v",
"reason": null,
"repo": "lemmaranker",
"save_path": "github-repos/coq/yalhessi-lemmaranker",
"sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a",
"size": null
}
|
import gym
import numpy as np
import cv2
import copy
class Env:
    """Gym-style point-reaching environment in the unit cube.

    The agent moves a 3-D point by small clipped steps and is rewarded
    for being close to a fixed goal position.  Observations are either a
    6-vector (position + velocity) or an 84x84x3 rendered image,
    selected by the ``vision`` flag.
    """

    def __init__(self, vision=False):
        self.vision = vision
        self.W = 400             # canvas size (pixels) used for rendering
        self.ACT_SCALE = 0.01    # per-step displacement scale
        self.TL = 100            # episode time limit (steps)
        self.action_space = gym.spaces.Box(low=-1, high=1, shape=[3])
        if self.vision:
            self.observation_space = gym.spaces.Box(low=0.0, high=1, shape=[84, 84, 3])
        else:
            self.observation_space = gym.spaces.Box(low=-1, high=1, shape=[6])
        self.goal = np.array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0])
        self.state = np.array([0.5, 0.5, 0.5])
        self.reset()

    def reset(self):
        """Begin a new episode at a uniformly random position."""
        self.img = np.zeros((self.W, self.W, 3), np.uint8)
        self.img[:] = (80, 80, 80)
        # self.goal = np.random.uniform(0, 1, (3))
        self.state = np.random.uniform(0, 1, (3))
        self.prev_state = self.state
        self.ts = 0
        return self._get_state()

    def step(self, action):
        """Apply a clipped action scaled by ACT_SCALE; advance one step."""
        delta = np.clip(action, -1, 1) * self.ACT_SCALE
        self.state = np.clip(self.state + delta, 0, 1)
        self.ts += 1
        reward = self._reward()
        done = self._done()
        return self._get_state(), reward, done, {}

    def _get_state(self):
        """Return the current observation (vector or resized image)."""
        if self.vision:
            frame = copy.deepcopy(self.get_image(self.state))
            return cv2.resize(frame, (84, 84))
        vel = self.state - self.prev_state
        obs = np.hstack([self.state, vel])
        self.prev_state = self.state
        return obs

    def _reward(self):
        """Negative squared distance from the point to the goal position."""
        return -np.sum((self.goal[:3] - self.state) ** 2)

    def _done(self):
        """True at the time limit or if the point leaves [0, 1]^3."""
        escaped = np.any(self.state > 1) or np.any(self.state < 0)
        return self.ts >= self.TL or escaped

    def get_image(self, state_list, img=None, iteration=0):
        """Draw the goal and the given states as circles on a BGR canvas."""
        if img is None:
            img = np.zeros((self.W, self.W, 3), np.uint8)
            img[:] = (150, 150, 150)

        def as_pixel(state):
            scaled = copy.deepcopy(state) * self.W
            return int(scaled[0]), int(scaled[1])

        def radius(state):
            # Third coordinate encoded as circle size (pseudo-depth).
            return int(state[2] * 5 + 1)

        cv2.circle(img, as_pixel(self.goal), radius(self.goal), (100, 100, 200), -1, lineType=cv2.LINE_AA)
        for idx, s in enumerate(state_list):
            color = (120 - idx * 1, iteration * 10, 100 - iteration * 10)
            cv2.circle(img, as_pixel(s), radius(s), color, -1, lineType=cv2.LINE_AA)
        # self.img = cv2.addWeighted(self.img, 0.8, img, 0.2, 0)
        return img

    def render(self, state_list=None, trajectories=[], iteration=0, img=None):
        """Display one trajectory, or a batch of trajectories when
        ``state_list`` is None."""
        if state_list is None:
            for traj in trajectories:
                img = self.get_image(traj, img, iteration=iteration)
            cv2.imshow("Render", img)
            cv2.waitKey(10)
        else:
            img = self.get_image(state_list, iteration=iteration)
            cv2.namedWindow("Render", cv2.WINDOW_AUTOSIZE)
            cv2.imshow("Render", img)
            cv2.waitKey(10)
        return img
# env = Env()
# obs = env.reset()
# done = False
# while not done:
# act = env.action_space.sample()
# env.step(act)
# env.render()
#
|
{
"alphanum_fraction": 0.6182385576,
"author": null,
"avg_line_length": 27.4666666667,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4f4f031ed661bfde6655d483d4a978788731da00",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7959b206f9b440d00dd9c0c4a18df5d9587f1fa1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rlee3359/probabilistic-model-based-rl",
"max_forks_repo_path": "env.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7959b206f9b440d00dd9c0c4a18df5d9587f1fa1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rlee3359/probabilistic-model-based-rl",
"max_issues_repo_path": "env.py",
"max_line_length": 99,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7959b206f9b440d00dd9c0c4a18df5d9587f1fa1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "rlee3359/probabilistic-model-based-rl",
"max_stars_repo_path": "env.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 883,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2884
}
|
import numpy as np
from pprint import pprint
def __build_type1_(self,
                   labelmap_file,
                   label_col,
                   color_col,
                   file_col_sep):
    """Build the color -> label id mappings from a CVAT labelmap file.

    Reads the labelmap CSV (``color_col`` holds 'r,g,b' strings and
    ``label_col`` the CVAT class name) and populates on ``self``:
    color_to_inputLabel, valid_CS_label2ID, color_to_CSLabelID,
    num_classes, synID_2_csID, csID_2_synID and color_to_synID.

    NOTE(review): relies on module-level names not visible in this chunk
    (pd, cvat_2_cityscapes, csLabelData_name2id, defaultdict, itemgetter,
    OrderedDict, CS_label_names) — confirm they are imported/defined
    elsewhere in the original module.
    """
    # List of class names in CS (Cityscapes) dataset (ordered)
    global CS_label_names
    # Dicts of CS labels such as { 1: road, 2: ... , 19: ... } and inverse
    cont_CS_labels_id2name = {}
    cont_CS_labels_name2id = {}
    for e in enumerate(CS_label_names, 1):
        cont_CS_labels_id2name[e[0]] = e[1]
        cont_CS_labels_name2id[e[1]] = e[0]

    # Output of the CVAT system; rows are "rgb <sep> cvat_class_name"
    _df_ = pd.read_csv(
        labelmap_file,
        sep=file_col_sep,
        index_col=None
    )
    default_label = None
    # color_to_inputLabel maps tuple(r, g, b) -> cityscapes label (or None
    # when the CVAT label has no cityscapes equivalent)
    color_to_inputLabel = {}
    for i, row in _df_.iterrows():
        input_label = row[label_col]
        _color = row[color_col].strip().split(',')
        _color = (int(_color[0]), int(_color[1]), int(_color[2]))
        if input_label in cvat_2_cityscapes.keys():
            color_to_inputLabel[_color] = cvat_2_cityscapes[input_label]
        else:
            color_to_inputLabel[_color] = default_label
    self.color_to_inputLabel = color_to_inputLabel

    # -------------------
    # Find the set of valid cityscapes labels: those present in CVAT,
    # kept in canonical CS_label_names order
    # -------------------
    valid_CS_labels = set(
        color_to_inputLabel.values()
    ).intersection(set(CS_label_names))
    valid_CS_labels = [_ for _ in CS_label_names if _ in valid_CS_labels]

    # ----------------------------------------------------------
    # The images should contain label ids present in this set
    # ----------------------------------------------------------
    self.valid_CS_label2ID = defaultdict(lambda: None)
    for _ in valid_CS_labels:
        self.valid_CS_label2ID[_] = csLabelData_name2id[_]
    # Sort entries by label id, then rebuild the defaultdict in that order
    self.valid_CS_label2ID = sorted(self.valid_CS_label2ID.items(), key=itemgetter(1))
    self.valid_CS_label2ID = defaultdict(
        lambda: None,
        {i[0]: i[1] for i in self.valid_CS_label2ID}
    )

    # ---------------------------------------------
    # Find the ids of the CS labels (e.g.: road:7);
    # colors without a valid label collapse to background id 0
    color_to_CSLabelID = OrderedDict({})
    for k, v in color_to_inputLabel.items():
        if v in valid_CS_labels:
            color_to_CSLabelID[k] = csLabelData_name2id[v]
        else:
            color_to_CSLabelID[k] = 0
    self.color_to_CSLabelID = color_to_CSLabelID

    # Account for the "background" class
    self.num_classes = len(self.valid_CS_label2ID) + 1
    self.synID_2_csID = {}
    self.csID_2_synID = {}

    # -----------------------------------------------
    # Calculate the mapping from CS label ids to the compact
    # synthetic ids 1 ... n, e.g. 1: 7(road), 2: 8(sidewalk)
    # -----------------------------------------------
    i = 1
    for item in valid_CS_labels:
        _ = self.valid_CS_label2ID[item]
        if _ is not None:
            self.synID_2_csID[i] = _
            i += 1
    # Inverse mapping; background maps to itself (0 -> 0)
    self.csID_2_synID = {v: k for k, v in self.synID_2_csID.items()}
    self.csID_2_synID[0] = 0
    self.color_to_synID = {
        k: self.csID_2_synID[v] for k, v in self.color_to_CSLabelID.items()}
    return
# ------------
# Ad-hoc driver (deprecated): generate a synthetic label map for a single
# image and report per-class IoU between model prediction and ground truth.
# NOTE(review): depends on names not defined in this chunk (anotationGen,
# cv2) — presumably imported elsewhere in the original module.
import os
print(os.getcwd())
obj = anotationGen('./../../labelmap.txt', model_output_sparse=False)
file_name = '1010_SS_D_89f77e6ed4f3a8b4b9199e68b130cdd83a2159f5ece4041ca72aa49164b0c8bb5b7b5732754a9a916fa766ccc20b14d5ee04771fde8739002e6195380814be4a_42'
model_op_path = './../../Data/seg_results/{}.npy'.format(file_name)
pprint(obj.csID_2_synID)
# Prediction as a 2-D array of synthetic label ids
prediction = obj.gen_SynLabel( data_path=model_op_path)
# print(prediction.shape)
print(obj.synID_to_desc)
print('generateSynLabel', prediction[300,550:575])
ss_mask_path = './../../Data/img/{}.png'.format(file_name)
img = cv2.cvtColor(cv2.imread(ss_mask_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
import matplotlib.pyplot as plt
# plt.imshow(img)
# plt.show()
# print('>',img[300,550:575])
ground_truth = obj.process_SegMask(ss_mask_path)
# print(ground_truth[300,550:575])
valid_class_labels = list(obj.synID_to_desc.keys())
# For each class: build binary masks and compute intersection-over-union
for _class_label_ in valid_class_labels:
    mask = np.ones(ground_truth.shape, dtype=int)
    mask = mask * int(_class_label_)
    gt = np.equal(ground_truth, mask).astype(int)
    pred = np.equal(prediction, mask).astype(int)
    _intersection = np.logical_and(gt,pred)
    _union = np.logical_or(gt,pred)
    if np.sum(_union)>0:
        IoU = np.sum(_intersection)/np.sum(_union)
    else:
        # Class absent in both masks: define IoU as 0
        IoU = 0
    print(_class_label_,IoU)
|
{
"alphanum_fraction": 0.6185544293,
"author": null,
"avg_line_length": 34.9076923077,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f19a6c76cf278bbb5daddecd2867ee2ce77ffe40",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "41d823bb6b4ab6d4403240cc077af2d09663128d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ddatta-DAC/evalSemanticSeg",
"max_forks_repo_path": "evalSemanticSeg/deprectaed_code.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "41d823bb6b4ab6d4403240cc077af2d09663128d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ddatta-DAC/evalSemanticSeg",
"max_issues_repo_path": "evalSemanticSeg/deprectaed_code.py",
"max_line_length": 155,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "41d823bb6b4ab6d4403240cc077af2d09663128d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ddatta-DAC/evalSemanticSeg",
"max_stars_repo_path": "evalSemanticSeg/deprectaed_code.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1237,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4538
}
|
import cv2
import os
import numpy as np
initialize = True
net = None
def get_output_layers(net):
    """Return the names of the network's unconnected output layers.

    FIX: the original indexed with ``i[0] - 1``, which assumes
    getUnconnectedOutLayers() returns an (N, 1) array (OpenCV < 4.5.4).
    Newer OpenCV returns a flat 1-D array of ints, which made the old
    code raise.  Flattening first handles both shapes.
    """
    layer_names = net.getLayerNames()
    # Indices from OpenCV are 1-based; normalize any array shape to a
    # flat sequence of ints before indexing into layer_names.
    out_indices = np.asarray(net.getUnconnectedOutLayers()).reshape(-1)
    return [layer_names[int(i) - 1] for i in out_indices]
def detect_common_objects(image, confidence=0.05, nms_thresh=0.05, model='yolov3', enable_gpu=False, target_class_id=None):
    """Count YOLO detections in ``image`` whose best class score exceeds
    ``confidence``.

    Parameters
    ----------
    image : BGR image array (as loaded by OpenCV).
    confidence : minimum class score for a detection to be counted.
    nms_thresh : accepted for API compatibility.
        NOTE(review): currently unused — no non-max suppression is
        applied, so overlapping boxes may be counted multiple times.
    model : accepted for API compatibility; the yolov3-tiny files are
        always loaded regardless of this value.
    enable_gpu : accepted for API compatibility; currently unused.
    target_class_id : optional int.  When given, only detections whose
        argmax class equals this id are counted (resolves the original
        "TODO CHECK CLASS ID").  Default None preserves the original
        behavior of counting every detection above the threshold.

    Returns
    -------
    int : number of qualifying detections.
    """
    scale = 0.00392  # ~1/255: normalizes pixel values for the DNN input blob

    global initialize
    global net

    blob = cv2.dnn.blobFromImage(image, scale, (416,416), (0,0,0), True, crop=False)

    # Lazily load the network once; it is reused across calls via the
    # module-level globals.
    if initialize:
        weights_file_name = './yolov3-tiny.weights'
        config_file_name = './yolov3-tiny.cfg'
        net = cv2.dnn.readNet(weights_file_name, config_file_name)
        initialize = False

    net.setInput(blob)
    outs = net.forward(get_output_layers(net))

    dogs = 0
    for out in outs:
        for detection in out:
            # Per YOLO output layout, class scores start at index 5.
            scores = detection[5:]
            class_id = np.argmax(scores)
            max_conf = scores[class_id]
            if max_conf > confidence:
                # Apply the optional class filter; None counts everything.
                if target_class_id is None or class_id == target_class_id:
                    dogs += 1
    return dogs
|
{
"alphanum_fraction": 0.6907317073,
"author": null,
"avg_line_length": 23.8372093023,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "817f0fd6ee01b22878a6d91f1e651744460ad0da",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0e1082fe0bb58f7f8075a9e4e96304d75e29becb",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "cjwood0/RocketBot",
"max_forks_repo_path": "rocket_detection.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "0e1082fe0bb58f7f8075a9e4e96304d75e29becb",
"max_issues_repo_issues_event_max_datetime": "2022-03-12T00:22:47.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-06-08T21:16:25.000Z",
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "cjwood0/RocketBot",
"max_issues_repo_path": "rocket_detection.py",
"max_line_length": 101,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "0e1082fe0bb58f7f8075a9e4e96304d75e29becb",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "cjwood0/RocketBot",
"max_stars_repo_path": "rocket_detection.py",
"max_stars_repo_stars_event_max_datetime": "2020-04-09T13:19:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-04-09T13:19:50.000Z",
"num_tokens": 287,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1025
}
|
[STATEMENT]
lemma jvm_one_step1[trans]:
"\<lbrakk> P \<turnstile> \<sigma> -jvm\<rightarrow>\<^sub>1 \<sigma>'; P \<turnstile> \<sigma>' -jvm\<rightarrow> \<sigma>'' \<rbrakk> \<Longrightarrow> P \<turnstile> \<sigma> -jvm\<rightarrow> \<sigma>''"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>P \<turnstile> \<sigma> -jvm\<rightarrow>\<^sub>1 \<sigma>'; P \<turnstile> \<sigma>' -jvm\<rightarrow> \<sigma>''\<rbrakk> \<Longrightarrow> P \<turnstile> \<sigma> -jvm\<rightarrow> \<sigma>''
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>P \<turnstile> \<sigma> -jvm\<rightarrow>\<^sub>1 \<sigma>'; P \<turnstile> \<sigma>' -jvm\<rightarrow> \<sigma>''\<rbrakk> \<Longrightarrow> P \<turnstile> \<sigma> -jvm\<rightarrow> \<sigma>''
[PROOF STEP]
by (simp add: exec_all_def1)
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Jinja_JVM_JVMExec",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 315,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
using RandomFunctions
using Test
@testset "collatz.jl" begin
@test collatz_steps(10) == [5, 16, 8, 4, 2, 1]
@test collatz_steps_02(10) == [5, 16, 8, 4, 2, 1]
@test max_stop_time(10) == (19,9)
@test max_stop_time_02(10^7) == (8400511, 685)
@test max_stop_time_03(10^7) == (8400511, 685)
end
@testset "edit_distance.jl" begin
@test edit_distance("ATCTCGT", "ACTCCTC")[1] == 3.0
end
@testset "sieve.jl" begin
@test sieve_01(7) == [2, 3, 5, 7]
@test sieve_02(7) == [2, 3, 5, 7]
@test sieve_03(7) == [2, 3, 5, 7]
end
|
{
"alphanum_fraction": 0.6054545455,
"author": null,
"avg_line_length": 27.5,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "a4ac142092d34730c43cfcbff7a0b1e539f381e6",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d7d611c691ab8e1c355718736564e2297098f23f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "camilogarciabotero/RandomFunctions.jl",
"max_forks_repo_path": "test/runtests.jl",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "d7d611c691ab8e1c355718736564e2297098f23f",
"max_issues_repo_issues_event_max_datetime": "2021-05-22T00:53:33.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-05-22T00:53:33.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "camilogarciabotero/RandomFunctions.jl",
"max_issues_repo_path": "test/runtests.jl",
"max_line_length": 55,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d7d611c691ab8e1c355718736564e2297098f23f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "camilogarciabotero/RandomFunctions.jl",
"max_stars_repo_path": "test/runtests.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 250,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 550
}
|
using BenchmarkTools
using Distributed
addprocs()
@everywhere using TrajectoryOptimization
@everywhere using SharedArrays
# Set up problem: cartpole swing-up with a quadratic objective
model, obj0 = Dynamics.cartpole_analytical
n,m = model.n, model.m

obj = copy(obj0)
obj.x0 = [0;0;0;0.]
obj.xf = [0.5;pi;0;0]
obj.tf = 2.
u_bnd = 50
x_bnd = [0.6,Inf,Inf,Inf]
# NOTE(review): the state-bounded objective is immediately overwritten by
# the control-bound-only version on the next line — confirm intended.
obj_con = ConstrainedObjective(obj,u_min=-u_bnd, u_max=u_bnd, x_min=-x_bnd, x_max=x_bnd)
obj_con = ConstrainedObjective(obj,u_min=-u_bnd, u_max=u_bnd)
obj_con = to_static(obj_con)
obj = to_static(obj)
dt = 0.1

# Initialize trajectory
solver = Solver(model,obj,dt=dt,integration=:rk3_foh)
n,m,N = get_sizes(solver)
U0 = ones(1,N)*1
X0 = line_trajectory(obj.x0, obj.xf, N)

# NOTE(review): solver is rebuilt with :rk3, discarding the :rk3_foh one.
solver = Solver(model,obj,dt=dt,integration=:rk3)
res = UnconstrainedVectorResults(n,m,N)
reS = UnconstrainedStaticResults(n,m,N)
rollout!(res,solver)
rollout!(reS,solver)
J_prev2 = cost(solver, res)
J_prev3 = cost(solver, reS)

# Benchmark Jacobian updates on vector vs static result types
@btime TrajectoryOptimization.update_jacobians!(res, solver)
@btime TrajectoryOptimization.update_jacobians!(reS, solver)

X,U = res.X,res.U
fx,fu = res.fx, res.fu
# Preallocate combined [A B] Jacobians plus views into each partition
fxu = [zeros(n,n+m) for i = 1:N-1]
fx_view = [view(F,1:n,1:n) for F in fxu]
fu_view = [view(F,1:n,n.+(1:m)) for F in fxu]
function calc_dyn_jacob(fx,fu,X,U)
    # Evaluate the discrete dynamics Jacobians at each knot point,
    # storing them in-place in `fx` and `fu`.
    # FIX: the original ignored all four arguments and read/wrote the
    # globals `res.X`, `res.U`, `res.fx`, `res.fu` instead; using the
    # arguments gives identical results at the existing call sites,
    # which pass exactly those fields.
    N = length(X)
    for k = 1:N-1
        fx[k], fu[k] = solver.Fd(X[k], U[k])  # `solver` is still a global
    end
    return nothing
end
function calc_dyn_jacob_map(fx,fu,X,U)
    # Same computation as `calc_dyn_jacob`, expressed via `map`.
    # FIX: the original ignored its arguments and used the globals
    # `res.X`, `res.U`, `res.fx`, `res.fu`; callers pass those same
    # fields, so using the arguments is behaviorally equivalent there.
    N = length(X)
    jacob = map((x,u)->solver.Fd(x,u), X, U)  # `solver` is still a global
    for k = 1:N-1
        fx[k], fu[k] = jacob[k]
    end
end
function calc_dyn_jacob_map!(fxu,X,U)
    # Fill each preallocated combined Jacobian F = [A B] in `fxu` and
    # return views into the state (`fx_view`) and control (`fu_view`)
    # partitions.
    # FIX: the original ignored `X` and `U` and mapped over the globals
    # `res.X`, `res.U`; it also computed an unused `N`.
    # `Fd`, `n` and `m` remain globals, as in the original.
    map!((x,u)->Fd(x,u), fxu, X, U)
    fx_view = [view(F,1:n,1:n) for F in fxu]
    fu_view = [view(F,1:n,n.+(1:m)) for F in fxu]
    return fx_view,fu_view
end
# Randomize the trajectory so the Jacobian checks below are nontrivial
Xrand = rand(n,N)
Urand = rand(m,N)
copyto!(res.X,Xrand)
copyto!(res.U,Urand)
k = rand(1:N-1)

# Verify the three implementations agree at a random timestep
calc_dyn_jacob(fx,fu,X,U)
A,B = fx[k],fu[k]
calc_dyn_jacob_map(fx,fu,X,U)
fx[k] == A
fu[k] == B
fx_view,fu_view = calc_dyn_jacob_map!(fxu,X,U)
fx_view[k] == fx[k]
fu_view[k] == fu[k]
Fd(X[k],U[k]) == [A B]

# Time the serial implementations
@btime calc_dyn_jacob(fx,fu,res.X,res.U)
@btime calc_dyn_jacob_map(fx,fu,res.X,res.U)
@btime calc_dyn_jacob_map!(fxu,res.X,res.U)

###################################
#          PARALLEL STUFF         #
###################################
N = 1000
# Build the dynamics Jacobian functions on every worker process
@everywhere Fd,Fc = TrajectoryOptimization.generate_dynamics_jacobians($model,$dt,TrajectoryOptimization.rk3,:zoh)
X = SharedArray{Float64,2}((n,N))
U = SharedArray{Float64,2}((m,N))
Fxu = SharedArray{Float64,3}((n,n+m,N))
# NOTE(review): Xrand/Urand were sized with the *old* N above; confirm
# these broadcasts are valid after N is reassigned to 1000.
X .= Xrand
U .= Urand
# Split the timesteps between processors
@everywhere function split_timesteps(N::Int,idx::Int=myid()-1)
    # Partition 1:N into contiguous chunks, one per worker. `idx` is the
    # worker slot (worker id minus one); slot 0 is the master process and
    # receives the empty range.
    if idx == 0
        return 1:0
    end
    nchunks = length(workers())
    # FIX: renamed the local from `split`, which shadowed Base.split.
    cuts = [round(Int,s) for s in range(0,stop=N,length=nchunks+1)]
    return cuts[idx]+1:cuts[idx+1]
end
@everywhere function jacobian_chunk!(Fxu::SharedArray,X::SharedArray,U::SharedArray,inds::UnitRange)
    # Fill the slices Fxu[:,:,k] for k in `inds` with the dynamics Jacobian
    # evaluated at (X[:,k], U[:,k]).
    # NOTE(review): relies on the global `Fd` defined via @everywhere above.
    n,N = size(X)
    m = size(U,1)  # NOTE(review): computed but unused in this function
    for k in inds
        Fxu[:,:,k] = Fd(X[:,k],U[:,k])
    end
end
@everywhere function calc_jacobian_chunks(Fxu::SharedArray,X::SharedArray,U::SharedArray)
    # Distribute Jacobian evaluation across all workers: each worker fills
    # its own contiguous chunk of timesteps in the shared array `Fxu`.
    N = size(X,2)
    @sync begin
        for w in workers()
            # Worker `w` handles the chunk for slot `w-1`; the enclosing
            # @sync waits until every remote call has completed.
            @async remotecall_wait(jacobian_chunk!,w,Fxu,X,U,split_timesteps(N,w-1))
        end
    end
end
# Smoke-test one chunk locally, then run the full distributed version
inds = split_timesteps(N,1)
jacobian_chunk!(Fxu,X,U,inds)
@time calc_jacobian_chunks(Fxu,X,U)
# Compare against the serial results computed earlier
Fxu[1:n,1:n,k] ≈ fx[k]
TrajectoryOptimization.to_array(fx) ≈ Fxu[1:n,1:n,1:end-1]
@btime calc_jacobian_chunks(Fxu,X,U)
# NOTE(review): despite the name, this loops serially over k; it appears to
# be a template for a future parallel version.
function calculate_jacobians_parallel!(res::ConstrainedIterResults, solver::Solver)::Nothing #TODO change to inplace '!' notation throughout the code
    N = solver.N
    for k = 1:N-1
        if solver.control_integration == :foh
            # First-order hold also needs the next control and the
            # continuous-time Jacobians Ac/Bc.
            res.fx[k], res.fu[k], res.fv[k] = solver.Fd(res.X[k], res.U[k], res.U[k+1])
            res.Ac[k], res.Bc[k] = solver.Fc(res.X[k], res.U[k])
        else
            res.fx[k], res.fu[k] = solver.Fd(res.X[k], res.U[k])
        end
        # Constraint Jacobians at each timestep
        solver.c_jacobian(res.Cx[k], res.Cu[k], res.X[k],res.U[k])
    end
    if solver.control_integration == :foh
        # FOH also evaluates the final timestep
        res.Ac[N], res.Bc[N] = solver.Fc(res.X[N], res.U[N])
        solver.c_jacobian(res.Cx[N], res.Cu[N], res.X[N],res.U[N])
    end
    # Terminal-state constraint Jacobian
    solver.c_jacobian(res.Cx_N, res.X[N])
    return nothing
end
|
{
"alphanum_fraction": 0.6434162063,
"author": null,
"avg_line_length": 25.5529411765,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "ff2b55a263c660b622e9af9ec9b7de255dc9ecc5",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c036b790555553b3477c7bebeaea118e17e43142",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "GathererA/TrajectoryOptimization.jl",
"max_forks_repo_path": "dev_notebooks/parallel_jacobians.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c036b790555553b3477c7bebeaea118e17e43142",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "GathererA/TrajectoryOptimization.jl",
"max_issues_repo_path": "dev_notebooks/parallel_jacobians.jl",
"max_line_length": 149,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "c036b790555553b3477c7bebeaea118e17e43142",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "GathererA/TrajectoryOptimization.jl",
"max_stars_repo_path": "dev_notebooks/parallel_jacobians.jl",
"max_stars_repo_stars_event_max_datetime": "2020-05-01T16:16:08.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-05-01T16:16:08.000Z",
"num_tokens": 1494,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 4344
}
|
import numpy as np
import os
from tools import *
def run_stochastic(dataset, name, R, c_0=3.0, L_0=1.0, x_axe_threshold=1000,
                   max_time=100, timestamps=None, show_legend=True,
                   inner_eps=1e-7, resid_eps=1e-6, M=1):
    """Run the `./main` stochastic-methods benchmark on ``dataset`` and
    plot the comparison figure for SGD / SVRG / SNewton / SVRNewton.

    FIXES: the mutable default argument ``timestamps=[20, 20, 20, 20]``
    was replaced by a None sentinel (same effective default), and the
    four copy-pasted read_dump unpackings were collapsed into a loop.

    Writes the plot to ``output/<name>_<R>.pdf``.
    """
    if timestamps is None:
        timestamps = [20, 20, 20, 20]
    print('STOCHASTIC METHODS: \t %s, \t file: %s, \t R = %f.' %
          (name, dataset, R))
    output_name = 'output/%s_%d' % (name, R)
    cmd = ('./main --dataset=%s --experiment=stochastic ' +
           '--logging_time_period=1.0 --n_iters=100000000 ' +
           '--output_name=%s --R=%d --max_time=%d --c_0=%g --L_0=%g ' +
           '--inner_eps=%g ') % \
        (dataset, output_name, R, max_time, c_0, L_0, inner_eps)
    print(('COMMAND: "%s"' % cmd), flush=True)
    # NOTE(review): the return code is ignored; a failed run only shows up
    # as missing CSV dumps below.
    os.system(cmd)
    print('PLOT RESULTS ... ', end=' ', flush=True)
    # Each method dumps one CSV of (iterations, seconds, function values,
    # data accesses); collect the four columns across methods.
    suffixes = ['SGD', 'SVRG', 'Stoch_Contr_Newton', 'Stoch_VR_Contr_Newton']
    dumps = [read_dump('%s_%s.csv' % (output_name, suffix))
             for suffix in suffixes]
    iters, secs, funcs, datas = (list(column) for column in zip(*dumps))
    plot_results(
        iters, secs, funcs, datas,
        ['SGD', 'SVRG', 'SNewton', 'SVRNewton'],
        ['blue', 'tab:blue', 'red', 'tab:orange'],
        [':', '-', '-.', '-', ':', ':'],
        [3, 1, 3, 4],
        [0.8, 0.8, 1, 1],
        ('%s, D = %d' % (name, 2 * R)),
        x_axe_threshold=x_axe_threshold,
        timestamps=timestamps,
        filename=output_name + '.pdf',
        show_legend=show_legend,
        resid_eps=resid_eps,
        use_data_accesses=True,
        M=M
    )
    print('DONE.')
    print(('=' * 80), flush=True)
# Reproduce the covtype experiments for three ball radii (plot title shows
# D = 2R); M = 581012 is the number of training examples in covtype.
run_stochastic('data/covtype_bin_sc', 'covtype', R=10, c_0=3.0, L_0=100.0,
               x_axe_threshold=200, max_time=30,
               timestamps=[[20], [20], [20], [20]],
               show_legend=True, M=581012)
run_stochastic('data/covtype_bin_sc', 'covtype', R=50, c_0=3.0, L_0=100.0,
               x_axe_threshold=200, max_time=30,
               timestamps=[[20], [20], [20], [20]],
               show_legend=False, M=581012)
run_stochastic('data/covtype_bin_sc', 'covtype', R=250, c_0=3.0, L_0=100.0,
               x_axe_threshold=200, max_time=30,
               timestamps=[[20], [20], [20], [20]],
               show_legend=False, M=581012)
# Extra Experiments:
"""
run_stochastic('data/YearPredictionMSD', 'YearPredictionMSD', R=10,
c_0=0.01, L_0=10000000.0,
x_axe_threshold=200, max_time=100,
timestamps=[[20], [20], [20], [20]],
show_legend=True, M=463715)
run_stochastic('data/YearPredictionMSD', 'YearPredictionMSD', R=50,
c_0=0.001, L_0=100000000.0,
x_axe_threshold=200, max_time=100,
timestamps=[[20], [20], [20], [20]],
show_legend=True, M=463715)
run_stochastic('data/YearPredictionMSD', 'YearPredictionMSD', R=250,
c_0=0.001, L_0=100000000.0,
x_axe_threshold=200, max_time=100,
timestamps=[[20], [20], [20], [20]],
show_legend=True, M=463715)
run_stochastic('data/mnist', 'mnist', R=10, c_0=0.01, L_0=1000000.0,
x_axe_threshold=600, max_time=200,
timestamps=[[50], [50], [50], [70]],
show_legend=True, M=60000, inner_eps=1e-5)
run_stochastic('data/mnist', 'mnist', R=50, c_0=0.01, L_0=1000000.0,
x_axe_threshold=600, max_time=200,
timestamps=[[30], [50], [50], [150]],
show_legend=False, M=60000, inner_eps=1e-5)
run_stochastic('data/mnist', 'mnist', R=250, c_0=0.01, L_0=1000000.0,
x_axe_threshold=600, max_time=200,
timestamps=[[50], [50], [50], [50]],
show_legend=False, M=60000, inner_eps=1e-5)
run_stochastic('data/higgs2m.txt', 'HIGGS2m', R=10,
c_0=3.0, L_0=10000.0,
x_axe_threshold=200, max_time=100,
timestamps=[[50], [50], [50], [50]],
show_legend=True, M=2000000)
run_stochastic('data/higgs2m.txt', 'HIGGS2m', R=50,
c_0=0.1, L_0=10000.0,
x_axe_threshold=200, max_time=100,
timestamps=[[50], [50], [40], [50]],
show_legend=False, M=2000000)
run_stochastic('data/higgs2m.txt', 'HIGGS2m', R=250,
c_0=0.1, L_0=10000.0,
x_axe_threshold=200, max_time=100,
timestamps=[[50], [50], [50], [50]],
show_legend=False, M=2000000)
"""
|
{
"alphanum_fraction": 0.5556453242,
"author": null,
"avg_line_length": 38.9842519685,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e1e464c54ecdc7e2560e8094816912a2498eebc5",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "74feca323f3b3dd9dc76efa97b54ace6258e1792",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "doikov/contracting-newton",
"max_forks_repo_path": "demo_stochastic.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "74feca323f3b3dd9dc76efa97b54ace6258e1792",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "doikov/contracting-newton",
"max_issues_repo_path": "demo_stochastic.py",
"max_line_length": 79,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "74feca323f3b3dd9dc76efa97b54ace6258e1792",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "doikov/contracting-newton",
"max_stars_repo_path": "demo_stochastic.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-25T12:00:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-21T17:10:06.000Z",
"num_tokens": 1621,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4951
}
|
#!/usr/bin/env python
import os
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from pyclowder.utils import CheckMessage
from pyclowder.files import upload_to_dataset
from pyclowder.datasets import download_metadata, upload_metadata, submit_extraction
from terrautils.extractors import TerrarefExtractor, is_latest_file, load_json_file, \
build_metadata, build_dataset_hierarchy
from terrautils.metadata import get_extractor_metadata, get_terraref_metadata
from terrautils.formats import create_geotiff, create_image
from terrautils.spatial import geojson_to_tuples
class PSIIBin2Png(TerrarefExtractor):
    def __init__(self):
        """Configure this extractor for the PSII ps2_png sensor."""
        super(PSIIBin2Png, self).__init__()

        # parse command line and load default logging configuration
        self.setup(sensor='ps2_png')
def get_image_dimensions(self, metadata):
"""Returns (image width, image height)"""
if 'sensor_fixed_metadata' in metadata:
dims = metadata['sensor_fixed_metadata']['camera_resolution']
return dims.split("x")
else:
# Default based on original fixed metadata
return (1936, 1216)
def load_png(self, file_path, height, width):
"""Load PNG image into a numpy array"""
im = Image.open(file_path)
return np.array(im).astype('uint8')
def analyze(self, img_width, img_height, frames, hist_path, coloredImg_path):
fdark = self.load_png(frames[0], img_height, img_width)
fmin = self.load_png(frames[1], img_height, img_width)
# Calculate the maximum fluorescence for each frame
fave = []
fave.append(np.max(fdark))
# Calculate the maximum value for frames 2 through 100. Bin file 101 is an XML file that lists the frame times
for i in range(2, 101):
img = self.load_png(frames[i], img_height, img_width)
fave.append(np.max(img))
# Assign the first image with the most fluorescence as F-max
fmax = self.load_png(frames[np.where(fave == np.max(fave))[0][0]], img_height, img_width)
# Calculate F-variable (F-max - F-min)
fv = np.subtract(fmax, fmin)
# Calculate Fv/Fm (F-variable / F-max)
try:
fvfm = np.divide(fv.astype('float'), fmax.astype('float'))
except:
fvfm = 0
# Fv/Fm will generate invalid values, such as division by zero
# Convert invalid values to zero. Valid values will be between 0 and 1
fvfm[np.where(np.isnan(fvfm))] = 0
fvfm[np.where(np.isinf(fvfm))] = 0
fvfm[np.where(fvfm > 1.0)] = 0
# Plot Fv/Fm (pseudocolored)
plt.imshow(fvfm, cmap="viridis")
plt.savefig(coloredImg_path)
plt.show()
plt.close()
# Calculate histogram of Fv/Fm values from the whole image
hist, bins = np.histogram(fvfm, bins=20)
# Plot Fv/Fm histogram
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.xlabel("Fv/Fm")
plt.ylabel("Pixels")
plt.show()
plt.savefig(hist_path)
plt.close()
def check_message(self, connector, host, secret_key, resource, parameters):
# Check for 0000-0101 bin files before beginning processing
if len(resource['files']) < 102:
self.log_skip(resource, "less than 102 files found")
return CheckMessage.ignore
if not is_latest_file(resource):
self.log_skip(resource, "not latest file")
return CheckMessage.ignore
timestamp = resource['dataset_info']['name'].split(" - ")[1]
hist_path = self.sensors.get_sensor_path(timestamp, opts=['combined_hist'])
coloredImg_path = self.sensors.get_sensor_path(timestamp, opts=['combined_pseudocolored'])
# Count number of bin files in dataset, as well as number of existing outputs
ind_add = 0
ind_output = 0
for ind in range(0, 102):
format_ind = "{0:0>4}".format(ind) # e.g. 1 becomes 0001
for f in resource['files']:
if f['filename'].endswith(format_ind+'.bin'):
ind_add += 1
out_png = self.sensors.get_sensor_path(timestamp, opts=[format_ind])
if os.path.exists(out_png) and not self.overwrite:
ind_output += 1
break
# Do the outputs already exist?
if ind_output == 102 and os.path.exists(hist_path) and os.path.exists(coloredImg_path):
self.log_skip(resource, "outputs already exist")
return CheckMessage.ignore
# Do we have too few input BIN files?
if ind_add < 102:
self.log_skip(resource, "less than 102 .bin files found")
return CheckMessage.ignore
# Check metadata to verify we have what we need
md = download_metadata(connector, host, secret_key, resource['id'])
if get_extractor_metadata(md, self.extractor_info['name']) and not self.overwrite:
self.log_skip(resource, "metadata indicates it was already processed")
return CheckMessage.ignore
if get_terraref_metadata(md):
return CheckMessage.download
else:
self.log_error(resource, "no terraref metadata found; sending to cleaner")
submit_extraction(connector, host, secret_key, resource['id'], "terra.metadata.cleaner")
return CheckMessage.ignore
def process_message(self, connector, host, secret_key, resource, parameters):
self.start_message(resource)
# Get bin files and metadata
metadata = None
for f in resource['local_paths']:
# First check metadata attached to dataset in Clowder for item of interest
if f.endswith('_dataset_metadata.json'):
all_dsmd = load_json_file(f)
metadata = get_terraref_metadata(all_dsmd, "ps2Top")
# Otherwise, check if metadata was uploaded as a .json file
elif f.endswith('_metadata.json') and f.find('/_metadata.json') == -1 and metadata is None:
metadata = load_json_file(f)
frames = {}
for ind in range(0, 101):
format_ind = "{0:0>4}".format(ind) # e.g. 1 becomes 0001
for f in resource['local_paths']:
if f.endswith(format_ind+'.bin'):
frames[ind] = f
if None in [metadata] or len(frames) < 101:
self.log_error(resource, 'could not find all of frames/metadata')
return
# Determine output directory
timestamp = resource['dataset_info']['name'].split(" - ")[1]
hist_path = self.sensors.create_sensor_path(timestamp, opts=['combined_hist'])
coloredImg_path = self.sensors.create_sensor_path(timestamp, opts=['combined_pseudocolored'])
uploaded_file_ids = []
target_dsid = build_dataset_hierarchy(host, secret_key, self.clowder_user, self.clowder_pass, self.clowderspace,
self.sensors.get_display_name(),
timestamp[:4], timestamp[5:7], timestamp[8:10],
leaf_ds_name=self.sensors.get_display_name()+' - '+timestamp)
(img_width, img_height) = self.get_image_dimensions(metadata)
gps_bounds = geojson_to_tuples(metadata['spatial_metadata']['ps2Top']['bounding_box'])
self.log_info(resource, "image dimensions (w, h): (%s, %s)" % (img_width, img_height))
png_frames = {}
for ind in range(0, 101):
format_ind = "{0:0>4}".format(ind) # e.g. 1 becomes 0001
png_path = self.sensors.create_sensor_path(timestamp, opts=[format_ind])
tif_path = png_path.replace(".png", ".tif")
png_frames[ind] = png_path
if not os.path.exists(png_path) or self.overwrite:
self.log_info(resource, "generating and uploading %s" % png_path)
pixels = np.fromfile(frames[ind], np.dtype('uint8')).reshape([int(img_height), int(img_width)])
create_image(pixels, png_path)
create_geotiff(pixels, gps_bounds, tif_path, None, False, self.extractor_info, metadata)
if png_path not in resource['local_paths']:
fileid = upload_to_dataset(connector, host, secret_key, target_dsid, png_path)
uploaded_file_ids.append(fileid)
self.created += 1
self.bytes += os.path.getsize(png_path)
# Generate aggregate outputs
self.log_info(resource, "generating aggregates")
if not (os.path.exists(hist_path) and os.path.exists(coloredImg_path)) or self.overwrite:
# TODO: Coerce histogram and pseudocolor to geotiff?
self.analyze(int(img_width), int(img_height), png_frames, hist_path, coloredImg_path)
self.created += 2
self.bytes += os.path.getsize(hist_path) + os.path.getsize(coloredImg_path)
if hist_path not in resource['local_paths']:
fileid = upload_to_dataset(connector, host, secret_key, target_dsid, hist_path)
uploaded_file_ids.append(fileid)
if coloredImg_path not in resource['local_paths']:
fileid = upload_to_dataset(connector, host, secret_key, target_dsid, coloredImg_path)
uploaded_file_ids.append(fileid)
# Tell Clowder this is completed so subsequent file updates don't daisy-chain
metadata = build_metadata(host, self.extractor_info, target_dsid, {
"files_created": uploaded_file_ids}, 'dataset')
self.log_info(resource, "uploading extractor metadata")
upload_metadata(connector, host, secret_key, resource['id'], metadata)
self.end_message(resource)
if __name__ == "__main__":
    # Instantiate the extractor and begin listening for Clowder messages.
    extractor = PSIIBin2Png()
    extractor.start()
|
{
"alphanum_fraction": 0.6300278773,
"author": null,
"avg_line_length": 46.7162790698,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6c367a4f7adf4cc300caa41a1e43efc2012f6e1a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2019-09-11T18:37:41.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-10-26T18:35:11.000Z",
"max_forks_repo_head_hexsha": "dffe6bdb5d8cd2d85578f03752062d2b2f213378",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "terraref/extractors-multispectral",
"max_forks_repo_path": "psii2png/terra_psii2png.py",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "dffe6bdb5d8cd2d85578f03752062d2b2f213378",
"max_issues_repo_issues_event_max_datetime": "2018-09-28T14:09:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-10-11T19:56:17.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "terraref/extractors-multispectral",
"max_issues_repo_path": "psii2png/terra_psii2png.py",
"max_line_length": 120,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "dffe6bdb5d8cd2d85578f03752062d2b2f213378",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "terraref/extractors-multispectral",
"max_stars_repo_path": "psii2png/terra_psii2png.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-15T09:05:02.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-10-08T03:43:44.000Z",
"num_tokens": 2270,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10044
}
|
(* Days of the week as a simple enumerated type. *)
Inductive day : Type :=
| monday : day
| tuesday : day
| wednesday : day
| thursday : day
| friday : day
| saturday : day
| sunday : day.

(* The next working day: friday and the weekend all map to monday. *)
Definition next_weekday (d:day) : day :=
match d with
| monday => tuesday
| tuesday => wednesday
| wednesday => thursday
| thursday => friday
| friday => monday
| saturday => monday
| sunday => monday
end.
(* Booleans, defined from scratch rather than using the standard library. *)
Inductive bool : Type :=
| true : bool
| false : bool.

(* Boolean negation. *)
Definition negb (b: bool) : bool :=
match b with
| true => false
| false => true
end.

(* Boolean conjunction: the result is b2 when b1 is true, else false. *)
Definition andb (b1:bool) (b2:bool) : bool :=
match b1 with
| true => b2
| false => false
end.

(* Boolean disjunction: the result is true when b1 is true, else b2. *)
Definition orb (b1:bool) (b2:bool) : bool :=
match b1 with
| true => true
| false => b2
end.

(* Unit tests for orb, covering its full truth table. *)
Example test_orb1: (orb true false) = true.
Proof. simpl. reflexivity. Qed.
Example test_orb2: (orb false false) = false.
Proof. simpl. reflexivity. Qed.
Example test_orb3: (orb false true) = true.
Proof. simpl. reflexivity. Qed.
Example test_orb4: (orb true true) = true.
Proof. simpl. reflexivity. Qed.

(* Familiar infix notation for the boolean operators. *)
Infix "&&" := andb.
Infix "||" := orb.
(* nandb: negated conjunction (the Sheffer stroke), built from negb/andb. *)
Definition nandb (b1:bool) (b2:bool) : bool := (negb (andb b1 b2)).

(* Truth-table tests for nandb. *)
Example test_nandb1: (nandb true false) = true.
Proof. simpl. reflexivity. Qed.
Example test_nandb2: (nandb false false) = true.
Proof. simpl. reflexivity. Qed.
Example test_nandb3: (nandb false true) = true.
Proof. simpl. reflexivity. Qed.
Example test_nandb4: (nandb true true) = false.
Proof. simpl. reflexivity. Qed.

(* andb3: three-way conjunction, true only when all three arguments are true. *)
Definition andb3 (b1:bool) (b2:bool) (b3:bool) : bool := (andb b1 (andb b2 b3)).

(* Tests for andb3: one false argument in any position yields false. *)
Example test_andb31: (andb3 true true true) = true.
Proof. simpl. reflexivity. Qed.
Example test_andb32: (andb3 false true true) = false.
Proof. simpl. reflexivity. Qed.
Example test_andb33: (andb3 true false true) = false.
Proof. simpl. reflexivity. Qed.
Example test_andb34: (andb3 true true false) = false.
Proof. simpl. reflexivity. Qed.
(* Peano natural numbers: O is zero, S is the successor constructor. *)
Inductive nat : Type :=
| O : nat
| S : nat -> nat.

(* Predecessor; pred O is defined to be O (naturals have no negatives). *)
Definition pred (n : nat) : nat :=
match n with
| O => O
| S n' => n'
end.

(* Successor by case analysis; each branch applies S to the scrutinized
   value, so this agrees with S n on every constructor. *)
Definition succ (n:nat) :nat :=
match n with
| O => (S O)
| S n' => S (S n')
end.
|
{
"alphanum_fraction": null,
"author": "soumyadsanyal",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/coq/soumyadsanyal-sf/sf-6103ef0efb46d1e9d4f34f5f8269cdc1a554afc7/first.v",
"reason": null,
"repo": "sf",
"save_path": "github-repos/coq/soumyadsanyal-sf",
"sha": "6103ef0efb46d1e9d4f34f5f8269cdc1a554afc7",
"size": null
}
|
#!/usr/bin/env python3
"""A test file for matrixpng
The canonical source for this package is https://github.com/finitemobius/matrixpng-py"""
import matrixpng
import numpy as np
__author__ = "Finite Mobius, LLC"
__credits__ = ["Jason R. Miller"]
__license__ = "MIT"
__version__ = "alpha"
__maintainer__ = "Finite Mobius, LLC"
__email__ = "jason@finitemobius.com"
__status__ = "Development"
def _main():
    """Smoke-test MatrixPNG: write a gradient matrix to a PNG, then read it back."""
    write = True
    read = True
    p = matrixpng.MatrixPNG()
    if write:
        print(p.mode)
        print(p.bitdepth)
        print(p.quantization_levels)
        # Diagonal gradient: cell (i, j) holds the value i + j.
        a = np.add.outer(np.arange(800.0), np.arange(600.0))
        # Serialize the matrix out as a PNG file.
        with open("test.png", mode='wb') as fp:
            p.matrix2png(a, fp, x_axis_first=True)
        print(p.quantization_delta)
    if read:
        p.pngfile2matrix("test.png")
if __name__ == '__main__':
    # Run the smoke test when executed directly.
    _main()
|
{
"alphanum_fraction": 0.6139112903,
"author": null,
"avg_line_length": 24.8,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "37db56dc47d3fdc8d8dd0e31a93a057cdb187de2",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0f8d4e9da04c56231d0e39c5cc0ee9cbe85da457",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "fm-jason/matrixpng-py",
"max_forks_repo_path": "test/test.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0f8d4e9da04c56231d0e39c5cc0ee9cbe85da457",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "fm-jason/matrixpng-py",
"max_issues_repo_path": "test/test.py",
"max_line_length": 88,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "0f8d4e9da04c56231d0e39c5cc0ee9cbe85da457",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "fm-jason/matrixpng-py",
"max_stars_repo_path": "test/test.py",
"max_stars_repo_stars_event_max_datetime": "2016-12-23T15:52:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-12-23T15:52:24.000Z",
"num_tokens": 272,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 992
}
|
/*=============================================================================
Copyright (c) 2009 Hartmut Kaiser
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#if !defined(BOOST_FUSION_NVIEW_ADVANCE_IMPL_SEP_24_2009_0212PM)
#define BOOST_FUSION_NVIEW_ADVANCE_IMPL_SEP_24_2009_0212PM
#include <boost/fusion/iterator/advance.hpp>
#include <boost/fusion/support/config.hpp>
namespace boost {
namespace fusion {

// Tag type identifying iterators over nview (index-remapped) sequences.
struct nview_iterator_tag;

// Forward declaration of the nview iterator template; Pos is the iterator
// into the index sequence that encodes the current position.
template <typename Sequence, typename Pos> struct nview_iterator;

namespace extension {
// Primary template, specialized per iterator tag elsewhere in Fusion.
template <typename Tag> struct advance_impl;

// advance() for nview iterators: advance the underlying position iterator
// by Dist at compile time and rewrap it over the same sequence.
template <> struct advance_impl<nview_iterator_tag> {
  template <typename Iterator, typename Dist> struct apply {
    typedef typename Iterator::first_type iterator_type;
    typedef typename Iterator::sequence_type sequence_type;

    // Resulting iterator type after the compile-time advance.
    typedef nview_iterator<
        sequence_type, typename result_of::advance<iterator_type, Dist>::type>
        type;

    BOOST_CONSTEXPR BOOST_FUSION_GPU_ENABLED static type
    call(Iterator const &i) {
      // The new position is encoded entirely in `type`; only the sequence
      // reference is carried over at runtime.
      return type(i.seq);
    }
  };
};
} // namespace extension
} // namespace fusion
} // namespace boost
#endif
|
{
"alphanum_fraction": 0.6734386757,
"author": null,
"avg_line_length": 30.2045454545,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "4ffa7e3c77c6cec1a3a270f10207501f6584db06",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "317a2a7c35c1c7e3730986668ad2270dc19809ef",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "henrywarhurst/matrix",
"max_forks_repo_path": "libs/boost_1_72_0/boost/fusion/view/nview/detail/advance_impl.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "317a2a7c35c1c7e3730986668ad2270dc19809ef",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "henrywarhurst/matrix",
"max_issues_repo_path": "libs/boost_1_72_0/boost/fusion/view/nview/detail/advance_impl.hpp",
"max_line_length": 80,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "317a2a7c35c1c7e3730986668ad2270dc19809ef",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "henrywarhurst/matrix",
"max_stars_repo_path": "libs/boost_1_72_0/boost/fusion/view/nview/detail/advance_impl.hpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 279,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1329
}
|
import pandas
import numpy
import random
# Standard 96-well plate geometry: 8 rows x 12 columns.
DIMS96 = {'rows':8,'columns':12}
def create_constant_column_plate(dims: dict, sources: int):
    """
    Generate a dataframe representing a 96-well plate that has one media
    ingredient per column.

    Wells are numbered 1..rows*columns in the index; each ingredient column
    "source N" is 1.0 for exactly one contiguous run of `rows` wells.
    """
    n_wells = dims['rows'] * dims['columns']
    # One row per well, one column per media ingredient source.
    wells = numpy.zeros((n_wells, sources))
    # Fill each plate column (a run of `rows` consecutive wells) with a
    # single ingredient.
    for src in range(sources):
        start = src * dims['rows']
        wells[start:start + dims['rows'], src] = 1.0
    # Label wells 1..N and ingredients "source 1".."source <sources>".
    well_index = list(range(1, n_wells + 1))
    source_names = list("source " + str(s) for s in range(1, sources + 1))
    frame = pandas.DataFrame(wells, index=well_index, columns=source_names)
    frame.index.name = 'wells'
    return frame
def create_random_plate(dims: dict, sources: int):
    """
    Generate a plate completely filled in each well with random combinations
    of source ingredients, each a multiple of 1/sources, which add to 1.0
    total per well.
    """
    n_wells = dims['rows'] * dims['columns']
    # One row per well, one column per media ingredient source.
    wells = numpy.zeros((n_wells, sources))
    for row in range(n_wells):
        # Draw positive integer parts summing to `sources` (inlined
        # equivalent of random_ints_with_sum: each of the sources-1 gaps is
        # an independent coin flip).
        parts = []
        run = 0
        for _ in range(sources - 1):
            run += 1
            if random.randrange(2):
                parts.append(run)
                run = 0
        parts.append(run + 1)
        # Normalize so the fractions sum to 1.0 per well.
        distribution = [p / float(sources) for p in parts]
        # Zero-pad to one entry per source, then randomize positions.
        distribution.extend([0.0 for _ in range(0, sources - len(distribution))])
        random.shuffle(distribution)
        wells[row] = distribution
    frame = pandas.DataFrame(wells,
        index=list(range(1, n_wells + 1)),
        columns=list("source " + str(s) for s in range(1, sources + 1)))
    frame.index.name = 'wells'
    return frame
def random_ints_with_sum(n):
    """
    Yield positive random integers summing to `n`, sampled uniformly from
    the ordered integer partitions (compositions) of `n`: each of the n-1
    gaps between units is independently cut with probability 1/2.
    """
    part = 1
    for _ in range(n - 1):
        if random.randrange(2):
            yield part
            part = 1
        else:
            part += 1
    yield part
if __name__ == "__main__":
    # If this is executed as a script, generate a few test plates.
    import os
    out_dir = './generated_test_files'
    # Ensure the output directory exists so to_csv does not fail on a
    # fresh checkout where it has not been created yet.
    os.makedirs(out_dir, exist_ok=True)
    constant_columns = create_constant_column_plate(dims=DIMS96, sources=12)
    constant_columns.to_csv(os.path.join(out_dir, 'constant_columns.tsv'),
                            sep='\t')
    random_plate = create_random_plate(dims=DIMS96, sources=12)
    random_plate.to_csv(os.path.join(out_dir, 'random_plate.tsv'), sep='\t')
|
{
"alphanum_fraction": 0.7013137558,
"author": null,
"avg_line_length": 32.35,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a43f20d941190f1072fec4b326e7ca0e5fb3a4f8",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e7b9a8e87a0db6f1e0f5ab973ed5e59f722bc7c3",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "medlocklab/openHTS",
"max_forks_repo_path": "test/create_test_plates.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e7b9a8e87a0db6f1e0f5ab973ed5e59f722bc7c3",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "medlocklab/openHTS",
"max_issues_repo_path": "test/create_test_plates.py",
"max_line_length": 73,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "e7b9a8e87a0db6f1e0f5ab973ed5e59f722bc7c3",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "medlocklab/openHTS",
"max_stars_repo_path": "test/create_test_plates.py",
"max_stars_repo_stars_event_max_datetime": "2020-11-12T16:25:53.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-12T16:25:53.000Z",
"num_tokens": 692,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2588
}
|
// STD headers
#include <assert.h>
#include <limits>
#include <list>
#include <string>
#include <unordered_map>
#include <vector>
// Boost headers
#include <boost/program_options.hpp>
// Custom headers
#include "cache_base.hpp"
#include "cache_belady.hpp"
#include "cache_common.hpp"
#include "utils.hpp"
using namespace caching;
namespace bopt = boost::program_options;
/**
 * Implements a single-tiered Belady cache: on contention, the flow whose
 * next packet arrival (per the offline trace) lies furthest in the future
 * is the one evicted (or the contender is simply not admitted).
 */
class BeladyCache : public BaseCache {
private:
    utils::TraceAnalyzer* analyzer_; // A TraceAnalyzer instance

    typedef std::list<size_t>::const_iterator IdxIterator;
    std::unordered_map<std::string, IdxIterator>
        flow_ids_to_current_iters_; // Dict mapping flow IDs to their current
                                    // positions in corresponding idxs lists.
    /**
     * Given the current set of cached candidates and the contending flow,
     * returns the flow ID corresponding to the flow to evict (which may be
     * the contender itself, meaning it is not admitted).
     */
    std::string getFlowIdToEvict(const std::unordered_map<std::string, CacheEntry>&
                                 candidates, const std::string& contender) {
        size_t max_candidate_occurence = 0;
        std::string flow_id_to_evict;

        for (const auto& pair : candidates) {
            const std::string& candidate = pair.first;

            // First, forward the flow's occurence idx until it corresponds to
            // a packet arrival that is GEQ clk. This value indicates when the
            // very next packet for this flow arrives.
            const auto& indices = analyzer_->getFlowData(candidate).indices();
            IdxIterator iter = flow_ids_to_current_iters_.at(candidate);
            while (iter != indices.end() && (*iter < clk())) { iter++; }
            flow_ids_to_current_iters_[candidate] = iter;

            // A finished flow never recurs: treat its next occurence as +inf.
            size_t next_occurence = (iter != indices.end()) ?
                *iter : std::numeric_limits<size_t>::max();

            // If this flow has finished, or the next packet arrival
            // for this flow is the furthest in the future, evict it.
            if (next_occurence > max_candidate_occurence) {
                max_candidate_occurence = next_occurence;
                flow_id_to_evict = candidate;
            }
        }
        // Forward the contending flow's occurence idx until it corresponds
        // to a packet arrival that is GT clk (strictly greater, since the
        // contender's current packet arrives at clk itself).
        const auto& indices = analyzer_->getFlowData(contender).indices();
        IdxIterator iter = flow_ids_to_current_iters_.at(contender);
        while (iter != indices.end() && (*iter <= clk())) { iter++; }
        flow_ids_to_current_iters_[contender] = iter;

        size_t next_contender_occurence = (iter != indices.end()) ?
            *iter : std::numeric_limits<size_t>::max();

        // If the next occurence to the contender is beyond the
        // next occurence to any of the existing cache entries,
        // do not admit the contender into the cache.
        if (next_contender_occurence >= max_candidate_occurence) {
            flow_id_to_evict = contender;
        }
        return flow_id_to_evict;
    }

public:
    /**
     * Constructs the cache. Parses "--trace" from argv to locate the input
     * trace, builds a TraceAnalyzer over it, allocates the cache sets, and
     * primes each flow's occurence iterator at the start of its index list.
     */
    BeladyCache(const size_t miss_latency, const size_t cache_set_associativity, const size_t
                num_cache_sets, const bool penalize_insertions, const HashType hash_type, int
                argc, char** argv) : BaseCache(miss_latency, cache_set_associativity,
                num_cache_sets, penalize_insertions, hash_type) {
        // Command-line arguments
        bopt::options_description options{"BeladyCache"};
        options.add_options()("trace", bopt::value<std::string>(), "(Input) trace file path");

        // Parse model parameters
        bopt::variables_map variables;
        bopt::store(bopt::command_line_parser(argc, argv).options(
            options).allow_unregistered().run(), variables);
        bopt::notify(variables);
        // NOTE(review): at("trace") throws if --trace is omitted; confirm
        // callers always supply it.
        std::string trace_fp = variables.at("trace").as<std::string>();

        // Initialize the TraceAnalyzer and the cache sets
        analyzer_ = new utils::TraceAnalyzer(trace_fp);
        for (size_t idx = 0; idx < kMaxNumCacheSets; idx++) {
            cache_sets_.push_back(new BeladyCacheSet<BeladyCache>(
                kCacheSetAssociativity, *this));
        }
        // Prime the iterators map
        const auto& flowIdsToDataMap = analyzer_->getFlowIdsToDataMap();
        for (const auto& pair : flowIdsToDataMap) {
            flow_ids_to_current_iters_[pair.first] = (
                pair.second.indices().begin());
        }
    }
    virtual ~BeladyCache() {
        delete(analyzer_);
        analyzer_ = nullptr;
    }
    /**
     * Returns the canonical cache name.
     */
    virtual std::string name() const override { return "BeladyCache"; }

    // Allow access to getFlowIdToEvict()
    friend class BeladyCacheSet<BeladyCache>;
};
// Run default benchmarks
int main(int argc, char** argv) {
    // Delegate to the shared benchmark harness, parameterized on this cache.
    BaseCache::defaultBenchmark<BeladyCache>(argc, argv);
}
|
{
"alphanum_fraction": 0.6406628941,
"author": null,
"avg_line_length": 38.9606299213,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "fa417421158f169f7ff45a23c018c0b756cb069e",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2021-06-04T14:29:27.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-10-17T21:39:38.000Z",
"max_forks_repo_head_hexsha": "39e062a34ce7d3bb693dceff0a7e68ea1ee6864b",
"max_forks_repo_licenses": [
"BSD-3-Clause-Clear"
],
"max_forks_repo_name": "Ahziu/Delayed-Hits",
"max_forks_repo_path": "caching/src/cache_belady.cpp",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "39e062a34ce7d3bb693dceff0a7e68ea1ee6864b",
"max_issues_repo_issues_event_max_datetime": "2021-02-18T08:14:29.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-02-18T08:14:29.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause-Clear"
],
"max_issues_repo_name": "Ahziu/Delayed-Hits",
"max_issues_repo_path": "caching/src/cache_belady.cpp",
"max_line_length": 94,
"max_stars_count": 15,
"max_stars_repo_head_hexsha": "39e062a34ce7d3bb693dceff0a7e68ea1ee6864b",
"max_stars_repo_licenses": [
"BSD-3-Clause-Clear"
],
"max_stars_repo_name": "Ahziu/Delayed-Hits",
"max_stars_repo_path": "caching/src/cache_belady.cpp",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T08:29:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-09-04T18:32:14.000Z",
"num_tokens": 1098,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 4948
}
|
[STATEMENT]
lemma errMOD_igSwapIGVarSTR:
fixes MOD :: "('index,'bindex,'varSort,'sort,'opSym,'var,'gTerm,'gAbs)model"
assumes "igVarIPresIGWls MOD" and "igSwapIGVar MOD"
shows "igSwapIGVar (errMOD MOD)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. igSwapIGVar (errMOD MOD)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
igVarIPresIGWls MOD
igSwapIGVar MOD
goal (1 subgoal):
1. igSwapIGVar (errMOD MOD)
[PROOF STEP]
by (simp add: igVarIPresIGWls_def igSwapIGVar_def) (metis eSwap_simp1)
|
{
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Binding_Syntax_Theory_Iteration",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 237,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
}
|
from __future__ import print_function, division
#
import sys,os
# make the local quspin checkout (two levels up) importable
quspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,quspin_path)
#
from quspin.operators import hamiltonian # Hamiltonians and operators
from quspin.basis import spin_basis_1d # Hilbert space spin basis
from quspin.tools.measurements import *
import numpy as np # generic math functions
#
L=12 # system size
# coupling strenghts
J=1.0 # spin-spin coupling
h=0.8945 # x-field strength
g=0.945 # z-field strength
# create site-coupling lists
J_zz=[[J,i,(i+1)%L] for i in range(L)] # PBC
x_field=[[h,i] for i in range(L)]
z_field=[[g,i] for i in range(L)]
# create static and dynamic lists
static_1=[["x",x_field],["z",z_field]] # H1: transverse + longitudinal fields only
static_2=[["zz",J_zz],["x",x_field],["z",z_field]] # H2: adds the Ising zz coupling
dynamic=[] # no time-dependent terms
# create spin-1/2 basis (zero-momentum, positive-parity symmetry sector)
basis=spin_basis_1d(L,kblock=0,pblock=1)
# set up Hamiltonian
H1=hamiltonian(static_1,dynamic,basis=basis,dtype=np.float64)
H2=hamiltonian(static_2,dynamic,basis=basis,dtype=np.float64)
# compute eigensystems of H1 and H2
E1,V1=H1.eigh()
psi1=V1[:,14] # pick any state as initial state
E2,V2=H2.eigh()
# calculate entanglement entropy of psi1 for the given 5-site subsystem
Sent=ent_entropy(psi1,basis,chain_subsys=[1,3,6,7,11])
print(Sent['Sent_A'])
#
# calculate long-time (diagonal ensemble) expectations
Diag_Ens=diag_ensemble(L,psi1,E2,V2,Obs=H1,delta_t_Obs=True)
print(Diag_Ens['Obs_pure'],Diag_Ens['delta_t_Obs_pure'])
#
# time-evolve state by decomposing it in an eigensystem (E1,V1)
times=np.linspace(0.0,5.0,10)
psi1_time=ED_state_vs_time(psi1,E1,V1,times,iterate=False)
print(type(psi1_time))
# as above but using a generator (states produced lazily, one per time point)
psi1_t=ED_state_vs_time(psi1,E1,V1,times,iterate=True)
print(type(psi1_t))
for i, psi1_n in enumerate(psi1_t):
    print("psi1_n is the state at time[%i]"%(i))
#
# calculate expectations of observables
Obs_time=obs_vs_time(psi1_time,times,dict(E1=H1,Energy2=H2))
print("Output keys are same as input keys:", Obs_time.keys())
E1_time=Obs_time['E1']
#
# project Hamiltonian from `kblock=0` and `pblock=1` onto full Hilbert space
proj=basis.get_proj(np.float64) # calculate projector
# np.float128 is unavailable on Windows builds of numpy, so fall back to float64
if sys.platform=="win32":
    H2_full=project_op(H2,proj,dtype=np.float64)["Proj_Obs"]
else:
    H2_full=project_op(H2,proj,dtype=np.float128)["Proj_Obs"]
print("dimenions of symmetry-reduced and full Hilbert spaces are %i and %i " %(H2.Ns,H2_full.Ns) )
#
# calculate mean level spacing of spectrum E2
d_2=mean_level_spacing(E2)
print("mean level spacings are", d_2)
|
{
"alphanum_fraction": 0.7537006579,
"author": null,
"avg_line_length": 33.3150684932,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ea6b2426d0c23bffbb0218019e62905cbea6d6ad",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 54,
"max_forks_repo_forks_event_max_datetime": "2022-03-16T06:54:33.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-01-03T18:47:52.000Z",
"max_forks_repo_head_hexsha": "769d3817870f6ff55c4283af46f94e11c36f4121",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "cileeky/QuSpin",
"max_forks_repo_path": "sphinx/doc_examples/measurements.py",
"max_issues_count": 303,
"max_issues_repo_head_hexsha": "769d3817870f6ff55c4283af46f94e11c36f4121",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T16:52:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-10-25T20:08:11.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "cileeky/QuSpin",
"max_issues_repo_path": "sphinx/doc_examples/measurements.py",
"max_line_length": 98,
"max_stars_count": 195,
"max_stars_repo_head_hexsha": "769d3817870f6ff55c4283af46f94e11c36f4121",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "nelsond/QuSpin",
"max_stars_repo_path": "sphinx/doc_examples/measurements.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-29T10:11:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-10-24T18:05:31.000Z",
"num_tokens": 804,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2432
}
|
# encoding=utf-8
import random
import numpy as np
import torch
import torch.utils.data as data
from itertools import chain
import codecs
import json
import collections
import jieba
class MyDataset(data.Dataset):
    """Torch dataset over conversations and their quote labels.

    Each item is ``(conv, label)`` where ``conv`` is a list of turns (each
    turn a list of word ids) and ``label`` is the integer quote id.
    Use :meth:`my_collate` as the DataLoader ``collate_fn`` so variable-length
    turns/conversations are padded per batch.
    """
    def __init__(self, corp, config, mode='TRAIN'):
        # `corp` is a Corpus instance; `mode` selects which of its
        # train/test/valid conversation dictionaries to flatten into lists.
        self.data_convs = []
        self.data_labels = []
        self.data_quotes = []
        self.mode = mode
        self.print_attention = config.print_attention
        if config.print_attention:
            # history maps quote id -> list of query turns (see Corpus.history)
            self.history = corp.history
            self.history_num = config.history_num
        if mode == 'TRAIN':
            convs = corp.convs
            labels = corp.labels
        elif mode == 'TEST':
            convs = corp.test_convs
            labels = corp.test_labels
        else:
            convs = corp.valid_convs
            labels = corp.valid_labels
        for cid in convs:
            self.data_labels.append(labels[cid])
            # copy the turn list so the dataset owns its own data
            self.data_convs.append([turn for turn in convs[cid]])
    def __getitem__(self, idx):
        return self.data_convs[idx], self.data_labels[idx]
    def __len__(self):
        return len(self.data_labels)
    def pad_vector(self, texts, text_size, sent_len): # Pad with 0s to fixed size
        # Returns (LongTensor of shape (batch, text_size, sent_len),
        #          per-text true turn counts, per-text per-turn true lengths).
        # Both sentences (with word id 0) and texts (with all-zero turns)
        # are padded up to the given sizes.
        text_vec = []
        text_len = []
        turn_len = []
        for one_text in texts:
            t = []
            tl = []
            for sent in one_text:
                pad_len = max(0, sent_len - len(sent))
                t.append(sent + [0] * pad_len)
                tl.append(len(sent))
            pad_size = max(0, text_size - len(t))
            text_len.append(len(t))
            t.extend([[0] * sent_len] * pad_size)
            tl.extend([0] * pad_size)
            text_vec.append(t)
            turn_len.append(tl)
        padded_vec = torch.LongTensor(text_vec)
        return padded_vec, text_len, turn_len
    def my_collate(self, batch):
        # Batch-level collate: pad every conversation in the batch to the
        # batch maxima (turn count and turn length) computed below.
        conv_vecs = [item[0] for item in batch]
        my_labels = [item[1] for item in batch]
        #quote_vecs = [item[2] for item in batch]
        conv_turn_size = max([len(c) for c in conv_vecs])
        conv_turn_len = max([len(sent) for sent in chain.from_iterable([c for c in conv_vecs])])
        conv_vecs, conv_lens, conv_turn_lens = self.pad_vector(conv_vecs, conv_turn_size, conv_turn_len)
        if self.print_attention:
            # Also pad the history turns associated with each label's quote.
            #print(self.history)
            # hist_vecs = [self.history[l][:self.history_num] for l in my_labels] # if self.history[l] else []
            hist_vecs = [self.history[l] for l in my_labels]
            hist_size = max([len(h) for h in hist_vecs])
            hist_turn_len = max([len(sent) for sent in chain.from_iterable([h for h in hist_vecs])])
            hist_vecs, hist_lens, hist_turn_lens = self.pad_vector(hist_vecs, hist_size, hist_turn_len)
        my_labels = torch.Tensor(my_labels)
        #padded_quotes = torch.Tensor(padded_quotes)
        if self.print_attention:
            return conv_vecs, conv_lens, conv_turn_lens, my_labels, hist_vecs, hist_lens, hist_turn_lens
        return conv_vecs, conv_lens, conv_turn_lens, my_labels
class Corpus:
    """Container that reads the training corpus and builds vocabularies.

    On construction it loads the quote/explanation/example dictionaries,
    tokenizes every training conversation, builds the word-id mappings
    (shared or separate for quotes depending on ``config.same_vocab``),
    pads the quote and explanation tensors, and wraps the training split
    in a :class:`MyDataset`.  Test/validation splits are added afterwards
    via :meth:`test_corpus`.

    Side effect: ``config.quote_len`` is set to the number of quotes.
    """
    def __init__(self, config):
        self.turnNum = 0 # Number of messages
        self.convNum = 0 # Number of conversations
        self.userNum = 0 # Number of users
        self.userIDs = {} # Dictionary that maps users to integer IDs
        self.r_userIDs = {} # Inverse of last dictionary
        self.wordNum = 3 # Number of words
        self.wordIDs = {'<Pad>': 0, '<UNK>': 1, '<CLS>': 2} # Dictionary that maps words to integers
        self.r_wordIDs = {0: '<Pad>', 1: '<UNK>', 2: '<CLS>'} # Inverse of last dictionary
        self.wordCount = collections.Counter()
        # Separate quote-side vocabulary, only used when config.same_vocab is False.
        self.quote_wordNum = 3
        self.quote_wordIDs = {'<Pad>': 0, '<UNK>': 1, '<CLS>': 2}
        self.quote_r_wordIDs = {0: '<Pad>', 1: '<UNK>', 2: '<CLS>'}
        self.quote_wordCount = collections.Counter()
        self.use_transformer = (config.turn_encoder == 'transformer')
        # Each conv is a list of turns, each turn is [userID, [w1, w2, w3, ...]]
        self.convs = collections.defaultdict(list)
        self.explanation = dict()
        self.example = {}
        self.labels = {}
        self.quotes = []
        self.quotes_exp = []
        # Each history is a list of query turns, each turn is [w1, w2, w3, ...]
        self.history = collections.defaultdict(list)
        self.test_convs = collections.defaultdict(list)
        self.test_labels = {}
        self.test_quotes = {}
        self.valid_convs = collections.defaultdict(list)
        self.valid_labels = {}
        self.valid_quotes = {}
        self.turn_length_max = config.turn_length_max
        self.length_max = config.length_max
        # quote_dic: quote text -> id; exp_dic/exa_dic: id (as str) -> text.
        f1 = codecs.open(config.filename + '_quote_3dicts.json', 'r', 'utf-8')
        self.quote_dic, self.exp_dic, self.exa_dic = json.load(f1)
        self.r_quote_dic = {}
        for k in self.quote_dic:
            self.r_quote_dic[self.quote_dic[k]] = k
        config.quote_len = len(self.quote_dic)
        with codecs.open(config.train_file, 'r', 'utf-8') as f:
            lines = f.readlines()
        #line = [[turn1, turn2, ...], quote, [quote_expanation, quote_example]]
        for line in lines:
            msgs = json.loads(line)
            #current_turn_num = 0
            # msgs[0] = [turn1, turn2, ...]
            # Keep only the last turn_length_max turns of each conversation.
            for turn in msgs[0][-config.turn_length_max:]:
                if config.turn_encoder == 'transformer':
                    # Transformers get a leading <CLS> token (id 2).
                    words = [2]
                else:
                    words = []
                for word in turn.split(' '):
                    # self.wordIDs: {word: id}
                    if word not in self.wordIDs:
                        self.wordIDs[word] = self.wordNum
                        self.r_wordIDs[self.wordNum] = word
                        self.wordNum += 1
                    self.wordCount[self.wordIDs[word]] += 1
                    words.append(self.wordIDs[word])
                words = words[:config.length_max]
                self.convs[self.convNum].append(words)
                #self.users[self.userIDs[user_id]].append([self.convNum, words])
                self.turnNum += 1
            # Use medium-length final turns (10..20 ids) as "history" queries
            # for the quote they triggered.
            if 10 <= len(self.convs[self.convNum][-1]) <= 20:
                self.history[self.quote_dic[msgs[1]]].append(self.convs[self.convNum][-1])
            self.labels[self.convNum] = self.quote_dic[msgs[1]]
            self.convNum += 1
        # Tokenize every quote and its explanation. Weibo quotes are split
        # per character, Reddit/test quotes per whitespace token.
        for quote in self.quote_dic:
            if config.turn_encoder == 'transformer':
                words = [2]
            else:
                words = []
            if config.filename == 'Weibo':
                list1 = list(quote)
                #print(list1)
            elif config.filename == 'Reddit' or config.filename == 'test':
                list1 = quote.split(' ')
                #print(list1)
            else:
                print('Wrong')
                exit()
            list1_exp = jieba.lcut(self.exp_dic[str(self.quote_dic[quote])])
            #list2 = list1.exten(list1_exp)
            for word in list1:
                if not config.same_vocab:
                    #print('same_vocab')
                    if word not in self.quote_wordIDs:
                        self.quote_wordIDs[word] = self.quote_wordNum
                        self.quote_r_wordIDs[self.quote_wordNum] = word
                        self.quote_wordNum += 1
                    self.quote_wordCount[self.quote_wordIDs[word]] += 1
                    words.append(self.quote_wordIDs[word])
                else:
                    if word not in self.wordIDs:
                        self.wordIDs[word] = self.wordNum
                        self.r_wordIDs[self.wordNum] = word
                        self.wordNum += 1
                    self.wordCount[self.wordIDs[word]] += 1
                    words.append(self.wordIDs[word])
            self.quotes.append(words)
            if config.turn_encoder == 'transformer':
                words = [2]
            else:
                words = []
            for word in list1_exp:
                if not config.same_vocab:
                    #print('same_vocab')
                    if word not in self.quote_wordIDs:
                        self.quote_wordIDs[word] = self.quote_wordNum
                        self.quote_r_wordIDs[self.quote_wordNum] = word
                        self.quote_wordNum += 1
                    self.quote_wordCount[self.quote_wordIDs[word]] += 1
                    words.append(self.quote_wordIDs[word])
                else:
                    if word not in self.wordIDs:
                        self.wordIDs[word] = self.wordNum
                        self.r_wordIDs[self.wordNum] = word
                        self.wordNum += 1
                    self.wordCount[self.wordIDs[word]] += 1
                    words.append(self.wordIDs[word])
            self.quotes_exp.append(words)
        # Pad quotes and explanations to their respective corpus maxima.
        max_quote_len = max([len(q) for q in self.quotes])
        max_exp_len = max([len(q) for q in self.quotes_exp])
        self.padded_quotes = []
        self.quote_lens = []
        self.padded_exp = []
        self.exp_lens = []
        for sent in self.quotes:
            pad_len = max(0, max_quote_len - len(sent))
            self.padded_quotes.append(sent+[0]*pad_len)
            self.quote_lens.append(len(sent))
        self.padded_quotes = torch.LongTensor(self.padded_quotes)
        for sent in self.quotes_exp:
            pad_len = max(0, max_exp_len - len(sent))
            self.padded_exp.append(sent + [0] * pad_len)
            self.exp_lens.append(len(sent))
        self.padded_exp = torch.LongTensor(self.padded_exp)
        self.train_data = MyDataset(self, config, 'TRAIN')
        print("Corpus initialization over! QuoteNum: %d ConvNum: %d TurnNum: %d" % (len(self.quote_lens), self.convNum, self.turnNum))
    def test_corpus(self, test_file, mode='TEST'): # mode == 'TEST' or mode == 'VALID'
        # Tokenize a held-out split using the *frozen* training vocabulary:
        # unknown words map to <UNK>, and conversation ids keep incrementing
        # the shared self.convNum counter.
        with codecs.open(test_file, 'r', 'utf-8') as f:
            lines = f.readlines()
        for line in lines:
            msgs = json.loads(line)
            for turn in msgs[0][-self.turn_length_max:]:
                if self.use_transformer:
                    words = [2]
                else:
                    words = []
                for word in turn.split(' '):
                    try:
                        words.append(self.wordIDs[word])
                    except KeyError: # for the words that is out of vocabulary
                        words.append(self.wordIDs['<UNK>'])
                        # if word not in self.oovIDs:
                        #     self.oovIDs[word] = len(self.oovIDs)
                        #     self.r_oovIDs[self.oovIDs[word]] = word
                if len(words) == 0: # in case some turns are null turn without words
                    words.append(self.wordIDs['<UNK>'])
                words = words[:self.length_max]
                if mode == 'TEST':
                    self.test_convs[self.convNum].append(words)
                else:
                    self.valid_convs[self.convNum].append(words)
            # words = []
            # for word in msgs[1].split():
            #     try:
            #         words.append(self.wordIDs[word])
            #     except KeyError:
            #         words.append(self.wordIDs['<UNK>'])
            # if mode == 'TEST':
            #     self.test_quotes[self.convNum] = words
            # else:
            #     self.valid_quotes[self.convNum] = words
            # # current_turn_num += 1
            # self.quotes[self.convNum] = words
            if mode == 'TEST':
                self.test_labels[self.convNum] = self.quote_dic[msgs[1]]
                if 10 <= len(self.test_convs[self.convNum][-1]) <= 20:
                    self.history[self.quote_dic[msgs[1]]].append(self.test_convs[self.convNum][-1])
            else:
                self.valid_labels[self.convNum] = self.quote_dic[msgs[1]]
                if 10 <= len(self.valid_convs[self.convNum][-1]) <= 20:
                    self.history[self.quote_dic[msgs[1]]].append(self.valid_convs[self.convNum][-1])
            self.convNum += 1
        print("%s Corpus process over!" % mode)
def create_embedding_matrix(dataname, word_idx, word_num, embedding_dim=200):
    """Build a ``(word_num, embedding_dim)`` embedding matrix from pretrained vectors.

    Parameters
    ----------
    dataname : str
        Dataset name.  A name starting with ``'W'`` (Weibo) selects the Tencent
        Chinese embeddings file, anything else selects the GloVe file.
    word_idx : dict
        Mapping from integer word id to word string (e.g. ``Corpus.r_wordIDs``).
    word_num : int
        Vocabulary size; number of rows in the returned matrix.
    embedding_dim : int, optional
        Dimension of the pretrained vectors (default 200).

    Returns
    -------
    torch.Tensor
        Shape ``(word_num, embedding_dim)``; moved to GPU when CUDA is
        available.  Row 0 (the padding id) is left all-zero; ids whose word is
        missing from the pretrained file get a random normal vector.
    """
    pretrain_file = 'Tencent_AILab_ChineseEmbedding.txt' if dataname[0] == 'W' else 'glove.6B.200d.txt'
    pretrain_words = {}
    # Embedding files are UTF-8 text; specify the encoding explicitly so the
    # read does not depend on the platform default.
    with open(pretrain_file, 'r', encoding='utf-8') as f:
        for line in f:
            infos = line.split()
            wd = infos[0]
            # BUG FIX: the `np.float` alias was deprecated in NumPy 1.20 and
            # removed in 1.24; use the explicit float64 dtype instead.
            vec = np.array(infos[1:]).astype(np.float64)
            pretrain_words[wd] = vec
    weights_matrix = np.zeros((word_num, embedding_dim))
    for idx in word_idx.keys():
        if idx == 0:
            # keep the <Pad> row all-zero
            continue
        try:
            weights_matrix[idx] = pretrain_words[word_idx[idx]]
        except KeyError:
            # word not in the pretrained file: random normal initialization
            weights_matrix[idx] = np.random.normal(size=(embedding_dim,))
    if torch.cuda.is_available():  # run in GPU
        return torch.Tensor(weights_matrix).cuda()
    else:
        return torch.Tensor(weights_matrix)
def pretrain_corpus_construction(batch, config):
    """Construct a self-supervised pretraining batch by corrupting turns.

    ``batch`` is unpacked as (convs, conv_lens, conv_turn_lens, users,
    user_lens, user_turn_lens, ..., labels).  Depending on
    ``config.pretrain_type`` the conversations are corrupted in place and
    new binary labels are produced:

    * ``"RR"``     - with probability ``config.Prob`` replace (or shuffle)
      the last turn of a conversation; label 1 marks a corrupted sample.
    * ``"REPLACE"`` - replace random turns with turns from other
      conversations; per-turn labels mark the replaced positions.
    * ``"SWITCH"``  - permute a random subset of turns within each
      conversation; per-turn labels mark the moved positions.

    NOTE(review): convs/users are indexed like tensors (``convs[i, t, :]``),
    presumably padded LongTensors from MyDataset.pad_vector - confirm with
    the caller.  Mutation happens in place on the input tensors.
    """
    convs, conv_lens, conv_turn_lens = batch[0], batch[1], batch[2]
    users, user_lens, user_turn_lens = batch[3], batch[4], batch[5]
    labels = batch[-1]
    if config.pretrain_type == "RR":
        # One Bernoulli(config.Prob) draw per conversation.
        need_replace = torch.rand(len(labels))
        need_replace = need_replace.le(config.Prob)
        for i in range(len(labels)):
            if need_replace[i]:
                if user_lens[i] == 0:
                    # No user turns available: shuffle the words of the last
                    # real turn instead of replacing it.
                    turn_len = conv_turn_lens[i][conv_lens[i]-1]
                    convs[i, conv_lens[i]-1, :turn_len] = convs[i, conv_lens[i]-1, torch.randperm(turn_len)]
                else:
                    # Replace the last turn with a random user turn, truncated
                    # so it fits within the padded turn width.
                    replace_idx = random.randint(0, user_lens[i]-1)
                    replace_turn_len = min(max(convs.size(-1), conv_turn_lens[i][conv_lens[i]-1]), user_turn_lens[i][replace_idx])
                    convs[i, conv_lens[i]-1, :replace_turn_len] = users[i, replace_idx, :replace_turn_len]
                    conv_turn_lens[i][conv_lens[i] - 1] = replace_turn_len
                labels[i] = 1
            else:
                labels[i] = 0
    elif config.pretrain_type == "REPLACE":
        conv_num = len(convs)
        turn_num = max(conv_lens)
        # Per-(conversation, turn) binary labels.
        labels = torch.zeros((conv_num, turn_num))
        for c in range(conv_num):
            for t in range(turn_num):
                if t >= conv_lens[c]:
                    break
                if random.random() <= config.Prob:
                    labels[c, t] = 1
                    # Pull a random real turn from a *different* conversation.
                    rc = random.choice([i for i in range(conv_num) if i != c])
                    rt = random.choice([i for i in range(conv_lens[rc])])
                    convs[c, t, :] = convs[rc, rt, :]
                    conv_turn_lens[c][t] = conv_turn_lens[rc][rt]
    elif config.pretrain_type == "SWITCH":
        conv_num = len(convs)
        turn_num = max(conv_lens)
        labels = torch.zeros((conv_num, turn_num))
        for c in range(conv_num):
            need_switch = [random.random() <= config.Prob for i in range(conv_lens[c])]
            if sum(need_switch) <= 1:
                # Fewer than two turns selected: force a swap of two random turns.
                switch_idx = random.sample(list(range(conv_lens[c])), 2)
            else:
                switch_idx = [i for i in range(conv_lens[c]) if need_switch[i]]
            original_idx = list(switch_idx)
            random.shuffle(switch_idx)
            # Ensure the permutation has no fixed points: rotate any index that
            # shuffled back onto itself.
            for i, idx in enumerate(switch_idx):
                if idx == original_idx[i]:
                    switch_idx[i], switch_idx[(i+1) % len(switch_idx)] = switch_idx[(i+1) % len(switch_idx)], switch_idx[i]
            convs[c, original_idx, :] = convs[c, switch_idx, :]
            new_turn_len = [l for l in conv_turn_lens[c]]
            for i in range(len(original_idx)):
                new_turn_len[original_idx[i]] = conv_turn_lens[c][switch_idx[i]]
            conv_turn_lens[c] = new_turn_len
            labels[c, original_idx] = 1
    else:
        print('Wrong Pretrain Type!')
        exit(0)
    return convs, conv_lens, conv_turn_lens, users, user_lens, user_turn_lens, labels
|
{
"alphanum_fraction": 0.5234895338,
"author": null,
"avg_line_length": 44.8426666667,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ef698612efd83527a771b1ea86437067455615b3",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-06-22T08:24:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-06-22T08:24:57.000Z",
"max_forks_repo_head_hexsha": "40a875a41f10a597604206e067a16cbbfc88cdd7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Lingzhi-WANG/Quotation-Recommendation",
"max_forks_repo_path": "data_process.py",
"max_issues_count": 5,
"max_issues_repo_head_hexsha": "40a875a41f10a597604206e067a16cbbfc88cdd7",
"max_issues_repo_issues_event_max_datetime": "2021-10-24T01:26:17.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-09-23T07:02:19.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Lingzhi-WANG/Quotation-Recommendation",
"max_issues_repo_path": "data_process.py",
"max_line_length": 134,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "40a875a41f10a597604206e067a16cbbfc88cdd7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Lingzhi-WANG/Quotation-Recommendation",
"max_stars_repo_path": "data_process.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-04T11:30:51.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-05-24T03:46:10.000Z",
"num_tokens": 3764,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 16816
}
|
# Common supertype of all parametric machines defined in this file.
abstract type AbstractMachine end
# Interface stub: each machine subtype adds a method `filtrations(m, y₀)`
# returning a (forward, backward) pair of Filtration objects.
function filtrations end
# Machines are callable: build the filtrations for the input `y₀` and hand
# them to `solve` together with the weights `W` and nonlinearity `σ`.
# NOTE(review): `solve` is defined elsewhere in the package.
function (m::AbstractMachine)(y₀)
    forward, backward = filtrations(m, y₀)
    return solve(y₀, m.W, m.σ, (forward, backward))
end
# Effective fan measure for a weight array of size `dims`: the product of the
# kernel dimensions times the sum of the last two (channel) dimensions.
function sum_dims(dims::Tuple)
    kernel = prod(dims[1:end-2])
    channels = sum(dims[end-1:end])
    return kernel * channels
end

# glorot initialization, from https://github.com/FluxML/Flux.jl
function glorot_normal(dims...)
    return randn(Float32, dims...) .* sqrt(2.0f0 / sum_dims(dims))
end

function glorot_uniform(dims...)
    return (rand(Float32, dims...) .- 0.5f0) .* sqrt(24.0f0 / sum_dims(dims))
end
# Split `1:sum(dims)` into consecutive ranges of the given lengths,
# e.g. [2, 3, 1] ↦ [1:2, 3:5, 6:6].
function consecutive_ranges(dims::Vector{Int})
    ranges = Vector{UnitRange{Int}}(undef, length(dims))
    stop = 0
    for (i, d) in enumerate(dims)
        start = stop + 1
        stop += d
        ranges[i] = start:stop
    end
    return ranges
end
# Build the forward/backward filtrations for a dense machine.  The input `y`
# is (channels, minibatch); each range of `dims` defines one slab of channels.
function dense_filtrations(y, dims::Vector{Int})
    channels = sum(dims)
    minibatch = size(y, 2)
    # One (channel range, minibatch range) shape per slab.
    shapes = [(rg, 1:minibatch) for rg in consecutive_ranges(dims)]
    # Every slab reads the full input.
    input(_) = (1:channels, 1:minibatch)
    # Weight slice: rows from the input channels, columns from the output channels.
    project(input_shape, output_shape) = (first(input_shape), first(output_shape))
    forward = Filtration(shapes, input, project)
    backward = Filtration(reverse(shapes), input, project)
    return (forward=forward, backward=backward)
end
"""
DenseMachine(W, σ, dims::Vector{Int})
Create a `DenseMachine` object from a square weight matrix `W`, pointwise nonlinearity `σ`
and filtration sequence `dims`.
The values of `dims` specify how to split the input space into a sequence of subspaces.
In particular, it is required that `size(W) == (sum(dims), sum(dims))`.
"""
struct DenseMachine{T, F} <: AbstractMachine
W::T
σ::F
dims::Vector{Int}
end
@functor DenseMachine (W, σ)
DenseMachine(σ, dims::Vector{Int}; init=glorot_uniform) = DenseMachine(dims, σ; init)
"""
DenseMachine(dims::Vector{Int}, σ; init=glorot_uniform)
Return a `DenseMachine(W, σ, dims)` object, where `W = init(sum(dims), sum(dims))`.
Default to Glorot uniform initialization.
"""
function DenseMachine(dims::Vector{Int}, σ; init=glorot_uniform)
W = init(sum(dims), sum(dims))
return DenseMachine(W, σ, dims)
end
filtrations(m::DenseMachine, y₀) = dense_filtrations(y₀, m.dims)
# Pair up a flat padding tuple: (a, b, c, d) ↦ ((a, b), (c, d)).
nest_pads(::Tuple{}) = ()

function nest_pads(pad::Tuple)
    front, back = pad[1], pad[2]
    rest = pad[3:end]
    return ((front, back), nest_pads(rest)...)
end

# Extend `output_range` by `pad0` on the left and `pad1` on the right.
function conv_range(output_range, (pad0, pad1)::Tuple{Int,Int})
    lo = first(output_range) - pad0
    hi = last(output_range) + pad1
    return lo:hi
end
# Build the forward/backward filtrations for a convolutional machine.
# `y` has N dims: data dimensions, then channels, then minibatch.  `pad`
# packs (left, right) padding for every data dimension, hence the 2*(N-2)
# length requirement checked below.
function conv_filtrations(y::AbstractArray{T, N}, dims::Vector{Int}; pad::NTuple{M, Int}) where {T, N, M}
    M == 2 * (N - 2) || throw(ArgumentError("padding must have twice the length of image dimensions"))
    pads = nest_pads(pad)
    ranges = consecutive_ranges(dims)
    channels, minibatch = sum(dims), size(y, N)
    # One shape per channel slab: full data axes, the slab's channel range, full minibatch.
    shapes = [ntuple(n -> ifelse(n == N - 1, rg, UnitRange(axes(y, n))), N) for rg in ranges]
    # Input region read by a slab in the forward pass: data axes padded
    # outward by `pads`, all channels, full minibatch.
    function input(shape)
        data = ntuple(n -> shape[n], N - 2)
        zdata = map(conv_range, data, pads)
        return (zdata..., 1:channels, 1:minibatch)
    end
    # Backward-pass counterpart: same but with each (left, right) pad flipped.
    function input′(shape)
        data = ntuple(n -> shape[n], N - 2)
        zdata = map(conv_range, data, map(reverse, pads))
        return (zdata..., 1:channels, 1:minibatch)
    end
    # Weight slice for a given (input, output) shape pair: full kernel
    # extent on the data axes, then input channels, then output channels.
    function project(input_shape, output_shape)
        return ntuple(N) do n
            n == N && return output_shape[end - 1]
            n == N - 1 && return input_shape[end - 1]
            p0, p1 = pads[n]
            return 1:(p0 + p1 + 1)
        end
    end
    return (
        forward=Filtration(shapes, input, project),
        backward=Filtration(reverse(shapes), input′, project)
    )
end
"""
ConvMachine(W, σ, dims::Vector{Int}, pad::Dims)
Create a `ConvMachine` object from a weight array `W`, pointwise nonlinearity `σ`,
filtration sequence `dims` and padding `pad`.
The values of `dims` specify how to split the input space into a sequence of subspaces.
In particular, it is required that `size(W) == (kernelsize..., sum(dims), sum(dims))`,
where `kernelsize` is such that convolution by `W` with padding `pad` preserves
input dimension.
Padding `pad` has length twice the number of kernel dimensions (for example, it takes
four values for image convolutions).
"""
struct ConvMachine{T, F, M} <: AbstractMachine
W::T
σ::F
dims::Vector{Int}
pad::NTuple{M, Int}
end
@functor ConvMachine (W, σ)
ConvMachine(W, σ, dims; pad) = ConvMachine(W, σ, dims, pad)
ConvMachine(σ, dims::Vector{Int}; pad, init=glorot_uniform) = ConvMachine(dims, σ; pad, init)
"""
ConvMachine(dims::Vector{Int}, σ; pad, init=glorot_uniform)
Return a `ConvMachine(W, σ, dims, pad)` object, where `W = init(kernelsize..., sum(dims), sum(dims))`.
Default to Glorot uniform initialization.
Here `kernelsize` is such that convolution with a kernel of size `kernelsize`
and padding `pad` preserves input dimension.
Padding `pad` has length twice the number of kernel dimensions (for example, it takes
four values for image convolutions).
"""
function ConvMachine(dims::Vector{Int}, σ; pad, init=glorot_uniform)
kernelsize = map(((p0, p1),) -> p0 + p1 + 1, nest_pads(pad))
W = init(kernelsize..., sum(dims), sum(dims))
return ConvMachine(W, σ, dims; pad)
end
filtrations(m::ConvMachine, y₀) = conv_filtrations(y₀, m.dims; m.pad)
# Build the forward/backward filtrations for a recurrent machine.  `y` is
# (time, channels, minibatch); time is processed in chunks of `timeblock`
# steps, and each step may look back up to `pad` steps (forward direction).
function recur_filtrations(y, dims::Vector{Int}; pad, timeblock)
    ranges = consecutive_ranges(dims)
    datalength, channels, minibatch = size(y, 1), sum(dims), size(y, 3)
    # Number of time blocks needed to cover the sequence.
    N = ceil(Int, datalength / timeblock)
    # Shapes are ordered time-block-major, channel-slab-minor.
    shapes = [(timeblock * (n - 1) + 1 : timeblock * n, range, 1:minibatch) for n in 1:N for range in ranges]
    # Forward pass reads `pad` extra steps to the left; backward to the right.
    input((timerange, _, mb)) = (conv_range(timerange, (pad, 0)), 1:channels, mb)
    input′((timerange, _, mb)) = (conv_range(timerange, (0, pad)), 1:channels, mb)
    # Temporal kernel slice: which of the pad+1 taps connect the input block
    # to the output block, clamped into the valid tap range.
    function project(input_shape, output_shape)
        input_range, output_range = first(input_shape), first(output_shape)
        w0 = 1 + clamp(first(output_range) - last(input_range), 0:pad)
        w1 = 1 + clamp(last(output_range) - first(input_range), 0:pad)
        return (w0:w1, input_shape[end-1], output_shape[end-1])
    end
    # Tap indices reversed for the backward direction.
    flip = (pad+1):-1:1
    function project′(input_shape, output_shape)
        input_range, output_range = first(input_shape), first(output_shape)
        w0 = 1 + clamp(first(input_range) - last(output_range), 0:pad)
        w1 = 1 + clamp(last(input_range) - first(output_range), 0:pad)
        return (flip[w1]:flip[w0], input_shape[end-1], output_shape[end-1])
    end
    return (
        forward=Filtration(shapes, input, project),
        backward=Filtration(reverse(shapes), input′, project′)
    )
end
"""
RecurMachine(W, σ, dims::Vector{Int}, pad::Int, timeblock::Int)
Create a `RecurMachine` object from a weight array `W`, pointwise nonlinearity `σ`,
filtration sequence `dims`, padding `pad` and time block `timeblock`.
The values of `dims` specify how to split the input space into a sequence of subspaces.
In particular, it is required that `size(W) == (pad + 1, sum(dims), sum(dims))`.
"""
struct RecurMachine{T, F} <: AbstractMachine
W::T
σ::F
dims::Vector{Int}
pad::Int
timeblock::Int
end
@functor RecurMachine (W, σ)
RecurMachine(W, σ, dims; pad, timeblock) = RecurMachine(W, σ, dims, pad, timeblock)
RecurMachine(σ, dims::Vector{Int}; pad, timeblock, init=glorot_uniform) = RecurMachine(dims, σ; pad, timeblock, init)
"""
RecurMachine(dims::Vector{Int}, σ; pad, timeblock, init=glorot_uniform)
Return a `RecurMachine(W, σ, dims, pad, timeblock)` object, where `W = init(pad + 1, sum(dims), sum(dims))`.
Default to Glorot uniform initialization.
"""
function RecurMachine(dims::Vector{Int}, σ; pad, timeblock, init=glorot_uniform)
W = init(pad + 1, sum(dims), sum(dims))
return RecurMachine(W, σ, dims; pad, timeblock)
end
filtrations(m::RecurMachine, y₀) = recur_filtrations(y₀, m.dims; m.pad, m.timeblock)
# Custom AD rule: `filtrations` only builds index bookkeeping, so it is
# treated as non-differentiable - all inputs get NoTangent.
# NOTE(review): the pullback is declared with two arguments, while
# ChainRulesCore pullbacks are normally called with a single cotangent;
# confirm against the call site before changing.
function ChainRulesCore.rrule(::typeof(filtrations), m::AbstractMachine, y)
    res = filtrations(m, y)
    function filtrations_pullback(_, _)
        return NoTangent(), NoTangent(), NoTangent()
    end
    return res, filtrations_pullback
end
|
{
"alphanum_fraction": 0.670526709,
"author": null,
"avg_line_length": 35.6933333333,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "aefbc330243006b5ed57e80994a960db6b155bae",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8486cdc67efde37cf433d25c9675fc63bd33adc2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Veos-Digital/ParametricMachines.jl",
"max_forks_repo_path": "src/architectures.jl",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "65bb87b45dc375dbeb7412b0a7012afc02a19d8e",
"max_issues_repo_issues_event_max_datetime": "2022-01-21T12:15:36.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-12-14T11:16:18.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "BeaverResearch/ParametricMachines.jl",
"max_issues_repo_path": "src/architectures.jl",
"max_line_length": 117,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "8486cdc67efde37cf433d25c9675fc63bd33adc2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Veos-Digital/FiniteDepthParametricMachines.jl",
"max_stars_repo_path": "src/architectures.jl",
"max_stars_repo_stars_event_max_datetime": "2021-12-20T13:28:57.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-14T11:39:00.000Z",
"num_tokens": 2393,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 8031
}
|
# Copyright (c) 2018-present, Royal Bank of Canada and other authors.
# See the AUTHORS.txt file for a list of contributors.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import torch
from torch.distributions import laplace
from torch.distributions import uniform
from torch.nn.modules.loss import _Loss
from advertorch.utils import clamp
from advertorch.utils import clamp_by_pnorm
from advertorch.utils import batch_multiply
from advertorch.utils import normalize_by_pnorm
from advertorch.utils import predict_from_logits
from advertorch.loss import ZeroOneLoss
from advertorch.attacks import Attack, LabelMixin
def rand_init_delta(delta, x, ord, eps, clip_min, clip_max):
    """Randomly initialize the perturbation ``delta`` in place.

    Samples a perturbation within the ``ord``-norm ball of radius ``eps``
    around ``x`` (supported: inf, 2, 1), then clamps ``x + delta`` into
    ``[clip_min, clip_max]`` so the adversarial example stays in the data
    domain.  ``delta.data`` is overwritten; the final value is also returned.
    """
    # TODO: Currently only considered one way of "uniform" sampling
    # for Linf, there are 3 ways:
    #   1) true uniform sampling by first calculate the rectangle then sample
    #   2) uniform in eps box then truncate using data domain (implemented)
    #   3) uniform sample in data domain then truncate with eps box
    # for L2, true uniform sampling is hard, since it requires uniform sampling
    #   inside a intersection of cube and ball, so there are 2 ways:
    #   1) uniform sample in the data domain, then truncate using the L2 ball
    #      (implemented)
    #   2) uniform sample in the L2 ball, then truncate using the data domain
    # for L1: uniform l1 ball init, then truncate using the data domain
    if isinstance(eps, torch.Tensor):
        # per-sample epsilon: one value per element of the batch
        assert len(eps) == len(delta)
    if ord == np.inf:
        delta.data.uniform_(-1, 1)
        delta.data = batch_multiply(eps, delta.data)
    elif ord == 2:
        delta.data.uniform_(clip_min, clip_max)
        delta.data = delta.data - x
        delta.data = clamp_by_pnorm(delta.data, ord, eps)
    elif ord == 1:
        # Laplace direction normalized to unit L1 norm, scaled by a uniform radius.
        ini = laplace.Laplace(
            loc=delta.new_tensor(0), scale=delta.new_tensor(1))
        delta.data = ini.sample(delta.data.shape)
        delta.data = normalize_by_pnorm(delta.data, p=1)
        ray = uniform.Uniform(0, eps).sample()
        delta.data *= ray
        delta.data = clamp(x.data + delta.data, clip_min, clip_max) - x.data
    else:
        error = "Only ord = inf, ord = 1 and ord = 2 have been implemented"
        raise NotImplementedError(error)
    # Final projection into the valid data range.
    delta.data = clamp(
        x + delta.data, min=clip_min, max=clip_max) - x
    return delta.data
def is_successful(y1, y2, targeted):
    """Return whether an attack succeeded.

    A targeted attack succeeds when the prediction equals the target
    (``y1 == y2``); an untargeted attack succeeds when the prediction
    differs from the true label (``y1 != y2``).  Works elementwise when
    ``y1``/``y2`` are tensors.
    """
    # Deliberately an identity test against True (as in the original):
    # any non-True truthy value falls into the untargeted branch.
    return (y1 == y2) if targeted is True else (y1 != y2)
class AttackConfig(object):
    # a convenient class for generate an attack/adversary instance
    """Declarative factory for adversary instances.

    Subclasses set a class attribute ``AttackClass`` plus class attributes
    named after that attack's ``__init__`` keyword arguments; calling the
    config builds the adversary with those kwargs.  Walking ``__mro__`` in
    reverse lets subclasses override kwargs declared by parents.
    """
    def __init__(self):
        self.kwargs = {}
        for mro in reversed(self.__class__.__mro__):
            # skip the bases that only define machinery, not attack kwargs
            if mro in (AttackConfig, object):
                continue
            for kwarg in mro.__dict__:
                # collect only attributes that match AttackClass.__init__ params
                if kwarg in self.AttackClass.__init__.__code__.co_varnames:
                    self.kwargs[kwarg] = mro.__dict__[kwarg]
                else:
                    # make sure we don't specify wrong kwargs
                    assert kwarg in ["__module__", "AttackClass", "__doc__"]
    def __call__(self, *args):
        # positional args (typically the predict fn) are forwarded verbatim
        adversary = self.AttackClass(*args, **self.kwargs)
        print(self.AttackClass, args, self.kwargs)
        return adversary
def multiple_mini_batch_attack(
        adversary, loader, device="cuda", save_adv=False,
        norm=None, num_batch=None):
    """Attack up to ``num_batch`` batches from ``loader`` and collect results.

    Parameters
    ----------
    adversary : object with ``perturb`` and ``predict`` methods.
    loader : iterable of ``(data, label)`` batches.
    device : device the batches are moved to (default ``"cuda"``).
    save_adv : unused; kept for backward compatibility with existing callers.
    norm : None, "Linf"/"L2"/"L1" or "inf"/2/1 - when given, also compute
        the per-sample distance between clean and adversarial inputs.
    num_batch : stop after this many batches (None = whole loader).

    Returns
    -------
    (labels, clean_preds, adv_preds, dists) - concatenated over batches;
    ``dists`` is None when ``norm`` is None.
    """
    lst_label = []
    lst_pred = []
    lst_advpred = []
    lst_dist = []
    _norm_convert_dict = {"Linf": "inf", "L2": 2, "L1": 1}
    if norm in _norm_convert_dict:
        norm = _norm_convert_dict[norm]
    if norm == "inf":
        def dist_func(x, y):
            # BUG FIX: the L-infinity distance is max over |x - y|; without
            # abs() a perturbation that only decreases values yielded a
            # wrong (possibly negative) distance.
            return (x - y).abs().view(x.size(0), -1).max(dim=1)[0]
    elif norm == 1 or norm == 2:
        from advertorch.utils import _get_norm_batch

        def dist_func(x, y):
            return _get_norm_batch(x - y, norm)
    else:
        assert norm is None
    idx_batch = 0
    for data, label in loader:
        data, label = data.to(device), label.to(device)
        adv = adversary.perturb(data, label)
        advpred = predict_from_logits(adversary.predict(adv))
        pred = predict_from_logits(adversary.predict(data))
        lst_label.append(label)
        lst_pred.append(pred)
        lst_advpred.append(advpred)
        if norm is not None:
            lst_dist.append(dist_func(data, adv))
        idx_batch += 1
        if idx_batch == num_batch:
            break
    return torch.cat(lst_label), torch.cat(lst_pred), torch.cat(lst_advpred), \
        torch.cat(lst_dist) if norm is not None else None
class MarginalLoss(_Loss):
    # TODO: move this to advertorch.loss
    """Margin loss: largest non-target logit minus the target logit.

    Positive values mean the model misclassifies the input, so maximizing
    this loss drives the attack toward misclassification.  ``reduction``
    may be "none", "sum" or "mean" (inherited from ``_Loss``).
    """

    def forward(self, logits, targets):  # pylint: disable=arguments-differ
        """Compute the margin for each sample.

        ``logits``: (batch, num_classes) with num_classes >= 2;
        ``targets``: (batch,) integer class indices.
        """
        assert logits.shape[-1] >= 2  # need a target and at least one rival
        top_logits, top_classes = torch.topk(logits, 2, dim=-1)
        target_logits = logits[torch.arange(logits.shape[0]), targets]
        # If the top-1 class is the target, the best rival is top-2;
        # otherwise the rival is top-1 itself.
        max_nontarget_logits = torch.where(
            top_classes[..., 0] == targets,
            top_logits[..., 1],
            top_logits[..., 0],
        )
        loss = max_nontarget_logits - target_logits
        if self.reduction == "none":
            pass
        elif self.reduction == "sum":
            loss = loss.sum()
        elif self.reduction == "mean":
            loss = loss.mean()
        else:
            # BUG FIX: was ``self.recution`` (typo), which raised an
            # AttributeError instead of the intended ValueError.
            raise ValueError("unknown reduction: '%s'" % (self.reduction,))
        return loss
class ChooseBestAttack(Attack, LabelMixin):
    """Ensemble attack that keeps, per sample, the strongest base attack.

    Runs every adversary in ``base_adversaries`` and, for each input,
    keeps the adversarial example with the highest per-sample loss
    (default: ``ZeroOneLoss``, i.e. whether the model is fooled).
    All base adversaries must share this attack's ``targeted`` flag.
    """
    def __init__(self, predict, base_adversaries, loss_fn=None,
                 targeted=False):
        self.predict = predict
        self.base_adversaries = base_adversaries
        self.loss_fn = loss_fn
        self.targeted = targeted
        if self.loss_fn is None:
            self.loss_fn = ZeroOneLoss(reduction="none")
        else:
            # per-sample comparison requires an unreduced loss
            assert self.loss_fn.reduction == "none"
        for adversary in self.base_adversaries:
            assert self.targeted == adversary.targeted
    def perturb(self, x, y=None):
        # TODO: might want to also retain the list of all attacks
        x, y = self._verify_and_process_inputs(x, y)
        with torch.no_grad():
            # start from the clean loss; any adversary must beat it to count
            maxloss = self.loss_fn(self.predict(x), y)
        final_adv = torch.zeros_like(x)
        for adversary in self.base_adversaries:
            adv = adversary.perturb(x, y)
            loss = self.loss_fn(self.predict(adv), y)
            # keep per-sample whichever adversarial example scores highest
            to_replace = maxloss < loss
            final_adv[to_replace] = adv[to_replace]
            maxloss[to_replace] = loss[to_replace]
        return final_adv
def attack_whole_dataset(adversary, loader, device="cuda"):
    """Attack every batch in ``loader`` and concatenate the results.

    Returns ``(adv_examples, labels, clean_preds, adv_preds)``, each a
    single tensor concatenated over all batches.
    """
    adv_batches = []
    label_batches = []
    pred_batches = []
    advpred_batches = []
    for data, label in loader:
        data, label = data.to(device), label.to(device)
        # prediction on the clean input, then on its adversarial counterpart
        pred = predict_from_logits(adversary.predict(data))
        adv = adversary.perturb(data, label)
        advpred = predict_from_logits(adversary.predict(adv))
        label_batches.append(label)
        pred_batches.append(pred)
        advpred_batches.append(advpred)
        adv_batches.append(adv)
    return (torch.cat(adv_batches), torch.cat(label_batches),
            torch.cat(pred_batches), torch.cat(advpred_batches))
|
{
"alphanum_fraction": 0.6384377936,
"author": null,
"avg_line_length": 33.7149321267,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "fed15b1ba2bd8dfbd2ab7393f3b5538ab3718921",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2021-09-23T10:20:56.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-31T14:31:29.000Z",
"max_forks_repo_head_hexsha": "58bfd0ee159cbcbebdda2da068984e6edf8b61ec",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "flymin/Rectified-Rejection",
"max_forks_repo_path": "adaptiveCWattack/utils.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "58bfd0ee159cbcbebdda2da068984e6edf8b61ec",
"max_issues_repo_issues_event_max_datetime": "2021-09-01T03:13:33.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-08-31T12:57:30.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "flymin/Rectified-Rejection",
"max_issues_repo_path": "adaptiveCWattack/utils.py",
"max_line_length": 79,
"max_stars_count": 23,
"max_stars_repo_head_hexsha": "58bfd0ee159cbcbebdda2da068984e6edf8b61ec",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "flymin/Rectified-Rejection",
"max_stars_repo_path": "adaptiveCWattack/utils.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-26T09:06:39.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-05-31T14:29:01.000Z",
"num_tokens": 1796,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7451
}
|
import numpy as np
import matplotlib.pyplot as plt

# Sample the interval [-pi, pi] at 256 evenly spaced points.
xs = np.linspace(-np.pi, np.pi, 256)

# Evaluate both trigonometric curves on the shared grid.
cosine = np.cos(xs)
sine = np.sin(xs)

# Draw the two curves on one set of axes and display the figure.
plt.plot(xs, cosine)
plt.plot(xs, sine)
plt.show()
|
{
"alphanum_fraction": 0.6130952381,
"author": null,
"avg_line_length": 15.2727272727,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "8ebd64810d7de4e663dc2d41cf7fbf5059664034",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4187c16a1d6c1269d188a4a039e0a16020de51d0",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "jackh423/python",
"max_forks_repo_path": "CIS41B/MatPlotLib/CosSin.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4187c16a1d6c1269d188a4a039e0a16020de51d0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "jackh423/python",
"max_issues_repo_path": "CIS41B/MatPlotLib/CosSin.py",
"max_line_length": 36,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "4187c16a1d6c1269d188a4a039e0a16020de51d0",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "jackh423/python",
"max_stars_repo_path": "CIS41B/MatPlotLib/CosSin.py",
"max_stars_repo_stars_event_max_datetime": "2021-09-08T18:34:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-09-08T18:34:56.000Z",
"num_tokens": 52,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 168
}
|
__author__ = 'jlu96'
import causal_pipeline as cp
import sys
import pickle
import pandas as pd
import geneTSmunging as gtm
import os
import numpy as np
def get_parser():
    """Build the command-line parser for choosing the best hyperparameter.

    Returns
    -------
    argparse.ArgumentParser
        Parser exposing the input/output file options consumed by
        load_and_run.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Given the baseline, per gene hyperparameter fit results, choose the best hyperparameter')
    # Optional label used only for titles/log messages.
    parser.add_argument('-tn', '--test_name', default="")
    parser.add_argument('-ind', '--int_name_dfname', required=True)
    parser.add_argument('-r', '--result_dfname', required=True,
                        help="Save result of hyperparameters")
    parser.add_argument('-o', '--output_name', required=True,
                        help="Save the best hyperparameter")
    parser.add_argument('-hl', '--hyper_file', required=True,
                        help="Pickle of the original list. Make sure this has same order of the row names in integration")
    parser.add_argument('-s', '--sort_by', default='mse_avg')
    return parser
def load_and_run(args):
    """Summarize per-hyperparameter fit tables and select the best one.

    Reads the hyperparameter list (pickle) and an integration table mapping
    each hyperparameter to a per-gene fit-result TSV, summarizes them via
    causal_pipeline, and writes: the best hyperparameter (pickle), the
    summary table (TSV), correlation matrices under hyper/, and box plots
    under plots/hyper/.

    :param args: parsed argparse namespace from get_parser()
    """
    # Optional human-readable prefix used in plot titles and log output.
    if args.test_name == "":
        name = ""
    else:
        name = args.test_name.capitalize() + " "

    # Candidate hyperparameters; per the -hl help text the order must match
    # the columns of the integration table.
    hyperlist = pickle.load(open(args.hyper_file, 'rb'))

    int_name_df = pd.read_csv(args.int_name_dfname, sep="\t")

    print("Loading integrated")
    print(int_name_df.head())

    # One fit table per hyperparameter; None when the referenced file
    # does not exist on disk.
    hyper_fit_dfs = [pd.read_csv(int_name_df[x].values[0], sep="\t")
                     if os.path.exists(int_name_df[x].values[0]) else None
                     for x in int_name_df]

    # Remove the Nones for which there is no information.
    remove_list = []
    for i in range(len(hyper_fit_dfs[:])):
        try:
            # Check if its empty
            if hyper_fit_dfs[i].empty:
                remove_list.append(i)
        # If it's equal to None will have an AttributeError here
        except AttributeError:
            remove_list.append(i)

    # Drop the unusable entries from both parallel lists so they stay aligned.
    hyper_fit_dfs = [h for i, h in enumerate(hyper_fit_dfs) if i not in remove_list]
    hyperlist = [h for i, h in enumerate(hyperlist) if i not in remove_list]

    # Get the best hyper
    hyper_df = cp.summarize_hyper_fit_dfs(hyper_fit_dfs, hyperlist)
    best_hyper, best, hyper_df = cp.get_best_hyper(hyper_df, sort_by=args.sort_by)

    # Write the hypers out
    pickle.dump(best_hyper, open(args.output_name, 'wb'))
    hyper_df.to_csv(args.result_dfname, sep="\t", index=0)

    print("Test is ", name)
    print("Best hyper is ", best_hyper)
    print("Best hyper result is ", best)
    print("Best hyper written to ", args.output_name)
    print("Hyper result written to ", args.result_dfname)

    if not os.path.exists("hyper"):
        os.makedirs("hyper")

    # Get correlations
    # Rows = hyperparameters, columns = genes; corrcoef then correlates
    # hyperparameters against each other across genes.
    mse_vec = np.array([np.array(hyper_fit_df["mse"].values) for hyper_fit_df in hyper_fit_dfs])
    print(mse_vec.shape)
    mse_corr = np.corrcoef(mse_vec)
    gtm.save_gene_matrix("hyper" + os.sep + "mse_corr.txt", mse_corr, hyperlist)
    print("MSE Correlation:")
    print(mse_corr)
    print("MSE corr. matrix saved to ", "hyper" + os.sep + "mse_corr.txt")

    r2_vec = np.array([hyper_fit_df["r2"].values for hyper_fit_df in hyper_fit_dfs])
    r2_corr = np.corrcoef(r2_vec)
    gtm.save_gene_matrix("hyper" + os.sep + "r2_corr.txt", r2_corr, hyperlist)
    print("R2 Correlation")
    print(r2_corr)
    print("R^2 corr. matrix saved to ", "hyper" + os.sep + "r2_corr.txt")

    # Plot the hyperparameters
    if not os.path.exists("plots"):
        os.makedirs("plots")
    if not os.path.exists("plots" + os.sep + "hyper"):
        os.makedirs("plots" + os.sep + "hyper")

    # Correlation heat maps plus one box plot per metric (r2, mse, avg d.o.f.),
    # each annotated with the best hyperparameter's value.
    cp.plot_corr_matrix(mse_corr, cp.hyperlist_to_labellist(hyperlist), title="MSE correlation among " + name + "hyperparams", filename="plots" + os.sep + "hyper" + os.sep + "mse_corr")
    cp.plot_corr_matrix(r2_corr, cp.hyperlist_to_labellist(hyperlist), title="$r^2$ correlation among " + name + "hyperparams", filename="plots" + os.sep + "hyper" + os.sep + "r2_corr")
    cp.plot_hyper_boxplot(cp.hyperlist_to_labellist(hyperlist), hyper_fit_dfs, "r2", xlabel=name + "Hyperparameter", ylabel="$r^2$", title=name + "Hyperparameter VS $r^2$", filename="plots" + os.sep + "hyper"+ os.sep + "hyperVSr2",
                          hyper_color_labels=[(cp.hyper_to_label(best_hyper), "k", "Best: " + cp.hyper_to_label(best_hyper) + ", $r^2$ = " + str(np.round(best["r2_avg"].values[0], 1)) )],
                          horizontal_line_color_labels=[(best["r2_avg"].values[0], 'k', None)])
    cp.plot_hyper_boxplot(cp.hyperlist_to_labellist(hyperlist), hyper_fit_dfs, "mse", xlabel=name + "Hyperparameter", ylabel="Mean-Squared Error", title=name + "Hyperparameter VS MSE", filename="plots" + os.sep + "hyper" + os.sep + "hyperVSmse",
                          hyper_color_labels=[(cp.hyper_to_label(best_hyper), "k", "Best: " + cp.hyper_to_label(best_hyper) + ", MSE = " + str(np.round(best["mse_avg"].values[0], 1)))],
                          horizontal_line_color_labels=[(best["mse_avg"].values[0], 'k', None)])
    cp.plot_hyper_boxplot(cp.hyperlist_to_labellist(hyperlist), hyper_fit_dfs, "avg_df", xlabel=name + "Hyperparameter", ylabel="Degrees of Freedom", title=name + "Hyperparameter VS df", filename="plots" + os.sep + "hyper" + os.sep + "hyperVSdof",
                          hyper_color_labels=[(cp.hyper_to_label(best_hyper), "k", "Best: " + cp.hyper_to_label(best_hyper) + ", df = " + str(int(np.round(best["df_avg"].values[0]))))],
                          horizontal_line_color_labels=[(best["df_avg"].values[0], 'k', None)])

    print("Correlation between hyperparameter results", "plots" + os.sep + "hyper")
    print("Hyper box plots of r^2, mse, avg d.o.f. written to ", "plots" + os.sep + "hyper")
def main():
    """Parse the command-line arguments and run the selection pipeline."""
    parser = get_parser()
    parsed = parser.parse_args(sys.argv[1:])
    load_and_run(parsed)


if __name__ == '__main__':
    main()
|
{
"alphanum_fraction": 0.6561688874,
"author": null,
"avg_line_length": 39.8551724138,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "637dd5556beec1d73f1ecced0eabd25d6b60cfa9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2021-02-21T17:47:01.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-15T21:34:46.000Z",
"max_forks_repo_head_hexsha": "7121e16281e0e7973c1ee6f631b59ac1f7e7a7d5",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "lujonathanh/BETS",
"max_forks_repo_path": "code/set_hyper.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "7121e16281e0e7973c1ee6f631b59ac1f7e7a7d5",
"max_issues_repo_issues_event_max_datetime": "2019-06-06T16:12:03.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-06-05T20:12:18.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "lujonathanh/BETS",
"max_issues_repo_path": "code/set_hyper.py",
"max_line_length": 247,
"max_stars_count": 12,
"max_stars_repo_head_hexsha": "7121e16281e0e7973c1ee6f631b59ac1f7e7a7d5",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "lujonathanh/BETS",
"max_stars_repo_path": "code/set_hyper.py",
"max_stars_repo_stars_event_max_datetime": "2021-09-18T14:46:29.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-02T05:22:03.000Z",
"num_tokens": 1536,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5779
}
|
"""
    MatHeatDiffModule

Module for linear heat diffusion material models.
"""
module MatHeatDiffModule

using FinEtools.FTypesModule: FInt, FFlt, FCplxFlt, FFltVec, FIntVec, FFltMat, FIntMat, FMat, FVec, FDataDict
import FinEtools.MatModule: AbstractMat
using FinEtools.MatrixUtilityModule: mulCAB!

"""
    MatHeatDiff{MTAN<:Function, MUPD<:Function} <: AbstractMat

Type of material model for heat diffusion.

The two function fields allow the tangent-moduli and state-update behavior
to be customized per material instance.
"""
struct MatHeatDiff{MTAN<:Function, MUPD<:Function} <: AbstractMat
    thermal_conductivity::Array{FFlt, 2};# Thermal conductivity matrix (material coordinate system)
    specific_heat::FFlt;# Specific heat per unit volume
    mass_density::FFlt # mass density
    tangentmoduli!::MTAN # fills in the conductivity (tangent moduli) matrix
    update!::MUPD # updates the material state and computes the heat flux
end

"""
    MatHeatDiff(thermal_conductivity)

Construct material model for heat diffusion.

Supply the matrix of thermal conductivity constants. Specific heat and
mass density default to zero.
"""
function MatHeatDiff(thermal_conductivity)
    return MatHeatDiff(thermal_conductivity, 0.0, 0.0, tangentmoduli!, update!)
end

"""
    MatHeatDiff(thermal_conductivity, specific_heat)

Construct material model for heat diffusion.

Supply the matrix of thermal conductivity constants and the specific heat
per unit volume. Mass density defaults to zero.
"""
function MatHeatDiff(thermal_conductivity, specific_heat)
    return MatHeatDiff(thermal_conductivity, specific_heat, 0.0, tangentmoduli!, update!)
end

"""
    tangentmoduli!(self::MatHeatDiff, kappabar::FFltMat, t::FFlt, dt::FFlt, loc::FFltMat, label::FInt)

Calculate the thermal conductivity matrix.

- `kappabar` = matrix of thermal conductivity (tangent moduli) in material
  coordinate system, supplied as a buffer and overwritten.

The time, time step, location, and label arguments are accepted for
interface uniformity and are not used: the conductivity is constant.
"""
function tangentmoduli!(self::MatHeatDiff, kappabar::FFltMat, t::FFlt = 0.0, dt::FFlt = 0.0, loc::FFltMat = reshape(FFlt[],0,0), label::FInt = 0)
    copyto!(kappabar, self.thermal_conductivity);
    return kappabar
end

"""
    update!(self::MatHeatDiff, heatflux::FFltVec, output::FFltVec, gradT::FFltVec, t::FFlt= 0.0, dt::FFlt= 0.0, loc::FFltMat=FFltMat[], label::FInt=0, quantity=:nothing)

Update material state.

# Arguments
- `gradT` = thermal gradient vector,
- `t` = current time,
- `dt` = current time step,
- `loc` = location of the quadrature point in global Cartesian coordinates,
- `label` = label of the finite element in which the quadrature point is located.
- `quantity` = quantity to be output (`:heatflux`)

# Output
- `heatflux` = heat flux vector, allocated by the caller with a size of
  the embedding space. The components of the heat flux vector are
  calculated and stored in the `heatflux` vector.
- `output` = array which is (if necessary) allocated in an appropriate size, filled with the output quantity, and returned.
"""
function update!(self::MatHeatDiff, heatflux::FFltVec, output::FFltVec, gradT::FFltVec, t::FFlt= 0.0, dt::FFlt= 0.0, loc::FFltMat=reshape(FFlt[],0,0), label::FInt=0, quantity=:nothing)
    sdim = size(self.thermal_conductivity, 2)
    @assert length(heatflux) == sdim
    # Fourier's law: heat flux is the conductivity applied to -grad(T).
    mulCAB!(heatflux, self.thermal_conductivity, -gradT);
    if quantity == :nothing
        #Nothing to be copied to the output array
    elseif quantity == :heatflux
        (length(output) >= sdim) || (output = zeros(sdim)) # make sure we can store it
        copyto!(output, heatflux);
    end
    return output
end

end
|
{
"alphanum_fraction": 0.7366447985,
"author": null,
"avg_line_length": 34.4193548387,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "dc546d5d91545eb6f76e6aa6d7a09841f0adabd1",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2020-03-17T04:53:58.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-01-05T14:15:53.000Z",
"max_forks_repo_head_hexsha": "16650bd991fe015f3e2cb6504eb22b2adaab2295",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "PetrKryslUCSD/FinEtoolsHeatDiff",
"max_forks_repo_path": "src/MatHeatDiffModule.jl",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "16650bd991fe015f3e2cb6504eb22b2adaab2295",
"max_issues_repo_issues_event_max_datetime": "2020-02-17T18:38:16.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-02-17T17:18:43.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "PetrKryslUCSD/FinEtoolsHeatDiff",
"max_issues_repo_path": "src/MatHeatDiffModule.jl",
"max_line_length": 184,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "16650bd991fe015f3e2cb6504eb22b2adaab2295",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "PetrKryslUCSD/FinEtoolsHeatDiff",
"max_stars_repo_path": "src/MatHeatDiffModule.jl",
"max_stars_repo_stars_event_max_datetime": "2021-04-10T07:10:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-06-12T05:40:44.000Z",
"num_tokens": 932,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3201
}
|
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
from sklearn.mixture import GaussianMixture as GMM
from .utils import fix_dim_gmm, custom_KDE
class Likelihood(object):
    """A class for computation of the likelihood ratio.

    Parameters
    ----------
    model : instance of GPRegression
        A GPy model
    inputs : instance of Inputs
        The input space.
    weight_type : str, optional
        Type of likelihood weight. Must be one of
        - "nominal" : uses w(x) = p(x)
        - "importance" : uses w(x) = p(x)/p_y(mu(x))
    fit_gmm : boolean, optional
        Whether or not to use a GMM approximation for the likelihood
        ratio.
    kwargs_gmm : dict, optional
        A dictionary of keyword arguments for scikit's GMM routine.
        Use this to specify the number of Gaussian mixtures and the
        type of covariance matrix.

    Attributes
    ----------
    model, inputs, weight_type, fit_gmm, kwargs_gmm : see Parameters
    fy_interp : scipy 1-D interpolant
        An interpolant for the output pdf p_y(mu)
    gmm : scikit Gaussian Mixture Model
        A GMM object approximating the likelihood ratio.
    """

    def __init__(self, model, inputs, weight_type="importance",
                 fit_gmm=True, kwargs_gmm=None):
        self.model = model
        self.inputs = inputs
        self.weight_type = self.check_weight_type(weight_type)
        self.fit_gmm = fit_gmm
        # Default GMM configuration: two full-covariance components.
        if kwargs_gmm is None:
            kwargs_gmm = dict(n_components=2, covariance_type="full")
        self.kwargs_gmm = kwargs_gmm
        self._prepare_likelihood()

    def update_model(self, model):
        """Replace the GP model and recompute the cached likelihood data."""
        self.model = model
        self._prepare_likelihood()
        return self

    def evaluate(self, x):
        """Evaluates the likelihood ratio at x.

        Parameters
        ----------
        x : array
            Query points. Should be of size (n_pts, n_dim)

        Returns
        -------
        w : array
            The likelihood ratio at x.
        """
        # Dispatch to the GMM approximation or the raw ratio.
        if self.fit_gmm:
            w = self._evaluate_gmm(x)
        else:
            w = self._evaluate_raw(x)
        return w

    def jacobian(self, x):
        """Evaluates the gradients of the likelihood ratio at x.

        Parameters
        ----------
        x : array
            Query points. Should be of size (n_pts, n_dim)

        Returns
        -------
        w_jac : array
            Gradients of the likelihood ratio at x.
        """
        if self.fit_gmm:
            w_jac = self._jacobian_gmm(x)
        else:
            w_jac = self._jacobian_raw(x)
        return w_jac

    def _evaluate_gmm(self, x):
        """Evaluate the GMM approximation of the ratio; returns (n_pts, 1)."""
        x = np.atleast_2d(x)
        # score_samples returns log-density; exponentiate to get density.
        w = np.exp(self.gmm.score_samples(x))
        return w[:,None]

    def _jacobian_gmm(self, x):
        """Gradient of the GMM density at x; returns (n_pts, n_dim)."""
        x = np.atleast_2d(x)
        w_jac = np.zeros(x.shape)
        # Per-component weighted densities p_k(x).
        p = np.exp(self.gmm._estimate_weighted_log_prob(x))
        precisions = fix_dim_gmm(self.gmm, matrix_type="precisions")
        # d/dx of a Gaussian mixture: sum_k p_k(x) * Lambda_k (mu_k - x).
        for ii in range(self.gmm.n_components):
            w_jac += p[:,ii,None] * np.dot(self.gmm.means_[ii]-x, \
                                           precisions[ii])
        return w_jac

    def _evaluate_raw(self, x):
        """Evaluate the exact (non-GMM) ratio; returns (n_pts, 1)."""
        x = np.atleast_2d(x)
        fx = self.inputs.pdf(x)
        if self.weight_type == "nominal":
            w = fx
        elif self.weight_type == "importance":
            # w(x) = p(x) / p_y(mu(x)), with mu in the normalized space
            # when the model uses a normalizer.
            mu = self.model.predict(x)[0].flatten()
            if self.model.normalizer:
                mu = self.model.normalizer.normalize(mu)
            fy = self.fy_interp(mu)
            w = fx/fy
        return w[:,None]

    def _jacobian_raw(self, x):
        """Gradient of the exact (non-GMM) ratio; returns (n_pts, n_dim)."""
        x = np.atleast_2d(x)
        fx_jac = self.inputs.pdf_jac(x)

        if self.weight_type == "nominal":
            w_jac = fx_jac

        elif self.weight_type == "importance":
            mu = self.model.predict(x)[0].flatten()
            if self.model.normalizer:
                mu = self.model.normalizer.normalize(mu)
            mu_jac, _ = self.model.predictive_gradients(x)
            mu_jac = mu_jac[:,:,0]
            fx = self.inputs.pdf(x)
            fy = self.fy_interp(mu)
            fy_jac = self.fy_interp.derivative()(mu)
            # Quotient rule for w = fx / fy(mu(x)).
            tmp = fx * fy_jac / fy**2
            w_jac = fx_jac / fy[:,None] - tmp[:,None] * mu_jac

        return w_jac

    def _prepare_likelihood(self):
        """Prepare likelihood ratio for evaluation."""
        # More samples for higher-dimensional input spaces.
        if self.inputs.input_dim <= 2:
            n_samples = int(1e5)
        else:
            n_samples = int(1e6)
        pts = self.inputs.draw_samples(n_samples=n_samples,
                                       sample_method="uni")
        fx = self.inputs.pdf(pts)

        if self.weight_type == "importance":
            # Build a 1-D interpolant for the output pdf p_y(mu) from a
            # weighted KDE of the predicted means.
            mu = self.model.predict(pts)[0].flatten()
            if self.model.normalizer:
                mu = self.model.normalizer.normalize(mu)
            x, y = custom_KDE(mu, weights=fx).evaluate()
            self.fy_interp = InterpolatedUnivariateSpline(x, y, k=1)

        if self.fit_gmm:
            if self.weight_type == "nominal":
                w_raw = fx
            elif self.weight_type == "importance":
                w_raw = fx/self.fy_interp(mu)
            self.gmm = self._fit_gmm(pts, w_raw, self.kwargs_gmm)

        return self

    @staticmethod
    def _fit_gmm(pts, w_raw, kwargs_gmm):
        """Fit Gaussian Mixture Model using scikit's GMM framework.

        Parameters
        ----------
        pts : array
            Sample points.
        w_raw : array
            Raw likelihood ratio at sample points.
        kwargs_gmm : dict
            A dictionary of keyword arguments for scikit's GMM routine.

        Returns
        -------
        gmm : scikit Gaussian Mixture Model
            A GMM object approximating the likelihood ratio.
        """
        # Sample and fit
        # Draw 20k points with probability proportional to the raw ratio,
        # then fit the GMM to those samples.
        sca = np.sum(w_raw)
        rng = np.random.default_rng()
        aa = rng.choice(pts, size=20000, p=w_raw/sca)
        gmm = GMM(**kwargs_gmm)
        gmm = gmm.fit(X=aa)
        # Rescale
        # Scale the mixture weights so the GMM density matches the overall
        # mass of the raw ratio over the sample points.
        gmm_y = np.exp(gmm.score_samples(pts))
        scgmm = np.sum(gmm_y)
        gmm.weights_ *= (sca/w_raw.shape[0] * gmm_y.shape[0]/scgmm)
        return gmm

    @staticmethod
    def check_weight_type(weight_type):
        """Validate weight_type and return it lower-cased."""
        assert(weight_type.lower() in ["nominal", "importance"])
        return weight_type.lower()
|
{
"alphanum_fraction": 0.5623830318,
"author": null,
"avg_line_length": 30.3886255924,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "892b28db625a6593992cd57c79a0083ffb9f4ec6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 9,
"max_forks_repo_forks_event_max_datetime": "2022-03-22T15:14:14.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-18T13:29:46.000Z",
"max_forks_repo_head_hexsha": "8c5758c9fb2b623ef79952c3e9c113cb157d79bc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Fluid-Dynamics-Group/gpsearch",
"max_forks_repo_path": "gpsearch/core/likelihood.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8c5758c9fb2b623ef79952c3e9c113cb157d79bc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Fluid-Dynamics-Group/gpsearch",
"max_issues_repo_path": "gpsearch/core/likelihood.py",
"max_line_length": 71,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "8c5758c9fb2b623ef79952c3e9c113cb157d79bc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Fluid-Dynamics-Group/gpsearch",
"max_stars_repo_path": "gpsearch/core/likelihood.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-11T08:49:27.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-07-13T00:02:17.000Z",
"num_tokens": 1552,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6412
}
|
#!/usr/bin/env python3
#
# Copyright 2019 Peifeng Yu <peifeng@umich.edu>
#
# This file is part of Salus
# (see https://github.com/SymbioticLab/Salus).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 30 01:11:53 2018
@author: peifeng
"""
from __future__ import print_function, absolute_import, division
import re
from datetime import datetime
from collections import defaultdict
import multiprocessing as mp
from pathlib import Path
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import plotutils as pu
import parse_log as pl
def select_steps(df):
    """Pick the per-iteration steps worth analyzing.

    Keeps steps numbered above 10 that have more than 10 distinct kernels
    and more than 200 distinct ops, dropping the first qualifying step
    (warm-up). When fewer than two steps qualify, the user is asked to
    choose one interactively.
    """
    # Distinct kernel/op counts per step.
    uniq = {col: 'nunique' for col in ['kernel', 'op']}
    counts = df.groupby('step').agg(uniq).reset_index()
    candidates = counts.query('step > 10 & kernel > 10 & op > 200')
    if len(candidates) > 1:
        # Drop the first qualifying step (warm-up iteration).
        return candidates.step.astype(int).tolist()[1:]
    # Fall back to asking the user to pick a step.
    for row in counts.itertuples():
        print('Step {} has {} tasks, with {} kernels, select?'.format(row.step, row.op, row.kernel))
    return [int(input('nothing we can find programmatically, let the user decide'))]
def only_step(steps, idx):
    """Return the rows of `steps` belonging to the idx-th distinct step.

    Distinct step values are ordered ascending; an idx past the end is
    clamped to the last available step.
    """
    ordered = steps.step.sort_values().unique().tolist()
    clamped = min(idx, len(ordered) - 1)
    return steps[steps.step == ordered[clamped]]
def load_failure(path):
    """Load one run's perf log and keep the 'done' op-tracing events for
    the automatically selected steps.

    Returns a DataFrame with a synthesized 'name' column of the form
    'op[kernel]'.
    """
    logs = pl.load_file(str(path))
    # One row per parsed log record.
    df = pd.DataFrame(l.__dict__ for l in logs)
    # Keep only op-tracing events and drop bookkeeping columns.
    df = df[df.type == 'optracing_evt']
    df = df.drop(['entry_type','level','loc', 'thread', 'type'], axis=1)
    # make sure step is int
    df['step'] = df.step.astype(int)
    ss = select_steps(df)
    step25 = df[df.step.isin(ss)]
    # discard unneeded event
    step25 = step25[step25.evt == 'done']
    # add a name column
    def name(row):
        return '{}[{}]'.format(row['op'], row['kernel'])
    step25['name'] = step25.apply(name, axis=1).values
    return step25
def load_exp19(directory):
    """Load per-run failure data for every salus run under `directory`.

    Returns a dict mapping each run directory's name to the DataFrame
    produced by load_failure on its perf.output file.
    """
    base = Path(directory)
    return {run.name: load_failure(run / 'perf.output')
            for run in (base / 'salus').iterdir()}
def cdf(X, ax=None, **kws):
    """Plot the empirical CDF of the values in X as a step function.

    Parameters
    ----------
    X : sequence of numbers
        Samples to plot.
    ax : matplotlib axes, optional
        Axes to draw on; a new figure/axes pair is created when omitted.
    **kws :
        Extra keyword arguments forwarded to ``ax.step`` (e.g. label).

    Returns
    -------
    The axes the CDF was drawn on.
    """
    if ax is None:
        _, ax = plt.subplots()
    # Fraction of samples at or below each sorted value. Use the builtin
    # float: np.float was deprecated in NumPy 1.20 and removed in 1.24.
    n = np.arange(1, len(X) + 1) / float(len(X))
    Xs = np.sort(X)
    ax.step(Xs, n, **kws)
    ax.set_ylim(0, 1)
    return ax
#%%
# Driver: load the experiment-19 logs and plot one failure CDF per run count,
# saving the combined figure to /tmp/workspace/exp19.pdf.
path = 'logs/osdi18/cc/exp19'
data = load_exp19(path)

plt.style.use(['seaborn-paper', 'mypaper'])

ax = None
for n, failures in data.items():
    # NOTE(review): `failures` is the event DataFrame from load_failure;
    # cdf() expects a 1-D sequence of numbers -- confirm the intended column.
    ax = cdf(failures, ax=ax, label='{} x resnet50_50'.format(n))
fig = ax.figure
ax.legend()

# Fixed paper-column figure size (inches).
fig.set_size_inches(3.45, 2.75, forward=True)
fig.tight_layout()
fig.savefig('/tmp/workspace/exp19.pdf', dpi=300)
#plt.close()
|
{
"alphanum_fraction": 0.6503159795,
"author": null,
"avg_line_length": 27.9243697479,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0951b5f0cea3f5219484532859bf0c6952aad265",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 18,
"max_forks_repo_forks_event_max_datetime": "2021-09-15T22:13:07.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-03-04T07:45:41.000Z",
"max_forks_repo_head_hexsha": "b2a194e7e4654b51dbd8d8fc1577fb1e9915ca6f",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "SymbioticLab/Salus",
"max_forks_repo_path": "scripts/parse_exp19.py",
"max_issues_count": 9,
"max_issues_repo_head_hexsha": "b2a194e7e4654b51dbd8d8fc1577fb1e9915ca6f",
"max_issues_repo_issues_event_max_datetime": "2021-06-06T17:59:07.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-08-24T03:23:21.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "SymbioticLab/Salus",
"max_issues_repo_path": "scripts/parse_exp19.py",
"max_line_length": 91,
"max_stars_count": 104,
"max_stars_repo_head_hexsha": "b2a194e7e4654b51dbd8d8fc1577fb1e9915ca6f",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "SymbioticLab/Salus",
"max_stars_repo_path": "scripts/parse_exp19.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-07T16:58:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-12T20:41:07.000Z",
"num_tokens": 908,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3323
}
|
# imports
import semseg_vaihingen.config as cfg
from . import model_generator
from . import data_io as dio
import numpy as np
from sklearn import metrics
import keras
import matplotlib as mpl
# Select the non-interactive Agg backend before pyplot is imported so the
# module also works on headless servers; this call must precede the
# pyplot import below.
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import os, re
import argparse
# label list
# Class labels in prediction-index order: a 1-based prediction value i
# corresponds to glob_label_list[i - 1].
glob_label_list = np.array(['Impervious surfaces', 'Building',
                            'Low vegetation', 'Tree',
                            'Car', 'Clutter/Background'])

# dictionary with mapping {label - color}:
# Used by the plotting helpers to build colormaps and legends.
glob_label_color_dict = {'Impervious surfaces':'gray',
                         'Building': 'red',
                         'Low vegetation': 'lightgreen',
                         'Tree': 'green',
                         'Car': 'purple',
                         'Clutter/Background': 'black' }
# calculate the networks prediction at a given window of the image:
def net_predict(data, model, s, k, l):
    """Run the model on the s-by-s window of `data` anchored at (k, l).

    Returns an (s, s) matrix of 1-based label indices, the per-pixel
    argmax over the model's 6 output channels plus one.
    """
    window = np.reshape(data[k:k + s, l:l + s, :], (1, s, s, 3))
    scores = np.reshape(model.predict(window), (s, s, 6))
    # Most likely class per pixel, shifted to the 1-based label convention.
    return np.argmax(scores, axis=2) + 1
# inverse function to to_categorical:
def from_categorical(categorical_tensor):
    """Collapse a one-hot/score tensor to 0-based class indices per pixel."""
    class_indices = np.argmax(categorical_tensor, axis=2)
    return class_indices
# function to generate a plot of the ground truth or network prediction:
def create_colormap(label_matrix, title, labels=glob_label_list,
                    colormap=True, legend=False):
    """Render `label_matrix` as an image and save it under cfg.BASE_DIR/data.

    :param label_matrix: 2-D label map (or raw image when colormap=False)
    :param title: plot title; also becomes the file name (spaces -> '_')
    :param labels: label names used for colors and the optional legend
    :param colormap: map labels to colors via glob_label_color_dict
    :param legend: add a legend box outside the axes on the right
    """
    fig, ax1 = plt.subplots()
    if legend:
        #fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
        # Fake plots to create legend
        for label in labels:
            ax1.plot(0, 0, "o", c=glob_label_color_dict[label], label=label)
        # Make room for the legend placed outside the axes.
        plt.subplots_adjust(right=0.75)
        ax1.legend(bbox_to_anchor=(1.04,1), loc="upper left")
    if colormap:
        # create custom colormap:
        colors = [ glob_label_color_dict[label] for label in labels ]
        label_cmap = ListedColormap(colors)
        # generate and show the map
        ax1.imshow(label_matrix, cmap=label_cmap)
    else:
        print(("[DEBUG] label_matrix.shape={}".format(label_matrix.shape)))
        ax1.imshow(label_matrix)
    plt.title(title)
    #plt.show()
    # Derive the file name from the title and save into the data directory.
    plot_file = title.replace(' ', '_') + '.png'
    plt.savefig(os.path.join(cfg.BASE_DIR, 'data', plot_file),
                dpi=225, bbox_inches='tight')
    plt.clf()
# function to generate a plot of the wrong classified pixels
def create_errormap(error_matrix,title):
    """Save a black/white map of misclassified pixels as data/Error_map.png.

    :param error_matrix: 2-D matrix; 1 = correctly classified (white),
        0 = misclassified (black)
    :param title: plot title (the output file name is fixed)
    """
    # create custom colormap:
    error_cmap = ListedColormap(['black', 'white'])
    # generate and show the map
    plt.imshow(error_matrix, cmap=error_cmap)
    plt.title(title)
    #plt.show()
    plt.savefig(os.path.join(cfg.BASE_DIR, 'data', 'Error_map.png'))
# calculate the confusion matrix and the class accuracy:
def analyze_result(ground_truth, prediction, num_labels):
    """Compute the confusion matrix and per-label accuracy.

    :param ground_truth: label map with 1-based labels (any shape; flattened)
    :param prediction: predicted label map, same shape convention
    :param num_labels: number of classes; labels 1..num_labels are evaluated
    :return: (confusion, label_accuracy) where label_accuracy[i] is the
        fraction of pixels of label i+1 classified correctly (1.0 for labels
        with no pixels), rounded to 3 decimals.
    """
    # reshape to one dimensional arrays:
    ground_truth = np.ravel(ground_truth)
    prediction = np.ravel(prediction)
    # calculate the confusion matrix:
    # Pass the label set as a keyword argument: positional use of `labels`
    # was deprecated in scikit-learn 0.25 and removed in 1.0.
    confusion = metrics.confusion_matrix(ground_truth, prediction,
                                         labels=np.arange(num_labels) + 1)
    # labelwise accuracy = correctly classified pixels of this label / pixels of this label
    true_pos = np.diag(confusion.astype(float))
    pred_pos = np.sum(confusion.astype(float), axis=1)
    # Guard against labels with zero pixels (division by zero -> accuracy 1.0).
    label_accuracy = np.divide(true_pos, pred_pos, out=np.ones_like(true_pos),
                               where=pred_pos != 0.0)
    return confusion, np.round(label_accuracy, 3)
# print the labelwise accuracy
def print_labelwise_accuracy(confusion, label_accuracy):
    """Print a per-label table of pixel counts and classification accuracy."""
    # Pixels per ground-truth label = row sums of the confusion matrix.
    overall = np.sum(confusion, axis=1)
    print('')
    print("[INFO] Results:")
    print('Labelwise accuracy: ')
    print(('{:20s} \t {:>10s} \t {:>10s}'.format("Labels", "pixels", "accuracy")))
    print(("-".rjust(50,"-")))
    for label, pixels, acc in zip(glob_label_list, overall, label_accuracy):
        print(('{:20s} \t {:10d} \t {:10.4f}%'.format(label, pixels, 100.*acc)))
    print('')
# function to apply a trained network to a whole image:
def predict_complete_image(patch_path, network_weight_file,
                           convert_gray=False):
    """Run the trained FCN over a full Vaihingen image and evaluate it
    against the ground truth.

    Saves input/ground-truth/prediction colormaps and an error map to the
    data directory, prints accuracy statistics, and returns a dict with
    'overall_accuracy' and per-label 'label_accuracy'.

    :param patch_path: path to a vaihingen_<N>.hdf5 file
    :param network_weight_file: path to the trained model weights
    :param convert_gray: forwarded to the data loader
    """
    # The image number is encoded in the file name: ..._<number>.hdf5
    image_number = re.search('_(.*).hdf5', patch_path).group(1)
    print(('[INFO] Load image number {} ... '.format(image_number)))
    data, ground_truth = dio.load_vaihingen_image(patch_path, image_number,
                                                  convert_gray=convert_gray)
    print(('[INFO] Image size: (%d x %d)' % (data.shape[0], data.shape[1])))
    # plot the input:
    create_colormap(data, title='Input image patch', colormap=False)
    # Ground truth uses 1-based labels, so its max is the label count.
    num_labels_in_ground_truth = int(np.max(ground_truth))
    label_indecies = np.arange(num_labels_in_ground_truth).tolist()
    print(("[DEBUG] label indices: {}".format(label_indecies)))
    labels_in_ground_truth = glob_label_list[label_indecies]
    print(("[DEBUG] num_labels_ground_truth={}, labels={}".format(
        num_labels_in_ground_truth,
        labels_in_ground_truth)))
    # create a colormap of the ground truth:
    create_colormap(ground_truth, title='Groundtruth',
                    labels=labels_in_ground_truth,
                    colormap=True, legend=True)
    print(('[INFO] Load a trained FCN from {} ...'.format(network_weight_file)))
    model = model_generator.generate_resnet50_fcn(use_pretraining=False)
    model.load_weights(network_weight_file)
    # preprocess (center, normalize) the input, using Keras' build in routine:
    data = keras.applications.resnet50.preprocess_input(data.astype(np.float32), mode='tf')
    # define image size and network input/output size:
    im_h = data.shape[0]
    im_w = data.shape[1]
    s = cfg.PATCH_SIZE
    print('[INFO] Apply network to image ... ')
    # iterate over the complete image:
    # Tile the image with s-by-s windows; border windows are re-anchored
    # so they end exactly at the image edge (overlapping the previous tile).
    prediction = np.zeros((im_h, im_w))
    k = l = 0
    while k+s < im_h:
        while l+s < im_w:
            prediction[k:k+s, l:l+s] = net_predict(data, model, s, k, l)
            l += s
        # right border:
        l = im_w - s
        prediction[k:k + s, l:l + s] = net_predict(data, model, s, k, l)
        k += s
        l = 0
    # bottom border:
    k = im_h - s
    while l + s < im_w:
        prediction[k:k + s, l:l + s] = net_predict(data, model, s, k, l)
        l += s
    # right border:
    l = im_w - s
    prediction[k:k + s, l:l + s] = net_predict(data, model, s, k, l)
    print('[INFO] Calculate error map ... ')
    # create a map, showing which pixels were predicted wrongly:
    # error_map[k, l] == 1 marks a correctly classified pixel.
    error_map = np.zeros((im_h, im_w))
    num_cor = 0
    for k in range(im_h):
        for l in range(im_w):
            if prediction[k, l] == ground_truth[k, l]:
                error_map[k, l] = 1
                num_cor += 1
    create_errormap(error_map,'Misclassified pixels map')
    print('[INFO] Analyze the network accuracy ... ')
    results = {}
    #print('Overall accuracy: {}%'.format(np.round(100*num_cor/(im_w*im_h), 1)))
    overall_acc = np.divide(float(100*num_cor), float(im_w*im_h))
    print(('[INFO] Overall accuracy: %0.2f'% overall_acc))
    results["overall_accuracy"] = '%0.2f' % float(overall_acc)
    # calculate the confusion matrix:
    confusion, label_accuracy = analyze_result(ground_truth,
                                               prediction,
                                               cfg.NUM_LABELS)
    print_labelwise_accuracy(confusion, label_accuracy)
    print('[INFO] Confusion matrix: ')
    print(confusion)
    # store the % of correct predicted pixels per label in a dict
    results["label_accuracy"] = {}
    i_label = 0
    for label in glob_label_list:
        results["label_accuracy"][label] = "{}%".format(100.*
                                                        label_accuracy[i_label])
        i_label += 1
    num_labels_in_prediction = int(np.max(prediction))
    label_indecies = np.arange(num_labels_in_prediction).tolist()
    labels_in_prediction = glob_label_list[label_indecies]
    # create a colormap showing the networks predictions:
    create_colormap(prediction, title='Classification map',
                    labels = labels_in_prediction, legend=True)
    return results
# function to apply a trained network to a whole image:
def predict_complete_image_jpg(patch_path, network_weight_file,
                               convert_gray=False):
    """Run the trained FCN over a JPEG image (no ground truth available).

    Saves input and prediction colormaps and returns a dict with the total
    pixel count plus per-label pixel counts and fractions.

    :param patch_path: path to the input JPEG image
    :param network_weight_file: path to the trained model weights
    :param convert_gray: forwarded to the data loader
    """
    data = dio.load_image_jpg(patch_path, convert_gray=convert_gray)
    print(('[INFO] Image size: (%d x %d)' % (data.shape[0], data.shape[1])))
    total_pixels = data.shape[0]*data.shape[1]
    # plot the input:
    create_colormap(data, title='Input image patch', colormap=False)
    print(('[INFO] Load a trained FCN from {} ...'.format(network_weight_file)))
    model = model_generator.generate_resnet50_fcn(use_pretraining=False)
    model.load_weights(network_weight_file)
    # preprocess (center, normalize) the input, using Keras' build in routine:
    data = keras.applications.resnet50.preprocess_input(data.astype(np.float32), mode='tf')
    # define image size and network input/output size:
    im_h = data.shape[0]
    im_w = data.shape[1]
    s = cfg.PATCH_SIZE
    print('[INFO] Apply network to image ... ')
    # iterate over the complete image:
    # Tile the image with s-by-s windows; border windows are re-anchored
    # so they end exactly at the image edge (overlapping the previous tile).
    prediction = np.zeros((im_h, im_w))
    k = l = 0
    while k+s < im_h:
        while l+s < im_w:
            prediction[k:k+s, l:l+s] = net_predict(data, model, s, k, l)
            l += s
        # right border:
        l = im_w - s
        prediction[k:k + s, l:l + s] = net_predict(data, model, s, k, l)
        k += s
        l = 0
    # bottom border:
    k = im_h - s
    while l + s < im_w:
        prediction[k:k + s, l:l + s] = net_predict(data, model, s, k, l)
        l += s
    # right border:
    l = im_w - s
    prediction[k:k + s, l:l + s] = net_predict(data, model, s, k, l)
    num_labels_in_prediction = int(np.max(prediction))
    label_indecies = np.arange(num_labels_in_prediction).tolist()
    labels_in_prediction = glob_label_list[label_indecies]
    # create a colormap showing the networks predictions:
    create_colormap(prediction, title='Classification map',
                    labels = labels_in_prediction, legend=True)
    # Count pixels per predicted label (labels are 1-based in `prediction`).
    results = { "total_pixels" : int(total_pixels),
                "label_pixels" : {},
                "label_pixels_fraction": {}
              }
    print(("[DEBUG] unqiue values in prediction: {}".format(np.unique(prediction))))
    i_label = 0
    for label in glob_label_list:
        label_sum = (prediction == float(i_label + 1.)).sum()
        results["label_pixels"][label] = int(label_sum)
        results["label_pixels_fraction"][label] = float(np.round(
            label_sum/float(total_pixels),
            5))
        i_label += 1
    print("[INFO] Results:")
    print(('{:20s} \t {:>12s} \t {:>8s}'.format("Labels", "pixels", "fraction")))
    print(("-".rjust(48,"-")))
    for label, value in list(results["label_pixels"].items()):
        print(('{:20s}: \t {:12d} \t {:8f}'.format(label, value,
                                                   results["label_pixels_fraction"][label])))
    print(('{:20s}: \t {:12d}'.format("Total pixels", results["total_pixels"])))
    return results
def main():
    """Evaluate the model on the patch selected on the command line.

    Relies on the module-level ``args`` namespace populated in the
    ``__main__`` guard; not usable as a library entry point without it.

    Returns:
        dict: the results dictionary produced by
        ``predict_complete_image`` (total pixel count plus per-label
        pixel counts and fractions).
    """
    # Return the result instead of discarding it so the function is
    # also useful when invoked programmatically; the __main__ caller
    # simply ignores the return value, so this is backward-compatible.
    return predict_complete_image(args.patch_path, args.model)
if __name__ == '__main__':
    # Command-line entry point: collect the two input locations, then
    # run the evaluation.  `args` is intentionally module-level so that
    # main() can read it.
    parser = argparse.ArgumentParser(description='Evaluate the model')
    # Fixed help-text typo: "of of" -> "of".
    parser.add_argument('--patch_path', type=str,
                        help='Location of the patch to test \
                        (e.g., /srv/semseg_vaihingen/data/raw/vaihingen_15.hdf5 )')
    parser.add_argument('--model', type=str,
                        help='Location of the trained network model \
                        (e.g., /srv/semseg_vaihingen/models/resnet50_fcn_weights.hdf5)')
    args = parser.parse_args()
    main()
|
{
"alphanum_fraction": 0.6052631579,
"author": null,
"avg_line_length": 37.8808777429,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ed0f8fa182abcb2fd4eb926ea81a7ba0a583f81f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-02-27T09:48:53.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-02-27T09:48:53.000Z",
"max_forks_repo_head_hexsha": "0a8bed71836fa892b8a13b7d2c5109dbcae3c549",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "SilkeDH/semseg_vaihingen",
"max_forks_repo_path": "semseg_vaihingen/models/evaluate_network.py",
"max_issues_count": 8,
"max_issues_repo_head_hexsha": "0a8bed71836fa892b8a13b7d2c5109dbcae3c549",
"max_issues_repo_issues_event_max_datetime": "2022-02-10T00:20:46.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-03-24T17:39:59.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "SilkeDH/semseg_vaihingen",
"max_issues_repo_path": "semseg_vaihingen/models/evaluate_network.py",
"max_line_length": 91,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "0a8bed71836fa892b8a13b7d2c5109dbcae3c549",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "SilkeDH/semseg_vaihingen",
"max_stars_repo_path": "semseg_vaihingen/models/evaluate_network.py",
"max_stars_repo_stars_event_max_datetime": "2020-12-12T15:29:49.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-02-03T16:55:50.000Z",
"num_tokens": 2896,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 12084
}
|
%% DOS_DIR_TEST demonstrates issuing operating system commands via "dos".
%
% Discussion:
%
%   On MS/DOS systems DIR is a legal command that lists the files in
%   the current directory; it serves here as a harmless sample command.
%
% Licensing:
%
%   This code is distributed under the GNU LGPL license.
%
% Modified:
%
%   19 July 2006
%
% Author:
%
%   John Burkardt
%
fid = 1;

fprintf(fid, '\n');
fprintf(fid, 'DOS_DIR_TEST\n');
fprintf(fid, '  MATLAB version\n');
fprintf(fid, '  Demonstrate a simple use of the "dos" command,\n');
fprintf(fid, '  which allows MATLAB to invoke MS/DOS operating system commands.\n');

% Plain invocation: capture both the exit status and the command output.
[status, string] = dos('dir');
fprintf(fid, '\n');
fprintf(fid, '  Command status was %d\n', status);
fprintf(fid, '  Command output was %s\n', string);

% Same command, with -ECHO so the output is also shown as it is produced.
fprintf(fid, '\n');
fprintf(fid, '  Repeat the command, but add the -ECHO option.\n');
[status, string] = dos('dir', '-echo');
fprintf(fid, '\n');
fprintf(fid, '  Command status was %d\n', status);
fprintf(fid, '  Command output was %s\n', string);

% Same command, launched in the background via a trailing ampersand.
fprintf(fid, '\n');
fprintf(fid, '  Now repeat command, but with a final ampersand.\n');
[status, string] = dos('dir &');
fprintf(fid, '\n');
fprintf(fid, '  Command status was %d\n', status);
fprintf(fid, '  Command output was %s\n', string);

fprintf(fid, '\n');
fprintf(fid, 'DOS_DIR_TEST:\n');
fprintf(fid, '  Normal end of execution.\n');
|
{
"alphanum_fraction": null,
"author": "johannesgerer",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/matlab_os/dos_dir_test.m",
"reason": null,
"repo": "jburkardt-m",
"save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m",
"sha": "1726deb4a34dd08a49c26359d44ef47253f006c1",
"size": null
}
|
#! /usr/bin/env python
"""Tests for the ``preview_image`` module.
Authors
-------
- Johannes Sahlmann
Use
---
These tests can be run via the command line (omit the ``-s`` to
suppress verbose output to ``stdout``):
::
pytest -s test_preview_image.py
"""
import glob
import os
import shutil

import pytest
from astropy.io import fits

from jwql.utils.preview_image import PreviewImage
# Directory created (and removed) by the test_directory fixture while the
# tests run.  os.path.expanduser('~') is equivalent to os.environ['HOME']
# on Unix but, unlike it, does not raise KeyError where HOME is unset
# (e.g. on Windows).
TEST_DIRECTORY = os.path.join(os.path.expanduser('~'), 'preview_image_test')
# Directory that ships the sample FITS images, next to this test module.
TEST_DATA_DIRECTORY = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_data')
@pytest.fixture(scope="module")
def test_directory(test_dir=TEST_DIRECTORY):
    """Create a scratch directory for preview-image tests, removing it on teardown.

    Parameters
    ----------
    test_dir : str
        Path to directory used for testing

    Yields
    ------
    test_dir : str
        Path to directory used for testing
    """
    # exist_ok guards against leftovers from a previous crashed run,
    # where a plain os.mkdir would raise FileExistsError.
    os.makedirs(test_dir, exist_ok=True)
    yield test_dir
    print("teardown test directory")
    if os.path.isdir(test_dir):
        # rmtree (not rmdir) so teardown succeeds even when a failed
        # test leaves stray files behind in the directory.
        shutil.rmtree(test_dir)
def test_make_image(test_directory):
    """Use PreviewImage.make_image to create preview images of a sample JWST exposure.

    Assert that the number of JPGs created corresponds to the number of
    integrations, for both the full-size previews and the thumbnails.

    Parameters
    ----------
    test_directory : str
        Path of directory used for testing
    """
    filenames = glob.glob(os.path.join(TEST_DATA_DIRECTORY, '*.fits'))
    print('\nGenerating preview images for {}.'.format(filenames))
    output_directory = test_directory

    for filename in filenames:
        header = fits.getheader(filename)

        # Create and save the preview image and the thumbnail
        for create_thumbnail in [False, True]:
            try:
                image = PreviewImage(filename, "SCI")
                image.clip_percent = 0.01
                image.scaling = 'log'
                image.cmap = 'viridis'
                image.output_format = 'jpg'
                image.thumbnail = create_thumbnail
                image.output_directory = output_directory
                image.make_image()
            except ValueError as error:
                print(error)

            extension = 'thumb' if create_thumbnail else 'jpg'

            # Verify and clean up INSIDE the loop so BOTH extensions are
            # checked.  Previously this block sat after the loop, where
            # `extension` was always 'thumb' (the last iteration): the
            # 'jpg' previews were never counted or deleted, which also
            # made the fixture's directory teardown fail.
            preview_image_filenames = glob.glob(
                os.path.join(test_directory, '*.{}'.format(extension)))
            # NOTE(review): assumes NINTS in the primary header matches
            # the number of images make_image emits -- TODO confirm.
            assert len(preview_image_filenames) == header['NINTS']

            # clean up: delete preview images
            for file in preview_image_filenames:
                os.remove(file)
|
{
"alphanum_fraction": 0.6243174372,
"author": null,
"avg_line_length": 25.6728971963,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "789d887ee9e41e5e5f63fee8c63c28bf5abbc773",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0a97fe618c007883ffbced88ac1cb45a667fcb3c",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "hover2pi/jwql",
"max_forks_repo_path": "jwql/tests/test_preview_image.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0a97fe618c007883ffbced88ac1cb45a667fcb3c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "hover2pi/jwql",
"max_issues_repo_path": "jwql/tests/test_preview_image.py",
"max_line_length": 91,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0a97fe618c007883ffbced88ac1cb45a667fcb3c",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "hover2pi/jwql",
"max_stars_repo_path": "jwql/tests/test_preview_image.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 554,
"path": null,
"reason": "from astropy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2747
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.