# 2.4 plotting
# shared axis decorations, eval'ed after each subplot below
tmpexpr = :(
xlabel("Year"); grid(true);
xlim([ idx_year2plot[1] - 1, idx_year2plot[end] + 1 ]);
)
figure(figsize = (13,8))
subplot(2,2,1) # gap / exp
for tmpzeta in 1:length(reform_zeta_levs)
plot( Dt[:Year][idx_plot], res_ContriRatRise["gap/exp"][tmpzeta], reform_zeta_stys[tmpzeta] )
end
eval(tmpexpr); ylabel("Gap / Pool Expenditure (%)");
legend( string.("ζ = ",reform_zeta_levs) , loc = "lower right")
subplot(2,2,2) # gap / in
for tmpzeta in 1:length(reform_zeta_levs)
plot( Dt[:Year][idx_plot], res_ContriRatRise["gap/in"][tmpzeta], reform_zeta_stys[tmpzeta] )
end
eval(tmpexpr); ylabel("Gap / Pool Income (%)");
# legend( string.("ζ = ",reform_zeta_levs) , loc = "upper left")
subplot(2,2,3) # gap / gdp
for tmpzeta in 1:length(reform_zeta_levs)
plot( Dt[:Year][idx_plot], res_ContriRatRise["gap/gdp"][tmpzeta], reform_zeta_stys[tmpzeta] )
end
eval(tmpexpr); ylabel("Gap / GDP (%)");
# legend( string.("ζ = ",reform_zeta_levs) , loc = "upper left")
subplot(2,2,4) # gap / taxrev
for tmpzeta in 1:length(reform_zeta_levs)
plot( Dt[:Year][idx_plot], res_ContriRatRise["gap/taxrev"][tmpzeta], reform_zeta_stys[tmpzeta] )
end
eval(tmpexpr); ylabel("Gap / Tax Revenues (%)");
# legend( string.("ζ = ",reform_zeta_levs) , loc = "upper left")
tight_layout()
# 2.5 save figure
savefig( "./output/ContributionRateRise.pdf", format = "pdf" )
#
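# ---------------------------------------------------------------------
# Hedged sketch (not part of the original script): the shared axis
# decorations above live in a quoted expression that is applied with
# `eval`, which runs at global scope. A plain function gives the same
# result without `eval`; `idx_year2plot` is the same index vector the
# quoted expression refers to.
function decorate_axes!(idx_year2plot)
    xlabel("Year"); grid(true)
    xlim([ idx_year2plot[1] - 1, idx_year2plot[end] + 1 ])
end
# usage: replace each `eval(tmpexpr)` above with `decorate_axes!(idx_year2plot)`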
|
{"hexsha": "543c19e5909576260450d63e4dcb21fee72b0ddc", "size": 1611, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/subsections_PlotForPaper/sub_07_Plot4ContributionRateRise.jl", "max_stars_repo_name": "Clpr/OLG4UEBMI", "max_stars_repo_head_hexsha": "53b7e0afab1490e3eba34455c06cbc74277a5953", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/subsections_PlotForPaper/sub_07_Plot4ContributionRateRise.jl", "max_issues_repo_name": "Clpr/OLG4UEBMI", "max_issues_repo_head_hexsha": "53b7e0afab1490e3eba34455c06cbc74277a5953", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/subsections_PlotForPaper/sub_07_Plot4ContributionRateRise.jl", "max_forks_repo_name": "Clpr/OLG4UEBMI", "max_forks_repo_head_hexsha": "53b7e0afab1490e3eba34455c06cbc74277a5953", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-16T02:59:22.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-16T02:59:22.000Z", "avg_line_length": 39.2926829268, "max_line_length": 108, "alphanum_fraction": 0.5859714463, "num_tokens": 532}
|
[STATEMENT]
lemma fsi (*[simp]*):"f \<inter> s^-1 = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f \<inter> s\<inverse> = {}
[PROOF STEP]
using sfi
[PROOF STATE]
proof (prove)
using this:
s \<inter> f\<inverse> = {}
goal (1 subgoal):
1. f \<inter> s\<inverse> = {}
[PROOF STEP]
by auto
|
{"llama_tokens": 134, "file": "Allen_Calculus_disjoint_relations", "length": 2}
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os.path
import collections
import urllib.parse
import pkg_resources
import itertools
import qiime2
import skbio
import skbio.diversity
import scipy.spatial.distance
import numpy
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import q2templates
from statsmodels.sandbox.stats.multicomp import multipletests
TEMPLATES = pkg_resources.resource_filename('q2_diversity', '_beta')
def bioenv(output_dir: str, distance_matrix: skbio.DistanceMatrix,
metadata: qiime2.Metadata) -> None:
# convert metadata to numeric values where applicable, drop the non-numeric
# columns, and then drop samples that contain NaNs
df = metadata.to_dataframe()
df = df.apply(lambda x: pd.to_numeric(x, errors='ignore'))
# filter categorical columns
pre_filtered_cols = set(df.columns)
df = df.select_dtypes([numpy.number]).dropna()
filtered_categorical_cols = pre_filtered_cols - set(df.columns)
# filter 0 variance numerical columns
pre_filtered_cols = set(df.columns)
df = df.loc[:, df.var() != 0]
filtered_zero_variance_cols = pre_filtered_cols - set(df.columns)
# filter the distance matrix to exclude samples that were dropped from
# the metadata, and keep track of how many samples survived the filtering
# so that information can be presented to the user.
initial_dm_length = distance_matrix.shape[0]
distance_matrix = distance_matrix.filter(df.index, strict=False)
filtered_dm_length = distance_matrix.shape[0]
result = skbio.stats.distance.bioenv(distance_matrix, df)
result = result.to_html(classes='table table-striped table-hover').replace(
'border="1"', 'border="0"')
index = os.path.join(TEMPLATES, 'bioenv_assets', 'index.html')
q2templates.render(index, output_dir, context={
'initial_dm_length': initial_dm_length,
'filtered_dm_length': filtered_dm_length,
'filtered_categorical_cols': ', '.join(filtered_categorical_cols),
'filtered_zero_variance_cols': ', '.join(filtered_zero_variance_cols),
'result': result})
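# Hedged illustration (not part of the original module) of the metadata
# filtering performed in bioenv() above: coerce to numeric, drop
# non-numeric columns, drop samples with NaNs, then drop zero-variance
# columns. All column names and values here are invented.
def _bioenv_filtering_example():
    df = pd.DataFrame({'ph': ['6.8', '7.1', '7.0'],
                       'site': ['a', 'b', 'c'],       # non-numeric -> dropped
                       'depth': [10.0, 10.0, 10.0]})  # zero variance -> dropped
    df = df.apply(lambda x: pd.to_numeric(x, errors='ignore'))
    df = df.select_dtypes([numpy.number]).dropna()
    df = df.loc[:, df.var() != 0]
    return df  # a single numeric column remains: 'ph'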
_beta_group_significance_fns = {'permanova': skbio.stats.distance.permanova,
'anosim': skbio.stats.distance.anosim}
def _get_distance_boxplot_data(distance_matrix, group_id, groupings):
x_ticklabels = []
all_group_distances = []
# extract the within group distances
within_group_distances = []
group = groupings[group_id]
for i, sid1 in enumerate(group):
for sid2 in group[:i]:
within_group_distances.append(distance_matrix[sid1, sid2])
x_ticklabels.append('%s (n=%d)' %
(group_id, len(within_group_distances)))
all_group_distances.append(within_group_distances)
# extract between group distances for group to each other group
for other_group_id, other_group in groupings.items():
between_group_distances = []
if group_id == other_group_id:
continue
for sid1 in group:
for sid2 in other_group:
between_group_distances.append(distance_matrix[sid1, sid2])
x_ticklabels.append('%s (n=%d)' %
(other_group_id, len(between_group_distances)))
all_group_distances.append(between_group_distances)
return all_group_distances, x_ticklabels
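# Hedged usage sketch for _get_distance_boxplot_data (the ids and values
# below are invented). It relies on skbio.DistanceMatrix supporting
# indexing by sample id, as used in the loops above.
def _distance_boxplot_data_example():
    dm = skbio.DistanceMatrix([[0.0, 0.2, 0.6],
                               [0.2, 0.0, 0.5],
                               [0.6, 0.5, 0.0]], ids=['s1', 's2', 's3'])
    groupings = collections.OrderedDict([('g1', ['s1', 's2']),
                                         ('g2', ['s3'])])
    distances, labels = _get_distance_boxplot_data(dm, 'g1', groupings)
    # distances -> [[0.2], [0.6, 0.5]]; labels -> ['g1 (n=1)', 'g2 (n=2)']
    return distances, labels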
def _get_pairwise_group_significance_stats(
distance_matrix, group1_id, group2_id, groupings, metadata,
beta_group_significance_fn, permutations):
group1_group2_samples = groupings[group1_id] + groupings[group2_id]
metadata = metadata[group1_group2_samples]
distance_matrix = distance_matrix.filter(group1_group2_samples)
return beta_group_significance_fn(distance_matrix, metadata,
permutations=permutations)
def beta_group_significance(output_dir: str,
distance_matrix: skbio.DistanceMatrix,
metadata: qiime2.MetadataCategory,
method: str='permanova',
pairwise: bool=False,
permutations: int=999) -> None:
try:
beta_group_significance_fn = _beta_group_significance_fns[method]
except KeyError:
raise ValueError('Unknown group significance method %s. The available '
'options are %s.' %
(method,
', '.join(_beta_group_significance_fns)))
# Cast metadata to numeric (if applicable), which gives better sorting
# in boxplots. Then filter any samples that are not in the distance matrix,
# and drop samples that have no data for this metadata
# category, including those with empty strings as values.
metadata = pd.to_numeric(metadata.to_series(), errors='ignore')
metadata = metadata.loc[list(distance_matrix.ids)]
metadata = metadata.replace(r'', numpy.nan).dropna()
# filter the distance matrix to exclude samples that were dropped from
# the metadata, and keep track of how many samples survived the filtering
# so that information can be presented to the user.
initial_dm_length = distance_matrix.shape[0]
distance_matrix = distance_matrix.filter(metadata.index)
filtered_dm_length = distance_matrix.shape[0]
# Run the significance test
result = beta_group_significance_fn(distance_matrix, metadata,
permutations=permutations)
# Generate distance boxplots
sns.set_style("white")
# Identify the groups, then compute the within group distances and the
# between group distances, and generate one boxplot per group.
# groups will be an OrderedDict mapping group id to the sample ids in that
# group. The order is used both on the x-axis, and in the layout of the
# boxplots in the visualization.
groupings = collections.OrderedDict(
[(id, list(series.index))
for id, series in sorted(metadata.groupby(metadata))])
for group_id in groupings:
group_distances, x_ticklabels = \
_get_distance_boxplot_data(distance_matrix, group_id, groupings)
ax = sns.boxplot(data=group_distances, flierprops={
'marker': 'o', 'markeredgecolor': 'black', 'markeredgewidth': 0.5,
'alpha': 0.5})
ax.set_xticklabels(x_ticklabels, rotation=90)
ax.set_xlabel('Group')
ax.set_ylabel('Distance')
ax.set_title('Distances to %s' % group_id)
# change the color of the boxes to white
for box in ax.artists:
box.set_facecolor('white')
sns.despine()
plt.tight_layout()
fig = ax.get_figure()
fig.savefig(os.path.join(output_dir, '%s-boxplots.png' %
urllib.parse.quote_plus(str(group_id))))
fig.savefig(os.path.join(output_dir, '%s-boxplots.pdf' %
urllib.parse.quote_plus(str(group_id))))
fig.clear()
result_html = result.to_frame().to_html(classes=("table table-striped "
"table-hover"))
result_html = result_html.replace('border="1"', 'border="0"')
if pairwise:
pairwise_results = []
for group1_id, group2_id in itertools.combinations(groupings, 2):
pairwise_result = \
_get_pairwise_group_significance_stats(
distance_matrix=distance_matrix,
group1_id=group1_id,
group2_id=group2_id,
groupings=groupings,
metadata=metadata,
beta_group_significance_fn=beta_group_significance_fn,
permutations=permutations)
pairwise_results.append([group1_id,
group2_id,
pairwise_result['sample size'],
permutations,
pairwise_result['test statistic'],
pairwise_result['p-value']])
columns = ['Group 1', 'Group 2', 'Sample size', 'Permutations',
result['test statistic name'], 'p-value']
pairwise_results = pd.DataFrame(pairwise_results, columns=columns)
pairwise_results.set_index(['Group 1', 'Group 2'], inplace=True)
pairwise_results['q-value'] = multipletests(
pairwise_results['p-value'], method='fdr_bh')[1]
pairwise_results.sort_index(inplace=True)
pairwise_path = os.path.join(
output_dir, '%s-pairwise.csv' % method)
pairwise_results.to_csv(pairwise_path)
pairwise_results_html = pairwise_results.to_html(
classes=("table table-striped table-hover"))
pairwise_results_html = pairwise_results_html.replace(
'border="1"', 'border="0"')
else:
pairwise_results_html = None
index = os.path.join(
TEMPLATES, 'beta_group_significance_assets', 'index.html')
q2templates.render(index, output_dir, context={
'initial_dm_length': initial_dm_length,
'filtered_dm_length': filtered_dm_length,
'method': method,
'groupings': groupings,
'result': result_html,
'pairwise_results': pairwise_results_html
})
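# Hedged illustration (not part of the original module) of the
# Benjamini-Hochberg correction used for the pairwise q-values above:
# multipletests returns (reject, corrected_p, ...), and element [1]
# holds the corrected p-values. The p-values below are invented.
def _fdr_example():
    pvals = pd.Series([0.01, 0.02, 0.04])
    qvals = multipletests(pvals, method='fdr_bh')[1]
    # q_i = p_i * m / rank_i, made monotone: array([0.03, 0.03, 0.04])
    return qvals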
def _metadata_distance(metadata: pd.Series) -> skbio.DistanceMatrix:
# This code is derived from @jairideout's scikit-bio cookbook recipe,
# "Exploring Microbial Community Diversity"
# https://github.com/biocore/scikit-bio-cookbook
distances = scipy.spatial.distance.pdist(
metadata.values[:, numpy.newaxis], metric='euclidean')
return skbio.DistanceMatrix(distances, ids=metadata.index)
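# Hedged sanity check (not in the original module): for 1-D metadata the
# Euclidean pdist above reduces to absolute pairwise differences.
def _metadata_distance_example():
    md = pd.Series([1.0, 3.0, 6.0], index=['s1', 's2', 's3'], name='ph')
    dm = _metadata_distance(md)
    # dm['s1', 's2'] == 2.0, dm['s1', 's3'] == 5.0, dm['s2', 's3'] == 3.0
    return dm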
def beta_correlation(output_dir: str,
distance_matrix: skbio.DistanceMatrix,
metadata: qiime2.MetadataCategory,
method: str='spearman',
permutations: int=999) -> None:
test_statistics = {'spearman': 'rho', 'pearson': 'r'}
alt_hypothesis = 'two-sided'
try:
metadata = pd.to_numeric(metadata.to_series(), errors='raise')
except ValueError as e:
raise ValueError('Only numeric data can be used with the Mantel test. '
'Non-numeric data was encountered in the sample '
'metadata. Original error message follows:\n%s' %
str(e))
initial_metadata_length = len(metadata)
metadata = metadata.loc[list(distance_matrix.ids)]
metadata = metadata.replace(r'', numpy.nan).dropna()
filtered_metadata_length = len(metadata)
ids_with_missing_metadata = set(distance_matrix.ids) - set(metadata.index)
if len(ids_with_missing_metadata) > 0:
raise ValueError('All samples in distance matrix must be present '
'and contain data in the sample metadata. The '
'following samples were present in the distance '
'matrix, but were missing from the sample metadata '
'or had no data: %s' %
', '.join(ids_with_missing_metadata))
metadata_distances = _metadata_distance(metadata)
r, p, n = skbio.stats.distance.mantel(
distance_matrix, metadata_distances, method=method,
permutations=permutations, alternative=alt_hypothesis, strict=True)
result = pd.Series([method.title(), n, permutations, alt_hypothesis,
metadata.name, r, p],
index=['Method', 'Sample size', 'Permutations',
'Alternative hypothesis', 'Metadata category',
'%s %s' % (method.title(),
test_statistics[method]),
'p-value'],
name='Mantel test results')
result_html = result.to_frame().to_html(classes=("table table-striped "
"table-hover"))
result_html = result_html.replace('border="1"', 'border="0"')
scatter_data = []
for id1, id2 in itertools.combinations(distance_matrix.ids, 2):
scatter_data.append((distance_matrix[id1, id2],
metadata_distances[id1, id2]))
x = 'Input distance'
y = 'Euclidean distance of\n%s' % metadata.name
scatter_data = pd.DataFrame(scatter_data, columns=[x, y])
fig = sns.regplot(x=x, y=y, data=scatter_data, fit_reg=False).get_figure()
fig.savefig(os.path.join(output_dir, 'beta-correlation-scatter.png'))
fig.savefig(os.path.join(output_dir, 'beta-correlation-scatter.pdf'))
index = os.path.join(
TEMPLATES, 'beta_correlation_assets', 'index.html')
q2templates.render(index, output_dir, context={
'initial_metadata_length': initial_metadata_length,
'filtered_metadata_length': filtered_metadata_length,
'result': result_html
})
|
{"hexsha": "dc8fd39c9fb7f1520dfd6a1cd7ff685a3bffa670", "size": 13393, "ext": "py", "lang": "Python", "max_stars_repo_path": "q2_diversity/_beta/_visualizer.py", "max_stars_repo_name": "jairideout/diversity", "max_stars_repo_head_hexsha": "0024301a03134b2b05aeb83f6b01bd22da5b8cb2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "q2_diversity/_beta/_visualizer.py", "max_issues_repo_name": "jairideout/diversity", "max_issues_repo_head_hexsha": "0024301a03134b2b05aeb83f6b01bd22da5b8cb2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "q2_diversity/_beta/_visualizer.py", "max_forks_repo_name": "jairideout/diversity", "max_forks_repo_head_hexsha": "0024301a03134b2b05aeb83f6b01bd22da5b8cb2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4950166113, "max_line_length": 79, "alphanum_fraction": 0.6319719256, "include": true, "reason": "import numpy,import scipy,from statsmodels", "num_tokens": 2726}
|
// Copyright (c) 2005-2008 Hartmut Kaiser
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <iostream>
#include <boost/lexical_cast.hpp>
#include <saga/saga.hpp>
int main (int argc,
char * argv[])
{
if ( argc > 2 )
{
std::cerr << "\n\tUsage: stream_server [url]\n"
<< "\n\tDefault url is 'any://localhost/'.\n"
<< std::endl;
return -2;
}
try
{
// retrieve parameter values
saga::url url ("any://localhost/");
if ( argc > 1 )
{
url = argv[1];
}
std::cout << "Serving " << url << std::endl;
// actual functionality
saga::stream::server service (url);
while ( 1 )
{
saga::stream::stream strm = service.serve ();
std::cout << "Established connection from: "
<< strm.get_url ()
<< std::endl;
char buff[255];
// read() reports how many bytes arrived; echo exactly that many back
saga::ssize_t read_bytes = strm.read (saga::buffer(buff));
strm.write (saga::buffer (buff, read_bytes));
}
}
catch ( saga::exception const & e )
{
std::cerr << "saga::exception caught: " << e.what () << std::endl;
return -1;
}
return 0;
}
|
{"hexsha": "a7376ff21baafb0233ddc9cd6c593a354a030835", "size": 1270, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "examples/packages/stream/stream_server.cpp", "max_stars_repo_name": "saga-project/saga-cpp", "max_stars_repo_head_hexsha": "7376c0de0529e7d7b80cf08b94ec484c2e56d38e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2015-09-15T16:24:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-12T11:05:55.000Z", "max_issues_repo_path": "examples/packages/stream/stream_server.cpp", "max_issues_repo_name": "saga-project/saga-cpp", "max_issues_repo_head_hexsha": "7376c0de0529e7d7b80cf08b94ec484c2e56d38e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/packages/stream/stream_server.cpp", "max_forks_repo_name": "saga-project/saga-cpp", "max_forks_repo_head_hexsha": "7376c0de0529e7d7b80cf08b94ec484c2e56d38e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2016-11-17T04:38:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-10T17:23:52.000Z", "avg_line_length": 21.1666666667, "max_line_length": 81, "alphanum_fraction": 0.5456692913, "num_tokens": 359}
|
@testset "Genetic Programming" begin
Random.seed!(9874984737486);
pop = 10
terms = Terminal[:x, :y, rand]
funcs = Function[+,-,*,/]
t = TreeGP(pop, terms, funcs, maxdepth=2)
@test Evolutionary.population_size(t) == pop
@test sort(Evolutionary.terminals(t)) == [:x, :y]
@testset for (term, dim) in t.terminals
@test dim == 1
end
@testset for (func, arity) in t.functions
@test arity == 2
end
@test summary(t) == "TreeGP[P=10,Parameter[x,y],Function[*, +, /, -]]"
popexp = Evolutionary.initial_population(t);
@test length(popexp) == pop
Random.seed!(9874984737484)
ft = rand(t, method=:full)
@test Evolutionary.nodes(ft) == 7
@test Evolutionary.height(ft) == 2
gt = rand(t, method=:grow)
@test Evolutionary.nodes(gt) == 5
@test Evolutionary.height(gt) == 2
@test gt[0] == gt
@test gt[1] == :x
@test gt[2] == :x
@test gt[4] == Expr(:call, /, :x, :x)
# simplification
@test Expr(:call, -, :x, :x) |> Evolutionary.simplify! == 0
@test Expr(:call, /, :x, :x) |> Evolutionary.simplify! == 1
@test Expr(:call, *, 0, :x) |> Evolutionary.simplify! == 0
@test Expr(:call, *, :x, 0) |> Evolutionary.simplify! == 0
@test Expr(:call, /, 0, :x) |> Evolutionary.simplify! == 0
@test Expr(:call, /, 1, 0) |> Evolutionary.simplify! == 1
@test Expr(:call, +, 0, :x) |> Evolutionary.simplify! == :x
@test Expr(:call, +, :x, 0) |> Evolutionary.simplify! == :x
@test Expr(:call, -, :x, 0) |> Evolutionary.simplify! == :x
@test Expr(:call, +, :x, :x) |> Evolutionary.simplify! == Expr(:call, *, 2, :x)
@test Expr(:call, +, 1, Expr(:call, -, 1, :x) ) |> Evolutionary.simplify! == Expr(:call, -, 2, :x)
@test Expr(:call, -, 1, Expr(:call, +, 1, :x) ) |> Evolutionary.simplify! == Expr(:call, +, 0, :x)
@test Expr(:call, *, Expr(:call, *, :x, :y), Expr(:call, /, :z, :x)) |> Evolutionary.simplify! == Expr(:call, *, :y, :z)
@test Expr(:call, *, Expr(:call, *, :x, :y), Expr(:call, /, :z, :y)) |> Evolutionary.simplify! == Expr(:call, *, :x, :z)
@test Expr(:call, *, Expr(:call, /, :z, :x), Expr(:call, *, :x, :y)) |> Evolutionary.simplify! == Expr(:call, *, :z, :y)
@test Expr(:call, *, Expr(:call, /, :z, :y), Expr(:call, *, :x, :y)) |> Evolutionary.simplify! == Expr(:call, *, :z, :x)
@test Expr(:call, *, Expr(:call, *, 2, :y), Expr(:call, /, :z, 2)) |> Evolutionary.simplify! == Expr(:call, *, :y, :z)
@test Expr(:call, *, Expr(:call, *, :y, 2), Expr(:call, /, :z, 2)) |> Evolutionary.simplify! == Expr(:call, *, :y, :z)
@test Expr(:call, *, Expr(:call, /, :z, 2), Expr(:call, *, 2, :y)) |> Evolutionary.simplify! == Expr(:call, *, :z, :y)
@test Expr(:call, *, Expr(:call, /, :z, 2), Expr(:call, *, :y, 2)) |> Evolutionary.simplify! == Expr(:call, *, :z, :y)
@test Expr(:call, +, Expr(:call, -, :x, 1), 1 ) |> Evolutionary.simplify! == Expr(:call, -, :x, 2)
@test Expr(:call, -, Expr(:call, +, :x, 1), 1 ) |> Evolutionary.simplify! == Expr(:call, +, :x, 0)
# evaluation
ex = Expr(:call, +, 1, :x) |> Evolutionary.Expression
xs = rand(10)
@test ex(xs[1]) == xs[1]+1
@test ex.(xs) == xs.+1
depth = 5
fitfun(x) = x*x + x + 1.0
function fitobj(expr)
rg = -5.0:0.5:5.0
ex = Evolutionary.Expression(expr)
sum(v->isnan(v) ? 1.0 : v, abs2.(fitfun.(rg) - ex.(rg)) )/length(rg) |> sqrt
end
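# Hedged sanity check (an addition, not in the original suite): the exact
# target expression x*x + (x + 1.0) should score ~0 RMSE under `fitobj`.
target = Expr(:call, +, Expr(:call, *, :x, :x), Expr(:call, +, :x, 1.0))
@test fitobj(target) ≈ 0.0 atol = 1e-8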
Random.seed!(9874984737426);
res = Evolutionary.optimize(fitobj,
TreeGP(50, Terminal[:x, randn], Function[+,-,*,/],
mindepth=1,
maxdepth=depth,
optimizer = GA(
selection = uniformranking(5),
ɛ = 0.1,
mutationRate = 0.8,
crossoverRate = 0.2,
),
)
)
@test minimum(res) < 1
end
|
{"hexsha": "3cf5cc18aa00a26d8f0856cd88d974823408c536", "size": 3856, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/gp.jl", "max_stars_repo_name": "dmolina/Evolutionary.jl", "max_stars_repo_head_hexsha": "928438c682f1c31a08b6f0bd503a2dab3842ecc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/gp.jl", "max_issues_repo_name": "dmolina/Evolutionary.jl", "max_issues_repo_head_hexsha": "928438c682f1c31a08b6f0bd503a2dab3842ecc8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/gp.jl", "max_forks_repo_name": "dmolina/Evolutionary.jl", "max_forks_repo_head_hexsha": "928438c682f1c31a08b6f0bd503a2dab3842ecc8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3218390805, "max_line_length": 124, "alphanum_fraction": 0.5251556017, "num_tokens": 1407}
|
import os
import importlib
import numpy as np
from .helpers import util
from .helpers.data import WSGenerator, WSRandGenerator
from sim.helpers.util import get_path as get_network_path
from sim.helpers.data import DataInfo
def compute(args):
network = args.NETWORK
epoch = args.epoch
anon = not args.include_uid
repo = args.datarepo
dataset = args.DATASET
randc = args.randcomp
dinfo = DataInfo(repo)
wpath = get_network_path(repo, network)+str(epoch)+'.h5'
nmod = importlib.import_module('sim.networks.'+network)
model = nmod.model(dinfo)
model.load_weights(wpath)
if randc:
numSamples = 2000000
print("Creating WSRand-generator for "+str(dataset))
gen = WSRandGenerator(dinfo, dataset, numSamples)
tss = []
sims = np.empty((0,2))
print("Generating random similarities for "+str(dataset)+" with "+str(numSamples)+" authors.")
per = 0
for i, (ts, X) in enumerate(gen):
if i >= per*len(gen):
print(str(round(per*100))+"%")
per += max(0.01, 1.0/len(gen))
if per > 1.0:
break
sims = np.vstack([sims, model.predict(X)])
tss += ts
simOut = util.get_path(repo, dataset, network)+'data-random.csv'
with open(simOut, 'w') as fsim:
for (ts,sim) in zip(tss,sims):
fsim.write(str(ts[0])+';'+str(ts[1])+';'+str(sim[1])+'\n')
else:
print("Creating WS-generator for "+str(dataset))
gen = WSGenerator(dinfo, dataset)
res = []
print("Generating similarities for "+str(dataset)+" with "+str(len(gen))+" authors.")
per = 0
for i, (uid, ts, ls, Xs) in enumerate(gen):
if i >= per*len(gen):
print(str(round(per*100))+"%")
per += max(0.01, 1.0/len(gen))
sims = np.empty((0,2))
for x in Xs:
sims = np.vstack([sims, model.predict(x)])
res.append((uid, ts, ls, sims))
simOut = util.get_path(repo, dataset, network)+'data-sim.csv'
metaOut = util.get_path(repo, dataset, network)+'data-meta.csv'
with open(simOut, 'w') as fsim, open(metaOut, 'w') as fmeta:
for (uid,ts,ls,sims) in res:
if anon:
uid = 'author'
fsim.write(str(uid)+';'+';'.join([str(sim[1]) for sim in sims])+'\n')
fmeta.write(str(uid)+';'+';'.join([str(l)+','+str(t) for l,t in zip(ls,ts)])+'\n')
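# Hedged performance note (not part of the original script): the repeated
# np.vstack calls above copy the accumulated array on every iteration.
# Collecting batch predictions in a list and stacking once is equivalent
# and linear-time; this simplified sketch assumes a generator yielding
# (ts, X) pairs like the WSRand branch, and ignores the sampling cap.
def _predict_all(model, gen):
    batches = [model.predict(X) for _, X in gen]
    return np.vstack(batches) if batches else np.empty((0, 2))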
|
{"hexsha": "ad76c08a1b36dc09f1728b571c7c4ec237e13eee", "size": 2564, "ext": "py", "lang": "Python", "max_stars_repo_path": "ws/compute.py", "max_stars_repo_name": "StephanLorenzen/AuthorshipVerification", "max_stars_repo_head_hexsha": "1397f5967ad9f41bf57a9e9e91aaf138b0b5ae6f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ws/compute.py", "max_issues_repo_name": "StephanLorenzen/AuthorshipVerification", "max_issues_repo_head_hexsha": "1397f5967ad9f41bf57a9e9e91aaf138b0b5ae6f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ws/compute.py", "max_forks_repo_name": "StephanLorenzen/AuthorshipVerification", "max_forks_repo_head_hexsha": "1397f5967ad9f41bf57a9e9e91aaf138b0b5ae6f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6486486486, "max_line_length": 102, "alphanum_fraction": 0.5495319813, "include": true, "reason": "import numpy", "num_tokens": 687}
|
"""
This code is based on code found at: https://commons.wikimedia.org/wiki/File:Beta_distribution_pdf.svg by user Horas, based on the work of user Krishnavedala.
"""
from matplotlib.pyplot import *
from numpy import linspace
from scipy.stats import beta
x = linspace(0,1,75)
fig = figure()
ax = fig.add_subplot(111)
ax.plot(x,beta.pdf(x,0.5,0.5),label=r"$\alpha_1=\alpha_2=0.5$")
ax.plot(x,beta.pdf(x,5,1),label=r"$\alpha_1=5, \alpha_2=1$")
ax.plot(x,beta.pdf(x,1,3),label=r"$\alpha_1=1, \alpha_2=3$")
ax.plot(x,beta.pdf(x,2,2),label=r"$\alpha_1=2, \alpha_2=2$")
ax.plot(x,beta.pdf(x,1,1),label=r"$\alpha_1=1, \alpha_2=1$")
ax.grid(True)
ax.minorticks_on()
ax.legend(loc=9)
setp(ax.get_legend().get_texts(),fontsize='small')
ax.set_ylim(0,2.6)
ax.set_xlabel("Value of $P^x(1)$")
ax.set_ylabel("Probability Density")
fig.savefig("dirichlet_distribution_pdf.pdf",bbox_inches="tight",\
pad_inches=.15)
fig = figure()
ax = fig.add_subplot(111)
ax.plot(x,beta.pdf(x,0.5,0.5),label=r"$\alpha_1=\alpha_2=0.5$")
ax.plot(x,beta.pdf(x,0.1,0.1),label=r"$\alpha_1=\alpha_2=0.1$")
ax.plot(x,beta.pdf(x,0.01,0.01),label=r"$\alpha_1=\alpha_2=0.01$")
ax.grid(True)
ax.minorticks_on()
ax.legend(loc=9)
setp(ax.get_legend().get_texts(),fontsize='small')
ax.set_ylim(0,2.6)
ax.set_xlabel("Value of $P^x(1)$")
ax.set_ylabel("Probability Density")
fig.savefig("dirichlet_distribution_sparse_pdf.pdf",bbox_inches="tight",\
pad_inches=.15)
fig = figure()
ax = fig.add_subplot(111)
ax.plot(x,beta.pdf(x,2,4),label=r"$\alpha_1=2,\alpha_2=4$")
ax.plot(x,beta.pdf(x,100,200),label=r"$\alpha_1=100, \alpha_2=200$")
ax.plot(x,beta.pdf(x,1000,2000),label=r"$\alpha_1=1000, \alpha_2=2000$")
ax.grid(True)
ax.minorticks_on()
ax.legend(loc=9)
setp(ax.get_legend().get_texts(),fontsize='small')
ax.set_ylim(0,2.6)
ax.set_xlabel("Value of $P^x(1)$")
ax.set_ylabel("Probability Density")
fig.savefig("dirichlet_distribution_dense_pdf.pdf",bbox_inches="tight",\
pad_inches=.15)
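# Hedged refactor sketch (not in the original script): the three figures
# above differ only in their (alpha1, alpha2) pairs, so the repeated block
# can be folded into one helper. The function name is illustrative.
def plot_beta_pdfs(param_pairs, outfile):
    fig = figure()
    ax = fig.add_subplot(111)
    for a1, a2 in param_pairs:
        ax.plot(x, beta.pdf(x, a1, a2),
                label=r"$\alpha_1=%g, \alpha_2=%g$" % (a1, a2))
    ax.grid(True)
    ax.minorticks_on()
    ax.legend(loc=9)
    setp(ax.get_legend().get_texts(), fontsize='small')
    ax.set_ylim(0, 2.6)
    ax.set_xlabel("Value of $P^x(1)$")
    ax.set_ylabel("Probability Density")
    fig.savefig(outfile, bbox_inches="tight", pad_inches=.15)
# e.g. plot_beta_pdfs([(2, 4), (100, 200), (1000, 2000)],
#                     "dirichlet_distribution_dense_pdf.pdf")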
|
{"hexsha": "33b2855d3090c7ab9145dc9eb0f878c422aba600", "size": 1951, "ext": "py", "lang": "Python", "max_stars_repo_path": "Dirichlet_PDF.py", "max_stars_repo_name": "coli-saar/BayesianNLP2017", "max_stars_repo_head_hexsha": "116e7d8d2e88dea80bdacc20f15a57268adf1a32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Dirichlet_PDF.py", "max_issues_repo_name": "coli-saar/BayesianNLP2017", "max_issues_repo_head_hexsha": "116e7d8d2e88dea80bdacc20f15a57268adf1a32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Dirichlet_PDF.py", "max_forks_repo_name": "coli-saar/BayesianNLP2017", "max_forks_repo_head_hexsha": "116e7d8d2e88dea80bdacc20f15a57268adf1a32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5166666667, "max_line_length": 156, "alphanum_fraction": 0.7134802665, "include": true, "reason": "from numpy,from scipy", "num_tokens": 683}
|
from .query_graph import convert_to_networkx
from .QueryPlan import QueryPlan, TerminalEvent
import networkx as nx
from collections import defaultdict
def generate_plan(trapi_query_graph):
nxgraph = convert_to_networkx(trapi_query_graph)
double_pins, components = decompose(nxgraph)
plan = double_pins #these are already query plans
for component in components:
component_plan = generate_component_plan(component)
plan.append(component_plan)
return plan
def nodes_to_component(nodeset, master_graph, boundnodes):
    """Given a set of nodes, find the induced subgraph that includes those nodes plus any
    adjacent bound nodes."""
    for bn in boundnodes:
        # test adjacency first, then add: mutating a set while iterating
        # over it is fragile
        if any(master_graph.has_edge(node, bn) for node in nodeset):
            nodeset.add(bn)
    return master_graph.subgraph(nodeset).copy()
def get_bound_nodes(graph):
#can this be replaced with the right nx incantation?
bound_nodes = []
nodes = graph.nodes(data = True)
for node,nodedata in nodes:
if nodedata['bound']:
bound_nodes.append(node)
return bound_nodes
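# Hedged answer to the question in get_bound_nodes above: there is no
# single networkx call for this, but an equivalent comprehension over
# nodes(data=True) reads well (illustrative alternative, not used below).
def _get_bound_nodes_oneliner(graph):
    return [n for n, d in graph.nodes(data=True) if d['bound']]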
def decompose(graph):
"""Given a graph that contains 1 or more bound nodes, find components that can be built entirely independently.
If one part of a query graph is connected to another part only via a bound node, then the two parts are
independent and the final answer is just a cartesian join across the two components."""
#First handle the special case of single edges connecting 2 bound nodes. The rest of the algorithm
# chokes on that.
clean_graph = graph.copy()
direct_edges = []
for u,v in graph.edges():
if graph.nodes[u]['bound'] and graph.nodes[v]['bound']:
direct_edges.append((u,v))
double_pins = []
for u,v in direct_edges:
edge_id = graph.get_edge_data(u,v)['edge_id']
plan = QueryPlan()
plan.add_simple_dependency((frozenset(),frozenset()),edge_id)
plan.add_simple_dependency(edge_id, TerminalEvent("double pin"))
double_pins.append(plan)
clean_graph.remove_edge(u,v)
if clean_graph.number_of_edges() == 0:
return double_pins,[]
working_graph = clean_graph.copy()
bound_nodes = get_bound_nodes(working_graph)
working_graph.remove_nodes_from(bound_nodes)
#having removed the bound nodes, do we have independent components?
if nx.is_connected(working_graph):
return double_pins , [clean_graph]
node_components = nx.connected_components(working_graph)
#Each component is a list of nodes. We want to make them back into graphs, but we want them to include
# the bound node where appropriate.
components = [ nodes_to_component( nc, clean_graph, bound_nodes) for nc in node_components ]
return double_pins, components
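# Hedged toy run of decompose (the graph is invented): two unbound chains
# hanging off one bound node split into two independent components.
def _decompose_example():
    g = nx.Graph()
    g.add_node('b', bound=True)
    g.add_node('u1', bound=False)
    g.add_node('u2', bound=False)
    g.add_edge('b', 'u1', edge_id='e1')
    g.add_edge('b', 'u2', edge_id='e2')
    double_pins, components = decompose(g)
    # no bound-bound edges, and removing 'b' disconnects u1 from u2:
    assert double_pins == [] and len(components) == 2
    return components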
def generate_component_plan(component):
hairs,bald_head = dehair(component)
plan,traversed_graph = generate_simple_plan(bald_head)
plan.add_hairs(hairs)
return plan
def next_hair(component):
for node,nd in component.nodes(data=True):
if component.degree[node] == 1 and not nd['bound']:
return node
return None
#TODO: Maybe I need to build this one in a graph before moving it into a QueryPlan?
# The problem is that I'm building it by plucking off edges, but not in any particular order
# therefore making it hard to track dependencies
def dehair(component):
"""The important thing about these hairs is that there are no constraints to apply.
You can't do any better than starting at the bound end and walking forward."""
#Hair is defined by degree 1 nodes. As these are removed, more nodes become degree 1, so we backwalk
# until it's all gone
dangler = next_hair(component)
dep_graph = defaultdict(list)
while dangler is not None:
cut_edge = list(component.edges(dangler, data=True))[0]
edge_id = cut_edge[2]['edge_id']
other_node = next(component.neighbors(dangler))
dep_graph[other_node].append(edge_id)
dep_graph[edge_id].append(dangler)
component.remove_node(dangler)
dangler = next_hair(component)
#Because we went backwards, and because it was easier to keep track of, our dependency graph has nodes in it
# but there's no event associated with these nodes; no joins are required.
# So remove them from the dep graph to make the query plan
tos = set(sum( list(dep_graph.values()), [] ))
froms = set(dep_graph.keys())
start_nodes = froms.difference(tos)
#Get to the edges
starts = set(sum( [dep_graph[n] for n in start_nodes], [] ))
#The current dep_graph has both edges and nodes, but lets take out the nodes now
plan = QueryPlan()
for start in starts:
plan.add_simple_dependency( (frozenset(), frozenset()), start )
#The plan will have some terminal events as well, so that we have some dependency for the final edge.
terminus = TerminalEvent('Hair')
while len(starts) > 0:
start = starts.pop()
next_nodes = dep_graph[start]
for node in next_nodes:
if node not in dep_graph:
plan.add_simple_dependency(start,terminus)
else:
for next_edge in dep_graph[node]:
plan.add_simple_dependency(start,next_edge)
starts.add(next_edge)
return plan,component
def generate_simple_plan(g):
"""
By this point, we have a single, bald, interdependent component. It may have loops and/or branches, and
will contain one or more bound nodes.
The basic idea here is to find simple paths connecting bound nodes. If a path connects a bound node to itself,
then we have a loop. We will find those paths, and walk them from either end, joining in the middle.
If we go in order from shortest to longest, then we are likely to apply constraints earlier.
We also need to manage dependencies among the paths.
This won't necessarily cover everything (like hairs with loops at the end),
so we need a bit of fail-safety at the end"""
#Stopping condition is when we have crossed all edges
edge_count = g.number_of_edges()
#First find all the simple paths and cycles
paths = get_paths(g) + get_cycles(g)
paths_with_length = [ (len(x),x) for x in paths ]
paths_with_length.sort()
#dep_graph = defaultdict(list)
dep_graph = QueryPlan()
traversed_subgraph = { 'nodes': set(), 'edges': set()}
for l,path in paths_with_length:
process_path(g, path, dep_graph, traversed_subgraph )
if len(traversed_subgraph['edges']) == edge_count:
break
return dep_graph,traversed_subgraph
def process_path(graph,path,dep_graph,traversed_subgraph):
"""
Given a path, generate the dependency graph
:param graph:
:param path:
:param dep_graph:
:return: a tuple of a frozenset of nodes and a frozenset of edges. This is the graph that has been run and
filtered, and is also a key in the dep graph that the next path should use as its dependency
"""
#First thing we need to do is to zing along the path until we get to a part that we haven't previously traversed
try:
i = 0
while graph.get_edge_data(path[i], path[i+1])['edge_id'] in traversed_subgraph['edges']:
i += 1
path = path[i:]
i = -1
while graph.get_edge_data(path[i], path[i-1])['edge_id'] in traversed_subgraph['edges']:
i -= 1
if i < -1:
path = path[:i+1]
except IndexError:
#It sometimes happens that we want to traverse a path, but we've already got all those edges
return
#Now we have an actual path to traverse
last = freeze_subgraph(traversed_subgraph)
#update traversed subgraph for next time before we start whacking on path
traversed_subgraph['nodes'].update(path)
#Now add one from each end using the most recent join as the starting dependency
startedge = dep_graph.add_dependency(graph, path[0], path[1], last, traversed_subgraph)
endedge = dep_graph.add_dependency(graph, path[-1], path[-2], last, traversed_subgraph)
path = path[1:-1]
#Now add from each end using the node as the starting dependency
while len(path) > 2:
#Front
startedge = dep_graph.add_dependency(graph,path[0],path[1],startedge,traversed_subgraph)
#Back
endedge = dep_graph.add_dependency(graph,path[-1],path[-2],endedge,traversed_subgraph)
#cycle down
path = path[1:-1]
#Now path will either be 1 or 2 nodes long.
if len(path) == 2:
startedge = dep_graph.add_dependency(graph,path[1],path[0],startedge,traversed_subgraph)
end = freeze_subgraph(traversed_subgraph)
#Now add a join node. This will also be used as the starting key for the next path
dep_graph.add_simple_dependency(startedge, end)
dep_graph.add_simple_dependency(endedge, end)
def freeze_subgraph(subgraph):
return (frozenset(subgraph['nodes']), frozenset(subgraph['edges']))
def get_paths(g):
bound_nodes = get_bound_nodes(g)
paths = []
for si,s in enumerate(bound_nodes):
for t in bound_nodes[si+1:]:
paths += nx.all_simple_paths(g,s,t)
return paths
def get_cycles(g):
bound_nodes = get_bound_nodes(g)
cycles = nx.simple_cycles(g.to_directed())
interim_cycles=[]
for cycle in cycles:
if len(cycle) < 3:
continue
for bn in bound_nodes:
if bn in cycle:
interim_cycles.append(format_cycle(cycle,bn))
break
#this gets us cycles in both directions. We want to chuck that.
uniquer = set()
return_cycles = []
for cycle in interim_cycles:
s = frozenset(cycle)
if s in uniquer:
continue
return_cycles.append(cycle)
uniquer.add(s)
return return_cycles
def format_cycle(cycle,bn):
"""
I want the format of the cycles to be something like [A B C A] where A is the bound node.
"""
loc = cycle.index(bn)
return cycle[loc:] + cycle[:loc+1]
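# Hedged worked example for format_cycle (invented values): the bound node
# should both open and close the returned walk.
def _format_cycle_example():
    assert format_cycle(['B', 'C', 'A', 'D'], 'A') == ['A', 'D', 'B', 'C', 'A']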
|
{"hexsha": "3cbeec57f0a1732f49aab93e4df32d6aa081c18d", "size": 10194, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/generate_plan.py", "max_stars_repo_name": "ranking-agent/query_planner", "max_stars_repo_head_hexsha": "cbe6fd8b7f627658845851b06c73b239746a60f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/generate_plan.py", "max_issues_repo_name": "ranking-agent/query_planner", "max_issues_repo_head_hexsha": "cbe6fd8b7f627658845851b06c73b239746a60f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/generate_plan.py", "max_forks_repo_name": "ranking-agent/query_planner", "max_forks_repo_head_hexsha": "cbe6fd8b7f627658845851b06c73b239746a60f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.8319327731, "max_line_length": 118, "alphanum_fraction": 0.6819697861, "include": true, "reason": "import networkx", "num_tokens": 2419}
|
[STATEMENT]
lemma DSourcesA3_L0: "DSources level0 sA3 = { sA2 }"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. DSources level0 sA3 = {sA2}
[PROOF STEP]
by (simp add: DSources_def AbstrLevel0, auto)
|
{"llama_tokens": 94, "file": "ComponentDependencies_DataDependenciesCaseStudy", "length": 1}
|
/-
Copyright (c) 2022 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
! This file was ported from Lean 3 source module geometry.manifold.metrizable
! leanprover-community/mathlib commit d1bd9c5df2867c1cb463bc6364446d57bdd9f7f1
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Geometry.Manifold.SmoothManifoldWithCorners
import Mathbin.Topology.Paracompact
import Mathbin.Topology.MetricSpace.Metrizable
/-!
# Metrizability of a σ-compact manifold
In this file we show that a σ-compact Hausdorff topological manifold over a finite dimensional real
vector space is metrizable.
-/
open TopologicalSpace
/-- A σ-compact Hausdorff topological manifold over a finite dimensional real vector space is
metrizable. -/
theorem ManifoldWithCorners.metrizableSpace {E : Type _} [NormedAddCommGroup E] [NormedSpace ℝ E]
[FiniteDimensional ℝ E] {H : Type _} [TopologicalSpace H] (I : ModelWithCorners ℝ E H)
(M : Type _) [TopologicalSpace M] [ChartedSpace H M] [SigmaCompactSpace M] [T2Space M] :
MetrizableSpace M := by
haveI := I.locally_compact; haveI := ChartedSpace.locally_compact H M
haveI : NormalSpace M := normal_of_paracompact_t2
haveI := I.second_countable_topology
haveI := ChartedSpace.second_countable_of_sigma_compact H M
exact metrizable_space_of_t3_second_countable M
#align manifold_with_corners.metrizable_space ManifoldWithCorners.metrizableSpace
|
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/Geometry/Manifold/Metrizable.lean"}
|
(*
Copyright 2014 Cornell University
This file is part of VPrl (the Verified Nuprl project).
VPrl is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
VPrl is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VPrl. If not, see <http://www.gnu.org/licenses/>.
Website: http://nuprl.org/html/verification/
Authors: Abhishek Anand & Vincent Rahli
*)
Require Import type_sys_useful.
Require Import dest_close.
Lemma eq_term_equals_per_tunion_eq_if {p} :
forall (eqa1 eqa2 : per(p)) (eqb1 : per-fam(eqa1)) (eqb2 : per-fam(eqa2)),
eqa1 <=2=> eqa2
-> (forall (a1 a2 : CTerm) (e1 : eqa1 a1 a2) (e2 : eqa2 a1 a2),
(eqb1 a1 a2 e1) <=2=> (eqb2 a1 a2 e2))
-> (per_tunion_eq eqa1 eqb1) <=2=> (per_tunion_eq eqa2 eqb2).
Proof.
introv eqt1 eqt2.
introv; split; intro k; induction k.
- apply @tunion_eq_cl with (t := t); sp.
- dup e as e'; apply eqt1 in e'.
apply @tunion_eq_eq with (a1 := a1) (a2 := a2) (e := e'); sp; spcast.
apply (eqt2 a1 a2 e e'); auto.
- apply @tunion_eq_cl with (t := t); sp.
- dup e as e'; apply eqt1 in e'.
apply @tunion_eq_eq with (a1 := a1) (a2 := a2) (e := e'); sp; spcast.
apply (eqt2 a1 a2 e' e); auto.
Qed.
Lemma per_tunion_eq_sym {p} :
forall (eqa : per(p)) eqb t1 t2,
(forall (a1 a2 : CTerm) (e : eqa a1 a2),
term_equality_symmetric (eqb a1 a2 e))
-> per_tunion_eq eqa eqb t1 t2
-> per_tunion_eq eqa eqb t2 t1.
Proof.
introv tesb per.
induction per.
apply @tunion_eq_cl with (t := t); sp.
apply @tunion_eq_eq with (a1 := a1) (a2 := a2) (e := e); sp.
apply tesb; auto.
Qed.
Lemma per_tunion_eq_trans {p} :
forall (eqa : per(p)) eqb t1 t2 t3,
per_tunion_eq eqa eqb t1 t2
-> per_tunion_eq eqa eqb t2 t3
-> per_tunion_eq eqa eqb t1 t3.
Proof.
introv per1 per2.
apply tunion_eq_cl with (t := t2); sp.
Qed.
Lemma per_tunion_eq_cequiv {p} :
forall lib (eqa : per(p)) eqb t t',
(forall (a1 a2 : CTerm) (e : eqa a1 a2),
term_equality_symmetric (eqb a1 a2 e))
-> (forall (a1 a2 : CTerm) (e : eqa a1 a2),
term_equality_transitive (eqb a1 a2 e))
-> (forall (a1 a2 : CTerm) (e : eqa a1 a2),
term_equality_respecting lib (eqb a1 a2 e))
-> t ~=~(lib) t'
-> per_tunion_eq eqa eqb t t
-> per_tunion_eq eqa eqb t t'.
Proof.
introv tes tet ter ceq per.
revert_dependents t'.
induction per; introv ceq.
apply IHper2; auto.
apply @tunion_eq_eq with (a1 := a1) (a2 := a2) (e := e); sp.
apply (ter a1 a2 e t2 t'); auto.
apply tet with (t2 := t1); auto.
apply tes; auto.
Qed.
Lemma close_type_system_tunion {p} :
forall lib (ts : cts(p))
T T'
(eq : per)
A A' v v' B B' eqa eqb,
type_system lib ts
-> defines_only_universes lib ts
-> computes_to_valc lib T (mkc_tunion A v B)
-> computes_to_valc lib T' (mkc_tunion A' v' B')
-> close lib ts A A' eqa
-> (forall (a a' : CTerm) (e : eqa a a'),
close lib ts (substc a v B) (substc a' v' B') (eqb a a' e))
-> (forall (a a' : CTerm) (e : eqa a a'),
type_system lib ts ->
defines_only_universes lib ts ->
type_sys_props lib (close lib ts) (substc a v B) (substc a' v' B')
(eqb a a' e))
-> (forall t t' : CTerm,
eq t t' <=> per_tunion_eq eqa eqb t t')
-> per_tunion lib (close lib ts) T T' eq
-> type_sys_props lib (close lib ts) A A' eqa
-> type_sys_props lib (close lib ts) T T' eq.
Proof.
introv X X0 c1 c2 X1 clb recb eqiff per IHX1.
rw @type_sys_props_iff_type_sys_props3.
prove_type_sys_props3 SCase; intros.
+ SCase "uniquely_valued".
dclose_lr;
try (complete (apply defines_only_universes_tunion_L with (T2 := T3) (eq2 := eq') in per; sp));
try (complete (apply defines_only_universes_tunion_R with (T2 := T3) (eq2 := eq') in per; sp)).
SSCase "CL_tunion".
allunfold @per_tunion; exrepd.
generalize (eq_term_equals_type_family lib T T3 eqa0 eqa eqb0 eqb (close lib ts) A v B A' v' B' mkc_tunion); intro i.
repeat (autodimp i hyp; try (complete (introv ee; eqconstr ee; sp))); repnd.
generalize (eq_term_equals_type_family lib T T' eqa1 eqa eqb1 eqb (close lib ts) A v B A' v' B' mkc_tunion); intro j.
repeat (autodimp j hyp; try (complete (introv ee; eqconstr ee; sp))); repnd.
apply eq_term_equals_trans with (eq2 := per_tunion_eq eqa1 eqb1); auto.
apply eq_term_equals_trans with (eq2 := per_tunion_eq eqa0 eqb0); auto;
try (complete (apply eq_term_equals_sym; auto)).
apply eq_term_equals_per_tunion_eq_if; auto.
apply eq_term_equals_trans with (eq2 := eqa); auto.
apply eq_term_equals_sym; auto.
introv.
dup e2 as e3.
rw <- i0 in e3.
apply eq_term_equals_trans with (eq2 := eqb a1 a2 e3); auto.
apply eq_term_equals_sym; auto.
+ SCase "type_symmetric"; repdors; subst; dclose_lr;
apply CL_tunion;
clear per;
allunfold @per_tunion; exrepd;
unfold per_tunion;
exists eqa0 eqb0; sp;
allrw <-; sp.
apply eq_term_equals_trans with (eq2 := eq); auto.
apply eq_term_equals_sym; auto.
+ SCase "type_value_respecting"; repdors; subst;
apply CL_tunion; unfold per_tunion; exists eqa eqb; sp.
duplicate c1 as ct.
apply @cequivc_mkc_tunion with (T' := T3) in ct; sp.
apply @type_family_cequivc
with
(A1 := A)
(v1 := v)
(B1 := B)
(A2 := A'0)
(v2 := v'0)
(B2 := B'0)
(A := A')
(v := v')
(B := B'); sp.
duplicate c2 as ct.
apply @cequivc_mkc_tunion with (T' := T3) in ct; sp.
apply @type_family_cequivc2
with
(A1 := A')
(v1 := v')
(B1 := B')
(A2 := A'0)
(v2 := v'0)
(B2 := B'0)
(A := A)
(v := v)
(B := B); sp.
+ SCase "term_symmetric".
unfold term_equality_symmetric; introv eqts.
onedtsp e pp p0 p1 c t t0 t3 tygs tygt dum.
apply eqiff; apply eqiff in eqts; exrepnd.
apply per_tunion_eq_sym; auto.
introv.
pose proof (recb a1 a2 e0) as h; repeat (autodimp h hyp).
onedtsp x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11; auto.
+ SCase "term_transitive".
unfold term_equality_transitive; sp.
apply eqiff; sp.
assert (eq t1 t2) as eq12 by auto.
assert (eq t2 t3) as eq23 by auto.
rw eqiff in eq12; rw eqiff in eq23; exrepnd.
apply (per_tunion_eq_trans eqa eqb t1 t2 t3); auto.
+ SCase "term_value_respecting".
unfold term_equality_respecting; sp.
apply eqiff; sp.
assert (eq t t) as eqtt by auto.
apply eqiff in eqtt; exrepnd.
apply (per_tunion_eq_cequiv lib eqa eqb t t'); auto;
introv;
pose proof (recb a1 a2 e) as h; repeat (autodimp h hyp);
onedtsp x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11; auto.
+ SCase "type_gsymmetric"; repdors; subst; split; sp; dclose_lr;
apply CL_tunion;
clear per;
allunfold @per_tunion; exrepd.
(* 1 *)
generalize (eq_term_equals_type_family
lib T T3 eqa0 eqa eqb0 eqb (close lib ts)
A v B A' v' B' mkc_tunion); intro i.
repeat (autodimp i hyp; try (complete (introv ee; eqconstr ee; sp))).
repnd.
exists eqa eqb; sp.
apply eq_term_equals_trans with (eq2 := per_tunion_eq eqa0 eqb0); auto.
apply eq_term_equals_per_tunion_eq_if; auto.
apply eq_term_equals_sym; auto.
(* 2 *)
generalize (eq_term_equals_type_family2
lib T3 T eqa0 eqa eqb0 eqb (close lib ts)
A v B A' v' B' mkc_tunion); intro i;
repeat (autodimp i hyp; try (complete (introv ee; eqconstr ee; sp)));
repnd.
exists eqa eqb; sp.
apply eq_term_equals_trans with (eq2 := per_tunion_eq eqa0 eqb0); auto.
apply eq_term_equals_per_tunion_eq_if; auto.
apply eq_term_equals_sym; auto.
+ SCase "type_gtransitive"; sp.
+ SCase "type_mtransitive".
repdors; subst; dclose_lr;
try (move_term_to_top (per_tunion lib (close lib ts) T T4 eq2));
try (move_term_to_top (per_tunion lib (close lib ts) T' T4 eq2)).
(* 1 *)
clear per.
allunfold @per_tunion; exrepd.
generalize (eq_term_equals_type_family2
lib T3 T eqa1 eqa eqb1 eqb (close lib ts)
A v B A' v' B' mkc_tunion); intro i.
repeat (autodimp i hyp; try (complete (introv ee; eqconstr ee; sp))).
repnd.
generalize (type_family_trans2
lib mkc_tunion (close lib ts) T3 T T4 eqa eqb eqa0 eqb0 A v B A' v' B'); intro j.
repeat (autodimp j hyp; try (complete (introv ee; eqconstr ee; sp))).
repnd.
dands; apply CL_tunion; unfold per_tunion; exists eqa eqb; sp; allrw.
eapply eq_term_equals_trans; eauto.
apply eq_term_equals_per_tunion_eq_if; auto.
apply eq_term_equals_sym; auto.
eapply eq_term_equals_trans; eauto.
apply eq_term_equals_per_tunion_eq_if; auto.
apply eq_term_equals_sym; auto.
introv.
apply eq_term_equals_sym; auto.
(* 2 *)
clear per.
allunfold @per_tunion; exrepd.
generalize (eq_term_equals_type_family2
lib T3 T' eqa1 eqa eqb1 eqb (close lib ts)
A' v' B' A v B mkc_tunion); intro i.
repeat (autodimp i hyp;
try (complete (introv ee; eqconstr ee; sp));
try (complete (apply type_sys_props_sym; sp))).
onedtsp uv tys tyt tyst tyvr tes tet tevr tygs tygt dum.
intros.
apply type_sys_props_sym.
apply type_sys_props_eqb_comm; sp.
apply tet with (t2 := a'); sp.
apply tet with (t2 := a); sp.
repnd.
generalize (type_family_trans2
lib mkc_tunion (close lib ts) T3 T' T4 eqa eqb eqa0 eqb0 A' v' B' A v B); intro j.
repeat (autodimp j hyp;
try (complete (introv ee; eqconstr ee; sp));
try (complete (apply type_sys_props_sym; sp))).
onedtsp uv tys tyt tyst tyvr tes tet tevr tygs tygt dum.
intros.
apply type_sys_props_sym.
apply type_sys_props_eqb_comm; sp.
apply tet with (t2 := a'); sp.
apply tet with (t2 := a); sp.
repnd.
dands; apply CL_tunion; unfold per_tunion; exists eqa eqb; sp; allrw.
eapply eq_term_equals_trans; eauto.
apply eq_term_equals_per_tunion_eq_if; auto.
apply eq_term_equals_sym; auto.
eapply eq_term_equals_trans; eauto.
apply eq_term_equals_per_tunion_eq_if; auto.
apply eq_term_equals_sym; auto.
introv.
apply eq_term_equals_sym; auto.
Qed.
|
{"author": "vrahli", "repo": "NuprlInCoq", "sha": "0c3d7723836d3f615ea47f56e58b2ea6173e7d98", "save_path": "github-repos/coq/vrahli-NuprlInCoq", "path": "github-repos/coq/vrahli-NuprlInCoq/NuprlInCoq-0c3d7723836d3f615ea47f56e58b2ea6173e7d98/close/close_type_sys_per_tunion.v"}
|
C************************************************************************
C This test routine is used to test CYLPATCH, a FORTRAN subroutine.
C CYLPATCH computes the special line and sample point and special
C latitude and longitude points for the normal cylindrical projection.
C This test routine builds the necessary data in a standard MAP
C buffer. After testing the subroutine using this FORTRAN driver, a
C "C" version is invoked "tzcylpatch". tzcylpatch uses a bridge
C "zcylpatch" to invoke CYLPATCH. The test cases are the same, only
C in "C".
C************************************************************************
C RDATA is a real*4 40 element array as described in CONVEV.
C rdata(1) - sample
C rdata(2) - line
C rdata(3) - Latitude
C rdata(6) - Longitude
C rdata(7) - scale (km/pixel)
C rdata(39) - projection type = 9
C rdata(25) - polar radius (km)
C rdata(26) - equatorial radius (km)
C Some values will be changed after execution of CYLPATCH.
C rdata(1) - special sample point
C rdata(2) - special line point
C rdata(3) - Latitude at sample 1
C rdata(6) - Longitude (west) at sample 1
C If rdata(39) is not equal to 9 (integer), then data is not for a
C cylindrical projection. CYLPATCH returns without making any changes.
C************************************************************************
include 'VICMAIN_FOR'
subroutine main44
implicit none
real rdata(40)
integer idata(40),j
character*80 msg
equivalence(idata(1),rdata(1))
idata(39)=9
rdata(1)=1.
rdata(2)=1.
rdata(3)=85.7461
rdata(6)=239.916
rdata(7)=10.
rdata(25)=1815.
rdata(26)=1815.
call xvmessage
+ ('at line=1. sample=1. lati=85.7461 long=239.916',' ')
call xvmessage('radius=1815., scal=10',' ')
write(msg,40)(RDATA(j),j=25,26)
call xvmessage(msg,' ')
write(msg,50)(RDATA(j),j=1,5)
call xvmessage(msg,' ')
write(msg,51)(RDATA(j),j=6,10)
call xvmessage(msg,' ')
call xvmessage(' ',' ')
call cylpatch(rdata)
call xvmessage('output should be lati=0 at line=182, samp=761 long
*=239.916',' ')
write(msg,41) RDATA(1)
call xvmessage(msg,' ')
write(msg,42) RDATA(2)
call xvmessage(msg,' ')
write(msg,43) RDATA(3)
call xvmessage(msg,' ')
write(msg,44) RDATA(6)
call xvmessage(msg,' ')
write(msg,52)(RDATA(j),j=1,5)
call xvmessage(msg,' ')
write(msg,51)(RDATA(j),j=6,10)
call xvmessage(msg,' ')
call xvmessage(' ',' ')
rdata(1)=100.
rdata(2)=100.
rdata(3)=26.8586
rdata(6)=208.6638
call xvmessage
+ ('at line=100,samp=100,lati=26.8586,long=208.6638',' ')
write(msg,50)(RDATA(j),j=1,5)
call xvmessage(msg,' ')
write(msg,51)(RDATA(j),j=6,10)
call xvmessage(msg,' ')
call xvmessage(' ',' ')
call cylpatch(rdata)
call xvmessage('output should be lati=0 at line=182, samp=761 long
*=239.916', ' ')
write(msg,41) RDATA(1)
call xvmessage(msg,' ')
write(msg,42) RDATA(2)
call xvmessage(msg,' ')
write(msg,43) RDATA(3)
call xvmessage(msg,' ')
write(msg,44) RDATA(6)
call xvmessage(msg,' ')
write(msg,52)(RDATA(j),j=1,5)
call xvmessage(msg,' ')
write(msg,51)(RDATA(j),j=6,10)
call xvmessage(msg,' ')
call xvmessage(' ',' ')
call xvmessage('NOW TRY "C" INTERFACE',' ')
call tzcylpatch
40 format ('RADII=',2f10.4)
41 format ('sample=',f12.5)
42 format ('line= ',f12.5)
43 format ('lati= ',f12.5)
44 format ('long= ',f12.5)
50 format ('input data=',5f12.7)
51 format (' ',5f12.7)
52 format ('output data=',5f12.7)
return
end
|
{"hexsha": "928db527390d29de209d1de44619576bb009c0fe", "size": 3814, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "vos/p2/sub/cylpatch/test/tcylpatch.f", "max_stars_repo_name": "NASA-AMMOS/VICAR", "max_stars_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-10-21T05:56:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:02:01.000Z", "max_issues_repo_path": "vos/p2/sub/cylpatch/test/tcylpatch.f", "max_issues_repo_name": "NASA-AMMOS/VICAR", "max_issues_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vos/p2/sub/cylpatch/test/tcylpatch.f", "max_forks_repo_name": "NASA-AMMOS/VICAR", "max_forks_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-09T01:51:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-23T00:23:24.000Z", "avg_line_length": 34.9908256881, "max_line_length": 73, "alphanum_fraction": 0.565810173, "num_tokens": 1180}
|
[STATEMENT]
lemma n_o_mono: "domo S1 \<subseteq> X \<Longrightarrow> domo S2 \<subseteq> X \<Longrightarrow> S1 \<sqsubseteq> S2 \<Longrightarrow>
n_o (n_st n_ivl X) S1 \<le> n_o (n_st n_ivl X) S2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>domo S1 \<subseteq> X; domo S2 \<subseteq> X; S1 \<sqsubseteq> S2\<rbrakk> \<Longrightarrow> n_o (n_st n_ivl X) S1 \<le> n_o (n_st n_ivl X) S2
[PROOF STEP]
apply(induction S1 S2 rule: le_option.induct)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x y. \<lbrakk>domo (Some x) \<subseteq> X; domo (Some y) \<subseteq> X; Some x \<sqsubseteq> Some y\<rbrakk> \<Longrightarrow> n_o (n_st n_ivl X) (Some x) \<le> n_o (n_st n_ivl X) (Some y)
2. \<And>y. \<lbrakk>domo None \<subseteq> X; domo y \<subseteq> X; None \<sqsubseteq> y\<rbrakk> \<Longrightarrow> n_o (n_st n_ivl X) None \<le> n_o (n_st n_ivl X) y
3. \<And>uu_. \<lbrakk>domo (Some uu_) \<subseteq> X; domo None \<subseteq> X; Some uu_ \<sqsubseteq> None\<rbrakk> \<Longrightarrow> n_o (n_st n_ivl X) (Some uu_) \<le> n_o (n_st n_ivl X) None
[PROOF STEP]
apply(auto simp: domo_def n_o_def n_st_mono
split: option.splits)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 548, "file": "Abs_Int_ITP2012_Abs_Int3", "length": 3}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 15:20:21 2019

@author: michaelwu
"""
import numpy as np
import cv2
import os
import pickle
import torch as t
import torch
import h5py
import pandas as pd
from NNsegmentation.models import Segment
from NNsegmentation.data import predict_whole_map
from SingleCellPatch.instance_clustering import instance_clustering, within_range
from SingleCellPatch.generate_trajectories import frame_matching
import matplotlib
from matplotlib import cm
matplotlib.use('AGG')
import matplotlib.pyplot as plt
from matplotlib.ticker import NullLocator
import seaborn as sns
import imageio
from sklearn.decomposition import PCA
from scipy.stats import pearsonr, spearmanr
from HiddenStateExtractor.vq_vae import VQ_VAE, CHANNEL_MAX, CHANNEL_VAR, CHANNEL_RANGE, prepare_dataset, rescale
from HiddenStateExtractor.naive_imagenet import read_file_path, DATA_ROOT
from HiddenStateExtractor.morphology_clustering import select_clean_trajecteories, Kmean_on_short_trajs
from HiddenStateExtractor.movement_clustering import save_traj
import statsmodels.api as sm
import scipy
color_mg = np.array([240, 94, 56], dtype='uint8')
color_nonmg = np.array([66, 101, 251], dtype='uint8')
color_bg = np.array([150, 150, 150], dtype='uint8')
color_fg = (color_mg * 0.7 + color_nonmg * 0.3).astype('uint8')
sites = ['D%d-Site_%d' % (i, j) for j in range(9) for i in range(3, 6)]
# Contrast Setting
phase_a = 2.
phase_b = -50000.
retardance_a = 3.
retardance_b = 0.
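# enhance_contrast applies a linear intensity stretch out = a * mat + b
# (cv2.addWeighted with the second image weighted by zero).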
def enhance_contrast(mat, a=1.5, b=-10000):
mat2 = cv2.addWeighted(mat, a, mat, 0, b)
return mat2
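# plot_patches: load each site's patch pickle and write the phase channel of
# every requested patch (optionally the segmentation-masked version) as a
# contrast-stretched 16-bit PNG.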
def plot_patches(names, out_paths, masked=True):
sites = set(n.split('/')[-2] for n in names)
for site in sites:
image_inds = [i for i, n in enumerate(names) if n.split('/')[-2] == site]
site_dat = pickle.load(open('../data_temp/%s_all_patches.pkl' % site, 'rb'))
for i in image_inds:
if masked:
mat = site_dat[names[i]]["masked_mat"][:, :, 0]
else:
mat = site_dat[names[i]]["mat"][:, :, 0]
mat2 = np.clip(enhance_contrast(mat, phase_a, phase_b), 0, 65535).astype('uint16')
cv2.imwrite(out_paths[i], mat2.astype('uint16'))
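# save_movie: stack the phase channels of patches from a single site (asserted
# below) and write them as an animation via imageio.mimsave.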
def save_movie(names, path, masked=True):
sites = set(n.split('/')[-2] for n in names)
assert len(sites) == 1
site_dat = pickle.load(open('../data_temp/%s_all_patches.pkl' % list(sites)[0], 'rb'))
stacks = []
for n in names:
if masked:
mat = site_dat[n]["masked_mat"][:, :, 0]
else:
mat = site_dat[n]["mat"][:, :, 0]
mat2 = np.clip(enhance_contrast(mat, phase_a, phase_b), 0, 65535).astype('uint16')
stacks.append(mat2)
imageio.mimsave(path, np.stack(stacks, 0))
############################################################################################################
# Fig 2 A1
# Raw input (phase channel)
RAW_DATA_PATH = '/mnt/comp_micro/Projects/CellVAE/Combined'
raw_input_stack = np.load(RAW_DATA_PATH + '/D5-Site_0.npy')
raw_input = raw_input_stack[0, :, :, 0:1]
cv2.imwrite('/home/michaelwu/fig2_raw.png', raw_input)
##########
# Supp Video 1
raw_movie = [cv2.resize(slic[:, :, 0], (512, 512)) for slic in raw_input_stack]
imageio.mimsave('/home/michaelwu/supp_video1_sample_movie.gif', np.stack(raw_movie, 0))
##########
# Fig 2 A2
# Human annotations of (background, mg, non-mg)
annotations = np.load(RAW_DATA_PATH + '/D5-Site_0_Annotations.npy')
annotations = annotations[0]
mat = np.zeros((raw_input.shape[0], raw_input.shape[1], 3), dtype='uint8')
mat[:, :] = (raw_input / 256).astype('uint8')
alpha = 0.7
mat[np.where(annotations == 3)[:2]] = (1 - alpha) * mat[np.where(annotations == 3)[:2]] + alpha * color_nonmg.reshape((1, 3))
mat[np.where(annotations == 2)[:2]] = (1 - alpha) * mat[np.where(annotations == 2)[:2]] + alpha * color_mg.reshape((1, 3))
mat[np.where(annotations == 1)[:2]] = (1 - alpha) * mat[np.where(annotations == 1)[:2]] + alpha * color_bg.reshape((1, 3))
cv2.imwrite('/home/michaelwu/fig2_annotations.png', mat)
##########
# Fig 2 A3
# U-Net prediction
NN_predictions_stack = np.load(RAW_DATA_PATH + '/D5-Site_0_NNProbabilities.npy')
NN_predictions = NN_predictions_stack[0]
mat = np.zeros((raw_input.shape[0], raw_input.shape[1], 3), dtype='uint8')
mat[:, :] = (raw_input / 256).astype('uint8')
alpha = 0.7
mat = mat * (1 - (alpha * NN_predictions[:, :, 0:1])) + np.ones_like(mat) * color_bg.reshape((1, 1, 3)) * (alpha * NN_predictions[:, :, 0:1])
nonmg_positions = np.where(NN_predictions[:, :, 2] > 0.5)[:2]
mat[nonmg_positions] = (mat * (1 - (alpha * NN_predictions[:, :, 2:3])) + np.ones_like(mat) * color_nonmg.reshape((1, 1, 3)) * (alpha * NN_predictions[:, :, 2:3]))[nonmg_positions]
mg_positions = np.where(NN_predictions[:, :, 1] > 0.5)[:2]
mat[mg_positions] = (mat * (1 - (alpha * NN_predictions[:, :, 1:2])) + np.ones_like(mat) * color_mg.reshape((1, 1, 3)) * (alpha * NN_predictions[:, :, 1:2]))[mg_positions]
cv2.imwrite('/home/michaelwu/fig2_nn_predictions.png', mat)
##########
# Supp Fig 1 RF
slice_num = 11
raw_input_off = raw_input_stack[slice_num, :, :, 0:1]
RF_predictions_stack = np.load(RAW_DATA_PATH + '/D5-Site_0_RFProbabilities.npy')
RF_predictions_off = RF_predictions_stack[slice_num]
cv2.imwrite('/home/michaelwu/supp_fig1_raw.png', raw_input_off)
mat = np.zeros((raw_input_off.shape[0], raw_input_off.shape[1], 3), dtype='uint8')
mat[:, :] = (raw_input_off / 256).astype('uint8')
alpha = 0.7
mg_positions = np.where(RF_predictions_off[:, :, 1] > 0.5)[:2]
nonmg_positions = np.where(RF_predictions_off[:, :, 2] > 0.5)[:2]
mat = mat * (1 - (alpha * RF_predictions_off[:, :, 0:1])) + np.ones_like(mat) * color_bg.reshape((1, 1, 3)) * (alpha * RF_predictions_off[:, :, 0:1])
mat[mg_positions] = (mat * (1 - (alpha * RF_predictions_off[:, :, 1:2])) + np.ones_like(mat) * color_mg.reshape((1, 1, 3)) * (alpha * RF_predictions_off[:, :, 1:2]))[mg_positions]
mat[nonmg_positions] = (mat * (1 - (alpha * RF_predictions_off[:, :, 2:3])) + np.ones_like(mat) * color_nonmg.reshape((1, 1, 3)) * (alpha * RF_predictions_off[:, :, 2:3]))[nonmg_positions]
cv2.imwrite('/home/michaelwu/supp_fig1_rf_predictions_annotation_only.png', mat)
##########
# Supp Fig 1 NN-only
model = Segment(input_shape=(256, 256, 2),
unet_feat=32,
fc_layers=[64, 32],
n_classes=3,
model_path='./NNsegmentation/temp_save')
model.load(model.model_path + '/stage0_0.h5')
NN_predictions_off2 = predict_whole_map(raw_input_stack[slice_num:(slice_num + 1)], model, n_supp=20)[0]
mat = np.zeros((raw_input_off.shape[0], raw_input_off.shape[1], 3), dtype='uint8')
mat[:, :] = (raw_input_off / 256).astype('uint8')
alpha = 0.7
mg_positions = np.where(NN_predictions_off2[:, :, 1] > 0.5)[:2]
nonmg_positions = np.where(NN_predictions_off2[:, :, 2] > 0.5)[:2]
mat = mat * (1 - (alpha * NN_predictions_off2[:, :, 0:1])) + np.ones_like(mat) * color_bg.reshape((1, 1, 3)) * (alpha * NN_predictions_off2[:, :, 0:1])
mat[mg_positions] = (mat * (1 - (alpha * NN_predictions_off2[:, :, 1:2])) + np.ones_like(mat) * color_mg.reshape((1, 1, 3)) * (alpha * NN_predictions_off2[:, :, 1:2]))[mg_positions]
mat[nonmg_positions] = (mat * (1 - (alpha * NN_predictions_off2[:, :, 2:3])) + np.ones_like(mat) * color_nonmg.reshape((1, 1, 3)) * (alpha * NN_predictions_off2[:, :, 2:3]))[nonmg_positions]
cv2.imwrite('/home/michaelwu/supp_fig1_nn_predictions_annotation_only.png', mat)
##########
# Supp Fig 1 NN-combined
model.load(model.model_path + '/final.h5')
NN_predictions_off = predict_whole_map(raw_input_stack[slice_num:(slice_num + 1)], model, n_supp=20)[0]
mat = np.zeros((raw_input_off.shape[0], raw_input_off.shape[1], 3), dtype='uint8')
mat[:, :] = (raw_input_off / 256).astype('uint8')
alpha = 0.7
mg_positions = np.where(NN_predictions_off[:, :, 1] > 0.5)[:2]
nonmg_positions = np.where(NN_predictions_off[:, :, 2] > 0.5)[:2]
mat = mat * (1 - (alpha * NN_predictions_off[:, :, 0:1])) + np.ones_like(mat) * color_bg.reshape((1, 1, 3)) * (alpha * NN_predictions_off[:, :, 0:1])
mat[mg_positions] = (mat * (1 - (alpha * NN_predictions_off[:, :, 1:2])) + np.ones_like(mat) * color_mg.reshape((1, 1, 3)) * (alpha * NN_predictions_off[:, :, 1:2]))[mg_positions]
mat[nonmg_positions] = (mat * (1 - (alpha * NN_predictions_off[:, :, 2:3])) + np.ones_like(mat) * color_nonmg.reshape((1, 1, 3)) * (alpha * NN_predictions_off[:, :, 2:3]))[nonmg_positions]
cv2.imwrite('/home/michaelwu/supp_fig1_nn_predictions.png', mat)
##########
# Fig 2 B1
# Instance separation
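# instance_clustering thresholds the foreground probability map (fg_thr) and
# groups foreground pixels into per-cell instances; `positions` lists the
# pixel coordinates and `positions_labels` their instance ids.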
cells, positions, positions_labels = instance_clustering(NN_predictions, fg_thr=0.2)
mg_cell_positions, non_mg_cell_positions, other_cells = cells
mat = np.zeros((raw_input.shape[0], raw_input.shape[1], 3), dtype='uint8')
mat[:, :] = (raw_input / 256).astype('uint8')
cmap = matplotlib.cm.get_cmap('tab10')
alpha = 0.7
for cell_id, mean_pos in mg_cell_positions:
points = positions[np.where(positions_labels == cell_id)[0]]
for p in points:
mat[p[0], p[1]] = (1 - alpha) * mat[p[0], p[1]] + alpha * np.array(cmap.colors[cell_id%10]) * 255
for cell_id, mean_pos in non_mg_cell_positions:
points = positions[np.where(positions_labels == cell_id)[0]]
for p in points:
mat[p[0], p[1]] = (1 - alpha) * mat[p[0], p[1]] + alpha * np.array(cmap.colors[cell_id%10]) * 255
for cell_id, mean_pos in other_cells:
points = positions[np.where(positions_labels == cell_id)[0]]
for p in points:
mat[p[0], p[1]] = (1 - alpha) * mat[p[0], p[1]] + alpha * np.array(cmap.colors[cell_id%10]) * 255
cv2.imwrite('/home/michaelwu/fig2_nn_predictions_instance.png', mat)
cv2.imwrite('/home/michaelwu/fig2_nn_predictions_instance_small.png', mat[:940, :940])
##########
# Fig 2 B2 - left
# Generate bounding boxes
mat = np.zeros((raw_input.shape[0], raw_input.shape[1], 3), dtype='uint8')
mat[:, :] = (raw_input / 256).astype('uint8')
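# add_box draws a 128x128 bounding box (edges ~6 px thick) centered on
# `box_center`, clipped to the image bounds.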
def add_box(mat, box_center, color):
length = mat.shape[0]
box_range = [(max(box_center[0] - 64., 0), min(box_center[0] + 64., length)),
(max(box_center[1] - 64., 0), min(box_center[1] + 64., length))] # assuming square
# Left edge
x = box_range[0][0]
x_ = (int(max(x - 3., 0)), int(min(x + 3., length)))
mat[x_[0]:x_[1], int(box_range[1][0]):int(box_range[1][1])] = color.reshape((1, 1, 3))
# Right edge
x = box_range[0][1]
x_ = (int(max(x - 3., 0)), int(min(x + 3., length)))
mat[x_[0]:x_[1], int(box_range[1][0]):int(box_range[1][1])] = color.reshape((1, 1, 3))
# Top edge
y = box_range[1][0]
y_ = (int(max(y - 3., 0)), int(min(y + 3., length)))
mat[int(box_range[0][0]):int(box_range[0][1]), y_[0]:y_[1]] = color.reshape((1, 1, 3))
# Bottom edge
y = box_range[1][1]
y_ = (int(max(y - 3., 0)), int(min(y + 3., length)))
mat[int(box_range[0][0]):int(box_range[0][1]), y_[0]:y_[1]] = color.reshape((1, 1, 3))
return mat
for cell_id, mean_pos in non_mg_cell_positions:
mat = add_box(mat, mean_pos, color_nonmg)
for cell_id, mean_pos in mg_cell_positions:
mat = add_box(mat, mean_pos, color_mg)
cv2.imwrite('/home/michaelwu/fig2_nn_predictions_boxed_small.png', mat[:940, :940])
##########
# Fig 2 B2 - right
# Generate boxed samples
np.random.seed(123)
mg_inds = np.random.choice(np.arange(len(mg_cell_positions)), (30,), replace=False)
non_mg_inds = np.random.choice(np.arange(len(non_mg_cell_positions)), (5,), replace=False)
mat = np.zeros((raw_input.shape[0], raw_input.shape[1], 3), dtype='uint8')
mat[:, :] = (raw_input / 256).astype('uint8')
for i in mg_inds:
mean_pos = mg_cell_positions[i][1]
if within_range(((128, 940-128), (128, 940-128)), mean_pos):
patch = mat[(mean_pos[0] - 128):(mean_pos[0] + 128),
(mean_pos[1] - 128):(mean_pos[1] + 128)]
cv2.imwrite('/home/michaelwu/fig2_nn_predictions_boxed_mg_%d.png' % mg_cell_positions[i][0], patch)
for i in non_mg_inds:
mean_pos = non_mg_cell_positions[i][1]
if within_range(((128, 940-128), (128, 940-128)), mean_pos):
patch = mat[(mean_pos[0] - 128):(mean_pos[0] + 128),
(mean_pos[1] - 128):(mean_pos[1] + 128)]
cv2.imwrite('/home/michaelwu/fig2_nn_predictions_boxed_non_mg_%d.png' % non_mg_cell_positions[i][0], patch)
##########
# Fig 2 C1
# Frame Matching
frame0 = raw_input_stack[0, :, :, 0:1]
frame1 = raw_input_stack[1, :, :, 0:1]
pred0 = NN_predictions_stack[0]
pred1 = NN_predictions_stack[1]
res0 = instance_clustering(pred0, fg_thr=0.2)
res1 = instance_clustering(pred1, fg_thr=0.2)
cell_positions = {0: res0[0], 1: res1[0]}
cell_pixel_assignments = {0: res0[1:], 1: res1[1:]}
mg_positions_dict = {k: dict(cell_positions[k][0]) for k in cell_positions}
non_mg_positions_dict = {k: dict(cell_positions[k][1]) for k in cell_positions}
t_points = [0, 1]
intensities_dict = {}
for t_point in t_points:
intensities_d = dict(zip(*np.unique(cell_pixel_assignments[t_point][1], return_counts=True)))
intensities_d = {p[0]: intensities_d[p[0]] for p in cell_positions[t_point][0] + cell_positions[t_point][1]}
intensities_dict[t_point] = intensities_d
mg_matchings = {}
non_mg_matchings = {}
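# Link instances across consecutive frames: frame_matching takes centroids and
# pixel counts (a size proxy) for both frames and returns index pairs of
# matched cells, with displacements bounded by dist_cutoff.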
for t_point in t_points[:-1]:
ids1 = sorted(mg_positions_dict[t_point].keys())
ids2 = sorted(mg_positions_dict[t_point+1].keys())
f1 = [mg_positions_dict[t_point][i] for i in ids1]
f2 = [mg_positions_dict[t_point+1][i] for i in ids2]
int1 = [intensities_dict[t_point][i] for i in ids1]
int2 = [intensities_dict[t_point+1][i] for i in ids2]
pairs = frame_matching(f1, f2, int1, int2, dist_cutoff=150)
mg_matchings[t_point] = [(ids1[p1], ids2[p2]) for p1, p2 in pairs]
ids1 = sorted(non_mg_positions_dict[t_point].keys())
ids2 = sorted(non_mg_positions_dict[t_point+1].keys())
f1 = [non_mg_positions_dict[t_point][i] for i in ids1]
f2 = [non_mg_positions_dict[t_point+1][i] for i in ids2]
int1 = [intensities_dict[t_point][i] for i in ids1]
int2 = [intensities_dict[t_point+1][i] for i in ids2]
pairs = frame_matching(f1, f2, int1, int2, dist_cutoff=150)
non_mg_matchings[t_point] = [(ids1[p1], ids2[p2]) for p1, p2 in pairs]
mat0 = np.zeros((raw_input.shape[0], raw_input.shape[1], 3), dtype='uint8')
mat1 = np.zeros((raw_input.shape[0], raw_input.shape[1], 3), dtype='uint8')
mat0[:, :] = (frame0 / 256).astype('uint8')
mat1[:, :] = (frame1 / 256).astype('uint8')
cmap = matplotlib.cm.get_cmap('Set2')
np.random.seed(123)
plotted = []
for i in np.random.permutation(np.arange(len(mg_matchings[0]))):
pair = mg_matchings[0][i]
frame0_position = None
for mg in cell_positions[0][0]:
if mg[0] == pair[0]:
frame0_position = mg[1]
break
frame1_position = None
for mg in cell_positions[1][0]:
if mg[0] == pair[1]:
frame1_position = mg[1]
break
if within_range(((128, 940-128), (128, 940-128)), frame0_position) and \
within_range(((128, 940-128), (128, 940-128)), frame1_position):
mat0 = add_box(mat0, frame0_position, np.array(cmap.colors[(len(plotted) + 1)%10]) * 255)
mat1 = add_box(mat1, frame1_position, np.array(cmap.colors[(len(plotted) + 1)%10]) * 255)
plotted.append((frame0_position, frame1_position))
if len(plotted) > 3:
break
cmap = matplotlib.cm.get_cmap('Set1')
for i in np.random.permutation(np.arange(len(non_mg_matchings[0]))):
pair = non_mg_matchings[0][i]
frame0_position = None
for non_mg in cell_positions[0][1]:
if non_mg[0] == pair[0]:
frame0_position = non_mg[1]
break
frame1_position = None
for non_mg in cell_positions[1][1]:
if non_mg[0] == pair[1]:
frame1_position = non_mg[1]
break
if within_range(((128, 940-128), (128, 940-128)), frame0_position) and \
within_range(((128, 940-128), (128, 940-128)), frame1_position):
mat0 = add_box(mat0, frame0_position, np.array(cmap.colors[(len(plotted) + 1)%10]) * 255)
mat1 = add_box(mat1, frame1_position, np.array(cmap.colors[(len(plotted) + 1)%10]) * 255)
plotted.append((frame0_position, frame1_position))
if len(plotted) > 5:
break
cv2.imwrite('/home/michaelwu/fig2_traj_matching_f0.png', mat0[:940, :940])
cv2.imwrite('/home/michaelwu/fig2_traj_matching_f1.png', mat1[:940, :940])
##########
# Fig 2 C2
# Sample plotted traj zoomed in
mg_trajectories, mg_trajectories_positions = pickle.load(open(os.path.split(RAW_DATA_PATH)[0] + '/Data/DynamicPatches/D5-Site_0/mg_traj.pkl', 'rb'))
non_mg_trajectories, non_mg_trajectories_positions = pickle.load(open(os.path.split(RAW_DATA_PATH)[0] + '/Data/DynamicPatches/D5-Site_0/non_mg_traj.pkl', 'rb'))
sample_non_mg_traj = non_mg_trajectories[0]
sample_non_mg_traj_positions = non_mg_trajectories_positions[0]
sample_mg_traj = mg_trajectories[2]
sample_mg_traj_positions = mg_trajectories_positions[2]
for i in [0, 1, 4, 8, 16]:
mat = raw_input_stack[i][:, :, 0]
center = sample_mg_traj_positions[i]
mg_mat = mat[center[0]-128:center[0]+128,
center[1]-128:center[1]+128]
cv2.imwrite('/home/michaelwu/fig2_sample_mg_traj_%d.png' % i, enhance_contrast(mg_mat, 1.5, -10000))
center = sample_non_mg_traj_positions[i]
non_mg_mat = mat[center[0]-128:center[0]+128,
center[1]-128:center[1]+128]
cv2.imwrite('/home/michaelwu/fig2_sample_non_mg_traj_%d.png' % i, enhance_contrast(non_mg_mat, 1.5, -10000))
##########
# Supp Video 2
# Sample trajectories
inds = [39, 15, 30, 43]
for i in inds:
traj_name = 'D5-Site_0/%d' % i
save_traj(traj_name, '/home/michaelwu/supp_video2_sample_traj_%d.gif' % i)
names = ['/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D5-Site_0/%d_%d.h5' % (j, mg_trajectories[i][j]) for j in sorted(mg_trajectories[i].keys())]
save_movie(names, '/home/michaelwu/supp_video2_sample_traj_movie_%d.gif' % i, masked=False)
############################################################################################################
# Fig 3 A
# VAE illustration
cs = [0, 1]
input_shape = (128, 128)
gpu = True
# Order for `dataset`, `relations`
fs_ = pickle.load(open('./HiddenStateExtractor/file_paths_bkp.pkl', 'rb'))
# Order for `trajs`
fs = sorted(pickle.load(open('./HiddenStateExtractor/file_paths_bkp.pkl', 'rb')))
dataset = torch.load('StaticPatchesAll.pt')
dataset = rescale(dataset)
model = VQ_VAE(alpha=0.0005, gpu=gpu)
model = model.cuda()
model.load_state_dict(torch.load('./HiddenStateExtractor/save_0005_bkp4.pt'))
sample_fs = ['/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D3-Site_4/1_45.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D3-Site_6/3_20.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D4-Site_7/50_34.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D5-Site_8/47_14.h5']
for i, f in enumerate(sample_fs):
sample_ind = fs_.index(f)
sample = dataset[sample_ind:(sample_ind+1)][0].cuda()
output = model(sample)[0]
inp = sample.cpu().data.numpy()
out = output.cpu().data.numpy()
input_phase = (inp[0, 0] * 65535).astype('uint16')
output_phase = (out[0, 0] * 65535).astype('uint16')
input_retardance = (inp[0, 1] * 65535).astype('uint16')
output_retardance = (out[0, 1] * 65535).astype('uint16')
cv2.imwrite('/home/michaelwu/fig3_VAE_pair%d_input_phase.png' % i, enhance_contrast(input_phase, 1., -10000)) # Note dataset has been rescaled
cv2.imwrite('/home/michaelwu/fig3_VAE_pair%d_output_phase.png' % i, enhance_contrast(output_phase, 1., -10000))
cv2.imwrite('/home/michaelwu/fig3_VAE_pair%d_input_retardance.png' % i, enhance_contrast(input_retardance, 2., 0.))
cv2.imwrite('/home/michaelwu/fig3_VAE_pair%d_output_retardance.png' % i, enhance_contrast(output_retardance, 2., 0.))
##########
# Fig 3 B(PCA) & C
# PCA on VAE latent space
z_bs = {}
z_as = {}
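# Encode every patch: z_b is the continuous encoder output and z_a its
# vector-quantized counterpart from the VQ layer; the PCA below runs on z_b.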
for i in range(len(dataset)):
sample = dataset[i:(i+1)][0].cuda()
z_b = model.enc(sample)
z_a, _, _ = model.vq(z_b)
f_n = fs_[i]
z_as[f_n] = z_a.cpu().data.numpy()
z_bs[f_n] = z_b.cpu().data.numpy()
dats = np.stack([z_bs[f] for f in fs], 0).reshape((len(dataset), -1))
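# PCA(0.5) keeps the smallest number of components whose cumulative explained
# variance ratio reaches 50%.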
pca = PCA(0.5)
dats_ = pca.fit_transform(dats)
with open('./save_0005_bkp4_latent_space_PCAed.pkl', 'wb') as f:
pickle.dump(dats_, f)
trajs = pickle.load(open('./HiddenStateExtractor/trajectory_in_inds.pkl', 'rb'))
sizes = pickle.load(open(DATA_ROOT + '/Data/EncodedSizes.pkl', 'rb'))
ss = [sizes[f][0] for f in fs]
cmap = matplotlib.cm.get_cmap('BuPu')
range_min = np.log(min(ss))
range_max = np.log(max(ss))
colors = [cmap(((np.log(s) - range_min)/(range_max - range_min))**1.5) for s in ss]
# Supp Fig 6
cum_explained_var_ratio = list(np.cumsum(pca.explained_variance_ratio_))
cum_explained_var_ratio.insert(0, 0)
plt.clf()
plt.plot(np.arange(len(cum_explained_var_ratio)), cum_explained_var_ratio, '.-')
verts = [(0, 0), *zip(np.arange(5), cum_explained_var_ratio[:5]), (4, 0)]
poly = matplotlib.patches.Polygon(verts, facecolor='0.9', edgecolor='0.5')
plt.gca().add_patch(poly)
plt.ylim(0, 0.48)
plt.xlim(-2, 40)
plt.ylabel("(Cumulative) Explained Variance Ratio", fontsize=16)
plt.xlabel("Principle Components", fontsize=16)
plt.savefig('/home/michaelwu/supp_fig6_PCA_explained_variance.eps')
plt.savefig('/home/michaelwu/supp_fig6_PCA_explained_variance.png', dpi=300)
# Supp Fig 7
import umap
reducer = umap.UMAP()
embedding = reducer.fit_transform(dats)
plt.clf()
plt.scatter(embedding[:, 0], embedding[:, 1], c=colors, s=0.5, edgecolors='none')
plt.xlim(0, 11)
plt.ylim(-6, 7.5)
plt.savefig('/home/michaelwu/supp_fig7_UMAP.eps')
plt.savefig('/home/michaelwu/supp_fig7_UMAP.png', dpi=300)
plt.clf()
sns.set_style('white')
fig, ax = plt.subplots()
ax.scatter(dats_[:, 0], dats_[:, 1], c=colors, s=0.5, edgecolors='none')
rec1 = plt.Rectangle((-2, 0), 6, 2, color=(228/256, 34/256, 86/256, 0.7), fc='none')
rec2 = plt.Rectangle((0, -2), 2, 6, color=(0/256, 137/256, 123/256, 0.7), fc='none')
ax.add_patch(rec1)
ax.add_patch(rec2)
# Supp Video 3
traj_samples = ['D4-Site_0/18', 'D3-Site_7/62', 'D3-Site_2/24', 'D3-Site_0/38']
for t in traj_samples:
save_traj(t, '/home/michaelwu/supp_video3_sample_traj_%s.gif' % t.replace('/', '_'))
names = [fs[i] for i in trajs[t]]
save_movie(names, '/home/michaelwu/supp_video3_sample_traj_movie_%s.gif' % t.replace('/', '_'), masked=False)
selected_frames = [np.array([1, 7, 16, 27, 43]),
np.array([1, 7, 12, 16, 21]),
np.array([0, 10, 20, 30, 40]),
np.array([1, 10, 20, 30, 43])]
cmap2 = matplotlib.cm.get_cmap('tab10')
colors2 = [cmap2.colors[1],
cmap2.colors[5],
(0.15, 0.5, 0.15),
(0.2, 0.2, 0.2)]
for ct, (t, inds, c) in enumerate(zip(traj_samples, selected_frames, colors2)):
order = np.array(trajs[t])
ax.plot(dats_[order][:, 0], dats_[order][:, 1], '.--', c=c, linewidth=0.5, markersize=0.5)
ax.plot(dats_[order][inds][:, 0], dats_[order][inds][:, 1], '.', c=c, markersize=2.0)
for i in range(len(inds) - 1):
ind0 = inds[i]
ind1 = inds[i+1]
ax.arrow(dats_[order[ind0], 0],
dats_[order[ind0], 1],
dats_[order[ind1], 0] - dats_[order[ind0], 0],
dats_[order[ind1], 1] - dats_[order[ind0], 1],
fc='none',
ec=c,
length_includes_head=True,
head_width=0.2,
head_length=0.3)
names = []
output_paths = []
for j, ind in enumerate(order[inds]):
f = fs[ind]
names.append(f)
output_paths.append('/home/michaelwu/fig3_state_transition_sample_%d_%d.png' % (ct, j))
plot_patches(names, output_paths, masked=False)
plt.xlim(-6, 8)
plt.ylim(-4, 8)
plt.savefig('/home/michaelwu/fig3_morphology_pca.eps')
plt.savefig('/home/michaelwu/fig3_morphology_pca.png', dpi=300)
plt.clf()
fig, ax = plt.subplots(figsize=(6, 1))
fig.subplots_adjust(bottom=0.5)
cb1 = matplotlib.colorbar.ColorbarBase(ax, cmap='BuPu',
norm=matplotlib.colors.Normalize(vmin=range_min, vmax=range_max),
orientation='horizontal')
plt.savefig('/home/michaelwu/fig3_morphology_pca_cbar.eps')
##########
# Fig 3 B(patches)
# PC1&2 samples
# bins_PC1 = {(i, i+0.5): [] for i in np.arange(-2, 4, 0.5)}
# bins_PC2 = {(i, i+0.5): [] for i in np.arange(-2, 4, 0.5)}
# for i in range(84884):
# val0 = dats_[i, 0]
# val1 = dats_[i, 1]
# for b in bins_PC1:
# if val0 > b[0] and val0 <= b[1] and val1 > 0. and val1 <= 2.:
# bins_PC1[b].append(fs[i])
# for b in bins_PC2:
# if val0 > 0. and val0 <= 1. and val1 > b[0] and val1 <= b[1]:
# bins_PC2[b].append(fs[i])
# os.mkdir('/home/michaelwu/fig3_PC1')
# for i, b in enumerate(sorted(bins_PC1.keys())):
# samples = np.random.choice(bins_PC1[b], (5,), replace=False)
# prefix = 'b%d' % i
# for s in samples:
# name = s.split('/')[-2:]
# name = prefix + '_' + name[0] + '_' + name[1].split('.')[0] + '.png'
# plot_patch(s, '/home/michaelwu/fig3_PC1/%s' % name)
# os.mkdir('/home/michaelwu/fig3_PC2')
# for i, b in enumerate(sorted(bins_PC2.keys())):
# samples = np.random.choice(bins_PC2[b], (5,), replace=False)
# prefix = 'b%d' % i
# for s in samples:
# name = s.split('/')[-2:]
# name = prefix + '_' + name[0] + '_' + name[1].split('.')[0] + '.png'
# plot_patch(s, '/home/michaelwu/fig3_PC2/%s' % name)
sample_PC1s = [
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D5-Site_2/43_57.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D3-Site_5/19_67.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D3-Site_6/51_55.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D4-Site_0/29_25.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D5-Site_1/3_15.h5'
]
sample_PC2s = [
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D5-Site_7/48_28.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D4-Site_5/19_21.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D5-Site_1/20_24.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D4-Site_7/28_14.h5',
'/mnt/comp_micro/Projects/CellVAE/Data/StaticPatches/D5-Site_4/19_89.h5'
]
plot_patches(sample_PC1s, ['/home/michaelwu/fig3_samples_PC1_%d.png' % i for i in range(len(sample_PC1s))])
plot_patches(sample_PC2s, ['/home/michaelwu/fig3_samples_PC2_%d.png' % i for i in range(len(sample_PC2s))])
##########
# Supp Fig 2
# Scatter plot between PC1 and size
sizes = pickle.load(open('/mnt/comp_micro/Projects/CellVAE/Data/EncodedSizes.pkl', 'rb'))
densities = pickle.load(open('/mnt/comp_micro/Projects/CellVAE/Data/EncodedDensities.pkl', 'rb'))
ss = np.log(np.array([sizes[f][0] for f in fs]))
ds = np.array([densities[f][0][2] for f in fs])
PC1s = dats_[:, 0]
PC2s = dats_[:, 1]
df = pd.DataFrame({'PC1': PC1s,
'PC2': PC2s,
'Size': ss,
'Peak Phase': ds})
sns.set_style('white')
bins_y = np.linspace(6, 9.3, 20)
bins_x = np.linspace(-5, 5, 20)
plt.clf()
g = sns.JointGrid(x='PC1', y='Size', data=df, ylim=(6, 9.3), xlim=(-5, 5))
_ = g.ax_marg_x.hist(df['PC1'], bins=bins_x, color=matplotlib.cm.get_cmap('Blues')(0.5))
_ = g.ax_marg_y.hist(df['Size'], bins=bins_y, orientation='horizontal', color=matplotlib.cm.get_cmap('Blues')(0.5))
g.plot_joint(sns.kdeplot, cmap="Blues", shade=True)
y_ticks = np.array([500, 1000, 2000, 4000, 8000])
g.ax_joint.set_yticks(np.log(y_ticks))
g.ax_joint.set_yticklabels(y_ticks)
g.set_axis_labels('PC1', 'Size', fontsize=16)
plt.tight_layout()
plt.savefig('/home/michaelwu/supp_fig2_PC1_size.eps')
plt.savefig('/home/michaelwu/supp_fig2_PC1_size.png', dpi=300)
sns.set_style('white')
bins_y = np.linspace(0.52, 0.75, 20)
bins_x = np.linspace(-3, 4, 20)
plt.clf()
g = sns.JointGrid(x='PC2', y='Peak Phase', data=df, ylim=(0.52, 0.75), xlim=(-3, 4))
_ = g.ax_marg_x.hist(df['PC2'], bins=bins_x, color=matplotlib.cm.get_cmap('Reds')(0.5))
_ = g.ax_marg_y.hist(df['Peak Phase'], bins=bins_y, orientation='horizontal', color=matplotlib.cm.get_cmap('Reds')(0.5))
g.plot_joint(sns.kdeplot, cmap="Reds", shade=True)
g.set_axis_labels('PC2', 'Peak Phase', fontsize=16)
plt.tight_layout()
plt.savefig('/home/michaelwu/supp_fig2_PC2_density.eps')
plt.savefig('/home/michaelwu/supp_fig2_PC2_density.png', dpi=300)
##########
# Supp Fig 3
# Samples along first 4 PCs
names = []
out_paths = []
np.random.seed(123)
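# For each PC, sample 10 patches from below the 20% quantile and 10 from above
# the 80% quantile; for PC3/PC4 (and residual PC1 below) also restrict PC1 and
# PC2 to their 40-60% band so the later PC drives the visible variation.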
PC1s = dats_[:, 0]
lower_ = np.quantile(PC1s, 0.2)
lower_fs = [f for i, f in enumerate(fs) if PC1s[i] < lower_]
upper_ = np.quantile(PC1s, 0.8)
upper_fs = [f for i, f in enumerate(fs) if PC1s[i] > upper_]
for i, f in enumerate(np.random.choice(lower_fs, (10,), replace=False)):
names.append(f)
out_paths.append('/home/michaelwu/supp_fig3_PC1_lower_sample%d.png' % i)
for i, f in enumerate(np.random.choice(upper_fs, (10,), replace=False)):
names.append(f)
out_paths.append('/home/michaelwu/supp_fig3_PC1_upper_sample%d.png' % i)
PC2s = dats_[:, 1]
lower_ = np.quantile(PC2s, 0.2)
lower_fs = [f for i, f in enumerate(fs) if PC2s[i] < lower_]
upper_ = np.quantile(PC2s, 0.8)
upper_fs = [f for i, f in enumerate(fs) if PC2s[i] > upper_]
for i, f in enumerate(np.random.choice(lower_fs, (10,), replace=False)):
names.append(f)
out_paths.append('/home/michaelwu/supp_fig3_PC2_lower_sample%d.png' % i)
for i, f in enumerate(np.random.choice(upper_fs, (10,), replace=False)):
names.append(f)
out_paths.append('/home/michaelwu/supp_fig3_PC2_upper_sample%d.png' % i)
PC1_range = (np.quantile(PC1s, 0.4), np.quantile(PC1s, 0.6))
PC2_range = (np.quantile(PC2s, 0.4), np.quantile(PC2s, 0.6))
PC3s = dats_[:, 2]
lower_ = np.quantile(PC3s, 0.2)
lower_fs = [f for i, f in enumerate(fs) if PC3s[i] < lower_ and PC1_range[0] < PC1s[i] < PC1_range[1] and PC2_range[0] < PC2s[i] < PC2_range[1]]
upper_ = np.quantile(PC3s, 0.8)
upper_fs = [f for i, f in enumerate(fs) if PC3s[i] > upper_ and PC1_range[0] < PC1s[i] < PC1_range[1] and PC2_range[0] < PC2s[i] < PC2_range[1]]
for i, f in enumerate(np.random.choice(lower_fs, (10,), replace=False)):
names.append(f)
out_paths.append('/home/michaelwu/supp_fig3_PC3_lower_sample%d.png' % i)
for i, f in enumerate(np.random.choice(upper_fs, (10,), replace=False)):
names.append(f)
out_paths.append('/home/michaelwu/supp_fig3_PC3_upper_sample%d.png' % i)
PC4s = dats_[:, 3]
lower_ = np.quantile(PC4s, 0.2)
lower_fs = [f for i, f in enumerate(fs) if PC4s[i] < lower_ and PC1_range[0] < PC1s[i] < PC1_range[1] and PC2_range[0] < PC2s[i] < PC2_range[1]]
upper_ = np.quantile(PC4s, 0.8)
upper_fs = [f for i, f in enumerate(fs) if PC4s[i] > upper_ and PC1_range[0] < PC1s[i] < PC1_range[1] and PC2_range[0] < PC2s[i] < PC2_range[1]]
for i, f in enumerate(np.random.choice(lower_fs, (10,), replace=False)):
names.append(f)
out_paths.append('/home/michaelwu/supp_fig3_PC4_lower_sample%d.png' % i)
for i, f in enumerate(np.random.choice(upper_fs, (10,), replace=False)):
names.append(f)
out_paths.append('/home/michaelwu/supp_fig3_PC4_upper_sample%d.png' % i)
plot_patches(names, out_paths)
np.random.seed(123)
names = []
out_paths = []
# dats = pickle.load(open('./save_0005_bkp4.pkl', 'rb'))
# sizes = pickle.load(open('/mnt/comp_micro/Projects/CellVAE/Data/EncodedSizes.pkl', 'rb'))
# densities = pickle.load(open('/mnt/comp_micro/Projects/CellVAE/Data/EncodedDensities.pkl', 'rb'))
# aps_nr = pickle.load(open('/mnt/comp_micro/Projects/CellVAE/Data/EncodedAspectRatios_NoRotation.pkl', 'rb'))
# aps = pickle.load(open('/mnt/comp_micro/Projects/CellVAE/Data/EncodedAspectRatios.pkl', 'rb'))
# angle_array = []
# for f in fs:
# if aps[f][2] >= 0:
# angle_array.append(aps[f][2] - 90)
# elif 0.8 < aps[f][0]/aps[f][1] < 1.25:
# angle_array.append(-90)
# else:
# angle_array.append(aps[f][2])
# Properties = [[np.log(sizes[f][0]) for f in fs],
# [densities[f][0][2] for f in fs],
# [densities[f][1][2] for f in fs],
# [aps_nr[f][0]/aps_nr[f][1] for f in fs],
# angle_array,
# [aps[f][0]/aps[f][1] for f in fs]]
# X = np.stack(Properties, 1)
# X = sm.add_constant(X)
# dats_residues = []
# for i in range(dats.shape[1]):
# y = dats[:, i]
# model = sm.OLS(y, X)
# results = model.fit()
# residue = y - results.predict(X)
# dats_residues.append(residue)
# dats_residues = np.stack(dats_residues, 1)
dats_residues = pickle.load(open('./save_0005_bkp4_residues.pkl', 'rb'))
pca_r = PCA(3)
dats_residues_ = pca_r.fit_transform(dats_residues)
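# dats_residues holds the latent dimensions with the linear contribution of
# size/phase/retardance/aspect-ratio/angle regressed out (see the commented
# OLS block above); rPC1 is the leading PC of those residuals.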
rPC1s = dats_residues_[:, 0]
lower_ = np.quantile(rPC1s, 0.2)
lower_fs = [f for i, f in enumerate(fs) if rPC1s[i] < lower_ and PC1_range[0] < PC1s[i] < PC1_range[1] and PC2_range[0] < PC2s[i] < PC2_range[1]]
upper_ = np.quantile(rPC1s, 0.8)
upper_fs = [f for i, f in enumerate(fs) if rPC1s[i] > upper_ and PC1_range[0] < PC1s[i] < PC1_range[1] and PC2_range[0] < PC2s[i] < PC2_range[1]]
for i, f in enumerate(np.random.choice(lower_fs, (20,), replace=False)):
names.append(f)
out_paths.append('/home/michaelwu/supp_fig3_rPC1_lower_sample%d.png' % i)
for i, f in enumerate(np.random.choice(upper_fs, (20,), replace=False)):
names.append(f)
out_paths.append('/home/michaelwu/supp_fig3_rPC1_upper_sample%d.png' % i)
plot_patches(names, out_paths, masked=False)
##########
# Supp Fig 4
# Correlation between PC1~6, size, density, aspect ratio, etc.
sizes = pickle.load(open('/mnt/comp_micro/Projects/CellVAE/Data/EncodedSizes.pkl', 'rb'))
densities = pickle.load(open('/mnt/comp_micro/Projects/CellVAE/Data/EncodedDensities.pkl', 'rb'))
aps_nr = pickle.load(open('/mnt/comp_micro/Projects/CellVAE/Data/EncodedAspectRatios_NoRotation.pkl', 'rb'))
aps = pickle.load(open('/mnt/comp_micro/Projects/CellVAE/Data/EncodedAspectRatios.pkl', 'rb'))
angle_array = []
for f in fs:
if aps[f][2] >= 0:
angle_array.append(aps[f][2] - 90)
elif 0.8 < aps[f][0]/aps[f][1] < 1.25:
angle_array.append(-90)
else:
angle_array.append(aps[f][2])
PCs = [PC1s, PC2s, PC3s, PC4s, dats_[:, 4], dats_[:, 5]]
Properties = [[np.log(sizes[f][0]) for f in fs],
[densities[f][0][2] for f in fs],
[densities[f][1][2] for f in fs],
[aps_nr[f][0]/aps_nr[f][1] for f in fs],
angle_array,
[aps[f][0]/aps[f][1] for f in fs]]
sr_mat = np.zeros((len(PCs), len(Properties)))
pr_mat = np.zeros((len(PCs), len(Properties)))
for i, PC in enumerate(PCs):
for j, prop in enumerate(Properties):
sr_mat[i, j] = spearmanr(PC, prop).correlation
pr_mat[i, j] = pearsonr(PC, prop)[0]
plt.clf()
fig, ax = plt.subplots()
cmap = matplotlib.cm.get_cmap('RdBu')
im = ax.imshow(np.transpose(sr_mat), cmap=cmap, vmin=-1.5, vmax=1.5)
ax.set_xticks(np.arange(len(PCs)))
ax.set_yticks(np.arange(len(Properties)))
ax.set_xticklabels(['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6'])
ax.set_yticklabels(['Size', 'Peak Phase', 'Peak Retardance', 'Aspect Ratio (y-axis)', 'Aspect Ratio', 'Angle (Long axis)'])
for i in range(len(PCs)):
for j in range(len(Properties)):
text = ax.text(i, j, "%.2f" % sr_mat[i, j], ha="center", va="center", color="k")
plt.tight_layout()
plt.savefig('/home/michaelwu/supp_fig4_correlations.eps')
plt.savefig('/home/michaelwu/supp_fig4_correlations.png', dpi=300)
##########
# Supp Fig 5
# Distributional difference between trajectories and non-trajectories
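# Compare frame-to-frame PC jumps along real trajectories against jumps
# between randomly drawn patches of matching count: smaller trajectory diffs
# indicate temporal smoothness in the latent space.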
traj_PC1_diffs = []
base_diffs = []
for t in trajs:
traj_PC1 = dats_[np.array(trajs[t])][:, 0]
traj_PC1_diff = np.abs(traj_PC1[1:] - traj_PC1[:-1])
traj_PC1_diffs.append(traj_PC1_diff)
random_PC1 = dats_[np.random.choice(np.arange(dats_.shape[0]), (len(trajs[t]),), replace=False), 0]
base_diffs.append(np.abs(random_PC1[1:] - random_PC1[:-1]))
traj_PC1_diffs = np.concatenate(traj_PC1_diffs)
base_diffs = np.concatenate(base_diffs)
plt.clf()
plt.hist(traj_PC1_diffs, bins=np.arange(0, 8, 0.2), density=True, color=(1, 0, 0, 0.5), label='Trajectories')
plt.hist(base_diffs, bins=np.arange(0, 8, 0.2), density=True, color=(0, 0, 1, 0.5), label='Random pairs')
plt.legend(fontsize=16)
plt.xlabel('PC1 diff', fontsize=16)
plt.ylabel('Frequency', fontsize=16)
plt.savefig('/home/michaelwu/supp_fig5_distri_PC1.eps')
plt.savefig('/home/michaelwu/supp_fig5_distri_PC1.png', dpi=300)
traj_PC2_diffs = []
base_diffs = []
for t in trajs:
traj_PC2 = dats_[np.array(trajs[t])][:, 1]
traj_PC2_diff = np.abs(traj_PC2[1:] - traj_PC2[:-1])
traj_PC2_diffs.append(traj_PC2_diff)
random_PC2 = dats_[np.random.choice(np.arange(dats_.shape[0]), (len(trajs[t]),), replace=False), 1]
base_diffs.append(np.abs(random_PC2[1:] - random_PC2[:-1]))
traj_PC2_diffs = np.concatenate(traj_PC2_diffs)
base_diffs = np.concatenate(base_diffs)
plt.clf()
plt.hist(traj_PC2_diffs, bins=np.arange(0, 8, 0.2), density=True, color=(1, 0, 0, 0.5), label='Trajectories')
plt.hist(base_diffs, bins=np.arange(0, 8, 0.2), density=True, color=(0, 0, 1, 0.5), label='Random pairs')
plt.legend(fontsize=16)
plt.xlabel('PC2 diff', fontsize=16)
plt.ylabel('Frequency', fontsize=16)
plt.savefig('/home/michaelwu/supp_fig5_distri_PC2.eps')
plt.savefig('/home/michaelwu/supp_fig5_distri_PC2.png', dpi=300)
############################################################################################################
# Fig 4 A
# KDE plot of PC1/speed
feat = 'save_0005_before'
dataset = torch.load('StaticPatchesAll.pt')
fs_ = pickle.load(open('./HiddenStateExtractor/file_paths_bkp.pkl', 'rb'))
fs = sorted(pickle.load(open('./HiddenStateExtractor/file_paths_bkp.pkl', 'rb')))
trajs = pickle.load(open('./HiddenStateExtractor/trajectory_in_inds.pkl', 'rb'))
dats_ = pickle.load(open('./save_0005_bkp4_latent_space_PCAed.pkl', 'rb'))
sizes = pickle.load(open(DATA_ROOT + '/Data/EncodedSizes.pkl', 'rb'))
all_mg_trajs = {}
all_mg_trajs_positions = {}
for site in sites:
mg_trajectories_inds, mg_trajectories_positions = pickle.load(open(DATA_ROOT + '/Data/DynamicPatches/%s/mg_traj.pkl' % site, 'rb'))
for i, traj in enumerate(mg_trajectories_positions):
all_mg_trajs[site + '/%d' % i] = mg_trajectories_inds[i]
all_mg_trajs_positions[site + '/%d' % i] = traj
traj_average_moving_distances = {}
traj_cell_sizes_mean = {}
traj_PC1 = {}
traj_PC2 = {}
for t in all_mg_trajs:
t_keys = sorted(all_mg_trajs[t].keys())
dists = []
for t_point in range(len(t_keys) - 1):
d = np.linalg.norm(all_mg_trajs_positions[t][t_keys[t_point+1]] - \
all_mg_trajs_positions[t][t_keys[t_point]], ord=2)
dists.append(d)
traj_average_moving_distances[t] = np.mean(dists)
traj_sizes = [sizes[fs[ind]][0] for ind in trajs[t]]
traj_cell_sizes_mean[t] = np.mean(traj_sizes)
pc1s = [dats_[ind, 0] for ind in trajs[t]]
pc2s = [dats_[ind, 1] for ind in trajs[t]]
traj_PC1[t] = np.mean(pc1s)
traj_PC2[t] = np.mean(pc2s)
t_arrays = sorted(all_mg_trajs.keys())
df = pd.DataFrame({'PC1': [traj_PC1[t] for t in t_arrays],
'PC2': [traj_PC2[t] for t in t_arrays],
'sizes': [traj_cell_sizes_mean[t] for t in t_arrays],
'dists': [np.log(traj_average_moving_distances[t] * 0.72222) for t in t_arrays]}) #0.72um/h for 1pixel/27min
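# dists stores log(mean step length) in um/h: frames are 27 min apart and one
# pixel is ~0.325 um, so 1 pixel/frame corresponds to ~0.72 um/h.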
sns.set_style('white')
bins_y = np.linspace(0.1, 4.3, 20)
bins_x = np.linspace(-4, 4, 20)
plt.clf()
g = sns.JointGrid(x='PC1', y='dists', data=df, ylim=(0.1, 4.3), xlim=(-4, 4))
_ = g.ax_marg_x.hist(df['PC1'], bins=bins_x)
_ = g.ax_marg_y.hist(df['dists'], bins=bins_y, orientation='horizontal')
g.plot_joint(sns.kdeplot, cmap="Blues", shade=True)
y_ticks = np.array([1.5, 3., 6., 12., 24., 48.])
g.ax_joint.set_yticks(np.log(y_ticks))
g.ax_joint.set_yticklabels(y_ticks)
g.set_axis_labels('', '')
plt.savefig('/home/michaelwu/fig4_correlation_kde.eps')
plt.savefig('/home/michaelwu/fig4_correlation_kde.png', dpi=300)
##########
# Fig 4 B
# Sample traj
traj_represented = ['D4-Site_8/16', 'D4-Site_1/14', 'D4-Site_0/15',
'D4-Site_5/1', 'D3-Site_3/56', 'D5-Site_4/33']
colors = [(53, 52, 205)] * 3 + [(176, 177, 0)] * 3
for t, c in zip(traj_represented, colors):
traj = all_mg_trajs[t]
frame0_name = fs[trajs[t][0]]
site_name = frame0_name.split('/')[-2]
site_dat = pickle.load(open('../data_temp/%s_all_patches.pkl' % site_name, 'rb'))
frame0 = site_dat[frame0_name]["masked_mat"][:, :, 0]
frame0 = np.clip(enhance_contrast(frame0, phase_a, phase_b), 0, 65535)
mat = np.zeros((frame0.shape[0], frame0.shape[1], 3), dtype='uint8')
mat[:, :] = (np.expand_dims(frame0, 2) / 256).astype('uint8')
try:
traj_positions = all_mg_trajs_positions[t]
positions = np.stack([traj_positions[k] for k in sorted(traj.keys())])
center_position = positions[0] - np.array([128, 128])
for i in range(positions.shape[0] - 1):
start = positions[i] - center_position
end = positions[i + 1] - center_position
mat = cv2.line(mat, (start[1], start[0]), (end[1], end[0]), c, thickness=2)
cv2.imwrite('/home/michaelwu/fig4_sample_%s.png' % t.replace('/', '_'), mat)
except Exception as e:
print(e)
##########
# Supp Video 4
# Large/small trajectories
for t in traj_represented:
save_traj(t, '/home/michaelwu/supp_video4_sample_traj_%s.gif' % t.replace('/', '_'))
names = [fs[i] for i in trajs[t]]
save_movie(names, '/home/michaelwu/supp_video4_sample_traj_movie_%s.gif' % t.replace('/', '_'), masked=False)
##########
# Fig 4 C
# Violin plot of two modes
small_trajs = []
large_trajs = []
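# "Small" trajectories: PC1 mostly below -0.8 and PC2 mostly below 0;
# "Large": PC1 mostly above 0.8; both must span more than 20 frames.
# Quantiles (0.7 / 0.3) make the thresholds robust to outlier frames.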
for t in trajs:
traj_dats_ = dats_[np.array(trajs[t])]
if np.quantile(traj_dats_[:, 0], 0.7) < -0.8 and \
np.quantile(traj_dats_[:, 1], 0.3) < 0 and len(traj_dats_) > 20:
small_trajs.append(t)
if np.quantile(traj_dats_[:, 0], 0.3) > 0.8 and len(traj_dats_) > 20:
large_trajs.append(t)
df = pd.DataFrame({'cluster': ['Small'] * len(small_trajs) + ['Large'] * len(large_trajs),
'aver_dist': [np.log(traj_average_moving_distances[t] * 0.72222) for t in small_trajs + large_trajs]})
plt.clf()
sns.set_style('whitegrid')
g = sns.violinplot(x='cluster',
y='aver_dist',
data=df,
order=['Small', 'Large'],
palette={'Small': '#cd3435', 'Large': '#00b1b0'},
orient='v')
g.set_ylim(0.1, 4.3)
y_ticks = np.array([1.5, 3., 6., 12., 24., 48.])
g.set_yticks(np.log(y_ticks))
g.set_yticklabels(y_ticks)
g.set_xticklabels(['', ''])
g.set_xlabel('')
g.set_ylabel('')
plt.savefig('/home/michaelwu/fig4_aver_dist.eps')
plt.savefig('/home/michaelwu/fig4_aver_dist.png', dpi=300)
##########
# Fig 4 D
# MSD plot of two modes
MSD_length = 20
small_traj_ensembles = []
for t in small_trajs:
t_end = max(all_mg_trajs_positions[t].keys()) + 1
for t_start in range(t_end - 20):
if t_start in all_mg_trajs_positions[t]:
s_traj = {(t_now - t_start): all_mg_trajs_positions[t][t_now] \
for t_now in range(t_start, t_start+20) if t_now in all_mg_trajs_positions[t]}
small_traj_ensembles.append(s_traj)
large_traj_ensembles = []
for t in large_trajs:
t_end = max(all_mg_trajs_positions[t].keys())
for t_start in range(t_end - 20):
if t_start in all_mg_trajs_positions[t]:
l_traj = {(t_now - t_start): all_mg_trajs_positions[t][t_now] \
for t_now in range(t_start, t_start+20) if t_now in all_mg_trajs_positions[t]}
large_traj_ensembles.append(l_traj)
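# Mean squared displacement: for each lag i, average the squared distance from
# each window's starting position over all re-anchored 20-frame (MSD_length)
# windows; the trimmed variants drop the top and bottom 25% of values per lag.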
small_traj_MSDs = {}
large_traj_MSDs = {}
small_traj_MSDs_trimmed = {}
large_traj_MSDs_trimmed = {}
for i in range(20):
s_dists = [np.square(t[i] - t[0]).sum() for t in small_traj_ensembles if i in t]
l_dists = [np.square(t[i] - t[0]).sum() for t in large_traj_ensembles if i in t]
small_traj_MSDs[i] = s_dists
large_traj_MSDs[i] = l_dists
small_traj_MSDs_trimmed[i] = scipy.stats.trimboth(s_dists, 0.25)
large_traj_MSDs_trimmed[i] = scipy.stats.trimboth(l_dists, 0.25)
def forceAspect(ax,aspect=1):
im = ax.get_images()
extent = im[0].get_extent()
ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)
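# Build a per-lag histogram of MSD values: rows are time lags, columns are
# log(MSD) bins of width 0.6; each row is normalized to sum to 1 so imshow
# renders the per-lag distribution, with the mean MSD overplotted as a line.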
x = np.arange(1, 20)
y_bins = np.arange(0.9, 11.7, 0.6) # log scale
density_map = np.zeros((20, len(y_bins) - 1))
y = []
for i in range(1, 20):
for d in small_traj_MSDs[i]:
if d == 0:
continue
ind_bin = ((np.log(d) - y_bins) > 0).sum() - 1
if ind_bin < density_map.shape[1] and ind_bin >= 0:
density_map[i][ind_bin] += 1
y.append((np.log(np.mean(small_traj_MSDs[i])) - 0.9)/(y_bins[1] - y_bins[0]))
density_map = density_map/density_map.sum(1, keepdims=True)
sns.set_style('white')
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(121)
ax.imshow(np.transpose(density_map), cmap='Reds', origin='lower', vmin=0.01, vmax=0.3, alpha=0.5)
ax.plot(x, np.array(y) - 0.5, '.-', c='#ba4748') # -0.5 is the adjustment for imshow
ax.set_xscale('log')
xticks = np.array([0.5, 1, 2, 4, 8])
xticks_positions = xticks / (27/60)
ax.set_xticks(xticks_positions)
ax.set_xticklabels(xticks)
ax.xaxis.set_minor_locator(NullLocator())
yticks = np.array([0.5, 2, 8, 32, 128, 512, 2048])
yticks_positions = (np.log(yticks / (0.325 * 0.325)) - 0.9)/(y_bins[1] - y_bins[0]) - 0.5 # same adjustment for imshow
ax.set_yticks(yticks_positions)
ax.set_yticklabels(yticks)
density_map = np.zeros((20, len(y_bins) - 1))
y = []
for i in range(1, 20):
for d in large_traj_MSDs[i]:
if d == 0:
continue
ind_bin = ((np.log(d) - y_bins) > 0).sum() - 1
if ind_bin < density_map.shape[1] and ind_bin >= 0:
density_map[i][ind_bin] += 1
y.append((np.log(np.mean(large_traj_MSDs[i])) - 0.9)/(y_bins[1] - y_bins[0]))
density_map = density_map/density_map.sum(1, keepdims=True)
ax2 = fig.add_subplot(122)
ax2.imshow(np.transpose(density_map), cmap='BuGn', origin='lower', vmax=0.2, alpha=0.5)
ax2.plot(x, np.array(y) - 0.5, '.-', c='#0b6b6a')
ax2.set_xscale('log')
ax2.set_xticks(xticks_positions)
ax2.set_xticklabels(xticks)
ax2.xaxis.set_minor_locator(NullLocator())
ax2.set_yticks(yticks_positions)
ax2.set_yticklabels(yticks)
plt.tight_layout()
fig.savefig('/home/michaelwu/fig4_MSD.eps')
fig.savefig('/home/michaelwu/fig4_MSD.png', dpi=300)
|
{"hexsha": "084d583568108f7bbd4f78ec33a405621244bbe0", "size": 45702, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot_scripts/plottings.py", "max_stars_repo_name": "miaecle/dynamorph", "max_stars_repo_head_hexsha": "9bc04ae771e66938273eee102d404947546a69c5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-07-28T19:01:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-09T01:41:05.000Z", "max_issues_repo_path": "plot_scripts/plottings.py", "max_issues_repo_name": "miaecle/dynamorph", "max_issues_repo_head_hexsha": "9bc04ae771e66938273eee102d404947546a69c5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2020-09-11T21:07:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-09T16:27:31.000Z", "max_forks_repo_path": "plot_scripts/plottings.py", "max_forks_repo_name": "miaecle/dynamorph", "max_forks_repo_head_hexsha": "9bc04ae771e66938273eee102d404947546a69c5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-19T03:54:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-19T03:54:00.000Z", "avg_line_length": 43.0338983051, "max_line_length": 190, "alphanum_fraction": 0.6669511181, "include": true, "reason": "import numpy,import scipy,from scipy,import statsmodels", "num_tokens": 15241}
|
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic functions to convert Jax DeviceArrays into PyTorch Tensors and vice-versa."""
from collections import abc
import functools
from typing import Any, Dict, Union
import warnings
from jax._src import dlpack as jax_dlpack
from jax.interpreters.xla import DeviceArray
try:
# pylint:disable=g-import-not-at-top
import torch
from torch.utils import dlpack as torch_dlpack
except ImportError:
warnings.warn(
"brax.io.torch requires PyTorch. Please run `pip install torch` to use "
"functions from this module.")
raise
Device = Union[str, torch.device]
@functools.singledispatch
def torch_to_jax(value: Any) -> Any:
"""Converts values to JAX tensors."""
# Don't do anything by default, and when a handler is registered for this type
# of value, it gets used to convert it to a Jax DeviceArray.
# NOTE: The alternative would be to raise an error when an unsupported value
# is encountered:
# raise NotImplementedError(f"Cannot convert {v} to a Jax tensor")
return value
@torch_to_jax.register(torch.Tensor)
def _tensor_to_jax(value: torch.Tensor) -> DeviceArray:
"""Converts a PyTorch Tensor into a Jax DeviceArray."""
tensor = torch_dlpack.to_dlpack(value)
tensor = jax_dlpack.from_dlpack(tensor)
return tensor
@torch_to_jax.register(abc.Mapping)
def _torch_dict_to_jax(
value: Dict[str, Union[torch.Tensor, Any]]
) -> Dict[str, Union[DeviceArray, Any]]:
"""Converts a dict of PyTorch tensors into a dict of Jax DeviceArrays."""
return type(value)(**{k: torch_to_jax(v) for k, v in value.items()}) # type: ignore
@functools.singledispatch
def jax_to_torch(value: Any, device: Device = None) -> Any:
"""Convert JAX values to PyTorch Tensors.
By default, the returned tensors are on the same device as the Jax inputs,
but if `device` is passed, the tensors will be moved to that device.
"""
# Don't do anything by default, and when a handler is registered for this type
# of value, it gets used to convert it to a torch tensor.
# NOTE: The alternative would be to raise an error when an unsupported value
# is encountered:
# raise NotImplementedError(f"Cannot convert {v} to a Torch tensor")
return value
@jax_to_torch.register(DeviceArray)
def _devicearray_to_tensor(value: DeviceArray,
device: Device = None) -> torch.Tensor:
"""Converts a Jax DeviceArray into PyTorch Tensor."""
dpack = jax_dlpack.to_dlpack(value.astype("float32"))
tensor = torch_dlpack.from_dlpack(dpack)
if device:
return tensor.to(device=device)
return tensor
@jax_to_torch.register(abc.Mapping)
def _jax_dict_to_torch(
value: Dict[str, Union[DeviceArray, Any]],
device: Device = None) -> Dict[str, Union[torch.Tensor, Any]]:
"""Converts a dict of Jax DeviceArrays into a dict of PyTorch tensors."""
return type(value)(
**{k: jax_to_torch(v, device=device) for k, v in value.items()}) # type: ignore
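# --- usage sketch (illustrative, not part of the original module) ---
# Round-trip a Jax array through PyTorch using the dlpack-based helpers above:
#
#   import jax.numpy as jnp
#   x = jnp.ones((4, 3))           # Jax DeviceArray
#   t = jax_to_torch(x)            # torch.Tensor via dlpack (cast to float32)
#   x2 = torch_to_jax(t * 2.0)     # back to a Jax DeviceArray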
|
{"hexsha": "53c6451c2611b9e0b63f2eea44d28b15e343a775", "size": 3504, "ext": "py", "lang": "Python", "max_stars_repo_path": "brax/io/torch.py", "max_stars_repo_name": "Egiob/brax", "max_stars_repo_head_hexsha": "1baf25d5a713bd5dbc8588a004a5754723626bd0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1162, "max_stars_repo_stars_event_min_datetime": "2021-06-03T20:15:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T19:53:06.000Z", "max_issues_repo_path": "brax/io/torch.py", "max_issues_repo_name": "Egiob/brax", "max_issues_repo_head_hexsha": "1baf25d5a713bd5dbc8588a004a5754723626bd0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 160, "max_issues_repo_issues_event_min_datetime": "2021-06-05T02:32:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T11:39:58.000Z", "max_forks_repo_path": "brax/io/torch.py", "max_forks_repo_name": "Egiob/brax", "max_forks_repo_head_hexsha": "1baf25d5a713bd5dbc8588a004a5754723626bd0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 117, "max_forks_repo_forks_event_min_datetime": "2021-06-04T17:18:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T18:04:48.000Z", "avg_line_length": 36.1237113402, "max_line_length": 88, "alphanum_fraction": 0.734303653, "include": true, "reason": "from jax", "num_tokens": 894}
|
"""Functions for reading light curve data."""
import logging
from astropy.io import fits
from astropy.utils import deprecated
from .detect import detect_filetype
from ..lightcurve import KeplerLightCurve, TessLightCurve
from ..utils import validate_method, LightkurveWarning, LightkurveDeprecationWarning
log = logging.getLogger(__name__)
__all__ = ['open', 'read']
@deprecated("2.0", alternative="read()", warning_type=LightkurveDeprecationWarning)
def open(path_or_url, **kwargs):
"""DEPRECATED. Please use `lk.read()` instead.
This function has been deprecated because its name collides with Python's
built-in `open()` function.
"""
return read(path_or_url, **kwargs)
def read(path_or_url, **kwargs):
"""Reads any valid Kepler or TESS data file and returns an instance of
`~lightkurve.lightcurve.LightCurve` or
`~lightkurve.targetpixelfile.TargetPixelFile`.
This function will use the `detect_filetype()` function to
automatically detect the type of the data product, and return the
appropriate object. File types currently supported are::
* `KeplerTargetPixelFile` (typical suffix "-targ.fits.gz");
* `KeplerLightCurve` (typical suffix "llc.fits");
* `TessTargetPixelFile` (typical suffix "_tp.fits");
* `TessLightCurve` (typical suffix "_lc.fits").
Parameters
----------
path_or_url : str
Path or URL of a FITS file.
Returns
-------
data : a subclass of `~lightkurve.targetpixelfile.TargetPixelFile` or
`~lightkurve.lightcurve.LightCurve`, depending on the detected file type.
Raises
------
ValueError : raised if the data product is not recognized as a Kepler or
TESS product.
Examples
--------
To read a target pixel file using its path or URL, simply use:
>>> tpf = read("mytpf.fits") # doctest: +SKIP
"""
log.debug("Opening {}.".format(path_or_url))
# pass header into `detect_filetype()`
try:
with fits.open(path_or_url) as temp:
filetype = detect_filetype(temp)
log.debug("Detected filetype: '{}'.".format(filetype))
except OSError as e:
filetype = None
# Raise an explicit FileNotFoundError if file not found
if 'No such file' in str(e):
raise e
# Community-provided science products
if filetype == "KeplerLightCurve":
return KeplerLightCurve.read(path_or_url, format='kepler', **kwargs)
elif filetype == "TessLightCurve":
return TessLightCurve.read(path_or_url, format='tess', **kwargs)
# Official data products;
# if the filetype is recognized, instantiate a class of that name
if filetype is not None:
return getattr(__import__('lightkurve'), filetype)(path_or_url, **kwargs)
else:
# if these keywords don't exist, raise `ValueError`
raise ValueError("Not recognized as a Kepler or TESS data product: "
"{}".format(path_or_url))
|
{"hexsha": "c1c62baf1e7ec40b4488f52f11b8b0dcf1bf43f9", "size": 3002, "ext": "py", "lang": "Python", "max_stars_repo_path": "lightkurve/io/read.py", "max_stars_repo_name": "KenMighell/lightkurve", "max_stars_repo_head_hexsha": "bb264899fd8d5fbaa95c13f3b90c75bd96c5a33e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lightkurve/io/read.py", "max_issues_repo_name": "KenMighell/lightkurve", "max_issues_repo_head_hexsha": "bb264899fd8d5fbaa95c13f3b90c75bd96c5a33e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lightkurve/io/read.py", "max_forks_repo_name": "KenMighell/lightkurve", "max_forks_repo_head_hexsha": "bb264899fd8d5fbaa95c13f3b90c75bd96c5a33e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1136363636, "max_line_length": 84, "alphanum_fraction": 0.6718854097, "include": true, "reason": "from astropy", "num_tokens": 708}
|
\vfill \eject
\section{{\tt allInOne.c} -- A Serial $QR$ Driver Program}
\label{section:QR-serial-driver}
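This driver reads a matrix $A$ and a right hand side, orders the graph of
$A^T A$ (or $A^H A$) via multiple minimum degree, computes the symbolic and
numeric $QR$ factorizations, and solves for $X$; the steps are numbered in
the source comments.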
\begin{verbatim}
/* QRallInOne.c */
#include "../../misc.h"
#include "../../FrontMtx.h"
#include "../../SymbFac.h"
/*--------------------------------------------------------------------*/
int
main ( int argc, char *argv[] ) {
/*
--------------------------------------------------
QR all-in-one program
(1) read in matrix entries and form InpMtx object
of A and A^TA
(2) form Graph object of A^TA
(3) order matrix and form front tree
(4) get the permutation, permute the matrix and
front tree and get the symbolic factorization
(5) compute the numeric factorization
(6) read in right hand side entries
(7) compute the solution
created -- 98jun11, cca
--------------------------------------------------
*/
/*--------------------------------------------------------------------*/
char *matrixFileName, *rhsFileName ;
ChvManager *chvmanager ;
DenseMtx *mtxB, *mtxX ;
double facops, imag, real, value ;
double cpus[10] ;
ETree *frontETree ;
FILE *inputFile, *msgFile ;
FrontMtx *frontmtx ;
Graph *graph ;
int ient, irow, jcol, jrhs, jrow, msglvl, neqns,
nedges, nent, nrhs, nrow, seed, type ;
InpMtx *mtxA ;
IV *newToOldIV, *oldToNewIV ;
IVL *adjIVL, *symbfacIVL ;
SubMtxManager *mtxmanager ;
/*--------------------------------------------------------------------*/
/*
--------------------
get input parameters
--------------------
*/
if ( argc != 7 ) {
fprintf(stdout,
"\n usage: %s msglvl msgFile type matrixFileName rhsFileName seed"
"\n msglvl -- message level"
"\n msgFile -- message file"
"\n type -- type of entries"
"\n 1 (SPOOLES_REAL) -- real entries"
"\n 2 (SPOOLES_COMPLEX) -- complex entries"
"\n matrixFileName -- matrix file name, format"
"\n nrow ncol nent"
"\n irow jcol entry"
"\n ..."
"\n note: indices are zero based"
"\n rhsFileName -- right hand side file name, format"
"\n nrow "
"\n entry[0]"
"\n ..."
"\n entry[nrow-1]"
"\n seed -- random number seed, used for ordering"
"\n", argv[0]) ;
return(0) ;
}
msglvl = atoi(argv[1]) ;
if ( strcmp(argv[2], "stdout") == 0 ) {
msgFile = stdout ;
} else if ( (msgFile = fopen(argv[2], "a")) == NULL ) {
fprintf(stderr, "\n fatal error in %s"
"\n unable to open file %s\n",
argv[0], argv[2]) ;
return(-1) ;
}
type = atoi(argv[3]) ;
matrixFileName = argv[4] ;
rhsFileName = argv[5] ;
seed = atoi(argv[6]) ;
/*--------------------------------------------------------------------*/
/*
--------------------------------------------
STEP 1: read the entries from the input file
and create the InpMtx object of A
--------------------------------------------
*/
inputFile = fopen(matrixFileName, "r") ;
fscanf(inputFile, "%d %d %d", &nrow, &neqns, &nent) ;
mtxA = InpMtx_new() ;
InpMtx_init(mtxA, INPMTX_BY_ROWS, type, nent, 0) ;
if ( type == SPOOLES_REAL ) {
for ( ient = 0 ; ient < nent ; ient++ ) {
fscanf(inputFile, "%d %d %le", &irow, &jcol, &value) ;
InpMtx_inputRealEntry(mtxA, irow, jcol, value) ;
}
} else {
for ( ient = 0 ; ient < nent ; ient++ ) {
fscanf(inputFile, "%d %d %le %le", &irow, &jcol, &real, &imag) ;
InpMtx_inputComplexEntry(mtxA, irow, jcol, real, imag) ;
}
}
fclose(inputFile) ;
if ( msglvl > 1 ) {
fprintf(msgFile, "\n\n input matrix") ;
InpMtx_writeForHumanEye(mtxA, msgFile) ;
fflush(msgFile) ;
}
/*--------------------------------------------------------------------*/
/*
----------------------------------------
STEP 2: read the right hand side entries
----------------------------------------
*/
inputFile = fopen(rhsFileName, "r") ;
fscanf(inputFile, "%d %d", &nrow, &nrhs) ;
mtxB = DenseMtx_new() ;
DenseMtx_init(mtxB, type, 0, 0, nrow, nrhs, 1, nrow) ;
DenseMtx_zero(mtxB) ;
if ( type == SPOOLES_REAL ) {
for ( irow = 0 ; irow < nrow ; irow++ ) {
fscanf(inputFile, "%d", &jrow) ;
for ( jrhs = 0 ; jrhs < nrhs ; jrhs++ ) {
fscanf(inputFile, "%le", &value) ;
DenseMtx_setRealEntry(mtxB, jrow, jrhs, value) ;
}
}
} else {
for ( irow = 0 ; irow < nrow ; irow++ ) {
fscanf(inputFile, "%d", &jrow) ;
for ( jrhs = 0 ; jrhs < nrhs ; jrhs++ ) {
fscanf(inputFile, "%le %le", &real, &imag) ;
DenseMtx_setComplexEntry(mtxB, jrow, jrhs, real, imag) ;
}
}
}
fclose(inputFile) ;
if ( msglvl > 1 ) {
fprintf(msgFile, "\n\n rhs matrix in original ordering") ;
DenseMtx_writeForHumanEye(mtxB, msgFile) ;
fflush(msgFile) ;
}
/*--------------------------------------------------------------------*/
/*
-------------------------------------------------
STEP 3 : find a low-fill ordering
(1) create the Graph object for A^TA or A^HA
(2) order the graph using multiple minimum degree
-------------------------------------------------
*/
graph = Graph_new() ;
adjIVL = InpMtx_adjForATA(mtxA) ;
nedges = IVL_tsize(adjIVL) ;
Graph_init2(graph, 0, neqns, 0, nedges, neqns, nedges, adjIVL,
NULL, NULL) ;
if ( msglvl > 1 ) {
fprintf(msgFile, "\n\n graph of A^T A") ;
Graph_writeForHumanEye(graph, msgFile) ;
fflush(msgFile) ;
}
frontETree = orderViaMMD(graph, seed, msglvl, msgFile) ;
if ( msglvl > 1 ) {
fprintf(msgFile, "\n\n front tree from ordering") ;
ETree_writeForHumanEye(frontETree, msgFile) ;
fflush(msgFile) ;
}
/*--------------------------------------------------------------------*/
/*
-----------------------------------------------------
STEP 4: get the permutation, permute the matrix and
front tree and get the symbolic factorization
-----------------------------------------------------
*/
oldToNewIV = ETree_oldToNewVtxPerm(frontETree) ;
newToOldIV = ETree_newToOldVtxPerm(frontETree) ;
InpMtx_permute(mtxA, NULL, IV_entries(oldToNewIV)) ;
InpMtx_changeStorageMode(mtxA, INPMTX_BY_VECTORS) ;
symbfacIVL = SymbFac_initFromGraph(frontETree, graph) ;
IVL_overwrite(symbfacIVL, oldToNewIV) ;
IVL_sortUp(symbfacIVL) ;
ETree_permuteVertices(frontETree, oldToNewIV) ;
if ( msglvl > 1 ) {
fprintf(msgFile, "\n\n old-to-new permutation vector") ;
IV_writeForHumanEye(oldToNewIV, msgFile) ;
fprintf(msgFile, "\n\n new-to-old permutation vector") ;
IV_writeForHumanEye(newToOldIV, msgFile) ;
fprintf(msgFile, "\n\n front tree after permutation") ;
ETree_writeForHumanEye(frontETree, msgFile) ;
fprintf(msgFile, "\n\n input matrix after permutation") ;
InpMtx_writeForHumanEye(mtxA, msgFile) ;
fprintf(msgFile, "\n\n symbolic factorization") ;
IVL_writeForHumanEye(symbfacIVL, msgFile) ;
fflush(msgFile) ;
}
/*--------------------------------------------------------------------*/
/*
------------------------------------------
STEP 5: initialize the front matrix object
------------------------------------------
*/
frontmtx = FrontMtx_new() ;
mtxmanager = SubMtxManager_new() ;
SubMtxManager_init(mtxmanager, NO_LOCK, 0) ;
if ( type == SPOOLES_REAL ) {
FrontMtx_init(frontmtx, frontETree, symbfacIVL, type,
SPOOLES_SYMMETRIC, FRONTMTX_DENSE_FRONTS,
SPOOLES_NO_PIVOTING, NO_LOCK, 0, NULL,
mtxmanager, msglvl, msgFile) ;
} else {
FrontMtx_init(frontmtx, frontETree, symbfacIVL, type,
SPOOLES_HERMITIAN, FRONTMTX_DENSE_FRONTS,
SPOOLES_NO_PIVOTING, NO_LOCK, 0, NULL,
mtxmanager, msglvl, msgFile) ;
}
/*--------------------------------------------------------------------*/
/*
-----------------------------------------
STEP 6: compute the numeric factorization
-----------------------------------------
*/
chvmanager = ChvManager_new() ;
ChvManager_init(chvmanager, NO_LOCK, 1) ;
DVzero(10, cpus) ;
facops = 0.0 ;
FrontMtx_QR_factor(frontmtx, mtxA, chvmanager,
cpus, &facops, msglvl, msgFile) ;
ChvManager_free(chvmanager) ;
if ( msglvl > 1 ) {
fprintf(msgFile, "\n\n factor matrix") ;
fprintf(msgFile, "\n facops = %9.2f", facops) ;
FrontMtx_writeForHumanEye(frontmtx, msgFile) ;
fflush(msgFile) ;
}
/*--------------------------------------------------------------------*/
/*
--------------------------------------
STEP 7: post-process the factorization
--------------------------------------
*/
FrontMtx_postProcess(frontmtx, msglvl, msgFile) ;
if ( msglvl > 1 ) {
fprintf(msgFile, "\n\n factor matrix after post-processing") ;
FrontMtx_writeForHumanEye(frontmtx, msgFile) ;
fflush(msgFile) ;
}
/*--------------------------------------------------------------------*/
/*
-------------------------------
STEP 8: solve the linear system
-------------------------------
*/
mtxX = DenseMtx_new() ;
DenseMtx_init(mtxX, type, 0, 0, neqns, nrhs, 1, neqns) ;
FrontMtx_QR_solve(frontmtx, mtxA, mtxX, mtxB, mtxmanager,
cpus, msglvl, msgFile) ;
if ( msglvl > 1 ) {
fprintf(msgFile, "\n\n solution matrix in new ordering") ;
DenseMtx_writeForHumanEye(mtxX, msgFile) ;
fflush(msgFile) ;
}
/*--------------------------------------------------------------------*/
/*
-------------------------------------------------------
STEP 9: permute the solution into the original ordering
-------------------------------------------------------
*/
DenseMtx_permuteRows(mtxX, newToOldIV) ;
if ( msglvl > 0 ) {
fprintf(msgFile, "\n\n solution matrix in original ordering") ;
DenseMtx_writeForHumanEye(mtxX, msgFile) ;
fflush(msgFile) ;
}
/*--------------------------------------------------------------------*/
/*
------------------------
free the working storage
------------------------
*/
InpMtx_free(mtxA) ;
FrontMtx_free(frontmtx) ;
Graph_free(graph) ;
DenseMtx_free(mtxX) ;
DenseMtx_free(mtxB) ;
ETree_free(frontETree) ;
IV_free(newToOldIV) ;
IV_free(oldToNewIV) ;
IVL_free(symbfacIVL) ;
SubMtxManager_free(mtxmanager) ;
/*--------------------------------------------------------------------*/
return(1) ; }
/*--------------------------------------------------------------------*/
\end{verbatim}
|
{"hexsha": "f7962abab35545307efde9af6be3f269b22b258e", "size": 10403, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ccx_prool/SPOOLES.2.2/documentation/AllInOne/QR_serial_driver.tex", "max_stars_repo_name": "alleindrach/calculix-desktop", "max_stars_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ccx_prool/SPOOLES.2.2/documentation/AllInOne/QR_serial_driver.tex", "max_issues_repo_name": "alleindrach/calculix-desktop", "max_issues_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-09-21T17:03:55.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-25T16:08:31.000Z", "max_forks_repo_path": "ccx_prool/SPOOLES.2.2/documentation/AllInOne/QR_serial_driver.tex", "max_forks_repo_name": "alleindrach/calculix-desktop", "max_forks_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-29T18:41:28.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-29T18:41:28.000Z", "avg_line_length": 34.6766666667, "max_line_length": 72, "alphanum_fraction": 0.4974526579, "num_tokens": 2796}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 shmilee
'''
Source fortran code:
skip
'''
import numpy
from ..GTCv3 import gtc as gtcv3
_all_Converters = gtcv3._all_Converters
_all_Diggers = gtcv3._all_Diggers
__all__ = _all_Converters + _all_Diggers
class GtcConverter(gtcv3.GtcConverter):
__slots__ = []
@staticmethod
def _c_val_cputime(val):
val = val.strip().split('\n')
val = numpy.array([[float(n) for n in li.split()[1:]] for li in val])
return val.T
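    # A small illustration of _c_val_cputime (hypothetical input lines; real
    # gtc.out content differs):
    #
    #     GtcConverter._c_val_cputime("pushi 1.0 2.0\n poisson 3.0 4.0")
    #     # -> array([[1., 3.],
    #     #           [2., 4.]])
    #
    # i.e. the leading column of routine names is dropped and the numeric
    # columns come back transposed, one row per column of the table.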
@property
def twoDarraypats(self):
# search two dimensional array parameters
# parent_pats = super(GtcConverter, self).twoDarraypats
return [
r'meshte\s+?meshti\s+?meshne\s+?meshni\s*?$'
+ r'(?P<arr1>.*)$'
+ r'\s*?eq_flux at i=\s*?' + self.numpat + r'$',
r'rg_sp/rg - 1,\s+?dtorpsi/q\s*?$'
+ r'(?P<arr2>.*)?$'
+ r'\s*?\*+?$'
+ r'\s*?=+?$'
+ r'\s*?No Radial Boundary Decay',
(r'poisson solver=(\s*?' + self.numpat + r'){4}\s*$'
+ r'(?P<arr3>.*)$'
+ r'\s+routine\s+count\s+rank0.*$', 'float_2d_arr3'),
(r'CPU TIME USAGE \(in SEC\):$'
+ '(?P<cputimeusage>.*)$'
             + r'\s*?MPush/sec:\s+?' + self.numpat + r'\s*?$', 'cputime'),
]
def _update_qiflux_rgiflux(self, sd):
'''
if no qiflux in *sd*, try to get it from arr2
'''
try:
arr2, diag_flux = sd['arr2'], sd['diag_flux']
if int(arr2[diag_flux-1][0]) == diag_flux:
row = arr2[diag_flux-1]
else:
for i in range(len(arr2)):
if int(arr2[i][0]) == diag_flux:
row = arr2[i]
break
sd['rgiflux'], sd['qiflux'] = row[1], row[4]
except Exception:
pass
def _convert(self):
'''Read 'gtc.out' parameters.'''
sd = super(GtcConverter, self)._convert()
# arr2: rg/a -> rg, GTCv3 compatibility
if 'arr2' in sd and 'a_minor' in sd:
val = sd['arr2']
val = numpy.insert(val, 1, values=val[:, 1]*sd['a_minor'], axis=1)
sd['arr2'] = val
if 'qiflux' not in sd or 'rgiflux' not in sd:
self._update_qiflux_rgiflux(sd)
return sd
|
{"hexsha": "bf7f2418a77b36e930dcde336f2ed9ac95ed8764", "size": 2369, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/GTCv4/gtc.py", "max_stars_repo_name": "shmilee/gdpy3", "max_stars_repo_head_hexsha": "2e007851fc87793c0038f7b1dacba729271e17a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-08-07T13:28:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-08T04:31:20.000Z", "max_issues_repo_path": "src/GTCv4/gtc.py", "max_issues_repo_name": "shmilee/gdpy3", "max_issues_repo_head_hexsha": "2e007851fc87793c0038f7b1dacba729271e17a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GTCv4/gtc.py", "max_forks_repo_name": "shmilee/gdpy3", "max_forks_repo_head_hexsha": "2e007851fc87793c0038f7b1dacba729271e17a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-05-05T01:34:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T15:57:10.000Z", "avg_line_length": 30.7662337662, "max_line_length": 78, "alphanum_fraction": 0.4951456311, "include": true, "reason": "import numpy", "num_tokens": 742}
|
import torch, os, datetime
import numpy as np
from .dist_utils import dist_print, dist_tqdm, is_main_process, DistSummaryWriter
from .factory import get_metric_dict, get_loss_dict, get_optimizer, get_scheduler
from .metrics import MultiLabelAcc, AccTopk, Metric_mIoU, update_metrics, reset_metrics
from .common import merge_config, save_model, cp_projects
from .common import get_work_dir, get_logger
import time
def inference(data_label, seg_label,use_aux,cls_out,seg_out):
if use_aux:
# img, cls_label, seg_label = data_label
cls_label, seg_label = data_label, seg_label[:,-38:-1,:]
seg_out = seg_out[:,:,-38:-1,:]
# import pdb;pdb.set_trace()
return {'cls_out': cls_out, 'cls_label': cls_label, 'seg_out':seg_out, 'seg_label': seg_label}
else:
# img, cls_label = data_label
        cls_label = data_label.cuda()
return {'cls_out': cls_out, 'cls_label': cls_label}
def resolve_val_data(results, use_aux):
results['cls_out'] = torch.argmax(results['cls_out'], dim=1)
if use_aux:
results['seg_out'] = torch.argmax(results['seg_out'], dim=1)
return results
def calc_loss(loss_dict, results):
loss = 0
for i in range(len(loss_dict['name'])):
data_src = loss_dict['data_src'][i]
datas = [results[src] for src in data_src]
# import pdb; pdb.set_trace()
loss_cur = loss_dict['op'][i](*datas)
#if global_step % 20 == 0:
# logger.add_scalar('loss/'+loss_dict['name'][i], loss_cur, global_step)
loss += loss_cur * loss_dict['weight'][i]
# if np.isnan(loss):
# import pdb;pdb.set_trace()
return loss
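# A minimal sketch of the loss_dict layout that calc_loss expects (hypothetical
# entries; the real dict is built elsewhere, e.g. by factory.get_loss_dict):
#
#     loss_dict = {
#         'name':     ['cls_loss', 'seg_loss'],
#         'data_src': [('cls_out', 'cls_label'), ('seg_out', 'seg_label')],
#         'op':       [torch.nn.CrossEntropyLoss(), torch.nn.CrossEntropyLoss()],
#         'weight':   [1.0, 1.0],
#     }
#     total_loss = calc_loss(loss_dict, results)
#
# Each 'op' is called on the results[...] tensors named in 'data_src', and the
# weighted losses are summed.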
|
{"hexsha": "b266a1c9a8b4d5a8a36306d8b5d70cd7d57732bb", "size": 1687, "ext": "py", "lang": "Python", "max_stars_repo_path": "adet/modeling/ultra_fast/cal_loss.py", "max_stars_repo_name": "GuoHaoren/Unifed-Lane-and-Traffic-Sign-detection", "max_stars_repo_head_hexsha": "80ed2690a7bb90861ccfc85de9a2feb6bce324ff", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-10T08:11:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T08:11:01.000Z", "max_issues_repo_path": "adet/modeling/ultra_fast/cal_loss.py", "max_issues_repo_name": "GuoHaoren/Unifed-Lane-and-Traffic-Sign-detection", "max_issues_repo_head_hexsha": "80ed2690a7bb90861ccfc85de9a2feb6bce324ff", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "adet/modeling/ultra_fast/cal_loss.py", "max_forks_repo_name": "GuoHaoren/Unifed-Lane-and-Traffic-Sign-detection", "max_forks_repo_head_hexsha": "80ed2690a7bb90861ccfc85de9a2feb6bce324ff", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.125, "max_line_length": 102, "alphanum_fraction": 0.6644931832, "include": true, "reason": "import numpy", "num_tokens": 441}
|
from typing import Union
import numpy as np
from sklearn.utils.class_weight import compute_class_weight
def compute_class_weight_dict(
labels: Union[list, np.ndarray], class_weight: Union[dict, str, None] = "balanced"
) -> dict:
"""Compute class weight.
Wrapper for sklearn function that returns Keras compatible dictionary.
Parameters
----------
labels : list, np.ndarray
The array of labels to be balanced.
class_weight : dict, str, None
Additional parameter for sklearn.compute_class_weight
Returns
-------
class_weights : dict
A keras compatible dictionary of class weights. Keys are the labels,
and the values are the weightings.
"""
unique_labels = np.unique(labels)
_weight = compute_class_weight(class_weight, classes=unique_labels, y=labels)
return {unique_labels[k]: w for k, w in enumerate(_weight)}
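# A minimal usage sketch (hypothetical labels, not part of the module):
#
#     labels = ["cat", "dog", "dog", "dog"]
#     compute_class_weight_dict(labels)
#     # -> {'cat': 2.0, 'dog': 0.666...}
#
# With class_weight="balanced", each weight is
# n_samples / (n_classes * n_samples_in_class), so the rarer "cat" label is
# up-weighted relative to "dog".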
|
{"hexsha": "21e850979bf0f7e5eff0a056a0d24900e2de2dfa", "size": 905, "ext": "py", "lang": "Python", "max_stars_repo_path": "cellx/train.py", "max_stars_repo_name": "quantumjot/cellx", "max_stars_repo_head_hexsha": "2a3ef965af22f213c4c9e239f097d231040eafe1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-26T12:24:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-09T18:29:48.000Z", "max_issues_repo_path": "cellx/train.py", "max_issues_repo_name": "quantumjot/cellx", "max_issues_repo_head_hexsha": "2a3ef965af22f213c4c9e239f097d231040eafe1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2020-10-26T12:21:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T09:20:51.000Z", "max_forks_repo_path": "cellx/train.py", "max_forks_repo_name": "quantumjot/cellx", "max_forks_repo_head_hexsha": "2a3ef965af22f213c4c9e239f097d231040eafe1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-07-27T21:33:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-15T17:17:21.000Z", "avg_line_length": 30.1666666667, "max_line_length": 86, "alphanum_fraction": 0.7016574586, "include": true, "reason": "import numpy", "num_tokens": 193}
|
\documentclass[10pt,landscape]{article}
% \pagestyle{headings}
\usepackage{multicol}
\usepackage[landscape]{geometry}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{latexsym}
\usepackage{enumerate}
\usepackage{verbatim}
\usepackage{multirow}
\usepackage[lofdepth,lotdepth]{subfig}
\usepackage[pdftex]{graphicx}
\setlength{\parindent}{0pt}
\setlength{\parskip}{0pt plus 0.5ex}
% \setlength{\parskip}{1ex plus 0.5ex minus 0.2ex}
\setlength{\topmargin}{-1.35in}
\setlength{\textheight}{8in}
\setlength{\oddsidemargin}{-0.75in}
\setlength{\evensidemargin}{-0.75in}
\setlength{\textwidth}{10.5in}
% \setlength{\baselineskip}{-1pt}
% \renewcommand{\baselinestretch}{0.5}
\makeatletter
\renewcommand{\section}{\@startsection{section}{1}{0mm}%
{-1ex plus -.5ex minus -.2ex}%
                       {0.5ex plus .2ex}%
{\normalfont\large\bfseries}}
\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
                                {-1ex plus -.5ex minus -.2ex}%
{0.5ex plus .2ex}%
{\normalfont\normalsize\bfseries}}
\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
{-1ex plus -.5ex minus -.2ex}%
{1ex plus .2ex}%
{\normalfont\small\bfseries}}
\renewcommand{\thesection}{\Roman{section}}
\makeatother
\newcommand{\ps}{\ \vspace{0.05in}}
\newcommand{\dg}{$^\circ$ }
\newcommand{\img}[1]{\includegraphics[scale=0.5]{#1}}
% Don't print subsection numbers
\setcounter{secnumdepth}{1}
\thispagestyle{empty}
\begin{document}
\begin{multicols*}{3}
\raggedright
\section*{Synthesis Study Guide - Feynman Liang\\
CHEM231 - Spring 2012\\
Amherst College}
\begin{scriptsize}
\section{Substitution}
\begin{itemize}
\item S$_N$2 - single step, 100\% inversion, \textbf{1\dg electrophile} (else E2 dominates),
\textbf{DMSO or acetone} solvent (polar aprotic)
\item S$_N$1 - rate determined by carbocation formation, shifts possible, racemic
product, \textbf{3\dg electrophile} (E1 will always be present), \textbf{H$_2$O or
compatible (will not generate other products) ROH} solvent (polar protic)
\item Good nucleophile (sterically unhindered, basic)
\item Good leaving group (strong conj. acid)
\end{itemize}
\subsection{Making alkyl halides (R-X)}
\begin{itemize}
\item \textbf{Alcohol using acid} from R-OH$_2^+$, protonation followed by halide
substitution of H$_2$O$^+$:
\begin{itemize}
\item[] \img{makingrx1.png}
\item S$_N$1 unless 1\dg. S$_N$2 competes with elimination (unhindered substrate and
good Nu to favor substitution)
\end{itemize}
\item \textbf{Alcohol using TsCl}, tosylate (OTs) L-group instead of OH$_2^+$:
\img{makingrx2.png}
\begin{itemize}
\item Two-step process (1. convert, 2. substitute)
\item Could have also eliminated OTs after step 1 in E2 (Saytzeff's rule)
\img{elimot.png}
\end{itemize}
\item \textbf{Alcohol using SOCl$_2$}:
\begin{itemize}
\item[] \img{makingrx3.png}
\item One-step process, hydroxyl attacks S and SO$_2$ + Cl$^-$ is displaced by
Cl$^-$ nucleophilic substitution
\item Pyridine should be used to neutralize HCl
\item Will also convert all COOH to COCl
\end{itemize}
\end{itemize}
\subsection{Williamson ether synthesis (R-O-R')}
\begin{itemize}
\item[] \img{williamson.png}
\item S$_N$2, inversion of configuration
\item Alkoxide (RO$^-$) formed by ROH + NaH (Na$^+$ $^-$OR)
\item Electrophile must be 1\dg, E2 predominates 2\dg and 3\dg
\item Intramolecular forms cyclic ethers, bridged rings, epoxides, etc.
\item Unlike acid Cl or Fisher esterification, does not require carbonyl group
\end{itemize}
\subsection{Alkylation of amines (R$_x$-NH$_x$)}
\begin{itemize}
\item[] \img{aminealkylation.png}
\item S$_N$2, inversion
  \item Possible deprotonation of the ammonium product by NH$_3$ ($\rightarrow$ NH$_4^+$) may result
    in multiple alkylations
\item 1\dg substrate required (or else E2 predominates)
\end{itemize}
\subsection{Other nucleophiles for C-C bond making}
\begin{itemize}
  \item \textbf{Cyanide ($^-$CN)}:
\begin{itemize}
\item[] \img{cc1.png}
\item Moderate base/good Nu, favors S$_N$2
\item Can hydrolyze -CN to COOH
\end{itemize}
\item \textbf{Acetylide anion ($^-$:C$\equiv$CR)}:
\begin{itemize}
\item[] \img{cc2.png}
    \item Anion generated from deprotonation (pKa$\approx$25), (Na$^+$)$^-$NH$_2$ is a
      good base for this
\item Strong Nu, S$_N$2
\end{itemize}
\item \textbf{Any decent nucleophile} (organometals, metal
    hydrides, enolate-based (doubly-$\alpha$-C))
\end{itemize}
\section{Alkene addition}
\begin{itemize}
  \item Formed by \textbf{elimination}:
\begin{itemize}
\item Alcohol dehydration: R-R-OH + H$_3$O$^+$ + $\triangle$ $\rightarrow$ R=R + 2
      H$_2$O (reversed using strong base)
\item \textbf{E2} occurs between anti-periplanar H and L, favored over S$_N$2 with strong
base, steric hindrance, higher temp
\item \textbf{E1 }has unselective stereochemistry, always accompanies S$_N$1
\item Regiochemistry follows \textbf{Saytzeff's rule}: product favors more highly substituted alkene
b/c hyperconjugation of transition state
\end{itemize}
\item General Rules for Addition:
\begin{itemize}
    \item Markovnikov's rule: positively charged adding reagent (usually H$^+$) attaches
to alkene to create more stable carbocation intermediate (to less substituted C so
the carbocation has + charge on higher substituted C)
\item Dimerize/polymerize: the carbocation formed can be attacked by the
nucleophilic $\pi$-bond
\item Br$_2$ and Cl$_2$ form trans-dihalides (through halonium ion). Halonium can
also be attacked by other Nu (\textbf{Note:} Nu will attack carbon with more
positive charge, which is usually more substituted one b/c hyperconjugation
stabilized)
\end{itemize}
\end{itemize}
\subsection{Hydrogenation}
\begin{itemize}
\item[] \img{alkene-hydrog.png}
\item H$_2$ gas and metal catalyst (Pd/C), rxn on surface of metal
\end{itemize}
\subsection{Hydroboration/oxidation}
\begin{itemize}
\item[] \img{hbor1.png}
\item Two-step anti-Markovnikov syn-addition of water across double bond with no
rearrangements
\item[1. ] Hydroboration:
\begin{itemize}
\item[] \img{hbor2.png}
\item Concerted (single step, no rearrangements of carbocation possible)
    \item Regioselective: BH$_2$ adds to the less substituted end (so oxidation gives the anti-Markovnikov alcohol)
\item Syn-addition (H and BH$_2$ on same face of alkene) consistent w/ concerted
\item Product R-BH$_2$ reacts 3x more until trialkylborane (BR$_3$) is formed
\end{itemize}
  \item[2. ] Oxidation of alkylborane with peroxide:
\begin{itemize}
\item[] \img{hbor3.png}
\item BR$_3$ attacked by $^-$OOH to form B(OR)$_3$, which is then substituted by
$^-$OH
    \item \textbf{Stereochemistry of carbon with BH$_2$ is retained}
\end{itemize}
\end{itemize}
\subsection{Oxymercuration/reduction}
\begin{itemize}
\item[] \img{omer1.png}
\item Three-step Markovnikov anti-addition of water across double bond with no
rearrangements
\item Preferred way (vs acid catalyzed) to hydrate alkene
\item[1. ] Oxymercuration:
\begin{itemize}
\item[] \img{omer2.png}
\item Mercurinium prevents rearrangements, can form on both faces of alkene
\end{itemize}
\item[2. ] Opening of mercurinium ion:
\begin{itemize}
\item[] \img{omer3.png}
\item If not symmetric, $^-$OH adds to more substituted end (b/c more + charge, think
halonium attack)
\end{itemize}
\item[3. ] Reduction:
\begin{itemize}
\item[] \img{omer4.png}
\item Stereochemistry of reduction is random
\end{itemize}
\end{itemize}
\subsection{Alkene epoxidation (alkene $\rightarrow$ epoxide $\rightarrow$
1-hydroxy,2-substituted)}
\begin{itemize}
\item[] \img{epox1.png}
\item Single-step formation of epoxide (3-membered ring with O), stereochemistry preserved
\item Epoxides can be opened by Nu to give alcohol:
\begin{itemize}
\item[] \img{epox2.png}
    \item Under non-acidic conditions, Nu attacks the less-hindered carbon (think S$_N$2) with inversion
\item Examples of possible Nu: NC$^-$, HS$^-$, I$^-$, RC$\equiv$C$^-$,
HO$^-$, RO$^-$, Br$^-$, N$_3$$^-$, NH$_3$, organometals, metal hydrides
\end{itemize}
\end{itemize}
\subsection{Ozonolysis (alkene $\rightarrow$ two carbonyls)}
\begin{itemize}
\item Ozone cleavage of alkene to generate two separate carbonyls:\\
\img{ozon1.png}
\item Alkene $\rightarrow$ Molozonide (unstable) $\rightarrow$ Ozonide:\\
\img{ozon2.png}
\item Reduction of Ozonide (commonly Me$_2$S) yields two carbonyls:\\
\img{ozon3.png}
\item Note: reaction can also be intermolecular, resulting in only one dicarbonyl
product
\end{itemize}
\subsection{Dihydroxylation (alkene $\rightarrow$ 1,2-diol)}
\begin{itemize}
\item Alkene oxidation to 1,2-diol using KMnO$_4$ or OsO$_4$\\
\img{dihy1.png}
\item Concerted first step forms unstable intermediate (followed by removal of MnO$_2$)\\
\img{dihy2.png}
\item OsO$_4$ is similar. Intermed can be isolated but generally transformed to
diol with sodium sulfite:\\
\img{dihy3.png}
\item Use OsO$_4$ if you do not want to oxidize aromatic alkyls to COOH
\end{itemize}
\section{EAS}
\includegraphics[scale=0.35]{easmech.png}
\begin{itemize}
\item Res. stabilized arenium intermed, substitution trumps addition b/c deprotonation
restores aromaticity
\item To determine rate and directing effects of substituents, compare stability (res,
hyperconj, induct) of possible arenium intermed (Hammond Postulate)
\item In general, EDG = o/p activating and EWG = m deactivating (\textbf{exception}: halogens
  are o/p deactivating due to induct > res)
\item Not limited to just benzene, EAS also possible on:
\begin{tabular}{cc}
\img{pyridine.png} & \img{pyrrole.png}\\
Pyridine & Pyrrole
\end{tabular}
\end{itemize}
\subsection{Diazonium ion (R-NO$_2$ $\rightarrow$ R-NH$_2$ $\rightarrow$
R-N$^+\equiv$N)}
\begin{itemize}
\item Nitro (NO$_2$, meta directing) can be reduced to amino (NH$_2$, o/p directing)
\img{diaz1.png}\\
  \item \textbf{Careful!} H$_2$ with Pd/C \emph{will also hydrogenate alkenes}
\item Amine (R-NH$_2$) can be converted to diazonium ion (R-N$^+\equiv$N), which can
be further substituted through S$_N$1:\\
\img{diaz2.png}
\end{itemize}
\subsection{Reactions from diazonium (R-N$^+\equiv$N $\rightarrow$ R-X)}
\begin{itemize}
\item \textbf{Sandmeyer reaction}: Cuprous salt substitution of diazonium ion (see
summary for reactants):\\
\img{sandmeyer.png}
\item Can also treat diazonium with KI to form R-I
  \item Can also hydrolyze with H$_3$O$^+$ to form R-OH
\end{itemize}
\subsection{Clemmensen reduction of acyl to alkyl}
\begin{itemize}
\item[] \img{clemm.png}
\item Requires strongly acidic conditions. Allows for EAS alkylation using acyl groups
(which won't undergo carbocation shifts and can be reduced to alkyl) and many other
pathways.
\end{itemize}
\subsection{Oxidation of alkyl to COOH}
\begin{itemize}
\item[] \img{alkylox.png}
  \item Reverse of Clemmensen, basic rxn conditions, mechanism likely through benzylic
\end{itemize}
\subsection{Summary of EAS}
\includegraphics[scale=0.36]{eas.png}
\section{Carbonyl chemistry}
\subsection{Nucleophilic attack at carbonyl}
\begin{itemize}
\item To determine reactivity, look at stability (hyperconj, res, inductive) of charge
separated form of carbonyl: \\
\img{carbonylres.png}
\item Reactivity: Acid Cl $>$ Aldehyde $>$ Ketone $>$ Ester $>$ Amide
\item Aldehydes and ketones undergo \textbf{addition} (because no L-group):\\
\img{carbonyladd.png}
\item Acid Cl, ester, amide, carboxylic acids undergo \textbf{substitution}:\\
\img{carbonylsub.png}
\item Nitriles react similarly to carbonyl:\\
\img{nitrile.png}
\end{itemize}
\subsection{Addition reactions (ketones/aldehydes)}
\begin{itemize}
\item \textbf{Hydration/dehydration}: carbonyl $\rightarrow$ 1,1-diol, acid or base
catalyzed\\
\img{hydrate1.png}
\item \textbf{Acetal formation}, carbonyl $\rightarrow$ acetal, acid catalyzed\\
\img{acetal2.png}
\begin{itemize}
\item Hemiacetal intermed. unstable, only cyclic can be isolated
\item EQ driven towards acetal w/ excess alcohol or removing H$_2$O
\item Reverse is \textbf{acetal hydrolysis}, acid catalyzed
\item No rxn in basic conditions (can't eliminate from hemiacetal)
\end{itemize}
\end{itemize}
\subsection{Addition w/ nitrogen nucleophile}
\begin{itemize}
\item All drived forwards by removing H$_2$O, reverse is hydrolysis
\item \textbf{Imine formation} from primary amine, netural conditions:
\img{nnuc1.png}
\begin{itemize}
    \item pH$\geq$4 prevents full protonation of the amine to unreactive ammonium; pH$\leq$6 keeps
      enough acid around to protonate the carbinolamine -OH so water can leave
\item \textbf{Reductive amination} can be achieved by reducing the resulting imine
(NaBH$_4$):\\
\img{redamine.png}
\end{itemize}
\item \textbf{Enamine formation} from secondary amine (identical until last step):
\img{enamine.png}
  \item \textbf{Tertiary amines are unreactive} b/c the iminium formed has no N-H to lose
\end{itemize}
\subsection{Substitution reactions (prefer acid Cl unless multiple
COOH)}
\begin{itemize}
\item \textbf{Fischer esterification}, RCO(OH) $\rightarrow$ RCO(OR')
\img{fischer.png}
\begin{itemize}
\item Acid catalyzed (K$\approx$1), driven towards ester w/ excess RCOOH or R'OH or
removing H$_2$O
\item No reaction in base (deprotonate to carboxylate)
\item Can also be done by attacking acid chloride with alcohol
\item Reverse is \textbf{ester hydrolysis} (RCO(OR') $\rightarrow$ RCO(OH)), acid catalyzed but
base induced (deprotonate to carboxylate)
\item \textbf{Transesterification} (RCOOR' + R''OH $\rightarrow$ RCOOR'' + R'OH):
\img{transester.png}
\end{itemize}
\item \textbf{Amide hydrolysis} substitutes amide (-NH2) with (-OH):
\img{amidehydr.png}
\begin{itemize}
    \item Acid (NH$_2$R reacts with acid to form RNH$_3^+$) and base (NHR reacts with COOH to form
carboxylate) induced
\item Can also be done with NH3 nucleophile and acid Cl
\end{itemize}
\item \textbf{Activating COOH $\rightarrow$ acid Cl with SOCl$_2$} converts COOH to most
reactive acid Cl:
\img{acidcl.png}
\begin{itemize}
\item Pyridine (proton sink) prevents excess HCl
\item Acid Cl can be substituted to any other carboxylic acid derivative (\textbf{add weak
base to neutralize}), superior way to form esters and amides
\includegraphics[scale=0.33]{esterfromcocl.png}\\
\includegraphics[scale=0.33]{amidefromcocl.png}
\end{itemize}
\item \textbf{Activating COOH $\rightarrow$ anhydride} by reacting with acid Cl or
another anhydride:\\
\includegraphics[scale=0.28]{forminganhydride.png}
\begin{itemize}
\item Anhydrides react similarly to acid Cl except eliminates a carboxylic acid
\end{itemize}
\end{itemize}
\subsection{Acetals: carbonyl ``protecting'' groups}
\begin{itemize}
\item[] \img{acetal1.png}
\item Formed via addition of alcohol to carbonyl
  \item Acid catalyzed, driven towards acetal by removal of water. Reversible
(hydrolyze with excess water and acid)
\item \textbf{Stable in basic conditions, unstable in acidic}. Allows reversible
conversion of carbonyl to diester, removing electrophilicity
\item Examples
\begin{itemize}
\item Protecting carbonyl:\\
\img{pr-cnyl.png}
\item Protecting alcohol/di-alcohol:\\
\img{pr-ol.png}
    \item First acid catalyzed acetal formation (H$_3$O$^+$ w/ protecting group), do
reaction, then acid catalyzed hydrolysis in excess water
\item 1,2-ethanediol protects carbonyl:
\item Diethyl carbonate protects diol:
\end{itemize}
\end{itemize}
\subsection{Preparation of organometallic reagents (using R-X)}
\begin{itemize}
  \item X = halide (e.g. Br, I)
\item Reagents are very basic (reacts like R$^-$ b/c metal is electron-donating)
and reactive (must be DRY)
\item These nucleophiles are very strong and can participate in all the previous
substitution/addition reactions
\item \textbf{Grignard reagents (R-MgX):} Mg metal with alkyl halide, Mg inserted in
between halide and carbon\\
\img{grign.png}
\item \textbf{Organolithium reagents (R-Li)}: Li + R-X\\
\img{orglith.png}
\item \textbf{Organocuprate reagents (R$_2$)-CuLi}: First make R-Li, then react with
Cu-X twice\\
\img{orgcup.png}
\end{itemize}
\subsection{Reactions with organometallic reagents (carbonyl $\rightarrow$ alcohol/ketone)}
\includegraphics[scale=0.35]{metalsum.png}
\begin{itemize}
\item Electron-donating metal gives electrons to alkyl (forming R-C$^-$H$_2$) which
acts as nucleophile
\item Carboxylic-acid derivatives (have L-group) are substituted, aldehyde/ketone are reduced
\item \textbf{Summary}: Use organocuprate to make 1,4-addition on Michael acceptor
and converting acid chlorides to ketone. All else should use organolithium
\item Don't forget acidic aqueous workup to protonate R-O$^-$ to alcohol
\end{itemize}
\subsection{Metal hydride addition (carbonyl $\rightarrow$ alcohol/amide)}
\includegraphics[scale=0.32]{hydride1.png}
\begin{itemize}
\item \textbf{Summary}: ALWAYS use LiAlH$_4$
\item Electron-donating metal allows hydride (H$^-$) to act as nucleophile
\item The Al metal is ``magical'':
\includegraphics[scale=0.24]{hydr-cooh.png}
\includegraphics[scale=0.4]{hydr-amide.png}
  \item Nitriles can be reduced twice (hydride adds twice, giving the amine):
\includegraphics[scale=0.4]{hydr-nitrile.png}
\end{itemize}
\subsection{Chromium oxidants (alcohol $\rightarrow$ carbonyl)}
\includegraphics[scale=0.5]{chromiumox.png}
\begin{itemize}
\item \textbf{Summary}: Use H$_2$CrO$_4$/H$_2$O for all except making aldehyde
from 1\dg alcohol
\item Difference is due to hydration in aqueous conditions, thus any Cr oxidation rxn
  w/ aqueous conditions will react similarly to H$_2$CrO$_4$/H$_2$O
\item Reaction begins with the alcohol oxygen attacking CrO$_3$ to form a chromate ester
intermed, HCrO$_3^-$ is eliminated in E2 by any base
\end{itemize}
\subsection{Wittig reaction (carbonyl $\rightarrow$ alkene)}
\begin{itemize}
\item Wittig reagent (``ylide'') prepared from alkyl halide via phosphonium ion
  formation and deprotonation w/ strong base\\
\img{wittig1.png}
\item Converts aldehydes and ketones into alkenes by replacing carbonyl double bond\\
\img{wittig2.png}
\item Reaction proceeds through 4-membered ring (``ylide'' carbon attacks carbonyl)
\end{itemize}
\subsection{Enolates}
\begin{itemize}
\item Properties of enolates:
\begin{itemize}
    \item $\alpha$-carbons of ketones/aldehydes have weakly acidic H (resonance with
carbonyl), deprotonation generates enolate
    \item Keto/enol forms equilibrate; keto is lower energy and favored at neutral pH
\item Tautomerization to enol catalyzed by base or acid
\end{itemize}
  \item \textbf{Must use LDA} to form the enolate quantitatively and explicitly (prevents multiple alkylations):
\img{enol1.png}
\item Enolate can then act as nucleophile in substitution reactions with alkyl halides
($\alpha$-hydrogen $\rightarrow$ $\alpha$-substituted). However, this requires a strong
base and can be avoided
  \item If possible, prefer the doubly-$\alpha$ enolates (only one
    possible enolate, and a milder base suffices)
\end{itemize}
\subsection{Acetoacetic ester synthesis (doubly-$\alpha$-proton $\rightarrow$
$\alpha$-substituted carbonyl or $\beta$-ketoester)}
\begin{itemize}
\item[] \img{rcooet.png}
\item $\beta$-ketoester stabilizes enolate and allows quantitative formation with mild
bases ($^-$OEt/EtOH), enolate itself is also less reactive
\item Synthetic equivalence - $\beta$-ketoester decarboxylation (note: requires
$\beta$-carbonyl to COOH) generates same products as regular enolate attack
\item Multiple alkylations before decarboxylation possible (as is stopping and
extracting 1,3-dicarbonyl)
\end{itemize}
\subsection{Malonic ester synthesis (doubly-$\alpha$-proton $\rightarrow$
$\alpha$-substituted carboxylic acid or $\beta$-ketoester)}
\begin{itemize}
\item[] \img{malon1.png}
  \item Same as acetoacetic except during acidic workup one COOEt will decarboxylate and
    \textbf{the other will hydrolyze to carboxylic acid}
\item Note: \textbf{any proton doubly-$\alpha$ to two anion stabilizing groups} can react similarly
\end{itemize}
\subsection{Aldol condensation (aldehyde $\rightarrow$ $\beta$-hydroxy or
$\alpha$-$\beta$-unsaturated ketone)}
\begin{itemize}
\item[] \img{aldol.png}
\item Aldehyde acceptor, aldehyde/ketone (enolate) donor
\item \textbf{Crossed Aldol:} acceptor is aldehyde w/ \textbf{no $\alpha$-protons}
and donor is \textbf{symmetrical} ketone or has protons on \textbf{only one
$\alpha$-carbon}
\item Ketone acceptor possible \textbf{only in intramolecular ring forming} rxn
\begin{itemize}
\item Last step of Robinson annulation
\end{itemize}
\item Optional: $\beta$-hydroxyl group can be eliminated in E1cb reaction
(1. Deprotonate 2. Eliminate $^-$OH L-group and form $\alpha$-$\beta$-unsaturated carbonyl)
\item Reversible, acid and base catalyzed
\end{itemize}
\subsection{Claisen condensation (ketoester $\rightarrow$ 1,3-dicarbonyl)}
\begin{itemize}
\item[] \img{claisen.png}
\item Ester acceptor, ester/ketone (enolate) donor
\item \textbf{Crossed Claisen Donor}: \textbf{symmetrical ketone with 2/3
protons on each $\alpha$-C} or \textbf{unsymmetrical with 1 H on one $\alpha$-C
and 2/3 H on other}
\item \textbf{Crossed Claisen Acceptor}: \textbf{ester w/ no $\alpha$-C}
\item Reversible, \textbf{$\beta$-ketoester must deprotonate to drive EQ}
\end{itemize}
\subsection{Michael addition ($\alpha$-$\beta$-unsaturated carbonyl $\rightarrow$ 1,5-dicarbonyl)}
\begin{itemize}
\item[] \img{michael1.png}
\item Any good nucleophile (enolate, -CN, organometals, etc) attacks a Michael acceptor
($\alpha-\beta$-unsaturated carbonyl)
\item Competes with normal carbonyl addition, increased by acid (protonated R=O$^+$H
has res. struct. w/ + on $\beta$-carbon)
\item Ketoester can be decarboxylated to give 1,5-dicarbonyl
\item[] \img{michael2.png}
\end{itemize}
\subsection{Robinson Annulation}
Forms a bicyclic ring from a cyclic enolate donor and a Michael acceptor. The enolate adds in
a Michael addition; proton transfer then forms a new enolate on the other side of the Michael
acceptor's carbonyl, which attacks in an intramolecular aldol condensation.
\end{scriptsize}
\end{multicols*}
\end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:
|
{"hexsha": "8a9ac1dfa2da81665f2e8d807c1b2a847e90f2cb", "size": 24246, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "study-guide/study-guide.tex", "max_stars_repo_name": "feynmanliang/Organic-Synthesis-Study-Guide", "max_stars_repo_head_hexsha": "ee63f7e027ca675dfdf7993907101f9574d61e41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-13T01:09:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-28T06:16:06.000Z", "max_issues_repo_path": "study-guide/study-guide.tex", "max_issues_repo_name": "feynmanliang/Organic-Synthesis-Study-Guide", "max_issues_repo_head_hexsha": "ee63f7e027ca675dfdf7993907101f9574d61e41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "study-guide/study-guide.tex", "max_forks_repo_name": "feynmanliang/Organic-Synthesis-Study-Guide", "max_forks_repo_head_hexsha": "ee63f7e027ca675dfdf7993907101f9574d61e41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9439868204, "max_line_length": 110, "alphanum_fraction": 0.6801946713, "num_tokens": 7483}
|
"""
Tests for workspace module
"""
import os
import shutil
import tempfile
from six import StringIO
import numpy as np
import pytest
from fsl.data.image import Image
from oxasl import Workspace, AslImage
from oxasl.workspace import text_to_matrix
def test_default_attr():
""" Check attributes are None by default """
wsp = Workspace()
assert(wsp.wibble is None)
def test_set_attr():
""" Check attributes can bet set """
wsp = Workspace()
assert(wsp.wibble is None)
wsp.wibble = 7
assert(wsp.wibble == 7)
def test_ctor_attributes():
""" Check attributes specified in constructor """
wsp = Workspace(wobble="hi")
assert(wsp.wobble == "hi")
def test_log():
""" Check that the log is picked up """
log = StringIO()
wsp = Workspace(log=log)
wsp.log.write("hello")
assert(log.getvalue() == "hello")
def test_ifnone():
wsp = Workspace(wibble=11)
assert(wsp.ifnone("wibble", 12) == 11)
assert(wsp.ifnone("wobble", 12) == 12)
def test_sub():
""" Test sub-workspaces """
wsp = Workspace()
wsp.sub("child")
assert(isinstance(wsp.child, Workspace))
assert(wsp.child.wibble is None)
assert(wsp.child.log == wsp.log)
def test_sub_kwargs():
""" Test creating a sub workspace with kwargs """
wsp = Workspace()
wsp.sub("child", wibble="squid", pudding=4)
assert(isinstance(wsp.child, Workspace))
assert(wsp.child.wibble == "squid")
assert(wsp.child.pudding == 4)
def test_sub_inherit():
""" Test sub workspaces can inherit values from their parent """
wsp = Workspace()
wsp.wibble = 7
wsp.wobble = 6
wsp.sub("child")
wsp.child.wobble = 5
assert(wsp.child.wibble == 7)
assert(wsp.child.wobble == 5)
def test_sub_inherit_wsp():
""" Test sub workspaces can inherit sub-workspaces from their parent """
wsp = Workspace()
wsp.sub("child1")
wsp.child1.wibble = 7
wsp.sub("child2")
assert(wsp.child2.child1 is not None)
assert(wsp.child2.child1.wibble == 7)
def test_input_wsp():
""" Test putting constructor attributes in a default sub workspaces """
wsp = Workspace(input_wsp="cakes", flapjack=4, fruit=3, defaults=[])
assert(wsp.cakes is not None)
assert(wsp.cakes.flapjack == 4)
assert(wsp.cakes.fruit == 3)
assert(wsp.flapjack is None)
assert(wsp.fruit is None)
def test_default_wsp():
""" Test default sub-workspaces for search """
wsp = Workspace(defaults=["cars"])
assert(wsp.cars is None)
wsp.ferrari = 9
wsp.merc = 8
wsp.sub("cars")
wsp.cars.porsche = 6
wsp.cars.ferrari = 4
assert(wsp.cars is not None)
assert(wsp.ferrari == 9)
assert(wsp.porsche == 6)
assert(wsp.merc == 8)
assert(wsp.cars.porsche == 6)
assert(wsp.cars.ferrari == 4)
assert(wsp.cars.merc is None)
def test_default_wsp_multiple():
""" Test multiple default sub-workspaces for search """
wsp = Workspace(defaults=["plants", "trees"])
wsp.daffodil = 9
wsp.larch = 1
wsp.sub("trees")
wsp.trees.oak = 3
wsp.trees.larch = 2
wsp.trees.apple = 7
assert(wsp.daffodil == 9)
assert(wsp.larch == 1)
assert(wsp.oak == 3)
assert(wsp.apple == 7)
assert(wsp.trees.larch == 2)
assert(wsp.trees.oak == 3)
assert(wsp.trees.daffodil is None)
assert(wsp.trees.apple == 7)
wsp.sub("plants")
wsp.plants.lily = 4
wsp.plants.oak = 5
assert(wsp.daffodil == 9)
assert(wsp.larch == 1)
assert(wsp.lily == 4)
assert(wsp.oak == 5)
assert(wsp.apple == 7)
assert(wsp.trees.oak == 3)
assert(wsp.trees.lily is None)
assert(wsp.plants.daffodil is None)
assert(wsp.plants.lily == 4)
assert(wsp.plants.oak == 5)
def test_savedir_created():
""" Test save dirs are created if they don't already exist """
tempdir = tempfile.mktemp("_oxasl")
try:
log = StringIO()
wsp = Workspace(savedir=tempdir, log=log)
assert(wsp.savedir) == tempdir
assert(os.path.isdir(tempdir))
assert("WARNING" not in log.getvalue())
finally:
shutil.rmtree(tempdir)
def test_savedir_created_multilevel():
""" Test multi-level save dirs are created if they don't already exist """
tempdir = os.path.join(tempfile.mktemp("_oxasl"), "extra", "levels")
try:
log = StringIO()
wsp = Workspace(savedir=tempdir, log=log)
assert(wsp.savedir) == tempdir
assert(os.path.isdir(tempdir))
assert("WARNING" not in log.getvalue())
finally:
shutil.rmtree(tempdir)
def test_savedir_sub():
""" Test sub-workspace have subdirs created """
tempdir = tempfile.mktemp("_oxasl")
try:
log = StringIO()
wsp = Workspace(savedir=tempdir, log=log)
wsp.sub("quark")
path = os.path.join(tempdir, "quark")
assert(wsp.quark.savedir == path)
assert(os.path.isdir(path))
assert("WARNING" not in log.getvalue())
finally:
shutil.rmtree(tempdir)
def test_image_save():
"""
Test images are saved in the savedir
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
img = Image(np.random.rand(5, 5, 5))
wsp.testimg = img
path = os.path.join(tempdir, "testimg.nii.gz")
assert(os.path.isfile(path))
otherimg = Image(path)
assert(np.all(img.data == wsp.testimg.data))
assert(np.all(img.data == otherimg.data))
finally:
shutil.rmtree(tempdir)
def test_image_nosave():
"""
Test setting an image without saving
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
img = Image(np.random.rand(5, 5, 5))
wsp.set_item("testimg", img, save=False)
path = os.path.join(tempdir, "testimg.nii.gz")
assert(not os.path.exists(path))
assert(np.all(img.data == wsp.testimg.data))
finally:
shutil.rmtree(tempdir)
def test_image_save_name():
"""
Test images are saved in the savedir with the specified name
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
img = Image(np.random.rand(5, 5, 5))
wsp.set_item("testimg", img, save_name="pumpkin")
path = os.path.join(tempdir, "testimg.nii.gz")
assert(not os.path.exists(path))
path = os.path.join(tempdir, "pumpkin.nii.gz")
assert(os.path.isfile(path))
otherimg = Image(path)
assert(np.all(img.data == wsp.testimg.data))
assert(np.all(img.data == otherimg.data))
finally:
shutil.rmtree(tempdir)
def test_matrix_save():
"""
Test 2D matrices are saved in the savedir
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.testmat = mat
path = os.path.join(tempdir, "testmat.mat")
assert(os.path.isfile(path))
with open(path) as matfile:
othermat = text_to_matrix(matfile.read())
assert(np.all(mat == wsp.testmat))
assert(np.all(mat == othermat))
finally:
shutil.rmtree(tempdir)
def test_matrix_nosave():
"""
Test setting an matrix without saving
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.set_item("testmat", mat, save=False)
path = os.path.join(tempdir, "testmat.mat")
assert(not os.path.exists(path))
assert(np.all(mat == wsp.testmat))
finally:
shutil.rmtree(tempdir)
def test_matrix_save_name():
"""
Test matrices are saved in the savedir with the specified name
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.set_item("testmat", mat, save_name="parsnip")
path = os.path.join(tempdir, "testmat.mat")
assert(not os.path.exists(path))
path = os.path.join(tempdir, "parsnip.mat")
assert(os.path.isfile(path))
with open(path) as matfile:
othermat = text_to_matrix(matfile.read())
assert(np.all(mat == wsp.testmat))
assert(np.all(mat == othermat))
finally:
shutil.rmtree(tempdir)
def _custom_save(mat):
return "Custom Save"
def test_custom_save():
"""
    Test that a custom save function is used to save the item
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.set_item("testmat", mat, save_fn=_custom_save)
path = os.path.join(tempdir, "testmat")
assert(os.path.exists(path))
with open(path) as sfile:
assert("Custom Save" == sfile.read())
finally:
shutil.rmtree(tempdir)
def test_custom_save_name():
"""
    Test that a custom save function respects the specified save name
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.set_item("testmat", mat, save_name="potato", save_fn=_custom_save)
path = os.path.join(tempdir, "testmat")
assert(not os.path.exists(path))
path = os.path.join(tempdir, "potato")
assert(os.path.exists(path))
with open(path) as sfile:
assert("Custom Save" == sfile.read())
finally:
shutil.rmtree(tempdir)
def test_custom_save_nosave():
"""
    Test that the custom save function is skipped when save=False
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
wsp = Workspace(savedir=tempdir)
mat = np.random.rand(4, 4)
wsp.set_item("testmat", mat, save_fn=_custom_save, save=False)
path = os.path.join(tempdir, "testmat")
assert(not os.path.exists(path))
finally:
shutil.rmtree(tempdir)
def test_savedir_already_exists():
"""
Test warning when save dir already exists
"""
tempdir = tempfile.mkdtemp("_oxasl")
try:
log = StringIO()
wsp = Workspace(savedir=tempdir, log=log)
assert("WARNING" in log.getvalue())
assert("already exists" in log.getvalue())
finally:
shutil.rmtree(tempdir)
def test_fsllog_default():
"""
Test the FSL logging context created
"""
log = StringIO()
wsp = Workspace(log=log)
assert(isinstance(wsp.fsllog, dict))
assert(wsp.fsllog.get("stdout", None) is None)
assert(wsp.fsllog.get("stderr", None) == log)
assert(wsp.fsllog.get("cmd", None) is None)
def test_fsllog_debug():
"""
Test the FSL logging context created in debug mode
"""
log = StringIO()
wsp = Workspace(debug=True, log=log)
assert(isinstance(wsp.fsllog, dict))
assert(wsp.fsllog.get("stdout", None) == log)
assert(wsp.fsllog.get("stderr", None) == log)
assert(wsp.fsllog.get("cmd", None) == log)
def test_aslimage():
kwargs = {
"asldata" : np.random.rand(5, 5, 5, 8),
"tis" : [1, 2],
"iaf" : "tc",
"ibf" : "rpt",
}
wsp = Workspace(auto_asldata=True, **kwargs)
assert(isinstance(wsp.asldata, AslImage))
assert(wsp.asldata.tis == [1, 2])
assert(wsp.asldata.iaf == "tc")
assert(wsp.asldata.order == "ltr")
assert(wsp.asldata.rpts == [2, 2])
def test_aslimage_missing():
with pytest.raises(ValueError):
Workspace(auto_asldata=True)
with pytest.raises(ValueError):
Workspace(auto_asldata=True, asldata=None)
def test_text_to_matrix_spaces():
"""
Check that text_to_matrix works with space separated data
"""
text = "1 2 3\n4 5 6\n"
mat = text_to_matrix(text)
assert(np.all(mat == [[1, 2, 3], [4, 5, 6]]))
def test_text_to_matrix_comma():
"""
Check that text_to_matrix works with comma separated data
"""
text = "1, 2, 3\n4,5,6\n"
mat = text_to_matrix(text)
assert(np.all(mat == [[1, 2, 3], [4, 5, 6]]))
def test_text_to_matrix_tabs():
"""
Check that text_to_matrix works with tab separated data
"""
text = "1\t2\t3\n4\t 5\t 6\n"
mat = text_to_matrix(text)
assert(np.all(mat == [[1, 2, 3], [4, 5, 6]]))
def test_text_to_matrix_mixed():
"""
Check that text_to_matrix works with mixed separators
"""
text = "1\t2 3\n4 , 5, \t 6\n"
mat = text_to_matrix(text)
assert(np.all(mat == [[1, 2, 3], [4, 5, 6]]))
def test_text_to_matrix_not_matrix():
text = "1 2 3\n4 5\n"
with pytest.raises(ValueError):
mat = text_to_matrix(text)
def test_text_to_matrix_not_numbers():
text = "1 x 3\n4 5 6\n"
with pytest.raises(ValueError):
mat = text_to_matrix(text)
|
{"hexsha": "2edaca371794f4302e7ff4f8d86edcbfb6009663", "size": 12803, "ext": "py", "lang": "Python", "max_stars_repo_path": "oxasl/test/test_workspace.py", "max_stars_repo_name": "ibme-qubic/oxasl", "max_stars_repo_head_hexsha": "8a0c055752d6e10cd932336ae6916f0c4fc0a2e9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-02T13:01:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-02T13:01:47.000Z", "max_issues_repo_path": "oxasl/test/test_workspace.py", "max_issues_repo_name": "ibme-qubic/oxasl", "max_issues_repo_head_hexsha": "8a0c055752d6e10cd932336ae6916f0c4fc0a2e9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2019-01-14T13:22:00.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-12T20:34:20.000Z", "max_forks_repo_path": "oxasl/test/test_workspace.py", "max_forks_repo_name": "physimals/oxasl", "max_forks_repo_head_hexsha": "e583103f3313aed2890b60190b6ca7b265a46e3c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-03-19T15:46:48.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-13T16:55:48.000Z", "avg_line_length": 29.9836065574, "max_line_length": 78, "alphanum_fraction": 0.6156369601, "include": true, "reason": "import numpy", "num_tokens": 3445}
|
"""
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 coco.py train --dataset=/path/to/coco/ --model=coco
# Train a new model starting from ImageNet weights. Also auto download COCO dataset
python3 coco.py train --dataset=/path/to/coco/ --model=imagenet --download=True
# Continue training a model that you had trained earlier
python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5
# Continue training the last model you trained
python3 coco.py train --dataset=/path/to/coco/ --model=last
    # Run COCO evaluation on the last model you trained
python3 coco.py evaluate --dataset=/path/to/coco/ --model=last
"""
import os
import sys
import time
import numpy as np
import imgaug # https://github.com/aleju/imgaug (pip3 install imgaug)
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import zipfile
import urllib.request
import shutil
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = "2014"
############################################################
# Configurations
############################################################
class CocoConfig(Config):
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "coco"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Uncomment to train on 8 GPUs (default is 1)
# GPU_COUNT = 8
# Number of classes (including background)
NUM_CLASSES = 1 + 80 # COCO has 80 classes
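# A minimal sketch of overriding CocoConfig for single-image inference
# (mirrors the InferenceConfig pattern used by this file's evaluate path;
# shown only as an illustration):
#
#     class InferenceConfig(CocoConfig):
#         GPU_COUNT = 1
#         IMAGES_PER_GPU = 1
#         DETECTION_MIN_CONFIDENCE = 0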
############################################################
# Dataset
############################################################
class CocoDataset(utils.Dataset):
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
class_map=None, return_coco=False, auto_download=False):
"""Load a subset of the COCO dataset.
dataset_dir: The root directory of the COCO dataset.
subset: What to load (train, val, minival, valminusminival)
year: What dataset year to load (2014, 2017) as a string, not an integer
class_ids: If provided, only loads images that have the given classes.
        class_map: TODO: Not implemented yet. Supports mapping classes from
different datasets to the same class ID.
return_coco: If True, returns the COCO object.
auto_download: Automatically download and unzip MS-COCO images and annotations
"""
if auto_download is True:
self.auto_download(dataset_dir, subset, year)
coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
if subset == "minival" or subset == "valminusminival":
subset = "val"
image_dir = "{}/{}{}".format(dataset_dir, subset, year)
# Load all classes or a subset?
if not class_ids:
# All classes
class_ids = sorted(coco.getCatIds())
# All images or a subset?
if class_ids:
image_ids = []
for id in class_ids:
image_ids.extend(list(coco.getImgIds(catIds=[id])))
# Remove duplicates
image_ids = list(set(image_ids))
else:
# All images
image_ids = list(coco.imgs.keys())
# Add classes
for i in class_ids:
self.add_class("coco", i, coco.loadCats(i)[0]["name"])
# Add images
for i in image_ids:
self.add_image(
"coco", image_id=i,
path=os.path.join(image_dir, coco.imgs[i]['file_name']),
width=coco.imgs[i]["width"],
height=coco.imgs[i]["height"],
annotations=coco.loadAnns(coco.getAnnIds(
imgIds=[i], catIds=class_ids, iscrowd=None)))
if return_coco:
return coco
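    # A minimal usage sketch for load_coco (hypothetical path; prepare() comes
    # from the utils.Dataset base class):
    #
    #     dataset = CocoDataset()
    #     dataset.load_coco("/path/to/coco", "train", year="2014")
    #     dataset.prepare()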
def auto_download(self, dataDir, dataType, dataYear):
"""Download the COCO dataset/annotations if requested.
dataDir: The root directory of the COCO dataset.
dataType: What to load (train, val, minival, valminusminival)
dataYear: What dataset year to load (2014, 2017) as a string, not an integer
Note:
For 2014, use "train", "val", "minival", or "valminusminival"
For 2017, only "train" and "val" annotations are available
"""
# Setup paths and file names
if dataType == "minival" or dataType == "valminusminival":
imgDir = "{}/{}{}".format(dataDir, "val", dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear)
else:
imgDir = "{}/{}{}".format(dataDir, dataType, dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear)
# print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL)
# Create main folder if it doesn't exist yet
if not os.path.exists(dataDir):
os.makedirs(dataDir)
# Download images if not available locally
if not os.path.exists(imgDir):
os.makedirs(imgDir)
print("Downloading images to " + imgZipFile + " ...")
with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:
shutil.copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + imgZipFile)
with zipfile.ZipFile(imgZipFile, "r") as zip_ref:
zip_ref.extractall(dataDir)
print("... done unzipping")
print("Will use images in " + imgDir)
# Setup annotations data paths
annDir = "{}/annotations".format(dataDir)
if dataType == "minival":
annZipFile = "{}/instances_minival2014.json.zip".format(dataDir)
annFile = "{}/instances_minival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0"
unZipDir = annDir
elif dataType == "valminusminival":
annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir)
annFile = "{}/instances_valminusminival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0"
unZipDir = annDir
else:
annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear)
annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear)
annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear)
unZipDir = dataDir
# print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL)
# Download annotations if not available locally
if not os.path.exists(annDir):
os.makedirs(annDir)
if not os.path.exists(annFile):
if not os.path.exists(annZipFile):
print("Downloading zipped annotations to " + annZipFile + " ...")
with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:
shutil.copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + annZipFile)
with zipfile.ZipFile(annZipFile, "r") as zip_ref:
zip_ref.extractall(unZipDir)
print("... done unzipping")
print("Will use annotations in " + annFile)
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. This
function converts the different mask format to one format
in the form of a bitmap [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a COCO image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "coco":
return super(CocoDataset, self).load_mask(image_id)
instance_masks = []
class_ids = []
annotations = self.image_info[image_id]["annotations"]
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
for annotation in annotations:
class_id = self.map_source_class_id(
"coco.{}".format(annotation['category_id']))
if class_id:
m = self.annToMask(annotation, image_info["height"],
image_info["width"])
# Some objects are so small that they're less than 1 pixel area
# and end up rounded out. Skip those objects.
if m.max() < 1:
continue
# Is it a crowd? If so, use a negative class ID.
if annotation['iscrowd']:
# Use negative class ID for crowds
class_id *= -1
                # For crowd masks, annToMask() sometimes returns a mask
                # smaller than the given dimensions. If so, fall back to a
                # full-image mask so downstream array shapes stay consistent.
if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
instance_masks.append(m)
class_ids.append(class_id)
# Pack instance masks into an array
if class_ids:
            mask = np.stack(instance_masks, axis=2).astype(bool)
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
else:
# Call super class to return an empty mask
return super(CocoDataset, self).load_mask(image_id)
def image_reference(self, image_id):
"""Return a link to the image in the COCO Website."""
info = self.image_info[image_id]
if info["source"] == "coco":
return "http://cocodataset.org/#explore?id={}".format(info["id"])
else:
super(CocoDataset, self).image_reference(image_id)
# The following two functions are from pycocotools with a few changes.
def annToRLE(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
segm = ann['segmentation']
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, height, width)
rle = maskUtils.merge(rles)
elif isinstance(segm['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, height, width)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann, height, width)
m = maskUtils.decode(rle)
return m
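# Usage sketch for the dataset class above (the COCO path is a
# placeholder): after prepare(), load_mask() returns the per-instance
# bitmap stack built via annToRLE()/annToMask().
#
#   dataset = CocoDataset()
#   dataset.load_coco("/path/to/coco", "train", year="2014")
#   dataset.prepare()
#   masks, class_ids = dataset.load_mask(dataset.image_ids[0])
#   # masks: bool array [height, width, instance count]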
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
"""Arrange resutls to match COCO specs in http://cocodataset.org/#format
"""
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
mask = masks[:, :, i]
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "coco"),
"bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score,
"segmentation": maskUtils.encode(np.asfortranarray(mask))
}
results.append(result)
return results
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
"""Runs official COCO evaluation.
    dataset: A Dataset object with validation data
eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
limit: if not 0, it's the number of images to use for evaluation
"""
# Pick COCO images from the dataset
image_ids = image_ids or dataset.image_ids
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
# Get corresponding COCO image IDs.
coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
t_prediction = 0
t_start = time.time()
results = []
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
# Run detection
t = time.time()
r = model.detect([image], verbose=0)[0]
t_prediction += (time.time() - t)
# Convert results to COCO format
# Cast masks to uint8 because COCO tools errors out on bool
image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
r["rois"], r["class_ids"],
r["scores"],
r["masks"].astype(np.uint8))
results.extend(image_results)
# Load results. This modifies results with additional attributes.
coco_results = coco.loadRes(results)
# Evaluate
cocoEval = COCOeval(coco, coco_results, eval_type)
cocoEval.params.imgIds = coco_image_ids
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
print("Prediction time: {}. Average {}/image".format(
t_prediction, t_prediction / len(image_ids)))
print("Total time: ", time.time() - t_start)
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on MS COCO.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'evaluate' on MS COCO")
parser.add_argument('--dataset', required=True,
metavar="/path/to/coco/",
help='Directory of the MS-COCO dataset')
parser.add_argument('--year', required=False,
default=DEFAULT_DATASET_YEAR,
metavar="<year>",
help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
parser.add_argument('--model', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=500,
metavar="<image count>",
help='Images to use for evaluation (default=500)')
parser.add_argument('--download', required=False,
default=False,
metavar="<True|False>",
help='Automatically download and unzip MS-COCO files (default=False)',
type=bool)
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.logs)
print("Auto Download: ", args.download)
# Configurations
if args.command == "train":
config = CocoConfig()
else:
class InferenceConfig(CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.model.lower() == "coco":
model_path = COCO_MODEL_PATH
elif args.model.lower() == "last":
# Find last trained weights
model_path = model.find_last()
elif args.model.lower() == "imagenet":
# Start from ImageNet trained weights
model_path = model.get_imagenet_weights()
else:
model_path = args.model
# Load weights
print("Loading weights ", model_path)
model.load_weights(model_path, by_name=True)
model.keras_model.save("./tmp")
# Train or evaluate
if args.command == "train":
        # Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask R-CNN paper.
dataset_train = CocoDataset()
dataset_train.load_coco(args.dataset, "train", year=args.year, auto_download=args.download)
if args.year in '2014':
dataset_train.load_coco(args.dataset, "valminusminival", year=args.year, auto_download=args.download)
dataset_train.prepare()
# Validation dataset
dataset_val = CocoDataset()
val_type = "val" if args.year in '2017' else "minival"
dataset_val.load_coco(args.dataset, val_type, year=args.year, auto_download=args.download)
dataset_val.prepare()
# Image Augmentation
# Right/Left flip 50% of the time
augmentation = imgaug.augmenters.Fliplr(0.5)
# *** This training schedule is an example. Update to your needs ***
# Training - Stage 1
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=40,
layers='heads',
augmentation=augmentation)
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Fine tune Resnet stage 4 and up")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=120,
layers='4+',
augmentation=augmentation)
# Training - Stage 3
# Fine tune all layers
print("Fine tune all layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=160,
layers='all',
augmentation=augmentation)
elif args.command == "evaluate":
# Validation dataset
dataset_val = CocoDataset()
val_type = "val" if args.year in '2017' else "minival"
coco = dataset_val.load_coco(args.dataset, val_type, year=args.year, return_coco=True, auto_download=args.download)
dataset_val.prepare()
print("Running COCO evaluation on {} images.".format(args.limit))
evaluate_coco(model, dataset_val, coco, "bbox", limit=int(args.limit))
else:
print("'{}' is not recognized. "
"Use 'train' or 'evaluate'".format(args.command))
|
{"hexsha": "d9ec13d7e408d5d972a4404531bdc9316aa06c70", "size": 21581, "ext": "py", "lang": "Python", "max_stars_repo_path": "samples/coco/coco.py", "max_stars_repo_name": "xman0810/Mask_RCNN", "max_stars_repo_head_hexsha": "06e51f44961d4803696bcb2eab27352fc83162c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "samples/coco/coco.py", "max_issues_repo_name": "xman0810/Mask_RCNN", "max_issues_repo_head_hexsha": "06e51f44961d4803696bcb2eab27352fc83162c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "samples/coco/coco.py", "max_forks_repo_name": "xman0810/Mask_RCNN", "max_forks_repo_head_hexsha": "06e51f44961d4803696bcb2eab27352fc83162c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2630597015, "max_line_length": 123, "alphanum_fraction": 0.5886659562, "include": true, "reason": "import numpy", "num_tokens": 4851}
|
# Copyright 2021 qclib project.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" https://arxiv.org/abs/2011.07977 """
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister
from qclib.util import _compute_matrix_angles
class CVQRAM:
""" https://arxiv.org/abs/2011.07977 """
def __init__(self, nbits, data, mode='v-chain'):
self.initialization(nbits, mode)
norm = 1
if mode == 'v-chain':
self.circuit.x(self.qr_u2[0])
elif mode == 'mct':
self.circuit.x(self.qr_u0[1])
control = range(self.memory.size)
for binary_string, amplitude in data:
self._load_binary(binary_string, mode)
            norm = self.load_superposition(amplitude, mode, norm, control)
self._load_binary(binary_string, mode)
def initialization(self, nbits, mode):
""" Initialize quantum registers"""
self.nbits = nbits
self.memory = QuantumRegister(self.nbits, name='m')
if mode=='mct':
self.qr_u0 = QuantumRegister(2, name='u0')
self.circuit = QuantumCircuit(self.qr_u0, self.memory)
elif mode=='v-chain':
self.aux = QuantumRegister(nbits-1, name='anc')
self.qr_u1 = QuantumRegister(1, name='u1')
self.qr_u2 = QuantumRegister(1, name='u2')
self.circuit = QuantumCircuit(self.qr_u1, self.qr_u2, self.memory, self.aux, )
def _load_binary(self, binary_string, mode):
for bit_index, bit in enumerate(binary_string):
if bit == '1':
                if mode == 'v-chain':
                    # patterns are written controlled on the |1>-initialized
                    # ancilla (the still-to-be-processed branch)
                    self.circuit.cx(self.qr_u2[0], self.memory[bit_index])
                elif mode == 'mct':
                    self.circuit.cx(self.qr_u0[1], self.memory[bit_index])
elif bit == '0':
self.circuit.x(self.memory[bit_index])
def load_superposition(self, feature, mode, norm, control):
"""
Load pattern in superposition
"""
alpha, beta, phi = _compute_matrix_angles(feature, norm)
if mode =='v-chain':
self.circuit.rccx(self.memory[control[0]],
self.memory[control[1]], self.aux[0])
for j in range(2, len(control)):
self.circuit.rccx(self.memory[control[j]], self.aux[j - 2], self.aux[j - 1])
self.circuit.cx(self.aux[len(control) - 2], self.qr_u1[0])
self.circuit.cu3(alpha, beta, phi, self.qr_u1[0], self.qr_u2[0])
self.circuit.cx(self.aux[len(control) - 2], self.qr_u1[0])
for j in reversed(range(2, len(control))):
self.circuit.rccx(self.memory[control[j]], self.aux[j - 2], self.aux[j - 1])
self.circuit.rccx(self.memory[control[0]],
self.memory[control[1]], self.aux[0])
if mode =='mct':
self.circuit.mct(self.memory, self.qr_u0[0])
self.circuit.cu3(alpha, beta, phi, self.qr_u0[0], self.qr_u0[1])
self.circuit.mct(self.memory, self.qr_u0[0])
        return norm - np.absolute(np.power(feature, 2))
def cvqram_initialize(state):
"""
Creates a circuit to initialize a quantum state arXiv:2011.07977
"""
    # the number of memory qubits equals the length of the binary strings
    n_qubits = len(state[0][0])
memory = CVQRAM(n_qubits, state)
return memory.circuit
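# Usage sketch: store two 3-qubit patterns with equal amplitudes. The
# `state` format follows CVQRAM.__init__: (binary_string, amplitude) pairs
# whose squared amplitudes sum to one.
#
#   state = [('001', 1 / np.sqrt(2)), ('110', 1 / np.sqrt(2))]
#   circuit = cvqram_initialize(state)
#   print(circuit.draw())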
|
{"hexsha": "04a5cd26a3efe97be96f3896f36afe3163dae671", "size": 3867, "ext": "py", "lang": "Python", "max_stars_repo_path": "qclib/state_preparation/apqm.py", "max_stars_repo_name": "adjs/qclib", "max_stars_repo_head_hexsha": "0c3f1eec68536df4d161297554059da06b7722f7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-05T23:46:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-05T23:46:22.000Z", "max_issues_repo_path": "qclib/state_preparation/apqm.py", "max_issues_repo_name": "israelferrazaraujo/qclib", "max_issues_repo_head_hexsha": "998a98b33a059c59452a50389084a9a747426ea8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qclib/state_preparation/apqm.py", "max_forks_repo_name": "israelferrazaraujo/qclib", "max_forks_repo_head_hexsha": "998a98b33a059c59452a50389084a9a747426ea8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5267857143, "max_line_length": 92, "alphanum_fraction": 0.605637445, "include": true, "reason": "import numpy", "num_tokens": 998}
|
const TRY_BUT_ALLOW_FAILURES_URL_LIST = String[
]
|
{"hexsha": "d3a922bf1ec50290f17d589186fb6a2cb416d433", "size": 55, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "config/repositories/try-but-allow-failures-url-list.jl", "max_stars_repo_name": "KristofferC/RepoSnapshots.jl", "max_stars_repo_head_hexsha": "357e12a814309b8b751a2927bf37440357e5cd76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-28T11:41:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T18:33:04.000Z", "max_issues_repo_path": "config/repositories/try-but-allow-failures-url-list.jl", "max_issues_repo_name": "KristofferC/RepoSnapshots.jl", "max_issues_repo_head_hexsha": "357e12a814309b8b751a2927bf37440357e5cd76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2019-03-27T14:33:30.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-17T00:47:42.000Z", "max_forks_repo_path": "config/repositories/try-but-allow-failures-url-list.jl", "max_forks_repo_name": "KristofferC/RepoSnapshots.jl", "max_forks_repo_head_hexsha": "357e12a814309b8b751a2927bf37440357e5cd76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-05T13:04:49.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-05T13:04:49.000Z", "avg_line_length": 13.75, "max_line_length": 47, "alphanum_fraction": 0.7636363636, "num_tokens": 14}
|
!isempty(ARGS) || error("No config supplied.")
isfile(ARGS[1]) || error("Cannot read '$(ARGS[1])'")
isabspath(ARGS[1]) || error("Please use an absolute path for the config.")
println("Config supplied: '$(ARGS[1])'")
config_file = ARGS[1]
include(config_file)
using MLDataUtils
using Random
using DelimitedFiles
function downsample_class(data, labels, target_class, other_class, target_percentage)
target_indicies = shuffle!(findall(x -> x .== target_class, labels))
mask = falses(length(labels))
mask[labels .== other_class] .= true
mask[target_indicies[1:ceil(Int, target_percentage * sum(labels .== other_class) / (1 - target_percentage))]] .= true
return data[mask, :], labels[mask]
end
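# Usage sketch (toy data, hypothetical values): downsample the outlier
# class until it makes up roughly 5% of the returned observations.
#
# labels = [fill(:outlier, 50); fill(:inlier, 100)]
# data = randn(length(labels), 3)
# X, y = downsample_class(data, labels, :outlier, :inlier, 0.05)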
function process_file(input_file, output_file; num_versions=1)
raw = readdlm(input_file, ',')
num_attributes = length(findall(x -> occursin("@ATTRIBUTE", string(x)), raw[:, 1])) - 2
id_column = findfirst(x -> occursin("@ATTRIBUTE 'id'", string(x)), raw[:, 1]) - 1
label_column = findfirst(x -> occursin("@ATTRIBUTE 'outlier'", string(x)), raw[:, 1]) - 1
data_start_row = findlast(x -> x == "@DATA", raw[:, 1]) + 1
raw[:, label_column] = map(x -> x == "'yes'" ? :outlier : :inlier, raw[:, label_column])
data, labels = raw[data_start_row:end, [i for i in 1:size(raw, 2) if i != id_column && i != label_column]], raw[data_start_row:end, label_column]
data = hcat(data, labels)
@assert size(data, 2) - 1 == num_attributes
@assert size(data, 1) == length(labels)
for i in 1:num_versions
Random.seed!(i)
outlier_percentage = sum(labels .== :outlier) / length(labels)
resampling = outlier_percentage != TARGET_OUTLIER_PERCENTAGE || length(labels) > MAX_VALUES
target_output_file = resampling ? "$(output_file[1:end-4])_r0$i.csv" : output_file
@info "Generating '$target_output_file'."
res_data, res_labels = copy(data), copy(labels)
if outlier_percentage > TARGET_OUTLIER_PERCENTAGE
@info "Downsampling outlier class (outlier_percentage = $(outlier_percentage))."
res_data, res_labels = downsample_class(res_data, res_labels, :outlier, :inlier, TARGET_OUTLIER_PERCENTAGE)
elseif outlier_percentage < TARGET_OUTLIER_PERCENTAGE
@info "Downsampling inlier class (outlier_percentage = $(outlier_percentage))."
res_data, res_labels = downsample_class(res_data, res_labels, :inlier, :outlier, 1 - TARGET_OUTLIER_PERCENTAGE)
end
if length(res_labels) > MAX_VALUES
@info "Downsampling from $(length(res_labels)) to $MAX_VALUES observations."
p = MAX_VALUES / length(res_labels)
(res_data, res_labels), _ = stratifiedobs((res_data, res_labels), p=p, obsdim=1)
end
outlier_percentage = sum(res_labels .== :outlier) / length(res_labels)
@info "Final outlier_percentage = $(outlier_percentage))."
@assert size(res_data, 1) == length(res_labels)
@assert abs(outlier_percentage - TARGET_OUTLIER_PERCENTAGE) < 0.01
writedlm(target_output_file, res_data, ',')
end
end
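# Usage sketch (hypothetical dataset name): resample one literature file
# into three versions, matching the loop at the bottom of this script.
#
# process_file(joinpath(dataset_dir, "literature", "SomeDataset", "SomeDataset_withoutdupl_norm.arff"),
#              joinpath(output_path, "SomeDataset", "SomeDataset_withoutdupl_norm.csv"),
#              num_versions=3)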
Random.seed!(0)
MAX_VALUES = 1000
TARGET_OUTLIER_PERCENTAGE = 0.05
target_versions_semantic = r"withoutdupl_norm_05_v0[1-3]"
target_versions_literature = r"withoutdupl_norm"
dataset_dir = normpath(joinpath(data_root, "input", "raw"))
output_path = normpath(joinpath(data_root, "input", "processed"))
mkpath(output_path)
@info "Saving processed files to $output_path."
for dataset_class in ["semantic", "literature"]
for d in data_dirs[dataset_class]
@info d
outdir = joinpath(output_path, d)
isdir(outdir) || mkpath(outdir)
if dataset_class == "semantic"
target_files = filter(x -> occursin(target_versions_semantic, x), readdir(joinpath(dataset_dir, dataset_class, d)))
@assert length(target_files) == 3
@info "[$(d)] Found $(length(target_files)) files."
for f in target_files
process_file(joinpath(dataset_dir, "semantic", d, f), joinpath(outdir, f[1:end-5] * ".csv"))
end
else
target_files = filter(x -> occursin(target_versions_literature, x), readdir(joinpath(dataset_dir, dataset_class, d)))
if (i = findfirst(x -> occursin("catremoved", x), target_files)) !== nothing
target_file = target_files[i]
else
target_file = first(target_files)
end
@info "[$(d)] Resampling '$target_file'."
process_file(joinpath(dataset_dir, dataset_class, d, target_file), joinpath(outdir, target_file[1:end-5] * ".csv"), num_versions=3)
end
end
end
|
{"hexsha": "1b64911a5690ae52360b93015fd9730fe8196155", "size": 4693, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/preprocess_data.jl", "max_stars_repo_name": "kit-dbis/ocal-evaluation", "max_stars_repo_head_hexsha": "b6dc7c0896a65c56650dd428b43acf398ef198aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-08-13T14:42:21.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-24T14:07:58.000Z", "max_issues_repo_path": "scripts/preprocess_data.jl", "max_issues_repo_name": "kit-dbis/ocal-evaluation", "max_issues_repo_head_hexsha": "b6dc7c0896a65c56650dd428b43acf398ef198aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-08T22:20:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-08T22:20:04.000Z", "max_forks_repo_path": "scripts/preprocess_data.jl", "max_forks_repo_name": "kit-dbis/ocal-evaluation", "max_forks_repo_head_hexsha": "b6dc7c0896a65c56650dd428b43acf398ef198aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-14T18:18:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T18:18:30.000Z", "avg_line_length": 49.9255319149, "max_line_length": 149, "alphanum_fraction": 0.6633283614, "num_tokens": 1209}
|
import numpy as np
import base64
from stega.injector import Injector
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin
rabbit_url = 'amqp://guest:guest@localhost:5672//'
class Worker(ConsumerMixin):
def __init__(self, connection, queues):
self.connection = connection
self.queues = queues
def get_consumers(self, Consumer, channel):
return [Consumer(queues=self.queues,
callbacks=[self.on_message])]
    def on_message(self, body, message):
        # frames arrive base64-encoded under the 'frame' key
        body = body["frame"].encode('ascii')
        body = base64.b64decode(body)
        # rebuild the 720x1280 RGB frame from the raw byte buffer
        np_array = np.frombuffer(body, dtype=np.uint8)
        np_array = np_array.reshape((720, 1280, 3))
        decoded_message = Injector.pull_out_message_from_image(np_array)
if decoded_message != "No message":
print('DECODED MESSAGE: ', decoded_message)
message.ack()
def run():
exchange = Exchange("video-exchange", type="direct")
queues = [Queue("frames", exchange, routing_key="video")]
with Connection(rabbit_url, heartbeat=4) as conn:
worker = Worker(conn, queues)
worker.run()
if __name__ == "__main__":
run()
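# A minimal sketch of a matching publisher (an assumed counterpart to
# on_message above: frames travel base64-encoded under the 'frame' key):
#
# with Connection(rabbit_url) as conn:
#     producer = conn.Producer(serializer='json')
#     frame = np.zeros((720, 1280, 3), dtype=np.uint8)
#     producer.publish({'frame': base64.b64encode(frame.tobytes()).decode('ascii')},
#                      exchange=Exchange('video-exchange', type='direct'),
#                      routing_key='video',
#                      declare=[Queue('frames')])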
|
{"hexsha": "a6170a5d4effdd518e5c28205200be178138b434", "size": 1223, "ext": "py", "lang": "Python", "max_stars_repo_path": "viewer.py", "max_stars_repo_name": "vnrdd/stega-live-video", "max_stars_repo_head_hexsha": "f1d7d93248ea7c7c4b484543e8b8494fcc252885", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "viewer.py", "max_issues_repo_name": "vnrdd/stega-live-video", "max_issues_repo_head_hexsha": "f1d7d93248ea7c7c4b484543e8b8494fcc252885", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "viewer.py", "max_forks_repo_name": "vnrdd/stega-live-video", "max_forks_repo_head_hexsha": "f1d7d93248ea7c7c4b484543e8b8494fcc252885", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.575, "max_line_length": 72, "alphanum_fraction": 0.6524938675, "include": true, "reason": "import numpy", "num_tokens": 282}
|
import matplotlib
matplotlib.use('Agg')
from hcipy import *
import numpy as np
import matplotlib.pyplot as plt
import os
import pytest
def test_gif_writer():
grid = make_pupil_grid(256)
mw = GifWriter('test.gif')
for i in range(25):
field = Field(np.random.randn(grid.size), grid)
plt.clf()
imshow_field(field)
mw.add_frame()
mw.close()
assert os.path.isfile('test.gif')
assert not os.path.exists('test.gif.frames')
pytest.raises(RuntimeError, mw.add_frame)
os.remove('test.gif')
def test_imshow_field():
grid = make_pupil_grid(256)
field = Field(np.random.randn(grid.size), grid)
imshow_field(field)
plt.clf()
mask = circular_aperture(1)(grid)
imshow_field(field, mask=mask)
plt.clf()
field = Field(np.random.randn(grid.size) + 1j * np.random.randn(grid.size), grid)
imshow_field(field)
plt.clf()
def test_imsave_field():
grid = make_pupil_grid(256)
field = Field(np.random.randn(grid.size), grid)
imsave_field('field.png', field)
assert os.path.isfile('field.png')
os.remove('field.png')
def test_contour_field():
grid = make_pupil_grid(256)
field = Field(np.random.randn(grid.size), grid)
contour_field(field)
plt.clf()
contourf_field(field)
plt.clf()
def test_imshow_util():
pupil_grid = make_pupil_grid(128)
focal_grid = make_focal_grid(4, 16)
aperture = make_magellan_aperture(True)(pupil_grid)
prop = FraunhoferPropagator(pupil_grid, focal_grid)
wf = Wavefront(aperture)
wf.electric_field *= np.exp(0.1j * zernike(6, 2, radial_cutoff=False)(pupil_grid))
imshow_pupil_phase(wf, remove_piston=True, crosshairs=True, title='phase')
plt.clf()
img = prop(wf)
imshow_psf(img, colorbar_orientation='vertical', normalization='peak', crosshairs=True, title='psf')
plt.clf()
imshow_psf(img, scale='linear', colorbar_orientation='vertical', normalization='peak', crosshairs=True, title='psf')
plt.clf()
|
{"hexsha": "0d7190385dda85c0b6fa833c480d39be2f95cefe", "size": 1881, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_plotting.py", "max_stars_repo_name": "yinzi-xin/hcipy", "max_stars_repo_head_hexsha": "e9abb037ed0d6fe06581c1ce94e5c154fa5069a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_plotting.py", "max_issues_repo_name": "yinzi-xin/hcipy", "max_issues_repo_head_hexsha": "e9abb037ed0d6fe06581c1ce94e5c154fa5069a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_plotting.py", "max_forks_repo_name": "yinzi-xin/hcipy", "max_forks_repo_head_hexsha": "e9abb037ed0d6fe06581c1ce94e5c154fa5069a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.6703296703, "max_line_length": 117, "alphanum_fraction": 0.7315257842, "include": true, "reason": "import numpy", "num_tokens": 503}
|
from __future__ import print_function, division, absolute_import
import os
os.environ['ODIN'] = 'float32,gpu'
import pickle
from collections import OrderedDict, defaultdict
import numpy as np
from scipy.io import savemat
from scipy import stats
import tensorflow as tf
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from odin.ml import PLDA, Scorer
from odin import preprocessing as pp
from odin import fuel as F, nnet as N, backend as K
from odin.utils import (get_module_from_path, get_script_path, ctext,
Progbar, stdio, get_logpath, get_formatted_datetime)
from odin.stats import describe
from helpers import (SCORING_DATASETS, BACKEND_DATASETS,
SCORE_SYSTEM_NAME, SCORE_SYSTEM_ID,
N_PLDA, N_LDA, PLDA_MAXIMUM_LIKELIHOOD, PLDA_SHOW_LLK,
PATH_ACOUSTIC_FEATURES, FEATURE_RECIPE, FEATURE_NAME,
get_model_path, NCPU, get_logpath, prepare_dnn_feeder_recipe,
sre_file_list, Config,
EXP_DIR, VECTORS_DIR, RESULT_DIR,
filter_utterances)
# ====== scoring log ====== #
stdio(get_logpath(name='make_score.log', increasing=True,
odin_base=False, root=EXP_DIR))
print('=' * 48)
print(get_formatted_datetime(only_number=False))
print("System name :", SCORE_SYSTEM_NAME)
print("System id :", SCORE_SYSTEM_ID)
print("Feature recipe :", FEATURE_RECIPE)
print("Feature name :", FEATURE_NAME)
print("Backend dataset:", ','.join(BACKEND_DATASETS.keys()))
print("Scoring dataset:", ','.join(SCORING_DATASETS.keys()))
print('=' * 48)
# ===========================================================================
# Some helper
# ===========================================================================
def _check_running_feature_extraction(feat_dir, n_files):
  # Returns True if the feature extraction still needs to be run
if not os.path.exists(feat_dir):
return True
indices_path = os.path.join(feat_dir, 'indices_%s' % FEATURE_NAME)
if not os.path.exists(indices_path):
return True
try:
indices = F.MmapDict(path=indices_path, read_only=True)
n_indices = len(indices)
indices.close()
except Exception as e:
import traceback
traceback.print_exc()
print("Loading indices error: '%s'" % str(e), "at:", indices_path)
return True
if n_indices != n_files:
return True
return False
# ===========================================================================
# Searching for trained system
# ===========================================================================
sys_dir, _, _ = get_model_path(system_name=SCORE_SYSTEM_NAME,
logging=False)
sys_name = os.path.basename(sys_dir)
all_sys = []
for path in os.listdir(sys_dir):
path = os.path.join(sys_dir, path)
if 'model.ai.' in path:
all_sys.append(path)
# ====== get the right model based on given system index ====== #
if len(all_sys) == 0:
final_sys = os.path.join(sys_dir, 'model.ai')
sys_index = ''
assert os.path.exists(final_sys), \
"Cannot find pre-trained model at path: %s" % sys_dir
else:
all_sys = sorted(all_sys,
key=lambda x: int(x.split('.')[-1]))
final_sys = all_sys[SCORE_SYSTEM_ID]
sys_index = '.' + final_sys.split('.')[-1]
# ====== print the log ====== #
print("Searching pre-trained model:")
print(" Found pre-trained at:", ctext(final_sys, 'cyan'))
print(" System name :", ctext(sys_name, 'cyan'))
print(" System index :", ctext(sys_index, 'cyan'))
# just check one more time
assert os.path.exists(final_sys), \
"Cannot find pre-trained model at: '%s'" % final_sys
# ====== generate path ====== #
def get_vectors_outpath(dsname):
return os.path.join(VECTORS_DIR, '%s%s.%s' % (sys_name, sys_index, dsname))
# ===========================================================================
# Searching for extractor
# ===========================================================================
EXTRACTOR_NAME = FEATURE_RECIPE.split("_")[0]
extractor = get_module_from_path(identifier=EXTRACTOR_NAME,
path=get_script_path(),
prefix='feature_recipes')
assert len(extractor) > 0, \
"Cannot find extractor with name: %s" % EXTRACTOR_NAME
extractor = extractor[0]()
# ====== initializing ====== #
# mapping from
# scoring_data_name -> [features 2-D array,
# indices {name: (start, end)},
# spkid_or_meta {name: spkid_or_meta},
# path {name: path}]
acoustic_features = {}
training_ds = F.Dataset(path=os.path.join(PATH_ACOUSTIC_FEATURES, FEATURE_RECIPE),
read_only=True)
all_training_dataset = set(training_ds['dsname'].values())
print("All training dataset:", ctext(all_training_dataset, 'cyan'))
# ====== extract the feature if not exists ====== #
for dsname, file_list in sorted(list(SCORING_DATASETS.items()) + list(BACKEND_DATASETS.items()),
key=lambda x: x[0]):
# acoustic features already extracted in training dataset
if dsname in all_training_dataset:
assert FEATURE_NAME in training_ds, \
"Cannot find feature with name: %s, from: %s" % (FEATURE_NAME, training_ds.path)
X = training_ds[FEATURE_NAME]
indices = {name: (start, end)
for name, (start, end) in training_ds['indices_%s' % FEATURE_NAME].items()
if training_ds['dsname'][name] == dsname}
# we use everything for PLDA
indices = filter_utterances(X, indices, training_ds['spkid'],
remove_min_length=False,
remove_min_uttspk=True if 'voxceleb' in dsname else False,
n_speakers=800 if 'voxceleb' in dsname else None,
ncpu=4, title=dsname)
meta = {name: meta
for name, meta in training_ds['spkid'].items()
if name in indices}
path = {name: path
for name, path in training_ds['path'].items()
if name in indices}
acoustic_features[dsname] = [X, indices, meta, path]
continue
# extract acoustic feature from scratch
feat_dir = os.path.join(PATH_ACOUSTIC_FEATURES,
'%s_%s' % (dsname, EXTRACTOR_NAME))
log_path = get_logpath(name='%s_%s.log' % (dsname, EXTRACTOR_NAME),
increasing=True, odin_base=False, root=EXP_DIR)
# check if need running the feature extraction
if _check_running_feature_extraction(feat_dir, n_files=len(file_list)):
with np.warnings.catch_warnings():
np.warnings.filterwarnings('ignore')
processor = pp.FeatureProcessor(jobs=file_list,
path=feat_dir,
extractor=extractor,
ncpu=NCPU,
override=True,
identifier='name',
log_path=log_path,
stop_on_failure=False)
processor.run()
# store the extracted dataset
ds = F.Dataset(path=feat_dir, read_only=True)
assert FEATURE_NAME in ds, \
"Cannot find feature with name: %s, from: %s" % (FEATURE_NAME, ds.path)
acoustic_features[dsname] = [
ds[FEATURE_NAME],
dict(ds['indices_%s' % FEATURE_NAME].items()),
dict(ds['spkid'].items()),
dict(ds['path'].items()),
]
# ====== print log ====== #
print("Acoustic features:")
for dsname, (X, indices, y, path) in sorted(acoustic_features.items(),
key=lambda x: x[0]):
all_utt_length = dict([(name, end - start)
for name, (start, end) in indices.items()])
print(" %s" % ctext(dsname, 'yellow'))
print(" #Files :", ctext(len(indices), 'cyan'))
print(" #Noise : %s/%s" % (
ctext(len([i for i in indices if '/' in i]), 'lightcyan'),
ctext(len(indices), 'cyan')))
print(" Loaded features:", ctext(X.path, 'cyan'))
print(" Utt length :", describe(list(all_utt_length.values()), shorten=True))
print(" Min length(+8) :")
min_length = min(all_utt_length.values())
for name, length in all_utt_length.items():
if length <= min_length + 8:
print(' %s | %s' % (name.split('/')[0], path[name]))
# ===========================================================================
# All system must extract following information
# ===========================================================================
# mapping from
# dataset_name -> 'name': 1-D array [n_samples],
# # (path to original audio)
# 'path': 1-D array [n_samples],
# # Extracted latent vectors
# 'X': 2-D array [n_samples, n_latent_dim]}
# # speaker label or meta-data (e.g. 'test', 'enroll', 'unlabeled')
# 'y': 1-D array [n_samples],
all_vectors = {}
# ===========================================================================
# Extract the x-vector for enroll and trials
# ===========================================================================
if 'xvec' == SCORE_SYSTEM_NAME:
# ====== load the network ====== #
x_vec = N.deserialize(path=final_sys,
force_restore_vars=True)
# ====== get output tensors ====== #
y_logit = x_vec()
y_proba = tf.nn.softmax(y_logit)
X = K.ComputationGraph(y_proba).placeholders[0]
z = K.ComputationGraph(y_proba).get(roles=N.Dense, scope='LatentOutput',
beginning_scope=False)[0]
f_z = K.function(inputs=X, outputs=z, training=False)
print('Inputs:', ctext(X, 'cyan'))
print('Latent:', ctext(z, 'cyan'))
# ====== recipe for feeder ====== #
recipe = prepare_dnn_feeder_recipe()
# ==================== extract x-vector from acoustic features ==================== #
for dsname, (ds_feat, ds_indices, ds_meta, ds_path) in sorted(
acoustic_features.items(), key=lambda x: x[0]):
n_files = len(ds_indices)
# ====== check exist scores ====== #
vector_outpath = get_vectors_outpath(dsname)
if os.path.exists(vector_outpath):
with open(vector_outpath, 'rb') as f:
vectors = pickle.load(f)
if (len(vectors['name']) == len(vectors['y']) ==
len(vectors['path']) == len(vectors['X']) <= n_files):
all_vectors[dsname] = vectors
print(' - Loaded vectors at:', ctext(vector_outpath, 'yellow'))
if len(vectors['name']) != n_files:
print(' [WARNING] Extracted scores only for: %s/%s (files)' %
(ctext(len(vectors['name']), 'lightcyan'),
ctext(n_files, 'cyan')))
continue # skip the calculation
# ====== create feeder ====== #
feeder = F.Feeder(
data_desc=F.IndexedData(data=ds_feat, indices=ds_indices),
batch_mode='file', ncpu=8)
feeder.set_recipes(recipe)
# ====== init ====== #
output_name = []
output_meta = []
output_path = []
output_data = []
# progress bar
prog = Progbar(target=len(feeder), print_summary=True,
name='Extract vectors: %s' % dsname)
# ====== make prediction ====== #
for batch_idx, (name, idx, X) in enumerate(feeder.set_batch(
batch_size=100000, seed=None, shuffle_level=0)):
assert idx == 0, "File '%s' longer than maximum batch size" % name
z = f_z(X)
if z.shape[0] > 1:
z = np.mean(z, axis=0, keepdims=True)
output_name.append(name)
output_meta.append(ds_meta[name])
output_path.append(ds_path[name])
output_data.append(z)
# update the progress
prog['ds'] = dsname
prog['name'] = name[:48]
prog['latent'] = z.shape
prog['outpath'] = vector_outpath
prog.add(X.shape[0])
# ====== post-processing ====== #
output_name = np.array(output_name)
output_meta = np.array(output_meta)
output_path = np.array(output_path)
output_data = np.concatenate(output_data, axis=0)
# ====== save the score ====== #
with open(vector_outpath, 'wb') as f:
scores = {'name': output_name,
'path': output_path,
'X': output_data.astype('float32'),
'y': output_meta}
pickle.dump(scores, f)
all_vectors[dsname] = scores
# ===========================================================================
# Extract the i-vector
# ===========================================================================
elif 'ivec' == SCORE_SYSTEM_NAME:
raise NotImplementedError
# ===========================================================================
# Extract the end-to-end system
# ===========================================================================
elif 'e2e' == SCORE_SYSTEM_NAME:
raise NotImplementedError
# ===========================================================================
# Unknown system
# ===========================================================================
else:
raise RuntimeError("No support for system: %s" % SCORE_SYSTEM_NAME)
# ===========================================================================
# Prepare data for training the backend
# ===========================================================================
all_backend_data = {name: all_vectors[name]
for name in BACKEND_DATASETS.keys()}
X_backend = []
y_backend = []
n_speakers = 0
for dsname, vectors in all_backend_data.items():
X, y = vectors['X'], vectors['y']
# add the data
X_backend.append(X)
# add the labels
y_backend += y.tolist()
# create label list
n_speakers += len(np.unique(y))
# create mapping of spk to integer label
all_speakers = sorted(set(y_backend))
spk2label = {j: i
for i, j in enumerate(all_speakers)}
# make sure no overlap speaker among dataset
assert len(all_speakers) == n_speakers
# create the training data
X_backend = np.concatenate(X_backend, axis=0)
y_backend = np.array([spk2label[i] for i in y_backend])
print("Training data for backend:")
print(" #Speakers:", ctext(n_speakers, 'cyan'))
print(" X :", ctext(X_backend.shape, 'cyan'))
print(" y :", ctext(y_backend.shape, 'cyan'))
# ====== fast checking the array ====== #
print("Check backend data statistics:")
print(" Mean :", ctext(np.mean(X_backend), 'cyan'))
print(" Std :", ctext(np.std(X_backend), 'cyan'))
print(" Max :", ctext(np.max(X_backend), 'cyan'))
print(" Min :", ctext(np.min(X_backend), 'cyan'))
print(" NaN :", ctext(np.any(np.isnan(X_backend)), 'cyan'))
n = int(np.prod(X_backend.shape))
n_non_zeros = np.count_nonzero(X_backend)
print(" #Zeros: %s/%s or %.1f%%" %
(ctext(n - n_non_zeros, 'lightcyan'),
ctext(n, 'cyan'),
(n - n_non_zeros) / n * 100))
# ******************** optional save data to matlab for testing ******************** #
with open('/tmp/backend.mat', 'wb') as ftmp:
savemat(ftmp, {'X': np.array(X_backend.astype('float32'), order='F'),
'y': np.array(y_backend.astype('int32'), order='F')})
for dsname in SCORING_DATASETS.keys():
vectors = all_vectors[dsname]
with open(os.path.join('/tmp', '%s.mat' % dsname), 'wb') as ftmp:
y = []
for i in range(len(vectors['X'])):
name = vectors['name'][i]
path = vectors['path'][i]
if path is not None:
name += os.path.splitext(path)[-1]
y.append(name)
savemat(ftmp, {'X': np.array(vectors['X'].astype('float32'), order='F'),
'y': np.array(y)})
# ===========================================================================
# Training the PLDA
# ===========================================================================
# ====== training the LDA ====== #
if N_LDA > 0:
print(" Fitting LDA ...")
lda = LinearDiscriminantAnalysis(n_components=N_LDA)
X_backend = lda.fit_transform(X=X_backend, y=y_backend)
lda_transform = lda.transform
else:
lda_transform = lambda x: x
# ====== training the PLDA ====== #
plda = PLDA(n_phi=N_PLDA,
centering=True, wccn=True, unit_length=True,
n_iter=20, random_state=Config.SUPER_SEED,
verbose=2 if PLDA_SHOW_LLK else 1)
if PLDA_MAXIMUM_LIKELIHOOD:
print(" Fitting PLDA maximum likelihood ...")
plda.fit_maximum_likelihood(X=lda_transform(X_backend), y=y_backend)
plda.fit(X=lda_transform(X_backend), y=y_backend)
# ===========================================================================
# Now scoring
# ===========================================================================
for dsname, scores in sorted(all_vectors.items(),
key=lambda x: x[0]):
# ====== skip non scoring dataset ====== #
if dsname not in SCORING_DATASETS:
continue
# ====== proceed ====== #
print("Scoring:", ctext(dsname, 'yellow'))
# load the scores
(seg_name, seg_meta,
seg_path, seg_data) = (scores['name'], scores['y'],
scores['path'], scores['X'])
name_2_data = {i: j
for i, j in zip(seg_name, seg_data)}
name_2_ext = {i: '' if j is None else os.path.splitext(j)[-1]
for i, j in zip(seg_name, seg_path)}
# get the enroll and trials list
enroll_name = '%s_enroll' % dsname
trials_name = '%s_trials' % dsname
if enroll_name in sre_file_list and trials_name in sre_file_list:
# ====== checking the trials ====== #
trials = np.array([(i, j)
for i, j in sre_file_list[trials_name][:, :2]
if j in name_2_data])
print(" Missing trials: %s/%s" %
(ctext(len(sre_file_list[trials_name]) - len(trials), 'lightcyan'),
ctext(len(sre_file_list[trials_name]), 'cyan')))
# ====== checking the enrollments ====== #
enroll = np.array([(i, j)
for i, j in sre_file_list[enroll_name][:, :2]
if j in name_2_data])
print(" Missing enroll: %s/%s" %
(ctext(len(sre_file_list[enroll_name]) - len(enroll), 'lightcyan'),
ctext(len(sre_file_list[enroll_name]), 'cyan')))
# ====== skip the scoring if necessary ====== #
if len(trials) == 0 or len(enroll) == 0:
print(" Skip scoring for:", ctext(dsname, 'yellow'))
continue
# ====== create the enrollments data ====== #
models = OrderedDict()
# for now we don't care about channel (or size) information
for model_id, segment_id in enroll[:, :2]:
if model_id not in models:
models[model_id] = []
models[model_id].append(name_2_data[segment_id])
# calculate the x-vector for each model
models = OrderedDict([
(model_id, np.mean(seg_list, axis=0, keepdims=True))
for model_id, seg_list in models.items()
])
model_2_index = {j: i for i, j in enumerate(models.keys())}
X_models = np.concatenate(list(models.values()), axis=0)
print(" Enroll:", ctext(X_models.shape, 'cyan'))
# ====== create the trials list ====== #
X_trials = np.concatenate([name_2_data[i][None, :] for i in trials[:, 1]],
axis=0)
print(" Trials:", ctext(X_trials.shape, 'cyan'))
# ====== extract scores ====== #
y_scores = plda.predict_log_proba(X=lda_transform(X_trials),
X_model=X_models)
print(" Scores:", ctext(y_scores.shape, 'cyan'))
# ====== write the scores to file ====== #
score_path = os.path.join(RESULT_DIR,
'%s%s.%s.csv' % (sys_name, sys_index, dsname))
with open(score_path, 'w') as fout:
fout.write('\t'.join(['modelid', 'segmentid', 'side', 'LLR']) + '\n')
for i, (model_id, seg_id) in enumerate(trials):
score = '%f' % y_scores[i][model_2_index[model_id]]
fout.write('\t'.join([model_id, seg_id + name_2_ext[seg_id], 'a', score]) + '\n')
print(" Saved trials:", ctext(score_path, 'cyan'))
else:
raise RuntimeError(
"Cannot find '%s_trials.csv' and '%s_enroll.csv' for dataset: %s" %
(dsname, dsname, dsname))
|
{"hexsha": "688852f8865f841f04ab6627d3fee492e76f3575", "size": 20044, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/nist_sre/make_score.py", "max_stars_repo_name": "tirkarthi/odin-ai", "max_stars_repo_head_hexsha": "7900bef82ad8801d0c73880330d5b24d9ff7cd06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-12-29T19:35:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T21:01:30.000Z", "max_issues_repo_path": "examples/nist_sre/make_score.py", "max_issues_repo_name": "tirkarthi/odin-ai", "max_issues_repo_head_hexsha": "7900bef82ad8801d0c73880330d5b24d9ff7cd06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-02-06T16:44:17.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-26T05:26:14.000Z", "max_forks_repo_path": "examples/nist_sre/make_score.py", "max_forks_repo_name": "tirkarthi/odin-ai", "max_forks_repo_head_hexsha": "7900bef82ad8801d0c73880330d5b24d9ff7cd06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-02-14T01:36:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-30T13:16:32.000Z", "avg_line_length": 44.44345898, "max_line_length": 96, "alphanum_fraction": 0.553931351, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4770}
|
import numpy as np
import os
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, DecimalField, RadioField, SelectField, SelectMultipleField, IntegerField, FloatField
from wtforms.validators import InputRequired, Length, NumberRange, AnyOf, ValidationError
from wtforms.widgets import ListWidget, CheckboxInput
from exoctk.modelgrid import ModelGrid
from exoctk.utils import get_env_variables, FILTERS_LIST, PROFILES
from svo_filters import svo
class MultiCheckboxField(SelectMultipleField):
"""Makes a list of checkbox inputs"""
widget = ListWidget(prefix_label=False)
option_widget = CheckboxInput()
class BaseForm(FlaskForm):
"""A generic form with target resolve built in"""
# Target Resolve
targname = StringField('targname', default='')
target_url = StringField('target_url', default='')
# Submit button
resolve_submit = SubmitField('Resolve Target')
class FortneyModelForm(BaseForm):
"""Form validation for the forward model tools"""
# Parameters
planet_teff = SelectField('planet_teff', choices=[(500, '500'), (750, '750'), (1000, '1000'), (1250, '1250'), (1500, '1500'), (1750, '1750'), (2000, '2000'), (2250, '2250'), (2500, '2500')], validators=[InputRequired('An effective temperature is required')])
planet_mass = DecimalField('planet_mass', default=1.5, validators=[InputRequired('A planet mass is required!'), NumberRange(min=0.0, message='Planet mass must be positive')])
planet_mass_unit = SelectField('planet_mass_unit', choices=[('M_jup', 'Jupiter Mass'), ('kilogram', 'kilogram'), ('g', 'gram'), ('M_earth', 'Earth Mass'), ('M_sun', 'Solar Mass')], validators=[InputRequired('A mass unit is required')])
planet_radius = DecimalField('planet_radius', default=1.25, validators=[InputRequired('A planet radius is required!'), NumberRange(min=0, message='Planet radius must be positive')])
planet_radius_unit = SelectField('planet_radius_unit', choices=[('R_jup', 'Jupiter Radius'), ('kilometer', 'kilometer'), ('m', 'meter'), ('R_earth', 'Earth Radius'), ('R_sun', 'Solar Radius')], validators=[InputRequired('A planet radius unit is required')])
stellar_radius = DecimalField('stellar_radius', default=1.0, validators=[InputRequired('A stellar radius is required!'), NumberRange(min=0, message='Stellar radius must be positive')])
stellar_radius_unit = SelectField('stellar_radius_unit', choices=[('R_sun', 'Solar Radius'), ('R_jup', 'Jupiter Radius'), ('kilometer', 'kilometer'), ('m', 'meter'), ('R_earth', 'Earth Radius')], validators=[InputRequired('A stellar radius unit is required')])
    chemistry = SelectField('chemistry', choices=[('noTiO', 'noTiO'), ('eqchem', 'eqchem')], validators=[InputRequired('A chemistry type is required')])
clouds = SelectField('clouds', choices=[('0', 'Nothing'), ('ray10', 'Weak Rayleigh'), ('ray100', 'Medium Rayleigh'), ('ray1000', 'Strong Rayleigh'), ('flat10', 'Weak Cloud'), ('flat100', 'Medium Cloud'), ('flat1000', 'Strong Cloud')], validators=[InputRequired('A cloud model is required')])
# Form submits
calculate_submit = SubmitField('Calculate Forward Model')
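# Usage sketch inside a Flask view (the route and template names are
# hypothetical; validate_on_submit() is standard Flask-WTF):
#
# @app.route('/fortney', methods=['GET', 'POST'])
# def fortney():
#     form = FortneyModelForm()
#     if form.validate_on_submit():
#         teff = int(form.planet_teff.data)
#     return render_template('fortney.html', form=form)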
class LimbDarkeningForm(BaseForm):
"""Form validation for the limb_darkening tool"""
# Model grid
modelgrid_dir = get_env_variables()['modelgrid_dir']
default_modelgrid = os.path.join(modelgrid_dir, 'ATLAS9/')
mg = ModelGrid(default_modelgrid, resolution=500)
teff_rng = mg.Teff_vals.min(), mg.Teff_vals.max()
logg_rng = mg.logg_vals.min(), mg.logg_vals.max()
feh_rng = mg.FeH_vals.min(), mg.FeH_vals.max()
modeldir = RadioField('modeldir', default=default_modelgrid, choices=[(os.path.join(modelgrid_dir, 'ATLAS9/'), 'Kurucz ATLAS9'), (os.path.join(modelgrid_dir, 'ACES/'), 'Phoenix ACES')], validators=[InputRequired('A model grid is required!')])
# Stellar parameters
teff = DecimalField('teff', default=3500, validators=[InputRequired('An effective temperature is required!'), NumberRange(min=float(teff_rng[0]), max=float(teff_rng[1]), message='Effective temperature must be between {} and {} for this model grid'.format(*teff_rng))])
logg = DecimalField('logg', default=4.5, validators=[InputRequired('A surface gravity is required!'), NumberRange(min=float(logg_rng[0]), max=float(logg_rng[1]), message='Surface gravity must be between {} and {} for this model grid'.format(*logg_rng))])
    feh = DecimalField('feh', default=0.0, validators=[InputRequired('A metallicity is required!'), NumberRange(min=float(feh_rng[0]), max=float(feh_rng[1]), message='Metallicity must be between {} and {} for this model grid'.format(*feh_rng))])
mu_min = DecimalField('mu_min', default=0.1, validators=[InputRequired('A minimum mu value is required!'), NumberRange(min=0.0, max=1.0, message='Minimum mu must be between 0 and 1')])
# LD profile
profiles = MultiCheckboxField('profiles', choices=[(x, x) for x in PROFILES], validators=[InputRequired('At least one profile is required!')])
# Bandpass
default_filter = 'Kepler.K'
defilt = svo.Filter(default_filter)
bandpass = SelectField('bandpass', default=default_filter, choices=[('tophat', 'Top Hat')] + [(filt, filt) for filt in FILTERS_LIST], validators=[InputRequired('A filter is required!')])
wave_min = DecimalField('wave_min', default=defilt.wave_min.value, validators=[NumberRange(min=0, max=30, message='Minimum wavelength must be between 0 and 30 microns!')])
wave_max = DecimalField('wave_max', default=defilt.wave_max.value, validators=[NumberRange(min=0, max=30, message='Maximum wavelength must be between 0 and 30 microns!')])
n_bins = IntegerField('n_bins', default=1)
# Form submits
calculate_submit = SubmitField('Calculate Coefficients')
filter_submit = SubmitField('Filter Selected')
modelgrid_submit = SubmitField('Model Grid Selected')
class GroupsIntsForm(BaseForm):
"""Form validation for the groups_integrations tool"""
# Form submits
calculate_submit = SubmitField('Calculate Groups and Integrations')
# Stellar Parameters
kmag = DecimalField('kmag', default=10.5, validators=[InputRequired('A K-band magnitude is required!'), NumberRange(min=5.1, max=11.9, message='K-band mag must be between 5-12, non-inclusive.')])
obs_duration = DecimalField('obs_duration', default=3, validators=[InputRequired('An observation duration is required!'), NumberRange(min=0, message='Observation duration must be a positive number')])
time_unit = SelectField('time_unit', default='hour', choices=[('hour', 'hours'), ('day', 'days')])
models = [('a0i', 'A0I 9750 2.0'), ('aov', 'A0V 9500 2.0'), ('a1v', 'A1V 9250 4.0'), ('a5i', 'A5I 8500 2.0'), ('a3v', 'A3V 8250 4.0'), ('a5v', 'A5V 8250 4.0'), ('f0i', 'F0I 7750 2.0'), ('f0v', 'F0V 7250 1.5'), ('f5i', 'F5I 7000 4.0'), ('f2v', 'F2V 7000 4.0'), ('f5v', 'F5V 6500 4.0'), ('f8v', 'F8V 6250 4.5'), ('g0v', 'G0V 6000 4.5'), ('g0iii', 'G0III 5750 3.0'), ('g2v', 'G2V 5750 4.5'), ('g5v', 'G5V 5750 4.5'), ('g0i', 'G0I 5500 1.5'), ('g8v', 'G8V 5500 4.5'), ('g5iii', 'G5III 5250 2.5'), ('g5i', 'G5I 4740 1.0'), ('k0v', 'K0V 5250 4.5'), ('k0iii', 'K0III 4750 2.0'), ('k2v', 'K2V 4750 4.5'), ('k0i', 'K0I 4500 1.0'), ('k5v', 'K5V 4250 1.5'), ('k5iii', 'K5III 4000 1.5'), ('k7v', 'K7V 4000 4.5'), ('k5i', 'K5I 3750 0.5'), ('m0i', 'M0I 3750 0.0'), ('m0iii', 'M0III 3750 1.5'), ('m0v', 'M0V 3750 4.5'), ('m2i', 'M2I 3500 0.0'), ('m2v', 'M2V 3500 4.5'), ('m5v', 'M5V 3500 5.0')]
mod = SelectField('mod', choices=models)
n_group = IntegerField('n_group', default=0)
ins = SelectField('ins', default='miri', choices=[('niriss', 'NIRISS'), ('nircam', 'NIRCam'), ('nirspec', 'NIRSpec'), ('miri', 'MIRI')])
# Filter selects
miri_filt = SelectField('miri_filt', choices=[('lrs', 'LRS')])
nirspec_filt = SelectField('nirspec_filt', choices=[('f070lp_g140h', 'F070LP/G140H'), ('f100lp_g140h', 'F100LP/G140H'), ('f070lp_g140m', 'F070LP/G140M'), ('f100lp_g140m', 'F100LP/G140M'), ('f170lp_g235h', 'F170LP/G235H'), ('f170lp_g235m', 'F170LP/G235M'), ('f290lp_g395h', 'F290LP/G395H'), ('f290lp_g395m', 'F290LP/G395M')])
niriss_filt = SelectField('niriss_filt', choices=[('soss', 'SOSS')])
nircam_filt = SelectField('nircam_filt', choices=[('f322w2', 'F322W2'), ('f444w', 'F444W'), ('f277w', 'F277W')])
# TA filter selects
miri_filt_ta = SelectField('miri_filt_ta', choices=[('f560w', 'F560W'), ('f100w', 'F100W'), ('f1500w', 'F1500W')])
nirspec_filt_ta = SelectField('nirspec_filt_ta', choices=[('f110w', 'F110W'), ('f140x', 'F140X'), ('clear', 'CLEAR')])
niriss_filt_ta = SelectField('niriss_filt_ta', choices=[('f480m', 'F480M')])
nircam_filt_ta = SelectField('nircam_filt_ta', choices=[('f335m', 'F335M')])
# Subarray selects
miri_subarray = SelectField('miri_subarray', choices=[('slitlessprism', 'SLITLESSPRISM')])
nirspec_subarray = SelectField('nirspec_subarray', choices=[('sub2048', 'SUB2048'), ('sub1024a', 'SUB1024A'), ('sub1024b', 'SUB1024B'), ('sub512', 'SUB512')])
niriss_subarray = SelectField('niriss_subarray', choices=[('substrip256', 'SUBSTRIP256'), ('substrip96', 'SUBSTRIP96')])
nircam_subarray = SelectField('nircam_subarray', choices=[('full', 'FULL FRAME'), ('subgrism256', 'SUBGRISM256'), ('subgrism128', 'SUBGRISM128'), ('subgrism64', 'SUBGRISM64')])
# TA subarray selects
miri_subarray_ta = SelectField('miri_subarray_ta', choices=[('slitlessprism', 'SLITLESSPRISM')])
nirspec_subarray_ta = SelectField('nirspec_subarray_ta', choices=[('full', 'FULL'), ('sub32', 'SUB32'), ('sub2048', 'SUB2048')])
niriss_subarray_ta = SelectField('niriss_subarray_ta', choices=[('nrm', 'SUBTASOSS -- BRIGHT'), ('im', 'SUBTASOSS -- FAINT')])
nircam_subarray_ta = SelectField('nircam_subarray_ta', choices=[('sub32tats', 'SUB32TATS')])
# Saturation
sat_mode = RadioField('sat_mode', default='well', choices=[('counts', 'Counts'), ('well', 'Full well fraction')])
sat_max = DecimalField('sat_max', default=0.95, validators=[InputRequired('A saturation level is required!'), NumberRange(min=0.0, message='Saturation level must be positive.')])
class ContamVisForm(BaseForm):
"""Form validation for the contamination_visibility tool"""
# Form submits
calculate_submit = SubmitField('Calculate Visibility')
calculate_contam_submit = SubmitField('Calculate Visibility and Contamination')
mode_submit = SubmitField('Mode Selected')
# Form inputs
ra = DecimalField('ra', validators=[NumberRange(min=0, max=360, message='RA must be between 0 and 360 degrees')])
    dec = DecimalField('dec', validators=[NumberRange(min=-90, max=90, message='Declination must be between -90 and 90 degrees')])
inst = SelectField('inst', choices=[('NIRISS', 'NIRISS - SOSS'), ('NIRCam F322W2', 'NIRCam - Grism Time Series (F322W2)'), ('NIRCam F444W', 'NIRCam - Grism Time Series (F444W)'), ('MIRI', 'MIRI - LRS'), ('NIRSpec', 'NIRSpec (Visibility Only)')])
companion = StringField('companion', default='')
pa_min = DecimalField('pa_min', default=0, validators=[NumberRange(min=0, max=360, message='Minimum PA must be between 0 and 360 degrees')])
pa_max = DecimalField('pa_max', default=360, validators=[NumberRange(min=0, max=360, message='Maximum PA must be between 0 and 360 degrees')])
class PhaseConstraint(BaseForm):
"""Form validation for the phase-constraint tool"""
calculate_submit = SubmitField('Calculate Phase Constraint')
orbital_period = FloatField('orbital_period', validators=[InputRequired('Orbital period is a required field')])
eccentricity = FloatField('eccentricity', default=np.nan)
transit_type = SelectField('transit_type', choices=[('primary', 'primary'), ('secondary', 'secondary')])
omega = FloatField('omega', default=np.nan)
inclination = FloatField('inclination', default=np.nan)
transit_time = FloatField('transit_time', default=np.nan)
window_size = FloatField('window_size', default=1.0)
observation_duration = FloatField('observation_duration', default=2.0, validators=[InputRequired('Observation duration is a required field.')])
minimum_phase = DecimalField('minimum_phase', default=0.0)
maximum_phase = DecimalField('maximum_phase', default=0.0)
minimum_phase_sec = DecimalField('minimum_phase_sec', default=0.0)
maximum_phase_sec = DecimalField('maximum_phase_sec', default=0.0)
|
{"hexsha": "0b3f586857c48a4131856b9fee2acc4c5251d8e9", "size": 12499, "ext": "py", "lang": "Python", "max_stars_repo_path": "exoctk/exoctk_app/form_validation.py", "max_stars_repo_name": "bourque/exoctk", "max_stars_repo_head_hexsha": "1d2f8e7b9c00e74033626d81593b1f879b7df6ad", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "exoctk/exoctk_app/form_validation.py", "max_issues_repo_name": "bourque/exoctk", "max_issues_repo_head_hexsha": "1d2f8e7b9c00e74033626d81593b1f879b7df6ad", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exoctk/exoctk_app/form_validation.py", "max_forks_repo_name": "bourque/exoctk", "max_forks_repo_head_hexsha": "1d2f8e7b9c00e74033626d81593b1f879b7df6ad", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 79.6114649682, "max_line_length": 883, "alphanum_fraction": 0.6938955116, "include": true, "reason": "import numpy", "num_tokens": 3790}
|
"""Mixture model for matrix completion"""
from typing import Tuple
import numpy as np
from scipy.special import logsumexp
from common import GaussianMixture
def estep(X: np.ndarray, mixture: GaussianMixture) -> Tuple[np.ndarray, float]:
"""E-step: Softly assigns each datapoint to a gaussian component
Args:
X: (n, d) array holding the data, with incomplete entries (set to 0)
mixture: the current gaussian mixture
Returns:
np.ndarray: (n, K) array holding the soft counts
for all components for all examples
float: log-likelihood of the assignment
"""
n, d = X.shape
mu, var, pi = mixture # Unpack mixture tuple
K = mu.shape[0]
######## Loop version to calculate norms: 2nd fastest ########
# f(u,j) matrix that's used to store the normal matrix and log of posterior probs: (p(j|u))
# f = np.zeros((n,K), dtype=np.float64)
#
# # Compute the normal matrix: Single loop implementation
# for i in range(n):
# # For each user pick only columns that have ratings
# Cu_indices = X[i,:] != 0
# # Dimension of Cu (no. of non-zero entries)
# dim = np.sum(Cu_indices)
# # log of pre-exponent for this user's gaussian dist.
# pre_exp = (-dim/2.0)*np.log((2*np.pi*var))
# # Calculate the exponent term of the gaussian
# diff = X[i, Cu_indices] - mu[:, Cu_indices] # This will be (K,|Cu|)
# norm = np.sum(diff**2, axis=1) # This will be (K,)
#
# # Now onto the final log normal matrix: log(N(...))
# # We will need log(normal), exp will cancel, so no need to calculate it
# f[i,:] = pre_exp - norm/(2*var) # This is the ith users log gaussian dist vector: (K,)
######## End: loop version ########
######## Vectorized version to calculate norms ########
# Create a delta matrix to indicate where X is non-zero, which will help us pick Cu indices
delta = X.astype(bool).astype(int)
# Exponent term: norm matrix/(2*variance)
# f = np.sum(((X[:, None, :] - mu)*delta[:, None, :])**2, axis=2)/(2*var) # This is using 3D broadcasting: slowest of all
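    # Identity used below (added note): because X is zero wherever delta is zero,
    #   sum_{l in Cu} (x_l - mu_l)^2 = sum_l x_l^2 + sum_l delta_l*mu_l^2 - 2*sum_l x_l*mu_l,
    # which vectorizes to sum(X**2, axis=1)[:,None] + delta @ mu.T**2 - 2*(X @ mu.T)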
f = (np.sum(X**2, axis=1)[:,None] + (delta @ mu.T**2) - 2*(X @ mu.T))/(2*var) # This is using indicator matrix: fastest of all
# Pre-exponent term: A matrix of shape (n, K)
pre_exp = (-np.sum(delta, axis=1).reshape(-1,1)/2.0) @ (np.log((2*np.pi*var)).reshape(-1,1)).T
# Put them together
f = pre_exp - f
######## End: vectorized version ########
f = f + np.log(pi + 1e-16) # This is the f(u,j) matrix
# log of normalizing term in p(j|u)
logsums = logsumexp(f, axis=1).reshape(-1,1) # Store this to calculate log_lh
log_posts = f - logsums # This is the log of posterior prob. matrix: log(p(j|u))
log_lh = np.sum(logsums, axis=0).item() # This is the log likelihood
return np.exp(log_posts), log_lh
def mstep(X: np.ndarray, post: np.ndarray, mixture: GaussianMixture,
min_variance: float = .25) -> GaussianMixture:
"""M-step: Updates the gaussian mixture by maximizing the log-likelihood
of the weighted dataset
Args:
X: (n, d) array holding the data, with incomplete entries (set to 0)
post: (n, K) array holding the soft counts
for all components for all examples
mixture: the current gaussian mixture
min_variance: the minimum variance for each gaussian
Returns:
GaussianMixture: the new gaussian mixture
"""
n, d = X.shape
mu_rev, _, _ = mixture
K = mu_rev.shape[0]
# Calculate revised pi(j): same expression as in the naive case
pi_rev = np.sum(post, axis=0)/n
# Create delta matrix indicating where X is non-zero
delta = X.astype(bool).astype(int)
# Update means only when sum_u(p(j|u)*delta(l,Cu)) >= 1
denom = post.T @ delta # Denominator (K,d): Only include dims that have information
numer = post.T @ X # Numerator (K,d)
update_indices = np.where(denom >= 1) # Indices for update
mu_rev[update_indices] = numer[update_indices]/denom[update_indices] # Only update where necessary (denom>=1)
# Update variances
denom_var = np.sum(post*np.sum(delta, axis=1).reshape(-1,1), axis=0) # Shape: (K,)
######## Loop version for norms calc. ##########
# Norm matrix for variance calc
# norms = np.zeros((n, K), dtype=np.float64)
#
# for i in range(n):
# # For each user pick only columns that have ratings
# Cu_indices = X[i,:] != 0
# diff = X[i, Cu_indices] - mu_rev[:, Cu_indices] # This will be (K,|Cu|)
# norms[i,:] = np.sum(diff**2, axis=1) # This will be (K,)
######## End: loop version #########
######## Vectorized version for norms calc. ########
# norms = np.sum(((X[:, None, :] - mu_rev)*delta[:, None, :])**2, axis=2)
norms = np.sum(X**2, axis=1)[:,None] + (delta @ mu_rev.T**2) - 2*(X @ mu_rev.T)
######## End: vectorized version #########
# Revised var: if var(j) < 0.25, set it = 0.25
var_rev = np.maximum(np.sum(post*norms, axis=0)/denom_var, min_variance)
return GaussianMixture(mu_rev, var_rev, pi_rev)
def run(X: np.ndarray, mixture: GaussianMixture,
post: np.ndarray) -> Tuple[GaussianMixture, np.ndarray, float]:
"""Runs the mixture model
    Args:
        X: (n, d) array holding the data
        mixture: the current gaussian mixture
        post: (n, K) array holding the soft counts
            for all components for all examples
Returns:
GaussianMixture: the new gaussian mixture
np.ndarray: (n, K) array holding the soft counts
for all components for all examples
float: log-likelihood of the current assignment
"""
old_log_lh = None
new_log_lh = None # Keep track of log likelihood to check convergence
# Start the main loop
while old_log_lh is None or (new_log_lh - old_log_lh > 1e-6*np.abs(new_log_lh)):
old_log_lh = new_log_lh
# E-step
post, new_log_lh = estep(X, mixture)
# M-step
mixture = mstep(X, post, mixture)
return mixture, post, new_log_lh
def fill_matrix(X: np.ndarray, mixture: GaussianMixture) -> np.ndarray:
"""Fills an incomplete matrix according to a mixture model
Args:
X: (n, d) array of incomplete data (incomplete entries =0)
mixture: a mixture of gaussians
Returns
np.ndarray: a (n, d) array with completed data
"""
X_pred = X.copy()
mu, _, _ = mixture
post, _ = estep(X, mixture)
# Missing entries to be filled
miss_indices = np.where(X == 0)
X_pred[miss_indices] = (post @ mu)[miss_indices]
return X_pred
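# --- Illustrative usage sketch (an addition, not part of the original file) ---
# Assumes `GaussianMixture` from `common` is a namedtuple of (mu, var, p) arrays
# with shapes (K, d), (K,), and (K,); it is constructed positionally below,
# mirroring how mstep() constructs it.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    n, d, K = 50, 4, 3
    X_demo = rng.rand(n, d)
    X_demo[rng.rand(n, d) < 0.3] = 0  # zero out entries to mimic missing ratings
    mixture_demo = GaussianMixture(rng.rand(K, d),       # means
                                   np.full(K, 1.0),      # variances
                                   np.full(K, 1.0 / K))  # mixing weights
    mixture_demo, post_demo, ll = run(X_demo, mixture_demo, np.zeros((n, K)))
    print("log-likelihood:", ll)
    print("completed matrix shape:", fill_matrix(X_demo, mixture_demo).shape)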
|
{"hexsha": "566e3dc2f25c28482aac8aa185b1456e9ef1d524", "size": 6777, "ext": "py", "lang": "Python", "max_stars_repo_path": "project4/netflix/em.py", "max_stars_repo_name": "davysnou/MIT-6.86x-Davy", "max_stars_repo_head_hexsha": "a0ef35692477512c7ac1ced8a0584c413781a401", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project4/netflix/em.py", "max_issues_repo_name": "davysnou/MIT-6.86x-Davy", "max_issues_repo_head_hexsha": "a0ef35692477512c7ac1ced8a0584c413781a401", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project4/netflix/em.py", "max_forks_repo_name": "davysnou/MIT-6.86x-Davy", "max_forks_repo_head_hexsha": "a0ef35692477512c7ac1ced8a0584c413781a401", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8315217391, "max_line_length": 130, "alphanum_fraction": 0.6036594363, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1874}
|
"""
Plotly-to-Matplotlib conversion functions.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import gc as _gc
import numpy as _np
from pygsti.report.plothelpers import _eformat
from pygsti.circuits.circuit import Circuit as _Circuit
try:
import matplotlib as _matplotlib
import matplotlib.pyplot as _plt
except ImportError:
raise ValueError(("While not a core requirement of pyGSTi, Matplotlib is "
"required to generate PDF plots. It looks like you "
"don't have it installed on your system (it failed to "
"import)."))
class MplLinLogNorm(_matplotlib.colors.Normalize):
"""
Matplotlib version of lin-log colormap normalization
Parameters
----------
linlog_colormap : LinlogColormap
pyGSTi linear-logarithmic color map to base this colormap off of.
clip : bool, optional
Whether clipping should be performed. See :class:`matplotlib.colors.Normalize`.
"""
def __init__(self, linlog_colormap, clip=False):
cm = linlog_colormap
super(MplLinLogNorm, self).__init__(vmin=cm.vmin, vmax=cm.vmax, clip=clip)
self.trans = cm.trans
self.cm = cm
def inverse(self, value):
"""
Inverse of __call__ as per matplotlib spec.
Parameters
----------
value : float or numpy.ndarray
Color-value to invert back.
Returns
-------
float or numpy.ndarray
"""
norm_trans = super(MplLinLogNorm, self).__call__(self.trans)
deltav = self.vmax - self.vmin
return_value = _np.where(_np.greater(0.5, value),
2 * value * (self.trans - self.vmin) + self.vmin,
deltav * _np.power(norm_trans, 2 * (1 - value)))
if return_value.shape == ():
return return_value.item()
else:
return return_value.view(_np.ma.MaskedArray)
def __call__(self, value, clip=None):
return self.cm.normalize(value)
def mpl_make_linear_norm(vmin, vmax, clip=False):
"""
Create a linear matplotlib normalization
Parameters
----------
vmin : float
Minimum mapped color value.
vmax : float
Maximum mapped color value.
clip : bool, optional
Whether clipping should be performed. See :class:`matplotlib.colors.Normalize`.
Returns
-------
matplotlib.colors.Normalize
"""
return _matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=clip)
def mpl_make_linear_cmap(rgb_colors, name=None):
"""
Make a color map that simply linearly interpolates between a set of colors in RGB space.
Parameters
----------
rgb_colors : list
Each element is a `(value, (r, g, b))` tuple specifying a value and an
RGB color. Both `value` and `r`, `g`, and `b` should be floating point
numbers between 0 and 1.
name : string, optional
A name for the colormap. If not provided, a name will be constructed
from an random integer.
Returns
-------
cmap
"""
if name is None:
name = "pygsti-cmap-" + str(_np.random.randint(0, 100000000))
cdict = {'red': [], 'green': [], 'blue': [], 'alpha': []}
for val, rgb_tup in rgb_colors:
for k, v in zip(('red', 'green', 'blue'), rgb_tup):
cdict[k].append((val, v, v))
cdict['alpha'].append((val, 1.0, 1.0)) # alpha channel always 1.0
return _matplotlib.colors.LinearSegmentedColormap(name, cdict)
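# Illustrative call (added sketch): a two-stop gray-to-red colormap
#   cmap = mpl_make_linear_cmap([(0.0, (0.5, 0.5, 0.5)), (1.0, (1.0, 0.0, 0.0))])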
def mpl_besttxtcolor(x, cmap, norm):
"""
Determinining function for whether text should be white or black
Parameters
----------
x : float
Value of the cell in question
cmap : matplotlib colormap
Colormap assigning colors to the cells
norm : matplotlib normalizer
Function to map cell values to the interval [0, 1] for use by a
colormap
Returns
-------
{"white","black"}
"""
cell_color = cmap(norm(x))
R, G, B = cell_color[:3]
# Perceived brightness calculation from http://alienryderflex.com/hsp.html
P = _np.sqrt(0.299 * R**2 + 0.587 * G**2 + 0.114 * B**2)
return "black" if 0.5 <= P else "white"
def mpl_process_lbl(lbl, math=False):
"""
Process a (plotly-compatible) text label `lbl` to matplotlb text.
Parameters
----------
lbl : str
A text label to process.
math : bool, optional
Whether math-formatting (latex) should be used.
Returns
-------
str
"""
if not isinstance(lbl, str):
lbl = str(lbl) # just force as a string
math = math or ('<sup>' in lbl) or ('<sub>' in lbl) or \
('_' in lbl) or ('|' in lbl) or (len(lbl) == 1)
    try:
        float(lbl)
        math = True
    except ValueError:
        pass
l = lbl
l = l.replace("<i>", "").replace("</i>", "")
l = l.replace("<sup>", "^{").replace("</sup>", "}")
l = l.replace("<sub>", "_{").replace("</sub>", "}")
l = l.replace("<br>", "\n")
if math:
l = l.replace("alpha", "\\alpha")
l = l.replace("beta", "\\beta")
l = l.replace("sigma", "\\sigma")
if math or (len(l) == 1): l = "$" + l + "$"
return l
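# Illustrative conversions (added sketch):
#   mpl_process_lbl("x<sub>1</sub>")   -> "$x_{1}$"
#   mpl_process_lbl("10<sup>-2</sup>") -> "$10^{-2}$"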
def mpl_process_lbls(lbl_list):
"""
Process a list of plotly labels into matplotlib ones
Parameters
----------
lbl_list : list
A list of string-valued labels to process.
Returns
-------
list
the processed labels (strings).
"""
return [mpl_process_lbl(lbl) for lbl in lbl_list]
def mpl_color(plotly_color):
"""
Convert a plotly color name to a matplotlib compatible one.
Parameters
----------
plotly_color : str
A plotly color value, e.g. `"#FF0023"` or `"rgb(0,255,128)"`.
Returns
-------
str
"""
plotly_color = plotly_color.strip() # remove any whitespace
if plotly_color.startswith('#'):
return plotly_color # matplotlib understands "#FF0013"
elif plotly_color.startswith('rgb(') and plotly_color.endswith(')'):
tupstr = plotly_color[len('rgb('):-1]
tup = [float(x) / 256.0 for x in tupstr.split(',')]
return tuple(tup)
elif plotly_color.startswith('rgba(') and plotly_color.endswith(')'):
tupstr = plotly_color[len('rgba('):-1]
rgba = tupstr.split(',')
tup = [float(x) / 256.0 for x in rgba[0:3]] + [float(rgba[3])]
return tuple(tup)
else:
return plotly_color # hope this is a color name matplotlib understands
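# Illustrative conversions (added sketch; RGB channels are divided by 256.0 above):
#   mpl_color("#FF0013")             -> "#FF0013"
#   mpl_color("rgb(0,255,128)")      -> (0.0, 0.99609375, 0.5)
#   mpl_color("rgba(0,255,128,0.5)") -> (0.0, 0.99609375, 0.5, 0.5)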
def plotly_to_matplotlib(pygsti_fig, save_to=None, fontsize=12, prec='compacthp',
box_labels_font_size=6):
"""
Convert a pygsti (plotly) figure to a matplotlib figure.
Parameters
----------
pygsti_fig : ReportFigure
A pyGSTi figure.
save_to : str
Output filename. Extension determines type. If None, then the
matplotlib figure is returned instead of saved.
fontsize : int, optional
Base fontsize to use for converted figure.
prec : int or {"compact","compacth"}
Digits of precision to include in labels.
box_labels_font_size : int, optional
The size for labels on the boxes. If 0 then no labels are
put on the boxes
Returns
-------
matplotlib.Figure
Matplotlib figure, unless save_to is not None, in which case
the figure is closed and None is returned.
"""
numMPLFigs = len(_plt.get_fignums())
fig = pygsti_fig.plotlyfig
data_trace_list = fig['data']
if 'special' in pygsti_fig.metadata:
if pygsti_fig.metadata['special'] == "keyplot":
return special_keyplot(pygsti_fig, save_to, fontsize)
else: raise ValueError("Invalid `special` label: %s" % pygsti_fig.metadata['special'])
#if axes is None:
mpl_fig, axes = _plt.subplots() # create a new figure if no axes are given
layout = fig['layout']
h, w = layout['height'], layout['width']
# todo: get margins and subtract from h,w
if mpl_fig is not None and w is not None and h is not None:
        mpl_size = w / 100.0, h / 100.0  # heuristic
mpl_fig.set_size_inches(*mpl_size) # was 12,8 for "super" color plot
pygsti_fig.metadata['mpl_fig_size'] = mpl_size # record for later use by rendering commands
    def get(obj, x, default):
        """ Needed b/c in plotly v3 the layout is no longer a dict """
        try:
            ret = obj[x]
            return ret if (ret is not None) else default
        except KeyError:
            return default
xaxis, yaxis = layout['xaxis'], layout['yaxis']
#annotations = get(layout,'annotations',[])
title = get(layout, 'title', None)
shapes = get(layout, 'shapes', []) # assume only shapes are grid lines
bargap = get(layout, 'bargap', 0)
xlabel = get(xaxis, 'title', None)
ylabel = get(yaxis, 'title', None)
xlabels = get(xaxis, 'ticktext', None)
ylabels = get(yaxis, 'ticktext', None)
xtickvals = get(xaxis, 'tickvals', None)
ytickvals = get(yaxis, 'tickvals', None)
xaxistype = get(xaxis, 'type', None)
yaxistype = get(yaxis, 'type', None)
xaxisside = get(xaxis, 'side', 'bottom')
yaxisside = get(yaxis, 'side', 'left')
xtickangle = get(xaxis, 'tickangle', 0)
xlim = get(xaxis, 'range', None)
ylim = get(yaxis, 'range', None)
if xaxisside == "top":
axes.xaxis.set_label_position('top')
axes.xaxis.tick_top()
#axes.yaxis.set_ticks_position('both')
if yaxisside == "right":
axes.yaxis.set_label_position('right')
axes.yaxis.tick_right()
#axes.yaxis.set_ticks_position('both')
if title is not None:
        # Sometimes the Title object is still nested
        title_text = title if isinstance(title, str) else get(title, 'text', '')
        if xaxisside == "top":
            axes.set_title(mpl_process_lbl(title_text), fontsize=fontsize, y=4)  # push title up higher
        else:
            axes.set_title(mpl_process_lbl(title_text), fontsize=fontsize)
if xlabel is not None:
xlabel_text = xlabel if isinstance(xlabel, str) else get(xlabel, 'text', '')
axes.set_xlabel(mpl_process_lbl(xlabel_text), fontsize=fontsize)
if ylabel is not None:
ylabel_text = ylabel if isinstance(ylabel, str) else get(ylabel, 'text', '')
axes.set_ylabel(mpl_process_lbl(ylabel_text), fontsize=fontsize)
if xtickvals is not None:
axes.set_xticks(xtickvals, minor=False)
if ytickvals is not None:
axes.set_yticks(ytickvals, minor=False)
if xlabels is not None:
axes.set_xticklabels(mpl_process_lbls(xlabels), rotation=0, fontsize=(fontsize - 2))
if ylabels is not None:
axes.set_yticklabels(mpl_process_lbls(ylabels), fontsize=(fontsize - 2))
if xtickangle != 0:
        _plt.xticks(rotation=-xtickangle)  # minus b/c plotly & matplotlib have different sign conventions
if xaxistype == 'log':
axes.set_xscale("log")
if yaxistype == 'log':
axes.set_yscale("log")
if xlim is not None:
if xaxistype == 'log': # plotly's limits are already log10'd in this case
xlim = 10.0**xlim[0], 10.0**xlim[1] # but matplotlib's aren't
axes.set_xlim(xlim)
if ylim is not None:
if yaxistype == 'log': # plotly's limits are already log10'd in this case
ylim = 10.0**ylim[0], 10.0**ylim[1] # but matplotlib's aren't
axes.set_ylim(ylim)
#figure out barwidth and offsets for bar plots
num_bars = sum([get(d, 'type', '') == 'bar' for d in data_trace_list])
currentBarOffset = 0
barWidth = (1.0 - bargap) / num_bars if num_bars > 0 else 1.0
#process traces
handles = []; labels = [] # for the legend
boxes = [] # for violins
for traceDict in data_trace_list:
typ = get(traceDict, 'type', 'unknown')
name = get(traceDict, 'name', None)
showlegend = get(traceDict, 'showlegend', True)
if typ == "heatmap":
#colorscale = get(traceDict,'colorscale','unknown')
# traceDict['z'] is *normalized* already - maybe would work here but not for box value labels
plt_data = pygsti_fig.metadata['plt_data']
show_colorscale = get(traceDict, 'showscale', True)
mpl_size = (plt_data.shape[1] * 0.5, plt_data.shape[0] * 0.5)
mpl_fig.set_size_inches(*mpl_size)
#pygsti_fig.metadata['mpl_fig_size'] = mpl_size #record for later use by rendering commands
colormap = pygsti_fig.colormap
assert(colormap is not None), 'Must separately specify a colormap...'
norm, cmap = colormap.create_matplotlib_norm_and_cmap()
masked_data = _np.ma.array(plt_data, mask=_np.isnan(plt_data))
heatmap = axes.pcolormesh(masked_data, cmap=cmap, norm=norm)
axes.set_xlim(0, plt_data.shape[1])
axes.set_ylim(0, plt_data.shape[0])
if xtickvals is not None:
xtics = _np.array(xtickvals) + 0.5 # _np.arange(plt_data.shape[1])+0.5
axes.set_xticks(xtics, minor=False)
if ytickvals is not None:
ytics = _np.array(ytickvals) + 0.5 # _np.arange(plt_data.shape[0])+0.5
axes.set_yticks(ytics, minor=False)
grid = bool(len(shapes) > 1)
if grid:
def _get_minor_tics(t):
return [(t[i] + t[i + 1]) / 2.0 for i in range(len(t) - 1)]
axes.set_xticks(_get_minor_tics(xtics), minor=True)
axes.set_yticks(_get_minor_tics(ytics), minor=True)
axes.grid(which='minor', axis='both', linestyle='-', linewidth=2)
off = False # Matplotlib used to allow 'off', but now False should be used
if xlabels is None and ylabels is None:
axes.tick_params(labelcolor='w', top=off, bottom=off, left=off, right=off) # white tics
else:
axes.tick_params(top=off, bottom=off, left=off, right=off)
#print("DB ann = ", len(annotations))
#boxLabels = bool( len(annotations) >= 1 ) #TODO: why not plt_data.size instead of 1?
#boxLabels = True # maybe should always be true?
if box_labels_font_size > 0:
# Write values on colored squares
for y in range(plt_data.shape[0]):
for x in range(plt_data.shape[1]):
if _np.isnan(plt_data[y, x]): continue
assert(_np.isfinite(plt_data[y, x])), "%s is not finite!" % str(plt_data[y, x])
axes.text(x + 0.5, y + 0.5, mpl_process_lbl(_eformat(plt_data[y, x], prec), math=True),
horizontalalignment='center',
verticalalignment='center',
color=mpl_besttxtcolor(plt_data[y, x], cmap, norm),
fontsize=box_labels_font_size)
if show_colorscale:
cbar = _plt.colorbar(heatmap)
cbar.ax.tick_params(labelsize=(fontsize - 2))
elif typ == "scatter":
mode = get(traceDict, 'mode', 'lines')
marker = get(traceDict, 'marker', None)
line = get(traceDict, 'line', None)
if marker and (line is None):
line = marker['line'] # 2nd attempt to get line props
            color = None  # guard against traces with neither a marker nor a line color
            if marker:
                color = get(marker, 'color', None)
if line and (color is None):
color = get(line, 'color', None)
if color is None:
color = 'rgb(0,0,0)'
if isinstance(color, tuple):
color = [mpl_color(c) for c in color]
else:
color = mpl_color(color)
linewidth = float(line['width']) if (line and get(line, 'width', None) is not None) else 1.0
x = y = None
if 'x' in traceDict and 'y' in traceDict:
x = traceDict['x']
y = traceDict['y']
elif 'r' in traceDict and 't' in traceDict:
x = traceDict['r']
y = traceDict['t']
assert(x is not None and y is not None), "x and y both None in trace: %s" % traceDict
if mode == 'lines':
if isinstance(color, list):
raise ValueError('List of colors incompatible with lines mode')
lines = _plt.plot(x, y, linestyle='-', marker=None, color=color, linewidth=linewidth)
elif mode == 'markers':
lines = _plt.scatter(x, y, marker=".", color=color)
elif mode == 'lines+markers':
if isinstance(color, list):
# List of colors only works for markers with scatter, have default black line
lines = _plt.plot(x, y, linestyle='-', color=(0, 0, 0), linewidth=linewidth)
_plt.scatter(x, y, marker='.', color=color)
else:
lines = _plt.plot(x, y, linestyle='-', marker='.', color=color, linewidth=linewidth)
else: raise ValueError("Unknown mode: %s" % mode)
if showlegend and name:
handles.append(lines[0])
labels.append(name)
elif typ == "scattergl": # currently used only for colored points...
x = traceDict['x']
y = traceDict['y']
assert(x is not None and y is not None), "x and y both None in trace: %s" % traceDict
colormap = pygsti_fig.colormap
if colormap:
norm, cmap = colormap.create_matplotlib_norm_and_cmap()
s = _plt.scatter(x, y, c=y, s=50, cmap=cmap, norm=norm)
else:
s = _plt.scatter(x, y, c=y, s=50, cmap='gray')
if showlegend and name:
handles.append(s)
labels.append(name)
elif typ == "bar":
xlabels = [str(xl) for xl in traceDict['x']] # x "values" are actually bar labels in plotly
#always grey=pos, red=neg type of bar plot for now (since that's all pygsti uses)
y = _np.asarray(traceDict['y'])
if 'plt_yerr' in pygsti_fig.metadata:
yerr = pygsti_fig.metadata['plt_yerr']
else:
yerr = None
# actual x values are just the integers + offset
x = _np.arange(y.size) + currentBarOffset
currentBarOffset += barWidth # so next bar trace will be offset correctly
marker = get(traceDict, 'marker', None)
            if marker and ('color' in marker):
                if isinstance(marker['color'], str):
                    color = mpl_color(marker['color'])
                elif isinstance(marker['color'], list):
                    color = [mpl_color(c) for c in marker['color']]  # b/c axes.bar can take a list of colors
                else:
                    color = "gray"  # fall back when the color is neither a str nor a list
            else:
                color = "gray"
if yerr is None:
axes.bar(x, y, barWidth, color=color)
else:
axes.bar(x, y, barWidth, color=color,
yerr=yerr.flatten().real)
if xtickvals is not None:
xtics = _np.array(xtickvals) + 0.5 # _np.arange(plt_data.shape[1])+0.5
else: xtics = x
axes.set_xticks(xtics, minor=False)
axes.set_xticklabels(mpl_process_lbls(xlabels), rotation=0, fontsize=(fontsize - 4))
elif typ == "histogram":
#histnorm = get(traceDict,'histnorm',None)
marker = get(traceDict, 'marker', None)
color = mpl_color(marker['color'] if marker and isinstance(marker['color'], str) else "gray")
xbins = traceDict['xbins']
histdata = traceDict['x']
if abs(xbins['size']) < 1e-6:
histBins = 1
else:
histBins = int(round((xbins['end'] - xbins['start']) / xbins['size']))
            histdata_finite = _np.take(histdata, _np.where(_np.isfinite(histdata)))[
                0]  # np.where returns a 1-tuple of index arrays, so take yields a (1, N) array
if yaxistype == 'log':
if len(histdata_finite) == 0:
axes.set_yscale("linear") # no data, and will get an error with log-scale, so switch to linear
#histMin = min( histdata_finite ) if cmapFactory.vmin is None else cmapFactory.vmin
#histMax = max( histdata_finite ) if cmapFactory.vmax is None else cmapFactory.vmax
#_plt.hist(_np.clip(histdata_finite,histMin,histMax), histBins,
# range=[histMin, histMax], facecolor='gray', align='mid')
_, _, patches = _plt.hist(histdata_finite, histBins,
facecolor=color, align='mid')
#If we've been given an array of colors
if marker and ('color' in marker) and isinstance(marker['color'], list):
for p, c in zip(patches, marker['color']):
_plt.setp(p, 'facecolor', mpl_color(c))
elif typ == "box":
boxes.append(traceDict)
if len(boxes) > 0:
_plt.violinplot([box['y'] for box in boxes], [box['x0'] for box in boxes],
points=10, widths=1., showmeans=False,
showextrema=False, showmedians=False)
# above kwargs taken from Tim's original RB plot - we could set some of
# these from boxes[0]'s properties like 'boxmean' (a boolean) FUTURE?
extraartists = [axes]
if len(handles) > 0:
lgd = _plt.legend(handles, labels, bbox_to_anchor=(1.01, 1.0),
borderaxespad=0., loc="upper left")
extraartists.append(lgd)
if save_to:
_gc.collect() # too many open files (b/c matplotlib doesn't close everything) can cause the below to fail
_plt.savefig(save_to, bbox_extra_artists=extraartists,
bbox_inches='tight') # need extra artists otherwise
#axis labels get clipped
_plt.cla()
_plt.close(mpl_fig)
del mpl_fig
_gc.collect() # again, to be safe...
if len(_plt.get_fignums()) != numMPLFigs:
raise ValueError("WARNING: MORE FIGURES OPEN NOW (%d) THAN WHEN WE STARTED %d)!!" %
(len(_plt.get_fignums()), numMPLFigs))
return None # figure is closed!
else:
return mpl_fig
#Special processing for the key-plot: since it uses so much weird
# plotly and matplotlib construction it makes no sense to try to
# automatically convert.
def special_keyplot(pygsti_fig, save_to, fontsize):
"""
Create a plot showing the layout of a single sub-block of a goodness-of-fit box plot.
Parameters
----------
pygsti_fig : ReportFigure
The pyGSTi figure to process.
save_to : str
Filename to save to.
fontsize : int
        Font size to use.
Returns
-------
matplotlib.Figure
"""
#Hardcoded
title = ""
prepStrs, effectStrs, xlabel, ylabel = pygsti_fig.metadata['args']
fig, axes = _plt.subplots()
mpl_size = (len(prepStrs) * 0.5, len(effectStrs) * 0.5)
fig.set_size_inches(*mpl_size)
pygsti_fig.metadata['mpl_fig_size'] = mpl_size # record for later use by rendering commands
if title is not None:
axes.set_title(title, fontsize=(fontsize + 4))
if xlabel is not None:
axes.set_xlabel(xlabel, fontsize=(fontsize + 4))
if ylabel is not None:
axes.set_ylabel(ylabel, fontsize=(fontsize + 4))
#Copied from _summable_color_boxplot
def _val_filter(vals): # filter to latex-ify circuits. Later add filter as a possible parameter
formatted_vals = []
for val in vals:
if type(val) in (tuple, _Circuit) and all([type(el) == str for el in val]):
if len(val) == 0:
formatted_vals.append(r"$\{\}$")
else:
formatted_vals.append("$" + "\\cdot".join([("\\mathrm{%s}" % el) for el in val]) + "$")
else:
formatted_vals.append(val)
return formatted_vals
axes.yaxis.tick_right()
axes.xaxis.set_label_position("top")
axes.set_xticklabels(_val_filter(prepStrs), rotation=90, ha='center', fontsize=fontsize)
axes.set_yticklabels(list(reversed(_val_filter(effectStrs))), fontsize=fontsize) # FLIP
axes.set_xticks(_np.arange(len(prepStrs)) + .5)
axes.set_xticks(_np.arange(len(prepStrs) + 1), minor=True)
axes.set_yticks(_np.arange(len(effectStrs)) + .5)
axes.set_yticks(_np.arange(len(effectStrs) + 1), minor=True)
    axes.tick_params(which='major', bottom=False, top=False, left=False, right=False, pad=5)  # False, not 'off', for modern matplotlib
axes.yaxis.grid(True, linestyle='-', linewidth=1.0, which='minor')
axes.xaxis.grid(True, linestyle='-', linewidth=1.0, which='minor')
if save_to is not None:
if len(save_to) > 0: # So you can pass save_to="" and figure will be closed but not saved to a file
_plt.savefig(save_to, bbox_extra_artists=(axes,), bbox_inches='tight')
_plt.cla()
_plt.close(fig) # close the figure if we're saving it to a file
else:
return fig
|
{"hexsha": "67e998e5194642224bd293e37072eaa9b469022b", "size": 26063, "ext": "py", "lang": "Python", "max_stars_repo_path": "pygsti/report/mpl_colormaps.py", "max_stars_repo_name": "colibri-coruscans/pyGSTi", "max_stars_repo_head_hexsha": "da54f4abf668a28476030528f81afa46a1fbba33", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pygsti/report/mpl_colormaps.py", "max_issues_repo_name": "colibri-coruscans/pyGSTi", "max_issues_repo_head_hexsha": "da54f4abf668a28476030528f81afa46a1fbba33", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pygsti/report/mpl_colormaps.py", "max_forks_repo_name": "colibri-coruscans/pyGSTi", "max_forks_repo_head_hexsha": "da54f4abf668a28476030528f81afa46a1fbba33", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8822674419, "max_line_length": 115, "alphanum_fraction": 0.5809001266, "include": true, "reason": "import numpy", "num_tokens": 6541}
|
# ========================================================================
#
# Imports
#
# ========================================================================
import numpy as np
import pandas as pd
import yaml
import definitions as defs
# ========================================================================
#
# Function definitions
#
# ========================================================================
def get_merged_csv(fnames, **kwargs):
lst = []
for fname in fnames:
try:
df = pd.read_csv(fname, **kwargs)
lst.append(df)
        except pd.errors.EmptyDataError:  # pd.io.common.EmptyDataError was removed in pandas 1.0
pass
return pd.concat(lst, ignore_index=True)
# ========================================================================
def parse_ic(fname):
"""Parse the Nalu yaml input file for the initial conditions"""
with open(fname, "r") as stream:
try:
            dat = yaml.safe_load(stream)  # yaml.load without an explicit Loader is removed in PyYAML 6
u0 = float(
dat["realms"][0]["initial_conditions"][0]["value"]["velocity"][0]
)
v0 = float(
dat["realms"][0]["initial_conditions"][0]["value"]["velocity"][1]
)
try:
w0 = float(
dat["realms"][0]["initial_conditions"][0]["value"]["velocity"][2]
)
except IndexError:
w0 = 0.0
umag = np.sqrt(u0 ** 2 + v0 ** 2 + w0 ** 2)
rho0 = float(
dat["realms"][0]["material_properties"]["specifications"][0]["value"]
)
mu = float(
dat["realms"][0]["material_properties"]["specifications"][1]["value"]
)
flow_angle = np.arctan2(v0, u0)
return u0, v0, w0, umag, rho0, mu, flow_angle
except yaml.YAMLError as exc:
print(exc)
# ========================================================================
def get_meshname(fname):
"""Parse the Nalu yaml input file for the mesh name"""
with open(fname, "r") as stream:
try:
            dat = yaml.safe_load(stream)  # yaml.load without an explicit Loader is removed in PyYAML 6
return dat["realms"][0]["mesh"]
except yaml.YAMLError as exc:
print(exc)
# ========================================================================
def get_wing_slices(dim):
"""Return the wing slices"""
return pd.DataFrame(defs.get_wing_slices(dim), columns=["zslice"])
# ========================================================================
def get_vortex_slices():
"""Return the vortex slices"""
return pd.DataFrame(defs.get_vortex_slices(), columns=["xslice"])
# ========================================================================
def get_renames():
return {
"Points:0": "x",
"Points:1": "y",
"Points:2": "z",
"pressure": "p",
"iblank": "iblank",
"iblank_cell": "iblank_cell",
"absIBlank": "absIBlank",
"pressure_force_:0": "fpx",
"pressure_force_:1": "fpy",
"pressure_force_:2": "fpz",
"tau_wall": "tau_wall",
"velocity_:0": "ux",
"velocity_:1": "uy",
"velocity_:2": "uz",
"element_courant": "element_courant",
"time": "time",
"GlobalNodeId": "GlobalNodeId",
"ObjectId": "ObjectId",
"PedigreeElementId": "PedigreeElementId",
"PedigreeNodeId": "PedigreeNodeId",
"vtkValidPointMask": "vtkValidPointMask",
"arc_length": "arc_length",
}
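# --- Illustrative check (an added sketch, not part of the original file) ---
if __name__ == "__main__":
    import io
    # pd.read_csv also accepts in-memory file-like objects
    csv_a = io.StringIO("Points:0,pressure\n0.0,1.5\n")
    csv_b = io.StringIO("Points:0,pressure\n1.0,2.5\n")
    merged = get_merged_csv([csv_a, csv_b])
    print(merged.rename(columns=get_renames()))  # columns become x, p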
|
{"hexsha": "d99c47f4388b4cdf79aeafe30e0efe926f315eb4", "size": 3609, "ext": "py", "lang": "Python", "max_stars_repo_path": "mcalister/utilities/utilities.py", "max_stars_repo_name": "Exawind/iddes", "max_stars_repo_head_hexsha": "200ddd4a20587c38f4103a32c0001ad5c8d33f22", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-08T12:31:54.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-08T12:31:54.000Z", "max_issues_repo_path": "mcalister/utilities/utilities.py", "max_issues_repo_name": "Exawind/iddes", "max_issues_repo_head_hexsha": "200ddd4a20587c38f4103a32c0001ad5c8d33f22", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-05T18:08:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-05T18:08:51.000Z", "max_forks_repo_path": "mcalister/utilities/utilities.py", "max_forks_repo_name": "Exawind/iddes", "max_forks_repo_head_hexsha": "200ddd4a20587c38f4103a32c0001ad5c8d33f22", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-07T21:23:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-29T17:05:26.000Z", "avg_line_length": 32.2232142857, "max_line_length": 85, "alphanum_fraction": 0.4322527016, "include": true, "reason": "import numpy", "num_tokens": 771}
|
x = [-1, 1, 3, 3, -1]
y = [2, 0, -5, 2, -5]
@test_throws MethodError scatterplot()
@test_throws MethodError scatterplot(sin, x)
@test_throws MethodError scatterplot([sin], x)
@test_throws DimensionMismatch scatterplot([1, 2], [1, 2, 3])
@test_throws DimensionMismatch scatterplot([1, 2, 3], [1, 2])
@test_throws DimensionMismatch scatterplot([1, 2, 3], 1:2)
@test_throws DimensionMismatch scatterplot(1:3, [1, 2])
@test_throws DimensionMismatch scatterplot(1:3, 1:2)
@testset "positional types" begin
for p in (
@inferred(scatterplot(x, y)),
@inferred(scatterplot(float.(x), y)),
@inferred(scatterplot(x, float.(y))),
)
@test p isa Plot
test_ref("references/scatterplot/default.txt", @show_col(p))
end
for p in (@inferred(scatterplot(y)), @inferred(scatterplot(float.(y))))
@test p isa Plot
test_ref("references/scatterplot/y_only.txt", @show_col(p))
end
p = @inferred scatterplot(6:10)
@test p isa Plot
test_ref("references/scatterplot/range1.txt", @show_col(p))
p = @inferred scatterplot(11:15, 6:10)
@test p isa Plot
test_ref("references/scatterplot/range2.txt", @show_col(p))
p = @inferred scatterplot(x .* 1e3 .+ 15, y .* 1e-3 .- 15)
test_ref("references/scatterplot/scale1.txt", @show_col(p))
p = @inferred scatterplot(x .* 1e-3 .+ 15, y .* 1e3 .- 15)
test_ref("references/scatterplot/scale2.txt", @show_col(p))
miny = -1.2796649117521434e218
maxy = -miny
p = @inferred scatterplot([1], [miny], xlim = (1, 1), ylim = (miny, maxy))
test_ref("references/scatterplot/scale3.txt", @show_col(p))
p = @inferred scatterplot([1], [miny], xlim = [1, 1], ylim = [miny, maxy])
test_ref("references/scatterplot/scale3.txt", @show_col(p))
end
@testset "keyword arguments" begin
p = @inferred scatterplot(x, y, xlim = (-1.5, 3.5), ylim = (-5.5, 2.5))
test_ref("references/scatterplot/limits.txt", @show_col(p))
p = @inferred scatterplot(x, y, xlim = [-1.5, 3.5], ylim = [-5.5, 2.5])
test_ref("references/scatterplot/limits.txt", @show_col(p))
p = @inferred scatterplot(x, y, grid = false)
test_ref("references/scatterplot/nogrid.txt", @show_col(p))
p = @inferred scatterplot(x, y, color = :blue, name = "points1")
test_ref("references/scatterplot/blue.txt", @show_col(p))
p = @inferred scatterplot(
x,
y,
name = "points1",
title = "Scatter",
xlabel = "x",
ylabel = "y",
)
@test p isa Plot
test_ref("references/scatterplot/parameters1.txt", @show_col(p))
@test @inferred(scatterplot!(p, [0.5, 1, 1.5], name = "points2")) === p
test_ref("references/scatterplot/parameters2.txt", @show_col(p))
@test @inferred(scatterplot!(p, [-0.5, 0.5, 1.5], [0.5, 1, 1.5], name = "points3")) ===
p
test_ref("references/scatterplot/parameters3.txt", @show_col(p))
test_ref("references/scatterplot/nocolor.txt", @show_nocol(p))
p = scatterplot(x, y, title = "Scatter", canvas = DotCanvas, width = 10, height = 5)
@test p isa Plot
test_ref("references/scatterplot/canvassize.txt", @show_col(p))
end
@testset "markers" begin
p = scatterplot(
x,
y,
title = "Vector of markers",
marker = [:circle, "!", '.', :star5, :vline],
color = [:red, :green, :yellow, :blue, :cyan],
)
test_ref("references/scatterplot/markers_vector.txt", @show_col(p))
scatterplot(x, y, marker = :circle)
n = collect(1:length(UnicodePlots.MARKERS))
m = collect(keys(UnicodePlots.MARKERS))
for canvas in (BrailleCanvas, DotCanvas, AsciiCanvas)
p = scatterplot(n, n, title = "Supported markers", marker = m)
test_ref("references/scatterplot/markers_$(canvas).txt", @show_col(p))
end
end
@testset "densityplot" begin
seed!(RNG, 1338)
dx, dy = randn(RNG, 1000), randn(RNG, 1000)
p = @inferred densityplot(dx, dy)
@test @inferred(densityplot!(p, dx .+ 2, dy .+ 2)) === p
@test p isa Plot
test_ref("references/scatterplot/densityplot.txt", @show_col(p))
p = @inferred densityplot(
dx,
dy,
name = "foo",
color = :red,
title = "Title",
xlabel = "x",
)
@test @inferred(densityplot!(p, dx .+ 2, dy .+ 2, name = "bar")) === p
@test p isa Plot
test_ref("references/scatterplot/densityplot_parameters.txt", @show_col(p))
end
|
{"hexsha": "839de9bdb3500dc0c851a4387872a1d3fc1ca8fd", "size": 4431, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/tst_scatterplot.jl", "max_stars_repo_name": "Cvikli/UnicodePlots.jl", "max_stars_repo_head_hexsha": "fecf19a90a4d22a6784a68ff0f402fcb93936a80", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 89, "max_stars_repo_stars_event_min_datetime": "2021-09-07T14:02:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T10:35:35.000Z", "max_issues_repo_path": "test/tst_scatterplot.jl", "max_issues_repo_name": "Cvikli/UnicodePlots.jl", "max_issues_repo_head_hexsha": "fecf19a90a4d22a6784a68ff0f402fcb93936a80", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 92, "max_issues_repo_issues_event_min_datetime": "2021-09-02T08:47:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T19:18:56.000Z", "max_forks_repo_path": "test/tst_scatterplot.jl", "max_forks_repo_name": "Cvikli/UnicodePlots.jl", "max_forks_repo_head_hexsha": "fecf19a90a4d22a6784a68ff0f402fcb93936a80", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2021-09-09T18:21:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T07:51:36.000Z", "avg_line_length": 35.448, "max_line_length": 91, "alphanum_fraction": 0.6222071767, "num_tokens": 1441}
|
import numpy as np
from tensorlib.decomposition import cp
from tensorlib.decomposition.decomposition import _cp3
from tensorlib.decomposition import tucker
from tensorlib.decomposition.decomposition import _tucker3
from tensorlib.datasets import load_bread
from numpy.testing import assert_almost_equal
from nose.tools import assert_raises
def test_generated_cp():
"""
    Test CANDECOMP/PARAFAC decomposition. Problem from
http://issnla2010.ba.cnr.it/DecompositionsI.pdf
"""
rs = np.random.RandomState(1999)
X = .7 * rs.rand(2, 4, 3) + .25 * rs.rand(2, 4, 3)
assert_raises(ValueError, cp, X)
U1 = cp(X, 2, init_type="hosvd")
U2 = _cp3(X, 2, tol=1E-4, max_iter=500, init_type="hosvd")
for n, i in enumerate(U1):
assert_almost_equal(U1[n], U2[n])
def test_bread_cp():
"""
    Test CANDECOMP/PARAFAC decomposition using the bread dataset.
"""
X, meta = load_bread()
assert_raises(ValueError, cp, X)
U1 = cp(X, 2, init_type="hosvd")
U2 = _cp3(X, 2, tol=1E-4, max_iter=500, init_type="hosvd")
for n, i in enumerate(U1):
assert_almost_equal(U1[n], U2[n])
def test_generated_tucker():
"""
    Test Tucker decomposition. Problem from
http://issnla2010.ba.cnr.it/DecompositionsI.pdf
"""
rs = np.random.RandomState(1999)
X = .7 * rs.rand(2, 4, 3) + .25 * rs.rand(2, 4, 3)
    assert_raises(ValueError, tucker, X)  # missing rank should raise, as for cp above
U1 = tucker(X, 2, init_type="hosvd")
U2 = _tucker3(X, 2, tol=1E-4, max_iter=500, init_type="hosvd")
for n, i in enumerate(U1):
assert_almost_equal(U1[n], U2[n])
|
{"hexsha": "5f4ea093f64ffe184cdca19097900341e6958710", "size": 1591, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorlib/decomposition/tests/test_decomposition.py", "max_stars_repo_name": "tensorlib/tensorlib", "max_stars_repo_head_hexsha": "bd1bf02cbdcb4ea666b557238a4b32effab2943a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2015-03-13T23:21:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T20:07:57.000Z", "max_issues_repo_path": "tensorlib/decomposition/tests/test_decomposition.py", "max_issues_repo_name": "jiegege527/tensorlib-1", "max_issues_repo_head_hexsha": "bd1bf02cbdcb4ea666b557238a4b32effab2943a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-07-05T20:07:23.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-29T17:37:42.000Z", "max_forks_repo_path": "tensorlib/decomposition/tests/test_decomposition.py", "max_forks_repo_name": "jiegege527/tensorlib-1", "max_forks_repo_head_hexsha": "bd1bf02cbdcb4ea666b557238a4b32effab2943a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2015-06-05T13:07:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T05:01:38.000Z", "avg_line_length": 32.4693877551, "max_line_length": 66, "alphanum_fraction": 0.6744186047, "include": true, "reason": "import numpy,from numpy", "num_tokens": 508}
|
import numpy as np
def accuracy(preds, labels):
return np.mean(labels == preds.round())
def error(preds, labels):
return 1.0 - accuracy(preds,labels)
def mean_square_error(preds, labels):
return np.mean(np.square(preds - labels))
def mean_absolute_error(preds, labels):
return np.mean(np.abs(preds - labels))
metrics = {"acc": accuracy,
"error": error,
"mse": mean_square_error,
"mae": mean_absolute_error}
def get_metric(eval_metric):
return metrics[eval_metric]
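# Illustrative usage (an added sketch, not part of the original file):
if __name__ == "__main__":
    preds = np.array([0.2, 0.8, 0.6])
    labels = np.array([0.0, 1.0, 0.0])
    print(get_metric("acc")(preds, labels))  # 2 of 3 rounded preds match -> ~0.667
    print(get_metric("mse")(preds, labels))  # mean of (0.2^2, 0.2^2, 0.6^2) -> ~0.147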
|
{"hexsha": "07b9bd68a8459794e60e8ca85c8c132447cc156e", "size": 531, "ext": "py", "lang": "Python", "max_stars_repo_path": "tgboost/metric.py", "max_stars_repo_name": "BenJamesbabala/tgboost", "max_stars_repo_head_hexsha": "933e666c60c6e828a78a73637efab91345529d6d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-08T19:18:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-08T19:18:09.000Z", "max_issues_repo_path": "tgboost/metric.py", "max_issues_repo_name": "BenJamesbabala/tgboost", "max_issues_repo_head_hexsha": "933e666c60c6e828a78a73637efab91345529d6d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tgboost/metric.py", "max_forks_repo_name": "BenJamesbabala/tgboost", "max_forks_repo_head_hexsha": "933e666c60c6e828a78a73637efab91345529d6d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-08T19:18:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-08T19:18:11.000Z", "avg_line_length": 17.7, "max_line_length": 45, "alphanum_fraction": 0.6629001883, "include": true, "reason": "import numpy", "num_tokens": 124}
|
using QuAlgorithmZoo, Yao,YaoExtensions
using BitBasis: log2i
using Test
using Random, LinearAlgebra
"""
Quantum singular value decomposition algorithm.
* `reg`, input register (A, B) as the target matrix to decompose,
* `circuit_a`, U matrix applied on register A,
* `circuit_b`, V matrix applied on register B,
* `optimizer`, the optimizer, normally we use `Adam(lr=0.1)`,
* `Nc`, log2 number of singular values kept,
* `maxiter`, the maximum number of iterations.
"""
function train_qsvd!(reg, circuit_a::AbstractBlock{Na}, circuit_b::AbstractBlock{Nb}, optimizer; Nc::Int=min(Na, Nb), maxiter::Int=100) where {Na, Nb}
nbit = Na+Nb
c = circuit_qsvd(circuit_a, circuit_b, Nc)
obs = -mapreduce(i->put(nbit, i=>Z), +, (1:Na..., Na+Nc+1:Na+Nb...))
params = parameters(c)
for i = 1:maxiter
grad = expect'(obs, reg => c).second
QuAlgorithmZoo.update!(params, grad, optimizer)
println("Iter $i, Loss = $(Na+expect(obs, copy(reg) |> c))")
dispatch!(c, params)
end
end
"""build the circuit for quantum SVD training."""
function circuit_qsvd(circuit_a::AbstractBlock{Na}, circuit_b::AbstractBlock{Nb}, Nc::Int) where {Na, Nb}
nbit = Na+Nb
cnots = chain(control(nbit, i+Na, i=>X) for i=1:Nc)
c = chain(concentrate(nbit, circuit_a, 1:Na), concentrate(nbit, circuit_b, Na+1:nbit), cnots)
end
"""read QSVD results"""
function readout_qsvd(reg::AbstractRegister, circuit_a::AbstractBlock{Na}, circuit_b::AbstractBlock{Nb}, Nc::Int) where {Na, Nb}
reg = copy(reg) |> concentrate(Na+Nb, circuit_a, 1:Na) |> concentrate(Na+Nb, circuit_b, Na+1:Na+Nb)
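    # Added note: after rotating by circuit_a and circuit_b the state matrix is
    # (approximately) diagonal; `b | b << Na` addresses the basis state whose A- and
    # B-indices both equal `b`, so `_S` collects the (complex) diagonal entries,
    # i.e. the singular values up to a phase.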
_S = [select(reg, b|b<<Na).state[] for b in basis(Nc)]
S = abs.(_S)
order = sortperm(S, rev=true)
S, _S = S[order], _S[order]
mat(circuit_a)[order,:]'.*transpose(_S./S), S, transpose(mat(circuit_b)[order,:])
end
"""
QuantumSVD(M; kwargs...)
Quantum SVD.
* `M`, the matrix to decompose, of size (2^Na × 2^Nb); the sum of its squared singular values must be 1.
kwargs includes
* `Nc`, log2 number of singular values kept,
* `circuit_a` and `circuit_b`, the circuit ansatz for `U` and `V` matrices,
* `maxiter`, maximum number of iterations,
* `optimizer`, default is `Adam(lr=0.1)`.
"""
function QuantumSVD(M::AbstractMatrix; Nc::Int=log2i(min(size(M)...)),
circuit_a=variational_circuit(log2i(size(M, 1))),
circuit_b=variational_circuit(log2i(size(M, 2))),
maxiter=200, optimizer=Adam(lr=0.1))
dispatch!(circuit_a, :random)
dispatch!(circuit_b, :random)
reg = ArrayReg(vec(M))
train_qsvd!(reg, circuit_a, circuit_b, optimizer, Nc=Nc, maxiter=maxiter)
readout_qsvd(reg, circuit_a, circuit_b, Nc)
end
@testset "QSVD" begin
Random.seed!(2)
# define a matrix of size (2^Na, 2^Nb)
Na = 2
Nb = 2
# the exact result
M = reshape(rand_state(Na+Nb).state, 1<<Na, 1<<Nb)
U_exact, S_exact, V_exact = svd(M)
U, S, V = QuantumSVD(M; maxiter=400)
@test isapprox(U*Diagonal(S)*V', M, atol=1e-2)
@test isapprox(abs.(S), S_exact, atol=1e-2)
@test isapprox(U'*U_exact .|> abs2, I, atol=1e-2)
@test isapprox(V'*V_exact .|> abs2, I, atol=1e-2)
end
|
{"hexsha": "3b2618031b020f45f5f7fb0d1b42d761416747cd", "size": 3200, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/QSVD/QSVD.jl", "max_stars_repo_name": "stanescuUW/QuAlgorithmZoo.jl", "max_stars_repo_head_hexsha": "7d5c2398840cad822a095295e6559c6bafca715e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/QSVD/QSVD.jl", "max_issues_repo_name": "stanescuUW/QuAlgorithmZoo.jl", "max_issues_repo_head_hexsha": "7d5c2398840cad822a095295e6559c6bafca715e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/QSVD/QSVD.jl", "max_forks_repo_name": "stanescuUW/QuAlgorithmZoo.jl", "max_forks_repo_head_hexsha": "7d5c2398840cad822a095295e6559c6bafca715e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-30T06:16:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-30T06:16:09.000Z", "avg_line_length": 37.2093023256, "max_line_length": 150, "alphanum_fraction": 0.6509375, "num_tokens": 1054}
|
#! /usr/bin/env python3
# import roslib
# roslib.load_manifest('motion_plan')
import rospy
from geometry_msgs.msg import Twist, Point
from nav_msgs.msg import Odometry
from tf import transformations
from std_srvs.srv import *
import math
active_ = False
current_position_ = Point()
yaw_ = 0
state_ = 0
desired_position_ = Point()
# desired_position_.x = rospy.get_param('des_pos_x')
# desired_position_.y = rospy.get_param('des_pos_y')
desired_position_.x = 0
desired_position_.y = -0.5
desired_position_.z = 0
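# tolerances: math.pi/90 rad is about 2 degrees of allowed yaw error;
# distance_precision_ is in the odom frame's units (presumably meters)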
yaw_precision_ = math.pi/90
distance_precision_ = 0.05 #0.1
pub = None
def main():
    global pub, active_
rospy.init_node('go_to_goal')
pub = rospy.Publisher('sim_ros_interface/cmd_vel', Twist, queue_size=1)
odomSub = rospy.Subscriber('sim_ros_interface/odom', Odometry, odom_callback)
desiredPoseSub = rospy.Subscriber('sim_ros_interface/desired_pose', Point, desired_pose_callback)
srv = rospy.Service('go_to_point_switch', SetBool, go_to_point_switch)
rate = rospy.Rate(20)
while not rospy.is_shutdown():
if not active_:
continue
        if desired_position_ is None:
continue
else:
print(state_)
if state_ == 0:
correct_yaw(desired_position_)
elif state_ == 1:
correct_linear(desired_position_)
elif state_ == 2:
reached()
pass
else:
rospy.logerr('Unknown state')
pass
rate.sleep()
def go_to_point_switch(req):
global active_
active_ = req.data
res = SetBoolResponse()
res.success = True
res.message = 'Done'
return res
def correct_yaw(des_position):
global yaw_, pub, state_, yaw_precision_
required_yaw = math.atan2(des_position.y-current_position_.y, des_position.x-current_position_.x)
error_yaw = required_yaw-yaw_
print(required_yaw, yaw_, error_yaw)
twist = Twist()
mode = None # 0 - clockwise, 1 - anticlockwise
if math.fabs(error_yaw)<=math.pi:
# check opposite shorter path
if yaw_<0:
mode = 1
else:
mode = 0
else:
if yaw_<0:
mode = 0
else:
mode = 1
while(math.fabs(required_yaw-yaw_)>yaw_precision_):
if mode==1:
twist.angular.z = 2.0
elif mode==0:
twist.angular.z = -2.0
pub.publish(twist)
    if (math.fabs(required_yaw-yaw_)<=yaw_precision_):
        twist.angular.z = 0
        pub.publish(twist)  # actually send the stop command before switching state
        print ('Error in yaw: %s' % (required_yaw-yaw_))
        update_state(1)
def correct_linear(des_position):
global yaw_, pub, state_, yaw_precision_
required_yaw = math.atan2(des_position.y-current_position_.y, des_position.x-current_position_.x)
error_yaw = required_yaw-yaw_
error_pos = math.sqrt(pow(des_position.y-current_position_.y,2) + pow(des_position.x-current_position_.x,2))
if error_pos > distance_precision_:
twist = Twist()
twist.linear.x = 2 #1
pub.publish(twist)
else:
print ('Error in position: %s' % error_pos)
update_state(2)
if math.fabs(error_yaw) > yaw_precision_:
print ('Error in yaw: %s' % error_yaw)
update_state(0)
def reached():
twist = Twist()
twist.linear.x = 0
twist.angular.z = 0
pub.publish(twist)
def update_state(state):
global state_
state_ = state
print ('State changed to: %s' % state_)
def odom_callback(msg):
global current_position_, yaw_
current_position_ = msg.pose.pose.position
# print(current_position_)
quaternion_data = (msg.pose.pose.orientation.x,
msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z,
msg.pose.pose.orientation.w)
euler_data = transformations.euler_from_quaternion(quaternion_data)
yaw_ = euler_data[2]
# print(yaw_)
def desired_pose_callback(msg):
global desired_position_
desired_position_ = msg
print(desired_position_)
if __name__=='__main__':
main()
|
{"hexsha": "21aa4a659117d16edec435fdda6a73da38557ed6", "size": 3739, "ext": "py", "lang": "Python", "max_stars_repo_path": "ros_pkg/robot_function/scripts/go_to_point.py", "max_stars_repo_name": "Pallav1299/coppeliasim_ros", "max_stars_repo_head_hexsha": "3c4db53be7ea7d64c53c1d56066bb93dd212a476", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ros_pkg/robot_function/scripts/go_to_point.py", "max_issues_repo_name": "Pallav1299/coppeliasim_ros", "max_issues_repo_head_hexsha": "3c4db53be7ea7d64c53c1d56066bb93dd212a476", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ros_pkg/robot_function/scripts/go_to_point.py", "max_forks_repo_name": "Pallav1299/coppeliasim_ros", "max_forks_repo_head_hexsha": "3c4db53be7ea7d64c53c1d56066bb93dd212a476", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5986842105, "max_line_length": 109, "alphanum_fraction": 0.7357582241, "include": true, "reason": "import numpy", "num_tokens": 1035}
|
// (C) Copyright Gennadiy Rozental 2011-2012.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/libs/test for the library home page.
//
// File : $RCSfile$
//
// Version : $Revision$
//
// Description : defines test case family based on data generator
// ***************************************************************************
#ifndef BOOST_TEST_DATA_TEST_CASE_HPP_102211GER
#define BOOST_TEST_DATA_TEST_CASE_HPP_102211GER
// Boost.Test
#include <boost/test/data/config.hpp>
#include <boost/test/data/dataset.hpp>
// Boost
#include <boost/preprocessor/repetition/enum_params.hpp>
#include <boost/preprocessor/repetition/enum_binary_params.hpp>
#include <boost/preprocessor/repetition/repeat_from_to.hpp>
#include <boost/preprocessor/variadic/to_seq.hpp>
#include <boost/preprocessor/variadic/size.hpp>
#include <boost/preprocessor/cat.hpp>
#include <boost/preprocessor/seq/for_each_i.hpp>
#include <boost/preprocessor/seq/for_each.hpp>
#include <boost/preprocessor/seq/enum.hpp>
#include <boost/preprocessor/control/iif.hpp>
#include <boost/preprocessor/comparison/equal.hpp>
#include <boost/bind.hpp>
#include <boost/test/detail/suppress_warnings.hpp>
//____________________________________________________________________________//
namespace boost {
namespace unit_test {
namespace data {
// ************************************************************************** //
// ************** test_case_template ************** //
// ************************************************************************** //
namespace ds_detail {
template<typename TestCase,typename DS>
class test_case_gen : public test_unit_generator {
public:
// Constructor
#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES
    test_case_gen( const_string tc_name, const_string tc_file, std::size_t tc_line, DS&& ds )
    : m_tc_name( ut_detail::normalize_test_case_name( tc_name ) )
    , m_tc_file( tc_file )
    , m_tc_line( tc_line )
    {
        data::for_each_sample( std::forward<DS>( ds ), *this );
    }
    test_case_gen( test_case_gen&& gen )
    : m_tc_name( gen.m_tc_name )
    , m_tc_file( gen.m_tc_file )
    , m_tc_line( gen.m_tc_line )
    , m_test_cases( std::move(gen.m_test_cases) )
    {}
#else
    test_case_gen( const_string tc_name, const_string tc_file, std::size_t tc_line, DS const& ds )
    : m_tc_name( ut_detail::normalize_test_case_name( tc_name ) )
    , m_tc_file( tc_file )
    , m_tc_line( tc_line )
    {
        data::for_each_sample( ds, *this );
    }
#endif
virtual test_unit* next() const
{
if( m_test_cases.empty() )
return 0;
test_unit* res = m_test_cases.front();
m_test_cases.pop_front();
return res;
}
// !! ?? variadics based implementation
#define TC_MAKE(z,arity,_) \
template<BOOST_PP_ENUM_PARAMS(arity, typename Arg)> \
void operator()( BOOST_PP_ENUM_BINARY_PARAMS(arity, Arg, const& arg) ) const \
{ \
m_test_cases.push_back( new test_case( m_tc_name, m_tc_file, m_tc_line, \
boost::bind( &TestCase::template test_method<BOOST_PP_ENUM_PARAMS(arity,Arg)>, \
BOOST_PP_ENUM_PARAMS(arity, arg) ) ) ); \
    }
BOOST_PP_REPEAT_FROM_TO(1, 4, TC_MAKE, _)
private:
// Data members
std::string m_tc_name;
const_string m_tc_file;
std::size_t m_tc_line;
mutable std::list<test_unit*> m_test_cases;
};
//____________________________________________________________________________//
#ifndef BOOST_NO_CXX11_RVALUE_REFERENCES
template<typename TestCase,typename DS>
test_case_gen<TestCase,DS>
make_test_case_gen( const_string tc_name, const_string tc_file, std::size_t tc_line, DS&& ds )
{
return test_case_gen<TestCase,DS>( tc_name, tc_file, tc_line, std::forward<DS>(ds) );
}
#else
template<typename TestCase,typename DS>
test_case_gen<TestCase,DS>
make_test_case_gen( const_string tc_name, const_string tc_file, std::size_t tc_line, DS const& ds )
{
return test_case_gen<TestCase,DS>( tc_name, tc_file, tc_line, ds );
}
#endif
//____________________________________________________________________________//
} // namespace ds_detail
// ************************************************************************** //
// ************** BOOST_DATA_TEST_CASE ************** //
// ************************************************************************** //
#define BOOST_DATA_TEST_CASE_PARAM(r, _, i, param) (BOOST_PP_CAT(Arg, i) const& param)
#define BOOST_DATA_TEST_CONTEXT(r, _, param) << BOOST_STRINGIZE(param) << " = " << param << "; "
#define BOOST_DATA_TEST_CASE_PARAMS( params ) \
BOOST_PP_SEQ_ENUM( \
BOOST_PP_SEQ_FOR_EACH_I(BOOST_DATA_TEST_CASE_PARAM, _, params)) \
/**/
#define BOOST_DATA_TEST_CASE_IMPL( arity, test_name, dataset, params ) \
struct test_name { \
template<BOOST_PP_ENUM_PARAMS(arity, typename Arg)> \
static void test_method( BOOST_DATA_TEST_CASE_PARAMS( params ) ) \
{ \
BOOST_TEST_CONTEXT( "" \
BOOST_PP_SEQ_FOR_EACH(BOOST_DATA_TEST_CONTEXT, _, params)) \
_impl(BOOST_PP_SEQ_ENUM(params)); \
} \
private: \
template<BOOST_PP_ENUM_PARAMS(arity, typename Arg)> \
static void _impl(BOOST_DATA_TEST_CASE_PARAMS( params )); \
}; \
\
BOOST_AUTO_TU_REGISTRAR( test_name )( \
boost::unit_test::data::ds_detail::make_test_case_gen<test_name>( \
BOOST_STRINGIZE( test_name ), \
__FILE__, __LINE__, \
data::make(dataset) ), \
boost::unit_test::decorator::collector::instance() ); \
\
template<BOOST_PP_ENUM_PARAMS(arity, typename Arg)> \
void test_name::_impl( BOOST_DATA_TEST_CASE_PARAMS( params ) ) \
/**/
#define BOOST_DATA_TEST_CASE_WITH_PARAMS( test_name, dataset, ... ) \
BOOST_DATA_TEST_CASE_IMPL( BOOST_PP_VARIADIC_SIZE(__VA_ARGS__), \
test_name, dataset, \
BOOST_PP_VARIADIC_TO_SEQ(__VA_ARGS__) ) \
/**/
#define BOOST_DATA_TEST_CASE_NO_PARAMS( test_name, dataset ) \
BOOST_DATA_TEST_CASE_WITH_PARAMS( test_name, dataset, sample ) \
/**/
#if BOOST_PP_VARIADICS_MSVC
#define BOOST_DATA_TEST_CASE( ... ) \
BOOST_PP_CAT( \
BOOST_PP_IIF(BOOST_PP_EQUAL(BOOST_PP_VARIADIC_SIZE(__VA_ARGS__),2), \
BOOST_DATA_TEST_CASE_NO_PARAMS, \
BOOST_DATA_TEST_CASE_WITH_PARAMS) (__VA_ARGS__), ) \
/**/
#else
#define BOOST_DATA_TEST_CASE( ... ) \
BOOST_PP_IIF(BOOST_PP_EQUAL(BOOST_PP_VARIADIC_SIZE(__VA_ARGS__),2), \
BOOST_DATA_TEST_CASE_NO_PARAMS, \
BOOST_DATA_TEST_CASE_WITH_PARAMS) (__VA_ARGS__) \
/**/
#endif
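// Usage sketch (illustrative only, not part of this header): each sample of
// the dataset yields one registered test case, e.g.
//
//   BOOST_DATA_TEST_CASE( my_test, some_dataset, sample )
//   {
//       BOOST_CHECK( sample > 0 );
//   }
//
// Here `some_dataset` stands for any expression accepted by data::make.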
} // namespace data
} // namespace unit_test
} // namespace boost
#include <boost/test/detail/enable_warnings.hpp>
#endif // BOOST_TEST_DATA_TEST_CASE_HPP_102211GER
|
{"hexsha": "6155289bc23c8651c97da9d343edf00dfc9eec0a", "size": 8159, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/test/data/test_case.hpp", "max_stars_repo_name": "ballisticwhisper/boost", "max_stars_repo_head_hexsha": "f72119ab640b564c4b983bd457457046b52af9ee", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-01-02T14:24:56.000Z", "max_stars_repo_stars_event_max_datetime": "2015-01-02T14:25:17.000Z", "max_issues_repo_path": "boost/test/data/test_case.hpp", "max_issues_repo_name": "ballisticwhisper/boost", "max_issues_repo_head_hexsha": "f72119ab640b564c4b983bd457457046b52af9ee", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2019-01-13T23:45:51.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-03T08:13:26.000Z", "max_forks_repo_path": "boost/test/data/test_case.hpp", "max_forks_repo_name": "ballisticwhisper/boost", "max_forks_repo_head_hexsha": "f72119ab640b564c4b983bd457457046b52af9ee", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2016-05-29T13:41:15.000Z", "max_forks_repo_forks_event_max_datetime": "2016-05-29T13:41:15.000Z", "avg_line_length": 41.2070707071, "max_line_length": 99, "alphanum_fraction": 0.5478612575, "num_tokens": 1600}
|
import nltk
import numpy as np
import string
import pickle
from gsdmm import MovieGroupProcess
from Chapter03.phrases import get_yelp_reviews
from Chapter04.preprocess_bbc_dataset import get_stopwords
tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
yelp_reviews_file = "Chapter03/yelp-dataset/review.json"
stopwords_file_path = "Chapter06/reviews_stopwords.csv"
stopwords = get_stopwords(stopwords_file_path)
def preprocess(text):
sentences = tokenizer.tokenize(text)
sentences = [nltk.tokenize.word_tokenize(sentence) for sentence in sentences]
sentences = [list(set(word_list)) for word_list in sentences]
sentences = [[word for word in word_list if word not in stopwords and word not in string.punctuation] for word_list in sentences]
return sentences
def top_words_by_cluster(mgp, top_clusters, num_words):
for cluster in top_clusters:
sort_dicts = sorted(mgp.cluster_word_distribution[cluster].items(), key=lambda k: k[1], reverse=True)[:num_words]
print(f'Cluster {cluster}: {sort_dicts}')
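# Note on the GSDMM hyperparameters passed to MovieGroupProcess in main():
# K is an upper bound on the number of clusters, alpha and beta (roughly)
# trade off joining an empty cluster against joining one with overlapping
# words, and n_iters is the number of Gibbs-sampling sweeps.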
def main():
reviews = get_yelp_reviews(yelp_reviews_file)
sentences = preprocess(reviews)
vocab = set(word for sentence in sentences for word in sentence)
n_terms = len(vocab)
mgp = MovieGroupProcess(K=25, alpha=0.1, beta=0.1, n_iters=30)
mgp.fit(sentences, n_terms)
pickle.dump(mgp, open("Chapter06/mgp.pkl", "wb"))
doc_count = np.array(mgp.cluster_doc_count)
print(doc_count)
top_clusters = doc_count.argsort()[-15:][::-1]
print(top_clusters)
top_words_by_cluster(mgp, top_clusters, 10)
if (__name__ == "__main__"):
main()
|
{"hexsha": "2107b9bc036f81e34f594e4ec8a367221c7195fb", "size": 1633, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter06/topic_short_texts.py", "max_stars_repo_name": "afloera/https-github.com-PacktPublishing-Python-Natural-Language-Processing-Cookbook", "max_stars_repo_head_hexsha": "ffd1bf1a8a6b74bac7e88f7b2cdc92edd4831cd9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-20T02:21:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T02:21:55.000Z", "max_issues_repo_path": "Chapter06/topic_short_texts.py", "max_issues_repo_name": "iamdank/Python-Natural-Language-Processing-Cookbook", "max_issues_repo_head_hexsha": "ffd1bf1a8a6b74bac7e88f7b2cdc92edd4831cd9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter06/topic_short_texts.py", "max_forks_repo_name": "iamdank/Python-Natural-Language-Processing-Cookbook", "max_forks_repo_head_hexsha": "ffd1bf1a8a6b74bac7e88f7b2cdc92edd4831cd9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.976744186, "max_line_length": 133, "alphanum_fraction": 0.7501530925, "include": true, "reason": "import numpy", "num_tokens": 400}
|
[STATEMENT]
lemma circ_sup_n:
"(x\<^sup>\<Omega> * y)\<^sup>\<Omega> * x\<^sup>\<Omega> = n((x\<^sup>\<star> * y)\<^sup>\<omega>) * L \<squnion> ((x\<^sup>\<star> * y)\<^sup>\<star> * x\<^sup>\<star> \<squnion> (x\<^sup>\<star> * y)\<^sup>\<star> * n(x\<^sup>\<omega>) * L)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x\<^sup>\<Omega> * y)\<^sup>\<Omega> * x\<^sup>\<Omega> = n ((x\<^sup>\<star> * y)\<^sup>\<omega>) * L \<squnion> ((x\<^sup>\<star> * y)\<^sup>\<star> * x\<^sup>\<star> \<squnion> (x\<^sup>\<star> * y)\<^sup>\<star> * n (x\<^sup>\<omega>) * L)
[PROOF STEP]
by (smt L_left_zero sup_assoc sup_commute Omega_def mult_L_sup_circ mult_assoc mult_left_dist_sup mult_right_dist_sup)
|
{"llama_tokens": 300, "file": "Correctness_Algebras_N_Semirings", "length": 1}
|
import numpy as np
from adapt.strategy.strategy import Strategy
class RandomStrategy(Strategy):
'''A strategy that randomly selects neurons from all neurons.
This strategy selects neurons from a set of all neurons in the network,
except for the neurons that located in skippable layers.
'''
def select(self, k):
        '''Select k neurons randomly.
Select k neurons randomly from a set of all neurons in the network,
except for the neurons that located in skippable layers.
Args:
k: A positive integer. The number of neurons to select.
Returns:
A list of location of the selected neurons.
'''
# Choose k neurons and return their location.
indices = np.random.choice(len(self.neurons), size=k, replace=False)
return [self.neurons[i] for i in indices]
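# Usage sketch (hypothetical; the Strategy base class is assumed to populate
# `self.neurons` with candidate locations before selection is requested):
#   strategy = RandomStrategy(...)  # construction details depend on Strategy
#   chosen = strategy.select(k=10)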
|
{"hexsha": "9830d2c134131a72af4688d25e4f0dcbc72cccd8", "size": 811, "ext": "py", "lang": "Python", "max_stars_repo_path": "adapt/strategy/random.py", "max_stars_repo_name": "kupl/adapt", "max_stars_repo_head_hexsha": "8fc024456d21ea2b43fbb2b0b61199ce6324147d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-07-06T12:18:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T09:31:56.000Z", "max_issues_repo_path": "adapt/strategy/random.py", "max_issues_repo_name": "kupl/adapt", "max_issues_repo_head_hexsha": "8fc024456d21ea2b43fbb2b0b61199ce6324147d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-11-24T07:54:06.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-27T05:54:03.000Z", "max_forks_repo_path": "adapt/strategy/random.py", "max_forks_repo_name": "kupl/adapt", "max_forks_repo_head_hexsha": "8fc024456d21ea2b43fbb2b0b61199ce6324147d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-11-24T07:45:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-06T01:56:17.000Z", "avg_line_length": 28.9642857143, "max_line_length": 73, "alphanum_fraction": 0.7151664612, "include": true, "reason": "import numpy", "num_tokens": 176}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_min_rel_ent_point_view [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_min_rel_ent_point_view&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExViewTheoryPoint).
# +
import numpy as np
from arpym.views import min_rel_entropy_normal
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_min_rel_ent_point_view-parameters)
mu_base = np.array([0.26, 0.29, 0.33]) # base expectation
sig2_base = np.array([[0.18, 0.11, 0.13],
[0.11, 0.23, 0.16],
[0.13, 0.16, 0.23]]) # base covariance
v = np.array([[1, -1, 0], [0, 1, -1]]) # view matrix
z_view = np.array([1.02, -0.5]) # point view
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_min_rel_ent_point_view-implementation-step01): Compute point view updated parameters
# +
k_, n_ = v.shape # market and view dimension
mu_upd, sig2_upd = min_rel_entropy_normal(mu_base, sig2_base, v, z_view, v,
np.zeros((k_)))
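# Sanity note (stated here, not derived): for point (equality) views v @ x = z
# on a normal base, the minimum relative entropy update coincides with Gaussian
# conditioning:
#   mu_upd   = mu_base + sig2_base @ v.T @ inv(v @ sig2_base @ v.T) @ (z_view - v @ mu_base)
#   sig2_upd = sig2_base - sig2_base @ v.T @ inv(v @ sig2_base @ v.T) @ v @ sig2_base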
|
{"hexsha": "ebff2eaf768395dcee0f3048be569ef2108251f8", "size": 1449, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/sources/s_min_rel_ent_point_view.py", "max_stars_repo_name": "dpopadic/arpmRes", "max_stars_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-04-10T13:24:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T08:20:42.000Z", "max_issues_repo_path": "scripts/sources/s_min_rel_ent_point_view.py", "max_issues_repo_name": "dpopadic/arpmRes", "max_issues_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/sources/s_min_rel_ent_point_view.py", "max_forks_repo_name": "dpopadic/arpmRes", "max_forks_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-08-13T22:02:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T17:49:12.000Z", "avg_line_length": 34.5, "max_line_length": 219, "alphanum_fraction": 0.6404416839, "include": true, "reason": "import numpy", "num_tokens": 447}
|
'''
# This code is to perform detection and recognition analysis
# Programmer: Muhammad Hafidz Misrudin, N8448141
# Method of implementation: Feature Matching using SIFT/ORB descriptors
# It requires the OpenCV library and additional (image processing) packages in order to perform the tasks
# OpenCV versions: 2.4.11 or higher and 3.0 (preferred)
# Most of the code has been consulted from OpenCV documentation.
# However, some of it has been improved and improvised throughout development.
# Therefore, it is entitled to copyright protection.
# Last developed/tested (analysis): Friday 23rd October 2015
# Last updated: Monday 2nd November 2015
'''
import numpy as np
import cv2
from drawMatches import *
img1 = cv2.imread('/home/muhammad/Desktop/Final/BF_SIFT/crop/ref_temp/type12/t12_1.jpg', 0) # Cropped image - ensure grayscale
img2 = cv2.imread('/home/muhammad/Desktop/Final/BF_SIFT/img/type12/t12_1.jpg', 0) # Original image - ensure grayscale
img3 = cv2.imread('/home/muhammad/Desktop/Final/BF_SIFT/img/type12/t12_1.jpg')
height, weight, channels = img3.shape
# Need to be manually changed (hardcode)
color = 'W' # Colors: R, B, Y, G, W
hsv_img = cv2.cvtColor(img3, cv2.COLOR_BGR2HSV)
'''
Collections of HSV threshold arrays for the respective colours.
For each colour, one array gives the lower bound and one the upper bound
of the threshold range.
'''
lower_red = np.array([170,150,50],dtype=np.uint8)
upper_red = np.array([179,255,255],dtype=np.uint8)
lower_blue = np.array([100,100,100], dtype=np.uint8)
upper_blue = np.array([120,255,255], dtype=np.uint8)
lower_yellow = np.array([20, 80, 80], dtype=np.uint8)
upper_yellow = np.array([40, 255, 255], dtype=np.uint8)
lower_green = np.array([60, 55, 0], dtype=np.uint8)
upper_green = np.array([100, 255, 120], dtype=np.uint8)
sensitivity = 30
#sensitivity = 50
lower_white = np.array([0,0,255-sensitivity], dtype=np.uint8)
upper_white = np.array([255,sensitivity,255], dtype=np.uint8)
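# Note: OpenCV stores hue in [0,179] rather than [0,359], which is why the red
# range tops out at 179; white is keyed on low saturation / high value instead.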
if (color == 'R'):
lower_c = lower_red
upper_c = upper_red
if (color == 'B'):
lower_c = lower_blue
upper_c = upper_blue
if (color == 'Y'):
lower_c = lower_yellow
upper_c = upper_yellow
if (color == 'G'):
lower_c = lower_green
upper_c = upper_green
if (color == 'W'):
lower_c = lower_white
upper_c = upper_white
frame_threshed = cv2.inRange(hsv_img, lower_c, upper_c)
imgray = frame_threshed
ret,thresh = cv2.threshold(imgray,127,255,0)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt = contours[max_index]
######################################## Detection Analysis Section ################################################
####################################################################################################################
x,y,w,h = cv2.boundingRect(cnt)
print x,y,w,h
x1 = x
y1 = y
x2 = x+w
y2 = y+h
print "(X1,Y1)", (x1, y1)
print "(X2,Y2)",(x2, y2)
cv2.rectangle(img3,(x,y),(x+w,y+h),(0,255,0),3)
cv2.imshow("Image Detection Result",img3)
####################################################################################################################
######################################## Detection Analysis Section ################################################
######################################## Recognition Analysis Section ################################################
####################################################################################################################
# Create ORB detector with 1000 keypoints with a scaling pyramid factor of 1.2
orb = cv2.ORB(1000, 1.2)
#orb = cv2.ORB()
#sift = cv2.SIFT(1000, 1.2)
# Alternative: Detect keypoints of original image (using SIFT object detector)
#(kp1, des1) = sift.detectAndCompute(img1,None)
#(kp2, des2) = sift.detectAndCompute(img2,None)
# Detect keypoints of original image (using orb object detector)
(kp1,des1) = orb.detectAndCompute(img1, None)
# Detect keypoints of cropped image
(kp2,des2) = orb.detectAndCompute(img2, None)
# Create (Brute Force) matcher
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Do matching
matches = bf.match(des1,des2)
#print matches
# Sort the matches based on distance. The Least distance is better
matches = sorted(matches, key=lambda val: val.distance)
# Show only the top 10 (line) matches
out = drawMatches(img1, kp1, img2, kp2, matches[:10], img3, height, weight, x1, y1, x2, y2)
#out = drawMatches(img1, kp1, img2, kp2, matches[:10])
##cv2.waitKey(0)
##cv2.destroyWindow()
####################################################################################################################
######################################## Recognition Analysis Section ################################################
|
{"hexsha": "a146407a2e3ef818a5e5aef2a63cf0786e1f30d2", "size": 4885, "ext": "py", "lang": "Python", "max_stars_repo_path": "FeatureMatching_SIFT/featureMatching.py", "max_stars_repo_name": "MuhammadHafidzMisrudin/python-opencv-finalyearproject", "max_stars_repo_head_hexsha": "94f342554eba900f5d11245c9c689a9e76e0dec5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FeatureMatching_SIFT/featureMatching.py", "max_issues_repo_name": "MuhammadHafidzMisrudin/python-opencv-finalyearproject", "max_issues_repo_head_hexsha": "94f342554eba900f5d11245c9c689a9e76e0dec5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FeatureMatching_SIFT/featureMatching.py", "max_forks_repo_name": "MuhammadHafidzMisrudin/python-opencv-finalyearproject", "max_forks_repo_head_hexsha": "94f342554eba900f5d11245c9c689a9e76e0dec5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9236111111, "max_line_length": 126, "alphanum_fraction": 0.6069600819, "include": true, "reason": "import numpy", "num_tokens": 1233}
|
#!/usr/bin/python3
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "astrolove"))
sys.path.append("/usr/lib/astrolove")
import ASI
import time
import scipy.misc
print((ASI.list()))
c = ASI.Camera(0)
print((c.prop()))
c.set({'width': 640, 'height': 480, 'start_x': 320, 'start_y': 240})
s = c.stat()
assert s['width'] == 640
assert s['height'] == 480
assert s['start_x'] == 320
assert s['start_y'] == 240
# Set image type 1 (only applicable to colour cameras):
c.set({'type': 1})
c.start()
print((c.stat()))
for i in range(5):
time.sleep(0.5)
s = c.stat()
im = c.get_image()
print(s['captured'], s['vals'][7]/10.0, s['width'], s['height'], s['type'], im.shape)
c.set({'start_x': 320 - 20 * (i + 1)})
scipy.misc.imsave('/tmp/yaaca_test_%d.jpg' % i, im)
s = c.stat()
v = s['vals']
a = s['auto']
v[0] = 33
a[0] = False
v[1] = 111111
a[1] = False
c.set({'vals': v, 'auto': a, 'start_x': 0, 'start_y': 0})
time.sleep(1)
s = c.stat()
print(s)
assert s['start_x'] == 0
assert s['start_y'] == 0
assert s['vals'][0] == 33
assert not s['auto'][0]
assert s['vals'][1] == 111111
assert not s['auto'][1]
c.stop()
c.close()
|
{"hexsha": "a11bc2c2eaea017516655cc8dbac8bd51d95538a", "size": 1134, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "chripell/yaaca", "max_stars_repo_head_hexsha": "9048ca5dc458f9a7dde9ca745f057f7499b19972", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-23T19:56:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-23T19:56:17.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "chripell/yaaca", "max_issues_repo_head_hexsha": "9048ca5dc458f9a7dde9ca745f057f7499b19972", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "chripell/yaaca", "max_forks_repo_head_hexsha": "9048ca5dc458f9a7dde9ca745f057f7499b19972", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.8076923077, "max_line_length": 89, "alphanum_fraction": 0.594356261, "include": true, "reason": "import scipy", "num_tokens": 414}
|
import scipy as SP
from . import parMixedForest as parUtils
import random
def checkMaf(X, maf=None):
if maf==None:
maf = 1.0/X.shape[0]
Xmaf = (X>0).sum(axis=0)
Iok = (Xmaf>=(maf*X.shape[0]))
return SP.where(Iok)[0]
def scale_K(K, verbose=False):
"""scale covariance K such that it explains unit variance"""
c = SP.sum((SP.eye(len(K)) - (1.0 / len(K)) * SP.ones(K.shape)) * SP.array(K))
scalar = (len(K) - 1) / c
if verbose:
print(('Kinship scaled by: %0.4f' % scalar))
K = scalar * K
return K
def update_Kernel(unscaled_kernel, X_upd_in, scale=True):
#filter and scale SNPs
X_upd = X_upd_in.copy()
X_upd = X_upd[:,checkMaf(X_upd)]
X_upd -= X_upd.mean(axis=0)
X_upd /= X_upd.std(axis=0)
X_upd = X_upd.T
#update kernel
kernel_out = unscaled_kernel.copy()
kernel_out -= SP.dot(X_upd.T, X_upd)
if scale:
return scale_K(kernel_out)
else:
return kernel_out
def estimateKernel(X, msample=None, maf=None, scale=True):
#1. maf filter
Xpop = X.copy()
Xpop = Xpop[:,checkMaf(X, maf)]
#2. sampling of predictors
    if msample is not None:
msample = SP.random.permutation(X.shape[1])[:msample]
Xpop = Xpop[:,msample]
Xpop -= Xpop.mean(axis=0)
Xpop /= Xpop.std(axis=0)
Xpop = Xpop.copy().T
Xpop = SP.array(Xpop, dtype='float')
Kpop = SP.dot(Xpop.T,Xpop)
if scale:
return scale_K(Kpop)
else:
return Kpop
def k_fold_cross_validation(items, k, randomize=True, seed=True):
if randomize:
if seed:
            random.seed(10) # make sure we get similar partitions across methods
items = list(items)
random.shuffle(items)
slices = [items[i::k] for i in range(k)]
for i in range(k):
validation = slices[i]
training = [item
for s in slices if s is not validation
for item in s]
yield validation
def crossValidationScheme(folds, n):
validationList = []
for validation in k_fold_cross_validation(list(range(n)), folds):
indexes = SP.ones(n) == 0
indexes[validation] = True
validationList.append(indexes)
return validationList
def crossValidate(y, X, K=None, folds=3, model=None, returnModel=False):
errors = SP.empty(folds)
n = y.shape[0]
indexes = crossValidationScheme(folds,n)
predictions = SP.empty(y.shape)
alpha = []
alphas = []
msePath = []
for cvRun in SP.arange(len(indexes)):
testIndexes = indexes[cvRun]
yTrain = y[~testIndexes]
XTrain = X[~testIndexes]
        if K is None:
model.fit(XTrain, yTrain)
prediction = SP.reshape(model.predict(X[testIndexes]), (-1,1))
else: # models having population structure
KTrain = K[~testIndexes]
KTrain = KTrain[:,~testIndexes]
KTest=K[testIndexes]
KTest=KTest[:,~testIndexes]
model.reset()
model.kernel = KTrain #TODO: make nice integration
model.fit(XTrain, yTrain)
prediction = SP.reshape(model.predict(X[testIndexes], k=KTest), (-1,1))
predictions[testIndexes] = prediction
errors[cvRun] = predictionError(y[testIndexes], prediction)
print(('prediction error right now is', errors[cvRun]))
if returnModel:
alpha.append(model.alpha)
alphas.append(model.alphas)
msePath.append(model.mse_path)
if returnModel:
return indexes, predictions, errors, alpha, alphas, msePath
else:
return indexes, predictions, errors
def predictionError(yTest, yPredict):
return ((yTest - yPredict)**2).sum()/SP.float_(yTest.shape[0])
def getQuadraticKernel(X, d=.01):
K = SP.empty((X.shape[0], X.shape[0]))
for i in SP.arange(X.shape[0]):
for j in SP.arange(X.shape[0]):
K[i,j] = SP.exp(-0.5/d*(X[i]-X[j])**2)
return scale_K(K)
def generate_linear_data(n_max, n_step, ssv_g, var):
x = SP.arange(0,n_max,n_step).reshape(-1,1)
y = SP.zeros_like(x).reshape(-1,1)*0.0
X = convertToBinaryPredictor(x)
Xbg = (SP.random.rand(X.shape[0], X.shape[1]) < .5) * 1.0
weights = var*SP.random.randn(2,1)
y += X[:,3:4] * weights[0,:]
Xbg[:,3:4] = X[:,3:4]
l = X[:,1:2] * X[:,2:3]
Xbg[:,1:2] = X[:,1:2]
Xbg[:,2:3] = X[:,2:3]
y += l * weights[1,:]
yTr = y.copy()
ssv_v = 1.0-ssv_g
if ssv_g > 0.0:
ldelta = SP.log(ssv_v/SP.float_(ssv_g))
K = scale_K(getQuadraticKernel(x, d=20))
else:
ldelta = None
K = SP.eye(y.shape[0])
y += SP.random.multivariate_normal(SP.zeros(K.shape[0]),ssv_g*K+ssv_v*SP.eye(K.shape[0])).reshape(-1,1)
return Xbg, x, y, yTr, K, ldelta
def convertToBinaryPredictor(x):
arr = []
    l = 0
    for i in SP.arange(x.size):
        arr.append(bin(x[i,0])[2:])
        l = max(l, arr[-1].__len__())  # running maximum of the binary-string lengths
X = SP.zeros((x.size,l))
for i in SP.arange(x.size):
head0=l-arr[i].__len__()
for j in SP.arange(head0):
X[i,j] = 0
for j in SP.arange(arr[i].__len__()):
X[i,head0+j] = SP.int16(arr[i][j])
return X
# generates data sets to test the continous version of the mixed forest
def lin_data_cont_predictors(n=100, m=1):
X = SP.random.randn(n,m)
beta = SP.random.randn(m,1)
beta[1:]=0
y = SP.dot(X,beta)
return X, y
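# Illustrative usage sketch (an addition, not original project code): MeanModel
# below is a made-up stand-in for the fit/predict interface crossValidate
# expects; the function is defined but never called by this module.
def _demo_cross_validation():
    class MeanModel(object):
        def fit(self, X, y):
            self.mu = y.mean()
        def predict(self, X):
            return SP.repeat(self.mu, X.shape[0])
    # synthetic regression data without population structure (K=None)
    X, y = lin_data_cont_predictors(n=30, m=3)
    indexes, predictions, errors = crossValidate(y, X, K=None, folds=3,
                                                 model=MeanModel())
    print(('mean CV error:', errors.mean()))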
|
{"hexsha": "486d4e7a485c30f9cb591177043fc739a2307854", "size": 5457, "ext": "py", "lang": "Python", "max_stars_repo_path": "svca_limix/limix/modules/mixedForestUtils.py", "max_stars_repo_name": "DenisSch/svca", "max_stars_repo_head_hexsha": "bd029c120ca8310f43311253e4d7ce19bc08350c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2015-01-20T20:46:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-27T14:40:35.000Z", "max_issues_repo_path": "svca_limix/limix/modules/mixedForestUtils.py", "max_issues_repo_name": "DenisSch/svca", "max_issues_repo_head_hexsha": "bd029c120ca8310f43311253e4d7ce19bc08350c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2015-02-01T22:35:17.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-07T08:18:23.000Z", "max_forks_repo_path": "svca_limix/limix/modules/mixedForestUtils.py", "max_forks_repo_name": "DenisSch/svca", "max_forks_repo_head_hexsha": "bd029c120ca8310f43311253e4d7ce19bc08350c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2015-02-01T17:26:50.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-13T07:06:16.000Z", "avg_line_length": 32.2899408284, "max_line_length": 107, "alphanum_fraction": 0.5865860363, "include": true, "reason": "import scipy", "num_tokens": 1616}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 21:35:37 2020
@author: inderpreet
this code plots the PDFs of the predictions and of the errors of the best
estimate (median) for the ICI channels
"""
import matplotlib.pyplot as plt
import numpy as np
import stats as S
from ici_mwi import iciData
plt.rcParams.update({'font.size': 32})
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
from typhon.retrieval.qrnn import set_backend, QRNN
set_backend("pytorch")
#%% input parameters
depth = 4
width = 128
quantiles = np.array([0.002, 0.03, 0.16, 0.5, 0.84, 0.97, 0.998])
batchSize = 128
targets = ['I1V', 'I2V','I3V']
#targets = ['I2V']
test_file = "TB_ICI_test.nc"
output_file = 'Figures/error_distribution_QRNN-single.pdf'
binstep = 0.5
bins = np.arange(-20, 15, binstep)
iq = np.argwhere(quantiles == 0.5)[0,0]
#%% plot PDF of the predictions for ICI channels
N = len(targets)
fig, ax = plt.subplots(1, N, figsize = [N*10, 10])
plt.subplots_adjust(wspace = 0.001)
bins = np.arange(200, 295, 1)
for i,target in enumerate(targets):
inChannels = np.array([target, 'I5V' , 'I6V', 'I7V', 'I8V', 'I9V', 'I10V', 'I11V'])
data = iciData(test_file,
inChannels, target,
batch_size = batchSize)
i183, = np.argwhere(inChannels == target)[0]
# read QRNN
file = 'qrnn_ici_%s_%s_%s_single.nc'%(depth, width, target)
print (file)
qrnn = QRNN.load(file)
y_pre, y_prior, y0, y, y_pos_mean = S.predict(data, qrnn, add_noise = True)
h_prior = np.histogram(y_prior[:, i183], bins, density = True)
h0 = np.histogram(y0, bins, density = True)
h_p = np.histogram(y_pre[:, iq], bins, density = True)
center = (bins[:-1] + bins[1:])/2
ax[i].plot(center, h_prior[0], linewidth = 2.5, color = 'k', label = "All-sky")
ax[i].plot(center, h0[0], linewidth = 2.5, color = 'r', label = "Clear-sky")
ax[i].plot(center, h_p[0], linewidth = 2.5, color = 'b', label = "Predicted")
ax[i].set_yscale('log')
# ax[i].set_xlabel('TB [K]')
ax[i].xaxis.set_minor_locator(MultipleLocator(5))
ax[i].grid(which = 'both', alpha = 0.2)
ax[i].set_title('Channel:%s'%target, fontsize = 32)
# ax[i].set(ylim = [0, 1])
ax[0].set_ylabel('Occurence frequency [#/K]')
ax[1].set_xlabel('TB[K]')
ax[1].set_yticklabels([])
ax[2].set_yticklabels([])
(ax[2].legend(prop={'size': 32}, frameon = False, bbox_to_anchor=(0.1, -0.12),ncol=3))
fig.savefig('Figures/PDF_predictions_ICI.pdf', bbox_inches = 'tight')
fig.savefig('Figures/PDF_predictions_ICI.png', bbox_inches = 'tight')
|
{"hexsha": "0660d348b216fd9426ac41c803af4ce7879aeb47", "size": 4051, "ext": "py", "lang": "Python", "max_stars_repo_path": "ICI/plot_pdf_pred_ici.py", "max_stars_repo_name": "SEE-MOF/QRNN-CloudCorrection", "max_stars_repo_head_hexsha": "ba58f1f4f70ec0f7264d5e98d80552d2fba1bb4d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ICI/plot_pdf_pred_ici.py", "max_issues_repo_name": "SEE-MOF/QRNN-CloudCorrection", "max_issues_repo_head_hexsha": "ba58f1f4f70ec0f7264d5e98d80552d2fba1bb4d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ICI/plot_pdf_pred_ici.py", "max_forks_repo_name": "SEE-MOF/QRNN-CloudCorrection", "max_forks_repo_head_hexsha": "ba58f1f4f70ec0f7264d5e98d80552d2fba1bb4d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-09T10:47:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-09T10:47:40.000Z", "avg_line_length": 33.2049180328, "max_line_length": 115, "alphanum_fraction": 0.4233522587, "include": true, "reason": "import numpy", "num_tokens": 924}
|
import numpy as np
import struct
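# Note on the offsets used below: tree_bricks files are Fortran unformatted,
# so every record payload is framed by two 4-byte length markers; that is why
# each read skips an extra 8 bytes around its payload.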
def load_header(brick_data, double=False):
offset = 4
if double:
nbytes = 8
dtype_float="d"
else:
nbytes = 4
dtype_float="f"
nbodies = struct.unpack("i", brick_data[4:8])[0]
offset += 12
massp = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
aexp = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
omegat = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
age = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
halnum = struct.unpack("i", brick_data[offset:offset+4])[0]
subnum = struct.unpack("i", brick_data[offset+4:offset+8])[0]
return offset+16, halnum, subnum
def load_a_halo(brick_data, offset, dd, is_gal=True, double=False):
if double:
nbytes = 8
dtype_float="d"
else:
nbytes = 4
dtype_float="f"
npart = struct.unpack("i", brick_data[offset:offset+4])[0]
dd["np"]=npart
offset += 12 # 12 = 4 + 8
ids = struct.unpack_from("<{}i".format(npart), brick_data[offset:offset+4*npart])
offset += 4*npart + 8
dd["id"] = struct.unpack("i", brick_data[offset:offset+4])[0]
offset += 24
dd["level"],dd["host"],dd["sub"],dd["nsub"],dd["nextsub"]\
= struct.unpack_from("<5i", brick_data[offset:offset+20])
offset += 28
dd["m"] = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
dd["x"],dd["y"],dd["z"] = struct.unpack_from("<3"+dtype_float, brick_data[offset:offset+3*nbytes])
offset += 8 + 3*nbytes
dd["vx"],dd["vy"],dd["vz"] = struct.unpack_from("<3"+dtype_float, brick_data[offset:offset+3*nbytes])
offset += 8 + 3*nbytes
dd["ax"],dd["ay"],dd["az"] = struct.unpack_from("<3"+dtype_float, brick_data[offset:offset+3*nbytes])
offset += 8 + 3*nbytes
radius= struct.unpack_from("<4"+dtype_float, brick_data[offset:offset+4*nbytes])
dd["r"],dd["abc"] = radius[0], radius[1:]
offset += 8 + 4*nbytes
dd["energy"] = struct.unpack_from("<3"+dtype_float, brick_data[offset:offset+3*nbytes])
offset += 8 + 3*nbytes
dd["sp"] = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
if is_gal:
dd["sig"], dd["sigbulge"], dd["mbulge"]\
= struct.unpack_from("<3"+dtype_float, brick_data[offset:offset+3*nbytes])
offset += 8+ 3*nbytes
dd["mvir"],dd["rvir"],dd["tvir"],dd["cvel"]\
= struct.unpack_from("<4"+dtype_float, brick_data[offset:offset+4*nbytes])
offset += 8+4*nbytes
dd["p_rho"],dd["p_c"] = struct.unpack_from("<2"+dtype_float, brick_data[offset:offset+2*nbytes])
offset += 8+2*nbytes
if is_gal:
g_nbin = struct.unpack("i", brick_data[offset:offset+4])[0]
dd["g_nbin"]=g_nbin
offset += 12
dd["g_rr"] = struct.unpack_from("<{}".format(g_nbin)+dtype_float, brick_data[offset:offset+g_nbin*nbytes])
offset += 8 + g_nbin*nbytes
dd["g_rho"] = struct.unpack_from("<{}".format(g_nbin)+dtype_float, brick_data[offset:offset+g_nbin*nbytes])
offset += 8 + g_nbin*nbytes
return offset, ids
def load_hm(fn, double=True, is_gal=True, return_idlists=[]):
"""
Return catalog in numpy array, and list of member particles in a list.
>>> catalog, member_ids = load_hm("TREE_DM/tree_bricks500", is_gal=False, return_idlist=[1,3,5,7])
Paramters
---------
double : logical
if True, assume real are in double precision
is_gal : logical
If True, read GalaxyMaker output. If False, read HaloMaker output.
return_idlists: sequence(list, array, range, tuple)
Give halo/galaxy ids in a list(sequence) to retrieve member particle ID of the halos.
NOTE
----
Reading tree_bricks in Fortranis 10x faster.
But, maybe it's OK to be a bit slow. NH catalogues are small, anyways.
"""
if double:
dtype_float = "<f8"
else:
dtype_float = "<f4"
dtype_halo = [('np', '<i4'), ('id', '<i4'), ('level', '<i4'),
('host', '<i4'), ('sub', '<i4'), ('nsub', '<i4'),
('nextsub', '<i4'),
('m', dtype_float), ('mvir', dtype_float),
('r', dtype_float), ('rvir', dtype_float),
('tvir', dtype_float), ('cvel', dtype_float),
('x', dtype_float), ('y', dtype_float), ('z', dtype_float),
('vx', dtype_float), ('vy', dtype_float), ('vz', dtype_float),
('ax', dtype_float), ('ay', dtype_float), ('az', dtype_float),
('sp', dtype_float), ('idx', '<i4'),
('p_rho', dtype_float),('p_c', dtype_float),
('energy', '<f8', (3,)), ('abc', '<f8', (3,))]
if is_gal:
dtype_halo += [('sig', dtype_float), ('sigbulge', dtype_float),
('mbulge', dtype_float), ('hosthalo', '<i4'),
('g_nbin', '<i4'), ('g_rr', dtype_float, (100,)),
('g_rho', dtype_float, (100,))]
idlists=[]
f = open(fn, "rb")
brick_data = f.read()
offset, halnum, subnum = load_header(brick_data, double=double)
gcat = np.zeros(halnum+subnum, dtype=dtype_halo)
for i in range(halnum+subnum):
offset,_ = load_a_halo(brick_data, offset, gcat[i], is_gal=is_gal, double=double)
if gcat[i]["id"] in return_idlists:
idlists.append(_)
f.close()
return gcat, idlists
|
{"hexsha": "20605e5821481f371f58f1260baf237fd327d9b5", "size": 5619, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyram/etc/rd_hal_pure.py", "max_stars_repo_name": "Hoseung/pyRamAn", "max_stars_repo_head_hexsha": "f9386fa5a9f045f98590039988d3cd50bc488dc2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-25T16:11:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-25T16:11:56.000Z", "max_issues_repo_path": "pyram/etc/rd_hal_pure.py", "max_issues_repo_name": "Hoseung/pyRamAn", "max_issues_repo_head_hexsha": "f9386fa5a9f045f98590039988d3cd50bc488dc2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-02-17T13:44:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-25T15:35:05.000Z", "max_forks_repo_path": "pyram/etc/rd_hal_pure.py", "max_forks_repo_name": "Hoseung/pyRamAn", "max_forks_repo_head_hexsha": "f9386fa5a9f045f98590039988d3cd50bc488dc2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-25T16:11:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-25T16:11:56.000Z", "avg_line_length": 40.4244604317, "max_line_length": 115, "alphanum_fraction": 0.5876490479, "include": true, "reason": "import numpy", "num_tokens": 1652}
|
import numpy
import scipy.optimize
MAXIMUM_REPRESENTABLE_FINITE_FLOAT = numpy.finfo(numpy.float64).max
class MultistartMaximizer(object):
def __init__(self, optimizer, num_multistarts=1, log_sample=False):
assert not isinstance(optimizer, MultistartMaximizer)
self.optimizer = optimizer
assert num_multistarts >= 1
self.num_multistarts = num_multistarts
self.log_sample = log_sample
def optimize(self, **kwargs):
all_starts = self.optimizer.domain.generate_quasi_random_points_in_domain(self.num_multistarts, self.log_sample)
best_point = None
best_function_value = -numpy.inf
for point in all_starts:
try:
self.optimizer.objective_function.current_point = point
self.optimizer.optimize(**kwargs)
except numpy.linalg.LinAlgError:
function_value = float('nan')
success = False
else:
# The negation here is required because the optimizer decorator has already negated the value
function_value = -self.optimizer.optimization_results.fun
success = self.optimizer.optimization_results.success
end_point = self.optimizer.objective_function.current_point
if not self.optimizer.domain.check_point_acceptable(end_point):
function_value = float('nan')
success = False
if best_point is None or (success and function_value > best_function_value):
if best_point is None and not success:
best_point = point
continue
best_point = end_point
best_function_value = function_value if not numpy.isnan(function_value) else best_function_value
return best_point
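# Sign convention used throughout this module: the scipy routines minimize, so
# objective values and gradients are negated on the way in, and the stored
# optimization_results.fun is negated back when restarts are compared above.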
class LBFGSBOptimizer(object):
def __init__(self, domain, optimizable, approx_grad=False):
self.domain = domain
self.objective_function = optimizable
self.optimization_results = None
self.approx_grad = approx_grad
assert self.objective_function.differentiable or self.approx_grad
@property
def dim(self):
return self.domain.dim
def _domain_as_array(self):
return numpy.array([(interval.min, interval.max) for interval in self.domain.get_bounding_box()])
def joint_function_gradient_eval(self, **kwargs):
def decorated(point):
if numpy.any(numpy.isnan(point)):
return numpy.inf, numpy.zeros((self.dim,))
self.objective_function.current_point = point
value = -self.objective_function.compute_objective_function(**kwargs)
gradient = -self.objective_function.compute_grad_objective_function(**kwargs)
assert numpy.isfinite(value) and gradient.shape == (self.dim, )
return value, gradient
return decorated
def _scipy_decorator(self, func, **kwargs):
def decorated(point):
self.objective_function.current_point = point
return -func(**kwargs)
return decorated
def optimize(self, **kwargs):
self.optimization_results = self._optimize_core(**kwargs)
point = self.optimization_results.x
self.objective_function.current_point = point
def _optimize_core(self, **kwargs):
options = {'eps': 1.0e-8, 'gtol': 1.0e-4, 'maxcor': 10, 'maxfun': 15000, 'ftol': 1e-4}
if self.approx_grad:
return scipy.optimize.minimize(
fun=self._scipy_decorator(self.objective_function.compute_objective_function, **kwargs),
x0=self.objective_function.current_point.flatten(),
method='L-BFGS-B',
bounds=self._domain_as_array(),
options=options,
)
else:
options.pop('eps')
return scipy.optimize.minimize(
fun=self.joint_function_gradient_eval(**kwargs),
x0=self.objective_function.current_point.flatten(),
method='L-BFGS-B',
jac=True,
bounds=self._domain_as_array(),
options=options,
)
|
{"hexsha": "e2569a594423439898740792eaaecb2b148de8f1", "size": 3761, "ext": "py", "lang": "Python", "max_stars_repo_path": "lookahead/model/scalar_optimization.py", "max_stars_repo_name": "ericlee0803/lookahead_release", "max_stars_repo_head_hexsha": "373295f11be81d82b1c69eeadeec32ae96f26b1f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-06-17T20:25:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-24T17:21:59.000Z", "max_issues_repo_path": "lookahead/model/scalar_optimization.py", "max_issues_repo_name": "ericlee0803/lookahead_release", "max_issues_repo_head_hexsha": "373295f11be81d82b1c69eeadeec32ae96f26b1f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lookahead/model/scalar_optimization.py", "max_forks_repo_name": "ericlee0803/lookahead_release", "max_forks_repo_head_hexsha": "373295f11be81d82b1c69eeadeec32ae96f26b1f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1495327103, "max_line_length": 116, "alphanum_fraction": 0.712044669, "include": true, "reason": "import numpy,import scipy", "num_tokens": 839}
|
//==============================================================================
// Copyright 2003 - 2012 LASMEA UMR 6602 CNRS/Univ. Clermont II
//          Copyright 2012 - 2012   LRI    UMR 12623 CNRS/Univ Paris Sud XI
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//==============================================================================
#ifndef BOOST_SIMD_CONSTANT_CONSTANTS_FACT_12_HPP_INCLUDED
#define BOOST_SIMD_CONSTANT_CONSTANTS_FACT_12_HPP_INCLUDED
#include <boost/simd/include/functor.hpp>
#include <boost/simd/constant/register.hpp>
#include <boost/simd/constant/hierarchy.hpp>
#include <boost/config.hpp>
#ifdef BOOST_MSVC
#pragma warning(push)
#pragma warning(disable: 4310) // truncation of constant
#endif
namespace boost { namespace simd
{
namespace tag
{
/*!
@brief Fact_12 generic tag
Represents the Fact_12 constant in generic contexts.
@par Models:
Hierarchy
**/
BOOST_SIMD_CONSTANT_REGISTER( Fact_12,double
, 479001600,0x4de467e0, 0x41bc8cfc00000000ll
)
}
namespace ext
{
template<class Site>
BOOST_FORCEINLINE generic_dispatcher<tag::Fact_12, Site> dispatching_Fact_12(adl_helper, boost::dispatch::meta::unknown_<Site>, ...)
{
return generic_dispatcher<tag::Fact_12, Site>();
}
template<class... Args>
struct impl_Fact_12;
}
/*!
Generates 12! that is 479001600
@par Semantic:
@code
T r = Fact_12<T>();
@endcode
is similar to:
@code
T r = T(479001600);
@endcode
**/
BOOST_SIMD_CONSTANT_IMPLEMENTATION(boost::simd::tag::Fact_12, Fact_12)
} }
#ifdef BOOST_MSVC
#pragma warning(pop)
#endif
#include <boost/simd/constant/common.hpp>
#endif
|
{"hexsha": "caeef2c89b16792f5dde8a2f2fe0a9f8065aa3a2", "size": 1915, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "modules/boost/simd/base/include/boost/simd/constant/constants/fact_12.hpp", "max_stars_repo_name": "feelpp/nt2", "max_stars_repo_head_hexsha": "4d121e2c7450f24b735d6cff03720f07b4b2146c", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 34.0, "max_stars_repo_stars_event_min_datetime": "2017-05-19T18:10:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-04T02:18:13.000Z", "max_issues_repo_path": "modules/boost/simd/base/include/boost/simd/constant/constants/fact_12.hpp", "max_issues_repo_name": "feelpp/nt2", "max_issues_repo_head_hexsha": "4d121e2c7450f24b735d6cff03720f07b4b2146c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/boost/simd/base/include/boost/simd/constant/constants/fact_12.hpp", "max_forks_repo_name": "feelpp/nt2", "max_forks_repo_head_hexsha": "4d121e2c7450f24b735d6cff03720f07b4b2146c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-02T12:59:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-31T12:46:14.000Z", "avg_line_length": 25.5333333333, "max_line_length": 135, "alphanum_fraction": 0.6088772846, "num_tokens": 466}
|
import torchvision.transforms as T
import numpy as np
import cv2
from PIL import Image
def visualize_depth(depth, cmap=cv2.COLORMAP_JET):
"""
depth: (H, W)
"""
x = depth.cpu().numpy()
x = np.nan_to_num(x) # change nan to 0
mi = np.min(x) # get minimum depth
ma = np.max(x)
x = (x-mi)/(ma-mi+1e-8) # normalize to 0~1
x = (255*x).astype(np.uint8)
x_ = Image.fromarray(cv2.applyColorMap(x, cmap))
x_ = T.ToTensor()(x_) # (3, H, W)
return x_
def visualize_mask(mask, cmap=cv2.COLORMAP_BONE):
"""
mask: (H, W) in 0~1
"""
x = mask.cpu().numpy()
x = (255*x).astype(np.uint8)
x_ = Image.fromarray(cv2.applyColorMap(x, cmap))
x_ = T.ToTensor()(x_) # (3, H, W)
return x_
def blend_images(img1, img2, alpha):
"""
alpha blend two images: img1 * alpha + img2 * (1-alpha)
img1 and img2: (3, H, W)
"""
img1 = img1.permute(1, 2, 0).cpu().numpy()
img1 = (255*img1).astype(np.uint8)
img2 = img2.permute(1, 2, 0).cpu().numpy()
img2 = (255*img2).astype(np.uint8)
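    # note: the last argument to cv2.addWeighted is an additive scalar (gamma),
    # so 2.2 slightly brightens the blend; it is not gamma correction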
blend = cv2.addWeighted(img1, alpha, img2, 1-alpha, 2.2)
x_ = Image.fromarray(blend)
x_ = T.ToTensor()(x_) # (3, H, W)
return x_
|
{"hexsha": "d9c07f4ebac70ea25797e7d1119966ed2afc7033", "size": 1204, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/visualization.py", "max_stars_repo_name": "wx-b/nsff_pl", "max_stars_repo_head_hexsha": "b9640ca1d416438bf4dfefa5be0524ad2fd1b27e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 103, "max_stars_repo_stars_event_min_datetime": "2021-06-07T10:09:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T08:58:35.000Z", "max_issues_repo_path": "utils/visualization.py", "max_issues_repo_name": "wx-b/nsff_pl", "max_issues_repo_head_hexsha": "b9640ca1d416438bf4dfefa5be0524ad2fd1b27e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-06-09T17:35:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-12T17:23:17.000Z", "max_forks_repo_path": "utils/visualization.py", "max_forks_repo_name": "wx-b/nsff_pl", "max_forks_repo_head_hexsha": "b9640ca1d416438bf4dfefa5be0524ad2fd1b27e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2021-06-09T02:00:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T19:52:42.000Z", "avg_line_length": 27.3636363636, "max_line_length": 60, "alphanum_fraction": 0.5838870432, "include": true, "reason": "import numpy", "num_tokens": 432}
|
# https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_classification.ipynb
from __future__ import absolute_import, division, print_function
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import os
import numpy as np
import matplotlib.pyplot as plt
# print('Tensorflow version:', tf.__version__)
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("<{}> {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(True)
plt.xticks(np.arange(10), class_names, rotation='vertical')
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
# ensure text is not cut off
plt.tight_layout()
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
# Returns a short sequential model
def create_model():
# setup the layers
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
# compile the model
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
return model
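# (sparse_categorical_crossentropy is used above because the labels are raw
# integer class ids, not one-hot vectors)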
#######################
#######################
## Retrieve the Data ##
#######################
#######################
# import the MNIST dataset
mnist = keras.datasets.mnist
# specify classification values
class_names = ['0','1','2','3','4','5','6','7','8','9']
# labels are an array of integers, 0 to 9, corresponding with `class_names`
# images are 28x28 NumPy arrays
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
######################
######################
## Explore the Data ##
######################
######################
# print('Train set:')
# print(train_images.shape)
# print(len(train_labels))
# print(train_labels)
# # ^ training set has 60,000 images
# # and 60,000 corresponding labels
# print('Test set:')
# print(test_images.shape)
# print(len(test_labels))
# # ^ test set has 10,000 images
# # and 10,000 corresponding labels
# # display a training image
# plt.figure()
# plt.imshow(train_images[0])
# plt.colorbar()
# plt.grid(True)
# plt.show()
########################
########################
## Normalize the Data ##
########################
########################
# scale pixel values so that
# instead of 0-255 they are 0-1
train_images = train_images / 255.0
test_images = test_images / 255.0
#################################
#################################
## Visualize the Training Data ##
#################################
#################################
# display the first 25 images from the training set
# and display the class name below each image
# plt.figure(figsize=(10,10))
# for i in range(25):
# plt.subplot(5,5,i+1)
# plt.xticks([])
# plt.yticks([])
# plt.grid(False)
# plt.imshow(train_images[i], cmap=plt.cm.binary)
# plt.xlabel(class_names[train_labels[i]])
# plt.show()
###############################
###############################
## Setup checkpoint callback ##
###############################
###############################
# checkpoint_path = "training_1/cpo.ckpt"
# checkpoint_dir = os.path.dirname(checkpoint_path)
# # Create checkpoint callback
# cp_callback = keras.callbacks.ModelCheckpoint(
# checkpoint_path,
# save_weights_only=True,
# verbose=1,
# # Save weights, every 5 epochs.
# period=5
# )
##############################
##############################
## Train and save the Model ##
##############################
##############################
# create basic model instance
model = create_model()
# model.save_weights(checkpoint_path.format(epoch=0))
# # train the model, saving with our checkpoint callback
# model.fit(train_images, train_labels, epochs=10, callbacks=[cp_callback])
# train the model
epochs = 10
model.fit(train_images, train_labels, epochs=epochs)
# Save entire model to a HDF5 file
model_path = 'models/{}epochs_mnist_model.h5'.format(epochs)
model.save(model_path)
# evaluate accuracy
loss, acc = model.evaluate(test_images, test_labels)
print("Newly trained model, accuracy: {:5.2f}%".format(100*acc))
######################
######################
## Load saved model ##
######################
######################
# Recreate the exact same model, including weights and optimizer.
model = keras.models.load_model(model_path)
# model.summary()
# evaluate accuracy of loaded model
loss, acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
######################
######################
## Make Predictions ##
######################
######################
# predictions = model.predict(test_images)
#######################
#######################
## Check Predictions ##
#######################
#######################
# # grab the first prediction
# first_prediction = predictions[0]
# # check which of the 10 label integers
# # has the highest probability value
# predicted = np.argmax(first_prediction)
# # check guess against test label
# actual = test_labels[0]
# print
# if actual == predicted:
# print('Success! First prediction is correct!')
# else:
# print('Whoops... First prediction failed.')
# # display the 0th image, predictions, and prediction array
# i = 0
# plt.figure(figsize=(6,3))
# plt.subplot(1,2,1)
# plot_image(i, predictions, test_labels, test_images)
# plt.subplot(1,2,2)
# plot_value_array(i, predictions, test_labels)
# plt.show()
#########################
#########################
## Display Predictions ##
#########################
#########################
# # Plot the first X test images, their predicted label, and the true label
# # Color correct predictions in blue, incorrect predictions in red
# num_rows = 8
# num_cols = 5
# num_images = num_rows*num_cols
# plt.figure(figsize=(2*2*num_cols, 2*num_rows))
# for i in range(num_images):
# plt.subplot(num_rows, 2*num_cols, 2*i+1)
# plot_image(i, predictions, test_labels, test_images)
# plt.subplot(num_rows, 2*num_cols, 2*i+2)
# plot_value_array(i, predictions, test_labels)
# plt.show()
#######################
#######################
## Single Prediction ##
#######################
#######################
# # Grab an image from the test dataset
# img = np.array([
# 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,192,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,52,0,180,0,7,0,241,0,0,0,254,0,0,0,255,0,0,0,255,0,3,0,250,0,58,0,176,0,60,0,174,0,128,0,128,0,128,0,191,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,13,0,0,0,255,0,0,0,205,0,0,0,56,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,53,0,0,0,205,0,0,0,227,0,0,0,255,0,0,0,255,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,61,0,172,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,94,0,0,0,243,0,0,0,255,0,0,0,56,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,52,0,0,0,254,0,0,0,238,0,0,0,254,0,0,0,255,0,0,0,255,0,0,0,123,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0,238,0,0,0,118,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,93,0,0,0,255,0,0,0,225,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,252,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,255,0,0,0,215,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,0,0,255,0,0,0,254,0,0,0,251,0,0,0,225,0,0,0,239,0,0,0,236,0,0,0,197,0,0,0,191,0,0,0,191,0,0,0,190,0,0,0,182,0,0,0,171,0,0,0,254,0,0,0,255,0,0,0,199,0,0,0,225,0,0,0,246,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,1,0,0,0,44,0,0,0,254,0,0,0,46,0,0,0,75,0,0,0,88,0,0,0,88,0,0,0,85,0,0,0,103,0,0,0,100,0,0,0,98,0,0,0,137,0,0,0,78,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,232,0,0,0,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,254,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,252,0,0,0,16,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,167,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,33,0,0,0,226,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,87,0,0,0,137,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,229,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,43,0,0,0,254,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,207,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,41,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,125,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,245,0,0,0,254,0,0,0,152,0,0,0,0,0,0,0,0,0,0,0,227,0,0,0,37,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,129,0,127,0,128,0,191,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,50,0,182,0,0,0,254,0,0,0,255,0,88,0,151,0,129,0,127,0,129,0,127,0,129,0,127,0,129,0,127,0,128,0,191,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
# ])
# # scale it
# img = img / 255
# # Grab an image from the test dataset
# img = test_images[11]
# print(img.shape)
# # Add the image to a batch where it's the only member.
# img_batch = (np.expand_dims(img,0))
# # print(img.shape)
# # Make prediction
# predictions_single = model.predict(img_batch)
# # print(predictions_single)
# prediction = predictions_single[0]
# predicted = np.argmax(prediction)
# # display test image and prediction
# plt.figure()
# plt.imshow(img, cmap='gray')
# plt.xlabel("Prediction: {}".format(class_names[predicted]))
# plt.show()
|
{"hexsha": "dbe914a0c4db51b4f1523861d3c3bf92e3a4e29e", "size": 14669, "ext": "py", "lang": "Python", "max_stars_repo_path": "training/model-restore-mnist.py", "max_stars_repo_name": "wobkat/ellwood-glacier", "max_stars_repo_head_hexsha": "743112c8ece09f14e68af7fa4d0fd2a33501c4c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "training/model-restore-mnist.py", "max_issues_repo_name": "wobkat/ellwood-glacier", "max_issues_repo_head_hexsha": "743112c8ece09f14e68af7fa4d0fd2a33501c4c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "training/model-restore-mnist.py", "max_forks_repo_name": "wobkat/ellwood-glacier", "max_forks_repo_head_hexsha": "743112c8ece09f14e68af7fa4d0fd2a33501c4c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.1485507246, "max_line_length": 7035, "alphanum_fraction": 0.5753630104, "include": true, "reason": "import numpy", "num_tokens": 7987}
|
import numpy as np
from scipy import linalg
def norm_of_columns(A, p=2):
"""Vector p-norm of each column of a matrix.
Parameters
----------
A : array_like
Input matrix.
p : int, optional
p-th norm.
Returns
-------
array_like
p-norm of each column of A.
"""
_, N = A.shape
return np.asarray([linalg.norm(A[:, j], ord=p) for j in range(N)])
def coherence_of_columns(A):
"""Mutual coherence of columns of A.
Parameters
----------
A : array_like
Input matrix.
Returns
-------
float
Mutual coherence of columns of A.
"""
A = np.asmatrix(A)
_, N = A.shape
A = A * np.asmatrix(np.diag(1/norm_of_columns(A)))
Gram_A = A.H*A
for j in range(N):
Gram_A[j, j] = 0
return np.max(np.abs(Gram_A))
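# Example (added, illustrative): the mutual coherence of an orthogonal
# matrix is 0, since its normalized columns have zero pairwise inner products.
# >>> coherence_of_columns(np.eye(3))
# 0.0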
def asarray_1d(a, **kwargs):
"""Squeeze the input and check if the result is one-dimensional.
Returns *a* converted to a `numpy.ndarray` and stripped of
all singleton dimensions. Scalars are "upgraded" to 1D arrays.
The result must have exactly one dimension.
If not, an error is raised.
"""
result = np.squeeze(np.asarray(a, **kwargs))
if result.ndim == 0:
result = result.reshape((1,))
elif result.ndim > 1:
raise ValueError("array must be one-dimensional")
return result
def matdiagmul(A, b):
"""Efficient multiplication of matrix and diagonal matrix .
Returns the multiplication of a matrix *A* and a diagonal matrix. The
diagonal matrix is given by the vector *b* containing its elements on
the main diagonal. If *b* is a matrix, it is treated as a stack of vectors
residing in the last index and broadcast accordingly.
Parameters
----------
A : array_like
Input matrix.
b : array_like
Main diagonal elements or stack of main diagonal elements.
Returns
-------
array_like
Result of matrix multiplication.
"""
if len(b.shape) == 1:
b = b[np.newaxis, :]
K, N = b.shape
M, N = A.shape
C = np.zeros([K, M, N], dtype=A.dtype)
for k in range(K):
C[k, :, :] = A * b[k, :]
return C
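# Illustrative check (added): for a single diagonal vector, matdiagmul is
# equivalent to right-multiplying by np.diag(b).
# >>> A = np.arange(6.).reshape(2, 3)
# >>> b = np.array([1., 2., 3.])
# >>> np.allclose(matdiagmul(A, b)[0], A @ np.diag(b))
# True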
def db(x, power=False):
"""Convert *x* to decibel.
Parameters
----------
x : array_like
Input data. Values of 0 lead to negative infinity.
power : bool, optional
If ``power=False`` (the default), *x* is squared before
conversion.
"""
with np.errstate(divide='ignore'):
return (10 if power else 20) * np.log10(np.abs(x))
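# Quick sanity check (added): with the default amplitude convention,
# a factor of 10 corresponds to 20 dB.
# >>> db(10.0)
# 20.0
# >>> db(100.0, power=True)
# 20.0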
|
{"hexsha": "08eb60bb3291a815e642af0a8e18b0c0a27ea099", "size": 2631, "ext": "py", "lang": "Python", "max_stars_repo_path": "micarray/util.py", "max_stars_repo_name": "trojanjay/sfa-numpy", "max_stars_repo_head_hexsha": "bff5737ef429f31228d20a9e1d0ce7d46d3080d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 39, "max_stars_repo_stars_event_min_datetime": "2017-09-22T10:30:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T15:56:22.000Z", "max_issues_repo_path": "micarray/util.py", "max_issues_repo_name": "trojanjay/sfa-numpy", "max_issues_repo_head_hexsha": "bff5737ef429f31228d20a9e1d0ce7d46d3080d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2017-11-14T13:02:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-01T09:53:47.000Z", "max_forks_repo_path": "micarray/util.py", "max_forks_repo_name": "trojanjay/sfa-numpy", "max_forks_repo_head_hexsha": "bff5737ef429f31228d20a9e1d0ce7d46d3080d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2017-12-08T23:54:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-06T21:06:47.000Z", "avg_line_length": 23.9181818182, "max_line_length": 79, "alphanum_fraction": 0.5883694413, "include": true, "reason": "import numpy,from scipy", "num_tokens": 679}
|
#pragma once
#include <memory>
#include <Eigen/Dense>
#include "SdfObject.hpp"
#include "../Ray.hpp"
#include "../accelerate/Bound3.hpp"
class SdfSphere : public SdfObject
{
public:
    SdfSphere(Eigen::Vector3f position, float radius) : SdfObject(position), radius(radius){};
float sdf(const Eigen::Vector3f &position) const override
{
return (position - this->position).norm() - radis;
};
std::unique_ptr<Bound3> build_bound3() const override
{
        auto min = position - Eigen::Vector3f::Constant(radius);
        auto max = position + Eigen::Vector3f::Constant(radius);
return std::make_unique<Bound3>(min, max);
};
private:
    float radius;
};
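// Illustrative usage sketch (added, not part of the original header):
//   SdfSphere sphere(Eigen::Vector3f(0, 0, 0), 1.0f);
//   float d = sphere.sdf(Eigen::Vector3f(2, 0, 0)); // d == 1.0f
//   auto bound = sphere.build_bound3();             // AABB spanning [-1, 1]^3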
|
{"hexsha": "16ad3ab891fe9eb368e71f233a78fde9f194b7b5", "size": 683, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/render/object/SdfSphere.hpp", "max_stars_repo_name": "yzx9/NeuronSdfViewer", "max_stars_repo_head_hexsha": "454164dfccf80b806aac3cd7cca09e2cb8bd3c2a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-12-31T10:29:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T10:29:56.000Z", "max_issues_repo_path": "src/render/object/SdfSphere.hpp", "max_issues_repo_name": "yzx9/NeuronSdfViewer", "max_issues_repo_head_hexsha": "454164dfccf80b806aac3cd7cca09e2cb8bd3c2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/render/object/SdfSphere.hpp", "max_forks_repo_name": "yzx9/NeuronSdfViewer", "max_forks_repo_head_hexsha": "454164dfccf80b806aac3cd7cca09e2cb8bd3c2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3928571429, "max_line_length": 91, "alphanum_fraction": 0.6573938507, "num_tokens": 185}
|
using GeometryTypes, ColorTypes
using FactCheck
import Base.Test.@inferred
facts("GeometryTypes") do
include("polygons.jl")
include("hyperrectangles.jl")
include("faces.jl")
include("meshes.jl")
include("distancefields.jl")
include("primitives.jl")
include("decompose.jl")
include("simplerectangle.jl")
include("hypersphere.jl")
include("typeutils.jl")
include("simplices.jl")
include("convexhulls.jl")
include("gjk.jl")
include("lines.jl")
end
FactCheck.exitstatus()
|
{"hexsha": "ce562f3e8c83623d9ec2cca5f609cd3ebbea6940", "size": 526, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "JuliaPackageMirrors/GeometryTypes.jl", "max_stars_repo_head_hexsha": "705e5a646dd2177bbb4b9f8c26b52bc832b38d65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "JuliaPackageMirrors/GeometryTypes.jl", "max_issues_repo_head_hexsha": "705e5a646dd2177bbb4b9f8c26b52bc832b38d65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "JuliaPackageMirrors/GeometryTypes.jl", "max_forks_repo_head_hexsha": "705e5a646dd2177bbb4b9f8c26b52bc832b38d65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8695652174, "max_line_length": 33, "alphanum_fraction": 0.6882129278, "num_tokens": 149}
|
[STATEMENT]
lemma find_sort_least :
assumes "find P (sort xs) = Some x"
shows "\<forall> x' \<in> set xs . x \<le> x' \<or> \<not> P x'"
and "x = (LEAST x' \<in> set xs . P x')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x'\<in>set xs. x \<le> x' \<or> \<not> P x' &&& x = (LEAST x'. x' \<in> set xs \<and> P x')
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<forall>x'\<in>set xs. x \<le> x' \<or> \<not> P x'
2. x = (LEAST x'. x' \<in> set xs \<and> P x')
[PROOF STEP]
obtain i where "i < length (sort xs)"
and "(sort xs) ! i = x"
and "(\<forall> j < i . \<not> P ((sort xs) ! j))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>i. \<lbrakk>i < length (sort xs); sort xs ! i = x; \<forall>j<i. \<not> P (sort xs ! j)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using find_sort_index[OF assms]
[PROOF STATE]
proof (prove)
using this:
\<exists>i<length (sort xs). sort xs ! i = x \<and> (\<forall>j<i. \<not> P (sort xs ! j))
goal (1 subgoal):
1. (\<And>i. \<lbrakk>i < length (sort xs); sort xs ! i = x; \<forall>j<i. \<not> P (sort xs ! j)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
i < length (sort xs)
sort xs ! i = x
\<forall>j<i. \<not> P (sort xs ! j)
goal (2 subgoals):
1. \<forall>x'\<in>set xs. x \<le> x' \<or> \<not> P x'
2. x = (LEAST x'. x' \<in> set xs \<and> P x')
[PROOF STEP]
have "\<And> j . j > i \<Longrightarrow> j < length xs \<Longrightarrow> (sort xs) ! i \<le> (sort xs) ! j"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>j. \<lbrakk>i < j; j < length xs\<rbrakk> \<Longrightarrow> sort xs ! i \<le> sort xs ! j
[PROOF STEP]
by (simp add: sorted_nth_mono)
[PROOF STATE]
proof (state)
this:
\<lbrakk>i < ?j; ?j < length xs\<rbrakk> \<Longrightarrow> sort xs ! i \<le> sort xs ! ?j
goal (2 subgoals):
1. \<forall>x'\<in>set xs. x \<le> x' \<or> \<not> P x'
2. x = (LEAST x'. x' \<in> set xs \<and> P x')
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>i < ?j; ?j < length xs\<rbrakk> \<Longrightarrow> sort xs ! i \<le> sort xs ! ?j
[PROOF STEP]
have "\<And> j . j < length xs \<Longrightarrow> (sort xs) ! i \<le> (sort xs) ! j \<or> \<not> P ((sort xs) ! j)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>i < ?j; ?j < length xs\<rbrakk> \<Longrightarrow> sort xs ! i \<le> sort xs ! ?j
goal (1 subgoal):
1. \<And>j. j < length xs \<Longrightarrow> sort xs ! i \<le> sort xs ! j \<or> \<not> P (sort xs ! j)
[PROOF STEP]
using \<open>(\<forall> j < i . \<not> P ((sort xs) ! j))\<close>
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>i < ?j; ?j < length xs\<rbrakk> \<Longrightarrow> sort xs ! i \<le> sort xs ! ?j
\<forall>j<i. \<not> P (sort xs ! j)
goal (1 subgoal):
1. \<And>j. j < length xs \<Longrightarrow> sort xs ! i \<le> sort xs ! j \<or> \<not> P (sort xs ! j)
[PROOF STEP]
by (metis not_less_iff_gr_or_eq order_refl)
[PROOF STATE]
proof (state)
this:
?j < length xs \<Longrightarrow> sort xs ! i \<le> sort xs ! ?j \<or> \<not> P (sort xs ! ?j)
goal (2 subgoals):
1. \<forall>x'\<in>set xs. x \<le> x' \<or> \<not> P x'
2. x = (LEAST x'. x' \<in> set xs \<and> P x')
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?j < length xs \<Longrightarrow> sort xs ! i \<le> sort xs ! ?j \<or> \<not> P (sort xs ! ?j)
[PROOF STEP]
show "\<forall> x' \<in> set xs . x \<le> x' \<or> \<not> P x'"
[PROOF STATE]
proof (prove)
using this:
?j < length xs \<Longrightarrow> sort xs ! i \<le> sort xs ! ?j \<or> \<not> P (sort xs ! ?j)
goal (1 subgoal):
1. \<forall>x'\<in>set xs. x \<le> x' \<or> \<not> P x'
[PROOF STEP]
by (metis \<open>sort xs ! i = x\<close> in_set_conv_nth length_sort set_sort)
[PROOF STATE]
proof (state)
this:
\<forall>x'\<in>set xs. x \<le> x' \<or> \<not> P x'
goal (1 subgoal):
1. x = (LEAST x'. x' \<in> set xs \<and> P x')
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>x'\<in>set xs. x \<le> x' \<or> \<not> P x'
[PROOF STEP]
show "x = (LEAST x' \<in> set xs . P x')"
[PROOF STATE]
proof (prove)
using this:
\<forall>x'\<in>set xs. x \<le> x' \<or> \<not> P x'
goal (1 subgoal):
1. x = (LEAST x'. x' \<in> set xs \<and> P x')
[PROOF STEP]
using find_set[OF assms] find_condition[OF assms]
[PROOF STATE]
proof (prove)
using this:
\<forall>x'\<in>set xs. x \<le> x' \<or> \<not> P x'
x \<in> set (sort xs)
P x
goal (1 subgoal):
1. x = (LEAST x'. x' \<in> set xs \<and> P x')
[PROOF STEP]
by (metis (mono_tags, lifting) Least_equality set_sort)
[PROOF STATE]
proof (state)
this:
x = (LEAST x'. x' \<in> set xs \<and> P x')
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2055, "file": "FSM_Tests_Util", "length": 18}
|
[STATEMENT]
lemma sq_mtx_vec_mult_sum_cols: "A *\<^sub>V x = sum (\<lambda>i. x $ i *\<^sub>R \<c>\<o>\<l> i A) UNIV"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A *\<^sub>V x = (\<Sum>i\<in>UNIV. x $ i *\<^sub>R \<c>\<o>\<l> i A)
[PROOF STEP]
by(transfer) (simp add: matrix_mult_sum scalar_mult_eq_scaleR)
|
{"llama_tokens": 147, "file": "Matrices_for_ODEs_SQ_MTX", "length": 1}
|
import numpy as np
import cv2
import cv2.aruco as aruco
import math
from math import sin, cos
import time
frame = np.array([])
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250) # Use 4x4 dictionary to find markers
parameters = aruco.DetectorParameters_create() # Marker detection parameters
def acc(arr, k = 1, ac = 1):
# print(arr, arr.shape, len(arr.shape))
if len(arr.shape) == 1:
r = np.array([round(el * k, ac) for el in arr])
# print(arr, r)
return r
else:
r = np.array([acc(el, k, ac) for el in arr])
# print(r)
return r
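# Note (added): for numeric ndarrays this recursion matches the vectorized
# np.round(arr * k, ac); e.g. acc(np.array([1.234, 5.678]), 1, 1) gives
# array([1.2, 5.7]).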
def calCamAngle(v):
zA = math.asin(v[0] / ((v[0] ** 2 + v[1] ** 2) ** 0.5))
xA = math.asin(v[2] / ((v[1] ** 2 + v[2] ** 2) ** 0.5))
return np.array([xA, 0, zA * -1])
def rotate(rvec, matrix):
rotM = np.zeros(shape=(3, 3))
cv2.Rodrigues(rvec, rotM, jacobian = 0)
newMatrix = np.dot(rotM, matrix.T).T
return newMatrix
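# Illustrative check (added): a rotation vector of pi/2 about the z-axis
# maps the x-axis onto the y-axis:
# >>> rotate(np.array([0.0, 0.0, np.pi / 2]), np.array([[1.0, 0.0, 0.0]]))
# array([[0., 1., 0.]])  # up to floating-point error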
def vecAngle(v):
vecA = math.acos(v[2] / (np.dot(v, v) ** 0.5)) * 180 / math.pi
return vecA * (int(v[0] >= 0) * 2 - 1)
def track(matrix_coefficients, distortion_coefficients):
global frame
    # cap = cv2.VideoCapture(0)  # Get the camera source
# waiting = time.time()
# while time.time() - waiting < 0.15:
# ret, frame = cap.read()
# cv2.waitKey(3)
# operations on the frame come here
# cap.release()
ret, frame = cap.read()
cv2.waitKey(3)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Change grayscale
    # aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_250)  # Use 4x4 dictionary to find markers
# parameters = aruco.DetectorParameters_create() # Marker detection parameters
# lists of ids and the corners beloning to each id
corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict, parameters=parameters, cameraMatrix=matrix_coefficients, distCoeff=distortion_coefficients)
hexes = np.array([])
    if ids is not None:  # If there are markers found by detector
hexes = np.array([[None] * 4 for i in range(len(ids))])
for i in range(0, len(ids)): # Iterate in markers
print()
# Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], 0.05, matrix_coefficients, distortion_coefficients)
(rvec - tvec).any() # get rid of that nasty numpy value array error
# markerPoints = np.resize(markerPoints, (4, 3))
# markerPoints = rotate(rvec[0][0][1], rvec[0][0][2], rvec[0][0][0], markerPoints)
# markerPoints = markerPoints + tvec[0][0]
arucoVec = np.array([[0.025, 0, 0], [0, 0.025, 0], [0, 0, 0.025]])
arucoVec = rotate(rvec, arucoVec)
camAngle = calCamAngle(arucoVec[2])
# print(acc(arucoVec[2], 1, 4), acc(camAngle, 1, 4))
arucoVec = rotate(camAngle, arucoVec)
# print(acc(arucoVec, 100, 2))
tvec = rotate(camAngle, tvec[0])[0]
rvec = rvec + camAngle
driveTo = rotate(rvec, positions) + tvec
angles2 = np.array([vecAngle(tvec - v) for v in driveTo])
fromRob = driveTo - robPos
fromRob = np.array([[v[0], 0, v[2]] for v in fromRob])
distances = np.array([np.dot(v, v) ** 0.5 for v in fromRob])
angles1 = np.array([vecAngle(v) for v in fromRob])
# print(ids[i])
# print(acc(distances, 100, 1))
# print(acc(angles1))
# print(acc(angles2))
thisHex = np.array([[ids[i][0], distances[ind], angles1[ind], angles2[ind]] for ind in range(6)])
hexes[i] = min(thisHex, key = lambda el: el[1])
aruco.drawDetectedMarkers(frame, corners) # Draw A square around the markers
# aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)
# Draw Axis
# cv2.putText(frame, 'Transition vector: ',
# (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
# cv2.putText(frame, ' '.join(map(lambda i: str(round(i * 100, 1)), tvec[0][0])),
# (20, 75), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
# cv2.putText(frame, 'Rotation vector: ',
# (20, 110), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
# cv2.putText(frame, ' '.join(map(lambda i: str(round(i * 180 / math.pi, 1)), rvec[0][0])),
# (20, 145), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
# Drawing camera axis
# cv2.line(frame, (10, 10), (10, 40), (0, 255, 0), 3)
# cv2.line(frame, (10, 10), (40, 10), (0, 0, 255), 3)
# cv2.line(frame, (8, 8), (10, 10), (255, 0, 0), 5)
hexes = hexes[hexes[:, 1].argsort()]
print(hexes)
def load_coefficients(path):
""" Loads camera matrix and distortion coefficients. """
# FILE_STORAGE_READ
cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)
# note we also have to specify the type to retrieve other wise we only get a
# FileNode object back instead of a matrix
camera_matrix = cv_file.getNode("K").mat()
dist_matrix = cv_file.getNode("D").mat()
cv_file.release()
return [camera_matrix, dist_matrix]
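# Added for illustration: the matching writer for the YML file read above
# (uses the same "K"/"D" node names and the standard cv2.FileStorage API).
def save_coefficients(path, camera_matrix, dist_matrix):
    """ Saves camera matrix and distortion coefficients. """
    cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_WRITE)
    cv_file.write("K", camera_matrix)
    cv_file.write("D", dist_matrix)
    cv_file.release()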
positions = np.array([rotate(np.array([0, 0, 60 / 180 * math.pi]) * i, np.array([0.26, 0, 0])) for i in range(6)])
robPos = np.array([0, 0.095, -0.165])
cap = cv2.VideoCapture(0)
waiting = time.time()
while time.time() - waiting < 0.15:
ret, frame = cap.read()
cv2.waitKey(3)
camera_matrix, dist_matrix = load_coefficients('/home/pi/save_file.YML')
track(camera_matrix, dist_matrix)
cap_reset = time.time()
while 1:
cv2.imshow('frame', frame)
time.sleep(0.2)
key = cv2.waitKey(5)
if key == ord('e'):
break
track(camera_matrix, dist_matrix)
cv2.destroyAllWindows()
cap.release()
|
{"hexsha": "2aac3507e32efe06b011e09f4d3230e0f6bb24e9", "size": 5561, "ext": "py", "lang": "Python", "max_stars_repo_path": "aruco10hexes.py", "max_stars_repo_name": "IldanPetrov/EuroHex", "max_stars_repo_head_hexsha": "e15757b007e21dc5fd951185a578a64b2b9b5380", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aruco10hexes.py", "max_issues_repo_name": "IldanPetrov/EuroHex", "max_issues_repo_head_hexsha": "e15757b007e21dc5fd951185a578a64b2b9b5380", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aruco10hexes.py", "max_forks_repo_name": "IldanPetrov/EuroHex", "max_forks_repo_head_hexsha": "e15757b007e21dc5fd951185a578a64b2b9b5380", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.75625, "max_line_length": 166, "alphanum_fraction": 0.6414313972, "include": true, "reason": "import numpy", "num_tokens": 1879}
|
import numpy as np
import pandas as pd
from numpy.random import randn
np.random.seed(101)  # seed so runs produce the same "random" numbers
df = pd.DataFrame(randn(5,4),['A','B', 'C','D','E'],['W','X','Y','Z'])
print(df)
#conditional selection
print(df > 0)
print(df[df > 0])
print(df['W']>0)
print(df[df['W']>0])
print(df[df['W']>0]['X'])
print(df[df['W']>0][['X','Y']])
#multiple condition on df
print(df[(df['W'] > 0) & (df['Y'] < 1)]) #python and to &
print(df[(df['W'] > 0) & (df['Y'] > 1)])
print(df[(df['W'] > 0) | (df['Y'] > 1)]) #python or to |
#set and reset index
print(df.reset_index())
newindex = 'MH UP KA GA TL'.split()
df['STATES'] = newindex
print(df)
print(df.set_index('STATES'))
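# Illustrative addition: set_index() above returns a new DataFrame and leaves
# df unchanged; rebind the result (or pass inplace=True) to persist the index.
df2 = df.set_index('STATES')
print(df2.loc['MH'])  # select a row by the new index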
|
{"hexsha": "caee330d31680e653124951d4d6057d2efc4a7e5", "size": 694, "ext": "py", "lang": "Python", "max_stars_repo_path": "pandas_df_example2.py", "max_stars_repo_name": "ingleashish/python-data-science-machine-learning", "max_stars_repo_head_hexsha": "46fb3daf8cccc4444cc92ab0d48f92604061d5c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pandas_df_example2.py", "max_issues_repo_name": "ingleashish/python-data-science-machine-learning", "max_issues_repo_head_hexsha": "46fb3daf8cccc4444cc92ab0d48f92604061d5c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pandas_df_example2.py", "max_forks_repo_name": "ingleashish/python-data-science-machine-learning", "max_forks_repo_head_hexsha": "46fb3daf8cccc4444cc92ab0d48f92604061d5c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.5238095238, "max_line_length": 70, "alphanum_fraction": 0.5778097983, "include": true, "reason": "import numpy,from numpy", "num_tokens": 243}
|
# Copyright (C) 2019 Project AGI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""PatternCompletionWorkflow Workflow class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
from enum import Enum
import numpy as np
import tensorflow as tf
from pagi.workflows.workflow import Workflow
from pagi.utils import logger_utils, image_utils, np_utils
from pagi.utils.generic_utils import class_filter
from pagi.utils.tf_utils import tf_label_filter, tf_invert, tf_set_min
from pagi.datasets.omniglot_dataset import OmniglotDataset
from aha.datasets.omniglot_lake_dataset import OmniglotLakeDataset
from aha.datasets.omniglot_lake_runs_dataset import OmniglotLakeRunsDataset
from aha.utils.recursive_component_harness import RecursiveComponentHarness
class UseTrainForTest(Enum):
IDENTICAL = 1
SHUFFLED = 2
NO = 3
class PatternCompletionWorkflow(Workflow):
"""Pattern completion experiment workflow."""
def __init__(self, session, dataset_type, dataset_location, component_type, hparams_override, eval_opts, export_opts,
opts=None, summarize=True, seed=None, summary_dir=None, checkpoint_opts=None):
super().__init__(session, dataset_type, dataset_location, component_type, hparams_override, eval_opts, export_opts,
opts=opts, summarize=summarize, seed=seed, summary_dir=summary_dir,
checkpoint_opts=checkpoint_opts)
self._rsummary_from_batch = 0
self._recursive_harness = None
self._input_mode = None
# these are for summaries
self._memorised = None
self._cue = None # cue refers to input presented to Hopfield (not the cue internal to Hop)
self._recalled = None
@staticmethod
def default_opts():
"""Builds an HParam object with default workflow options."""
return tf.contrib.training.HParams(
num_repeats=1,
invert_images=False,
      min_val=0,  # remap any 0 in the input image to this new min_val; if min_val == 0, do nothing
train_classes=['5', '6', '7', '8', '9'],
test_classes=['5', '6', '7', '8', '9'],
batch_all_classes=False, # Ensure the batch has at least one example of each of the specified classes
      batch_no_duplicates=False,  # Ensure the batch has ONLY one example of each of the specified classes
completion_gain=1.0,
train_recurse=False,
test_recurse=False,
recurse_iterations=0,
      rsummary_batches=2,  # number of final batches for which recursive summaries are written, e.g. 2 means the last 2 batches
      degrade_type='horizontal',  # none, vertical, horizontal or random (the model completes the degraded image)
degrade_factor=0.5, # fraction to be degraded, if supported by the degrade_type option
degrade_value=0.0, # when degrading pixels, set them to this value
noise_val=1.0, # value of 'active' bits of salt and pepper noise
noise_factor=0.2, # fraction of image to be corrupted with noise
input_mode={
"train_first": "complete",
"train_inference": "complete",
"test_first": "complete",
"test_inference": "complete"
},
evaluate=True,
train=True
)
#######################################################
# Utility methods used by training() and evaluate()
def _is_inference_recursive(self):
test_recurse = self._opts['evaluate'] and \
self._opts['test_recurse']
return test_recurse
def _is_train_recursive(self):
train_recurse = self._opts['train'] and \
self._opts['train_recurse']
return train_recurse
def _is_combined_train_and_test_recursive_summary(self):
"""
    Whether, in the separate recursive summary, training and testing
    iterations should be combined into one continuous plot.
"""
if self._is_inference_recursive() and self._is_train_recursive():
return True
return False
def _is_write_evaluate_summary(self):
"""If we should write a summary at the end of evaluate, independent of recursive summary."""
return True
#######################################################
def _is_omniglot_lake(self):
return self._dataset_type.__name__.startswith('Omniglot')
# return (self._dataset_type.__name__ == OmniglotDataset.__name__) or (
# self._dataset_type.__name__ == OmniglotLakeDataset.__name__) or (
# self._dataset_type.__name__ == OmniglotLakeRunsDataset.__name__)
@staticmethod
def validate_batch_include_all_classes(feature, label, classes): # pylint: disable=W0613
"""Ensures the batch has the specified classes."""
classes = tf.convert_to_tensor(classes)
unique_labels, _ = tf.unique(label)
return tf.equal(tf.reduce_sum(classes), tf.reduce_sum(unique_labels))
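  # Note (added): comparing sums is a fast heuristic rather than an exact set
  # check; e.g. labels {1, 4} and classes {2, 3} both sum to 5 and would pass.
  # An exact check would compare the sorted unique labels to the class list.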
def validate_batch_no_duplicate_classes(self, feature, label, classes): # pylint: disable=W0613
"""Ensures the batch has no more than one each of the specified classes."""
unique_labels, _ = tf.unique(label)
batch_size = self._hparams.batch_size
return tf.equal(tf.size(unique_labels), batch_size)
def _gen_datasets_with_options(self, train_classes=None, test_classes=None, is_superclass=None, class_proportion=0.0,
degrade_test=False, degrade_type='random', degrade_val=0, degrade_factor=0.5,
noise_test=False, noise_type='sp_binary', noise_factor=0.2,
recurse_train=False, recurse_test=False,
num_batch_repeats=1, recurse_iterations=1, additional_test_decodes=0,
evaluate_step=True, use_trainset_for_tests=UseTrainForTest.IDENTICAL,
invert_images=False, min_val=0):
"""
Generate degraded datasets for pattern completion.
    The test set is a duplicate of the train set, but with degraded versions of the images.
Each batch of the train set is then duplicated (in place), so it is in pairs.
During operation, the first of this pair is used for training, the second for comparison with test batch in eval.
Repeat each batch 'num_repeats' times.
This is so that you can repeatedly train on the exact same batch (for episodic work).
    Each episode, add one extra item to the train set, because it is taken for comparison in the test step.
E.g.If the stream of batches is A, B, C, D.
Iterations = 3, num_repeats = 2
Without any additional decodes in evaluate()
Train = [[A, A, A], A], [[A, A, A], A], [[B, B, B], B], [[B, B, B], B],
Test = [A, A, A], [A, A, A], [B, B, B], [B, B, B],
With 2 additional decodes in evaluate() e.g. vc_decode() and dg_decode()
Train = [[A, A, A], A], [[A, A, A], A], [[B, B, B], B], [[B, B, B], B],
Test = [[A, A, A], A, A], [[A, A, A], A, A], [[B, B, B], B, B], [[B, B, B], B, B],
:param train_classes: a list specifying the classes for training, all if None
:param test_classes: a list specifying the classes for testing, all if None
:param is_superclass: if true, class labels refer to superclasses
:param class_proportion: for filtering superclass
:param degrade_test: optionally degrade test images
:param degrade_type: if degrading, use this method
:param degrade_val: set 0's to this val
:param degrade_factor: fraction to be degraded (if the degrade type supports that option)
:param recurse_train: recursion used for training
:param recurse_test: recursion used for inference
:param num_batch_repeats: present each batch this many times
:param recurse_iterations: number of iterations for each batch (>1 if recursion)
:param evaluate_step: true if there is an evaluation after the train step (then extra train image taken each step)
:param invert_images: white->black and black->white (assumes image is [0,1])
:param use_trainset_for_tests: UseTrainForTest: identical, shuffled, no
:return:
"""
def get_set(set_type, for_evaluate, seed_increment=0):
"""
'set_type' = 'train', or assumed it is Test set type
'for_evaluate', boolean, when train set is used for the evaluate phase
'seed_increment' is for the case where you are using the same dataset for train and test,
but you want to fetch different exemplars
--------> VERY IMPORTANT: In this version, it assumes that additional things (recursion and decodes)
are done at the end of num_batch_repeats iterations
In previous versions or other workflows, it may expect this EVERY iteration
"""
is_train = set_type == 'train'
repeats = num_batch_repeats
if recurse_train and is_train:
repeats = repeats * recurse_iterations
if recurse_test and (not is_train or for_evaluate):
        repeats = repeats + (recurse_iterations-1)  # only add recursion once per 'num_batch_repeats'; -1 because recurse_iterations == 1 means one normal iteration with no extra passes
if evaluate_step:
if is_train and not for_evaluate:
# another 'train' input for every batch (num_batch_repeats), will be iterated for comprsn `complete_pattern()`
repeats = repeats + num_batch_repeats
else:
# additional decodes for every every num_batch_repeats
repeats = repeats + additional_test_decodes
logging.debug("-----------------------> is_train={}, repeat={} ({})".format(is_train, repeats, additional_test_decodes))
if is_train:
the_dataset = self._dataset.get_train()
else:
the_dataset = self._dataset.get_test()
if not self._is_omniglot_lake():
if is_train:
the_classes = train_classes
else:
the_classes = test_classes
# Filter dataset to keep specified classes only
if the_classes and len(the_classes) > 0:
the_classes = class_filter(self._dataset, the_classes, is_superclass, class_proportion)
the_dataset = the_dataset.filter(lambda x, y: tf_label_filter(x, y, the_classes))
the_dataset = the_dataset.shuffle(buffer_size=10000, seed=(self._seed+seed_increment))
if self._opts['evaluate_mode'][0] == 'simple' and self._opts['evaluate_mode'][1].startswith('run'):
the_dataset = the_dataset.shuffle(buffer_size=10000, seed=(self._seed+seed_increment))
if invert_images:
the_dataset = the_dataset.map(lambda x, y: tf_invert(x, y))
if min_val != 0:
the_dataset = the_dataset.map(lambda x, y: tf_set_min(x, y, min_val))
the_dataset = the_dataset.apply(tf.contrib.data.batch_and_drop_remainder(self._hparams.batch_size))
if not self._is_omniglot_lake():
# Ensure the batch has at least one example of each of the specified class
if self._opts['batch_all_classes']:
the_dataset = the_dataset.filter(lambda x, y: self.validate_batch_include_all_classes(x, y, train_classes))
# Ensure the batch ONLY one example of each of the specified class
if self._opts['batch_no_duplicates']:
the_dataset = the_dataset.filter(lambda x, y: self.validate_batch_no_duplicate_classes(x, y, train_classes))
# Optionally degrade input image
# TODO just for now specify random value so that you don't get variation within the recursive iterations
if for_evaluate and degrade_test:
the_dataset = the_dataset.map(lambda x, y: image_utils.degrade_image(x, y,
degrade_type,
degrade_val,
degrade_factor=degrade_factor,
random_value=0.3))
if for_evaluate and noise_test:
the_dataset = the_dataset.map(lambda x, y: image_utils.add_image_noise(x, y,
minval=min_val,
noise_type=noise_type,
noise_factor=noise_factor))
logging.debug("isTrain, for_evaluate, repeats: ", is_train, for_evaluate, repeats)
the_dataset = the_dataset.flat_map(lambda x, y: tf.data.Dataset.from_tensors((x, y)).repeat(repeats))
the_dataset = the_dataset.prefetch(1)
the_dataset = the_dataset.repeat()
return the_dataset
train_dataset = get_set(set_type='train', for_evaluate=False)
test_dataset = None
if evaluate_step:
if use_trainset_for_tests == UseTrainForTest.SHUFFLED:
test_dataset = get_set(set_type='train', for_evaluate=True, seed_increment=1)
elif use_trainset_for_tests == UseTrainForTest.IDENTICAL:
test_dataset = get_set(set_type='train', for_evaluate=True)
else: # NO
test_dataset = get_set(set_type='test', for_evaluate=True, seed_increment=2) # use a different seed in case train and test are the same as in artificial or recordset dataset
return train_dataset, test_dataset
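  # Illustrative sketch of the flat_map/repeat pattern above (added; toy
  # data, not original code): each element is emitted `repeats` times in place.
  #   ds = tf.data.Dataset.range(2).flat_map(
  #       lambda x: tf.data.Dataset.from_tensors(x).repeat(3))
  #   # yields: 0, 0, 0, 1, 1, 1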
def _generate_datasets(self):
"""Override in child classes with your options"""
    degrade_test = False
    if self._opts['degrade_type'] != 'none':
      degrade_test = True
noise_test = False
if self._opts['noise_val'] != 0:
noise_test = True
train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],
self._opts['test_classes'],
                                                                  degrade_test=degrade_test,
degrade_type=self._opts['degrade_type'],
degrade_factor=self._opts['degrade_factor'],
degrade_val=self._opts['min_val'],
noise_test=noise_test,
noise_factor=self._opts['noise_factor'],
recurse_train=self._is_train_recursive(),
recurse_test=self._is_inference_recursive(),
num_batch_repeats=self._opts['num_repeats'],
recurse_iterations=self._opts['recurse_iterations'],
evaluate_step=self._opts['evaluate'],
invert_images=self._opts['invert_images'],
min_val=self._opts['min_val'])
return train_dataset, test_dataset
def _create_dataset(self):
if self._is_omniglot_lake():
self._dataset = self._dataset_type(self._dataset_location,
self._hparams.batch_size,
self._opts['test_classes'])
else:
self._dataset = self._dataset_type(self._dataset_location)
def _setup_dataset(self):
"""Setup the dataset and retrieve inputs, labels and initializers"""
with tf.variable_scope('dataset'):
self._create_dataset()
resize_factor = self._opts['resize_images_factor']
if resize_factor != 1.0:
if not self._is_omniglot_lake():
raise RuntimeError('Resize images is only supported for Omniglot currently')
# the dataset shape is referenced in other places, so we need to change it directly
# that is only supported by OmniglotDataset. That is why we don't resize here outside of Dataset class.
# Omni uses it to resize internally and then image size is consistent with dataset.dataset_shape
height = int(resize_factor * self._dataset.shape[1])
width = int(resize_factor * self._dataset.shape[2])
self._dataset.set_shape(height, width)
# Setup dataset iterators
train_dataset, test_dataset = self._generate_datasets()
self._placeholders['dataset_handle'] = tf.placeholder(tf.string, shape=[], name='dataset_handle')
# Setup dataset iterators
with tf.variable_scope('dataset_iterators'):
self._iterator = tf.data.Iterator.from_string_handle(self._placeholders['dataset_handle'],
train_dataset.output_types,
train_dataset.output_shapes)
self._inputs, self._labels = self._iterator.get_next()
self._dataset_iterators = {}
with tf.variable_scope('train_dataset'):
self._dataset_iterators['training'] = train_dataset.make_initializable_iterator()
if self._opts['evaluate']:
with tf.variable_scope('test_dataset'):
self._dataset_iterators['test'] = test_dataset.make_initializable_iterator()
def run(self, num_batches, evaluate, train=True):
"""Run Experiment"""
# Training
# -------------------------------------------------------------------------
training_handle = self._session.run(self._dataset_iterators['training'].string_handle())
self._session.run(self._dataset_iterators['training'].initializer)
if evaluate:
test_handle = self._session.run(self._dataset_iterators['test'].string_handle())
self._session.run(self._dataset_iterators['test'].initializer)
self._on_before_training_batches()
# set some hyperparams to instance variables for access in train and complete methods
# (to be compatible with base class method signatures)
    self._rsummary_from_batch = num_batches - self._opts['rsummary_batches']  # recursive summaries for the last n batches
self._input_mode = self._opts['input_mode']
for batch in range(num_batches):
logging.debug("----------------- Batch: %s", str(batch))
feed_dict = {}
if train:
global_step = tf.train.get_global_step(self._session.graph)
if global_step is not None:
training_step = self._session.run(global_step)
training_epoch = self._dataset.get_training_epoch(self._hparams.batch_size, training_step)
else:
training_step = 0
training_epoch = 0
# Perform the training, and retrieve feed_dict for evaluation phase
logging.debug("\t----------------- Train with training_step: %s", str(training_step))
feed_dict, _ = self.training(training_handle, batch)
self._on_after_training_batch(batch, training_step, training_epoch)
# Export any experiment-related data
# -------------------------------------------------------------------------
if self._export_opts['export_filters']:
if (batch == num_batches - 1) or ((batch + 1) % self._export_opts['interval_batches'] == 0):
self.export(self._session, feed_dict)
if self._export_opts['export_checkpoint']:
if (batch == num_batches - 1) or ((batch + 1) % self._export_opts['interval_batches'] == 0):
self._saver.save(self._session, os.path.join(self._summary_dir, 'model.ckpt'), global_step=batch + 1)
if evaluate:
logging.debug("----------------- Complete with training_step: %s", str(batch))
losses = self._complete_pattern(feed_dict, training_handle, test_handle, batch)
self._on_after_evaluate(losses, batch)
def _on_after_evaluate(self, results, batch):
"""Record losses after evaluation is completed."""
for loss, loss_value in results.items():
logger_utils.log_metrics({loss: loss_value})
if self._summarize:
summary = tf.Summary()
for loss, loss_value in results.items():
summary.value.add(tag=self._component.name + '/summaries/completion/' + loss,
simple_value=loss_value)
self._add_completion_summary(summary, batch)
self._writer.add_summary(summary, batch)
self._writer.flush()
def _add_completion_summary(self, summary, batch):
"""Assumes images are in appropriate shape for summary"""
diff1 = np.abs(self._memorised - self._cue)
diff2 = np.abs(self._memorised - self._recalled)
mem_cue_diff = [self._memorised, self._cue, diff1]
mem_out_diff = [self._memorised, self._recalled, diff2]
image_utils.add_arbitrary_images_summary(summary, 'pcw', mem_cue_diff,
['memorised', 'cue', 'diff'], combined=True)
image_utils.add_arbitrary_images_summary(summary, 'pcw', mem_out_diff,
['memorised', 'output', 'diff'], combined=True)
np_utils.print_simple_stats(self._memorised, 'memorised')
np_utils.print_simple_stats(self._cue, 'cue')
np_utils.print_simple_stats(diff1, 'mem_cue_diff')
np_utils.print_simple_stats(diff2, 'mem_out_diff')
def _setup_recursive_train_modes(self, batch_type):
"""
Set the appropriate mode depending on batch type.
This is only called when recursive training
"""
mode = 'training'
if batch_type == 'encoding':
mode = 'inference'
return mode
def training(self, training_handle, training_step, training_fetches=None):
"""The training procedure within the batch loop"""
if training_fetches is None:
training_fetches = {}
batch_type = self._setup_train_batch_types()
feed_dict = self._setup_train_feed_dict(batch_type, training_handle)
self._component.reset() # reset (important for feedback to be zero)
if self._is_train_recursive():
iterations = self._opts['recurse_iterations']
# if setup for recursive operation, wrap in harness and step through the harness
if self._recursive_harness is None:
self._recursive_harness = RecursiveComponentHarness(self._component, recurse_iterations=iterations)
mode = self._setup_recursive_train_modes(batch_type)
summary_step = training_step - self._rsummary_from_batch # make start index 0
if self._is_combined_train_and_test_recursive_summary():
summary_step = summary_step * 2 # because with 'evaluate', train and completion are concatenated
fetched = self._recursive_harness.step(self._session,
self._writer,
summary_step,
feed_dict=feed_dict,
mode=mode,
input_mode=self._input_mode,
inference_batch_type=batch_type,
train_batch_type=batch_type,
fetches=training_fetches)
else:
fetched = self.step_graph(self._component, feed_dict, batch_type, training_fetches)
# write one summary for each step of training (whether it is one graph step, or multiple recursive iterations)
self._component.write_summaries(training_step, self._writer, batch_type=batch_type)
return feed_dict, fetched
def _get_target_switch_to_test(self, feed_dict, training_handle, test_handle):
"""Evaluate the target input for concrete values, then switch to the test dataset."""
# Evaluate the input
target_inputs = self._session.run(self._inputs, feed_dict={
self._placeholders['dataset_handle']: training_handle
})
# Switch to test set
feed_dict.update({
self._placeholders['dataset_handle']: test_handle
})
return target_inputs, feed_dict
def _set_decode_gain(self, feed_dict):
if self._component.get_dual().get('decode_gain') is not None:
completion_gain = self._opts['completion_gain']
decode_gain_pl = self._component.get_dual().get('decode_gain').get_pl()
feed_dict.update({
decode_gain_pl: [completion_gain]
})
def _complete_pattern(self, feed_dict, training_handle, test_handle, test_step):
"""Exposes component to an incomplete pattern and calculates the completion loss."""
losses = {}
target_inputs, feed_dict = self._get_target_switch_to_test(feed_dict, training_handle, test_handle)
self._set_decode_gain(feed_dict)
self._inference(test_step, feed_dict)
# Calculate pattern completion loss
decoded_inputs = self._component.get_decoding()
losses['completion_loss'] = np.square(abs(target_inputs - decoded_inputs)).mean()
self._prep_for_summaries(target_inputs, self._component.get_input('encoding'), decoded_inputs)
return losses
def _prep_for_summaries(self, pc_memorise, pc_direct_input, pc_retrieved):
"""
pc_memorise = the pattern to be memorised (input in 'learning' mode)
pc_cue = the cue that was presented to PC (the input in 'retrieve' mode),
get it from PC as it may have logic to determine which external signal
it uses as a cue (in the case of Hopfield, it could be x_ext or from z=w.x_cue)
pc_retrieved = the memory retrieved from PC (the output)
"""
if len(pc_memorise.shape) == 2: # batch of 1d vectors (other workflows, PC takes input from SAE encoding)
shape, _ = image_utils.square_image_shape_from_1d(pc_memorise.shape[1])
else:
shape = pc_memorise.shape
self._memorised = np.reshape(pc_memorise, shape)
self._cue = np.reshape(pc_direct_input, shape)
self._recalled = np.reshape(pc_retrieved, shape)
def _inference(self, test_step, feed_dict, testing_fetches=None, batch_type='encoding'):
"""
End-to-end inference for pattern completion.
"""
if testing_fetches is None:
testing_fetches = {}
if self._is_inference_recursive():
if self._recursive_harness is None:
self._recursive_harness = RecursiveComponentHarness(self._component, self._opts['recurse_iterations'])
summary_step = test_step - self._rsummary_from_batch # make start index 0
if self._is_combined_train_and_test_recursive_summary():
summary_step = summary_step * 2 + 1 # x2 b/c train+completion, +1 so completion is after train iterations
fetched = self._recursive_harness.step(self._session,
self._writer,
summary_step,
feed_dict=feed_dict,
mode='inference',
input_mode=self._input_mode,
inference_batch_type=batch_type,
fetches=testing_fetches)
else:
fetched = self.step_graph(self._component, feed_dict, batch_type, fetches=testing_fetches)
if self._is_write_evaluate_summary():
self._component.write_summaries(test_step, self._writer, batch_type=batch_type)
return fetched
|
{"hexsha": "bf48bb761abe25034182fd82e2702c5cd6ae7181", "size": 27991, "ext": "py", "lang": "Python", "max_stars_repo_path": "aha/workflows/pattern_completion_workflow.py", "max_stars_repo_name": "ProjectAGI/aha", "max_stars_repo_head_hexsha": "53a98ea42526dca56517dc97fffad874772f10f2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-08T13:35:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-19T18:33:36.000Z", "max_issues_repo_path": "aha/workflows/pattern_completion_workflow.py", "max_issues_repo_name": "ProjectAGI/aha", "max_issues_repo_head_hexsha": "53a98ea42526dca56517dc97fffad874772f10f2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aha/workflows/pattern_completion_workflow.py", "max_forks_repo_name": "ProjectAGI/aha", "max_forks_repo_head_hexsha": "53a98ea42526dca56517dc97fffad874772f10f2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-02T08:28:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-02T08:28:06.000Z", "avg_line_length": 46.8076923077, "max_line_length": 189, "alphanum_fraction": 0.6356686078, "include": true, "reason": "import numpy", "num_tokens": 5900}
|
#ifndef ATL_FFI_HPP
#define ATL_FFI_HPP
/**
* @file /home/ryan/programming/atl/ffi_2.hpp
* @author Ryan Domigan <ryan_domigan@sutdents@uml.edu>
* Created on Dec 29, 2013
*/
#include <array> // for tuple_element
#include <boost/mpl/aux_/adl_barrier.hpp> // for mpl
#include <cstddef> // for size_t
#include <functional> // for function
#include <string> // for string
#include <tuple> // for tuple
#include <type_traits> // for remove_pointer
#include "./helpers/make_ast.hpp" // for fn
#include "./type.hpp" // for iterator, Ast, tag, value_...
#include "./type_traits.hpp" // for cxx_to_atl
#include "./utility.hpp" // for Apply, BuildIndicies, Indexer
#include "gc/marked.hpp" // for Marked
#include "helpers/misc.hpp" // for from_bytes, to_bytes
#include "wrap.hpp" // for value
namespace atl {
namespace mpl = boost::mpl;
namespace byte_code
{
typedef typename vm_stack::value_type value_type;
template<class T>
vm_stack::value_type to_bytes(T input)
{ return reinterpret_cast<vm_stack::value_type>(input); }
// TODO: use the `std::is_integral` and static cast for all integral (and floating?) types.
value_type to_bytes(long input)
{ return static_cast<value_type>(input); }
value_type to_bytes(bool input)
{ return static_cast<value_type>(input); }
value_type to_bytes(void* input)
{ return reinterpret_cast<value_type>(input); }
value_type to_bytes(Pointer input)
{ return reinterpret_cast<value_type>(input.value); }
template<class R>
struct PntrCaster
{
typedef PntrCaster<R> type;
static R a(value_type input)
{ return reinterpret_cast<R>(input); }
};
template<class I>
struct StaticCaster
{
typedef StaticCaster<I> type;
static I a(value_type input)
{ return static_cast<I>(input); }
};
template<class T>
struct Caster
: public std::conditional<std::is_integral<T>::value,
StaticCaster<T>,
PntrCaster<T>
>::type
{};
template<class R>
R from_bytes(value_type input) { return Caster<R>::a(input); }
}
namespace cxx_functions
{
using namespace tmpl;
template<class T>
struct GuessTag
: public Apply<tag, Apply<type_mapping::cxx_to_atl,
std::remove_pointer<T> >
>::type
{};
struct Metadata
{
template<class R, class ... Rest>
struct apply
{
static constexpr size_t arity()
{ return sizeof...(Rest); }
template<class Alloc>
static Ast parameter_types(Alloc& gc)
{ return *gc(fn_type::fn(GuessTag<Rest>::value..., GuessTag<R>::value)); }
};
};
// wraps a c++ std::function
template<class Sig> struct WrapStdFunction {};
template<class R, class ... Sig>
class WrapStdFunction<R (Sig ...)> :
public Metadata::template apply<R, Sig...>
{
private:
template <std::size_t... Index>
static void call_packed(std::function<R (Sig...)> fn
, vm_stack::iterator begin
, tmpl::Indexer<Index...>)
{
using namespace byte_code;
*begin = to_bytes(atl::value<R>(fn(from_bytes<Sig>(begin[Index])...)));
}
public:
/**
* using the 'a' for 'apply' convention, builds the wrapped version of fn
* @return: wrapped function
*/
template<class Alloc>
static Marked<CxxFunctor> a(std::function<R (Sig...)> const& fn, Alloc &gc
, std::string const & name = "#<Unnamed-CxxFunctor>")
{
return gc.template make<CxxFunctor>
([fn](vm_stack::iterator vv, vm_stack::iterator _)
{
return call_packed(fn, vv,
typename tmpl::BuildIndicies<WrapStdFunction::arity()>::type {});
}
, name
, WrapStdFunction::parameter_types(gc)
, WrapStdFunction::arity());
}
};
}
template<class Sig>
using WrapStdFunction = cxx_functions::WrapStdFunction<Sig>;
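	// Usage sketch (hypothetical; assumes `gc` models the Alloc concept that
	// WrapStdFunction::a expects above):
	//   std::function<long (long, long)> add =
	//       [](long a, long b) { return a + b; };
	//   auto functor = WrapStdFunction<long (long, long)>::a(add, gc, "add");
	// The wrapped body unpacks its arguments from the VM stack with from_bytes
	// and writes the boxed result back with to_bytes.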
namespace signature
{
template<class R, class ... Sig>
struct Pack;
template<class R, class ... Sig>
struct Pack<R (Sig...)>
{
typedef Pack<R (Sig...)> type;
typedef R Return;
typedef std::tuple<Sig...> Args;
static constexpr std::size_t arity = sizeof...(Sig);
};
template<class R, class Fn, class Args, class Indexes>
struct _Unpack;
template<class R, class Target, class Args, std::size_t... Index>
struct _Unpack<R, Target, Args, tmpl::Indexer<Index...> >
{ typedef typename Target::template type<R, typename std::tuple_element<Index, Args>::type...> type; };
// Unpack Pack to instantiate Target
template<class Pack, class Target>
struct Unpack
: public _Unpack<typename Pack::Return,
Target,
typename Pack::Args,
typename tmpl::BuildIndicies<Pack::arity>::type >
{};
struct _StdFunction
{
template<class R, class ... Args>
using type = std::function<R (Args...)>;
};
template<class Pack>
struct StdFunction
: public Unpack<Pack, _StdFunction>
{};
struct _Wrapper
{
template<class R, class ... Args>
using type = cxx_functions::WrapStdFunction<R (Args...)>;
};
template<class Pack>
struct Wrapper
: public Unpack<Pack, _Wrapper>
{};
}
}
#endif
|
{"hexsha": "166f4ce4cc45b3c8b0240865e5791f6aad825ab7", "size": 5495, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ffi.hpp", "max_stars_repo_name": "rcdomigan/atl", "max_stars_repo_head_hexsha": "6a6777a2f714480366551a4462c986a2f9d7612f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ffi.hpp", "max_issues_repo_name": "rcdomigan/atl", "max_issues_repo_head_hexsha": "6a6777a2f714480366551a4462c986a2f9d7612f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 42.0, "max_issues_repo_issues_event_min_datetime": "2015-01-01T20:20:29.000Z", "max_issues_repo_issues_event_max_datetime": "2017-05-31T02:02:58.000Z", "max_forks_repo_path": "ffi.hpp", "max_forks_repo_name": "rcdomigan/atl", "max_forks_repo_head_hexsha": "6a6777a2f714480366551a4462c986a2f9d7612f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9362745098, "max_line_length": 105, "alphanum_fraction": 0.6036396724, "num_tokens": 1409}
|
# import sys
# sys.path.append('../lib')
import geom, graph
import model
import model_utils
import tileloader
import infer
import numpy
import os
import random
import tensorflow as tf
import time
import argparse
parser = argparse.ArgumentParser(description='Train a RoadTracer model.')
tileloader.tile_dir = '/data/imagery/'
tileloader.graph_dir = '/data/graphs/'
tileloader.pytiles_path = '/data/json/pytiles.json'
tileloader.startlocs_path = '/data/json/starting_locations.json'
mylogs = open('../logs/road_tracer.log', 'w')
MAX_PATH_LENGTH = 8192
SEGMENT_LENGTH = 20
PARALLEL_TILES = 256
SAVE_ITERATIONS = 100
PATHS_PER_TILE_AXIS = 2
TILE_MODE = 'sat'
SAVE_EXAMPLES = False
DETECT_MODE = 'normal'
MODEL_BASE = '../model/'
WINDOW_SIZE = 256
FOLLOW_TARGETS = False
THRESHOLD = 0.4
SINGLE_ANGLE_TARGET = False
PARALLEL_PATHS = PARALLEL_TILES * PATHS_PER_TILE_AXIS * PATHS_PER_TILE_AXIS
def epoch_to_learning_rate(epoch):
if epoch < 100:
return 1e-5
elif epoch < 200:
return 1e-6
elif epoch < 300:
return 1e-7
else:
return 1e-8
tiles = tileloader.Tiles(PATHS_PER_TILE_AXIS, SEGMENT_LENGTH, PARALLEL_TILES, TILE_MODE)
tiles.prepare_training()
test_tile_data = tiles.get_test_tile_data()
# initialize model and session
print('initializing graph')
print('initializing graph', file=mylogs)
m = model.Model(tiles.num_input_channels())
session = tf.Session()
model_path = MODEL_BASE + '/model_latest/model'
best_path = MODEL_BASE + '/model_best/model'
if os.path.isfile(model_path + '.meta'):
print('... loading existing model')
print('... loading existing model', file=mylogs)
m.saver.restore(session, model_path)
else:
	print('... initializing a new model')
	print('... initializing a new model', file=mylogs)
	session.run(m.init_op)
	# run the global initializer only for a fresh model; running it after a
	# restore would overwrite the weights that were just loaded
	init = tf.global_variables_initializer()
	session.run(init)
# initialize subtiles
subtiles = []
for tile in tiles.train_tiles:
big_rect = geom.Rectangle(
tile.scale(1024),
tile.add(geom.Point(1, 1)).scale(1024)
)
for offset in [geom.Point(0, 0)]:
start = big_rect.start.add(offset)
search_rect = geom.Rectangle(start, start.add(geom.Point(1024, 1024)))
search_rect = search_rect.add_tol(-WINDOW_SIZE/2)
starting_locations = tiles.all_starting_locations['{}_{}_{}'.format(tile.region, tile.x, tile.y)]
starting_locations = [loc for loc in starting_locations if search_rect.add_tol(-WINDOW_SIZE/2).contains(loc[0]['point'])]
if len(starting_locations) < 5:
continue
subtiles.append({
'region': tile.region,
'rect': big_rect,
'search_rect': search_rect,
'cache': tiles.cache,
'starting_locations': starting_locations,
'gc': tiles.gcs[tile.region],
'edge_counts': {},
})
print('extracted {} subtiles from {} tiles (missing {})'.format(len(subtiles), len(tiles.train_tiles), len(tiles.train_tiles) - len(subtiles)))
print('extracted {} subtiles from {} tiles (missing {})'.format(len(subtiles), len(tiles.train_tiles), len(tiles.train_tiles) - len(subtiles)), file=mylogs)
# initialize paths, one per subtile
print('loading initial paths')
print('loading initial paths', file=mylogs)
paths = []
for i, subtile in enumerate(subtiles):
start_loc = random.choice(subtile['starting_locations'])
paths.append(model_utils.Path(subtile['gc'], subtile, start_loc=start_loc))
num_sets = int((len(paths) + PARALLEL_PATHS - 1) / PARALLEL_PATHS)
best_accuracy = None
angle_losses = []
detect_losses = []
action_losses = []
losses = []
def vector_to_action(angle_outputs, stop_outputs):
x = numpy.zeros((64,), dtype='float32')
	# move only if the 'go' score exceeds THRESHOLD; then one-hot the argmax angle bucket
if stop_outputs[0] > THRESHOLD:
x[numpy.argmax(angle_outputs)] = 1
return x
def action_to_vector(v):
angle_outputs = numpy.zeros((64,), dtype='float32')
action_outputs = numpy.zeros((2,), dtype='float32')
count = 0
for i in range(len(v)):
if v[i] > 0.9:
count += 1
# action_outputs: [go, stop]
if count == 0:
action_outputs[1] = 1
else:
action_outputs[0] = 1
if SINGLE_ANGLE_TARGET:
for i in range(len(v)):
if v[i] > 0.9:
angle_outputs[i] = 1.0 / count
else:
angle_outputs[:] = v[:]
return angle_outputs, action_outputs
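# Encoding sketch for the two helpers above (values are illustrative): angles
# are discretized into 64 buckets and actions into a [go, stop] pair. For a
# target vector v with two entries above 0.9 and SINGLE_ANGLE_TARGET=True,
# action_to_vector(v) returns angle_outputs with 0.5 at those two buckets and
# action_outputs == [1, 0]; vector_to_action inverts this by one-hotting the
# argmax angle whenever the 'go' score exceeds THRESHOLD.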
outer_it = 0
for outer_it in range(outer_it+1, 400):
set_id = outer_it % num_sets
start_idx = set_id * PARALLEL_PATHS
end_idx = min(start_idx + PARALLEL_PATHS, len(paths))
# if end_idx - start_idx < PARALLEL_PATHS / 2:
# raise Exception('last set has only {} paths, but PARALLEL_PATHS={}'.format(end_idx - start_idx, PARALLEL_PATHS))
times = {
'prepare': 0,
'train': 0,
'save': 0,
'extend': 0,
'train_total': 0,
'test_total': 0,
}
start_time = time.time()
for path_it in range(1000):
stage_time = time.time()
if path_it % SAVE_ITERATIONS == 0:
print('begin step {}, {} ({}...{}/{})'.format(outer_it, path_it, start_idx, end_idx, len(paths)))
print('begin step {}, {} ({}...{}/{})'.format(outer_it, path_it, start_idx, end_idx, len(paths)), file=mylogs)
path_indices = random.sample(range(int(start_idx), int(end_idx)), model.BATCH_SIZE)
# prepare path inputs and target angles
batch_extension_vertices = []
batch_inputs = []
batch_detect_targets = []
batch_angle_targets = numpy.zeros((model.BATCH_SIZE, 64), 'float32')
batch_action_targets = numpy.zeros((model.BATCH_SIZE, 2), 'float32')
for i in range(len(path_indices)):
path_idx = path_indices[i]
extension_vertex = paths[path_idx].pop()
if extension_vertex is None or len(paths[path_idx].graph.vertices) >= MAX_PATH_LENGTH:
start_loc = random.choice(subtiles[path_idx]['starting_locations'])
paths[path_idx] = model_utils.Path(subtiles[path_idx]['gc'], subtiles[path_idx], start_loc=start_loc)
extension_vertex = paths[path_idx].pop()
path_input, path_detect_target = model_utils.make_path_input(paths[path_idx], extension_vertex, SEGMENT_LENGTH, detect_mode=DETECT_MODE, window_size=WINDOW_SIZE)
batch_extension_vertices.append(extension_vertex)
batch_inputs.append(path_input)
batch_detect_targets.append(path_detect_target)
targets = model_utils.compute_targets_by_best(paths[path_idx], extension_vertex, SEGMENT_LENGTH)
angle_targets, action_targets = action_to_vector(targets)
batch_angle_targets[i, :] = angle_targets
batch_action_targets[i, :] = action_targets
# print("running in samples: %d of %d" % (i, len(path_indices)))
times['prepare'] += time.time() - stage_time
stage_time = time.time()
# train model
feed_dict = {
m.is_training: True,
m.inputs: batch_inputs,
m.angle_targets: batch_angle_targets,
m.action_targets: batch_action_targets,
m.detect_targets: batch_detect_targets,
m.learning_rate: epoch_to_learning_rate(outer_it),
}
batch_angle_outputs, batch_action_outputs, batch_detect_outputs, angle_loss, detect_loss, action_loss, loss, _ = session.run([m.angle_outputs, m.action_outputs, m.detect_outputs, m.angle_loss, m.detect_loss, m.action_loss, m.loss, m.optimizer], feed_dict=feed_dict)
angle_losses.append(angle_loss)
detect_losses.append(detect_loss)
action_losses.append(action_loss)
losses.append(loss)
times['train'] += time.time() - stage_time
stage_time = time.time()
if SAVE_EXAMPLES and start_idx in path_indices:
x = path_indices.index(start_idx)
fname = 'E:/RoadTracer/data/outimage/{}_{}_{}_'.format(path_indices[x], outer_it, path_it)
# print(fname)
model_utils.make_path_input(paths[path_indices[x]], batch_extension_vertices[x], SEGMENT_LENGTH, fname=fname, angle_targets=batch_angle_targets[x, :], angle_outputs=batch_angle_outputs[x, :], detect_output=batch_detect_outputs[x, :, :, 0], detect_mode=DETECT_MODE, window_size=WINDOW_SIZE)
with open(fname + 'meta.txt', 'w') as f:
f.write('action={}, angle_bucket={}\n\nactions: {}\nangles: \n'.format(
numpy.argmax(batch_action_outputs[x, :]),
numpy.argmax(batch_angle_outputs[x, :]),
batch_action_outputs[x, :],
batch_angle_outputs[x, :]
))
times['save'] += time.time() - stage_time
stage_time = time.time()
# extend paths based on angle outputs
for i in range(len(path_indices)):
path_idx = path_indices[i]
			if FOLLOW_TARGETS is True:
x = vector_to_action(batch_angle_targets[i, :], batch_action_targets[i, :])
elif FOLLOW_TARGETS == 'partial':
# (a) always use stop_targets instead of stop_outputs
# (b) if we are far away from graph, use angle_targets, otherwise use angle_outputs
extension_vertex = batch_extension_vertices[i]
if extension_vertex.edge_pos is None or extension_vertex.edge_pos.point().distance(extension_vertex.point) > SEGMENT_LENGTH * 2:
x = vector_to_action(batch_angle_targets[i, :], batch_action_targets[i, :])
else:
x = vector_to_action(batch_angle_outputs[i, :], batch_action_targets[i, :])
elif FOLLOW_TARGETS == 'npartial':
# always move if gt says to move
if batch_action_outputs[i, 0] > THRESHOLD:
x = vector_to_action(batch_angle_outputs[i, :], batch_action_outputs[i, :])
else:
x = vector_to_action(batch_angle_outputs[i, :], batch_action_targets[i, :])
			elif FOLLOW_TARGETS is False:
x = vector_to_action(batch_angle_outputs[i, :], batch_action_outputs[i, :])
else:
raise Exception('invalid FOLLOW_TARGETS setting {}'.format(FOLLOW_TARGETS))
nvertex = len(paths[path_idx].graph.vertices)
paths[path_idx].push(batch_extension_vertices[i], x, SEGMENT_LENGTH)
if len(paths[path_idx].graph.vertices) > nvertex:
pos1 = paths[path_idx].graph.vertices[-1].edge_pos
pos2 = paths[path_idx].graph.vertices[-2].edge_pos
if pos1 is not None and pos2 is not None and pos1.edge != pos2.edge:
subtiles[path_idx]['edge_counts'][pos1.edge.id] = subtiles[path_idx]['edge_counts'].get(pos1.edge.id, 0) + 1
times['extend'] += time.time() - stage_time
stage_time = time.time()
if path_it % SAVE_ITERATIONS == 0:
print('step {},{} train: angle_loss={}, detect_loss={}, action_loss={}, loss={}'.format(outer_it, path_it, numpy.mean(angle_losses), numpy.mean(detect_losses), numpy.mean(action_losses), numpy.mean(losses)))
print('step {},{} train: angle_loss={}, detect_loss={}, action_loss={}, loss={}'.format(outer_it, path_it, numpy.mean(angle_losses), numpy.mean(detect_losses), numpy.mean(action_losses), numpy.mean(losses)), file=mylogs)
del angle_losses[:]
del detect_losses[:]
del action_losses[:]
del losses[:]
m.saver.save(session, model_path)
times['save'] += time.time() - stage_time
stage_time = time.time()
times['train_total'] += time.time() - start_time
start_time = time.time()
print('Done.')
print('Done.', file=mylogs)
#run test
if test_tile_data is not None:
test_paths = []
if not isinstance(test_tile_data, list):
test_tile_data = [test_tile_data]
for t in test_tile_data:
test_paths.append(model_utils.Path(t['gc'], t, start_loc=t['starting_locations'][1]))
angle_loss, detect_loss, action_loss, loss, path_length, accuracy = infer.eval(test_paths, m, session, max_path_length=2048, segment_length=SEGMENT_LENGTH, follow_targets=True, max_batch_size=model.BATCH_SIZE, window_size=WINDOW_SIZE, verbose=False)
print('*** TEST ***: angle_loss={}, detect_loss={}, action_loss={}, loss={}, len={}, accuracy={}/{}'.format(angle_loss, detect_loss, action_loss, loss, path_length, accuracy, best_accuracy))
print('*** TEST ***: angle_loss={}, detect_loss={}, action_loss={}, loss={}, len={}, accuracy={}/{}'.format(angle_loss, detect_loss, action_loss, loss, path_length, accuracy, best_accuracy), file=mylogs)
if best_accuracy is None or accuracy > best_accuracy:
best_accuracy = accuracy
m.saver.save(session, best_path)
times['test_total'] += time.time() - start_time
print(times)
print(times, file=mylogs)
mylogs.close()
|
{"hexsha": "5d24d5b08eac5f0d080d2706de2802c6b42f0899", "size": 11718, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tracer/train.py", "max_stars_repo_name": "astro-ck/Road-Extraction", "max_stars_repo_head_hexsha": "e509ddce9ced558e2e97d3510eb1e4a053113c97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2020-08-01T04:44:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T08:35:53.000Z", "max_issues_repo_path": "Tracer/train.py", "max_issues_repo_name": "astro-ck/Road-Extraction", "max_issues_repo_head_hexsha": "e509ddce9ced558e2e97d3510eb1e4a053113c97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-10-06T16:27:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-22T10:12:51.000Z", "max_forks_repo_path": "Tracer/train.py", "max_forks_repo_name": "astro-ck/Road-Extraction", "max_forks_repo_head_hexsha": "e509ddce9ced558e2e97d3510eb1e4a053113c97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-09-09T19:54:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T08:35:44.000Z", "avg_line_length": 37.9223300971, "max_line_length": 292, "alphanum_fraction": 0.7246970473, "include": true, "reason": "import numpy", "num_tokens": 3082}
|
"""
A program analyzing 3D protein structures from PDB to generate 2D binding motifs. For further information see https://github.com/Cardypro/StructureAnalyzer
"""
import math
import os
from typing import Dict, Tuple, List, Union, Optional
from dataclasses import dataclass
from collections import defaultdict
import networkx as nx
import pysmiles as ps
from pymol import cmd, stored
from tabulate import tabulate
vdwRadii: Dict[str, Optional[float]] = {}
def defineDict(defaultRadius: Optional[float]) -> None:
"""
defines the vdw-radii dict as given by Truhlar et al. If the key isn't in the dict, the defaultRadius will be returned.
"""
global vdwRadii
vdwRadii = defaultdict(lambda: defaultRadius)
vdwRadii.update({
"H": 1.10,
"Li": 1.81,
"Na": 2.27,
"K": 2.75,
"Rb": 3.03,
"Cs": 3.43,
"Fr": 3.48, # End I
"Be": 1.53,
"Mg": 1.73,
"Ca": 2.31,
"Sr": 2.49,
"Ba": 2.68,
"Ra": 2.83, # End II
"B": 1.92,
"Al": 1.84,
"Ga": 1.87,
"In": 1.93,
"Tl": 1.96, # End III
"C": 1.70,
"Si": 2.10,
"Ge": 2.11,
"Sn": 2.17,
"Pb": 2.02, # End IV
"N": 1.55,
"P": 1.80,
"As": 1.85,
"Sb": 2.06,
"Bi": 2.07, # End V
"O": 1.52,
"S": 1.80,
"Se": 1.90,
"Te": 2.06,
"Po": 1.97, # End VI
"F": 1.47,
"Cl": 1.75,
"Br": 1.83,
"I": 1.98,
"At": 2.02, # End VII
"He": 1.40,
"Ne": 1.54,
"Ar": 1.88,
"Kr": 2.02,
"Xe": 2.16,
"Rn": 2.20 # End Main Group
})
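# Lookup sketch: after defineDict(2.0), vdwRadii["C"] returns 1.70 from the
# table above, while any element missing from the table (e.g. a transition
# metal) falls back to the default radius 2.0 via the defaultdict factory.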
@dataclass
class Atom:
"""class representing an Atom in the pdb-file
parameter:
float x: pos x
float y: pos y
float z: pos z
str model: which protein, e.g. 6hn0
str chain: which side chain, e.g. A
str resn: name of residue, e.g. DIF or ASN
str resi: identifier of residue, e.g. 607
str name: name of atom, e.g. CL4
str element: element of atom, e.g. CL
"""
x: float = 0 # pos x
y: float = 0 # pos y
z: float = 0 # pos z
model: str = "none" # which protein, e.g. 6hn0
chain: str = "none" # which sidechain, e.g. A
resn: str = "none" # name of residue, e.g. DIF
resi: str = "none" # identifier of residue, e.g. 607
name: str = "none" # name of atom, e.g. CL4
elem: str = "none"
@property
def element(self) -> str:
"""
Returns:
string: element with capital first letter as usual (e.g. CL -> Cl)
"""
return self.elem[0]+self.elem[1:].lower() # element, e.g. Cl
@property
def identifierString(self) -> str:
"""
Returns:
            string: identifierString to address a certain Atom in the pdb structure via PyMOL
"""
return f"{self.model}//{self.chain}/{self.resn}`{self.resi}/{self.name}"
@property
def pos(self) -> Tuple[float, float, float]:
"""
Returns:
triple: cartesian coordinates of the atom
"""
return (self.x, self.y, self.z)
@dataclass
class Interaction:
"""
    class representing an Interaction between two Atoms
"""
atomA: Atom
atomB: Atom
dist: float
def calcDist(pos1: Tuple[float, float, float], pos2: Tuple[float, float, float]) -> float:
"""
calculates the 3D-distance of two given coordinates
"""
x1 = pos1[0]
y1 = pos1[1]
z1 = pos1[2]
x2 = pos2[0]
y2 = pos2[1]
z2 = pos2[2]
dist = math.sqrt((x2-x1)**2 + (y2-y1)**2 + (z2-z1)**2)
return dist
def calcCogFromStr(selection: str) -> Tuple[float, float, float]:
"""
calculates the center of geometry of a given PyMOL selection
"""
stored.cogX, stored.cogY, stored.cogZ = 0, 0, 0
stored.i = 1
# has to be in an if statement since otherwise there have to be multiple for loops (pyMOL)
cmd.iterate_state(-1, selection, """\
if(True):
stored.cogX += x
stored.cogY += y
stored.cogZ += z
stored.i += 1
""")
return(stored.cogX/stored.i, stored.cogY/stored.i, stored.cogZ/stored.i)
def calcCogFromList(entries: List[Atom]) -> Tuple[float, float, float]:
"""
calculates the center of geometry of a given Array containing atoms
"""
sumX, sumY, sumZ = 0.0, 0.0, 0.0
for entry in entries:
sumX += entry.x
sumY += entry.y
sumZ += entry.z
avgX = sumX/len(entries)
avgY = sumY/len(entries)
avgZ = sumZ/len(entries)
return(avgX, avgY, avgZ)
def calcCog(argument: Union[str, list]) -> Tuple[float, float, float]:
"""
calculates the Center of Geometry of a given selection or list of atoms
Args:
argument (str or list): either a PyMOL-selection name or a List of atoms
Returns:
Tuple[float, float, float]: 3D-coords of CoG
"""
if isinstance(argument, str):
return calcCogFromStr(argument)
if isinstance(argument, list):
return calcCogFromList(argument)
exit("unable to calculate the CoG from the given argument")
return (0, 0, 0)
def analyzeInput(inputString: str) -> Tuple[List[str], List[str], List[str]]:
"""
splits the input string so it can be read
Args:
inputString (str): has to be like "elemA|elemB|... factor*vdw elemC|elemD|..."
Returns:
list: list of lists. Like [['C', 'N'], ['2','vdw'], ['C', 'O']]
"""
inputParts = inputString.split()
inputA = inputParts[0].split("|")
length = inputParts[1].split("*")
inputB = inputParts[2].split("|")
return (inputA, length, inputB)
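# Example (hypothetical input): analyzeInput("C|N 1.5*vdw O") returns
# (['C', 'N'], ['1.5', 'vdw'], ['O']).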
def getCutoff(array: Tuple[Atom, List[str], Atom]) -> Optional[float]:
"""
calculates cutoff via vdwRadii
Args:
array (list): like [Atom1, ['factor','vdw'], Atom2]
Returns:
float: max distance between the atoms to be evaluated as interaction
"""
elementA = array[0].element
elementB = array[2].element
if elementA not in vdwRadii:
print(f"{elementA} not found. Using default radius instead.")
if elementB not in vdwRadii:
print(f"{elementB} not found. Using default radius instead.")
radiusA = vdwRadii[elementA]
radiusB = vdwRadii[elementB]
if radiusA is None:
print(
f"Unable to evaluate vdwRadii for {elementA} since no default radius is given.")
return None
if radiusB is None:
print(
f"Unable to evaluate vdwRadii for {elementB} since no default radius is given.")
return None
factor = float(array[1][0])
return (radiusA + radiusB) * factor
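# Worked example using the radii above: for a C...O pair with factor 1.5 the
# cutoff is (1.70 + 1.52) * 1.5 = 4.83 Angstrom.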
def buildGraph(atomlist: List[Atom]) -> nx.Graph:
"""
turns the given molecule (list of atoms) into a network graph
Args:
atomlist (list of Atoms): all Atoms belonging to a molecule
Returns:
networkx.Graph
"""
visitedAtoms = []
queue = atomlist
graph = nx.Graph()
cmd.h_add()
while len(queue) != 0:
stored.currNeighbor = []
currentNode = queue.pop(-1)
cmd.select("neighborSelection",
f"neighbor {currentNode.identifierString}")
stored.currentResn = currentNode.resn
cmd.iterate_state(-1, "neighborSelection", """\
if resn == stored.currentResn:
stored.currNeighbor.append(Atom(x, y, z, model, chain, resn, resi, name, elem))
""")
graph.add_node(currentNode.identifierString,
element=currentNode.element, charge=0)
for atom in stored.currNeighbor:
graph.add_edge(currentNode.identifierString, atom.identifierString)
if atom.identifierString not in visitedAtoms:
visitedAtoms.append(atom.identifierString)
queue.append(atom)
ps.fill_valence(graph, respect_hcount=True, respect_bond_order=False)
cmd.remove("hydro")
return graph
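# Note: buildGraph walks bonded neighbours depth-first via PyMOL's `neighbor`
# selector (restricted to atoms with the same residue name), temporarily adds
# hydrogens so pysmiles' fill_valence can infer bond orders, and strips them
# again with cmd.remove("hydro").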
# writes a .mrv-file (XML format) that can be opened with e.g. Marvinsketch
def writeXML(graph: nx.Graph, interactionList: List[Interaction], pdbCode: str, ligand: List[Atom]) -> None:
"""
writes a .mrv-file (XML format) that can be opened with e.g. Marvin Sketch
Args:
        graph (networkx.Graph): molecule graph whose atoms, bonds and interactions are written
"""
# creates an output folder
ligandName = f"{ligand[0].resn}{ligand[0].resi}"
file = open(
(f"./Output/{pdbCode} {ligandName}.mrv"), "w", encoding="utf-8")
file.write("<MDocument>\n<MChemicalStruct>\n<molecule>\n")
dictionary = dict()
# all atoms
file.write("<atomArray>\n")
nodeID = 1
for node in list(graph.nodes(data=True)):
nodeIdentifier = node[0]
nodeDict = node[1]
if nodeDict["element"] != "H":
file.write("<atom id=\"a" + str(nodeID) +
"\" elementType=\"" + nodeDict["element"] + "\"/>" + "\n")
dictionary[nodeIdentifier] = nodeID
nodeID += 1
file.write("</atomArray>\n")
# all bonds
file.write("<bondArray>\n")
for edge in graph.edges.data():
startAtom = edge[0]
endAtom = edge[1]
bondOrder = edge[2]["order"]
if graph.nodes[endAtom]["element"] != "H" and graph.nodes[startAtom]["element"] != "H":
file.write("<bond atomRefs2=\"a" + str(dictionary[startAtom]) + " a" + str(
dictionary[endAtom]) + "\" order=\"" + str(bondOrder) + "\"/>\n")
file.write("</bondArray>\n</molecule>\n</MChemicalStruct>\n")
# interactions
interactionID = 0
for interactions in interactionList:
try:
atomA = interactions.atomA
atomB = interactions.atomB
file.write("<MPolyline id=\"line" + str(interactionID) +
"\" lineColor=\"#ff9933\" thickness=\"0.04\">\n")
file.write("<MAtomSetPoint atomRefs=\"m1.a" +
str(dictionary[atomA.identifierString]) + "\"/>\n")
file.write("<MAtomSetPoint atomRefs=\"m1.a" +
str(dictionary[atomB.identifierString]) + "\"/>\n")
file.write("</MPolyline>\n")
        except Exception:
print("Error writing interactions tags\n", interactions, ligandName)
file.close()
return
# distances
file.write("<MTextBox id=\"distBox" +
str(interactionID) + "\" autoSize=\"true\">\n")
file.write("<Field name=\"text\"><![CDATA[{D font=Arial,size=9}{fg=#000000}" + str(
round(interactions.dist, 3)) + " \u00c5]]></Field>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("</MTextBox>\n")
file.write("<MPolyline id=\"distLine" + str(interactionID) +
"\" lineColor=\"#000000\" thickness=\"0.01\">\n")
file.write("<MRectanglePoint pos=\"4\" rectRef=\"distBox" +
str(interactionID) + "\"/>\n")
file.write("<MMidPoint lineRef=\"line" + str(interactionID) + "\"/>\n")
file.write("</MPolyline>\n")
interactionID += 1
# name tags for interactions
nameID = 0
done = []
for interactions in interactionList:
try:
atomB = interactions.atomB
if (atomB.resn, atomB.resi) not in done and atomB.resn != "HOH": # no water tag
done.append((atomB.resn, atomB.resi))
file.write(
f"<MTextBox id=\"box{nameID}\" autoSize=\"true\">\n")
file.write("<Field name=\"text\"><![CDATA[{D font=Arial,size=11}{fg=#000000}" + atomB.resn[0] +
atomB.resn[1:].lower() + " " + atomB.resi + "]]></Field>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("<MPoint x=\"0\" y=\"0\"/>\n")
file.write("</MTextBox>\n")
file.write("<MPolyline id=\"boxline" + str(nameID) +
"\" thickness=\"0.01\" lineColor=\"#0000ff\">\n")
file.write("<MRectanglePoint pos=\"4\" rectRef=\"box" +
str(nameID) + "\"/>\n")
file.write("<MAtomSetPoint atomRefs=\"m1.a" +
str(dictionary[atomB.identifierString]) + "\"/>\n")
nameID += 1
file.write("</MPolyline>\n")
        except Exception:
print("Error writing name tags\n", interactions, ligandName)
file.close()
return
file.write("</MDocument>")
file.close()
def writeTable(file, interactionList: List[Interaction]) -> None:
"""
writes the interaction table to a markdown file
Args:
file (filehandle): the file to be written in
interactionList (list): list of Interaction objects
"""
AtomName = interactionList[0].atomA
file.write(f"\n # {AtomName.resn} {AtomName.resi} \n")
table = []
for interaction in interactionList:
AtomA = interaction.atomA
AtomB = interaction.atomB
dist = interaction.dist
table.append([f"{AtomA.resn} {AtomA.resi}/{AtomA.name}", dist,
f"{AtomB.resn} {AtomB.resi}/{AtomB.name}", f"{AtomB.element}"])
formatedTable = tabulate(table, headers=[
"atom ligand", "distance [A]", "atom pocket", "element"], tablefmt="github")
print(formatedTable)
file.write(formatedTable)
file.close()
def StructureAnalyzer(pdbCode: str = "6hn0", ligandCode: str = "DIF", inputString: str = "* 1*vdw *", ignoreH2O: bool = False, defaultRadius: Optional[float] = None, pocketSize: float = 8.0, writeMD: bool = True) -> None:
"""
    Main routine. Calculates the distances between a selected ligand and all pocket atoms of a given pdb structure that satisfy the given cutoff criteria.
Args:
pdbCode (str, optional): Determines the protein structure from pdb. Defaults to "6hn0".
ligandCode (str, optional): Determines the pdb code of the ligand. Defaults to "DIF".
inputString (str, optional): see readme. Defaults to "* 1*vdw *".
ignoreH2O (bool, optional): Determines if water should be ignored. Defaults to False.
defaultRadius (float, optional): Default atom radius if no radius is given for the element. Defaults to None.
pocketSize (float, optional): View distance of pocket and ligand in pyMOL. Defaults to 8.
        writeMD (bool, optional): Determines if a markdown file should be written. Defaults to True.
"""
    os.makedirs("Output", exist_ok=True)
if writeMD:
mdFile = open((f"./Output/{pdbCode}.md"), "w", encoding="utf-8")
mdFile.close()
defineDict(defaultRadius)
cmd.reinitialize()
condition = analyzeInput(inputString)
cmd.fetch(pdbCode) # downloads given .pdb-file
cmd.remove("hydro")
cmd.select("allLigands", "resn " + ligandCode)
stored.allLigandsAtoms = []
stored.oldResi = ""
# iterates all Atoms belonging to the given ligand code and splits them up so you have an array of atoms
cmd.iterate_state(-1, "allLigands", """\
if(resi == stored.oldResi):
stored.allLigandsAtoms[(len(stored.allLigandsAtoms)-1)].append(Atom(x, y, z, model, chain, resn, resi, name, elem))
else:
stored.oldResi = resi
stored.allLigandsAtoms.append([Atom(x, y, z, model, chain, resn, resi, name, elem)])
""")
    # analyze every ligand instance that was found for the given ligand code
for ligands in stored.allLigandsAtoms:
ligandResName = ligands[0].resn # e.g. DIF
ligandResID = ligands[0].resi # e.g. 601
LigandName = ligandResName + str(ligandResID) # e.g. DIFxxx
print(f"Analyzing {LigandName}...")
# drawing pocket and ligand
cmd.hide('all')
cmd.select(LigandName, ligandResName +
"`" + str(ligandResID) + "/")
cmd.select('view', 'br. all within ' + str(pocketSize) +
' of ' + LigandName)
pocketLayerName = f"pocket_{LigandName}"
cmd.select(pocketLayerName, 'view and not ' + LigandName)
cmd.show('sticks', pocketLayerName)
cmd.show('sticks', LigandName)
cmd.show('nb_spheres', pocketLayerName)
cmd.show('nb_spheres', LigandName)
cmd.util.cbaw(pocketLayerName)
cmd.util.cbao(LigandName)
stored.atomsPocket = [] # all Atoms of the Pocket
# reads all informations belonging to the selected binding pocket
cmd.iterate_state(-1, pocketLayerName,
"stored.atomsPocket.append(Atom(x, y, z, model, chain, resn, resi, name, elem))")
interactionList = []
atomsForGraph = []
        # core loop: computes the distance of every pocket atom to every ligand atom; if a distance falls below the cutoff, the interaction is drawn and recorded
for ligandAtoms in ligands:
atomsForGraph.append(ligandAtoms)
conditionElementsLigand = condition[0]
if not (ligandAtoms.element in conditionElementsLigand or "*" in conditionElementsLigand):
continue
for pocketAtoms in stored.atomsPocket:
if (pocketAtoms.resn == "HOH") and ignoreH2O:
continue
conditionElementsPocket = condition[2]
if not (pocketAtoms.element in conditionElementsPocket or "*" in conditionElementsPocket):
continue
conditionDistance = condition[1]
if "vdw" in conditionDistance:
cutoff = getCutoff(
(ligandAtoms, conditionDistance, pocketAtoms))
else:
cutoff = float(conditionDistance[0])
if cutoff is None:
continue
currDist = calcDist(ligandAtoms.pos, pocketAtoms.pos)
if currDist > cutoff:
continue
interactionLayerName = f"inter_{LigandName}"
cmd.distance(
interactionLayerName, ligandAtoms.identifierString, pocketAtoms.identifierString, cutoff+1)
cmd.color("cyan", interactionLayerName)
cmd.show("dashes", interactionLayerName)
interactionList.append(Interaction(
ligandAtoms, pocketAtoms, currDist))
atomsForGraph.append(pocketAtoms)
currGraph = buildGraph(atomsForGraph)
writeXML(currGraph, interactionList, pdbCode, ligands)
print(f"Analyzing {LigandName} finished")
if writeMD:
mdFile = open((f"./Output/{pdbCode}.md"), "a", encoding="utf-8")
writeTable(mdFile, interactionList)
print(f"Analyzing {pdbCode} finished")
def multipleAnalyzer(pdbArray: List[str], ligand: str = "DIF", inputString: str = "* 1*vdw *", ignoreH2O: bool = False, defaultRadius: Optional[float] = None) -> None:
"""
executes the StructureAnalyzer multiple times for a list of pdb-codes
Args:
pdbArray (List[str]): list containing the pdb-codes to be analyzed
ligand (str, optional): pdb-code of the ligand. Defaults to "DIF".
inputString (str, optional): String determining the cutoff criteria. Defaults to "* 1*vdw *".
        ignoreH2O (bool, optional): Decides if water should be ignored when evaluating the interactions. Defaults to False.
        defaultRadius (Optional[float], optional): Fallback radius if an atom radius is not in the list given by Truhlar et al. Defaults to None.
"""
for code in pdbArray:
cmd.reinitialize()
print(f"\n start {code}")
StructureAnalyzer(code, ligand, inputString, ignoreH2O, defaultRadius)
|
{"hexsha": "420fec84c195725d5aacd865db4f25b354f865ca", "size": 19987, "ext": "py", "lang": "Python", "max_stars_repo_path": "StructureAnalyzer.py", "max_stars_repo_name": "Cardypro/StructureAnalyzer", "max_stars_repo_head_hexsha": "7f077058db4ad98b116abb0cbc0d74babd0ec298", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-07-24T21:16:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T07:12:22.000Z", "max_issues_repo_path": "StructureAnalyzer.py", "max_issues_repo_name": "Cardypro/StructureAnalyzer", "max_issues_repo_head_hexsha": "7f077058db4ad98b116abb0cbc0d74babd0ec298", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-01-11T09:30:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-12T00:14:16.000Z", "max_forks_repo_path": "StructureAnalyzer.py", "max_forks_repo_name": "Cardypro/StructureAnalyzer", "max_forks_repo_head_hexsha": "7f077058db4ad98b116abb0cbc0d74babd0ec298", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-25T06:12:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-26T10:00:08.000Z", "avg_line_length": 33.367278798, "max_line_length": 221, "alphanum_fraction": 0.5796767899, "include": true, "reason": "import networkx", "num_tokens": 5352}
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.model_zoo.chem.gnn import GATLayer
from dgl.nn.pytorch import NNConv, Set2Set
from dgl.nn.pytorch.conv import GINConv
from dgl.nn.pytorch.glob import AvgPooling, MaxPooling, SumPooling
class SELayer(nn.Module):
"""Squeeze-and-excitation networks"""
def __init__(self, in_channels, se_channels):
super(SELayer, self).__init__()
self.in_channels = in_channels
self.se_channels = se_channels
self.encoder_decoder = nn.Sequential(
nn.Linear(in_channels, se_channels),
nn.ELU(),
nn.Linear(se_channels, in_channels),
nn.Sigmoid(),
)
def forward(self, x):
""""""
# Aggregate input representation
x_global = torch.mean(x, dim=0)
# Compute reweighting vector s
s = self.encoder_decoder(x_global)
return x * s
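# Shape sketch for SELayer (hypothetical sizes): x is (num_nodes, in_channels),
# x_global is (in_channels,) after mean-pooling over nodes, and s is a
# per-channel gate in (0, 1) that is broadcast across all nodes in x * s.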
class ApplyNodeFunc(nn.Module):
"""Update the node feature hv with MLP, BN and ReLU."""
def __init__(self, mlp, use_selayer):
super(ApplyNodeFunc, self).__init__()
self.mlp = mlp
self.bn = (
SELayer(self.mlp.output_dim, int(np.sqrt(self.mlp.output_dim)))
if use_selayer
else nn.BatchNorm1d(self.mlp.output_dim)
)
def forward(self, h):
h = self.mlp(h)
h = self.bn(h)
h = F.relu(h)
return h
class MLP(nn.Module):
"""MLP with linear output"""
def __init__(self, num_layers, input_dim, hidden_dim, output_dim, use_selayer):
"""MLP layers construction
        Parameters
        ----------
num_layers: int
The number of linear layers
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
"""
super(MLP, self).__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
self.output_dim = output_dim
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(nn.Linear(hidden_dim, hidden_dim))
self.linears.append(nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
self.batch_norms.append(
SELayer(hidden_dim, int(np.sqrt(hidden_dim)))
if use_selayer
else nn.BatchNorm1d(hidden_dim)
)
def forward(self, x):
if self.linear_or_not:
# If linear model
return self.linear(x)
else:
# If MLP
h = x
for i in range(self.num_layers - 1):
h = F.relu(self.batch_norms[i](self.linears[i](h)))
return self.linears[-1](h)
class UnsupervisedGAT(nn.Module):
def __init__(
self, node_input_dim, node_hidden_dim, edge_input_dim, num_layers, num_heads
):
super(UnsupervisedGAT, self).__init__()
assert node_hidden_dim % num_heads == 0
self.layers = nn.ModuleList(
[
GATLayer(
in_feats=node_input_dim if i == 0 else node_hidden_dim,
out_feats=node_hidden_dim // num_heads,
num_heads=num_heads,
feat_drop=0.0,
attn_drop=0.0,
alpha=0.2,
residual=False,
agg_mode="flatten",
activation=F.leaky_relu if i + 1 < num_layers else None,
)
for i in range(num_layers)
]
)
def forward(self, g, n_feat, e_feat):
for i, layer in enumerate(self.layers):
n_feat = layer(g, n_feat)
return n_feat
class UnsupervisedMPNN(nn.Module):
"""
MPNN from
`Neural Message Passing for Quantum Chemistry <https://arxiv.org/abs/1704.01212>`__
Parameters
----------
    node_input_dim : int
        Dimension of input node feature, default to be 32.
    edge_input_dim : int
        Dimension of input edge feature, default to be 32.
    output_dim : int
        Dimension of prediction, default to be 32.
    node_hidden_dim : int
        Dimension of node feature in hidden layers, default to be 32.
    edge_hidden_dim : int
        Dimension of edge feature in hidden layers, default to be 32.
    num_step_message_passing : int
        Number of message passing steps, default to be 6.
    lstm_as_gate : bool
        If True, use an LSTM instead of a GRU as the message-passing update gate.
"""
def __init__(
self,
output_dim=32,
node_input_dim=32,
node_hidden_dim=32,
edge_input_dim=32,
edge_hidden_dim=32,
num_step_message_passing=6,
lstm_as_gate=False,
):
super(UnsupervisedMPNN, self).__init__()
self.num_step_message_passing = num_step_message_passing
self.lin0 = nn.Linear(node_input_dim, node_hidden_dim)
edge_network = nn.Sequential(
nn.Linear(edge_input_dim, edge_hidden_dim),
nn.ReLU(),
nn.Linear(edge_hidden_dim, node_hidden_dim * node_hidden_dim),
)
self.conv = NNConv(
in_feats=node_hidden_dim,
out_feats=node_hidden_dim,
edge_func=edge_network,
aggregator_type="sum",
)
self.lstm_as_gate = lstm_as_gate
if lstm_as_gate:
self.lstm = nn.LSTM(node_hidden_dim, node_hidden_dim)
else:
self.gru = nn.GRU(node_hidden_dim, node_hidden_dim)
def forward(self, g, n_feat, e_feat):
"""Predict molecule labels
Parameters
----------
g : DGLGraph
Input DGLGraph for molecule(s)
n_feat : tensor of dtype float32 and shape (B1, D1)
Node features. B1 for number of nodes and D1 for
the node feature size.
e_feat : tensor of dtype float32 and shape (B2, D2)
Edge features. B2 for number of edges and D2 for
the edge feature size.
Returns
-------
        out : tensor of dtype float32 and shape (B1, node_hidden_dim)
            Node representations after message passing
"""
out = F.relu(self.lin0(n_feat)) # (B1, H1)
h = out.unsqueeze(0) # (1, B1, H1)
c = torch.zeros_like(h)
for i in range(self.num_step_message_passing):
m = F.relu(self.conv(g, out, e_feat)) # (B1, H1)
if self.lstm_as_gate:
out, (h, c) = self.lstm(m.unsqueeze(0), (h, c))
else:
out, h = self.gru(m.unsqueeze(0), h)
out = out.squeeze(0)
return out
class UnsupervisedGIN(nn.Module):
"""GIN model"""
def __init__(
self,
num_layers,
num_mlp_layers,
input_dim,
hidden_dim,
output_dim,
final_dropout,
learn_eps,
graph_pooling_type,
neighbor_pooling_type,
use_selayer,
):
"""model parameters setting
        Parameters
        ----------
num_layers: int
The number of linear layers in the neural network
num_mlp_layers: int
The number of linear layers in mlps
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
final_dropout: float
dropout ratio on the final linear layer
learn_eps: boolean
If True, learn epsilon to distinguish center nodes from neighbors
If False, aggregate neighbors and center nodes altogether.
neighbor_pooling_type: str
how to aggregate neighbors (sum, mean, or max)
graph_pooling_type: str
how to aggregate entire nodes in a graph (sum, mean or max)
"""
super(UnsupervisedGIN, self).__init__()
self.num_layers = num_layers
self.learn_eps = learn_eps
# List of MLPs
self.ginlayers = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
for layer in range(self.num_layers - 1):
if layer == 0:
mlp = MLP(
num_mlp_layers, input_dim, hidden_dim, hidden_dim, use_selayer
)
else:
mlp = MLP(
num_mlp_layers, hidden_dim, hidden_dim, hidden_dim, use_selayer
)
self.ginlayers.append(
GINConv(
ApplyNodeFunc(mlp, use_selayer),
neighbor_pooling_type,
0,
self.learn_eps,
)
)
self.batch_norms.append(
SELayer(hidden_dim, int(np.sqrt(hidden_dim)))
if use_selayer
else nn.BatchNorm1d(hidden_dim)
)
# Linear function for graph poolings of output of each layer
# which maps the output of different layers into a prediction score
self.linears_prediction = torch.nn.ModuleList()
for layer in range(num_layers):
if layer == 0:
self.linears_prediction.append(nn.Linear(input_dim, output_dim))
else:
self.linears_prediction.append(nn.Linear(hidden_dim, output_dim))
self.drop = nn.Dropout(final_dropout)
if graph_pooling_type == "sum":
self.pool = SumPooling()
elif graph_pooling_type == "mean":
self.pool = AvgPooling()
elif graph_pooling_type == "max":
self.pool = MaxPooling()
else:
raise NotImplementedError
def forward(self, g, h, efeat):
# list of hidden representation at each layer (including input)
hidden_rep = [h]
for i in range(self.num_layers - 1):
h = self.ginlayers[i](g, h)
h = self.batch_norms[i](h)
h = F.relu(h)
hidden_rep.append(h)
score_over_layer = 0
# perform pooling over all nodes in each graph in every layer
all_outputs = []
for i, h in list(enumerate(hidden_rep)):
pooled_h = self.pool(g, h)
all_outputs.append(pooled_h)
score_over_layer += self.drop(self.linears_prediction[i](pooled_h))
return score_over_layer, all_outputs[1:]
class GraphEncoder(nn.Module):
"""
    Graph encoder wrapping an MPNN, GAT, or GIN backbone with a Set2Set
    readout (MPNN from `Neural Message Passing for Quantum Chemistry
    <https://arxiv.org/abs/1704.01212>`__).
    Parameters
    ----------
    positional_embedding_size : int
        Dimension of the positional node embedding, default to be 32.
    degree_embedding_size : int
        Dimension of the degree embedding, default to be 32.
    output_dim : int
        Dimension of the graph embedding, default to be 32.
    node_hidden_dim : int
        Dimension of node feature in hidden layers, default to be 32.
    edge_hidden_dim : int
        Dimension of edge feature in hidden layers, default to be 32.
    num_layers : int
        Number of GNN layers (message passing steps), default to be 6.
num_step_set2set : int
Number of set2set steps
num_layer_set2set : int
Number of set2set layers
"""
def __init__(
self,
positional_embedding_size=32,
max_node_freq=8,
max_edge_freq=8,
max_degree=128,
freq_embedding_size=32,
degree_embedding_size=32,
output_dim=32,
node_hidden_dim=32,
edge_hidden_dim=32,
num_layers=6,
num_heads=4,
num_step_set2set=6,
num_layer_set2set=3,
norm=False,
gnn_model="mpnn",
degree_input=False,
lstm_as_gate=False,
):
super(GraphEncoder, self).__init__()
if degree_input:
node_input_dim = positional_embedding_size + degree_embedding_size + 1
else:
node_input_dim = positional_embedding_size + 1
edge_input_dim = freq_embedding_size + 1
if gnn_model == "mpnn":
self.gnn = UnsupervisedMPNN(
output_dim=output_dim,
node_input_dim=node_input_dim,
node_hidden_dim=node_hidden_dim,
edge_input_dim=edge_input_dim,
edge_hidden_dim=edge_hidden_dim,
num_step_message_passing=num_layers,
lstm_as_gate=lstm_as_gate,
)
elif gnn_model == "gat":
self.gnn = UnsupervisedGAT(
node_input_dim=node_input_dim,
node_hidden_dim=node_hidden_dim,
edge_input_dim=edge_input_dim,
num_layers=num_layers,
num_heads=num_heads,
)
elif gnn_model == "gin":
self.gnn = UnsupervisedGIN(
num_layers=num_layers,
num_mlp_layers=2,
input_dim=node_input_dim,
hidden_dim=node_hidden_dim,
output_dim=output_dim,
final_dropout=0.5,
learn_eps=False,
graph_pooling_type="sum",
neighbor_pooling_type="sum",
use_selayer=False,
)
self.gnn_model = gnn_model
self.max_node_freq = max_node_freq
self.max_edge_freq = max_edge_freq
self.max_degree = max_degree
self.degree_input = degree_input
if degree_input:
self.degree_embedding = nn.Embedding(
num_embeddings=max_degree + 1, embedding_dim=degree_embedding_size
)
self.set2set = Set2Set(node_hidden_dim, num_step_set2set, num_layer_set2set)
self.lin_readout = nn.Sequential(
nn.Linear(2 * node_hidden_dim, node_hidden_dim),
nn.ReLU(),
nn.Linear(node_hidden_dim, output_dim),
)
self.norm = norm
def forward(self, g, return_all_outputs=False):
"""Predict molecule labels
Parameters
----------
g : DGLGraph
Input DGLGraph for molecule(s)
n_feat : tensor of dtype float32 and shape (B1, D1)
Node features. B1 for number of nodes and D1 for
the node feature size.
e_feat : tensor of dtype float32 and shape (B2, D2)
Edge features. B2 for number of edges and D2 for
the edge feature size.
Returns
-------
res : Predicted labels
"""
if self.degree_input:
device = g.ndata["seed"].device
degrees = g.in_degrees()
if device != torch.device("cpu"):
degrees = degrees.cuda(device)
n_feat = torch.cat(
(
g.ndata["pos_undirected"],
self.degree_embedding(degrees.clamp(0, self.max_degree)),
g.ndata["seed"].unsqueeze(1).float(),
),
dim=-1,
)
else:
n_feat = torch.cat(
(g.ndata["pos_undirected"], g.ndata["seed"].unsqueeze(1).float()),
dim=-1,
)
e_feat = None
if self.gnn_model == "gin":
x, all_outputs = self.gnn(g, n_feat, e_feat)
else:
x, all_outputs = self.gnn(g, n_feat, e_feat), None
x = self.set2set(g, x)
x = self.lin_readout(x)
if self.norm:
x = F.normalize(x, p=2, dim=-1, eps=1e-5)
if return_all_outputs:
return x, all_outputs
else:
return x
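# Minimal usage sketch (hypothetical; assumes `g` is a batched DGLGraph whose
# ndata already carries the 'pos_undirected' positional embeddings and 'seed'
# indicator that the forward pass above expects):
#   encoder = GraphEncoder(gnn_model="gin", degree_input=True, norm=True)
#   z = encoder(g)  # (num_graphs, output_dim) L2-normalized graph embeddings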
|
{"hexsha": "71c9dde41660af803ee1554cafb9a127a451aa29", "size": 16200, "ext": "py", "lang": "Python", "max_stars_repo_path": "cogdl/layers/gcc_module.py", "max_stars_repo_name": "BruceW91/cogdl", "max_stars_repo_head_hexsha": "1ad524375f5ba062103698a0432fc857572a6933", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cogdl/layers/gcc_module.py", "max_issues_repo_name": "BruceW91/cogdl", "max_issues_repo_head_hexsha": "1ad524375f5ba062103698a0432fc857572a6933", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cogdl/layers/gcc_module.py", "max_forks_repo_name": "BruceW91/cogdl", "max_forks_repo_head_hexsha": "1ad524375f5ba062103698a0432fc857572a6933", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-17T02:44:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-17T02:44:09.000Z", "avg_line_length": 32.0792079208, "max_line_length": 87, "alphanum_fraction": 0.5675308642, "include": true, "reason": "import numpy", "num_tokens": 3615}
|
#!/usr/bin/env python
# coding: utf-8
import os
import json
import numpy as np
import pandas as pd
import connector.mysql_connector as mysql_c
def drop_columns(df, columns):
    # DataFrame.drop returns a new frame rather than mutating in place,
    # so return the result instead of discarding it
    return df.drop(axis=1, level=0, columns=[columns])
def download_raw_db():
#F_PATH = os.path.abspath('')
with open('download/connector/cfg_files/database_analysis_cfg.json','r') as f:
cfg = json.load(f)
DB_NAME = cfg['DB_NAME']
TB_NAME = cfg['PERSIST_TB_NAME']
cursor = mysql_c.open_connection(DB_NAME, cfg)
mysql_c.select_database(cursor, DB_NAME)
rows = mysql_c.select_data(cursor, TB_NAME)
cursor.close()
database = pd.DataFrame(rows, columns=['id','client_id','payload','topic_path', 'date'], dtype=float)
database.to_csv('../database/raw.csv', index=False)
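# Usage sketch (assumes a reachable MySQL server and the cfg file referenced
# above): download_raw_db() dumps the persisted table to ../database/raw.csv.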
|
{"hexsha": "b8dcd62bad8e7f01edf1c4969ba78df83ca776bf", "size": 791, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/download/download_db.py", "max_stars_repo_name": "maikereis/consumption_data_analysis", "max_stars_repo_head_hexsha": "2ac8dcbc745c01211bdf22c4287f82225f7c21d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/download/download_db.py", "max_issues_repo_name": "maikereis/consumption_data_analysis", "max_issues_repo_head_hexsha": "2ac8dcbc745c01211bdf22c4287f82225f7c21d6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-13T03:50:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T03:50:01.000Z", "max_forks_repo_path": "src/download/download_db.py", "max_forks_repo_name": "maikereis/consumption_data_analysis", "max_forks_repo_head_hexsha": "2ac8dcbc745c01211bdf22c4287f82225f7c21d6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3913043478, "max_line_length": 105, "alphanum_fraction": 0.6991150442, "include": true, "reason": "import numpy", "num_tokens": 198}
|
subroutine mcohc(yy,xx,xy,b,iq,ip,coh)
c
c computes multivariate coherence for the multivariate
c complex linear model
c Y = X B
c n x q n x p p x q
c
c input: xx is x*x, yy is y*y (q times q and p times p
c hermitian matrices in full storage mode, xy
c is x*y, B is estimated prediction coefficents,
c output: coh is overall coherence of predicted and
c observed data (Y)
c note that this will give an answer for any estimate
c B, not just the ls estimate; the result returned is the
c real part of the complex coherence between observed
c and predicted
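c in matrix form (sketch of the quantities computed below):
c yt = Re tr(y*y), bxxb = Re tr(b*(x*x)b), yxb = Re tr((x*y)*b),
c and coh = yxb**2 / (yt*bxxb)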
complex xx(ip,ip),yy(iq,iq),xy(ip,iq),b(ip,iq)
real coh,bxxb,yxb,yt
yt = 0.
bxxb = 0.
do 10 i = 1,iq
do 5 j = 1,ip
do 5 k = 1,ip
5 bxxb = bxxb + real(conjg(b(j,i))*xx(j,k)*b(k,i))
yt = yt + real(yy(i,i))
10 continue
yxb = 0.
do 20 i = 1,iq
do 15 j = 1,ip
yxb = yxb + conjg(xy(j,i))*b(j,i)
15 continue
20 continue
coh = yxb*yxb/(yt*bxxb)
if(coh.gt.1) then
print*,'coh',coh
print*,'xx',xx
print*,'yy',yy
print*,'xy',xy
print*,'b',b
print*,'yt,bxxb,yxb',yt,bxxb,yxb
end if
return
end
|
{"hexsha": "9481e67e8981bdaa53f215a890d808a960b2b415", "size": 1420, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "iris_mt_scratch/egbert_codes-20210121T193218Z-001/egbert_codes/EMTF/T/mcohc.f", "max_stars_repo_name": "simpeg-research/iris-mt-scratch", "max_stars_repo_head_hexsha": "ea458f253071db513fd0731118a2a7452a725944", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "iris_mt_scratch/egbert_codes-20210121T193218Z-001/egbert_codes/EMTF/T/mcohc.f", "max_issues_repo_name": "simpeg-research/iris-mt-scratch", "max_issues_repo_head_hexsha": "ea458f253071db513fd0731118a2a7452a725944", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2020-12-23T17:55:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-24T21:01:05.000Z", "max_forks_repo_path": "iris_mt_scratch/egbert_codes-20210121T193218Z-001/egbert_codes/EMTF/T/mcohc.f", "max_forks_repo_name": "simpeg-research/iris-mt-scratch", "max_forks_repo_head_hexsha": "ea458f253071db513fd0731118a2a7452a725944", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5833333333, "max_line_length": 68, "alphanum_fraction": 0.5049295775, "num_tokens": 445}
|
#!/usr/bin/venv python
#############################################################################
# #
# Copyright (c) 2020 Saeid Hosseinipoor <https://saeid-h.github.io/> #
# All rights reserved. #
# Licensed under the MIT License #
# #
#############################################################################
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import os
import imagecodecs
import cv2
import numpy as np
import cv_io.sintel_io as sintel_io
import cv_io.flowlib as flowlib
import cv_io.pfmutil as pfmutil
NORMAL_IMAGE_FORMATS = ['png', 'pgm', 'jpg', 'jpeg', 'pmg']
IMAGE_TYPES = ['kitti-disparity', 'optical-flow', 'disparity', 'depth', 'file-extension-default']
IMAGE_TYPES_ERR_MSG = "ERROR: The image type is not supported. It should be one of ".format(IMAGE_TYPES)
# Auxiliary methods
def write_image(file_name, image):
"""
Save normal image of any format
:param file_name: path and name of the image file to save
:param image: image array
:return: None
"""
return cv2.imwrite(file_name, image[...,::-1])
# Methods from pfmutil:
read_pfm = lambda file_name: pfmutil.load(file_name)[0]
save_pfm = pfmutil.save
show_pfm = pfmutil.show
pfm_scale = lambda file_name: pfmutil.load(file_name)[1]
# Methods from flowlib:
read_flo = flowlib.read_flow
read_flo_png = flowlib.read_flow_png
save_flo = lambda file_name, flow: flowlib.write_flow(flow, file_name)
read_flo_seg = flowlib.segment_flow
flow_to_image = flowlib.flow_to_image
show_flow_from_file = flowlib.show_flow
show_flow = flowlib.visualize_flow
read_disp_asflow = flowlib.read_disp_png
save_disp_asflow = lambda file_name, disp: flowlib.disp_to_flowfile (disp, file_name)
warp_flow = flowlib.warp_image
scale_image = flowlib.scale_image
# Methods from sintel_io:
read_cam = sintel_io.cam_read
save_cam = sintel_io.cam_write
read_disp_sintel = sintel_io.disparity_read
save_disp_sintel = sintel_io.disparity_write
read_dpt = sintel_io.depth_read
save_dpt = sintel_io.depth_write
read_seg_sintel = sintel_io.segmentation_read
save_seg_sintel = sintel_io.segmentation_write
read_flow_sintel = sintel_io.flow_read
save_flow_sintel = sintel_io.flow_write
# General Modules
read_png = flowlib.read_image
save_png = write_image
read_jpg = flowlib.read_image
save_jpg = write_image
def imread(file_name, image_type='file-extension-default'):
'''
Read image
:param file_name:
        File name and path on disk that the function loads from.
:param image_type:
The type of image as:
        'kitti-disparity', 'optical-flow', 'disparity', 'depth', 'file-extension-default'
:return: image array
'''
ext = os.path.splitext(file_name)[1].lower()[1:]
image_type = image_type.lower()
    if image_type not in IMAGE_TYPES:
print (IMAGE_TYPES_ERR_MSG)
return
if image_type == 'file-extension-default':
if ext in NORMAL_IMAGE_FORMATS:
return flowlib.read_image(file_name)
elif ext in ['flo']:
return read_flo(file_name)
elif ext in ['dpt']:
return read_dpt(file_name)
elif ext in ['pfm']:
return read_pfm(file_name)
elif ext in ['jxr']:
return imagecodecs.imread(file_name, codec='jpegxr').astype(np.float32) / (2**16 - 1)
elif ext in ['exr']:
img = cv2.imread(file_name, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
else:
print ("ERROR: The image type is unknown.")
elif image_type == 'optical-flow':
return read_flo(file_name)
elif image_type == 'disparity':
return read_pfm(file_name)
elif image_type == 'depth':
return read_dpt(file_name)
elif image_type == 'kitti-disparity':
return flowlib.read_image(file_name).astype(np.float32) / 255.
else:
print ("This image type is not implemented.")
def imwrite(file_name, image, image_type='file-extension-default'):
'''
Save image
:param file_name:
File name and path in the disk that function save to.
:param image:
The image array that will be saved on disl as file_name.
:param image_type:
The type of image as:
        'kitti-disparity', 'optical-flow', 'disparity', 'depth', 'file-extension-default'
:return: None
'''
ext = os.path.splitext(file_name)[1].lower()[1:]
image_type = image_type.lower()
    if image_type not in IMAGE_TYPES:
print (IMAGE_TYPES_ERR_MSG)
return
if image_type == 'file-extension-default':
if ext in NORMAL_IMAGE_FORMATS:
return write_image(file_name, image)
elif ext in ['flo']:
return save_flo(file_name, image)
elif ext in ['dpt']:
return save_dpt(file_name, image)
elif ext in ['pfm']:
return save_pfm(file_name, image)
elif ext in ['jxr']:
return imagecodecs.imwrite(file_name, (image*(2**16 - 1)).astype(np.uint16), codec='jpegxr')
elif ext in ['exr']:
return cv2.imwrite(file_name, image, [cv2.IMWRITE_EXR_TYPE_HALF])
else:
print ("ERROR: The image type is unknown.")
elif image_type == 'optical-flow':
if len(image.shape) == 3 and image.shape[2] == 2:
return save_flo(file_name, image.astype(np.float32))
else:
print ("ERROR: The image type is unknown.")
elif image_type == 'disparity':
return save_pfm(file_name, image)
elif image_type == 'depth':
return save_dpt(file_name, image)
elif image_type == 'kitti-disparity':
return write_image(file_name, (image*256.).astype(np.uint16))
else:
print ("This image type is not implemented.")
def imshow(parameter):
'''
Show image
:param parameter:
If it's a string (filename), it reads the image from file and shows it.
        If it is an image array, it shows it directly.
:return: None
'''
    if isinstance(parameter, str):
        parameter = imread(parameter)
if len(parameter.shape) == 3 and parameter.shape[-1] == 2:
show_flow(parameter)
else:
if len(parameter.shape) == 3 and parameter.shape[-1] == 1:
parameter = np.squeeze(parameter)
plt.imshow(parameter.astype(np.float32), cmap='gray')
else:
plt.imshow(parameter.astype(np.float32))
plt.show()
# Just for keeping consistency with previous revision:
read = imread
save = imwrite
show = imshow
imload = imread
imsave = imwrite
if __name__ == "__main__":
pass
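    # Hedged round-trip sketch (the file name is hypothetical, and it assumes
    # 'png' appears in NORMAL_IMAGE_FORMATS):
    demo = (np.random.rand(16, 16, 3) * 255).astype(np.uint8)
    imwrite('demo.png', demo)   # dispatches on the file extension
    back = imread('demo.png')
    print(back.shape)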
|
{"hexsha": "bfbc990ca891d6bfecf61feab47fe0784a7d0d4a", "size": 6108, "ext": "py", "lang": "Python", "max_stars_repo_path": "cv_io/collection.py", "max_stars_repo_name": "saeid-h/computer-vision-file-handler", "max_stars_repo_head_hexsha": "b7903a656727afcfc2e3ae112dbd9fdaba5337d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cv_io/collection.py", "max_issues_repo_name": "saeid-h/computer-vision-file-handler", "max_issues_repo_head_hexsha": "b7903a656727afcfc2e3ae112dbd9fdaba5337d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-07T16:58:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-07T16:58:05.000Z", "max_forks_repo_path": "cv_io/collection.py", "max_forks_repo_name": "saeid-h/Computer-Vision-IO", "max_forks_repo_head_hexsha": "b7903a656727afcfc2e3ae112dbd9fdaba5337d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0886699507, "max_line_length": 104, "alphanum_fraction": 0.7116895874, "include": true, "reason": "import numpy", "num_tokens": 1612}
|
#! usr/bin/python3
#%%
import config
from src.model.stldesc_model import define_stl_encoder, EmbStyleNet
from src.support.loss_functions import pairWiseRankingLoss, MarginalAcc, triplet_loss
import os
import logging
import time
import math
from tqdm import tqdm
from datetime import datetime
import pathlib
import pandas as pd
import numpy as np
import random
import tensorflow.keras.backend as K
import tensorflow as tf
import tensorflow.keras.layers.experimental.preprocessing as prep
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras import losses
from tensorflow.keras import metrics
from tensorflow.keras.callbacks import TensorBoard
from matplotlib import pyplot as plt
from livelossplot import PlotLosses
from livelossplot.outputs import MatplotlibPlot
#tf.executing_eagerly()
#%%
#tensorboard logger
logdir = config.LOG_DIR+ "/desc_pre_" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = TensorBoard(log_dir=logdir, histogram_freq=1, profile_batch=1)
# tf.profiler.experimental.server.start(6009)
# set logger
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def process_img(img):
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
return tf.image.resize(img, config.IMAGE_SIZE)
def process_path(file_path):
img = tf.io.read_file(file_path)
img = tf.image.decode_jpeg(img, channels=3)
#print(fp)
img = tf.image.convert_image_dtype(img, tf.float32)
img = tf.image.resize(img, size=(128, 128))
return img
def train_gen():
lower, higher, root_path, n = 1, 2923, './data/data/StyleDataset', 2900
idx = np.array(range(lower, min(higher, lower+n)))
for i in idx:
#i = random.randint(lower, higher)
random_num = random.randint(lower, higher)
random_bool = random.randint(0,1)
if random_bool:
if random_num == int(i):
random_num = random.randint(lower, higher)
else:
random_num = max(random.randint(1,10), int(i)-5)
img1_det = stenc_df.loc[i, ['path', 'style_code']]
img2_det = stenc_df.loc[random_num, ['path', 'style_code']]
# label = 0
# if img1_det['style_code'] == img2_det['style_code']:
# label = 1
#print(os.path.join(root_path, img1_det['path']), os.path.join(root_path, img2_det['path']))
        try:
img1 = process_path(os.path.join(root_path, img1_det['path']))
img2 = process_path(os.path.join(root_path, img2_det['path']))
yield img1, img2, img1_det['style_code']-1, img2_det['style_code']-1
except:
print(f"Error in file {img1_det['path']}")
continue
def val_gen():
lower, higher, root_path, n = 2923, 3164, './data/data/StyleDataset', 200
# idx = np.random.choice(range(lower, higher), n, replace=False, seed=111)
# for i in idx:
idx = np.array(range(lower, min(higher, lower+n)))
for i in idx:
#i = random.randint(lower, higher)
random_num = random.randint(lower, higher)
random_bool = random.randint(0,1)
if random_bool:
if random_num == int(i):
random_num = random.randint(lower, higher)
else:
random_num = max(random.randint(1,10), int(i)-5)
img1_det = stenc_df.loc[i, ['path', 'style_code']]
img2_det = stenc_df.loc[random_num, ['path', 'style_code']]
# label = 0
# if img1_det['style_code'] == img2_det['style_code']:
# label = 1
#print(os.path.join(root_path, img1_det['path']), os.path.join(root_path, img2_det['path']))
        try:
img1 = process_path(os.path.join(root_path, img1_det['path']))
img2 = process_path(os.path.join(root_path, img2_det['path']))
yield img1, img2, img1_det['style_code']-1, img2_det['style_code']-1
except:
print(f"Error in file {img1_det['path']}")
continue
# image resize and rescale pipeline
resize_and_rescale = tf.keras.Sequential([
prep.Resizing(config.IMG_HEIGHT, config.IMG_WIDTH),
prep.Normalization()
])
# image augmentation pipeline
data_augmentation = tf.keras.Sequential([
prep.RandomContrast(0.2),
prep.RandomFlip("horizontal_and_vertical"),
prep.RandomCrop(config.IMG_HEIGHT, config.IMG_WIDTH),
prep.RandomRotation(0.3, fill_mode='nearest', interpolation='bilinear'),
prep.RandomZoom(height_factor=(-0.2, 0.2), width_factor=(-0.2, 0.2), fill_mode='nearest', interpolation='bilinear')
])
# data_augmentation = tf.keras.Sequential([
# prep.RandomFlip("horizontal_and_vertical"),
# prep.RandomRotation(0.2),
# ])
def prepare(ds, shuffle=False, augment=False):
# ds = ds.map(lambda x: tf.py_function(process_path, [x], [tf.float32, tf.float32, tf.int32]),
# num_parallel_calls=tf.data.AUTOTUNE)
# ds = ds.map(lambda x1, x2, y: (process_path(x1), process_path(x2), y),
# num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.map(lambda x1, x2, y1, y2: (resize_and_rescale(x1), resize_and_rescale(x2), y1, y2),
num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.cache()
if shuffle:
ds = ds.shuffle(1000)
ds = ds.batch(16)
if augment:
ds = ds.map(lambda x1, x2, y1, y2: (data_augmentation(x1, training=True), data_augmentation(x2, training=True), y1, y2),
num_parallel_calls=tf.data.AUTOTUNE)
return ds.prefetch(buffer_size=tf.data.AUTOTUNE)
def get_loss(vec1t, vec1f, vec2t, vec2f):
    # The *t slices' L1 norms are minimized directly, while the *f slices are
    # hinged so their norms are pushed above a 0.2 margin.
    norm1, norm2, norm3, norm4 = tf.norm(vec1t, axis=0, ord=1), tf.norm(vec1f, axis=0, ord=1), tf.norm(vec2t, axis=0, ord=1), tf.norm(vec2f, axis=0, ord=1)
    zero = tf.zeros_like(norm2)
    norm2, norm4 = tf.math.reduce_max(tf.stack([zero, 0.2 - norm2]), axis=0), tf.math.reduce_max(tf.stack([zero, 0.2 - norm4]), axis=0)
    return tf.math.reduce_mean(norm1 + norm2 + norm3 + norm4)
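# Hedged sketch (not part of the original script): exercising get_loss on
# random embeddings. The (latent_dim, batch) shape is an assumption implied
# by the axis=0 norms above.
def _demo_get_loss():
    v1t = tf.random.normal((32, 16))
    v1f = tf.random.normal((32, 16))
    v2t = tf.random.normal((32, 16))
    v2f = tf.random.normal((32, 16))
    print(get_loss(v1t, v1f, v2t, v2f).numpy())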
@tf.function
def train_step(ref_in, style_in, ref_lbl, stl_lbl):
with tf.GradientTape() as tape:
#ref_out, style_out = desc_pre_model([ref_in, style_in])
vec1t, vec1f, vec2t, vec2f = desc_pre_model([ref_in, style_in, ref_lbl, stl_lbl])
loss = get_loss(vec1t, vec1f, vec2t, vec2f)
grads = tape.gradient(loss, base_model.trainable_variables)
opt.apply_gradients(zip(grads, base_model.trainable_variables))
#train_metrics.update_state(ref_out, style_out, label_in)
return loss
@tf.function
def val_step(ref_in, style_in, label_in):
with tf.GradientTape() as tape:
ref_out, style_out = desc_pre_model([ref_in, style_in])
loss = pairWiseRankingLoss(ref_out, style_out, label_in)
#val_metrics.update_state(ref_out, style_out, label_in)
return loss
def train(epochs=3):
tensorboard_callback.set_model(desc_pre_model)
# plotlosses = PlotLosses(outputs=[MatplotlibPlot()], groups={'loss' : ['tr_loss', 'val_loss'], 'accuracy' : ['tr_acc', 'val_acc']})
for epoch in range(epochs):
start_time = time.time()
# Iterate over the batches of the dataset.
pb = tqdm(train_dataset)
e = 0
for ref_batch_train, style_batch_train, reflbl_batch_train, stllbl_batch_train in pb:
pb.set_description(f"[ {epoch}/ {e}] ")
train_loss = train_step(ref_batch_train, style_batch_train, reflbl_batch_train, stllbl_batch_train)
#print(f"Epoch {epoch} / step : {step} : loss {train_loss}",end='\r')
pb.set_postfix(loss=train_loss.numpy())
e += 1
# Run a validation loop at the end of each epoch.
# for ref_batch_val, style_batch_val, label_batch_val in val_dataset:
# val_loss = val_step(ref_batch_val, style_batch_val, label_batch_val)
print(f'Epoch {epoch} : train_loss : {train_loss}')
# tr_acc = train_metrics.result()
# val_acc = val_metrics.result()
# plotlosses.update({
# 'tr_loss' : train_loss,
# 'tr_acc' : tr_acc,
# 'val_loss' : val_loss,
# 'val_acc' : val_acc,
# })
# plotlosses.send()
# train_metrics.reset_states()
# val_metrics.reset_states()
# val_acc = val_metrics.result()
# val_metrics.reset_states()
# print("Validation acc: %.4f" % (float(val_acc),))
print("Time taken: %.2fs" % (time.time() - start_time))
#%%
if __name__ == "__main__":
#data importing
stenc_df = pd.read_csv('./data/data/StyleDataset/StyleEnc.csv', index_col=0)
train_path = pathlib.Path(os.path.join(config.DESC_ROOT_DIR,'train'))
val_path = pathlib.Path(os.path.join(config.DESC_ROOT_DIR,'validation'))
#train_gen = gen(1, 2923, './data/data/StyleDataset', 2900)
#val_gen = gen(2923, 3164, './data/data/StyleDataset', 240)
train_ds = tf.data.Dataset.from_generator(
train_gen,
output_signature=(
tf.TensorSpec(shape=(128,128, 3), dtype=tf.float32),
tf.TensorSpec(shape=(128,128,3), dtype=tf.float32),
tf.TensorSpec(shape=(), dtype=tf.int32),
tf.TensorSpec(shape=(), dtype=tf.int32)
)
)
# val_ds = tf.data.Dataset.from_generator(
# val_gen,
# output_signature=(
# tf.TensorSpec(shape=(128,128, 3), dtype=tf.float32),
# tf.TensorSpec(shape=(128,128,3), dtype=tf.float32),
# tf.TensorSpec(shape=(), dtype=tf.int32),
# tf.TensorSpec(shape=(), dtype=tf.int32)
# )
# )
#filtered_ds = list_ds.filter(lambda x: int(x.split(os.sep)[-1].strip('.jpg')) < config.DESC_TRAIN_SIZE)
#sample_dt = sample_ds.shuffle(buffer_size=1000) #config param
train_dataset = prepare(train_ds, shuffle=True, augment=True)
# val_dataset = prepare(val_ds, shuffle=True, augment=False)
# init model
base_model = define_stl_encoder(config.DESCS_LATENT_SIZE, 36, config.IMAGE_SHAPE)
#train_steps = 100
#lr_fn = tf.optimizers.schedules.PolynomialDecay(1e-3, train_steps, 1e-5, 2)
opt = tf.optimizers.Adam(0.001)
# train_metrics = MarginalAcc()
# val_metrics = MarginalAcc()
#desc_pre_model = define_descrminator((config.IMG_WIDTH, config.IMG_HEIGHT, 3))
desc_pre_model = EmbStyleNet(base_model)
train(10)
# tf.profiler.experimental.client.trace('grpc://localhost:6009',
# config.LOG_DIR+'/profilers', 2000)
# filename = 'descs_wgt1.h5'
# base_model.save_weights(os.path.join(config.MODEL_DIR, filename))
# logger.info(f">> Saved : {filename} ")
# %%
weights = tf.Variable(base_model.get_layer('embedding').get_weights()[0][1:])
# Create a checkpoint from embedding, the filename and key are the
# name of the tensor.
checkpoint = tf.train.Checkpoint(embedding=weights)
checkpoint.save(os.path.join('./logs/gan', "embedding.ckpt"))
# %%
|
{"hexsha": "0a4001503b805d3090716300a232bc1fa9b9fdbf", "size": 11028, "ext": "py", "lang": "Python", "max_stars_repo_path": "stldesc_train2.py", "max_stars_repo_name": "nipdep/STGAN", "max_stars_repo_head_hexsha": "c72ba6cb9d23d33accc0cfa1958a2005db3ed490", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stldesc_train2.py", "max_issues_repo_name": "nipdep/STGAN", "max_issues_repo_head_hexsha": "c72ba6cb9d23d33accc0cfa1958a2005db3ed490", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stldesc_train2.py", "max_forks_repo_name": "nipdep/STGAN", "max_forks_repo_head_hexsha": "c72ba6cb9d23d33accc0cfa1958a2005db3ed490", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1063829787, "max_line_length": 158, "alphanum_fraction": 0.6569640914, "include": true, "reason": "import numpy", "num_tokens": 2925}
|
"""
@author: Saikumar Dandla
"""
import numpy as np
name_dict = {0:'Avinash R' ,
1:'Durgendra Pandey',
2:'Rokkam Hari Sankar',
3:'Adurti Sai Mahesh',
4:'Manish Pratap Singh',
5:'RVNK Neeraj',
6:'Saikumar D',
7:'Boora Shiva',
8:'Jated Vikas',
9:'Vinod G',
}
np.save('name_dict.npy',name_dict)
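# Hedged load sketch: np.save pickles the dict, so reading it back needs
# allow_pickle=True plus .item() to unwrap the 0-d object array.
loaded = np.load('name_dict.npy', allow_pickle=True).item()
print(loaded[6])   # -> 'Saikumar D'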
|
{"hexsha": "08ada4d203c1a7160c974f804a00d80b23d9cef5", "size": 445, "ext": "py", "lang": "Python", "max_stars_repo_path": "Attendence system/dict.py", "max_stars_repo_name": "Saidsp19/Intelligent-attendance-system-using-face-recognition", "max_stars_repo_head_hexsha": "d8e588f592d4b7d92756a31f6570464ee1e1bea6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-21T14:43:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-21T14:43:20.000Z", "max_issues_repo_path": "Attendence system/dict.py", "max_issues_repo_name": "Saidsp19/Intelligent-attendance-system-using-face-recognition", "max_issues_repo_head_hexsha": "d8e588f592d4b7d92756a31f6570464ee1e1bea6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Attendence system/dict.py", "max_forks_repo_name": "Saidsp19/Intelligent-attendance-system-using-face-recognition", "max_forks_repo_head_hexsha": "d8e588f592d4b7d92756a31f6570464ee1e1bea6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.25, "max_line_length": 38, "alphanum_fraction": 0.4426966292, "include": true, "reason": "import numpy", "num_tokens": 132}
|
# -*- coding: utf-8 -*-
"""
This file contains the script for defining characteristic functions and using them
as a way to embed distributional information in Euclidean space
"""
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def characteristic_function(sig,t,plot=False, taus=1, node=1):
''' function for computing the characteristic function associated to a signal at
a point/ set of points t:
f(sig,t)=1/len(sig)* [sum_{s in sig} exp(i*t*s)]
INPUT:
===========================================================================
sig : signal over the graph (vector of coefficients)
t : values at which the characteristic function should be evaluated
plot : boolean: should the resulting point/set of points be plotted
OUTPUT:
===========================================================================
f : empirical characteristic function
'''
    if isinstance(t, list):
        f = np.zeros((len(t), 3))
f[0,:]=[0,1,0]
vec1=[np.exp(complex(0,sig[i])) for i in range(len(sig))]
for tt in range(1,len(t)):
f[tt,0]=t[tt]
vec=[x**t[tt] for x in vec1]
c=np.mean(vec)
f[tt,1]=c.real
f[tt,2]=c.imag
    if plot:
plt.figure()
plt.plot(f[:,1],f[:,2], c='g')
plt.title("characteristic function of the distribution")
plt.xlabel('real part')
plt.ylabel('image part')
plt.savefig('../figure/ChaFun_taus%s_node%s.png'%(taus, node))
else:
c=np.mean([np.exp(complex(0,t*sig[i])) for i in range(len(sig))])
f=[t,np.real(c),np.imag(c)]
return f
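# Hedged sketch (not part of the original module): the empirical characteristic
# function of a standard normal sample should approximate exp(-t^2 / 2).
def _demo_characteristic_function():
    sample = np.random.randn(10000)
    f = characteristic_function(sample, [0.0, 0.5, 1.0, 2.0])
    for tt, re_part, _ in f:
        print(tt, re_part, np.exp(-tt ** 2 / 2.0))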
def featurize_characteristic_function(heat_print,t=[],nodes=[]):
''' same function as above, except the coefficient is computed across all scales and concatenated in the feature vector
Parameters
----------
heat_print
t: (optional) values where the curve is evaluated
    nodes: (optional) nodes at which the featurization is computed (defaults to all)
Returns
-------
    chi: feature matrix (numpy array of shape (len(nodes), 2*len(t)*len(heat_print)))
'''
if len(t)==0:
        t=list(range(0,100,5))
t+=range(85,100)
t.sort()
t=np.unique(t)
t=t.tolist()
if len(nodes)==0:
nodes=range(heat_print[0].shape[0])
chi=np.empty((len(nodes),2*len(t)*len(heat_print)))
for tau in range(len(heat_print)):
sig=heat_print[tau]
for i in range(len(nodes)):
ind=nodes[i]
s=sig.iloc[:,ind].tolist()
c=characteristic_function(s,t,plot=False, taus=tau, node=i)
# Concatenate all the features into one big vector
chi[i,tau*2*len(t):(tau+1)*2*len(t)]= np.reshape(c[:,1:],[1,2*len(t)])
# chi=pd.DataFrame(chi, index=[nodes[i] for i in range(len(nodes))])
return chi
|
{"hexsha": "3aa83b56784d961b7761a3888d944cc6de003532", "size": 3001, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphWave/src/characteristic_functions.py", "max_stars_repo_name": "CEfanmin/DataMiningProjects", "max_stars_repo_head_hexsha": "b6375f542c68c0001ae2971dd7e8046a0b4afc7a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-04-26T06:44:27.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-01T13:58:21.000Z", "max_issues_repo_path": "graphWave/src/characteristic_functions.py", "max_issues_repo_name": "CEfanmin/DataMiningProjects", "max_issues_repo_head_hexsha": "b6375f542c68c0001ae2971dd7e8046a0b4afc7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphWave/src/characteristic_functions.py", "max_forks_repo_name": "CEfanmin/DataMiningProjects", "max_forks_repo_head_hexsha": "b6375f542c68c0001ae2971dd7e8046a0b4afc7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-09-01T13:58:27.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-01T13:58:27.000Z", "avg_line_length": 35.7261904762, "max_line_length": 123, "alphanum_fraction": 0.5481506165, "include": true, "reason": "import numpy", "num_tokens": 741}
|
from numpy import zeros, exp, sqrt, pi, arange, allclose, array, polynomial
from scipy import optimize
from scipy.integrate import trapz, odeint
from scipy.optimize import curve_fit
from numba import jit
class analytic_solution:
def analytical_solution(self, NT, NX, TMAX, XMAX, NU):
"""
Returns the velocity field and distance for the analytical solution
"""
# Increments
DT = TMAX / (NT - 1)
DX = XMAX / (NX - 1)
# Initialise data structures
import numpy as np
u_analytical = np.zeros((NX, NT))
x = np.zeros(NX)
t = np.zeros(NT)
# Distance
for i in range(0, NX):
x[i] = i * DX
# Analytical Solution
for n in range(0, NT):
t = n * DT
for i in range(0, NX):
                phi = exp(-(x[i] - 4 * t) ** 2 / (4 * NU * (t + 1))) + exp(
                    -(x[i] - 4 * t - 2 * pi) ** 2 / (4 * NU * (t + 1)))
                dphi = (-0.5 * (x[i] - 4 * t) / (NU * (t + 1)) * exp(-(x[i] - 4 * t) ** 2 / (4 * NU * (t + 1)))
                        - 0.5 * (x[i] - 4 * t - 2 * pi) / (NU * (t + 1)) * exp(
                            -(x[i] - 4 * t - 2 * pi) ** 2 / (4 * NU * (t + 1))))
u_analytical[i, n] = -2 * NU * (dphi / phi) + 4
return u_analytical, x
def convection_diffusion(self, NT, NX, TMAX, XMAX, NU):
"""
Returns the velocity field and distance for 1D non-linear convection-diffusion
"""
# Increments
DT = TMAX / (NT - 1)
DX = XMAX / (NX - 1)
# Initialise data structures
import numpy as np
u = np.zeros((NX, NT))
u_analytical = np.zeros((NX, NT))
x = np.zeros(NX)
t = np.zeros(NT)
        ipos = np.zeros(NX, dtype=int)
        ineg = np.zeros(NX, dtype=int)
# Periodic boundary conditions
for i in range(0, NX):
x[i] = i * DX
ipos[i] = i + 1
ineg[i] = i - 1
ipos[NX - 1] = 0
ineg[0] = NX - 1
# Initial conditions
for i in range(0, NX):
            phi = exp(-(x[i] ** 2) / (4 * NU)) + exp(-(x[i] - 2 * pi) ** 2 / (4 * NU))
            dphi = -(0.5 * x[i] / NU) * exp(-(x[i] ** 2) / (4 * NU)) - (0.5 * (x[i] - 2 * pi) / NU) * exp(
                -(x[i] - 2 * pi) ** 2 / (4 * NU))
u[i, 0] = -2 * NU * (dphi / phi) + 4
# Numerical solution
for n in range(0, NT - 1):
for i in range(0, NX):
u[i, n + 1] = (u[i, n] - u[i, n] * (DT / DX) * (u[i, n] - u[ineg[i], n]) +
NU * (DT / DX ** 2) * (u[ipos[i], n] - 2 * u[i, n] + u[ineg[i], n]))
return u, x
def inviscid_solution(self, u0, space, time):
def F(z, space, time):
return z + u0(z) * time - space
exact = zeros([len(time), len(space)])
exact[0, :] = u0(space)
for i in range(1, len(time)):
Z = zeros(len(space))
for j in range(len(space)):
zj = optimize.root(F, array(0.0), args=(space[j], time[i]), tol=10 ** -10)
Z[j] = zj.x
exact[i, :] = u0(Z)
return exact
def exact(self, u0, nu, x, t):
integ_top = lambda z, xi, tj, nu: (xi - z) * exp(- 2.0 * nu * (xi - z) ** 2) / (4.0 * nu * tj)
integ_bottom = lambda z, xi, tj, nu: exp(- 2.0 * nu * (xi - z) ** 2) / (4.0 * nu * tj)
LX = len(x)
if isinstance(t, float):
t = [0.0, t]
LT = len(t)
TX = zeros([LT, LX], dtype=float)
TX[0, :] = u0(x)
for j in range(1, len(t)):
for i in range(0, len(x)):
try:
TX[j, i] = abs(
integ_top(x[i], t[j], nu, j) / integ_bottom(x[i], t[j], nu, j)
) / t[j]
                except (RuntimeWarning, ZeroDivisionError):
TX[j, i] = 0.0
return TX
@staticmethod
    def system(v, t, p, nu, diff_1, diff_2):
        # Right-hand side in odeint's f(y, t, *args) calling convention.
        return p ** 2 * nu * diff_1(v) - 0.5 * p * diff_2(v)
@staticmethod
def numerical_solution(df, v0, tdata, p, nu, diff_1, diff_2):
        # odeint is compiled SciPy code that numba's nopython mode cannot trace
        # through, so the solver stays plain Python; odeint also requires its
        # extra arguments as a tuple rather than a list.
        def solver():
            return odeint(df, v0, tdata, args=(p, nu, diff_1, diff_2), rtol=1.49012e-16)
return solver
def kernel_gauss(self, x, t, alpha):
return exp(- x ** 2 / 4.0 * alpha * t)
def viscid_solution_1(self, u0, x, t, nu):
        exact = zeros([len(t), len(x)])
        exact[0, :] = u0(x)
        rule1 = polynomial.hermite_e.hermegauss(100)
        rulesX = rule1[0][::-1]
        rulesW = rule1[1]
for j in range(0, len(x)):
for i in range(1, len(t)):
factor = sqrt(2.0 * nu * t[i])
z = x[j] - rulesX * factor
sum1 = 0.0
for k in range(len(z)):
sum1 = sum1 + factor * u0(z[k]) * rulesW[k]
exact[i, j] = (1.0 / sqrt(4.0 * nu * t[i] * pi)) * sum1
return exact
def viscid_solution_2(self, u0, x, t, nu):
y = arange(-200, 200 + 1, 400)
data = zeros([len(t), len(x)])
data[0, :] = u0(x)
for j in range(len(x)):
for i in range(1, len(t)):
sum1 = 0.0
for k in range(1, len(y) - 1):
sum1 = sum1 + self.kernel_gauss(x[j] - y[k], t[i], nu) * u0(y[k])
G_left = self.kernel_gauss(x[j] - y[0], t[i], nu) * u0(y[0])
G_right = self.kernel_gauss(x[j] - y[len(y) - 1], t[i], nu) * u0(y[len(y) - 1])
data[i, j] = (1.0 / sqrt(4.0 * nu * t[i] * pi)) * (sum1 + 0.5 * (G_right - G_left))
return data
class eval_aprox:
@staticmethod
def continuous_expansion(x, v_hat, N):
k = arange(-int(N/2), int(N/2), 1)
data = zeros(len(x))
for i in range(0, len(array(x))):
data[i] = sum(v_hat * exp(1j * k * x[i])).real / N
return data
@staticmethod
def discrete_expansion(x, v, N):
k = arange(-int(N / 2), int(N / 2), 1)
h = 2.0 * pi / N
data = zeros(len(x))
for i in range(len(array(x))):
if allclose(h * i, x[i]):
data[i] = v[i]
else:
data[i] = sum(v * exp(1j * k * x[i])).real / N
return data
class analysis:
@staticmethod
def tester(x, y):
func = lambda x, a, b: a * exp(- b * x)
return curve_fit(func, x, y, p0=(1.0, 0.001))
@staticmethod
def distance(aprox, exact, space):
return sqrt(trapz(abs(aprox - exact) ** 2, space)), max(abs(aprox - exact))
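# Hedged usage sketch (not part of the original module): the characteristics
# solution of inviscid Burgers for a sine initial condition.
def _demo_inviscid():
    from numpy import linspace, sin
    xs = linspace(0.0, 2.0 * pi, 64)
    ts = linspace(0.0, 0.5, 5)   # stay below t = 1, where the sine profile shocks
    sol = analytic_solution().inviscid_solution(sin, xs, ts)
    print(sol.shape)             # (5, 64)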
|
{"hexsha": "8492217750f452450feef14dd8be84b84cefac87", "size": 6668, "ext": "py", "lang": "Python", "max_stars_repo_path": "pySpectralPDE/deterministic/helpers.py", "max_stars_repo_name": "alanmatzumiya/Maestria", "max_stars_repo_head_hexsha": "c5e2a019312fb8f9bc193b04b07b7815e6ed4032", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-12-29T10:44:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T11:18:45.000Z", "max_issues_repo_path": "pySpectralPDE/deterministic/helpers.py", "max_issues_repo_name": "alanmatzumiya/spectral-methods", "max_issues_repo_head_hexsha": "c5e2a019312fb8f9bc193b04b07b7815e6ed4032", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pySpectralPDE/deterministic/helpers.py", "max_forks_repo_name": "alanmatzumiya/spectral-methods", "max_forks_repo_head_hexsha": "c5e2a019312fb8f9bc193b04b07b7815e6ed4032", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-04T13:29:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T13:29:56.000Z", "avg_line_length": 32.3689320388, "max_line_length": 111, "alphanum_fraction": 0.4430113977, "include": true, "reason": "import numpy,from numpy,from scipy,from numba", "num_tokens": 2186}
|
import tensorflow
import keras
import sklearn
from sklearn import linear_model
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
import matplotlib.pyplot as pyplot
import pickle
from matplotlib import style
data = pd.read_csv("train.csv", sep=",")
mass = data["mean_atomic_mass"]
predict = "critical_temp"
x = np.array(data.drop([predict], 1))
y = np.array(data[predict])
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size=0.5)
linear = linear_model.LinearRegression()
#with open("linearDump.pickle","wb") as f:
# pickle.dump(linear, f)
pickle_in = open("linearDump.pickle","rb")
linear = pickle.load(pickle_in)
linear.fit(x_train, y_train)
score = linear.score(x_test, y_test)
print('Score', score)
print('Coefficient: \n', linear.coef_)
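# Hedged example (not in the original script): spot-check a few predictions.
predictions = linear.predict(x_test)
for i in range(3):
    print(predictions[i], y_test[i])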
p = "critical_temp"
style.use("ggplot")
pyplot.scatter(data[p], data["mean_atomic_mass"])
pyplot.xlabel("Critical temperature")
pyplot.ylabel("Atomic mass")
pyplot.savefig("linearImage.png")
|
{"hexsha": "d756f1c6fdeb582734bfada4868221c60dad9370", "size": 1012, "ext": "py", "lang": "Python", "max_stars_repo_path": "algoritmos/linear.py", "max_stars_repo_name": "lucasrbk/Pibiti", "max_stars_repo_head_hexsha": "e60c02af7fe93e1ac65975a199ae1ae11fb88d42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "algoritmos/linear.py", "max_issues_repo_name": "lucasrbk/Pibiti", "max_issues_repo_head_hexsha": "e60c02af7fe93e1ac65975a199ae1ae11fb88d42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "algoritmos/linear.py", "max_forks_repo_name": "lucasrbk/Pibiti", "max_forks_repo_head_hexsha": "e60c02af7fe93e1ac65975a199ae1ae11fb88d42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5319148936, "max_line_length": 96, "alphanum_fraction": 0.7519762846, "include": true, "reason": "import numpy", "num_tokens": 254}
|
import csv
from shutil import copyfile
import click
import numpy as np
import pandas as pd
from tqdm import tqdm
from src.helpers import paths
from src.helpers.flags import AttackModes, Verbose
from src.multimodal import multimodal
from src.multimodal.data import make_dataset
from src.multimodal.features import build_features
from src.regnet.data.make_dataset import cleanUp
from src.multimodal.models import train_model
@click.command()
def main():
model_checkpoints = [
'1024_1024_50epochs.hdf5',
'2048_1024_50epochs.hdf5',
'2048_1024_512_50epochs.hdf5',
'2048_2048_50epochs.hdf5'
]
root = paths.ROOT_PATH.parent.joinpath('MultimodalCheckpoints')
dst = paths.checkpoints.multimodal()
make_data()
for checkpoint in model_checkpoints:
src = root.joinpath(checkpoint)
copyfile(src, dst)
print(checkpoint)
run_pred()
run_eval()
def make_data():
print('# Getting all frames...')
frames_info = get_all_frames('2011_09_30', 28)
for frame in tqdm(frames_info, desc='Generating Data', ascii=True):
make_dataset.make_data([frame], name='test', attack_type=AttackModes.INPAINT,
verbose=Verbose.SILENT, keep=False)
make_dataset.make_data([frame], name='test', attack_type=AttackModes.TRANSLATE,
verbose=Verbose.SILENT, keep=False)
def run_pred():
print('# Initiating logs...')
output_path = paths.DATA_PROCESSED_PATH.joinpath('logs')
output_path.mkdir(exist_ok=True, parents=True) # ensure directory exists
inp_log = output_path.joinpath('inp.csv')
tra_log = output_path.joinpath('tra.csv')
nrm_log = output_path.joinpath('nrm.csv')
print('# Loading model...')
model = load_pretrained_model()
frames_info = get_all_frames('2011_09_30', 28)
print('# Predicting on Normal data...')
predict_and_log(model, frames_info, log_file=nrm_log, attack=False)
    print('# Predicting on Inpainting Attacks...')
    predict_and_log(model, frames_info, log_file=inp_log, attack=True, attack_type=True)
    print('# Predicting on Translation Attacks...')
    predict_and_log(model, frames_info, log_file=tra_log, attack=True, attack_type=False)
def predict_and_log(model, batch, log_file, attack=False, attack_type=False):
# Load Data
batch_test = build_features.get_test_batches(batch_size=1, infinite=False,
attack=attack, normal=not attack,
                                                 inpaint=attack_type, translate=not attack_type)
itr_test = build_features.make_iterator(batch_test)
# Predict
pred = model.predict_generator(generator=itr_test, steps=len(batch), workers=0, verbose=1)
# Log Restults
batch = np.array(batch).T
drive_dates = batch[0]
drives = np.array(batch[1]).astype(int)
frames = np.array(batch[2]).astype(int)
pred = np.argmax(pred, axis=1)
labels = np.ones_like(pred) if attack else np.zeros_like(pred)
csv_data = [['drive_date', 'drive', 'frame', 'label', 'prediction']]
csv_data.extend(zip(drive_dates, drives, frames, labels, pred))
with open(log_file, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data)
def get_all_frames(drive_date, drive):
path = paths.depth.external_frame(drive_date, drive, 0)
assert path.parents[3].exists(), 'Drive not found'
frames = paths.similar_files(path, as_int=True)
frames = np.sort(frames)
frame_info = list()
for frame in frames:
frame_info.append((drive_date, int(drive), int(frame)))
return frame_info
def load_pretrained_model():
from tensorflow.python.keras.models import load_model
model_path = str(paths.checkpoints.multimodal())
return load_model(model_path, custom_objects=train_model.CUSTOM_LAYERS)
def run_eval():
filenames = ['nrm', 'inp', 'tra']
root_path = paths.DATA_PROCESSED_PATH.joinpath('logs')
for filename in filenames:
log_file = root_path.joinpath('{}.csv'.format(filename))
max_0, max_1 = evaluate(log_file)
print('{}: (max 0s: {}, max 1s: {})'.format(filename, max_0, max_1))
def evaluate(log_file):
df = pd.read_csv(log_file)
    predicate = df.prediction.gt(0)
    # Run-length grouping: the cumsum increments each time the boolean series
    # changes value, giving one group id per consecutive run of predictions.
    counts = df.groupby([predicate, (predicate != predicate.shift()).cumsum()])
    counts = counts.size().rename_axis(['>0', 'grp'])
max_running_0 = counts.loc[False].max()
max_running_1 = counts.loc[True].max()
return max_running_0, max_running_1
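# Hedged sketch (not part of the original script): the run-length trick used
# in evaluate(), applied to a toy prediction column.
def _demo_run_length():
    df = pd.DataFrame({'prediction': [0, 0, 1, 1, 1, 0, 1]})
    predicate = df.prediction.gt(0)
    groups = (predicate != predicate.shift()).cumsum()
    counts = df.groupby([predicate, groups]).size().rename_axis(['>0', 'grp'])
    print(counts.loc[True].max())   # longest run of 1s -> 3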
if __name__ == '__main__':
main()
|
{"hexsha": "9cf0568bd1b0996f11d67f714010e1dd2854a333", "size": 4449, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/multimodal/models/predict_test.py", "max_stars_repo_name": "markrofail/multi-modal-deep-learning-for-vehicle-sensor-data-abstraction-and-attack-detection", "max_stars_repo_head_hexsha": "2f252c072f3091bb27506978dd90311f7f82f386", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/multimodal/models/predict_test.py", "max_issues_repo_name": "markrofail/multi-modal-deep-learning-for-vehicle-sensor-data-abstraction-and-attack-detection", "max_issues_repo_head_hexsha": "2f252c072f3091bb27506978dd90311f7f82f386", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-09-25T22:41:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-08T21:50:37.000Z", "max_forks_repo_path": "src/multimodal/models/predict_test.py", "max_forks_repo_name": "markrofail/multi-modal-deep-learning-for-vehicle-sensor-data-abstraction-and-attack-detection", "max_forks_repo_head_hexsha": "2f252c072f3091bb27506978dd90311f7f82f386", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0608108108, "max_line_length": 95, "alphanum_fraction": 0.7095976624, "include": true, "reason": "import numpy", "num_tokens": 1126}
|
from typing import Tuple
from torch.utils.data import DataLoader
from enums.run_type import RunType
from services.arguments.arguments_service_base import ArgumentsServiceBase
from services.dataset_service import DatasetService
class DataLoaderService:
def __init__(
self,
arguments_service: ArgumentsServiceBase,
dataset_service: DatasetService):
self._dataset_service = dataset_service
self._arguments_service = arguments_service
def get_train_dataloaders(self) -> Tuple[DataLoader, DataLoader]:
"""Loads and returns train and validation(if available) dataloaders
:return: the dataloaders
:rtype: Tuple[DataLoader, DataLoader]
"""
language = self._arguments_service.language
train_dataset = self._dataset_service.get_dataset(
RunType.Train, language)
data_loader_train: DataLoader = DataLoader(
train_dataset,
batch_size=self._arguments_service.batch_size,
shuffle=self._arguments_service.shuffle)
if train_dataset.use_collate_function():
data_loader_train.collate_fn = train_dataset.collate_function
if not self._arguments_service.skip_validation:
validation_dataset = self._dataset_service.get_dataset(
RunType.Validation, language)
data_loader_validation = DataLoader(
validation_dataset,
batch_size=self._arguments_service.batch_size,
shuffle=False)
if validation_dataset.use_collate_function():
data_loader_validation.collate_fn = validation_dataset.collate_function
else:
data_loader_validation = None
return (data_loader_train, data_loader_validation)
def get_test_dataloader(self) -> DataLoader:
"""Loads and returns the test dataloader
:return: the test dataloader
:rtype: DataLoader
"""
language = self._arguments_service.language
test_dataset = self._dataset_service.get_dataset(
RunType.Test, language)
data_loader_test: DataLoader = DataLoader(
test_dataset,
batch_size=self._arguments_service.batch_size,
shuffle=False)
if test_dataset.use_collate_function():
data_loader_test.collate_fn = test_dataset.collate_function
return data_loader_test
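# Hedged usage sketch, as comments, since the concrete service objects are
# constructed elsewhere in this project:
#
#   service = DataLoaderService(arguments_service, dataset_service)
#   train_loader, val_loader = service.get_train_dataloaders()
#   for batch in train_loader:
#       ...   # batch layout depends on the dataset's collate function
#   test_loader = service.get_test_dataloader()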
|
{"hexsha": "a23f66a0dd80f9099347e18c612a0d354ec189e5", "size": 2598, "ext": "py", "lang": "Python", "max_stars_repo_path": "services/dataloader_service.py", "max_stars_repo_name": "ktodorov/eval-historical-texts", "max_stars_repo_head_hexsha": "e2994d594525d1d92056a6398935376a96659abb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-08-27T15:03:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T10:48:35.000Z", "max_issues_repo_path": "services/dataloader_service.py", "max_issues_repo_name": "ktodorov/eval-historical-texts", "max_issues_repo_head_hexsha": "e2994d594525d1d92056a6398935376a96659abb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2020-09-12T17:37:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-18T10:36:32.000Z", "max_forks_repo_path": "services/dataloader_service.py", "max_forks_repo_name": "ktodorov/eval-historical-texts", "max_forks_repo_head_hexsha": "e2994d594525d1d92056a6398935376a96659abb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-08T16:16:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T16:16:52.000Z", "avg_line_length": 31.6829268293, "max_line_length": 87, "alphanum_fraction": 0.6939953811, "include": true, "reason": "import numpy", "num_tokens": 467}
|
"""
Plot class.
"""
import copy
from math import sin, cos
import numpy as np
import param
from dataviews.ndmapping import NdMapping
from topo.base.sheetcoords import SheetCoordinateSystem,Slice
from bitmap import HSVBitmap, RGBBitmap, Bitmap, DrawBitmap
### JCALERT!
### - Re-write the test file, taking the new changes into account.
### - I have to change the order: situate, plot_bb and (normalize)
### - There should be a way to associate the density explicitly
### with the sheet_views, because it must match all SheetViews
### in that dictionary. Maybe as a tuple?
### - Fix the plot name handling along with the view_info sheetview attribute
### - Get rid of release_sheetviews.
class Plot(param.Parameterized):
"""
Simple Plot object constructed from a specified PIL image.
"""
staleness_warning=param.Number(default=10,bounds=(0,None),doc="""
Time length allowed between bitmaps making up a single plot before warning.
If the difference between the SheetView with the earliest
timestamp and the one with the latest timestamp is larger
than this parameter's value, produce a warning.
""")
def __init__(self,image=None,**params):
super(Plot,self).__init__(**params)
self._orig_bitmap = Bitmap(image)
self.bitmap = self._orig_bitmap # Possibly scaled copy (at first identical)
self.scale_factor=1.0
self.plot_src_name = ''
self.precedence = 0.0
self.row_precedence = 0.5
# If False, this plot should be left in its native size
# pixel-for-pixel, (e.g. for a color key or similar static
# image), rather than being resized as necessary.
self.resize=False
# Time at which the bitmaps were created
self.timestamp = -1
def rescale(self,scale_factor):
"""
Change the size of this image by the specified numerical factor.
The original image is kept as-is in _orig_bitmap; the scaled
image is stored in bitmap. The scale_factor argument is
taken as relative to the current scaling of the bitmap. For
instance, calling scale(1.5) followed by scale(2.0) will
yield a final scale of 3.0, not 2.0.
"""
self.scale_factor *= scale_factor
if (self._orig_bitmap):
self.bitmap = copy.copy(self._orig_bitmap)
self.bitmap.image = self._orig_bitmap.zoom(self.scale_factor)
def set_scale(self,scale_factor):
"""
Specify the numerical value of the scaling factor for this image.
The original image is kept as-is in _orig_bitmap; the scaled
image is stored in bitmap. The scale_factor argument is
taken as relative to the original size of the bitmap. For
instance, calling scale(1.5) followed by scale(2.0) will
yield a final scale of 2.0, not 3.0.
"""
self.scale_factor = scale_factor
if (self._orig_bitmap):
self.bitmap = copy.copy(self._orig_bitmap)
self.bitmap.image = self._orig_bitmap.zoom(self.scale_factor)
def label(self):
"""Return a label for this plot."""
return self.plot_src_name + '\n' + self.name
def _sane_plot_data(channels,sheet_views):
# CEBALERT: was sf.net tracker item 1860837
# (Avoid plotting only hue+confidence for a weights plot.)
s_chan = channels.get('Strength')
if s_chan is not None and len(s_chan)>0 and s_chan[0]=='Weights':
return channels['Strength'] in sheet_views
else:
return True
# JABALERT: How can we handle joint normalization, where a set of
# plots (e.g. a CFProjectionPlotGroup, or the jointly normalized
# subset of a ConnectionFields plot) is all scaled by the same amount,
# so that relative strengths can be determined? Maybe we can have
# make_template_plot and the various TemplatePlot types accept a
# parameter 'range_only' that makes them simply calculate a pair
# (min,max) with the values to use for scaling, and then the caller
# (e.g. CFProjectionPlotGroup._create_plots) would run through
# everything twice, first to get the ranges, and then the next time it
# would supply an explicit range for scaling (overriding the default
# single-plot normalization)? See the commented-out code for
# value_range below for a start. I *think* that would work, but maybe
# there is some simpler way?
def make_template_plot(channels,sheet_views,density=None,
plot_bounding_box=None,normalize='None',
name='None',range_=False):
"""
Factory function for constructing a Plot object whose type is not yet known.
Typically, a TemplatePlot will be constructed through this call, because
it selects the appropriate type automatically, rather than calling
one of the Plot subclasses automatically. See TemplatePlot.__init__ for
a description of the arguments.
"""
if _sane_plot_data(channels,sheet_views):
plot_types=[SHCPlot,RGBPlot,PalettePlot,MultiOrPlot]
for pt in plot_types:
plot = pt(channels,sheet_views,density,plot_bounding_box,normalize,
name=name,range_=range_)
if plot.bitmap is not None or range_ is None:
# range_ is None means we're calculating the range
return plot
param.Parameterized(name="make_template_plot").verbose('No',name,'plot constructed for this Sheet')
return None
class TemplatePlot(Plot):
"""
A bitmap-based plot as specified by a plot template (or plot channels).
"""
# Not sure why, but this has to be a Parameter to avoid spurious complaints
warn_time=param.Number(-2,precedence=-1,doc="Time last warned about stale plots")
def __init__(self,channels,sheet_views,density,
plot_bounding_box,normalize,
range_=False,**params):
"""
Build a plot out of a set of SheetViews as determined by a plot_template.
        channels is a plot_template, i.e. a dictionary with keys
        (e.g. 'Strength','Hue','Confidence' ...). Each key typically
        has a string value naming a SheetView in sheet_views, though
        specific channels may contain other types of information as
        required by specific Plot subclasses.
channels that are not used by a particular Plot subclass will
silently be ignored.
sheet_views is a dictionary of SheetViews, generally (but
not necessarily) belonging to a Sheet object.
density is the density of the Sheet whose sheet_views was
passed.
plot_bounding_box is the outer bounding_box of the plot to
apply if specified. If not, the bounds of
the smallest SheetView are used.
normalize specifies how the Plot should be normalized: any
value of normalize other than 'None' will result in normalization
according to the value of the range argument:
range=(A,B) - scale plot so that A is 0 and B is 1
range=False - scale plot so that min(plot) is 0 and
max(plot) is 1 (i.e. fill the maximim
dynamic range)
range=None - calculate value_range only
name (which is inherited from Parameterized) specifies the name
to use for this plot.
"""
super(TemplatePlot,self).__init__(**params)
# for a template plot, resize is True by default
self.resize=True
self.bitmap = None
self.channels = channels
self.view_dict = copy.copy(sheet_views)
# bounds of the situated plotting area
self.plot_bounding_box = plot_bounding_box
        ### JCALERT! The problem of displaying the right plot name is still under review;
        ### at the moment we have the plot_src_name and name attributes that are used for the label.
        ### Generally the name is set to the plot_template name, except for connections.
# set the name of the sheet that provides the SheetViews
# combined with the self.name parameter when creating the plot (which is generally
# the name of the plot_template), it provides the necessary information for displaying plot label
self._set_plot_src_name()
# # Eventually: support other type of plots (e.g vector fields...) using
# # something like:
# def annotated_bitmap(self):
# enable other construction....
def _get_sv(self, key):
sheet_view_key = self.channels.get(key, None)
sv = self.view_dict.get(key,{}).get(sheet_view_key, None)
if isinstance(sv, NdMapping):
sv = sv.last
return sv
def _get_matrix(self,key):
"""
Retrieve the matrix view associated with a given key, if any.
If the key is found in self.channels and the corresponding
sheetview is found in self.view_dict, the view's matrix is
returned; otherwise None is returned (with no error).
If the sheet_view derives from a cyclic distribution, and it
will be used as Hue, the matrix is normalized in range 0..1
"""
sv = self._get_sv(key)
        if sv is None:
matrix = None
else:
matrix = sv.data.copy()
if key=='Hue' and sv.cyclic_range is not None:
matrix /= sv.cyclic_range
# Calculate timestamp for this plot
timestamp = sv.metadata.timestamp
if timestamp >=0:
if self.timestamp < 0:
self.timestamp = timestamp
elif abs(timestamp - self.timestamp) > self.staleness_warning:
if TemplatePlot.warn_time != min(timestamp, self.timestamp):
self.warning("Combining SheetViews from different times (%s,%s) for plot %s; see staleness_warning" %
(timestamp, self.timestamp,self.name))
TemplatePlot.warn_time = min(timestamp, self.timestamp)
return matrix
def _set_plot_src_name(self):
""" Set the Plot plot_src_name. Called when Plot is created"""
for key in self.channels:
sheet_view_key = self.channels.get(key,None)
sv = self.view_dict.get(key,{}).get(sheet_view_key)
            if sv is not None:
self.plot_src_name = sv.metadata.src_name
self.precedence = sv.metadata.precedence
self.row_precedence = sv.metadata.row_precedence
if hasattr(sv.metadata,'proj_src_name'):
self.proj_src_name=sv.metadata.proj_src_name
### JCALERT: This could be inserted in the code of get_matrix
def _get_shape_and_box(self):
"""
Sub-function used by plot: get the matrix shape and the bounding box
of the SheetViews that constitute the TemplatePlot.
"""
for channel, name in self.channels.items():
sv = self.view_dict.get(channel,{}).get(name, None)
if isinstance(sv, NdMapping): sv = sv.last
            if sv is not None:
shape = sv.data.shape
box = sv.bounds
return shape, box
# CEBALERT: needs simplification! (To begin work on joint
# normalization, I didn't want to interfere with the existing
# normalization calculations.) Also need to update this
# docstring.
#
# range=None - calculate value_range; don't scale a
# range=(A,B) - scale a so that A is 0 and B is 1
# range=False - scale a so that min(array) is 0 and max(array) is 1
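    # e.g. with range_=(0, 8), an array [2., 4.] maps to [0.25, 0.5].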
def _normalize(self,a,range_):
"""
        Normalize an array a to be in the range 0 to 1.0.
For an array of identical elements, returns an array of ones
if the elements are greater than zero, and zeros if the
elements are less than or equal to zero.
"""
if range_: # i.e. not False, not None (expecting a tuple)
range_min = float(range_[0])
range_max = float(range_[1])
if range_min==range_max:
if range_min>0:
resu = np.ones(a.shape)
else:
resu = np.zeros(a.shape)
else:
a_offset = a - range_min
resu = a_offset/(range_max-range_min)
return resu
else:
if range_ is None:
if not hasattr(self,'value_range'):
self.value_range=(a.min(),a.max())
else:
# If normalizing multiple matrices, take the largest values
self.value_range=(min(self.value_range[0],a.min()),
max(self.value_range[1],a.max()))
return None # (indicate that array was not scaled)
else: # i.e. range_ is False
a_offset = a-a.min()
max_a_offset = a_offset.max()
if max_a_offset>0:
a = np.divide(a_offset,float(max_a_offset))
else:
if min(a.ravel())<=0:
a=np.zeros(a.shape,dtype=np.float)
else:
a=np.ones(a.shape,dtype=np.float)
return a
### JC: maybe density can become an attribute of the TemplatePlot?
def _re_bound(self,plot_bounding_box,mat,box,density):
# CEBHACKALERT: for Julien...
# If plot_bounding_box is that of a Sheet, it will already have been
# setup so that the density in the x direction and the density in the
# y direction are equal.
# If plot_bounding_box comes from elsewhere (i.e. you create it from
# arbitrary bounds), it might need to be adjusted to ensure the density
# in both directions is the same (see Sheet.__init__()). I don't know where
# you want to do that; presumably the code should be common to Sheet and
# where it's used in the plotting?
#
# It's possible we can move some of the functionality
# into SheetCoordinateSystem.
if plot_bounding_box.containsbb_exclusive(box):
ct = SheetCoordinateSystem(plot_bounding_box,density,density)
new_mat = np.zeros(ct.shape,dtype=np.float)
r1,r2,c1,c2 = Slice(box,ct)
new_mat[r1:r2,c1:c2] = mat
else:
scs = SheetCoordinateSystem(box,density,density)
s=Slice(plot_bounding_box,scs)
s.crop_to_sheet(scs)
new_mat = s.submatrix(mat)
return new_mat
class SHCPlot(TemplatePlot):
"""
Bitmap plot based on Strength, Hue, and Confidence matrices.
Constructs an HSV (hue, saturation, and value) plot by choosing
the appropriate matrix for each channel.
"""
def __init__(self,channels,sheet_views,density,
plot_bounding_box,normalize,
range_=False,**params):
super(SHCPlot,self).__init__(channels,sheet_views,density,
plot_bounding_box,normalize,**params)
# catching the empty plot exception
s_mat = self._get_matrix('Strength')
h_mat = self._get_matrix('Hue')
c_mat = self._get_matrix('Confidence')
# If it is an empty plot: self.bitmap=None
if (s_mat is None and c_mat is None and h_mat is None):
self.debug('Empty plot.')
# Otherwise, we construct self.bitmap according to what is specified by the channels.
else:
shape,box = self._get_shape_and_box()
hue,sat,val = self.__make_hsv_matrices((s_mat,h_mat,c_mat),shape,normalize,range_)
if range_ is None:
                return  # range_ is None: only value_range was computed
if self.plot_bounding_box == None:
self.plot_bounding_box = box
hue = self._re_bound(self.plot_bounding_box,hue,box,density)
sat = self._re_bound(self.plot_bounding_box,sat,box,density)
val = self._re_bound(self.plot_bounding_box,val,box,density)
self.bitmap = HSVBitmap(hue,sat,val)
self._orig_bitmap=self.bitmap
def __make_hsv_matrices(self,hsc_matrices,shape,normalize,range_=False):
"""
Sub-function of plot() that return the h,s,v matrices corresponding
to the current matrices in sliced_matrices_dict. The shape of the matrices
in the dict is passed, as well as the normalize boolean parameter.
The result specified a bitmap in hsv coordinate.
Applies normalizing and cropping if required.
"""
zero=np.zeros(shape,dtype=np.float)
one=np.ones(shape,dtype=np.float)
s,h,c = hsc_matrices
# Determine appropriate defaults for each matrix
if s is None: s=one # Treat as full strength by default
if c is None: c=one # Treat as full confidence by default
if h is None: # No color, gray-scale plot.
h=zero
c=zero
# If normalizing, offset the matrix so that the minimum
# value is 0.0 and then scale to make the maximum 1.0
if normalize!='None':
s=self._normalize(s,range_=range_)
# CEBALERT: I meant False, right?
c=self._normalize(c,range_=False)
# This translation from SHC to HSV is valid only for black backgrounds;
# it will need to be extended also to support white backgrounds.
hue,sat,val=h,c,s
return (hue,sat,val)
class RGBPlot(TemplatePlot):
"""
Bitmap plot based on Red, Green, and Blue matrices.
Construct an RGB (red, green, and blue) plot from the Red, Green,
and Blue channels.
"""
def __init__(self,channels,sheet_views,density,
plot_bounding_box,normalize,
range_=False,**params):
super(RGBPlot,self).__init__(channels,sheet_views,density,
plot_bounding_box,normalize,**params)
# catching the empty plot exception
r_mat = self._get_matrix('Red')
g_mat = self._get_matrix('Green')
b_mat = self._get_matrix('Blue')
# If it is an empty plot: self.bitmap=None
        if (r_mat is None and g_mat is None and b_mat is None):
self.debug('Empty plot.')
# Otherwise, we construct self.bitmap according to what is specified by the channels.
else:
shape,box = self._get_shape_and_box()
red,green,blue = self.__make_rgb_matrices((r_mat,g_mat,b_mat),shape,
normalize,range_=range_)
if range_ is None:
                return  # range_ is None: only value_range was computed
if self.plot_bounding_box == None:
self.plot_bounding_box = box
red = self._re_bound(self.plot_bounding_box,red,box,density)
green = self._re_bound(self.plot_bounding_box,green,box,density)
blue = self._re_bound(self.plot_bounding_box,blue,box,density)
self.bitmap = RGBBitmap(red,green,blue)
self._orig_bitmap=self.bitmap
def __make_rgb_matrices(self, rgb_matrices,shape,normalize,range_=False):
"""
Sub-function of plot() that return the h,s,v matrices
corresponding to the current matrices in
sliced_matrices_dict. The shape of the matrices in the dict is
passed, as well as the normalize boolean parameter. The
result specified a bitmap in hsv coordinate.
Applies normalizing and cropping if required.
"""
zero=np.zeros(shape,dtype=np.float)
r,g,b = rgb_matrices
# Determine appropriate defaults for each matrix
if r is None: r=zero
if g is None: g=zero
if b is None: b=zero
# CEBALERT: have I checked this works?
if normalize!='None':
r = self._normalize(r,range_=range_)
g = self._normalize(g,range_=range_)
b = self._normalize(b,range_=range_)
return (r,g,b)
class PalettePlot(TemplatePlot):
"""
Bitmap plot based on a Strength matrix, with optional colorization.
Not yet implemented.
When implemented, construct an RGB plot from a Strength channel,
optionally colorized using a specified Palette.
"""
def __init__(self,channels,sheet_views,density,
plot_bounding_box,normalize,**params):
super(PalettePlot,self).__init__(channels,sheet_views,density,
plot_bounding_box,normalize,**params)
### JABHACKALERT: To implement the class: If Strength is present,
### ask for Palette if it's there, and make a PaletteBitmap.
class MultiOrPlot(TemplatePlot):
"""
    Bitmap plot with oriented lines drawn for every unit, representing
    the most preferred orientations.
    Constructs a matrix of drawing directives displaying oriented lines
    in each unit, colored according to the order of preference and selectivity.
    This plot expects channels named "OrX" and "SelX", with "X" the rank
    of the preferred orientation.
"""
unit_size = param.Number(default=25,bounds=(9,None),doc="box size of a single unit")
min_brightness = param.Number(default=30,bounds=(0,50),doc="min brightness of lines")
max_brightness = param.Number(default=90,bounds=(50,100),doc="max brightness of lines")
def __init__(self,channels,sheet_views,density,
plot_bounding_box,normalize,
range_=False,**params):
super(MultiOrPlot,self).__init__(channels,sheet_views,density,
plot_bounding_box,normalize,**params)
n = len( channels.keys() )
if density > 10:
self.unit_size = int( density )
# there should be an even number of channels
if n % 2:
self.debug('Empty plot.')
return
if ( self.unit_size % 2 ) == 0:
self.unit_size = self.unit_size + 1
        n = n // 2          # integer division: n is used with range() below
m = []
for i in range( n ):
o = self._get_matrix( "Or%d" % (i+1) )
s = self._get_matrix( "Sel%d" % (i+1) )
            if o is None or s is None:
self.debug('Empty plot.')
return
m.append( ( o, s ) )
shape,box = self._get_shape_and_box()
dm = self.__make_lines_from_or_matrix( m, shape )
box_size = self.unit_size
self.bitmap = DrawBitmap( dm, box_size )
self._orig_bitmap = self.bitmap
def __vertices_from_or( self, o ):
"""
help function for generating coordinates of line vertices
from normalized orientation value.
Return a list with two tuples, the coordinates of the segment with the
given orientation, in the normalized range [ 0...1 ].
Orientation is expected in range [ 0..pi ]. Space
representation is in ordinary image convention: first coordinate is X,
from left to right, second coordinate Y, from top to bottom.
"""
s = 0.5 * sin( o )
c = 0.5 * cos( o )
return [ ( 0.5 - c, 0.5 + s ), ( 0.5 + c, 0.5 - s ) ]
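    # For example, o = pi/2 gives [(0.5, 1.0), (0.5, 0.0)]: a vertical segment
    # in image coordinates (y grows downward).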
def __make_line_directive( self, os_list ):
"""
help function for composing the list of line directives
for a single unit.
"""
d_hue = 360 / len( os_list )
hue = 0
p = []
n = self.max_brightness - self.min_brightness
for o,s in os_list:
if s > 0.:
f = "hsl(%d,100%%,%2d%%)" % ( hue, max( self.min_brightness, n * ( 1. - s ) ) )
p.append( { "line": [ o, { "fill": f } ] } )
hue = hue + d_hue
return p
def __make_lines_from_or_matrix( self, matrices, shape ):
"""
return a matrix of line drawing directives for each unit, derived from
the given list of tuples ( o, s ), where o is the orientation view and s
is the selectivity. The list is ordered by the orientation preference.
"""
vertices_from_or = np.vectorize( self.__vertices_from_or, otypes=[np.object_] )
mat_list = []
for o, s in matrices:
a = s.mean()
d = s.std()
ad = a + d
if isinstance( ad, np.number ) and ad > 0:
mat_list.append( ( vertices_from_or( o ), ( s - d ) / ad ) )
lines = np.empty( shape, np.object_ )
for x in range( shape[ 0 ] ):
for y in range( shape[ 1 ] ):
os_list = []
for o, s in mat_list:
os_list.append( ( o[ x, y ], s[ x, y ] ) )
lines[ x, y ] = self.__make_line_directive( os_list )
return lines
|
{"hexsha": "7d2d694ce8358ffcc5100f05b72a4d996f331292", "size": 25161, "ext": "py", "lang": "Python", "max_stars_repo_path": "topo/plotting/plot.py", "max_stars_repo_name": "ceball/topographica", "max_stars_repo_head_hexsha": "ec0eea614409ceb7473e04bc2f6b6c888099160f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "topo/plotting/plot.py", "max_issues_repo_name": "ceball/topographica", "max_issues_repo_head_hexsha": "ec0eea614409ceb7473e04bc2f6b6c888099160f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "topo/plotting/plot.py", "max_forks_repo_name": "ceball/topographica", "max_forks_repo_head_hexsha": "ec0eea614409ceb7473e04bc2f6b6c888099160f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0075528701, "max_line_length": 125, "alphanum_fraction": 0.6068518739, "include": true, "reason": "import numpy", "num_tokens": 5614}
|
import colloidpy as cp
import numpy as np
from dataAnalysis import trace
import matplotlib.pyplot as plt
water = trace('trace_with_water.npy')
water.modify(1, 4)
water.modify(2, 4)
no_water = trace('trace_without_water.npy')
no_water.modify(1, 4)
no_water.modify(2, 4)
print(cp.__version__)
water_data = water.data
no_water_data = no_water.data
def track(data):
    # assign a particle ID to each row: a new trajectory starts wherever
    # the frame counter in column 3 is not consecutive
    N = len(data[:, 3])
    # np.where returns a tuple; take [0] so dis is the index array itself
    # (the original code indexed it with dis(i), which would raise TypeError)
    dis = np.where((data[1:N, 3] - data[0:N-1, 3]) != 1)[0]
    dis_con = np.zeros(N-1)
    dis_con[dis] = 1
    label = np.hstack((0, np.cumsum(dis_con, dtype=int)))
    # restart the frame counter at 0 for each interior trajectory segment
    for i in range(len(dis)-1):
        data[dis[i]+1:dis[i+1]+1, 3] = np.arange(0, dis[i+1]-dis[i])
    return label
pID = track(water_data)
pID2 = track(no_water_data)
print(pID.shape)
print(water_data.shape)
zero = np.zeros_like(water_data[:, 3])
zero2 = np.zeros_like(no_water_data[:, 3])
water_msd = cp.ColloidData(
water_data[:, 0], water_data[:, 1], water_data[:, 3], pID, zero)
no_water_msd = cp.ColloidData(
no_water_data[:, 0], no_water_data[:, 1], no_water_data[:, 3], pID2, zero2)
dts, msd, counts = water_msd.msd()
dts2, msd2, counts2 = no_water_msd.msd()
plt.figure(figsize=(14, 7))
plt.suptitle('with water')
plt.subplot(121)
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$T(s)$', fontsize=25)
plt.ylabel(r'${\Delta r}^2$', fontsize=25)
plt.plot(dts/25, msd[:, 2], '-o', label='$dx^2$')
plt.plot(dts/25, msd[:, 3], '-o', label='$dy^2$')
plt.plot(dts/25, msd[:, 4], '-o', label='$dr^2$')
plt.plot(dts/25, 4*np.exp(1)*dts/25)  # linear (diffusive) reference line
plt.legend(prop={'size': 20})
plt.subplot(122)
plt.xscale('log')
plt.plot(dts/25,counts)
plt.ylabel(r'$counts$', fontsize=25)
plt.savefig('water.png', dpi=200)
plt.show()
plt.figure(figsize=(14, 7))
plt.suptitle('no water')
plt.subplot(121)
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'$T(s)$', fontsize=25)
plt.ylabel(r'${\Delta r}^2$', fontsize=25)
plt.plot(dts2/25, msd2[:, 2], '-o', label='$dx^2$')
plt.plot(dts2/25, msd2[:, 3], '-o', label='$dy^2$')
plt.plot(dts2/25, msd2[:, 4], '-o', label='$dr^2$')
plt.plot(dts2/25, 4*np.exp(1.8)*dts2/25)  # linear (diffusive) reference line
plt.legend(prop={'size': 20})
plt.subplot(122)
plt.xscale('log')
plt.plot(dts2/25,counts2)
plt.ylabel(r'$counts$', fontsize=25)
plt.savefig('no_water.png', dpi=200)
plt.show()
|
{"hexsha": "f91ab47b0ec8e30db5d15de6677d7140b519f84b", "size": 2280, "ext": "py", "lang": "Python", "max_stars_repo_path": "example/msd.py", "max_stars_repo_name": "KyQiao/balltrack", "max_stars_repo_head_hexsha": "2e928ae9dcfd72f43514c978f3556723724b34a1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example/msd.py", "max_issues_repo_name": "KyQiao/balltrack", "max_issues_repo_head_hexsha": "2e928ae9dcfd72f43514c978f3556723724b34a1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/msd.py", "max_forks_repo_name": "KyQiao/balltrack", "max_forks_repo_head_hexsha": "2e928ae9dcfd72f43514c978f3556723724b34a1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.511627907, "max_line_length": 79, "alphanum_fraction": 0.6526315789, "include": true, "reason": "import numpy", "num_tokens": 808}
|
# -*- coding: utf-8 -*-
# This work is part of the Core Imaging Library (CIL) developed by CCPi
# (Collaborative Computational Project in Tomographic Imaging), with
# substantial contributions by UKRI-STFC and University of Manchester.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cil.optimisation.algorithms import Algorithm
import numpy
import warnings
class CGLS(Algorithm):
r'''Conjugate Gradient Least Squares algorithm
Problem:
.. math::
\min || A x - b ||^2_2
|
Parameters :
:parameter operator : Linear operator for the inverse problem
:parameter initial : Initial guess ( Default initial = 0)
:parameter data : Acquired data to reconstruct
:parameter tolerance: Tolerance/ Stopping Criterion to end CGLS algorithm
Reference:
https://web.stanford.edu/group/SOL/software/cgls/
'''
def __init__(self, initial=None, operator=None, data=None, tolerance=1e-6, **kwargs):
'''initialisation of the algorithm
:param operator : Linear operator for the inverse problem
:param initial : Initial guess ( Default initial = 0)
:param data : Acquired data to reconstruct
:param tolerance: Tolerance/ Stopping Criterion to end CGLS algorithm
'''
super(CGLS, self).__init__(**kwargs)
if kwargs.get('x_init', None) is not None:
if initial is None:
warnings.warn('The use of the x_init parameter is deprecated and will be removed in following version. Use initial instead',
DeprecationWarning, stacklevel=4)
initial = kwargs.get('x_init', None)
else:
raise ValueError('{} received both initial and the deprecated x_init parameter. It is not clear which one we should use.'\
.format(self.__class__.__name__))
if initial is None and operator is not None:
initial = operator.domain_geometry().allocate(0)
if initial is not None and operator is not None and data is not None:
self.set_up(initial=initial, operator=operator, data=data, tolerance=tolerance)
def set_up(self, initial, operator, data, tolerance=1e-6):
'''initialisation of the algorithm
:param operator: Linear operator for the inverse problem
:param initial: Initial guess ( Default initial = 0)
:param data: Acquired data to reconstruct
:param tolerance: Tolerance/ Stopping Criterion to end CGLS algorithm
'''
print("{} setting up".format(self.__class__.__name__, ))
self.x = initial * 0.
self.operator = operator
self.tolerance = tolerance
self.r = data - self.operator.direct(self.x)
self.s = self.operator.adjoint(self.r)
self.p = self.s.copy()
self.q = self.operator.range_geometry().allocate()
self.norms0 = self.s.norm()
self.norms = self.s.norm()
self.gamma = self.norms0**2
self.normx = self.x.norm()
self.xmax = self.normx
self.configured = True
print("{} configured".format(self.__class__.__name__, ))
def update(self):
'''single iteration'''
self.operator.direct(self.p, out=self.q)
delta = self.q.squared_norm()
alpha = self.gamma/delta
self.x.axpby(1, alpha, self.p, out=self.x)
#self.x += alpha * self.p
self.r.axpby(1, -alpha, self.q, out=self.r)
#self.r -= alpha * self.q
self.operator.adjoint(self.r, out=self.s)
self.norms = self.s.norm()
self.gamma1 = self.gamma
self.gamma = self.norms**2
self.beta = self.gamma/self.gamma1
#self.p = self.s + self.beta * self.p
self.p.axpby(self.beta, 1, self.s, out=self.p)
self.normx = self.x.norm()
self.xmax = numpy.maximum(self.xmax, self.normx)
def update_objective(self):
a = self.r.squared_norm()
        if numpy.isnan(a):
raise StopIteration()
self.loss.append(a)
def should_stop(self):
'''stopping criterion'''
return self.flag() or self.max_iteration_stop_cryterion()
def flag(self):
'''returns whether the tolerance has been reached'''
flag = (self.norms <= self.norms0 * self.tolerance) or (self.normx * self.tolerance >= 1)
if flag:
self.update_objective()
if self.iteration > self._iteration[-1]:
print (self.verbose_output())
print('Tolerance is reached: {}'.format(self.tolerance))
return flag
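
# ---------------------------------------------------------------------
# A minimal, self-contained NumPy sketch of the same CGLS recursion as
# implemented above (illustration only; the class itself operates on CIL
# operators and DataContainers, not raw arrays):
if __name__ == "__main__":
    import numpy as np
    rng = np.random.default_rng(0)
    A = rng.standard_normal((20, 5))
    b = rng.standard_normal(20)
    x = np.zeros(5)
    r = b - A @ x              # data residual
    s = A.T @ r                # normal-equations residual
    p = s.copy()
    gamma = s @ s
    for _ in range(20):
        q = A @ p
        alpha = gamma / (q @ q)
        x += alpha * p
        r -= alpha * q
        s = A.T @ r
        gamma, gamma1 = s @ s, gamma
        p = s + (gamma / gamma1) * p
    # x now solves the normal equations A^T A x = A^T b
    print(np.allclose(A.T @ (A @ x), A.T @ b))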
|
{"hexsha": "40ff326485c9cb5cddfd0ea4843c7fac18301313", "size": 5282, "ext": "py", "lang": "Python", "max_stars_repo_path": "Wrappers/Python/cil/optimisation/algorithms/CGLS.py", "max_stars_repo_name": "Asharits/CIL", "max_stars_repo_head_hexsha": "66848b021fb2c6daca71e276890152f34a87ba36", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2021-05-18T08:54:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T17:42:31.000Z", "max_issues_repo_path": "Wrappers/Python/cil/optimisation/algorithms/CGLS.py", "max_issues_repo_name": "Asharits/CIL", "max_issues_repo_head_hexsha": "66848b021fb2c6daca71e276890152f34a87ba36", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 580, "max_issues_repo_issues_event_min_datetime": "2018-06-01T13:19:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-07T10:28:57.000Z", "max_forks_repo_path": "Wrappers/Python/cil/optimisation/algorithms/CGLS.py", "max_forks_repo_name": "Asharits/CIL", "max_forks_repo_head_hexsha": "66848b021fb2c6daca71e276890152f34a87ba36", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2018-11-29T12:15:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T07:13:21.000Z", "avg_line_length": 36.4275862069, "max_line_length": 140, "alphanum_fraction": 0.6156758803, "include": true, "reason": "import numpy", "num_tokens": 1186}
|
import os
import sys
import numpy as np
from run_pid_optimized import PIDEvaluator
from bayes_opt import BayesianOptimization
from bayes_opt.observer import JSONLogger
from bayes_opt.event import Events
def func(rx, ry, px, py, yx, yy):
rz, pz, yz = 0.0001, 0.0001, 0.0001
current_dir = os.path.dirname(__file__)
config_path = os.path.join(current_dir, "../configs/iris.config")
os.environ["GYMFC_CONFIG"] = config_path
evaluator = PIDEvaluator()
rewards = evaluator.main('AttFC_GyroErr-MotorVel_M4_Con-v0', 15, [rx, ry, rz], [px, py, pz], [yx, yy, yz])
return rewards.sum()
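# the optimizer below maximizes this summed episode reward over the six
# searched PID gains (the z-axis gains stay pinned at 0.0001 above)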
# Bounded region of parameter space
# pbounds = {'rx': (0.0001, 50),
# 'ry': (0.0001, 50),
# 'rz': (0.0001, 50),
# 'px': (0.0001, 50),
# 'py': (0.0001, 50),
# 'pz': (0.0001, 50),
# 'yx': (0.0001, 50),
# 'yy': (0.0001, 50),
# 'yz': (0.0001, 50),
# }
pbounds = {'rx': (1, 35),
'ry': (1, 35),
'px': (1, 35),
'py': (1, 35),
'yx': (1, 35),
'yy': (1, 35),
}
optimizer = BayesianOptimization(
f=func,
pbounds=pbounds,
random_state=1,
)
logger = JSONLogger(path="./logs.json")
optimizer.subscribe(Events.OPTMIZATION_STEP, logger)
optimizer.maximize(
init_points=25,
n_iter=1000,
)
print(optimizer.max)
|
{"hexsha": "5b51d7f3c544d8c9dec2091627eb89e2ca4e1d10", "size": 1394, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments_prokhn/controllers/bayesian_pid.py", "max_stars_repo_name": "prokhn/onti-2019-bigdata", "max_stars_repo_head_hexsha": "b9296141958f544177388be94072efce7bdc7814", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiments_prokhn/controllers/bayesian_pid.py", "max_issues_repo_name": "prokhn/onti-2019-bigdata", "max_issues_repo_head_hexsha": "b9296141958f544177388be94072efce7bdc7814", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments_prokhn/controllers/bayesian_pid.py", "max_forks_repo_name": "prokhn/onti-2019-bigdata", "max_forks_repo_head_hexsha": "b9296141958f544177388be94072efce7bdc7814", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0344827586, "max_line_length": 110, "alphanum_fraction": 0.5616929699, "include": true, "reason": "import numpy", "num_tokens": 456}
|
import tensorflow as tf
import numpy as np
from utils.data import convert_categorical
from models.base_model import BaseModel
class Discriminator:
def __init__(self, discriminator_model, protected_variable):
self.model = discriminator_model
self.protected_variable = protected_variable
class FairClassifier(BaseModel):
def __init__(self, predictor_model, discriminator_model: Discriminator, hyper_parameters=None):
# assigning predictor and discriminator models
self.predictor = predictor_model
self.discriminator = discriminator_model
# losses and optimizers
self.loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
self.cosine_loss = tf.keras.losses.CosineSimilarity()
self.predictor_optimizer = tf.keras.optimizers.Adam(1e-3)
self.discriminator_optimizer = tf.keras.optimizers.Adam(1e-3)
self.metrics = [
tf.keras.metrics.Mean(name='loss_mean'),
tf.keras.metrics.TruePositives(name='tp'),
tf.keras.metrics.FalsePositives(name='fp'),
tf.keras.metrics.TrueNegatives(name='tn'),
tf.keras.metrics.FalseNegatives(name='fn'),
tf.keras.metrics.BinaryAccuracy(name='accuracy')
]
self.hyper_parameters = hyper_parameters if hyper_parameters is not None else {}
def __predictor_gradient(self, gradients_of_predictor_pred_loss, gradients_of_predictor_disc_loss):
"""
Calculate the final form of the gradient of the predictor network
:param gradients_of_predictor_pred_loss: gradient of parameters based on the loss from predictor network
:param gradients_of_predictor_disc_loss: gradient of parameters based on the loss from discriminator network
:return:
"""
gradients_of_predictor = []
num_gradients = len(gradients_of_predictor_disc_loss)
for i in range(num_gradients):
# weighted gradient coming from the discriminator
alpha = self.hyper_parameters.get("alpha", 1.0)
disc_term = alpha*gradients_of_predictor_disc_loss[i]
# projection of the gradient onto the discriminator gradient
cosine_term = self.cosine_loss(gradients_of_predictor_pred_loss[i], gradients_of_predictor_disc_loss[i])
proj_term = (cosine_term*tf.norm(gradients_of_predictor_pred_loss[i])*gradients_of_predictor_disc_loss[i])/\
tf.norm(gradients_of_predictor_disc_loss[i])
# final form of the gradient
gradients_of_predictor.append(gradients_of_predictor_pred_loss[i] - proj_term - disc_term)
return gradients_of_predictor
@tf.function
def _train_step(self, input_features, labels):
with tf.GradientTape() as predictor_tape, tf.GradientTape(persistent=True) as disc_tape:
# predicting the label
predictor_output = self.predictor(input_features, training=True)
predictor_loss = self.loss(labels, predictor_output)
# creating input for the discriminator
labels = tf.cast(labels, dtype=tf.float32)
            # scale the logits by (1 + |c|) and feed the discriminator the
            # triplet (s, s*y, s*(1-y)) so it can condition on the true label
            s = (1.0 + np.abs(self.hyper_parameters.get('c', 1.0)))*predictor_output
            discriminator_input = tf.squeeze(tf.stack([s, s*labels, s*(1.0 - labels)], axis=1))
# predicting the protected_variable
            discriminator_output = self.discriminator.model(discriminator_input, training=True)
# converting protected variable into target column
protected_feature = tf.keras.layers.DenseFeatures(convert_categorical(self.discriminator.protected_variable,
self.hyper_parameters['category_maps']
))
protected_output = tf.gather(protected_feature(input_features), 0, axis=1)
# calculating the loss of the discriminator
            disc_loss = self.loss(protected_output, discriminator_output)
# calculate and apply the gradient of parameters of the discriminator network
gradients_of_discriminator = disc_tape.gradient(disc_loss,
self.discriminator.model.trainable_variables)
self.discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,
self.discriminator.model.trainable_variables))
# calculate gradients of parameters of predictor network based on
# loss in the discriminator network
gradients_of_predictor_disc_loss = disc_tape.gradient(disc_loss, self.predictor.trainable_variables)
# loss in the predictor network
gradients_of_predictor_pred_loss = predictor_tape.gradient(predictor_loss, self.predictor.trainable_variables)
gradients_of_predictor = self.__predictor_gradient(gradients_of_predictor_pred_loss,
gradients_of_predictor_disc_loss)
# apply gradient updates
self.predictor_optimizer.apply_gradients(zip(gradients_of_predictor, self.predictor.trainable_variables))
return tf.cast(tf.greater(predictor_output, 0.0), dtype=tf.int32), predictor_loss
|
{"hexsha": "08f9f1328178575c3bd8072cd427320de98a38fb", "size": 5411, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/adversarial_model.py", "max_stars_repo_name": "cryptexis/debias", "max_stars_repo_head_hexsha": "a9e0106dcb8668b95e4654ccb3e7373a70ea37a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/adversarial_model.py", "max_issues_repo_name": "cryptexis/debias", "max_issues_repo_head_hexsha": "a9e0106dcb8668b95e4654ccb3e7373a70ea37a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/adversarial_model.py", "max_forks_repo_name": "cryptexis/debias", "max_forks_repo_head_hexsha": "a9e0106dcb8668b95e4654ccb3e7373a70ea37a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-26T10:05:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-26T10:05:19.000Z", "avg_line_length": 50.1018518519, "max_line_length": 122, "alphanum_fraction": 0.6671594899, "include": true, "reason": "import numpy", "num_tokens": 1010}
|
#!/usr/bin/env python
u"""
hdf5_stokes.py
Written by Tyler Sutterley (10/2021)
Writes spherical harmonic coefficients to HDF5 files
CALLING SEQUENCE:
hdf5_stokes(clm1,slm1,linp,minp,tinp,month,FILENAME=output_HDF5_file)
INPUTS:
clm1: cosine spherical harmonic coefficients
slm1: sine spherical harmonic coefficients
linp: spherical harmonic degree (l)
minp: spherical harmonic order (m)
tinp: date of measurement
month: GRACE/GRACE-FO month
OPTIONS:
FILENAME: output filename HDF5
UNITS: spherical harmonic units
TIME_UNITS: time variable units
TIME_LONGNAME: time variable description
MONTHS_NAME: name of months variable within HDF5 file
MONTHS_UNITS: months variable units
MONTHS_LONGNAME: months variable description
TITLE: description attribute of dataset
REFERENCE: reference attribute of dataset
CLOBBER: will overwrite an existing HDF5 file
VERBOSE: will print to screen the HDF5 structure parameters
DATE: harmonics have date information
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
h5py: Python interface for Hierarchal Data Format 5 (HDF5)
(https://www.h5py.org)
UPDATE HISTORY:
Updated 10/2021: using python logging for handling verbose output
Updated 05/2021: define int/float precision to prevent deprecation warning
Updated 12/2020: added REFERENCE option to set file attribute
Updated 07/2020: added function docstrings
Updated 03/2020: only include title if not None
Updated 10/2019: changing Y/N flags to True/False
Updated 08/2019: don't include time (HH:MM:SS) in creation date
Updated 07/2019: added creation date as a global attribute
Updated 03/2019: print variables keys in list for Python3 compatibility
Updated 12/2018: using python dictionaries to improve readability
Updated 10/2018: using future division for python3 Compatibility
Updated 02/2017: added MONTHS_UNITS, MONTHS_LONGNAME, MONTHS_NAME parameters
aligned TIME_LONGNAME and TIME_UNITS with attributes
can output a HDF5 file with multiple dates similar to the netcdf program
Updated 06/2016: using __future__ print function
Updated 03/2016: direct calculation of number of harmonics n_harm
Updated 05/2015: minor change for MMAX != LMAX
Updated 11/2014: got back to writing this
in working condition with updated attributes as in netcdf equivalent
Updated 12/2013: converted ncdf code to HDF5 code (alternative data type)
Updated 07/2013: switched from Scientific Python to Scipy
Updated 05/2013 made UNITS an option in case converting the units to
mass harmonics or other harmonic variant
Updated 03/2013: added units to clm and slm as 'Geodesy Normalization'
switched I/O to column arrays for smaller file sizes and compatibility
between languages
made date an option for datasets that have no date
Updated 01/2013 to add time and GRACE/GRACE-FO month number
Written 07/2012
"""
from __future__ import print_function, division
import time
import h5py
import logging
import numpy as np
def hdf5_stokes(clm1, slm1, linp, minp, tinp, month, FILENAME=None,
UNITS='Geodesy_Normalization', TIME_UNITS=None, TIME_LONGNAME=None,
MONTHS_NAME='month', MONTHS_UNITS='number', MONTHS_LONGNAME='GRACE_month',
TITLE=None, REFERENCE=None, DATE=True, CLOBBER=True, VERBOSE=False):
"""
Writes spherical harmonic coefficients to HDF5 files
Arguments
---------
clm1: cosine spherical harmonic coefficients
slm1: sine spherical harmonic coefficients
linp: spherical harmonic degree (l)
minp: spherical harmonic order (m)
tinp: date of measurement
month: GRACE/GRACE-FO month
Keyword arguments
-----------------
FILENAME: HDF5 filename
UNITS: spherical harmonic units
TIME_UNITS: time variable units
TIME_LONGNAME: time variable description
MONTHS_NAME: name of months variable within HDF5 file
MONTHS_UNITS: months variable units
MONTHS_LONGNAME: months variable description
TITLE: description attribute of dataset
REFERENCE: reference attribute of dataset
CLOBBER: will overwrite an existing HDF5 file
VERBOSE: will print to screen the HDF5 structure parameters
DATE: harmonics have date information
"""
#-- setting HDF5 clobber attribute
clobber = 'w' if CLOBBER else 'w-'
#-- opening HDF5 file for writing
fileID = h5py.File(FILENAME, clobber)
#-- Maximum spherical harmonic degree (LMAX) and order (MMAX)
LMAX = np.max(linp)
MMAX = np.max(minp)
#-- Calculating the number of cos and sin harmonics up to LMAX
#-- taking into account MMAX (if MMAX == LMAX then LMAX-MMAX=0)
n_harm = (LMAX**2 + 3*LMAX - (LMAX-MMAX)**2 - (LMAX-MMAX))//2 + 1
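    #-- e.g. for LMAX = MMAX = 2: (4 + 6 - 0 - 0)//2 + 1 = 6 harmonics
    #-- (C00, C10, C11, C20, C21, C22), matching the (m,l) loop below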
#-- Restructuring output matrix to array format
#-- will reduce matrix size and insure compatibility between platforms
if DATE:
if (np.ndim(tinp) == 0):
n_time = 1
clm = np.zeros((n_harm))
slm = np.zeros((n_harm))
else:
n_time = len(tinp)
clm = np.zeros((n_harm,n_time))
slm = np.zeros((n_harm,n_time))
else:
n_time = 0
clm = np.zeros((n_harm))
slm = np.zeros((n_harm))
#-- restructured degree and order
lout = np.zeros((n_harm,), dtype=np.int32)
mout = np.zeros((n_harm,), dtype=np.int32)
#-- create counter variable lm
lm = 0
for m in range(0,MMAX+1):#-- MMAX+1 to include MMAX
for l in range(m,LMAX+1):#-- LMAX+1 to include LMAX
lout[lm] = np.int64(l)
mout[lm] = np.int64(m)
if (DATE and (n_time > 1)):
clm[lm,:] = clm1[l,m,:]
slm[lm,:] = slm1[l,m,:]
else:
clm[lm] = clm1[l,m]
slm[lm] = slm1[l,m]
#-- add 1 to lm counter variable
lm += 1
#-- Defining the HDF5 dataset variables
h5 = {}
h5['l'] = fileID.create_dataset('l', (n_harm,),
data=lout, dtype=np.int64, compression='gzip')
h5['m'] = fileID.create_dataset('m', (n_harm,),
data=mout, dtype=np.int64, compression='gzip')
if DATE:
h5['time'] = fileID.create_dataset('time', (n_time,),
data=tinp, dtype=np.float64, compression='gzip')
h5['month'] = fileID.create_dataset(MONTHS_NAME, (n_time,),
data=month, dtype=np.int64, compression='gzip')
#-- if more than 1 date in file
if (n_time > 1):
h5['clm'] = fileID.create_dataset('clm', (n_harm,n_time,),
data=clm, dtype=np.float64, compression='gzip')
h5['slm'] = fileID.create_dataset('slm', (n_harm,n_time,),
data=slm, dtype=np.float64, compression='gzip')
else:
h5['clm'] = fileID.create_dataset('clm', (n_harm,),
data=clm, dtype=np.float64, compression='gzip')
h5['slm'] = fileID.create_dataset('slm', (n_harm,),
data=slm, dtype=np.float64, compression='gzip')
#-- filling HDF5 dataset attributes
#-- Defining attributes for degree and order
h5['l'].attrs['long_name'] = 'spherical_harmonic_degree'#-- degree long name
h5['l'].attrs['units'] = 'Wavenumber'#-- SH degree units
h5['m'].attrs['long_name'] = 'spherical_harmonic_order'#-- order long name
h5['m'].attrs['units'] = 'Wavenumber'#-- SH order units
#-- Defining attributes for dataset
h5['clm'].attrs['long_name'] = 'cosine_spherical_harmonics'
h5['clm'].attrs['units'] = UNITS
h5['slm'].attrs['long_name'] = 'sine_spherical_harmonics'
h5['slm'].attrs['units'] = UNITS
if DATE:
#-- Defining attributes for date and month (or integer date)
h5['time'].attrs['long_name'] = TIME_LONGNAME
h5['time'].attrs['units'] = TIME_UNITS
h5['month'].attrs['long_name'] = MONTHS_LONGNAME
h5['month'].attrs['units'] = MONTHS_UNITS
#-- description of file
if TITLE:
fileID.attrs['description'] = TITLE
#-- reference of file
if REFERENCE:
fileID.attrs['reference'] = REFERENCE
#-- date created
fileID.attrs['date_created'] = time.strftime('%Y-%m-%d',time.localtime())
#-- Output HDF5 structure information
logging.info(FILENAME)
logging.info(list(fileID.keys()))
#-- Closing the HDF5 file
fileID.close()
|
{"hexsha": "3f9b43c4eeb051c98ef4fb21c861fd28d4424252", "size": 8426, "ext": "py", "lang": "Python", "max_stars_repo_path": "gravity_toolkit/hdf5_stokes.py", "max_stars_repo_name": "tsutterley/read-GRACE-harmonics", "max_stars_repo_head_hexsha": "6feb1ef24402ec02d14cf852e655aa5367ef719e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-07-25T00:32:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T13:37:30.000Z", "max_issues_repo_path": "gravity_toolkit/hdf5_stokes.py", "max_issues_repo_name": "tsutterley/read-GRACE-harmonics", "max_issues_repo_head_hexsha": "6feb1ef24402ec02d14cf852e655aa5367ef719e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-08-15T02:28:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T17:59:50.000Z", "max_forks_repo_path": "gravity_toolkit/hdf5_stokes.py", "max_forks_repo_name": "tsutterley/read-GRACE-harmonics", "max_forks_repo_head_hexsha": "6feb1ef24402ec02d14cf852e655aa5367ef719e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2018-08-01T04:37:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T07:36:43.000Z", "avg_line_length": 40.3157894737, "max_line_length": 80, "alphanum_fraction": 0.668525991, "include": true, "reason": "import numpy", "num_tokens": 2311}
|
"""
"""
from pathlib import Path
import argparse
import random
import shutil
import logging
import os, sys
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import torchvision
# import keras
#
# import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from models import TrajectoryGenerator, RNN
from data.loader import data_loader
import utils
from utils import (
displacement_error,
final_displacement_error,
get_dset_path,
int_tuple,
l2_loss,
relative_to_abs,
)
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("--log_dir", default="./", help="Directory containing logging file")
parser.add_argument("--dataset_name", default="drift", type=str)
# parser.add_argument("--dataset_name", default="zara1", type=str)
parser.add_argument("--delim", default="\t")
# parser.add_argument("--delim", default=" ")
parser.add_argument("--dset_type", default="test", type=str)
parser.add_argument("--loader_num_workers", default=4, type=int)
parser.add_argument("--obs_len", default=6, type=int)
parser.add_argument("--pred_len", default=4, type=int)
parser.add_argument("--skip", default=1, type=int)
parser.add_argument("--num_samples", default=20, type=int)
parser.add_argument("--seed", type=int, default=72, help="Random seed.")
parser.add_argument("--batch_size", default=8, type=int)
parser.add_argument("--num_epochs", default=2, type=int)
parser.add_argument("--noise_dim", default=(16,), type=int_tuple)
parser.add_argument("--noise_type", default="gaussian")
#
parser.add_argument(
"--traj_lstm_input_size", type=int, default=2, help="traj_lstm_input_size"
)
parser.add_argument("--traj_lstm_hidden_size", default=32, type=int)
#
parser.add_argument(
"--heads", type=str, default="4,1", help="Heads in each layer, splitted with comma"
)
parser.add_argument(
"--hidden-units",
type=str,
default="16",
help="Hidden units in each hidden layer, splitted with comma",
)
parser.add_argument(
"--graph_network_out_dims",
type=int,
default=32,
help="dims of every node after through GAT module",
)
parser.add_argument("--graph_lstm_hidden_size", default=32, type=int)
#
parser.add_argument(
"--dropout", type=float, default=0, help="Dropout rate (1 - keep probability)."
)
parser.add_argument(
"--alpha", type=float, default=0.2, help="Alpha for the leaky_relu."
)
#
#
parser.add_argument(
"--lr",
default=1e-3,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
#
parser.add_argument("--best_k", default=20, type=int) # K=20 samples
parser.add_argument("--print_every", default=10, type=int)
parser.add_argument("--use_gpu", default=1, type=int)
parser.add_argument("--gpu_num", default="0", type=str)
#
parser.add_argument(
"--resume",
default="./checkpoint/checkpoint158.pth.tar",
# default="./checkpoint/checkpoint_lstm_215.pth.tar",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num
def evaluate_helper(error, seq_start_end, model_output_traj, model_output_traj_best):
error = torch.stack(error, dim=1)
for (start, end) in seq_start_end:
start = start.item()
end = end.item()
_error = error[start:end]
_error = torch.sum(_error, dim=0)
min_index = _error.min(0)[1].item()
model_output_traj_best[:, start:end, :] = model_output_traj[min_index][
:, start:end, :
]
return model_output_traj_best
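# evaluate_helper keeps, for every pedestrian sequence, the sampled
# trajectory with the lowest summed displacement error -- the usual
# best-of-K evaluation protocol for multimodal trajectory predictors.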
def cal_ade_fde(pred_traj_gt, pred_traj_fake):
ade = displacement_error(pred_traj_fake, pred_traj_gt, mode="raw")
fde = final_displacement_error(pred_traj_fake[-1], pred_traj_gt[-1], mode="raw")
    de = pred_traj_gt.permute(1, 0, 2) - pred_traj_fake.permute(1, 0, 2)  # per-step displacement (currently unused)
return ade, fde
def get_generator(checkpoint):
n_units = (
[args.traj_lstm_hidden_size]
+ [int(x) for x in args.hidden_units.strip().split(",")]
+ [args.graph_lstm_hidden_size]
)
n_heads = [int(x) for x in args.heads.strip().split(",")]
model = TrajectoryGenerator(
obs_len=args.obs_len,
pred_len=args.pred_len,
traj_lstm_input_size=args.traj_lstm_input_size,
traj_lstm_hidden_size=args.traj_lstm_hidden_size,
n_units=n_units,
n_heads=n_heads,
graph_network_out_dims=args.graph_network_out_dims,
dropout=args.dropout,
alpha=args.alpha,
graph_lstm_hidden_size=args.graph_lstm_hidden_size,
noise_dim=args.noise_dim,
noise_type=args.noise_type,
)
model.load_state_dict(checkpoint["state_dict"])
model.cuda()
model.eval()
return model
def plot_trajectory(args, loader, generator):
ground_truth_input = []
all_model_output_traj = []
ground_truth_output = []
pic_cnt = 0
traj_arr_lst_all = []
with torch.no_grad():
for bat_id, batch in enumerate(loader):
batch = [tensor.cuda() for tensor in batch]
(
obs_traj,
pred_traj_gt,
obs_traj_rel,
pred_traj_gt_rel,
non_linear_ped,
loss_mask,
seq_start_end,
) = batch
ade = []
ground_truth_input.append(obs_traj)
ground_truth_output.append(pred_traj_gt)
model_output_traj = []
model_output_traj_best = torch.ones_like(pred_traj_gt).cuda()
for _ in range(args.num_samples):
pred_traj_fake_rel = generator(
obs_traj_rel, obs_traj, seq_start_end, 0, 3
)
pred_traj_fake_rel = pred_traj_fake_rel[-args.pred_len :]
pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])
model_output_traj.append(pred_traj_fake)
ade_, fde_ = cal_ade_fde(pred_traj_gt, pred_traj_fake)
ade.append(ade_)
model_output_traj_best = evaluate_helper(
ade, seq_start_end, model_output_traj, model_output_traj_best
)
all_model_output_traj.append(model_output_traj_best)
traj_list = []
for idx, (start, end) in enumerate(seq_start_end):
# plt.figure(figsize=(16,9), dpi=300)
ground_truth_input_x_piccoor = (
obs_traj[:, start:end, :].cpu().numpy()[:, :, 0].T
)
ground_truth_input_y_piccoor = (
obs_traj[:, start:end, :].cpu().numpy()[:, :, 1].T
)
ground_truth_output_x_piccoor = (
pred_traj_gt[:, start:end, :].cpu().numpy()[:, :, 0].T
)
ground_truth_output_y_piccoor = (
pred_traj_gt[:, start:end, :].cpu().numpy()[:, :, 1].T
)
model_output_x_piccoor = (
model_output_traj_best[:, start:end, :].cpu().numpy()[:, :, 0].T
)
model_output_y_piccoor = (
model_output_traj_best[:, start:end, :].cpu().numpy()[:, :, 1].T
)
for i in range(ground_truth_output_x_piccoor.shape[0]):
traj_list.append(np.concatenate([list(ground_truth_input_x_piccoor[i, :]),
list(ground_truth_output_x_piccoor[i, :]),
list(model_output_x_piccoor[i, :]),
list(ground_truth_input_y_piccoor[i, :]),
list(ground_truth_output_y_piccoor[i, :]),
list(model_output_y_piccoor[i, :])
]))
pic_cnt += 1
traj_arr = np.reshape(traj_list, (-1, args.pred_len*4+args.obs_len*2))
xin_true_key_list = ['observed input x_%d'%int(i+1) for i in range(args.obs_len)]
xout_true_key_list = ['ground truth output xt_%d'%int(i+1) for i in range(args.pred_len)]
xout_pred_key_list = ['predicted output xp_%d'%int(i+1) for i in range(args.pred_len)]
yin_true_key_list = ['observed input y_%d'%int(i+1) for i in range(args.obs_len)]
yout_true_key_list = ['ground truth output yt_%d'%int(i+1) for i in range(args.pred_len)]
yout_pred_key_list = ['predicted output yp_%d'%int(i+1) for i in range(args.pred_len)]
key_list = np.concatenate(
[xin_true_key_list,
xout_true_key_list,
xout_pred_key_list,
yin_true_key_list,
yout_true_key_list,
yout_pred_key_list]
)
traj_df = pd.DataFrame(traj_arr, columns=key_list)
traj_df_csv = traj_df
traj_df_csv.to_csv("./visualize/stgat/traj_test_%d.csv" % bat_id)
traj_arr_lst_all.append(traj_arr)
def visualize(args):
checkpoint = torch.load(args.resume)
generator = get_generator(checkpoint)
path = get_dset_path(args.dataset_name, args.dset_type)
print("path: \n" + path)
_, loader = data_loader(args, path)
plot_trajectory(args, loader, generator)
if __name__ == '__main__':
logging.info(
"program start"
)
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
visualize(args)
print('complete!!')
|
{"hexsha": "71e50df0eaeded3af3acb07edc4d5cc5ce205dc5", "size": 9992, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_main_results.py", "max_stars_repo_name": "divergent63/Ocean_Trajectory_Forecast", "max_stars_repo_head_hexsha": "cc1be57c519508b74d08e4595023a6b82dc50b78", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generate_main_results.py", "max_issues_repo_name": "divergent63/Ocean_Trajectory_Forecast", "max_issues_repo_head_hexsha": "cc1be57c519508b74d08e4595023a6b82dc50b78", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generate_main_results.py", "max_forks_repo_name": "divergent63/Ocean_Trajectory_Forecast", "max_forks_repo_head_hexsha": "cc1be57c519508b74d08e4595023a6b82dc50b78", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.196013289, "max_line_length": 101, "alphanum_fraction": 0.6236989592, "include": true, "reason": "import numpy", "num_tokens": 2320}
|
abstract type AbstractAxis{T, BL, BR, I} <: AbstractVector{T} end
abstract type AbstractDiscreteAxis{T, BL, BR, I} <: AbstractAxis{T, BL, BR, I} end
"""
    DiscreteAxis{T, BL, BR, I} <: AbstractAxis{T, BL, BR, I}
* T: Type of ticks
* BL, BR ∈ {:periodic, :reflecting, :infinite, :r0, :fixed}
* BL: left boundary condition
* BR: right boundary condition
* I: IntervalSets.Interval (closed or open boundaries)
"""
struct DiscreteAxis{T, BL, BR, I} <: AbstractAxis{T, BL, BR, I}
interval::I
ticks::Vector{T}
end
@inline size(dx::DiscreteAxis{T, BL, BR}) where {T, BL, BR} = size(dx.ticks)
@inline IndexStyle(::Type{<:DiscreteAxis}) = IndexLinear()
@inline length(dx::DiscreteAxis{T, BL, BR}) where {T, BL, BR} = length(dx.ticks)
@inline getindex(dx::DiscreteAxis{T, BL, BR}, i::Int) where {T, BL, BR} = dx.ticks[i]
@inline setindex!(dx::DiscreteAxis{T, BL, BR}, v::T, i::Int) where {T, BL, BR} = setindex!(dx.ticks, v, i)
@inline axes(dx::DiscreteAxis{T, BL, BR}) where {T, BL, BR} = axes(dx.ticks)
function DiscreteAxis{T, BL, BR}(int::I, ticks::Vector{T})::DiscreteAxis{T, BL, BR, typeof(int)} where {T, BL, BR, I}
return DiscreteAxis{T, BL, BR, typeof(int)}( int, ticks )
end
"""
DiscreteAxis(left_endpoint::T, right_endpoint::T, BL::Symbol, BR::Symbol, L::Symbol, R::Symbol, ticks::AbstractVector{T}) where {T}
* T: Type of ticks
* BL, BR ∈ {:periodic, :reflecting, :infinite, :r0, :fixed}
* L, R {:closed, :open}
* ticks: Ticks of the axis
"""
function DiscreteAxis(left_endpoint::T, right_endpoint::T, BL::Symbol, BR::Symbol, L::Symbol, R::Symbol, ticks::AbstractVector{T}) where {T}
int::Interval{L, R, T} = Interval{L, R, T}( left_endpoint, right_endpoint )
return DiscreteAxis{T, BL, BR, typeof(int)}( int, ticks )
end
function sizeof(dx::DiscreteAxis{T, BL, BR}) where {T, BL, BR}
return sizeof(dx.interval) + sizeof(dx.ticks)
end
function print(io::IO, dx::DiscreteAxis{T, BL, BR}) where {T, BL, BR}
print(io, dx.interval, " - length = ", length(dx))
end
function println(io::IO, dx::DiscreteAxis{T, BL, BR}) where {T, BL, BR}
println(io, dx.interval)
println(io, "length = ", length(dx))
end
function show(io::IO, dx::DiscreteAxis{T, BL, BR}) where {T, BL, BR}
println(io, dx)
end
function show(io::IO, ::MIME"text/plain", dx::DiscreteAxis{T, BL, BR}) where {T, BL, BR}
show(io, dx)
end
function get_boundary_types(int::Interval{L, R})::Tuple{Symbol, Symbol} where {L, R}
return L, R
end
function get_boundary_types(ax::DiscreteAxis{T,LB,RB})::NTuple{4, Symbol} where {T, LB, RB}
return LB, RB, get_boundary_types(ax.interval)...
end
function uniq(v::Vector{T})::Vector{T} where {T <: Real}
v1::Vector{T} = Vector{T}()
if length(v) > 0
laste::T = v[1]
push!(v1, laste)
for e in v
if e != laste
laste = e
push!(v1, laste)
end
end
end
return v1
end
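# e.g. uniq([1, 1, 2, 2, 3]) == [1, 2, 3]; note that only *consecutive*
# duplicates are collapsed, so uniq([1, 2, 1]) == [1, 2, 1]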
function merge_axis_ticks_with_important_ticks(ax::DiscreteAxis{T}, impticks::Vector{T}; atol::Real = 0.0001 )::Vector{T} where {T}
v::Vector{T} = T[]
for r in impticks if in(r, ax.interval) push!(v, r) end end
for r in ax push!(v, r) end
sort!(v)
v = uniq(v)
delete_idcs::Vector{Int} = Int[]
for i in 1:(length(v) - 1)
if (v[i + 1] - v[i]) < atol
if !in(v[i], impticks) push!(delete_idcs, i) end
if !in(v[i + 1], impticks) push!(delete_idcs, i + 1) end
end
end
delete_idcs = sort(uniq(delete_idcs))
deleteat!(v, delete_idcs)
for impv in impticks
if !in(impv, v) && in(impv, ax.interval)
error("Important ticks were removed.")
end
end
return v
end
function range(interval::Interval{:closed, :closed, T}; step::Union{Missing, T} = missing, length::Union{Missing, Int} = missing) where {T}
stop::T = interval.right
if ismissing(step) && ismissing(length)
range(interval.left, stop = stop)
elseif ismissing(step)
range(interval.left, stop = stop, length=length)
elseif ismissing(length)
range(interval.left, stop = stop, step=step)
else
error(KeyError, ": Both keywords `step` and `length` were given. But only one is allowed.")
end
end
function range(interval::Interval{:closed, :open, T}; step::Union{Missing, T} = missing, length::Union{Missing, Int} = missing) where {T}
if ismissing(step) && ismissing(length)
length::Int = 2
stop::T = (interval.right + interval.left) / 2
range(interval.left, stop = stop, length=2)
elseif ismissing(step)
stop = interval.right - ( interval.right - interval.left ) / length
range(interval.left, stop = stop, length=length)
elseif ismissing(length)
# stop = interval.right - interval.right % step
stop = geom_round(interval.right - step)
range(interval.left, stop = stop, step=step)
else
error(KeyError, ": Both keywords `step` and `length` were given. But only one is allowed.")
end
end
function range(interval::Interval{:open, :closed, T}; step::Union{Missing, T} = missing, length::Union{Missing, Int} = missing) where {T}
stop::T = interval.right
if ismissing(step) && ismissing(length)
step::T = (stop - interval.left) / 2
range(interval.left + step, stop = stop, length=2)
elseif ismissing(step)
step = (stop - interval.left) / length
range(interval.left + step, stop = stop, length=length)
elseif ismissing(length)
range(interval.left + step, stop = stop, step=step)
else
error(KeyError, ": Both keywords `step` and `length` were given. But only one is allowed.")
end
end
function range(interval::Interval{:open, :open, T}; step::Union{Missing, T} = missing, length::Union{Missing, Int} = missing) where {T}
if ismissing(step) && ismissing(length)
step::T = ( interval.right - interval.left ) / 3
range(interval.left + step, stop = interval.right - step, length=2)
elseif ismissing(step)
step = ( interval.right - interval.left ) / (length + 1)
range(interval.left + step, stop = interval.right - step, length=length)
elseif ismissing(length)
tmp::T = interval.right % step
if tmp == 0 tmp = step end
stop = interval.right - tmp
range(interval.left + step, stop = stop, step=step)
else
error(KeyError, ": Both keywords `step` and `length` were given. But only one is allowed.")
end
end
function DiscreteAxis{BL, BR}(interval::Interval{L, R, T}; step::Union{Missing, T} = missing, length::Union{Missing, Int} = missing)::DiscreteAxis{T, BL, BR} where {L, R, T, BL, BR}
ticks::Vector{T} = collect(range(interval, step=step, length=length))
if T == Float32 || T == Float64
ticks = round.(ticks, sigdigits = geom_sigdigits(T))
for iv in eachindex(ticks)
if isapprox(ticks[iv], 0, atol = geom_atol_zero(T))
ticks[iv] = zero(T)
end
end
end
DiscreteAxis{T, BL, BR}(interval, ticks)
end
function midpoints(a::Vector{T})::Vector{T} where {T}
@inbounds r::Vector{T} = a[1:end-1]
@simd for i in eachindex(r)
@inbounds r[i] += 0.5 * (a[i + 1] - a[i])
end
return r
end
function get_extended_ticks( ax::DiscreteAxis{T, :reflecting, :reflecting} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
set_periodic_bondary_ticks!(ticks_ext, ax.interval)
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :fixed, :reflecting} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
set_periodic_bondary_ticks!(ticks_ext, ax.interval)
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :reflecting, :fixed} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
set_periodic_bondary_ticks!(ticks_ext, ax.interval)
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :periodic, :periodic} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
set_periodic_bondary_ticks!(ticks_ext, ax.interval)
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :infinite, :infinite} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
Δ::T = 1 * (ticks_ext[end-1] - ticks_ext[2])
ticks_ext[1] = ticks_ext[2] - Δ
ticks_ext[end] = ticks_ext[end - 1] + Δ
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :r0, :infinite} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
ticks_ext[1] = ticks_ext[2] - (ticks_ext[3] - ticks_ext[2])
Δ::T = 1 * (ticks_ext[end-1] - ticks_ext[2])
ticks_ext[end] = ticks_ext[end - 1] + Δ
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :r0, :fixed} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
ticks_ext[1] = ticks_ext[2] - (ticks_ext[3] - ticks_ext[2])
Δ::T = 1 * (ticks_ext[end-1] - ticks_ext[2])
ticks_ext[end] = ticks_ext[end - 1] + Δ
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :r0, :reflecting} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
ticks_ext[1] = ticks_ext[2] - (ticks_ext[3] - ticks_ext[2])
Δ::T = ticks_ext[end-1] - ticks_ext[end - 2]
ticks_ext[end] = ticks_ext[end - 1] + Δ
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :fixed, :fixed} )::Vector{T} where {T}
# same as get_extended_ticks( ax::DiscreteAxis{T, :reflecting, :reflecting} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
set_periodic_bondary_ticks!(ticks_ext, ax.interval)
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :infinite, :fixed} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
set_periodic_bondary_ticks!(ticks_ext, ax.interval)
Δ::T = 1 * (ticks_ext[end-1] - ticks_ext[2])
ticks_ext[1] = ticks_ext[2] - Δ
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :infinite, :reflecting} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
set_periodic_bondary_ticks!(ticks_ext, ax.interval)
Δ::T = 1 * (ticks_ext[end-1] - ticks_ext[2])
ticks_ext[1] = ticks_ext[2] - Δ
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :fixed, :infinite} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
set_periodic_bondary_ticks!(ticks_ext, ax.interval)
Δ::T = 1 * (ticks_ext[end-1] - ticks_ext[2])
ticks_ext[end] = ticks_ext[end - 1] + Δ
return ticks_ext
end
function get_extended_ticks( ax::DiscreteAxis{T, :reflecting, :infinite} )::Vector{T} where {T}
ticks_ext::Vector{T} = Array{T}(undef, length(ax.ticks) + 2)
ticks_ext[2:end-1] = ax.ticks
set_periodic_bondary_ticks!(ticks_ext, ax.interval)
Δ::T = 1 * (ticks_ext[end-1] - ticks_ext[2])
ticks_ext[end] = ticks_ext[end - 1] + Δ
return ticks_ext
end
function set_periodic_bondary_ticks!( ticks::Vector{T}, interval::Interval{:closed, :open, T})::Nothing where {T}
ticks[1] = ticks[2] - (interval.right - ticks[end - 1])
ticks[end] = interval.right
nothing
end
function set_periodic_bondary_ticks!( ticks::Vector{T}, interval::Interval{:open, :closed, T})::Nothing where {T}
ticks[1] = interval.left
ticks[end] = ticks[end - 1] + (ticks[2] - interval.left)
nothing
end
function set_periodic_bondary_ticks!( ticks::Vector{T}, interval::Interval{:open, :open, T})::Nothing where {T}
ticks[1] = interval.left
ticks[end] = interval.right
nothing
end
function set_periodic_bondary_ticks!( ticks::Vector{T}, interval::Interval{:closed, :closed, T})::Nothing where {T}
if length(ticks) == 3
ticks[1] = ticks[2] - 2π
ticks[end] = ticks[2] + 2π # -> Δmidpoint_φ = 2π -> area of circle is 2π * 0.5*r^2
else
ticks[1] = ticks[2] - (ticks[3] - ticks[2])
ticks[end] = ticks[end - 1] + (ticks[end - 1] - ticks[end - 2])
end
nothing
end
function searchsortednearest(a::AbstractVector{T}, x::T)::Int where {T <: Real}
idx::Int = searchsortedfirst(a, x)
if (idx == 1) return idx end
if (idx > length(a)) return length(a) end
if (a[idx] == x) return idx end
if (abs(a[idx] - x) < abs(a[idx - 1] - x))
return idx
else
return idx - 1
end
end
@inline function searchsortednearest(ax::DiscreteAxis{T}, x::T)::Int where {T <: Real}
return searchsortednearest(ax.ticks, x)
end
function searchsortednearest(ax::DiscreteAxis{T, :periodic, :periodic}, x::T)::Int where {T <: Real}
if x in ax.interval
return searchsortednearest(ax.ticks, x)
else
period::T = ax.interval.right - ax.interval.left
v::T = x
while v >= ax.interval.right
v -= period
end
while v < ax.interval.left
v += period
end
return searchsortednearest(ax.ticks, v)
end
end
function DiscreteAxis(nt::NamedTuple; unit = u"m/m")
T = typeof(ustrip(nt.knots[1]))
knots::Vector{T} = convert(Vector{T}, ustrip.(uconvert.(unit, nt.knots)))
lep::T = ustrip(uconvert.(unit, nt.interval.left_boundary.endpoint ))
rep::T = ustrip(uconvert.(unit, nt.interval.right_boundary.endpoint))
int = Interval{nt.interval.left_boundary.closedopen, nt.interval.right_boundary.closedopen}( lep, rep )
return DiscreteAxis{T, nt.interval.left_boundary.boundaryhandling, nt.interval.right_boundary.boundaryhandling, typeof(int)}(
int, knots
)
end
Base.convert(T::Type{DiscreteAxis}, x::NamedTuple; unit = u"m/m") = T(x)
function NamedTuple(ax::DiscreteAxis{T, BL, BR}; unit = u"m/m") where {T, BL, BR}
int::Interval = ax.interval
int_types::Tuple{Symbol, Symbol} = get_boundary_types(int)
return (
knots = ax.ticks * unit,
interval = (
left_boundary = (
endpoint = int.left * unit,
closedopen = int_types[1],
boundaryhandling = BL,
),
right_boundary = (
endpoint = int.right * unit,
closedopen = int_types[2],
boundaryhandling = BR,
),
)
)
end
Base.convert(T::Type{NamedTuple}, x::DiscreteAxis; unit = u"m/m") = T(x)
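# A minimal usage sketch (comment only; assumes IntervalSets and Unitful
# are loaded, as the constructors above require):
#
#   ax = DiscreteAxis(0.0, 1.0, :reflecting, :reflecting, :closed, :closed,
#                     collect(0.0:0.25:1.0))
#   searchsortednearest(ax, 0.6)   # -> 3, the index of the nearest tick, 0.5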
|
{"hexsha": "65e2a8e83ab8ca2d4d3d4bdd26126f44b4b4a6e3", "size": 15388, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Axes/DiscreteAxis.jl", "max_stars_repo_name": "UnofficialJuliaMirror/SolidStateDetectors.jl-71e43887-2bd9-5f77-aebd-47f656f0a3f0", "max_stars_repo_head_hexsha": "075fbaf67b6d1c2e229e93740847b98f87cf8593", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Axes/DiscreteAxis.jl", "max_issues_repo_name": "UnofficialJuliaMirror/SolidStateDetectors.jl-71e43887-2bd9-5f77-aebd-47f656f0a3f0", "max_issues_repo_head_hexsha": "075fbaf67b6d1c2e229e93740847b98f87cf8593", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Axes/DiscreteAxis.jl", "max_forks_repo_name": "UnofficialJuliaMirror/SolidStateDetectors.jl-71e43887-2bd9-5f77-aebd-47f656f0a3f0", "max_forks_repo_head_hexsha": "075fbaf67b6d1c2e229e93740847b98f87cf8593", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2827225131, "max_line_length": 182, "alphanum_fraction": 0.6208084221, "num_tokens": 4606}
|
#!/usr/bin/env julia
using Luxor, Colors
using Test
using Random
Random.seed!(42)
function spiral_logo_eps()
gsave()
scale(.3, .3)
r = 200
setcolor("gray")
for i in 0:pi/8:2pi
gsave()
translate(r * cos(i), r * sin(i))
rotate(i)
julialogo()
grestore()
end
grestore()
end
function expandingspiral_eps()
gsave()
scale(.3, .3)
r = 200
for i in pi:pi/12:6pi
gsave()
translate(i/3 * r * cos(i), i/3 * r * sin(i))
scale(0.8, 0.8)
rotate(i)
julialogo()
grestore()
end
grestore()
end
function dropshadow_eps()
steps=20
# white-gray ramp
gramp = range(colorant"white", stop=colorant"gray60", length=steps)
gsave()
r = 200
setopacity(0.1)
for i in 1:steps
sethue(gramp[i])
translate(-0.6, -0.5)
julialogo(color=false)
end
julialogo()
grestore()
end
function colorgrid_eps()
#cols = colormap("RdBu", 5; mid=0.5, logscale=false)
#cols = sequential_palette(rand(10:360), 5, b=0.1)
cols = distinguishable_colors(25)
gsave()
c = 0
for row in 100:100:500
for column in 100:100:500
gsave()
setcolor(color(cols[c+=1]))
translate(row, column)
scale(0.3, 0.3)
julialogo(color=false)
grestore()
end
end
grestore()
end
function draw_logo(fname)
Drawing(1600, 1600, fname)
origin()
background("white")
translate(-500, -200)
spiral_logo_eps()
translate(750, 0)
expandingspiral_eps()
translate(-1000, 500)
dropshadow_eps()
translate(700, -100)
colorgrid_eps()
@test finish() == true
println("...finished test: output in $(fname)")
end
draw_logo("julia-logo-draw-eps.eps")
|
{"hexsha": "ba481be5383655a8ea9534bacfeed004ce193076", "size": 1836, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/julia-logo-draw-eps.jl", "max_stars_repo_name": "guo-yong-zhi/Luxor.jl", "max_stars_repo_head_hexsha": "3b4fe34fe1e05c17bfcc9cc5b074fa527e5d1ebf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 463, "max_stars_repo_stars_event_min_datetime": "2017-01-07T00:48:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:06:58.000Z", "max_issues_repo_path": "test/julia-logo-draw-eps.jl", "max_issues_repo_name": "guo-yong-zhi/Luxor.jl", "max_issues_repo_head_hexsha": "3b4fe34fe1e05c17bfcc9cc5b074fa527e5d1ebf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 200, "max_issues_repo_issues_event_min_datetime": "2017-01-03T12:35:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T16:39:00.000Z", "max_forks_repo_path": "test/julia-logo-draw-eps.jl", "max_forks_repo_name": "guo-yong-zhi/Luxor.jl", "max_forks_repo_head_hexsha": "3b4fe34fe1e05c17bfcc9cc5b074fa527e5d1ebf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 86, "max_forks_repo_forks_event_min_datetime": "2017-01-15T17:36:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T13:55:02.000Z", "avg_line_length": 18.9278350515, "max_line_length": 71, "alphanum_fraction": 0.5626361656, "num_tokens": 563}
|
#!/usr/bin/env python
import numpy as np
import cv2
from commonFunctions_v04 import get_info_from_logfile
from commonFunctions_v04 import flip_horizontally
# History
# v01 : Start
# v02 : add nb_images to read parameter
# v03 : add normalization + mean centering data to 0
# v04 : data augmentation flip horizontally image + inverse measurements
# v05 : use left/right images + measurements with Steering error correction
STEER_CORRECTION_FACTOR = 0.2 # to tune up for left and right images/measurements
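# (presumably applied inside get_info_from_logfile: left-camera frames get
#  measurement + factor, right-camera frames measurement - factor, so
#  off-center views learn to steer back toward the lane center)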
# get images + steering angle measurements
images, measurements = get_info_from_logfile(STEER_CORRECTION_FACTOR,nb_images=100)
# data augmentation flip horizontally image + inverse measurements
augm_images, augm_measurements = flip_horizontally(images,measurements)
images.extend(augm_images)
measurements.extend(augm_measurements)
X_train = np.array(images)
y_train = np.array(measurements)
#print(f'X_train shape : {X_train.shape}')
#print(f'images shape : {im.shape}')
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda
from keras.callbacks import ModelCheckpoint,EarlyStopping
model = Sequential()
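# the Lambda layer rescales pixel values from [0, 255] to [-0.5, 0.5]
# (normalization plus mean-centering to 0, as noted in the history above)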
model.add(Lambda(lambda x: ((x/255) - 0.5),input_shape=(160,320,3)))
model.add(Flatten())
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
# Callbacks to save best model and prevent overfit by early stopping
checkpoint = ModelCheckpoint(filepath='bestModelFolder/model.{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss', save_best_only=True)
stopper = EarlyStopping(monitor='val_loss', min_delta=0.0003, patience=5)
# model.fit(callbacks=[checkpoint, stopper])
model.fit(X_train,y_train, validation_split=0.2, shuffle = True, epochs=10, callbacks=[checkpoint, stopper])
model.save('model.h5')
|
{"hexsha": "0ebdcfe49ac8b4f5c29cc9f1169878dbd3df8c7c", "size": 1758, "ext": "py", "lang": "Python", "max_stars_repo_path": "archiveOldVersions/clone_v05.py", "max_stars_repo_name": "remichartier/014_selfDrivingCarND_BehavioralCloningProject", "max_stars_repo_head_hexsha": "1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-23T08:28:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-23T08:28:54.000Z", "max_issues_repo_path": "archiveOldVersions/clone_v05.py", "max_issues_repo_name": "remichartier/014_selfDrivingCarND_BehavioralCloningProject", "max_issues_repo_head_hexsha": "1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "archiveOldVersions/clone_v05.py", "max_forks_repo_name": "remichartier/014_selfDrivingCarND_BehavioralCloningProject", "max_forks_repo_head_hexsha": "1dcaa7c5a937929d4481e5efbf7ccc856c04c4ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4042553191, "max_line_length": 133, "alphanum_fraction": 0.7889647327, "include": true, "reason": "import numpy", "num_tokens": 433}
|
from __future__ import annotations
from typing import Any, Iterable, Literal, Sequence
import attr
import networkx as nx
__all__ = ["PoSet", "Pair", "Chain", "CMP"]
Pair = tuple[Any, Any]
Chain = Sequence[Any]
CMP = Literal["<", ">", "||", "="]
@attr.frozen
class PoSet:
"""Hasse diagram representation of partially ordered set.
"""
hasse: nx.DiGraph = attr.ib(factory=nx.DiGraph)
def __len__(self) -> int:
return len(self.hasse)
def __iter__(self) -> Iterable[Any]:
yield from self.hasse.nodes
def compare(self, left: Any, right: Any) -> CMP:
if left == right:
return "="
elif nx.has_path(self.hasse, left, right):
return "<"
elif nx.has_path(self.hasse, right, left):
return ">"
return "||"
def __contains__(self, elem: Any) -> bool:
return elem in self.hasse.nodes
def add(self, chain: Chain) -> PoSet:
hasse = nx.DiGraph(self.hasse)
nx.add_path(hasse, chain)
return attr.evolve(self, hasse=nx.transitive_reduction(hasse))
@staticmethod
    def from_chains(*chains: Chain) -> PoSet:
hasse = nx.DiGraph()
for chain in chains:
nx.add_path(hasse, chain)
return PoSet(nx.transitive_reduction(hasse))
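

if __name__ == "__main__":
    # Hedged usage sketch (illustrative, not part of the original module):
    # build a poset from two chains that share endpoints and query the order.
    p = PoSet.from_chains([1, 2, 4], [1, 3, 4])
    assert p.compare(1, 4) == "<"   # path 1 -> 2 -> 4
    assert p.compare(2, 3) == "||"  # incomparable branches
    assert p.compare(4, 1) == ">"
    p2 = p.add([3, 5])              # extend with a new chain
    assert 5 in p2 and len(p2) == 5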
|
{"hexsha": "6075e546a256dd0ff140d7627a271ca08530449c", "size": 1280, "ext": "py", "lang": "Python", "max_stars_repo_path": "hasse/poset.py", "max_stars_repo_name": "mvcisback/hasse", "max_stars_repo_head_hexsha": "eefd6f4af217a4c44bd2751df6f39bd5b7d37d6c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hasse/poset.py", "max_issues_repo_name": "mvcisback/hasse", "max_issues_repo_head_hexsha": "eefd6f4af217a4c44bd2751df6f39bd5b7d37d6c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hasse/poset.py", "max_forks_repo_name": "mvcisback/hasse", "max_forks_repo_head_hexsha": "eefd6f4af217a4c44bd2751df6f39bd5b7d37d6c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6153846154, "max_line_length": 69, "alphanum_fraction": 0.6203125, "include": true, "reason": "import networkx", "num_tokens": 336}
|
! RUN: %S/test_errors.sh %s %t %flang_fc1
! REQUIRES: shell
! Simple check that if constructs are ok.
if (a < b) then
a = 1
end if
if (a < b) then
a = 2
else
a = 3
endif
if (a < b) then
a = 4
else if(a == b) then
a = 5
end if
if (a < b) then
a = 6
else if(a == b) then
a = 7
elseif(a > b) then
a = 8
end if
if (a < b) then
a = 9
else if(a == b) then
a = 10
else
a = 11
end if
if (a < b) then
a = 12
else if(a == b) then
a = 13
else if(a > b) then
a = 14
end if
if (f()) then
a = 15
end if
contains
logical function f()
f = .true.
end
end
|
{"hexsha": "9fb1344ff259d0384fef0c284e536e01f6f49988", "size": 584, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "flang/test/Semantics/if_construct01.f90", "max_stars_repo_name": "acidburn0zzz/llvm-project", "max_stars_repo_head_hexsha": "7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2019-04-12T18:49:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T22:23:16.000Z", "max_issues_repo_path": "flang/test/Semantics/if_construct01.f90", "max_issues_repo_name": "acidburn0zzz/llvm-project", "max_issues_repo_head_hexsha": "7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 127, "max_issues_repo_issues_event_min_datetime": "2019-04-09T00:55:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T15:35:41.000Z", "max_forks_repo_path": "flang/test/Semantics/if_construct01.f90", "max_forks_repo_name": "acidburn0zzz/llvm-project", "max_forks_repo_head_hexsha": "7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-04-02T18:25:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T07:11:37.000Z", "avg_line_length": 10.8148148148, "max_line_length": 41, "alphanum_fraction": 0.5376712329, "num_tokens": 266}
|
using Test
using StatsModelComparisons
using StanSample, StatsFuns
using Statistics: std
using Printf
using JSON
@testset "Arsenic" begin
ProjDir = @__DIR__
#=
if haskey(ENV, "JULIA_CMDSTAN_HOME")
include(joinpath(ProjDir, "test_demo_wells.jl"))
else
println("\nJULIA_CMDSTAN_HOME not set. Skipping tests")
end
=#
include(joinpath(ProjDir, "cvit.jl"))
# Data
data = JSON.parsefile(joinpath(ProjDir, "wells.data.json"))
y = Float64.(data["switched"])
x = Float64[data["arsenic"] data["dist"]]
n, m = size(x)
# Model
model_str = read(open(joinpath(ProjDir, "arsenic_logistic.stan")), String)
sm1 = SampleModel("arsenic_logistic", model_str)
data1 = (p = m, N = n, y = Int.(y), x = x)
# Fit the model in Stan
rc1 = stan_sample(sm1; data=data1)
if success(rc1)
nt1 = read_samples(sm1)
# Compute LOO and standard error
log_lik = nt1.log_lik'
loo, loos, pk = psisloo(log_lik)
elpd_loo = sum(loos)
se_elpd_loo = std(loos) * sqrt(n)
@test elpd_loo ≈ -1968.3 atol=2.0
@test se_elpd_loo ≈ 15.5 atol=0.5
@test all(pk .< 0.5)
end
println()
# Fit a second model, using log(arsenic) instead of arsenic
x2 = Float64[log.(data["arsenic"]) data["dist"]]
# Model
data2 = (p = m, N = n, y = Int.(y), x = x2)
# Fit the model in Stan
rc2 = stan_sample(sm1; data=data2)
if success(rc2)
nt2 = read_samples(sm1)
# Compute LOO and standard error
log_lik = nt2.log_lik'
loo2, loos2, pk2 = psisloo(log_lik)
elpd_loo = sum(loos2)
se_elpd_loo = std(loos2) * sqrt(n)
@test elpd_loo ≈ -1952.1 atol=2.0
@test se_elpd_loo ≈ 16.2 atol=0.5
    @test all(pk2 .< 0.5)
end
if success(rc1) && success(rc2)
## Compare the models
loodiff = loos - loos2
@test sum(loodiff) ≈ -16.3 atol=0.3
@test std(loodiff) * sqrt(n) ≈ 4.4 atol=0.2
end
end
|
{"hexsha": "e1dd15a3810acf456c7406a915d08473b6923f74", "size": 2013, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_demo_wells.jl", "max_stars_repo_name": "itsdfish/StatsModelComparisons.jl", "max_stars_repo_head_hexsha": "e8683a97bc4cdc57b465fdec245300d691d59240", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_demo_wells.jl", "max_issues_repo_name": "itsdfish/StatsModelComparisons.jl", "max_issues_repo_head_hexsha": "e8683a97bc4cdc57b465fdec245300d691d59240", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_demo_wells.jl", "max_forks_repo_name": "itsdfish/StatsModelComparisons.jl", "max_forks_repo_head_hexsha": "e8683a97bc4cdc57b465fdec245300d691d59240", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1428571429, "max_line_length": 78, "alphanum_fraction": 0.5851962245, "num_tokens": 690}
|
import numpy as np
def int_to_str(arr: np.ndarray) -> np.ndarray:
"""
Convert array of 64-bit integers to S9.
We cannot use arr.byteswap().view("S8") because the trailing zeros are discarded \
in np.char.add. Thus we have to pad with ";".
"""
assert arr.dtype == int
arena = np.full((len(arr), 9), ord(";"), dtype=np.uint8)
arena[:, :8] = arr.byteswap().view(np.uint8).reshape(len(arr), 8)
return arena.ravel().view("S9")
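

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): each int64 becomes
    # its 8 big-endian bytes plus a trailing b";" (assumes 64-bit default int).
    out = int_to_str(np.array([0, 1], dtype=int))
    assert out.dtype == np.dtype("S9")
    assert out[0] == b"\x00" * 8 + b";"
    assert out[1] == b"\x00" * 7 + b"\x01" + b";"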
|
{"hexsha": "adeacfcec3841b8d38df15d060037e76affc86de", "size": 461, "ext": "py", "lang": "Python", "max_stars_repo_path": "server/athenian/api/int_to_str.py", "max_stars_repo_name": "athenianco/athenian-api", "max_stars_repo_head_hexsha": "dd5556101a8c49703d6b0516e4268b9e8d8eda5b", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-10-11T22:12:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T02:16:45.000Z", "max_issues_repo_path": "server/athenian/api/int_to_str.py", "max_issues_repo_name": "athenianco/athenian-api", "max_issues_repo_head_hexsha": "dd5556101a8c49703d6b0516e4268b9e8d8eda5b", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": 246, "max_issues_repo_issues_event_min_datetime": "2019-12-05T06:37:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T10:00:07.000Z", "max_forks_repo_path": "server/athenian/api/int_to_str.py", "max_forks_repo_name": "athenianco/athenian-api", "max_forks_repo_head_hexsha": "dd5556101a8c49703d6b0516e4268b9e8d8eda5b", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-12-04T22:38:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-26T00:50:04.000Z", "avg_line_length": 30.7333333333, "max_line_length": 86, "alphanum_fraction": 0.6225596529, "include": true, "reason": "import numpy", "num_tokens": 128}
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2020: Pablo Zubieta
module ReactionCoordinates
using LinearAlgebra
using StaticArrays
export Acylindricity, Angle, Asphericity, Barycenter, DihedralAngle, DistanceFrom,
GyrationTensor, PairwiseKernel, PrincipalMoments, RadiusOfGyration,
RouseMode, Separation, ShapeAnysotropy, TorsionAngle, WeightedBarycenter,
WeightedGyrationTensor, WeightedRadiusOfGyration
### Type definitions
abstract type ReactionCoordinate <: Function end
abstract type AbstractGyrationTensor <: ReactionCoordinate end
abstract type AbstractBarycenter <: ReactionCoordinate end
struct Angle <: ReactionCoordinate end
struct DihedralAngle <: ReactionCoordinate end
struct GyrationTensor <: AbstractGyrationTensor end
struct RadiusOfGyration{T} <: ReactionCoordinate end
struct Separation{T} <: ReactionCoordinate end
const TorsionAngle = DihedralAngle
struct WeightedGyrationTensor{T} <: AbstractGyrationTensor
ws::Vector{T}
end
struct WeightedRadiusOfGyration{T} <: ReactionCoordinate
ws::Vector{T}
end
struct PrincipalMoments{T <: AbstractGyrationTensor} <: ReactionCoordinate
S::T
end
struct Asphericity{T <: AbstractGyrationTensor} <: ReactionCoordinate
S::T
end
struct Acylindricity{T <: AbstractGyrationTensor} <: ReactionCoordinate
S::T
end
struct ShapeAnysotropy{T <: AbstractGyrationTensor} <: ReactionCoordinate
S::T
end
struct Barycenter{F} <: AbstractBarycenter
f::F
end
struct WeightedBarycenter{F, T} <: AbstractBarycenter
f::F
ws::Vector{T}
end
struct PairwiseKernel{F} <: ReactionCoordinate
f::F
end
struct DistanceFrom{T} <: ReactionCoordinate
    r::SVector{3, T}
end
struct RouseMode{N, P, T} <: ReactionCoordinate
c::T
ws::Vector{T}
function RouseMode{N, 0}(::Type{T} = Float64) where {N, T}
@assert (N > 0) "Parameters must satisfy N > P ≥ 0. Got N = $N and P = 0"
c = sqrt(one(T) / N)
return new{N, 0, T}(c, T[])
end
function RouseMode{N, P}(::Type{T} = Float64) where {N, P, T}
@assert (N > P ≥ 0) "Parameters must satisfy N > P ≥ 0. Got N = $N and P = $P"
c = sqrt(2 * one(T) / N)
ws = T[cospi(P * (2i - 1) / 2N) for i = 1:N]
return new{N, P, T}(c, ws)
end
end
struct TransformMatrix{T} <: FieldMatrix{3, 3, T}
xx::T
yx::T
zx::T
xy::T
yy::T
zy::T
xz::T
yz::T
zz::T
end
#=
Explore using FunctionalWrappers.jl for this
struct MultiReactionCoordinate{T <: Tuple} <: ReactionCoordinate
ξs::T
end
=#
### Outer Constructors
function WeightedGyrationTensor(ws::Vector)
w̃s = ws / sum(ws) # weights normalized at construction
T = eltype(w̃s)
return WeightedGyrationTensor{T}(w̃s)
end
function WeightedRadiusOfGyration(ws::Vector)
w̃s = ws / sum(ws)
T = eltype(w̃s)
return WeightedRadiusOfGyration{T}(w̃s)
end
function WeightedBarycenter(ws::Vector; f::F = identity) where {F}
w̃s = ws / sum(ws)
T = eltype(w̃s)
return WeightedBarycenter{F, T}(f, w̃s)
end
PrincipalMoments() = PrincipalMoments(GyrationTensor())
PrincipalMoments(ws::Vector) = PrincipalMoments(WeightedGyrationTensor(ws))
Asphericity() = Asphericity(GyrationTensor())
Asphericity(ws::Vector) = Asphericity(WeightedGyrationTensor(ws))
Acylindricity() = Acylindricity(GyrationTensor())
Acylindricity(ws::Vector) = Acylindricity(WeightedGyrationTensor(ws))
ShapeAnysotropy() = ShapeAnysotropy(GyrationTensor())
ShapeAnysotropy(ws::Vector) = ShapeAnysotropy(WeightedGyrationTensor(ws))
Barycenter() = Barycenter(identity)
### Getters
gyration_tensor(ξ::PrincipalMoments) = ξ.S
gyration_tensor(ξ::Asphericity) = ξ.S
gyration_tensor(ξ::Acylindricity) = ξ.S
gyration_tensor(ξ::ShapeAnysotropy) = ξ.S
weights(ξ::WeightedGyrationTensor) = ξ.ws
weights(ξ::WeightedRadiusOfGyration) = ξ.ws
weights(ξ::WeightedBarycenter) = ξ.ws
weights(ξ::RouseMode) = ξ.ws
op(ξ::AbstractBarycenter) = ξ.f
op(ξ::PairwiseKernel) = ξ.f
reference(ξ::DistanceFrom) = ξ.r
coeff(ξ::RouseMode) = ξ.c
### Functors methods
(ξ::Angle)(rs) = @inbounds angle(rs[1], rs[2], rs[3])
(ξ::DihedralAngle)(rs) = @inbounds dihedral_angle(rs[1], rs[2], rs[3], rs[4])
(ξ::GyrationTensor)(rs) = gyration_tensor(rs)
(ξ::WeightedGyrationTensor)(rs) = gyration_tensor(rs, weights(ξ))
(ξ::RadiusOfGyration)(rs) = radius_of_gyration(rs)
(ξ::WeightedRadiusOfGyration)(rs) = radius_of_gyration(rs, weights(ξ))
(ξ::PrincipalMoments)(rs) = principal_moments(gyration_tensor(ξ)(rs))
(ξ::Asphericity)(rs) = asphericity(gyration_tensor(ξ)(rs))
(ξ::Acylindricity)(rs) = acylindricity(gyration_tensor(ξ)(rs))
(ξ::ShapeAnysotropy)(rs) = shape_anysotropy(gyration_tensor(ξ)(rs))
(ξ::Barycenter)(rs) = barycenter(op(ξ), rs)
(ξ::WeightedBarycenter)(rs) = barycenter(op(ξ), rs, weights(ξ))
(ξ::PairwiseKernel)(rs₁, rs₂) = pairwise(op(ξ), rs₁, rs₂)
(ξ::DistanceFrom)(rs) = distance(rs, reference(ξ))
(ξ::Separation)(rs) = @inbounds distance(rs[1], rs[2])
(ξ::RouseMode)(rs) = rouse_mode(rs, coeff(ξ), weights(ξ))
(ξ::RouseMode{N, 0})(rs) where {N} = rouse_mode₀(rs, coeff(ξ))
#==========#
# Angles #
#==========#
""" angle(r₁, r₂, r₃)
Computes the angle between the two vectors defined by three points in space (around the
point in the middle).
"""
angle(r₁, r₂, r₃) = angle(r₁ - r₂, r₃ - r₂)
#
@inline angle(a, b) = atan(norm(a × b), a ⋅ b)
""" dihedral_angle(r₁, r₂, r₃, r₄)
Computes the dihedral (or torsion) angle defined by four points in space (around the line
defined by the two central points).
"""
dihedral_angle(r₁, r₂, r₃, r₄) = dihedral_angle(r₂ - r₁, r₃ - r₂, r₄ - r₃)
#
@inline function dihedral_angle(a, b, c)
p = a × b
q = b × c
return atan((p × q) ⋅ b, (p ⋅ q) * norm(b))
end
#==============#
# Box Volume #
#==============#
volume(H::TransformMatrix) = det(H)
#=====================#
# Shape Descriptors #
#=====================#
function gyration_tensor(rs)
# Alternative implementation:
# f = r -> r .* r'
# S = sum(f, rs)
S = accumulator(GyrationTensor, rs)
@simd for r in rs
S .= muladd.(r, r', S)
end
return Symmetric(SMatrix(S ./= length(rs)))
end
function gyration_tensor(rs, ws)
# Alternative implementation:
# f = ((w, r),) -> w .* r .* r'
# S = sum(f, rs)
S = accumulator(WeightedGyrationTensor, rs, ws)
@inbounds @simd for i in eachindex(ws, rs)
w, r = ws[i], rs[i]
S .= muladd.(w, r .* r', S)
end
return Symmetric(SMatrix(S))
end
radius_of_gyration(rs) = sum(r -> r ⋅ r, rs) / length(rs)
function radius_of_gyration(rs, ws)
# Alternative implementation:
# f = ((w, r),) -> w * (r ⋅ r)
# R² = sum(f, rs)
R² = accumulator(WeightedRadiusOfGyration, rs, ws)
@inbounds @simd for i in eachindex(ws, rs)
w, r = ws[i], rs[i]
R² = muladd(w, r ⋅ r, R²)
end
return R²
end
const principal_moments = LinearAlgebra.eigvals
function asphericity(S)
λ₁², λ₂², λ₃² = principal_moments(S)
return λ₃² - (λ₁² + λ₂²) / 2
end
function acylindricity(S)
λ₁², λ₂², λ₃² = principal_moments(S)
return (λ₂² - λ₁²)
end
function shape_anysotropy(S)
λ₁², λ₂², λ₃² = principal_moments(S)
λ₁⁴ = λ₁²^2
λ₂⁴ = λ₂²^2
λ₃⁴ = λ₃²^2
return (3 * (λ₁⁴ + λ₂⁴ + λ₃⁴) / (λ₁² + λ₂² + λ₃²)^2 - 1) / 2
end
### Accumulators
@inline function accumulator(::Type{GyrationTensor}, rs)
R = eltype(eltype(rs))
T = typeof(zero(R) / 1)
return zeros(MMatrix{3, 3, T})
end
@inline function accumulator(::Type{WeightedGyrationTensor}, rs, ws)
W = eltype(ws)
R = eltype(eltype(rs))
T = typeof(zero(W) * zero(R))
return zeros(MMatrix{3, 3, T})
end
@inline function accumulator(::Type{WeightedRadiusOfGyration}, rs, ws)
W = eltype(ws)
R = eltype(eltype(rs))
T = typeof(zero(W) * zero(R))
return zero(T)
end
#========================#
# Particle Coordinates #
#========================#
const Identity = typeof(identity)
getx(r) = @inbounds(r[1])
gety(r) = @inbounds(r[2])
getz(r) = @inbounds(r[3])
barycenter(f::F, rs) where {F} = sum(f, rs) / length(rs)
function barycenter(::Identity, rs, ws)
# Alternative implementation:
# f = ((w, r),) -> w * r
# R = sum(f, rs)
R = accumulator(WeightedBarycenter{Identity}, rs, ws)
@inbounds @simd for i in eachindex(ws, rs)
w, r = ws[i], rs[i]
R .= muladd.(w, r, R)
end
return SVector(R)
end
function barycenter(f::F, rs, ws) where {F}
# Alternative implementation:
# g = ((w, r),) -> w * f(r)
# R = sum(g, rs)
R = accumulator(WeightedBarycenter, rs, ws)
@inbounds @simd for i in eachindex(ws, rs)
w, r = ws[i], rs[i]
R = muladd(w, f(r), R)
end
return R
end
### Accumulators
@inline function accumulator(::Type{CV}, rs, ws) where {CV <: WeightedBarycenter}
W = eltype(ws)
R = eltype(eltype(rs))
T = typeof(zero(W) * zero(R) / 1)
return _accumulator(CV, T)
end
@inline _accumulator(::Type{<:WeightedBarycenter}, ::Type{T}) where {T} = zero(T)
@inline function _accumulator(::Type{<:WeightedBarycenter{Identity}}, ::Type{T}) where {T}
return zeros(MVector{3, T})
end
#====================#
# Pairwise Kernels #
#====================#
function pairwise(f::F, r₁, r₂) where {F}
ξ = accumulator(PairwiseKernel, f, r₁, r₂)
@inbounds for i in eachindex(r₁)
@simd for j in eachindex(r₂)
ξ += f(r₁[i], r₂[j])
end
end
return ξ
end
@inline function accumulator(::Type{PairwiseKernel}, f::F, r₁, r₂) where {F}
T₁ = eltype(eltype(r₁))
T₂ = eltype(eltype(r₂))
T = typeof(f(zero(T₁), zero(T₂)))
return zero(T)
end
### Common kernels
struct Gaussian{T} <: Function
μ::T
σ²::T
end
function Gaussian(μ, σ)
μ̃, σ̃² = promote(μ, σ^2)
T = typeof(μ̃)
return Gaussian{T}(μ̃, σ̃²)
end
mean(f::Gaussian) = f.μ
var(f::Gaussian) = f.σ²
(f::Gaussian)(r) = gaussian(mean(f), var(f), r)
gaussian(μ, σ², r) = exp(-(r - μ)^2 / 2σ²)
#=============#
# Distances #
#=============#
distance(r, s) = norm(r - s)
#=============#
# Utilities #
#=============#
abstract type DecayingFunction <: Function end
struct RationalDecay{M, N, T} <: DecayingFunction
d::T
r::T
function RationalDecay{M, N}(d, r) where {M, N}
@assert N > M > 0
        d, r = promote(d, r)
return new{M, N, typeof(d)}(d, r)
end
end
numerator_exponent(::RationalDecay{M}) where {M} = M
denominator_exponent(::RationalDecay{M, N}) where {M, N} = N
origin(f::RationalDecay) = f.d
width(f::RationalDecay) = f.r
RationalDecay(; d₀ = 0.0, r₀ = 1.0) = RationalDecay{6, 12}(d₀, r₀)
RationalDecay{M, N}(; d₀ = 0.0, r₀ = 1.0) where {M, N} = RationalDecay{M, N}(d₀, r₀)
(f::RationalDecay{6, 12})(r) = rational_decay⁶₁₂(r; d₀ = origin(f), r₀ = width(f))
(f::RationalDecay{8, 12})(r) = rational_decay⁸₁₂(r; d₀ = origin(f), r₀ = width(f))
function (f::RationalDecay)(r)
m = numerator_exponent(f)
n = denominator_exponent(f)
return rational_decay(m, n, r; d₀ = origin(f), r₀ = width(f))
end
### More accurate implementation for M = 6, N = 12
@inline function rational_decay⁶₁₂(r; d₀ = 0.0, r₀ = 1.0)
ρ = ((r - d₀) / r₀)
if ρ < 0
return one(ρ)
end
return 1 / (1 + ρ^6)
end
### More accurate implementation for M = 8, N = 12
@inline function rational_decay⁸₁₂(r; d₀ = 0.0, r₀ = 1.0)
ρ = ((r - d₀) / r₀)
if ρ < 0
return one(ρ)
end
ρ⁴ = ρ^4
return 1 / (ρ⁴ + 1 / (1 + ρ⁴))
end
@inline function rational_decay(m, n, r; d₀ = 0.0, r₀ = 1.0)
@assert n > m > 0
ρ = (r - d₀) / r₀
if ρ < 0
return one(ρ)
elseif isone(ρ)
return one(ρ) * m / n
end
ρᵐ = ρ^m
if isinf(ρᵐ)
return zero(ρ)
end
return (1 - ρᵐ) / (1 - ρ^n)
end
#===============#
# Rouse Modes #
#===============#
function rouse_mode₀(rs, c)
x₀ = sum(rs)
return c * norm(x₀)
end
#
function rouse_mode(rs, c, ws)
xₚ = sum(((w, r),) -> w * r, zip(ws, rs))
return c * norm(xₚ)
end
end # module ReactionCoordinates
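
# --- Hedged usage sketch (not part of the original module) ---
# Assumes this file is `include`d from a script and StaticArrays is installed;
# evaluates two reaction coordinates on three 3D points.
using StaticArrays
using .ReactionCoordinates

let rs = [SVector(0.0, 0.0, 0.0), SVector(1.0, 0.0, 0.0), SVector(0.0, 1.0, 0.0)]
    ξ = WeightedRadiusOfGyration([1.0, 2.0, 1.0])  # weights are normalized internally
    @assert ξ(rs) ≈ 0.75                           # (1*0 + 2*1 + 1*1) / 4
    @assert Separation{Float64}()(rs) ≈ 1.0        # distance between rs[1] and rs[2]
end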
|
{"hexsha": "bbd29ec6d432337eb4aebb74bb750572b3d11144", "size": 12018, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ReactionCoordinates.jl", "max_stars_repo_name": "pabloferz/ReactionCoordinates.jl", "max_stars_repo_head_hexsha": "e88ea117940480cb17f8525d6b2c1467bd108c5d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ReactionCoordinates.jl", "max_issues_repo_name": "pabloferz/ReactionCoordinates.jl", "max_issues_repo_head_hexsha": "e88ea117940480cb17f8525d6b2c1467bd108c5d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ReactionCoordinates.jl", "max_forks_repo_name": "pabloferz/ReactionCoordinates.jl", "max_forks_repo_head_hexsha": "e88ea117940480cb17f8525d6b2c1467bd108c5d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8819875776, "max_line_length": 90, "alphanum_fraction": 0.625312032, "num_tokens": 4283}
|
import pandas as pd
import click
import os
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
import mlflow
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
plt.style.use('ggplot')
'''
plot_history() is optional: it visualizes accuracy/loss over the epochs.
To use it, uncomment the `history = model.fit(...)` line in main()
as well as the `plot_history(history)` call at the end.
'''
def plot_history(history):
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
x = range(1, len(acc) + 1)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(x, acc, 'b', label='training accuracy')
plt.plot(x, val_acc, 'r', label='validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(x, loss, 'b', label='training loss')
plt.plot(x, val_loss, 'r', label='validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
'''
Pass the cleaned CSV via the --csv-path option (the script uses click, not sys.argv).
sample command line usage $: python 2_build_train_model.py --csv-path data/cleaned_data.csv
'''
@click.command()
@click.option("--csv-path")
def main(csv_path):
with mlflow.start_run() as mlrun:
# get our data into a pd dataframe
df = pd.read_csv(csv_path)
print('Dataframe size:', df.shape, '\nremoving null values...\n\n')
# we have to dropna() for now because of the keras tokenizer
# I'm unsure how to run it with keras without this step
df = df.dropna()
print('Dataframe size without null values shape:', df.shape, '\n\n')
# main settings
# for less data
# epochs = 10
# batch_size = 10
# for more data
epochs = 5
batch_size = 64
MAX_NB_WORDS = 50000 # The maximum number of words to be used. (most frequent)
MAX_SEQUENCE_LENGTH = 250 # Max number of words in each post.
EMBEDDING_DIM = 100 # This is fixed.
# tokenize words
# we tokenize in 1_clean_data.py
# however currently only way I know how to get data into keras' format
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(df['Post Text'].values)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index), '\n\n')
# define X/Y
X = tokenizer.texts_to_sequences(df['Post Text'].values)
X = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH) # padding adds zeros to null vectors
print('Shape of data tensor:', X.shape)
Y = pd.get_dummies(df['Subreddit']).values
print('Shape of label tensor:', Y.shape, '\n\n')
# train-test split
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.10, random_state = 42)
print('splitting train/test data')
print('training data shape:', X_train.shape,Y_train.shape)
print('testing data shape', X_test.shape,Y_test.shape, '\n\n')
# create model
model = Sequential()
model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=X.shape[1]))
model.add(SpatialDropout1D(0.5))
model.add(LSTM(100, dropout=0.5, recurrent_dropout=0.5))
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
print(model.summary(), '\n\n')
        # evaluate the untrained model first: this is a random-weights baseline,
        # which is why it differs from the accuracy reported after fitting below
accr = model.evaluate(X_test,Y_test)
print('Test set\n Loss: {:0.3f} %\n Accuracy: {:0.3f} %'.format((accr[0]*100),(accr[1]*100)), '\n\n')
# train model
print('\nFitting model\nThis may take a while...\n\n')
'''
FROM DOCS:
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate the loss and any model
metrics on this data at the end of each epoch.
The validation data is selected from the last samples in the
x and y data provided, before shuffling.
This argument is not supported when x is a generator or Sequence instance.
'''
# history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, validation_split=0.1, callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, validation_split=0.1, callbacks=[EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)])
# reevaluate the model
scores = model.evaluate(X, Y)
print('\n\nAccuracy: {:0.3f} %'.format(scores[1]*100))
mlflow.log_metric("accuracy", scores[1]*100)
##################
# saving our model
##################
mlflow.keras.log_model(model, "keras-model")
# # specify directory to save in
# outdir = sys.argv[2]
# # keras' native model saving tool
# model_json = model.to_json()
# # serialize model to JSON
# # the keras model that's train = 'model'
# with open(f"{outdir}model_num.json", "w") as json_file:
# json_file.write(model_json)
# print("\n\nsaved model parameters to disk as JSON!\n\n")
# # serialize weights to HDF5
# # we can later read in weights and add them to our json model
# model.save_weights(f'{outdir}model_num.h5')
# print("\n\nsaved model weights to disk as HDF5!\n\n")
        # plotting is disabled for now; to enable it, capture the History
        # object returned by model.fit above (history = model.fit(...))
# plot accuracy during training
# print('plotting loss/accuracy over training period \n\n')
# plot_history(history)
if __name__ == "__main__":
main()
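
# Hedged usage sketch (not part of the original script):
#   python 2_build_train_model.py --csv-path data/cleaned_data.csv
# The model logged above can later be reloaded from its MLflow run, e.g.:
#   reloaded = mlflow.keras.load_model("runs:/<run_id>/keras-model")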
|
{"hexsha": "b81487f948fe7bd1841243788aec568c20c9cb0b", "size": 6804, "ext": "py", "lang": "Python", "max_stars_repo_path": "classifier/2_build_train_model.py", "max_stars_repo_name": "justinkaseman/politicate-classifier", "max_stars_repo_head_hexsha": "706af6a604dc076ed6a1ca526159c14a7f0e16af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-16T02:59:27.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-16T02:59:27.000Z", "max_issues_repo_path": "classifier/2_build_train_model.py", "max_issues_repo_name": "justinkaseman/politicate-classifier", "max_issues_repo_head_hexsha": "706af6a604dc076ed6a1ca526159c14a7f0e16af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classifier/2_build_train_model.py", "max_forks_repo_name": "justinkaseman/politicate-classifier", "max_forks_repo_head_hexsha": "706af6a604dc076ed6a1ca526159c14a7f0e16af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0111731844, "max_line_length": 184, "alphanum_fraction": 0.6518224574, "include": true, "reason": "import numpy", "num_tokens": 1627}
|
using Documenter, ModelingToolkitStandardLibrary
using ModelingToolkitStandardLibrary.Blocks
using ModelingToolkitStandardLibrary.Mechanical
using ModelingToolkitStandardLibrary.Mechanical.Rotational
using ModelingToolkitStandardLibrary.Magnetic
using ModelingToolkitStandardLibrary.Magnetic.FluxTubes
using ModelingToolkitStandardLibrary.Electrical
using ModelingToolkitStandardLibrary.Thermal
makedocs(
sitename="ModelingToolkitStandardLibrary.jl",
authors="Julia Computing",
clean=true,
doctest=false,
modules=[ModelingToolkitStandardLibrary,
ModelingToolkitStandardLibrary.Blocks,
ModelingToolkitStandardLibrary.Mechanical,
ModelingToolkitStandardLibrary.Mechanical.Rotational,
ModelingToolkitStandardLibrary.Magnetic,
ModelingToolkitStandardLibrary.Magnetic.FluxTubes,
ModelingToolkitStandardLibrary.Electrical,
ModelingToolkitStandardLibrary.Thermal],
format=Documenter.HTML(assets=["assets/favicon.ico"],
canonical="https://mtkstdlib.sciml.ai/stable/"),
pages=[
"ModelingToolkitStandardLibrary.jl: A Standard Library for ModelingToolkit" => "index.md",
"Tutorials" => [
"RC Circuit" => "tutorials/rc_circuit.md"
],
"API" => [
"Basic Blocks" => "API/blocks.md",
"Electrical Components" => "API/electrical.md",
"Magnetic Components" => "API/magnetic.md",
"Mechanical Components" => "API/mechanical.md",
"Thermal Components" => "API/thermal.md"
],
]
)
deploydocs(
repo="github.com/SciML/ModelingToolkitStandardLibrary.jl";
push_preview=true
)
|
{"hexsha": "4bf65ef9872e916e37cb1dd448475a05a4bb4d85", "size": 1723, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "baggepinnen/ModelingToolkitStandardLibrary.jl", "max_stars_repo_head_hexsha": "f8bdbb9f91eadcf274c54dfcd5df94f189ffbd15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2021-11-02T18:58:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T16:32:46.000Z", "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "baggepinnen/ModelingToolkitStandardLibrary.jl", "max_issues_repo_head_hexsha": "f8bdbb9f91eadcf274c54dfcd5df94f189ffbd15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2021-11-04T20:24:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-27T14:52:18.000Z", "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "baggepinnen/ModelingToolkitStandardLibrary.jl", "max_forks_repo_head_hexsha": "f8bdbb9f91eadcf274c54dfcd5df94f189ffbd15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-11-05T07:05:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T17:55:20.000Z", "avg_line_length": 35.8958333333, "max_line_length": 98, "alphanum_fraction": 0.7016831109, "num_tokens": 362}
|
module LBNumber
include("./../../constraints/geometric/constants.jl")
include("./1_if.jl")
include("./2_calc_number.jl")
using JuMP
using .Constants
using .IfSimpleLoadBearing
using .CalcNumberLoadBearing
export cons_lb_number_load_bearing
function cons_lb_number_load_bearing(m)
m = if_simple_lb(m)
m = calc_number_lb(m)
return m
end
end
|
{"hexsha": "da048cd1b4b40c5eb587892355fab439acb4bac8", "size": 356, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/constraints/load_bearing/simple/0_number_lb.jl", "max_stars_repo_name": "ToralfFrich/Master_Thesis", "max_stars_repo_head_hexsha": "5d4a51598f1677c2f5c219a88ca9ab4c9b6a5c6f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/constraints/load_bearing/simple/0_number_lb.jl", "max_issues_repo_name": "ToralfFrich/Master_Thesis", "max_issues_repo_head_hexsha": "5d4a51598f1677c2f5c219a88ca9ab4c9b6a5c6f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/constraints/load_bearing/simple/0_number_lb.jl", "max_forks_repo_name": "ToralfFrich/Master_Thesis", "max_forks_repo_head_hexsha": "5d4a51598f1677c2f5c219a88ca9ab4c9b6a5c6f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.9523809524, "max_line_length": 53, "alphanum_fraction": 0.7640449438, "num_tokens": 93}
|
#! /usr/bin/env python
"""
File: root_finder_examples.py
Copyright (c) 2016 Chinmai Raman
License: MIT
Course: PHYS227
Assignment: A.11
Date: Feb 24, 2016
Email: raman105@mail.chapman.edu
Name: Chinmai Raman
Description: Tests different methods for finding roots of a nonlinear function.
"""
import numpy as np
import matplotlib.pyplot as plt
def Newton(f, fprime, x0, eps = 1e-7, N = 100):
"""
Uses Newton's method to estimate the root(s) of a function
"""
n = 1
x = np.zeros(N + 1)
x[0] = x0
while abs(f(x[n - 1])) > eps and n <= N:
if abs(fprime(float(x[n - 1]))) < 1e-14:
raise ValueError("Error. Diverges due to small value of denominator")
x[n] = x[n - 1] - f(float(x[n - 1])) / fprime(float(x[n - 1]))
n += 1
return x[:n]
def bisect(f, a, b, eps = 1e-3):
"""
Uses the bisection method to estimate the root(s) of a function
"""
f_a = f(a)
    if f_a * f(b) > 0:
        # no sign change on [a, b]: bisection cannot bracket a root
        return None, 0
i = 0
m_list = []
while b - a > eps:
i += 1
m = (b + a) / float(2)
f_m = f(m)
if f_a * f_m <= 0:
b = m
else:
a = m
f_a = f_m
m_list.append(m)
return m_list
def secant(f, x0, x1, eps = 1e-7, N = 100):
"""
Uses the secant method to estimate the root(s) of a function
"""
n = 2
x = np.zeros(N + 1)
x[0] = x0
x[1] = x1
while abs(f(x[n - 1]) * (x[n - 1] - x[n - 2])) > eps and n <= N:
if abs((f(float(x[n-1])) - f(float(x[n - 2])))) < 1e-14:
raise ValueError("Error. Diverges due to small value of denominator")
x[n] = x[n - 1] - (f(x[n - 1]) * (x[n - 1] - x[n - 2])) / (f(float(x[n-1])) - f(float(x[n - 2])))
n += 1
return x[:n]
def graph(f, n, xmin, xmax, resolution = 100):
xpactual = np.linspace(xmin, xmax, resolution)
ypactual = f(xpactual)
plt.plot(xpactual, ypactual, 'r-')
plt.xlabel('x')
plt.ylabel('y')
plt.axis([xmin, xmax, -1.1, 1.1])
plt.title('f(x)')
def f1(x):
return np.sin(x)
def f1prime(x):
return np.cos(x)
def f2(x):
return x - np.sin(x)
def f2prime(x):
return 1- np.cos(x)
def f3(x):
return x**5 - np.sin(x)
def f3prime(x):
return 5 * x**4 - np.cos(x)
def f4(x):
return x**4 * np.sin(x)
def f4prime(x):
return 4 * x**3 * np.sin(x) + x**4 * np.cos(x)
def f5(x):
return x**4 - 16
def f5prime(x):
return 4 * x**3
def f6(x):
return x**10 - 1
def f6prime(x):
return 10 * x**9
def f7(x):
return np.tanh(x) - x**10
def f7prime(x):
return 1.0 / (np.cosh(x))**2 - 10 * x**9
def test_Newton():
def f(x):
return 1 - x**2
def fprime(x):
return -2 * x
assert(abs(f(Newton(f, fprime, -1)[-1]) - 0) < 1e-6), 'Failure'
def test_bisect():
def f(x):
return 1 - x**2
assert(abs(f(bisect(f, -2, 0)[0]) - 0) < 1e-6), 'Failure'
def test_secant():
def f(x):
return 1 - x**2
assert(abs(f(secant(f, 5, 3)[-1]) - 0) < 1e-4), 'Failure'
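
if __name__ == '__main__':
    # Hedged demo (not part of the original assignment code): compare the three
    # methods on f7(x) = tanh(x) - x**10, whose positive root lies near x ~ 0.97.
    print('Newton:', Newton(f7, f7prime, 1.1)[-1])
    print('bisect:', bisect(f7, 0.5, 1.2)[-1])
    print('secant:', secant(f7, 1.2, 1.1)[-1])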
|
{"hexsha": "ff1253e0c4e3f8e7f6f23bc24e9fa0e6cd6fdb4c", "size": 3012, "ext": "py", "lang": "Python", "max_stars_repo_path": "root_finder_examples.py", "max_stars_repo_name": "chapman-phys227-2016s/hw-3-ChinmaiRaman", "max_stars_repo_head_hexsha": "3d3c2a688b656f518d9cef9a5d44ca9fd64a159e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "root_finder_examples.py", "max_issues_repo_name": "chapman-phys227-2016s/hw-3-ChinmaiRaman", "max_issues_repo_head_hexsha": "3d3c2a688b656f518d9cef9a5d44ca9fd64a159e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "root_finder_examples.py", "max_forks_repo_name": "chapman-phys227-2016s/hw-3-ChinmaiRaman", "max_forks_repo_head_hexsha": "3d3c2a688b656f518d9cef9a5d44ca9fd64a159e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.8260869565, "max_line_length": 105, "alphanum_fraction": 0.515936255, "include": true, "reason": "import numpy", "num_tokens": 1103}
|
program main
use class_string
implicit none
integer :: i,n
character, allocatable :: c(:)
character(:), allocatable :: s
type(string) :: str
type(string), allocatable :: w(:)
str = string('Foo')
s = str%get()
c = str%chars()
print *, len(s)
print *, size(c)
print *, str%uc()
print *, str%lc()
call str%put('Foo bar baz.')
w = str%words()
print *, size(w)
do i=1, size(w)
print *, w(i)%get()
end do
call str%put(0.0000038)
print *, str%get_real()
end program main
|
{"hexsha": "45851871860ef200ba495a121d5fa1f7ea0bc599", "size": 562, "ext": "f03", "lang": "FORTRAN", "max_stars_repo_path": "examples/example_string.f03", "max_stars_repo_name": "gemmarx/cbtrie_assoc", "max_stars_repo_head_hexsha": "7998718783ca42965fe8c4eaac6d292ce08bd87c", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2015-07-02T20:27:03.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-04T03:14:05.000Z", "max_issues_repo_path": "examples/example_string.f03", "max_issues_repo_name": "gemmarx/cbtrie_assoc", "max_issues_repo_head_hexsha": "7998718783ca42965fe8c4eaac6d292ce08bd87c", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/example_string.f03", "max_forks_repo_name": "gemmarx/cbtrie_assoc", "max_forks_repo_head_hexsha": "7998718783ca42965fe8c4eaac6d292ce08bd87c", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.1290322581, "max_line_length": 37, "alphanum_fraction": 0.5355871886, "num_tokens": 169}
|
#Karan Vombatkere
#German Enigma Machine
#October 2017
Letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#function to create a dictionary with letters and their indices
#Use this to return the index of a letter (a = 0, .., z = 25)
def genDictionary():
letter_index_pairs = []
for indx, char in enumerate(Letters):
letter_index_pairs.append([char, indx])
Indx_Dict = dict(letter_index_pairs)
print("Generated Letter Dictionary!")
return Indx_Dict
#Call the function to create a global dictionary
Char_Indices = genDictionary()
#Class to implement Plugboard
#Plugboard takes a string input (spaces, special characters removed) and Returns a list after swapping characters
#Implements both forward and reverse swaps of characters
class Plugboard:
'Class to implement plugboard for the German Enigma Machine'
Letters = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
initConfig = Letters
#Initialize Plugboard with a particular configuration
def __init__(self):
self.configList = list(Plugboard.initConfig)
#print("Plugboard Initialized (No Letters Swapped) - Please Set Key: \n", self.configList)
#function to swap a pair of characters in a list
#indx_list is a list with two values
def swapPair(self, indx_list, List1):
a = indx_list[0]
b = indx_list[1]
tmp = List1[a]
List1[a] = List1[b]
List1[b] = tmp
return List1
#set the plugboard key
#function to swap all the characters specified by a 2D list defining the swaps
def swapChars(self, indx_2Dlist):
for i, chars in enumerate(indx_2Dlist): #chars is a tuple with 2 letters
indx1 = Char_Indices[chars[0]]
indx2 = Char_Indices[chars[1]]
self.swapPair([indx1, indx2], self.configList)
#print("Plugboard characters", chars[0], "and", chars[1], "were successfully swapped")
return self.configList
#function to set the plugboard key
def setPBKey(self, swapList):
self.configList = list(Plugboard.initConfig) #reset plugboard before implementing swapping sequence
self.swapChars(swapList)
#self.displayPB() #Display the current key setting
#function to display the plugboard settings
def displayPB(self):
print("Displaying Current Plugboard Configuration with letter swaps:", self.configList)
#Takes a string/list input of characters to be swapped
#Returns an array as the output
def outputSwapped(self, charsX):
PBswapped_chars = []
for i, char in enumerate(charsX):
orig_indx = Char_Indices[char]
PBswapped_chars.append(self.configList[orig_indx])
#print("Message Output after plugboard swaps: ", PBswapped_chars)
return PBswapped_chars
def reverseSwapped(self, charsX):
PBreverseSwapped = ""
for i, char in enumerate(charsX):
            pb_indx = self.configList.index(char)
PBreverseSwapped += Letters[pb_indx]
#print("Output after plugboard Reverse swaps: ", PBreverseSwapped)
return PBreverseSwapped
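

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): swap two letter
    # pairs, push a message through the board, then reverse the swaps.
    pb = Plugboard()
    pb.setPBKey([('A', 'B'), ('X', 'Z')])
    swapped = pb.outputSwapped("ABCXYZ")
    print("Swapped:", swapped)                      # ['B', 'A', 'C', 'Z', 'Y', 'X']
    print("Reversed:", pb.reverseSwapped(swapped))  # "ABCXYZ"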
|
{"hexsha": "055b16e0cd30f2da98612b1b9ada06dbb6b85b9f", "size": 3311, "ext": "py", "lang": "Python", "max_stars_repo_path": "Plugboard.py", "max_stars_repo_name": "kvombatkere/Enigma-Machine", "max_stars_repo_head_hexsha": "b7a6e199a8e5ec600771f4740943fa83446f7dcd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Plugboard.py", "max_issues_repo_name": "kvombatkere/Enigma-Machine", "max_issues_repo_head_hexsha": "b7a6e199a8e5ec600771f4740943fa83446f7dcd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Plugboard.py", "max_forks_repo_name": "kvombatkere/Enigma-Machine", "max_forks_repo_head_hexsha": "b7a6e199a8e5ec600771f4740943fa83446f7dcd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6021505376, "max_line_length": 119, "alphanum_fraction": 0.6575052854, "include": true, "reason": "import numpy", "num_tokens": 805}
|
import numpy as n
import cryoops as cops
import geom
class FixedPlanarDomain():
def __init__(self,pts,res):
self.pts = pts
self.resolution = res
        self.sqdist_mat = self.get_sqdist_mat(self)  # precompute pairwise squared distances
self.dim = self.pts.shape[1]
if pts.shape[0] == 1:
self.pt_resolution = res
else:
sqdist = n.copy(self.get_sqdist_mat())
n.fill_diagonal(sqdist, n.inf)
self.pt_resolution = n.sqrt(sqdist.min(axis=1))
def __len__(self):
return self.pts.shape[0]
def __eq__(self,other):
return type(self) == type(other) and len(self) == len(other) \
and self.dim == other.dim \
and self.resolution == other.resolution \
and n.all(self.pts == other.pts)
def __ne__(self,other):
return not self.__eq__(other)
def get_pts(self,inds = None):
if inds is None:
return self.pts
else:
return self.pts[inds,:]
def get_pt_resolution(self,inds = None):
if len(self) == 1:
return self.resolution
else:
if inds is None:
return self.pt_resolution
else:
return self.pt_resolution[inds]
def get_sqdist_mat(self,other = None,curr_inds = None, other_inds = None):
if other is None:
ret = self.sqdist_mat
if curr_inds is not None:
ret = ret[curr_inds,:]
if other_inds is not None:
ret = ret[:,other_inds]
return ret
else:
self_pts = self.get_pts(curr_inds)
other_pts = other.get_pts(other_inds)
D = self_pts.shape[1]
err = self_pts.reshape((-1,1,D)) - other_pts.reshape((1,-1,D))
return n.sum(err**2,axis=2)
def compute_operator(self,interp_params,inds=None):
pts = self.get_pts(inds)
return cops.compute_shift_phases(pts,interp_params['N'],interp_params['rad'])
class FixedDirectionalDomain():
def __init__(self,dirs,res):
self.dirs = dirs
self.resolution = res
self.dim = self.dirs.shape[1] - 1
def __len__(self):
return self.dirs.shape[0]
def __eq__(self,other):
return type(self) == type(other) and len(self) == len(other) \
and self.dim == other.dim \
and self.resolution == other.resolution \
and n.all(self.dirs == other.dirs)
def __ne__(self,other):
return not self.__eq__(other)
def get_dirs(self,inds = None):
if inds is None:
return self.dirs
else:
return self.dirs[inds,:]
class FixedSphereDomain(FixedDirectionalDomain):
def __init__(self,dirs,res,sym=None):
FixedDirectionalDomain.__init__(self,dirs,res)
self.sym = sym
def compute_operator(self,interp_params,inds=None):
if inds is None:
dirs = self.dirs
else:
dirs = self.dirs[inds]
return cops.compute_projection_matrix(dirs,sym=self.sym,**interp_params)
def get_symmetry_order(self):
if self.sym is None:
return 1
else:
return self.sym.get_order()
class FixedCircleDomain(FixedDirectionalDomain):
def __init__(self,theta,res):
FixedDirectionalDomain.__init__(self,
n.array([n.cos(theta),
n.sin(theta.ravel())]).T,
res)
self.theta = theta
def compute_operator(self,interp_params,inds=None):
if inds is None:
theta = self.theta
else:
theta = self.theta[inds]
N = interp_params['N']
kern = interp_params['kern']
kernsize = interp_params['kernsize']
rad = interp_params['rad']
zeropad = interp_params.get('zeropad',0)
N_src = N if zeropad == 0 else N + 2*int(zeropad*(N/2))
return cops.compute_inplanerot_matrix(theta,N,kern,kernsize,rad,N_src, onlyRs = interp_params.get('onlyRs', False))
class FixedSO3Domain():
def __init__(self,dirs,thetas,res,sym=None):
self.dirs = dirs
self.thetas = thetas
self.resolution = res
self.sym = sym
def __len__(self):
return self.dirs.shape[0] * len(self.thetas)
def compute_operator(self,interp_params,inds=None):
if inds is None:
Rs = n.array([[geom.rotmat3D_dir(d,t)[:,0:2] for t in self.thetas] for d in self.dirs])
Rs = Rs.reshape((-1,3,2))
else:
N_I = len(self.thetas)
            Rs = n.array([geom.rotmat3D_dir(self.dirs[i//N_I],self.thetas[n.mod(i,N_I)])[:,0:2] for i in inds])
return cops.compute_projection_matrix(Rs,sym=self.sym,projdirtype='rots',**interp_params)
def get_symmetry_order(self):
if self.sym is None:
return 1
else:
return self.sym.get_order()
|
{"hexsha": "e768d917f8748d20022bfeb161f125243f57d695", "size": 5062, "ext": "py", "lang": "Python", "max_stars_repo_path": "quadrature/domain.py", "max_stars_repo_name": "mbrubake/cryoem-cvpr2015", "max_stars_repo_head_hexsha": "ea0eda3b663364b3b4c7d989bdecfc5263ef3102", "max_stars_repo_licenses": ["Python-2.0", "OLDAP-2.7", "OLDAP-2.8"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2015-11-14T14:56:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T01:57:55.000Z", "max_issues_repo_path": "quadrature/domain.py", "max_issues_repo_name": "mbrubake/cryoem-cvpr2015", "max_issues_repo_head_hexsha": "ea0eda3b663364b3b4c7d989bdecfc5263ef3102", "max_issues_repo_licenses": ["Python-2.0", "OLDAP-2.7", "OLDAP-2.8"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2015-10-14T13:12:24.000Z", "max_issues_repo_issues_event_max_datetime": "2016-10-11T14:13:00.000Z", "max_forks_repo_path": "quadrature/domain.py", "max_forks_repo_name": "mbrubake/cryoem-cvpr2015", "max_forks_repo_head_hexsha": "ea0eda3b663364b3b4c7d989bdecfc5263ef3102", "max_forks_repo_licenses": ["Python-2.0", "OLDAP-2.7", "OLDAP-2.8"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2016-03-14T01:23:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-08T04:34:13.000Z", "avg_line_length": 32.0379746835, "max_line_length": 123, "alphanum_fraction": 0.5645989727, "include": true, "reason": "import numpy", "num_tokens": 1216}
|
#test
import numpy as np
import sympy as sm
wo = sm.Symbol('wo', real=True)  # magnetic dipole frequency
print(wo)
def decode1(n,Nb_floquet_blocks, No_subspaces = 0):
    # n = alpha + Nb_atomic_states*((n1 + Nb_floquet_blocks) + (2*Nb_floquet_blocks + 1)*(n2 + Nb_floquet_blocks))
Nb_atomic_states = 3
tot_size = Nb_atomic_states*(2*Nb_floquet_blocks+1)*(2*Nb_floquet_blocks+1)
n-=No_subspaces*tot_size
alpha = n%Nb_atomic_states
N = n//Nb_atomic_states
n2 = N//(2*Nb_floquet_blocks+1)-Nb_floquet_blocks
n1 = N%(2*Nb_floquet_blocks+1)-Nb_floquet_blocks
return alpha, n1, n2
def numberToBase(n, b, Nb_floquet_blocks, nb_drive):
if n == 0:
return [-Nb_floquet_blocks]*nb_drive
digits = []
while n:
digits.append(int(n % b))
n //= b
digits = np.pad(digits, (0, nb_drive-len(digits)), 'constant')
#print(digits)
digits-=np.array([Nb_floquet_blocks]*nb_drive)
return digits
def decode2(n, nb_drive, Nb_floquet_blocks):
    # n = alpha + Nb_atomic_states*((n1 + Nb_floquet_blocks) + (2*Nb_floquet_blocks + 1)*(n2 + Nb_floquet_blocks))
# tot_size = 3*(2*Nb_floquet_blocks+1)*(2*Nb_floquet_blocks+1)
Nb_atomic_states = 3
alpha = n%Nb_atomic_states
N = n//Nb_atomic_states
N_list = numberToBase(N,2*Nb_floquet_blocks+1, Nb_floquet_blocks, nb_drive)
#print(N, N_list)
#Works for Nb_floquet_blocks <= 16
return alpha, N_list
print(decode1(10, 5))
print(decode2(10, 2, 5))
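
# Hedged inverse sketch (not part of the original script): encode1 undoes
# decode1 for the No_subspaces == 0 case and makes the index layout explicit.
def encode1(alpha, n1, n2, Nb_floquet_blocks):
    Nb_atomic_states = 3
    return alpha + Nb_atomic_states * ((n1 + Nb_floquet_blocks)
                                       + (2 * Nb_floquet_blocks + 1) * (n2 + Nb_floquet_blocks))

assert encode1(*decode1(10, 5), 5) == 10  # round-trip check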
|
{"hexsha": "e155c75eca2eb1452a8fa2b918a2de7481a80cda", "size": 1339, "ext": "py", "lang": "Python", "max_stars_repo_path": "comparison_FL_QT/Scripts/test.py", "max_stars_repo_name": "Anthony-Gandon/Floquet_theory", "max_stars_repo_head_hexsha": "c25917986d83974850ecff60f388632087b8b52f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "comparison_FL_QT/Scripts/test.py", "max_issues_repo_name": "Anthony-Gandon/Floquet_theory", "max_issues_repo_head_hexsha": "c25917986d83974850ecff60f388632087b8b52f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "comparison_FL_QT/Scripts/test.py", "max_forks_repo_name": "Anthony-Gandon/Floquet_theory", "max_forks_repo_head_hexsha": "c25917986d83974850ecff60f388632087b8b52f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4318181818, "max_line_length": 78, "alphanum_fraction": 0.6975354742, "include": true, "reason": "import numpy,import sympy", "num_tokens": 470}
|
[STATEMENT]
lemma rank_of_eq_card_basis_in:
assumes "basis_in \<E> B"
shows "rank_of \<E> = card B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rank_of \<E> = card B
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. rank_of \<E> = card B
[PROOF STEP]
have "{card B | B. basis_in \<E> B} = {card B}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {card B |B. basis_in \<E> B} = {card B}
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
basis_in \<E> B
goal (1 subgoal):
1. {card B |B. basis_in \<E> B} = {card B}
[PROOF STEP]
by safe (auto dest: basis_in_card[OF *])
[PROOF STATE]
proof (state)
this:
{card B |B. basis_in \<E> B} = {card B}
goal (1 subgoal):
1. rank_of \<E> = card B
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
{card B |B. basis_in \<E> B} = {card B}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
{card B |B. basis_in \<E> B} = {card B}
goal (1 subgoal):
1. rank_of \<E> = card B
[PROOF STEP]
unfolding rank_of_def
[PROOF STATE]
proof (prove)
using this:
{card B |B. basis_in \<E> B} = {card B}
goal (1 subgoal):
1. Min {card B |B. basis_in \<E> B} = card B
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
rank_of \<E> = card B
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 601, "file": "Matroids_Matroid", "length": 9}
|
# Copyright (c) Open-MMLab. All rights reserved.
import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pytest
import torch
from mmdet.core import visualization as vis
def test_color():
assert vis.color_val_matplotlib(mmcv.Color.blue) == (0., 0., 1.)
assert vis.color_val_matplotlib('green') == (0., 1., 0.)
assert vis.color_val_matplotlib((1, 2, 3)) == (3 / 255, 2 / 255, 1 / 255)
assert vis.color_val_matplotlib(100) == (100 / 255, 100 / 255, 100 / 255)
    assert vis.color_val_matplotlib(np.zeros(3, dtype=int)) == (0., 0., 0.)
# forbid white color
with pytest.raises(TypeError):
vis.color_val_matplotlib([255, 255, 255])
# forbid float
with pytest.raises(TypeError):
vis.color_val_matplotlib(1.0)
# overflowed
with pytest.raises(AssertionError):
vis.color_val_matplotlib((0, 0, 500))
def test_imshow_det_bboxes():
tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
'image.jpg')
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
out_image = vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape
assert not np.allclose(image, out_image)
os.remove(tmp_filename)
# test grayscale images
image = np.ones((10, 10), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
out_image = vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape[:2]
os.remove(tmp_filename)
# test shaped (0,)
image = np.ones((10, 10, 3), np.uint8)
bbox = np.ones((0, 4))
label = np.ones((0, ))
vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test mask
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
segms = np.random.random((2, 10, 10)) > 0.5
segms = np.array(segms, np.int32)
vis.imshow_det_bboxes(
image, bbox, label, segms, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test tensor mask type error
with pytest.raises(AttributeError):
segms = torch.tensor(segms)
vis.imshow_det_bboxes(image, bbox, label, segms, show=False)
def test_imshow_gt_det_bboxes():
tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
'image.jpg')
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
annotation = dict(gt_bboxes=bbox, gt_labels=label)
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
result = [det_result]
out_image = vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape
assert not np.allclose(image, out_image)
os.remove(tmp_filename)
# test grayscale images
image = np.ones((10, 10), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
annotation = dict(gt_bboxes=bbox, gt_labels=label)
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
result = [det_result]
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test numpy mask
gt_mask = np.ones((2, 10, 10))
annotation['gt_masks'] = gt_mask
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test tensor mask
gt_mask = torch.ones((2, 10, 10))
annotation['gt_masks'] = gt_mask
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test unsupported type
annotation['gt_masks'] = []
with pytest.raises(TypeError):
vis.imshow_gt_det_bboxes(image, annotation, result, show=False)
|
{"hexsha": "9c7969b44ee5b4ee862c09b63f122db14a534b32", "size": 4431, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_utils/test_visualization.py", "max_stars_repo_name": "evgps/mmdetection_trashcan", "max_stars_repo_head_hexsha": "aaf4237c2c0d473425cdc7b741d3009177b79751", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 367, "max_stars_repo_stars_event_min_datetime": "2022-01-14T03:32:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T04:48:20.000Z", "max_issues_repo_path": "tests/test_utils/test_visualization.py", "max_issues_repo_name": "evgps/mmdetection_trashcan", "max_issues_repo_head_hexsha": "aaf4237c2c0d473425cdc7b741d3009177b79751", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 170, "max_issues_repo_issues_event_min_datetime": "2020-09-08T12:29:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:28:09.000Z", "max_forks_repo_path": "tests/test_utils/test_visualization.py", "max_forks_repo_name": "evgps/mmdetection_trashcan", "max_forks_repo_head_hexsha": "aaf4237c2c0d473425cdc7b741d3009177b79751", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 61, "max_forks_repo_forks_event_min_datetime": "2021-07-30T07:51:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T14:40:02.000Z", "avg_line_length": 34.6171875, "max_line_length": 78, "alphanum_fraction": 0.6404874746, "include": true, "reason": "import numpy", "num_tokens": 1312}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Friggeri Resume/CV
% XeLaTeX Template
% Version 1.2 (3/5/15)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% Original author:
% Adrien Friggeri (adrien@friggeri.net)
% https://github.com/afriggeri/CV
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
% Important notes:
% This template needs to be compiled with XeLaTeX and the bibliography, if used,
% needs to be compiled with biber rather than bibtex.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[]{friggeri-cv} % Add 'print' as an option into the square bracket to remove colors from this template for printing
%\addbibresource{bibliography.bib} % Specify the bibliography file to include publications
\begin{document}
\header{Chris}{Ridgers}{Systems \& Software Engineer} % Your name and current job title/field
%----------------------------------------------------------------------------------------
% SIDEBAR SECTION
%----------------------------------------------------------------------------------------
\begin{aside} % In the aside, each new line forces a line break
\section{contact}
23 Harefields Way
The Wirral
CH49 4SB
United Kingdom
~
07921573590
~
\href{mailto:chris@digiridge.co.uk}{chris@digiridge.co.uk}
\href{https://github.com/chrisRidgers}{gh://chrisRidgers}
\href{https://chrisridgers.github.io}{https://chrisridgers.github.io}
%\href{https://www.facebook.com/chrisridgers}{fb://cridgers}
\href{http://twitter.com/ffsmonty}{tw://ffsmonty}
\section{programming}
{\large \color{red} $\varheartsuit$} Git, PHP
{\color{red} $\varheartsuit$} Bash, Ansible, Docker
C++
\end{aside}
%----------------------------------------------------------------------------------------
% WORK EXPERIENCE SECTION
%----------------------------------------------------------------------------------------
\section{experience}
\subsection{Full Time}
\begin{entrylist}
%------------------------------------------------
\entry
{2014--2020}
{Mashbo}
{Liverpool, UK}
{\emph{Software Engineer} \\
Developed bespoke applications across multiple platforms and software stacks.
Maintained and secured production environments on physical and cloud-based infrastructure.
Developed and iterated internal work processes as part of a continuous improvement effort.\\
Work duties:
\begin{itemize}
\item Feature development in Symfony Web applications
\begin{itemize}
\item Traditional server applications
\item API development
\item Event driven architecture
\item CLI tools
\end{itemize}
\item WordPress builds across a multitude of stacks -- currently using Sage 9/ Bedrock within a Dockerised build/ deployment system
\item Worked closely with front-end developers to facilitate project work and wider platform capabilities, particularly with newer WordPress functionality in the post-Gutenberg era
\begin{itemize}
\item Project inheritance / rescue work
\item Private portal applications
\item WooCommerce stores
\item Systems maintenance:
\begin{itemize}
\item Security patching
\item Automated provisioning of development and production environments
\item Repeatable infrastructure as code: Ansible
\end{itemize}
\end{itemize}
\end{itemize}}
%------------------------------------------------
\end{entrylist}
\subsection{Part Time}
\begin{entrylist}
\entry
{2013--2014}
{Student Support and Development Services}
{Keele University, Keele, UK}
{\emph{Resident Support Assistant} \\
Year-long position working within student accommodation as a Resident
Support Assistant (RSA). Roles included: out-of-hours contact regarding
welfare issues and noise complaints, follow-up welfare contact, promoting
friendly living environments among students, arranging regular social activities
and representing the support team at events. Specialist training included:
suicide and self-harm awareness, first aid and sexual health awareness.}
%------------------------------------------------
\entry
{2013}
{Keele University w/ JANET}
{Keele University, Keele, UK}
{\emph{On-site Conference Assistant} \\
Aided JANET in hosting Networkshop41 at Keele University. Roles included:
directing and assisting visiting conference delegates around the conference
area, promoting the event through social media, assisting with technical
equipment and ensuring arrivals and departures occurred smoothly for delegates
and vendors. \\
\emph{Personal highlights included witnessing a live joint performance by musicians
in both Keele (local) and Scotland (remote) via the JANET network and Low
Latency Audio Visual (LOLA).}}
%------------------------------------------------
\end{entrylist}
%----------------------------------------------------------------------------------------
% EDUCATION SECTION
%----------------------------------------------------------------------------------------
\section{education}
\begin{entrylist}
%------------------------------------------------
\entry
{2011--2014}
{Bachelor of Science {\normalfont Creative Computing \& Music Technology -- 2:1}}
{Keele University, Keele, UK}
{\emph{SILO -- Sound in Landscape Out} \\ Final year project was an implementation of the Fourier transform to produce 3D terrain objects from music input. \\
\emph{Course content:} \\ Computer animation, web design, 3D modelling and animation in Blender, games programming with ODE and OGRE, software requirements engineering, design and evaluation, history of sonic arts, sound recording and mixing with Logic Pro, film soundtrack evolution, visual-audio editing, time/ frequency domain digital signal processing, MAX/MSP application creation/ sound synthesis, and music programming using C. Semester abroad studying at Concordia University, Montreal, Canada.}
%------------------------------------------------
\entry
{2008--2010}
{BTEC National Diploma {\normalfont Media Production (Games Development)}}
{West Cheshire College, Ellesmere Port, UK}
{Study of the computer games industry, 3D modelling and animation (3DS Max),
2D image and texture creation, and game design. \\
\emph{Course content:} \\
History and overview of the computer games industry, 3D modelling and
animation in 3DS Max, image creation/ manipulation in Adobe Photoshop, and
games design and production. Awarded D,D,D.}
%------------------------------------------------
\end{entrylist}
%----------------------------------------------------------------------------------------
% AWARDS SECTION
%----------------------------------------------------------------------------------------
\section{awards}
\begin{entrylist}
%------------------------------------------------
\entry
{2019}
{Hub of Hope Ambassador}
{Mashbo, w/ Hub of Hope}
{As part of an ongoing effort to improve ourselves and the industry, Mashbo
partnered with Hub of Hope to deliver training aimed at de-stigmatising
conversations about mental health and improving our personal ability to
recognise and respond to such issues in our coworkers.}
%------------------------------------------------
\end{entrylist}
%----------------------------------------------------------------------------------------
% COMMUNICATION SKILLS SECTION
%----------------------------------------------------------------------------------------
%\section{communication skills}
%
%\begin{entrylist}
%
%%------------------------------------------------
%
%\entry
%{2011}
%{Oral Presentation}
%{California Business Conference}
%{Presented the research I conducted for my Masters of Commerce degree.}
%
%%------------------------------------------------
%
%\entry
%{2010}
%{Poster}
%{Annual Business Conference, Oregon}
%{As part of the course work for BUS320, I created a poster analyzing several local businesses and presented this at a conference.}
%
%%------------------------------------------------
%
%\end{entrylist}
%----------------------------------------------------------------------------------------
% EXTRA CURRICULAR SECTION
%----------------------------------------------------------------------------------------
\section{event attendance}
\begin{entrylist}
%------------------------------------------------
\entry
{2021 (COVID permitting)}
{SymfonyCon Disneyland Paris 2021}
{Paris, France}
{Will be attending SymfonyCon to pursue new knowledge relating to the Symfony framework and ecosystem.}
%------------------------------------------------
\entry
{2019}
{SymfonyLive London 2019}
{London, UK}
{Attended SymfonyLive workshops and conference talks to continue learning about our favourite framework and to improve my own skills.}
%------------------------------------------------
\entry
{2019}
{Lead Dev Berlin 2019}
{Berlin, Germany}
{Attended the Lead Dev Berlin conference to pursue knowledge relating to development habits and communication skills, seeking a broader world view to help recognise exactly where value is delivered to the organisation.}
%------------------------------------------------
\end{entrylist}
%----------------------------------------------------------------------------------------
% INTERESTS SECTION
%----------------------------------------------------------------------------------------
\section{interests}
\textbf{professional:} games design and development, operating systems, graphics, system architecture, sound design, recording and synthesis, code refactoring\\
\textbf{personal:} guitar, hiking, video games, film and quality television, World of Warcraft, history, sociology and languages, reading academic texts and sci-fi/ fantasy literature
%----------------------------------------------------------------------------------------
% PUBLICATIONS SECTION
%----------------------------------------------------------------------------------------
%\section{publications}
%
%\printbibsection{article}{article in peer-reviewed journal} % Print all articles from the bibliography
%
%\printbibsection{book}{books} % Print all books from the bibliography
%
%\begin{refsection} % This is a custom heading for those references marked as "inproceedings" but not containing "keyword=france"
%\nocite{*}
%\printbibliography[sorting=chronological, type=inproceedings, title={international peer-reviewed conferences/proceedings}, notkeyword={france}, heading=bibheading]
%\end{refsection}
%
%\begin{refsection} % This is a custom heading for those references marked as "inproceedings" and containing "keyword=france"
%\nocite{*}
%\printbibliography[sorting=chronological, type=inproceedings, title={local peer-reviewed conferences/proceedings}, keyword={france}, heading=bibheading]
%\end{refsection}
%
%\printbibsection{misc}{other publications} % Print all miscellaneous entries from the bibliography
%
%\printbibsection{report}{research reports} % Print all research reports from the bibliography
%----------------------------------------------------------------------------------------
\end{document}
|
{"hexsha": "ecc5cd449ff5274da9b8807aa360117e7b1c5c70", "size": 10880, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/cv.tex", "max_stars_repo_name": "chrisRidgers/cv", "max_stars_repo_head_hexsha": "4155ad63273526b1a1c1945a17e05a259954c6fe", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-09T11:56:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-09T11:56:00.000Z", "max_issues_repo_path": "src/cv.tex", "max_issues_repo_name": "chrisRidgers/cv", "max_issues_repo_head_hexsha": "4155ad63273526b1a1c1945a17e05a259954c6fe", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/cv.tex", "max_forks_repo_name": "chrisRidgers/cv", "max_forks_repo_head_hexsha": "4155ad63273526b1a1c1945a17e05a259954c6fe", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3879598662, "max_line_length": 503, "alphanum_fraction": 0.6105698529, "num_tokens": 2178}
|