# Compare analytical (PFA) and numerical (scuff-em BEM) Casimir force results,
# then check convergence of the BEM results with respect to grid spacing.
import numpy
from pylab import *
from scipy.interpolate import interp1d

# PEC results, T = 0
d1, g1, e1, ee1, f1, ef1, s = numpy.loadtxt("PEC_combined_results.txt", unpack=True, skiprows=1)
f1 = -f1*31.6e-15  # flip sign and convert force units to Newtons
inds = argsort(d1)
d1 = d1[inds]
f1 = f1[inds]
g1 = g1[inds]
s = s[inds]
inds = numpy.where(s == 0)  # keep only runs with status flag 0
d1 = d1[inds]
f1 = f1[inds]
g1 = g1[inds]

# PEC results, T = 300 K
d1t, g1t, e1t, ee1t, f1t, ef1t, st = numpy.loadtxt("PEC_combined_results_temp.txt", unpack=True, skiprows=1)
f1t = -f1t*31.6e-15
inds = argsort(d1t)
d1t = d1t[inds]
f1t = f1t[inds]
g1t = g1t[inds]
st = st[inds]
inds = numpy.where(st == 0)
d1t = d1t[inds]
f1t = f1t[inds]
g1t = g1t[inds]

# FEC results, T = 0
d2, g2, e2, ee2, f2, ef2, s2 = numpy.loadtxt("combined_results.txt", unpack=True, skiprows=1)
f2 = -f2*31.6e-15
inds = argsort(d2)
d2 = d2[inds]
f2 = f2[inds]
g2 = g2[inds]
s2 = s2[inds]
inds = numpy.where(s2 == 0)
d2 = d2[inds]
f2 = f2[inds]
g2 = g2[inds]

# FEC results, T = 300 K
d2t, g2t, e2t, ee2t, f2t, ef2t, s2t = numpy.loadtxt("combined_results_temp.txt", unpack=True, skiprows=1)
f2t = -f2t*31.6e-15
inds = argsort(d2t)
d2t = d2t[inds]
f2t = f2t[inds]
g2t = g2t[inds]
s2t = s2t[inds]
inds = numpy.where(s2t == 0)
d2t = d2t[inds]
f2t = f2t[inds]
g2t = g2t[inds]

# Large-cantilever comparison runs
d3, e3, ee3, f3, ef3 = numpy.loadtxt("../Comparison/full.txt", unpack=True)
f3 = -f3*31.6e-15
inds = argsort(d3)
d3 = d3[inds]
f3 = f3[inds]

d4, e4, ee4, f4, ef4 = numpy.loadtxt("../Comparison/PEC.txt", unpack=True)
f4 = -f4*31.6e-15
inds = argsort(d4)
d4 = d4[inds]
f4 = f4[inds]

print(f1)
print(f2)

datafile = "../../Mathematica/calculated_vals.tsv"
PFA_datafile = "../../Mathematica/calculated_pfa_vals.tsv"
dist, fpfa, fnaive, fright, ftemp = numpy.loadtxt(PFA_datafile, unpack=True)
dist = dist*1e6

# Analytical vs numerical force curves
figure(figsize=(12, 8))
gs = numpy.min(g1)
#for i in range(0,len(gs)):
inds = numpy.where(g1 == gs)
plot(d1[inds], f1[inds], '--', label="PEC, grid="+str(gs), color="black")
inds = numpy.where(g1 == 0.4)
#plot(d1[inds],f1[inds],'-.',label="PEC, grid="+str(0.4),color="black")
gst = numpy.min(g1t)
inds = numpy.where(g1t == gst)
plot(d1t[inds], f1t[inds], '-.', label="PEC 300K, grid="+str(gst), color="black")
inds = numpy.where(g1t == 0.4)
#plot(d1t[inds],f1t[inds],'-.',label="PEC 300K, grid="+str(0.4),color="orange")
gs = numpy.min(g2)
inds = numpy.where(g2 == gs)
plot(d2[inds], f2[inds], '--', label="FEC, grid="+str(gs), color="green")
gs = numpy.min(g2t)
inds = numpy.where(g2t == gs)
plot(d2t[inds], f2t[inds], '-.', label="FEC 300K, grid="+str(gs), color="green")
plot(d4, f4, ':', label="PEC, Large Cantilever", color="black")
plot(d3, f3, ':', label="FEC, Large Cantilever", color="green")
plot(dist, fpfa, label="PFA", linestyle='-', color="black")
plot(dist, fright, label="SiO2/Au", linestyle='-', color="green")
plot(dist, ftemp, label="SiO2/Au T=300", linestyle='-', color="red")
xlim(0.1, 30)
xscale('log')
yscale('log')
xlabel('Distance (microns)')
ylabel('Force (N)')
title('Analytical (Dashed) v Numerical (Solid) Calculations')
legend(loc="lower left", ncol=2)
savefig('analytic_v_numerical')
#show()

# Data points computed (through a similar method) for the correction due to
# aspect ratio L/R from PFA (Canaguier-Durand 2012)
cdx = [0, 0.1, .2, 0.4, 0.6, 0.8, 1]
cdy = [1.0, .98, .95, .86, .78, .72, .68]

# Ratio of BEM force to interpolated PFA force, per grid scale
clf()
iPFA = interp1d(dist, fpfa)
gs = numpy.unique(g1)
for i in range(0, len(gs)):
    inds = numpy.where(g1 == gs[i])
    rPFA = f1[inds]/iPFA(d1[inds])
    plot(d1[inds]/2.5, rPFA, label="PFA, grid="+str(gs[i]))
plot(cdx, cdy, label="Canaguier-Durand", linestyle=':', color="black")
#xscale('log')
xlim(0, 3)
xlabel('Distance/Radius')
ylabel('(PFA/BEM) Force Ratio')
title('Comparison between Calculations, grid=1 micron')
legend()
#show()
savefig("pfa_v_pec.png")

# Convergence in grid spacing: force normalized by the smallest-gridding force
clf()
inds = argsort(g1)
d1 = d1[inds]
f1 = f1[inds]
g1 = g1[inds]
ds = numpy.unique(d1)
for i in range(0, len(ds)):
    inds = numpy.where(d1 == ds[i])
    plot(g1[inds], f1[inds]/f1[inds[0][0]], '--', label=str(ds[i]), alpha=.9)
plot([0.1, 1.2], [1, 1], linestyle=':', color='black')
ylim(0.2, 1.1)
xlim(0.3, 1)
xscale('log')
xlabel('Grid Scale Length')
ylabel('Force/Force(smallest gridding)')
title("Convergence in Grid Spacing")
legend(loc='lower left', title="Separation")
savefig("pfa_convergence.png")

# Same plot, zoomed in on the converged region
clf()
inds = argsort(g1)
d1 = d1[inds]
f1 = f1[inds]
g1 = g1[inds]
ds = numpy.unique(d1)
for i in range(0, len(ds)):
    inds = numpy.where(d1 == ds[i])
    plot(g1[inds], f1[inds]/f1[inds[0][0]], '--', label=str(ds[i]), alpha=.9)
plot([0.1, 1.2], [1, 1], linestyle=':', color='black')
ylim(0.8, 1.1)
xlim(0.3, 1)
xscale('log')
xlabel('Grid Scale Length')
ylabel('Force/Force(smallest gridding)')
title("Convergence in Grid Spacing")
legend(loc='lower left', title="Separation")
savefig("pfa_convergence_zoom.png")
{"hexsha": "08ef9fd7bcccb8393bb2fac967e6bd017f0d9d79", "size": 4407, "ext": "py", "lang": "Python", "max_stars_repo_path": "casimir/scuffCode/CubeSphere/compare.py", "max_stars_repo_name": "charlesblakemore/opt_lev_analysis", "max_stars_repo_head_hexsha": "704f174e9860907de349688ed82b5812bbb07c2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "casimir/scuffCode/CubeSphere/compare.py", "max_issues_repo_name": "charlesblakemore/opt_lev_analysis", "max_issues_repo_head_hexsha": "704f174e9860907de349688ed82b5812bbb07c2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "casimir/scuffCode/CubeSphere/compare.py", "max_forks_repo_name": "charlesblakemore/opt_lev_analysis", "max_forks_repo_head_hexsha": "704f174e9860907de349688ed82b5812bbb07c2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-27T19:10:25.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-27T19:10:25.000Z", "avg_line_length": 26.2321428571, "max_line_length": 118, "alphanum_fraction": 0.6916269571, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1738}
"""Methods used for INtERAcT.""" import numpy as np import pandas as pd from collections import Counter from numpy.linalg import norm from scipy.stats import entropy from scipy.spatial.distance import pdist, squareform from .nn_tree import NeighborsMode def _nn_data_to_clusters_counts( nn_data, number_of_clusters, number_of_neighbors ): clusters_counts = np.zeros(number_of_clusters) counts = pd.Series(Counter(nn_data)) clusters_counts[counts.index] = counts.values return clusters_counts def jensen_shannon_divergence(p, q, normalize=True): """Compute Jensen-Shannon divergence given two pmfs.""" if normalize: p = p / norm(p, ord=1) q = q / norm(q, ord=1) m = 0.5 * (p + q) return 0.5 * (entropy(p, m) + entropy(q, m)) def _compute_divergence_matrix(words_clusters_distributions, n=None): if n is None: n = words_clusters_distributions.shape[0] divergence_matrix = np.zeros((n, n)) divergence = .0 for i in range(n): for j in range(i): divergence = jensen_shannon_divergence( words_clusters_distributions.values[i], words_clusters_distributions.values[j] ) divergence_matrix[i, j] = divergence divergence_matrix[j, i] = divergence return divergence_matrix def _divergence_matrix_to_table( divergence_matrix, proteins_list, n=None, interaction_symbol='<->' ): if n is None: n = divergence_matrix.shape[0] return pd.DataFrame( [ [ interaction_symbol.join(sorted( [proteins_list[i], proteins_list[j]] )), divergence_matrix[j][i] ] for i in range(n) for j in range(i) ], columns=['interaction', 'divergence'] ).set_index('interaction') def _divergence_table_to_interaction_df( divergence_table, interaction_symbol='<->', alpha=7.5, beta=0.0 ): e1s = {} e2s = {} intensity = {} divergence_table['intensity'] = ( np.exp(-alpha*divergence_table['divergence'] + beta) ) for index, value in zip( divergence_table.index, divergence_table['intensity'] ): interaction_table_index = index.upper() e1, e2 = interaction_table_index.split(interaction_symbol) e1s[interaction_table_index] = e1 e2s[interaction_table_index] = e2 intensity[interaction_table_index] = value return pd.DataFrame({ "e1": e1s, "e2": e2s, "intensity": intensity }) def _distance_matrix_to_table( distance_matrix, proteins_list, n=None, interaction_symbol='<->' ): if n is None: n = distance_matrix.shape[0] return pd.DataFrame( [ [ interaction_symbol.join(sorted( [proteins_list[i], proteins_list[j]] )), distance_matrix[j][i] ] for i in range(n) for j in range(i) ], columns=['interaction', 'distance'] ).set_index('interaction') def _distance_table_to_interaction_df( distance_table, interaction_symbol='<->' ): e1s = {} e2s = {} intensity = {} distance_table['intensity'] = 1 / distance_table['distance'] minimum, maximum = ( min(distance_table['intensity']), max(distance_table['intensity']) ) distance_table['intensity'] = ( distance_table['intensity'] - minimum ) / (maximum - minimum) for index, value in zip(distance_table.index, distance_table['intensity']): interaction_table_index = index.upper() e1, e2 = interaction_table_index.split(interaction_symbol) e1s[interaction_table_index] = e1 e2s[interaction_table_index] = e2 intensity[interaction_table_index] = value return pd.DataFrame({ "e1": e1s, "e2": e2s, "intensity": intensity }) def _interaction_df_to_edge_weight_list(interaction_table, threshold=0.0): """Convert from df to edge_list.""" edge_weight_list = [ tuple(sorted([row['e1'], row['e2']]) + [row['intensity']]) for idx, row in interaction_table.iterrows() if row['intensity'] > threshold ] return edge_weight_list def get_network_from_embedding_using_interact( 
word_list, embedding_df, nn_tree, number_of_clusters, number_of_neighbors=2000 ): """Return interaction dataframe using INtERAcT.""" vectors = embedding_df.loc[word_list] nn_data_list = nn_tree.kneighbors( X=vectors.values, k=number_of_neighbors, mode=NeighborsMode.CLUSTERS ) words_clusters_distributions = pd.DataFrame( np.array([ _nn_data_to_clusters_counts( nn_data, number_of_clusters, number_of_neighbors ) for nn_data in nn_data_list ]) / number_of_neighbors, index=word_list, columns=[ 'C{}'.format(i) for i in range(number_of_clusters) ] ) divergence_matrix = _compute_divergence_matrix( words_clusters_distributions ) divergence_table = _divergence_matrix_to_table( divergence_matrix, word_list ).sort_values(by='divergence') return _divergence_table_to_interaction_df(divergence_table) def get_network_from_embedding_using_distance_metric( word_list, embedding_df, metric='euclidean' ): """Return interaction dataframe using a distance.""" vectors = embedding_df.loc[word_list] distance = squareform(pdist(vectors.values, metric=metric)) distance_table = _distance_matrix_to_table( distance, word_list ).sort_values(by='distance') interaction_df = _distance_table_to_interaction_df( distance_table ).sort_values(by='intensity') return interaction_df
{"hexsha": "3f702f514b54dee5286b862aa91ab8d95349b883", "size": 5850, "ext": "py", "lang": "Python", "max_stars_repo_path": "interact/core.py", "max_stars_repo_name": "drugilsberg/interact", "max_stars_repo_head_hexsha": "39cf22d8b8b35b9a38e111d0b016331af0c3747a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2019-02-25T08:52:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T20:00:15.000Z", "max_issues_repo_path": "interact/core.py", "max_issues_repo_name": "drugilsberg/interact", "max_issues_repo_head_hexsha": "39cf22d8b8b35b9a38e111d0b016331af0c3747a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-04-28T12:30:37.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-30T10:58:23.000Z", "max_forks_repo_path": "interact/core.py", "max_forks_repo_name": "drugilsberg/interact", "max_forks_repo_head_hexsha": "39cf22d8b8b35b9a38e111d0b016331af0c3747a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-02-04T18:22:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-30T09:34:32.000Z", "avg_line_length": 30.9523809524, "max_line_length": 79, "alphanum_fraction": 0.6442735043, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1355}
import argparse
import json
import networkx as nx
import os

parser = argparse.ArgumentParser()
parser.add_argument('-i', help='Input json', required=True)
parser.add_argument('-o', help='Output json', required=True)
args = parser.parse_args()


def main():
    with open(args.i, 'r') as f:
        data = json.load(f)

    out_dir, file_name = os.path.split(args.o)
    # if directory does not exist, create it
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    top_num_edges = get_top_num_edges(data)
    top_total_weight = get_top_total_weight(data)
    top_betweenness = get_top_betweenness(data)

    out_dict = {}
    out_dict["most_connected_by_num"] = top_num_edges
    out_dict["most_connected_by_weight"] = top_total_weight
    out_dict["most_central_by_betweenness"] = top_betweenness
    # print(out_dict)

    with open(args.o, 'w') as f:
        json.dump(out_dict, f, indent=4)

    return 0


def get_top_betweenness(data):
    G = build_graph(data)
    bet_list = list(nx.betweenness_centrality(G).items())
    bet_list.sort(key=lambda x: -x[1])
    top_betweenness = []
    for i in range(3):
        top_betweenness.append(bet_list[i][0])
    return top_betweenness


def build_graph(data):
    G = nx.Graph()
    for u, sub_dict in data.items():
        for v, weight in sub_dict.items():
            # print(f'w: {weight}')
            if not G.has_edge(u, v):
                G.add_edge(u, v, weight=weight)
    return G


# def extract_info(G):
#     info = {}
#     for n, nbrs in G.adj.items():
#         nested_info = {}
#         for nbr, eattr in nbrs.items():
#             wt = eattr['weight']
#             nested_info[nbr] = wt
#         info[n] = nested_info
#     return info


def get_top_total_weight(data):
    info_dict = {}
    for key, sub_dict in data.items():
        total = 0  # renamed from `sum` to avoid shadowing the builtin
        for _, weight in sub_dict.items():
            total += weight
        info_dict[key] = total
    sorted_list = list(info_dict.items())
    sorted_list.sort(key=lambda x: -x[1])
    top_total_weight = []
    for i in range(3):
        top_total_weight.append(sorted_list[i][0])
    return top_total_weight


def get_top_num_edges(data):
    info_dict = {}
    for key, sub_dict in data.items():
        num_edges = len(sub_dict)
        info_dict[key] = num_edges
    sorted_list = list(info_dict.items())
    sorted_list.sort(key=lambda x: -x[1])
    top_num_edges = []
    for i in range(3):
        top_num_edges.append(sorted_list[i][0])
    return top_num_edges


if __name__ == '__main__':
    main()
{"hexsha": "56838e6143b48544aefc1b4f334cbca74a1d03ff", "size": 2541, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignments/hw9/260831147_submission_template/src/compute_network_stats.py", "max_stars_repo_name": "amalkoodoruth/DATASCIENCE-COMP598", "max_stars_repo_head_hexsha": "e2b212fe6adfdc670ec32ab91bb3535f6f72e472", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignments/hw9/260831147_submission_template/src/compute_network_stats.py", "max_issues_repo_name": "amalkoodoruth/DATASCIENCE-COMP598", "max_issues_repo_head_hexsha": "e2b212fe6adfdc670ec32ab91bb3535f6f72e472", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignments/hw9/260831147_submission_template/src/compute_network_stats.py", "max_forks_repo_name": "amalkoodoruth/DATASCIENCE-COMP598", "max_forks_repo_head_hexsha": "e2b212fe6adfdc670ec32ab91bb3535f6f72e472", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-04T23:12:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T23:12:02.000Z", "avg_line_length": 27.3225806452, "max_line_length": 61, "alphanum_fraction": 0.6312475403, "include": true, "reason": "import networkx", "num_tokens": 653}
#!/usr/bin/env python
"""
    example of libtcod's SDL hook
    draws a simple white square.
"""
import tcod
import tdl
import numpy as np

# generate a callback for libtcod
@tcod.ffi.callback('SDL_renderer_t')
def sdl_hook(surface):
    tcod.lib.SDL_UpperBlit(my_surface, tcod.ffi.NULL, surface,
                           [{'x': 0, 'y': 0}])

pixels = np.zeros((100, 150, 4), dtype=np.uint8)

my_surface = tcod.lib.SDL_CreateRGBSurfaceWithFormatFrom(
    tcod.ffi.cast('void*', pixels.ctypes.data),
    pixels.shape[1],
    pixels.shape[0],
    32,
    pixels.strides[0],
    tcod.lib.SDL_PIXELFORMAT_RGBA32,
)

if __name__ == '__main__':
    # hook callback to libtcod
    tcod.sys_register_SDL_renderer(sdl_hook)
    con = tdl.init(32, 32, renderer='SDL')  # MUST BE SDL RENDERER
    pixels[:] = 255
    tick = 0
    while True:
        tick += 1
        for event in tdl.event.get():
            if event.type == 'QUIT':
                raise SystemExit()
        tdl.flush()  # will call sdl_hook
{"hexsha": "454ad642696b99a4a27b136f9140da77d362dfcb", "size": 971, "ext": "py", "lang": "Python", "max_stars_repo_path": "dev/sdl_hook_test.py", "max_stars_repo_name": "psizek/python-tcod", "max_stars_repo_head_hexsha": "32fcc42d5107c5b212978ea88df1a8f6f4743061", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dev/sdl_hook_test.py", "max_issues_repo_name": "psizek/python-tcod", "max_issues_repo_head_hexsha": "32fcc42d5107c5b212978ea88df1a8f6f4743061", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dev/sdl_hook_test.py", "max_forks_repo_name": "psizek/python-tcod", "max_forks_repo_head_hexsha": "32fcc42d5107c5b212978ea88df1a8f6f4743061", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.119047619, "max_line_length": 80, "alphanum_fraction": 0.6446961895, "include": true, "reason": "import numpy", "num_tokens": 283}
#define BOOST_TEST_MODULE "JosephusModule"
#include <boost/test/unit_test.hpp>
#include <boost/test/unit_test_parameters.hpp>

#include "Josephus.h"

#include <list>
#include <vector>

BOOST_AUTO_TEST_CASE(ERASE_VECTOR_AT_INDEX)
{
    boost::unit_test::unit_test_log.set_threshold_level(boost::unit_test::log_all_errors);
    BOOST_TEST_MESSAGE("Test the erasing of a vector element at an index");
    std::vector<int> vec1{ 1, 2, 3, 4, 5, 6 };
    eraseVectorAtIndex(vec1, 2); // removes the 3 at index 2
    BOOST_CHECK(vec1.at(0) == 1);
    BOOST_CHECK(vec1.at(1) == 2);
    BOOST_CHECK(vec1.at(2) == 4);
    BOOST_CHECK(vec1.at(3) == 5);
    BOOST_CHECK(vec1.at(4) == 6);
}

BOOST_AUTO_TEST_CASE(JOSEPHUS_TEST_N5K2)
{
    boost::unit_test::unit_test_log.set_threshold_level(boost::unit_test::log_all_errors);
    int N = 5;
    int K = 2;
    BOOST_TEST_MESSAGE("Test the Josephus Algorithm with N=" << N << " and K= " << K << " !!!");
    std::vector<int> evadedVectorList;
    int remainder = Josephus(N, K, evadedVectorList);
    // 1 2 3 4 5 => 1 3 4 5 => 1 3 5 (first pass done) => 3 5 => 3
    // sol: 2 4 1 5, rem = 3
    // math. sol (K=2): N = 101 in binary, rotated left => 011 = 3
    BOOST_CHECK(evadedVectorList.at(0) == 2);
    BOOST_CHECK(evadedVectorList.at(1) == 4);
    BOOST_CHECK(evadedVectorList.at(2) == 1);
    BOOST_CHECK(evadedVectorList.at(3) == 5);
    BOOST_CHECK(remainder == 3);
}

BOOST_AUTO_TEST_CASE(JOSEPHUS_TEST_N5K11)
{
    boost::unit_test::unit_test_log.set_threshold_level(boost::unit_test::log_all_errors);
    int N = 5;
    int K = 11;
    BOOST_TEST_MESSAGE("Test the Josephus Algorithm with N=" << N << " and K= " << K << " !!!");
    std::vector<int> evadedVectorList;
    int remainder = Josephus(N, K, evadedVectorList);
    // sol: 1 4 2 3, rem = 5
    BOOST_CHECK(evadedVectorList.at(0) == 1);
    BOOST_CHECK(evadedVectorList.at(1) == 4);
    BOOST_CHECK(evadedVectorList.at(2) == 2);
    BOOST_CHECK(evadedVectorList.at(3) == 3);
    BOOST_CHECK(remainder == 5);
}

BOOST_AUTO_TEST_CASE(JOSEPHUS_TEST_N13K2)
{
    boost::unit_test::unit_test_log.set_threshold_level(boost::unit_test::log_all_errors);
    int N = 13;
    int K = 2;
    BOOST_TEST_MESSAGE("Test the Josephus Algorithm with N=" << N << " and K= " << K << " !!!");
    std::vector<int> evadedVectorList;
    int remainder = Josephus(N, K, evadedVectorList);
    // sol: 2 4 6 8 10 12 (all even positions done) 1 5 9 13 7 3, rem = 11
    // math. sol (K=2): N = 1101 in binary, rotated left => 1011 = 11
    BOOST_CHECK(evadedVectorList.at(0) == 2);
    BOOST_CHECK(evadedVectorList.at(1) == 4);
    BOOST_CHECK(evadedVectorList.at(2) == 6);
    BOOST_CHECK(evadedVectorList.at(3) == 8);
    BOOST_CHECK(evadedVectorList.at(4) == 10);
    BOOST_CHECK(evadedVectorList.at(5) == 12);
    BOOST_CHECK(evadedVectorList.at(6) == 1);
    BOOST_CHECK(evadedVectorList.at(7) == 5);
    BOOST_CHECK(evadedVectorList.at(8) == 9);
    BOOST_CHECK(evadedVectorList.at(9) == 13);
    BOOST_CHECK(evadedVectorList.at(10) == 7);
    BOOST_CHECK(evadedVectorList.at(11) == 3);
    BOOST_CHECK(remainder == 11);
}

BOOST_AUTO_TEST_CASE(JOSEPHUS_TEST_N13K3)
{
    boost::unit_test::unit_test_log.set_threshold_level(boost::unit_test::log_all_errors);
    int N = 13;
    int K = 3;
    BOOST_TEST_MESSAGE("Test the Josephus Algorithm with N=" << N << " and K= " << K << " !!!");
    std::vector<int> evadedVectorList;
    int remainder = Josephus(N, K, evadedVectorList);
    // sol: 3 6 9 12 2 7 11 4 10 5 1 8, rem = 13
    // math. sol: remainder = 3^2 + 3^1 + 3^0 = 13, so rem is 13
    BOOST_CHECK(evadedVectorList.at(0) == 3);
    BOOST_CHECK(evadedVectorList.at(1) == 6);
    BOOST_CHECK(evadedVectorList.at(2) == 9);
    BOOST_CHECK(evadedVectorList.at(3) == 12);
    BOOST_CHECK(evadedVectorList.at(4) == 2);
    BOOST_CHECK(evadedVectorList.at(5) == 7);
    BOOST_CHECK(evadedVectorList.at(6) == 11);
    BOOST_CHECK(evadedVectorList.at(7) == 4);
    BOOST_CHECK(evadedVectorList.at(8) == 10);
    BOOST_CHECK(evadedVectorList.at(9) == 5);
    BOOST_CHECK(evadedVectorList.at(10) == 1);
    BOOST_CHECK(evadedVectorList.at(11) == 8);
    BOOST_CHECK(remainder == 13);
}
{"hexsha": "f56faa3698f25d5297ba33f2c8db943a7166b682", "size": 3977, "ext": "cc", "lang": "C++", "max_stars_repo_path": "tests/JosephusTest.cc", "max_stars_repo_name": "RobertHue/JosephusProblem", "max_stars_repo_head_hexsha": "f4dcf95f21fb11c460832b2eab94da86d6048959", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/JosephusTest.cc", "max_issues_repo_name": "RobertHue/JosephusProblem", "max_issues_repo_head_hexsha": "f4dcf95f21fb11c460832b2eab94da86d6048959", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/JosephusTest.cc", "max_forks_repo_name": "RobertHue/JosephusProblem", "max_forks_repo_head_hexsha": "f4dcf95f21fb11c460832b2eab94da86d6048959", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.358778626, "max_line_length": 93, "alphanum_fraction": 0.6924817702, "num_tokens": 1312}
# -*- coding: utf-8 -*-
"""
Created on Tue Aug  4 11:01:16 2015

@author: hehu
"""

import matplotlib.pyplot as plt
import numpy as np

from sklearn.neighbors import KNeighborsClassifier
from sklearn.lda import LDA
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier

from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from scipy.linalg import eig


def gaussian(x, mu, sig):
    return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))


def visualize(X, y, clf):
    fig, ax = plt.subplots(figsize=[6, 6])
    plt.axis('equal')

    # create a mesh to plot in
    #x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    #y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    x_min, x_max = -9, 3
    y_min, y_max = -7, 5

    if clf is not None:
        h = .01  # step size in the mesh
        xx, yy = np.meshgrid(np.arange(x_min-1, x_max+1, h),
                             np.arange(y_min-1, y_max+1, h))
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap='bwr', alpha=0.5)  # plt.cm.Paired

        ymin, ymax = ax.get_ylim()
        xmin, xmax = ax.get_xlim()

        if clf.kernel == "linear":
            # get the separating hyperplane
            w = clf.coef_[0]
            a = -w[0] / w[1]
            xx = np.linspace(-10, 5, 500)
            yy = a * xx - (clf.intercept_[0]) / w[1]

            # plot the parallels to the separating hyperplane that pass
            # through the support vectors
            margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
            yy_down = yy + a * margin
            yy_up = yy - a * margin

            ax.plot(xx, yy, 'k-')
            ax.plot(xx, yy_down, 'k--')
            ax.plot(xx, yy_up, 'k--')

            for svIdx in range(clf.support_vectors_.shape[0]):
                sv = [clf.support_vectors_[svIdx, 0], clf.support_vectors_[svIdx, 1]]
                ax.annotate("Support Vectors",
                            sv,
                            xytext=(-6, 3),
                            size=13,
                            bbox=dict(boxstyle="round4", fc="w", ec="g"),
                            arrowprops=dict(arrowstyle="simple",
                                            connectionstyle="arc3,rad=0.2",
                                            shrinkA=0,
                                            shrinkB=8,
                                            fc="g",
                                            ec="g"),
                            horizontalalignment='center',
                            verticalalignment='middle')

            # Plot margin
            x0 = -0.5
            y0 = a * x0 - (clf.intercept_[0]) / w[1]

            distances = np.hypot(x0 - xx, y0 - yy_down)
            minIdx = np.argmin(distances)
            x1 = xx[minIdx]
            y1 = yy_down[minIdx]

            ax.annotate("",
                        xy=(x0, y0), xycoords='data',
                        xytext=(x1, y1), textcoords='data',
                        arrowprops=dict(arrowstyle="<->",
                                        connectionstyle="arc3"),
                        )

            distances = np.hypot(x0 - xx, y0 - yy_up)
            minIdx = np.argmin(distances)
            x2 = xx[minIdx]
            y2 = yy_up[minIdx]

            ax.annotate("",
                        xy=(x0, y0), xycoords='data',
                        xytext=(x2, y2), textcoords='data',
                        arrowprops=dict(arrowstyle="<->",
                                        connectionstyle="arc3"),
                        )

            ax.annotate("Margin",
                        (0.5*(x0+x1), 0.5*(y0+y1)),
                        xytext=(1.5, -6.7),
                        size=13,
                        bbox=dict(boxstyle="round4", fc="w", ec="g"),
                        arrowprops=dict(arrowstyle="simple",
                                        connectionstyle="arc3,rad=-0.2",
                                        shrinkA=0,
                                        shrinkB=8,
                                        fc="g",
                                        ec="g"),
                        horizontalalignment='center',
                        verticalalignment='middle')

            ax.annotate("Margin",
                        (0.5*(x0+x2), 0.5*(y0+y2)),
                        xytext=(1.5, -6.7),
                        size=13,
                        bbox=dict(boxstyle="round4", fc="w", ec="g"),
                        arrowprops=dict(arrowstyle="simple",
                                        connectionstyle="arc3,rad=-0.2",
                                        shrinkA=0,
                                        shrinkB=8,
                                        fc="g",
                                        ec="g"),
                        horizontalalignment='center',
                        verticalalignment='middle')

            ax.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
                       s=80, facecolors='none', zorder=10)

    #ax.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)

    ax.set_ylim(y_min, y_max)
    ax.set_xlim(x_min, x_max)

    X1 = X[y == 1, :]
    X2 = X[y == 0, :]

    ax.plot(X1[:, 0], X1[:, 1], 'ro', zorder=1, alpha=0.6)
    ax.plot(X2[:, 0], X2[:, 1], 'bx', zorder=1)


def generate_data(N):
    X1 = np.random.randn(2, N)
    X2 = np.random.randn(2, N)

    M1 = 0.7*np.array([[1.5151, -0.1129], [0.1399, 0.6287]])
    M2 = 0.7*np.array([[0.8602, 1.2461], [-0.0737, -1.5240]])

    T1 = np.array([-1, 1]).reshape((2, 1))
    T2 = np.array([-2, -5]).reshape((2, 1))

    X1 = np.dot(M1, X1) + np.tile(T1, [1, N])
    X2 = np.dot(M2, X2) + np.tile(T2, [1, N])

    X1 = X1[::-1, :]
    X2 = X2[::-1, :]

    return X1, X2


if __name__ == "__main__":

    plt.close("all")

    # Generate random training data
    N = 200
    np.random.seed(2014)
    X1, X2 = generate_data(N)

    X = np.concatenate((X1.T, X2.T))
    y = np.concatenate((np.ones(N), np.zeros(N)))

    # Generate test sample
    np.random.seed(2016)
    X1_test, X2_test = generate_data(N)
    X_test = np.concatenate((X1_test.T, X2_test.T))
    y_test = np.concatenate((np.ones(N), np.zeros(N)))

    clf = SVC(kernel='linear', C=100)
    clf.fit(X, y)

    visualize(X, y, None)
    plt.savefig("../images/SVM_data.pdf", bbox_inches="tight", transparent=True)

    visualize(X, y, clf)
    plt.savefig("../images/SVM_boundary.pdf", bbox_inches="tight", transparent=True)

    clf = SVC(kernel='poly', degree=2, C=1)
    clf.fit(X, y)
    visualize(X, y, clf)
    plt.title("SVM with 2nd order Polynomial Kernel")
    plt.savefig("../images/SVM_boundary_poly2.pdf", bbox_inches="tight", transparent=True)

    clf = SVC(kernel='rbf', C=1)
    clf.fit(X, y)
    visualize(X, y, clf)
    plt.title("SVM with the RBF Kernel")
    plt.savefig("../images/SVM_boundary_RBF.pdf", bbox_inches="tight", transparent=True)
{"hexsha": "d8b4e47a454ccd17596e7d034c11a5596f9e3150", "size": 7573, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/SVMExample.py", "max_stars_repo_name": "mahehu/SGN-41007", "max_stars_repo_head_hexsha": "c8ed169a0a5f70fb87b99448e39a573c0df584b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2017-01-09T07:48:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T15:13:49.000Z", "max_issues_repo_path": "code/SVMExample.py", "max_issues_repo_name": "mahehu/SGN-41007", "max_issues_repo_head_hexsha": "c8ed169a0a5f70fb87b99448e39a573c0df584b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/SVMExample.py", "max_forks_repo_name": "mahehu/SGN-41007", "max_forks_repo_head_hexsha": "c8ed169a0a5f70fb87b99448e39a573c0df584b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 46, "max_forks_repo_forks_event_min_datetime": "2017-01-10T19:32:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-20T08:29:20.000Z", "avg_line_length": 34.1126126126, "max_line_length": 100, "alphanum_fraction": 0.4562260663, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1990}
""" Doctests for Nipy / NumPy-specific nose/doctest modifications """ # try the #random directive on the output line def check_random_directive(): ''' >>> 2+2 <BadExample object at 0x084D05AC> #random: may vary on your system ''' # check the implicit "import numpy as np" def check_implicit_np(): ''' >>> np.array([1,2,3]) array([1, 2, 3]) ''' # there's some extraneous whitespace around the correct responses def check_whitespace_enabled(): ''' # whitespace after the 3 >>> 1+2 3 # whitespace before the 7 >>> 3+4 7 ''' def check_empty_output(): """ Check that no output does not cause an error. This is related to nose bug 445; the numpy plugin changed the doctest-result-variable default and therefore hit this bug: http://code.google.com/p/python-nose/issues/detail?id=445 >>> a = 10 """ def check_skip(): """ Check skip directive The test below should not run >>> 1/0 #doctest: +SKIP """ def func(): return 1 def check_have_module_context(): """ Check that, unlike numpy, we do have the module namespace >>> func() 1 """ def check_fails(): """ Check inversion directive The directive is mainly for tests >>> 'black' #doctest: +NOT_EQUAL 'white' >>> 'white' #doctest: +NOT_EQUAL 'black' """ def check_ignore_output(): """ Check IGNORE_OUTPUT option works >>> 'The answer' #doctest: +IGNORE_OUTPUT 42 >>> 'The answer' #doctest: +IGNORE_OUTPUT 'The answer' """ def check_sympy_equal(): """ Check SYMPY_EQUAL option >>> from sympy import symbols >>> a, b, c = symbols('a, b, c') >>> a + b #doctest: +SYMPY_EQUAL b + a >>> a + b #doctest: +SYMPY_EQUAL a + b >>> a + b #doctest: +SYMPY_EQUAL +NOT_EQUAL a + c >>> a + b #doctest: +SYMPY_EQUAL +NOT_EQUAL a - b """ def check_fp_equal(): """ Check floating point equal >>> 0.12345678 #doctest: +FP_6DP 0.1234569 >>> 0.12345678 #doctest: +FP_6DP +NOT_EQUAL 0.1234564 >>> 0.12345678 #doctest: +FP_4DP 0.1235 >>> 0.12345678 #doctest: +FP_6DP +NOT_EQUAL 0.1235 """ def check_array_repr(): """ Stripping of array repr >>> arr = np.arange(5, dtype='i2') The test should match with and without the array repr >>> arr #doctest: +STRIP_ARRAY_REPR [0, 1, 2, 3, 4] >>> arr #doctest: +STRIP_ARRAY_REPR array([0, 1, 2, 3, 4], dtype=int16) """ def check_combinations(): """ Check the processing combines as expected >>> 0.33333 #doctest: +SYMPY_EQUAL +NOT_EQUAL 0.3333 >>> 0.33333 #doctest: +SYMPY_EQUAL +FP_4DP 0.3333 >>> arr = np.arange(5, dtype='i2') This next will not sympify unless the array repr is removed >>> arr #doctest: +STRIP_ARRAY_REPR +SYMPY_EQUAL array([0, 1, 2, 3, 4], dtype=int16) """ if __name__ == '__main__': # Run tests outside nipy test rig import sys import nose from nipy.testing.doctester import NipyDoctest argv = [sys.argv[0], __file__, '--with-nipydoctest'] + sys.argv[1:] nose.core.TestProgram(argv=argv, addplugins=[NipyDoctest()])
{"hexsha": "533fd95475ac173862b078394afa3c3287d4965d", "size": 3188, "ext": "py", "lang": "Python", "max_stars_repo_path": "nipy/testing/tests/test_doctesting.py", "max_stars_repo_name": "neurospin/nipy", "max_stars_repo_head_hexsha": "cc54600a0dca1e003ad393bc05c46f91eef30a68", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-03-08T15:01:06.000Z", "max_stars_repo_stars_event_max_datetime": "2016-03-08T15:01:06.000Z", "max_issues_repo_path": "nipy/testing/tests/test_doctesting.py", "max_issues_repo_name": "fabianp/nipy", "max_issues_repo_head_hexsha": "40e89f3ca7f34df05631623807993026134e6de3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nipy/testing/tests/test_doctesting.py", "max_forks_repo_name": "fabianp/nipy", "max_forks_repo_head_hexsha": "40e89f3ca7f34df05631623807993026134e6de3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.609929078, "max_line_length": 71, "alphanum_fraction": 0.6116687578, "include": true, "reason": "import numpy,from sympy", "num_tokens": 998}
# -*- coding: utf-8 -*-
"""
Created on Fri Jan  3 08:55:10 2020

@author: akurnizk
"""

import utm
import csv
import math
import flopy
import sys, os
import calendar
import dateutil
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.rc('xtick', labelsize=22)
mpl.rc('ytick', labelsize=22)
mpl.rcParams.update({'font.size': 22})
mpl.rcParams['pdf.fonttype'] = 42
import moviepy.editor as mpy
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import flopy.utils.binaryfile as bf

cgw_code_dir = 'E:\Python KMB - CGW'  # Location of BitBucket folder containing cgw folder
sys.path.insert(0, cgw_code_dir)

from mpmath import *
from matplotlib import pylab
from moviepy.editor import *
from scipy.io import loadmat
from scipy.optimize import fsolve
from shapely.geometry import Point
from datetime import datetime, time, timedelta
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)

# Assign name and create modflow model object
work_dir = os.path.join('E:\Herring Models\Seasonal')
data_dir = os.path.join('E:\Data')

mean_sea_level = 0.843  # Datum in meters at closest NOAA station (8447435), Chatham, Lydia Cove MA
# https://tidesandcurrents.noaa.gov/datums.html?units=1&epoch=0&id=8447435&name=Chatham%2C+Lydia+Cove&state=MA

#%% To Do
# Compare sea level measurements at Boston, Provincetown, and outside dike.
# Find land levels
#

#%% Loading Information from HR Dike Sensors (Make sure times are in EDT)

with open(os.path.join(data_dir, "General Dike Data", "USGS 011058798 Herring R at Chequessett Neck Rd.txt")) as f:
    reader = csv.reader(f, delimiter="\t")
    HR_dike_all_info = list(reader)

HR_dike_lev_disch_cond = HR_dike_all_info[32:]
HR_dike_all_df = pd.DataFrame(HR_dike_lev_disch_cond[2:], columns=HR_dike_lev_disch_cond[0])
HR_dike_all_df.drop(HR_dike_all_df.columns[[0, 1, 3, 5, 7, 9, 11, 13]], axis=1, inplace=True)
HR_dike_all_df.columns = ["datetime", "Gage height, ft, Ocean side", "Discharge, cfs", "Gage height, ft, HR side",
                          "Spec Con, microsiemens/cm, HR side", "Spec Con, microsiemens/cm, Ocean side"]

# HR_dike_all_df = HR_dike_all_df.replace(r'^\s*$', np.nan, regex=True)
HR_dike_all_df = HR_dike_all_df.replace("Eqp", '', regex=True)

HR_dike_all_df["datetime"] = pd.to_datetime(HR_dike_all_df["datetime"])
HR_dike_all_df["Gage height, ft, Ocean side"] = pd.to_numeric(HR_dike_all_df["Gage height, ft, Ocean side"])
HR_dike_all_df["Discharge, cfs"] = pd.to_numeric(HR_dike_all_df["Discharge, cfs"])
HR_dike_all_df["Gage height, ft, HR side"] = pd.to_numeric(HR_dike_all_df["Gage height, ft, HR side"])
HR_dike_all_df["Spec Con, microsiemens/cm, HR side"] = pd.to_numeric(HR_dike_all_df["Spec Con, microsiemens/cm, HR side"])
HR_dike_all_df["Spec Con, microsiemens/cm, Ocean side"] = pd.to_numeric(HR_dike_all_df["Spec Con, microsiemens/cm, Ocean side"])

# Merging Duplicate Entries
HR_dike_all_df.set_index('datetime', inplace=True)
HR_dike_all_df = HR_dike_all_df.mean(level=0)
HR_dike_all_df.reset_index(inplace=True)

HR_dike_lev_disch_ft = HR_dike_all_df[["datetime", "Gage height, ft, Ocean side", "Gage height, ft, HR side", "Discharge, cfs"]]
HR_dike_lev_disch_m = HR_dike_lev_disch_ft.copy()
HR_dike_lev_disch_m.columns = ["datetime", "Gage height, m, Ocean side", "Gage height, m, HR side", "Discharge, cms"]
HR_dike_lev_disch_m["Gage height, m, Ocean side"] = HR_dike_lev_disch_ft["Gage height, ft, Ocean side"]*0.3048
HR_dike_lev_disch_m["Gage height, m, HR side"] = HR_dike_lev_disch_ft["Gage height, ft, HR side"]*0.3048
HR_dike_lev_disch_m["Discharge, cms"] = HR_dike_lev_disch_ft["Discharge, cfs"]*0.02832
# HR_dike_all_df = HR_dike_all_df.fillna('')

x_datenum_dike = mdates.date2num(HR_dike_lev_disch_m["datetime"])
HR_dike_lev_disch_m.insert(1, "datenum", x_datenum_dike, True)

ax = HR_dike_lev_disch_m.plot.scatter(x="datenum", y="Gage height, m, Ocean side", color='LightBlue',
                                      label='Gage height, m, Ocean side')
# ax = HR_dike_lev_disch_m.plot.scatter(x="datenum", y="Gage height, m, HR side", color='LightGreen', label='Gage height, m, HR side')
HR_dike_lev_disch_m.plot.scatter(x="datenum", y="Gage height, m, HR side", color='LightGreen',
                                 label='Gage height, m, HR side', ax=ax)
HR_dike_lev_disch_m.plot.scatter(x="datenum", y="Discharge, cms", color='Turquoise', label='Discharge, cms', ax=ax)
# ax = HR_dike_lev_disch_m.plot.scatter(x="datenum", y="Discharge, cms", color='Turquoise', label='Discharge, cms')

# Show X-axis major tick marks as dates
loc = mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel('Date', fontsize=22)
plt.ylabel('Elevation (m), Discharge (m^3/s)', fontsize=22)
plt.legend()

#%% Loading Information from HR CTD Sensors (Make sure times are in EDT)

with open(os.path.join(data_dir, "General Dike Data", "Water_Elevation,_NAVD88-File_Import-01-22-2020_15-04.txt")) as f:
    reader = csv.reader(f, delimiter="\t")
    HR_CTD_all_info = list(reader)

HR_CTD_lev = HR_CTD_all_info[1:]
HR_CTD_all_df = pd.DataFrame(HR_CTD_lev[2:], columns=HR_CTD_lev[0])
HR_CTD_all_df.drop(HR_CTD_all_df.columns[[0, 2, 4]], axis=1, inplace=True)
HR_CTD_all_df = HR_CTD_all_df.rename(columns={"Time (MDT to EDT)": "datetime"})

# HR_CTD_all_df = HR_CTD_all_df.replace(r'^s*$', np.nan, regex=True)
# HR_CTD_all_df = HR_CTD_all_df.replace("Eqp", '', regex=True)

HR_CTD_all_df["datetime"] = pd.to_datetime(HR_CTD_all_df["datetime"])
HR_CTD_all_df["High Toss Water Level, NAVD88"] = pd.to_numeric(HR_CTD_all_df["High Toss Water Level, NAVD88"])
HR_CTD_all_df["CNR U/S Water Level, NAVD88"] = pd.to_numeric(HR_CTD_all_df["CNR U/S Water Level, NAVD88"])
HR_CTD_all_df["Dog Leg Water Level, NAVD88"] = pd.to_numeric(HR_CTD_all_df["Dog Leg Water Level, NAVD88"])
HR_CTD_all_df["Old Saw Water Level, NAVD88"] = pd.to_numeric(HR_CTD_all_df["Old Saw Water Level, NAVD88"])

# Merging Duplicate Entries
HR_CTD_all_df.set_index('datetime', inplace=True)
HR_CTD_all_df = HR_CTD_all_df.mean(level=0)
HR_CTD_all_df.reset_index(inplace=True)

# Filtering
HR_CTD_all_df["High Toss Water Level, NAVD88"][HR_CTD_all_df["High Toss Water Level, NAVD88"] > 1.00] = np.nan
HR_CTD_all_df["High Toss Water Level, NAVD88"][HR_CTD_all_df["High Toss Water Level, NAVD88"] < -0.67] = np.nan
HR_CTD_all_df["CNR U/S Water Level, NAVD88"][HR_CTD_all_df["CNR U/S Water Level, NAVD88"] < -0.90] = np.nan
HR_CTD_all_df["CNR U/S Water Level, NAVD88"][HR_CTD_all_df["CNR U/S Water Level, NAVD88"] > 0.55] = np.nan
HR_CTD_all_df["Old Saw Water Level, NAVD88"][HR_CTD_all_df["Old Saw Water Level, NAVD88"] < -2.14] = np.nan

HR_CTD_lev_m = HR_CTD_all_df[["datetime", "Old Saw Water Level, NAVD88", "CNR U/S Water Level, NAVD88",
                              "Dog Leg Water Level, NAVD88", "High Toss Water Level, NAVD88"]]
HR_CTD_lev_m.columns = ["datetime", "Water Level, m, Old Saw", "Water Level, m, CNR U/S", "Water Level, m, Dog Leg",
                        "Water Level, m, High Toss"]

x_datenum_CTD = mdates.date2num(HR_CTD_lev_m["datetime"])
HR_CTD_lev_m.insert(1, "datenum", x_datenum_CTD, True)

ax = HR_CTD_lev_m.plot.scatter(x="datenum", y="Water Level, m, Old Saw", color='DarkBlue',
                               label='Water Level, m, Old Saw')
# HR_CTD_lev_m.plot.scatter(x="datenum", y="Water Level, m, Old Saw", color='DarkBlue', label='Water Level, m, Old Saw', ax=ax)
HR_CTD_lev_m.plot.scatter(x="datenum", y="Water Level, m, CNR U/S", color='DarkGreen', label='Water Level, m, CNR U/S', ax=ax)
HR_CTD_lev_m.plot.scatter(x="datenum", y="Water Level, m, Dog Leg", color='DarkRed', label='Water Level, m, Dog Leg', ax=ax)
HR_CTD_lev_m.plot.scatter(x="datenum", y="Water Level, m, High Toss", color='DarkOrange', label='Water Level, m, High Toss', ax=ax)

# Show X-axis major tick marks as dates
loc = mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel('Date', fontsize=22)
plt.ylabel('Elevation (m), Discharge (m^3/s)', fontsize=22)
# plt.ylabel('Elevation (m)', fontsize=22)
plt.legend(loc='upper right')
# plt.legend(loc='lower right')

#%% Combining Information from Dike and CTD, Interpolating CTD to multiples of 5 min.

HR_dike_lev_disch_m_di = HR_dike_lev_disch_m.set_index('datetime')
# HR_CTD_lev_m_di = HR_CTD_lev_m.set_index('datetime')

HR_dike_CTD_lev_disch_m = pd.merge_ordered(HR_dike_lev_disch_m, HR_CTD_lev_m)
HR_dike_CTD_lev_disch_m_di = HR_dike_CTD_lev_disch_m.set_index('datetime')
HR_dike_CTD_lev_disch_m_di.interpolate(method='index', limit=1, inplace=True)
# HR_dike_CTD_lev_disch_m_di.drop(HR_dike_CTD_lev_disch_m_di.columns[[0]], axis=1, inplace=True)
HR_dike_CTD_lev_disch_m_di_resam = HR_dike_CTD_lev_disch_m_di.loc[HR_dike_lev_disch_m_di.index]

ax = HR_dike_CTD_lev_disch_m_di_resam.plot.scatter(x="datenum", y="Gage height, m, Ocean side", color='LightBlue',
                                                   label='Gage height, m, Ocean side')
# ax = HR_dike_CTD_lev_disch_m_di_resam.plot.scatter(x="datenum", y="Gage height, m, HR side", color='LightGreen', label='Gage height, m, HR side')
HR_dike_CTD_lev_disch_m_di_resam.plot.scatter(x="datenum", y="Gage height, m, HR side", color='LightGreen',
                                              label='Gage height, m, HR side', ax=ax)
HR_dike_CTD_lev_disch_m_di_resam.plot.scatter(x="datenum", y="Discharge, cms", color='Turquoise',
                                              label='Discharge, cms', ax=ax)
HR_dike_CTD_lev_disch_m_di_resam.plot.scatter(x="datenum", y="Water Level, m, Old Saw", color='DarkBlue',
                                              label='Water Level, m, Old Saw', ax=ax)
# HR_dike_CTD_lev_disch_m_di_resam.plot.scatter(x="datenum", y="Water Level, m, Old Saw", color='DarkBlue', label='Water Level, m, Old Saw', ax=ax)
HR_dike_CTD_lev_disch_m_di_resam.plot.scatter(x="datenum", y="Water Level, m, CNR U/S", color='DarkGreen',
                                              label='Water Level, m, CNR U/S', ax=ax)
HR_dike_CTD_lev_disch_m_di_resam.plot.scatter(x="datenum", y="Water Level, m, Dog Leg", color='DarkRed',
                                              label='Water Level, m, Dog Leg', ax=ax)
HR_dike_CTD_lev_disch_m_di_resam.plot.scatter(x="datenum", y="Water Level, m, High Toss", color='DarkOrange',
                                              label='Water Level, m, High Toss', ax=ax)

# Show X-axis major tick marks as dates
loc = mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel('Date', fontsize=22)
plt.ylabel('Elevation (m), Discharge (m^3/s)', fontsize=22)
# plt.ylabel('Elevation (m)', fontsize=22)
plt.legend(loc='upper right')
# plt.legend(loc='lower right')

#%% Newton-Raphson Method (to be used in determining gate opening angle)

def newton(f, Df, x0, epsilon, max_iter):
    '''Approximate solution of f(x)=0 by Newton's method.

    Parameters
    ----------
    f : function
        Function for which we are searching for a solution f(x)=0.
    Df : function
        Derivative of f(x).
    x0 : number
        Initial guess for a solution f(x)=0.
    epsilon : number
        Stopping criteria is abs(f(x)) < epsilon.
    max_iter : integer
        Maximum number of iterations of Newton's method.

    Returns
    -------
    xn : number
        Implement Newton's method: compute the linear approximation
        of f(x) at xn and find the x intercept by the formula
            x = xn - f(xn)/Df(xn)
        Continue until abs(f(xn)) < epsilon and return xn.
        If Df(xn) == 0, return None. If the number of iterations
        exceeds max_iter, then return None.

    Examples
    --------
    >>> f = lambda x: x**2 - x - 1
    >>> Df = lambda x: 2*x - 1
    >>> newton(f,Df,1,1e-8,10)
    Found solution after 5 iterations.
    1.618033988749989
    '''
    xn = x0
    for n in range(0, max_iter):
        fxn = f(xn)
        if abs(fxn) < epsilon:
            print('Found solution after', n, 'iterations.')
            return xn
        Dfxn = Df(xn)
        if Dfxn == 0:
            print('Zero derivative. No solution found.')
            return None
        xn = xn - fxn/Dfxn
    print('Exceeded maximum iterations. No solution found.')
    return None

#%% Analytical Estimation of Discharge Through Dike Using Water Levels, My Analysis (all SI) - Version 1
#
# Add option for different configurations (number/size/type of openings)?
# """
# Sources:
# Sluice-gate Discharge Equations by Prabhata K. Swamee, Journal of Irrigation and Drainage Engineering, Vol. 118
# Herring River Full Final Report, Woods Hole Group June 2012
# Hydrodynamic and Salinity Modeling for Estuarine Habitat Restoration at HR, Wellfleet, MA. Spaulding and Grilli October 2001
# (Higher frictional losses on the ebb than on the flood tide, pp. ii, and n~0.06 to 0.09 for HR bed)
# *Loss coefficients hard to justify given difference in distances between the HR basin (S&G) and measurements around the dike*
# Can solve for the "additional coefficient" (make a K array) at each point by dividing the measured discharge by everything on the RHS.
# Need to make several K arrays - one for each scenario, and take the average K of each as the fitting parameter.
# """
# inv_el_open = -1.064
# slope_culv = 0.0067
# len_culv = 20.42
# inv_el_HRside = -0.928
# sluice_bot_el = -0.579
# y_sluice_open = sluice_bot_el-inv_el_open
# A_sluice_open = y_sluice_open*L_sluice_culv
# L_sluice_culv = 1.829
# L_center_culv = 2.184
# L_left_culv = 2.007
# L_flaps_in = 1.829
# L_flaps_out = 2.057
# angle_init_flaps = 0.0872 # radians, ~ 5 degrees
# dens_seawater = 1018 # kg/m^3, average is roughly the same on both sides of the dike.
# grav = 9.81 # m/s^2
# W_gate = 2000 # Newtons -> see excel calculations using gate parts, volumes, and densities.
# h_gate = 2.317 # meters from flap gate bottom to hinge. Assume weight is uniformly distributed.
# d_hinge_to_inv = 2.286
# hinge_el_open = inv_el_open+d_hinge_to_inv
# P_invert = 0.3048 # "weir" lip height
#
# # Sluice Gate Calculations (no variable coefficients like WHG and Spaulding and Grilli (2001) have used)
# # This is from Sluice-Gate Discharge Equations by Prabhata K. Swamee
# Q_dike_sluice_calc = np.zeros_like(HR_dike_lev_disch_m["datenum"]) # Add Q to this array (Add at each index the different culvert Qs)
# for i in range(len(HR_dike_lev_disch_m)):
#     H_sea_lev = HR_dike_lev_disch_m["Gage height, m, Ocean side"][i] - inv_el_open
#     y_d_HR_lev = HR_dike_lev_disch_m["Gage height, m, HR side"][i] - inv_el_open
#     crossover_sub_free_neg = 0.81*y_d_HR_lev*(y_d_HR_lev/y_sluice_open)**0.72 # from Swamee paper
#     crossover_sub_free_pos = 0.81*H_sea_lev*(H_sea_lev/y_sluice_open)**0.72
#     A_sluice_culv_HRside = (HR_dike_lev_disch_m["Gage height, m, HR side"][i] - inv_el_HRside)*L_sluice_culv
#     A_sluice_culv_oceanside = H_sea_lev*L_sluice_culv
#     if (H_sea_lev > y_d_HR_lev): # If sea level is greater than HR level -> Negative Flow
#         if (H_sea_lev > y_sluice_open): # If sea level is above sluice opening, apply sluice-gate discharge equations.
#             # High Tide Culvert Free Flow, Negative Direction (Flaps Closed)
#             if (H_sea_lev >= crossover_sub_free_neg):
#                 Q_dike_sluice_calc[i] = -0.864*A_sluice_open*math.sqrt(grav*H_sea_lev)*((H_sea_lev-y_sluice_open)/
#                                         (H_sea_lev+15*y_sluice_open))**0.072
#             # High Tide Submerged Flow, Negative Direction (Flaps Closed)
#             if (H_sea_lev > y_d_HR_lev) & (H_sea_lev < crossover_sub_free_neg):
#                 Q_dike_sluice_calc[i] = -0.864*A_sluice_open*math.sqrt(grav*H_sea_lev)*((H_sea_lev-y_sluice_open)/
#                                         (H_sea_lev+15*y_sluice_open))**0.072*(H_sea_lev-y_d_HR_lev)**0.7/(0.32*
#                                         (0.81*y_d_HR_lev*(y_d_HR_lev/y_sluice_open)**0.72-H_sea_lev)**0.7+(H_sea_lev-y_d_HR_lev)**0.7)
#         else: # If H is less than y, assume underflow @ sluice and just use energy equation to determine Q(-).
#             # Do Manning for negative flow?
#             Q_dike_sluice_calc[i] = -math.sqrt((H_sea_lev-y_d_HR_lev)*2*grav*A_sluice_culv_HRside**2) # Assuming no head loss
#     elif (H_sea_lev <= y_d_HR_lev): # If sea level is less than HR level -> Positive Flow
#         if (y_d_HR_lev > y_sluice_open): # If HR level is above sluice opening, apply sluice-gate discharge equations.
#             # Low Tide Culvert Free Flow, Positive Direction (Flaps Open)
#             if (y_d_HR_lev >= crossover_sub_free_pos):
#                 Q_dike_sluice_calc[i] = 0.864*A_sluice_open*math.sqrt(grav*y_d_HR_lev)*((y_d_HR_lev-y_sluice_open)/
#                                         (y_d_HR_lev+15*y_sluice_open))**0.072
#             # Low Tide Submerged Flow, Positive Direction (Flaps Open)
#             if (y_d_HR_lev > H_sea_lev) & (y_d_HR_lev < crossover_sub_free_pos):
#                 Q_dike_sluice_calc[i] = 0.864*A_sluice_open*math.sqrt(grav*y_d_HR_lev)*((y_d_HR_lev-y_sluice_open)/
#                                         (y_d_HR_lev+15*y_sluice_open))**0.072*(y_d_HR_lev-H_sea_lev)**0.7/(0.32*
#                                         (0.81*H_sea_lev*(H_sea_lev/y_sluice_open)**0.72-y_d_HR_lev)**0.7+(y_d_HR_lev-H_sea_lev)**0.7)
#         else: # If y_d is less than y, assume underflow @ sluice and just use energy equation to determine Q(+).
#             # Do weir? - only applies on HR discharge. Do Manning?
#             Q_dike_sluice_calc[i] = math.sqrt((y_d_HR_lev-H_sea_lev)*2*grav*A_sluice_culv_oceanside**2) # Assuming no head loss
#     else:
#         Q_dike_sluice_calc[i] = np.nan
#
# # Center Flap Gate Calculations
# Q_dike_centerflap_calc = np.zeros_like(HR_dike_lev_disch_m["datenum"])
# C_one = 1.375 # Discharge coefficient for supercritical weir flow
# C_two = 1.375 # Discharge coefficient for subcritical weir flow
# # "Sluice" portion of flap gate only needs discharge coefficients during ebb tides (not flood, when tide moves inland)
# # Does ebb tide mean sluice upstream or downstream? Assume downstream -> smaller coefficients, smaller Q
# C_three = 0.6
# C_four = 0.8
# for i in range(len(HR_dike_lev_disch_m)):
#     d_hinge_to_H = hinge_el_open - HR_dike_lev_disch_m["Gage height, m, Ocean side"][i]
#     d_hinge_to_y_d = hinge_el_open - HR_dike_lev_disch_m["Gage height, m, HR side"][i]
#     H_sea_lev = HR_dike_lev_disch_m["Gage height, m, Ocean side"][i] - inv_el_open
#     y_d_HR_lev = HR_dike_lev_disch_m["Gage height, m, HR side"][i] - inv_el_open
#     A_center_flap_HRside = y_d_HR_lev*L_flaps_in
#     # A_center_flap_oceanside = H_sea_lev*L_flaps_out
#     # F_gate_HRside = 0.5*dens_seawater*grav*A_center_flap_HRside*y_d_HR_lev
#     # F_gate_oceanside = 0.5*dens_seawater*grav*A_center_flap_oceanside*H_sea_lev
#     # theta_center = np.arcsin((F_gate_HRside - F_gate_oceanside)/(W_gate))
#     # A_center_flap_open = complex geometry
#     # Using Newton Method (Need to fix )
#     # p = lambda theta: -W_gate*sin(theta-angle_init_flaps)*h_gate/dens_seawater/grav - L_flaps_out*(h_gate**2*
#     #                   cos(theta-angle_init_flaps)**2 - 2*h_gate*d_hinge_to_H - d_hinge_to_H**2/
#     #                   cos(theta-angle_init_flaps))*(h_gate-(1/3)*(h_gate-d_hinge_to_H/
#     #                   cos(theta-angle_init_flaps))) + L_flaps_in*(h_gate**2*cos(theta+
#     #                   angle_init_flaps)**2-2*h_gate*d_hinge_to_y_d - d_hinge_to_y_d**2/
#     #                   cos(theta-angle_init_flaps))*(h_gate-(1/3)*(h_gate - d_hinge_to_y_d/
#     #                   cos(theta-angle_init_flaps)))
#     # Dp = lambda theta: -W_gate*cos(theta-angle_init_flaps)*h_gate/dens_seawater/grav - L_flaps_out*((1/3)*(h_gate**2*
#     #                   cos(theta-angle_init_flaps)**2 - 2*h_gate*d_hinge_to_H - d_hinge_to_H**2/
#     #                   cos(theta-angle_init_flaps))*(d_hinge_to_H*tan(theta-angle_init_flaps)*
#     #                   sec(theta-angle_init_flaps)) + (h_gate - (1/3)*(h_gate-d_hinge_to_H/
#     #                   cos(theta-angle_init_flaps)))*(-2*h_gate**2*sin(theta+
#     #                   angle_init_flaps)*cos(theta-angle_init_flaps)-d_hinge_to_H**2*
#     #                   tan(theta-angle_init_flaps)*
#     #                   sec(theta-angle_init_flaps)))+L_flaps_in*((1/3)*
#     #                   (h_gate**2*cos(theta-angle_init_flaps)**2-2*h_gate*
#     #                   d_hinge_to_y_d - d_hinge_to_y_d**2/
#     #                   cos(theta-angle_init_flaps))*(d_hinge_to_y_d*
#     #                   tan(theta-angle_init_flaps)*sec(theta+
#     #                   angle_init_flaps)) + (h_gate - (1/3)*
#     #                   (h_gate-d_hinge_to_y_d/
#     #                   cos(theta-angle_init_flaps)))*
#     #                   (-2*h_gate**2*sin(theta-angle_init_flaps)*
#     #                   cos(theta-angle_init_flaps)-
#     #                   d_hinge_to_y_d**2*
#     #                   tan(theta-angle_init_flaps)*
#     #                   sec(theta-angle_init_flaps)))
#     # approx = newton(p,Dp,0,1e-10,10)
#     # print(approx)
#     ### End Using Newton Method
#     # Using SciPy fsolve
#     def f(theta):
#         # vn = theta
#         # np_sin = np.frompyfunc(mp.sin,1,1)
#         # np_cos = np.frompyfunc(mp.cos,1,1)
#         return -W_gate*np.sin(theta+angle_init_flaps)*h_gate/dens_seawater/grav - L_flaps_out*(h_gate**2*
#             np.cos(theta+angle_init_flaps)**2 - 2*h_gate*d_hinge_to_H*np.cos(theta+angle_init_flaps) + d_hinge_to_H**2/
#             np.cos(theta+angle_init_flaps))*(h_gate-(1/3)*(h_gate-d_hinge_to_H/
#             np.cos(theta+angle_init_flaps))) + L_flaps_in*(h_gate**2*np.cos(theta+
#             angle_init_flaps)**2-2*h_gate*d_hinge_to_y_d*np.cos(theta+angle_init_flaps) + d_hinge_to_y_d**2/
#             np.cos(theta+angle_init_flaps))*(h_gate-(1/3)*(h_gate - d_hinge_to_y_d/
#             np.cos(theta+angle_init_flaps)))
#     root = float(fsolve(f, 0)) # use root finder to find angle closest to zero (use ifs to deal with negative angles)
#     # potential issue with root finder: if there is a negative root closer to zero, but a positive root just slightly further away.
#     # may not be an issue since any negative roots would, one assumes, be less than 5 degrees, keeping the gate closed,
#     # and 1 is closer to 0 than -6 is, so it would just converge there if it had the opportunity (-6+5=-1, same result).
#     if h_gate*np.cos(root+angle_init_flaps) > d_hinge_to_H: # for gate to still be submerged, underflow is submerged sluice
#         if root <= 0: # if theta is less than or equal to zero, no flow (include leakiness?)
#             Q_dike_centerflap_calc[i] = 0
#         elif root > 0:
#             # calculate area that flow is passing through
#             A_both_sides = (d_hinge_to_y_d+y_d_HR_lev)**2*(np.tan(theta+angle_init_flaps)-np.tan(angle_init_flaps)) - d_hinge_to_y_d**2*(np.tan(theta+angle_init_flaps)-np.tan(angle_init_flaps))
#             A_under = L_flaps_out*(d_hinge_to_y_d+y_d_HR_lev)*(np.tan(theta+angle_init_flaps) - np.tan(angle_init_flaps))
#             A_sides_lower = (d_hinge_to_H+H_sea_lev)**2*(np.tan(theta+angle_init_flaps)-np.tan(angle_init_flaps)) - d_hinge_to_H**2*(np.tan(theta+angle_init_flaps)-np.tan(angle_init_flaps))
#             A_sides_upper = A_both_sides - A_sides_lower
#             A_sluice_calcs = A_sides_lower + A_under
#             A_weir_calcs = A_sides_upper
#             if y_d_HR_lev > H_sea_lev: # determine Q from weir equation (upper area) and submerged sluice (lower area)
#                 # as long as HR levels are greater than sea levels. Assume area between invert and gate opening is smallest.
#                 # Too much flow with no losses
#                 # Q_dike_centerflap_calc[i] = np.sqrt((y_d_HR_lev-H_sea_lev)*2*grav*A_tot**2)
#                 # Q_dike_centerflap_calc[i] = np.sqrt((y_d_HR_lev-H_sea_lev)*2*grav*A_center_flap_HRside**2)
#                 if (y_d_HR_lev < L_flaps_in) & (H_sea_lev/y_d_HR_lev < 0.66): # supercritical weir flow, from WHG report
#                     # Q_weir_part = C_one*A_weir_calcs/(y_d_HR_lev-H_sea_lev)*(2/3)*np.sqrt((2/3)*grav)*y_d_HR_lev**(2/3)
#                     # Q_sluice_part = C_four*A_sluice_calcs*np.sqrt(2*grav*(y_d_HR_lev-H_sea_lev))
#                     # Q_dike_centerflap_calc[i] = Q_weir_part + Q_sluice_part
#                 else: # subcritical weir flow
#                     # Q_weir_part = C_two*A_weir_calcs/(y_d_HR_lev-H_sea_lev)*H_sea_lev*np.sqrt(2*grav*(y_d_HR_lev-H_sea_lev))
#                     # Q_sluice_part = C_four*A_sluice_calcs*np.sqrt(2*grav*(y_d_HR_lev-H_sea_lev))
#                     # Q_dike_centerflap_calc[i] = Q_weir_part + Q_sluice_part
#             else:
#                 Q_dike_centerflap_calc[i] = np.nan
#         else: # root is a nan
#             Q_dike_centerflap_calc[i] = np.nan
#     else: # assume gate maximum opening is the surface of the water on the ocean side, underflow becomes free sluice
#         root = np.arccos(d_hinge_to_H/h_gate)-angle_init_flaps
#         # calculate area that flow is passing through
#         A_both_sides = (d_hinge_to_y_d+y_d_HR_lev)**2*np.tan(theta+angle_init_flaps)-(d_hinge_to_y_d+y_d_HR_lev)**2*np.tan(angle_init_flaps)-d_hinge_to_y_d**2*np.tan(theta+angle_init_flaps)-d_hinge_to_y_d**2*np.tan(angle_init_flaps)
#         A_under = L_flaps_out*(d_hinge_to_y_d+y_d_HR_lev)*(np.tan(theta+angle_init_flaps) - np.tan(angle_init_flaps))
#         A_sides_lower = (d_hinge_to_H+H_sea_lev)**2*(np.tan(theta+angle_init_flaps)-np.tan(angle_init_flaps)) - d_hinge_to_H**2*(np.tan(theta+angle_init_flaps)-np.tan(angle_init_flaps))
#         A_sides_upper = A_both_sides - A_sides_lower
#         A_sluice_calcs = A_sides_lower + A_under
#         A_weir_calcs = A_sides_upper
#         if y_d_HR_lev > H_sea_lev: # determine Q from weir equation (upper area) and free sluice (lower area)
#             # as long as HR levels are greater than sea levels. Assume area between invert and gate opening is smallest.
#             # Too much flow with no losses
#             # Q_dike_centerflap_calc[i] = np.sqrt((y_d_HR_lev-H_sea_lev)*2*grav*A_tot**2)
#             # Q_dike_centerflap_calc[i] = np.sqrt((y_d_HR_lev-H_sea_lev)*2*grav*A_center_flap_HRside**2)
#             if (y_d_HR_lev < L_flaps_in) & (H_sea_lev/y_d_HR_lev < 0.66): # supercritical weir flow, from WHG report
#                 # Q_weir_part = C_one*A_weir_calcs/(y_d_HR_lev-H_sea_lev)*(2/3)*np.sqrt((2/3)*grav)*y_d_HR_lev**(2/3)
#                 # Q_sluice_part = C_three*A_sluice_calcs*np.sqrt(2*grav*y_d_HR_lev)
#                 # Q_dike_centerflap_calc[i] = Q_weir_part + Q_sluice_part
#             else: # subcritical weir flow
#                 # Q_weir_part = C_two*A_weir_calcs/(y_d_HR_lev-H_sea_lev)*H_sea_lev*np.sqrt(2*grav*(y_d_HR_lev-H_sea_lev))
#                 # Q_sluice_part = C_three*A_sluice_calcs*np.sqrt(2*grav*y_d_HR_lev)
#                 # Q_dike_centerflap_calc[i] = Q_weir_part + Q_sluice_part
#         else:
#             Q_dike_centerflap_calc[i] = np.nan
#     # if H < y_d, use algorithm to determine angle, else use "leakiness"?
#     # or condition if negative or domain error? check some points.
#
# # Left Flap Gate Has Same Conditions as Center (smaller culvert, but same gate size)
# Q_dike_leftflap_calc = Q_dike_centerflap_calc.copy()
# Q_total = Q_dike_leftflap_calc + Q_dike_centerflap_calc + Q_dike_sluice_calc
# # Should I be using Manning instead of Energy Eqn to determine Q for open-channel flow through dike?

#%% Analytical Estimation of Discharge Through Dike Using Water Levels, My Analysis (all SI)

# Add option for different configurations (number/size/type of openings)?

"""
Sources:
Sluice-gate Discharge Equations by Prabhata K. Swamee, Journal of Irrigation and Drainage Engineering, Vol. 118
Herring River Full Final Report, Woods Hole Group June 2012
Hydrodynamic and Salinity Modeling for Estuarine Habitat Restoration at HR, Wellfleet, MA. Spaulding and Grilli October 2001
(Higher frictional losses on the ebb than on the flood tide, pp. ii, and n~0.06 to 0.09 for HR bed)
*Loss coefficients hard to justify given difference in distances between the HR basin (S&G) and measurements around the dike*
Can solve for the "additional coefficient" (make a K array) at each point by dividing the measured discharge by everything on the RHS.
Need to make several K arrays - one for each scenario, and take the average K of each as the fitting parameter.
"""

# slope_culv = 0.0067
# len_culv = 20.42
# L_center_culv = 2.184
# L_left_culv = 2.007
# P_invert = 0.3048 # "weir" lip height

inv_el_open = -1.064
inv_el_HRside = -0.928
sluice_bot_el = -0.579
y_sluice_open = sluice_bot_el - inv_el_open
L_sluice_culv = 1.829
A_sluice_open = y_sluice_open*L_sluice_culv
L_flaps_in = 1.829
L_flaps_out = 2.057
angle_init_flaps = 0.0872  # radians, ~ 5 degrees
dens_seawater = 1018  # kg/m^3, average is roughly the same on both sides of the dike.
grav = 9.81  # m/s^2
W_gate = 2000  # Newtons -> see excel calculations using gate parts, volumes, and densities.
h_gate = 2.317  # meters from flap gate bottom to hinge. Assume weight is uniformly distributed.
d_hinge_to_inv = 2.286
hinge_el_open = inv_el_open + d_hinge_to_inv

# HL_max = 0.6 # maximum headloss, meters, from WHG report
HL_max = 0.9  # 1.17 # maximum headloss, meters, tester (assumed to be maximum difference in levels (HR - ocean))
HLsluice_max = 1.0  # maximum sluice headloss/gain, meters, tests (assumed at ~ half maximum difference in levels (ocean - HR))
# D_HL = 0.884 # headloss parameter, meters, from WHG report
D_HL = 0.4  # 0.41 # headloss parameter, meters, tester (mean of the means of HR and Ocean levels)
Dsluice_HL = 1.0  # optimize
# n = 0.01
# C_d_ebb_update = 1
# C_d_ebb_std_array = []
# y_discharge_calc_maxes_mean_array = []
# while D_HL > 0.2: # For optimizing flap gate head loss coefficients.
#     D_HL = D_HL - n

# Initialize Discharge Arrays and set to nans
Q_flood_free = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_flood_transit = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_flood_submer_or = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_flood_subcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_flood_supcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_ebb_free = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_ebb_transit = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_ebb_submer_or = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_ebb_subcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_ebb_supcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_ebb_flap_subcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_ebb_flap_supcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"])
Q_flood_free[:] = np.nan
Q_flood_transit[:] = np.nan
Q_flood_submer_or[:] = np.nan
Q_flood_subcrit_weir[:] = np.nan
Q_flood_supcrit_weir[:] = np.nan
Q_ebb_free[:] = np.nan
Q_ebb_transit[:] = np.nan
Q_ebb_submer_or[:] = np.nan
Q_ebb_subcrit_weir[:] = np.nan
Q_ebb_supcrit_weir[:] = np.nan
Q_ebb_flap_subcrit_weir[:] = np.nan
Q_ebb_flap_supcrit_weir[:] = np.nan

# Initialize Discharge Coefficient Arrays and set to nans
C_Swamee = np.zeros_like(HR_dike_lev_disch_m["datenum"])  # This is from Free Flow Sluice-Gate C_d by Prabhata K. Swamee
C_d_flood_free = np.zeros_like(HR_dike_lev_disch_m["datenum"])  # This is in addition to the Swamee coefficient.
C_d_flood_transit = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_d_flood_submer_or = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_d_flood_subcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_d_flood_supcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_d_ebb_free = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_d_ebb_transit = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_d_ebb_submer_or = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_d_ebb_subcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_d_ebb_supcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_d_ebb_flap_subcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_d_ebb_flap_supcrit_weir = np.zeros_like(HR_dike_lev_disch_m["datenum"]) C_Swamee[:] = np.nan C_d_flood_free[:] = np.nan C_d_flood_transit[:] = np.nan C_d_flood_submer_or[:] = np.nan C_d_flood_subcrit_weir[:] = np.nan C_d_flood_supcrit_weir[:] = np.nan C_d_ebb_free[:] = np.nan C_d_ebb_transit[:] = np.nan C_d_ebb_submer_or[:] = np.nan C_d_ebb_subcrit_weir[:] = np.nan C_d_ebb_supcrit_weir[:] = np.nan C_d_ebb_flap_subcrit_weir[:] = np.nan C_d_ebb_flap_supcrit_weir[:] = np.nan theta_ebb_flap_deg = np.zeros_like(HR_dike_lev_disch_m["datenum"]) theta_ebb_flap_deg[:] = np.nan HL = np.zeros_like(HR_dike_lev_disch_m["datenum"]) HL[:] = np.nan HLsluice = np.zeros_like(HR_dike_lev_disch_m["datenum"]) HLsluice[:] = np.nan flow_frac_sluice_culv = np.zeros_like(HR_dike_lev_disch_m["datenum"]) flow_frac_sluice_culv[:] = np.nan flow_frac_center_culv = np.zeros_like(HR_dike_lev_disch_m["datenum"]) flow_frac_center_culv[:] = np.nan flow_frac_left_culv = flow_frac_center_culv.copy() for i in range(len(HR_dike_lev_disch_m)): # Levels relative to culvert invert at sluice/flaps. H_sea_lev = HR_dike_lev_disch_m["Gage height, m, Ocean side"][i] - inv_el_open y_d_HR_lev = HR_dike_lev_disch_m["Gage height, m, HR side"][i] - inv_el_open # Vertical distances from flap gate hinge to water levels. 
d_hinge_to_H = hinge_el_open - HR_dike_lev_disch_m["Gage height, m, Ocean side"][i] d_hinge_to_y_d = hinge_el_open - HR_dike_lev_disch_m["Gage height, m, HR side"][i] if (H_sea_lev > y_d_HR_lev): # If sea level is greater than HR level -> Negative Flow (Flood Tide, Flap Gates Closed) """ Test: Supercritical Broad-crested Weir/Free Sluice, Transitional, Subcritical Broad-crested Weir/Submerged Orifice """ if (y_d_HR_lev/H_sea_lev < (2/3)): # supercritical BC weir/free sluice if (H_sea_lev < y_sluice_open): # Supercritical Broad-crested Weir Flow Q_flood_supcrit_weir[i] = -(2/3)*L_sluice_culv*H_sea_lev*np.sqrt((2/3)*grav*H_sea_lev) C_d_flood_supcrit_weir[i] = HR_dike_lev_disch_m["Discharge, cms"][i]/Q_flood_supcrit_weir[i] else: # Free Sluice Flow HLsluice[i] = HLsluice_max*(1-0.5*(y_d_HR_lev+H_sea_lev)/Dsluice_HL) C_Swamee[i] = 0.611*((H_sea_lev-y_d_HR_lev)/(H_sea_lev+15*y_d_HR_lev))**0.072 Q_flood_free[i] = -A_sluice_open*np.sqrt(2*grav*(H_sea_lev-HLsluice[i])) C_d_flood_free[i] = HR_dike_lev_disch_m["Discharge, cms"][i]/Q_flood_free[i] else: if (H_sea_lev < y_sluice_open): # Subcritical Broad-crested Weir Flow Q_flood_subcrit_weir[i] = -L_sluice_culv*y_d_HR_lev*np.sqrt(2*grav*(H_sea_lev-y_d_HR_lev)) C_d_flood_subcrit_weir[i] = HR_dike_lev_disch_m["Discharge, cms"][i]/Q_flood_subcrit_weir[i] elif (y_d_HR_lev/H_sea_lev > 0.8): # Submerged Orifice Flow Q_flood_submer_or[i] = -A_sluice_open*np.sqrt(2*grav*(H_sea_lev-y_d_HR_lev)) C_d_flood_submer_or[i] = HR_dike_lev_disch_m["Discharge, cms"][i]/Q_flood_submer_or[i] else: # Transitional Flow Q_flood_transit[i] = -A_sluice_open*np.sqrt(2*grav*3*(H_sea_lev-y_d_HR_lev)) C_d_flood_transit[i] = HR_dike_lev_disch_m["Discharge, cms"][i]/Q_flood_transit[i] else: # If sea level is less than HR level -> Positive Flow (Ebb Tide, Flap Gates Open) # Center Flap Gate Calculations A_center_flap_HRside = y_d_HR_lev*L_flaps_in A_center_flap_oceanside = H_sea_lev*L_flaps_out # Should L change? # Using SciPy fsolve def f(theta): return -W_gate*np.sin(theta+angle_init_flaps)*h_gate/dens_seawater/grav - L_flaps_out*(h_gate**2* np.cos(theta+angle_init_flaps)**2 - 2*h_gate*d_hinge_to_H*np.cos(theta+angle_init_flaps) + d_hinge_to_H**2/ np.cos(theta+angle_init_flaps))*(h_gate-(1/3)*(h_gate-d_hinge_to_H/ np.cos(theta+angle_init_flaps))) + L_flaps_in*(h_gate**2*np.cos(theta+ angle_init_flaps)**2-2*h_gate*d_hinge_to_y_d*np.cos(theta+angle_init_flaps) + d_hinge_to_y_d**2/ np.cos(theta+angle_init_flaps))*(h_gate-(1/3)*(h_gate - d_hinge_to_y_d/ np.cos(theta+angle_init_flaps))) root = float(fsolve(f, 0)) # use root finder to find angle closest to zero theta_ebb_flap_deg[i] = np.rad2deg(root) # Flow fractions of total measured discharge through each culvert (NEED TO OPTIMIZE) """ Test: Supercritical/Free Sluice, Transitional, Subcritical/Submerged Orifice """ if (H_sea_lev/y_d_HR_lev < (2/3)): # supercritical BC weir/free sluice - OPTIMIZE COEFFICIENT BETWEEN FLAPS AND SLUICE! if (root > 0): HL[i] = HL_max*(1-0.5*(y_d_HR_lev+H_sea_lev)/D_HL) Q_ebb_flap_supcrit_weir[i] = (2/3)*(y_d_HR_lev+HL[i])*L_flaps_in*np.sqrt((2/3)*grav*(y_d_HR_lev+HL[i])) if (y_d_HR_lev < y_sluice_open): # Supercritical Broad-crested Weir Flow Q_ebb_supcrit_weir[i] = (2/3)*L_sluice_culv*y_d_HR_lev*np.sqrt((2/3)*grav*y_d_HR_lev) else: # Free Sluice Flow C_Swamee[i] = 0.611*((y_d_HR_lev-H_sea_lev)/(y_d_HR_lev+15*H_sea_lev))**0.072 Q_ebb_free[i] = A_sluice_open*np.sqrt(2*grav*y_d_HR_lev) else: # subcritical BC weir/submerged orifice - OPTIMIZE COEFFICIENT BETWEEN FLAPS AND SLUICE!
if (root > 0): HL[i] = HL_max*(1-0.5*(y_d_HR_lev+H_sea_lev)/D_HL) Q_ebb_flap_subcrit_weir[i] = A_center_flap_oceanside*np.sqrt(2*grav*((y_d_HR_lev+HL[i])-H_sea_lev)) if (y_d_HR_lev < y_sluice_open): # Subcritical Broad-crested Weir Flow Q_ebb_subcrit_weir[i] = L_sluice_culv*H_sea_lev*np.sqrt(2*grav*(y_d_HR_lev-H_sea_lev)) elif (H_sea_lev/y_d_HR_lev > 0.8): # Submerged Orifice Flow Q_ebb_submer_or[i] = A_sluice_open*np.sqrt(2*grav*(y_d_HR_lev-H_sea_lev)) else: # Transitional Flow Q_ebb_transit[i] = A_sluice_open*np.sqrt(2*grav*3*(y_d_HR_lev-H_sea_lev)) flow_sluice_culv = np.nansum((Q_ebb_free[i],Q_ebb_transit[i],Q_ebb_submer_or[i],Q_ebb_supcrit_weir[i],Q_ebb_subcrit_weir[i])) flow_flap_culv = np.nansum((Q_ebb_flap_supcrit_weir[i],Q_ebb_flap_subcrit_weir[i])) flow_frac_sluice_culv[i] = flow_sluice_culv/(flow_sluice_culv+2*flow_flap_culv) flow_frac_center_culv[i] = flow_flap_culv/(flow_sluice_culv+2*flow_flap_culv) flow_frac_left_culv[i] = flow_frac_center_culv[i] if (H_sea_lev/y_d_HR_lev < (2/3)): # supercritical BC weir/free sluice - OPTIMIZE COEFFICIENT BETWEEN FLAPS AND SLUICE! if (root > 0): C_d_ebb_flap_supcrit_weir[i] = flow_frac_center_culv[i]*HR_dike_lev_disch_m["Discharge, cms"][i]/Q_ebb_flap_supcrit_weir[i] if (y_d_HR_lev < y_sluice_open): # Supercritical Broad-crested Weir Flow C_d_ebb_supcrit_weir[i] = flow_frac_sluice_culv[i]*HR_dike_lev_disch_m["Discharge, cms"][i]/Q_ebb_supcrit_weir[i] else: # Free Sluice Flow C_d_ebb_free[i] = flow_frac_sluice_culv[i]*HR_dike_lev_disch_m["Discharge, cms"][i]/Q_ebb_free[i] else: # subcritical BC weir/submerged orifice - OPTIMIZE COEFFICIENT BETWEEN FLAPS AND SLUICE! if (root > 0): C_d_ebb_flap_subcrit_weir[i] = flow_frac_center_culv[i]*HR_dike_lev_disch_m["Discharge, cms"][i]/Q_ebb_flap_subcrit_weir[i] if (y_d_HR_lev < y_sluice_open): # Subcritical Broad-crested Weir Flow C_d_ebb_subcrit_weir[i] = flow_frac_sluice_culv[i]*HR_dike_lev_disch_m["Discharge, cms"][i]/Q_ebb_subcrit_weir[i] elif (H_sea_lev/y_d_HR_lev > 0.8): # Submerged Orifice Flow if (HR_dike_lev_disch_m["Discharge, cms"][i] > 0): C_d_ebb_submer_or[i] = flow_frac_sluice_culv[i]*HR_dike_lev_disch_m["Discharge, cms"][i]/Q_ebb_submer_or[i] else: # Transitional Flow C_d_ebb_transit[i] = flow_frac_sluice_culv[i]*HR_dike_lev_disch_m["Discharge, cms"][i]/Q_ebb_transit[i] """ Ebb C_d means and stdevs. """ C_d_ebb_free_mean = np.nanmean(C_d_ebb_free) + 0.05 C_d_ebb_transit_mean = np.nanmean(C_d_ebb_transit) C_d_ebb_submer_or_mean = np.nanmean(C_d_ebb_submer_or) C_d_ebb_subcrit_weir_mean = np.nanmean(C_d_ebb_subcrit_weir) C_d_ebb_supcrit_weir_mean = np.nanmean(C_d_ebb_supcrit_weir) C_d_ebb_flap_subcrit_weir_mean = np.nanmean(C_d_ebb_flap_subcrit_weir) C_d_ebb_flap_supcrit_weir_mean = np.nanmean(C_d_ebb_flap_supcrit_weir) + 0.05 C_d_ebb_free_std = np.nanstd(C_d_ebb_free) C_d_ebb_transit_std = np.nanstd(C_d_ebb_transit) C_d_ebb_submer_or_std = np.nanstd(C_d_ebb_submer_or) C_d_ebb_subcrit_weir_std = np.nanstd(C_d_ebb_subcrit_weir) C_d_ebb_supcrit_weir_std = np.nanstd(C_d_ebb_supcrit_weir) C_d_ebb_flap_subcrit_weir_std = np.nanstd(C_d_ebb_flap_subcrit_weir) C_d_ebb_flap_supcrit_weir_std = np.nanstd(C_d_ebb_flap_supcrit_weir) # C_d_ebb_std_peak = C_d_ebb_flap_supcrit_weir_std + C_d_ebb_free_std # optimizing coefficients continued. # C_d_ebb_std_array.append(C_d_ebb_std_peak) # if (C_d_ebb_std_peak > C_d_ebb_update): # break # else: # C_d_ebb_update = C_d_ebb_std_peak """ Flood C_d means and stdevs.
""" C_d_flood_free_mean = np.nanmean(C_d_flood_free) + 0.1 C_d_flood_transit_mean = np.nanmean(C_d_flood_transit) C_d_flood_submer_or_mean = np.nanmean(C_d_flood_submer_or) C_d_flood_subcrit_weir_mean = np.nanmean(C_d_flood_subcrit_weir) C_d_flood_supcrit_weir_mean = np.nanmean(C_d_flood_supcrit_weir) C_d_flood_free_std = np.nanstd(C_d_flood_free) C_d_flood_transit_std = np.nanstd(C_d_flood_transit) C_d_flood_submer_or_std = np.nanstd(C_d_flood_submer_or) C_d_flood_subcrit_weir_std = np.nanstd(C_d_flood_subcrit_weir) C_d_flood_supcrit_weir_std = np.nanstd(C_d_flood_supcrit_weir) """ Coefficients from Swamee Paper and WHG Report """ C_Swamee_mean = np.nanmean(C_Swamee) C_Swamee_std = np.nanstd(C_Swamee) C_one_flood = 1.375 # Discharge coefficient for supercritical b-c weir flow C_two_flood = 1.375 # Dischrage coefficient for subcritical b-c weir flow C_three_flood = 1.4 # Discharge coefficient for free sluice flow C_four_flood = 1.35 # Discharge coefficient for submerged orifice flow C_one_ebb = 1 C_two_ebb = 1 C_three_ebb = 0.6 C_four_ebb = 0.8 """ Total Flow """ Q_flood_free_adj = C_d_flood_free_mean*Q_flood_free Q_flood_transit_adj = C_d_flood_transit_mean*Q_flood_transit Q_flood_submer_or_adj = C_d_flood_submer_or_mean*Q_flood_submer_or Q_flood_subcrit_weir_adj = C_d_flood_subcrit_weir_mean*Q_flood_subcrit_weir Q_flood_supcrit_weir_adj = C_d_flood_supcrit_weir_mean*Q_flood_supcrit_weir Q_ebb_free_adj = C_d_ebb_free_mean*Q_ebb_free Q_ebb_transit_adj = C_d_ebb_transit_mean*Q_ebb_transit Q_ebb_submer_or_adj = C_d_ebb_submer_or_mean*Q_ebb_submer_or Q_ebb_subcrit_weir_adj = C_d_ebb_subcrit_weir_mean*Q_ebb_subcrit_weir Q_ebb_supcrit_weir_adj = C_d_ebb_supcrit_weir_mean*Q_ebb_supcrit_weir Q_ebb_flap_subcrit_weir_adj = C_d_ebb_flap_subcrit_weir_mean*Q_ebb_flap_subcrit_weir Q_ebb_flap_supcrit_weir_adj = C_d_ebb_flap_supcrit_weir_mean*Q_ebb_flap_supcrit_weir # Add Q to this array (Add at each index the different culvert Qs) Q_dike_sluice_calc_flood = np.nansum((Q_flood_free_adj,Q_flood_transit_adj,Q_flood_submer_or_adj),axis=0) Q_dike_sluice_weir_calc_flood = np.nansum((Q_flood_subcrit_weir_adj,Q_flood_supcrit_weir_adj),axis=0) Q_dike_sluice_calc_ebb = np.nansum((Q_ebb_free_adj,Q_ebb_transit_adj,Q_ebb_submer_or_adj),axis=0) Q_dike_sluice_weir_calc_ebb = np.nansum((Q_ebb_subcrit_weir_adj,Q_ebb_supcrit_weir_adj),axis=0) Q_dike_sluice_calc = Q_dike_sluice_calc_flood+Q_dike_sluice_weir_calc_flood+Q_dike_sluice_calc_ebb+Q_dike_sluice_weir_calc_ebb Q_dike_centerflap_calc = np.nansum((Q_ebb_flap_subcrit_weir_adj,Q_ebb_flap_supcrit_weir_adj),axis=0) # Left Flap Gate Has Same Conditions as Center (smaller culvert, but same gate size) Q_dike_leftflap_calc = Q_dike_centerflap_calc.copy() Q_total = Q_dike_leftflap_calc + Q_dike_centerflap_calc + Q_dike_sluice_calc Q_total[Q_total==0] = np.nan tidal_peaktopeak_interval = 12/24 + 25/(60*24) # bin width in days # Max/Min/Range of discharge through dike bin_start = 0 x_discharge_rangedates = [] y_discharge_calc_mins = [] y_discharge_calc_maxes = [] y_discharge_meas_mins = [] y_discharge_meas_maxes = [] for bin_index in range(len(x_datenum_dike)): datestart = x_datenum_dike[bin_start] dateend = datestart + (x_datenum_dike[bin_index] - x_datenum_dike[bin_start]) date_interval = dateend - datestart bin_end = bin_index if (date_interval >= tidal_peaktopeak_interval): x_discharge_rangedates.append(x_datenum_dike[int((bin_start+bin_end)/2)]) y_discharge_calc_mins.append(np.nanmin(Q_total[bin_start:bin_end])) 
y_discharge_calc_maxes.append(np.nanmax(Q_total[bin_start:bin_end])) y_discharge_meas_mins.append(np.nanmin(HR_dike_lev_disch_m["Discharge, cms"][bin_start:bin_end])) y_discharge_meas_maxes.append(np.nanmax(HR_dike_lev_disch_m["Discharge, cms"][bin_start:bin_end])) bin_start = bin_end x_discharge_rangedates = np.array(x_discharge_rangedates) y_discharge_calc_mins = np.array(y_discharge_calc_mins) y_discharge_calc_maxes = np.array(y_discharge_calc_maxes) y_discharge_calc_mins[y_discharge_calc_mins > np.nanmean(y_discharge_calc_maxes)] = np.nan y_discharge_calc_maxes[y_discharge_calc_maxes < np.nanmean(y_discharge_calc_mins)] = np.nan y_discharge_calc_ranges = y_discharge_calc_maxes - y_discharge_calc_mins y_discharge_meas_mins = np.array(y_discharge_meas_mins) y_discharge_meas_maxes = np.array(y_discharge_meas_maxes) y_discharge_meas_mins[y_discharge_meas_mins > np.nanmean(y_discharge_meas_maxes)] = np.nan y_discharge_meas_maxes[y_discharge_meas_maxes < np.nanmean(y_discharge_meas_mins)] = np.nan y_discharge_meas_ranges = y_discharge_meas_maxes - y_discharge_meas_mins y_discharge_calc_maxes_ovrlp_mean = np.nanmean(y_discharge_calc_maxes[61:66]) y_discharge_meas_maxes_ovrlp_mean = np.nanmean(y_discharge_meas_maxes[61:66]) y_discharge_calc_mins_ovrlp_mean = np.nanmean(y_discharge_calc_mins[61:66]) y_discharge_meas_mins_ovrlp_mean = np.nanmean(y_discharge_meas_mins[61:66]) y_discharge_calc_maxes_mean = np.nanmean(y_discharge_calc_maxes) y_discharge_meas_maxes_mean = np.nanmean(y_discharge_meas_maxes) y_discharge_calc_mins_mean = np.nanmean(y_discharge_calc_mins) y_discharge_meas_mins_mean = np.nanmean(y_discharge_meas_mins) # y_discharge_calc_maxes_mean_array.append(y_discharge_calc_maxes_ovrlp_mean) # if (y_discharge_calc_maxes_ovrlp_mean > y_discharge_meas_maxes_ovrlp_mean): # break """ Condition for optimization: Q_total[i] = HR_dike_lev_disch_m["Discharge, cms"][i] """ """ Plots """ ax = HR_dike_lev_disch_m.plot.scatter(x="datenum", y="Discharge, cms", color='Turquoise', label = 'Discharge, cms') plt.scatter(x_datenum_dike, Q_total, label = 'Calculated Discharge, cms') # Show X-axis major tick marks as dates loc = mdates.AutoDateLocator() plt.gca().xaxis.set_major_locator(loc) plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc)) plt.gcf().autofmt_xdate() plt.xlabel('Date', fontsize=22) plt.ylabel('Discharge (m^3/s)', fontsize=22) plt.legend(loc='upper right', bbox_to_anchor=(0.9,0.4)) # Should I be using Manning instead of Energy Eqn to determine Q for open-channel flow through dike? #%% Analytical Estimation of Discharge Through Dike Using Water Levels, WHG Report Analysis (all SI) # Add option for different configurations (number/size/type of openings)? inv_el_open = -1.064 slope_culv = 0.0067 len_culv = 20.42 inv_el_HRside = -0.928 sluice_bot_el = -0.579 y_sluice_open = sluice_bot_el-inv_el_open L_sluice_culv = 1.829 A_sluice_open = y_sluice_open*L_sluice_culv L_center_culv = 2.184 L_left_culv = 2.007 L_flaps_in = 1.829 L_flaps_out = 2.057 angle_init_flaps = 0.0872 # radians, ~ 5 degrees dens_seawater = 1018 # kg/m^3, average is roughly the same on both sides of the dike. grav = 9.81 # m/s^2 W_gate = 2000 # gate weight, Newtons -> see excel calculations using gate parts, volumes, and densities. h_gate = 2.317 # meters from flap gate bottom to hinge. Assume weight is uniformly distributed.
d_hinge_to_inv = 2.286 hinge_el_open = inv_el_open+d_hinge_to_inv C_one_flood = 1.375 C_two_flood = 1.375 C_three_flood = 1.4 C_four_flood = 1.35 C_one_ebb = 1 C_two_ebb = 1 C_three_ebb = 0.6 C_four_ebb = 0.8 HL_max = 0.6 # maximum headloss, meters D_HL = 0.884 # headloss parameter, meters # Sluice Gate Calculations (no variable coefficients like WHG and Spaulding and Grilli (2001) have used) # Q_supcrit_weir equation is wrong in WHG report. Q_dike_sluice_calc_WHG = np.zeros_like(HR_dike_lev_disch_m["datenum"]) # Add Q to this array (Add at each index the different culvert Qs) Q_dike_centerflap_calc_WHG = np.zeros_like(HR_dike_lev_disch_m["datenum"]) for i in range(len(HR_dike_lev_disch_m)): H_sea_lev = HR_dike_lev_disch_m["Gage height, m, Ocean side"][i] - inv_el_open y_d_HR_lev = HR_dike_lev_disch_m["Gage height, m, HR side"][i] - inv_el_open if (H_sea_lev > y_d_HR_lev): # If sea level is greater than HR level -> Negative Flow Q_supcrit_weir = -C_one_flood*L_sluice_culv*(2/3)*np.sqrt((2/3)*grav)*H_sea_lev**(3/2) Q_subcrit_weir = -C_two_flood*L_sluice_culv*(y_d_HR_lev)*np.sqrt(2*grav*(H_sea_lev-y_d_HR_lev)) Q_free_sluice = -C_three_flood*L_sluice_culv*y_sluice_open*np.sqrt(2*grav*H_sea_lev) Q_sub_orifice = -C_four_flood*L_sluice_culv*y_sluice_open*np.sqrt(2*grav*(H_sea_lev-y_d_HR_lev)) """ Compare Upstream Head (H_sea_lev) to Downstream Head (y_d_HR_lev) """ if (y_d_HR_lev/H_sea_lev < 0.64): # Supercritical """ Compare Upstream Head (H_sea_lev) to Gate Opening (y_sluice_open) """ if (H_sea_lev > 1.25*y_sluice_open): # Free sluice flow Q_dike_sluice_calc_WHG[i] = Q_free_sluice elif (H_sea_lev < y_sluice_open): # Supercritical weir flow Q_dike_sluice_calc_WHG[i] = Q_supcrit_weir else: # Weighted average of supercritical weir and free sluice flow Q_dike_sluice_calc_WHG[i] = (Q_free_sluice+Q_supcrit_weir)/2 # This is just the average - how to weight? elif (y_d_HR_lev/H_sea_lev > 0.68): # Subcritical """ Compare Upstream Head (H_sea_lev) to Gate Opening (y_sluice_open) """ if (H_sea_lev > 1.25*y_sluice_open): # Submerged orifice flow Q_dike_sluice_calc_WHG[i] = Q_sub_orifice elif (H_sea_lev < y_sluice_open): # Subcritical weir flow Q_dike_sluice_calc_WHG[i] = Q_subcrit_weir else: # Weighted average of subcritical weir and submerged orifice flow Q_dike_sluice_calc_WHG[i] = (Q_sub_orifice+Q_subcrit_weir)/2 # This is just the average - how to weight? else: # Weighted average of Supercritical and Subcritical """ Compare Upstream Head (H_sea_lev) to Gate Opening (y_sluice_open) """ if (H_sea_lev > 1.25*y_sluice_open): # Weighted average of free sluice and submerged orifice flow Q_dike_sluice_calc_WHG[i] = (Q_free_sluice+Q_sub_orifice)/2 # This is just the average - how to weight? elif (H_sea_lev < y_sluice_open): # Weighted average of supercritical weir and subcritical weir flow Q_dike_sluice_calc_WHG[i] = (Q_supcrit_weir+Q_subcrit_weir)/2 # This is just the average - how to weight? else: # Weighted average of weighted averages of weir and sluice flow. 
Q_dike_sluice_calc_WHG[i] = ((Q_free_sluice+Q_sub_orifice)/2+(Q_supcrit_weir+Q_subcrit_weir)/2)/2 Q_dike_centerflap_calc_WHG[i] = 0 elif (H_sea_lev <= y_d_HR_lev): # If sea level is less than HR level -> Positive Flow Q_supcrit_weir = C_one_ebb*L_sluice_culv*(2/3)*np.sqrt((2/3)*grav)*y_d_HR_lev**(3/2) Q_subcrit_weir = C_two_ebb*L_sluice_culv*(H_sea_lev)*np.sqrt(2*grav*(y_d_HR_lev-H_sea_lev)) Q_free_sluice = C_three_ebb*L_sluice_culv*y_sluice_open*np.sqrt(2*grav*y_d_HR_lev) Q_sub_orifice = C_four_ebb*L_sluice_culv*y_sluice_open*np.sqrt(2*grav*(y_d_HR_lev-H_sea_lev)) HL = HL_max*(1-0.5*(y_d_HR_lev+H_sea_lev)/D_HL) Q_supcrit_weir_flap = C_one_ebb*L_flaps_in*(2/3)*np.sqrt((2/3)*grav)*(y_d_HR_lev+HL)**(3/2) Q_subcrit_weir_flap = C_two_ebb*L_flaps_in*(H_sea_lev)*np.sqrt(2*grav*((y_d_HR_lev+HL)-H_sea_lev)) """ Compare Upstream Head (y_d_HR_lev) to Downstream Head (H_sea_lev) """ if (H_sea_lev/y_d_HR_lev < 0.64): # Supercritical """ Compare Upstream Head (y_d_HR_lev) to Gate Opening (y_sluice_open) """ if (y_d_HR_lev > 1.25*y_sluice_open): # Free sluice flow Q_dike_sluice_calc_WHG[i] = Q_free_sluice elif (y_d_HR_lev < y_sluice_open): # Supercritical weir flow Q_dike_sluice_calc_WHG[i] = Q_supcrit_weir else: # Weighted average of supercritical weir and free sluice flow Q_dike_sluice_calc_WHG[i] = (Q_free_sluice+Q_supcrit_weir)/2 # This is just the average - how to weight? elif (H_sea_lev/y_d_HR_lev > 0.68): # Subcritical """ Compare Upstream Head (y_d_HR_lev) to Gate Opening (y_sluice_open) """ if (y_d_HR_lev > 1.25*y_sluice_open): # Submerged orifice flow Q_dike_sluice_calc_WHG[i] = Q_sub_orifice elif (y_d_HR_lev < y_sluice_open): # Subcritical weir flow Q_dike_sluice_calc_WHG[i] = Q_subcrit_weir else: # Weighted average of subcritical weir and submerged orifice flow Q_dike_sluice_calc_WHG[i] = (Q_sub_orifice+Q_subcrit_weir)/2 # This is just the average - how to weight? else: # Weighted average of Supercritical and Subcritical """ Compare Upstream Head (y_d_HR_lev) to Gate Opening (y_sluice_open) """ if (y_d_HR_lev > 1.25*y_sluice_open): # Weighted average of free sluice and submerged orifice flow Q_dike_sluice_calc_WHG[i] = (Q_free_sluice+Q_sub_orifice)/2 # This is just the average - how to weight? elif (y_d_HR_lev < y_sluice_open): # Weighted average of supercritical weir and subcritical weir flow Q_dike_sluice_calc_WHG[i] = (Q_supcrit_weir+Q_subcrit_weir)/2 # This is just the average - how to weight? else: # Weighted average of weighted averages of weir and sluice flow. Q_dike_sluice_calc_WHG[i] = ((Q_free_sluice+Q_sub_orifice)/2+(Q_supcrit_weir+Q_subcrit_weir)/2)/2 """ Flap Gate Conditions """ if (H_sea_lev/(y_d_HR_lev) < 0.64): # Supercritical Q_dike_centerflap_calc_WHG[i] = Q_supcrit_weir_flap elif (H_sea_lev/(y_d_HR_lev) > 0.68): # Subcritical Q_dike_centerflap_calc_WHG[i] = Q_subcrit_weir_flap else: # Weighted average Q_dike_centerflap_calc_WHG[i] = (Q_supcrit_weir_flap+Q_subcrit_weir_flap)/2 else: # One of the values is nan, can't calculate. Q_dike_sluice_calc_WHG[i] = np.nan Q_dike_centerflap_calc_WHG[i] = np.nan # Left Flap Gate Has Same Conditions as Center (smaller culvert, but same gate size) Q_dike_leftflap_calc_WHG = Q_dike_centerflap_calc_WHG.copy() Q_total_WHG = Q_dike_leftflap_calc_WHG + Q_dike_centerflap_calc_WHG + Q_dike_sluice_calc_WHG # Should I be using Manning instead of Energy Eqn to determine Q for open-channel flow through dike? 
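# --- Added sketch (not part of the original analysis) ---------------------------------
# The recurring open question above ("Should I be using Manning instead of Energy Eqn?")
# can at least be bounded with a quick Manning full-flow capacity check for the sluice
# culvert. It reuses L_sluice_culv and slope_culv from the constants above; the roughness
# n = 0.06 is the low end of the n ~ 0.06-0.09 range that Spaulding and Grilli report for
# the HR bed (see the sources docstring earlier); the flow depth y is a hypothetical input.
def manning_capacity_sketch(y, L=L_sluice_culv, S=slope_culv, n=0.06):
    """Manning discharge (m^3/s) for a rectangular section of width L at depth y:
    Q = (1/n) * A * R**(2/3) * sqrt(S)."""
    A = L*y          # flow area, m^2
    P = L + 2*y      # wetted perimeter, m
    R = A/P          # hydraulic radius, m
    return (1/n)*A*R**(2/3)*np.sqrt(S)
# e.g. manning_capacity_sketch(1.0) -> ~1.5 m^3/s, the same order of magnitude as the
# per-culvert energy-equation Qs computed above.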
#%% Calculating Discharge from Dog Leg to Dike #%% Plot of 2D Side View of HR Dike import pylab as pl from matplotlib import collections as mc # Dike cross-section segments (left to right): WF Harbor bathymetry, WF Harbor to base of culvert, sluice gate dike_lines = [[(0,-1.369), (3.048,-1.369)], [(3.048,-1.369), (3.048,-1.064)], [(3.048,-0.579), (3.048,1.095)], [(3.048,-1.064), (23.468,-0.928)], [(3.048,0.463), (23.468,0.600)], [(23.468,-0.928), (25.906,-0.926)]] # Oceanside levels are H in sluice gate formula oceanside_level = 1.73 # this is high tide, m oceanside_level_co = 0.16 # crossover approaching low tide at HR peak level, m # HR levels are y_d in sluice gate formula. Formula switches from submerged to free flow if y_d drops below base of sluice. HR_level = -0.10 # this is at high tide, m HR_level_co = 0.16 # crossover approaching low tide at HR peak level, m # Sluice height above culvert is y in sluice gate formula sluice_height = -0.579 WF_opening = 1.984 # height of opening to Wellfleet Harbor, m dike_levels = [[(0, oceanside_level), (3.048, oceanside_level)], [(23.468, HR_level), (25.906, HR_level)]] dl_colors = np.array([(0, 1, 0, 1), (0, 0, 1, 1)]) dike_levels_co = [[(0, oceanside_level_co), (3.048, oceanside_level_co)], [(23.468, HR_level_co), (25.906, HR_level_co)]] dl_colors_co = np.array([(1, 0, 0, 1), (1, 0, 0, 1)]) lc_geom = mc.LineCollection(dike_lines, color='grey', linewidths=2) lc_levels = mc.LineCollection(dike_levels, color=dl_colors, linewidths=2) lc_levels_co = mc.LineCollection(dike_levels_co, color=dl_colors_co, linewidths=2) fig, ax = pl.subplots() ax.add_collection(lc_geom) ax.add_collection(lc_levels) ax.add_collection(lc_levels_co) ax.autoscale() ax.margins(0.1) ax.set_xlim(0,25.906) ax.set(xlabel='Distance from HR dike face [WF Harbor] to rear [HR] (m)', ylabel='Elevations (m NAVD88)') ax.grid() # if y_d < H < 0.81*y_d*(y_d/y)**0.72 then flow is submerged # if H >= 0.81*y_d*(y_d/y)**0.72 then flow is free #%% Translating Open-Channel Flow Project (WSP and Q with MacCormack/Pr) range_HRside_avg = 0.766 range_oceanside_avg = 2.535 # note that low tide is not well represented given the river discharge tide_amp = range_oceanside_avg/2 meanmins_oceanside = -1.02 meanmins_HRside = -0.65 # River mouth depth def tidal_cycle(time_sec): return mean_sea_level + tide_amp*math.sin(math.pi*time_sec/22350) tide_times = np.arange(0,89700,300) tide_heights = [] for x in tide_times: tide_heights = np.append(tide_heights,tidal_cycle(x)) fig, ax = plt.subplots() ax.plot(tide_times,tide_heights) ax.set_xlim(0,89400) ax.xaxis.set_ticks(np.arange(0, 104300, 14900)) ax.set(xlabel='Time (s)', ylabel='Water Depth Outside Dike (m NAVD88)') ax.grid()
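# Added unit check (not in the original script): the hard-coded 22350 s in tidal_cycle
# is half a tidal cycle, since sin(pi*t/22350) completes one period over 2*22350 s =
# 44700 s = 12.42 h, i.e. the M2 semidiurnal tidal period.
print(2*22350/3600)  # -> 12.416... hours per full cycle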
{"hexsha": "a45ce8194e6638d4de6f4a086e97391bc831c32c", "size": 61390, "ext": "py", "lang": "Python", "max_stars_repo_path": "SLR_Dike_HR.py", "max_stars_repo_name": "akurnizk/diked_hr_estuary_gw", "max_stars_repo_head_hexsha": "2430c756c635952502a5120ca65369f02da8c23f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SLR_Dike_HR.py", "max_issues_repo_name": "akurnizk/diked_hr_estuary_gw", "max_issues_repo_head_hexsha": "2430c756c635952502a5120ca65369f02da8c23f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SLR_Dike_HR.py", "max_forks_repo_name": "akurnizk/diked_hr_estuary_gw", "max_forks_repo_head_hexsha": "2430c756c635952502a5120ca65369f02da8c23f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 58.972142171, "max_line_length": 235, "alphanum_fraction": 0.6796220883, "include": true, "reason": "import numpy,from scipy,from mpmath", "num_tokens": 19080}
[STATEMENT] lemma eq_nextl_class_in_left_lang_im: "eq_nextl `` {u} \<in> left_lang ` states M" [PROOF STATE] proof (prove) goal (1 subgoal): 1. eq_nextl `` {u} \<in> left_lang ` states M [PROOF STEP] apply (rule rev_image_eqI [of "nextl (init M) u"]) [PROOF STATE] proof (prove) goal (2 subgoals): 1. nextl (init M) u \<in> states M 2. eq_nextl `` {u} = left_lang (nextl (init M) u) [PROOF STEP] apply (auto simp: eq_nextl_def left_lang_def) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
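In conventional automata notation the lemma reads as follows (an informal gloss, not part of the original theory): writing $\hat\delta(q_0,u)$ for `nextl (init M) u` and $\overleftarrow{L}(q)$ for `left_lang q`, the set of words leading the initial state to $q$,
\[ [u]_{\mathrm{eq\_nextl}} \;=\; \{\, v \mid \hat\delta(q_0,v) = \hat\delta(q_0,u) \,\} \;=\; \overleftarrow{L}\bigl(\hat\delta(q_0,u)\bigr) \;\in\; \{\, \overleftarrow{L}(q) \mid q \in \mathrm{states}\ M \,\}. \]
The first `apply` picks the witness state $\hat\delta(q_0,u)$ via `rev_image_eqI`; the second discharges both subgoals by unfolding `eq_nextl_def` and `left_lang_def`.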
{"llama_tokens": 230, "file": "Finite_Automata_HF_Finite_Automata_HF", "length": 3}
# -*- coding: utf-8 -*- """ Created on Thu Jul 4 16:42:32 2019 @author: Dominic """ import numpy as np from builtins import super def initialize_sigma2(X, Y): (N, D), (M, _) = X.shape, Y.shape diff = X[np.newaxis,...] - Y[:,np.newaxis,:] err = diff * diff return np.sum(err) / (D * M * N) class expectation_maximization_registration(object): def __init__(self, X, Y, sigma2=None, max_iterations=1000, tolerance=0.001, w=0, *args, **kwargs): if X.shape[1] != Y.shape[1]: raise ValueError("Both point clouds need to have the same number of dimensions.") self.X, self.Y = X, Y self.sigma2 = sigma2 (self.N, self.D),(self.M, _) = self.X.shape, self.Y.shape self.tolerance = tolerance self.w = w self.max_iterations = max_iterations self.iteration = 0 self.err = self.tolerance + 1 self.P, self.Pt1, self.P1 = np.zeros((self.M, self.N)), np.zeros((self.N, )), np.zeros((self.M, )) self.Np = 0 def register(self, callback=lambda **kwargs: None): self.TY = self.transform_point_cloud(self.Y) if self.sigma2 is None: self.sigma2 = initialize_sigma2(self.X, self.TY) self.q = -self.err - self.N * self.D/2 * np.log(self.sigma2) while self.iteration < self.max_iterations and self.err > self.tolerance: self.do_iteration() if callable(callback): callback(iteration=self.iteration, error=self.err, X=self.X, Y=self.TY) return self.TY, self.registration_parameters() def registration_parameters(self): raise NotImplementedError("Registration parameters should be defined in child classes.") def do_iteration(self): self.e_step() self.m_step() self.iteration += 1 def e_step(self): """ Perform E step of registration """ #compute distance between points diff = self.X[:,np.newaxis] - self.TY diff = diff*diff #store dists P = np.sum(diff, axis=-1).T #compute constant factor in denominator c = ((2 * np.pi * self.sigma2) ** (self.D / 2)) * (self.w / (1 - self.w)) * self.M / self.N #compute denominator P = np.exp(-P / (2 * self.sigma2)) denom = np.sum(P, axis=0) denom[denom==0] = np.finfo(float).eps denom += c #compute P self.P = np.divide(P,denom) def m_step(self): """ Perform M step of registration """ self.Pt1 = np.sum(self.P, axis=0) self.P1 = np.sum(self.P, axis=1) self.Np = np.sum(self.P1) self.solve() self.TY = self.transform_point_cloud(self.Y) self.update_variance() class simple_affine_registration(expectation_maximization_registration): def __init__(self, B=None, t=True, *args, **kwargs): super().__init__(*args, **kwargs) self.B = np.eye(self.D) if B is None else B self.t = np.zeros([1, self.D]) if t is True else t def solve(self): """ Main bulk of the m step calculations specific to type of registration """ muX = np.divide(np.sum(np.dot(self.P, self.X), axis=0), self.Np) muY = np.divide(np.sum(np.dot(np.transpose(self.P), self.Y), axis=0), self.Np) self.Xhat = self.X - muX Yhat = self.Y - muY self.A = np.transpose(self.Xhat) @ np.transpose(self.P) @ Yhat self.YPY = np.transpose(Yhat) @ np.diag(self.P1) @ Yhat self.B = np.linalg.solve(np.transpose(self.YPY), np.transpose(self.A)) if self.t is not None: self.t = np.transpose(muX) - np.transpose(self.B) @ np.transpose(muY) def transform_point_cloud(self, Y): """ Transform a given point cloud """ if self.t is None: return Y @ self.B else: return Y @ self.B + self.t def inverse_transform_point_cloud(self,Y): """ Inverse transform a given point cloud """ return (Y - self.t) @ np.linalg.inv(self.B) def update_variance(self): """ Compute new sigma """ qprev = self.q trAB = np.trace(self.A @ self.B) xPx = np.transpose(self.Pt1) @ np.sum(self.Xhat*self.Xhat, axis=1)
self.q = (xPx - 2 * trAB + np.trace(self.B @ self.YPY @ self.B)) / (2 * self.sigma2) + self.D * self.Np/2 * np.log(self.sigma2) self.err = np.abs(self.q - qprev) self.sigma2 = (xPx - trAB) / (self.Np * self.D) if self.sigma2 <= 0: self.sigma2 = self.tolerance / 10 def registration_parameters(self): return self.B, self.t class notranslation_affine_registration(expectation_maximization_registration): def __init__(self, B=None, t=True, *args, **kwargs): super().__init__(*args, **kwargs) self.B = np.eye(self.D) if B is None else B #self.t = np.zeros([1, self.D]) if t is True else t def solve(self): """ Main bulk of the m step calculations specific to type of registration """ #muX = np.divide(np.sum(np.dot(self.P, self.X), axis=0), self.Np) #muY = np.divide(np.sum(np.dot(np.transpose(self.P), self.Y), axis=0), self.Np) self.Xhat = self.X #- muX Yhat = self.Y #- muY self.A = np.transpose(self.Xhat) @ np.transpose(self.P) @ Yhat self.YPY = np.transpose(Yhat) @ np.diag(self.P1) @ Yhat self.B = np.linalg.solve(np.transpose(self.YPY), np.transpose(self.A)) #if self.t is not None: # self.t = np.transpose(muX) - np.transpose(self.B) @ np.transpose(muY) def transform_point_cloud(self, Y): """ Transform a given point cloud """ #if self.t is None: return Y @ self.B #else: #return Y @ self.B + self.t def inverse_transform_point_cloud(self,Y): """ Inverse transform a given point cloud """ return (Y) @ np.linalg.inv(self.B) def update_variance(self): """ Compute new sigma """ qprev = self.q trAB = np.trace(self.A @ self.B) xPx = np.transpose(self.Pt1) @ np.sum(self.Xhat*self.Xhat, axis=1) self.q = (xPx - 2 * trAB + np.trace(self.B @ self.YPY @ self.B)) / (2 * self.sigma2) + self.D * self.Np/2 * np.log(self.sigma2) self.err = np.abs(self.q - qprev) self.sigma2 = (xPx - trAB) / (self.Np * self.D) if self.sigma2 <= 0: self.sigma2 = self.tolerance / 10 def registration_parameters(self): return self.B#, self.t def gaussian_kernel(Y, beta): (M, D) = Y.shape XX = np.reshape(Y, (1, M, D)) YY = np.reshape(Y, (M, 1, D)) XX = np.tile(XX, (M, 1, 1)) YY = np.tile(YY, (1, M, 1)) diff = XX-YY diff = np.multiply(diff, diff) diff = np.sum(diff, 2) return np.exp(-diff / (2 * beta)) class deformable_registration(expectation_maximization_registration): def __init__(self, alpha=2, beta=2, *args, **kwargs): super().__init__(*args, **kwargs) self.alpha = 2 if alpha is None else alpha self.beta = 2 if beta is None else beta self.W = np.zeros((self.M, self.D)) self.G = gaussian_kernel(self.Y, self.beta) def solve(self): A = np.dot(np.diag(self.P1), self.G) + self.alpha * self.sigma2 * np.eye(self.M) B = np.dot(self.P, self.X) - np.dot(np.diag(self.P1), self.Y) self.W = np.linalg.solve(A, B) def transform_point_cloud(self, Y=None): if Y is None: self.TY = self.Y + np.dot(self.G, self.W) return else: return Y + np.dot(self.G, self.W) def update_variance(self): qprev = self.sigma2 xPx = np.dot(np.transpose(self.Pt1), np.sum(np.multiply(self.X, self.X), axis=1)) yPy = np.dot(np.transpose(self.P1), np.sum(np.multiply(self.TY, self.TY), axis=1)) trPXY = np.sum(np.multiply(self.TY, np.dot(self.P, self.X))) self.sigma2 = (xPx - 2 * trPXY + yPy) / (self.Np * self.D) if self.sigma2 <= 0: self.sigma2 = self.tolerance / 10 self.err = np.abs(self.sigma2 - qprev) def registration_parameters(self): return self.G, self.W
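# --- Added usage sketch (not part of the original module) ------------------------------
# Recovers a known affine map with simple_affine_registration. The transform convention
# above is TY = Y @ B + t (row vectors), so building X = Y @ B_true + t_true should let
# register() estimate both on this easy, noise-free case.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    B_true = np.array([[0.9, -0.2], [0.1, 1.1]])
    t_true = np.array([[0.5, -0.3]])
    Y = rng.rand(200, 2)                    # source cloud
    X = Y @ B_true + t_true                 # target cloud = affinely mapped source
    reg = simple_affine_registration(X=X, Y=Y, max_iterations=300, tolerance=1e-6)
    TY, (B_est, t_est) = reg.register()
    # should print True True once the EM iterations converge
    print(np.allclose(B_est, B_true, atol=1e-2), np.allclose(t_est, t_true, atol=1e-2))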
{"hexsha": "13aeef4ab3368eafed99bd919573e8d83480f2af", "size": 8642, "ext": "py", "lang": "Python", "max_stars_repo_path": "Registration/registration_core.py", "max_stars_repo_name": "antonykamp/AutoDot", "max_stars_repo_head_hexsha": "4be9912b312aa6725c6d3d49f7e2fe034124e208", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-09-16T23:50:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T13:31:35.000Z", "max_issues_repo_path": "Registration/registration_core.py", "max_issues_repo_name": "antonykamp/AutoDot", "max_issues_repo_head_hexsha": "4be9912b312aa6725c6d3d49f7e2fe034124e208", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-15T14:50:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-15T14:50:16.000Z", "max_forks_repo_path": "Registration/registration_core.py", "max_forks_repo_name": "antonykamp/AutoDot", "max_forks_repo_head_hexsha": "4be9912b312aa6725c6d3d49f7e2fe034124e208", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-08-20T11:52:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-12T08:04:35.000Z", "avg_line_length": 33.626459144, "max_line_length": 137, "alphanum_fraction": 0.5491784309, "include": true, "reason": "import numpy", "num_tokens": 2300}
import numpy as np import os import sys import torch from torch.utils.data import Dataset from utils import * from .utils import make_classes_counts class MOSI(Dataset): data_name = 'MOSI' label_modes = ['binary','five','seven','regression'] supported_feature_names = {'covarep':'COVAREP','opensmile':'OpenSmile-emobase2010','facet':'FACET 4.1','glove':'glove_vectors','bert':'BERT embeddings','label':'Opinion Segment Labels'} output_names = ['covarep','facet','glove','label'] feature_dim = {'covarep':2,'opensmile':2,'facet':2,'glove':2,'bert':2} def __init__(self, root, split, label_mode, transform=None, download=False): if label_mode not in self.label_modes: raise ValueError('label mode not found') self.root = root self.split = split if download: self.download() if not self._check_exists(): raise RuntimeError('Dataset not found.' + ' You can use download=True to download it') if(self.split=='train'): self.data = load(os.path.join(self.root, 'processed', 'train.pt')) elif(self.split=='val'): self.data = load(os.path.join(self.root, 'processed', 'validation.pt')) elif(self.split=='trainval'): train_data = load(os.path.join(self.root, 'processed', 'train.pt')) validation_data = load(os.path.join(self.root, 'processed', 'validation.pt')) for k in train_data: train_data[k].extend(validation_data[k]) self.data = train_data elif(self.split=='test'): self.data = load(os.path.join(self.root, 'processed', 'test.pt')) else: raise ValueError('Data split not supported') self.data['label'] = torch.tensor(self.data['label']) if(label_mode == 'binary'): self.classes = ['negative','positive'] self.classes_size = len(self.classes) self.classes_to_labels = {self.classes[i]:i for i in range(len(self.classes))} self.data['label'][self.data['label'] >= 0] = 1 self.data['label'][self.data['label'] < 0] = 0 self.data['label'] = self.data['label'].long() self.classes_counts = make_classes_counts(self.data['label'],self.classes_size) elif(label_mode == 'five'): self.classes = ['negative','somewhat negative','neutral','somewhat positive','positive'] self.classes_size = len(self.classes) self.classes_to_labels = {self.classes[i]:i for i in range(len(self.classes))} self.data['label'] = torch.round(self.data['label']/3*2 + 2).long() self.classes_counts = make_classes_counts(self.data['label'],self.classes_size) elif(label_mode == 'seven'): self.classes = ['very negative','negative','somewhat negative','neutral','somewhat positive','positive','very positive'] self.classes_size = len(self.classes) self.classes_to_labels = {self.classes[i]:i for i in range(len(self.classes))} self.data['label'] = torch.round(self.data['label'] + 3).long() self.classes_counts = make_classes_counts(self.data['label'],self.classes_size) elif(label_mode == 'regression'): pass else: raise ValueError('label mode not supported') self.transform = transform def __len__(self): return len(self.data[self.output_names[0]]) def __getitem__(self, idx): input = {} for k in self.output_names: input[k] = torch.tensor(self.data[k][idx]) if(not isinstance(self.data[k][idx], torch.Tensor)) else self.data[k][idx] if self.transform is not None: input = self.transform(input) return input def _check_exists(self): return os.path.exists(os.path.join(self.root, 'processed')) def download(self): if self._check_exists(): return self.download_MOSI_data() def download_MOSI_data(self): sys.path.append("./CMU-MultimodalSDK") from mmsdk import mmdatasdk def myavg(intervals,features): return np.average(features,axis=0) dirname = os.path.dirname(self.root) makedir_exist_ok(dirname)
if(not os.path.exists(os.path.join(self.root, 'cmumosi'))): makedir_exist_ok(os.path.join(self.root, 'cmumosi')) cmumosi_highlevel = mmdatasdk.mmdataset(mmdatasdk.cmu_mosi.highlevel,os.path.join(self.root, 'cmumosi')) else: cmumosi_highlevel = mmdatasdk.mmdataset(os.path.join(self.root, 'cmumosi')) if(not os.path.exists(os.path.join(self.root, 'deployed'))): makedir_exist_ok(os.path.join(self.root, 'deployed')) cmumosi_highlevel.align('glove_vectors',collapse_functions=[myavg]) cmumosi_highlevel.add_computational_sequences(mmdatasdk.cmu_mosi.labels,os.path.join(self.root, 'cmumosi')) cmumosi_highlevel.align('Opinion Segment Labels') deploy_files={x:x for x in cmumosi_highlevel.computational_sequences.keys()} cmumosi_highlevel.deploy(os.path.join(self.root, 'deployed'),deploy_files) aligned_cmumosi_highlevel = mmdatasdk.mmdataset(os.path.join(self.root, 'deployed')) self.train_keys = mmdatasdk.dataset.standard_datasets.CMU_MOSI.cmu_mosi_std_folds.standard_train_fold self.validation_keys = mmdatasdk.dataset.standard_datasets.CMU_MOSI.cmu_mosi_std_folds.standard_valid_fold self.test_keys = mmdatasdk.dataset.standard_datasets.CMU_MOSI.cmu_mosi_std_folds.standard_test_fold train_data = {k:[] for k in self.supported_feature_names} validation_data = {k:[] for k in self.supported_feature_names} test_data = {k:[] for k in self.supported_feature_names} for k in self.supported_feature_names: exec('{} = aligned_cmumosi_highlevel.computational_sequences[\'{}\'].data'.format(k,self.supported_feature_names[k])) exec('for m in {0}:\n'.format(k) + ' tmp_data = {0}[m][\'features\'][:]\n'.format(k) + ' tmp_data[tmp_data == -np.inf] = 0\n' + ' if(k==\'label\'):\n' + ' tmp_data = tmp_data.item(0)\n' + ' else:\n' + ' tmp_data = tmp_data.astype(np.float32)\n' + ' if(m[:m.index(\'[\')] in self.train_keys):\n' + ' train_data[\'{0}\'].append(tmp_data)\n'.format(k) + ' elif(m[:m.index(\'[\')] in self.validation_keys):\n' + ' validation_data[\'{0}\'].append(tmp_data)\n'.format(k) + ' elif(m[:m.index(\'[\')] in self.test_keys):\n' + ' test_data[\'{0}\'].append(tmp_data)\n'.format(k) + ' else:\n' + ' raise ValueError(\'key not found in folds\')') save(train_data,os.path.join(self.root, 'processed', 'train.pt')) save(validation_data,os.path.join(self.root, 'processed', 'validation.pt')) save(test_data,os.path.join(self.root, 'processed', 'test.pt')) return def __repr__(self): fmt_str = 'Dataset ' + self.__class__.__name__ + '\n' fmt_str += ' Number of datapoints: {}\n'.format(self.__len__()) fmt_str += ' Root Location: {}\n'.format(self.root) tmp = ' Transforms (if any): ' fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp))) return fmt_str
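# --- Added illustration (not part of the original file) --------------------------------
# The label-mode arithmetic in __init__ maps raw CMU-MOSI sentiment scores in [-3, 3]
# onto class indices; a quick check of the three discrete modes:
if __name__ == '__main__':
    raw = torch.tensor([-2.4, 0.0, 1.8])
    print((raw >= 0).long())                    # 'binary' -> tensor([0, 1, 1])
    print(torch.round(raw / 3 * 2 + 2).long())  # 'five'   -> tensor([0, 2, 3])
    print(torch.round(raw + 3).long())          # 'seven'  -> tensor([1, 3, 5])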
{"hexsha": "93867918ae4f11c76d533b8f023b5114f1189872", "size": 7649, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/datasets/mosi.py", "max_stars_repo_name": "dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture", "max_stars_repo_head_hexsha": "a072cb940201bbcdb2d0f4d0dfa1dde478fa4464", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-08-03T03:13:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T08:32:10.000Z", "max_issues_repo_path": "src/datasets/mosi.py", "max_issues_repo_name": "dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture", "max_issues_repo_head_hexsha": "a072cb940201bbcdb2d0f4d0dfa1dde478fa4464", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-08T16:10:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-08T16:10:38.000Z", "max_forks_repo_path": "src/datasets/mosi.py", "max_forks_repo_name": "dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture", "max_forks_repo_head_hexsha": "a072cb940201bbcdb2d0f4d0dfa1dde478fa4464", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-03T21:37:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-26T02:19:17.000Z", "avg_line_length": 54.2482269504, "max_line_length": 189, "alphanum_fraction": 0.6021702183, "include": true, "reason": "import numpy", "num_tokens": 1819}
from controllers.utility import compute_spline, line_parameters, line_profile, line_profile_n import numpy as np from controllers.processing_template import QSuperThread from controllers.micro_services import profile_painter_2, profile_collector, mic_project_generator class QProcessThread(QSuperThread): """ Processing thread to compute Microtubule profiles. Extending the QThread class keeps the GUI running while the evaluation runs in the background. """ def __init__(self, *args, parent=None): super(QProcessThread, self).__init__(*args, parent) def _set_image(self, slice): """ Preprocess image Parameters ---------- slice: int Current slice of image stack """ if len(self.image_stack.shape) == 2: self.current_image = self.image_stack else: self.current_image = self.image_stack[0,slice].astype(np.uint16) processing_image = np.clip(self.current_image/self.intensity_threshold, 0, 255).astype(np.uint8) # Spline fit skeletonized image self.splines = compute_spline( processing_image, self.blur, expansion=self.spline_parameter, expansion2=self.spline_parameter) def _show_profiles(self): """ Create and evaluate line profiles. """ if not isinstance(self.data_z, np.ndarray): self.z_project_collection = False profiles = [] counter = -1 count = 0 for spl in self.splines: count += spl.n_points current_profile_width = int(2*self.profil_width/3*self.px_size*1000) if current_profile_width % 2 != 0: current_profile_width += 1 painter = profile_painter_2(self.current_image/self.intensity_threshold, self.path) for i,spline in enumerate(self.splines): color = self.colormap(i/len(self.splines)) collector = profile_collector(self.path, i) mic_generator = mic_project_generator(self.path, i) line_profiles = spline.profiles(spline.n_points, self.profil_width, self.px_size, self.sampling) for line in line_profiles: counter+=1 self.sig.emit(int((counter) / count* 100)) if self.z_project_collection: for z in range(self.data_z.shape[0]): z_profile = line_profile_n(self.data_z[z], line) mic_generator.send((z_profile, z)) profile = line_profile_n(self.current_image, line) profile = profile[int(profile.shape[0]/2-current_profile_width/2):int(profile.shape[0]/2+current_profile_width/2)] if profile.shape[0]< int(current_profile_width): print("too short") continue collector.send(profile) painter.send((line, color)) if self.z_project_collection: try: mic_generator.send(None) except StopIteration as err: print("created z-profile") try: collector.send(None) except StopIteration as err: result = err.value profiles += result["red"] red = np.array(result["red"]) red_mean = np.mean(red, axis=0) self.sig_plot_data.emit(red_mean, profiles[0].shape[0]/2, i, self.path, color, red.shape[0]) try: painter.send(None) except StopIteration: print("Overlay success") red = np.array(profiles) red_mean = np.mean(red, axis=0) try: np.savetxt(self.path + r"\red_mean.txt", red_mean) self.sig_plot_data.emit(red_mean, profiles[0].shape[0]/2, 9999, self.path, (1.0, 0.0, 0.0, 1.0), red.shape[0]) np.savetxt(self.path + r"\red.txt", red) except ValueError: print(self.path + " couldn't be evaluated") def run(self,): """ Start computation and run thread """ for i in range(self.image_stack.shape[1]): self._set_image(i) self._show_profiles() self.done.emit(self.ID)
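# --- Added standalone sketch (not the actual micro_services implementation) ------------
# Illustrates the coroutine protocol used above: the painter/collector generators take
# work via send(), are closed with send(None), and hand their aggregate back through
# StopIteration.value.
def _collector_sketch():
    items = []
    while True:
        item = yield
        if item is None:
            return {"profiles": items}   # surfaces as StopIteration.value
        items.append(item)
# c = _collector_sketch(); next(c)       # prime the coroutine
# c.send(1.0); c.send(2.0)
# try:
#     c.send(None)
# except StopIteration as err:
#     print(err.value)                   # {'profiles': [1.0, 2.0]}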
{"hexsha": "45ef2419c337dfad3915974b4b0041cc7adf7d33", "size": 4322, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/controllers/processing_microtubule.py", "max_stars_repo_name": "super-resolution/line_profiler", "max_stars_repo_head_hexsha": "472ee9433298327263ea1e1423b37fbe78d2c861", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/controllers/processing_microtubule.py", "max_issues_repo_name": "super-resolution/line_profiler", "max_issues_repo_head_hexsha": "472ee9433298327263ea1e1423b37fbe78d2c861", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/controllers/processing_microtubule.py", "max_forks_repo_name": "super-resolution/line_profiler", "max_forks_repo_head_hexsha": "472ee9433298327263ea1e1423b37fbe78d2c861", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5826086957, "max_line_length": 130, "alphanum_fraction": 0.5853771402, "include": true, "reason": "import numpy", "num_tokens": 908}
# -*- coding: utf-8 -*- """ Created on Fri Oct 29 12:14:51 2021 @author: ag """ import numpy as np from glob import glob import pandas as pd import matplotlib.pyplot as plt from tqdm import tqdm import corner import hadcrut5 import re import os from settings import scenariocolors, baseline_period, datafolder from misc_tools import confidence_ellipse from plot_comparison_data import plot_comparison scenarios = {'SSP1-1.9': [], 'SSP1-2.6': [], 'SSP2-4.5': [], 'SSP3-7.0': [], 'SSP5-8.5': []} for scenario in scenarios: tasfile = f"{datafolder}/raw_data/AR6 Fig spm08ad/tas_global_{scenario.replace('-','_').replace('.','_')}.csv" tas = pd.read_csv(tasfile) if True: Sar6 = pd.read_csv(f"{datafolder}/raw_data/AR6 Fig spm08ad/global_sea_level_projected.csv",index_col='Year') scenarios = ['SSP1-1.9', 'SSP1-2.6', 'SSP2-4.5', 'SSP3-7.0', 'SSP5-8.5'] tasfile = f"{datafolder}/raw_data/AR6 Fig spm08ad/tas_global_Historical.csv" tas = pd.read_csv(tasfile) tasbase = tas[(tas.Year>=baseline_period[0]) & (tas.Year<=baseline_period[1])].mean() for scenario in scenarios: tasfile = f"{datafolder}/raw_data/AR6 Fig spm08ad/tas_global_{scenario.replace('-','_').replace('.','_')}.csv" tas = pd.read_csv(tasfile) s = Sar6[f'{scenario} Central'] dSdt = (s[2100]-s[2050])/50 t = tas[(tas.Year>=2050) & (tas.Year<=2100)].mean() - tasbase plt.plot(t['Mean'],dSdt*1000,'bo') # dSdt = (s[2050]-s[2020])/30 # t = tas[(tas.Year>=2020) & (tas.Year<=2050)].mean() - tasbase # plt.plot(t['Mean'],dSdt*1000,'mo') # dSdt = (s[2100]-s[2020])/70 # t = tas[(tas.Year>=2020) & (tas.Year<=2100)].mean() - tasbase # plt.plot(t['Mean'],dSdt*1000,'ro')
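# Added finishing touches (sketch, not in the original script): the loop above leaves
# the figure unlabeled. Assumes baseline_period (imported from settings) is a
# (start_year, end_year) tuple.
plt.xlabel(f'2050-2100 mean GSAT anomaly vs {baseline_period[0]}-{baseline_period[1]} (K)')
plt.ylabel('2050-2100 mean GMSL rise rate (mm/yr)')
plt.show()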
{"hexsha": "8b0a0d185cbd309d62a84f743d7da35bc9bb68db", "size": 1815, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/Aslak/ar6_tas_comparison.py", "max_stars_repo_name": "cmip6moap/project01", "max_stars_repo_head_hexsha": "749dea5fe572b9853d019489b4deea77ce01efd8", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-02T12:09:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-03T08:24:29.000Z", "max_issues_repo_path": "code/Aslak/ar6_tas_comparison.py", "max_issues_repo_name": "cmip6moap/project01", "max_issues_repo_head_hexsha": "749dea5fe572b9853d019489b4deea77ce01efd8", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/Aslak/ar6_tas_comparison.py", "max_forks_repo_name": "cmip6moap/project01", "max_forks_repo_head_hexsha": "749dea5fe572b9853d019489b4deea77ce01efd8", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2452830189, "max_line_length": 118, "alphanum_fraction": 0.6225895317, "include": true, "reason": "import numpy", "num_tokens": 610}
;;; -*- syntax: common-lisp; package: OMEGA; base: 10; mode: Keim -*- ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; ;; ;; Copyright (C) 1996 by AG Siekmann, Fachbereich Informatik, ;; ;; Universitaet des Saarlandes, Saarbruecken, Germany. ;; ;; All rights reserved. ;; ;; For information about this program, write to: ;; ;; OMEGA Project ;; ;; AG Siekmann/FB Informatik ;; ;; Universitaet des Saarlandes ;; ;; Bau 36, 4. Stock ;; ;; D-66041 Saarbruecken ;; ;; Germany ;; ;; electronic mail: keim@cs.uni-sb.de ;; ;; ;; ;; The author makes no representations about the suitability of this ;; ;; software for any purpose. It is provided "AS IS" without express or ;; ;; implied warranty. In particular, it must be understood that this ;; ;; software is an experimental version, and is not suitable for use in ;; ;; any safety-critical application, and the author denies a license for ;; ;; such use. ;; ;; ;; ;; You may use, copy, modify and distribute this software for any ;; ;; noncommercial and non-safety-critical purpose. Use of this software ;; ;; in a commercial product is not included under this license. You must ;; ;; maintain this copyright statement in all copies of this software that ;; ;; you modify or distribute. ;; ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; (in-package :omega) (eval-when (load compile eval) (unless (com~find-category 'post) (com~defcategory post (help "Tactics of the theory POST.")))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; These are the common tactics for the theory POST. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;todo: automation of the sort inference and more of the outline-cases in the equal-tactics. 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; sforalli ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; ;(infer~deftactic sforalli ; (outline-mappings (((existent nonexistent) sforalli-b) ; ((existent existent) sforalli-a))) ; (parameter-types termsym) ; (expansion-function potac=expand-sforalli) ; (help "FORALL-SORT-Elimination.")) ; ;(tac~deftactic sforalli-b sforalli (in post) ; (parameters (X term+constant "A new constant.")) ; (conclusions C) ; (premises P ) ; (sideconditions ; (potac=forall-sort-p (formula C)) ; (pds~not-free-in-nodes-or-hyps-p X C)) ; (computations ; (P (potac=compute-sforalli X (formula C)))) ; (description "S-Universal introduction backwards")) ; ;(tac~deftactic sforalli-a sforalli (in post) ; (parameters (X term+constant "A new constant.")) ; (conclusions C) ; (premises P ) ; (sideconditions ; (potac=forall-sort-p (formula C)) ; (potac=sforall-applicable-p (formula C) (formula P) X) ; (pds~not-free-in-nodes-or-hyps-p X C)) ; (computations ) ; (description "S-Universal introduction application")) ; ; ;(defun potac=forall-sort-p (formula) ; (and ; (term~appl-p formula) ; (data~schema-equal (data~appl-function formula) ; (env~lookup-object :forall-sort (pds~environment omega*current-proof-plan))))) ; ;(defun potac=sforall-applicable-p (con pre term) ; (term~alpha-equal pre (potac=compute-sforalli term con))) ; ; ;(defun potac=compute-sforalli (term formula) ; (let* ((args (data~appl-arguments formula)) ; (sort (second args)) ; (abstr (first args))) ; (term~appl-create ; (env~lookup-object :implies (pds~environment omega*current-proof-plan)) ; (list ; (term~appl-create sort (list term)) ; (beta~normalize (term~appl-create abstr (list term))))))) ; ; ;(defun potac=expand-sforalli (outline parameters) ; (let* ((fs-def (th~find-assumption "forall-sort" (prob~theory omega*current-proof-plan))) ; (definiendum (th~definition-constant fs-def)) ; (definiens (data~copy (th~ass-node fs-def) :downto '(term+constant type+primitive)))) ; (tacl~init outline) ; (tacl~sequence ; (defni-res ('defni (list (car outline) nil) (list definiendum definiens (pos~add-front 0)))) ; (foralli-res ('foralli (list (cadr defni-res) (cadr outline)) parameters))) ; (tacl~end))) ; ;(com~defcommand sforalli ; (argnames univ-line param line) ; (argtypes ndline termsym ndline) ; (arghelps "A universal line to prove" "New parameter" "A line") ; (function potac=sforalli) ; (defaults potac=sforalli-defaults) ; (frag-cats tactics post) ; (log-p T) ; (help "Introduce a sorted universal quantifier.")) ; ;(defun potac=sforalli (univ-line param line) ; (infer~compute-outline 'sforalli (list univ-line line) (list param))) ; ; ;(defun potac=sforalli-defaults (uni-line param line) ; (cond ((not (com~specified-arg-p uni-line)) ; (list (pds~find-open-node #'(lambda (x) (data~schema-equal ; (data~appl-function x) ; (env~lookup-object :forall-sort ; (pds~environment omega*current-proof-plan))))) ; (com~unspecified) (com~unspecified))) ; ((not (com~specified-arg-p param)) ; (list uni-line ; (or ; (potac=generate-defaults-sforalli ; uni-line ; (pds~environment omega*current-proof-plan)) ; (com~unspecified)) ; (com~unspecified))) ; ((not (com~specified-arg-p line)) ; (list uni-line param (oc~nil-argument))) ; (t (list uni-line param line)))) ; ;(defun potac=generate-defaults-sforalli (line env) ; (when (node~p line) ; (let ((form (node~formula line))) ; (when (data~schema-equal ; (data~appl-function form) ; (env~lookup-object :forall-sort ; (pds~environment 
omega*current-proof-plan))) ; (let ((var (logic~quantification-bound-variable form))) ; (term~generate-term-primitive-with-new-name ; (keim~name var) (term~type var) 'term+constant env)))))) ; ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; sforalle ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; ;(infer~deftactic sforalle ; (outline-mappings (((nonexistent existent) sforalle-f) ; ((existent existent) sforalle-a))) ; (parameter-types term) ; (expansion-function potac=expand-sforalle) ; (help "FORALL-SORT-Elimination.")) ; ;(tac~deftactic sforalle-f sforalle (in post) ; (parameters (X term+term "A term.")) ; (conclusions C) ; (premises P ) ; (sideconditions ; (potac=forall-sort-p (formula P))) ; (computations ; (C (potac=compute-sforalle X (formula P)))) ; (description "S-Universal elimination forwards")) ; ;(tac~deftactic sforalle-a sforalle (in post) ; (parameters (X term+constant "A new constant.")) ; (conclusions C) ; (premises P ) ; (sideconditions ; (potac=forall-sort-p (formula P)) ; (potac=sforall-applicable-p (formula P) (formula C) X) ; (pds~node-supported-by-p C P)) ; (computations ) ; (description "S-Universal elimination application")) ; ;(defun potac=compute-sforalle (term formula) ; (let* ((args (data~appl-arguments formula)) ; (sort (second args)) ; (abstr (first args))) ; (term~appl-create ; (env~lookup-object :implies (pds~environment omega*current-proof-plan)) ; (list ; (term~appl-create sort (list term)) ; (beta~normalize (term~appl-create abstr (list term))))))) ; ; ;(defun potac=expand-sforalle (outline parameters) ; (let* ((fs-def (th~find-assumption "forall-sort" (prob~theory omega*current-proof-plan))) ; (definiendum (th~definition-constant fs-def)) ; (definiens (data~copy (th~ass-node fs-def) :downto '(term+constant type+primitive)))) ; (tacl~init outline) ; (tacl~sequence ; (defne-res ('defne (list nil (cadr outline)) (list definiendum definiens (pos~add-front 0)))) ; (foralle-res ('foralle (list (car outline) (car defne-res)) parameters))) ; (tacl~end))) ; ;(com~defcommand sforalle ; (argnames univ-line line term) ; (argtypes ndline ndline term) ; (arghelps "Universal line" "A line" "Term to substitute") ; (function potac=sforalle) ; (defaults potac=sforalle-defaults) ; (frag-cats tactics post) ; (log-p T) ; (help "Eliminate a sorted universal quantifier.")) ; ;(defun potac=sforalle (univ-line line param) ; (infer~compute-outline 'sforalle (list line univ-line) (list param ))) ; ; ;(defun potac=sforalle-defaults (univ elim term) ; (cond ((not (com~specified-arg-p univ)) ; (list (pds~find-support #'logic~universal-quantification-p) (com~unspecified) (com~unspecified))) ; ((not (com~specified-arg-p elim)) ; (list univ ; (if (and univ (logic~universal-quantification-p (node~formula univ))) ; (let ((scope (logic~quantification-scope (node~formula univ)))) ; (pds~find-open-node #'(lambda (x) (term~alpha-match scope x)))) ; (oc~nil-argument)) ; (com~unspecified))) ; ((not (com~specified-arg-p term)) ; (list univ ; elim ; (if (and univ ; (logic~universal-quantification-p (node~formula univ)) ; elim) ; (let* ((subst (term~alpha-match (logic~quantification-scope (node~formula univ)) ; (node~formula elim)))) ; (if subst ; (car (subst~codomain subst)) ; (com~unspecified))) ; (com~unspecified)))) ; (t (list univ elim term)))) ; ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; foralle-sort ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (infer~deftactic foralle-sort 
(outline-mappings (((nonexistent existent existent) foralle-sort-f2) ((nonexistent existent nonexistent) foralle-sort-f) ((existent existent nonexistent) foralle-sort-s) ((existent existent existent) foralle-sort-a))) (parameter-types term) (expansion-function potac=expand-foralle-sort) (help "FORALL-SORT-Elimination.")) (tac~deftactic foralle-sort-s foralle-sort (in post) (parameters (X term+term "A term.")) (conclusions C) (premises P S) (sideconditions (potac=forall-sort-formula? (formula P)) (potac=foralle-sort-applicable-conc-p (formula P) (formula C) X)) (computations (S (potac=compute-forall-sort-hyp X (formula P)))) (description "S-Universal elimination sideward")) (tac~deftactic foralle-sort-f2 foralle-sort (in post) (parameters (X term+term "A term.")) (conclusions C) (premises P S) (sideconditions (potac=forall-sort-formula? (formula P)) (potac=foralle-sort-applicable-sort-p (formula p) (formula S) X)) (computations (C (potac=compute-forall-sort X (formula P)))) (description "S-Universal elimination forward")) (tac~deftactic foralle-sort-f foralle-sort (in post) (parameters (X term+term "A term.")) (conclusions C) (premises P S) (sideconditions (potac=forall-sort-formula? (formula P))) (computations (C (potac=compute-forall-sort X (formula P))) (S (potac=compute-forall-sort-hyp X (formula P)))) (description "S-Universal elimination forwards")) (tac~deftactic foralle-sort-a foralle-sort (in post) (parameters (X term+term "A term.")) (conclusions C) (premises P S) (sideconditions (potac=forall-sort-formula? (formula P)) (potac=foralle-sort-applicable-p (formula P) (formula C) (formula S) X)) (computations ) (description "S-Universal elimination application")) (defun potac=foralle-sort-applicable-p (pre con sort term) (and (term~alpha-equal con (potac=compute-forall-sort term pre)) (term~alpha-equal sort (potac=compute-forall-sort-hyp term pre)))) (defun potac=foralle-sort-applicable-sort-p (pre sort term) (term~alpha-equal sort (potac=compute-forall-sort-hyp term pre))) (defun potac=foralle-sort-applicable-conc-p (pre con term) (term~alpha-equal con (potac=compute-forall-sort term pre))) (defun potac=expand-foralle-sort (outline parameters) (let* ((quantorstring (string (keim~name (data~appl-function (node~formula (cadr outline)))))) (fs-def (th~find-assumption quantorstring (prob~theory omega*current-proof-plan))) (definiendum (th~definition-constant fs-def)) (definiens (data~copy (th~ass-node fs-def) :downto '(term+constant type+primitive)))) (tacl~init outline) (tacl~sequence (defne-res ('defne (list nil (cadr outline)) (list definiendum definiens (pos~add-front 0)))) (foralle-res ('foralle (list nil (car defne-res)) parameters)) (impe-res ('impe (list (car outline) (caddr outline) (car foralle-res)) nil))) (tacl~end))) (com~defcommand foralle-sort (argnames univ-line line term so-line) (argtypes ndline ndline term ndline) (arghelps "Universal line" "A line" "Term to substitute" "A line with sort") (function potac=foralle-sort) (defaults potac=foralle-sort-defaults) (frag-cats tactics post) (log-p T) (help "Eliminate a sorted universal quantifier.")) (defun potac=foralle-sort (univ-line line param so-line) (infer~compute-outline 'foralle-sort (list line univ-line so-line) (list param ))) (defgeneric potac=forall-sort-formula? (form) (:method ((form node+node)) (potac=forall-sort-formula? 
(node~formula form)))
  (:method ((form term+appl))
     (let ((func (data~appl-function form)) (env (pds~environment omega*current-proof-plan)))
       (or (data~schema-equal func (env~lookup-object :forall-sort env))
           (data~schema-equal func (env~lookup-object :forall-defined env)))))
  (:method ((form term+term)) nil))

(defun potac=foralle-sort-defaults (univ elim term so-line)
  (cond ((not (com~specified-arg-p univ))
         (list (pds~find-support #'potac=forall-sort-formula?) (com~unspecified) (com~unspecified) (com~unspecified)))
        ((not (com~specified-arg-p elim))
         (list univ
               (if (and univ (logic~universal-quantification-p (node~formula univ)))
                   (let ((scope (logic~quantification-scope (node~formula univ))))
                     (pds~find-open-node #'(lambda (x) (term~alpha-match scope x))))
                 (oc~nil-argument))
               (com~unspecified) (com~unspecified)))
        ((not (com~specified-arg-p term))
         (list univ elim
               (if (and univ (logic~universal-quantification-p (node~formula univ)) elim)
                   (let* ((subst (term~alpha-match (logic~quantification-scope (node~formula univ)) (node~formula elim))))
                     (if subst (car (subst~codomain subst)) (com~unspecified)))
                 (com~unspecified))
               (com~unspecified)))
        ((not (com~specified-arg-p so-line))
         (list univ elim term
               (if (and univ (= (length (data~appl-arguments (node~formula univ))) 2) term)
                   (let* ((sort (second (data~appl-arguments (node~formula univ))))
                          (sort-node (pds~find-support #'(lambda (x) (and (data~appl-p x) (term~alpha-equal sort (data~appl-function x)) (term~alpha-equal term (car (data~appl-arguments x))))))))
                     (if sort-node sort-node (oc~nil-argument)))
                 (oc~nil-argument))))
        (t (list univ elim term so-line))))

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; foralli-sort
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

(infer~deftactic foralli-sort
		 (outline-mappings (((existent nonexistent) foralli-sort-b)
				    ((existent existent) foralli-sort-a)))
		 (parameter-types termsym)
		 (expansion-function potac=expand-foralli-sort)
		 (help "FORALL-SORT-Introduction."))

(tac~deftactic foralli-sort-b foralli-sort (in post)
   (parameters (X term+constant "A new constant."))
   (conclusions C)
   (premises P)
   (hypotheses ((H P) "H is a hypothesis for P"))
   (sideconditions (potac=forall-sort-formula? (formula C))
		   (pds~not-free-in-nodes-or-hyps-p X C))
   (computations (P (potac=compute-forall-sort X (formula C)))
		 (H (potac=compute-forall-sort-hyp X (formula C))))
   (description "S-Universal introduction backwards"))

(tac~deftactic foralli-sort-a foralli-sort (in post)
   (parameters (X term+constant "A new constant."))
   (conclusions C)
   (premises P)
   (hypotheses ((H P) "H is a hypothesis for P"))
   (sideconditions (potac=forall-sort-formula?
(formula C)) (potac=foralli-sort-applicable-p (formula C) (formula P) X (hyps P)) (pds~not-free-in-nodes-or-hyps-p X C)) (description "S-Universal introduction application")) (defun potac=foralli-sort-applicable-p (con pre term hyp-list) (let ((hyp (potac=compute-forall-sort-hyp term con))) (and (term~alpha-equal pre (potac=compute-forall-sort term con)) (some #'(lambda (h) (term~alpha-equal hyp (node~formula h))) hyp-list)))) (defun potac=compute-forall-sort (term formula) (let* ((args (data~appl-arguments formula)) (abstr (first args))) (beta~normalize (term~appl-create abstr (list term))))) (defun potac=compute-forall-sort-hyp (term formula) (let* ((env (pds~environment omega*current-proof-plan)) (sort (if (data~schema-equal (data~appl-function formula) (env~lookup-object :forall-sort env)) (second (data~appl-arguments formula)) (env~lookup-object :defined env)))) (term~appl-create sort (list term)))) (defun potac=expand-foralli-sort (outline parameters) (let* ((quantorstring (string (keim~name (data~appl-function (node~formula (car outline)))))) (fs-def (th~find-assumption quantorstring (prob~theory omega*current-proof-plan))) (definiendum (th~definition-constant fs-def)) (definiens (data~copy (th~ass-node fs-def) :downto '(term+constant type+primitive))) (conc (car outline)) (prem (cadr outline)) (old-hyp (car (set-difference (pdsn~hyps prem) (pdsn~hyps conc))))) (tacl~init outline) (let ((result (tacl~sequence (defni-res ('defni (list conc nil) (list definiendum definiens (pos~add-front 0)))) (foralli-res ('foralli (list (cadr defni-res) nil) parameters)) (impi-res ('impi (list (cadr foralli-res) nil) nil))))) (tac~forget&destroy-hyp (list (second result)) old-hyp (third result)) (tacl~apply 'weaken (list (second result) prem) nil)) (tacl~end))) (com~defcommand foralli-sort (argnames univ-line param line) (argtypes ndline termsym ndline) (arghelps "Universal line to prove" "New parameter" "A line" ) (function potac=foralli-sort) (defaults potac=foralli-sort-defaults) (frag-cats tactics post) (log-p T) (help "Introduce a sorted universal quantifier.")) (defun potac=foralli-sort (univ-line param line ) (infer~compute-outline 'foralli-sort (list univ-line line) (list param))) (defun potac=foralli-sort-defaults (uni-line param line) (cond ((not (com~specified-arg-p uni-line)) (list (pds~find-open-node #'potac=forall-sort-formula?) (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p param)) (list uni-line (or (potac=generate-new-constant uni-line (pds~environment omega*current-proof-plan)) (com~unspecified)) (com~unspecified))) ((not (com~specified-arg-p line)) (list uni-line param (oc~nil-argument))) (t (list uni-line param line)))) (defun potac=generate-new-constant (line env) (when (node~p line) (let ((form (node~formula line))) (when (or (potac=forall-sort-formula? form) (potac=exists-sort-formula? 
form)) (let ((var (logic~quantification-bound-variable form))) (term~generate-term-primitive-with-new-name (keim~name var) (term~type var) 'term+constant env)))))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; existsi-sort ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (infer~deftactic existsi-sort (outline-mappings (((existent existent nonexistent) existsi-sort-r) ((existent nonexistent existent) existsi-sort-l) ((existent nonexistent nonexistent) existsi-sort-b) ((nonexistent existent existent) existsi-sort-f) ((existent existent existent) existsi-sort-a))) (parameter-types term position-list) (expansion-function potac=expand-existsi-sort) (help "EXISTS-SORT-Introduction.")) (tac~deftactic existsi-sort-r existsi-sort (in post) (parameters (X term+term "The witness term.") (PList list "positions of the witness term in the premise")) (conclusions C) (premises P S) (sideconditions (potac=exists-sort-formula? (formula C)) (potac=existsi-sort-applicable-prem-p (formula C) (formula P) X)) (computations (S (potac=compute-exists-sort-sort X (formula C)))) (description "S-Existential introduction backwards")) (tac~deftactic existsi-sort-f existsi-sort (in post) (parameters (X term+term "The witness term.") (PList list "positions of the witness term in the premise")) (conclusions C) (premises P S) (sideconditions ) ;(orules=equal-at-positions-p X P PList)) (computations (C (potac=compute-exists-sort-exists X (formula P) (formula S) PList))) (description "S-Existential introduction forward")) (tac~deftactic existsi-sort-l existsi-sort (in post) (parameters (X term+term "The witness term.") (PList list "positions of the witness term in the premise")) (conclusions C) (premises P S) (sideconditions (potac=exists-sort-formula? (formula C)) (potac=existsi-sort-applicable-sort-p (formula C) (formula S) X)) (computations (P (potac=compute-exists-sort X (formula C)))) (description "S-Existential introduction backwards")) (tac~deftactic existsi-sort-b existsi-sort (in post) (parameters (X term+term "The witness term.") (PList list "positions of the witness term in the premise")) (conclusions C) (premises P S) (sideconditions (potac=exists-sort-formula? (formula C))) (computations (P (potac=compute-exists-sort X (formula C))) (S (potac=compute-exists-sort-sort X (formula C)))) (description "S-Existential introduction backwards")) (tac~deftactic existsi-sort-a existsi-sort (in post) (parameters (X term+term "The witness term.") (PList list "positions of the witness term in the premise")) (conclusions C) (premises P S) (sideconditions (potac=exists-sort-formula? 
(formula C)) (potac=existsi-sort-applicable-p (formula C) (formula P) (formula S) X)) (computations) (description "S-Existential introduction application")) (defun potac=compute-exists-sort-exists (term prem sort Pos-List) (let* ((new-var (orules=new-variable (term~type term))) (new-range (orules=replace-at-positions prem pos-list new-var))) (term~appl-create (env~lookup-object :exists-sort (pds~environment omega*current-proof-plan)) (list (term~abstr-create (list new-var) new-range) (data~appl-function sort))))) (defun potac=existsi-sort-applicable-p (con pre sort term) (and (term~alpha-equal pre (potac=compute-exists-sort term con)) (term~alpha-equal sort (potac=compute-exists-sort-sort term con)))) (defun potac=existsi-sort-applicable-sort-p (con sort term) (term~alpha-equal sort (potac=compute-exists-sort-sort term con))) (defun potac=existsi-sort-applicable-prem-p (con pre term) (term~alpha-equal pre (potac=compute-exists-sort term con))) (defun potac=compute-exists-sort (term formula) (let* ((args (data~appl-arguments formula)) (abstr (first args))) (beta~normalize (term~appl-create abstr (list term))))) (defun potac=compute-exists-sort-sort (term formula) (let* ((env (pds~environment omega*current-proof-plan)) (sort (if (data~schema-equal (data~appl-function formula) (env~lookup-object :exists-sort env)) (second (data~appl-arguments formula)) (env~lookup-object :defined env)))) (term~appl-create sort (list term)))) (defun potac=expand-existsi-sort (outline param) (let* ((quantorstring (string (keim~name (data~appl-function (node~formula (car outline)))))) (fs-def (th~find-assumption quantorstring (prob~theory omega*current-proof-plan))) (definiendum (th~definition-constant fs-def)) (definiens (data~copy (th~ass-node fs-def) :downto '(term+constant type+primitive))) (pos-list (append (mapcar #'(lambda (x) (pos~add-front 1 x)) (data~substruct-positions (first param) (node~formula (third outline)))) (mapcar #'(lambda (x) (pos~add-front 2 x)) (second param))))) (tacl~init outline) (tacl~sequence (defni-res ('defni (list (car outline) nil) (list definiendum definiens (pos~add-front 0)))) (existsi-res ('existsi (list (cadr defni-res) nil) (list (car param) pos-list))) (andi-res ('andi (list (cadr existsi-res) (third outline) (second outline)) nil))) (tacl~end))) (com~defcommand existsi-sort (argnames ex-line param line so-line pos-list) (argtypes ndline term ndline ndline position-list) (arghelps "Existential line to prove" "Witness term" "A line" "A line with sort" "The position(s) of the witness term") (function potac=existsi-sort) (defaults potac=existsi-sort-defaults) (frag-cats tactics post) (log-p T) (help "Introduce a sorted existential quantifier.")) (defun potac=existsi-sort (ex-line param line so-line pos-list) (infer~compute-outline 'existsi-sort (list ex-line line so-line) (list param pos-list))) (defgeneric potac=exists-sort-formula? (form) (:method ((form node+node)) (potac=exists-sort-formula? (node~formula form))) (:method ((form term+appl)) (let ((func (data~appl-function form)) (env (pds~environment omega*current-proof-plan))) (or (data~schema-equal func (env~lookup-object :exists-sort env)) (data~schema-equal func (env~lookup-object :exists-defined env))))) (:method ((form term+term)) nil)) (defun potac=existsi-sort-defaults (ex-line param line so-line pos-list) (cond ((not (com~specified-arg-p ex-line)) (list (pds~find-open-node #'potac=exists-sort-formula?) 
(com~unspecified) (com~unspecified) (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p param)) (list ex-line (com~unspecified) (com~unspecified) (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p line)) (list ex-line param (oc~nil-argument)(com~unspecified)(com~unspecified))) ((not (com~specified-arg-p so-line)) (list ex-line param line (if (and ex-line (= (length (data~appl-arguments (node~formula ex-line))) 2) param) (let* ((sort (second (data~appl-arguments (node~formula ex-line)))) (sort-node (pds~find-support #'(lambda (x) (and (term~appl-p x) (term~alpha-equal sort (data~appl-function x)) (term~alpha-equal param (car (data~appl-arguments x)))))))) (if sort-node sort-node (oc~nil-argument))) (oc~nil-argument)) (com~unspecified))) ((not (com~specified-arg-p pos-list)) (list ex-line param line so-line (if (and param line) (data~substruct-positions param (node~formula line)) (if (logic~existential-quantification-p (node~formula ex-line)) (data~substruct-positions (logic~quantification-bound-variable (node~formula ex-line)) (logic~quantification-scope (node~formula ex-line))) (com~unspecified))))) (t (list ex-line param line so-line pos-list)))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; existse-sort ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (infer~deftactic existse-sort (outline-mappings (((existent existent nonexistent) existse-sort-b) ((existent existent existent) existse-sort-a))) (parameter-types termsym) (expansion-function potac=expand-existse-sort) (help "EXISTS-SORT-Elimination.")) (tac~deftactic existse-sort-b existse-sort (in post) (parameters (X term+constant "The new term.")) (conclusions C) (premises E P) (hypotheses (H P)) (sideconditions (potac=exists-sort-formula? (formula E)) (pds~not-free-in-nodes-or-hyps-p X C E)) (computations (H (potac=compute-existse-sort-hyp X (formula E))) (P (batac=compute-existse-sort (formula C)))) (description "S-Existential elimination backwards")) (tac~deftactic existse-sort-a existse-sort (in post) (parameters (X term+constant "The new term.")) (conclusions C) (premises E P) (hypotheses (H P)) (sideconditions (potac=exists-sort-formula? 
(formula E))
		   (pds~not-free-in-nodes-or-hyps-p X C E)
		   (potac=existse-sort-applicable-p (formula C) (formula E) (formula P) (hyps P) X))
   (description "S-Existential elimination application"))

(defun potac=existse-sort-applicable-p (con ex pre hyp-list term)
  (let ((hyp (potac=compute-existse-sort-hyp term ex)))
    (and (term~alpha-equal pre con)
	 (some #'(lambda (h) (term~alpha-equal hyp (node~formula h))) hyp-list))))

(defun potac=compute-existse-sort-hyp (term ex)
  (term~appl-create (env~lookup-object :and (pds~environment omega*current-proof-plan))
		    (list (potac=compute-exists-sort-sort term ex)
			  (potac=compute-exists-sort term ex))))

(defun potac=expand-existse-sort (outline param)
  (let* ((quantorstring (string (keim~name (data~appl-function (node~formula (second outline))))))
	 (fs-def (th~find-assumption quantorstring (prob~theory omega*current-proof-plan)))
	 (definiendum (th~definition-constant fs-def))
	 (definiens (data~copy (th~ass-node fs-def) :downto '(term+constant type+primitive)))
	 (conc (car outline))
	 (exi (cadr outline))
	 (prem (caddr outline))
	 (old-hyp (car (set-difference (pdsn~hyps prem) (pdsn~hyps conc)))))
    (tacl~init outline)
    (let ((result (tacl~sequence
		   (defne-res ('defne (list nil exi) (list definiendum definiens (pos~add-front 0))))
		   (existse-res ('existse (list conc (car defne-res) nil) param)))))
      (tac~forget&destroy-hyp (list (third result)) old-hyp (fourth result) :test 'term~alpha-equal)
      (tacl~apply 'weaken (list (third result) prem) nil))
    (tacl~end)))

(com~defcommand existse-sort
  (argnames ex-line line param prem)
  (argtypes ndline ndline termsym ndline)
  (arghelps "An existential line" "A line to be proved" "A term" "The second premise line")
  (function potac=existse-sort)
  (defaults potac=existse-sort-defaults)
  (frag-cats tactics post)
  (log-p T)
  (help "Eliminate a sorted existential quantifier."))

(defun potac=existse-sort (ex-line line param prem)
  (infer~compute-outline 'existse-sort (list line ex-line prem) (list param)))

(defun potac=existse-sort-defaults (ex-line line param prem)
  (cond ((not (com~specified-arg-p ex-line))
	 (list (pds~find-support #'potac=exists-sort-formula?)
(com~unspecified) (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p line)) (list ex-line (oc~default-current-planline) (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p param)) (list ex-line line (or (potac=generate-new-constant ex-line (pds~environment omega*current-proof-plan)) (com~unspecified)) (com~unspecified))) ((not (com~specified-arg-p prem)) (list ex-line line param (oc~nil-argument))) (t (list ex-line line param prem)))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; wellsorted ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (infer~defwild-tactic wellsorted (outline-mappings (((existent list) wellsorted-b) ((existent nonexistent) wellsorted-b))) (parameter-types anything-list) (expansion-function potac=expand-wellsorted) (passkey :node) (help "Proves that a formula is wellsorted")) (defun wellsorted-b (conc prems parameters) (if (or prems (car parameters)) t nil)) (defun potac=expand-numbers (node &key (forward nil)) (typecase node (cons (cons (potac=expand-numbers (car node) :forward forward) (potac=expand-numbers (cdr node) :forward forward))) (null nil) (t (let* ((env (pds~environment omega*current-proof-plan)) (theory (prob~theory omega*current-proof-plan)) (senv (th~senv theory)) (numbers (remove-duplicates (remove-if-not #'term~number-p (data~all-substructs (node~formula node))) :test #'data~equal))) (dolist (num numbers node) (let* ((numsym (make-symbol (format nil "~A" (keim~name num)))) (def (or (th~find-assumption numsym theory) (make-instance 'th+def :name numsym :constant num :theory theory :node (natac=numbers-2-function num) :help ""))) (definiens (data~copy (th~ass-node def) :downto '(term+constant type+primitive))) (poslist (data~substruct-positions num (node~formula node) :test #'data~equal))) (if forward (setf node (car (tacl~apply 'defne* (list nil node) (list num definiens poslist)))) (setf node (cadr (tacl~apply 'defni* (list node nil) (list num definiens poslist))))))))))) (defun potac=expand-wellsorted (concs prems params) ; (mapc #'potac=node-formula! concs) ; (mapc #'potac=node-formula! prems) (let* ((thms (car params)) (env (pds~environment omega*current-proof-plan)) (theory (prob~theory omega*current-proof-plan)) (senv (th~senv theory))) (labels ((expand-node (nodes) (when nodes (let* ((node (car nodes)) (form (node~formula node)) (weaken? (find-if #'(lambda (prem) (term~alpha-equal (node~formula prem) form)) prems))) ;(omega~trace "expand-node: node ~A node-formula ~A weaken? ~A" node (node~formula node) weaken?) (if weaken? (progn (tacl~apply 'weaken (list node weaken?) nil) (expand-node (rest nodes))) (let* ((new-node (potac=expand-numbers node)) (formula (node~formula new-node)) (sort (sort~sort-of-pred (data~appl-function formula) senv)) (term (car (data~appl-arguments formula))) (theo? (find-if #'(lambda (th) (and sort (data~equal sort (second th)) (if (data~abstr-p term) (term~alpha-match (data~abstr-range term) (car th)) (term~alpha-equal term (car th))))) thms))) ;(omega~trace "expand-node: node ~A node-formula ~A theo? ~A" node (node~formula node) theo?) (if theo? (progn (setf thms (remove theo? 
thms))
                                  (expand-node (append (rest nodes) (second (tacl~apply 'apply-theorem (list new-node nil) (rest (rest theo?)))))))
                              (progn (fun-case new-node) (expand-node (rest nodes)))))))))
             (fun-case (node)
                 (let* ((funsort (th~find-assumption "fun-sort" theory))
                        (funsortdefiniendum (th~definition-constant funsort))
                        (funsortdefiniens (data~copy (th~ass-node funsort) :downto '(term+constant type+primitive)))
                        (dummy))
                   (tacl~sequence
                    (deffun ('defni (list node nil) (list funsortdefiniendum funsortdefiniens (pos~list-position '(0)))))
                    (alli ('foralli-sort* (list (second deffun) nil) (list (orules=generate-defaults-foralli (second deffun) env))))
                    (sorted ('wellsorted
                             (progn (setq dummy (potac=wellsorted-check-and-return-theorems (car (second alli)) (append (third alli) prems)))
                                    (list (car (second alli)) (car dummy)))
                             (rest dummy))))))
             (ugly-case (node)
                 (let* ((func (th~find-assumption "functions" theory))
                        (funcdefiniendum (th~definition-constant func))
                        (funcdefiniens (data~copy (th~ass-node func) :downto '(term+constant type+primitive)))
                        (total (th~find-assumption "total" theory))
                        (totaldefiniendum (th~definition-constant total))
                        (totaldefiniens (data~copy (th~ass-node total) :downto '(term+constant type+primitive)))
                        (image (th~find-assumption "image-of-domain" theory))
                        (imagedefiniendum (th~definition-constant image))
                        (imagedefiniens (data~copy (th~ass-node image) :downto '(term+constant type+primitive)))
                        (subset (th~find-assumption "subset" theory))
                        (subsetdefiniendum (th~definition-constant subset))
                        (subsetdefiniens (data~copy (th~ass-node subset) :downto '(term+constant type+primitive)))
                        (dummy))
                   ;; utterly silly
                   (sys~handler-case
                    (tacl~sequence
                     (deffunc ('defni (list node nil) (list funcdefiniendum funcdefiniens (pos~list-position '(0)))))
                     (deftot ('defni (list (second deffunc) nil) (list totaldefiniendum totaldefiniens (pos~list-position '(1 0)))))
                     (defsub ('defni (list (second deftot) nil) (list subsetdefiniendum subsetdefiniens (pos~list-position '(2 0)))))
                     (defimg ('defni (list (second defsub) nil) (list imagedefiniendum imagedefiniens (pos~list-position '(2 1 0 1 0)))))
                     (andi ('andi (list (second defimg) nil nil) nil))
                     (lalli ('foralli-sort (list (second andi) nil) (list (potac=generate-new-constant (second andi) env))))
                     (lexi ('existsi-sort (list (second lalli) nil nil)
                                          (list (data~struct-at-position (node~formula (second lalli)) (pos~list-position '(1 0 2)))
                                                (list (pos~list-position '(1))))))
                     (lref ('=ref (list (second lexi)) (list (data~struct-at-position (node~formula (second lexi)) (pos~list-position '(1))))))
                     (lsort ('wellsorted
                             (progn (setq dummy (potac=wellsorted-check-and-return-theorems (third lexi) (cons (third lalli) prems)))
                                    (list (third lexi) (car dummy)))
                             (rest dummy)))
                     (ralli ('foralli-sort (list (third andi) nil) (list (potac=generate-new-constant (second andi) env))))
                     (rimp ('impi (list (second ralli) nil) nil))
                     (rexe ('existse-sort (list (second rimp) (third rimp) nil) (list (potac=generate-new-constant (second andi) env))))
                     (rand ('ande (list nil nil (fourth rexe)) nil))
                     (rsubst ('=subst (list (third rexe) nil (second rand)) (list (pos~list-position '(1)))))
                     (rsort ('wellsorted
                             (progn (setq dummy (potac=wellsorted-check-and-return-theorems (second rsubst) (cons (third ralli) (cons (first rand) prems))))
                                    (list (second rsubst) (car dummy)))
                             (rest dummy))))
                    (error (cond)
                           (omega~error "\"~A\" in the expansion of WELLSORTED: call MP."
                                        cond))))))
    (tacl~init (append concs prems))
    (expand-node concs)))
  (tacl~end))

(com~defcommand wellsorted
  (argnames line premises)
  (argtypes ndline ndline-list)
  (arghelps "A line with sort" "A list of premises")
  (function potac=wellsorted-outline)
  (defaults potac=wellsorted-defaults)
  (frag-cats tactics post)
  (log-p T)
  (help "Prove that a formula is wellsorted."))

(defun potac=wellsorted-outline (line prems)
  (let* ((allthms (potac=wellsorted-check-and-return-theorems line prems))
         (newprems (car allthms))
         (param (second allthms)))
    (infer~compute-outline 'wellsorted (list line (remove-duplicates newprems)) (list param))))

(defun potac=wellsorted-check-and-return-theorems (line prems)
  (let* ((senv (th~senv (prob~proof-theory omega*current-proof-plan)))
         (sortprems (potac=wellsorted-prems prems senv))
         (sortterm (potac=node-formula line))
         (sort (when (data~appl-p sortterm) (sort~sort-of-pred (data~appl-c-function sortterm) senv)))
         (term (when sort (data~appl-c-argument sortterm)))
         (newsenv (if sortprems
                      (sort~env-create :parents (list senv) :unsortedenv (pds~environment omega*current-proof-plan))
                    senv)))
    (when sort
      (mapc #'(lambda (node)
                (let ((premterm (potac=node-formula node)))
                  (sort~env-enter (list (data~appl-c-argument premterm)
                                        (sort~sort-of-pred (data~appl-c-function premterm) senv)
                                        node)
                                  newsenv :termdecl t)))
            sortprems)
      (let* ((allthms (suni~sortcheck term sort newsenv))
             (newprems (mapcan #'(lambda (node) (when (node~p (third node)) (list node))) allthms))
             (thms (set-difference allthms newprems)))
        (list (mapcar #'third newprems) thms)))))

(defun potac=node-formula (node)
  (let ((formula (if (node~p node) (node~formula node) node)))
    (if (th~find-theory 'natural)
        (natac=numbers-2-function formula)
      formula)))

(defun potac=node-formula! (node)
  (let ((formula (if (node~p node) (node~formula node) node)))
    (if (th~find-theory 'natural)
        (natac=numbers-2-function! formula)
      formula)))

(defun potac=wellsorted-prems (premlist sorted-env)
  (mapcan #'(lambda (node)
              (let ((form (node~formula node)))
                (when (and (data~appl-p form) (sort~sort-of-pred (data~appl-c-function form) sorted-env))
                  (list node))))
          premlist))

(defun potac=wellsorted-defaults (line prems)
  (cond ((not (com~specified-arg-p line))
         (list (oc~default-current-planline) (com~unspecified)))
        ((not (com~specified-arg-p prems))
         (list line
               (if line
                   (let* ((sorted-env (th~senv (prob~proof-theory omega*current-proof-plan)))
                          (sorted-nodes (potac=wellsorted-prems (pds~node-supports line) sorted-env)))
                     (if sorted-nodes sorted-nodes (oc~nil-argument)))
                 (oc~nil-argument))))
        (t (list line prems))))

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; apply-theorem
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

(infer~defwild-tactic apply-theorem
		      (outline-mappings (((existent list) apply-theorem-b)
					 ((existent nonexistent) apply-theorem-b)
					 ((nonexistent list) apply-theorem-f)))
		      (parameter-types symbol)
		      (expansion-function potac=expand-apply-theorem)
		      (passkey :formula)
		      (help "Proves a formula by applying a theorem"))

(defun apply-theorem-b (conc prems parameters) ;; still buggy
  ;(omega~trace "apply-theorem-b: thm ~A on ~A with ~A" parameters conc prems)
  (let ((newpremsubst (potac=apply-theorem-test (car conc) prems (car parameters))))
    ;(omega~trace "apply-theorem-b ~A" newpremsubst)
    (cond ((null newpremsubst)
           (omega~warn "The conclusion ~A didn't match the succedent of the theorem."
                       (car conc)))
          ((null (second newpremsubst))
           (omega~warn "The premises didn't match the premises of the theorem."))
          ((consp (car newpremsubst))
           (values nil
                   (remove-duplicates (mapcar #'(lambda (prem) (subst~apply (second newpremsubst) prem)) (car newpremsubst))
                                      :test #'data~equal)))
          (T T))))

(defun apply-theorem-f (conc prems parameters)
  ;(omega~trace "apply-theorem-f: thm ~A on ~A with ~A" parameters conc prems)
  (let ((newpremsubst (potac=apply-theorem-test (car conc) prems (car parameters))))
    ;(omega~trace "apply-theorem-f ~A" newpremsubst)
    (cond ((null newpremsubst)
           (omega~warn "The conclusion ~A didn't match the succedent of the theorem."
                       (car conc)))
          ((null (second newpremsubst))
           (omega~warn "The premises didn't match the premises of the theorem."))
          ((null (third newpremsubst))
           (omega~warn "No conclusion from theorem."))
          ((consp (car newpremsubst))
           (values (list (subst~apply (second newpremsubst) (third newpremsubst)))
                   (remove-duplicates (mapcar #'(lambda (prem) (subst~apply (second newpremsubst) prem)) (car newpremsubst))
                                      :test #'data~equal)))
          (T T))))

(defun potac=expand-apply-theorem (conc prems params)
  (let* ((newpremsubst (potac=apply-theorem-test (node~formula (car conc)) (mapcar #'node~formula prems) (car params)))
         (subst (second newpremsubst))
         (premises (first newpremsubst))
         (kappatheo (tacl~insert&return-assumption (prob~theory omega*current-proof-plan) (car params)))
         (fs-def (th~find-assumption "forall-sort" (prob~theory omega*current-proof-plan)))
         (definiendum (th~definition-constant fs-def))
         (definiens (data~copy (th~ass-node fs-def) :downto '(term+constant type+primitive))))
    (tacl~init (append conc prems))
    (let* ((theo (if (data~schema-p (node~formula kappatheo))
                     (car (tacl~apply 'kappae (list nil kappatheo) (subst~apply subst (list (data~schema-domain (node~formula kappatheo))))))
                   kappatheo))
           (pos (data~substruct-positions definiendum (node~formula theo) :test #'data~schema-equal))
           (defni (if pos
                      (car (tacl~apply 'defne* (list nil theo) (list definiendum definiens pos)))
                    theo))
           (weak (when (term~alpha-equal (node~formula defni) (node~formula (car conc)))
                   (tacl~apply 'weaken (list (car conc) defni) nil))))
      (tacl~end)
      (unless weak
        (alift~apply-command!
prems defni (car conc)))))) (defun potac=apply-theorem-get-formula-vars (thy) (let ((imp (logic~implication-constant)) (all (env~lookup-object 'forall (pds~environment omega*current-proof-plan))) (all-sort (env~lookup-object 'forall-sort (pds~environment omega*current-proof-plan)))) (labels ((get-forms-vars (conc &optional forms vars) ;(omega~trace "get-formula-vars: conc ~A forms ~A vars ~A" conc forms vars) (cond ((data~schema-p conc) (let ((termcopy (data~alpha-copy (data~schema-range conc) nil))) (get-forms-vars termcopy forms (append (term~type-variables-rec termcopy) vars)))) ((data~appl-p conc) (let ((fct (data~appl-function conc)) (args (data~appl-arguments conc))) (cond ((data~schema-equal all fct) (get-forms-vars (data~abstr-scope (car args)) forms (append (data~abstr-domain (car args)) vars))) ((data~schema-equal all-sort fct) (get-forms-vars (data~abstr-scope (car args)) (cons (term~appl-create (second args)(data~abstr-domain (car args))) forms) (append (data~abstr-domain (car args)) vars))) ((data~equal imp fct) (get-forms-vars (second args) (cons (car args) forms) vars)) (T (list (cons conc forms) vars))))) (t (list (cons conc forms) vars))))) (let* ((thm (th~find-assumption thy (prob~theory omega*current-proof-plan))) (conc (th~ass-formula thm))) (get-forms-vars conc))))) (defun potac=apply-theorem-test (conc premises thm) (declare (edited "28-MAR-2000") (authors Pollet) (input "A conclusion formula, a list of premise formulas, a theorem name.") (effect "-") (value "If PREMISES is a subset of the antecedents a list with two lists:" "the first list contains all additional premise formulas," "the second list contains substitions for the variables of the theorem." "else an error message and NIL when CONC and the succedent of THM are" "unmatchable.")) (let* ((formvarlist (potac=apply-theorem-get-formula-vars thm)) (thmvars (cadr formvarlist))) (labels ((check-prem (prem thmprems subst) ;(omega~trace "check-prem: prem ~A thmprems ~A subst ~A" prem thmprems subst) (cond ((null prem) (list thmprems subst)) ((null thmprems) (omega~error "Premise ~A did not match with any premise of theorem ~A." prem thm)) (T (let ((match? (term~alpha-match (car thmprems) prem :subst subst))) (if match? (list (car thmprems) (subst~compose-substitution match? 
subst)) (check-prem prem (cdr thmprems) subst))))))) (let* ((theconc (if conc conc (caar formvarlist))) ;(concsubst (uni~substitution (car (uni~unify (list (list theconc (caar formvarlist))) :match-only thmvars)))) (concsubst (term~alpha-match (caar formvarlist) theconc)) (theoremprems (cdar formvarlist))) ;(setf bla concsubst)(setf term1 theconc)(setf term2 (caar formvarlist)) (if concsubst (do* ((prems premises (rest prems)) (result (check-prem (car prems) theoremprems concsubst) (check-prem (car prems) thyprems newsubst)) (thyprems (remove (car result) theoremprems) (remove (car result) thyprems)) (newsubst (second result) (second result))) ((or (null result)(null prems)) (list thyprems newsubst theconc))) nil))))) (defun potac=apply-theorem-outline (line thy prems) (infer~compute-outline 'apply-theorem (list line prems) (list (keim~name thy)))) (defun potac=apply-theorem-defaults (line prems) (cond ((not (com~specified-arg-p line)) (list (oc~default-current-planline) (com~unspecified))) ((not (com~specified-arg-p prems)) (list line (if line (let* ((sorted-env (th~senv (prob~proof-theory omega*current-proof-plan))) (sorted-nodes (potac=apply-theorem-prems (pds~node-supports line) sorted-env))) (if sorted-nodes sorted-nodes (oc~nil-argument))) (oc~nil-argument)))) (t (list line prems)))) (com~defcommand apply-theorem (argnames line thm premlist) (argtypes ndline thy-assumption ndline-list) (arghelps "An open line" "A theorem" "A list of premises" ) (function potac=apply-theorem-outline) (defaults );potac=apply-theorem-defaults) (frag-cats tactics post) (log-p T) (help "Prove a formula by apply-theorem.")) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; equalref ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #| (infer~deftactic equalref (outline-mappings (((nonexistent existent) equalref-f) ((nonexistent nonexistent) equalref-u) ((existent existent) equalref-a) ((existent nonexistent) equalref-b) )) (parameter-types term) (expansion-function potac=expand-equalref) (help "Reflexivity of equal.")) (tac~deftactic equalref-f equalref (in post) (parameters (Term term+term "A term.")) (premises P) (conclusions L1) (computations (L1 (potac=equalref-create Term))) (sideconditions (potac=equalref-defined-create term (formula P))) (description "Forward application equal-reflexivity.")) (tac~deftactic equalref-b equalref (in post) (parameters (Term term+term "A term.")) (premises P) (conclusions L1) (computations (P (potac=equalref-defined-create term))) (sideconditions (potac=equalref-p (formula L1) Term)) (description "Backward equal-reflexivity.")) (tac~deftactic equalref-u equalref (in post) (parameters (Term term+term "A term.")) (premises P) (conclusions L1) (computations (P (potac=equalref-defined-create term)) (L1 (potac=equalref-create Term))) (sideconditions ) (description "equal-reflexivity.")) (tac~deftactic equalref-a equalref (in post) (parameters (Term term+term "A term.")) (premises P) (conclusions L1) (computations ) (sideconditions (potac=equalref-p (formula L1) Term) (potac=equalref-defined-create term (formula P))) (description "Closing equal-reflexivity.")) (defun potac=equalref-defined-create (term &optional form) (if form (and (data~appl-p form) (data~schema-equal (data~appl-function form) (env~lookup-object :defined (pds~environment omega*current-proof-plan))) (data~equal (car (data~appl-arguments form)) term)) (term~appl-create (env~lookup-object :defined (pds~environment omega*current-proof-plan)) (list Term)))) (defun 
potac=equalref-create (Term) (term~appl-create (env~lookup-object :equal (pds~environment omega*current-proof-plan)) (list Term Term))) (defun potac=equalref-p (formula Term) (when (and (logic~equality-p formula) (data~schema-equal (data~appl-function formula) (env~lookup-object :equal (pds~environment omega*current-proof-plan)))) (and (data~equal (car (data~appl-arguments formula)) (cadr (data~appl-arguments formula))) (data~equal (car (data~appl-arguments formula)) Term)))) (defun potac=expand-equalref (outline parameters) (tacl~init outline) (tacl~apply 'apply-theorem (list (car outline)(rest outline)) (list 'equal-reflexivity)) (tacl~end)) (com~defcommand equalref (argnames equality-line term) (argtypes ndline term) (arghelps "A line with equality" "A term") (function potac=equalref) (frag-cats tactics post) (defaults potac=equalref-defaults) (log-p T) (help "Equality-reflexity.")) (defun potac=equalref (conc term) (infer~compute-outline 'equalref (list conc nil) (list term))) (defun potac=equalref-defaults (equality-line term) (cond ((not (com~specified-arg-p equality-line)) (list (pds~find-open-node #'logic~equality-p) (com~unspecified)(com~unspecified))) ((not (com~specified-arg-p term)) (if (and equality-line (logic~equality-p (node~formula equality-line))) (list equality-line (car (data~appl-arguments (node~formula equality-line)))) (list equality-line (com~unspecified)))) (t (list equality-line term)))) |# ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; equalsym ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #| (infer~deftactic equalsym (outline-mappings (((nonexistent existent nonexistent nonexistent) equalsym-f) ((existent existent nonexistent nonexistent) equalsym-a) ((existent nonexistent nonexistent nonexistent) equalsym-b) )) (parameter-types) (expansion-function potac=expand-equalsym) (help "Commutativity of equality.")) (tac~deftactic equalsym-f equalsym (in post) (premises L1 defl defr) (conclusions L2) (computations (L2 (potac=equalsym-create (formula L1))) (defr (potac=equalsym-defined-create-l (formula L2))) (defl (potac=equalsym-defined-create-r (formula L2)))) (sideconditions (logic~equality-p (formula L1))) (description "Forward application equality-symmetry.")) (tac~deftactic equalsym-a equalsym (in post) (premises L1 defl defr) (conclusions L2) (computations (defr (potac=equalsym-defined-create-l (formula L2))) (defl (potac=equalsym-defined-create-r (formula L2)))) (sideconditions (batac=equality-sym-p (formula L1) (formula L2))) (description "Closing equality.")) (tac~deftactic equalsym-b equalsym (in post) (premises L1 defl defr) (conclusions L2) (computations (L1 (potac=equalsym-create (formula L2))) (defr (potac=equalsym-defined-create-l (formula L2))) (defl (potac=equalsym-defined-create-r (formula L2)))) (sideconditions (logic~equality-p (formula L2))) (description "Backward equality-symmetry.")) (defun potac=equalsym-defined-create-l (equality) (potac=equalref-defined-create (first (data~appl-arguments equality)))) (defun potac=equalsym-defined-create-r (equality) (potac=equalref-defined-create (second (data~appl-arguments equality)))) (defun potac=equalsym-create (term) (term~appl-create (env~lookup-object :equal (pds~environment omega*current-proof-plan)) (list (second (data~appl-arguments term)) (first (data~appl-arguments term))))) (defun potac=equalsym-p (term1 term2) (let ((equal (env~lookup-object :equal (pds~environment omega*current-proof-plan)))) (and (logic~equality-p term1) (data~schema-equal 
(data~appl-function term1) equal) (logic~equality-p term2) (data~schema-equal (data~appl-function term2) equal)) (let ((args1 (data~appl-arguments term1)) (args2 (data~appl-arguments term2))) (and (term~alpha-equal (first args1) (second args2)) (term~alpha-equal (second args1) (first args2)))))) (defun potac=expand-equalsym (outline parameters) (tacl~init outline) (tacl~apply 'apply-theorem (list (car outline)(rest outline)) (list 'equal-symmetry)) (tacl~end)) (com~defcommand equalsym (argnames equality-line1 equality-line2) (argtypes ndline ndline) (arghelps "A line with the conclusion equality" "Another line with the premise equality") (function potac=equalsym) (frag-cats tactics post) (defaults potac=equalsym-defaults) (log-p T) (help "Equality-symmetry.")) (defun potac=equalsym (P P2) (infer~compute-outline 'equalsym (list P P2 nil nil) nil)) (defun potac=equalsym-defaults (equality-line1 equality-line2) (cond ((not (com~specified-arg-p equality-line1)) (list (pds~find-open-node #'logic~equality-p) (com~unspecified))) ((not (com~specified-arg-p equality-line2)) (list equality-line1 (if (and (pdsn~p equality-line1) (logic~equality-p (node~formula equality-line1))) (pds~find-node-support equality-line1 #'(lambda (p) (data~equal p (potac=equalsym-create (node~formula equality-line1))))) (pds~find-support #'logic~equality-p)))) (t (list equality-line1 equality-line2)))) |# ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; equalsubst ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #| (infer~deftactic equalsubst (outline-mappings (((nonexistent existent existent nonexistent nonexistent) equalsubst-f) ((existent existent existent nonexistent nonexistent) equalsubst-a) ((existent existent nonexistent nonexistent nonexistent) equalsubst-l) ((existent nonexistent existent nonexistent nonexistent) equalsubst-r) )) (parameter-types position) (expansion-function potac=expand-equalsubst) (help "Replacement property of equality.")) (tac~deftactic equalsubst-f equalsubst (in post) (parameters (position pos+position "A position.")) (premises L1 L2 defl defr) (conclusions L3) (computations (L3 (potac=equalsubst-create-f (formula L1) (formula L2) position)) (defr (potac=equalsym-defined-create-l (formula L2))) (defl (potac=equalsym-defined-create-r (formula L2)))) (sideconditions (potac=equalsubst-f-p (formula L1) (formula L2) position)) (description "Forward application equality substitution.")) (tac~deftactic equalsubst-a equalsubst (in post) (parameters (position pos+position "A position.")) (premises L1 L2 defl defr) (conclusions L3) (computations (defr (potac=equalsym-defined-create-l (formula L2))) (defl (potac=equalsym-defined-create-r (formula L2)))) (sideconditions (potac=equalsubst-a-p (formula L3) (formula L1) (formula L2) position)) (description "Closing equality substitution.")) (tac~deftactic equalsubst-l equalsubst (in post) (parameters (position pos+position "A position.")) (premises L1 L2 defl defr) (conclusions L3) (computations (L2 (potac=equalsubst-create-l (formula L3) (formula L1) position)) (defr (potac=equalsym-defined-create-l (formula L2))) (defl (potac=equalsym-defined-create-r (formula L2)))) (sideconditions (potac=equalsubst-l-p (formula L3) (formula L1) position)) (description "Creating equation for a substitution.")) (tac~deftactic equalsubst-r equalsubst (in post) (parameters (position pos+position "A position.")) (premises L1 L2 defl defr) (conclusions L3) (computations (L1 (potac=equalsubst-create-f (formula L3) (formula L2) 
position)) (defr (potac=equalsym-defined-create-l (formula L2))) (defl (potac=equalsym-defined-create-r (formula L2)))) (sideconditions (potac=equalsubst-f-p (formula L3) (formula L2) position)) (description "Creating equation for a substitution.")) (defun potac=equalsubst-f-p (term equal-term position) (when (logic~equality-p equal-term) (let* ((arg1 (first (data~appl-arguments equal-term))) (arg2 (second (data~appl-arguments equal-term))) (positions-of-arg1 (data~substruct-positions arg1 term :test 'term~alpha-equal)) (positions-of-arg2 (data~substruct-positions arg2 term :test 'term~alpha-equal))) (or (find position positions-of-arg1 :test 'keim~equal) (find position positions-of-arg2 :test 'keim~equal))))) (defun potac=equalsubst-create-f (term equal-term position) (let* ((term-at-position (data~struct-at-position term position)) (args (data~appl-arguments equal-term))) (cond ((term~alpha-equal term-at-position (first args)) (data~replace-at-position term position (second args))) (t (data~replace-at-position term position (first args)))))) (defun potac=equalsubst-a-p (conclusion term equal-term position) (and (potac=equalsubst-f-p term equal-term position) (term~alpha-equal (potac=equalsubst-create-f term equal-term position) conclusion))) (defun potac=equalsubst-l-p (conclusion term position) (let ((positions-of-conc (data~positions conclusion #'(lambda (arg) 't))) (positions-of-term (data~positions term #'(lambda (arg) 't)))) (when (and (find position positions-of-conc :test 'keim~equal) (find position positions-of-term :test 'keim~equal)) (term~alpha-equal (data~replace-at-position conclusion position (data~struct-at-position term position)) term)))) (defun potac=equalsubst-create-l (conclusion term position) (term~appl-create (env~lookup-object :equal (pds~environment omega*current-proof-plan)) (list (data~struct-at-position conclusion position) (data~struct-at-position term position)))) (defun potac=expand-equalsubst (outline parameters) (let* ((concform (node~formula (car outline))) (equality (data~appl-arguments (node~formula (third outline)))) (newvar (term~variable-create (gentemp 's) (term~type (data~struct-at-position concform (car parameters))))) (pred (term~abstr-create (list newvar) (data~replace-at-position concform (car parameters) newvar))) (newconc (term~appl-create pred (list (car equality)))) (newprem (term~appl-create pred (rest equality)))) (tacl~init outline) (tacl~sequence (prem ('denormalize (list nil (second outline)) (list newprem))) (conc ('denormalize (list (car outline) nil) (list newconc))) (conj-res ('andi (list nil (third outline) (car prem)) nil)) (thm ('apply-theorem (list (second conc)(cons (car conj-res)(cdddr outline))) (list 'equal-substitution)))) (tacl~end))) (com~defcommand equalsubst (argnames line1 line2 equality-line position) (argtypes ndline ndline ndline position) (arghelps "The substituted line" "The unsubstituted line" "The equation to be applied." 
"A position.") (function potac=equalsubst) (frag-cats tactics post) (defaults ((oc~default-current-planline) (com~unspecified) (com~unspecified) (com~unspecified))) (level 1) (log-p T) (help "Equality-Substitution.")) (defun potac=equalsubst (P P2 P3 position) (infer~compute-outline 'equalsubst (list P P2 P3 nil nil) (list position))) |# ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; denormalize ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (infer~deftactic denormalize (outline-mappings (((nonexistent existent) denormalize-f) ((existent existent) denormalize-a) ((existent nonexistent) denormalize-b) )) (parameter-types term) (expansion-function potac=expand-denormalize) (help "Introduce a line with a term that is alpha/beta/eta-equal.")) (tac~deftactic denormalize-f denormalize (in post) (parameters (t term+term "A term.")) (premises L1) (conclusions L2) (computations (L2 (potac=denormalize t (formula l1)))) (description "Forward application denormalize.")) (tac~deftactic denormalize-b denormalize (in post) (parameters (t term+term "A term.")) (premises L1) (conclusions L2) (computations (L1 (potac=denormalize t (formula l2)))) (description "Backward application denormalize.")) (tac~deftactic denormalize-a denormalize (in post) (parameters (t term+term "A term.")) (premises L1) (conclusions L2) (sideconditions (lam~equal-p (formula l1)(formula l2))) (description "Closed application denormalize.")) (defun potac=denormalize (term term2) (when (lam~equal-p term term2) term)) (defun potac=expand-denormalize (outline parameters) (tacl~init outline) (tacl~apply 'lambda outline nil) (tacl~end)) ;don't know if this is useful as a command ;(com~defcommand denormalize ; (argnames line1 line2 term) ; (argtypes ndline ndline term) ; (arghelps "Conc" "Prem" "Term") ; (function potac=normalize-outline) ; (frag-cats tactics post) ; (defaults) ; (level 1) ; (log-p T)) ; ;(defun potac=normalize-outline (C P term) ; (infer~compute-outline 'denormalize (list C P) (list term))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; foralle-sort* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (infer~defwild-tactic foralle-sort* (outline-mappings (((nonexistent list) foralle-sort*-f) ((existent list) foralle-sort*-f))) (parameter-types term-list) (expansion-function potac=expand-foralle-sort*) (passkey :formula) (help "FORALL-SORT*-Elimination.")) (defun foralle-sort*-f (conc prems parameters) (let* ((all (potac=forall-sort*-prems (car parameters) (car prems))) (allprems (butlast all)) (newconc (last all))) (if (subsetp (rest prems) allprems :test #'term~alpha-equal) (if conc (when (term~alpha-equal (car conc) (car newconc)) (if (subsetp allprems (rest prems) :test #'term~alpha-equal) t (values nil (set-difference allprems prems :test #'term~alpha-equal)))) (values newconc (set-difference allprems prems :test #'term~alpha-equal))) (omega~warn "More premises are inserted than needed.")))) (defun potac=forall-sort*-prems (terms conc) (cond ((consp terms) (if (potac=forall-sort-formula? 
conc) (cons (potac=compute-forall-sort-hyp (car terms) conc) (potac=forall-sort*-prems (rest terms) (potac=compute-forall-sort (car terms) conc))) (omega~warn "~A is not universal quantified" conc))) (t (list conc)))) (defun potac=expand-foralle-sort* (concs prems params) (let ((newline (car prems)) (term-list (car params))) (tacl~init (append concs prems)) (do* ((rest term-list (cdr rest)) (term (car rest) (car rest))) ((null rest) t) (let* ((premterm (potac=compute-forall-sort-hyp term (node~formula newline))) (prem (first (remove-if-not #'(lambda (node) (term~alpha-equal premterm (node~formula node))) (rest prems))))) ;;(format t "~%For PREMTERM: ~A found node: ~A" premterm prem) (setf newline (car (if (null (rest rest)) (tacl~apply 'foralle-sort (list (car concs) newline prem) (list term)) (tacl~apply 'foralle-sort (list nil newline prem) (list term))))))) (tacl~end))) (com~defcommand foralle-sort* (argnames univ-line line term so-line) (argtypes ndline ndline term-list ndline-list) (arghelps "Universal line" "A line" "A list with terms" "A list with lines that are contain the sort for the terms") (function potac=foralle-sort*) (defaults potac=foralle-sort*-defaults) (frag-cats tactics post) (log-p T) (help "Eliminate a sorted universal quantifier.")) (defun potac=foralle-sort* (univ-line line param so-line) (infer~compute-outline 'foralle-sort* (list line (cons univ-line so-line)) (list param ))) (defun potac=foralle-sort*-defaults (univ elim term so-line) (cond ((not (com~specified-arg-p univ)) (list (pds~find-support #'potac=forall-sort-formula?) (com~unspecified) (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p elim)) (list univ (oc~nil-argument) (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p term)) (list univ elim (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p so-line)) (list univ elim term (oc~nil-argument))) (t (list univ elim term so-line)))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; foralli-sort* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (infer~defwild-tactic foralli-sort* (outline-mappings (((existent nonexistent) foralli-sort*-b))) (parameter-types termsym-list) (expansion-function potac=expand-foralli-sort*) (passkey :formula) (help "FORALL-SORT-Elimination.")) (defun foralli-sort*-b (conc prem parameters) (let ((all (reverse (potac=forall-sort*-prems (car parameters) (car conc))))) ;;termsyms free in hyps!!! 
(values nil (list all)))) (defun potac=expand-foralli-sort* (concs prems params) (let* ((old-hyps (set-difference (pdsn~hyps (car prems)) (pdsn~hyps (car concs)))) (forall-line (first concs)) (const-list (first params)) (last-const (first (last const-list)))) (tacl~init (append concs prems)) (do ((rest-consts const-list (rest rest-consts))) ((null rest-consts) t) (let* ((head-const (first rest-consts)) (newline (tacl~apply 'foralli-sort (list forall-line nil) (list head-const))) (new-hyp (third newline)) (old-hyp (car (mapcan #'(lambda (node) (when (data~equal (node~formula node) (node~formula new-hyp)) (list node))) old-hyps)))) (setq forall-line (second newline)) (tac~forget&destroy-hyp (list forall-line) old-hyp new-hyp))) (tacl~apply 'weaken (cons forall-line prems) nil)) (tacl~end)) (com~defcommand foralli-sort* (argnames univ-line param line) (argtypes ndline termsym-list ndline) (arghelps "Universal line to prove" "A list of parameters" "A line" ) (function potac=foralli-sort*) (defaults potac=foralli-sort*-defaults) (frag-cats tactics post) (log-p T) (help "Introduce a sorted universal quantifier.")) (defun potac=foralli-sort* (univ-line param line) (infer~compute-outline 'foralli-sort* (list univ-line line) (list param ))) (defun potac=foralli-sort*-defaults (univ term line) (cond ((not (com~specified-arg-p univ)) (list (pds~find-open-node #'potac=forall-sort-formula?) (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p term)) (list univ (if univ (orules=generate-defaults-foralli univ (pds~environment omega*current-proof-plan)) (oc~nil-argument)) (com~unspecified))) ((not (com~specified-arg-p line)) (list univ term (oc~nil-argument))) (t (list univ term line)))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; existse-sort* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (infer~defwild-tactic existse-sort* (outline-mappings (((existent existent nonexistent) existse-sort*-b))) (parameter-types termsym-list) (expansion-function potac=expand-existse-sort*) (passkey :formula) (help "EXISTS-SORT*-Elimination.")) (defun existse-sort*-b (concs prems parameters) (when (potac=exists-sort-formula? (first prems)) (let* ((conc (first concs)) (exsort-formula (first prems)) (consts (first parameters)) (hyps (potac=get-hyps-recursive exsort-formula consts)) (counter 0) (marked-hyps (mapcar #'(lambda (hyp) (list hyp (incf counter))) hyps))) (values nil (list (cons conc marked-hyps)))))) ;; The direct combination with ANDE Steps is nt possible! ;; (values (apply #'append (mapcar #'(lambda (marked-hyp) ;; (list (list (first (data~appl-arguments (first marked-hyp))) marked-hyp) ;; (list (second (data~appl-arguments (first marked-hyp))) marked-hyp))) ;; marked-hyps)) ;; (list (cons conc marked-hyps)))))) (defun potac=expand-existse-sort* (conclusions premises parameters) (let* ((conc-line (first conclusions)) (ex-line (first premises)) (subgoal-line (second premises)) (consts (first parameters))) (tacl~init (append conclusions premises)) (let* ((new-subgoal-line (do* ((current-ex-line ex-line) (current-conc-line conc-line) (current-consts consts (rest consts))) ((or (null current-consts) (null (potac=exists-sort-formula? 
current-ex-line))) current-conc-line) (let* ((result-exsort (tacl~apply 'existse-sort (list current-conc-line current-ex-line nil) (list (first current-consts)))) (new-subgoal (third result-exsort)) (new-hyp (fourth result-exsort)) (old-hyp (first (remove-if-not #'(lambda (hyp-line) (term~alpha-equal (node~formula hyp-line) (node~formula new-hyp))) (pdsn~hyps subgoal-line))))) (tac~forget&destroy-hyp (list new-subgoal) old-hyp new-hyp :test #'term~alpha-equal) (let* ((result-ander (tacl~apply 'ander (list nil old-hyp) nil)) (new-ex-line (first result-ander))) (setf current-conc-line new-subgoal) (setf current-ex-line new-ex-line)))))) (tacl~apply 'weaken (list new-subgoal-line subgoal-line) nil) (tacl~end)))) (defun potac=get-hyps-recursive (formula consts) (if (potac=exists-sort-formula? formula) (if consts (let* ((args (data~appl-arguments formula)) (abstr (first args)) (sort (second args)) (bound-variable (first (data~abstr-domain abstr))) (range (data~abstr-range abstr)) (head-const (first consts)) (sort-term (term~appl-create sort (list head-const))) (range-term (data~replace-struct range bound-variable head-const)) (new-hyp (term~appl-create (env~lookup-object 'and (th~env 'base)) (list sort-term range-term)))) (cons new-hyp (potac=get-hyps-recursive range-term (rest consts)))) nil) nil)) (com~defcommand existse-sort* (argnames concline exline subgoal parameter) (argtypes ndline ndline ndline termsym-list) (arghelps "Conclusion Line." "An existentially quanitified line" "Subgoal Line." "Termsym List.") (function potac=existse-sort*) (frag-cats tactics post) (defaults potac=existse-sort*-defaults) (log-p T) (help "Apply a series of Exists-Sort-Elminations.")) (defun potac=existse-sort* (C exline P param) (infer~compute-outline 'existse-sort* (list C exline P) (list param))) (defun potac=existse-sort*-defaults (conc-line ex-line subgoal-line consts) (cond ((not (com~specified-arg-p conc-line)) (list (oc~default-current-planline) (com~unspecified) (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p ex-line)) (list conc-line (pds~find-support #'potac=exists-sort-formula?) (com~unspecified) (com~unspecified))) ((not (com~specified-arg-p subgoal-line)) (list conc-line ex-line (oc~nil-argument) (com~unspecified))) ((not (com~specified-arg-p consts)) (list conc-line ex-line subgoal-line (batac=generate-defaults-existse* ex-line))) (t (list conc-line ex-line subgoal-line consts)))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Application of rewrite rules (i.e. a formula of the form ;; (forall (lam (x aa) (forall (lam (y bb) ..... ;; (implies (and (in x Set1) (and (in y Set2) .... ;; (= (quack x y ...) ;; (ruelps y x ...) ....) 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (infer~defwild-tactic rewrite-with (outline-mappings (((existent list) rewrite-with-b) ((existent nonexistent) rewrite-with-b) ((nonexistent list) rewrite-with-f))) (parameter-types position symbol symbol) (expansion-function potac=expand-rewrite-with) (passkey :formula) (help "Apply a rewrite rule at a given position in a certain direction.")) (defun rewrite-with-b (conc prems parameters) (multiple-value-bind (newconc newprems) (potac=rew-apply (car conc) (second parameters) (first parameters) (third parameters)) (when newconc (values nil (remove-duplicates (cons newconc newprems) :test #'data~equal))))) (defun rewrite-with-f (conc prems parameters) (multiple-value-bind (newconc newprems) (potac=rew-apply (car prems) (second parameters) (first parameters) (third parameters)) (when newconc (values (list newconc) (remove-duplicates newprems :test #'data~equal))))) (defun potac=rew-decompose (ax &optional vars prems) (cond ((data~schema-p ax) (let ((termcopy (data~alpha-copy (data~schema-range ax) nil))) (potac=rew-decompose termcopy (append (term~type-variables-rec termcopy) vars) prems))) ((logic~universal-quantification-p ax) (potac=rew-decompose (logic~quantification-scope ax) (cons (logic~quantification-bound-variable ax) vars) (if (data~schema-equal (env~lookup-object :forall-sort (pds~environment omega*current-proof-plan)) (data~appl-function ax)) (cons (data~appl-create (second (data~appl-arguments ax)) (list (logic~quantification-bound-variable ax))) prems) prems))) ((logic~implication-p ax) (potac=rew-decompose (second (data~appl-arguments ax)) vars (if (logic~conjunction-p (car (data~appl-arguments ax))) (append (batac=split-on-and (car (data~appl-arguments ax))) prems) (cons (car (data~appl-arguments ax)) prems)))) ((or (logic~equality-p ax) (logic~equivalence-p ax)) (values (car (data~appl-arguments ax)) (second (data~appl-arguments ax)) vars prems)) (T nil))) (defun potac=rew-apply (goal ax pos direction) (declare (edited "24-JUN-2002") (authors Pollet) (input "A goal formula, a theorem, a position, a direction.") (effect "-") (value "If the theorem can be applied the rewritten formula and" "the premises of the theorem, else NIL.")) (let ((term-to-rew (data~struct-at-position goal pos))) (multiple-value-bind (subst prems rhs lhs) (potac=rew-apply-term term-to-rew ax direction) (when subst (values (subst~apply subst (data~replace-at-position goal pos (if (string-equal direction "rl") lhs rhs))) (mapcar #'(lambda (pre) (subst~apply subst pre)) prems)))))) (defgeneric potac=rew-apply-term (term ax direction) (declare (edited "24-JUN-2002") (authors Pollet) (input "A term, a theorem, a direction.") (effect "-") (value "If the theorem can be applied to term," "a substitution, the premises and the lhs and rhs of the theorem," "else NIL.")) (:method (term (ax symbol) direction) (potac=rew-apply-term term (th~find-assumption ax (prob~proof-theory omega*current-proof-plan)) direction)) (:method (term (ax prob+problem) direction) (potac=rew-apply-term term (node~formula (prob~conclusion ax)) direction)) (:method ((term term+term) (ax term+term) direction) (multiple-value-bind (rhs lhs vars prems) (potac=rew-decompose ax) (when (and lhs rhs) (let ((subst (term~alpha-match (if (string-equal direction "lr") lhs rhs) term))) (when subst (values subst prems rhs lhs))))))) (com~defcommand rewrite-with (argnames oldline newline axiom position direction) (argtypes ndline ndline thy-assumption position symbol) (arghelps "An 
open line to apply rewrite rule" "A premise to apply rewrite rule" "The rewrite rule" "A position for the application" "A direction") (function potac=rewrite-with) (frag-cats tactics base) (defaults ((oc~default-current-planline) (com~unspecified) (com~unspecified) (com~unspecified) (com~unspecified))) (log-p T) (help "Rewriting with a given equality rule.")) (defun potac=rewrite-with (C P Ax Pos direction) (if (or (string-equal direction "lr") (string-equal direction "rl")) (infer~compute-outline 'rewrite-with (list C P) (list pos (keim~name ax) direction)) (omega~error "~A is not a valid direction, use 'lr' or 'rl'" direction))) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; 'MIZAR style' proof construction: ;; introduction of statements/formula into the proof ;; now with sorts! ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (infer~defwild-tactic assert (outline-mappings (((nonexistent list) assert-f) ((list nonexistent) assert-b) ((list list) assert-s))) (parameter-types term thy-ass-list) (expansion-function potac=expand-assert) (help "")) (defun assert-f (conc prems parameters) (declare (ignore conc)) (let ((para (list (car parameters)))) (when (potac=assert-check para prems (second parameters)) (values para nil )))) (defun assert-b (conc prems parameters) (declare (ignore prems)) (let ((para (list (car parameters)))) (when (potac=assert-check conc para (second parameters)) (values nil para)))) (defun assert-s (conc prems parameters) (let ((para (list (car parameters)))) (when (potac=assert-check conc (append para prems) (second parameters)) (values nil para)))) (com~defcommand assert (argnames formula proof-lines defis) (argtypes term ndline-list thy-ass-list) (arghelps "A formula" "Depends on proof lines" "A list of definitions that should be expanded") (function potac=assert) (frag-cats tactics base) (defaults potac=assert-defaults) (log-p T) (help "")) (defun potac=assert (term lines defis) (let* ((prems (remove-if #'pdsn~open-node-p lines)) (openlines (set-difference lines prems))) (infer~compute-outline 'assert (list openlines prems) (list term defis)))) (defun potac=assert-defaults (form nodes defis) (cond ((not (com~specified-arg-p form)) (list (node~formula (oc~default-current-planline)) (com~unspecified)(com~unspecified))) ((not (com~specified-arg-p nodes)) (list form (pds~support-nodes omega*current-proof-plan) (com~unspecified))) ((not (com~specified-arg-p defis)) (list form nodes (when form (remove-if #'(lambda (defi) (or (eq defi (th~find-assumption 'forall-sort (prob~theory omega*current-proof-plan))) (eq defi (th~find-assumption 'exists-sort (prob~theory omega*current-proof-plan))))) (orules=contained-definitions form))))) (t (list form nodes defis)))) (defun potac=assert-sort-thms-for (nodes) (let* ((thy (prob~proof-theory omega*current-proof-plan)) (senv (th~senv thy)) (sort-preds (mapcan #'(lambda (node) (remove-if-not #'(lambda (sub) (and (term~constant-p sub) (sort~sort-of-pred sub senv))) (data~all-substructs (node~formula node)))) nodes))) (mapcan #'(lambda (pred) (mapcar #'(lambda (td) (th~find-assumption (keim::sort~td-theorem td) thy)) (sort~all-term-decls (sort~sort-of-pred pred senv) senv))) sort-preds))) (defun potac=assert-check (conc prems defis &key (time 10)) (let* ((new-name (intern (string-upcase (format nil "problem-~A" (gentemp 'temp))))) (pds omega*current-proof-plan) (thy (prob~proof-theory pds)) (notexpand (potac=use-defis defis)) (pds-env (pds~environment pds)) 
(pds-problem (prob~proof-problem pds)) (pds-problem-env (prob~environment pds-problem)) (new-env (env~create (list pds-problem-env))) (new-assumptions (mapcar #'(lambda (supp) ;;ass from the current proof (node~create (gentemp 'a0) (potac=num2func (gentac=substitute-defis supp notexpand)) (just~create (infer~find-method 'hyp) nil))) prems)) (new-conclusion (node~create 'c (potac=num2func (gentac=substitute-defis (if (consp conc) (batac=assemble-conjunction conc) conc) notexpand)) (just~create (infer~find-method 'open) nil))) (sort-thms (mapcar #'(lambda (thm) ;;sort-thms (let ((thmform (th~ass-formula thm))) (node~create (gentemp 't0) (gentac=substitute-defis thmform notexpand) (just~create (infer~find-method 'hyp) nil)))) (potac=assert-sort-thms-for (cons new-conclusion new-assumptions)))) (new-problem (prob~create new-name (prob~theory pds) new-env (append sort-thms new-assumptions) new-conclusion)) (all-type-vars (append (env~class-keys pds-env 'type+variable nil) (env~class-keys pds-problem-env 'type+variable nil))) (all-type-constants (append (env~class-keys pds-env 'type+constant nil) (env~class-keys pds-problem-env 'type+constant nil))) (all-constants (append (env~class-keys pds-env 'term+constant nil) (env~class-keys pds-problem-env 'term+constant nil))) (all-variables (append (env~class-keys pds-env 'term+variable nil) (env~class-keys pds-problem-env 'term+variable nil)))) (mapcar #'(lambda (key) (let* ((obj (env~lookup-object key pds-env))) (env~enter key obj new-env))) (append all-type-vars all-type-constants all-constants all-variables)) (let ((new-pds (pds~start-proof-plan new-problem (ot~new-proof-plan-name new-problem)))) (setf omega*current-proof-plan new-pds keim::pds*current-proof-plan new-pds) (setf testpds new-pds) (let ( (result '(spass~call-spass new-conclusion new-pds (atptop~default-directory) time T 0 nil)) ; (result (otter~call-otter new-conclusion ;;for testing ; new-pds ; (atptop~default-directory) ; time ; 'AUTO ; nil ; nil ; "" "")) ) (setf keim::pds*current-proof-plan pds omega*current-proof-plan pds) result)))) (defun potac=use-defis (defis) (set-difference (th~definitions-recursed (prob~theory omega*current-proof-plan)) (cons (th~find-assumption 'forall-sort (prob~theory omega*current-proof-plan)) (cons (th~find-assumption 'exists-sort (prob~theory omega*current-proof-plan)) defis)))) (defun potac=expand-assert (conclusions premises parameters) (tacl~init (append conclusions premises)) (let* ((notexpand (potac=use-defis (second parameters))) (prems (mapcar #'(lambda (prem) (if (gentac=substituted-differs-p (node~formula prem) notexpand) (car (tacl~apply 'defse (list nil prem) (list notexpand))) prem)) premises)) ;potac=expand-numbers ;already included in defse (conc (cond ((and (consp conclusions) (> (length conclusions) 1)) (car (second (tacl~apply 'ande* (list conclusions nil) nil)))) ((consp conclusions) (car conclusions)) (T conclusions))) ;potac=expand-numbers ;already included in defsi (node (if (gentac=substituted-differs-p (node~formula conc) notexpand) (second (tacl~apply 'defsi (list conc nil) (list notexpand))) conc)) (thmnodes (mapcar #'(lambda (thm) (pds~add-thy-assertion thm omega*current-proof-plan)) (potac=assert-sort-thms-for (cons node prems)))) (thmprems (mapcar #'(lambda (prem) (if (gentac=substituted-differs-p (node~formula prem) notexpand) (car (tacl~apply 'defse (list nil prem) (list notexpand))) prem)) thmnodes)) ) (setf (pds~node-supports node) (append thmprems prems)) (setf (just~method (node~justification node)) (infer~find-method 
'otter)) ;; replaced SPASS by OTTER AMEIER (setf (just~premises (node~justification node)) (append thmprems prems)) (setf (pdsj~parameters (node~justification node)) (list t)) (tacl~end) (setf (pdsj~status (node~justification node)) "untested"))) (defun potac=num2func (node) (if (th~find-theory 'natural) (natac=numbers-2-function node) node)) #| expand every defi except =, this is not always useful (defun potac=expand-assert (conclusions premises parameters) (declare (ignore parameters)) (tacl~init (append conclusions premises)) (let* ((notexpand (list (th~find-assumption '= 'base))) (prems (mapcar #'(lambda (prem) (if (gentac=substituted-differs-p (node~formula prem) notexpand) (car (tacl~apply 'defse (list nil prem) (list notexpand))) prem)) premises)) (conc (cond ((and (consp conclusions) (> (length conclusions) 1)) (car (second (tacl~apply 'ande* (list conclusions nil) nil)))) ((consp conclusions) (car conclusions)) (T conclusions))) (node (if (gentac=substituted-differs-p (node~formula conc) notexpand) (second (tacl~apply 'defsi (list conc nil) (list notexpand))) conc))) (setf (pds~node-supports node) prems) (setf (just~method (node~justification node)) (infer~find-method 'spass)) (setf (just~premises (node~justification node)) prems) (setf (pdsj~parameters (node~justification node)) (list t)) (tacl~end) (setf (pdsj~status (node~justification node)) "untested"))) (defun potac=assert-check (conc prems &key (time 10)) (let* ((new-name (intern (string-upcase (format nil "problem-~A" (gentemp 'temp))))) (notexpand (list (th~find-assumption '= 'base))) (pds omega*current-proof-plan) (pds-env (pds~environment pds)) (pds-problem (prob~proof-problem pds)) (pds-problem-env (prob~environment pds-problem)) (new-env (env~create (list pds-problem-env))) (new-assumptions (mapcar #'(lambda (supp) (node~create (gentemp 'a0) (gentac=substitute-defis supp notexpand) (just~create (infer~find-method 'hyp) nil))) prems)) (new-conclusion (node~create 'c (gentac=substitute-defis (if (consp conc) (batac=assemble-conjunction conc) conc) notexpand) (just~create (infer~find-method 'spass) new-assumptions))) (new-problem (prob~create new-name (prob~theory pds) new-env new-assumptions new-conclusion)) (all-type-vars (append (env~class-keys pds-env 'type+variable nil) (env~class-keys pds-problem-env 'type+variable nil))) (all-type-constants (append (env~class-keys pds-env 'type+constant nil) (env~class-keys pds-problem-env 'type+constant nil))) (all-constants (append (env~class-keys pds-env 'term+constant nil) (env~class-keys pds-problem-env 'term+constant nil))) (all-variables (append (env~class-keys pds-env 'term+variable nil) (env~class-keys pds-problem-env 'term+variable nil)))) (mapcar #'(lambda (key) (let* ((obj (env~lookup-object key pds-env))) (env~enter key obj new-env))) (append all-type-vars all-type-constants all-constants all-variables)) (setf test new-problem) (setf co conc) (setf pre prems) (let ((result (spass~call-spass new-conclusion (pds~start-proof-plan new-problem (ot~new-proof-plan-name new-problem)) (atptop~default-directory) time T 0 nil)) (result1 '(otter~call-otter new-conclusion ;;for testing (pds~start-proof-plan new-problem (ot~new-proof-plan-name new-problem)) (atptop~default-directory) time 'AUTO nil nil "" ""))) (setf keim::pds*current-proof-plan omega*current-proof-plan) result))) |# ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; existse-sort*-special ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;better don't ask: the disjuncts 
are introduced as hyps of the new premise
;; in the expansion these hyps are converted to the result of andel/ander of the real hyps
;; and the dummy hyps have to be replaced in the hyp-lists of the corresponding nodes.
;; Expansion of hypotheses!
;;
;; We have the following situation:
;;
;;   ExistingPrem    Hyp_1 ... Hyp_n |- NewPrem
;;   -----------------------------------
;;   ExistingConc    Hyp_1 |- NewConc_1 ... Hyp_n |- NewConc_n
;;
;;
;; What would be cleaner?
;; 1. Allow hyps for conclusions in wild-tactics
;; 2. I have the feeling that the plain execution would be much easier for simple tactics
;;    like this one. Additionally, some kind of inverse expansion after execution could restore
;;    it as a more abstract inference step.

(infer~defwild-tactic existse-sort*-special
 (outline-mappings (((existent list) existse-sort*-special-b)))
 (parameter-types termsym-list)
 (expansion-function potac=existse-sort*-special-expand)
 (help "Iterated EXISTS-SORT-Elimination."))

(defun existse-sort*-special-b (concs prems parameters)
  (let* ((ex-term (car prems))
         (new-hyps (potac=compute-existse-sort*-special-sorts ex-term (car parameters) nil)))
    (when (> (length new-hyps) 1)
      (values nil (list (cons (car concs) new-hyps))))))

(defun potac=compute-existse-sort*-special-sorts (ex-term params hyps)
  (if (and (potac=exists-sort-formula? ex-term) (consp params))
      (let* ((new-hyp (potac=compute-exists-sort-sort (car params) ex-term))
             (new-ex (potac=compute-exists-sort (car params) ex-term)))
        (potac=compute-existse-sort*-special-sorts new-ex (rest params) (cons new-hyp hyps)))
    (cons ex-term hyps)))

(com~defcommand existse-sort*-special
 (argnames ex-line line param)
 (argtypes ndline ndline termsym-list)
 (arghelps "An existential line" "A line to be proved" "A list of new constants")
 (function potac=existse-sort*-special)
 (defaults potac=existse-sort*-special-defaults)
 (frag-cats tactics post)
 (log-p T)
 (help "Iterated elimination of a sorted existential quantifier."))

(defun potac=existse-sort*-special (ex-line line params)
  (infer~compute-outline 'existse-sort*-special (list line (list ex-line)) (list params)))

(defun potac=existse-sort*-special-defaults (ex-line line param)
  (cond ((not (com~specified-arg-p ex-line))
         (list (pds~find-support #'potac=exists-sort-formula?) (com~unspecified) (com~unspecified)))
        ((not (com~specified-arg-p line))
         (list ex-line (oc~default-current-planline) (com~unspecified)))
        ((not (com~specified-arg-p param))
         (list ex-line line (batac=generate-defaults-existse* ex-line)))
        (t (list ex-line line param))))

(defun potac=existse-sort*-special-expand (concs prems parameters)
  (let* ((conc-line (car concs))
         (exi-line (car prems))
         (prem-line (second prems))
         (const-list (first parameters))
         (hyps (set-difference (pdsn~hyps prem-line) (pdsn~hyps conc-line))))
    (tacl~init (append concs prems))
    (potac=existse-sort*-special-expand-rec conc-line exi-line prem-line hyps const-list)
    (tacl~end)))

(defun potac=existse-sort*-special-expand-rec (conc exi-prem prem hyps params &optional last-hyp)
  (if (and (potac=exists-sort-formula? exi-prem) (consp params))
      (let* ((exe (tacl~apply 'existse-sort (list conc exi-prem nil) (list (first params))))
             (newhyp (fourth exe))
             (oldhyp (potac=hyp2open
                      (find-if #'(lambda (hy)
                                   (data~equal (node~formula hy)
                                               (car (data~appl-arguments (node~formula newhyp)))))
                               hyps)
                      (list newhyp)))
             (ander (tacl~apply 'ander (list nil newhyp) nil))
             (andel (tacl~apply 'andel (list oldhyp newhyp) nil)))
        (potac=existse-sort*-special-expand-rec (third exe) (first ander) prem
                                                (remove oldhyp hyps) (rest params) newhyp))
    (let ((oldhyp (potac=hyp2open
                   (find-if #'(lambda (hy)
                                (data~equal (node~formula hy) (node~formula exi-prem)))
                            hyps)
                   (list last-hyp))))
      (tacl~apply 'weaken (list oldhyp exi-prem) nil)
      (setf (pdsn~hyps prem) (pdsn~hyps conc))
      (tacl~apply 'weaken (list conc prem) nil))))

(defun potac=hyp2open (hyp newhyps)
  (setf (pdsn~hyps hyp) newhyps)
  (setf (node~justification hyp) (pdsj~open-just-create))
  (setf bla hyp)
  hyp)
{"author": "theoremprover-museum", "repo": "OMEGA", "sha": "b95b25f8bb16847a2e18d106510446a175f7145a", "save_path": "github-repos/isabelle/theoremprover-museum-OMEGA", "path": "github-repos/isabelle/theoremprover-museum-OMEGA/OMEGA-b95b25f8bb16847a2e18d106510446a175f7145a/theories/post/post-tactics.thy"}
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import time

import nibabel as nb
import numpy as np
import external.transformations as tf
import Trekker
import vtk
import psutil

import dti_funcs as dti


def main():
    SHOW_AXES = True
    AFFINE_IMG = True
    NO_SCALE = True
    COMPUTE_TRACTS = True

    n_tracts = 240
    # n_tracts = 24
    n_threads = 2 * psutil.cpu_count()
    img_shift = 0  # 255

    data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\joonas'
    filenames = {'T1': 'sub-S1_ses-S8741_T1w', 'FOD': 'FOD_T1_space',
                 'ACT': 'trekkerACTlabels', 'COIL': 'magstim_fig8_coil',
                 'HEAD': 'head_inv', 'BRAIN': 'brain_inv',
                 'BRAINSIM': 'gm', 'WM': 'skin'}

    img_path = os.path.join(data_dir, filenames['T1'] + '.nii')
    trk_path = os.path.join(data_dir, filenames['FOD'] + '.nii')
    act_path = os.path.join(data_dir, filenames['ACT'] + '.nii')
    coil_path = os.path.join(data_dir, filenames['COIL'] + '.stl')
    head_inv_path = os.path.join(data_dir, filenames['HEAD'] + '.stl')
    brain_inv_path = os.path.join(data_dir, filenames['BRAIN'] + '.stl')
    brain_sim_path = os.path.join(data_dir, filenames['BRAINSIM'] + '.stl')
    wm_sim_path = os.path.join(data_dir, filenames['WM'] + '.stl')

    imagedata = nb.squeeze_image(nb.load(img_path))
    imagedata = nb.as_closest_canonical(imagedata)
    imagedata.update_header()
    pix_dim = imagedata.header.get_zooms()
    img_shape = imagedata.header.get_data_shape()

    act_data = nb.squeeze_image(nb.load(act_path))
    act_data = nb.as_closest_canonical(act_data)
    act_data.update_header()
    act_data_arr = act_data.get_fdata()

    # print(imagedata.header)
    # print("pix_dim: {}, img_shape: {}".format(pix_dim, img_shape))
    print("Pixel size: \n")
    print(pix_dim)
    print("\nImage shape: \n")
    print(img_shape)
    print("\nSform: \n")
    print(imagedata.get_sform(coded=True))
    print("\nQform: \n")
    print(imagedata.get_qform(coded=True))
    print("\nFall-back: \n")
    print(imagedata.header.get_base_affine())

    if AFFINE_IMG:
        affine = imagedata.affine
        if NO_SCALE:
            scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine)
            affine = tf.compose_matrix(scale=None, shear=shear, angles=angs,
                                       translate=trans, perspective=persp)
    else:
        affine = np.identity(4)
    print("affine: {0}\n".format(affine))

    # Create a rendering window and renderer
    ren = vtk.vtkRenderer()
    ren.SetUseDepthPeeling(1)
    ren.SetOcclusionRatio(0.1)
    ren.SetMaximumNumberOfPeels(100)
    ren_win = vtk.vtkRenderWindow()
    ren_win.AddRenderer(ren)
    ren_win.SetSize(800, 800)
    ren_win.SetMultiSamples(0)
    ren_win.SetAlphaBitPlanes(1)

    # Create a renderwindowinteractor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(ren_win)

    repos = [0., 0., 0., 0., 0., 0.]

    # brain in invesalius space (STL as exported by invesalius)
    _ = load_stl(head_inv_path, ren, opacity=.7, colour=[1.0, 1.0, 1.0], replace=repos,
                 user_matrix=np.identity(4))
    _ = load_stl(wm_sim_path, ren, opacity=.7, colour=[1.0, 1.0, 1.0], replace=repos,
                 user_matrix=np.identity(4))

    # simnibs brain in RAS+ space
    # _ = load_stl(brain_sim_path, ren, opacity=1., colour=[1.0, 0., 0.], replace=repos, user_matrix=np.identity(4))

    # brain in RAS+ space
    inv2ras = affine.copy()
    inv2ras[1, 3] += pix_dim[1] * img_shape[1]
    inv2ras[0, 3] -= 12
    # _ = load_stl(brain_inv_path, ren, opacity=.6, colour="SkinColor", replace=repos, user_matrix=inv2ras)

    # brain in voxel space
    inv2voxel = np.identity(4)
    inv2voxel[1, 3] = inv2voxel[1, 3] + pix_dim[1] * img_shape[1]
    # _ = load_stl(brain_inv_path, ren, opacity=.6, colour=[0.482, 0.627, 0.698], replace=repos, user_matrix=inv2voxel)

    # simnibs brain in RAS+ space
    ras2inv = np.linalg.inv(affine.copy())
    ras2inv[1, 3] -= pix_dim[1] * img_shape[1]
    _ = load_stl(wm_sim_path, ren, opacity=.7, colour=[0.482, 0.627, 0.698], replace=repos,
                 user_matrix=ras2inv)

    repos_1 = [0., 0., 0., 0., 0., 180.]
    # _ = load_stl(wm_sim_path, ren, opacity=.7, colour=[1., 0., 0.], replace=repos_1, user_matrix=np.linalg.inv(affine))

    # create fiducial markers
    # rowwise the coordinates refer to: right ear, left ear, nasion
    # fids_inv = np.array([[168.300, 126.600, 97.000],
    #                      [9.000, 120.300, 93.700],
    #                      [90.100, 33.500, 150.000]])

    fids_inv = np.array([[167.7, 120.9, 96.0],
                         [8.2, 122.7, 91.0],
                         [89.0, 18.6, 129.0]])

    fids_inv_vtk = np.array([[167.7, 120.9, 96.0],
                             [8.2, 122.7, 91.0],
                             [89.0, 18.6, 129.0]])

    # from the invesalius exported fiducial markers you have to multiply the Y coordinate by -1 to
    # transform to the regular 3D invesalius space where the coil location is saved
    fids_inv_vtk[:, 1] *= -1

    # the following code converts from the invesalius 3D space to the MRI scanner coordinate system
    fids_inv_vtk_w = fids_inv_vtk.copy()
    fids_inv_vtk_w = np.hstack((fids_inv_vtk_w, np.ones((3, 1))))
    fids_scan = np.linalg.inv(ras2inv) @ fids_inv_vtk_w.T
    fids_vis = fids_scan.T[:3, :3]
    # --- fiducial markers

    seed = np.array([60.0, 147.0, 204.0])
    seed_inv = np.array([60.0, -147.0, 204.0])

    coil_pos = [43.00, 155.47, 225.22, -21.00, -37.45, 58.41]
    m_coil = coil_transform_pos(coil_pos)

    # show coil
    repos_coil = [0., 0., 0., 0., 0., 90.]
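    # A minimal sketch (kept commented out) of the homogeneous-coordinate mapping used
    # for the fiducials above; the affine and point below are hypothetical examples,
    # not values from this dataset. A 3D point is augmented with a 1, multiplied by a
    # 4x4 matrix, and the first three entries of the result are the mapped coordinates:
    #
    # example_affine = np.identity(4)           # hypothetical voxel-to-world affine
    # example_affine[:3, 3] = [2., 50., -10.]   # hypothetical translation (mm)
    # pt = np.array([10., -20., 30., 1.])       # point in homogeneous coordinates
    # pt_world = (example_affine @ pt)[:3]      # mapped x, y, z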
    # _ = load_stl(coil_path, ren, opacity=.6, replace=repos_coil, colour=[1., 1., 1.], user_matrix=m_coil)

    # create coil vectors
    vec_length = 75
    p1 = m_coil[:-1, -1]
    coil_dir = m_coil[:-1, 0]
    coil_face = m_coil[:-1, 1]
    p2_face = p1 + vec_length * coil_face
    p2_dir = p1 + vec_length * coil_dir

    coil_norm = np.cross(coil_dir, coil_face)
    p2_norm = p1 - vec_length * coil_norm

    add_line(ren, p1, p2_dir, color=[1.0, .0, .0])
    add_line(ren, p1, p2_face, color=[.0, 1.0, .0])
    add_line(ren, p1, p2_norm, color=[.0, .0, 1.0])
    # --- coil vectors

    p1_change = p1.copy()
    p1_change[1] = -p1_change[1]

    # offset = 40
    # coil_norm = coil_norm/np.linalg.norm(coil_norm)
    # coord_offset_nav = p1 - offset * coil_norm

    # convert to world coordinate space to use as seed for fiber tracking
    seed_world = np.append(seed, 1)[np.newaxis, :].T
    seed_world = affine @ seed_world
    seed_world = seed_world[:3, 0, np.newaxis].T

    # convert to world coordinate space to use as seed for fiber tracking
    seed_world_true = np.append(seed_inv, 1)[np.newaxis, :].T
    seed_world_true = inv2ras @ seed_world_true
    seed_world_true = seed_world_true[:3, 0, np.newaxis].T

    # convert to voxel coordinate space
    seed_mri = np.append(seed_inv, 1)[np.newaxis, :].T
    seed_mri = inv2voxel @ seed_mri
    seed_mri = seed_mri[:3, 0, np.newaxis].T

    # 0: red, 1: green, 2: blue, 3: maroon (dark red),
    # 4: purple, 5: teal (petrol blue), 6: yellow, 7: orange
    colours = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.],
               [0.45, 0., 0.5], [0., .5, .5], [1., 1., 0.], [1., .4, .0]]

    # for n in range(3):
    #     _ = add_marker(fids_inv[n, :], ren, colours[n], radius=2)

    for n in range(3):
        _ = add_marker(fids_inv_vtk[n, :], ren, colours[n], radius=2)

    for n in range(3):
        _ = add_marker(fids_vis[n, :], ren, colours[n], radius=2)

    _ = add_marker(p1, ren, colours[4], radius=2)
    _ = add_marker(seed_inv, ren, colours[5], radius=2)
    _ = add_marker(np.squeeze(seed_world), ren, colours[6], radius=2)
    _ = add_marker(np.squeeze(seed_world_true), ren, colours[3], radius=2)
    _ = add_marker(seed, ren, colours[7], radius=2)
    _ = add_marker(np.squeeze(seed_mri), ren, colours[1], radius=2)

    # create tracts
    if COMPUTE_TRACTS:
        # Show tracks
        repos_trk = [0., -(pix_dim[1] * img_shape[1]), 0., 0., 0., 0.]

        matrix_vtk = vtk.vtkMatrix4x4()
        trans = np.identity(4)
        trans[1, -1] = repos_trk[1]
        final_matrix = np.linalg.inv(affine) @ trans
        print("final_matrix: {}".format(final_matrix))

        for row in range(0, 4):
            for col in range(0, 4):
                matrix_vtk.SetElement(row, col, final_matrix[row, col])

        root = vtk.vtkMultiBlockDataSet()

        start_time = time.time()
        tracker = Trekker.initialize(bytes(trk_path, 'utf-8'))
        tracker.seed_maxTrials(1)
        tracker.minFODamp(0.1)
        tracker.writeInterval(50)
        tracker.maxLength(200)
        tracker.minLength(20)
        tracker.maxSamplingPerStep(100)
        tracker.numberOfThreads(n_threads)
        duration = time.time() - start_time
        print("Initialize Trekker: {:.2f} ms".format(1e3 * duration))

        count_tracts = 0
        start_time_all = time.time()

        for n in range(round(n_tracts / n_threads)):
            # branch = dti.multi_block(tracker, seed, n_threads)
            branch = dti.multi_block(tracker, seed_world_true, n_threads)
            count_tracts += branch.GetNumberOfBlocks()

            # start_time = time.time()
            # root = dti.tracts_root(out_list, root, n)
            root.SetBlock(n, branch)
            # duration = time.time() - start_time
            # print("Compute root {}: {:.2f} ms".format(n, 1e3*duration))

        duration = time.time() - start_time_all
        print("Compute multi {}: {:.2f} ms".format(n, 1e3 * duration))
        print("Number computed tracts {}".format(count_tracts))
        print("Number computed branches {}".format(root.GetNumberOfBlocks()))

        start_time = time.time()
        tracts_actor = dti.compute_actor(root, matrix_vtk)
        duration = time.time() - start_time
        print("Compute actor: {:.2f} ms".format(1e3 * duration))

        ren.AddActor(tracts_actor)

    # Add axes to scene origin
    if SHOW_AXES:
        add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0])

    # Enable user interface interactor
    iren.Initialize()
    ren_win.Render()
    iren.Start()


def load_stl(stl_path, ren, opacity=1., visibility=1, position=False, colour=False,
             replace=False, user_matrix=np.identity(4)):
    vtk_colors = vtk.vtkNamedColors()
    vtk_colors.SetColor("SkinColor", [233, 200, 188, 255])
    vtk_colors.SetColor("BkgColor", [51, 77, 102, 255])

    reader = vtk.vtkSTLReader()
    reader.SetFileName(stl_path)
    reader.Update()

    poly_normals = vtk.vtkPolyDataNormals()
    poly_normals.SetInputData(reader.GetOutput())
    poly_normals.ConsistencyOn()
    poly_normals.AutoOrientNormalsOn()
    poly_normals.SplittingOff()
    poly_normals.Update()

    if replace:
        transx, transy, transz, rotx, roty, rotz = replace
        # create a transform that rotates the stl source
        transform = vtk.vtkTransform()
        transform.PostMultiply()
        transform.RotateX(rotx)
        transform.RotateY(roty)
        transform.RotateZ(rotz)
        transform.Translate(transx, transy, transz)

        transform_filt = vtk.vtkTransformPolyDataFilter()
        transform_filt.SetTransform(transform)
        transform_filt.SetInputConnection(poly_normals.GetOutputPort())
        transform_filt.Update()

    mapper = vtk.vtkPolyDataMapper()
    if vtk.VTK_MAJOR_VERSION <= 5:
        if replace:
            mapper.SetInput(transform_filt.GetOutput())
        else:
            mapper.SetInput(poly_normals.GetOutput())
    else:
        if replace:
            mapper.SetInputConnection(transform_filt.GetOutputPort())
        else:
            mapper.SetInputConnection(poly_normals.GetOutputPort())
    mapper.ScalarVisibilityOff()

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetOpacity(opacity)
    actor.SetVisibility(visibility)
    actor.GetProperty().SetBackfaceCulling(1)

    # outline (use the transformed polydata only when a replace transform was given,
    # otherwise transform_filt does not exist)
    outline = vtk.vtkOutlineFilter()
    if replace:
        outline.SetInputConnection(transform_filt.GetOutputPort())
    else:
        outline.SetInputConnection(poly_normals.GetOutputPort())
    mapper_outline = vtk.vtkPolyDataMapper()
    mapper_outline.SetInputConnection(outline.GetOutputPort())

    actor_outline = vtk.vtkActor()
    actor_outline.SetMapper(mapper_outline)

    if colour:
        if type(colour) is str:
            actor.GetProperty().SetDiffuseColor(vtk_colors.GetColor3d(colour))
            actor.GetProperty().SetSpecular(.3)
            actor.GetProperty().SetSpecularPower(20)
            actor_outline.GetProperty().SetDiffuseColor(vtk_colors.GetColor3d("SkinColor"))
            actor_outline.GetProperty().SetSpecular(.3)
            actor_outline.GetProperty().SetSpecularPower(20)
        else:
            actor.GetProperty().SetColor(colour)
            actor_outline.GetProperty().SetColor(colour)

    if position:
        actor.SetPosition(position)

    matrix_vtk = vtk.vtkMatrix4x4()
    for row in range(0, 4):
        for col in range(0, 4):
            matrix_vtk.SetElement(row, col, user_matrix[row, col])

    actor.SetUserMatrix(matrix_vtk)
    actor_outline.SetUserMatrix(matrix_vtk)

    # Assign actor to the renderer
    ren.AddActor(actor)
    ren.AddActor(actor_outline)

    return actor


def add_line(renderer, p1, p2, color=[0.0, 0.0, 1.0]):
    line = vtk.vtkLineSource()
    line.SetPoint1(p1)
    line.SetPoint2(p2)

    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(line.GetOutputPort())

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(color)

    renderer.AddActor(actor)


def add_marker(coord, ren, color, radius):
    # x, y, z = coord
    ball_ref = vtk.vtkSphereSource()
    ball_ref.SetRadius(radius)
    ball_ref.SetCenter(coord)

    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(ball_ref.GetOutputPort())

    prop = vtk.vtkProperty()
    prop.SetColor(color)

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.SetProperty(prop)

    ren.AddActor(actor)

    return actor


def coil_transform_pos(pos):
    pos[1] = -pos[1]
    a, b, g = np.radians(pos[3:])
    r_ref = tf.euler_matrix(a, b, g, 'sxyz')
    t_ref = tf.translation_matrix(pos[:3])
    m_img = tf.concatenate_matrices(t_ref, r_ref)

    return m_img


if __name__ == '__main__':
    np.set_printoptions(suppress=True, precision=2)
    main()
{"hexsha": "989461c5fddaf1217723099e09d6c61f85cacce1", "size": 14472, "ext": "py", "lang": "Python", "max_stars_repo_path": "tractography/vtk_inv_tracts_coil.py", "max_stars_repo_name": "vhosouza/xcoord", "max_stars_repo_head_hexsha": "9226a6f919b3edec933753ff17815092ab95df9a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tractography/vtk_inv_tracts_coil.py", "max_issues_repo_name": "vhosouza/xcoord", "max_issues_repo_head_hexsha": "9226a6f919b3edec933753ff17815092ab95df9a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tractography/vtk_inv_tracts_coil.py", "max_forks_repo_name": "vhosouza/xcoord", "max_forks_repo_head_hexsha": "9226a6f919b3edec933753ff17815092ab95df9a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-15T13:54:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-15T13:54:15.000Z", "avg_line_length": 34.5393794749, "max_line_length": 127, "alphanum_fraction": 0.6358485351, "include": true, "reason": "import numpy", "num_tokens": 4354}
#!/usr/bin/python
"""
esr_visualizer.py: version 0.1.0

Todo:
convert rosbag can_raw ESR track data to image

History:
2016/10/28: Initial version to display visual radar data from ros topic 'esr_front'.
"""

import math
import argparse
import sys
import datetime
import struct
import json

import numpy as np
import rospy
import std_msgs

try:
    import cv2
except ImportError:
    print "Error importing opencv"

'''
IMG_WIDTH = 512
IMG_HEIGHT = 512
IMG_CHANNELS = 3
'''


class RadarVisualizer(object):
    def __init__(self, width=250, height=250, channels=3):
        self.width = width
        self.height = height
        self.channels = channels
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
        cv2.imshow("Radar", self.img)

    def update(self, radarData):
        self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
        cv2.line(self.img, (10, 0), (self.width/2 - 5, self.height), (100, 255, 255))
        cv2.line(self.img, (self.width - 10, 0), (self.width/2 + 5, self.height), (100, 255, 255))
        for track_number in range(1, 65):
            if str(track_number) + '_track_range' in radarData:
                track_range = radarData[str(track_number) + '_track_range']
                track_angle = (float(radarData[str(track_number) + '_track_angle']) + 90.0) * math.pi / 180
                x_pos = math.cos(track_angle) * track_range * 4
                y_pos = math.sin(track_angle) * track_range * 4
                cv2.circle(self.img, (self.width/2 + int(x_pos), self.height - int(y_pos) - 10),
                           5, (255, 255, 255))
                # cv2.putText(self.img, str(track_number),
                #             (self.width/2 + int(x_pos) - 2, self.height - int(y_pos) - 10),
                #             self.font, 1, (255, 255, 255), 2)
        cv2.imshow("Radar", self.img)
        cv2.waitKey(2)


visualizer = RadarVisualizer()


def callback(data):
    print "data: ", data
    object = json.loads(data.data)
    visualizer.update(object)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Udacity SDC Micro Challenge Radar viewer')
    parser.add_argument('--debug', action='store_true', default=False, help='display debug messages')
    args = parser.parse_args()
    debug = args.debug

    # In ROS, nodes are uniquely named. If two nodes with the same
    # node are launched, the previous one is kicked off. The
    # anonymous=True flag means that rospy will choose a unique
    # name for our 'listener' node so that multiple listeners can
    # run simultaneously.
    rospy.init_node('ros_esr_visualizer', anonymous=True)
    rospy.Subscriber("esr_front", std_msgs.msg.String, callback)

    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()
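The update() method above places each ESR track by converting its (range, angle) pair to image pixels: the angle is shifted by +90 degrees so that zero azimuth points straight up the canvas, and the range is scaled by a factor of 4 to fit the 250 px window. A standalone sketch of just that polar-to-Cartesian step, with a made-up track (values are hypothetical, not from the topic):

import math

track_range, track_angle_deg = 20.0, 10.0  # hypothetical ESR track
angle = (track_angle_deg + 90.0) * math.pi / 180
x_pos = math.cos(angle) * track_range * 4  # horizontal pixel offset from centre
y_pos = math.sin(angle) * track_range * 4  # vertical pixel offset from bottom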
{"hexsha": "cc3c6bfe8f9afa67f5b5f2fedf9a6a41e5ea7476", "size": 2830, "ext": "py", "lang": "Python", "max_stars_repo_path": "ros/src/sensing/drivers/can/packages/kvaser/nodes/esr_ros_can_source/esr_visualizer.py", "max_stars_repo_name": "MichaelOdum/Udacity-SDC-Radar-Driver-Micro-Challenge", "max_stars_repo_head_hexsha": "37491578631b74e1b1d13ea7d65731d0b7e9099a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 56, "max_stars_repo_stars_event_min_datetime": "2016-10-06T02:10:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-29T07:02:25.000Z", "max_issues_repo_path": "ros/src/sensing/drivers/can/packages/kvaser/nodes/esr_ros_can_source/esr_visualizer.py", "max_issues_repo_name": "MichaelOdum/Udacity-SDC-Radar-Driver-Micro-Challenge", "max_issues_repo_head_hexsha": "37491578631b74e1b1d13ea7d65731d0b7e9099a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-05-09T03:33:13.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-19T06:52:45.000Z", "max_forks_repo_path": "ros/src/sensing/drivers/can/packages/kvaser/nodes/esr_ros_can_source/esr_visualizer.py", "max_forks_repo_name": "MichaelOdum/Udacity-SDC-Radar-Driver-Micro-Challenge", "max_forks_repo_head_hexsha": "37491578631b74e1b1d13ea7d65731d0b7e9099a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2016-10-06T07:33:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-02T17:08:06.000Z", "avg_line_length": 32.1590909091, "max_line_length": 116, "alphanum_fraction": 0.6600706714, "include": true, "reason": "import numpy", "num_tokens": 738}
import numpy as np
from sklearn import linear_model

from m2cgen import assemblers, ast
from tests import utils


def test_single_feature():
    estimator = linear_model.LinearRegression()
    estimator.coef_ = [1]
    estimator.intercept_ = 3

    assembler = assemblers.LinearModelAssembler(estimator)
    actual = assembler.assemble()

    expected = ast.BinNumExpr(
        ast.NumVal(3),
        ast.BinNumExpr(
            ast.FeatureRef(0),
            ast.NumVal(1),
            ast.BinNumOpType.MUL),
        ast.BinNumOpType.ADD)

    assert utils.cmp_exprs(actual, expected)


def test_two_features():
    estimator = linear_model.LinearRegression()
    estimator.coef_ = [1, 2]
    estimator.intercept_ = 3

    assembler = assemblers.LinearModelAssembler(estimator)
    actual = assembler.assemble()

    expected = ast.BinNumExpr(
        ast.BinNumExpr(
            ast.NumVal(3),
            ast.BinNumExpr(
                ast.FeatureRef(0),
                ast.NumVal(1),
                ast.BinNumOpType.MUL),
            ast.BinNumOpType.ADD),
        ast.BinNumExpr(
            ast.FeatureRef(1),
            ast.NumVal(2),
            ast.BinNumOpType.MUL),
        ast.BinNumOpType.ADD)

    assert utils.cmp_exprs(actual, expected)


def test_multi_class():
    estimator = linear_model.LogisticRegression()
    estimator.coef_ = np.array([[1, 2], [3, 4], [5, 6]])
    estimator.intercept_ = np.array([7, 8, 9])

    assembler = assemblers.LinearModelAssembler(estimator)
    actual = assembler.assemble()

    expected = ast.VectorVal([
        ast.SubroutineExpr(
            ast.BinNumExpr(
                ast.BinNumExpr(
                    ast.NumVal(7),
                    ast.BinNumExpr(
                        ast.FeatureRef(0),
                        ast.NumVal(1),
                        ast.BinNumOpType.MUL),
                    ast.BinNumOpType.ADD),
                ast.BinNumExpr(
                    ast.FeatureRef(1),
                    ast.NumVal(2),
                    ast.BinNumOpType.MUL),
                ast.BinNumOpType.ADD)),
        ast.SubroutineExpr(
            ast.BinNumExpr(
                ast.BinNumExpr(
                    ast.NumVal(8),
                    ast.BinNumExpr(
                        ast.FeatureRef(0),
                        ast.NumVal(3),
                        ast.BinNumOpType.MUL),
                    ast.BinNumOpType.ADD),
                ast.BinNumExpr(
                    ast.FeatureRef(1),
                    ast.NumVal(4),
                    ast.BinNumOpType.MUL),
                ast.BinNumOpType.ADD)),
        ast.SubroutineExpr(
            ast.BinNumExpr(
                ast.BinNumExpr(
                    ast.NumVal(9),
                    ast.BinNumExpr(
                        ast.FeatureRef(0),
                        ast.NumVal(5),
                        ast.BinNumOpType.MUL),
                    ast.BinNumOpType.ADD),
                ast.BinNumExpr(
                    ast.FeatureRef(1),
                    ast.NumVal(6),
                    ast.BinNumOpType.MUL),
                ast.BinNumOpType.ADD))])

    assert utils.cmp_exprs(actual, expected)


def test_binary_class():
    estimator = linear_model.LogisticRegression()
    estimator.coef_ = np.array([[1, 2]])
    estimator.intercept_ = np.array([3])

    assembler = assemblers.LinearModelAssembler(estimator)
    actual = assembler.assemble()

    expected = ast.BinNumExpr(
        ast.BinNumExpr(
            ast.NumVal(3),
            ast.BinNumExpr(
                ast.FeatureRef(0),
                ast.NumVal(1),
                ast.BinNumOpType.MUL),
            ast.BinNumOpType.ADD),
        ast.BinNumExpr(
            ast.FeatureRef(1),
            ast.NumVal(2),
            ast.BinNumOpType.MUL),
        ast.BinNumOpType.ADD)

    assert utils.cmp_exprs(actual, expected)
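Each `expected` tree above is simply the linear-model expression intercept + sum(coef_i * x_i) spelled out as nested `BinNumExpr` nodes, accumulated left to right. A quick numeric sanity check of the expression encoded in `test_two_features`, with a made-up input vector (the input values are hypothetical and not part of the test suite):

import numpy as np

coef, intercept = np.array([1.0, 2.0]), 3.0   # values from test_two_features
x = np.array([0.5, -1.0])                     # hypothetical input

# ((3 + x0*1) + x1*2), i.e. the AST evaluated left to right
value = (intercept + x[0] * coef[0]) + x[1] * coef[1]
assert np.isclose(value, intercept + coef @ x)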
{"hexsha": "4f03849beb02e2580e45a3460f95bfc89fda1635", "size": 3867, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/assemblers/test_linear.py", "max_stars_repo_name": "ggerrein/m2cgen", "max_stars_repo_head_hexsha": "e916f555b42e3a1d46828942c6b4e5c365c6a624", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-29T02:43:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T07:41:59.000Z", "max_issues_repo_path": "tests/assemblers/test_linear.py", "max_issues_repo_name": "ggerrein/m2cgen", "max_issues_repo_head_hexsha": "e916f555b42e3a1d46828942c6b4e5c365c6a624", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/assemblers/test_linear.py", "max_forks_repo_name": "ggerrein/m2cgen", "max_forks_repo_head_hexsha": "e916f555b42e3a1d46828942c6b4e5c365c6a624", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-08-06T07:51:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T07:41:42.000Z", "avg_line_length": 29.7461538462, "max_line_length": 58, "alphanum_fraction": 0.5234031549, "include": true, "reason": "import numpy", "num_tokens": 900}
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from datetime import datetime

from dlimage.mnist import MNISTLoader


def vectorize(j):
    e = np.zeros(10)
    e[j] = 1.0
    return e


mndata = MNISTLoader('dlimage/mnist/data')
images, lables = mndata.load_training()
x_train = np.ndarray((len(images), len(images[0])))
y_train = np.ndarray((len(lables), 10))
for i in range(len(images)):
    x_train[i] = images[i]
for i in range(len(lables)):
    y_train[i] = vectorize(lables[i])
print("Loading training data finished.")

mndata = MNISTLoader('dlimage/mnist/data')
images, lables = mndata.load_testing()
x_test = np.ndarray((len(images), len(images[0])))
y_test = np.ndarray((len(lables), 10))
for i in range(len(images)):
    x_test[i] = images[i]
for i in range(len(lables)):
    y_test[i] = vectorize(lables[i])
print("Loading testing data finished.")

model = Sequential()
model.add(Dense(80, activation='sigmoid', input_dim=784))
model.add(Dense(10, activation='sigmoid'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mse', optimizer=sgd, metrics=['accuracy'])

print("Start training: " + str(datetime.now()))
model.fit(x_train, y_train, epochs=20, batch_size=20, verbose=1)
print("End training: " + str(datetime.now()))

print("Start evaluating: " + str(datetime.now()))
score = model.evaluate(x_test, y_test, batch_size=20)
print(score)
print("End evaluating: " + str(datetime.now()))
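The `vectorize` helper above one-hot encodes a digit label so the target matches the 10-unit sigmoid output layer; `np.argmax` inverts the encoding when reading predictions back out. A minimal sketch with a made-up label (the label value is hypothetical):

import numpy as np

label = 7                        # hypothetical digit label
one_hot = np.zeros(10)
one_hot[label] = 1.0             # same encoding as vectorize(7)
assert int(np.argmax(one_hot)) == label   # argmax recovers the class index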
{"hexsha": "b616af95170df4d27dbcb8568021524f1b240575", "size": 1539, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/keras/ann_mnist_test.py", "max_stars_repo_name": "morningkyle/DLImage", "max_stars_repo_head_hexsha": "52e1ca27387126233f6ba6c8533d2d0e236a58a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-01-27T13:09:07.000Z", "max_stars_repo_stars_event_max_datetime": "2018-01-27T13:09:07.000Z", "max_issues_repo_path": "examples/keras/ann_mnist_test.py", "max_issues_repo_name": "morningkyle/DLImage", "max_issues_repo_head_hexsha": "52e1ca27387126233f6ba6c8533d2d0e236a58a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/keras/ann_mnist_test.py", "max_forks_repo_name": "morningkyle/DLImage", "max_forks_repo_head_hexsha": "52e1ca27387126233f6ba6c8533d2d0e236a58a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5961538462, "max_line_length": 64, "alphanum_fraction": 0.7128005198, "include": true, "reason": "import numpy", "num_tokens": 415}
from PIL import Image
import numpy as np
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('inputImage', help='Enter the path to image')
parser.add_argument('outputFile', help='Enter the path to output File')
parser.add_argument('-w', '--width', help='Enter width of output image', type=int, default=75)
parser.add_argument('-c', '--colorInvert', help='Enter to invert color of image', action='store_true')
args = parser.parse_args()

inputImagePath = args.inputImage
outputPath = args.outputFile
widd = args.width

asci = r"@%#*+=-:. "
if args.colorInvert:
    asci = asci[::-1]

# input image
img = Image.open(inputImagePath)
wid, height = img.size
# a character cell is roughly twice as tall as it is wide (about 9:20 here),
# so the output height is rescaled to preserve the source aspect ratio
img = img.resize((widd, int(widd * ((height * 9) / (wid * 20)))))
wid, height = img.size
img = img.convert("L")


def avg(imggg):
    return np.average(np.array(imggg))


# opening file
f = open(outputPath, "w")
for j in range(height):
    for i in range(wid):
        img1 = img.crop((i, j, i + 1, j + 1))
        f.write(asci[int((avg(img1) * 9) / 255)])
        print(asci[int((avg(img1) * 9) / 255)], end="")
    print("\n", end="")
    f.write("\n")
f.close()
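The inner loop above maps each one-pixel cell's mean brightness (0 to 255) to one of the ten ramp characters. A standalone sketch of just that index calculation, with assumed brightness values:

ramp = r"@%#*+=-:. "  # dark -> light, 10 characters

for brightness in (0, 128, 255):          # hypothetical cell averages
    index = int((brightness * 9) / 255)   # rescales 0..255 onto 0..9
    print(brightness, '->', repr(ramp[index]))  # 0 -> '@', 255 -> ' '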
{"hexsha": "41e77b9999b827eb67f64c3e72542962e6f5f851", "size": 1201, "ext": "py", "lang": "Python", "max_stars_repo_path": "ascii_image/ascii_image.py", "max_stars_repo_name": "TheFenrisLycaon/Automation-scripts", "max_stars_repo_head_hexsha": "ce5b32e3b0960e78680ca3a1113c801101263530", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 496, "max_stars_repo_stars_event_min_datetime": "2020-10-07T15:45:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T16:40:30.000Z", "max_issues_repo_path": "ascii_image/ascii_image.py", "max_issues_repo_name": "TheFenrisLycaon/Automation-scripts", "max_issues_repo_head_hexsha": "ce5b32e3b0960e78680ca3a1113c801101263530", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 550, "max_issues_repo_issues_event_min_datetime": "2020-10-07T15:31:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-20T22:00:38.000Z", "max_forks_repo_path": "ascii_image/ascii_image.py", "max_forks_repo_name": "TheFenrisLycaon/Automation-scripts", "max_forks_repo_head_hexsha": "ce5b32e3b0960e78680ca3a1113c801101263530", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 388, "max_forks_repo_forks_event_min_datetime": "2020-10-07T15:45:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T14:54:46.000Z", "avg_line_length": 23.0961538462, "max_line_length": 79, "alphanum_fraction": 0.6211490425, "include": true, "reason": "import numpy", "num_tokens": 346}
import numpy as np

from ...domain import interpolate_to_height_levels, interpolate_to_pressure_levels
from ...utils.interpolation import methods as interpolation_methods


def weighted_velocity(ds_column, pres_cutoff_start, pres_cutoff_end):
    """Humidity-weighted velocity over the lower troposphere: needs more work"""
    height_factor = interpolation_methods.cos_transition(
        ds_column["p_f"][:, 1:, :, :].values, pres_cutoff_start, pres_cutoff_end
    )
    weights = (
        (ds_column["p_h"][:, :-1, :, :].values - ds_column["p_h"][:, 1:, :, :].values)
        * ds_column["q"][:, 1:, :, :].values
        * height_factor
    )
    inv_weights = 1.0 / np.sum(weights)
    u_weighted = inv_weights * np.sum(ds_column["u"][:, 1:, :, :].values * weights)
    v_weighted = inv_weights * np.sum(ds_column["v"][:, 1:, :, :].values * weights)
    return u_weighted, v_weighted


def velocity_at_height(ds_column, height):
    """Velocity at a single height level: needs more work"""
    ds_on_height_level = interpolate_to_height_levels(ds_column, height)
    # For a single column, data dimensions are all 1
    return np.mean(ds_on_height_level["u"]), np.mean(ds_on_height_level["v"])


def velocity_at_pressure(ds_column, pressure):
    """Velocity at a single pressure level: needs more work"""
    ds_on_pressure_level = interpolate_to_pressure_levels(ds_column, pressure)
    # For a single column, data dimensions are all 1
    return np.mean(ds_on_pressure_level["u"]), np.mean(ds_on_pressure_level["v"])


def estimate_horizontal_velocities(ds_column, method, **kwargs):
    """Estimate the zonal and meridional winds using a specific method"""
    if method == "lower_troposphere_humidity_weighted":
        if "pres_cutoff_start" not in kwargs or "pres_cutoff_end" not in kwargs:
            raise Exception(
                f"To use the `{method}` velocity method the"
                " `pres_cutoff_start` and `pres_cutoff_end` kwargs"
                " are required"
            )
        u_traj, v_traj = weighted_velocity(ds_column, **kwargs)
    elif method == "single_height_level":
        if "height" not in kwargs:
            raise Exception(
                f"To use the `{method}` velocity method the"
                " `height` kwarg is required"
            )
        u_traj, v_traj = velocity_at_height(ds_column, **kwargs)
    elif method == "single_pressure_level":
        if "pressure" not in kwargs:
            raise Exception(
                f"To use the `{method}` velocity method the"
                " `pressure` kwarg is required"
            )
        u_traj, v_traj = velocity_at_pressure(ds_column, **kwargs)
    else:
        raise NotImplementedError(
            f"`{method}` trajectory velocity method not implemented"
        )
    return np.float64(u_traj), np.float64(v_traj)
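A hedged usage sketch of the dispatcher above, kept commented out because it needs real data: `ds_column` is assumed to be an xarray Dataset column carrying the `u`, `v`, `q`, `p_f` and `p_h` variables this module indexes, and the cutoff pressures are made-up values in Pa; none of these numbers come from the original repository.

# u, v = estimate_horizontal_velocities(
#     ds_column,
#     method="lower_troposphere_humidity_weighted",
#     pres_cutoff_start=70000.0,  # hypothetical: full weight below ~700 hPa
#     pres_cutoff_end=50000.0,    # hypothetical: weight tapered to zero above ~500 hPa
# )
# u2, v2 = estimate_horizontal_velocities(
#     ds_column, method="single_pressure_level", pressure=85000.0
# )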
{"hexsha": "155b6ade5df88e9cd3d5210f12b68e8eae6587ee", "size": 2777, "ext": "py", "lang": "Python", "max_stars_repo_path": "lagtraj/trajectory/integration/velocity_estimation.py", "max_stars_repo_name": "xychen-ocn/lagtraj", "max_stars_repo_head_hexsha": "fbcae751faa7f9b9f9a72d20abf71bb92e007bdd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lagtraj/trajectory/integration/velocity_estimation.py", "max_issues_repo_name": "xychen-ocn/lagtraj", "max_issues_repo_head_hexsha": "fbcae751faa7f9b9f9a72d20abf71bb92e007bdd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lagtraj/trajectory/integration/velocity_estimation.py", "max_forks_repo_name": "xychen-ocn/lagtraj", "max_forks_repo_head_hexsha": "fbcae751faa7f9b9f9a72d20abf71bb92e007bdd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8382352941, "max_line_length": 86, "alphanum_fraction": 0.6492617933, "include": true, "reason": "import numpy", "num_tokens": 645}
import h5py
import math
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from mpl_toolkits import mplot3d

from net import CVAE_stgcn as CVAE
from utils import loader_stgcn as loader
from utils import losses
from utils.common import *

from torchlight import torchlight


def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv1d') != -1:
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif classname.find('Conv2d') != -1:
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


def vae_loss(x_in, x_out, mean, lsig, beta=1.):
    # BCE = nn.functional.l1_loss(x_out, x_in)
    # BCE = nn.functional.binary_cross_entropy(x_out, x_in)
    # BCE = losses.affective_loss(x_in, x_out)
    BCE = losses.between_frame_loss(x_in, x_out)
    KLD = -0.5 * torch.sum(1 + lsig - mean.pow(2) - lsig.exp())
    return BCE + beta*KLD


class Processor(object):
    """
        Processor for gait generation
    """

    def __init__(self, args, ftype, data_loader, C, T, V, num_classes, graph_dict, n_z=32, device='cuda:0'):

        self.args = args
        self.ftype = ftype
        self.data_loader = data_loader
        self.num_classes = num_classes
        self.result = dict()
        self.iter_info = dict()
        self.epoch_info = dict()
        self.meta_info = dict(epoch=0, iter=0)
        self.device = device
        self.io = torchlight.IO(
            self.args.work_dir,
            save_log=self.args.save_log,
            print_log=self.args.print_log)

        # model
        self.C = C
        self.T = T
        self.V = V
        self.n_z = n_z
        if not os.path.isdir(self.args.work_dir):
            os.mkdir(self.args.work_dir)
        self.model = CVAE.CVAE(C, T, V, self.n_z, num_classes, graph_dict)
        # use the device passed in instead of a hard-coded 'cuda:0'
        self.model.to(self.device)
        self.model.apply(weights_init)
        self.loss = vae_loss
        self.best_loss = math.inf
        self.loss_updated = False
        self.step_epochs = [math.ceil(float(self.args.num_epoch * x)) for x in self.args.step]
        self.best_epoch = 0
        self.mean = 0.
        self.lsig = 1.

        # optimizer
        if self.args.optimizer == 'SGD':
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=self.args.base_lr,
                momentum=0.9,
                nesterov=self.args.nesterov,
                weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'Adam':
            self.optimizer = optim.Adam(
                self.model.parameters(),
                lr=self.args.base_lr,
                weight_decay=self.args.weight_decay)
        else:
            raise ValueError('Unsupported optimizer: {}'.format(self.args.optimizer))
        self.lr = self.args.base_lr

    def adjust_lr(self):
        # if self.args.optimizer == 'SGD' and\
        if self.meta_info['epoch'] in self.step_epochs:
            lr = self.args.base_lr * (
                0.1 ** np.sum(self.meta_info['epoch'] >= np.array(self.step_epochs)))
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = lr
            self.lr = lr

    def show_epoch_info(self):
        for k, v in self.epoch_info.items():
            self.io.print_log('\t{}: {:.4f}. Best so far: {:.4f} (epoch: {:d}).'.
format(k, v, self.best_loss, self.best_epoch)) if self.args.pavi_log: self.io.log('train', self.meta_info['iter'], self.epoch_info) def show_iter_info(self): if self.meta_info['iter'] % self.args.log_interval == 0: info = '\tIter {} Done.'.format(self.meta_info['iter']) for k, v in self.iter_info.items(): if isinstance(v, float): info = info + ' | {}: {:.4f}'.format(k, v) else: info = info + ' | {}: {}'.format(k, v) self.io.print_log(info) if self.args.pavi_log: self.io.log('train', self.meta_info['iter'], self.iter_info) def per_train(self, epoch): self.model.train() self.adjust_lr() train_loader = self.data_loader['train'] loss_value = [] for data, label in train_loader: # get data data = data.float().to(self.device) ldec = label.float().to(self.device) lenc = ldec.unsqueeze(2).unsqueeze(2).unsqueeze(2)\ .repeat([1, 1, data.shape[2], data.shape[3], data.shape[4]]) # forward output, self.mean, self.lsig, _ = self.model(data, lenc, ldec) loss = self.loss(data, output, self.mean, self.lsig) # backward self.optimizer.zero_grad() loss.backward() self.optimizer.step() # statistics self.iter_info['loss'] = loss.data.item() self.iter_info['lr'] = '{:.6f}'.format(self.lr) loss_value.append(self.iter_info['loss']) self.show_iter_info() self.meta_info['iter'] += 1 # temp1 = data.permute(0, 2, 3, 1, 4).contiguous().view(data.shape[0], data.shape[2], # data.shape[1] * data.shape[3]).detach().cpu().numpy() # temp2 = output.permute(0, 2, 3, 1, 4).contiguous().view(data.shape[0], data.shape[2], # data.shape[1] * data.shape[ # 3]).detach().cpu().numpy() # temp3 = temp2 - np.tile(temp2[:, :, 0:3], (1, 1, 16)) # xdata_gt = temp1[0, 0, ::3] # ydata_gt = temp1[0, 0, 1::3] # zdata_gt = temp1[0, 0, 2::3] # xdata_sn = temp2[0, 0, ::3] # ydata_sn = temp2[0, 0, 1::3] # zdata_sn = temp2[0, 0, 2::3] # fig = plt.figure() # ax = plt.axes(projection='3d') # ax.plot3D(xdata_gt[0:4], ydata_gt[0:4], zdata_gt[0:4]) # ax.plot3D(xdata_gt[[2, 4, 5, 6]], ydata_gt[[2, 4, 5, 6]], zdata_gt[[2, 4, 5, 6]]) # ax.plot3D(xdata_gt[[2, 7, 8, 9]], ydata_gt[[2, 7, 8, 9]], zdata_gt[[2, 7, 8, 9]]) # ax.plot3D(xdata_gt[[0, 10, 11, 12]], ydata_gt[[0, 10, 11, 12]], zdata_gt[[0, 10, 11, 12]]) # ax.plot3D(xdata_gt[[0, 13, 14, 15]], ydata_gt[[0, 13, 14, 15]], zdata_gt[[0, 13, 14, 15]]) # ax.plot3D(xdata_sn[0:4], ydata_sn[0:4], zdata_sn[0:4]) # ax.plot3D(xdata_sn[[2, 4, 5, 6]], ydata_sn[[2, 4, 5, 6]], zdata_sn[[2, 4, 5, 6]]) # ax.plot3D(xdata_sn[[2, 7, 8, 9]], ydata_sn[[2, 7, 8, 9]], zdata_sn[[2, 7, 8, 9]]) # ax.plot3D(xdata_sn[[0, 10, 11, 12]], ydata_sn[[0, 10, 11, 12]], zdata_sn[[0, 10, 11, 12]]) # ax.plot3D(xdata_sn[[0, 13, 14, 15]], ydata_sn[[0, 13, 14, 15]], zdata_sn[[0, 13, 14, 15]]) # plt.show() # plt.savefig(os.path.join(self.args.work_dir, 'epoch{}_output.png'.format(epoch))) self.epoch_info['mean_loss'] = np.mean(loss_value) self.show_epoch_info() self.io.print_timer() def per_test(self, evaluation=True): self.model.eval() test_loader = self.data_loader['test'] loss_value = [] result_frag = [] label_frag = [] for data, label in test_loader: # get data data = data.float().to(self.device) ldec = label.float().to(self.device) lenc = ldec.unsqueeze(2).unsqueeze(2).unsqueeze(2)\ .repeat([1, 1, data.shape[2], data.shape[3], data.shape[4]]) # inference with torch.no_grad(): output, mean, lsig, _ = self.model(data, lenc, ldec) result_frag.append(output.data.cpu().numpy()) # get loss if evaluation: loss = self.loss(data, output, mean, lsig) loss_value.append(loss.item()) label_frag.append(label.data.cpu().numpy()) self.result = 
np.concatenate(result_frag) if evaluation: self.label = np.concatenate(label_frag) self.epoch_info['mean_loss'] = np.mean(loss_value) if self.epoch_info['mean_loss'] < self.best_loss: self.best_loss = self.epoch_info['mean_loss'] self.best_epoch = self.meta_info['epoch'] self.loss_updated = True else: self.loss_updated = False self.show_epoch_info() def train(self): print("log train() enter") for epoch in range(self.args.start_epoch, self.args.num_epoch): self.meta_info['epoch'] = epoch # training self.io.print_log('Training epoch: {}'.format(epoch)) self.per_train(epoch) self.io.print_log('Done.') # evaluation if (epoch % self.args.eval_interval == 0) or ( epoch == self.args.num_epoch): self.io.print_log('Eval epoch: {}'.format(epoch)) self.per_test() self.io.print_log('Done.') # save model and weights if self.loss_updated: torch.save(self.model.state_dict(), os.path.join(self.args.work_dir, 'epoch{}_model.pth.tar'.format(epoch))) self.generate(epoch=str(epoch)) # for epoch in range(self.args.start_epoch, self.args.num_epoch): # self.meta_info['epoch'] = epoch # # # training # self.io.print_log('Training epoch: {}'.format(epoch)) # self.per_train() # self.io.print_log('Done.') # # # save model and weights # # serialize model to JSON # if ((epoch + 1) % self.args.save_interval == 0) or\ # (epoch + 1 == self.args.num_epoch): # torch.save(self.model.state_dict(), # os.path.join(self.args.work_dir, 'epoch{}_model.pth.tar'.format(epoch + 1))) # # filename = 'epoch{}_model.pt'.format(epoch + 1) # # self.io.save_model(self.model, filename) # # # evaluation # if ((epoch + 1) % self.args.eval_interval == 0) or ( # epoch + 1 == self.args.num_epoch): # self.io.print_log('Eval epoch: {}'.format(epoch)) # self.per_test() # self.io.print_log('Done.') def test(self): # the path of weights must be appointed if self.args.weights is None: raise ValueError('Please appoint --weights.') self.io.print_log('Model: {}.'.format(self.args.model)) self.io.print_log('Weights: {}.'.format(self.args.weights)) # evaluation self.io.print_log('Evaluation Start:') self.per_test() self.io.print_log('Done.\n') # save the output of model if self.args.save_result: result_dict = dict( zip(self.data_loader['test'].dataset.sample_name, self.result)) self.io.save_pkl(result_dict, 'test_result.pkl') # def generate(self, base_path, data_max, data_min, max_z=1.5, total_samples=10, fill=5): def generate(self, data_max=1., data_min=0., max_z=1.5, total_samples=10, fill=5, epoch=''): # load model filename = os.path.join(self.args.work_dir, 'epoch{}_model.pth.tar'.format(self.best_epoch)) self.model.load_state_dict(torch.load(filename)) emotions = ['Angry', 'Neutral', 'Happy', 'Sad'] ffile = 'features'+self.ftype+'CVAEGCN' ffile += '_'+epoch+'.h5' if epoch else '.h5' lfile = 'labels'+self.ftype+'CVAEGCN' lfile += '_'+epoch+'.h5' if epoch else '.h5' h5Featr = h5py.File(os.path.join(self.args.data_dir, ffile), 'w') h5Label = h5py.File(os.path.join(self.args.data_dir, lfile), 'w') for count in range(total_samples): gen_seqs = np.empty((self.num_classes, self.T, self.C*self.V)) for cls in range(self.num_classes): lenc = np.zeros((1, self.num_classes), dtype='float32') lenc[0, cls] = 1. 
# z = np.zeros((1, self.n_z), dtype='float32') # z[0, 0] = np.random.random_sample() * max_z * 2 - max_z # z[0, 1] = np.random.random_sample() * max_z * 2 - max_z z = np.float32(np.random.randn(1, self.n_z))*max_z*2 - max_z with torch.no_grad(): z = to_var(torch.from_numpy(z)) lenc = to_var(torch.from_numpy(lenc)) gen_seq_curr = self.model.decoder(z, lenc, self.T, self.V) gen_seq_curr = gen_seq_curr.permute(0, 2, 3, 1, 4).contiguous() gen_seq_curr = gen_seq_curr.view(gen_seq_curr.size()[0], gen_seq_curr.size()[1], gen_seq_curr.size()[2]*gen_seq_curr.size()[3]) gen_seqs[cls, :, :] = gen_seq_curr.cpu().numpy() # gen_seqs[cls, :, :] -= np.tile(gen_seqs[cls, :, 0:self.C], (1, self.V)) for idx in range(gen_seqs.shape[0]): h5Featr.create_dataset(str(count + 1).zfill(fill) + '_' + emotions[idx], # data=loader.descale(gen_seqs[idx, :, :], data_max, data_min)) data=gen_seqs[idx, :, :]) h5Label.create_dataset(str(count + 1).zfill(fill) + '_' + emotions[idx], data=idx) print('\rGenerating data: {:d} of {:d} ({:.2f}%).' .format(count+1, total_samples, 100*(count+1)/total_samples), end='') h5Featr.close() h5Label.close() print()
{"hexsha": "0ef12d2f49f641d00ce3037fb81fa9c48d1b369c", "size": 14012, "ext": "py", "lang": "Python", "max_stars_repo_path": "generator_cvae/utils/processor_stgcn.py", "max_stars_repo_name": "1suancaiyu/STEP", "max_stars_repo_head_hexsha": "54195112990feaee137f5137775c736d07c2d26f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generator_cvae/utils/processor_stgcn.py", "max_issues_repo_name": "1suancaiyu/STEP", "max_issues_repo_head_hexsha": "54195112990feaee137f5137775c736d07c2d26f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generator_cvae/utils/processor_stgcn.py", "max_forks_repo_name": "1suancaiyu/STEP", "max_forks_repo_head_hexsha": "54195112990feaee137f5137775c736d07c2d26f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2117647059, "max_line_length": 117, "alphanum_fraction": 0.5320439623, "include": true, "reason": "import numpy", "num_tokens": 3646}
include("INCLUDEME.jl") using Yao, Yao.Blocks, JLD2 # using Yao, Circuit, UnicodePlots, GradOptim, Utils, ArgParse, JLD2, FileIO import Kernels # n = 6 # qcbm = QCBM{n, 10}(get_nn_pairs(n)) @load "data.jld" output layer(::Val{:first}) = rollrepeat(chain(Rx(), Rz())) layer(::Val{:last}) = rollrepeat(chain(Rz(), Rx())) layer(::Val{:mid}) = rollrepeat(chain(Rz(), Rx(), Rz())) c = kron(6, i=>chain(Rx(), Rz()) for i = 1:6) # c = layer(Val(:first))(6) dispatch!(c, [i for i=1:12]) c out = statevec(apply!(register(bit"000000"), c)) # dispatch!(qcbm, params) # out = statevec(qcbm()) @show out ≈ output out
{"hexsha": "bc67aa5f11b92aaf0ecdcf38fac248806e1fd317", "size": 612, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "check.jl", "max_stars_repo_name": "Roger-luo/QCBM", "max_stars_repo_head_hexsha": "db44eca3add49f26e2ff70addfe75e97a6684186", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "check.jl", "max_issues_repo_name": "Roger-luo/QCBM", "max_issues_repo_head_hexsha": "db44eca3add49f26e2ff70addfe75e97a6684186", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "check.jl", "max_forks_repo_name": "Roger-luo/QCBM", "max_forks_repo_head_hexsha": "db44eca3add49f26e2ff70addfe75e97a6684186", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.8571428571, "max_line_length": 76, "alphanum_fraction": 0.6323529412, "num_tokens": 219}
[STATEMENT] lemma invar_insert: "invar t \<Longrightarrow> invar (insert xs t)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. invar t \<Longrightarrow> invar (Trie_Map.insert xs t) [PROOF STEP] apply(induction xs t rule: insert.induct) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>b m. invar (trie_map.Nd b m) \<Longrightarrow> invar (Trie_Map.insert [] (trie_map.Nd b m)) 2. \<And>x xs b m. \<lbrakk>invar (case lookup m x of None \<Rightarrow> Trie_Map.empty | Some t \<Rightarrow> t) \<Longrightarrow> invar (Trie_Map.insert xs (case lookup m x of None \<Rightarrow> Trie_Map.empty | Some t \<Rightarrow> t)); invar (trie_map.Nd b m)\<rbrakk> \<Longrightarrow> invar (Trie_Map.insert (x # xs) (trie_map.Nd b m)) [PROOF STEP] apply(auto simp: M.map_specs RBT_Set.empty_def[symmetric] split: option.split) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
{"llama_tokens": 354, "file": null, "length": 3}
[STATEMENT] lemma enn2real_leD: "\<lbrakk> enn2real x < y; x \<noteq> \<top> \<rbrakk> \<Longrightarrow> x < ennreal y" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>enn2real x < y; x \<noteq> \<top>\<rbrakk> \<Longrightarrow> x < ennreal y [PROOF STEP] by(cases x)(simp_all add: ennreal_lessI)
{"llama_tokens": 133, "file": "CryptHOL_GPV_Expectation", "length": 1}
// Boost.Geometry (aka GGL, Generic Geometry Library) // Unit Test // Copyright (c) 2010 Alfredo Correa // Copyright (c) 2010-2012 Barend Gehrels, Amsterdam, the Netherlands. // Use, modification and distribution is subject to the Boost Software License, // Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <boost/config.hpp> #include <geometry_test_common.hpp> #include<boost/geometry/geometry.hpp> #include<boost/geometry/geometries/adapted/boost_array.hpp> #include<boost/geometry/geometries/adapted/c_array.hpp> #include<boost/geometry/geometries/adapted/boost_tuple.hpp> #include<iostream> BOOST_GEOMETRY_REGISTER_C_ARRAY_CS(cs::cartesian) BOOST_GEOMETRY_REGISTER_BOOST_ARRAY_CS(cs::cartesian) BOOST_GEOMETRY_REGISTER_BOOST_TUPLE_CS(cs::cartesian) #ifndef BOOST_NO_CXX11_HDR_ARRAY #include<boost/geometry/geometries/adapted/std_array.hpp> BOOST_GEOMETRY_REGISTER_STD_ARRAY_CS(cs::cartesian) #endif //BOOST_NO_CXX11_HDR_ARRAY int test_main(int, char* []) { bg::model::point<double, 3, bg::cs::cartesian> p1(1,2,3); double p2[3] = {4,5,6}; boost::tuple<double, double, double> p3(7,8,9); boost::array<double, 3> p4 = {{10,11,12}}; std::clog << bg::distance(p1, p2) << std::endl; std::clog << bg::distance(p2, p3) << std::endl; std::clog << bg::distance(p3, p4) << std::endl; #ifndef BOOST_NO_CXX11_HDR_ARRAY #ifndef BOOST_NO_CXX11_HDR_INITIALIZER_LIST std::array<double, 3> p5 = {13,14,15}; #else std::array<double, 3> p5; p5[0] = 13; p5[1] = 14; p5[2] = 15; #endif // BOOST_NO_CXX11_HDR_INITIALIZER_LIST std::clog << bg::distance(p4, p5) << std::endl; #endif //BOOST_NO_CXX11_HDR_ARRAY return 0; }
{"hexsha": "86e4ee5ef3e7ea6cbef00c8f6bfbf2d0ce471cb6", "size": 1719, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "deps/src/boost_1_65_1/libs/geometry/test/geometries/boost_array_as_point.cpp", "max_stars_repo_name": "shreyasvj25/turicreate", "max_stars_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11356.0, "max_stars_repo_stars_event_min_datetime": "2017-12-08T19:42:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:55:25.000Z", "max_issues_repo_path": "deps/src/boost_1_65_1/libs/geometry/test/geometries/boost_array_as_point.cpp", "max_issues_repo_name": "shreyasvj25/turicreate", "max_issues_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2402.0, "max_issues_repo_issues_event_min_datetime": "2017-12-08T22:31:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T19:25:52.000Z", "max_forks_repo_path": "deps/src/boost_1_65_1/libs/geometry/test/geometries/boost_array_as_point.cpp", "max_forks_repo_name": "shreyasvj25/turicreate", "max_forks_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1343.0, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 34.38, "max_line_length": 79, "alphanum_fraction": 0.7335660268, "num_tokens": 525}
__description__ = \
    """
Plot barplot with epistatic coefficients.
"""
__author__ = "Zach Sailer"

import gpmap

import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
import matplotlib as mpl

import numpy as np
from scipy.stats import norm as scipy_norm


class Bunch:
    """Classic bunch object for constructing empty objects. Used to make
    readable options.color etc.
    """

    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def update(self, **kwargs):
        """Update attributes from a dictionary, coercing each value to the
        existing attribute's type."""
        types = dict([(key, type(val)) for key, val in self.__dict__.items()])
        for key, value in kwargs.items():
            typed = types[key]
            if typed == np.ufunc:
                typed_val = value
            elif self.__dict__[key] is None:
                typed_val = value
            else:
                typed_val = types[key](value)
            setattr(self, key, typed_val)


def plot_coefs(model, **kwargs):
    """Create a barplot with the values from model, drawing the x-axis as a
    grid of boxes indicating the coordinate of the epistatic parameter.

    Should automatically generate an almost publication-quality figure.

    Parameters
    ----------
    model: BaseModel object
        epistasis model.

    Keyword arguments
    -----------------
    order_colors :
        list/tuple of colors for each order (rgb, html string-like)

    significance :
        how to treat significance. should be
        1. "bon" -> Bonferroni corrected p-values (default)
        2. "p" -> raw p-values
        3. None -> ignore significance

    significance_cutoff :
        p-value below which a term is considered significant

    sigmas :
        number of sigmas to show for each error bar

    y_scalar :
        how much to scale the y-axis above and beyond y-max

    y_axis_name :
        what to put on the y-axis of the barplot

    figsize :
        tuple of figure width,height

    height_ratio :
        how much to scale barplot relative to xbox

    star_cutoffs :
        significance cutoffs for star stack. should go from highest p to
        lowest p (least to most significant)

    star_spacer :
        constant that scales how closely stacked stars are from one another

    ybounds : tuple (default=None)

    bar_borders : bool (default=True)

    xgrid : bool (default=True)

    ecolor : color (default='black')

    elinewidth : float (default=1)

    capthick : float (default=1)

    capsize : float (default=1)

    gridlines : float (default=1)
        x grid linewidth

    Returns
    -------
    fig : matplotlib.pyplot.Figure
        Figure object

    ax : matplotlib.pyplot.Axes
        Axes object
    """
    # Pull out sites and coefficient values, skipping the zeroth-order term.
    sites = model.epistasis.sites[1:]
    values = model.epistasis.values[1:]

    # Set up plotting user options. Type check the options to make sure nothing
    # will break. Also helps with widgets.
    sites = list(sites)

    # Prepare a cycle of colors
    order = len(sites[-1])  # highest interaction order (was len(sites[-1:]), always 1)
    prop_cycle = plt.rcParams['axes.prop_cycle']
    color_cycle = prop_cycle.by_key()['color']
    color_scalar = int(order / len(color_cycle)) + 1
    color_cycle *= color_scalar

    defaults = {
        "order_colors": color_cycle,
        "logbase": np.log10,
        "log_transform": False,
        "significance": "bon",
        "significance_cutoff": 0.05,
        "sigmas": 0,
        "log_space": False,
        "y_scalar": 1.5,
        "y_axis_name": "",
        "figwidth": 5,
        "figheight": 3,
        "figsize": (5, 3),
        "height_ratio": 12,
        "star_cutoffs": (0.05, 0.01, 0.001),
        "star_spacer": 0.0075,
        "ybounds": None,
        "bar_borders": True,
        "xgrid": True,
        "ecolor": "black",
        "capthick": 1,
        "capsize": 1,
        "elinewidth": 1,
        "save": False,
        "fname": "figure.svg",
        "format": "svg",
        "gridlines": 1,
    }
    # types = dict([(key, type(val)) for key, val in defaults.items()])
    # defaults.update(kwargs)
    # options = objectify(defaults)
    options = Bunch(**defaults)
    options.update(**kwargs)

    # Construct keyword arguments
    error_kw = {
        "ecolor": options.ecolor,
        "capsize": options.capsize,
        "elinewidth": options.elinewidth,
        "capthick": options.capthick,
    }
    if "figsize" in kwargs:
        options.figsize = kwargs["figsize"]
    else:
        options.figsize = (options.figwidth, options.figheight)

    # Drop the intercept term if it is still present
    if sites[0] == [0]:
        sites = sites[1:]
        values = values[1:]
        options.sigmas = 0

    # Sanity check on the errors
    if options.sigmas == 0:
        options.significance = None   # was `significance = None` (dead local)
    elif options.significance is None:
        options.sigmas = 0            # was `sigmas = 0` (dead local)

    # Figure out the length of the x-axis and the highest epistasis observed
    num_terms = len(sites)
    highest_order = max([len(l) for l in sites])

    # Figure out how many sites are in the dataset (in case of non-binary
    # system)
    all_sites = []
    for l in sites:
        all_sites.extend(l)
    all_sites = list(dict([(s, []) for s in all_sites]).keys())
    all_sites.sort()
    num_sites = len(all_sites)

    # Figure out how to color each order
    if options.order_colors is None:
        options.order_colors = ["gray" for i in range(highest_order + 1)]
    else:
        if len(options.order_colors) < highest_order:
            raise ValueError("order_colors has too few entries "
                             "(at least {:d} needed)\n".format(highest_order))

        # Stick gray in the 0 position for insignificant values
        options.order_colors = list(options.order_colors)
        options.order_colors.insert(0, "gray")

    # ---------------------- #
    # Deal with significance #
    # ---------------------- #

    # NEED TO RETURN TO SIGNIFICANCE FUNCTIONS
    if options.sigmas == 0:
        options.significance = None
    else:
        # NOTE: `upper` (the coefficient standard errors) is assumed to be
        # provided elsewhere; it is not defined in this module as written.
        # If log transformed, need to get raw values for normal distribution
        if options.log_transform:
            z_score = abs((values - 1) / upper)
        # else, just grab standard values
        else:
            z_score = abs((values) / upper)

        # if z_score is > 5, set z_score to largest possible range
        # where p-value is within floating point
        z_score[z_score > 8.2] = 8.2

    # straight p-values
    if options.significance == "p":
        p_values = 2 * (1 - scipy_norm.cdf(z_score))

    # bonferroni corrected p-values
    elif options.significance == "bon":
        p_values = 2 * (1 - scipy_norm.cdf(z_score)) * len(values)

    # ignore p-values and color everything
    elif options.significance is None:
        p_values = [0 for i in range(len(sites))]
        options.significance_cutoff = 1.0

    # or die
    else:
        raise ValueError("significance argument {:s} not "
                         "recognized\n".format(options.significance))

    # Create color array based on significance
    color_array = np.zeros((len(sites)), dtype=int)
    for i, l in enumerate(sites):
        if p_values[i] < options.significance_cutoff:
            color_array[i] = len(l) - 1
        else:
            color_array[i] = -1

    # ---------------- #
    # Create the plots #
    # 
---------------- #

    # Make a color map
    cmap = mpl.colors.ListedColormap(colors=options.order_colors)
    # set the 'bad' values (nan) to be white and transparent
    cmap.set_bad(color='w', alpha=0)
    bounds = range(-1, len(options.order_colors))
    norm = mpl.colors.BoundaryNorm(bounds, cmap.N)

    if options.xgrid is True:
        fig = plt.figure(figsize=options.figsize)

        n_coefs = len(sites)
        n_sites = max([max(l) for l in sites])

        # Calculate the height_ratio of the grid and the bar graph
        box_size = options.figsize[0] / float(n_coefs)
        grid_height = box_size * n_sites
        bar_height = options.figsize[1] - grid_height
        height_ratio = bar_height / grid_height

        # Create a plot with an upper and lower panel, sharing the x-axis
        gs = mpl.gridspec.GridSpec(2, 1,
                                   height_ratios=[height_ratio, 1],
                                   hspace=0.00)
        ax = [plt.subplot(gs[0])]
        ax.append(plt.subplot(gs[1], sharex=ax[0]))

        bar_axis = ax[0]
        grid_axis = ax[1]

        # Create the box-array x-axis
        # path codes for drawing the boxes
        box_codes = [Path.MOVETO, Path.LINETO,
                     Path.LINETO, Path.LINETO, Path.CLOSEPOLY]

        color_vector = options.order_colors
        for i in range(n_coefs):
            for j in range(n_sites):
                color = "None"
                if j + 1 in sites[i]:
                    color = color_vector[len(sites[i])]
                # vertices for a given square
                verts = [
                    (i, n_coefs - j),
                    (i, n_coefs - j - 1),
                    (i + 1, n_coefs - j - 1),
                    (i + 1, n_coefs - j),
                    (i, n_coefs - j),
                ]
                # Create a patch for a square
                path = Path(verts, box_codes)
                patch = patches.PathPatch(path,
                                          facecolor=color,
                                          lw=options.gridlines)
                grid_axis.add_patch(patch)

        grid_axis.axis('equal')
        grid_axis.axis('off')

    else:
        fig, ax = plt.subplots(figsize=options.figsize)
        bar_axis = ax

    # ------------------ #
    # Create the barplot #
    # ------------------ #

    # set up bar colors
    # prop_cycle = plt.rcParams['axes.prop_cycle']
    # colors_for_bar = prop_cycle.by_key()['color']
    colors_for_bar = np.array([mpl.colors.colorConverter.to_rgba(
        options.order_colors[(i + 1)]) for i in color_array])

    # Plot without errors
    if options.sigmas == 0:
        if options.log_space:
            bar_y = options.logbase(values)
        else:
            bar_y = values
        bar_axis.bar(np.arange(len(bar_y)) + .55, bar_y,
                     width=.9,
                     color=colors_for_bar, edgecolor="none")

    # plot with errors
    else:
        bar_y = values
        upper = options.sigmas * upper
        lower = options.sigmas * lower

        # Plot the graph on a log scale
        if options.log_space:
            new_bar_y = options.logbase(bar_y)
            new_upper = gpmap.errors.upper_transform(bar_y, upper,
                                                     options.logbase)
            new_lower = gpmap.errors.lower_transform(bar_y, lower,
                                                     options.logbase)
        # else if the space is log transformed,
        # plot the non-log interaction values
        else:
            new_upper = upper
            new_lower = lower
            new_bar_y = bar_y
        yerr = [new_lower, new_upper]
        # Plot
        bar_axis.bar(np.arange(len(bar_y)) + 0.05, new_bar_y,
                     width=0.9,
                     yerr=yerr,
                     color=colors_for_bar,
                     error_kw=error_kw,
                     edgecolor="none",
                     linewidth=2)

    # Add horizontal lines for each order
    bar_axis.hlines(0, 0, len(values), linewidth=1, linestyle="-", zorder=0)

    # Label barplot y-axis
    bar_axis.set_ylabel(options.y_axis_name, fontsize=14)

    # Set barplot y-scale
    if options.ybounds is None:
        ymin = -options.y_scalar * max(abs(bar_y))
        ymax = options.y_scalar * max(abs(bar_y))
    else:
        ymin = options.ybounds[0]
        ymax = options.ybounds[1]

    # Make the axes pretty
    bar_axis.axis([-1, len(bar_y) + 1, ymin, ymax])
    bar_axis.set_frame_on(False)  # axis("off")
    bar_axis.get_xaxis().set_visible(False)
    bar_axis.get_yaxis().tick_left()
    bar_axis.get_yaxis().set_tick_params(direction='out')
    bar_axis.add_artist(mpl.lines.Line2D((-1, -1),
                                         (bar_axis.get_yticks()[1],
                                          bar_axis.get_yticks()[-2]),
                                         color='black', linewidth=1))

    # add 
vertical lines between order breaks previous_order = 1 for i in range(len(sites)): if len(sites[i]) != previous_order: bar_axis.add_artist(mpl.lines.Line2D((i, i), (ymin, ymax), color="black", linestyle=":", linewidth=1)) previous_order = len(sites[i]) # ------------------------- # # Create significance stars # # ------------------------- # if options.sigmas != 0: min_offset = options.star_spacer * (ymax - ymin) for i in range(len(p_values)): star_counter = 0 for j in range(len(options.star_cutoffs)): if p_values[i] < options.star_cutoffs[j]: star_counter += 1 else: break for j in range(star_counter): bar_axis.text(x=(i + 0), y=ymin + (j * min_offset), s="*", fontsize=16) # remove x tick labels try: plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False) except IndexError: pass # Draw the final figure # fig.tight_layout() if options.save: fig.savefig(options.fname, format=options.format) return fig, ax
{"hexsha": "1c3e011942f345472ff9a4ff308d85d8c4906208", "size": 13815, "ext": "py", "lang": "Python", "max_stars_repo_path": "epistasis/pyplot/coefs.py", "max_stars_repo_name": "harmsm/epistasis", "max_stars_repo_head_hexsha": "741b25b3e28015aeeba8d4efc94af1e1d811cd63", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "epistasis/pyplot/coefs.py", "max_issues_repo_name": "harmsm/epistasis", "max_issues_repo_head_hexsha": "741b25b3e28015aeeba8d4efc94af1e1d811cd63", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "epistasis/pyplot/coefs.py", "max_forks_repo_name": "harmsm/epistasis", "max_forks_repo_head_hexsha": "741b25b3e28015aeeba8d4efc94af1e1d811cd63", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-02T00:58:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-16T13:30:30.000Z", "avg_line_length": 31.5410958904, "max_line_length": 79, "alphanum_fraction": 0.5520810713, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3232}
"""Power Spectrum rebinning in log space, derriving gaussian-like errors from the standard distribution. All is scaled to log10, which makes the calculation of the parameters correct. Uses the uncertainty fitting in fit.py.""" from numpy import log10,zeros,sqrt,abs,arange,concatenate,array,alltrue from pylab import find from fit import fit def rebin(f,p,binsize=0.1,minpoints=10,sampling=4): """Rebin the power spectrum (f,p) in log space, producing uncertainties that look Gaussian in log space. This can then be fit or plotted. binsize is in dex sampling is the number of points per independant frequency minpoints is also in terms of independant points (nfreqs*sampling). WARNING: you get unexpected results if some of the powers=0.""" assert len(f)==len(p) logf = log10(f) logp = log10(p) nbin = int(( logf.max() - logf.min() )/binsize) logbinp = zeros(nbin)*0.1 logbine = zeros(nbin)*0.1 logbinf = arange(logf.min()+binsize/2,logf.max()-0.99*binsize/2,binsize) for i in range(nbin): ind = (logf>logbinf[i]-binsize/2)*(logf<logbinf[i]+binsize/2) if ind.sum() < minpoints*sampling: logbinp[i]=-1e50 continue logbinf[i] = logf[ind].mean() logbinp[i] = logp[ind].mean() if logp[ind].mean()<-0.9e50: print susan logbine[i] = sqrt(sampling*0.31/len(logp[ind])) if alltrue(logbinp>-1e50): #return if no underdone bins return logbinf, logbinp, logbine maxbadlogf = find(logbinp<-0.9e50)[-1] #this is an index badf = f[f<=10**logbinf[maxbadlogf]] #set to be redone indices = arange(len(badf),0,int(-minpoints*sampling)) mlogbinf = [] ; mlogbinp = [] ; mlogbine = [] for i in arange(len(indices)-1)+1: ind = arange(indices[-i],indices[-i-1],1) mlogbinf.append(logf[ind].mean()) mlogbinp.append(logp[ind].mean()) mlogbine.append(sqrt(0.31/minpoints)) logbinf = array(mlogbinf+logbinf[maxbadlogf+1:].tolist()) logbine = array(mlogbine+logbine[maxbadlogf+1:].tolist()) logbinp = array(mlogbinp+logbinp[maxbadlogf+1:].tolist()) return logbinf, logbinp, logbine def plotting_rebin(f,p,minfreq=0,binsize=0.1,minpoints=10,sampling=4): """Perform the same rebin as above, but return the upper and lower bounds on the binned power values for plotting with errorbars""" logbinf, logbinp, logbine = rebin(f[f>minfreq],p[f>minfreq],binsize,minpoints,sampling) f = 10**logbinf p = 10**logbinp plow = p - 10**(logbinp-logbine) phigh = 10**(logbinp+logbine) - p result = array((plow,phigh)) return f,p,result def fit_withrebin(f,p,fitfuncin=None,p0=None,minfreq=0,binsize=0.1,minpoints=10,sampling=4, **args): """Without modification, this fits log-binned to a powerlaw plus constant. Change fitfunc and supply correct p0 to fit for arbitary function. Binning parameters are as in "rebin"; returns parameters and uncertainties for successful fit, full message otherwise. Minpoints and sampling really shouldn't matter here. Extra args are passed through to leastsq (eg, maxfev=). """ if fitfuncin: #need function in rescaled logarithmicaly fitfunc = lambda p,x: log10(fitfuncin(p,10**x)) else: fitfunc = lambda p,x : log10(abs(p[0]) * (10**x)**p[1] + abs(p[2])) p[p==0] = min(p[p>0]) ind = f>minfreq logbinf, logbinp, logbine = rebin(f[ind],p[ind],binsize,minpoints,sampling) if p0==None: p0 = [1000,-1,1e-5] return fit(logbinf,logbinp,logbine,p0,fitfunc,**args)
{"hexsha": "d71877d3655069253e2ee144a0eb1c865c327d80", "size": 3547, "ext": "py", "lang": "Python", "max_stars_repo_path": "powspec_rebin.py", "max_stars_repo_name": "martindurant/misc", "max_stars_repo_head_hexsha": "96cebe7aac9c417b5afe065099df6b8724578719", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-11-03T09:47:30.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-31T23:00:57.000Z", "max_issues_repo_path": "powspec_rebin.py", "max_issues_repo_name": "martindurant/misc", "max_issues_repo_head_hexsha": "96cebe7aac9c417b5afe065099df6b8724578719", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-03-20T12:55:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-06T20:22:02.000Z", "max_forks_repo_path": "powspec_rebin.py", "max_forks_repo_name": "martindurant/misc", "max_forks_repo_head_hexsha": "96cebe7aac9c417b5afe065099df6b8724578719", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-03-20T05:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-21T04:18:39.000Z", "avg_line_length": 44.8987341772, "max_line_length": 91, "alphanum_fraction": 0.67775585, "include": true, "reason": "from numpy", "num_tokens": 1120}
# Official NumPy tutorial: https://docs.scipy.org/doc/numpy-dev/user/quickstart.html
# Chinese translation of the official tutorial: "NumPy detailed tutorial"

# 1. Creating and reshaping arrays
import numpy as np

# Create an array
a = np.array([1, 2, 3, 4, 5, 6])
print(a)
# Assigning to a.shape directly is the simplest way to reshape
a.shape = (2, 3)
print('After reshaping:')
print(a)
# [1 2 3 4 5 6]
# [[1 2 3]
#  [4 5 6]]

a.ravel()  # flatten the array
# array([1, 2, 3, 4, 5, 6])

# 2. Concatenating arrays
A = np.floor(np.random.randn(2, 3) * 10)
print('A:\n', A)
B = np.floor(np.random.randn(2, 3) * 10)
print('B:\n', B)
# A:
#  [[ -2.   3. -10.]
#  [  5.   4.   7.]]
# B:
#  [[-14.  -7.   3.]
#  [ 10.   6.  -8.]]

# Concatenate along the first axis
print('Row-wise concatenation:')
print(np.vstack([A, B]))
# Concatenate along the second axis
print('Column-wise concatenation:')
print(np.hstack([A, B]))
# Row-wise concatenation:
# [[ -2.   3. -10.]
#  [  5.   4.   7.]
#  [-14.  -7.   3.]
#  [ 10.   6.  -8.]]
# Column-wise concatenation:
# [[ -2.   3. -10. -14.  -7.   3.]
#  [  5.   4.   7.  10.   6.  -8.]]

# 3. Basic operations
np.exp(2)        # 7.3890560989306504
np.exp2(2)       # 4.0
np.sqrt(4)       # 2.0
np.sin([2, 3])   # array([ 0.90929743,  0.14112001])
np.log(2)        # 0.69314718055994529
np.log10(2)      # 0.3010299956639812
np.log2(2)       # 1.0
np.max([1, 2, 3, 4])  # 4

# 4. Matrix operations with 2-D arrays
A = np.array([[1, 2], [-1, 4]])
B = np.array([[2, 0], [3, 4]])
print('Element-wise product:')
print(A * B)
print('Matrix product:')
print(np.dot(A, B))  # or A.dot(B)
# Element-wise product:
# [[ 2  0]
#  [-3 16]]
# Matrix product:
# [[ 8  8]
#  [10 16]]

# Linear algebra
from numpy import linalg

# Transpose of A
print('Transpose of A:')
print(A.transpose())
# Inverse of A
print('Inverse of A:')
print(linalg.inv(A))
# Eigenvalues and eigenvectors
eigenvalues, eigenvectors = linalg.eig(A)
print('Eigenvalues of A:')
print(eigenvalues)
print('Eigenvectors of A:')
print(eigenvectors)
# Transpose of A:
# [[ 1 -1]
#  [ 2  4]]
# Inverse of A:
# [[ 0.66666667 -0.33333333]
#  [ 0.16666667  0.16666667]]
# Eigenvalues of A:
# [ 2.  3.]
# Eigenvectors of A:
# [[-0.89442719 -0.70710678]
#  [-0.4472136  -0.70710678]]
{"hexsha": "3a1cae33280c9eddcba11977a2b383a9c0c93b79", "size": 1732, "ext": "py", "lang": "Python", "max_stars_repo_path": "aura/AI Engineer/course2_20191117/a3_feature_engineering/5_feature_all/numpy_array_math_op.py", "max_stars_repo_name": "linksdl/futuretec-project-coursera_cerficates", "max_stars_repo_head_hexsha": "278a533501b702abd90ac3124739d3d85935e1f8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aura/AI Engineer/course2_20191117/a3_feature_engineering/5_feature_all/numpy_array_math_op.py", "max_issues_repo_name": "linksdl/futuretec-project-coursera_cerficates", "max_issues_repo_head_hexsha": "278a533501b702abd90ac3124739d3d85935e1f8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aura/AI Engineer/course2_20191117/a3_feature_engineering/5_feature_all/numpy_array_math_op.py", "max_forks_repo_name": "linksdl/futuretec-project-coursera_cerficates", "max_forks_repo_head_hexsha": "278a533501b702abd90ac3124739d3d85935e1f8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.6377952756, "max_line_length": 71, "alphanum_fraction": 0.5340646651, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1028}
%!TEX root = main.tex
\section{Results}
\label{sec:results}

\begin{table*}[tp]
\centering
\begin{minipage}[b]{0.35\textwidth}
\centering
\caption{Resource utilization of design and submodules.}
\label{tab:results_resource_utilization}
\begin{tabular}{l r r r r}
\toprule
& LUT & FF & Slice & BRAM \\
\midrule
Overall & 64.8\% & 13.06\% & 93.29\% & 95.71\% \\
\midrule
quad-core & 2,777 & 720 & 801 & 13 \\
single core & 617 & 132 & 197 & 3 \\
Blowfish core & 354 & 64 & 71 & 0 \\
Password Generator & 216 & 205 & 81 & 0 \\
\bottomrule\\
\end{tabular}
\end{minipage}%
\hspace{1.5cm}
\begin{minipage}[b]{0.5\textwidth}
\centering
\caption{Comparison of multiple implementations and platforms, considering
full system power consumption.}
\label{tab:results_different_plattforms}
\begin{tabular}{l c c c c c c}
\toprule
& \multicolumn{2}{c}{cost parameter 5} & \multicolumn{2}{c}{cost parameter 12} & & \\
& $\frac{\text{Hashes}}{\text{Second}}$ & $\frac{\text{Hashes}}{\text{Watt Second}}$ & $\frac{\text{Hashes}}{\text{Second}}$ & $\frac{\text{Hashes}}{\text{Watt Second}}$ & Power & Price \\
\midrule
zedboard & 6,511 & 1,550 & 51.95 & 12.37 & \SI{4.2}{\watt} & \$319 \\
Virtex-7 & 51,437 & 2,572 & 410.4 & 20.52 & \SI{20}{\watt} & \$3,495 \\
\midrule
Xeon E3-1240 & 6,210 & 20.7 & 50 & 0.17 & \SI{300}{\watt} & \$262 \\
GTX 750 Ti & 1,920 & 6.4 & 15 & 0.05 & \SI{300}{\watt} & \$120 \\
\midrule
\cite{WOOT/Malvoni14}~Epiphany 16 & 1,207 & 132.64 & 9.64 & 1.06 & \SI{9.1}{\watt} & \$149 \\
\cite{WOOT/Malvoni14}~zedboard & 4,571 & 682.24 & 64.83 & 9.68 & \SI{6.7}{\watt} & \$319 \\
\bottomrule\\
\end{tabular}
\end{minipage}%
\end{table*}

In this section we present the results of our implementation. We used Xilinx
ISE 14.7 and -- where needed -- Xilinx Vivado 2014.1 during the design flow,
and verified the design both in simulation and on the zedboard after Place and
Route.

Table~\ref{tab:results_resource_utilization} provides the post place-and-route
results of the full design on the zedboard. We implemented the design using
ten parallel bcrypt quad-cores and a Xillybus interface. The design achieves a
clock frequency of 100 MHz. The optimizations from
Section~\ref{sec:implementation} reduced the LUT consumption to roughly 600
LUTs and the number of BRAMs to 3.25 per single core. We can therefore fit ten
quad-cores -- and thus 40 single cores -- on a zedboard, including the on-chip
password generation.

Each bcrypt core needs a constant number of cycles for hash generation, in
detail:
\begin{equation*}
\begin{aligned}[c]
c_\text{Reset} &= 1\\
c_\text{Delay} &= 19\\
c_\text{bf} &= 18\\
c_\text{key xor} &= 19\\
\end{aligned}
\quad
\begin{aligned}[c]
c_\text{Init} &= 256\\
c_\text{Pipeline} &= n,\, (n = 2)\\
c_\text{updateP} &= 9 \cdot c_\text{bf}\\
c_\text{updateSBox} &= 512 \cdot c_\text{bf}\\
\end{aligned}
\end{equation*}
\begin{align*}
c_\text{ExpandKey} &= c_\text{key xor} + c_\text{updateP} + c_\text{updateSBox} = 9,397\\
c_\text{EncryptECB} &= 3 \cdot 64 \cdot (c_\text{bf} - 1) = 3,264
\end{align*}
Following these values, computing one bcrypt hash needs
\begin{align*}
c_\text{bcrypt} &= c_\text{Reset} + c_\text{Pipeline} + c_\text{Init} + c_\text{Delay} +\\
&\quad (1 + 2^{\text{cost}+1}) \cdot c_\text{ExpandKey} + c_\text{EncryptECB}\\
&= 12,939 + 2^{\text{cost}+1} \cdot 9,397
\end{align*}
cycles to finish. This leads to a total of 614,347 cycles per password (cost
5) and 76,993,163 (cost 12), respectively.
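As a consistency check, for cost parameter 5 these constants give
\begin{equation*}
c_\text{bcrypt} = 12,939 + 2^{6} \cdot 9,397 = 614,347\ \text{cycles},
\end{equation*}
so at 100 MHz a single core computes $10^8 / 614,347 \approx 162.8$ hashes per
second, and the 40 cores on the zedboard reach the 6,511 hashes per second
listed in Table~\ref{tab:results_different_plattforms}.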
In order to compare the design with other architectures, especially with the previous results on the zedboard, we measured the power consumption of the board during a running attack and used (ocl)Hashcat to benchmark a Xeon E3-1240 CPU (4 cores@3.1 GHz) and a GTX 750 Ti (Maxwell architecture) as representatives for the classes of CPUs and GPUs. Furthermore, we synthesized our quad-core architecture on the Virtex 7 XC7VX485T FPGA, which is available on the VC707 development board, and estimated the number of available cores with respect to the area a new interface may occupy. We assume a worst-case upper bound of 20W as the power consumption for the full evaluation board. For the CPU and the GPU attack, we also consider the complete system. While there are smaller power supplies available, we consider a 300W power supply, which is the recommended minimum for the GPU to run stable. \begin{figure}[tp] \centering \begin{tikzpicture}[color/.style={draw=#1!80!black,fill=#1!20}] \pgfplotsset{ height=4cm, width=.4\textwidth, scale only axis, legend style={draw=none,legend cell align=left}} \begin{axis}[ axis x line*=bottom, x tick label style={rotate=15,anchor=south,yshift=-5mm,font=\scriptsize}, y tick label style={font=\scriptsize}, xtick={1, 2, 3, 4, 5, 6}, xticklabels={ Xeon E3-1240$^*$, GTX 750 Ti$^*$, zedboard, Virtex-7, \cite{WOOT/Malvoni14}~Epiphany 16, \cite{WOOT/Malvoni14}~zedboard}, xmin=0.5, xmax=6.5, ymin=0, ymax=200000, ymode=log, ymajorgrids=true, ybar=3pt, bar width=10pt, ] \addplot+[color=red!75!black] plot coordinates{ (1, 6210) (2, 1920) (3, 6511) (4, 51437) (5, 1207) (6, 4571)}; \addplot+[color=green!75!black] plot coordinates{ (1, 20.7) (2, 6.4) (3, 1550) (4, 2572) (5, 132.64) (6, 682.24)}; \legend{$\frac{\text{H}}{\text{s}}$,$\frac{\text{H}}{\text{Ws}}$} \end{axis} \end{tikzpicture} \caption{Comparison of different implementations for cost parameter 5. Left bars (red) show the hashes-per-seconds rate, right bars (green) the hashes-per-watt-seconds rate. Results with $^*$ were measured with (ocl)Hashcat. 
The axial scale is logarithmic.} \label{fig:implementation_comparison} \end{figure} \begin{figure*}[tp] \centering \hspace{-.5cm} \begin{minipage}[b]{0.45\textwidth} \centering \begin{tikzpicture} \pgfplotsset{ height=3.55cm, width=.8\textwidth, scale only axis, legend columns=1, legend style={at={(1,0.5)}, xshift=0.25cm, yshift=1.5cm, anchor=north west, nodes=right, font=\scriptsize, }, every axis plot post/.append style={ mark=none, domain=0:50, samples=100, thick}, } \begin{axis}[ scatter/classes={a={mark=|,black}}, axis y line=left, axis x line=bottom, x tick label style={font=\scriptsize}, y tick label style={font=\scriptsize}, xlabel style={font=\footnotesize}, xlabel=Number of attacked passwords, ylabel style={font=\footnotesize}, ylabel=Total costs in \$1\,000\,000, %xtick={25, 50, 75, 100, 125, 150, 175}, xmin=1, xmax=50, ytick={5000000, 7000000, 10000000, 15000000, 20000000}, yticklabels={$5$, $7$, $10$, $15$, $20$}, ymin=4000000, ymax=25000000, ymode=log, cycle list name=linestyles* ] \addplot+[color=black,only marks,scatter,scatter src=explicit symbolic] coordinates{ (2.75, 10525390.42) [a] (1.91, 5895676.33) [a] (20.33, 11335789.16) [a] (1.43, 5752687.56) [a] (24.86, 11544263.66) [a] (8.69, 7897759.68) [a] (1017.0, 8177871.0) [a] }; \addplot+[color=blue]{5330652+295327*x}; %CPU \addplot+[color=brown]{7896960+955217*x}; %GPU \addplot+[color=orange]{5936853+225588*x}; %CPU+GPU \addplot+[color=green]{5749275+2388*x}; %Virtex7 \addplot+[color=red]{4146681+3963*x}; %zedboard \addplot+[color=gray]{10398561+46092*x}; %Epiphany \addplot+[color=purple]{5878532+8961*x}; %OWzb \legend{ break-even, CPU$^*$, GPU$^*$, CPU+GPU$^*$, Virtex-7, zedboard, \cite{WOOT/Malvoni14}~Epiphany, \cite{WOOT/Malvoni14}~zedboard } \end{axis} \end{tikzpicture} \caption{Total costs in \textit{millions USD} for attacking $n$ passwords of length 8 from a set of 62 characters, with logarithmic scale. Each attack finishes within \textit{one month}. 
Both the acquisition costs for enough devices and the total power costs where considered.} \label{fig:costs_for_password_cracking} \end{minipage}% \hspace{2.cm} \begin{minipage}[b]{0.45\textwidth} \centering \begin{tikzpicture} \pgfplotsset{ height=3.55cm, width=.8\textwidth, scale only axis, legend columns=3, legend style={column sep=1ex, at={(0,0)}, xshift=0.25cm, yshift=-1.0cm, anchor=north west, nodes=right, font=\scriptsize }, every axis plot post/.append style={ mark=none, domain=1:200, samples=100, thick}, } \begin{axis}[ scatter/classes={a={mark=|,black}}, axis y line=left, axis x line=bottom, x tick label style={font=\scriptsize}, y tick label style={font=\scriptsize}, xlabel style={font=\footnotesize}, xlabel=Number of attacked passwords, ylabel style={font=\footnotesize}, ylabel=Total costs in \$1\,000, %xtick={25, 50, 75, 100, 125, 150, 175}, xmin=1, xmax=200, ytick={7000, 10000, 15000, 20000, 30000, 40000}, yticklabels={$7$, $10$, $15$, $20$, $30$, $40$}, ymin=5000, ymax=50000, ymode=log, cycle list name=linestyles* ] \addplot+[color=black,only marks,scatter,scatter src=explicit symbolic] coordinates{ (0.5, 12128.04) [a] (1.57, 13992.59) [a] (26.3, 26776.98) [a] (2.64, 24268.6) [a] (1581.0, 26628.0) [a] (464.0, 17692.0) [a] (21.55, 26273.62) [a] (3.3, 14006.39) [a] (8.96, 17811.99) [a] }; \addplot+[color=blue]{11790+672*x}; % CPU \addplot+[color=brown]{18360+2240*x}; % GPU \addplot+[color=orange]{13179+517*x}; % CPU+GPU \addplot+[color=green]{13980+8*x}; % Virtex7 \addplot+[color=red]{12122+12*x}; % zedboard \addplot+[color=gray]{23989+106*x}; % Epiphany \addplot+[color=purple]{7656+12*x}; % OWzb \end{axis} \end{tikzpicture} \caption{Total costs in \textit{thousands USD} for attacking $n$ passwords of length 8 from a set of 62 characters using a cost parameter of 12 (which is commonly recommended), with logarithmic scale. Each attack finishes within \textit{one day}, with a \textit{dictionary attack} where 65\% are covered ($\text{4} \cdot \text{10}^\text{9}$ Tests).} \label{fig:costs_for_dict_attack} \end{minipage}% \end{figure*} Table~\ref{tab:results_different_plattforms} compares the different implementation platforms for cost parameter of 5 and 12. For better comparison, Figure~\ref{fig:implementation_comparison} shows the performance and efficiency graphically only for the first case. % Our zedboard implementation outperforms the previous implementation from \cite{WOOT/Malvoni14} by a factor of 1.42, computing 6511~pps at a measured power consumption of only 4.2W compared to 6.7W of the previous implementation. Thus, this implementation yields also a better power efficiency of 1550 pps per watt, which is more than twice as efficient as the previous implementation. The CPU attack on a Xeon computes 5\% less pps, at a significantly higher power consumption. Even considering only the power consumption of the CPU itself of 80W, the efficiency of the zedboard is still about 20 times higher. The estimated Virtex-7 design shows that the high-performance board is a decent alternative to the zedboard: it outperforms all other platforms with 51437 pps and has a very high power-efficiency rating. The drawback is the high price of \$3495 for the development board. To analyze the full costs of an attack, including the necessary power consumption (at the price of 10.08 cents per kWh\footnote{Taken from the \enquote{Independent Statistics \& Analysis U.S. Energy Information Administration}, average retail price of electricity United States all sectors. 
\url{http://www.eia.gov/electricity/data/browser}}), we consider two different scenarios. The first uses the fairly low cost parameter of 5 for a simple brute-force attack on passwords of length 8 with 62 different characters and requires the runtime to be at most 1 month. We chose the considerably low cost parameter for comparison with the related work, as it is typically used for bcrypt benchmarks. However, this value is insecure for practical applications, where a common choice seems to be 12, which is also used in the related work. Thus, we use this more reasonable parameter in the second setting. Here, the adversary uses more sophisticated attacks and aims for a reduction of the number of necessary password guesses and for a reduced runtime of one day per cracked password: We consider an adversary with access to meaningful, target-specific, custom dictionaries -- for example generated through social engineering -- and derivation rules. % In~\cite{PBKDEvalutation}, the authors trained on a random subset of 90\% from the leaked RockYou passwords to attack the remaining 10\% and estimated that $\text{4} \cdot \text{10}^\text{9}$ guesses are needed for about 67\% chance of success, which we use as a basis for the computational power. Figure~\ref{fig:costs_for_password_cracking} shows the costs of running brute-force attacks in the first scenario. To achieve the requested amount of password tests in one month, we need 13564 single CPUs, 43872 GPUs, 10361 CPUs + GPUs, 12999 zedboards or 1645 Virtex-7 boards. The figure shows the total costs considering acquisition costs (fixed cost) and the power consumption. It reveals the infeasibility of CPUs for attacking password hashes, and even more clearly the efficiency of special-purpose devices. Even high-performance FPGAs like the Virtex-7 are more profitable after only a few password cracks, than a combination of CPU and GPU. Figure~\ref{fig:costs_for_dict_attack} shows the costs of attacking multiple passwords in the second scenario. Here, we need 30 CPUs, 102 GPUs, 23 CPUs + GPUs, 38 zedboards or 4 Virtex-7 boards. %Even though we consider a much higher cost parameter and require a runtime of %one day per password, the attack is still cheaper due to the smarter derivation %of password candidates. With the higher cost parameter our current zedboard implementation does not yield similar good results and thus \cite{WOOT/Malvoni14} implementation is currently better suited for this attack when mounted on a zedboard. With the higher cost parameter, their implementation can conceal an interface bottleneck coming from the initialization of the bcrypt cores. As our implementation does not suffer from this bottleneck, we can run several cores on a bigger FPGA without negative consequences. Please note that the Virtex-7, after amortizing its acquisition costs, outperforms every other platform (reaching the break-even point with \cite{WOOT/Malvoni14} zedboard after attacking about 1500 passwords). %Comparing CPUs with low-power targets like the zedboard leads to the same result %as before -- power efficiency has a great impact on overall attack costs. %Therefore, FPGA platforms are better suited for attacking passwords and a %successful attacker does not have to spend more than \$10000 for cracking %passwords.
{"hexsha": "199fce5d958255b3fccc61a71f71299dffb2b232", "size": 15060, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper/results.tex", "max_stars_repo_name": "pfasante/high-speed_bcrypt", "max_stars_repo_head_hexsha": "5cf57c04c90b3ebd246ddb13ad65312772aa684a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paper/results.tex", "max_issues_repo_name": "pfasante/high-speed_bcrypt", "max_issues_repo_head_hexsha": "5cf57c04c90b3ebd246ddb13ad65312772aa684a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper/results.tex", "max_forks_repo_name": "pfasante/high-speed_bcrypt", "max_forks_repo_head_hexsha": "5cf57c04c90b3ebd246ddb13ad65312772aa684a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8130081301, "max_line_length": 101, "alphanum_fraction": 0.704249668, "num_tokens": 5046}
"""Classification methods.""" import numpy as np from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED from machine_learning.utilities import k_fold_split_indexes, get_k_nn def classification(method, error_func, train, test, **kwargs): """Perform classification for data and return error. Arguments: method {function} -- Classification method. error_func {function} -- Error function. train {DataTuple} -- Training data. test {DataTuple} -- Test data. All extra keyword arguments are passed to method. Returns: float -- Error value returned by error_func. """ y_pred = method(train, test, **kwargs) return error_func(y_pred, test.y.values) def max_classifier(train, test): """Maximum classifier. Classifies using the most common class in training data. Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data. Returns: ndarray -- Predicted values. """ max_category = max_classifier_fit(train.X, train.y) y_pred = max_classifier_predict(test.X, max_category) return y_pred def max_classifier_fit(X, y): """Determines the most common class in input. Arguments: X {DataFrame} -- Indendent variables. y {DataFrame} -- Dependent variable. Returns: int -- Most common class. """ y = y.values max_category = np.bincount(y.astype(int)).argmax() return max_category def max_classifier_predict(X, max_category): """Classify using max classifier. Arguments: X {DataFrame} -- Independent variables. max_category {int} -- Class to classify to. Returns: ndarray -- Predicted values. """ y_pred = np.ones((X.shape[0], 1), dtype=np.int) * max_category return y_pred def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): """Multinomial naive bayes classifier. See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data. Keyword Arguments: n_classes {int} -- Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted values. """ train_X = train.X.values train_y = train.y.values test_X = test.X.values class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y, n_classes) y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return y_pred def mnb_classifier_fit(X, y, n_classes): """Fit MNB classifier. Calculates class priors and feature likelihoods. Arguments: X {ndarray} -- Independent variables. y {ndarray} -- Dependent variables. n_classes {int} -- Number of classes. Returns: ndarray -- Class priors. ndarray -- Feature likelihoods. """ class_priors = mnb_class_priors(y, n_classes) feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes) return class_priors, feature_likelihoods def mnb_class_priors(y, n_classes): """Calculates the logaritm of the probability of belonging to each class. Arguments: y {ndarray} -- Class labels. n_classes {int} -- Number of class labels. Returns: ndarray -- Log of prior probabilities. """ priors = np.zeros(n_classes) for c in range(n_classes): priors[c] = np.log(np.sum(y == c) / y.size) return priors def mnb_feature_likelihoods(X, y, n_classes): """Calculates the probability of feature j, given class k, using Laplace smoothing. Arguments: X {ndarray} -- Features. y {ndarray} -- Class labels. n_classes {int} -- Number of classes. Returns: ndarray -- Logs of feature likelihoods. 
""" n_features = X.shape[1] p_ij = np.zeros((n_classes, n_features)) for c in range(n_classes): Fc_sum = np.sum(X[y == c, :]) for j in range(n_features): Fnc = np.sum(X[y == c, j]) p_ij[c, j] = np.log((1.0 + Fnc) / (n_features + Fc_sum)) return p_ij def mnb_classifier_predict(X, class_priors, feature_likelihoods): """Classify using MNB classifier. Arguments: X {ndarray} -- Independent variables. class_priors {ndarray} -- Class priors. feature_likelihoods {ndarray} -- Feature likelihoods. Returns: ndarray -- Predicted values. """ n_classes = class_priors.size N = X.shape[0] posterior = np.zeros((N, n_classes)) for i in range(N): posterior[i, :] = feature_likelihoods.dot(X[i, :]) for c in range(n_classes): posterior[:, c] = posterior[:, c] + class_priors[c] y_pred = np.argmax(posterior, axis=1) return y_pred def k_nn_classifier(train, test, k): """K-nearest neighbors classifier. Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data. k {int} -- Value for k. Returns: ndarray -- Predicted values. """ y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k) return y_pred def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): """'Fit' K-nearest neighbors classifier by finding optimal value for k using cross validation. Arguments: train {DataTuple} -- Training data. Keyword Arguments: n_folds {int} -- Number of folds to use for validation. (default: {FOLDS}) max_k {int} -- Maximum value for k. (default: {MAX_K}) Returns: int -- Optimal value for k. float -- Error for selected k. """ # TODO: combine with k_nn_regression_fit()? X = train.X.values y = train.y.values N = X.shape[0] folds = k_fold_split_indexes(N, n_folds) min_error = np.infty best_k = 1 for k in range(1, max_k): errors = np.zeros(n_folds) for i in range(n_folds): tmp_folds = folds[:] valid_ix = tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :], y[train_ix], k) error = classification_error(y_pred, y[valid_ix]) errors[i] = (valid_ix.size * error) mean_error = np.sum(errors) / N if mean_error < min_error: min_error = mean_error best_k = k return int(best_k), min_error def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES): """Classify using K-nearest neighbors classifier. Assigns class labels based on the most common class in k-nearest neighbors. Arguments: X {DataFrame} -- Independent variables. X_train {DataFrame} -- Independent training variables. y_train {DataFrame} -- Dependent training variables. k {int} -- Value of k. Keyword Arguments: n_classes {int} -- Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted variables. """ try: X = X.values except AttributeError: pass try: X_train = X_train.values except AttributeError: pass try: y_train = y_train.values except AttributeError: pass assert X.shape[1] == X_train.shape[1] N = X.shape[0] y_pred = np.zeros((N, 1)) for i in range(N): point = X[i, :] neighbors, _ = get_k_nn(point, X_train, k) train_labels = y_train[neighbors] class_sums = [np.sum(train_labels == i) for i in range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums) return y_pred def k_nn_assign_label(class_sums): """Assing label according the most common class. If there are multiple candidates, pick one randomly. Arguments: class_sums {list} -- Class frequencies. Returns: int -- Assinged class label. 
""" order = np.argsort(class_sums)[::-1] candidates = [x for x in order if x == order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true): """Return classification error. Sum of incorrectly assinged classes divided by the number of points. Arguments: y_pred {ndarray} -- Predicted values. y_true {ndarray} -- True values. Returns: float -- Error. """ y_true = y_true.reshape(y_pred.shape) return np.sum(y_pred.astype(np.int) != y_true.astype(np.int)) / float(y_pred.size)
{"hexsha": "d06e42ee0b373f4420c69a853056915fb5aa21df", "size": 8652, "ext": "py", "lang": "Python", "max_stars_repo_path": "machine_learning/classification.py", "max_stars_repo_name": "soikkea/python-algorithms", "max_stars_repo_head_hexsha": "e1e1c3a81d24f80a4c3d7ba5a6b3da3be6a1ea19", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "machine_learning/classification.py", "max_issues_repo_name": "soikkea/python-algorithms", "max_issues_repo_head_hexsha": "e1e1c3a81d24f80a4c3d7ba5a6b3da3be6a1ea19", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "machine_learning/classification.py", "max_forks_repo_name": "soikkea/python-algorithms", "max_forks_repo_head_hexsha": "e1e1c3a81d24f80a4c3d7ba5a6b3da3be6a1ea19", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7307692308, "max_line_length": 98, "alphanum_fraction": 0.6289875173, "include": true, "reason": "import numpy", "num_tokens": 2077}
from __future__ import print_function

import numpy as np
import matplotlib.pyplot as plt

from matplotlib.patches import Ellipse
from sklearn.datasets import make_blobs
from sklearn.mixture import GaussianMixture

# Set random seed for reproducibility
np.random.seed(1000)

# Total number of samples
nb_samples = 800

if __name__ == '__main__':
    # Create the dataset
    X, Y = make_blobs(n_samples=nb_samples, n_features=2, centers=3, cluster_std=2.2, random_state=1000)

    # Show the original dataset
    fig, ax = plt.subplots(figsize=(15, 8))

    ax.scatter(X[Y == 0, 0], X[Y == 0, 1], c='r', s=20, marker='p', label='Class 0')
    ax.scatter(X[Y == 1, 0], X[Y == 1, 1], c='g', s=20, marker='d', label='Class 1')
    ax.scatter(X[Y == 2, 0], X[Y == 2, 1], c='b', s=20, marker='s', label='Class 2')

    ax.set_xlabel(r'$x_0$')
    ax.set_ylabel(r'$x_1$')

    ax.legend()
    ax.grid()

    plt.show()

    # Create and fit a Gaussian Mixture model
    gm = GaussianMixture(n_components=3, max_iter=1000, random_state=1000)
    gm.fit(X)

    # Print means, covariances, and weights
    print('Means:\n')
    print(gm.means_)

    print('\nCovariances:\n')
    print(gm.covariances_)

    print('\nWeights:\n')
    print(gm.weights_)

    # Show the clustered dataset with the final Gaussian distributions.
    # Draw three dashed ellipses per component, with axes scaled by the
    # per-axis standard deviations.
    fig, ax = plt.subplots(figsize=(15, 8))

    c = gm.covariances_
    m = gm.means_

    for k in range(3):
        for scale, lw in ((4.0, 2), (3.0, 3), (1.5, 4)):
            g = Ellipse(xy=m[k], width=scale * np.sqrt(c[k][0, 0]),
                        height=scale * np.sqrt(c[k][1, 1]),
                        fill=False, linestyle='dashed', linewidth=lw)
            ax.add_artist(g)

    ax.scatter(X[Y == 0, 0], X[Y == 0, 1], c='r', s=20, marker='p', label='Class 0')
    ax.scatter(X[Y == 1, 0], X[Y == 1, 1], c='g', s=20, marker='d', label='Class 1')
    ax.scatter(X[Y == 2, 0], X[Y == 2, 1], c='b', s=20, marker='s', label='Class 2')

    ax.set_xlabel(r'$x_0$')
    ax.set_ylabel(r'$x_1$')

    ax.legend()
    ax.grid()

    plt.show()

    # Compute AICs and BICs
    nb_components = [2, 3, 4, 5, 6, 7, 8]

    aics = []
    bics = []

    for n in nb_components:
        gm = GaussianMixture(n_components=n, max_iter=1000, random_state=1000)
        gm.fit(X)
        aics.append(gm.aic(X))
        bics.append(gm.bic(X))

    fig, ax = plt.subplots(2, 1, figsize=(15, 8))

    ax[0].plot(nb_components, aics)
    ax[0].set_ylabel('AIC')
    ax[0].grid()

    ax[1].plot(nb_components, bics)
    ax[1].set_xlabel('Number of components')
    ax[1].set_ylabel('BIC')
    ax[1].grid()

    plt.show()
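A small follow-on sketch (a hypothetical addition, since the script stops at plotting the criteria): the preferred component count is simply the argmin of each information criterion.

    # Hypothetical follow-on at the end of the main block: report the
    # component counts that minimize each information criterion.
    best_aic = nb_components[int(np.argmin(aics))]
    best_bic = nb_components[int(np.argmin(bics))]
    print('Best by AIC: {} components; best by BIC: {} components'.format(best_aic, best_bic))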
{"hexsha": "08d6d5ee2a493d5f3f944b10366490b383592976", "size": 3934, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter09/gaussian_mixture.py", "max_stars_repo_name": "PacktPublishing/Machine-Learning-Algorithms-Second-Edition", "max_stars_repo_head_hexsha": "2ddacea1c9f81b4ef9a0a51c4230687350afba6c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 50, "max_stars_repo_stars_event_min_datetime": "2018-08-13T13:11:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T23:00:20.000Z", "max_issues_repo_path": "Chapter09/gaussian_mixture.py", "max_issues_repo_name": "srikanthlakkoju/Machine-Learning-Algorithms-Second-Edition", "max_issues_repo_head_hexsha": "b25d3607e9d5cc388bcf5f1a029bae39bb2b837b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter09/gaussian_mixture.py", "max_forks_repo_name": "srikanthlakkoju/Machine-Learning-Algorithms-Second-Edition", "max_forks_repo_head_hexsha": "b25d3607e9d5cc388bcf5f1a029bae39bb2b837b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2018-06-08T10:56:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-19T06:26:23.000Z", "avg_line_length": 32.5123966942, "max_line_length": 120, "alphanum_fraction": 0.582104728, "include": true, "reason": "import numpy", "num_tokens": 1370}
import numpy as np


def _calcgrlimits1(gr):
    meansattrname = 'means_'
    # scikit-learn renamed GMM to GaussianMixture (and covars_ to
    # covariances_) in version 0.18; support both spellings.
    try:
        from sklearn.mixture import GaussianMixture as GMM
        covarsattrname = 'covariances_'
    except ImportError:
        from sklearn.mixture import GMM
        covarsattrname = 'covars_'
    em = GMM(n_components=3)
    # fit() expects a 2D array of shape (n_samples, n_features).
    em.fit(gr[np.isfinite(gr)].reshape(-1, 1))
    means = getattr(em, meansattrname)
    covars = getattr(em, covarsattrname)
    idxmin = np.argmin(means)
    idxmax = np.argmax(means)
    grmin = means[idxmin] - covars[idxmin]**0.5
    grmax = means[idxmax] + covars[idxmax]**0.5
    return grmin, grmax


def _calcgrlimits2(gr):
    where = np.isfinite(gr)
    grmin = np.percentile(gr[where], 5)
    grmax = np.percentile(gr[where], 95)
    return grmin, grmax


def calcgrlimits(gr):
    # Fall back to simple percentiles if the mixture fit fails.
    try:
        grmin, grmax = _calcgrlimits1(gr)
    except Exception:
        grmin, grmax = _calcgrlimits2(gr)
    return grmin, grmax
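A hypothetical usage sketch for calcgrlimits on synthetic gamma-ray data; the linear gamma-ray index used for vshale below is the standard formula, not something defined in this file.

# Hypothetical usage: estimate clean-sand/shale gamma-ray limits from a
# synthetic log and compute a linear Vshale index. Data is illustrative only.
import numpy as np

rng = np.random.RandomState(42)
gr = np.concatenate([rng.normal(40.0, 8.0, 500),    # clean sand population
                     rng.normal(120.0, 10.0, 500),  # shale population
                     [np.nan] * 20])                # gaps in the log

grmin, grmax = calcgrlimits(gr)
# The limits may come back as 1x1 arrays depending on the sklearn version.
grmin, grmax = float(np.ravel(grmin)[0]), float(np.ravel(grmax)[0])
vshale = np.clip((gr - grmin) / (grmax - grmin), 0.0, 1.0)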
{"hexsha": "4596a6f1ed1b5d1bd37b73ba5463f9c6f607882d", "size": 928, "ext": "py", "lang": "Python", "max_stars_repo_path": "BasicVShale/Algo.py", "max_stars_repo_name": "gcruff/GRIPyPluginsUFF", "max_stars_repo_head_hexsha": "9ea136cedde4b7ae7013fd5cf5816b25923e97e9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-06-06T17:14:23.000Z", "max_stars_repo_stars_event_max_datetime": "2017-06-06T17:14:23.000Z", "max_issues_repo_path": "BasicVShale/Algo.py", "max_issues_repo_name": "gcruff/GRIPyPluginsUFF", "max_issues_repo_head_hexsha": "9ea136cedde4b7ae7013fd5cf5816b25923e97e9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BasicVShale/Algo.py", "max_forks_repo_name": "gcruff/GRIPyPluginsUFF", "max_forks_repo_head_hexsha": "9ea136cedde4b7ae7013fd5cf5816b25923e97e9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4210526316, "max_line_length": 58, "alphanum_fraction": 0.6293103448, "include": true, "reason": "import numpy", "num_tokens": 281}
# ===== VSB Verifications: Velocity Profiles =====
println("=== BEGINNING Verifications4.jl ===")

# === Imports ===
println("   IMPORTING PACKAGES...")
using VSB
using Plots
pyplot()
cd("/Users/Damyn/Documents/BYU/FLOW Lab/VSB/verifications")

# === Geometry of problem ===
println("   GENERATING GEOMETRY...")
NPTS = 250                       # Number of Points on Boundary
del_theta = (2*pi)/(NPTS - 1)    # Step in theta around body
R = 1.0                          # Cylinder Radius
X_coor = zeros(NPTS)             # X-coordinates
Y_coor = zeros(NPTS)             # Y-coordinates
X_tHat = zeros(NPTS)             # X Tangent Vector
Y_tHat = zeros(NPTS)             # Y Tangent Vector
X_nHat = zeros(NPTS)             # X Normal Vector
Y_nHat = zeros(NPTS)             # Y Normal Vector
theta = zeros(NPTS)              # Angle from positive X-axis
for i = 1:NPTS
    X_coor[i] = R*cos((i-1)*del_theta)
    Y_coor[i] = R*sin((i-1)*del_theta)
    X_tHat[i] = sin((i-1)*del_theta)
    Y_tHat[i] = -cos((i-1)*del_theta)
    X_nHat[i] = cos((i-1)*del_theta)
    Y_nHat[i] = sin((i-1)*del_theta)
    theta[i] = (i-1)*del_theta
end

body_pts = [[X_coor[i], Y_coor[i], 0.0] for i in 1:NPTS]
t_hats = [[X_tHat[i], Y_tHat[i], 0.0] for i in 1:NPTS]
n_hats = [[X_nHat[i], Y_nHat[i], 0.0] for i in 1:NPTS]

body = VSB.Boundary(body_pts, t_hats, n_hats)

# === Vortex sheet calculations ===
println("   CALCULATING PROBLEM PARAMETERS...")
magU_inf = 1.0
U_inf(X) = magU_inf .* [1.0, 0.0, 0.0]

params = VSB.Parameters(body, U_inf)
alphas = params.alphas

U(X) = VSB.CalcVSVelocityCirlce(body, alphas, X)

# === Generate velocity field ===
println("   CALCULATING VELOCITY FIELD...")

println("=== END OF Verifications4.jl ===")
{"hexsha": "3c3a9a8f71b3dbfb453c3c6fdad92044f8f9ed00", "size": 1790, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "verifications/VSB_Verification4.jl", "max_stars_repo_name": "DamynChipman/VSB.jl", "max_stars_repo_head_hexsha": "9d9ade28383134e1b20953dd85ab0dc8c6188eb8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-05T13:42:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T13:42:00.000Z", "max_issues_repo_path": "verifications/VSB_Verification4.jl", "max_issues_repo_name": "DamynChipman/VSB.jl", "max_issues_repo_head_hexsha": "9d9ade28383134e1b20953dd85ab0dc8c6188eb8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-20T02:40:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-20T02:40:36.000Z", "max_forks_repo_path": "verifications/VSB_Verification4.jl", "max_forks_repo_name": "DamynChipman/VSB.jl", "max_forks_repo_head_hexsha": "9d9ade28383134e1b20953dd85ab0dc8c6188eb8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4035087719, "max_line_length": 70, "alphanum_fraction": 0.5815642458, "num_tokens": 567}
// Copyright (c) 2015-2020 Daniel Cooke // Use of this source code is governed by the MIT license that can be found in the LICENSE file. #include "vcf_record.hpp" #include <algorithm> #include <iterator> #include <boost/lexical_cast.hpp> #include "vcf_spec.hpp" namespace octopus { // public methods const GenomicRegion& VcfRecord::mapped_region() const noexcept { return region_; } const GenomicRegion::ContigName& VcfRecord::chrom() const noexcept { return region_.contig_name(); } GenomicRegion::Position VcfRecord::pos() const noexcept { return region_.begin() + 1; } const std::string& VcfRecord::id() const noexcept { return id_; } const VcfRecord::NucleotideSequence& VcfRecord::ref() const noexcept { return ref_; } unsigned VcfRecord::num_alt() const noexcept { return static_cast<unsigned>(alt_.size()); } const std::vector<VcfRecord::NucleotideSequence>& VcfRecord::alt() const noexcept { return alt_; } boost::optional<VcfRecord::QualityType> VcfRecord::qual() const noexcept { return qual_; } bool VcfRecord::has_filter(const KeyType& filter) const noexcept { return std::find(std::cbegin(filter_), std::cend(filter_), filter) != std::cend(filter_); } const std::vector<VcfRecord::KeyType>& VcfRecord::filter() const noexcept { return filter_; } bool VcfRecord::has_info(const KeyType& key) const noexcept { return info_.count(key) == 1; } std::vector<VcfRecord::KeyType> VcfRecord::info_keys() const { std::vector<KeyType> result {}; result.reserve(info_.size()); std::transform(info_.cbegin(), info_.cend(), std::back_inserter(result), [] (const auto& p) { return p.first; }); return result; } const std::vector<VcfRecord::ValueType>& VcfRecord::info_value(const KeyType& key) const { return info_.at(key); } bool VcfRecord::has_format(const KeyType& key) const noexcept { return std::find(std::cbegin(format_), std::cend(format_), key) != std::cend(format_); } boost::optional<unsigned> VcfRecord::format_cardinality(const KeyType& key) const noexcept { boost::optional<unsigned> result {}; if (has_format(key)) { for (const auto& p : samples_) { const auto sample_format_cardinality = p.second.other.at(key).size(); if (result) { if (*result != sample_format_cardinality) return boost::none; } else { result = sample_format_cardinality; } } } return result; } const std::vector<VcfRecord::KeyType>& VcfRecord::format() const noexcept { return format_; } unsigned VcfRecord::num_samples() const noexcept { return samples_.size(); } bool VcfRecord::has_genotypes() const noexcept { return std::find(std::cbegin(format_), std::cend(format_), vcfspec::format::genotype) != std::cend(format_); } unsigned VcfRecord::ploidy(const SampleName& sample) const { return get_genotype(sample).indices.size(); } bool VcfRecord::is_sample_phased(const SampleName& sample) const { return get_genotype(sample).phased; } bool VcfRecord::is_homozygous(const SampleName& sample) const { const auto& genotype = get_genotype(sample).indices; return std::adjacent_find(std::cbegin(genotype), std::cend(genotype), std::not_equal_to<>{}) == std::cend(genotype); } bool VcfRecord::is_heterozygous(const SampleName& sample) const { return !is_homozygous(sample); } bool VcfRecord::is_homozygous_ref(const SampleName& sample) const { const auto& genotype = get_genotype(sample).indices; return std::all_of(std::cbegin(genotype), std::cend(genotype), [] (const auto& allele) { return allele == 0; }); } bool VcfRecord::is_refcall() const { const auto is_ref = [] (const auto& allele) { return allele == 0; }; const auto is_hom_ref = [&] (const auto& p) { return 
std::all_of(std::cbegin(p.second.genotype->indices),
                    std::cend(p.second.genotype->indices),
                    is_ref);
    };
    return std::all_of(std::cbegin(samples_), std::cend(samples_), is_hom_ref);
}

bool VcfRecord::is_homozygous_non_ref(const SampleName& sample) const
{
    const auto& genotype = get_genotype(sample).indices;
    return genotype.front() > 0 && is_homozygous(sample);
}

bool VcfRecord::has_ref_allele(const SampleName& sample) const
{
    const auto& genotype = get_genotype(sample).indices;
    return std::find(std::cbegin(genotype), std::cend(genotype), 0) != std::cend(genotype);
}

bool VcfRecord::has_alt_allele(const SampleName& sample) const
{
    const auto& genotype = get_genotype(sample).indices;
    return std::find_if_not(std::cbegin(genotype), std::cend(genotype),
                            [] (const auto& allele) { return allele == 0; }) != std::cend(genotype);
}

const std::vector<VcfRecord::AlleleIndex>& VcfRecord::genotype(const SampleName& sample) const
{
    return get_genotype(sample).indices;
}

const std::vector<VcfRecord::ValueType>& VcfRecord::get_sample_value(const SampleName& sample, const KeyType& key) const
{
    return samples_.at(sample).other.at(key);
}

// helper non-members needed for printing
namespace {

template <typename T>
std::ostream& print(std::ostream& os, const std::vector<T>& v,
                    const std::string& delim = ",", const std::string& empty_value = ".")
{
    if (v.empty()) {
        os << empty_value;
    } else {
        std::copy(std::cbegin(v), std::prev(std::cend(v)), std::ostream_iterator<T>(os, delim.c_str()));
        os << v.back();
    }
    return os;
}

template <typename T>
std::ostream& operator<<(std::ostream& os, const std::vector<T>& v)
{
    return print(os, v);
}

} // namespace

// private methods

std::vector<VcfRecord::SampleName> VcfRecord::samples() const
{
    std::vector<SampleName> result {};
    result.reserve(samples_.size());
    for (const auto& p : samples_) {
        result.push_back(p.first);
    }
    return result;
}

const VcfRecord::Genotype& VcfRecord::get_genotype(const SampleName& sample) const
{
    return *samples_.at(sample).genotype;
}

std::string VcfRecord::get_allele_number(const NucleotideSequence& allele) const
{
    if (allele == vcfspec::missingValue) {
        return vcfspec::missingValue;
    } else if (allele == ref_) {
        return "0";
    } else {
        const auto it = std::find(std::cbegin(alt_), std::cend(alt_), allele);
        return std::to_string(std::distance(std::cbegin(alt_), it) + 1);
    }
}

void VcfRecord::print_info(std::ostream& os) const
{
    if (info_.empty()) {
        os << vcfspec::missingValue;
    } else {
        auto last = std::next(std::cbegin(info_), info_.size() - 1);
        std::for_each(std::cbegin(info_), last, [&os] (const auto& p) {
            os << p.first;
            if (!p.second.empty()) {
                os << "=" << p.second;
            }
            os << ';';
        });
        os << last->first;
        if (!last->second.empty()) {
            os << "=" << last->second;
        }
    }
}

void VcfRecord::print_genotype_allele_numbers(std::ostream& os, const SampleName& sample) const
{
    std::vector<std::string> allele_numbers(ploidy(sample));
    const auto& genotype = get_genotype(sample);
    // Negative indices mark missing alleles and print as the missing value;
    // valid (non-negative) indices print as their allele numbers.
    std::transform(std::cbegin(genotype.indices), std::cend(genotype.indices), std::begin(allele_numbers),
                   [] (auto number) -> std::string {
                       if (number < 0) {
                           return vcfspec::missingValue;
                       } else {
                           return std::to_string(number);
                       }});
    print(os, allele_numbers, (genotype.phased) ?
"|" : "/"); } void VcfRecord::print_other_sample_data(std::ostream& os, const SampleName& sample) const { if (!samples_.empty()) { if (samples_.at(sample).other.empty()) { os << vcfspec::missingValue; } else { const auto& data = samples_.at(sample).other; auto last = std::next(cbegin(data), data.size() - 1); std::for_each(std::cbegin(data), last, [&os] (const auto& p) { print(os, p.second, ","); os << ":"; }); print(os, last->second, ","); } } } void VcfRecord::print_sample_data(std::ostream& os) const { if (num_samples() > 0) { print(os, format_, ":"); os << '\t'; auto samples = this->samples(); std::for_each(std::cbegin(samples), std::prev(std::cend(samples)), [this, &os] (const SampleName& sample) { auto it = std::cbegin(format_); if (*it == vcfspec::format::genotype) { print_genotype_allele_numbers(os, sample); ++it; } std::for_each(it, std::cend(format_), [this, &os, &sample] (const KeyType& key) { os << ':'; print(os, get_sample_value(sample, key), ","); }); os << '\t'; }); auto it = std::cbegin(format_); if (*it == vcfspec::format::genotype) { print_genotype_allele_numbers(os, samples.back()); ++it; } std::for_each(it, std::cend(format_), [this, &os, &samples] (const KeyType& key) { os << ':'; print(os, get_sample_value(samples.back(), key), ","); }); } } // non-member functions const VcfRecord::NucleotideSequence& get_allele(const VcfRecord& record, const VcfRecord::AlleleIndex index) { const static std::string missing_value {vcfspec::missingValue}; if (index < 0) return missing_value; if (index == 0) return record.ref(); return record.alt()[index - 1]; } std::vector<VcfRecord::NucleotideSequence> get_genotype(const VcfRecord& record, const VcfRecord::SampleName& sample) { const auto& gt = record.genotype(sample); std::vector<VcfRecord::NucleotideSequence> result(gt.size()); std::transform(std::cbegin(gt), std::cend(gt), std::begin(result), [&] (auto index) { return get_allele(record, index); }); return result; } bool is_missing(const std::vector<VcfRecord::ValueType>& values) noexcept { return values.size() < 2 && values.front() == vcfspec::missingValue; } bool is_info_missing(const VcfRecord::KeyType& key, const VcfRecord& record) { return !record.has_info(key) || is_missing(record.info_value(key)); } bool is_refcall(const VcfRecord& record) { return record.is_refcall(); } bool is_filtered(const VcfRecord& record) noexcept { const auto& filters = record.filter(); return !filters.empty() && !(filters[0] == vcfspec::filter::pass || filters[0] == vcfspec::missingValue); } bool is_dbsnp_member(const VcfRecord& record) noexcept { return record.has_info(vcfspec::info::dbSNPMember); } bool is_hapmap2_member(const VcfRecord& record) noexcept { return record.has_info(vcfspec::info::hapmap2Member); } bool is_hapmap3_member(const VcfRecord& record) noexcept { return record.has_info(vcfspec::info::hapmap3Member); } bool is_1000g_member(const VcfRecord& record) noexcept { return record.has_info(vcfspec::info::thousandGenomes); } bool is_somatic(const VcfRecord& record) noexcept { return record.has_info(vcfspec::info::somatic); } bool is_validated(const VcfRecord& record) noexcept { return record.has_info(vcfspec::info::validated); } boost::optional<GenomicRegion> get_phase_region(const VcfRecord& record, const VcfRecord::SampleName& sample) { if (record.is_sample_phased(sample) && record.has_format(vcfspec::format::phaseSet)) { return GenomicRegion { record.chrom(), boost::lexical_cast<ContigRegion::Position>(record.get_sample_value(sample, vcfspec::format::phaseSet).front()) - 1, 
static_cast<ContigRegion::Position>(record.pos() + record.ref().size()) - 1 }; } else { return boost::none; } } bool operator==(const VcfRecord& lhs, const VcfRecord& rhs) { // TODO: this should really compare other fields return mapped_region(lhs) == mapped_region(rhs) && lhs.ref() == rhs.ref() && lhs.alt() == rhs.alt(); } bool operator<(const VcfRecord& lhs, const VcfRecord& rhs) { if (mapped_region(lhs) == mapped_region(rhs)) { if (lhs.ref() == rhs.ref()) { return lhs.alt() < rhs.alt(); } else { return lhs.ref() < rhs.ref(); } } else { return mapped_region(lhs) < mapped_region(rhs); } } std::ostream& operator<<(std::ostream& os, const VcfRecord& record) { os << record.chrom() << "\t"; os << record.pos() << "\t"; os << record.id_ << "\t"; os << record.ref_ << "\t"; os << record.alt_ << "\t"; if (record.qual_) { os << static_cast<float>(*record.qual_) << "\t"; } else { os << vcfspec::missingValue << "\t"; } os << record.filter_ << "\t"; record.print_info(os); os << "\t"; record.print_sample_data(os); return os; } // VcfRecord::Builder VcfRecord::Builder::Builder(const VcfRecord& call) : chrom_ {call.chrom()} , pos_ {call.pos()} , id_ {call.id()} , ref_ {call.ref()} , alt_ {call.alt()} , qual_ {call.qual()} , filter_ {call.filter()} , info_ {call.info_} , format_ {call.format()} , samples_ {call.samples_} {} VcfRecord::Builder& VcfRecord::Builder::set_chrom(std::string name) { chrom_ = std::move(name); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_pos(GenomicRegion::Position pos) { pos_ = pos; return *this; } VcfRecord::Builder& VcfRecord::Builder::set_id(std::string id) { id_ = std::move(id); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_ref(const char allele) { ref_ = allele; return *this; } VcfRecord::Builder& VcfRecord::Builder::set_ref(NucleotideSequence allele) { ref_ = std::move(allele); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_alt(const char allele) { alt_.resize(1); alt_.front() = allele; return *this; } VcfRecord::Builder& VcfRecord::Builder::set_alt(NucleotideSequence allele) { alt_.resize(1); alt_.front() = std::move(allele); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_alt(std::vector<NucleotideSequence> alleles) { alt_ = std::move(alleles); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_qual(QualityType quality) { qual_ = quality; return *this; } VcfRecord::Builder& VcfRecord::Builder::set_passed() { filter_.assign({vcfspec::filter::pass}); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_filter(std::vector<KeyType> filter) { filter_ = std::move(filter); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_filter(std::initializer_list<KeyType> filter) { filter_ = filter; return *this; } VcfRecord::Builder& VcfRecord::Builder::add_filter(KeyType filter) { filter_.push_back(std::move(filter)); return *this; } VcfRecord::Builder& VcfRecord::Builder::clear_filter() noexcept { filter_.clear(); return *this; } VcfRecord::Builder& VcfRecord::Builder::reserve_info(unsigned n) { info_.reserve(n); return *this; } VcfRecord::Builder& VcfRecord::Builder::add_info(const KeyType& key) { info_.emplace(key, std::vector<ValueType> {}); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_info(const KeyType& key, const ValueType& value) { return this->set_info(key, {value}); } VcfRecord::Builder& VcfRecord::Builder::set_info(const KeyType& key, std::vector<ValueType> values) { if (key == "END") { if (values.size() != 1) throw std::runtime_error {"VcfRecord::Builder INFO key END requires 1 
value"}; end_ = std::stoll(values.front()); } info_[key] = std::move(values); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_info(const KeyType& key, std::initializer_list<ValueType> values) { return this->set_info(key, std::vector<ValueType> {values}); } VcfRecord::Builder& VcfRecord::Builder::set_info_flag(KeyType key) { return this->set_info(std::move(key), {}); } VcfRecord::Builder& VcfRecord::Builder::set_info_missing(const KeyType& key) { return this->set_info(key, {vcfspec::missingValue}); } VcfRecord::Builder& VcfRecord::Builder::clear_info() noexcept { info_.clear(); return *this; } VcfRecord::Builder& VcfRecord::Builder::clear_info(const KeyType& key) { info_.erase(key); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_format(std::vector<KeyType> format) { format_ = std::move(format); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_format(std::initializer_list<KeyType> format) { format_ = std::move(format); return *this; } VcfRecord::Builder& VcfRecord::Builder::add_format(KeyType key) { format_.push_back(std::move(key)); return *this; } VcfRecord::Builder& VcfRecord::Builder::add_format(std::initializer_list<KeyType> keys) { format_.insert(std::cend(format_), keys); return *this; } VcfRecord::Builder& VcfRecord::Builder::reserve_samples(unsigned n) { samples_.reserve(n); return *this; } VcfRecord::Builder&VcfRecord::Builder:: set_homozygous_ref_genotype(const SampleName& sample, const unsigned ploidy) { auto& genotype = samples_[sample].genotype; genotype = VcfRecord::Genotype {}; genotype->indices.resize(ploidy, 0); genotype->phased = true; return *this; } VcfRecord::Builder& VcfRecord::Builder::set_genotype(const SampleName& sample, const std::vector<NucleotideSequence>& alleles, const Phasing phasing) { auto& genotype = samples_[sample].genotype; genotype = VcfRecord::Genotype {}; genotype->indices.resize(alleles.size()); std::transform(std::cbegin(alleles), std::cend(alleles), std::begin(genotype->indices), [this] (const auto& allele) -> VcfRecord::AlleleIndex { if (allele == vcfspec::missingValue) { return -1; } else if (allele == ref_) { return 0; } else { const auto itr = std::find(std::cbegin(alt_), std::cend(alt_), allele); if (itr != std::cend(alt_)) { return std::distance(std::cbegin(alt_), itr) + 1; // + 1 for ref } else { return -1; } } }); genotype->phased = (phasing == Phasing::phased); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_genotype(const SampleName& sample, const std::vector<boost::optional<unsigned>>& alleles, const Phasing phasing) { auto& genotype = samples_[sample].genotype; genotype = VcfRecord::Genotype {}; genotype->indices.resize(alleles.size()); std::transform(std::cbegin(alleles), std::cend(alleles), std::begin(genotype->indices), [] (const auto& allele) -> VcfRecord::AlleleIndex { if (allele) { return *allele; } else { return -1; } }); genotype->phased = (phasing == Phasing::phased); return *this; } VcfRecord::Builder& VcfRecord::Builder::clear_genotype(const SampleName& sample) noexcept { samples_.at(sample).genotype = boost::none; return *this; } VcfRecord::Builder& VcfRecord::Builder::set_format(const SampleName& sample, const KeyType& key, const ValueType& value) { return this->set_format(sample, key, std::vector<ValueType> {value}); } VcfRecord::Builder& VcfRecord::Builder::set_format(const SampleName& sample, const KeyType& key, std::vector<ValueType> values) { samples_[sample].other[key] = std::move(values); return *this; } VcfRecord::Builder& VcfRecord::Builder::set_format(const 
SampleName& sample, const KeyType& key, std::initializer_list<ValueType> values) { return this->set_format(sample, key, std::vector<ValueType> {values}); } VcfRecord::Builder& VcfRecord::Builder::set_format_missing(const SampleName& sample, const KeyType& key) { return this->set_format(sample, key, std::string {vcfspec::missingValue}); } VcfRecord::Builder& VcfRecord::Builder::clear_format() noexcept { format_.clear(); samples_.clear(); return *this; } VcfRecord::Builder& VcfRecord::Builder::clear_format(const SampleName& sample) noexcept { samples_.erase(sample); return *this; } VcfRecord::Builder& VcfRecord::Builder::clear_format(const SampleName& sample, const KeyType& key) noexcept { if (key == vcfspec::format::genotype) { clear_genotype(sample); } else { const auto sample_itr = samples_.find(sample); if (sample_itr != std::cend(samples_)) { sample_itr->second.other.erase(key); } } return *this; } VcfRecord::Builder& VcfRecord::Builder::set_passed(const SampleName& sample) { return this->set_format(sample, vcfspec::format::filter, {vcfspec::filter::pass}); } VcfRecord::Builder& VcfRecord::Builder::set_filter(const SampleName& sample, std::vector<KeyType> filter) { return this->set_format(sample, vcfspec::format::filter, std::move(filter)); } VcfRecord::Builder& VcfRecord::Builder::set_filter(const SampleName& sample, std::initializer_list<KeyType> filter) { return this->set_format(sample, vcfspec::format::filter, std::move(filter)); } VcfRecord::Builder& VcfRecord::Builder::add_filter(const SampleName& sample, KeyType filter) { samples_[sample].other[vcfspec::format::filter].push_back(std::move(filter)); return *this; } VcfRecord::Builder& VcfRecord::Builder::clear_filter(const SampleName& sample) noexcept { return this->clear_format(sample, vcfspec::format::filter); } VcfRecord::Builder& VcfRecord::Builder::clear_all_sample_filters() noexcept { for (const auto& p : samples_) { this->clear_filter(p.first); } return *this; } VcfRecord::Builder& VcfRecord::Builder::set_somatic() { return this->set_info_flag(vcfspec::info::somatic); } VcfRecord::Builder& VcfRecord::Builder::set_denovo() { return this->set_info_flag("DENOVO"); } VcfRecord::Builder& VcfRecord::Builder::set_reference_reversion() { return this->set_info_flag("REVERSION"); } VcfRecord::Builder& VcfRecord::Builder::set_blocked_reference() { const static std::vector<VcfRecord::NucleotideSequence> refcall_alts {vcfspec::allele::nonref}; if (alt_ != refcall_alts) throw std::runtime_error {"Cannot block a non reference call"}; if (ref_.size() > 1) { set_info("END", pos_ + ref_.size() - 1); ref_.resize(1); } return *this; } GenomicRegion::Position VcfRecord::Builder::pos() const noexcept { return pos_; } void VcfRecord::Builder::collapse_spanning_deletions() { for (auto& alt : alt_) { if (alt.size() > 1 && std::find(std::cbegin(alt), std::cend(alt), vcfspec::deleteMaskAllele[0]) != std::cend(alt)) { alt = vcfspec::deleteMaskAllele; } } } VcfRecord VcfRecord::Builder::build() const { if (format_.empty()) { if (end_) { GenomicRegion region {chrom_, pos_ - 1, *end_ - 1}; return VcfRecord {std::move(region), id_, ref_, alt_, qual_, filter_, info_}; } else { return VcfRecord {chrom_, pos_, id_, ref_, alt_, qual_, filter_, info_}; } } else { if (end_) { GenomicRegion region {chrom_, pos_ - 1, *end_ - 1}; return VcfRecord {std::move(region), id_, ref_, alt_, qual_, filter_, info_, format_, samples_}; } else { return VcfRecord {chrom_, pos_, id_, ref_, alt_, qual_, filter_, info_, format_, samples_}; } } } VcfRecord 
VcfRecord::Builder::build_once() noexcept { if (format_.empty()) { if (end_) { GenomicRegion region {std::move(chrom_), pos_ - 1, *end_ - 1}; return VcfRecord {std::move(region), std::move(id_), std::move(ref_), std::move(alt_), qual_, std::move(filter_), std::move(info_)}; } else { return VcfRecord {std::move(chrom_), pos_, std::move(id_), std::move(ref_), std::move(alt_), qual_, std::move(filter_), std::move(info_)}; } } else { if (end_) { GenomicRegion region {std::move(chrom_), pos_ - 1, *end_ - 1}; return VcfRecord {std::move(region), std::move(id_), std::move(ref_), std::move(alt_), qual_, std::move(filter_), std::move(info_), std::move(format_), std::move(samples_)}; } else { return VcfRecord {std::move(chrom_), pos_, std::move(id_), std::move(ref_), std::move(alt_), qual_, std::move(filter_), std::move(info_), std::move(format_), std::move(samples_)}; } } } } // namespace octopus
{"hexsha": "738056e344fe8ccd4a0c2105686dfbcefd4f71e4", "size": 25632, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/io/variant/vcf_record.cpp", "max_stars_repo_name": "roryk/octopus", "max_stars_repo_head_hexsha": "0ec2839c33b846107278696ee04ce6d7d0f69a54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/io/variant/vcf_record.cpp", "max_issues_repo_name": "roryk/octopus", "max_issues_repo_head_hexsha": "0ec2839c33b846107278696ee04ce6d7d0f69a54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/io/variant/vcf_record.cpp", "max_forks_repo_name": "roryk/octopus", "max_forks_repo_head_hexsha": "0ec2839c33b846107278696ee04ce6d7d0f69a54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6666666667, "max_line_length": 137, "alphanum_fraction": 0.6169241573, "num_tokens": 6583}
#!/usr/bin/env python3 ############################################################################### # # # RMG - Reaction Mechanism Generator # # # # Copyright (c) 2002-2021 Prof. William H. Green (whgreen@mit.edu), # # Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # # # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the 'Software'), # # to deal in the Software without restriction, including without limitation # # the rights to use, copy, modify, merge, publish, distribute, sublicense, # # and/or sell copies of the Software, and to permit persons to whom the # # Software is furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in # # all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # # DEALINGS IN THE SOFTWARE. # # # ############################################################################### """ This module provides classes for fitting atom energies based on a very small, predetermined set of molecules. """ import importlib import json import logging from collections import Counter from typing import Dict, Hashable, List, Union import numpy as np from scipy.stats import distributions from rmgpy import constants from rmgpy.molecule import get_element, Molecule import arkane.encorr.data as data from arkane.encorr.reference import ReferenceDatabase from arkane.modelchem import LevelOfTheory, CompositeLevelOfTheory # List of species labels that will be used for fitting (labels should match reference database) SPECIES_LABELS = [ 'Dihydrogen', 'Dinitrogen', 'Dioxygen', 'Disulfur', 'Difluorine', 'Dichlorine', 'Dibromine', 'Hydrogen fluoride', 'Hydrogen chloride', 'Hydrogen bromide', 'Hydrogen sulfide', 'Water', 'Methane', 'Methyl', 'Ammonia', 'Chloromethane' ] class AEJob: """ A job for fitting atom energies. """ def __init__(self, species_energies: Dict[str, float], level_of_theory: Union[LevelOfTheory, CompositeLevelOfTheory] = None, write_to_database: bool = False, overwrite: bool = False): """ Initialize an AEJob instance. Notes: The species energies should be provided as a dictionary containing the species labels as keys and their single- point electronic energies in Hartree as values. The energies should be calculated using the experimental geometry provided for the species in the reference database, and the zero-point energy should not be included in the electronic energy. Args: species_energies: Dictionary of species labels with single-point electronic energies (Hartree). level_of_theory: Dictionary key for saving atom energies to the database. write_to_database: Save the fitted atom energies directly to the RMG database. overwrite: Overwrite atom energies in the RMG database if they already exist. 
""" self.spcs_energies = species_energies self.level_of_theory = level_of_theory self.write_to_database = write_to_database self.overwrite = overwrite self.ae = AE(species_energies) def execute(self, output_file: str = None): """ Execute the atom energy job. Args: output_file: Write the fitted energies to this file. """ if self.level_of_theory is None: logging.info('Fitting atom energies') else: logging.info(f'Fitting atom energies for {self.level_of_theory}') self.ae.fit() if output_file is not None: with open(output_file, 'a') as f: if self.level_of_theory is not None: f.write(f'# {self.level_of_theory}\n') for element, energy in self.ae.atom_energies.items(): f.write(f'# {element:2}: {energy:15.8f} +/- {self.ae.confidence_intervals[element]:.8f} Hartree\n') f.writelines(self.ae.format_atom_energies( 'atom_energies' if self.level_of_theory is None else self.level_of_theory)) if self.write_to_database: if self.level_of_theory is None: raise Exception('Level of theory is required for writing to database') try: self.ae.write_to_database(self.level_of_theory, overwrite=self.overwrite) except ValueError as e: logging.warning('Could not write atom energies to database. Captured error:') logging.warning(str(e)) class AE: """ A class for fitting atom energies. """ ref_data_src = 'CCCBDB' # Use CCCBDB data ref_data = None # Dictionary of reference data entries def __init__(self, species_energies: Dict[str, float]): self.species_energies = species_energies # Hartree self.atom_energies = None self.confidence_intervals = None for lbl in SPECIES_LABELS: if lbl not in self.species_energies: logging.warning(f'{lbl} missing from provided species energies!') @classmethod def _load_refdata(cls): if cls.ref_data is None: logging.info('Loading reference database') db = ReferenceDatabase() db.load() cls.ref_data = {lbl: spc for lbl, spc in zip(SPECIES_LABELS, db.get_species_from_label(SPECIES_LABELS))} def fit(self): """ Fit atom energies using the provided species energies and corresponding atomization energies from the reference data. 
""" self._load_refdata() mols = [ Molecule().from_adjacency_list( self.ref_data[lbl].adjacency_list, raise_atomtype_exception=False, raise_charge_exception=False ) for lbl in self.species_energies ] atom_counts = [Counter(atom.element.symbol for atom in mol.atoms) for mol in mols] elements = sorted({element for ac in atom_counts for element in ac}, key=lambda s: get_element(s).number) x = np.array([[ac[element] for element in elements] for ac in atom_counts]) # Nmols x Nelements atomization_energies = np.array([ self.ref_data[lbl].reference_data[self.ref_data_src].atomization_energy.value_si / constants.E_h / constants.Na for lbl in self.species_energies ]) zpes = np.array([ self.ref_data[lbl].reference_data[self.ref_data_src].zpe.value_si / constants.E_h / constants.Na for lbl in self.species_energies ]) elec_energies = np.array(list(self.species_energies.values())) # Should already be in Hartree y = atomization_energies + elec_energies + zpes w = np.linalg.solve(x.T @ x, x.T @ y) self.atom_energies = dict(zip(elements, w)) # Get confidence intervals n = len(y) # Ndata k = len(w) # Nparam ypred = x @ w sigma2 = np.sum((y - ypred)**2) / (n - k - 1) # MSE cov = sigma2 * np.linalg.inv(x.T @ x) # covariance matrix se = np.sqrt(np.diag(cov)) # standard error alpha = 0.05 # 95% confidence level tdist = distributions.t.ppf(1 - alpha/2, n - k - 1) # student-t ci = tdist * se # confidence interval half-width self.confidence_intervals = dict(zip(elements, ci)) # Parameter estimates are w +/- ci def write_to_database(self, key: Hashable, overwrite: bool = False, alternate_path: str = None): """ Write atom energies to database. Args: key: Dictionary key to use for atom energies in database. overwrite: Overwrite existing atom energies. alternate_path: Write atom energies and existing database to this path instead. """ if self.atom_energies is None: raise ValueError('No atom energies available for writing') data_path = data.quantum_corrections_path with open(data_path) as f: lines = f.readlines() ae_formatted = self.format_atom_energies(key, indent=True) # Add new atom energies to file without changing existing formatting for i, line in enumerate(lines): if 'atom_energies' in line: if key in data.atom_energies: if overwrite: # Does not overwrite comments del_idx_start = del_idx_end = None for j, line2 in enumerate(lines[i:]): if repr(key) in line2: del_idx_start = i + j del_idx_end = None elif line2.rstrip() == ' },': # Can't have a comment after final brace del_idx_end = i + j + 1 if del_idx_start is not None and del_idx_end is not None: if (lines[del_idx_start - 1].lstrip().startswith('#') or lines[del_idx_end + 1].lstrip().startswith('#')): logging.warning('There may be left over comments from previous atom energies') lines[del_idx_start:del_idx_end] = ae_formatted break else: raise ValueError(f'{key} already exists. Set `overwrite` to True.') else: lines[(i+1):(i+1)] = ['\n'] + ae_formatted break with open(data_path if alternate_path is None else alternate_path, 'w') as f: f.writelines(lines) # Reload data to update atom energy dictionary if alternate_path is None: importlib.reload(data) def format_atom_energies(self, key: Hashable, indent: bool = False) -> List[str]: """ Obtain a list of nicely formatted atom energies suitable for writelines. Args: key: Dictionary key to use for formatting dictionary. indent: Indent each line. Returns: Formatted list of atom energies. 
""" ae_formatted = json.dumps(self.atom_energies, indent=4).replace('"', "'").split('\n') ae_formatted[0] = f'"{key}": ' + ae_formatted[0] ae_formatted[-1] += ',' ae_formatted = [e + '\n' for e in ae_formatted] if indent: ae_formatted = [' ' + e for e in ae_formatted] return ae_formatted
{"hexsha": "f70ae137d2eb886399cbe83df02b37e5d3c5be8f", "size": 11876, "ext": "py", "lang": "Python", "max_stars_repo_path": "arkane/encorr/ae.py", "max_stars_repo_name": "tza0035/RMG-Py", "max_stars_repo_head_hexsha": "38c49f7107d1b19e4a534408a1040ddd313b8596", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 250, "max_stars_repo_stars_event_min_datetime": "2015-06-06T23:32:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T16:45:16.000Z", "max_issues_repo_path": "arkane/encorr/ae.py", "max_issues_repo_name": "tza0035/RMG-Py", "max_issues_repo_head_hexsha": "38c49f7107d1b19e4a534408a1040ddd313b8596", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1781, "max_issues_repo_issues_event_min_datetime": "2015-05-26T23:52:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T19:07:54.000Z", "max_forks_repo_path": "arkane/encorr/ae.py", "max_forks_repo_name": "tza0035/RMG-Py", "max_forks_repo_head_hexsha": "38c49f7107d1b19e4a534408a1040ddd313b8596", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 161, "max_forks_repo_forks_event_min_datetime": "2015-06-02T14:28:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T19:37:14.000Z", "avg_line_length": 43.0289855072, "max_line_length": 119, "alphanum_fraction": 0.5758672954, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2488}
from typing import Optional, Union, NamedTuple, Callable, Sequence
from functools import partial

import matplotlib as mpl
import matplotlib.animation  # mpl.animation is used below; pyplot does not import it
import matplotlib.pyplot as plt
import torch
import numpy as np

from .metrics import compute_precisions


class ContactAndAttentionArtists(NamedTuple):
    image: mpl.image.AxesImage
    contacts: mpl.lines.Line2D
    false_positives: mpl.lines.Line2D
    true_positives: mpl.lines.Line2D
    title: Optional[mpl.text.Text] = None


def plot_attentions(
    attentions: Union[torch.Tensor, np.ndarray],
    ax: Optional[mpl.axes.Axes] = None,
    img: Optional[mpl.image.AxesImage] = None,
    cmap: str = "Blues",
) -> mpl.image.AxesImage:
    if isinstance(attentions, torch.Tensor):
        attentions = attentions.detach().cpu().numpy()
    if ax is None:
        ax = plt.gca()
    if img is not None:
        img.set_data(attentions)
    else:
        img = ax.imshow(attentions, cmap=cmap)
    return img


def plot_contacts_and_attentions(
    predictions: Union[torch.Tensor, np.ndarray],
    contacts: Union[torch.Tensor, np.ndarray],
    ax: Optional[mpl.axes.Axes] = None,
    artists: Optional[ContactAndAttentionArtists] = None,
    cmap: str = "Blues",
    ms: float = 1,
    title: Union[bool, str, Callable[[float], str]] = True,
    animated: bool = False,
) -> ContactAndAttentionArtists:
    if isinstance(predictions, torch.Tensor):
        predictions = predictions.detach().cpu().numpy()
    if isinstance(contacts, torch.Tensor):
        contacts = contacts.detach().cpu().numpy()
    if ax is None:
        ax = plt.gca()

    seqlen = contacts.shape[0]
    relative_distance = np.add.outer(-np.arange(seqlen), np.arange(seqlen))
    bottom_mask = relative_distance < 0
    masked_image = np.ma.masked_where(bottom_mask, predictions)
    invalid_mask = np.abs(np.add.outer(np.arange(seqlen), -np.arange(seqlen))) < 6
    predictions = predictions.copy()
    predictions[invalid_mask] = float("-inf")

    topl_val = np.sort(predictions.reshape(-1))[-seqlen]
    pred_contacts = predictions >= topl_val
    true_positives = contacts & pred_contacts & ~bottom_mask
    false_positives = ~contacts & pred_contacts & ~bottom_mask
    other_contacts = contacts & ~pred_contacts & ~bottom_mask

    if isinstance(title, str):
        title_text: Optional[str] = title
    elif title:
        long_range_pl = compute_precisions(predictions, contacts, minsep=24)[
            "P@L"
        ].item()
        if callable(title):
            title_text = title(long_range_pl)
        else:
            title_text = f"Long Range P@L: {100 * long_range_pl:0.1f}"
    else:
        title_text = None

    if artists is not None:
        artists.image.set_data(masked_image)
        artists.contacts.set_data(*np.where(other_contacts))
        artists.false_positives.set_data(*np.where(false_positives))
        artists.true_positives.set_data(*np.where(true_positives))
        if artists.title is not None and title_text is not None:
            # Text artists are updated with set_text(); they have no set_data().
            artists.title.set_text(title_text)
    else:
        img = ax.imshow(masked_image, cmap=cmap, animated=animated)
        oc = ax.plot(*np.where(other_contacts), "o", c="grey", ms=ms)[0]
        fn = ax.plot(*np.where(false_positives), "o", c="r", ms=ms)[0]
        tp = ax.plot(*np.where(true_positives), "o", c="b", ms=ms)[0]
        ti = ax.set_title(title_text) if title_text is not None else None
        artists = ContactAndAttentionArtists(img, oc, fn, tp, ti)

    ax.axis("square")
    ax.set_xlim([0, seqlen])
    ax.set_ylim([0, seqlen])
    return artists


def animate_contacts_and_attentions(
    predictions: Sequence[Union[torch.Tensor, np.ndarray]],
    contacts: Union[torch.Tensor, np.ndarray],
    fig: Optional[mpl.figure.Figure] = None,
    ax: Optional[mpl.axes.Axes] = None,
    artists: Optional[ContactAndAttentionArtists] = None,
    cmap: str = "Blues",
    ms: float = 1,
    title: Union[bool, str, Callable[[int, float], str]] = True,
    interval: int = 500,
    repeat_delay: int = 1000,
    blit: bool = True,
) -> mpl.animation.Animation:
    if fig is None:
        fig = plt.gcf()
    if ax is None:
        ax = plt.gca()

    initial_title = partial(title, 0) if callable(title) else title
    artists = plot_contacts_and_attentions(
        predictions[0],
        contacts,
        ax,
        cmap=cmap,
        ms=ms,
        title=initial_title,
        animated=True,
    )

    def update(i):
        iter_title = partial(title, i) if callable(title) else title
        return plot_contacts_and_attentions(
            predictions[i],
            contacts,
            ax,
            artists=artists,
            cmap=cmap,
            ms=ms,
            title=iter_title,
            animated=True,
        )

    ani = mpl.animation.FuncAnimation(
        fig,
        update,
        len(predictions),
        interval=interval,
        blit=blit,
        repeat_delay=repeat_delay,
    )
    return ani
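A hypothetical usage sketch with random data shaped the way plot_contacts_and_attentions expects; title=False sidesteps the P@L computation, since compute_precisions may expect torch tensors.

# Hypothetical usage sketch; data is random and illustrative only.
import numpy as np
import matplotlib.pyplot as plt

seqlen = 100
rng = np.random.RandomState(0)
predictions = rng.rand(seqlen, seqlen)
contacts = rng.rand(seqlen, seqlen) > 0.95
contacts = contacts | contacts.T  # contact maps are symmetric

fig, ax = plt.subplots()
plot_contacts_and_attentions(predictions, contacts, ax=ax, title=False)
plt.show()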
{"hexsha": "be9d6999560c9587a51f8c306f900f4fa07e6ebd", "size": 4950, "ext": "py", "lang": "Python", "max_stars_repo_path": "evo/visualize.py", "max_stars_repo_name": "rmrao/evo", "max_stars_repo_head_hexsha": "ac86e2b8a6f78d5e3a0b8bf47a978a12735ca8c4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-02-23T00:34:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T14:49:01.000Z", "max_issues_repo_path": "evo/visualize.py", "max_issues_repo_name": "rmrao/evo", "max_issues_repo_head_hexsha": "ac86e2b8a6f78d5e3a0b8bf47a978a12735ca8c4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evo/visualize.py", "max_forks_repo_name": "rmrao/evo", "max_forks_repo_head_hexsha": "ac86e2b8a6f78d5e3a0b8bf47a978a12735ca8c4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-11T21:30:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-11T21:30:30.000Z", "avg_line_length": 31.935483871, "max_line_length": 82, "alphanum_fraction": 0.6408080808, "include": true, "reason": "import numpy", "num_tokens": 1228}
__author__ = 'Cameron Summers' import os import unittest import numpy as np from nps_acoustic_discovery.output import probs_to_pandas, probs_to_raven_detections from nps_acoustic_discovery.discover import AcousticDetector from nps_acoustic_discovery.model import EventModel THIS_DIR = os.path.dirname(os.path.abspath(__file__)) FFMPEG_PATH = 'ffmpeg' class TestSmoke(unittest.TestCase): def setUp(self): self.test_model_dir = os.path.join(THIS_DIR, '../models/SWTH') self.test_input = np.ones((1, 84)) self.test_audio_filepath = os.path.join(THIS_DIR, 'SWTH_test_30s.wav') def test1_model(self): model = EventModel(self.test_model_dir) model.process(self.test_input) def test2_detector(self): detector = AcousticDetector([self.test_model_dir], [0.5], ffmpeg_path=FFMPEG_PATH) detector.process(self.test_audio_filepath) def test3_probs_df(self): detector = AcousticDetector([self.test_model_dir], [0.5], ffmpeg_path=FFMPEG_PATH) model_prob_map = detector.process(self.test_audio_filepath) model_probs_df_map = probs_to_pandas(model_prob_map) def test4_probs_raven(self): detector = AcousticDetector([self.test_model_dir], [0.5], ffmpeg_path=FFMPEG_PATH) model_prob_map = detector.process(self.test_audio_filepath) model_probs_df_map = probs_to_pandas(model_prob_map) model_raven_df_map = probs_to_raven_detections(model_probs_df_map) class TestMP3(unittest.TestCase): def setUp(self): self.test_model_dir = os.path.join(THIS_DIR, '../models/SWTH') self.test_mp3_320k_audio_filepath = os.path.join(THIS_DIR, 'SWTH_test_30s_320k.mp3') self.test_mp3_60k_audio_filepath = os.path.join(THIS_DIR, 'SWTH_test_30s_60k.mp3') self.test_wav_audio_filepath = os.path.join(THIS_DIR, 'SWTH_test_30s.wav') def test1_smoke(self): mp3_detector = AcousticDetector([self.test_model_dir], [0.5], ffmpeg_path=FFMPEG_PATH) mp3_320k_model_prob_map = mp3_detector.process(self.test_mp3_320k_audio_filepath) mp3_320k_model_probs_df_map = probs_to_pandas(mp3_320k_model_prob_map) mp3_320k_model_raven_df_map = probs_to_raven_detections(mp3_320k_model_probs_df_map) mp3_60k_model_prob_map = mp3_detector.process(self.test_mp3_60k_audio_filepath) mp3_60k_model_probs_df_map = probs_to_pandas(mp3_60k_model_prob_map) mp3_60k_model_raven_df_map = probs_to_raven_detections(mp3_60k_model_probs_df_map) wav_detector = AcousticDetector([self.test_model_dir], [0.5], ffmpeg_path=FFMPEG_PATH) wav_model_prob_map = wav_detector.process(self.test_wav_audio_filepath) wav_model_probs_df_map = probs_to_pandas(wav_model_prob_map) wav_model_raven_df_map = probs_to_raven_detections(wav_model_probs_df_map) for model, probs_320k_df in mp3_320k_model_probs_df_map.items(): mp3_320k_probs = probs_320k_df[model.event_code] for model, probs_60k_df in mp3_60k_model_probs_df_map.items(): mp3_60k_probs = probs_60k_df[model.event_code] for model, probs_df in wav_model_probs_df_map.items(): wav_probs = probs_df[model.event_code] assert abs(len(mp3_320k_probs) - len(wav_probs)) <= 1 num_samples = 1000 # check over ten seconds prob_diff_sum = np.sum(abs(mp3_320k_probs[:num_samples] - wav_probs[:num_samples])) acceptable_prob_error_per_sample = 0.05 assert prob_diff_sum < acceptable_prob_error_per_sample * num_samples if __name__ == '__main__': unittest.main()
{"hexsha": "a5821aeb88b85fdf977da711cb62ca869da9ddda", "size": 3621, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_model.py", "max_stars_repo_name": "scaubrey/nps_acoustic_discovery", "max_stars_repo_head_hexsha": "03bf134f2cff9d2d564b4c9ac211dff9aaaf33a7", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_model.py", "max_issues_repo_name": "scaubrey/nps_acoustic_discovery", "max_issues_repo_head_hexsha": "03bf134f2cff9d2d564b4c9ac211dff9aaaf33a7", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_model.py", "max_forks_repo_name": "scaubrey/nps_acoustic_discovery", "max_forks_repo_head_hexsha": "03bf134f2cff9d2d564b4c9ac211dff9aaaf33a7", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7912087912, "max_line_length": 94, "alphanum_fraction": 0.7464788732, "include": true, "reason": "import numpy", "num_tokens": 939}
using Base: KeySet #= keys(::Dict{K,V})::KeySet{Symbol,Dict{Symbol,Any}} # KeySet for a Dict{Symbol,Any} values(::Dict{K,V})::ValueIterator{Dict{Symbol,Any}} # ValueIterator for a Dict{Symbol,Any} =# (getkeys(dict::Dict{K,V})::Array{Symbol,1}) where {K,V} = dict.keys[getslotidxs(dict)] (getvalues(dict::Dict{K,V})::Array{Any,1}) where {K,V} = dict.vals[getslotidxs(dict)] (getslotidxs(dict::Dict{K,V})::Array{Int, 1}) where {K,V} = filter(i->isone(dict.slots[i]), 1:length(dict.slots))
{"hexsha": "462110f22ed9652a38a7342e2b9dd4a97be205c8", "size": 512, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/support/dict.jl", "max_stars_repo_name": "JuliaTagBot/GUI.jl", "max_stars_repo_head_hexsha": "b2ab393c6286ede8d0b7d00929098f99699de226", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-09-20T20:20:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T21:00:59.000Z", "max_issues_repo_path": "src/support/dict.jl", "max_issues_repo_name": "JuliaTagBot/GUI.jl", "max_issues_repo_head_hexsha": "b2ab393c6286ede8d0b7d00929098f99699de226", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/support/dict.jl", "max_forks_repo_name": "JuliaTagBot/GUI.jl", "max_forks_repo_head_hexsha": "b2ab393c6286ede8d0b7d00929098f99699de226", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:26:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:26:06.000Z", "avg_line_length": 42.6666666667, "max_line_length": 113, "alphanum_fraction": 0.650390625, "num_tokens": 181}
-- Andreas, 2012-04-18, bug reported by pumpkingod on 2012-04-16 module Issue610 where import Common.Level open import Common.Equality data ⊥ : Set where record ⊤ : Set where record A : Set₁ where constructor set field .a : Set .get : A → Set get x = helper x module R where helper : .A -> Set helper x = A.a x ack : A → Set ack x = R.helper x x -- Expected error: -- Identifier R.helper is declared irrelevant, so it cannot be used here hah : set ⊤ ≡ set ⊥ hah = refl .moo : ⊥ moo with cong ack hah moo | q = subst (λ x → x) q _ baa : .⊥ → ⊥ baa () yoink : ⊥ yoink = baa moo
{"hexsha": "cce06a4dfb29c4c752263e32910325ba00270fe3", "size": 600, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/Fail/Issue610.agda", "max_stars_repo_name": "redfish64/autonomic-agda", "max_stars_repo_head_hexsha": "c0ae7d20728b15d7da4efff6ffadae6fe4590016", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-03-28T14:51:03.000Z", "max_stars_repo_stars_event_max_datetime": "2015-12-07T20:14:00.000Z", "max_issues_repo_path": "test/Fail/Issue610.agda", "max_issues_repo_name": "redfish64/autonomic-agda", "max_issues_repo_head_hexsha": "c0ae7d20728b15d7da4efff6ffadae6fe4590016", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/Fail/Issue610.agda", "max_forks_repo_name": "redfish64/autonomic-agda", "max_forks_repo_head_hexsha": "c0ae7d20728b15d7da4efff6ffadae6fe4590016", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-03-05T20:02:38.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-05T20:02:38.000Z", "avg_line_length": 15.3846153846, "max_line_length": 72, "alphanum_fraction": 0.6416666667, "num_tokens": 213}
\documentclass[11pt]{article}
\usepackage{setspace}
\usepackage{pxfonts}
\usepackage{graphicx}
\usepackage{geometry}
\geometry{letterpaper,left=.5in,right=.5in,top=1in,bottom=.75in,headsep=5pt,footskip=20pt}
\title{Lecture 4 -- Hodgkin-Huxley neuron model}
\author{Computational Neuroscience Summer Program}
\date{June, 2011}
\begin{document}
\maketitle
\paragraph{Motivation.} The integrate-and-fire model allows us to model spiking rates, but ignores the biophysical underpinnings of the action potential. The Hodgkin-Huxley model, proposed by Alan Hodgkin and Andrew Huxley in 1952, explains how the action potential arises from ionic currents. They won the Nobel Prize for this model in 1963, and their model is still taught today in neuroscience classes around the world.
\paragraph{Recap.} The model neuron equation we've been using is:
\[ \tau_m\frac{dV}{dt} = E - V + R_mI_e \]
In this equation, all of the membrane biophysics are (implicitly) lumped into the $E - V$ term. The $R_mI_e$ term represents how external currents affect the cell. The basic idea of the Hodgkin-Huxley model is that we'll expand on the membrane biophysics portion of the equation by modeling ionic currents flowing through ion channels. We'll consider two ions: sodium and potassium. Before we fully explain the Hodgkin-Huxley model, we'll review some basic principles from physics and chemistry to gain a deeper understanding of the forces acting on ions inside and surrounding the cell.
\paragraph{Diffusion.} Imagine adding a drop of food coloring to a beaker of water. The food coloring diffuses (``spreads out'') throughout the water uniformly. This is because of principle 1: \textit{molecules flow down their concentration gradient.}
\paragraph{Selective permeability.} Let's add a barrier to our imaginary beaker. If we drop some blue food coloring in the left side, it will spread throughout that side but won't spread to the right side. Now suppose the barrier is permeable to red, but not to blue. If we drop some red food coloring in either side, it will spread throughout the entire beaker, even though blue coloring is confined to the side it was dropped into. Ion channels allow the cell's membrane to become selectively permeable to a single type of ion.
\paragraph{Charges.} Now let's suppose blue represents negatively charged ions and red represents positively charged ions. Since opposite charges attract, we need to consider this force acting on the ions in addition to simple diffusion. In particular, rather than spreading out evenly throughout the beaker, some of the red ions will be attracted over to the blue side. This is because of principle 2: \textit{molecules flow down their charge gradient.}
\paragraph{Energy required to transport ions across the membrane.} Membrane potentials are small, so ions are transported across the membrane by thermal fluctuations. The thermal energy of an ion is $k_BT$, where $k_B$ is Boltzmann's constant and $T$ is the temperature in degrees Kelvin. Biologists and chemists typically like to think about moles of ions rather than single ions. A mole of ions has Avogadro's number times as much energy as a single ion, or $RT$, where $R$ is the universal gas constant (8.31 Joules/mole $^\circ$K = 1.99 cal/mole $^\circ$K). At normal temperatures, $RT \approx 2,500$ joules/mole, or 0.6 kCal/mole. We can compute the energy gained or lost when a mole of ions crosses the membrane with a potential difference $V_T$ across it.
This energy is equal to $FV_T$, where $F$ is Faraday's constant (F = 96,480 Coulombs/mole), or Avogadro's number times the charge of a single proton, $q$. Setting $FV_T = RT$ gives:
\[ V_T = \frac{RT}{F} = \frac{k_BT}{q} \]
\paragraph{Equilibrium potential.} The \textit{equilibrium potential} is the membrane voltage at which the net flow of a particular ion into or out of the cell is zero (i.e., the concentration gradient perfectly offsets the charge gradient). If an ion has an electrical charge $zq$, it must have a thermal energy of at least $-zqV$ to cross the membrane (this quantity is positive for $z > 0$ and $V < 0$). The probability that an ion has a thermal energy greater than or equal to $-zqV$ when the temperature (degrees Kelvin) is $T$ is $e^{\frac{zqV}{k_BT}}$, which is determined by integrating the Boltzmann distribution for energies $\geq -zqV$. In molar units, this can be written as:
\[ e^{\frac{zFV}{RT}} \]
Then the concentration gradient offsets the charge gradient when
\[ \mathrm{[outside]} = \mathrm{[inside]}\,e^{\frac{zFE}{RT}} \]
where $E$ is the ``equilibrium potential'' -- the membrane potential at which the gradients cancel. We can solve for $E$:
\[ \frac{\mathrm{[outside]}}{\mathrm{[inside]}} = e^{\frac{zFE}{RT}} \]
\[ \ln\left(\frac{\mathrm{[outside]}}{\mathrm{[inside]}}\right) = \frac{zFE}{RT} \]
\[ E = \frac{RT}{zF}\ln\left(\frac{\mathrm{[outside]}}{\mathrm{[inside]}}\right) \]
Plugging in the appropriate sodium and potassium concentrations, we find that $E_{K} \approx$ -70 to -90 mV and $E_{Na} \approx$ 50 mV.
\paragraph{Reversal potential.} When $V < E$, positive charges flow into the cell, causing the cell to depolarize (become more positive). When $V > E$, positive charges flow out of the cell, causing it to hyperpolarize (become less positive). Because $E$ is the membrane potential at which the direction of net current flow reverses, $E$ is also often called the \textit{reversal potential}.
\paragraph{Resting potential.} When no current is being pumped into the neuron, the equilibrium potentials of the different ions contained in the neuron and extracellular fluid all fight to bring the neuron's membrane voltage to their respective equilibrium potentials. The ``strength'' with which each ion pulls the membrane potential towards its equilibrium potential is proportional to the permeability (``conductance'') of the membrane to that ion, $g_i$ -- this depends on the number of open ion channels for that ion. The resting membrane potential is equal to:
\[ V_{rest} = \frac{\sum_i (g_iE_i)}{\sum_i g_i} \]
This is called the Goldman-Hodgkin-Katz equation.
\paragraph{Intuition underlying the shape of the action potential.} The key biophysical property of neurons that gives rise to the characteristic shape of the action potential is that the membrane's permeability to different ions depends on the membrane voltage. During the rising phase of the action potential, sodium channels open quickly and potassium channels open slowly. Since the sodium conductance outweighs the potassium conductance, we see from the Goldman-Hodgkin-Katz equation that the membrane voltage will head towards $E_{Na}$. At the peak of the action potential, sodium channels become blocked, and the membrane becomes almost exclusively permeable to potassium -- so during the falling phase, the membrane plummets towards $E_K$. Then some resetting happens, and the membrane potential returns to $V_{rest}$. Now let's get into the details and the equations...
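\paragraph{A worked example.} As a quick sanity check on the equilibrium-potential formula, consider potassium with typical textbook concentrations (the specific numbers here are illustrative assumptions, not values given in this lecture): $\mathrm{[K^+]_{outside}} \approx 5$ mM, $\mathrm{[K^+]_{inside}} \approx 140$ mM, $z = 1$, and $RT/F \approx 26.7$ mV near body temperature. Then
\[ E_K = \frac{RT}{zF}\ln\left(\frac{5}{140}\right) \approx 26.7~\mathrm{mV} \times (-3.33) \approx -89~\mathrm{mV}, \]
which lands in the $-70$ to $-90$ mV range quoted above.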
\paragraph{Voltage-gated potassium channels.} The voltage-gated potassium channel is composed of four identical (independent) subunits. For the channel to conduct potassium ions, all four subunits have to be in their open configuration. The probability that a given potassium channel is open, $P_K$, is equal to the probability that a given subunit is open, $n$, raised to the fourth power:
\[ P_K = n^4 \]
Note that if $n$ is the probability that a given subunit is open, then $1-n$ is the probability that the subunit is closed. The potassium channel subunits contain voltage sensors which make it more likely that the subunits will be open (activated) when the membrane is depolarized and less likely that the subunits will be open (deactivated) when the membrane is hyperpolarized. Thus, we need to use (and update) the membrane voltage at each time $t$ in our calculations. Let's define an opening rate, $\alpha_n(V)$, and a closing rate, $\beta_n(V)$, for the potassium channel subunits. The probability that a subunit gate opens over a short interval of time is equal to the probability of finding the gate closed ($1-n$) multiplied by the opening rate, $\alpha_n(V)$. Likewise, the probability that a subunit gate closes over a short interval of time is equal to the probability of finding the gate open ($n$) multiplied by the closing rate, $\beta_n(V)$. The rate at which the open probability for a subunit gate changes is given by the difference between these two terms:
\[ \frac{dn}{dt} = \alpha_n(V)(1-n) - \beta_n(V)n \]
Another useful form of this equation is obtained by dividing through by the term $\alpha_n(V) + \beta_n(V)$:
\[ \tau_n(V)\frac{dn}{dt} = n_\infty(V) - n\mathrm{,~where} \]
\[ \tau_n(V) = \frac{1}{\alpha_n(V) + \beta_n(V)}\mathrm{~and} \]
\[ n_\infty(V) = \frac{\alpha_n(V)}{\alpha_n(V) + \beta_n(V)} \]
This equation indicates that for a given voltage, $V$, $n$ approaches the limiting value $n_\infty(V)$ exponentially with time constant $\tau_n$. Hodgkin and Huxley found that the following rate functions fit their data:
\[ \alpha_n(V) = \frac{0.01(V + 55)}{1 - e^{-0.1(V+55)}} \]
This function first increases gradually, and then increases approximately linearly.
\[ \beta_n(V) = 0.125e^{-0.0125(V+65)} \]
This function decays exponentially towards zero. Since the membrane contains a very large number of potassium channels, the fraction of channels open at any given time is equal to $P_K = n^4$. The membrane's ability to conduct potassium ($g_K$) is equal to $P_K$ times the membrane's maximal potassium conductance, $\bar{g}_K$.
\paragraph{Voltage-gated sodium channels.} Voltage-gated sodium channels consist of three main subunits, each of which is open with probability $m$. As with potassium channel subunits, sodium subunit opening and closing rates are given by $\alpha_m$ and $\beta_m$, respectively. These subunits open (activate) quickly when the membrane is depolarized and close (deactivate) when the membrane is hyperpolarized. In addition to the three main subunits, the sodium channel contains a fourth subunit which has a negative charge, and is open with probability $h$. When the cell is depolarized, the subunit gets attracted to the inside of the cell and blocks (``inactivates'') the channel. In order to ``unblock'' the channel, the cell needs to be sufficiently hyperpolarized (past its normal resting potential). The unblocking is called ``de-inactivation.'' The equations for $dm$ and $dh$ are identical to the equations for $dn$, but with $n$ replaced by $m$ or $h$.
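For completeness, the substitution just described gives the sodium gating equations explicitly:
\[ \frac{dm}{dt} = \alpha_m(V)(1-m) - \beta_m(V)m \]
\[ \frac{dh}{dt} = \alpha_h(V)(1-h) - \beta_h(V)h \]
with $\tau_m(V)$, $\tau_h(V)$, $m_\infty(V)$, and $h_\infty(V)$ defined exactly as for $n$.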
The equations for $\alpha_m$ and $\beta_m$ are:
\[ \alpha_m(V) = \frac{0.1(V+40)}{1 - e^{-0.1(V+40)}} \]
\[ \beta_m(V) = 4e^{-0.0556(V + 65)} \]
(these are of the same form as the equations for $\alpha_n$ and $\beta_n$). The equations for $\alpha_h$ and $\beta_h$ are:
\[ \alpha_h(V) = 0.07e^{-0.05(V+65)} \]
\[ \beta_h(V) = \frac{1}{1 + e^{-0.1(V+35)}} \]
which are of the opposite form to the $n$ and $m$ equations, since the $h$ subunit inactivates with depolarization and de-inactivates with hyperpolarization. Since the membrane contains a very large number of sodium channels, the fraction of channels open at any given time is equal to $P_{Na} = m^3h$. The membrane's ability to conduct sodium ($g_{Na}$) is equal to $P_{Na}$ times the membrane's maximal sodium conductance, $\bar{g}_{Na}$.
\paragraph{Leak channels.} The last type of channel in the Hodgkin-Huxley model is the leak channel. Leak channels are always open, regardless of membrane voltage. The total leak conductance is represented by $\bar{g}_L$.
\paragraph{Derivation of full model.} For the integrate-and-fire model we used:
\[ \tau_m\frac{dV}{dt} = E - V + R_mI_e \]
Using $\tau_m = c_m r_m$ and $R_m = r_m/A$, we can re-write this as:
\[ c_m r_m\frac{dV}{dt} = (E - V) + \frac{r_m}{A}I_e \]
Now we divide both sides by $r_m$:
\[ c_m\frac{dV}{dt} = \frac{1}{r_m}(E - V) + \frac{I_e}{A} \]
The full model is of the form:
\[ c_m\frac{dV}{dt} = -i_m + \frac{I_e}{A}, \]
where
\[ i_m = \bar{g}_L(V - E_L) + g_K(V - E_K) + g_{Na}(V - E_{Na}) \]
This is simply the written-out form of the Goldman-Hodgkin-Katz equation. As in the integrate-and-fire model, we start by solving for $dV$ and setting $V(t+dt) = V(t) + dV$ in each time step of the simulation.
\end{document}
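As a companion to the lecture above, here is a minimal numerical sketch of the update rule described in its final paragraph (forward Euler integration). The rate functions are the ones given in the text; the capacitance, maximal conductances, reversal potentials, injected current, and initial gating values are standard textbook numbers assumed here, not values taken from the lecture.

import numpy as np

# Assumed constants (classic squid-axon values, shifted so rest is near -65 mV).
C_M = 1.0                            # membrane capacitance (uF/cm^2)
G_NA, G_K, G_L = 120.0, 36.0, 0.3    # maximal conductances (mS/cm^2)
E_NA, E_K, E_L = 50.0, -77.0, -54.4  # reversal potentials (mV)

# Rate functions from the lecture (1/ms); note the removable singularities
# at V = -55 and V = -40, which a production implementation would guard.
def alpha_n(V): return 0.01 * (V + 55) / (1 - np.exp(-0.1 * (V + 55)))
def beta_n(V):  return 0.125 * np.exp(-0.0125 * (V + 65))
def alpha_m(V): return 0.1 * (V + 40) / (1 - np.exp(-0.1 * (V + 40)))
def beta_m(V):  return 4.0 * np.exp(-0.0556 * (V + 65))
def alpha_h(V): return 0.07 * np.exp(-0.05 * (V + 65))
def beta_h(V):  return 1.0 / (1 + np.exp(-0.1 * (V + 35)))

def simulate(I_e=10.0, T=50.0, dt=0.01):
    """Forward-Euler integration of the Hodgkin-Huxley equations."""
    steps = int(T / dt)
    V = -65.0
    n, m, h = 0.32, 0.05, 0.60   # approximate resting-state gating values
    trace = np.empty(steps)
    for i in range(steps):
        g_K = G_K * n**4             # P_K = n^4
        g_Na = G_NA * m**3 * h       # P_Na = m^3 h
        i_m = G_L * (V - E_L) + g_K * (V - E_K) + g_Na * (V - E_NA)
        V += dt * (-i_m + I_e) / C_M
        n += dt * (alpha_n(V) * (1 - n) - beta_n(V) * n)
        m += dt * (alpha_m(V) * (1 - m) - beta_m(V) * m)
        h += dt * (alpha_h(V) * (1 - h) - beta_h(V) * h)
        trace[i] = V
    return trace

print(simulate().max())   # peaks well above 0 mV once the neuron spikes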
{"hexsha": "11e7639b8022769e4c251473b3c00b20c7361f90", "size": 12256, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "hodgkin_huxley_simple/hodgkin_huxley_simple_lecture.tex", "max_stars_repo_name": "ContextLab/computational-neuroscience", "max_stars_repo_head_hexsha": "b0a3812a46fe4387de2655a9072f8910a7f212f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2018-01-22T21:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-04T20:44:42.000Z", "max_issues_repo_path": "hodgkin_huxley_simple/hodgkin_huxley_simple_lecture.tex", "max_issues_repo_name": "ContextLab/computational-neuroscience", "max_issues_repo_head_hexsha": "b0a3812a46fe4387de2655a9072f8910a7f212f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-10-31T02:19:06.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-31T14:03:00.000Z", "max_forks_repo_path": "hodgkin_huxley_simple/hodgkin_huxley_simple_lecture.tex", "max_forks_repo_name": "ContextLab/computational-neuroscience", "max_forks_repo_head_hexsha": "b0a3812a46fe4387de2655a9072f8910a7f212f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-08-11T20:56:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-24T09:23:11.000Z", "avg_line_length": 79.5844155844, "max_line_length": 884, "alphanum_fraction": 0.7495104439, "num_tokens": 3286}
SUBROUTINE READ1 (DM,MR,SCR1,SCR2,SCR3,PHIA,USET,NR1,LAMA,SCR4) C INTEGER DM,MR,IMR(7),SYSBUF,SCR1,SCR2,ISCR1(7),PHIA, 1 SCR4,SCR3,NAM(2) DOUBLE PRECISION DCORE(1),SI,TERM CHARACTER UFM*23 COMMON /XMSSG / UFM COMMON /SYSTEM/ SYSBUF,NOUT,KSYSTM(63) COMMON /ZZZZZZ/ CORE(1) COMMON /UNPAKX/ ITB,II,JJ,INCUR COMMON /PACKX / ITA1,ITB1,II1,JJ1,INCUR1 COMMON /BITPOS/ UM,UO,UR,USG,USB,UL,UA,UF,US,UN,UG EQUIVALENCE (DCORE(1),CORE(1)) DATA NAM / 4HREAD,4H1 / C C BRING MR INTO CORE C LC = KORSZ(CORE) - SYSBUF CALL GOPEN (MR,CORE(LC+1),0) IMR(1) = MR CALL RDTRL (IMR) NR = IMR(2) NR1 = NR II = 1 JJ = NR INCUR= 1 ITB = IMR(5) NR2 = ITB*NR IVI = NR*NR IPHI = IVI IVI2 = ITB*IVI IALPH= 2*IVI ILOOP= 0 K = 0 DO 20 I = 1,NR CALL UNPACK (*12,MR,CORE(K+1)) GO TO 16 C C NULL COLUMN C 12 DO 14 J = 1,NR2 CORE(J+K) = 0.0 14 CONTINUE 16 KKK = K + IVI2 DO 10 J = 1,NR2 CORE(J+KKK) = 0.0 10 CONTINUE IF (ITB .EQ. 1) GO TO 18 KKK = KKK/2 DCORE(KKK+I) = 1.0D0 GO TO 19 18 CORE(KKK+I) = 1.0 19 K = K + NR2 20 CONTINUE CALL CLOSE (MR,1) C C COMPUTE SI C IF (ITB .NE. 2) GO TO 35 30 SI = 0.0D0 DO 50 I = 1,NR TERM = 0.0D0 DO 40 J = 1,NR K = (J-1)*NR + I KK = IVI + J 40 TERM = TERM + DCORE(K)*DCORE(KK) K = IVI + I SI = SI + TERM*DCORE(K) 50 CONTINUE IF (SI .GT. 0.0D0) GO TO 51 53 WRITE (NOUT,52) UFM 52 FORMAT (A23,' 2200, INCONSISTENT RIGID BODY SYSTEM.') CALL MESAGE (-61,0,NAM) 51 CONTINUE SI = 1.0D0/DSQRT(SI) C C CONVERT VI INTO PHI C DO 60 I = 1,NR K = IVI + I 60 DCORE(K) = DCORE(K) *SI ILOOP = ILOOP + 1 IF (ILOOP .EQ. NR) GO TO 120 C C CALCULATE ALPHAJ C DO 90 J = 1,ILOOP K = IALPH + J DCORE(K) = 0.0D0 DO 80 I = 1,NR TERM = 0.0D0 DO 70 L = 1,NR KK = (L-1)*NR + I KKK = IVI + NR + L 70 TERM = TERM + DCORE(KK)*DCORE(KKK) KK = IPHI + (J-1)*NR + I 80 DCORE(K) = DCORE(K)+TERM*DCORE(KK) 90 CONTINUE C C COMPUTE NEXT V VECTOR C DO 110 I = 1,NR TERM = 0.0D0 DO 100 J = 1,ILOOP KK = IALPH + J K = IPHI + (J-1)*NR + I 100 TERM = TERM + DCORE(KK)*DCORE(K) K = IVI + NR + I 110 DCORE(K) = DCORE(K) - TERM IVI = IVI + NR GO TO 30 35 SSI = 0.0 DO 55 I = 1,NR STERM = 0.0 DO 45 J = 1,NR K = (J-1)*NR + I KK = IVI + J 45 STERM = STERM + CORE(K)*CORE(KK) K = IVI + I SSI = SSI + STERM*CORE(K) 55 CONTINUE IF (SSI .LE. 0.0) GO TO 53 SSI = 1.0/SQRT(SSI) C C CONVERT VI INTO PHI C DO 65 I = 1,NR K = IVI + I 65 CORE(K) = CORE(K)*SSI ILOOP = ILOOP + 1 IF (ILOOP .EQ. 
NR) GO TO 120 C C CALCULATE ALPHAJ C DO 95 J = 1,ILOOP K = IALPH + J CORE(K) = 0.0 DO 85 I = 1,NR STERM = 0.0 DO 75 L = 1,NR KK = (L-1)*NR + I KKK = IVI + NR + L 75 STERM = STERM + CORE(KK)*CORE(KKK) KK = IPHI + (J-1)*NR + I 85 CORE(K) = CORE(K) + STERM*CORE(KK) 95 CONTINUE C C COMPUTE NEXT V VECTOR C DO 115 I = 1,NR STERM = 0.0 DO 105 J = 1,ILOOP KK = IALPH + J K = IPHI + (J-1)*NR + I 105 STERM = STERM + CORE(KK)*CORE(K) K = IVI + NR + I 115 CORE(K) = CORE(K) - STERM IVI = IVI + NR GO TO 35 C C PACK PHIRO C 120 ITA1 = ITB ITB1 = ITB II1 = 1 JJ1 = NR INCUR1 = 1 CALL GOPEN (SCR1,CORE(LC+1),1) CALL MAKMCB (ISCR1,SCR1,NR,1,ITB) DO 130 I = 1,NR K = IVI2 + (I-1)*NR2 130 CALL PACK (CORE(K+1),SCR1,ISCR1) CALL CLOSE (SCR1,1) CALL WRTTRL (ISCR1(1)) C C COMPUTE PHILO = DM*PHIRO C CALL SSG2B (DM,SCR1,0,SCR2,0,ITB,1,SCR4) C C MERGE PHIRP AND PHILO TO FORM PHIA C CALL SDR1B (SCR3,SCR2,SCR1,SCR4,UA,UL,UR,USET,0,0) CALL GOPEN (SCR4,CORE(LC+1),0) LC = LC - SYSBUF CALL GOPEN (PHIA,CORE(LC+1),1) IMR(1) = SCR4 CALL RDTRL (IMR(1)) NPROB = IMR(3) DCORE(1) = 0.D0 JJ = NPROB INCUR = 1 I3 = 3 DO 170 J = 1,NR II = 0 CALL UNPACK (*150,SCR4,CORE(I3)) II1 = II JJ1 = JJ CALL PACK (CORE(I3),PHIA,ISCR1) GO TO 170 C C NULL COLUMN C 150 II1 = 1 JJ1 = 1 CALL PACK (CORE,PHIA,ISCR1) 170 CONTINUE CALL CLOSE (SCR4,1) CALL CLOSE (PHIA,1) LC = LC + SYSBUF C C PUT NR ZEROS ON LAMA C CALL GOPEN (LAMA,CORE(LC+1),1) DCORE(1) = 0.D0 DO 180 I = 1,NR 180 CALL WRITE (LAMA,CORE,ITB,1) CALL CLOSE (LAMA,2) RETURN END
{"hexsha": "b5880cf68962c4f74bc1ec2b8be7d007b2dd1a5d", "size": 5128, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "mis/read1.f", "max_stars_repo_name": "ldallolio/NASTRAN-95", "max_stars_repo_head_hexsha": "6d2c175f5b53ebaec4ba2b5186f7926ef9d0ed47", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-01-09T14:33:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T11:51:42.000Z", "max_issues_repo_path": "mis/read1.f", "max_issues_repo_name": "gassive/NASTRAN95", "max_issues_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2016-01-17T07:30:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T19:37:44.000Z", "max_forks_repo_path": "mis/read1.f", "max_forks_repo_name": "gassive/NASTRAN95", "max_forks_repo_head_hexsha": "98cb3acaa7990d639360601648498834c7782056", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-04-07T20:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T14:16:01.000Z", "avg_line_length": 23.3090909091, "max_line_length": 70, "alphanum_fraction": 0.473673947, "num_tokens": 2235}
// Copyright 2017 Antony Polukhin. // // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt // or copy at http://www.boost.org/LICENSE_1_0.txt) #include <boost/stacktrace/detail/void_ptr_cast.hpp> #include <boost/core/lightweight_test.hpp> int foo1_func(int) { return 0; } void foo2_func(int, int, ...) {} struct test_struct { int foo1_memb(int) const { return 0; } void foo2_memb(int, int, ...) {} }; template <class F1, class F2> void test(F1 foo1, F2 foo2) { using boost::stacktrace::detail::void_ptr_cast; typedef void(*void_f_ptr)(); // Function/variable to void(*)() void_f_ptr fp1 = void_ptr_cast<void_f_ptr>(foo1); void_f_ptr fp2 = void_ptr_cast<void_f_ptr>(foo2); BOOST_TEST(fp1); BOOST_TEST(fp2); BOOST_TEST(fp1 != fp2); // Function/variable to void* void* vp1 = void_ptr_cast<void*>(foo1); void* vp2 = void_ptr_cast<void*>(foo2); BOOST_TEST(vp1); BOOST_TEST(vp2); BOOST_TEST(vp1 != vp2); // void* to void(*)() void_f_ptr fp1_2 = void_ptr_cast<void_f_ptr>(vp1); void_f_ptr fp2_2 = void_ptr_cast<void_f_ptr>(vp2); BOOST_TEST(fp1_2); BOOST_TEST(fp2_2); BOOST_TEST(fp1_2 != fp2_2); BOOST_TEST(fp1 == fp1_2); BOOST_TEST(fp2 == fp2_2); // void(*)() to void* BOOST_TEST(void_ptr_cast<void*>(fp1) == vp1); BOOST_TEST(void_ptr_cast<void*>(fp2) == vp2); // void(*)() to function/variable BOOST_TEST(void_ptr_cast<F1>(fp1) == foo1); BOOST_TEST(void_ptr_cast<F2>(fp2) == foo2); // void* to function/variable BOOST_TEST(void_ptr_cast<F1>(vp1) == foo1); BOOST_TEST(void_ptr_cast<F2>(vp2) == foo2); } int main() { // Testing for functions test(foo1_func, foo2_func); typedef void(func_t)(); test( boost::stacktrace::detail::void_ptr_cast<func_t* const>(foo1_func), boost::stacktrace::detail::void_ptr_cast<func_t* const>(foo2_func) ); // Testing for variables (just in case...) int i = 0; double j= 1; test(&i, &j); return boost::report_errors(); }
{"hexsha": "10828fac66e771a8935c7469f70d1a050f3a63e9", "size": 2180, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/stacktrace/test/test_void_ptr_cast.cpp", "max_stars_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_stars_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32.0, "max_stars_repo_stars_event_min_datetime": "2019-02-27T06:57:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-29T10:56:19.000Z", "max_issues_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/stacktrace/test/test_void_ptr_cast.cpp", "max_issues_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_issues_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-03-04T11:21:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-24T01:36:31.000Z", "max_forks_repo_path": "REDSI_1160929_1161573/boost_1_67_0/libs/stacktrace/test/test_void_ptr_cast.cpp", "max_forks_repo_name": "Wultyc/ISEP_1718_2A2S_REDSI_TrabalhoGrupo", "max_forks_repo_head_hexsha": "eb0f7ef64e188fe871f47c2ef9cdef36d8a66bc8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2019-08-20T13:45:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T18:23:49.000Z", "avg_line_length": 27.25, "max_line_length": 76, "alphanum_fraction": 0.6279816514, "num_tokens": 630}
import torch
import torch.nn as nn
import numpy as np
from numpy import linalg as LA

from deeprobust.image.attack.base_attack import BaseAttack


class FGSM(BaseAttack):
    """Fast Gradient (Sign) Method adversarial attack."""

    def __init__(self, model, device='cuda'):
        super(FGSM, self).__init__(model, device)

    def generate(self, image, label, **kwargs):
        # nn.CrossEntropyLoss expects integer class indices, so keep the
        # labels as longs rather than casting them to floats.
        label = label.type(torch.LongTensor)

        ## check and parse parameters for attack
        assert self.check_type_device(image, label)
        assert self.parse_params(**kwargs)

        return fgm(self.model, self.image, self.label, self.epsilon,
                   self.order, self.clip_min, self.clip_max, self.device)

    def parse_params(self, epsilon=0.2, order=np.inf, clip_max=None, clip_min=None):
        self.epsilon = epsilon
        self.order = order
        self.clip_max = clip_max
        self.clip_min = clip_min
        return True


def fgm(model, image, label, epsilon, order, clip_min, clip_max, device):
    # Work on a detached copy of the input so gradients are taken w.r.t. it.
    X_fgsm = image.clone().detach().to(device)
    X_fgsm.requires_grad = True

    loss = nn.CrossEntropyLoss()(model(X_fgsm), label)
    loss.backward()

    if order == np.inf:
        # L-infinity FGSM: take a fixed-size step in the sign of the gradient.
        d = epsilon * X_fgsm.grad.data.sign()
    elif order == 2:
        # L2 variant: step along each example's normalized gradient.
        gradient = X_fgsm.grad
        d = torch.zeros(gradient.shape, device=device)
        for i in range(gradient.shape[0]):
            norm_grad = gradient[i].data / LA.norm(gradient[i].data.cpu().numpy())
            d[i] = norm_grad * epsilon
    else:
        raise ValueError('Other p norms may need other algorithms')

    x_adv = X_fgsm + d

    # Clip to the valid input range; treat a missing bound as unbounded.
    if clip_min is None:
        clip_min = -np.inf
    if clip_max is None:
        clip_max = np.inf
    x_adv = torch.clamp(x_adv, clip_min, clip_max)

    return x_adv
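A minimal usage sketch of the class above. The model and input tensors are hypothetical stand-ins, not part of deeprobust, and this assumes the BaseAttack helpers accept CPU tensors when device='cpu':

import torch
import torch.nn as nn

# Stand-in classifier and input; any model that returns logits will do.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10)).eval()
image = torch.rand(1, 1, 28, 28)   # one fake 28x28 grayscale image
label = torch.tensor([3])          # its (made-up) class index

attack = FGSM(model, device='cpu')
x_adv = attack.generate(image, label, epsilon=0.1, clip_min=0.0, clip_max=1.0)
print((x_adv - image).abs().max())  # perturbation magnitude, bounded by epsilon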
{"hexsha": "8a9402e51d7a46bceedbeac9087c90e05a29fa15", "size": 2179, "ext": "py", "lang": "Python", "max_stars_repo_path": "deeprobust/image/attack/fgsm.py", "max_stars_repo_name": "HenryKenlay/DeepRobust", "max_stars_repo_head_hexsha": "ea8871d970257a9c11715cd059a5331177a00395", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-12T07:45:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-12T07:45:06.000Z", "max_issues_repo_path": "deeprobust/image/attack/fgsm.py", "max_issues_repo_name": "lorenzobasile/DeepRobust", "max_issues_repo_head_hexsha": "3f56dcc45f1fed788423d32cc179c26513416e2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deeprobust/image/attack/fgsm.py", "max_forks_repo_name": "lorenzobasile/DeepRobust", "max_forks_repo_head_hexsha": "3f56dcc45f1fed788423d32cc179c26513416e2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9404761905, "max_line_length": 80, "alphanum_fraction": 0.5819183112, "include": true, "reason": "import numpy,from numpy", "num_tokens": 510}
#!/usr/bin/python3 import os import sys import json import csv import numpy as np _dir = sys.argv[1] output_file = sys.argv[2] config_fp = open(os.path.join(_dir, "list.json"), "rb") json_str = config_fp.read() config_fp.close() config = json.loads(json_str.decode()) fp = open(os.path.join(_dir, output_file), 'wb') fp.write((_dir + '\n\n\n').encode()) fp.write('name\ttest_acc\ttest_loss\ttrain_acc\ttrain_loss\n'.encode()) test_acc = [] test_loss = [] train_acc = [] train_loss = [] for cur_set in config: cur_fp = open(os.path.join(_dir, cur_set), 'r') cur_reader = csv.DictReader(cur_fp, dialect='excel-tab') for row in cur_reader: test_acc.append(float(row['test_acc'])) test_loss.append(float(row['test_loss'])) train_acc.append(float(row['train_acc'])) train_loss.append(float(row['train_loss'])) fp.write((cur_set + '\t' + str(test_acc[-1]) + "\t" + str(test_loss[-1]) + "\t" + str(train_acc[-1]) + "\t" + str(train_loss[-1]) + '\n').encode()) cur_fp.close() fp.write('\nsummary\n'.encode()) fp.write('stat\ttest_acc\ttest_loss\ttrain_acc\ttrain_loss\n'.encode()) fp.write(('mean\t%.4f\t%.4f\t%.4f\t%.4f\n' % (np.mean(test_acc), np.mean(test_loss), np.mean(train_acc), np.mean(train_loss),)).encode()) fp.write(('std\t%.4f\t%.4f\t%.4f\t%.4f\n' % (np.std(test_acc), np.std(test_loss), np.std(train_acc), np.std(train_loss),)).encode()) fp.close()
{"hexsha": "52fcf4147113b2a221d8dfe97ad7c0e095a0c9c8", "size": 1462, "ext": "py", "lang": "Python", "max_stars_repo_path": "RsNet/verify_models_cat.py", "max_stars_repo_name": "gehuangyi20/random_spiking", "max_stars_repo_head_hexsha": "c98b550420ae4061b9d47ca475e86c981caf5514", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-03T17:47:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-03T17:47:40.000Z", "max_issues_repo_path": "RsNet/verify_models_cat.py", "max_issues_repo_name": "gehuangyi20/random_spiking", "max_issues_repo_head_hexsha": "c98b550420ae4061b9d47ca475e86c981caf5514", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "RsNet/verify_models_cat.py", "max_forks_repo_name": "gehuangyi20/random_spiking", "max_forks_repo_head_hexsha": "c98b550420ae4061b9d47ca475e86c981caf5514", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6666666667, "max_line_length": 102, "alphanum_fraction": 0.6381668947, "include": true, "reason": "import numpy", "num_tokens": 412}
import os import sys curDir = os.path.dirname(__file__) sys.path.append('{0}/../scripts/'.format(curDir)) import pandas as pd import numpy as np from indicators import Indicators from auto_sklearn_model import AutoSklearnModel # start = pd.to_datetime('2012-01-01') # end = datetime.date.today() # ind_obj = Indicators('SPY', start, end) # print(ind_obj.calculate_all_indicators()) # adj = ind_obj.adj_close_price() # print(utils.create_classify_labels(adj, 2)) START_DATE = '2012-01-01' END_DATE = '2017-01-01' STOCK = 'COF' NUM_DAYS = 10 def create_classify_labels(adj_close_prices, num_days): classified = adj_close_prices.rolling(window=num_days+1).\ apply(lambda t: 1 if t[num_days] >= t[0] else -1)[num_days:] return pd.DataFrame(classified.values, columns=['Labels']) def get_indicators(stock): start = pd.to_datetime(START_DATE) end = pd.to_datetime(END_DATE) indicators = Indicators(stock, start, end) return indicators.calculate_all_indicators() # Get indicators data and labels indicators_df_origin = get_indicators(STOCK) labels_df = create_classify_labels(indicators_df_origin['Adj Close Price'], NUM_DAYS) indicators_df = indicators_df_origin[:len(labels_df)] X = np.array(indicators_df) Y = np.array(labels_df.transpose().values[0]) auto_model = AutoSklearnModel() (model, score) = auto_model.get_model(X, Y) print(score)
{"hexsha": "a6b43f104fbd191e4343d63cde452ac21f8a3571", "size": 1389, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/test.py", "max_stars_repo_name": "jaeminsung/ml_stock_trading", "max_stars_repo_head_hexsha": "32f5305f5111ba86eb428e982ac6f0f18842558d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/test.py", "max_issues_repo_name": "jaeminsung/ml_stock_trading", "max_issues_repo_head_hexsha": "32f5305f5111ba86eb428e982ac6f0f18842558d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/test.py", "max_forks_repo_name": "jaeminsung/ml_stock_trading", "max_forks_repo_head_hexsha": "32f5305f5111ba86eb428e982ac6f0f18842558d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8666666667, "max_line_length": 85, "alphanum_fraction": 0.7465802736, "include": true, "reason": "import numpy", "num_tokens": 355}
####### UTILITIES

import os
import numpy as np
import random
import torch

# torch_xla is only available on TPU hosts; import it lazily so the helpers
# below still work on CPU/GPU machines.
try:
    import torch_xla.core.xla_model as xm
except ImportError:
    xm = None

# random sequences
def randomly(seq):
    shuffled = list(seq)
    random.shuffle(shuffled)
    return iter(shuffled)

# voting ensemble: turn a matrix of class scores into one-hot votes
def convert_to_10(a):
    idx = a.argmax(axis=1)
    out = np.zeros_like(a, dtype=float)
    out[np.arange(a.shape[0]), idx] = 1
    return out

# device-aware printing
def smart_print(expression, CFG):
    if CFG['device'] != 'TPU':
        print(expression)
    else:
        xm.master_print(expression)

# device-aware model save
def smart_save(weights, path, CFG):
    if CFG['device'] != 'TPU':
        torch.save(weights, path)
    else:
        xm.save(weights, path)

# randomness
def seed_everything(seed, CFG):
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    smart_print('- setting random seed to {}...'.format(seed), CFG)
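A minimal usage sketch (the CFG dictionary layout is assumed from the helpers above):

CFG = {'device': 'GPU'}
seed_everything(42, CFG)

scores = np.array([[0.2, 0.7, 0.1],
                   [0.6, 0.3, 0.1]])
print(convert_to_10(scores))  # one-hot votes: [[0. 1. 0.], [1. 0. 0.]]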
{"hexsha": "bfc674ec0867c7e509a89aecbcb49028891dabf7", "size": 1046, "ext": "py", "lang": "Python", "max_stars_repo_path": "codes/utilities.py", "max_stars_repo_name": "lizzzi111/Kaggle_Leaf_Disease_Classification", "max_stars_repo_head_hexsha": "0ff3904164e81cb03ef0054d3574ac5b6cb3d897", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "codes/utilities.py", "max_issues_repo_name": "lizzzi111/Kaggle_Leaf_Disease_Classification", "max_issues_repo_head_hexsha": "0ff3904164e81cb03ef0054d3574ac5b6cb3d897", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codes/utilities.py", "max_forks_repo_name": "lizzzi111/Kaggle_Leaf_Disease_Classification", "max_forks_repo_head_hexsha": "0ff3904164e81cb03ef0054d3574ac5b6cb3d897", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.7727272727, "max_line_length": 67, "alphanum_fraction": 0.6606118547, "include": true, "reason": "import numpy", "num_tokens": 262}
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Geometry
======================
Functions for constructing molecular and solid state geometries with
symmetry adapted or crystalline structures.
"""
import numpy as np
import pandas as pd
from exa.util.units import Length

columns = ['x', 'y', 'z', 'symbol', 'frame', 'label']


def make_small_molecule(center, ligand, distance, geometry,
                        offset=None, plane=None, axis=None, domains=None,
                        unit='Angstrom', angle=None):
    """
    A minimal molecule builder for simple one-center, homogeneous ligand
    molecules of various general chemistry molecular geometries. If 'domains'
    is not specified and geometry is ambiguous (like 'bent'), it just guesses
    the simplest geometry (smallest number of domains).

    Args:
        center (str): atomic symbol of central atom
        ligand (str): atomic symbol of ligand atoms
        distance (float): distance between central atom and ligand
        geometry (str): molecular geometry
        offset (np.array): 3-array of position of central atom
        plane (str): cartesian plane of molecule (eg. for 'square_planar')
        axis (str): cartesian axis of molecule (eg. for 'linear')
        domains (int): number of electronic domains
        unit (str): unit of distance (default 'Angstrom')
        angle (float): reserved for future use (currently unused)

    Returns:
        df (:class:`~exatomic.core.atom.Atom`): Atom table of small molecule
    """
    distance *= Length[unit, 'au']
    funcs = {2: _2_domain, 3: _3_domain, 4: _4_domain,
             5: _5_domain, 6: _6_domain}
    if domains is not None:
        return funcs[domains](center, ligand, distance, geometry,
                              offset, plane, axis, angle)
    # 2 domains
    if geometry in ['linear']:
        return _2_domain(center, ligand, distance, geometry,
                         offset, plane, axis, angle)
    # 3 domains ('bent' needs at least three electron domains, so its
    # simplest realization is the 3-domain builder)
    if geometry in ['bent', 'trigonal_planar', 'trigonal_pyramidal', 't_shaped']:
        return _3_domain(center, ligand, distance, geometry,
                         offset, plane, axis, angle)
    # 4 domains
    if geometry in ['tetrahedral', 'square_planar', 'seesaw']:
        return _4_domain(center, ligand, distance, geometry,
                         offset, plane, axis, angle)
    # 5 domains
    if geometry in ['trigonal_bipyramidal', 'square_pyramidal']:
        return _5_domain(center, ligand, distance, geometry,
                         offset, plane, axis, angle)
    # 6 domains
    if geometry in ['octahedral']:
        return _6_domain(center, ligand, distance, geometry,
                         offset, plane, axis, angle)
    raise NotImplementedError


def _2_domain(center, ligand, distance, geometry, offset, plane, axis, angle):
    if axis is None:
        axis = 'z'
    if geometry == 'linear':
        cart = ['x', 'y', 'z']
        # Use float arrays so the (float) distance is not truncated to int.
        arr = np.array([0., 0., 0.])
        arr[cart.index(axis)] = distance
        origin = np.array([0., 0., 0.])
        if offset is not None:
            origin += offset
        geom = [[origin[0], origin[1], origin[2], center, 0, 0]]
        cnt = 1
        # Ligands sit at origin +/- the axis vector (offset applied once).
        for xi, yi, zi in [origin + arr, origin - arr]:
            geom.append([xi, yi, zi, ligand, 0, cnt])
            cnt += 1
        return pd.DataFrame(geom, columns=columns)
    else:
        raise NotImplementedError


def _3_domain(center, ligand, distance, geometry, offset, plane, axis, angle):
    if geometry == 'trigonal_pyramidal':
        raise NotImplementedError('trigonal pyramidal not supported yet')
    if geometry == 'trigonal_planar':
        raise NotImplementedError('trigonal planar not supported yet')
    if geometry == 'bent':
        origin = np.array([0., 0., 0.])
        arr = np.array([distance, distance, 0.])
        if offset is not None:
            raise NotImplementedError('bent and offset not supported yet')
        geom = [[origin[0], origin[1], origin[2], center, 0, 0]]
        cnt = 1
        # Place the two ligands 120 degrees apart in the xy-plane.
        for theta in [0, 2 * np.pi / 3]:
            geom.append([arr[0] * np.cos(theta), arr[1] * np.sin(theta),
                         arr[2], ligand, 0, cnt])
            cnt += 1
        return pd.DataFrame(geom, columns=columns)


def _4_domain(center, ligand, distance, geometry, offset, plane, axis, angle):
    if geometry == 'bent':
        origin = np.array([0., 0., 0.])
        arr = np.array([distance, distance, 0.])
        if offset is not None:
            raise NotImplementedError('bent and offset not supported yet')
        geom = [[origin[0], origin[1], origin[2], center, 0, 0]]
        cnt = 1
        # Place the two ligands at the tetrahedral angle (109.5 degrees).
        for theta in [0, 109.5 * np.pi / 180]:
            geom.append([arr[0] * np.cos(theta), arr[1] * np.sin(theta),
                         arr[2], ligand, 0, cnt])
            cnt += 1
        return pd.DataFrame(geom, columns=columns)
    if geometry == 'tetrahedral':
        raise NotImplementedError('tetrahedral not supported yet')
    if geometry == 'square_planar':
        cart = ['x', 'y', 'z']
        if plane is None:
            # Derive the plane from the axis only when no plane was given.
            plane = 'xy'
            if axis == 'x':
                plane = 'yz'
            elif axis == 'y':
                plane = 'xz'
        if plane not in ['xy', 'yx', 'xz', 'zx', 'yz', 'zy']:
            raise NotImplementedError('pick a cartesian plane, eg. yz')
        ax1, ax2 = plane
        origin = np.array([0., 0., 0.])
        arr1 = np.array([0., 0., 0.])
        arr2 = np.array([0., 0., 0.])
        arr1[cart.index(ax1)] = distance
        arr2[cart.index(ax2)] = distance
        if offset is not None:
            origin += offset
        geom = [[origin[0], origin[1], origin[2], center, 0, 0]]
        cnt = 1
        # Four ligands at origin +/- each in-plane axis vector.
        for xi, yi, zi in [origin + arr1, origin - arr1,
                           origin + arr2, origin - arr2]:
            geom.append([xi, yi, zi, ligand, 0, cnt])
            cnt += 1
        return pd.DataFrame(geom, columns=columns)
    if geometry == 'seesaw':
        cart = ['x', 'y', 'z']
        if offset is not None:
            raise NotImplementedError('seesaw and offset not supported yet')
        if axis is None:
            axis = 'z'
        arr1 = np.array([0., 0., 0.])
        arr2 = np.array([0., 0., 0.])
        arr1[cart.index(axis)] = distance
        for car in cart:
            if car != axis:
                arr2[cart.index(car)] = distance
        origin = np.array([0., 0., 0.])
        geom = [[origin[0], origin[1], origin[2], center, 0, 0]]
        cnt = 1
        # Two axial ligands along +/- the chosen axis.
        for xi, yi, zi in [arr1, -arr1]:
            geom.append([xi, yi, zi, ligand, 0, cnt])
            cnt += 1
        # Two equatorial ligands 120 degrees apart in the perpendicular plane.
        if axis == 'z':
            for theta in [0, 2 * np.pi / 3]:
                geom.append([arr2[0] * np.cos(theta), arr2[1] * np.sin(theta),
                             arr2[2], ligand, 0, cnt])
                cnt += 1
        elif axis == 'y':
            for theta in [0, 2 * np.pi / 3]:
                geom.append([arr2[0] * np.cos(theta), arr2[1],
                             arr2[2] * np.sin(theta), ligand, 0, cnt])
                cnt += 1
        elif axis == 'x':
            for theta in [0, 2 * np.pi / 3]:
                geom.append([arr2[0], arr2[1] * np.cos(theta),
                             arr2[2] * np.sin(theta), ligand, 0, cnt])
                cnt += 1
        return pd.DataFrame(geom, columns=columns)


def _5_domain(center, ligand, distance, geometry, offset, plane, axis, angle):
    raise NotImplementedError('5 coordinate complexes not implemented yet')


def _6_domain(center, ligand, distance, geometry, offset, plane, axis, angle):
    if geometry != 'octahedral':
        raise NotImplementedError('only octahedral geometry supported currently')
    origin = np.array([0., 0., 0.])
    x = np.array([distance, 0., 0.])
    nx = np.array([-distance, 0., 0.])
    y = np.array([0., distance, 0.])
    ny = np.array([0., -distance, 0.])
    z = np.array([0., 0., distance])
    nz = np.array([0., 0., -distance])
    if offset is not None:
        origin += offset
        x += offset
        y += offset
        z += offset
        nx += offset
        ny += offset
        nz += offset
    geom = [[origin[0], origin[1], origin[2], center, 0, 0]]
    cnt = 1
    for xi, yi, zi in [x, nx, y, ny, z, nz]:
        geom.append([xi, yi, zi, ligand, 0, cnt])
        cnt += 1
    return pd.DataFrame(geom, columns=columns)
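A minimal usage sketch of the builder above (requires exatomic's exa dependency to be installed; the SF6-like geometry and the 1.56 Angstrom bond length are illustrative assumptions):

# Hypothetical usage of make_small_molecule; values are illustrative.
atoms = make_small_molecule('S', 'F', 1.56, 'octahedral')
print(atoms)          # 7 rows: the sulfur center plus six fluorine ligands
print(list(atoms.columns))  # ['x', 'y', 'z', 'symbol', 'frame', 'label']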
{"hexsha": "b6aed729e65029bd425e9b8640adf395f3c47d71", "size": 8234, "ext": "py", "lang": "Python", "max_stars_repo_path": "exatomic/algorithms/geometry.py", "max_stars_repo_name": "herbertludowieg/exatomic", "max_stars_repo_head_hexsha": "d177781a649ba3a12e5c1147672767ac4a388a6c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2016-08-01T15:11:39.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-18T00:00:29.000Z", "max_issues_repo_path": "exatomic/algorithms/geometry.py", "max_issues_repo_name": "herbertludowieg/exatomic", "max_issues_repo_head_hexsha": "d177781a649ba3a12e5c1147672767ac4a388a6c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 121, "max_issues_repo_issues_event_min_datetime": "2016-07-09T03:20:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-07T01:43:02.000Z", "max_forks_repo_path": "exatomic/algorithms/geometry.py", "max_forks_repo_name": "herbertludowieg/exatomic", "max_forks_repo_head_hexsha": "d177781a649ba3a12e5c1147672767ac4a388a6c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2016-07-13T12:39:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T06:58:43.000Z", "avg_line_length": 40.5615763547, "max_line_length": 97, "alphanum_fraction": 0.5602380374, "include": true, "reason": "import numpy", "num_tokens": 2268}
\section{Summary}
\label{sec:summary}

It has been demonstrated that, for a high energy physics event selection application, a drone neural network can accurately approximate and learn the features of a neural network with a different structure. The proposed algorithm design allows the drone to learn these features without ever having access to the training data, or indeed any data, but only by appropriately questioning the original model. The equivalence of the outputs of the drone and the original model enables an analyst to treat both in the same way. The creation of a drone in a standardised form permits an analyst to use any desired machine-learning package to isolate a decay signature, and from this create a classifier guaranteed to be suitable for execution in the {\tt C++} real-time data selection frameworks.
{"hexsha": "7ca5685dc40a253be9246bdcb0cca4f38eb8c806", "size": 887, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "documents/paper/summary.tex", "max_stars_repo_name": "Tevien/NNDrone", "max_stars_repo_head_hexsha": "76dce457324ea03a8757d74f6403fbf60132294b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-11-06T11:21:20.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-20T14:47:21.000Z", "max_issues_repo_path": "documents/paper/summary.tex", "max_issues_repo_name": "Tevien/NNDrone", "max_issues_repo_head_hexsha": "76dce457324ea03a8757d74f6403fbf60132294b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-01-12T15:49:40.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-06T02:29:56.000Z", "max_forks_repo_path": "documents/paper/summary.tex", "max_forks_repo_name": "Tevien/NNDrone", "max_forks_repo_head_hexsha": "76dce457324ea03a8757d74f6403fbf60132294b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2018-01-12T15:46:35.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-21T22:41:56.000Z", "avg_line_length": 55.4375, "max_line_length": 93, "alphanum_fraction": 0.8139797069, "num_tokens": 180}
from sparse_gp import SparseGP
import scipy.stats as sps
import numpy as np
import sys
import os
sys.path.append('%s/../prog_common' % os.path.dirname(os.path.realpath(__file__)))
from cmd_args import cmd_args

gold_prog_list = []
with open('%s/../prog_data/gold_prog.txt' % os.path.dirname(os.path.realpath(__file__))) as f:
    for row in f:
        gold_prog_list.append(row.strip())

import argparse
cmd_opt = argparse.ArgumentParser(description='Argparser for encoding')
cmd_opt.add_argument('-seed', type=int, help='random seed')
cmd_opt.add_argument('-min_len', type=int, help='min # of statements')
cmd_opt.add_argument('-max_len', type=int, help='max # of statements')
cmd_opt.add_argument('-phase', type=str, help='train / test')
cmd_opt.add_argument('-prefix', type=str, help='data prefix')
cmd_opt.add_argument('-data_dir', type=str, help='data folder')
cmd_opt.add_argument('-prog_idx', type=int, help='index of gold program')
cmd_opt.add_argument('-feature_dump', type=str, help='feature numpy dump')
cmd_opt.add_argument('-gp_lr', type=float, help='learning rate of gaussian process')
args, _ = cmd_opt.parse_known_args()

if __name__ == '__main__':
    print(cmd_args)
    print(args)
    np.random.seed(args.seed)

    # Load the latent-feature matrix.
    fmt = args.feature_dump.split('.')[-1]
    if fmt == 'npy':
        X = np.load(args.feature_dump)
    elif fmt == 'txt':
        X = np.loadtxt(args.feature_dump)
    else:
        print('unknown feature dump format ' + fmt)
        raise NotImplementedError

    # Load the regression targets for the selected gold program.
    gold_prog = gold_prog_list[args.prog_idx]
    y = []
    for l in range(args.min_len, args.max_len + 1):
        if args.phase == 'train':
            fname = '%s/%s-number-50000-nbstat-%d.txt.target_for_[%s].txt' % (args.data_dir, args.prefix, l, gold_prog)
        else:
            fname = '%s/%s-number-50000-nbstat-%d.test.txt.target_for_[%s].txt' % (args.data_dir, args.prefix, l, gold_prog)
        cur_scores = np.loadtxt(fname)
        y.append(np.reshape(cur_scores, [-1, 1]))
    y = np.vstack(y)
    # y /= np.max(y)
    assert X.shape[0] == y.shape[0]

    # 90/10 train/test split on a random permutation.
    n = X.shape[0]
    permutation = np.random.choice(n, n, replace=False)
    X_train = X[permutation, :][0:int(np.round(0.9 * n)), :]
    X_test = X[permutation, :][int(np.round(0.9 * n)):, :]
    y_train = y[permutation][0:int(np.round(0.9 * n))]
    y_test = y[permutation][int(np.round(0.9 * n)):]

    np.random.seed(0)
    M = 500  # number of inducing points for the sparse GP
    sgp = SparseGP(X_train, 0 * X_train, y_train, M)
    sgp.train_via_ADAM(X_train, 0 * X_train, y_train, X_test, X_test * 0,
                       y_test, minibatch_size=10 * M,
                       max_iterations=cmd_args.num_epochs, learning_rate=args.gp_lr)

    with open('%s/sgp-e-%d-seed-%d-lr-%.4f.txt' % (cmd_args.save_dir, cmd_args.num_epochs, args.seed, args.gp_lr), 'w') as f:
        pred, uncert = sgp.predict(X_test, 0 * X_test)
        error = np.sqrt(np.mean((pred - y_test) ** 2))
        testll = np.mean(sps.norm.logpdf(pred - y_test, scale=np.sqrt(uncert)))
        f.write('Test RMSE: %.10f\n' % error)
        f.write('Test ll: %.10f\n' % testll)
        print('Test RMSE: ', error)
        print('Test ll: ', testll)

        pred, uncert = sgp.predict(X_train, 0 * X_train)
        error = np.sqrt(np.mean((pred - y_train) ** 2))
        trainll = np.mean(sps.norm.logpdf(pred - y_train, scale=np.sqrt(uncert)))
        f.write('Train RMSE: %.10f\n' % error)
        f.write('Train ll: %.10f\n' % trainll)
        print('Train RMSE: ', error)
        print('Train ll: ', trainll)
{"hexsha": "8885193a5db8179aa1b7379a3e6fd2d920f97dbb", "size": 3581, "ext": "py", "lang": "Python", "max_stars_repo_path": "prog_vae/sparse_gp_regression/regression.py", "max_stars_repo_name": "Hanjun-Dai/sdvae", "max_stars_repo_head_hexsha": "bd26ea949c496419634fd2cf4802fc8e19a9194c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 70, "max_stars_repo_stars_event_min_datetime": "2018-02-24T07:50:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T02:42:37.000Z", "max_issues_repo_path": "prog_vae/sparse_gp_regression/regression.py", "max_issues_repo_name": "Hanjun-Dai/sdvae", "max_issues_repo_head_hexsha": "bd26ea949c496419634fd2cf4802fc8e19a9194c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-05-31T00:50:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-28T11:58:22.000Z", "max_forks_repo_path": "prog_vae/sparse_gp_regression/regression.py", "max_forks_repo_name": "Hanjun-Dai/sdvae", "max_forks_repo_head_hexsha": "bd26ea949c496419634fd2cf4802fc8e19a9194c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2019-01-11T10:56:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T23:09:39.000Z", "avg_line_length": 36.9175257732, "max_line_length": 125, "alphanum_fraction": 0.6313878805, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1016}
from openpathsampling.engines.toy.pes import PES, PES_Add, OuterWalls, Gaussian
import numpy as np


class DoublewellPotential(PES_Add):
    def __init__(self):
        super(DoublewellPotential, self).__init__(
            OuterWalls([1.0, 1.0], [0.0, 0.0]),
            PES_Add(
                Gaussian(-0.7, [7.5, 7.5], [-0.5, 0.0]),
                Gaussian(-0.7, [7.5, 7.5], [0.5, 0.0])))


class ZPotential(PES):
    r"""Z-potential energy surface as described by Buijsman and Bolhuis (2020):
    https://aip.scitation.org/doi/pdf/10.1063/1.5130760

    :math:`V(x,y) = \frac{x^{4} + y^{4}}{20480}
    - 3 e^{-0.01(x + 5)^{2} - 0.2(y + 5)^{2}}
    - 3 e^{-0.01(x - 5)^{2} - 0.2(y - 5)^{2}}
    + \frac{5 e^{-0.2(x + 3(y - 3))^{2}}}{1 + e^{-x - 3}}
    + \frac{5 e^{-0.2(x + 3(y + 3))^{2}}}{1 + e^{x - 3}}
    + 3 e^{-0.01\left(x^{2} + y^{2}\right)}`

    The surface is two-dimensional; the constructor takes no parameters.
    """
    def __init__(self):
        super(ZPotential, self).__init__()
        self._local_dVdx = np.zeros(2)

    def __repr__(self):
        return "Z-Potential"

    def V(self, sys):
        """Potential energy

        Parameters
        ----------
        sys : :class:`.ToyEngine`
            engine contains its state, including velocities and masses

        Returns
        -------
        float
            the potential energy
        """
        pos = sys.positions
        myV = (pos[0]**4 + pos[1]**4) / 20480 \
            - 3 * np.exp(-0.01 * (pos[0] + 5)**2 - 0.2 * (pos[1] + 5)**2) \
            - 3 * np.exp(-0.01 * (pos[0] - 5)**2 - 0.2 * (pos[1] - 5)**2) \
            + (5 * np.exp(-0.2 * (pos[0] + 3 * (pos[1] - 3))**2)) \
            / (1 + np.exp(-pos[0] - 3)) \
            + (5 * np.exp(-0.2 * (pos[0] + 3 * (pos[1] + 3))**2)) \
            / (1 + np.exp(pos[0] - 3)) \
            + 3 * np.exp(-0.01 * (pos[0]**2 + pos[1]**2))
        return myV

    def dVdx(self, sys):
        """Derivative of potential energy (-force)

        Parameters
        ----------
        sys : :class:`.ToyEngine`
            engine contains its state, including velocities and masses

        Returns
        -------
        np.array
            the derivatives of the potential at this point
        """
        pos = sys.positions
        self._local_dVdx[0] = pos[0]**3 / 5120 - 0.06 * pos[0] \
            * np.exp(-0.01 * (pos[0]**2 + pos[1]**2)) + 0.06 * (pos[0] - 5) \
            * np.exp(-0.01 * (pos[0] - 5)**2 - 0.2 * (pos[1] - 5)**2) \
            + 0.06 * (pos[0] + 5) \
            * np.exp(-0.01 * (pos[0] + 5)**2 - 0.2 * (pos[1] + 5)**2) \
            - (40.1711 * np.exp(pos[0] - 0.2 * (pos[0] + 3 * pos[1] - 9)**2)
               * (pos[0] + 3 * pos[1] - 9)) / (np.exp(pos[0] + 3) + 1) \
            - (40.1711 * np.exp(-0.2 * (pos[0] + 3 * pos[1] + 9)**2)
               * (pos[0] + 3 * pos[1] + 9)) / (np.exp(pos[0]) + np.exp(3)) \
            - (5 * np.exp(-0.2 * (pos[0] + 3 * pos[1] + 9)**2 + pos[0] + 3)) \
            / (np.exp(pos[0]) + np.exp(3))**2 \
            + (5 * np.exp(-0.2 * (pos[0] + 3 * pos[1] - 9)**2 + pos[0] + 3)) \
            / (np.exp(pos[0] + 3) + 1)**2
        self._local_dVdx[1] = pos[1]**3 / 5120 \
            - 0.06 * pos[1] * np.exp(-0.01 * (pos[0]**2 + pos[1]**2)) \
            + 1.2 * (pos[1] - 5) \
            * np.exp(-0.01 * (pos[0] - 5)**2 - 0.2 * (pos[1] - 5)**2) \
            + 1.2 * (pos[1] + 5) \
            * np.exp(-0.01 * (pos[0] + 5)**2 - 0.2 * (pos[1] + 5)**2) \
            - (120.513 * np.exp(pos[0] - 0.2 * (pos[0] + 3 * pos[1] - 9)**2)
               * (pos[0] + 3 * pos[1] - 9)) / (np.exp(pos[0] + 3) + 1) \
            - (120.513 * np.exp(-0.2 * (pos[0] + 3 * pos[1] + 9)**2)
               * (pos[0] + 3 * pos[1] + 9)) / (np.exp(pos[0]) + np.exp(3))
        return self._local_dVdx
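A small sketch for sanity-checking the analytic gradient above against a central finite difference. The MockSys stand-in is an assumption (openpathsampling engines normally supply the object with a .positions attribute), as is the ability to construct the PES base class standalone:

import numpy as np

class MockSys:
    """Hypothetical stand-in for the engine state; only .positions is used."""
    def __init__(self, positions):
        self.positions = np.asarray(positions, dtype=float)

def numerical_dVdx(pes, pos, eps=1e-6):
    # Central finite differences of V with respect to each coordinate.
    grad = np.zeros(2)
    for k in range(2):
        plus, minus = pos.copy(), pos.copy()
        plus[k] += eps
        minus[k] -= eps
        grad[k] = (pes.V(MockSys(plus)) - pes.V(MockSys(minus))) / (2 * eps)
    return grad

pes = ZPotential()
pos = np.array([1.0, -2.0])
analytic = pes.dVdx(MockSys(pos)).copy()  # copy: dVdx reuses an internal buffer
numeric = numerical_dVdx(pes, pos)
print(np.allclose(analytic, numeric, atol=1e-4))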
{"hexsha": "61d6f14d9e351464290ae22e0538cf23bb76d330", "size": 3848, "ext": "py", "lang": "Python", "max_stars_repo_path": "NucleationModel/potentials.py", "max_stars_repo_name": "MFrassek/CommittorEAE", "max_stars_repo_head_hexsha": "88a467e4500bc9ab69834209f4eaec9f2d0d7a61", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NucleationModel/potentials.py", "max_issues_repo_name": "MFrassek/CommittorEAE", "max_issues_repo_head_hexsha": "88a467e4500bc9ab69834209f4eaec9f2d0d7a61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NucleationModel/potentials.py", "max_forks_repo_name": "MFrassek/CommittorEAE", "max_forks_repo_head_hexsha": "88a467e4500bc9ab69834209f4eaec9f2d0d7a61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.48, "max_line_length": 79, "alphanum_fraction": 0.4093035343, "include": true, "reason": "import numpy", "num_tokens": 1525}
include "bug-2601829-mid.h" end
{"hexsha": "1e950eba4bf939eef9d4a2e69894e38d13599955", "size": 33, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/bug-reports-sf/bug-2601829.f90", "max_stars_repo_name": "OpenFortranProject/ofp-sdf", "max_stars_repo_head_hexsha": "202591cf4ac4981b21ddc38c7077f9c4d1c16f54", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2015-03-05T14:41:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-22T23:51:25.000Z", "max_issues_repo_path": "tests/bug-reports-sf/bug-2601829.f90", "max_issues_repo_name": "OpenFortranProject/ofp-sdf", "max_issues_repo_head_hexsha": "202591cf4ac4981b21ddc38c7077f9c4d1c16f54", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2015-11-05T09:50:04.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-10T21:32:48.000Z", "max_forks_repo_path": "tests/bug-reports-sf/bug-2601829.f90", "max_forks_repo_name": "OpenFortranProject/ofp-sdf", "max_forks_repo_head_hexsha": "202591cf4ac4981b21ddc38c7077f9c4d1c16f54", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-06-24T01:22:58.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-16T06:47:15.000Z", "avg_line_length": 8.25, "max_line_length": 27, "alphanum_fraction": 0.7272727273, "num_tokens": 13}
from scannertools.prelude import *
from scipy.spatial import distance
import numpy as np
from typing import Sequence
import pickle

WINDOW_SIZE = 500
BOUNDARY_BATCH = 10000000
POSITIVE_OUTLIER = 2.5
NEGATIVE_OUTLIER = 1.0


@scannerpy.register_python_op(name='ColorHistogramShotLabels', batch=BOUNDARY_BATCH)
def color_histogram_shot_labels(config, histograms: Sequence[bytes]) -> Sequence[bytes]:
    hists = [readers.histograms(byts, config.protobufs) for byts in histograms]

    # Compute the mean difference between each pair of adjacent frames
    diffs = np.array([
        np.mean([distance.chebyshev(hists[i - 1][j], hists[i][j]) for j in range(3)])
        for i in range(1, len(hists))
    ])
    diffs = np.insert(diffs, 0, 0)
    n = len(diffs)

    # Do simple outlier detection to find boundaries between shots
    positive_boundaries = []
    negative_boundaries = []
    for i in range(1, n):
        window = diffs[max(i - WINDOW_SIZE, 0):min(i + WINDOW_SIZE, n)]

        if diffs[i] - np.mean(window) > POSITIVE_OUTLIER * np.std(window):
            positive_boundaries.append(i)

        if diffs[i] - np.mean(window) < NEGATIVE_OUTLIER * np.std(window):
            negative_boundaries.append(i)

    # One output element is required per input element; only the first carries
    # the pickled boundaries, the rest are single-byte placeholders.
    return [pickle.dumps((positive_boundaries, negative_boundaries))] + \
        [b'\0' for _ in range(len(histograms) - 1)]


class ColorHistogramShotLabelsPipeline(Pipeline):
    job_suffix = 'color_histogram_shot_labels'
    base_sources = ['videos', 'histograms']
    run_opts = {
        'io_packet_size': BOUNDARY_BATCH,
        'work_packet_size': BOUNDARY_BATCH
    }

    def build_pipeline(self):
        return {
            'color_histogram_shot_labels':
            self._db.ops.ColorHistogramShotLabels(histograms=self._sources['histograms'].op)
        }

    def parse_output(self):
        boundaries = super().parse_output()
        return [
            pickle.loads(next(b._column.load(rows=[0]))) if b is not None else None
            for b in boundaries
        ]


compute_color_histogram_shot_labels = ColorHistogramShotLabelsPipeline.make_runner()
{"hexsha": "e6551c8fa9c1d373eea6378db05114407a7f0ee0", "size": 2097, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/esper/shot_labeling_functions.py", "max_stars_repo_name": "DanFu09/esper", "max_stars_repo_head_hexsha": "ccc5547de3637728b8aaab059b6781baebc269ec", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-12-27T07:21:38.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-04T10:35:02.000Z", "max_issues_repo_path": "app/esper/shot_labeling_functions.py", "max_issues_repo_name": "DanFu09/esper", "max_issues_repo_head_hexsha": "ccc5547de3637728b8aaab059b6781baebc269ec", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/esper/shot_labeling_functions.py", "max_forks_repo_name": "DanFu09/esper", "max_forks_repo_head_hexsha": "ccc5547de3637728b8aaab059b6781baebc269ec", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.95, "max_line_length": 123, "alphanum_fraction": 0.6890796376, "include": true, "reason": "import numpy,from scipy", "num_tokens": 517}
      MODULE maneig_I
      INTERFACE
!...Generated by Pacific-Sierra Research 77to90 4.3E 14:07:38 1/ 5/07
!...Modified by Charlotte Froese Fischer
!                         Gediminas Gaigalas 10/05/17
      SUBROUTINE maneig (IATJPO, IASPAR)
      INTEGER, INTENT(OUT) :: IATJPO
      INTEGER, INTENT(OUT) :: IASPAR
      END SUBROUTINE
      END INTERFACE
      END MODULE
{"hexsha": "ea7093d32298cb9e0a0632ae8a758503db0cef01", "size": 377, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/appl/rci90/maneig_I.f90", "max_stars_repo_name": "sylas/grasp-continuum", "max_stars_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2019-03-10T04:00:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T22:01:15.000Z", "max_issues_repo_path": "src/appl/rci90/maneig_I.f90", "max_issues_repo_name": "sylas/grasp-continuum", "max_issues_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2019-03-07T17:56:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T16:45:24.000Z", "max_forks_repo_path": "src/appl/rci90/maneig_I.f90", "max_forks_repo_name": "sylas/grasp-continuum", "max_forks_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-03-10T04:00:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T02:06:40.000Z", "avg_line_length": 31.4166666667, "max_line_length": 73, "alphanum_fraction": 0.6233421751, "num_tokens": 127}
# Python > Numpy > Floor, Ceil and Rint
# Use the floor, ceil and rint tools of NumPy on the given array.
#
# https://www.hackerrank.com/challenges/floor-ceil-and-rint/problem
#

import numpy

# Compare version components numerically; comparing the raw version strings
# lexicographically would misorder e.g. '1.9' and '1.14'.
if tuple(map(int, numpy.version.version.split('.')[:2])) >= (1, 14):
    numpy.set_printoptions(legacy='1.13')

# numpy.float was deprecated and later removed; the builtin float is equivalent.
a = numpy.array(input().split(), float)

print(numpy.floor(a))
print(numpy.ceil(a))
print(numpy.rint(a))
{"hexsha": "ca04715993538c653d8e18a3105341acf80d7cce", "size": 383, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/numpy/floor-ceil-and-rint.py", "max_stars_repo_name": "PingHuskar/hackerrank", "max_stars_repo_head_hexsha": "1bfdbc63de5d0f94cd9e6ae250476b4a267662f2", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2018-05-11T07:54:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T19:02:32.000Z", "max_issues_repo_path": "python/numpy/floor-ceil-and-rint.py", "max_issues_repo_name": "PingHuskar/hackerrank", "max_issues_repo_head_hexsha": "1bfdbc63de5d0f94cd9e6ae250476b4a267662f2", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-09-13T10:03:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-04T10:21:05.000Z", "max_forks_repo_path": "python/numpy/floor-ceil-and-rint.py", "max_forks_repo_name": "PingHuskar/hackerrank", "max_forks_repo_head_hexsha": "1bfdbc63de5d0f94cd9e6ae250476b4a267662f2", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2019-01-23T19:06:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-23T16:03:47.000Z", "avg_line_length": 23.9375, "max_line_length": 67, "alphanum_fraction": 0.7101827676, "include": true, "reason": "import numpy", "num_tokens": 107}
""" plot_horizontal_cross_section_from_netcdf.py: plot the horizontal cross section from the netcdf model file. """ import cartopy import cartopy.crs as ccrs import click import matplotlib.pyplot as plt import numpy as np from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter from netCDF4 import Dataset def extract_data(f, depth, parameter): depth_pos = np.where(f.variables["depth"][:] == depth) if (len(depth_pos) != 1): raise Exception("no such depth in the netcdf file.") depth_pos = depth_pos[0][0] data_all = f.variables[parameter][:, :, :].copy() data_all = data_all.transpose([2, 1, 0]) data = data_all[:, :, depth_pos].copy() # we usually use a large value to represent nan. data[data > 9e6] = np.nan data_all[data_all > 9e6] = np.nan print(np.nanmin(data_all), np.nanmax(data_all)) mesh_lon, mesh_lat = np.meshgrid( f.variables["longitude"][:], f.variables["latitude"][:], indexing="ij") return mesh_lon, mesh_lat, data def plot_h(mesh_lon, mesh_lat, data, parameter, depth, vmin, vmax, region, scale, percentage, abs): # pylint: disable=unused-argument plt.figure() ax = plt.axes(projection=ccrs.PlateCarree()) print(np.nanmin(data), np.nanmax(data)) if (scale): min_absdata = np.abs(np.nanmin(data)) max_absdata = np.abs(np.nanmax(data)) range_val = np.max([min_absdata, max_absdata]) vmin = -range_val vmax = range_val if (abs): data = np.abs(data) plt.pcolormesh(mesh_lon, mesh_lat, data, transform=ccrs.PlateCarree(), vmin=vmin, vmax=vmax, cmap=plt.cm.jet_r) # pylint: disable=no-member ax.coastlines() # input format lon1/lat1/lon2/lat2 lon1, lat1, lon2, lat2 = map(float, region.split("/")) ax.set_extent([lon1, lon2, lat1, lat2], crs=ccrs.PlateCarree()) ax.set_xticks(np.arange(lon1, lon2, 10), crs=ccrs.PlateCarree()) ax.set_yticks(np.arange(lat1, lat2, 10), crs=ccrs.PlateCarree()) lon_formatter = LongitudeFormatter(zero_direction_label=True) lat_formatter = LatitudeFormatter() ax.xaxis.set_major_formatter(lon_formatter) ax.yaxis.set_major_formatter(lat_formatter) # in general disable it, since the map boundary excludes some part of China. 
ax.add_feature(cartopy.feature.BORDERS) colorbar = plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04, extend="neither") colorbar.set_ticks([round(i, 2) for i in colorbar.get_ticks().tolist()]) colorbar.set_ticklabels( [str(i) for i in colorbar.get_ticks().tolist()]) # colorbar.set_label(label=f"${{dlnV_{{{parameter[1:]}}}}}$", size=15) plt.grid() plt.text(0.1, 0.9, f"{parameter}\n{depth}km", ha='center', va='center', transform=ax.transAxes, fontsize=20) ax.xaxis.set_tick_params(labelsize=15) ax.yaxis.set_tick_params(labelsize=15) # plt.title(f"{parameter} at {depth}km") plt.show() @click.command() @click.option('--netcdf_file', required=True, type=str, help="the netcdf file") @click.option('--parameter', required=True, type=str, help="the parameter to plot") @click.option('--depth', required=True, type=float, help="the depth to extract data (km)") @click.option('--vmin', required=False, default=0, type=float, help="the min limit for colorbar") @click.option('--vmax', required=False, default=0, type=float, help="the max limit for colorbar") @click.option('--region', required=True, type=str, help="the region to plot, lon1/lat1/lon2/lat2") @click.option('--scale/--no-scale', default=False, required=False, help="if scale the range based on the maximum value") @click.option('--percentage/--no-percentage', default=False, required=False, help="if use percentage in colorbar") @click.option('--abs/--no-abs', default=False, required=False, help="if plot the absolute value") def main(netcdf_file, parameter, depth, vmin, vmax, region, scale, percentage, abs): f = Dataset(netcdf_file, 'r') mesh_lon, mesh_lat, data = extract_data(f, depth, parameter) if (percentage): data = data*100 plot_h(mesh_lon, mesh_lat, data, parameter, depth, vmin, vmax, region, scale, percentage, abs) if __name__ == "__main__": main() # pylint: disable=no-value-for-parameter
{"hexsha": "3895d668fc157ffd88a141e886c83a49c414d651", "size": 4306, "ext": "py", "lang": "Python", "max_stars_repo_path": "seisflow/scripts/plot/plot_horizontal_cross_section_from_netcdf.py", "max_stars_repo_name": "ziyixi/seisflow", "max_stars_repo_head_hexsha": "722c2445f4a5316f42bfbc8b9010d31caad4c76e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-17T13:17:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T02:51:35.000Z", "max_issues_repo_path": "seisflow/scripts/plot/plot_horizontal_cross_section_from_netcdf.py", "max_issues_repo_name": "ziyixi/seisflow", "max_issues_repo_head_hexsha": "722c2445f4a5316f42bfbc8b9010d31caad4c76e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "seisflow/scripts/plot/plot_horizontal_cross_section_from_netcdf.py", "max_forks_repo_name": "ziyixi/seisflow", "max_forks_repo_head_hexsha": "722c2445f4a5316f42bfbc8b9010d31caad4c76e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.3010752688, "max_line_length": 134, "alphanum_fraction": 0.6832326986, "include": true, "reason": "import numpy", "num_tokens": 1152}
import galsim
import numpy
import os


class CosmosSampler(object):
    _req_params = {}
    _opt_params = {
        'min_r50': float,
        'max_r50': float,
        'min_flux': float,
        'max_flux': float,
        'kde_factor': float
    }
    _single_params = []
    _takes_rng = True
    # It doesn't actually need an rng, but this marks it as "unsafe"
    # to the ProcessInput function, which avoids some multiprocessing
    # pickle problems.

    def __init__(self, min_r50=0.05, max_r50=2.0,
                 min_flux=0.5, max_flux=100,
                 kde_factor=0.01, rng=None):
        # Make sure required dependencies are checked right away, so the user gets timely
        # feedback of what this code requires.
        import scipy
        import fitsio

        self.r50_range = (min_r50, max_r50)
        self.flux_range = (min_flux, max_flux)

        self.r50_sanity_range = 0.05, 2.0
        self.flux_sanity_range = 0.5, 100.0

        self.kde_factor = kde_factor

        self._load_data()
        self._make_kde()

    def resample(self, size, rand):
        # Equivalent to this line:
        #     return self.kde.resample(size=size)
        # except we do this using a numpy RandomState, rather than using the global
        # numpy.random state.
        # The following is basically copied from the scipy code, but patching in the use
        # of the RandomState where appropriate.
        if size is None:
            size = self.kde.n

        norm = numpy.transpose(rand.multivariate_normal(
            numpy.zeros((self.kde.d,), float),
            self.kde.covariance,
            size=size,
        ))
        indices = rand.randint(0, self.kde.n, size=size)
        means = self.kde.dataset[:, indices]

        return means + norm

    def sample(self, rng, size=None):
        """
        get [r50, flux] or [:, r50_flux]
        """
        if size is None:
            size = 1
            is_scalar = True
        else:
            is_scalar = False

        r50min, r50max = self.r50_range
        fmin, fmax = self.flux_range

        data = numpy.zeros((size, 2))

        ngood = 0
        nleft = data.shape[0]
        rand = numpy.random.RandomState(rng.raw())
        while nleft > 0:
            r = self.resample(nleft, rand).T

            w, = numpy.where(
                (r[:, 0] > r50min) & (r[:, 0] < r50max) &
                (r[:, 1] > fmin) & (r[:, 1] < fmax)
            )

            if w.size > 0:
                data[ngood:ngood+w.size, :] = r[w, :]
                ngood += w.size
                nleft -= w.size

        if is_scalar:
            data = data[0, :]

        return data

    def _load_data(self):
        import fitsio
        fname = 'real_galaxy_catalog_25.2_fits.fits'
        fname = os.path.join(
            #sys.exec_prefix,
            #'share',
            #'galsim',
            galsim.meta_data.share_dir,
            'COSMOS_25.2_training_sample',
            fname,
        )

        r50min, r50max = self.r50_sanity_range
        fmin, fmax = self.flux_sanity_range

        alldata = fitsio.read(fname, lower=True)

        w, = numpy.where(
            (alldata['viable_sersic'] == 1) &
            (alldata['hlr'][:, 0] > r50min) &
            (alldata['hlr'][:, 0] < r50max) &
            (alldata['flux'][:, 0] > fmin) &
            (alldata['flux'][:, 0] < fmax)
        )

        self.alldata = alldata[w]

    def _make_kde(self):
        import scipy.stats

        data = numpy.zeros((self.alldata.size, 2))
        data[:, 0] = self.alldata['hlr'][:, 0]
        data[:, 1] = self.alldata['flux'][:, 0]

        self.kde = scipy.stats.gaussian_kde(
            data.transpose(),
            bw_method=self.kde_factor,
        )


def CosmosR50Flux(config, base, name):
    index, index_key = galsim.config.GetIndex(config, base)
    rng = galsim.config.GetRNG(config, base)

    if base.get('_cosmos_sampler_index', None) != index:
        cosmos_sampler = galsim.config.GetInputObj('cosmos_sampler', config, base, name)
        r50, flux = cosmos_sampler.sample(rng)
        base['_cosmos_sampler_r50'] = r50
        base['_cosmos_sampler_flux'] = flux
        base['_cosmos_sampler_index'] = index
    else:
        r50 = base['_cosmos_sampler_r50']
        flux = base['_cosmos_sampler_flux']
    return float(r50), float(flux)


def CosmosR50(config, base, value_type):
    r50, flux = CosmosR50Flux(config, base, 'CosmosR50')
    return r50, False


def CosmosFlux(config, base, value_type):
    r50, flux = CosmosR50Flux(config, base, 'CosmosFlux')
    return flux, False


galsim.config.RegisterInputType('cosmos_sampler',
                                galsim.config.InputLoader(CosmosSampler))
galsim.config.RegisterValueType('CosmosR50', CosmosR50, [float],
                                input_type='cosmos_sampler')
galsim.config.RegisterValueType('CosmosFlux', CosmosFlux, [float],
                                input_type='cosmos_sampler')
{"hexsha": "9e2c48afb0e03164460d34f92e13ab4b089195d6", "size": 4855, "ext": "py", "lang": "Python", "max_stars_repo_path": "galsim_extra/cosmos_sampler.py", "max_stars_repo_name": "esheldon/galsim_extra", "max_stars_repo_head_hexsha": "1e93a35db16943f6ce0251cddcd253defe0d5c61", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2016-08-30T20:31:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-05T19:59:51.000Z", "max_issues_repo_path": "galsim_extra/cosmos_sampler.py", "max_issues_repo_name": "esheldon/galsim_extra", "max_issues_repo_head_hexsha": "1e93a35db16943f6ce0251cddcd253defe0d5c61", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2017-02-15T18:48:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-19T16:26:05.000Z", "max_forks_repo_path": "galsim_extra/cosmos_sampler.py", "max_forks_repo_name": "esheldon/galsim_extra", "max_forks_repo_head_hexsha": "1e93a35db16943f6ce0251cddcd253defe0d5c61", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-11-29T19:29:23.000Z", "max_forks_repo_forks_event_max_datetime": "2017-05-26T14:42:53.000Z", "avg_line_length": 31.7320261438, "max_line_length": 95, "alphanum_fraction": 0.5734294542, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1279}
from environments import rlgymenv
import policyopt
from policyopt import SimConfig, rl, util, nn, tqdm

import gym
import numpy as np
import argparse


def main():
    np.set_printoptions(suppress=True, precision=5, linewidth=1000)

    parser = argparse.ArgumentParser()
    parser.add_argument('env', type=str)
    parser.add_argument('--num_eval_trajs', type=int, default=50)
    parser.add_argument('--max_traj_len', type=int, default=None)
    parser.add_argument('--out', type=str, default=None)
    args = parser.parse_args()

    # Initialize the mdp
    mdp = rlgymenv.RLGymMDP(args.env)
    env = gym.make(args.env)
    print "Initialized environment %s" % args.env
    util.header('MDP observation space, action space sizes: %d, %d\n' % (mdp.obs_space.dim, mdp.action_space.storage_size))

    if args.max_traj_len is None:
        args.max_traj_len = mdp.env_spec.timestep_limit
    util.header('Max traj len is {}'.format(args.max_traj_len))

    # Run the simulation
    returns = []
    lengths = []
    sim = mdp.new_sim()
    for i_traj in range(args.num_eval_trajs):
        print i_traj, args.num_eval_trajs
        sim.reset()
        totalr = 0.
        l = 0
        while not sim.done and l < args.max_traj_len:
            #a = [np.random.uniform(mdp.action_space.low[i], mdp.action_space.high[i]) for i in range(len(mdp.action_space.shape[0]))]
            a = env.action_space.sample()
            if isinstance(mdp.action_space, policyopt.FiniteSpace):
                a = np.asarray([a])
            r = sim.step(a)
            totalr += r
            l += 1
        returns.append(totalr)
        lengths.append(l)

    print "Mean reward: {}, Std reward: {}, Mean length: {}, Std length: {}\n".format(np.asarray(returns).mean(), np.asarray(returns).std(), np.asarray(lengths).mean(), np.asarray(lengths).std())
    if args.out is not None:
        with open(args.out, 'w') as f:
            f.write("Mean reward: {}, Std reward: {}, Mean length: {}, Std length: {}\n".format(np.asarray(returns).mean(), np.asarray(returns).std(), np.asarray(lengths).mean(), np.asarray(lengths).std()))


if __name__ == '__main__':
    main()
{"hexsha": "62cabef574216bb87fcd466971937d805c300369", "size": 2031, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/evaluate_random_policy.py", "max_stars_repo_name": "Santara/RAIL", "max_stars_repo_head_hexsha": "f36998f4852d274132490cd16ccda7e8839888f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2018-03-05T21:49:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T05:59:15.000Z", "max_issues_repo_path": "reference_src/0.rail/scripts/evaluate_random_policy.py", "max_issues_repo_name": "sangyongjeong1604/safegail", "max_issues_repo_head_hexsha": "76828169fbf1f9dce7bcc7fc03638abc6ef7a425", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-04-25T08:46:13.000Z", "max_issues_repo_issues_event_max_datetime": "2017-04-25T09:13:22.000Z", "max_forks_repo_path": "reference_src/0.rail/scripts/evaluate_random_policy.py", "max_forks_repo_name": "sangyongjeong1604/safegail", "max_forks_repo_head_hexsha": "76828169fbf1f9dce7bcc7fc03638abc6ef7a425", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-09-03T02:27:10.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-15T19:37:40.000Z", "avg_line_length": 35.0172413793, "max_line_length": 198, "alphanum_fraction": 0.6834071886, "include": true, "reason": "import numpy", "num_tokens": 536}
Describe Users/StephHolm here.

20110807 15:27:19: Welcome to the Wiki! I saw your comment on the Taste of Thai page, and I have to say KetMoRee across the street has free Thai iced tea refills, and has had it since Day One. Users/NikhilDahal

20120125 11:41:35: Hey, neat idea for a page! I've been a fan of Tumbleweed for many years now, although they aren't the only players around. I'm a big fan of modern vardos as well. Did you get permission to release those photos to Creative Commons? If not, you can probably just write and ask... and plenty of people have photos of their small houses available. Users/JabberWokky Evan "JabberWokky" Edwards

20120126 09:48:51: As a bit of information: there's some confusion over whether Bruce has progressed to full-blown trolling at this point, or it's just a reflection of his very unique perspective on, well, pretty much everything, including the wiki. He's a nice guy, but probably not the most representative voice when it comes to the traditions of what works on the wiki. Alas, the tiny homes page seems to have acquired some of the aura of the recent edits that he has made. It's not fair (and hopefully my pointing it out will help make people try to separate things), but the wiki, like all groups of people, is a social community, so such things happen from time to time. Users/JabberWokky Evan "JabberWokky" Edwards, jw.dw@timewarp.org

20120126 11:06:43: Steph, I want to echo what JW has said and what others have said on the page about tiny houses that you created. There are two issues going on here. One is that you inadvertently walked into a very long and very frustrating conversation that many of us have had with BH. Many of the pages and edits he's created are far less relevant than what you're proposing, so some of the disagreement is aimed at him rather than at you. The second issue is that, BH aside, some people still want to see more Davis relevance on the page, but others of us (including me) see the page as being on a good track. All of this is part of the collaborative wiki editing process. It can be a bit messy sometimes, and sometimes feathers get ruffled, but we hope that in the end the process is a productive one. Anyway, I hope you stick with the project, the page, and the wiki and continue to work things out with us. So far, so good, as far as I am concerned! Users/CovertProfessor

20120126 11:18:37: Thanks CP! I will do so this evening or tomorrow eve. I can do some mods for sure based on the conversation. And I'm serious about starting a group! I could just see a cooperative village of these tiny houses, with a community kitchen and laundry facility. That would be SO Davis! A non-student, modern, mobile version of the Domes. With a couple planned empty spots for traveling houses to park and set awhile. I can dream.... :) SH Users/StephHolm

That would be very Davis, and very cool! Go, go, go! Users/CovertProfessor

20120126 11:59:51: Hi SH, upon thinking about it and reading comments, I agree that this page has a lot of potential. I changed the page a bit so it didn't irk me as much. Of course, it can be changed :) but now I feel like it's more Davis-like and is a good start for your vision. Users/jsbmeb

20120126 16:23:45: Like I said, I feel the Grande School site could be easily (well, not so easily bought) repurposed for small-home-style / co-op-style living, with a great addition to the greenbelt. When I was a young warthog I dreamed my idea up. Users/StevenDaubert

How exactly do people go about acquiring land within city limits and seeing if this can get done?

20120126 16:36:24: I believe either the City or the DJUSD owns the Grande school site. But if it were another plot of land, it would be a regular purchase and need to go through zoning, I imagine. Users/jsbmeb

DJUSD owns it; it's too small for an elementary school (which is a joke because it's larger than Korematsu). It's already zoned for usage, but easements / integration into the bike path would be my chief concern. Daubert

20120126 16:38:07: Oh, one more thing... it would be cool if the old Sunrise Farm land on E 8th could be used, but it looks like the developer already may have plans for it. Users/jsbmeb

Sunrise Farm is a wonderful, beautiful place with a horrible stigma and past; any developer would be foolish to develop that land. Have you been there at night / seen / heard of the sketchiness that occurs there? Daubert

Hm. What are the western boundaries of the city limits? The city limits page of the wiki should be updated with the exact boundary lines, the exact street names. From the map there it LOOKS like the western boundary is the final line of houses at the west side of Marina Circle... directly behind those houses happens to be some farmland for sale (or lease)... hmmmm. Bet it's a pretty penny tho. StephHolm

The west side of the Covell drainage channel is as far as the houses / the city limits go on my 500-size (3ft x 8ft) address map. Daubert

Whahuh? So all of us over in West Davis, west of the West Pond, are not within the city limits? That can't be right. The map on the City Limits page here on the wiki has a blue outline and it looks like it covers ALL the residential up to the backside of Marina Circle and those neighborhoods north of it. That huge plot of land right behind that is farmland and it's for sale. StephHolm

I bought this map from the city, and had a public works engineer merge the databases they use (they were upgraded, so my map request made them finally get that ball rolling). The Covell drainage channel goes north to south on the west end of the city; that is the line on the map. West Pond is east of that... It curves west to east on Covell, and then heads up 113 to then make the north boundary for North Davis. The little pockets next to the town (Wisteria, Patwin, Tierra for that little nook near Cactus Corner Barry, Central Sharron for that development down 113 but before MUNI) aren't in Davis. Also, El Macero is not in Davis.

That ditch goes behind the houses west of Marina Circle about halfway from Covell to Russell before it heads west. There's only a few yards at most between the ditch and the back yards of the houses. It's one line of backyard fences from Covell to Russell. Those houses are the last ones in Davis. Users/BruceHansen

Thanks Bruce, that is what I wasn't sure about... whether it went BEHIND the Marina Cir houses. I was pretty sure I am in the city limits! However, I could throw a rock outside the limits from my house! Thanks everyone for clarifying the city limits issue! Steph Holm

20120126 17:08:30: Then there is that crazy land of blight, the old processing factory or whatever it is, next to the farmland that is across, more or less, from the North Nugget shopping center. They have a carnival there once a year, I think. What is up with that place? Users/StephHolm

That would be the old Hunt's factory. When I was young it was a huge warehouse / processing plant for tomatoes. It's since been razed down to the foundation.

They left that one tank for when they were going to do the Cannery development (which would have been fused to Covell Village).

20120126 19:45:48: Well, whaddya know... I just found out on Facebook that 2012 is the United Nations International Year of Cooperatives. I quote here (http://social.un.org/coopsyear/aboutiyc.html) from their website:

With the theme of "Cooperative Enterprises Build a Better World", the Year seeks to encourage the growth and establishment of cooperatives all over the world. It also encourages individuals, communities and governments to recognize the agency of cooperatives in helping to achieve internationally agreed upon development goals, such as the Millennium Development Goals. The United Nations General Assembly Resolution A/RES/64/136 encourages all member States, the United Nations and all relevant stakeholders to take advantage of the IYC to promote cooperatives and raise awareness of their contribution to social and economic development and promote the formation and growth of cooperatives.

Verrryyy interesting. Maybe something could get started this year! Land donated? Who knows?! Users/StephHolm

20120126 20:18:55: Two comments: 1) Developer plans have been drawn up for the Cannery; whether anything actually happens with them still remains to be seen (still not approved). 2) See the cooperatives page. Users/CovertProfessor

20120126 20:34:14: It would be cool if there could be a demonstration of Tiny Homes on Grande with temporary utility hookups. The City and the voters have limited Davis outward expansion and so there's a call for infill. These little houses could be a form of infill. Users/BruceHansen

20120126 22:16:09: I wonder if the Solar Community Housing Association would be interested in starting a new co-op of these tiny houses... do they acquire the land? The difference would be that they would allow the future residents to actually construct their tiny homes there on the land, and other future residents could help and learn. Cooperativeness even in building the very community! Users/StephHolm

One of the issues is that homes below a certain square footage threshold cannot be legally constructed. That's why vardos are popular, and the reason most Tumbleweed plans call for building on a trailer. By declaring it a mobile home, you bypass those minimum size requirements. As a result, there's an opportunity because the classification is as a mobile home... and potential pitfalls in zoning for the same reason. If you think that's bad, try one of my other two architectural passions: underground homes. In some states it's pretty much impossible. ⁓Users/JabberWokky ʝ⍵

Well, either the SCHA would have to be OK to run a mobile home co-op, or we would do it right outside the city limits. Or, we get the code changed. Perhaps because these tiny homes look so fabulous and craftsmanlike, they would change the code for them. Maybe skirting around the trailer would be required. Anyway, a mobile home co-op of THESE mobile homes, perhaps that would be an easy sell... StephHolm

They are also affordable housing. ⁓Users/JabberWokky ʝ⍵

I was talking to a buddy of mine; we can make them like as seen on that site for roughly half the cost. Daubert

That's pretty much the point. They hold workshops to encourage people to build their own, and encourage using reclaimed wood and similar techniques. ⁓Users/JabberWokky ʝ⍵

One client from Portland Alternative Dwellings HAD HERS built by PAD for 33k. That is significantly less than Tumbleweed building a Tumbleweed.

Then, a lady who built her own Tumbleweed in Washington, I think, did so for about $7,000. Obviously there is a cost difference between building it with the plumbing and electrical of an RV and the plumbing and electrical for composting/solar/propane or whatnot, but... anyway, here in 2012, which is the International Year of Cooperatives, maybe a special grant could be gotten, or something. Some kind of featured project. I guess I would have to just reserve my spot now, since I won't have my tiny house for a while :) StephHolm

20120130 14:28:36: There are no rules on the wiki, only agreements between individuals. So, if your store seems relevant, then it's fine. It's a collaborative effort among people who have equal rights to the content. (Okay, there are actually a couple rules that the IRS says we have to abide by to stay nonprofit, but those seldom come up... make it about the business rather than a commercial, and you're fine). Users/JabberWokky

20120130 18:25:32: There is also a list of Davisite-owned websites. Users/StevenDaubert

20120130 19:16:09: Looks like JW and SD already offered their opinions, so I'll add mine: go for it! I don't see why a Davis-based online business is that different from any other Davis-based business. In fact, when Alphabet Moon closed and cited competition from the Internet as one of their reasons for closing, I thought, geez, they could have had an online store and offered free delivery within Davis (one possibility: hire students on bicycles). So, glad to hear that other Davis businesses are doing that. (Of course, competition from Target is another story, but I won't get into that). Users/CovertProfessor

20120202 06:20:48: Hey, what's on the Facebook page? I can't see it. Users/JabberWokky

20120202 12:52:19: I don't have a Facebook account. Is there any way to visit without one? Users/JabberWokky

20120205 18:49:38: Steph, it looks like the Tiny House page started to get converted to third-person usage by putting quotes around some first-person phrases, but the article hasn't been completely converted. It's possible that might help the movement. I can see of course that you're one of Davis's most ENTHUSIASTIC members. Users/BruceHansen

20120206 00:07:37: Bruce: I would be shocked if no one else in Davis is considering one of these homes or planning on building one! I was thinking I should mosey down to the Community Cooperative Network and pick some brains about how to get a village started :) Users/StephHolm

You could also try the Davis Community Network. An active member of the Board of Directors that I met and could be helpful is G. Richard Yamagata. Users/BruceHansen

20120206 12:01:21: Yahoo is kind of seen as dying (as a company). Their groups have gotten so erratic that a large national group I'm a member of that depends on them is actively looking for other options due to dropped and heavily delayed messages. I'd avoid Yahoo at this point. You could always just wiki:wikispot:create a wiki... when you do so, you can choose to require logins to edit or not. Note that I'm not saying create a wiki page but rather a new wiki, like Davis Wiki or Sacramento Wiki. That said, a wiki is a great resource, but does not have mailing lists and the like that are really useful for a more focused group. Google Groups has everything you'd need, but they are changing things around across the board this year. In the past they have allowed you to use them without a Google account (like Yahoo), but I don't know if that will be the case soon.

I think so, but I'd verify before committing. Google does have the benefit of having their http://dataliberation.org Data Liberation working group that makes sure you can always leave Google for another service. There's a big discussion I'm supposed to be following (as I'm the member representative for our local group) about replacing Yahoo. I'll read through it and see what good online community resources have been suggested. Users/JabberWokky

20120206 12:47:11: It's not so much that as the fact that I can't get any information about the group without signing up to a service. That's kind of the opposite of the point of the world wide web and publishing information. You're publishing information about a group and then hiding it away. Seems like it should be openly visible. Also, I don't think Google or Bing will index it if it's private, so people who are interested in tiny houses will have trouble finding it. Users/JabberWokky
{"hexsha": "8cbc147bf9f8ec223cf48a13ff64dad4b12abe76", "size": 15118, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/StephHolm.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/StephHolm.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/StephHolm.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 188.975, "max_line_length": 1091, "alphanum_fraction": 0.7887948141, "num_tokens": 3581}
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
import regex
import numpy as np

flags = tf.flags

FLAGS = flags.FLAGS

## Required parameters
flags.DEFINE_string(
    "data_dir", None,
    "The input data dir for the task.")

flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")

flags.DEFINE_string("task_name", None, "The name of the task to train.")

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")

## Other parameters
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

flags.DEFINE_bool("do_train", False, "Whether to run training.")

flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")

flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")

flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")

flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")

flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")

flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")

flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")

flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")

flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")

flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")


class InputExample(object):
  """A single training/test example for simple sequence classification."""

  def __init__(self, guid, tokens, label=None):
    """Constructs a InputExample.

    Args:
      guid: Unique id for the example.
      tokens: list of string. The tokenized text of the first sequence.
      label: (Optional) string. The label of the example. This should be
        specified for train and dev examples, but not for test examples.
    """
    self.guid = guid
    self.tokens = tokens
    self.label = label


class InputFeatures(object):
  """A single set of features of data."""

  def __init__(self,
               input_words,
               input_ids,
               input_mask,
               segment_ids,
               label_id,
               is_real_example=True):
    self.input_words = input_words
    self.input_ids = input_ids
    self.input_mask = input_mask
    self.segment_ids = segment_ids
    self.label_id = label_id
    self.is_real_example = is_real_example


class DataProcessor(object):
  """Base class for data converters for sequence classification data sets."""

  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    raise NotImplementedError()

  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    raise NotImplementedError()

  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()

  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()

  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file."""
    with tf.gfile.Open(input_file, "r") as f:
      reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
      lines = []
      for line in reader:
        lines.append(line)
      return lines


def get_label_info():
  label_list = ['B', 'I']
  label_map = {}
  rev_label_map = {}
  for (i, label) in enumerate(label_list):
    label_map[label] = i
    rev_label_map[i] = label
  return label_list, label_map, rev_label_map


class SequenceLabellingProcessor(DataProcessor):

  def _get_examples(self, data_dir, type):
    assert type in {"train", "dev", "test"}
    lines = [i for i in tf.gfile.Open(os.path.join(data_dir, "%s.txt" % type))]
    examples = []
    for (i, line) in enumerate(lines):
      guid = "%s-%d" % (type, i)
      tokens, label = self.gen_text_and_IOB2_label(line)
      examples.append(
          InputExample(guid=guid, tokens=tokens, label=label))
    return examples

  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    return self._get_examples(data_dir, "train")

  def gen_text_and_IOB2_label(self, line):
    line = tokenization.convert_to_unicode(line).strip()
    tokens = regex.split(ur'\p{Z}+', line)
    chars = []
    tags = []
    for t in tokens:
      if not t:
        continue
      chars.extend([i for i in t])
      tags.append('B')
      for _ in range(len(t) - 1):
        tags.append('I')
      assert len(chars) == len(tags), '%s:%s' % (line.encode('utf8'), t.encode('utf8'))
    return chars, tags

  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    return self._get_examples(data_dir, "dev")

  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    return self._get_examples(data_dir, "test")

  def get_labels(self):
    """Gets the list of labels for this data set."""
    return get_label_info()[0]


def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`."""
  label_map = get_label_info()[1]

  labels = example.label
  tokens_a = example.tokens
  # Account for [CLS] and [SEP] with "- 2"
  if len(tokens_a) > max_seq_length - 2:
    tokens_a = tokens_a[0:(max_seq_length - 2)]
  if labels:
    assert len(labels) == len(example.tokens)
    if len(labels) > max_seq_length - 2:
      labels = labels[:(max_seq_length - 2)]

  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the wordpiece
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  label_ids = map(lambda x: label_map[x], labels)
  tokens = []
  segment_ids = []
  tokens.append("[CLS]")
  label_ids = [0] + label_ids
  segment_ids.append(0)
  for token in tokens_a:
    tokens.append(token)
    segment_ids.append(0)
  tokens.append("[SEP]")
  segment_ids.append(0)
  label_ids.append(0)

  input_ids = tokenizer.convert_tokens_to_ids(tokens)

  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)

  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    tokens.append("[SEP]")
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
    label_ids.append(0)

  assert len(tokens) == max_seq_length
  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length
  assert len(label_ids) == max_seq_length

  if ex_index < 5:
    tf.logging.info("*** Example ***")
    tf.logging.info("guid: %s" % (example.guid))
    tf.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    tf.logging.info("label: %s " % ' '.join(example.label))
    tf.logging.info("label_ids: %s " % ' '.join([str(x) for x in label_ids]))

  feature = InputFeatures(
      input_words=tokens,
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_ids,
      is_real_example=True)
  return feature


def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Convert a set of `InputExample`s to a TFRecord file."""

  writer = tf.python_io.TFRecordWriter(output_file)

  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))

    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)

    def create_int_feature(values):
      f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
      return f

    def create_string_feature(values):
      values = map(lambda x: x if isinstance(x, str) else x.encode('utf8'),
                   values)
      return tf.train.Feature(bytes_list=tf.train.BytesList(value=list(values)))

    features = collections.OrderedDict()
    features["input_words"] = create_string_feature(feature.input_words)
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["label_ids"] = create_int_feature(feature.label_id)
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])

    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()


def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""

  name_to_features = {
      "input_words": tf.FixedLenFeature([seq_length], tf.string),
      "input_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([seq_length], tf.int64),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }

  def _decode_record(record, name_to_features):
    """Decodes a record to a TensorFlow example."""
    example = tf.parse_single_example(record, name_to_features)
    return example

  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]

    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = tf.data.TFRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)

    d = d.apply(
        tf.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))

    return d

  return input_fn


def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a seq labelling model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  with tf.variable_scope("finetune/seq"):
    # get sequence output
    final_hidden = model.get_sequence_output()

    final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
    batch_size = final_hidden_shape[0]
    seq_length = final_hidden_shape[1]
    hidden_size = final_hidden_shape[2]

    final_hidden = tf.reshape(final_hidden,
                              [batch_size * seq_length, hidden_size])

    output_weights = tf.get_variable(
        "output_weights", [num_labels, hidden_size],
        initializer=tf.truncated_normal_initializer(stddev=0.02))

    output_bias = tf.get_variable(
        "output_bias", [num_labels], initializer=tf.zeros_initializer())

    logits = tf.matmul(final_hidden, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)

    logits_out = tf.reshape(logits, [batch_size, seq_length, num_labels])
    probabilities = tf.nn.softmax(logits_out)
    log_probs = tf.nn.log_softmax(logits)

    labels = tf.reshape(labels, [-1])
    label_weights = tf.cast(tf.reshape(input_mask, [-1]), dtype=tf.float32)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    # Masked token-level cross-entropy: padding tokens get zero weight.
    per_example_loss = -tf.reduce_sum(
        log_probs * one_hot_labels, axis=[-1]) * label_weights
    numerator = tf.reduce_sum(per_example_loss)
    denominator = tf.reduce_sum(label_weights) + 1e-5
    loss = numerator / denominator
    per_example_loss = tf.reshape(per_example_loss, [batch_size, seq_length])

    return (loss, per_example_loss, logits_out, probabilities)


def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator."""

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
    else:
      is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids,
        label_ids, num_labels, use_one_hot_embeddings)

    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op, _lr = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps,
          use_tpu, True)
      logging_hook = tf.train.LoggingTensorHook(
          {"loss": total_loss,
           "learning_rate": _lr,
           'global_step': tf.train.get_global_step()},
          every_n_iter=100)

      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn,
          training_hooks=[logging_hook])
    elif mode == tf.estimator.ModeKeys.EVAL:

      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        # batch_size * sequence_len
        shape_list = modeling.get_shape_list(label_ids, expected_rank=2)
        label_ids = tf.reshape(label_ids, [-1])
        is_real_example = tf.tile(is_real_example[:, tf.newaxis],
                                  [1, shape_list[1]])
        is_real_example = tf.reshape(is_real_example, [-1])
        per_example_loss = tf.reshape(per_example_loss, [-1])
        # Keep the label dimension so argmax picks a class per token;
        # flattening to 1-D here (as the original code did) would make
        # argmax return a single scalar index over the whole batch.
        logits = tf.reshape(logits, [-1, num_labels])
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions,
            weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss,
                               weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities,
                       "input_words": features["input_words"],
                       "label_ids": label_ids},
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn


def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  for k, v in tf.flags.FLAGS.__flags.items():
    print(k, ':"', v.value, '"')

  processors = {
      "seq": SequenceLabellingProcessor,
  }

  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)

  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict' must be True.")

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))

  tf.gfile.MakeDirs(FLAGS.output_dir)

  task_name = FLAGS.task_name.lower()

  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))

  processor = processors[task_name]()

  label_list = processor.get_labels()

  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  tpu_cluster_resolver = None

  is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=None,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=None,
          per_host_input_for_training=is_per_host))

  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)

  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=False,
      use_one_hot_embeddings=False)

  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = tf.contrib.tpu.TPUEstimator(
      use_tpu=False,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)

  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer,
        train_file)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)

  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)

    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)

    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)

    # This tells the estimator to run through the entire set.
    eval_steps = None
    eval_drop_remainder = False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)

    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)

    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
      tf.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        tf.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))

  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)

    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)

    tf.logging.info("***** Running prediction*****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)

    predict_drop_remainder = False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      tf.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
        input_words = prediction["input_words"]
        golden_labels = prediction["label_ids"]
        if i >= num_actual_predict_examples:
          break
        output_line = "%s\n\n" % gen_iob2_str(input_words, probabilities,
                                              golden_labels)
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples


def gen_iob2_str(words, prob, golden_labels):
  label = np.argmax(prob, axis=-1)
  words = words.tolist()
  label = label.tolist()
  assert len(label) == len(words) == len(golden_labels)

  def get_actual_len(words):
    al = 0
    for w in words:
      if w == '[SEP]':
        break
      al += 1
    return al

  rev_label_map = get_label_info()[2]
  actual_len = get_actual_len(words)
  words = words[1:actual_len]
  label = map(lambda x: rev_label_map[x], label[1:actual_len])
  golden_labels = map(lambda x: rev_label_map[x], golden_labels[1:actual_len])
  return '\n'.join(map(lambda x: '%s\t%s\t%s' % x,
                       zip(words, golden_labels, label)))


if __name__ == "__main__":
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  tf.app.run()
{"hexsha": "a5d07d2732f7ea3966ad40c0c6539a3d5ce87876", "size": 27533, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_seq.py", "max_stars_repo_name": "quark-tech/quark_cws", "max_stars_repo_head_hexsha": "9105fbb4648ed7939456db45a9692ec849f4d9a8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-18T03:02:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-15T09:25:03.000Z", "max_issues_repo_path": "run_seq.py", "max_issues_repo_name": "quark-tech/quark_cws", "max_issues_repo_head_hexsha": "9105fbb4648ed7939456db45a9692ec849f4d9a8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-25T13:06:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-25T13:06:11.000Z", "max_forks_repo_path": "run_seq.py", "max_forks_repo_name": "quark-tech/quark_cws", "max_forks_repo_head_hexsha": "9105fbb4648ed7939456db45a9692ec849f4d9a8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-04T01:23:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-15T07:21:08.000Z", "avg_line_length": 38.8335684062, "max_line_length": 126, "alphanum_fraction": 0.6405041223, "include": true, "reason": "import numpy", "num_tokens": 6069}
theory Cell_Decomp_Theorem_Helpers imports Denef_Lemma_2_4 Denef_Lemma_2_3 Algebras_of_Cells begin locale common_decomp_proof_context = denef_I + denef_II locale common_refinement_locale = common_decomp_proof_context + fixes \<C> A c a1 a2 I f m assumes f_closed: "f \<in> carrier (UP (SA m))" assumes f_deg: " deg (SA m) f \<le> (Suc d)" assumes \<C>_def: "\<C> = Cond m A c a1 a2 I" assumes \<C>_cond: "is_cell_condition \<C>" assumes f_taylor_cfs: "\<And> i. (taylor_expansion (SA m) c f i = \<zero>\<^bsub>SA m\<^esub>) \<or> (taylor_expansion (SA m) c f i \<in> Units (SA m))" (**************************************************************************************************) (**************************************************************************************************) section\<open>Partitions by Zero Sets\<close> (**************************************************************************************************) (**************************************************************************************************) context padic_fields begin definition zero_set_partition where "zero_set_partition m Fs = atoms_of (gen_boolean_algebra (carrier (Q\<^sub>p\<^bsup>m\<^esup>)) (SA_zero_set m ` Fs))" lemma nonzero_set_as_diff: "SA_nonzero_set m f = (carrier (Q\<^sub>p\<^bsup>m\<^esup>)) - (SA_zero_set m f)" unfolding SA_nonzero_set_def SA_zero_set_def by auto lemma zero_set_partition_semialg: assumes "Fs \<subseteq> carrier (SA m)" assumes "finite Fs" assumes "a \<in> zero_set_partition m Fs" shows "is_semialgebraic m a" proof- have 0: "(SA_zero_set m ` Fs) \<subseteq> semialg_sets m" apply(rule subsetI) using SA_zero_set_is_semialg assms unfolding SA_zero_set_def is_semialgebraic_def by auto have "zero_set_partition m Fs \<subseteq> semialg_sets m" unfolding semialg_sets_def zero_set_partition_def apply(rule atoms_of_gen_boolean_algebra, rule gen_boolean_algebra_subalgebra) using 0 unfolding semialg_sets_def apply blast apply(rule gen_boolean_algebra_finite) using assms by auto thus ?thesis using assms using is_semialgebraicI by auto qed lemma partition_by_zero_sets: assumes "finite Fs" assumes "Fs \<subseteq> carrier (SA m)" assumes "a \<in> zero_set_partition m Fs" assumes "f \<in> Fs" shows "(\<forall> x \<in> a. f x = \<zero>) \<or> (\<forall> x \<in> a. f x \<noteq> \<zero>)" proof(cases "a \<subseteq> SA_zero_set m f") case True then show ?thesis unfolding SA_zero_set_def by auto next case False have F0: "SA_zero_set m f \<in> (gen_boolean_algebra (carrier (Q\<^sub>p\<^bsup>m\<^esup>)) (SA_zero_set m ` Fs))" apply(rule generator_closed) using assms unfolding SA_zero_set_def by auto then have F1: "a \<inter> SA_zero_set m f = {}" using assms atoms_are_minimal[of a _ "SA_zero_set m f"] False unfolding zero_set_partition_def by blast have "a \<subseteq> SA_nonzero_set m f" unfolding nonzero_set_as_diff apply(intro atom_in_comp[of "SA_zero_set m ` Fs" _ _ "SA_zero_set m f"] F1 F0) using assms unfolding zero_set_partition_def by auto thus ?thesis unfolding SA_nonzero_set_def by auto qed lemma of_gen_boolean_algebra_un: "\<Union> (gen_boolean_algebra S Xs) = S" using gen_boolean_algebra_subset[of _ S Xs] gen_boolean_algebra.universe[of S Xs] by auto lemma gen_boolean_algebra_atom_un: assumes "finite Xs" assumes "Y \<in> gen_boolean_algebra S Xs" shows "Y = \<Union> {a \<in> atoms_of (gen_boolean_algebra S Xs). 
a \<subseteq> Y}" by(intro gen_boolean_algebra_elem_uni_of_atoms[of "gen_boolean_algebra S Xs" S] gen_boolean_algebra_finite assms, unfold of_gen_boolean_algebra_un gen_boolean_algebra_idempotent, auto simp: assms) lemma gen_boolean_algebra_atoms_cover: assumes "finite Xs" shows "S = \<Union> (atoms_of (gen_boolean_algebra S Xs))" using assms gen_boolean_algebra_atom_un[of Xs S S] by (simp add: atoms_of_covers' of_gen_boolean_algebra_un) lemma induced_partition: assumes "Xs partitions S" assumes "Y \<subseteq> S" shows "(\<inter>) Y ` Xs partitions Y" apply(intro is_partitionI disjointI) using assms is_partitionE disjointE apply (smt (verit, best) Sup_upper boolean_algebra_cancel.inf1 image_iff inf.absorb_iff1 inf_Sup inf_bot_right) using assms by (metis inf.orderE inf_Sup is_partitionE(2)) lemma partition_by_zero_sets_covers: assumes "finite Fs" shows "carrier (Q\<^sub>p\<^bsup>m\<^esup>) = \<Union> (zero_set_partition m Fs)" unfolding zero_set_partition_def apply(rule gen_boolean_algebra_atoms_cover) using assms by blast lemma partition_by_zero_sets_disjoint: assumes "finite Fs" shows "disjoint (zero_set_partition m Fs)" apply(rule disjointI) using assms unfolding zero_set_partition_def by (simp add: atoms_of_disjoint) lemma partition_by_zero_sets_partitions: assumes "finite Fs" shows "(zero_set_partition m Fs) partitions (carrier (Q\<^sub>p\<^bsup>m\<^esup>))" apply(rule is_partitionI) using partition_by_zero_sets_covers partition_by_zero_sets_disjoint assms by auto definition poly_cfs_car_part where "poly_cfs_car_part m f = zero_set_partition m (f ` {..deg (SA m) f})" lemma poly_cfs_car_part_semialg: assumes "f \<in> carrier (UP (SA m))" assumes "a \<in> poly_cfs_car_part m f" shows "is_semialgebraic m a" apply(rule zero_set_partition_semialg[of "f ` {..deg (SA m) f}"]) using assms cfs_closed poly_cfs_car_part_def by auto lemma poly_cfs_car_part_memE: assumes "f \<in> carrier (UP (SA m))" assumes "a \<in> poly_cfs_car_part m f" shows "(\<forall> x \<in> a. f i x = \<zero>) \<or> (\<forall> x \<in> a. f i x \<noteq> \<zero>)" proof(cases "i > deg (SA m) f") case True then have T0: "f i = \<zero>\<^bsub>SA m\<^esub>" using assms UPSA.deg_leE by blast have "a \<subseteq> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms poly_cfs_car_part_semialg is_semialgebraic_closed by presburger then have T1: "(\<forall>x\<in>a. \<zero>\<^bsub>SA m\<^esub> x = \<zero>)" using SA_zeroE by auto show ?thesis unfolding T0 using T1 by auto next case False show ?thesis apply(intro partition_by_zero_sets[of "f ` {..deg (SA m) f}" m]) using False assms cfs_closed poly_cfs_car_part_def by auto qed lemma poly_cfs_car_part_finite: "finite (poly_cfs_car_part m f)" unfolding poly_cfs_car_part_def zero_set_partition_def apply(rule atoms_finite) by auto lemma poly_cfs_car_part_covers: "carrier (Q\<^sub>p\<^bsup>m\<^esup>) = \<Union> (poly_cfs_car_part m f)" using gen_boolean_algebra_elem_uni_of_atoms unfolding poly_cfs_car_part_def zero_set_partition_def using partition_by_zero_sets_covers zero_set_partition_def by force definition poly_cfs_part where "poly_cfs_part m f A = ((\<inter>) A ` (poly_cfs_car_part m f)) - {{}}" lemma poly_cfs_part_subset: "\<And> a. 
a \<in> poly_cfs_part m f A \<Longrightarrow> a \<subseteq> A" unfolding poly_cfs_part_def by auto lemma partition_minus_empty: assumes "As partitions A" shows "(As - {{}}) partitions A" apply(rule is_partitionI) using assms is_partitionE disjointE disjointI apply fastforce using assms is_partitionE(2) by auto[1] lemma poly_cfs_part_partitions: assumes "A \<subseteq> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" shows "(poly_cfs_part m f A) partitions A" unfolding poly_cfs_part_def poly_cfs_car_part_def apply(intro partition_minus_empty) apply(intro induced_partition[of _ "carrier (Q\<^sub>p\<^bsup>m\<^esup>)"] ) by(rule partition_by_zero_sets_partitions, auto simp: assms) lemma poly_cfs_part_finite: "finite (poly_cfs_part m f A)" unfolding poly_cfs_part_def using poly_cfs_car_part_finite by auto lemma poly_cfs_part_memE: assumes "f \<in> carrier (UP (SA m))" assumes "a \<in> poly_cfs_part m f A" shows "(\<forall> x \<in> a. f i x = \<zero>) \<or> (\<forall> x \<in> a. f i x \<noteq> \<zero>)" using poly_cfs_car_part_memE[of f m _ i] assms unfolding poly_cfs_part_def by auto lemma poly_cfs_part_semialg: assumes "is_semialgebraic m A" assumes "f \<in> carrier (UP (SA m))" assumes "a \<in> poly_cfs_part m f A" shows "is_semialgebraic m a" proof- obtain a' where a'_def: "a' \<in> poly_cfs_car_part m f \<and> a = A \<inter> a'" using assms poly_cfs_part_def by auto thus ?thesis using assms poly_cfs_part_def intersection_is_semialg poly_cfs_car_part_semialg by auto qed definition poly_unit_replacement where "poly_unit_replacement m f a = (\<lambda> i::nat. if (\<forall> x \<in> a \<inter> (carrier (Q\<^sub>p\<^bsup>m\<^esup>)). f i x = \<zero>) then \<zero>\<^bsub>SA m\<^esub> else to_fun_unit m (f i))" lemma poly_unit_replacement_dichotomy: assumes "f \<in> carrier (UP (SA m))" assumes "is_semialgebraic m a" shows "\<And>i. poly_unit_replacement m f a i = \<zero>\<^bsub>SA m\<^esub> \<or> poly_unit_replacement m f a i \<in> Units (SA m) " unfolding poly_unit_replacement_def using assms to_fun_unit_is_unit cfs_closed by auto lemma poly_unit_replacement_cfs_closed: assumes "f \<in> carrier (UP (SA m))" shows "poly_unit_replacement m f a i \<in> carrier (SA m)" apply(cases "\<forall> x \<in> a \<inter> (carrier (Q\<^sub>p\<^bsup>m\<^esup>)). f i x = \<zero>", unfold poly_unit_replacement_def) using assms to_fun_unit_closed[of "f i" m] cfs_closed[of f m i] by auto lemma poly_unit_replacement_above_deg: assumes "f \<in> carrier (UP (SA m))" assumes "i > deg (SA m) f" shows "poly_unit_replacement m f a i = \<zero>\<^bsub>SA m\<^esub>" proof- have "f i = \<zero>\<^bsub>SA m\<^esub>" using assms UPSA.deg_leE by blast hence "\<forall>x\<in>a \<inter> carrier (Q\<^sub>p\<^bsup>m\<^esup>). f i x = \<zero>" using SA_zeroE by auto thus ?thesis unfolding poly_unit_replacement_def by auto qed lemma poly_unit_replacement_closed: assumes "f \<in> carrier (UP (SA m))" shows "poly_unit_replacement m f a \<in> carrier (UP (SA m))" apply(intro UP_car_memI[of "deg (SA m) f"]) apply(intro poly_unit_replacement_above_deg assms, auto) by(rule poly_unit_replacement_cfs_closed, rule assms) lemma poly_unit_replacement_cfs1: assumes "f \<in> carrier (UP (SA m))" assumes "(\<forall> x \<in> a. 
f i x = \<zero>)" shows "poly_unit_replacement m f a i = \<zero>\<^bsub>SA m\<^esub>" using assms unfolding poly_unit_replacement_def by auto lemma poly_unit_replacement_deg: assumes "f \<in> carrier (UP (SA m))" shows "deg (SA m) (poly_unit_replacement m f a) \<le> deg (SA m) f" apply(rule deg_leqI) apply (simp add: assms poly_unit_replacement_closed) by (simp add: UPSA.deg_leqI assms padic_fields.poly_unit_replacement_closed padic_fields_axioms poly_unit_replacement_above_deg) lemma poly_unit_replacement_cfs2: assumes "f \<in> carrier (UP (SA m))" assumes "(\<forall> x \<in> a. f i x \<noteq> \<zero>)" assumes "is_semialgebraic m a" assumes "a \<noteq> {}" shows "poly_unit_replacement m f a i = (to_fun_unit m (f i))" proof- have "\<not> (\<forall>x\<in>a \<inter> carrier (Q\<^sub>p\<^bsup>m\<^esup>). f i x = \<zero>)" using assms is_semialgebraic_closed by blast thus ?thesis unfolding poly_unit_replacement_def by auto qed lemma poly_unit_replacement_on_cfs_part: assumes "is_semialgebraic m A" assumes "f \<in> carrier (UP (SA m))" assumes "a \<in> poly_cfs_part m f A" shows "poly_unit_replacement m f a \<in> carrier (UP (SA m))" "\<And>x. x \<in> a \<Longrightarrow> poly_unit_replacement m f a i x = f i x" "\<And>x. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x f = SA_poly_to_Qp_poly m x (poly_unit_replacement m f a)" proof- have a_closed: "a \<subseteq> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms poly_cfs_part_subset is_semialgebraic_closed by blast have a_nonempty: "a \<noteq> {}" using assms unfolding poly_cfs_part_def by auto show 1: "poly_unit_replacement m f a \<in> carrier (UP (SA m))" using assms by (simp add: poly_unit_replacement_closed) show 2: "\<And>x i. x \<in> a \<Longrightarrow> poly_unit_replacement m f a i x = f i x" proof- fix x i assume A: "x \<in> a" show "poly_unit_replacement m f a i x = f i x" proof(cases "(\<forall> x \<in> a. f i x = \<zero>)") case True show ?thesis using a_closed assms A True poly_unit_replacement_def SA_zeroE by (simp add: Set.basic_monos(7)) next case False hence "(\<forall> x \<in> a. f i x \<noteq> \<zero>)" using assms by (meson poly_cfs_part_memE) have "\<not> (\<forall> x \<in> a \<inter> carrier (Q\<^sub>p\<^bsup>m\<^esup>). f i x = \<zero>)" using a_nonempty a_closed by (simp add: False Int_absorb2) hence "poly_unit_replacement m f a i = to_fun_unit m (f i)" unfolding poly_unit_replacement_def by auto then show ?thesis using a_closed assms poly_unit_replacement_cfs2 to_fun_unit_eq[of "f i" m] A UPSA.UP_car_memE(1) \<open>\<forall>x\<in>a. f i x \<noteq> \<zero>\<close> by auto qed qed show 3: "\<And>x. 
x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x f = SA_poly_to_Qp_poly m x (poly_unit_replacement m f a)" proof- fix x assume A: "x \<in> a" show "SA_poly_to_Qp_poly m x f = SA_poly_to_Qp_poly m x (poly_unit_replacement m f a)" proof(rule ext) fix j have 30: "SA_poly_to_Qp_poly m x f j = f j x" using SA_poly_to_Qp_poly_coeff[of _ m f j] A assms(2) local.a_closed by auto have 31: "SA_poly_to_Qp_poly m x (poly_unit_replacement m f a) j = (poly_unit_replacement m f a) j x" using a_closed assms 1 2 SA_poly_to_Qp_poly_coeff[of _ m f j] A SA_poly_to_Qp_poly_coeff[of _ m "poly_unit_replacement m f a" j] by blast show "SA_poly_to_Qp_poly m x f j = SA_poly_to_Qp_poly m x (poly_unit_replacement m f a) j" unfolding 30 31 using 1 2[of x j] A by auto qed qed qed lemma(in UP_cring) taylor_expansion_inv: assumes "f \<in> carrier (UP R)" assumes "c \<in> carrier R" shows "f = taylor_expansion R (\<ominus>c) (taylor_expansion R c f)" "f = taylor_expansion R c (taylor_expansion R (\<ominus>c) f)" proof- have 0: "\<And> c. c \<in> carrier R \<Longrightarrow> f = taylor_expansion R (\<ominus>c) (taylor_expansion R c f)" proof- fix x c assume A: "c \<in> carrier R" have 0: "X_poly_minus R c = X_poly_plus R (\<ominus> c)" by (simp add: A UP_cring.X_minus_plus assms(2) is_UP_cring) have 1: "f = Cring_Poly.compose R (taylor c f) (X_poly_minus R c)" using A taylor_id[of c f] assms P_def by fastforce show "f = taylor_expansion R (\<ominus>c) (taylor_expansion R c f)" using 1 0 A unfolding taylor_expansion_def taylor_def by auto qed show "f = taylor_expansion R (\<ominus>c) (taylor_expansion R c f)" by(intro 0 assms) show "f = taylor_expansion R c (taylor_expansion R (\<ominus>c) f)" using 0[of "\<ominus> c"] assms by auto qed lemma(in UP_cring) taylor_expansion_closed: assumes "f \<in> carrier (UP R)" assumes "c \<in> carrier R" shows "taylor_expansion R c f \<in> carrier (UP R)" using assms taylor_closed[of f c] unfolding P_def taylor_def by auto lemma poly_unit_replacement_deg_lemma: assumes "is_semialgebraic m A" assumes "f \<in> carrier (UP (SA m))" assumes "c \<in> carrier (SA m)" assumes "a \<in> poly_cfs_part m (taylor_expansion (SA m) c f) A" assumes "g = taylor_expansion (SA m) (\<ominus>\<^bsub>SA m\<^esub> c) (poly_unit_replacement m (taylor_expansion (SA m) c f) a)" shows "deg (SA m) g \<le> deg (SA m) f" proof- have 0: "deg (SA m) f = deg (SA m) (taylor_expansion (SA m) c f)" using assms UPSA.taylor_def UPSA.taylor_deg by presburger have 1: "deg (SA m) f \<ge> deg (SA m) (poly_unit_replacement m (taylor_expansion (SA m) c f) a)" unfolding 0 using assms by (meson UPSA.taylor_expansion_closed poly_unit_replacement_deg) thus ?thesis unfolding assms using 1 assms UPSA.taylor_deg UPSA.taylor_def UPSA.taylor_deg unfolding UPSA.taylor_def using R.cring_simprules(3) UPSA.taylor_expansion_closed padic_fields.poly_unit_replacement_closed padic_fields_axioms by presburger qed lemma poly_unit_replacement_on_cfs_part_taylor: assumes "is_semialgebraic m A" assumes "f \<in> carrier (UP (SA m))" assumes "c \<in> carrier (SA m)" assumes "a \<in> poly_cfs_part m (taylor_expansion (SA m) c f) A" assumes "g = taylor_expansion (SA m) (\<ominus>\<^bsub>SA m\<^esub> c) (poly_unit_replacement m (taylor_expansion (SA m) c f) a)" shows "g \<in> carrier (UP (SA m))" "\<And>x i . x \<in> a \<Longrightarrow> g i x = f i x" "\<And> x i. x \<in> a \<Longrightarrow> UPSA.pderiv m g i x = UPSA.pderiv m f i x" "\<And>x. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x g = SA_poly_to_Qp_poly m x f" "\<And>x. 
x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x (UPSA.pderiv m g) = SA_poly_to_Qp_poly m x (UPSA.pderiv m f)" "\<And> i. taylor_expansion (SA m) c g i = \<zero>\<^bsub>SA m\<^esub> \<or> taylor_expansion (SA m) c g i \<in> Units (SA m)" proof- obtain h where h_def: "h = (poly_unit_replacement m (taylor_expansion (SA m) c f) a)" by blast show g_closed: "g \<in> carrier (UP (SA m))" unfolding assms apply(intro taylor_expansion_closed poly_unit_replacement_closed) using assms by auto have taylor: "taylor_expansion (SA m) c f \<in> carrier (UP (SA m))" using assms UPSA.taylor_closed UPSA.taylor_def by force have a_sub: "a \<subseteq> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms poly_cfs_part_subset[of a m "taylor_expansion (SA m) c f" A] taylor is_semialgebraic_closed by auto have h_props: "h \<in> carrier (UP (SA m))" "\<And>x i. x \<in> a \<Longrightarrow> h i x = taylor_expansion (SA m) c f i x" "\<And>x i. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f) = SA_poly_to_Qp_poly m x h" proof- show "h \<in> carrier (UP (SA m))" unfolding h_def by(intro poly_unit_replacement_on_cfs_part[of m A "taylor_expansion (SA m) c f" a] taylor assms) show " \<And>x i. x \<in> a \<Longrightarrow> h i x = taylor_expansion (SA m) c f i x" unfolding h_def by(intro poly_unit_replacement_on_cfs_part[of m A "taylor_expansion (SA m) c f" a] taylor assms, auto) show "\<And>x i. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f) = SA_poly_to_Qp_poly m x h" unfolding h_def by(intro poly_unit_replacement_on_cfs_part[of m A "taylor_expansion (SA m) c f" a] taylor assms, auto) qed show 1: "\<And>x. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x g = SA_poly_to_Qp_poly m x f" proof- fix x assume A: "x \<in> a" have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using a_sub A by auto have 0: "SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f) = SA_poly_to_Qp_poly m x h" using h_props A by auto have 1: "f = taylor_expansion (SA m) (\<ominus>\<^bsub>SA m\<^esub> c) (taylor_expansion (SA m) c f)" by(intro taylor_expansion_inv assms) have 2: "SA_poly_to_Qp_poly m x (taylor_expansion (SA m) (\<ominus>\<^bsub>SA m\<^esub> c) (taylor_expansion (SA m) c f)) = taylor_expansion Q\<^sub>p ((\<ominus>\<^bsub>SA m\<^esub> c) x) (SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f))" apply(intro SA_poly_to_Qp_poly_taylor_poly taylor) using assms apply auto[1] using a_sub A by auto hence 3: "SA_poly_to_Qp_poly m x f = taylor_expansion Q\<^sub>p ((\<ominus>\<^bsub>SA m\<^esub> c) x) (SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f))" using 1 by auto have 4: "SA_poly_to_Qp_poly m x g = taylor_expansion Q\<^sub>p ((\<ominus>\<^bsub>SA m\<^esub> c) x) (SA_poly_to_Qp_poly m x (poly_unit_replacement m (taylor_expansion (SA m) c f) a))" unfolding assms apply(intro SA_poly_to_Qp_poly_taylor_poly x_closed) using h_props assms unfolding h_def by auto have 5: " (SA_poly_to_Qp_poly m x (taylor_expansion (SA m) c f)) = (SA_poly_to_Qp_poly m x (poly_unit_replacement m (taylor_expansion (SA m) c f) a))" using A h_props h_def by auto show "SA_poly_to_Qp_poly m x g = SA_poly_to_Qp_poly m x f " unfolding 3 4 5 by auto qed show 2: "\<And>x i. 
x \<in> a \<Longrightarrow> g i x = f i x" proof- fix i x assume A: "x \<in> a" then have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using a_sub by auto show "g i x = f i x" using 1[of x] g_closed assms(2) x_closed A SA_poly_to_Qp_poly_coeff[of x m f i] SA_poly_to_Qp_poly_coeff[of x m g i] by auto qed have h_eq: "h = taylor_expansion (SA m) c g" unfolding h_def assms apply(rule taylor_expansion_inv) using assms h_props h_def by auto show "\<And>i. taylor_expansion (SA m) c g i = \<zero>\<^bsub>SA m\<^esub> \<or> taylor_expansion (SA m) c g i \<in> Units (SA m)" using assms taylor_closed h_def h_eq poly_cfs_part_semialg poly_unit_replacement_dichotomy padic_fields_axioms taylor by presburger have derivs_closed: "UPSA.pderiv m g \<in> carrier (UP (SA m))" "UPSA.pderiv m f \<in> carrier (UP (SA m))" by(auto simp: UPSA.pderiv_closed g_closed assms(2)) show 3: "\<And>x i. x \<in> a \<Longrightarrow> UPSA.pderiv m g i x = UPSA.pderiv m f i x" proof- fix i x assume A: "x \<in> a" then have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using a_sub by auto have p: "g (Suc i) x = f (Suc i) x" by(intro A 2) have q: "UPSA.pderiv m g i = [Suc i] \<cdot>\<^bsub>SA m\<^esub> g (Suc i)" "UPSA.pderiv m f i = [Suc i] \<cdot>\<^bsub>SA m\<^esub> f (Suc i)" using g_closed assms(2) x_closed A derivs_closed UPSA.pderiv_cfs[of g m i] UPSA.pderiv_cfs[of f m i] by auto have r: "([Suc i] \<cdot>\<^bsub>SA m\<^esub> g (Suc i)) x = [Suc i] \<cdot> g (Suc i) x" "([Suc i] \<cdot>\<^bsub>SA m\<^esub> f (Suc i)) x = [Suc i] \<cdot> f (Suc i) x" using p x_closed cfs_closed[of f m "Suc i"] SA_add_pow_apply[of "g (Suc i)" m x "Suc i"] cfs_closed[of g m "Suc i"] SA_add_pow_apply[of "f (Suc i)" m x "Suc i"] g_closed assms by auto show "UPSA.pderiv m g i x = UPSA.pderiv m f i x" unfolding p q r by auto qed show "\<And>x. x \<in> a \<Longrightarrow> SA_poly_to_Qp_poly m x (UPSA.pderiv m g) = SA_poly_to_Qp_poly m x (UPSA.pderiv m f)" proof fix x i assume A: "x \<in> a" then have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using a_sub by auto have p: "UPSA.pderiv m g i x = UPSA.pderiv m f i x" using A 3 by auto show "SA_poly_to_Qp_poly m x (UPSA.pderiv m g) i = SA_poly_to_Qp_poly m x (UPSA.pderiv m f) i" using SA_poly_to_Qp_poly_coeff[of x m "UPSA.pderiv m g" i] SA_poly_to_Qp_poly_coeff[of x m "UPSA.pderiv m f" i] derivs_closed x_closed unfolding p by auto qed qed definition decomp_by_cfs where "decomp_by_cfs m f \<C> = (\<lambda> C. refine_fibres \<C> C) ` poly_cfs_part m f (fibre_set \<C>)" lemma decomp_by_cfs_is_decomp: assumes "f \<in> carrier (UP (SA m))" assumes "is_cell_condition \<C>" assumes "arity \<C> = m" shows "is_cell_decomp m (decomp_by_cfs m f \<C>) (condition_to_set \<C>)" proof- obtain C c a1 a2 I where params: "\<C> = Cond m C c a1 a2 I" using assms arity.simps by (meson equal_CondI) have C_semialg: "is_semialgebraic m C" using assms params is_cell_conditionE by smt have C_closed: "C \<subseteq> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using C_semialg is_semialgebraic_closed by auto have sa: "\<And> x. 
x \<in> poly_cfs_part m f C \<Longrightarrow> is_semialgebraic m x" by(rule poly_cfs_part_semialg[of _ C f], intro C_semialg, rule assms, auto) show ?thesis unfolding decomp_by_cfs_def apply(intro partition_to_cell_decomp[of \<C> m C c a1 a2 I] assms params) unfolding params fibre_set.simps apply(intro poly_cfs_part_partitions C_closed) unfolding are_semialgebraic_def using sa apply blast by(rule poly_cfs_part_finite) qed lemma decomp_by_cfs_params: assumes "\<B> \<in> (decomp_by_cfs m f (Cond m C c a1 a2 I))" shows "center \<B> = c" "l_bound \<B> = a1" "u_bound \<B> = a2" "boundary_condition \<B> = I" using assms unfolding decomp_by_cfs_def refine_fibres_def by auto end (**************************************************************************************************) (**************************************************************************************************) subsection\<open>Cell Decomposition Properties are Hereditary (up to Common Centers)\<close> (**************************************************************************************************) (**************************************************************************************************) context common_decomp_proof_context begin lemma SA_poly_ubounded_mono: assumes "SA_poly_ubounded p m f c A N" assumes "B \<subseteq> A" shows "SA_poly_ubounded p m f c B N" using assms proof - have f1: "\<forall>R Ra rs. (\<not> R \<subseteq> Ra \<or> (rs::((nat \<Rightarrow> int) \<times> (nat \<Rightarrow> int)) set list) \<notin> R) \<or> rs \<in> Ra" by blast have f2: "A \<subseteq> carrier (Frac (padic_int p)\<^bsup>Suc m\<^esup>)" by (meson SA_poly_ubounded.A_closed assms(1)) have "\<forall>i n f fa R ia. SA_poly_ubounded_axioms i n f fa R ia = (f \<in> carrier (UP (padic_fields.SA i n)) \<and> fa \<in> carrier (padic_fields.SA i n) \<and> R \<subseteq> carrier (Frac (padic_int i)\<^bsup>Suc n\<^esup>) \<and> (\<forall>na rs r. 
r # rs \<notin> R \<or> padic_fields.val i (UP_cring.to_fun (Frac (padic_int i)) (padic_fields.SA_poly_to_Qp_poly i n rs f) r) \<le> padic_fields.val i (UP_cring.to_fun (Frac (padic_int i)) (UP_cring.taylor_term (Frac (padic_int i)) (fa rs) (padic_fields.SA_poly_to_Qp_poly i n rs f) na) r) + eint ia))"
    using SA_poly_ubounded_axioms_def by presburger
  then have "SA_poly_ubounded_axioms p m f c B N"
    using f2 f1 SA_poly_ubounded.P_closed SA_poly_ubounded.c_closed SA_poly_ubounded.ubounded assms(1) assms(2) by force
  then show ?thesis
    by (simp add: SA_poly_ubounded.intro padic_fields_axioms)
qed

end

(**************************************************************************************************)
(**************************************************************************************************)
subsection\<open>Reducing the Proof to the Sets $A_0$ and Its Complement\<close>
(**************************************************************************************************)
(**************************************************************************************************)

context common_refinement_locale
begin

lemma \<C>_memE:
  assumes "x \<in> condition_to_set \<C>"
  shows "tl x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" "hd x \<in> carrier Q\<^sub>p" "tl x \<in> A"
  using assms unfolding \<C>_def condition_to_set.simps cell_def mem_Collect_eq
  apply (meson cartesian_power_tail)
  apply (metis Qp_pow_ConsE(2) assms cell_condition_set_memE(1) common_refinement_locale.\<C>_cond common_refinement_locale.\<C>_def common_refinement_locale_axioms)
  using \<C>_cond \<C>_def assms condition_to_set_memE(1) by presburger

lemma c_closed: "c \<in> carrier (SA m)"
  using \<C>_cond is_cell_conditionE(2) unfolding \<C>_def by blast

lemma a1_closed: "a1 \<in> carrier (SA m)"
  using \<C>_cond unfolding \<C>_def by fastforce

lemma a2_closed: "a2 \<in> carrier (SA m)"
  using \<C>_cond unfolding \<C>_def by (meson is_cell_conditionE''(7))

lemma A_semialg: "is_semialgebraic m A"
  using \<C>_cond unfolding \<C>_def by simp

text\<open>To match the text, and for brevity, we give a name to the Taylor coefficients of $f$ expanded at $c$.\<close>

definition a where "a = taylor_expansion (SA m) c f"

lemma a_closed: "a \<in> carrier (UP (SA m))"
  unfolding a_def
  by (metis c_closed UPSA.taylor_def UP_cring.taylor_closed UP_cring_def f_closed padic_fields.SA_is_cring padic_fields_axioms)

lemma a_cfs_closed: "a i \<in> carrier (SA m)"
  by (meson UPSA.UP_car_memE(1) local.a_closed)

lemma a_deg: "deg (SA m) a = deg (SA m) f"
  unfolding a_def using c_closed UPSA.taylor_def UPSA.taylor_deg f_closed by force

lemma a_eval:
  assumes "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)"
  shows "a i x \<in> carrier Q\<^sub>p"
  by(intro SA_car_closed[of _ m] a_cfs_closed assms)

text\<open>The set of indices of the nonzero Taylor coefficients:\<close>

definition inds where "inds = {i.
a i \<in> Units (SA m) }" lemma inds_bounded: "i \<in> inds \<Longrightarrow> i \<le> deg (SA m) f" unfolding inds_def mem_Collect_eq by (metis SA_units_not_zero UPSA.deg_eqI a_deg le_cases local.a_closed) lemma inds_bounded': "i \<in> inds \<Longrightarrow> i \<le> Suc d" by (meson f_deg inds_bounded le_trans) lemma inds_finite: "finite inds" by (meson finite_nat_set_iff_bounded_le inds_bounded) lemma inds_memE: "i \<in> inds \<Longrightarrow> a i \<in> Units (SA m)" using inds_def by blast lemma inds_non_memE: "i \<notin> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> a i x = \<zero>" by (metis SA_zeroE a_def f_taylor_cfs inds_def mem_Collect_eq) definition ind_pairs where "ind_pairs = {(i, j) \<in> inds \<times> inds. i \<noteq> j}" lemma finite_ind_pairs: "finite (ind_pairs)" apply(rule finite_subset[of ind_pairs "inds \<times>inds"]) unfolding ind_pairs_def apply blast using inds_finite by blast lemma a_quotient_closed: "\<And>i j. i \<in> inds \<Longrightarrow> j \<in> inds \<Longrightarrow> (a j) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> (a i) \<in> carrier (SA m)" using inds_memE by blast lemma a_quotient_unit: "\<And>i j. i \<in> inds \<Longrightarrow> j \<in> inds \<Longrightarrow> (a j) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> (a i) \<in> Units (SA m)" using inds_memE by blast lemma f_eval_formula: "\<And>x t. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> t \<in> carrier Q\<^sub>p \<Longrightarrow> SA_poly_to_SA_fun m f (t#x) = (\<Oplus>i\<in>inds. (a i x)\<otimes>(t \<ominus> c x)[^]i)" proof- fix x t assume a: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" "t \<in> carrier Q\<^sub>p" have 0: "SA_poly_to_SA_fun m f (t#x) = (\<Oplus>i\<in>{..deg (SA m) f}. (a i (tl (t#x)))\<otimes>(hd (t#x) \<ominus> c (tl (t#x)))[^]i)" unfolding a_def apply(rule SA_poly_to_SA_fun_taylor_expansion) apply (simp add: f_closed) apply (simp add: c_closed) by (simp add: Qp_pow_ConsI a(1) a(2)) show "SA_poly_to_SA_fun m f (t # x) = (\<Oplus>i\<in>inds. a i x \<otimes> (t \<ominus> c x) [^] i)" unfolding 0 list_tl list_hd apply(rule Qp.finsum_mono_neutral_cong) apply(rule , intro Qp.ring_simprules Qp.nat_pow_closed a SA_car_closed[of _ m] a_cfs_closed c_closed, simp) using inds_non_memE Qp.l_null Qp.minus_closed Qp.nat_pow_closed SA_car_closed a(1) a(2) c_closed apply presburger by (simp add: inds_bounded subset_eq) qed lemma \<C>_mem_tl: "\<And> x. x \<in> condition_to_set \<C> \<Longrightarrow> tl x \<in>A" by (metis cell_memE(2) condition_to_set.simps common_refinement_locale.\<C>_def common_refinement_locale_axioms) lemma \<C>_mem_hd: "\<And> x. x \<in> condition_to_set \<C> \<Longrightarrow> hd x \<in> carrier Q\<^sub>p" by (metis Qp_pow_ConsE(2) \<C>_def cell_condition_set_memE(1) common_refinement_locale.\<C>_cond common_refinement_locale_axioms) lemma f_eval_formula': "\<And>x. x \<in> condition_to_set \<C> \<Longrightarrow> SA_poly_to_SA_fun m f x = (\<Oplus>i\<in>inds. (a i (tl x))\<otimes>((hd x) \<ominus> c (tl x))[^]i)" proof- fix x assume A: "x \<in> condition_to_set \<C>" have 0: "x = hd x # tl x" using A by (metis \<C>_def cartesian_power_car_memE cell_memE(1) condition_to_set.simps list.exhaust_sel list.size(3) nat.simps(3)) have " SA_poly_to_SA_fun m f (hd x # tl x) = (\<Oplus>i\<in>inds. 
(a i (tl x))\<otimes>((hd x) \<ominus> c (tl x))[^]i)" apply(rule f_eval_formula) using A unfolding \<C>_def apply (simp add: cartesian_power_tail cell_memE(1)) by (simp add: A \<C>_mem_hd) thus " SA_poly_to_SA_fun m f x = (\<Oplus>i\<in>inds. a i (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] i)" using 0 by auto qed end locale one_val_point_decomp = common_refinement_locale + fixes B\<^sub>0 Ls As Fs assumes subset: "B\<^sub>0 \<subseteq> condition_to_set \<C>" assumes Ls_finite: "finite Ls" assumes nonempty: "Ls \<noteq> {}" assumes semialg: "\<And>l. l \<in> Ls \<Longrightarrow> Fs l \<in> carrier (SA m)" assumes semialg_fibres: "\<And> l. l \<in> Ls \<Longrightarrow> is_semialgebraic m (As l)" assumes covers: "B\<^sub>0 = (\<Union>l \<in> Ls. condition_to_set (Cond m (As l) c (Fs l) (Fs l) closed_interval))" context one_val_point_decomp begin lemma is_cell: "l \<in> Ls \<Longrightarrow> is_cell_condition (Cond m (As l) c (Fs l) (Fs l) closed_interval)" apply(rule is_cell_conditionI') using semialg_fibres c_closed semialg is_convex_condition_def by auto lemma one_val_point_decomposable: "one_val_point_c_decomposable m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)) (\<Union>l \<in> Ls. condition_to_set (Cond m (As l) c (Fs l) (Fs l) closed_interval))" apply(rule finite_union_one_val_point_c_decomposable) using c_closed apply blast using Ls_finite apply blast using nonempty apply blast using semialg apply blast proof(rule subsetI) fix x assume A: "x \<in> (\<lambda>l. condition_to_set (Cond m (As l) c (Fs l) (Fs l) closed_interval)) ` Ls" then obtain l where l_def: "l \<in> Ls" "x = condition_to_set (Cond m (As l) c (Fs l) (Fs l) closed_interval)" by blast have 00: "Cond m (As l) c (Fs l) (Fs l) closed_interval \<in> c_cells_at_one_val_point m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>))" unfolding c_cells_at_one_val_point_def mem_Collect_eq condition_to_set.simps arity.simps center.simps u_bound.simps l_bound.simps boundary_condition.simps cell_def using is_cell l_def by auto thus "x \<in> condition_to_set ` c_cells_at_one_val_point m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>))" using l_def by blast qed definition first_decomp where "first_decomp = (SOME S'. S' \<noteq> {} \<and> is_cell_decomp m S' B\<^sub>0 \<and> S' \<subseteq> c_cells_at_one_val_point m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)))" lemma first_decomp_prop: "first_decomp \<noteq> {} \<and> is_cell_decomp m first_decomp B\<^sub>0 \<and> first_decomp \<subseteq> c_cells_at_one_val_point m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>))" proof- obtain S' where S'_def: "S' \<noteq> {} \<and> is_cell_decomp m S' B\<^sub>0 \<and> S' \<subseteq> c_cells_at_one_val_point m c (Fs ` Ls) (carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>))" using one_val_point_decomposable nonempty semialg one_val_point_c_decomposable_nonempty[of c m "(Fs ` Ls)" "carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)" B\<^sub>0] c_closed unfolding covers by blast show ?thesis unfolding first_decomp_def using S'_def SomeE by smt qed lemma bounds: assumes "C \<in> first_decomp" shows "u_bound C \<in> Fs ` Ls" using first_decomp_prop assms unfolding c_cells_at_one_val_point_def by auto lemma decomp: "\<exists>S'. (is_cell_decomp m S' B\<^sub>0 \<and> (\<forall>B\<in>S'. (\<exists> \<phi>. 
\<phi> \<in> (Fs ` Ls) \<and> center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and> boundary_condition B = closed_interval)))"
  using first_decomp_prop unfolding c_cells_at_one_val_point_def
  by (smt (verit, best) in_mono mem_Collect_eq)

end

context common_refinement_locale
begin

text\<open>This is the set that Denef calls $A_0$. The proof proceeds by showing that both $A_0$ and its complement can be decomposed as desired.\<close>

definition A\<^sub>0 where
 "A\<^sub>0 = {x \<in> condition_to_set \<C>. (\<forall> i \<in> inds. (\<forall> j \<in> inds. i < j \<longrightarrow> val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (hd x \<ominus> c (tl x))[^](j- i))))}"

lemma A\<^sub>0_closed: "A\<^sub>0 \<subseteq> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)"
  unfolding A\<^sub>0_def using condition_to_set.simps[of m A c a1 a2 I]
  unfolding \<C>_def cell_def by blast

definition ordered_ind_pairs where
 "ordered_ind_pairs = {(i,j) \<in> ind_pairs. i < j}"

lemma ordered_ind_pairs_unit:
  assumes "i \<in> inds"
  assumes "j \<in> inds"
  assumes "i < j"
  shows "\<exists>\<eta>\<in>Units (SA m). \<forall>x\<in>carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int j - int i) * ord (\<eta> x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) = ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) "
proof-
  have 0: "(int j - int i) = int (j - i)"
    using assms by auto
  show ?thesis unfolding 0
    apply(rule denef_lemma_2_4_floor[of d])
    apply (simp add: denef_II_axioms)
    apply (simp add: Suc_leI assms(3))
    using assms(2) diff_le_self inds_bounded' order.trans apply blast
    using a_quotient_unit assms(1) assms(2) by presburger
qed

lemma ordered_ind_pairs_unit':
 "\<And>ps. ps \<in> ordered_ind_pairs \<Longrightarrow> \<exists>\<phi> \<in> Units (SA m). (\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<phi> x) + ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x) mod (int (snd ps) - int (fst ps)) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))"
proof-
  fix ps assume A: "ps \<in> ordered_ind_pairs"
  obtain i j where ij_def: "ps = (i,j)"
    using A unfolding ordered_ind_pairs_def mem_Collect_eq by auto
  have i_closed: "i \<in> inds"
    using A unfolding ij_def mem_Collect_eq ordered_ind_pairs_def ind_pairs_def by auto
  have j_closed: "j \<in> inds"
    using A unfolding ij_def mem_Collect_eq ordered_ind_pairs_def ind_pairs_def by auto
  have le: "i < j"
    using A unfolding ij_def mem_Collect_eq ordered_ind_pairs_def ind_pairs_def by auto
  show "\<exists>\<phi> \<in> Units (SA m). (\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>).
(int (snd ps) - int (fst ps))*ord (\<phi> x) + ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x) mod (int (snd ps) - (fst ps)) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))" unfolding ij_def fst_conv snd_conv by(intro ordered_ind_pairs_unit i_closed j_closed le) qed lemma ordered_ind_pairs_memE: assumes "ps \<in> ordered_ind_pairs" shows "fst ps \<in> inds" "snd ps \<in> inds" "fst ps < snd ps" using assms unfolding ordered_ind_pairs_def ind_pairs_def mem_Collect_eq by auto lemma ordered_ind_pairs_finite: "finite ordered_ind_pairs" unfolding ordered_ind_pairs_def ind_pairs_def using inds_finite by (metis (no_types, lifting) Collect_case_prod_mono case_prodD finite_ind_pairs ind_pairs_def mem_Collect_eq predicate2I rev_finite_subset) lemma semialg_ineq_set: assumes "(i,j) \<in> ordered_ind_pairs" assumes "F = {x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (hd x \<ominus> c (tl x))[^](j- i))}" shows "is_semialgebraic (Suc m) F" proof- have i_in: "i \<in> inds" using assms ordered_ind_pairs_def ind_pairs_def by force have j_in: "j \<in>inds" using assms ordered_ind_pairs_def ind_pairs_def by force have i_leq_j: "i < j" using assms unfolding ordered_ind_pairs_def by blast obtain Ai where Ai_def: "Ai = (\<lambda>x\<in>carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). a i (tl x))" by blast obtain Aj where Aj_def: "Aj = (\<lambda>x\<in>carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). a j (tl x))" by blast obtain C where C_def: "C = (\<lambda>x\<in>carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). c (tl x))" by blast obtain Hd where Hd_def: "Hd = (\<lambda>x\<in>carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). hd x)" by blast have Hd_closed: "Hd \<in> carrier (SA (Suc m))" using hd_is_semialg_function[of "Suc m"] unfolding Hd_def using restrict_in_SA_car by blast have Ai_closed: "Ai \<in> carrier (SA (Suc m))" unfolding Ai_def apply(rule tl_comp_in_SA) using a_cfs_closed by blast have Aj_closed: "Aj \<in> carrier (SA (Suc m))" unfolding Aj_def apply(rule tl_comp_in_SA) using a_cfs_closed by blast have C_closed: "C \<in> carrier (SA (Suc m))" unfolding C_def apply(rule tl_comp_in_SA) using c_closed by blast obtain G where G_def: "G = Aj \<otimes>\<^bsub>SA (Suc m)\<^esub> (Hd \<ominus>\<^bsub>SA (Suc m)\<^esub> C)[^]\<^bsub>SA (Suc m)\<^esub>(j-i)" by blast have G_closed: "G \<in> carrier (SA (Suc m))" unfolding G_def by(rule R.ring_simprules, rule Aj_closed, rule R.nat_pow_closed, rule R.minus_closed, rule Hd_closed, rule C_closed) have G_eval_1: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<Longrightarrow> G x = (Aj x \<otimes> (Hd x \<ominus> C x)[^](j- i))" unfolding G_def using Aj_closed Hd_closed C_closed SA_minus_eval SA_mult SA_nat_pow by presburger have G_eval_2: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<Longrightarrow> G x = (a j (tl x) \<otimes> (hd x \<ominus> c (tl x))[^](j- i))" using G_eval_1 restrict_apply unfolding Aj_def Hd_def C_def by (smt restrict_apply) have 2: "F = {x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). 
val (Ai x) \<noteq> val (G x)}" apply(rule equalityI') unfolding assms mem_Collect_eq apply(rule conjI, blast) proof- fix x assume A: "x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<and> val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))" have 00: "Ai x = a i (tl x)" using A restrict_apply unfolding Ai_def by metis show "val (Ai x) \<noteq> val (G x)" unfolding 00 using G_eval_2[of x] A by smt next show "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<and> val (Ai x) \<noteq> val (G x) \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<and> val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))" apply(rule conjI, blast) proof- fix x assume A: "x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<and> val (Ai x) \<noteq> val (G x)" have 00: "Ai x = a i (tl x)" unfolding Ai_def using A restrict_apply by smt have 01: " G x = (a j (tl x) \<otimes> (hd x \<ominus> c (tl x))[^](j- i))" apply(rule G_eval_2) using A by blast show "val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))" using A unfolding 00 01 by blast qed qed have 3: "F = carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) - {x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). val (Ai x) = val (G x)}" unfolding 2 by blast show "is_semialgebraic (Suc m) F" unfolding 3 apply(rule diff_is_semialgebraic, rule carrier_is_semialgebraic) by(rule semialg_val_eq_set_is_semialg, rule Ai_closed, rule G_closed) qed definition term_ineq_set where "term_ineq_set ps = {x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). val (a (fst ps) (tl x)) \<noteq> val (a (snd ps) (tl x) \<otimes> (hd x \<ominus> c (tl x))[^]((snd ps)- (fst ps)))}" lemma term_ineq_set_semialg: assumes "ps \<in> ordered_ind_pairs" shows "is_semialgebraic (Suc m) (term_ineq_set ps)" proof- obtain i j where ij_def: "i \<in> inds \<and> j \<in> inds \<and> i< j" "ps = (i,j)" using assms unfolding ordered_ind_pairs_def ind_pairs_def by blast show ?thesis using semialg_ineq_set[of i j "term_ineq_set ps"] assms ij_def unfolding ij_def ordered_ind_pairs_def term_ineq_set_def fst_conv snd_conv by auto qed lemma A\<^sub>0_as_intersection: "A\<^sub>0 = condition_to_set \<C> \<inter> \<Inter> (term_ineq_set ` ordered_ind_pairs)" proof(rule equalityI') show "\<And>x. x \<in> A\<^sub>0 \<Longrightarrow> x \<in> condition_to_set \<C> \<inter> \<Inter> (term_ineq_set ` ordered_ind_pairs)" proof(rule IntI) show " \<And>x. x \<in> A\<^sub>0 \<Longrightarrow> x \<in> condition_to_set \<C>" unfolding A\<^sub>0_def by blast show "\<And>x. x \<in> A\<^sub>0 \<Longrightarrow> x \<in> \<Inter> (term_ineq_set ` ordered_ind_pairs)" proof fix x ps assume A: "x \<in> A\<^sub>0" "ps \<in> ordered_ind_pairs" obtain i j where ij_def: "i \<in> inds \<and> j \<in> inds \<and> i< j \<and> ps = (i,j)" using A(2) unfolding ordered_ind_pairs_def ind_pairs_def by blast have ps_eq: "ps = (i,j)" using ij_def by blast have 0: "term_ineq_set ps = {x \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>). val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))}" unfolding ps_eq term_ineq_set_def by auto show "x \<in> term_ineq_set ps" using A\<^sub>0_closed ij_def unfolding A\<^sub>0_def unfolding 0 mem_Collect_eq using A(1) A\<^sub>0_def by blast qed qed show "\<And>x. 
x \<in> condition_to_set \<C> \<inter> \<Inter> (term_ineq_set ` ordered_ind_pairs) \<Longrightarrow> x \<in> A\<^sub>0" proof- fix x assume A: "x \<in> condition_to_set \<C> \<inter>\<Inter> (term_ineq_set ` ordered_ind_pairs)" show " x \<in> A\<^sub>0" unfolding A\<^sub>0_def mem_Collect_eq apply(rule conjI) using A apply blast proof fix i assume i_inds: "i \<in> inds" show "\<forall>j\<in>inds. i < j \<longrightarrow> val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))" proof fix j assume j_inds: "j \<in> inds" show " i < j \<longrightarrow> val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))" proof assume le: "i < j" have ordered_ind_pairs_el: "(i,j) \<in> ordered_ind_pairs" unfolding ordered_ind_pairs_def ind_pairs_def using i_inds j_inds le by blast show "val (a i (tl x)) \<noteq> val (a j (tl x) \<otimes> (lead_coeff x \<ominus> c (tl x)) [^] (j - i))" using A ordered_ind_pairs_el fst_conv snd_conv unfolding term_ineq_set_def by auto qed qed qed qed qed lemma A\<^sub>0_semialg: "is_semialgebraic (Suc m) A\<^sub>0" unfolding A\<^sub>0_as_intersection apply(cases "ordered_ind_pairs = {}") apply auto[1] apply(intro condition_to_set_is_semialg[of \<C> m] \<C>_cond) using \<C>_def arity.simps apply blast apply(rule intersection_is_semialg, rule condition_to_set_is_semialg, rule \<C>_cond) unfolding \<C>_def arity.simps apply blast apply(rule finite_intersection_is_semialg, rule ordered_ind_pairs_finite, blast) using term_ineq_set_semialg unfolding is_semialgebraic_def by blast lemma A\<^sub>0_closures: assumes "t#x \<in> A\<^sub>0" assumes "i \<in> inds" assumes "j \<in> inds" shows "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p" "t \<ominus> c x \<in> carrier Q\<^sub>p" "t \<ominus> c x \<noteq> \<zero> \<Longrightarrow> t \<ominus> c x \<in> Units Q\<^sub>p" proof- have t_closed: "t \<in> carrier Q\<^sub>p" using assms A\<^sub>0_closed cartesian_power_head by force have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms A\<^sub>0_closed cartesian_power_tail by fastforce show 0: "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p" "(t \<ominus> c x) \<in> carrier Q\<^sub>p" unfolding Units_eq_nonzero nonzero_def mem_Collect_eq using x_closed inds_memE SA_Units_memE' a_eval assms apply auto[1] using x_closed inds_memE SA_Units_memE' a_eval assms apply auto[1] using t_closed x_closed assms Qp.ring_simprules(4) c_closed SA_car_closed by auto[1] show "t \<ominus> c x \<noteq> \<zero> \<Longrightarrow> t \<ominus> c x \<in> Units Q\<^sub>p" using 0 assms Qp.nonzero_memI Units_eq_nonzero by presburger qed lemma A\<^sub>0_memE: assumes "t#x \<in> A\<^sub>0" assumes "i \<in> inds" assumes "j \<in> inds" assumes "i < j" assumes "t \<ominus> c x \<noteq> \<zero>" shows "val (a i x \<otimes> (t \<ominus> c x)[^]i) \<noteq> val (a j x \<otimes> (t \<ominus> c x)[^]j)" "ord (a i x \<otimes> (t \<ominus> c x)[^]i) \<noteq> ord (a j x \<otimes> (t \<ominus> c x)[^]j)" proof- have units: "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p" "(t \<ominus> c x) \<in> Units Q\<^sub>p" using assms A\<^sub>0_closures by auto have 0: "val (a i x) \<noteq> val (a j x \<otimes> (t \<ominus> c x) [^] (j - i))" using assms unfolding A\<^sub>0_def mem_Collect_eq list_tl list_hd by auto hence 1: "ord (a i x) \<noteq> ord (a j x \<otimes> (t \<ominus> c x) [^] (j - i))" using units unfolding val_def by (simp add: Qp.Units_pow_closed Qp.ring_in_Units_imp_not_zero) have 2: "ord (a j 
x \<otimes> (t \<ominus> c x) [^] (j - i)) = ord (a j x) + (int j - int i)* ord (t \<ominus> c x)" using units assms Qp.Units_pow_closed Units_eq_nonzero nonzero_nat_pow_ord ord_mult by force hence 3: "ord (a i x) + int i*ord(t \<ominus> c x) \<noteq> ord (a j x) + int j* ord (t \<ominus> c x)" using 1 2 int_distrib(3) by force thus "ord (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> ord (a j x \<otimes> (t \<ominus> c x) [^] j)" using units Qp.Units_pow_closed Units_eq_nonzero nonzero_nat_pow_ord ord_mult by auto thus "val (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> val (a j x \<otimes> (t \<ominus> c x) [^] j)" unfolding val_def using units by auto qed lemma A\<^sub>0_memE': assumes "t#x \<in> A\<^sub>0" assumes "i \<in> inds" assumes "j \<in> inds" assumes "i < j" assumes "t \<ominus> c x = \<zero>" shows "i = 0 \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x)[^]i) \<noteq> val (a j x \<otimes> (t \<ominus> c x)[^]j)" "i > 0 \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x)[^]i) = \<infinity>" "val (a j x \<otimes> (t \<ominus> c x)[^]j) = \<infinity>" proof- have t_closed: "t \<in> carrier Q\<^sub>p" using assms A\<^sub>0_closed cartesian_power_head by force have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms A\<^sub>0_closed cartesian_power_tail by fastforce have units: "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p" using assms A\<^sub>0_closures by auto show 0: "val (a j x \<otimes> (t \<ominus> c x)[^]j) = \<infinity>" using assms units unfolding assms(5) val_def by (simp add: Qp.Units_closed Qp.nat_pow_zero) show "i > 0 \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x)[^]i) = \<infinity>" using assms units unfolding assms(5) val_def by (simp add: Qp.Units_closed Qp.nat_pow_zero) show "i = 0 \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x)[^]i) \<noteq> val (a j x \<otimes> (t \<ominus> c x)[^]j)" unfolding 0 using units by (metis (no_types, lifting) Group.nat_pow_0 Qp.Units_not_right_zero_divisor Qp.nat_pow_closed Qp.zero_closed Qp.zero_not_one eint.distinct(2) val_def) qed text\<open>This lemma formalizes equation (3) from Denef's proof of this result.\<close> lemma val_f_on_A\<^sub>0: "\<And>x. x \<in> A\<^sub>0 \<Longrightarrow> inds \<noteq> {} \<Longrightarrow> val (SA_poly_to_SA_fun m f x) = (MIN i\<in>inds. (val ( (a i (tl x))\<otimes>((hd x) \<ominus> c (tl x))[^]i)))" proof- fix xs assume A0: "xs \<in> A\<^sub>0" "inds \<noteq> {} " obtain t x where tx_def: "xs = t#x" using A0 A\<^sub>0_closed Qp_pow_ConsE by (metis (mono_tags, lifting) Suc_n_not_n cartesian_power_car_memE list.exhaust_sel list.sel(2) subset_iff) have t_x_closed: "t \<in> carrier Q\<^sub>p" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using A0 A\<^sub>0_closed Qp_pow_ConsE unfolding tx_def apply force using A0 A\<^sub>0_closed Qp_pow_ConsE unfolding tx_def by force have diff_closed: "t \<ominus> c x \<in> carrier Q\<^sub>p" using t_x_closed Qp.ring_simprules c_closed SA_car_closed by auto have 100: "SA_poly_to_SA_fun m f (t#x) = (\<Oplus>i\<in>inds. (a i x)\<otimes>(t \<ominus> c x)[^]i)" by(rule f_eval_formula, auto simp: t_x_closed) have 101: "(\<lambda>i. a i x \<otimes> (t \<ominus> c x) [^] i) \<in> inds \<rightarrow> carrier Q\<^sub>p" using diff_closed t_x_closed by (simp add: a_eval) show "val (SA_poly_to_SA_fun m f xs) = (MIN i\<in>inds. val (a i (tl xs) \<otimes> (hd xs \<ominus> c (tl xs)) [^] i))" unfolding tx_def list_tl list_hd proof(cases "(t \<ominus> c x) = \<zero>") case True have T0: "\<And>i. 
i \<noteq> 0 \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x) [^] i) = \<infinity>" using Qp.nat_pow_zero Qp.r_null True a_eval local.val_zero t_x_closed(2) by presburger then have T1: "\<And>i. i \<in> inds \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x) [^] i) \<ge> val (a 0 x \<otimes> (t \<ominus> c x) [^] (0::nat))" by (metis basic_trans_rules(20) eint_ord_code(3) notin_closed) have T2: "\<And>i. i \<noteq> 0 \<Longrightarrow> a i x \<otimes> (t \<ominus> c x) [^] i = \<zero>" using Qp.nat_pow_zero Qp.r_null True a_eval t_x_closed(2) by presburger show "val (SA_poly_to_SA_fun m f (t # x)) = (MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i))" proof(cases "(0::nat) \<in> inds") case True have T00: " (a 0 x \<otimes> (t \<ominus> c x) [^] (0::nat)) \<in> carrier Q\<^sub>p" by (simp add: a_eval t_x_closed(2)) have T01: "\<And>i. i \<in> inds \<Longrightarrow> a i x \<otimes> (t \<ominus> c x) [^] i \<in> carrier (Q\<^sub>p)" using T00 T2 by (metis Qp.zero_closed) have T02: "inds = insert 0 (inds - {0})" using True by blast have "(\<Oplus>i\<in>insert 0 (inds - {0}). a i x \<otimes> (t \<ominus> c x) [^] i) = a(0::nat) x \<otimes> (t \<ominus> c x)[^] (0::nat) \<oplus> (\<Oplus>i\<in>inds-{(0::nat)}. a i x \<otimes> (t \<ominus> c x) [^] i)" apply(rule Qp.finsum_insert[of "inds-{0}" "0::nat" "(\<lambda> i. a i x \<otimes> (t \<ominus> c x) [^] i)"]) using inds_finite apply blast apply blast using "101" apply blast using T00 by blast hence T03: "(SA_poly_to_SA_fun m f (t#x)) = (a 0 x \<otimes> (t \<ominus> c x) [^] (0::nat)) \<oplus> (\<Oplus>i\<in>inds - {0}. a i x \<otimes> (t \<ominus> c x) [^] i)" using T02 unfolding 100 by auto have T04: "(MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i)) = val (a 0 x \<otimes> (t \<ominus> c x) [^] (0::nat))" apply(rule Min_eqI ) using inds_finite apply blast using T1 apply blast using True by blast show "val (SA_poly_to_SA_fun m f (t#x)) = (MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i))" using T2 Qp.finsum_zero unfolding T03 T04 tx_def by (smt (verit, best) DiffD2 Qp.add.finprod_one_eqI Qp.r_zero T00 insertI1) next case False then have F0: "\<And>i. i \<in> inds \<Longrightarrow> (a i x \<otimes> (t \<ominus> c x) [^] i) = \<zero>" using T2 by metis hence F1: "(SA_poly_to_SA_fun m f (t#x)) = \<zero>" unfolding 100 using Qp.finsum_zero by (smt Qp.add.finprod_one_eqI Qp.r_zero singletonI) have F2: " (MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i)) = \<infinity>" apply(rule Min_eqI) using inds_finite apply blast using F0 True local.val_zero apply force using A0(2) F0 local.val_zero by fastforce show "val (SA_poly_to_SA_fun m f (t # x)) = (MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i))" unfolding F1 F2 val_zero by blast qed next case False show "val (SA_poly_to_SA_fun m f (t # x)) = (MIN i\<in>inds. val (a i x \<otimes> (t \<ominus> c x) [^] i))" unfolding 100 proof(rule finsum_val_ultrametric_diff') show "(\<lambda>i. a i x \<otimes> (t \<ominus> c x) [^] i) \<in> inds \<rightarrow> carrier Q\<^sub>p" using 101 by blast show "finite inds" using inds_def inds_finite by blast show " inds \<noteq> {}" by (simp add: A0(2)) show "\<And>i b. 
i \<in> inds \<Longrightarrow> b \<in> inds \<Longrightarrow> i \<noteq> b \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> val (a b x \<otimes> (t \<ominus> c x) [^] b)" proof- fix i b assume A: "i \<in> inds" "b \<in> inds" "i \<noteq> b" show "val (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> val (a b x \<otimes> (t \<ominus> c x) [^] b)" apply(cases "i < b") using A A\<^sub>0_memE[of t x i b] A\<^sub>0_memE[of t x b i] A0 False unfolding tx_def by auto qed qed qed qed text\<open>This lemma formalizes the statement from Denef's proof that ``The cells contained in $A \setminus A_0$ have the form \[ B = \{ (x,t) \mid x \in C \text{ and ord}(t - c(x)) = \text{ord}(\theta(x)) \}, ... \]" \<close> definition \<Theta> where \<Theta>_def: "\<Theta> = (\<lambda>ps. (SOME \<phi> .\<phi> \<in> Units (SA m) \<and> (\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<phi> x) + ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x) mod (int (snd ps) - int (fst ps)) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))))" lemma \<Theta>_unit: "\<And>ps. ps \<in> ordered_ind_pairs \<Longrightarrow> \<Theta> ps \<in> Units (SA m)" "\<And>ps. ps \<in> ordered_ind_pairs \<Longrightarrow> (\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<Theta> ps x) + ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x) mod (int (snd ps) - int (fst ps)) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))" proof- fix ps assume A: "ps \<in> ordered_ind_pairs" then obtain i j where ij_def: "ps = (i,j)" using bezw.cases by blast have F10010: "(i,j) \<in> ordered_ind_pairs" using A unfolding ordered_ind_pairs_def mem_Collect_eq ij_def by metis obtain \<phi> where \<phi>_def: "\<phi>\<in>Units (SA m) \<and> (\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<phi> x) + ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x) mod (int (snd ps) - int (fst ps)) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))" using F10010 F_def ordered_ind_pairs_unit'[of "(i,j)"] ij_def by blast have a: "\<Theta> ps \<in> Units (SA m) \<and> (\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<Theta> ps x) + ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x) mod (int (snd ps) - int (fst ps)) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))" apply(rule SomeE[of "\<Theta> ps" _ \<phi> ]) using F10010 \<phi>_def SomeE unfolding \<Theta>_def ij_def by auto show "\<Theta> ps \<in> Units (SA m)" "(\<forall>x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int (snd ps) - int (fst ps))*ord (\<Theta> ps x) + ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x) mod (int (snd ps) - int (fst ps)) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x))" using a unfolding ij_def by auto qed lemma \<Theta>_ord: "\<And>i j x. 
(i, j) \<in> ordered_ind_pairs \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (int j - int i)*ord ((\<Theta> (i,j)) x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - i) = ord (((a i) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a j ) x)" proof- fix i j x assume F10010: "(i, j) \<in> ordered_ind_pairs" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" have "\<exists>\<eta>\<in>Units (SA m). \<forall>x\<in>carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int j - int i) * ord (\<eta> x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) = ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) " apply(rule ordered_ind_pairs_unit) using ordered_ind_pairs_unit[of i j ] F10010(1) unfolding ordered_ind_pairs_def ind_pairs_def mem_Collect_eq by auto then obtain \<phi> where \<phi>_def: "\<phi>\<in>Units (SA m) \<and> ( \<forall>x\<in>carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int j - int i) * ord (\<phi> x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) = ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x))" by blast have a:"(\<Theta> (i,j))\<in>Units (SA m) \<and> ( \<forall>x\<in>carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int j - int i) * ord ((\<Theta> (i,j)) x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) = ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x))" apply(rule SomeE[of "\<Theta> (i,j)" _ \<phi>]) using F10010 unfolding \<Theta>_def fst_conv snd_conv apply blast using \<phi>_def by auto have 000: "snd (case (i, j) of (x, y) \<Rightarrow> (x, int y)) = j" by auto have 001: "fst (case (i, j) of (x, xa) \<Rightarrow> (int x, xa)) = i" by auto have 002: "(\<Theta> (i,j)) \<in>Units (SA m) \<and> (\<forall>x\<in>carrier (Q\<^sub>p\<^bsup>m\<^esup>). (int j - int i) * ord ((\<Theta> (i,j)) x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) = ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x))" using a unfolding 000 001 fst_conv snd_conv by auto show "(int j - int i) * ord (\<Theta>(i,j) x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) = ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x)" using 002 F10010 by auto qed definition A\<^sub>0_comp_fibre_cover where "A\<^sub>0_comp_fibre_cover ps = {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). int (snd ps - fst ps)* ord ((\<Theta> ps) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) } \<inter> {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). val (\<Theta> ps x) \<in> I (val (a1 x)) (val (a2 x))} \<inter> A " lemma A\<^sub>0_comp_fibre_cover_semialg: assumes "ps \<in> ordered_ind_pairs" shows "is_semialgebraic m (A\<^sub>0_comp_fibre_cover ps)" proof- obtain i j where ij_def: "ps = (i,j)" using assms unfolding ordered_ind_pairs_def ind_pairs_def by auto obtain G where G_def: "G = {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). 
int (snd ps - fst ps)* ord ((\<Theta> ps) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }" by blast have 0: "is_semialgebraic m G" proof- have 0: "(a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) \<in> carrier (SA m)" using inds_memE[of "fst ps"] inds_memE[of "snd ps"] assms ordered_ind_pairs_memE by auto have 1: "snd ps - fst ps > 0" using assms ordered_ind_pairs_memE[of ps] by linarith have 2: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> ((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x \<noteq>\<zero>" by(intro SA_Units_memE'[of _ m] a_quotient_unit ordered_ind_pairs_memE assms, auto ) have 3: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x \<in> carrier Q\<^sub>p \<and> (a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x \<noteq> \<zero>" using 2 0 SA_car_memE by blast have 4: " {x \<in> carrier (Q\<^sub>p\<^bsup>m + 0\<^esup>). (a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x \<in> nonzero Q\<^sub>p \<and> ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x) mod int (snd ps - fst ps) = 0} = {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) mod (snd ps - fst ps) = 0}" apply(rule equalityI') unfolding mem_Collect_eq nonzero_def apply (metis add_cancel_left_right) using 3 add_cancel_left_right[of m 0] by metis have 5: "is_semialgebraic m {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). int (snd ps - fst ps)* ord ((\<Theta> ps) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }" proof- have 50: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (\<Theta> ps x) \<in> nonzero Q\<^sub>p" using \<Theta>_unit assms SA_Units_memE' SA_Units_closed SA_car_memE unfolding nonzero_def by (metis (mono_tags, lifting) function_ring_car_closed mem_Collect_eq) have 51: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (int (snd ps - fst ps))* ord ((\<Theta> ps) x) = ord ((\<Theta> ps [^] \<^bsub>SA m\<^esub> (snd ps - fst ps)) x)" proof- fix x assume AAA: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" have 510: "(\<Theta> ps [^] \<^bsub>SA m\<^esub> (snd ps - fst ps)) x = (\<Theta> ps x [^] (snd ps - fst ps))" using \<Theta>_unit AAA SA_Units_memE SA_Units_closed by (meson SA_nat_pow) show "int (snd ps - fst ps) * ord (\<Theta> ps x) = ord ((\<Theta> ps [^]\<^bsub>SA m\<^esub> (snd ps - fst ps)) x)" using 50 unfolding 510 using AAA nonzero_nat_pow_ord by presburger qed have 52: "{x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (snd ps - fst ps)* ord ((\<Theta> ps) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) } = {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>).ord ((\<Theta> ps [^] \<^bsub>SA m\<^esub> (snd ps - fst ps)) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }" apply(rule equalityI') unfolding mem_Collect_eq using 51 50 apply (metis SA_nat_pow mult_of_nat_commute) using 51 50 by presburger have 53: "\<And>x. 
x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (\<Theta> ps [^] \<^bsub>SA m\<^esub> (snd ps - fst ps)) x \<in> nonzero Q\<^sub>p" using 50 \<Theta>_unit Qp_nat_pow_nonzero SA_nat_pow by presburger have 54: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) \<in> nonzero Q\<^sub>p" using inds_memE by (meson "3" not_nonzero_Qp) have 55: "{x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). (snd ps - fst ps)* ord ((\<Theta> ps) x) = ord (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) } = {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). val ((\<Theta> ps [^] \<^bsub>SA m\<^esub> (snd ps - fst ps)) x) = val (((a (fst ps)) \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a (snd ps) ) x) }" unfolding 52 apply(rule equalityI') unfolding mem_Collect_eq using inds_memE 50 apply (metis "3" Qp.nonzero_memE(1) Qp.nonzero_memE(2) Qp_nat_pow_nonzero SA_nat_pow val_ord') using 53 54 unfolding val_def nonzero_def mem_Collect_eq by (meson eint.simps(1)) have 56: "(\<Theta> ps [^]\<^bsub>SA m\<^esub> (snd ps - fst ps)) \<in> carrier (SA m)" using assms \<Theta>_unit by blast have 57: "(a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) \<in> carrier (SA m)" using inds_memE ordered_ind_pairs_memE[of ps] assms by auto show "is_semialgebraic m {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). int (snd ps - fst ps) * ord (\<Theta> ps x) = ord ((a (fst ps) \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a (snd ps)) x)}" unfolding 55 using 56 57 semialg_val_eq_set_is_semialg by blast qed thus ?thesis unfolding G_def by auto qed obtain G' where G'_def: "G' = {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). val (\<Theta> ps x) \<in> I (val (a1 x)) (val (a2 x))} \<inter> G" by blast have 1: "is_semialgebraic m {x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>). val (\<Theta> ps x) \<in> I (val (a1 x)) (val (a2 x))}" apply(rule cell_cond_semialg) using \<C>_def \<C>_cond is_cell_conditionE(5) apply blast using \<Theta>_unit(1) assms SA_Units_closed apply auto[1] using \<C>_cond unfolding \<C>_def by auto show ?thesis unfolding A\<^sub>0_comp_fibre_cover_def apply(intro intersection_is_semialg) using 1 A_semialg 0 unfolding G_def by auto qed lemma A\<^sub>0_comp_fibre_cover_covers: "condition_to_set \<C> - A\<^sub>0 = (\<Union> ps \<in> ordered_ind_pairs. 
condition_to_set (Cond m (A\<^sub>0_comp_fibre_cover ps) c (\<Theta> ps)(\<Theta> ps) closed_interval))" proof(rule equalityI') fix xs assume A: "xs \<in> condition_to_set \<C> - A\<^sub>0" then obtain ps where ps_def: "ps \<in> ordered_ind_pairs" "xs \<notin> term_ineq_set ps" unfolding Diff_iff Int_iff A\<^sub>0_as_intersection Inter_iff by auto obtain i j where ij_def: "i \<in> inds" "j \<in> inds" "i < j" "ps = (i,j)" using ps_def unfolding ordered_ind_pairs_def ind_pairs_def by auto have xs_closed: "xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)" using A unfolding condition_to_set.simps \<C>_def cell_def by auto obtain t x where tx_def: "xs = t#x" "t \<in> carrier Q\<^sub>p" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using xs_closed by (metis Qp_pow_ConsE(1) Qp_pow_ConsE(2) cartesian_power_car_memE list.exhaust_sel list.size(3) nat.distinct(2)) have 0: "val (a i x) = val (a j x \<otimes> (t \<ominus> c x) [^] (j - i))" using xs_closed ps_def(2) unfolding ij_def term_ineq_set_def fst_conv snd_conv mem_Collect_eq tx_def list_tl list_hd by auto have 1: "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p" using tx_def ij_def SA_Units_nonzero inds_memE unfolding Units_eq_nonzero by auto have 2: "t \<ominus> c x \<in> Units Q\<^sub>p" using tx_def 0 1 val_zero Qp.Units_closed Qp.nat_pow_zero Qp.nonzero_memE(2) Units_eq_nonzero ij_def(3) c_closed SA_car_closed unfolding Units_eq_nonzero nonzero_def mem_Collect_eq by (metis (no_types, opaque_lifting) Qp.cring_simprules(27) Qp.cring_simprules(4) Qp.pow_zero eint.simps(3) val_def zero_less_diff) have closures: "a i x \<in> carrier Q\<^sub>p" "a j x \<in> carrier Q\<^sub>p" "t \<ominus> c x \<in> carrier Q\<^sub>p" "(t \<ominus> c x) [^](j-i) \<in> carrier Q\<^sub>p" "\<Theta> ps x \<in> Units Q\<^sub>p" proof- show 0: "a i x \<in> carrier Q\<^sub>p" "a j x \<in> carrier Q\<^sub>p" "t \<ominus> c x \<in> carrier Q\<^sub>p" "(t \<ominus> c x) [^](j-i) \<in> carrier Q\<^sub>p" using 1 using 2 by auto show "\<Theta> ps x \<in> Units Q\<^sub>p" by (metis SA_Units_nonzero Units_eq_nonzero \<Theta>_unit(1) ps_def(1) tx_def(3)) qed have 3: "ord (a i x) = ord (a j x \<otimes> (t \<ominus> c x) [^] (j - i))" using 0 val_ord 1 2 Units_eq_nonzero by (simp add: Qp.Units_closed equal_val_imp_equal_ord(1)) have 4: "ord (a i x) = ord (a j x) + (j - i)*ord (t \<ominus> c x)" by (metis 1 2 3 Qp.Units_pow_closed Units_nonzero_Qp int_pow_int int_pow_ord ord_mult) have 5: "ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv \<^bsub>SA m\<^esub> a j) x) = (j - i)*ord (t \<ominus> c x)" using 4 tx_def 1 SA_div_eval Units_eq_nonzero a_cfs_closed ij_def(2) inds_memE ord_fract by force have 6: "ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) = 0" using 5 ij_def by (simp add: of_nat_diff) have 7: " (int j - int i) * ord (\<Theta> (i, j) x) + ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x) mod (int j - int i) = ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x)" using \<Theta>_unit[of "(i,j)"] tx_def ij_def(4) ps_def(1) by auto have 8: "ord (t \<ominus> c x) = ord (\<Theta> ps x)" using 0 7 unfolding 6 unfolding 5 using ij_def by (simp add: int_ops(6)) hence 9: "val (\<Theta> ps x) = val (t \<ominus> c x)" using 7 closures 2 Units_eq_nonzero by force have 10: "x \<in> A\<^sub>0_comp_fibre_cover ps" unfolding A\<^sub>0_comp_fibre_cover_def mem_Collect_eq Int_iff 9 unfolding fst_conv snd_conv ij_def using tx_def 7 ij_def A unfolding 6 \<C>_def condition_to_set.simps cell_def 
mem_Collect_eq tx_def Diff_iff by (simp add: "5" "8") have "xs \<in> condition_to_set (Cond m (A\<^sub>0_comp_fibre_cover ps) c (\<Theta> ps) (\<Theta> ps) closed_interval)" unfolding condition_to_set.simps by(intro cell_memI xs_closed, unfold tx_def list_tl list_hd closed_interval_def, intro 10, auto simp: 9) thus "xs \<in> (\<Union>ps\<in>ordered_ind_pairs. condition_to_set (Cond m (A\<^sub>0_comp_fibre_cover ps) c (\<Theta> ps) (\<Theta> ps) closed_interval))" using ps_def by auto next fix xs assume A: "xs \<in> (\<Union>ps\<in>ordered_ind_pairs. condition_to_set (Cond m (A\<^sub>0_comp_fibre_cover ps) c (\<Theta> ps) (\<Theta> ps) closed_interval))" then obtain ps where ps_def: "ps \<in> ordered_ind_pairs" "xs \<in> condition_to_set (Cond m (A\<^sub>0_comp_fibre_cover ps) c (\<Theta> ps) (\<Theta> ps) closed_interval)" by auto obtain i j where ij_def: "i \<in> inds" "j \<in> inds" "i < j" "ps = (i,j)" using ps_def unfolding ordered_ind_pairs_def ind_pairs_def by auto have xs_closed: "xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)" using ps_def unfolding condition_to_set.simps \<C>_def cell_def by auto obtain t x where tx_def: "xs = t#x" "t \<in> carrier Q\<^sub>p" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using xs_closed by (metis Qp_pow_ConsE(1) Qp_pow_ConsE(2) cartesian_power_car_memE list.exhaust_sel list.size(3) nat.distinct(2)) have props: "val (t \<ominus> c x) = val (\<Theta> ps x)" "x \<in> A\<^sub>0_comp_fibre_cover ps" using ps_def unfolding tx_def condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def by auto have closures: "\<Theta> ps x \<in> Units Q\<^sub>p" "t \<ominus> c x \<in> carrier Q\<^sub>p" "t \<ominus> c x \<in> Units Q\<^sub>p" "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p" proof- show 0: "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p" using ij_def apply (metis SA_Units_nonzero Units_eq_nonzero inds_memE tx_def(3)) using ij_def by (metis SA_Units_nonzero Units_eq_nonzero inds_memE tx_def(3)) show 1: "\<Theta> ps x \<in> Units Q\<^sub>p" by (metis SA_Units_nonzero Units_eq_nonzero \<Theta>_unit(1) ps_def(1) tx_def(3)) show 2: "t \<ominus> c x \<in> carrier Q\<^sub>p" using tx_def c_closed Qp.cring_simprules(4) SA_car_closed by presburger show 3: "t \<ominus> c x \<in> Units Q\<^sub>p" using 1 2 props val_zero by (metis Units_eq_nonzero equal_val_imp_equal_ord(2)) qed have 1: "int (j - i) * ord (\<Theta> (i, j) x) = ord ((a i \<otimes>\<^bsub>SA m\<^esub> inv\<^bsub>SA m\<^esub> a j) x)" using props unfolding A\<^sub>0_comp_fibre_cover_def mem_Collect_eq ij_def snd_conv fst_conv Int_iff by auto hence 2: "int (j - i) * ord (t \<ominus> c x) = ord (a i x) - ord (a j x)" using props 1 closures by (metis SA_div_eval Units_eq_nonzero a_cfs_closed equal_val_imp_equal_ord(1) ij_def(2) ij_def(4) inds_memE ord_fract tx_def(3)) hence 3: "val (a i x) = val (a j x \<otimes> (t \<ominus> c x) [^] (j - i))" using ij_def closures val_ord Qp.Units_m_closed Qp.nat_pow_nonzero Units_eq_nonzero nonzero_nat_pow_ord ord_mult by auto have 4: "xs \<notin> A\<^sub>0" using props ij_def 3 unfolding A\<^sub>0_comp_fibre_cover_def mem_Collect_eq ij_def snd_conv fst_conv Int_iff A\<^sub>0_def by (metis list.sel(1) list.sel(3) tx_def(1)) have 5: "xs \<in> condition_to_set \<C>" unfolding \<C>_def condition_to_set.simps apply(intro cell_memI xs_closed, unfold tx_def list_tl list_hd) using props unfolding A\<^sub>0_comp_fibre_cover_def Int_iff mem_Collect_eq props by auto show "xs \<in> condition_to_set \<C> - A\<^sub>0" using 4 5 by 
auto qed lemma A\<^sub>0_comp_decomp: "\<exists>S'. (is_cell_decomp m S' (condition_to_set \<C> - A\<^sub>0) \<and> (\<forall>B\<in>S'. (\<exists> \<phi>. \<phi> \<in> Units (SA m) \<and> center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and> boundary_condition B = closed_interval)))" proof(cases "ordered_ind_pairs = {}") case True hence 0: "(condition_to_set \<C> - A\<^sub>0) = {}" unfolding A\<^sub>0_as_intersection True by auto have "is_cell_decomp m {} (condition_to_set \<C> - A\<^sub>0)" unfolding 0 is_partition_def disjoint_def is_cell_decomp_def by auto thus ?thesis by blast next case False interpret one_val_point_decomp _ _ _ _ _ _ _ _ _ _ _ _ _ "condition_to_set \<C> - A\<^sub>0" ordered_ind_pairs A\<^sub>0_comp_fibre_cover \<Theta> apply(intro one_val_point_decomp.intro one_val_point_decomp_axioms.intro common_refinement_locale_axioms A\<^sub>0_comp_fibre_cover_semialg ordered_ind_pairs_finite False) apply auto[1] using \<Theta>_unit unfolding A\<^sub>0_comp_fibre_cover_covers Q\<^sub>p_def Z\<^sub>p_def \<iota>_def by auto show ?thesis using decomp \<Theta>_unit(1) by (metis (no_types, opaque_lifting) image_iff) qed definition A\<^sub>0_comp_decomp where "A\<^sub>0_comp_decomp = (SOME S'. (is_cell_decomp m S' (condition_to_set \<C> - A\<^sub>0) \<and> (\<forall>B\<in>S'. (\<exists> \<phi>. \<phi> \<in> Units (SA m) \<and> center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and> boundary_condition B = closed_interval))))" lemma A\<^sub>0_comp_decompE: "(is_cell_decomp m A\<^sub>0_comp_decomp (condition_to_set \<C> - A\<^sub>0) \<and> (\<forall>B \<in> A\<^sub>0_comp_decomp. (\<exists> \<phi>. \<phi> \<in> Units (SA m) \<and> center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and> boundary_condition B = closed_interval)))" proof- obtain S' where S'_def: "(is_cell_decomp m S' (condition_to_set \<C> - A\<^sub>0) \<and> (\<forall>B\<in>S'. (\<exists> \<phi>. \<phi> \<in> Units (SA m) \<and> center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and> boundary_condition B = closed_interval)))" using A\<^sub>0_comp_decomp by blast show ?thesis apply(rule SomeE[of "A\<^sub>0_comp_decomp" _ S']) unfolding A\<^sub>0_comp_decomp_def apply blast by(rule S'_def) qed text\<open>That $A_0$ can be decomposed as desired is relatively easy to show:\<close> lemma A\<^sub>0_decomp: assumes "inds \<noteq> {}" shows "\<exists>S. is_cell_decomp m S A\<^sub>0 \<and> (\<forall>B\<in>S. center B = c \<and> (\<exists>N. SA_poly_ubounded p m f (center B) (condition_to_set B) N))" proof- have 0: "\<exists>S. is_cell_decomp m S A\<^sub>0 \<and> (\<forall>B\<in>S. center B = c)" proof- have 0: "\<exists>S. is_cell_decomp m S (condition_to_set \<C> - (condition_to_set \<C> - A\<^sub>0)) \<and> (\<forall>A\<in>S. center A = c)" apply(rule cell_decomp_same_center[of \<C> m A c a1 a2 I "condition_to_set \<C> - A\<^sub>0"]) apply (simp add: \<C>_cond) using \<C>_def apply blast apply blast using A\<^sub>0_comp_decompE by auto have 1: "(condition_to_set \<C> - (condition_to_set \<C> - A\<^sub>0)) = A\<^sub>0" using A\<^sub>0_def by auto show ?thesis using "0" "1" by auto qed then obtain S where S_def: "is_cell_decomp m S A\<^sub>0 \<and> (\<forall>B\<in>S. center B = c)" by blast have "(\<forall>B\<in>S. \<exists>N. SA_poly_ubounded p m f (center B) (condition_to_set B) N)" proof fix B assume A: "B \<in> S" have center_B: "center B = c" using A S_def by blast show "\<exists>N. 
SA_poly_ubounded p m f (center B) (condition_to_set B) N" apply(rule exI, rule SA_poly_uboundedI[of _ _ _ _ 0]) using f_closed apply blast unfolding center_B apply (simp add: c_closed) using A S_def apply (metis is_cellI is_cell_decompE(3) is_cell_decompE(4) is_cell_subset) proof- fix x t i assume B': " t # x \<in> condition_to_set B" then have P0: "t # x \<in> A\<^sub>0" using A S_def is_cell_decompE by (meson in_mono is_cell_decomp_subset) hence P1: " val (SA_poly_to_SA_fun m f (t # x)) = (MIN i\<in>inds. (val ( (a i x)\<otimes>(t \<ominus> c x)[^]i)))" using val_f_on_A\<^sub>0[of "t#x"] assms P0 unfolding list_tl list_hd by auto have t_closed: "t \<in> carrier Q\<^sub>p" using P0 A\<^sub>0_def cartesian_power_head by (metis (no_types, lifting) B' cell_memE(1) condition_to_set.simps list_hd padic_fields.condition_decomp' padic_fields_axioms) have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using P0 A\<^sub>0_def cartesian_power_tail[of "t#x" Q\<^sub>p m] by (metis (no_types, lifting) A\<^sub>0_closed list_tl subsetD) have x_closed': "x \<in> A" using P0 A\<^sub>0_def \<C>_memE(3) by fastforce have P2: "val (SA_poly_to_Qp_poly m x f \<bullet> t) = (MIN i\<in>inds. (val ( (a i x)\<otimes>(t \<ominus> c x)[^]i)))" using P1 SA_poly_to_SA_fun_formula[of f m x t] A x_closed t_closed using f_closed by force have P3: "i \<in> inds \<Longrightarrow> val (SA_poly_to_Qp_poly m x f \<bullet> t) \<le> (val ( (a i x)\<otimes>(t \<ominus> c x)[^]i))" apply(rule MinE''[of inds]) using inds_finite apply blast apply blast using P2 apply blast by blast have P4: "UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t = taylor_expansion Q\<^sub>p (c x) (SA_poly_to_Qp_poly m x f) i \<otimes> (t \<ominus> c x) [^] i" using A UPQ.to_fun_taylor_term[of "SA_poly_to_Qp_poly m x f" t "c x" i] SA_poly_to_Qp_poly_closed[of x m f] t_closed c_closed x_closed SA_car_memE(3)[of c m] unfolding UPQ.taylor_def using f_closed by blast have P5: "taylor_expansion (SA m) c f i x = taylor_expansion Q\<^sub>p (c x) (SA_poly_to_Qp_poly m x f) i" using SA_poly_to_Qp_poly_taylor_cfs[of f m x c i] c_closed x_closed f_closed by blast have P6: "i \<in> inds \<Longrightarrow> (UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t) = (a i x)\<otimes>(t \<ominus> c x)[^]i" using a_eval a_def x_closed' unfolding P4 P5 a_def by auto have P7: "i \<in> inds \<Longrightarrow> val (SA_poly_to_Qp_poly m x f \<bullet> t) \<le> val (UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t)" using P6 unfolding UPQ.taylor_term_def using P3 by presburger have P8: "i \<notin> inds \<Longrightarrow> UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t = \<zero>" using x_closed' inds_memE c_closed x_closed t_closed SA_car_memE(3)[of c m] unfolding P4 P5 a_def by (metis P5 Qp.cring_simprules(26) Qp.cring_simprules(4) Qp.nat_pow_closed SA_car_closed a_def inds_non_memE) have P9: "i \<notin> inds \<Longrightarrow> val (UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t) = \<infinity>" using val_zero unfolding P8 by blast show "val (SA_poly_to_Qp_poly m x f \<bullet> t) \<le> val (UPQ.taylor_term (c x) (SA_poly_to_Qp_poly m x f) i \<bullet> t) + eint 0" apply(cases "i \<in> inds") using P7 apply (metis add.right_neutral eint_defs(1)) unfolding P9 by (metis add.right_neutral eint_defs(1) eint_ord_code(3)) qed qed thus ?thesis using S_def by auto qed end locale A\<^sub>0_refinement = common_refinement_locale + fixes B b b1 b2 J assumes B_cell: "is_cell_condition B" assumes B_eq: "B = Cond m b c b1 b2 
J" assumes B_subset: "condition_to_set B \<subseteq> A\<^sub>0" context A\<^sub>0_refinement begin text\<open>We wish to decompose the set $A_0$ into finer cells so that on each cell, there is always a fixed $i_0$ so that $val (a_{i_0}(x)(t- c(x))^{i_0})$ is minimal. This was easy to do on the complement of $A_0$ because this value did not depend on $t$, but here this will take some extra work. Here we assume we already have obtained a cell in a decomposition of $A_0$, and will further decompose this cell until we have our desired property.\<close> definition refinement_functions where "refinement_functions = insert \<zero>\<^bsub>SA m\<^esub> (\<Theta> ` ordered_ind_pairs)" definition refined_decomp where "refined_decomp = (SOME S. is_cell_decomp m S (condition_to_set (Cond m b c b1 b2 J)) \<and> (\<forall>C\<in>S. center C = c \<and> (\<forall>f\<in>refinement_functions. \<forall>g\<in>refinement_functions. \<forall>I. is_convex_condition I \<longrightarrow> condition_to_set C \<subseteq> condition_to_set (Cond m b c f g I) \<or> condition_to_set C \<inter> condition_to_set (Cond m b c f g I) = {})))" lemma refined_decomp_prop: "is_cell_decomp m refined_decomp (condition_to_set (Cond m b c b1 b2 J)) \<and> (\<forall>C\<in> refined_decomp. center C = c \<and> (\<forall>f\<in>refinement_functions. \<forall>g\<in>refinement_functions. \<forall>I. is_convex_condition I \<longrightarrow> condition_to_set C \<subseteq> condition_to_set (Cond m b c f g I) \<or> condition_to_set C \<inter> condition_to_set (Cond m b c f g I) = {}))" proof- have 0: "finite refinement_functions" proof- have 0: "refinement_functions \<subseteq> insert \<zero>\<^bsub>SA m\<^esub> (\<Theta> ` ind_pairs)" unfolding refinement_functions_def ordered_ind_pairs_def by auto have "finite (insert \<zero>\<^bsub>SA m\<^esub> (\<Theta> ` ind_pairs))" using finite_ind_pairs by auto thus ?thesis using 0 finite_subset by blast qed have 1: "refinement_functions \<subseteq> carrier (SA m)" unfolding refinement_functions_def using \<Theta>_unit by blast have 0: " \<exists>S. is_cell_decomp m S (condition_to_set (Cond m b c b1 b2 J)) \<and> (\<forall>C\<in>S. center C = c \<and> (\<forall>f\<in>refinement_functions. \<forall>g\<in>refinement_functions. \<forall>I. is_convex_condition I \<longrightarrow> condition_to_set C \<subseteq> condition_to_set (Cond m b c f g I) \<or> condition_to_set C \<inter> condition_to_set (Cond m b c f g I) = {}))" using 0 1 semialg_boundary_cell_decomp[of refinement_functions m B b c b1 b2 J] refinement_functions_def B_eq B_cell by auto then obtain S where S_def: "is_cell_decomp m S (condition_to_set (Cond m b c b1 b2 J)) \<and> (\<forall>C\<in>S. center C = c \<and> (\<forall>f\<in>refinement_functions. \<forall>g\<in>refinement_functions. \<forall>I. 
is_convex_condition I \<longrightarrow> condition_to_set C \<subseteq> condition_to_set (Cond m b c f g I) \<or> condition_to_set C \<inter> condition_to_set (Cond m b c f g I) = {}))" by blast thus ?thesis using refined_decomp_def SomeE[of refined_decomp _ S] by blast qed lemma refined_decomp_subset: assumes "\<B> \<in> refined_decomp" shows "condition_to_set \<B> \<subseteq> condition_to_set B" using assms is_cell_decomp_subset[of m refined_decomp "condition_to_set B" \<B>] refined_decomp_prop unfolding B_eq by auto lemma refined_decomp_closure: assumes "\<B> \<in> refined_decomp" assumes "t#x \<in> condition_to_set \<B>" shows "t \<in> carrier Q\<^sub>p" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" "t \<ominus> c x \<in> carrier Q\<^sub>p" proof- show "t \<in> carrier Q\<^sub>p" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms B_cell refined_decomp_subset Qp_pow_ConsE[of "t#x" m] unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd by auto thus "t \<ominus> c x \<in> carrier Q\<^sub>p" using Qp.cring_simprules(4) SA_car_closed c_closed by presburger qed lemma refined_decomp_static_order1: assumes "\<B> \<in> refined_decomp" assumes "t#x \<in> condition_to_set \<B>" assumes "(i,j) \<in> ordered_ind_pairs" shows "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow> val (t \<ominus> c x) \<le> val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) \<le> val (\<Theta>(i,j) y)" "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow> val (t \<ominus> c x) < val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) < val (\<Theta>(i,j) y)" proof- fix s y assume a: "s#y \<in> condition_to_set \<B>" have sy_in: "s#y \<in> condition_to_set B" using a assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have s_closed: "s \<in> carrier Q\<^sub>p" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(2) list.sel(1)) have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(1) list.sel(3)) have tx_in: "t#x \<in> condition_to_set B" using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have t_closed: "t \<in> carrier Q\<^sub>p" using assms refined_decomp_closure by auto have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms refined_decomp_closure by auto have i_le_j: "i < j" using assms ordered_ind_pairs_def by auto have in_inds: "i \<in> inds" "j \<in> inds" using assms ordered_ind_pairs_def ind_pairs_def by auto have F0: "t#x \<in> A\<^sub>0" using tx_in assms B_subset by blast show "val (\<Theta>(i,j) x) \<ge> val (t \<ominus> c x) \<Longrightarrow> val (\<Theta>(i,j) y) \<ge> val (s \<ominus> c y)" proof- assume A: "val (\<Theta>(i,j) x) \<ge> val (t \<ominus> c x)" then have 0: "t#x \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_ray)" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_ray_def using B_cell B_eq Qp_pow_ConsI padic_fields.condition_to_set_memE'(1) padic_fields_axioms t_closed tx_in x_closed by auto hence 1: "condition_to_set \<B> \<subseteq> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_ray)" using assms refined_decomp_prop unfolding refinement_functions_def is_convex_condition_def by blast hence "s#y \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_ray)" using a by auto thus "val (\<Theta>(i,j) y) \<ge> val (s 
\<ominus> c y)" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_ray_def by auto qed show "val (\<Theta>(i,j) x) > val (t \<ominus> c x) \<Longrightarrow> val (\<Theta>(i,j) y) > val (s \<ominus> c y)" proof- assume A: "val (\<Theta>(i,j) x) > val (t \<ominus> c x)" then have 0: "t#x \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) open_ray)" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd using A\<^sub>0_closed B_cell B_eq F0 condition_to_set_memE'(1) open_ray_memI tx_in by auto hence 1: "condition_to_set \<B> \<subseteq> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) open_ray)" using assms refined_decomp_prop unfolding refinement_functions_def is_convex_condition_def by blast hence "s#y \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) open_ray)" using a by auto thus "val (\<Theta>(i,j) y) > val (s \<ominus> c y)" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd open_ray_def by auto qed qed lemma refined_decomp_static_order2: assumes "\<B> \<in> refined_decomp" assumes "t#x \<in> condition_to_set \<B>" assumes "(i,j) \<in> ordered_ind_pairs" shows "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow> val (t \<ominus> c x) \<ge> val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) \<ge> val (\<Theta>(i,j) y)" "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow> val (t \<ominus> c x) > val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) > val (\<Theta>(i,j) y)" proof- fix s y assume A: "s#y \<in> condition_to_set \<B> " have 0: "val (s \<ominus> c y) < val (\<Theta> (i, j) y) \<Longrightarrow> val (t \<ominus> c x) < val (\<Theta> (i, j) x)" using A assms refined_decomp_static_order1(2)[of \<B> s y i j t x] by auto have 1: "val (s \<ominus> c y) \<le> val (\<Theta> (i, j) y) \<Longrightarrow> val (t \<ominus> c x) \<le> val (\<Theta> (i, j) x)" using A assms refined_decomp_static_order1(1)[of \<B> s y i j t x] by auto have 2: "\<And> x y::eint. x < y \<longleftrightarrow> \<not> y \<le> x" by auto show "val (t \<ominus> c x) \<ge> val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) \<ge> val (\<Theta>(i,j) y)" using 0 unfolding 2 by auto show "val (t \<ominus> c x) > val (\<Theta>(i,j) x) \<Longrightarrow> val (s \<ominus> c y) > val (\<Theta>(i,j) y)" using 1 unfolding 2 by auto qed lemma val_in_B_zero: assumes "\<B> \<in> refined_decomp" assumes "t#x \<in> condition_to_set \<B>" assumes "(i,j) \<in> ordered_ind_pairs" assumes "t \<ominus> c x = \<zero>" shows "\<And>s y. 
s#y \<in> condition_to_set \<B> \<Longrightarrow> s \<ominus> c y = \<zero>" proof- fix s y assume A: "s#y \<in> condition_to_set \<B>" have sy_in: "s#y \<in> condition_to_set B" using A assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have s_closed: "s \<in> carrier Q\<^sub>p" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(2) list.sel(1)) have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(1) list.sel(3)) have tx_in: "t#x \<in> condition_to_set B" using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have t_closed: "t \<in> carrier Q\<^sub>p" using assms refined_decomp_closure by auto have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms refined_decomp_closure by auto have F0: "t#x \<in> A\<^sub>0" using tx_in assms B_subset by blast have "t#x \<in> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def assms val_def using A\<^sub>0_closed B_cell B_eq SA_zeroE F0 padic_fields.condition_to_set_memE'(1) padic_fields_axioms tx_in x_closed by auto then have "condition_to_set \<B> \<subseteq>condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)" using assms refined_decomp_prop unfolding is_convex_condition_def refinement_functions_def by blast hence "s#y \<in> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)" using A by auto thus "s \<ominus> c y = \<zero>" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def val_def by (smt (verit, best) Extended_Int.infinity_ileE SA_zeroE y_closed) qed lemma val_in_B_nonzero: assumes "\<B> \<in> refined_decomp" assumes "t#x \<in> condition_to_set \<B>" assumes "(i,j) \<in> ordered_ind_pairs" assumes "t \<ominus> c x \<noteq> \<zero>" shows "\<And>s y. 
s#y \<in> condition_to_set \<B> \<Longrightarrow> s \<ominus> c y \<noteq> \<zero>" proof- fix s y assume A: "s#y \<in> condition_to_set \<B>" have sy_in: "s#y \<in> condition_to_set B" using A assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have s_closed: "s \<in> carrier Q\<^sub>p" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(2) list.sel(1)) have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(1) list.sel(3)) have tx_in: "t#x \<in> condition_to_set B" using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have t_closed: "t \<in> carrier Q\<^sub>p" using assms refined_decomp_closure by auto have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms refined_decomp_closure by auto have F0: "t#x \<in> A\<^sub>0" using tx_in assms B_subset by blast have "t#x \<notin> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def val_def using A\<^sub>0_closed B_cell B_eq SA_zeroE F0 padic_fields.condition_to_set_memE'(1) padic_fields_axioms assms tx_in x_closed by auto then have "condition_to_set \<B> \<inter> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval) = {}" using assms refined_decomp_prop unfolding is_convex_condition_def refinement_functions_def by blast hence "s#y \<notin> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)" using A by auto thus "s \<ominus> c y \<noteq> \<zero>" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def by (metis A assms(1) assms(2) assms(3) assms(4) val_in_B_zero) qed
lemma ineq_equivalence: assumes "\<alpha> \<in> Units Q\<^sub>p" assumes "\<beta> \<in> Units Q\<^sub>p" assumes "x \<in> Units Q\<^sub>p" shows "val (\<alpha> \<otimes> x[^](i::nat)) < val (\<beta> \<otimes> x[^](j::nat)) \<Longrightarrow> ord \<alpha> - ord \<beta> < (int j - int i)*ord x" "ord \<alpha> - ord \<beta> < (int j - int i)*ord x \<Longrightarrow> val (\<alpha> \<otimes> x[^](i::nat)) < val (\<beta> \<otimes> x[^](j::nat))" "val (\<alpha> \<otimes> x[^](i::nat)) > val (\<beta> \<otimes> x[^](j::nat)) \<Longrightarrow> ord \<alpha> - ord \<beta> > (int j - int i)*ord x" "ord \<alpha> - ord \<beta> > (int j - int i)*ord x \<Longrightarrow> val (\<alpha> \<otimes> x[^](i::nat)) > val (\<beta> \<otimes> x[^](j::nat))" by(auto simp: Qp.Units_pow_closed Units_nonzero_Qp assms(1) assms(2) assms(3) int_distrib(3) nonzero_nat_pow_ord ord_mult)
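text\<open>A sketch of the computation behind the preceding lemma (informal commentary): on units the valuation agrees with the order, and the order of a product is the sum of the orders, so for units $\alpha$, $\beta$, $x$ we have
\[ \mathrm{val}(\alpha x^i) < \mathrm{val}(\beta x^j) \iff \mathrm{ord}(\alpha) + i\,\mathrm{ord}(x) < \mathrm{ord}(\beta) + j\,\mathrm{ord}(x) \iff \mathrm{ord}(\alpha) - \mathrm{ord}(\beta) < (j - i)\,\mathrm{ord}(x), \]
and likewise with the inequalities reversed. The next lemma combines this with the defining property of $\Theta$ to translate comparisons of the terms $a_i(x)(t - c(x))^i$ into comparisons of $\mathrm{val}(t - c(x))$ against $\mathrm{val}(\Theta(i,j)(x))$.\<close>
lemma val_ineq_theta_ineq1: assumes "\<B> \<in> refined_decomp" assumes "t#x \<in> condition_to_set \<B>" assumes "(i,j) \<in> ordered_ind_pairs" assumes "t \<ominus> c x \<noteq> \<zero>" shows "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) < val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<Longrightarrow> val (t \<ominus> c x) > val (\<Theta>(i,j) x)" "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<Longrightarrow> val (\<Theta> (i, j) x) \<ge> val (t \<ominus> c x)" "val (t \<ominus> c x) > val (\<Theta> (i, j) x) \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x) [^] i) < val (a j x \<otimes> (t \<ominus> c x) [^] j)" "val (t \<ominus> c x) \<le> val (\<Theta>(i,j) x) \<Longrightarrow> val ((a i x)\<otimes>(t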
\<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)" proof- have tx_in: "t#x \<in> condition_to_set B" using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have inA0: "t#x \<in> A\<^sub>0" using tx_in assms B_subset by blast have t_closed: "t \<in> carrier Q\<^sub>p" using assms refined_decomp_closure by auto have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms refined_decomp_closure by auto have i_le_j: "i < j" using assms ordered_ind_pairs_def by auto have in_inds: "i \<in> inds" "j \<in> inds" using assms ordered_ind_pairs_def ind_pairs_def by auto have 0: "(int j - int i) * ord (\<Theta>(i,j) x) + (ord (a i x) - ord (a j x)) mod (int j - int i) = ord (a i x) - ord (a j x)" using x_closed \<Theta>_ord[of i j x] assms by (metis (mono_tags, opaque_lifting) SA_Units_nonzero SA_div_eval a_cfs_closed in_inds(1) in_inds(2) inds_memE ord_fract) have units: "a i x \<in> Units Q\<^sub>p" "a j x \<in> Units Q\<^sub>p" "t \<ominus> c x \<in> Units Q\<^sub>p" using i_le_j in_inds A\<^sub>0_closures assms inA0 by auto have diff_pos: "(int j - int i) > 0" using i_le_j by auto have mod_pos: "(ord (a i x) - ord (a j x)) mod (int j - int i) \<ge> 0" using assms by (simp add: i_le_j) have ineq: "val (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> val (a j x \<otimes> (t \<ominus> c x) [^] j)" "ord (a i x \<otimes> (t \<ominus> c x) [^] i) \<noteq> ord (a j x \<otimes> (t \<ominus> c x) [^] j)" using inA0 assms in_inds i_le_j A\<^sub>0_memE[of t x i j] by auto show g1: "val (a i x \<otimes> (t \<ominus> c x) [^] i) < val (a j x \<otimes> (t \<ominus> c x) [^] j) \<Longrightarrow> val (\<Theta> (i, j) x) < val (t \<ominus> c x)" proof- assume A: "val (a i x \<otimes> (t \<ominus> c x) [^] i) < val (a j x \<otimes> (t \<ominus> c x) [^] j)" have 1: "ord (a i x) - ord (a j x) < (int j- int i)*ord (t \<ominus> c x)" by(rule ineq_equivalence, auto simp: units A) hence 2: "(int j - int i) * ord (\<Theta>(i,j) x) < (int j - int i)* ord(t \<ominus> c x)" using mod_pos 1 0 by auto hence 3: "ord (\<Theta>(i,j) x) < ord(t \<ominus> c x)" by (simp add: i_le_j) thus "val (\<Theta> (i, j) x) < val (t \<ominus> c x)" by (metis (mono_tags, lifting) SA_Units_memE' \<Theta>_unit assms(3) assms(4) eint_ord_simps(2) val_def x_closed) qed show g2: "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<Longrightarrow> val (\<Theta> (i, j) x) \<ge> val (t \<ominus> c x)" proof- assume A: "val (a i x \<otimes> (t \<ominus> c x) [^] i) > val (a j x \<otimes> (t \<ominus> c x) [^] j)" have 1: "ord (a i x) - ord (a j x) > (int j- int i)*ord (t \<ominus> c x)" by(rule ineq_equivalence, auto simp: units A) hence 2: "(int j - int i) * ord (\<Theta>(i,j) x) + (ord (a i x) - ord (a j x)) mod (int j - int i) > (int j - int i)* ord(t \<ominus> c x)" using mod_pos 1 0 by auto hence 3: "(ord (a i x) - ord (a j x)) mod (int j - int i) > (int j - int i)*( ord(t \<ominus> c x) - ord (\<Theta>(i,j) x))" by (smt (verit, ccfv_SIG) nat_distrib(2)) have 4: "( ord(t \<ominus> c x) - ord (\<Theta>(i,j) x)) \<le> 0" proof- have R: "\<And> m a b::int. 
m > 0 \<Longrightarrow> a mod m > m*b \<Longrightarrow> b \<le> 0" by (smt (verit, ccfv_SIG) Euclidean_Division.pos_mod_bound mod_mult_self1_is_0 mod_pos_pos_trivial mult_less_cancel_right mult_sign_intros(1)) show ?thesis apply(rule R[of "(int j - int i)" _ "(ord (a i x) - ord (a j x)) mod (int j - int i)"]) using i_le_j 3 by auto qed hence 3: "ord (\<Theta>(i,j) x) \<ge> ord(t \<ominus> c x)" by (simp add: i_le_j) thus "val (\<Theta> (i, j) x) \<ge> val (t \<ominus> c x)" using Units_eq_nonzero eint_ord_simps(1) eint_ord_simps(3) units(3) val_def val_ord by presburger qed have "val (t \<ominus> c x) > val (\<Theta> (i, j) x) \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x) [^] i) \<le> val (a j x \<otimes> (t \<ominus> c x) [^] j)" using g2 notin_closed by blast thus g3: "val (t \<ominus> c x) > val (\<Theta> (i, j) x) \<Longrightarrow> val (a i x \<otimes> (t \<ominus> c x) [^] i) < val (a j x \<otimes> (t \<ominus> c x) [^] j)" using g2 ineq using ineq by auto have "val (t \<ominus> c x) \<le> val (\<Theta> (i, j) x) \<Longrightarrow> val (a j x \<otimes> (t \<ominus> c x) [^] j) \<le> val (a i x \<otimes> (t \<ominus> c x) [^] i)" using g1 notin_closed by blast thus g4: "val (t \<ominus> c x) \<le> val (\<Theta> (i, j) x) \<Longrightarrow> val (a j x \<otimes> (t \<ominus> c x) [^] j) < val (a i x \<otimes> (t \<ominus> c x) [^] i)" using g1 ineq using ineq by auto qed lemma val_in_B0: assumes "\<B> \<in> refined_decomp" assumes "t#x \<in> condition_to_set \<B>" assumes "(i,j) \<in> ordered_ind_pairs" assumes "t \<ominus> c x \<noteq> \<zero>" assumes "val (t \<ominus> c x) = val (\<Theta> (i,j) x)" shows "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow> val (s \<ominus> c y) = val (\<Theta> (i,j) y)" proof- fix s y assume A: "s#y \<in> condition_to_set \<B>" have sy_in: "s#y \<in> condition_to_set B" using A assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have s_closed: "s \<in> carrier Q\<^sub>p" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(2) list.sel(1)) have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(1) list.sel(3)) have tx_in: "t#x \<in> condition_to_set B" using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have t_closed: "t \<in> carrier Q\<^sub>p" using assms refined_decomp_closure by auto have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms refined_decomp_closure by auto have i_le_j: "i < j" using assms ordered_ind_pairs_def by auto have in_inds: "i \<in> inds" "j \<in> inds" using assms ordered_ind_pairs_def ind_pairs_def by auto have F0: "t#x \<in> A\<^sub>0" using tx_in assms B_subset by blast have F1: "t#x \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_interval)" using assms tx_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def by (simp add: SA_zeroE val_def x_closed) hence "condition_to_set \<B> \<subseteq> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_interval)" using assms refined_decomp_prop unfolding is_convex_condition_def refinement_functions_def by blast hence F3: "s#y \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) (\<Theta>(i,j)) closed_interval)" using A by auto thus "val (s \<ominus> c y) = val (\<Theta> (i, j) y)" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd 
closed_interval_def by auto qed lemma val_in_B1: assumes "\<B> \<in> refined_decomp" assumes "t#x \<in> condition_to_set \<B>" assumes "(i,j) \<in> ordered_ind_pairs" assumes "t \<ominus> c x \<noteq> \<zero>" assumes "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) < val ((a j x)\<otimes>(t \<ominus> c x)[^]j)" shows "\<And>s y. s#y \<in> condition_to_set \<B> \<Longrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) < val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" proof- fix s y assume A: "s#y \<in> condition_to_set \<B>" have sy_in: "s#y \<in> condition_to_set B" using A assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have s_closed: "s \<in> carrier Q\<^sub>p" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(2) list.sel(1)) have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(1) list.sel(3)) have tx_in: "t#x \<in> condition_to_set B" using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have t_closed: "t \<in> carrier Q\<^sub>p" using assms refined_decomp_closure by auto have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms refined_decomp_closure by auto have i_le_j: "i < j" using assms ordered_ind_pairs_def by auto have in_inds: "i \<in> inds" "j \<in> inds" using assms ordered_ind_pairs_def ind_pairs_def by auto have F0: "t#x \<in> A\<^sub>0" using tx_in assms B_subset by blast have F1: "val (t \<ominus> c x) > val (\<Theta>(i,j) x)" using val_ineq_theta_ineq1 assms by auto have F2: "t#x \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) \<zero>\<^bsub>SA m\<^esub> left_closed_interval)" using assms tx_in F1 unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd left_closed_interval_def by (simp add: SA_zeroE val_def x_closed) hence "condition_to_set \<B> \<subseteq>condition_to_set (Cond m b c (\<Theta>(i,j)) \<zero>\<^bsub>SA m\<^esub> left_closed_interval)" using assms refined_decomp_prop unfolding is_convex_condition_def refinement_functions_def by blast hence F3: "s#y \<in> condition_to_set (Cond m b c (\<Theta>(i,j)) \<zero>\<^bsub>SA m\<^esub> left_closed_interval)" using A by auto hence F4: "val (s \<ominus> c y) \<ge> val (\<Theta>(i,j) y)" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd left_closed_interval_def by auto have F5: "s \<ominus> c y \<noteq> \<zero>" using F3 unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd left_closed_interval_def using local.val_zero by force have F6: "val (s \<ominus> c y) \<noteq> val (\<Theta>(i,j) y)" using val_in_B0[of \<B> s y i j t x] assms A F1 by (metis F5 basic_trans_rules(20)) hence F7: "val (s \<ominus> c y) > val (\<Theta>(i,j) y)" using F4 F6 by auto show "val (a i y \<otimes> (s \<ominus> c y) [^] i) < val (a j y \<otimes> (s \<ominus> c y) [^] j)" apply(rule val_ineq_theta_ineq1[of \<B>]) using assms A F7 F5 by auto qed lemma val_in_B2: assumes "\<B> \<in> refined_decomp" assumes "t#x \<in> condition_to_set \<B>" assumes "(i,j) \<in> ordered_ind_pairs" assumes "t \<ominus> c x \<noteq> \<zero>" assumes "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j)" shows "\<And>s y. 
s#y \<in> condition_to_set \<B> \<Longrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" proof- fix s y assume A: "s#y \<in> condition_to_set \<B>" have sy_in: "s#y \<in> condition_to_set B" using A assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have s_closed: "s \<in> carrier Q\<^sub>p" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(2) list.sel(1)) have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms B_cell sy_in unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq by (metis Qp_pow_ConsE(1) list.sel(3)) have tx_in: "t#x \<in> condition_to_set B" using assms refined_decomp_prop is_cell_decomp_subset B_eq by blast have t_closed: "t \<in> carrier Q\<^sub>p" using assms refined_decomp_closure by auto have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using assms refined_decomp_closure by auto have i_le_j: "i < j" using assms ordered_ind_pairs_def by auto have in_inds: "i \<in> inds" "j \<in> inds" using assms ordered_ind_pairs_def ind_pairs_def by auto have F0: "t#x \<in> A\<^sub>0" "s#y \<in> A\<^sub>0" using tx_in sy_in assms B_subset by auto have F1: "val (a j y \<otimes> (s \<ominus> c y) [^] j) \<noteq> val (a i y \<otimes> (s \<ominus> c y) [^] i)" "val (a j x \<otimes> (t \<ominus> c x) [^] j) \<noteq> val (a i x \<otimes> (t \<ominus> c x) [^] i)" using F0 A\<^sub>0_memE assms apply (metis A i_le_j in_inds(1) in_inds(2) val_in_B_nonzero) using F0 A\<^sub>0_memE assms by auto show "val (a j y \<otimes> (s \<ominus> c y) [^] j) < val (a i y \<otimes> (s \<ominus> c y) [^] i)" using val_in_B1[of \<B> s y i j t x] F1 assms by (metis A basic_trans_rules(20) notin_closed val_in_B_zero val_ineq_theta_ineq1(3) val_ineq_theta_ineq1(4)) qed
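text\<open>Combining the preceding lemmas (an informal summary): for a fixed pair of indices $i < j$, the truth of the comparison $\mathrm{val}(a_j(x)(t - c(x))^j) < \mathrm{val}(a_i(x)(t - c(x))^i)$ is the same at every point of a refined cell. The next lemma records this; the lemma val_in_B then extends it to arbitrary pairs of distinct indices.\<close>
lemma pre_val_in_B: assumes "\<B> \<in> refined_decomp" assumes "(i,j) \<in> ordered_ind_pairs" shows "\<And>s y t x .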
t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow> val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j) " proof- fix s y t x assume A: " t#x \<in> condition_to_set \<B>" "s#y \<in> condition_to_set \<B>" have 0: "condition_to_set \<B> \<subseteq> condition_to_set B" using assms B_eq is_cell_decomp_subset refined_decomp_prop by blast have units: "a j x \<in> Units Q\<^sub>p" "a i x \<in> Units Q\<^sub>p" "a i y \<in> Units Q\<^sub>p" "a j y \<in> Units Q\<^sub>p" using A\<^sub>0_closures A B_subset assms 0 unfolding ordered_ind_pairs_def ind_pairs_def by auto have 1: "i \<in> inds" "j \<in> inds" "i < j" using assms unfolding ordered_ind_pairs_def ind_pairs_def by auto show "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j) " proof(cases "t \<ominus> c x = \<zero>") case True then have T0: "s \<ominus> c y = \<zero>" using A val_in_B_zero[of \<B> t x i j] assms by auto have j_pos: "j > 0" using assms ordered_ind_pairs_def by auto hence T1: "(a j y \<otimes> \<zero> [^] j) = \<zero>" "(a j x \<otimes> \<zero> [^] j) = \<zero>" using assms units Qp.Units_closed Qp.pow_zero Qp.r_null by auto show ?thesis unfolding T1 val_def True using T0 T1(1) eint_ord_code(6) by presburger next case False then have F0: "s \<ominus> c y \<noteq> \<zero>" using A val_in_B_nonzero[of \<B> t x i j] assms by auto have F1: "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)" "val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<noteq> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" using A\<^sub>0_memE[of _ _ i j] assms A 1 apply (meson 0 B_subset False subset_iff) using A\<^sub>0_memE[of _ _ i j] assms A 1 by (meson "0" B_subset F0 basic_trans_rules(31)) show ?thesis proof show 0: "val (a j x \<otimes> (t \<ominus> c x) [^] j) < val (a i x \<otimes> (t \<ominus> c x) [^] i) \<Longrightarrow> val (a j y \<otimes> (s \<ominus> c y) [^] j) < val (a i y \<otimes> (s \<ominus> c y) [^] i)" apply(rule val_in_B2[of \<B> t x]) using assms A False by auto show 1: "val (a j y \<otimes> (s \<ominus> c y) [^] j) < val (a i y \<otimes> (s \<ominus> c y) [^] i) \<Longrightarrow> val (a j x \<otimes> (t \<ominus> c x) [^] j) < val (a i x \<otimes> (t \<ominus> c x) [^] i)" using assms F0 A val_in_B2[of \<B> s y i j] 0 F1 by blast qed qed qed lemma val_in_B: assumes "\<B> \<in> refined_decomp" assumes "i \<in> inds" assumes "j \<in> inds" assumes "i \<noteq> j" shows "\<And>s y t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow> val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" "\<And>t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> t \<ominus> c x \<noteq> \<zero> \<Longrightarrow> val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)" "\<And>s y t x . 
t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow> t \<ominus> c x = \<zero> \<longleftrightarrow> s \<ominus> c y = \<zero>" proof- have sub: "condition_to_set \<B> \<subseteq> A\<^sub>0" using assms B_eq B_subset is_cell_decomp_subset refined_decomp_prop by blast show "\<And>t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> t \<ominus> c x \<noteq> \<zero> \<Longrightarrow> val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)" proof(cases "i < j") case True show "\<And>t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> t \<ominus> c x \<noteq> \<zero> \<Longrightarrow> val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)" using A\<^sub>0_memE(1)[of _ _ i j] sub assms True by auto next case False show "\<And>t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> t \<ominus> c x \<noteq> \<zero> \<Longrightarrow> val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)" using A\<^sub>0_memE(1)[of _ _ j i] sub assms False by (smt (z3) nat_neq_iff subset_iff) qed next show "\<And>s y t x. t # x \<in> condition_to_set \<B> \<Longrightarrow> s # y \<in> condition_to_set \<B> \<Longrightarrow> (val (a j x \<otimes> (t \<ominus> c x) [^] j) < val (a i x \<otimes> (t \<ominus> c x) [^] i)) = (val (a j y \<otimes> (s \<ominus> c y) [^] j) < val (a i y \<otimes> (s \<ominus> c y) [^] i))" proof(cases "i < j") case True then have "(i,j) \<in> ordered_ind_pairs" unfolding ordered_ind_pairs_def ind_pairs_def using assms by auto thus "\<And>s y t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow> val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j) " using assms pre_val_in_B by metis next case False then have ind: "(j,i) \<in> ordered_ind_pairs" unfolding ordered_ind_pairs_def ind_pairs_def using assms by auto hence F0: "\<And>s y t x . 
t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow> val ((a j x)\<otimes>(t \<ominus> c x)[^]j) > val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<longleftrightarrow> val ((a j y)\<otimes>(s \<ominus> c y)[^]j) > val ((a i y)\<otimes>(s \<ominus> c y)[^]i) " using assms pre_val_in_B by metis fix t x s y assume A: " t # x \<in> condition_to_set \<B>" " s # y \<in> condition_to_set \<B>" have inA0: "t#x \<in> A\<^sub>0" "s#y \<in> A\<^sub>0" using A B_subset assms is_cell_decomp_subset refined_decomp_prop B_eq basic_trans_rules(31) apply metis using A B_subset assms is_cell_decomp_subset refined_decomp_prop B_eq basic_trans_rules(31) by metis have units: "a j x \<in> Units Q\<^sub>p" "a i x \<in> Units Q\<^sub>p" "a i y \<in> Units Q\<^sub>p" "a j y \<in> Units Q\<^sub>p" using A\<^sub>0_closures(1,2) A inA0 B_subset assms A ind unfolding ordered_ind_pairs_def ind_pairs_def by auto show "(val (a j x \<otimes> (t \<ominus> c x) [^] j) < val (a i x \<otimes> (t \<ominus> c x) [^] i)) = (val (a j y \<otimes> (s \<ominus> c y) [^] j) < val (a i y \<otimes> (s \<ominus> c y) [^] i))" proof(cases "t \<ominus> c x = \<zero>") case T: True then have T0: "s \<ominus> c y = \<zero>" using ind assms A val_in_B_zero[of \<B> t x j i s y] by auto have i_pos: "i > 0" using ind assms ordered_ind_pairs_def by auto hence T1: "(a i y \<otimes> \<zero> [^] i) = \<zero>" "(a i x \<otimes> \<zero> [^] i) = \<zero>" using assms A units Qp.Units_closed Qp.pow_zero Qp.r_null by auto hence T2: "val (a i y \<otimes> \<zero> [^] i) = \<infinity>" "val (a i x \<otimes> \<zero> [^] i) = \<infinity>" using val_def by auto show ?thesis using units unfolding T0 T2 T by (metis (no_types, opaque_lifting) Qp.Units_closed Qp.Units_not_right_zero_divisor Qp.cring_simprules(2) Qp.cring_simprules(27) Qp.nat_pow_closed eint.distinct(2) eint_ord_simps(4) val_def) next case F: False then have 0: "s \<ominus> c y \<noteq> \<zero>" using ind assms A val_in_B_nonzero[of \<B> t x j i s y] by auto have 1: "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)" "val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<noteq> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" using False F 0 inA0 assms A\<^sub>0_memE[of s y j i] A\<^sub>0_memE[of t x j i] by auto then show ?thesis using A F0[of t x s y] by auto qed qed next have "\<And>s y t x. 
t # x \<in> condition_to_set \<B> \<Longrightarrow> s # y \<in> condition_to_set \<B> \<Longrightarrow> (t \<ominus> c x = \<zero>) \<Longrightarrow> (s \<ominus> c y = \<zero>)" proof- fix t x s y assume A: " t # x \<in> condition_to_set \<B>" " s # y \<in> condition_to_set \<B>" show "(t \<ominus> c x = \<zero>) \<Longrightarrow> (s \<ominus> c y = \<zero>)" proof- assume B: "t \<ominus> c x = \<zero>" then have "t#x\<in> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def using A by (metis (mono_tags, lifting) A\<^sub>0_closed B_cell B_eq B_subset SA_zeroE assms(1) cartesian_power_tail eint_ord_simps(3) list.sel(3) local.val_zero padic_fields.condition_to_set_memE'(1) padic_fields.is_cell_decomp_subset padic_fields_axioms refined_decomp_prop subset_iff) hence "condition_to_set \<B> \<subseteq> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)" using assms A refined_decomp_prop unfolding is_convex_condition_def refinement_functions_def by blast hence "s#y\<in> condition_to_set (Cond m b c \<zero>\<^bsub>SA m\<^esub> \<zero>\<^bsub>SA m\<^esub> closed_interval)" using A by auto thus "s \<ominus> c y = \<zero>" unfolding condition_to_set.simps cell_def mem_Collect_eq list_tl list_hd closed_interval_def by (metis Qp.cring_simprules(4) Qp_pow_ConsE(2) SA_car_closed SA_zeroE c_closed cartesian_power_tail list.sel(1) list.sel(3) val_ineq) qed qed thus "\<And>s y t x. t # x \<in> condition_to_set \<B> \<Longrightarrow> s # y \<in> condition_to_set \<B> \<Longrightarrow> (t \<ominus> c x = \<zero>) = (s \<ominus> c y = \<zero>)" by metis qed lemma static_order: assumes "\<B> \<in> refined_decomp" assumes "i \<in> inds" assumes "j \<in> inds" shows "\<And>s y t x . t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow> val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<ge> val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<ge> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" proof(cases "i = j") case True then show "\<And>s y t x . 
t#x \<in> condition_to_set \<B> \<Longrightarrow> s#y \<in> condition_to_set \<B> \<Longrightarrow> val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<ge> val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<ge> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" by auto next case ne: False fix s y t x assume A: "t#x \<in> condition_to_set \<B>" "s#y \<in> condition_to_set \<B>" show "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<ge> val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<ge> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" proof(cases "t \<ominus> c x = \<zero>") case True then have T0: "s \<ominus> c y = \<zero>" using A assms val_in_B(3)[of \<B> i j t x s y] ne by auto show ?thesis unfolding T0 True apply(cases "i = 0") apply (smt (z3) A(1) A(2) A\<^sub>0_memE'(3) B_eq B_subset ne T0 True assms(1) assms(2) assms(3) bot_nat_0.not_eq_extremum eint_ord_simps(4) notin_closed padic_fields.is_cell_decomp_subset padic_fields_axioms refined_decomp_prop subset_iff val_in_B(1)) apply(cases "j = 0") apply (smt (verit) A(1) A(2) A\<^sub>0_memE'(3) B_eq B_subset T0 True assms(1) assms(2) assms(3) basic_trans_rules(31) bot_nat_0.not_eq_extremum eint_ord_simps(3) padic_fields.is_cell_decomp_subset padic_fields_axioms refined_decomp_prop) by (smt (z3) A(1) A(2) A\<^sub>0_closures(2) B_eq B_subset Qp.Units_closed Qp.cring_simprules(27) Qp.nat_pow_zero assms(1) assms(2) assms(3) padic_fields.is_cell_decomp_subset padic_fields_axioms refined_decomp_prop subset_iff) next case False have F0: "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) > val ((a j x)\<otimes>(t \<ominus> c x)[^]j) \<longleftrightarrow> val ((a i y)\<otimes>(s \<ominus> c y)[^]i) > val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" using A assms val_in_B(1)[of \<B> i j t x s y] by auto have F1: "val ((a i x)\<otimes>(t \<ominus> c x)[^]i) \<noteq> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)" using ne False A assms val_in_B(2)[of \<B> i j t x] by auto have F2: "val ((a i y)\<otimes>(s \<ominus> c y)[^]i) \<noteq> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" using False A ne assms val_in_B(3)[of \<B> i j t x s y] val_in_B(2)[of \<B> i j s y] by auto show ?thesis using F1 F2 F0 by auto qed qed lemma exists_uniform_i0: assumes "\<B> \<in> refined_decomp" assumes "inds \<noteq> {}" shows "\<exists>i\<^sub>0 \<in> inds . (\<forall>j. \<forall>t. \<forall>x. t#x \<in> condition_to_set \<B> \<longrightarrow> val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(t \<ominus> c x)[^]j))" proof(cases "condition_to_set \<B> = {}") case True then show ?thesis using assms by blast next case False have R: "\<And> xs. xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<Longrightarrow> \<exists> t x. xs = t#x" proof- have "\<And> xs. xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<Longrightarrow> length xs > 0" by (simp add: cartesian_power_car_memE) thus "\<And> xs. xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>) \<Longrightarrow> \<exists> t x. xs = t#x" by (meson cartesian_power_car_memE length_Suc_conv) qed have bsub: "condition_to_set \<B> \<subseteq> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)" using assms refined_decomp_prop by (metis is_cellI is_cell_decompE(3) is_cell_decompE(4) is_cell_subset) then obtain t x where tx_def: "t#x \<in> condition_to_set \<B>" using False R by blast have "\<exists>i\<^sub>0 \<in> inds. val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) = (MIN i\<in>inds. 
(val ( (a i x)\<otimes>(t \<ominus> c x)[^]i)))" using assms Min_in inds_finite by (smt (verit, best) finite_imageI imageE image_is_empty) then obtain i\<^sub>0 where i\<^sub>0_def: "i\<^sub>0 \<in> inds \<and> val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) = (MIN i\<in>inds. (val ( (a i x)\<otimes>(t \<ominus> c x)[^]i)))" by blast have i\<^sub>0_in: "i\<^sub>0 \<in> inds" using i\<^sub>0_def by auto have i\<^sub>0_min: "\<And> j. j \<in> inds \<Longrightarrow> val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) \<le> val ( (a j x)\<otimes>(t \<ominus> c x)[^]j)" using inds_finite i\<^sub>0_in i\<^sub>0_def by auto have d: "\<forall>j . \<forall>s y. s # y \<in> condition_to_set \<B> \<longrightarrow> val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j)" proof- have d0: "\<And> j s y. j \<notin> inds \<Longrightarrow> s # y \<in> condition_to_set \<B> \<Longrightarrow> val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j)" proof- fix j s y assume A: " j \<notin> inds" "s # y \<in> condition_to_set \<B>" have diff: "s \<ominus> c y \<in> carrier Q\<^sub>p" using A by (metis (no_types, lifting) Qp.cring_simprules(4) Qp_pow_ConsE(2) SA_car_closed bsub c_closed cartesian_power_tail list.sel(1) list.sel(3) subsetD) have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using A bsub cartesian_power_tail by fastforce have zero: "a j y = \<zero>" using A y_closed inds_non_memE[of j y] y_closed by auto have inf: "val (a j y \<otimes> (s \<ominus> c y) [^] j) = \<infinity>" using diff unfolding zero val_def by auto show "val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j)" unfolding inf by auto qed have d1: "\<And> j s y. j \<in> inds \<Longrightarrow> s # y \<in> condition_to_set \<B> \<Longrightarrow> val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j)" using assms i\<^sub>0_min tx_def i\<^sub>0_in static_order[of \<B> _ i\<^sub>0] i\<^sub>0_in by smt show ?thesis using d0 d1 by smt qed show ?thesis by(rule bexI[of _ i\<^sub>0], rule d, rule i\<^sub>0_in) qed lemma exists_uniform_i: assumes "\<B> \<in> refined_decomp" shows "\<exists>i\<^sub>0 . (\<forall>j. \<forall>t. \<forall>x. t#x \<in> condition_to_set \<B> \<longrightarrow> val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(t \<ominus> c x)[^]j))" proof(cases "inds = {}") case True have "\<And> j t x. t#x \<in> condition_to_set \<B> \<Longrightarrow> val ((a j x)\<otimes>(t \<ominus> c x)[^]j) = \<infinity>" proof- fix j t x assume A: "t#x \<in> condition_to_set \<B>" have 0: "t \<ominus> c x \<in> carrier Q\<^sub>p" "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" "t \<in> carrier Q\<^sub>p" using A assms refined_decomp_closure by auto have 1: "a j x = \<zero>" using A True inds_non_memE 0 by auto show "val (a j x \<otimes> (t \<ominus> c x) [^] j) = \<infinity> " using 0 unfolding 1 val_def by auto qed thus ?thesis by auto next case False then show ?thesis using assms exists_uniform_i0 by blast qed end context common_refinement_locale begin definition has_minimal_i where "has_minimal_i \<B> = (\<exists>i\<^sub>0 . (\<forall>j. \<forall>t. \<forall>x. 
t#x \<in> condition_to_set \<B> \<longrightarrow> val ((a i\<^sub>0 x)\<otimes>(t \<ominus> c x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(t \<ominus> c x)[^]j)))" text\<open>This lemma statement is long-winded because we need to simultaneously extract a piece of information relevant to the proof of cell decomposition theorem $I$ as well as one relevant to theorem II.\<close> lemma A\<^sub>0_comp_minimal_i_decomp: assumes "inds \<noteq> {}" shows "\<exists> S. is_cell_decomp m S (condition_to_set \<C> - A\<^sub>0) \<and> (\<forall> \<B> \<in> S. has_minimal_i \<B> \<and> (\<exists> \<phi> i\<^sub>0. \<phi> \<in> Units (SA m) \<and> center \<B> = c \<and> l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and> boundary_condition \<B> = closed_interval \<and> (\<forall>j. \<forall>t. \<forall>x. t#x \<in> condition_to_set \<B> \<longrightarrow> val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(\<phi> x)[^]j))))" proof- obtain S where S_def: "(is_cell_decomp m S (condition_to_set \<C> - A\<^sub>0) \<and> (\<forall>B\<in>S. (\<exists> \<phi>. \<phi> \<in> Units (SA m) \<and> center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and> boundary_condition B = closed_interval)))" using A\<^sub>0_comp_decomp by blast show ?thesis apply(rule refine_each_cell[of _ S]) using S_def apply blast proof- fix B assume A: "B \<in> S" obtain b where b_def: "b = fibre_set B" by blast obtain \<phi> where \<phi>_def: " \<phi> \<in> Units (SA m) \<and> center B = c \<and> l_bound B = \<phi> \<and> u_bound B = \<phi> \<and> boundary_condition B = closed_interval" using A S_def by blast have B_eq: "B = Cond m b c \<phi> \<phi> closed_interval" using A \<phi>_def b_def condition_decomp' S_def is_cell_decompE(4) by metis have \<phi>_closed: "\<phi> \<in> carrier (SA m)" using \<phi>_def SA_Units_closed by blast have \<phi>_nonzero: "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> \<phi> x \<noteq> \<zero>" using \<phi>_def SA_Units_memE' by blast have \<phi>_nonzero': "\<And>x. x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> \<phi> x \<in> nonzero Q\<^sub>p" using \<phi>_closed \<phi>_nonzero SA_car_memE(3) unfolding nonzero_def by blast have B_cell_cond: "is_cell_condition B" using A S_def is_cell_decompE by meson have B0_semialg: "is_semialgebraic m b" using B_eq B_cell_cond is_cell_conditionE by blast obtain H where H_def: "H = (\<lambda>i. a i \<otimes>\<^bsub>SA m\<^esub>\<phi>[^]\<^bsub>SA m\<^esub> i)" by blast have H_closed: "\<And> i. i \<in> inds \<Longrightarrow> H i \<in> carrier (SA m)" unfolding H_def using inds_memE \<phi>_closed SA_Units_closed[of _ m] by blast have H_unit: "\<And>i. i \<in> inds \<Longrightarrow> H i \<in> Units (SA m)" unfolding H_def using inds_memE \<phi>_def R.Units_pow_closed by blast have H_eval: "\<And>x i. i \<in> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> H i x = a i x \<otimes> (\<phi> x [^] i)" unfolding H_def using \<phi>_closed inds_memE a_closed SA_mult SA_nat_pow by presburger have H_nonzero: "\<And>x i. i \<in> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> H i x \<noteq> \<zero>" using H_unit SA_Units_memE' by blast have H_nonzero': "\<And>x i. i \<in> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> H i x \<in> nonzero Q\<^sub>p" unfolding nonzero_def mem_Collect_eq using SA_car_memE(3) H_closed H_nonzero by blast have H_ord: "\<And>x i. 
i \<in> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> ord (H i x) = ord (a i x) + i*ord(\<phi> x)" using H_eval \<phi>_nonzero inds_memE ord_mult nonzero_nat_pow_ord by (metis Qp_nat_pow_nonzero SA_Units_nonzero \<phi>_nonzero') have H_val: "\<And>x i. i \<in> inds \<Longrightarrow> x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>) \<Longrightarrow> val (H i x) = val (a i x) + val (\<phi> x [^] i)" using \<phi>_nonzero inds_memE H_eval val_mult by (metis Qp_nat_pow_nonzero SA_Units_nonzero \<phi>_nonzero' val_mult0) have b_semialg: "is_semialgebraic m b" using b_def B0_semialg by linarith have "\<exists>Bs. finite Bs \<and> Bs partitions b \<and> (\<forall>b\<in>Bs. is_semialgebraic m b \<and> static_order_type (H ` inds) b)" apply(rule static_order_type_decomp[of "H ` inds" m b]) using inds_finite apply blast using H_unit apply blast using b_semialg by auto then obtain Bs0 where Bs0_def: "finite Bs0 \<and> Bs0 partitions b \<and> (\<forall>b\<in>Bs0. is_semialgebraic m b \<and> static_order_type (H ` inds) b)" by blast obtain Bs where Bs_def: "Bs = Bs0 - {{}}" by blast have Bs_finite: "finite Bs" using Bs_def Bs0_def by blast have Bs_semialg: "\<And>b. b \<in> Bs \<Longrightarrow> is_semialgebraic m b" using Bs_def Bs0_def by blast have Bs_partitions: "Bs partitions b" unfolding Bs_def apply(rule is_partitionI) using Bs0_def is_partitionE Generated_Boolean_Algebra.disjoint_def apply fastforce using Bs0_def is_partitionE(2)[of Bs0 b] by auto have Bs_covers: "\<Union> Bs = b" using Bs_partitions is_partitionE[of Bs b] by auto have Bs_static_order_type: "\<And>b'. b' \<in> Bs \<Longrightarrow> static_order_type (H ` inds) b'" using Bs_def Bs0_def by auto have B_vals: "\<And>x. x \<in> condition_to_set B \<Longrightarrow> val (hd x \<ominus> c (tl x)) = val (\<phi> (tl x))" apply(rule basic_trans_rules(24)) unfolding B_eq condition_to_set.simps cell_def mem_Collect_eq closed_interval_def apply blast by blast obtain S' where S'_def: "S' = refine_fibres B ` Bs" by blast have S'_decomp: "is_cell_decomp m S' (condition_to_set B)" apply(unfold S'_def, rule partition_to_cell_decomp[of B m b c \<phi> \<phi> closed_interval] ) unfolding are_semialgebraic_def using B_cell_cond B_eq Bs_partitions Bs_finite Bs_semialg by auto have "(\<forall>\<B>\<in>S'. has_minimal_i \<B> \<and> (\<exists>\<phi> i\<^sub>0. \<phi> \<in> Units (SA m) \<and> center \<B> = c \<and> l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and> boundary_condition \<B> = closed_interval \<and> (\<forall>j. \<forall>t. \<forall>x. t#x \<in> condition_to_set \<B> \<longrightarrow> val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(\<phi> x)[^]j))))" proof fix \<B> assume a: "\<B> \<in> S'" obtain b0 where b0_def: "b0 = fibre_set \<B>" by blast have b0_in: "b0 \<in> Bs" using b0_def a unfolding S'_def refine_fibres_def by auto have \<phi>_fact: " \<phi> \<in> Units (SA m) \<and> center \<B> = c \<and> l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and> boundary_condition \<B> = closed_interval" using a S'_def \<phi>_def unfolding B_eq refine_fibres_def by auto show "has_minimal_i \<B> \<and> (\<exists>\<phi> i\<^sub>0. \<phi> \<in> Units (SA m) \<and> center \<B> = c \<and> l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and> boundary_condition \<B> = closed_interval \<and> (\<forall>j. \<forall>t. \<forall>x. 
t#x \<in> condition_to_set \<B> \<longrightarrow> val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(\<phi> x)[^]j)))" proof(cases "condition_to_set \<B> = {}") case True show ?thesis using \<phi>_fact unfolding has_minimal_i_def True by auto next case False obtain xs where xs_def: "xs \<in> condition_to_set \<B>" using False by blast have xs_closed: "xs \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)" by (meson xs_def a S'_decomp is_cell_decompE is_cell_decomp_subset subset_iff) obtain t x where tx_def: "xs = t#x" by (metis xs_closed Suc_length_conv cartesian_power_car_memE) have x_closed: "x \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using tx_def xs_closed Qp_pow_ConsE(1) by force have x_in_b0: "x \<in> b0" using xs_def b0_def unfolding tx_def by (metis cell_formula(2) condition_decomp' condition_to_set.simps) have ex: "\<exists>i\<^sub>0 \<in> inds. val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) = (MIN i\<in>inds. (val ((a i x)\<otimes>(\<phi> x)[^]i)))" by (smt (verit, best) assms Min_in inds_finite finite_imageI imageE image_is_empty) then obtain i\<^sub>0 where i\<^sub>0_def: "i\<^sub>0 \<in> inds \<and> val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) = (MIN i\<in>inds. (val ((a i x)\<otimes>(\<phi> x)[^]i)))" by blast have i\<^sub>0_ineq: "\<And> j. j \<in> inds \<Longrightarrow> val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(\<phi> x)[^]j)" proof- fix j assume inds: "j \<in> inds" show " val (a i\<^sub>0 x \<otimes> \<phi> x [^] i\<^sub>0) \<le> val (a j x \<otimes> \<phi> x [^] j)" using inds i\<^sub>0_def MinE inds_finite by auto qed have i\<^sub>0_ineq': "\<And> j s y. s#y \<in> condition_to_set \<B> \<Longrightarrow> val ((a i\<^sub>0 y)\<otimes>(s \<ominus> c y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(s \<ominus> c y)[^]j)" "\<And> j s y. 
s#y \<in> condition_to_set \<B> \<Longrightarrow> val ((a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(\<phi> y)[^]j)" proof- fix j s y assume b: " s#y \<in> condition_to_set \<B>" have sy_closed: "s#y \<in> carrier (Q\<^sub>p\<^bsup>Suc m\<^esup>)" by (meson b a S'_decomp is_cell_decompE is_cell_decomp_subset subset_iff) have y_closed: "y \<in> carrier (Q\<^sub>p\<^bsup>m\<^esup>)" using sy_closed Qp_pow_ConsE(1) by force have y_in_b0: "y \<in> b0" by (metis b b0_def cell_formula(2) condition_decomp' condition_to_set.simps) have diff: "s \<ominus> c y \<in> carrier Q\<^sub>p" using y_closed b by (metis Qp.cring_simprules(4) Qp_pow_ConsE(2) SA_car_closed list.sel(1) sy_closed common_refinement_locale.c_closed common_refinement_locale_axioms) have phiy: "\<phi> y \<in> carrier Q\<^sub>p" using y_closed SA_car_closed \<phi>_closed by auto have "val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j) \<and> val ((a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(\<phi> y)[^]j)" proof(cases "j \<in> inds") case True have 0: "val (H i\<^sub>0 x) \<le> val (H j x)" unfolding H_def using True i\<^sub>0_ineq[of j] x_closed using H_def H_eval i\<^sub>0_def by fastforce hence i\<^sub>0_inds: "i\<^sub>0 \<in> inds" using i\<^sub>0_def True x_closed inds_non_memE[of i\<^sub>0 x] unfolding H_def by force hence 1: "val (H i\<^sub>0 y) \<le> val (H j y)" using Bs_static_order_type[of b0] b0_in i\<^sub>0_inds True by (smt (z3) 0 basic_trans_rules(20) image_eqI notin_closed static_order_type_def x_in_b0 y_in_b0) have 2: "val (s \<ominus> c y) = val (\<phi> y)" using B_vals[of "t#x"] B_vals[of "s#y"] b xs_def a S'_decomp unfolding list_tl list_hd tx_def by (meson basic_trans_rules(31) is_cell_decomp_subset) have 3: "H i\<^sub>0 y = (a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0" "H j y = (a j y)\<otimes>(\<phi> y)[^]j" using H_eval i\<^sub>0_inds y_closed True by auto have un: "\<phi> y \<in> Units Q\<^sub>p" using y_closed Units_eq_nonzero \<phi>_nonzero' by blast show "val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j) \<and> val ((a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(\<phi> y)[^]j)" using 1 2 diff H_val H_ord un by (smt (verit, ccfv_SIG) "3"(1) "3"(2) Qp.nat_pow_closed True Units_eq_nonzero a_eval equal_val_imp_equal_ord(2) i\<^sub>0_inds val_mult val_of_power y_closed) next case False have F1: "a j y = \<zero>" using False inds_non_memE y_closed by auto show "val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j) \<and> val ((a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(\<phi> y)[^]j)" using diff phiy unfolding F1 val_def by auto qed thus "val (a i\<^sub>0 y \<otimes> (s \<ominus> c y) [^] i\<^sub>0) \<le> val (a j y \<otimes> (s \<ominus> c y) [^] j)" " val ((a i\<^sub>0 y)\<otimes>(\<phi> y)[^]i\<^sub>0) \<le> val ((a j y)\<otimes>(\<phi> y)[^]j)" by auto qed thus "has_minimal_i \<B> \<and> (\<exists>\<phi> i\<^sub>0. \<phi> \<in> Units (SA m) \<and> center \<B> = c \<and> l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and> boundary_condition \<B> = closed_interval \<and> (\<forall>j t x. 
t # x \<in> condition_to_set \<B> \<longrightarrow> val (a i\<^sub>0 x \<otimes> \<phi> x [^] i\<^sub>0) \<le> val (a j x \<otimes> \<phi> x [^] j)))" by (metis \<phi>_fact has_minimal_i_def) qed qed thus "\<exists>S. is_cell_decomp m S (condition_to_set B) \<and> (\<forall>\<B>\<in>S. has_minimal_i \<B> \<and> (\<exists>\<phi> i\<^sub>0. \<phi> \<in> Units (SA m) \<and> center \<B> = c \<and> l_bound \<B> = \<phi> \<and> u_bound \<B> = \<phi> \<and> boundary_condition \<B> = closed_interval \<and> (\<forall>j. \<forall>t. \<forall>x. t#x \<in> condition_to_set \<B> \<longrightarrow> val ((a i\<^sub>0 x)\<otimes>(\<phi> x)[^]i\<^sub>0) \<le> val ((a j x)\<otimes>(\<phi> x)[^]j))))" using S'_decomp by auto qed qed lemma A\<^sub>0_minimal_i_decomp: assumes "inds \<noteq> {}" shows "\<exists> S. is_cell_decomp m S A\<^sub>0 \<and> (\<forall> \<B> \<in> S. center \<B> = c \<and> has_minimal_i \<B>)" proof- obtain S where S_def: " is_cell_decomp m S A\<^sub>0 \<and> (\<forall>B\<in>S. center B = c \<and> (\<exists>N. SA_poly_ubounded p m f (center B) (condition_to_set B) N))" using A\<^sub>0_decomp assms by auto show ?thesis proof(rule refine_each_cell[of m S]) show " is_cell_decomp m S A\<^sub>0" using S_def by auto fix B assume A: "B \<in> S" have B_center: "center B = c" using S_def A by auto have sub: "condition_to_set B \<subseteq> A\<^sub>0" using A S_def is_cell_decomp_subset[of m S A\<^sub>0] by auto have cell: "is_cell_condition B" using A S_def is_cell_decompE by auto obtain b b1 b2 J where params: "B = Cond m b c b1 b2 J" using A S_def B_center condition_decomp' is_cell_decompE(4) by blast have 0: "A\<^sub>0_refinement p d \<C> A c a1 a2 I f m B b b1 b2 J" using sub cell params by (meson A\<^sub>0_refinement.intro A\<^sub>0_refinement_axioms.intro common_refinement_locale_axioms) show "\<exists>S. is_cell_decomp m S (condition_to_set B) \<and> (\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>)" using 0 A\<^sub>0_refinement.exists_uniform_i Q\<^sub>p_def Z\<^sub>p_def A\<^sub>0_refinement.refined_decomp_prop by (smt (z3) params common_refinement_locale.has_minimal_i_def common_refinement_locale_axioms) qed qed lemma \<C>_comp_minimal_i_decomp: shows "\<exists> S. is_cell_decomp m S (condition_to_set \<C>) \<and> (\<forall> \<B> \<in> S. center \<B> = c \<and> has_minimal_i \<B>)" proof- have A: "is_cell_decomp m {\<C>} (condition_to_set \<C>)" using \<C>_cond \<C>_def arity.simps condition_to_set_cell_decomp by blast show ?thesis proof(cases "inds = {}") case True have "\<And> t x j. t # x \<in> condition_to_set \<C> ==> val (a j x \<otimes> (t \<ominus> c x) [^] j) = \<infinity>" proof- fix t x j assume A: "t#x \<in> condition_to_set \<C>" have 0: "a j x = \<zero>" using inds_non_memE A unfolding True by (metis \<C>_memE(1) empty_iff list.sel(3)) have 1: "(t \<ominus> c x) \<in> carrier Q\<^sub>p" using A by (metis Qp.cring_simprules(4) SA_car_closed \<C>_cond \<C>_def \<C>_mem_hd cartesian_power_tail cell_condition_set_memE(1) list.sel(1) list.sel(3) common_refinement_locale.c_closed common_refinement_locale_axioms) show "val (a j x \<otimes> (t \<ominus> c x) [^] j) = \<infinity>" using 1 unfolding 0 val_def by auto qed hence "has_minimal_i \<C>" unfolding has_minimal_i_def by auto thus "\<exists>S. is_cell_decomp m S (condition_to_set \<C>) \<and> (\<forall>\<B>\<in>S. 
center \<B> = c \<and> has_minimal_i \<B>)" using A \<C>_def center.simps by auto next case False show ?thesis proof(rule binary_refinement[of _ "{\<C>}"], rule A) have "is_semialgebraic (Suc m) A\<^sub>0 \<and> A\<^sub>0 \<subseteq> condition_to_set \<C> \<and> (\<exists>S. is_cell_decomp m S A\<^sub>0 \<and> (\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>)) \<and> (\<exists>S. is_cell_decomp m S (condition_to_set \<C> - A\<^sub>0) \<and> (\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>))" using A\<^sub>0_semialg A\<^sub>0_def A\<^sub>0_minimal_i_decomp A\<^sub>0_comp_minimal_i_decomp False by auto thus "\<And>C. C \<in> {\<C>} \<Longrightarrow> \<exists>C0. is_semialgebraic (Suc m) C0 \<and> C0 \<subseteq> condition_to_set C \<and> (\<exists>S. is_cell_decomp m S C0 \<and> (\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>)) \<and> (\<exists>S. is_cell_decomp m S (condition_to_set C - C0) \<and> (\<forall>\<B>\<in>S. center \<B> = c \<and> has_minimal_i \<B>))" by auto qed qed qed end end
{"author": "Aaroncri", "repo": "Macintyre-Theorem-in-Isabelle", "sha": "a13c75b5d3fc12fb3290b7c92326e13618612c6c", "save_path": "github-repos/isabelle/Aaroncri-Macintyre-Theorem-in-Isabelle", "path": "github-repos/isabelle/Aaroncri-Macintyre-Theorem-in-Isabelle/Macintyre-Theorem-in-Isabelle-a13c75b5d3fc12fb3290b7c92326e13618612c6c/Macintyre_Theorem/Cell_Decomp_Theorem_Helpers.thy"}
# Authors: Stephane Gaiffas <stephane.gaiffas@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy.linalg import toeplitz
from scipy.special import expit
from sklearn.datasets import make_classification
import pytest


def simulate_true_logistic(
    n_samples=150,
    n_features=5,
    fit_intercept=True,
    corr=0.5,
    random_state=0,
    return_coef=False,
):
    rng = np.random.RandomState(random_state)
    coef0 = rng.randn(n_features)
    if fit_intercept:
        intercept0 = -2.0
    else:
        intercept0 = 0.0
    cov = toeplitz(corr ** np.arange(0, n_features))
    X = rng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
    logits = X.dot(coef0)
    logits += intercept0
    p = expit(logits)
    y = rng.binomial(1, p, size=n_samples)
    if return_coef:
        return X, y, coef0, intercept0
    else:
        return X, y


# Turns out that random_state=1 is linearly separable while it is not for
# random_state=2
def simulate_linear(n_samples, random_state=2):
    X, y = make_classification(
        n_samples=n_samples,
        n_features=2,
        n_redundant=0,
        n_informative=2,
        random_state=random_state,
        n_clusters_per_class=1,
    )
    rng = np.random.RandomState(2)
    X += 2 * rng.uniform(size=X.shape)
    return X, y


def approx(v, abs=1e-15):
    # `abs` must be passed by keyword: the second positional argument of
    # pytest.approx is the *relative* tolerance.
    return pytest.approx(v, abs=abs)


def parameter_test_with_min(
    class_tested,
    parameter,
    valid_val,
    invalid_type_val,
    invalid_val,
    min_value=None,
    min_value_strict=None,
    min_value_str=None,
    mandatory=False,
    fixed_type=None,
    required_args=None,
):
    """Tests for an attribute of integer type

    Parameters
    ----------
    valid_val
        A valid value for the parameter

    invalid_type_val
        A value with invalid type

    invalid_val
        A value which is invalid because of its value

    parameter

    min_value

    mandatory

    Returns
    -------

    """
    if required_args is None:
        required_args = {}

    def get_params(param, val):
        """If the parameter is not 'n_classes', we need to specify
        `n_classes`, since it's mandatory to create the class
        """
        required_args[param] = val
        return required_args

    # If the parameter is mandatory, we check that an exception is raised
    # if not passed to the constructor
    if mandatory:
        with pytest.raises(TypeError) as exc_info:
            class_tested()
        assert exc_info.type is TypeError
        assert (
            exc_info.value.args[0] == "__init__() missing 1 required "
            "positional argument: '%s'" % parameter
        )

    if min_value is not None and min_value_strict is not None:
        raise ValueError(
            "You can't set both `min_value` and "
            "`min_value_strict` at the same time"
        )

    clf = class_tested(**get_params(parameter, valid_val))
    assert getattr(clf, parameter) == valid_val

    # If valid_val is valid, then valid_val + 1 is also valid
    setattr(clf, parameter, valid_val + 1)
    assert getattr(clf, parameter) == valid_val + 1

    with pytest.raises(
        ValueError,
        match="`%s` must be of type `%s`" % (parameter, fixed_type.__name__),
    ):
        setattr(clf, parameter, invalid_type_val)

    with pytest.raises(
        ValueError,
        match="`%s` must be of type `%s`" % (parameter, fixed_type.__name__),
    ):
        class_tested(**get_params(parameter, invalid_type_val))

    if min_value is not None:
        with pytest.raises(
            ValueError, match="`%s` must be >= %s" % (parameter, min_value_str)
        ):
            setattr(clf, parameter, invalid_val)
        with pytest.raises(
            ValueError, match="`%s` must be >= %s" % (parameter, min_value_str)
        ):
            class_tested(**get_params(parameter, invalid_val))

    if min_value_strict is not None:
        with pytest.raises(
            ValueError, match="`%s` must be > %s" % (parameter, min_value_str)
        ):
            setattr(clf, parameter, invalid_val)
        with pytest.raises(
            ValueError, match="`%s` must be > %s" % (parameter, min_value_str)
        ):
            class_tested(**get_params(parameter, invalid_val))

    clf = class_tested(**get_params(parameter, valid_val))
    # TODO: we should not need to change the dtype here
    X = np.random.randn(2, 2)
    y = np.array([0.0, 1.0])
    clf.partial_fit(X, y)
    with pytest.raises(
        ValueError,
        match="You cannot modify `%s` "
        "after calling `partial_fit`" % parameter,
    ):
        setattr(clf, parameter, valid_val)


def parameter_test_with_type(
    class_tested, parameter, valid_val, invalid_type_val, mandatory, fixed_type
):
    # TODO: code it
    pass
{"hexsha": "8df35c47565a43d57a5d13fd28554d9e22cf9076", "size": 4782, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/utils.py", "max_stars_repo_name": "LinLearn/linlearn", "max_stars_repo_head_hexsha": "de5752d47bbe8e2fb62d41b0dcf2526f87545e1c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-12T21:02:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-12T21:02:33.000Z", "max_issues_repo_path": "tests/utils.py", "max_issues_repo_name": "LinLearn/linlearn", "max_issues_repo_head_hexsha": "de5752d47bbe8e2fb62d41b0dcf2526f87545e1c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 40, "max_issues_repo_issues_event_min_datetime": "2019-12-11T17:36:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-27T11:20:56.000Z", "max_forks_repo_path": "tests/utils.py", "max_forks_repo_name": "LinLearn/linlearn", "max_forks_repo_head_hexsha": "de5752d47bbe8e2fb62d41b0dcf2526f87545e1c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-01-30T21:58:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-19T08:40:34.000Z", "avg_line_length": 27.0169491525, "max_line_length": 87, "alphanum_fraction": 0.6321622752, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1196}
import sys, os, time, uuid, random, multiprocessing, traceback

PY2 = sys.version_info < (3,)
PY3 = sys.version_info >= (3,)
if PY2:
    import cPickle as pickle
else:
    import pickle

import numpy as np
from striped.pythreader import PyThread, Primitive, synchronized
from threading import Event
import socket, traceback, random, time

from striped.common import DXMessage, WorkerRequest, DataExchangeSocket, BulkDataSender


def distribute_items(lst, n):
    # Split lst into n contiguous chunks; the first N % n chunks get one extra item
    N = len(lst)
    k = N % n
    m = (N - k) // n
    i = 0
    out = []
    for _ in range(k):
        out.append(lst[i:i + m + 1])
        i += m + 1
    for _ in range(n - k):
        out.append(lst[i:i + m])
        i += m
    return out


def distribute_items_modulo(lst, n):
    # Bucket item i into list i % n
    if n == 0:
        return []
    lists = []
    for _ in range(n):
        lists.append([])
    for i in lst:
        lists[i % n].append(i)
    return lists


class ParamsSender(multiprocessing.Process):

    def __init__(self, msg, dxsock):
        multiprocessing.Process.__init__(self)
        self.Msg = msg
        self.Sock = dxsock

    def run(self):
        self.Sock.send(self.Msg)


class SocketWorkerInterface(PyThread):

    def __init__(self, contract, wid, nworkers, params, worker_address, worker_key, log):
        PyThread.__init__(self)
        self.WID = wid
        self.Contract = contract
        self.WorkerAddress = worker_address
        self.WorkerKey = worker_key
        self.Params = params
        self.TStart = None
        self.NEvents = 0
        self.LastNEvents = 0
        self.Buffer = ""
        self.Abort = False
        self.Log = log
        self.log("interface created with worker address %s" % (self.WorkerAddress,))
        self.Created = time.time()

    def log(self, msg):
        #print "WorkerInterface %d: %s" % (self.WID, msg)
        if self.Log:
            self.Log("WorkerInterface %d: %s" % (self.WID, msg))
        else:
            print("WorkerInterface %d: %s" % (self.WID, msg))

    def abort(self):
        self.Abort = True

    def run(self):
        tstart = time.time()    # fallback so the finally clause is always defined
        try:
            self.Contract.waitForStart()
            tstart = time.time()    # restart the clock once the gang actually begins
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect(self.WorkerAddress)
            dxsock = DataExchangeSocket(sock)
            #print "SocketWorkerInterface: connected to %s" % (self.WorkerAddress,)
            #print "SocketWorkerInterface: params:"
            #for k, v in self.Params.__dict__.items():
            #    if not k.startswith("_"):
            #        print k, v
            msg = self.Params.toDXMsg()
            signature, t, salt, alg = self.Params.generateSignature(self.WorkerKey)
            msg["worker_authenticator"] = "%s:%s:%s:%s" % (signature, t, salt, alg)
            dxsock.send(msg)
            self.log("Worker parameters sent. Time since created=%f" % (time.time() - self.Created,))
            streams = {}    # serialized stream payloads by name (left pickled; vestigial here)
            eof = False
            while not eof:
                #print "%s: readChunk..." % (self.WID,)
                if self.Abort:
                    sock.close()
                    break
                #print "DXMessage.fromSocket..."
                msg = dxsock.recv()
                if msg is None:
                    eof = True
                    self.log("EOF")
                else:
                    self.log("message(%s)" % (msg.Type,))
                    if msg.Type == 'message':
                        self.Contract.forward(self, msg)
                    elif msg.Type == 'hist':
                        self.Contract.forward(self, msg)
                    elif msg.Type == 'stream':
                        name = msg["name"]
                        #format = msg["format"]
                        #assert format == "pickle", "Unknown stream serialization format %s" % (format,)
                        streams[name] = msg["data"]     # do not unpickle yet !
                    elif msg.Type == 'data':
                        self.log("data message: events_delta=%s" % (msg["events_delta"],))
                        self.Contract.dataReceived(self, msg["events_delta"], msg["data"])
                    elif msg.Type == 'events':
                        n = msg["events_delta"]
                        self.log("events delta: %s" % (n,))
                        self.Contract.eventsDelta(self, n)
                    elif msg.Type == 'exception':
                        #print "Contract: exception received"
                        self.Contract.exceptionReceived(self, msg["info"])
                    elif msg.Type == 'data_load_failure':
                        self.Contract.dataLoadFailureReceived(self, msg["rgid"])
        except:
            self.log("run(): exception: %s" % (traceback.format_exc(),))
        finally:
            self.Contract.workerExited(self, 0, time.time() - tstart)
            self.Contract = None    # break circular dependencies


class Contract(Primitive):

    def __init__(self, jid, data_server_url, bulk_transport_port, dataset, job_desc,
                 workers, callback_delegate, log, T):
        #print "SocketGangContract: worker_text=%s" % (worker_text,)
        Primitive.__init__(self)
        #print "Contract: data_server_url=", data_server_url
        self.JID = jid
        self.BulkTransportPort = bulk_transport_port
        self.WorkerInterfaces = {}
        self.Params = {}
        self.DoneWorkers = {}
        self.CallbackDelegate = callback_delegate
        self.WorkerText = job_desc.WorkerText
        self.Workers = workers
        self.EventsProcessed = 0
        self.Log = log
        self.T = T
        self.StartEvent = Event()
        self.UseDataCache = job_desc.UseDataCache
        self.TotalEvents = None
        self.SelectedEvents = None
        self.SelectedFrames = None
        self.BulkData = job_desc.BulkData
        self.BulkDataName = "job_%s.bulk" % (self.JID,) if self.BulkData is not None else None

        nworkers = len(workers)     # for now

        with self.T["Contract.__init__"]:
            hdescriptors = {hid: h if isinstance(h, dict) else h.descriptor()
                            for hid, h in job_desc.HDescriptors.items()}
            #print hdescriptors
            # assign rgids
            dataset.UseDataCache = self.UseDataCache
            self.NRGs, rgid_dist = self.distributeWork(nworkers, len(self.Workers), dataset,
                                                       job_desc.FrameSelector, job_desc.Fraction)
            #print "Contract: work distribution: nworkers=%d, frames=%d" % (nworkers, self.NRGs)
            #print "    ", rgid_dist
            self.NWorkers = min(nworkers, len(self.Workers))
            if self.NRGs > 0:
                #print "rgids_dist:", rgids_dist
                worker_module_name = "wm_%s" % (uuid.uuid1(),)
                for iw in range(len(self.Workers)):
                    rgid_list = rgid_dist[iw]
                    #print iw, len(rgid_list)
                    if len(rgid_list):
                        with self.T["Contract.__init__/create_params"]:
                            self.Params[iw] = WorkerRequest(self.JID, iw, data_server_url,
                                dataset.Name, rgid_list, self.NWorkers, worker_module_name,
                                job_desc.WorkerText, job_desc.HDescriptors, job_desc.UserParams,
                                job_desc.UseDataCache, job_desc.DataModificationURL,
                                job_desc.DataModificationToken, self.BulkDataName
                            )
        self.log("Contract created")

    def distributeWork(self, nworkers_job, nworkers_available, dataset, frame_selector, fraction):
        rgids_initial = sorted(dataset.rgids[:])
        nworkers = min(nworkers_job, nworkers_available)
        workers = list(range(nworkers_available))
        if nworkers < nworkers_available:
            # choose a reproducible subset of workers, seeded by the dataset name
            rstate = random.getstate()
            seed = hash(dataset.Name)
            random.seed(seed)
            workers = random.sample(workers, nworkers)
            random.setstate(rstate)
        initial_distribution = distribute_items(rgids_initial, nworkers)
        rgids = set(rgids_initial)
        with self.T["Contract.__init__/distribute/rginfos"]:
            rginfos = dataset.rginfos(list(rgids))
        self.TotalEvents = self.SelectedEvents = sum([i.NEvents for i in rginfos])
        rginfo_dict = {i.RGID: i for i in rginfos}
        if frame_selector is not None:
            rgids = set()
            for rginfo in rginfos:
                if frame_selector.eval(rginfo.Profile):
                    rgids.add(rginfo.RGID)
        if isinstance(fraction, float) and len(rgids):
            nrgids = float(len(rgids)) * fraction
            n = int(nrgids)
            if n < nrgids:
                n += 1
            if n < len(rgids):
                rstate = random.getstate()
                seed = hash(dataset.Name)
                random.seed(seed)
                # random.sample needs a sequence (sets rejected in Python 3.11+)
                rgids = set(random.sample(sorted(rgids), n))
                random.setstate(rstate)
        rgid_dist = [[] for _ in range(nworkers_available)]
        for iw, lst in enumerate(initial_distribution):
            iw_actual = workers[iw]
            for rgid in lst:
                if rgid in rgids:
                    rgid_dist[iw_actual].append(rgid)
        self.SelectedFrames = sorted(list(rgids))
        self.SelectedEvents = sum([rginfo_dict[rgid].NEvents for rgid in rgids])
        #self.log("RG distributed: %s" % (rgid_dist,))
        return len(rgids), rgid_dist

    def start(self):
        self.WorkerInterfaces = {}
        transport_client = None
        if self.BulkData is not None:
            addresses = sorted([wi.Addr[0] for wi in self.Workers])
            #print "Contract.start(): bulk transfer addresses: %s" % (addresses,)
            random.shuffle(addresses)
            transport_client = BulkDataSender(self.BulkDataName, self.BulkData,
                                              self.BulkTransportPort, addresses)
            transport_client.start()
        for i, params in self.Params.items():
            wi = self.Workers[i]
            w = SocketWorkerInterface(self, i, self.NWorkers, params, wi.Addr, wi.Key, self.Log)
            self.WorkerInterfaces[i] = w
        for w in self.WorkerInterfaces.values():
            with self.T["WorkerInterface.start()"]:
                w.start()
            #time.sleep(0.001)
        if transport_client is not None:
            transport_client.wait()
        self.StartEvent.set()

    def waitForStart(self):
        self.StartEvent.wait()

    def log(self, msg):
        if self.Log:
            self.Log("Contract: %s" % (msg,))
        else:
            print("Contract: %s" % (msg,))

    """
    @synchronized
    def nevents(self):
        n = sum(w.NEvents for w in self.WorkerInterfaces.values()) + sum(w.NEvents for w in self.DoneWorkers.values())
        #print "sum of workers:", n
        return n
    """

    """
    @synchronized
    def updateReceived(self, worker, hists, streams, nevents_delta):
        self.CallbackDelegate.updateReceived(worker.WID, hists, streams, nevents_delta)
    """

    @synchronized
    def forward(self, worker, msg, add_wid=True):
        if add_wid:
            msg(wid=worker.WID)
        self.CallbackDelegate.forward(msg)

    @synchronized
    def eventsDelta(self, worker, events_delta):
        self.CallbackDelegate.eventsDelta(worker.WID, events_delta)

    @synchronized
    def dataReceived(self, worker, events_delta, data):
        self.CallbackDelegate.dataReceived(worker.WID, events_delta, data)

    @synchronized
    def exceptionReceived(self, worker, info):
        self.CallbackDelegate.exceptionReceived(worker.WID, info)

    @synchronized
    def messageReceived(self, worker, nevents, message):
        self.CallbackDelegate.messageReceived(worker.WID, message)

    @synchronized
    def dataLoadFailureReceived(self, worker, rgid):
        self.CallbackDelegate.dataLoadFailureReceived(worker.WID, rgid)

    @synchronized
    def workerExited(self, worker, status, t):
        del self.WorkerInterfaces[worker.WID]
        self.DoneWorkers[worker.WID] = worker
        self.CallbackDelegate.workerExited(worker.WID, status, t, worker.NEvents, self.nrunning())
        self.log("workerExited(%s, %s). %d still running: %s" % (worker.WID, status, self.nrunning(),
            ','.join(["%s:%s" % wi.WorkerAddress for i, wi in sorted(self.WorkerInterfaces.items())])
        ))
        if self.nrunning() <= 0:
            self.wakeup()

    def nrunning(self):
        return len(self.WorkerInterfaces)

    @synchronized
    def wait(self, timeout=None):
        t0 = time.time()
        while self.nrunning() > 0 and (timeout is None or time.time() - t0 < timeout):
            #print "Contract.wait: nrunning = ", self.nrunning()
            with self.T["Contract.wait()/loop"]:
                dt = None if timeout is None else max(0.0, t0 + timeout - time.time())
                if dt is None:
                    dt = 1.0
                #print "Contract.wait: await(%s)" % (dt,)
                self.sleep(dt)

    @synchronized
    def abort(self):
        for w in self.WorkerInterfaces.values():
            w.abort()
{"hexsha": "1b9195adaef90c73152400dfe856ba9927b3e41e", "size": 13561, "ext": "py", "lang": "Python", "max_stars_repo_path": "job_server/Contract.py", "max_stars_repo_name": "ivmfnal/striped", "max_stars_repo_head_hexsha": "eef1a4d544fa1b97fde39d7ee5ef779071218891", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-01T15:19:43.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-01T15:19:43.000Z", "max_issues_repo_path": "job_server/Contract.py", "max_issues_repo_name": "ivmfnal/striped", "max_issues_repo_head_hexsha": "eef1a4d544fa1b97fde39d7ee5ef779071218891", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "job_server/Contract.py", "max_forks_repo_name": "ivmfnal/striped", "max_forks_repo_head_hexsha": "eef1a4d544fa1b97fde39d7ee5ef779071218891", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-21T21:18:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-21T21:18:01.000Z", "avg_line_length": 37.0519125683, "max_line_length": 147, "alphanum_fraction": 0.5507705921, "include": true, "reason": "import numpy", "num_tokens": 3014}
function varargout = scl_slope(varargin)
% file_array's scl_slope property
% For getting the value
% dat = scl_slope(obj)
%
% For setting the value
% obj = scl_slope(obj,dat)
%__________________________________________________________________________
% Copyright (C) 2005-2017 Wellcome Trust Centre for Neuroimaging
%
% $Id: scl_slope.m 7147 2017-08-03 14:07:01Z spm $


if nargin==2
    varargout{1} = asgn(varargin{:});
elseif nargin==1
    varargout{1} = ref(varargin{:});
else
    error('Wrong number of arguments.');
end


%==========================================================================
% function dat = ref(obj)
%==========================================================================
function dat = ref(obj)
dat = obj.scl_slope;


%==========================================================================
% function obj = asgn(obj,dat)
%==========================================================================
function obj = asgn(obj,dat)
if isnumeric(dat) % && numel(dat)<=1,
    obj.scl_slope = double(dat);
else
    error('"scl_slope" must be numeric.');
end
{"author": "spm", "repo": "spm12", "sha": "3085dac00ac804adb190a7e82c6ef11866c8af02", "save_path": "github-repos/MATLAB/spm-spm12", "path": "github-repos/MATLAB/spm-spm12/spm12-3085dac00ac804adb190a7e82c6ef11866c8af02/@file_array/private/scl_slope.m"}
program sesh ! SESH MAIN PROGRAM ! ! MANUAL: F.H. FROEHNER, ! "SESH - A FORTRAN IV CODE FOR CALCULATING THE SELF- ! SHIELDING AND MULTIPLE SCATTERING EFFECTS FOR ! NEUTRON CROSS SECTION DATA INTERPRETATION ! IN THE UNRESOLVED RESONANCE REGION", ! REPORT GA-8380 (1968) ! ! GILBERT-CAMERON COMPOSITE LEVEL DENSITY AND GIANT DIPOLE RESONANCE ! MODEL ARE USED FOR THE ENERGY DEPENDENCE OF LEVEL SPACINGS AND ! RADIATION WIDTHS IN VERSION 1975. THE INPUT REMAINS AS DESCRIBED ! IN GA-8380 WITH TWO EXCEPTIONS: (1) THE NUCLEAR TEMPERATURE IS ! REPLACED BY THE GILBERT-CAMERON PAIRING ENERGY OF THE COMPOUND ! NUCLEUS, (2) ONLY THE S-WAVE LEVEL SPACING MUST BE GIVEN (SPACING ! INPUT FOR L>0 IS IGNORED). FURTHERMORE, CHANNEL RADIUS AND ! EFFECTIVE NUCLEAR RADIUS ARE DISTINCT FOR ALL PARTIAL WAVES: ! THE CHANNEL RADIUS IS TAKEN AS chan_rad(I)=(1.23*A**(1/3)+0.80) FM, ! THE EFFECTIVE NUCLEAR RADII R(L,I) ARE INPUT NUMBERS WITH THE ! DEFAULT VALUES R(L,I)=chan_rad(I). ! ! INPUT LIMITATIONS: ! UP TO 10 ISOTOPES ! " " 4 PARTIAL WAVES ! " " 5 SAMPLE GEOMETRIES ! " " 24 NEUTRON ENERGIES ! " " 100000 MONTE CARLO HISTORIES PER ENERGY use mc_routines implicit none ! real(8), allocatable, dimension(:) :: COMM,ZL,AP,UM,A,SC,ST,abundance,BE,PE,eff_temp, & spin,AA,chan_rad,XN,E,SG,SP,R_in,R_outer,ZH,R_beam integer, allocatable, dimension(:) :: JX2,JN2,JMX,NL real(8), allocatable, dimension(:,:) :: SGI,GG,D,S,SI,R,det_effic,SUMGJ,STI,SPI integer, allocatable, dimension(:,:) :: J2X,J2N real(8), allocatable, dimension(:,:,:) :: SGL,G,GNR,GNRIN,DJL,STL,SPL integer, allocatable, dimension(:,:,:) :: LJX,LJN real(8) :: AI,AK,CC,DPO,DPS,DSGM,DSSCF,DSTM,DTAU,DTMC,DUMMY,EDD,EDGG,EJL, & ETA,FJ,GN,GNIN,PL,PO,PS,PSI0,QI,RK,S3,SGM,SNC,SQ,SSCF,STM,SUM, & TAU,TMC,U,V,VARJ2,VL,X2I,X2J,XNSS,XO,XX integer :: I,I2,IHIST,ITYPE,ITYPO,IY,J,J2,J2MN,J2MX,JX,K,KQ,KZ,L,LL,LX,M, & M4,MJ,N,NE,NI,NN,NQ,numResPairs ! ------------------------------------------------------------------------------ CHARACTER (LEN=100) :: inp_file_name CHARACTER (LEN=100) :: out_file_name CHARACTER (LEN=100) :: results_file_name CHARACTER (LEN=100) :: cor_file_name ! --- dimension variables now ------- allocate(COMM(18),ZL(4),SGI(10,100),SGL(10,100,4),AP(10),UM(10),A(11), & SC(100),ST(100),abundance(11),BE(11),PE(11),eff_temp(11),spin(11),NL(10), & AA(10),chan_rad(10),GG(5,11),D(5,11),S(5,11),SI(5,11),R(5,11),det_effic(5,11), & J2X(4,10),J2N(4,10),SUMGJ(4,10),XN(6),E(100),SG(100),SP(100), & G(8,4,10),GNR(8,4,10),GNRIN(8,4,10),DJL(8,4,10),R_in(6),R_outer(6), & ZH(100),LJX(2,8,10),LJN(2,8,10),JX2(10),JN2(10),JMX(10),R_beam(6), & STI(10,100),SPI(10,100),STL(10,100,4),SPL(10,100,4)) ! ----------------------------------- print *, "Input file name?" read(*,*) inp_file_name print *, "Output file name?" read(*,*) out_file_name print *, "Analytical results file name?" read(*,*) results_file_name print *, "Correction file name?" read(*,*) cor_file_name ! ------------------------------------------------------------------------------ open (unit=5, file=inp_file_name, status='old') ! -- Input file open (unit=8, file=out_file_name, status='unknown') ! -- Main output file open (unit=11, file=results_file_name,status='unknown') ! -- Formatted partial wave cross section output (analytical calc.)) open (unit=12, file=cor_file_name, status='unknown') ! -- Formatted self-shielding correction factor output call random_seed ! ! READ INPUT ! IHIST IS A TAG FOR HISTOGRAMS OF ST, SG, T, F0; ! IHIST=1 MEANS HISTOGRAM VALUES ARE CALCULATED AND PRINTED ! 
IHIST=0 MEANS NO SUCH VALUES ARE CALCULATED OR PRINTED ! CALL ERRSET(217,0 ,-1,1,0,100) ! CALL ERRSET(208,300,-1,0,1,208) IY=4751 1 READ(5,100,END=999)COMM,IHIST 100 FORMAT(18A4,1I8) SUM=0. I=0 I=I+1 ! I-TH ISOTOPE (MAXIMUM OF 10 ISOTOPES): ! NUCLEON NUMBER A(I), ABUNDANCE abundance(I), BINDING ENERGY BE(I) [MeV], PAIRING ! ENERGY PE(I) [MeV], EFFECTIVE SAMPLE TEMPERATURE eff_temp(I) [DEG. K], ! TARGET SPIN QUANTUM NUMBER spin(I); READ(5,101)A(I),abundance(I),BE(I),PE(I),eff_temp(I),spin(I) 101 FORMAT(6E10.5) L=0 3 L=L+1 ! L-TH PARTIAL WAVE (UP TO F-WAVE): ! RADIATION WIDTH GG(L,I) [eV], AVERAGE LEVEL SPACING D(L,I) [eV], STRENGTH ! FUNCTION FOR ELASTIC SCATTERING S(L,I), STRENGTH FUNCTION FOR ! INELASTIC SCATTERING SI(L,I), NUCLEAR RADIUS R(L,I) [fm], DETECTION ! EFFICIENCY READ(5,102)GG(L,I),D(L,I),S(L,I),SI (L,I),R(L,I),det_effic(L,I) 102 FORMAT(6E10.5) ! CHECK FOR SAMPLE THICKNESS CARD (FIRST WORD ZERO) IF(GG(L,I).EQ.0.0)GO TO 4 ! CHECK FOR LAST PARTIAL WAVE IF(S(L,I).LT.1.)GO TO 3 ! LAST CARD WAS ISOTOPE CARD. STORE CORRESPONDINGLY NL(I)=L-1 I=I+1 A(I)=GG(L,I-1) abundance(I)=D(L,I-1) BE(I)=S(L,I-1) PE(I)=SI(L,I-1) eff_temp(I)=R(L,I-1) spin(I)=det_effic(L,I-1) GG(L,I-1)=0. D(L,I-1) =0. S(L,I-1) =0. SI(L,I-1)=0. R(L,I-1) =0. det_effic(L,I-1)=0. L=0 GO TO 3 ! LAST CARD WAS SAMPLE THICKNESS CARD (NUCLEI/B), ! STORE CORRESPONDINGLY. 4 NI=I NL(I)=L-1 XN(1)=D(L,I) XN(2)=S(L,I) XN(3)=SI(L,I) XN(4)=R(L,I) XN(5)=det_effic(L,I) XN(6)=0. ! FIND NUMBER OF SAMPLE THICKNESSES ! FOR TRANSMISSION AND CAPTURE DATA DO 5 N=2,6 IF(XN(N).NE.0.)GO TO 5 NN=N-1 GO TO 6 5 CONTINUE ! READ OUTER RADII (NUCLEI/BARN) 6 READ(5,103)(R_outer(N),N=1,5) R_outer(6)=0. IF(NN.GT.1)GO TO 8 ! FIND NUMBER OF SHELL THICKNESSES FOR SHELL TRANSMISSION ! DATA DO 7 N=2,6 IF(R_outer(N).NE.0.)GO TO 7 NN=N-1 GO TO 8 7 CONTINUE ! READ INNER RADII (NUCLEI/BARN) FOR SHELL TRANSMISSION ! CALCULATIONS. R_in(N)=0. MEANS CYLINDRICAL SAMPLE 8 READ(5,103)(R_in(N),N=1,5) 103 FORMAT(E20.5,4E10.5) ! BJM Modification 10/20/2015-- READ BEAM RADIUS (NUCLEI/BARN) ! FOR CAPTURE SAMPLES R_beam(N)=0.0 MEANS ! BEAM RADIUS >= SAMPLE RADIUS ! 88 READ(5,103)(R_beam(N),N=1,5) ! READ NUMBER OF RESONANCE PAIRS READ(5,104)numResPairs 104 FORMAT(I10) DO 9 M=1,25 M4=(M-1)*4 ! READ ENERGIES (KEV) AND NUMBERS OF MONTE CARLO HISTORIES READ(5,105)E(M4+1),ZH(M4+1),E(M4+2),ZH(M4+2) & ,E(M4+3),ZH(M4+3),E(M4+4),ZH(M4+4) 105 FORMAT(8E10.5) ! BLANK CARD SIGNALS END OF INPUT IF(E(M4+1).EQ.0.)GO TO 10 9 CONTINUE ! FIND NUMBER OF ENERGIES, NE 10 DO 11 J=1,4 MJ=M4-4+J IF(E(MJ).GT.0.)NE=MJ 11 CONTINUE ! WRITE INPUT HEADING WRITE(8,107) 107 FORMAT( & ' I N P U T '/ & ' ========= '// & ' NUCLEON ABUN- BINDING PAIRING EFF. NUCL. ', & 'ORB.ANG. AV. RAD. AV. LEVEL STRENGTH STRENGTH', & ' NUCLEAR EFFI- '/ & ' NUMBER DANCE ENERGY ENERGY TEMP. SPIN ', & ' MOM. WIDTH SPACING FCT. FOR FCT. FOR', & ' RADIUS CIENCY'/ & ' (MEV) (MEV) (DEG.K) Q.NO. ', & ' Q.NO. (EV) (EV) EL. SCATT. INEL. SC', & '. (FM) '/) ! ------------- End of file reading -------------------------------------------- ! PREPARE ENERGY-INDEPENDENT PARAMETERS AND PRINT INPUT DO 16 I=1,NI ! CHANNEL RADIUS chan_rad(I)=1.23*A(I)**(1./3.)+0.8 ! GILBERT-CAMERON MATCHING ENERGY AI=FLOAT(INT(A(I)+1.5)) ! write (6,*)'a= ',a(i),ai UM(I)=2.5+150./AI ! FIND FERMI GAS MODEL A-PARAMETER FROM S-WAVE SPACING QI=spin(I)+.5 U=BE(I)-PE(I) CC=LOG(0.2367E6*AI*U/(D(1,I)*QI)) XO=CC DO 12 M=1,12 VARJ2=0.1460*XO*AI**(2./3.) 
FJ=.5*(EXP(-(QI-1.)/VARJ2)-EXP(-(QI+1.)/VARJ2))*VARJ2/QI XX=CC-LOG(FJ)+2.*LOG(XO) IF(ABS(XX-XO).LT.1.E-6)GO TO 13 XO=XX 12 CONTINUE 13 AP(I)=XX**2/(4.*U) ! DETERMINE MINIMUM AND MAXIMUM COMPOUND SPIN POSSIBLE X2I=2.*spin(I) I2=int(X2I+.01) LX=NL(I) DO 15 L=1,LX J2X(L,I)=I2+2*L-1 J2N(L,I)=1 IF(I2.GT.2*L-2)J2N(L,I)=I2-2*L+1 IF(I2.LT.2*L-2)J2N(L,I)=2*L-I2-3 ! CALCULATE SUM OF SPIN FACTORS SUMGJ(L,I)=FLOAT((J2X(L,I)+J2N(L,I)+2)*(J2X(L,I)-J2N(L,I)+2))/8./(X2I+1.) ! LEVEL DENSITY FOR GIVEN L IF(L.GT.1)D(L,I)=D(1,I)/SUMGJ(L,I) ! IF(L.GT.1)D(L,I)=D(L,I)-- modified by BJM 2/5/2015 to override auto-calculation of level spacings ! and accept user input ! -- End BJM J=0 J2MN=J2N(L,I) J2MX=J2X(L,I) DO 14 J2=J2MN,J2MX,2 J=J+1 ! CALCULATE SPIN FACTOR X2J=FLOAT(J2) G(J,L,I)=(X2J+1.)/2./(X2I+1.) ! LEVEL DENSITY FOR GIVEN L AND J DJL(J,L,I)=D(1,I)/G(J,L,I) ! DJL(J,L,I)=D(L,I)/G(J,L,I)-- modified by BJM 2/5/2015 to override auto-calculation of level spacings ! and accept user input ! PRINT*, DJL(J,L,I) ! -- End BJM ZL(L)=FLOAT(L-1) 14 CONTINUE IF(R(L,I).EQ.0.)R(L,I)=chan_rad(I) 15 CONTINUE WRITE(8,108)A(I),abundance(I),BE(I),PE(I),eff_temp(I),spin(I), & (ZL(L),GG(L,I),D(L,I),S(L,I),SI (L,I),R(L,I),det_effic(L,I),L=1,LX) 108 FORMAT(F6.1,F11.4,F8.3,F9.3,2F8.1,F7.0,1PE16.4,1P,4E12.4,0PF9.4/(50X,0PF7.0,1PE16.4,1P,4E12.4,0PF9.4)) 16 CONTINUE ! FIRST PART ! ANALYTICAL CROSS SECTION CALCULATION FOR ALL ENERGIES ! BEGIN K-LOOP (ENERGIES) ! PRINT*, "Running analytical calculations..." DO 21 K=1,NE SG(K)=0. SC(K)=0. SP(K)=0. ST(K)=0. ! BEGIN ISOTOPE LOOP DO 20 I=1,NI SGI(I,K)=0. STI(I,K)=0. SPI(I,K)=0. do L=1,4 SGL(I,K,L)=0. end do AA(I)=(1.+1./A(I))**2 ! GET ENERGY DEPENDENCE FACTORS FOR LEVEL SPACINGS AND ! RADIATION WIDTHS ! CALL ENDEP(E(K),A(I),BE(I),PE(I),AP(I),UM(I),EDGG,EDD) ! -- Modified by BJM on 2/6/2015 to remove energy dependence ! EDD=1 ! EDGG=1 ! -- End BJM ! BEGIN LOOP OF PARTIAL WAVES LX=NL(I) DO 19 L=1,LX ! GET PENETRABILITIES, SHIFTS, HARD SPHERE PHASE SHIFTS AK=chan_rad(I)*SQRT(E(K)/AA(I))/143.92 ! AK = radius*sqrt(Energy/A_mass)/H_bar ! CALL PEPS(AK,L,DUMMY,VL) ! RK=AK*R(L,I)/chan_rad(I) ! CALL PEPS(RK,L,PL,DUMMY) ! ! CALCULATE SUM OVER ALL POSSIBLE COMPOUND SPINS S3=0. J=0 J2MN=J2N(L,I) J2MX=J2X(L,I) DO 18 J2=J2MN,J2MX,2 J=J+1 ! FIND NUMBER OF POSSIBLE CHANNEL SPINS FOR GIVEN J AND L EJL=1. IF(J2.LT.J2X(L,I).AND.J2.GT.J2N(L,I).OR.2*L.EQ.I2+2.AND.L.NE.1.0.AND.J2.EQ.J2N(L,I)) EJL=2. ! REDUCED WIDTHS FOR ELASTIC AND INELASTIC SCATTERING GNR(J,L,I) =S(L,I) *EJL*DJL(J,L,I) GNRIN(J,L,I)=SI(L,I)*EJL*DJL(J,L,I) ! NEUTRON WIDTHS FOR ELASTIC AND INELASTIC SCATTERING SQ=SQRT(E(K)*1000.)*VL GN =GNR (J,L,I)*SQ GNIN=GNRIN(J,L,I)*SQ ! FIND PSI-FUNCTION GIVING PORTER-THOMAS AVERAGE ETA=SQRT((GG(L,I)*EDGG+GNIN)/(2.*GN*EDD)) ! CALL PFCN(0.0d0,ETA,U,V,KZ) ! PSI0=1.77245 *ETA*U ! FORM J-SUM (COMPOUND SPINS) S3=S3+G(J,L,I)**2*(1.-PSI0) 18 CONTINUE ! CONTRIBUTION TO EFFECTIVE CAPTURE CROSS SECTION SGL(I,K,L)=4.09E3*AA(I)*abundance(I)*GG(L,I)*EDGG*S3/E(K)/D(L,I)/EDD/SUMGJ(L,I)*det_effic(L,I) ! PURE CAPTURE CROSS SECTION SC(K)=SC(K)+SGL(I,K,L)/det_effic(L,I) ! CONTRIBUTION TO POTENTIAL SCATTERING CROSS SECTION SPL(I,K,L)=2.605E3/E(K)*AA(I)*abundance(I)*(2.*FLOAT(L)-1.)*SIN(PL)**2 ! CONTRIBUTION TO TOTAL CROSS SECTION STL(I,K,L)=SPL(I,K,L)+4.09E6/SQRT(1000.*E(K))*AA(I)*abundance(I)*S(L,I)*(2.*FLOAT(L)-1.)*VL*COS(2.*PL) ! FORM L-SUMS (PARTIAL WAVES) SGI(I,K)=SGI(I,K)+SGL(I,K,L) SPI(I,K)=SPI(I,K)+SPL(I,K,L) STI(I,K)=STI(I,K)+STL(I,K,L) 19 CONTINUE ! FORM I-SUMS (ISOTOPES) ! 
AVERAGE EFFECTIVE CAPTURE CROSS SECTION, K-TH ENERGY SG(K)=SG(K)+SGI(I,K) ! AVERAGE TOTAL CROSS SECTION, K-TH ENERGY ST(K)=ST(K)+STI(I,K) ! POTENTIAL SCATTERING CROSS SECTION, K-TH ENERGY SP(K)=SP(K)+SPI(I,K) 20 CONTINUE 21 CONTINUE WRITE(8,109) 109 FORMAT(//,//, & ' A N A L Y T I C A L R E S U L T S '/ & ' =================================== '// & ' AVERAGE CROSS SECTIONS ', & 'ISOTOPIC CONTRIBUTIONS PARTIAL-WAVE', & ' CONTRIBUTIONS '// & ' NEUTRON TOTAL POTENTIAL PURE EFFECTIVE ', & 'NUCLEON TOTAL POTENTIAL EFFECTIVE L TOTAL ', & ' POTENTIAL EFFECTIVE'/ & ' ENERGY SCATTERING CAPTURE CAPTURE ', & 'NUMBER SCATTERING CAPTURE ', & ' SCATTERING CAPTURE'/ & ' (KEV) (BARN) (BARN) (BARN) (BARN) ', & ' (BARN) (BARN) (BARN) (BARN) ', & ' (BARN) (BARN) '/) ! Loop over energies DO 25 K=1,NE WRITE(8,110)E(K),ST(K),SP(K),SC(K),SG(K),A(1),STI(1,K),SPI(1,K),SGI(1,K),STL(1,K,1),SPL(1,K,1),SGL(1,K,1) 110 FORMAT(/,0PF7.1,1P,4E11.3,0PF7.0,1P,3E11.3,4H 0,1P,3E11.3) ! Loop over isotopes DO 24 I=1,NI WRITE(11, 212) E(K),STL(I,K,1),SPL(I,K,1),SGL(I,K,1), & STL(I,K,2),SPL(I,K,2),SGL(I,K,2),STL(I,K,3),SPL(I,K,3), & SGL(I,K,3) IF(I.EQ.1)GO TO 22 WRITE(8,111) A(I),STI(I,K),SPI(I,K),SGI(I,K),STL(I,K,1),SPL(I,K,1),SGL(I,K,1) 111 FORMAT(/,051X ,0PF7.0,1P,3E11.3,4H 0,1P,3E11.3) 22 IF(NL(I).EQ.1)GO TO 24 LX=NL(I) ! Loop over L-values DO 23 L=2,LX LL=L-1 WRITE(8,112)LL,STL(I,K,L),SPL(I,K,L),SGL(I,K,L) 112 FORMAT(94X,I1,1P,3E11.3) 212 FORMAT(0PF7.1,9E11.3) 23 CONTINUE 24 CONTINUE 25 CONTINUE ! END OF ANALYTICAL CALCULATION ! print *, "Done!" ! IF(ZH(1).LT.1.)GO TO 1 ! SECOND PART ! MONTE CARLO CALCULATION OF CROSS SECTIONS AND OF ! PROBABILITY FOR DETECTED CAPTURE ! print *, "Beginning Monte Carlo simulations..." ! IF(XN(1).GT.0.0.AND.R_outer(1).GT.0.0.AND.R_in(1).EQ.0.0)ITYPO=1 ! -- Circular capture sample geometry IF(XN(1).EQ.0.0.AND.R_outer(1).GT.0.0.AND.R_in(1).GT.0.0)ITYPO=2 ! -- Spherical shell geometry IF(XN(1).GT.0.0.AND.R_outer(1).EQ.0.0.AND.R_in(1).EQ.0.0)ITYPO=3 ! -- Transmission geometry IF(XN(1).GT.0.0.AND.R_outer(1).GT.0.0.AND.R_in(1).GT.0.0)ITYPO=4 ! -- Self-indication geometry IF(ITYPO.EQ.1)WRITE(8,113) IF(ITYPO.EQ.2)WRITE(8,114) IF(ITYPO.EQ.3)WRITE(8,115) IF(ITYPO.EQ.4)WRITE(8,116) 113 FORMAT(//,//, & ' M O N T E C A R L O R E S U L T S '/ & ' ===================================== '// & ' CAPTURE DATA, CIRCULAR DISC SAMPLE '// & ' SAMPLE NEUTRON AV. EFF. AVERAGE FIRST-COLL. P', & 'ROB. FOR SELF-SHIELD. AVERAGE NUMBER OF NUMBER', & ' OF SAMPLE AV. '/ & ' THICK- ENERGY CAPTURE TOTAL CAPTURE C', & 'APTURE CORRECTION NUMBER OF HISTORIES RESONA', & 'NCE RADIUS TRANS-'/ & ' NESS CROSS CROSS PROBAB./ A', & 'FTER 1 FACTOR/ COLLISIONS PAIRS ', & ' MISS./'/ & ' SECTION/ SECTION/ UNCERT. C', & 'OLLISION/ UNCERT. ', & ' UNCERT.'/ & ' UNCERT. UNCERT. U', & 'NCERT. '/ & ' (NUC./B) (KEV) (B)/(B) (B)/(B) ', & ' /(PERCENT) ', & ' (NUC./B)' /) 114 FORMAT(//,//, & ' M O N T E C A R L O R E S U L T S '/ & ' ===================================== '// & ' CAPTURE DATA, SPHERICAL SHELL'// & ' SHELL NEUTRON AV. EFF. AVERAGE FIRST-COLL. P', & 'ROB. FOR SHELL AVERAGE NUMBER OF NUMBER', & ' OF SAMPLE AV. '/ & ' THICK- ENERGY CAPTURE TOTAL CAPTURE C', & 'APTURE TRANS- NUMBER OF HISTORIES RESONA', & 'NCE RADIUS TRANS- '/ & ' NESS CROSS CROSS PROBAB./ A', & 'FTER 1 MISSION/ COLLISIONS PAIRS ', & ' MISS./ '/ & ' SECTION/ SECTION/ UNCERT. C', & 'OLLISION/ UNCERT. ', & ' UNCERT.'/ & ' UNCERT. UNCERT. U', & 'NCERT. 
'/ & ' (NUC./B) (KEV) (B)/(B) (B)/(B) ', & ' (NUC./B)' /) 115 FORMAT(//,//, & ' M O N T E C A R L O R E S U L T S '/ & ' ===================================== '// & ' TRANSMISSION DATA '// & ' SAMPLE NEUTRON AV. EFF. AVERAGE AVERAGE SELF', & '-SHIELD. NUMBER OF NUMBER OF '/ & ' THICK- ENERGY CAPTURE TOTAL TRANS. CORR', & 'ECTION HISTORIES RESONANCE '/ & ' NESS CROSS CROSS MISSION/ FACT', & 'OR/ PAIRS '/ & ' SECTION/ SECTION/ UNCERT. UNCE', & 'RT. '/ & ' UNCERT. UNCERT. ', & ' '/ & ' (NUC./B) (KEV) (B)/(B) (B)/(B) /(', & 'PERCENT) '/) 116 FORMAT(//,//, & ' M O N T E C A R L O R E S U L T S '/ & ' ===================================== '// & ' SELF-INDICATION DATA, DISC SAMPLES '// & ' CAPTURE NEUTRON AV. EFF. AVERAGE FIRST-COLL. P', & 'ROB. FOR SELF-SHIELD. AVERAGE NUMBER OF NUMBER', & ' OF SAMPLE FIRST '/ & ' SAMPLE ENERGY CAPTURE TOTAL CAPTURE C', & 'APTURE CORRECTION NUMBER OF HISTORIES RESONA', & 'NCE RADIUS SAMPLE'/ & ' THICK. CROSS CROSS PROBAB./ A', & 'FTER 1 FACTOR/ COLLISIONS PAIRS ', & ' THICK.'/ & ' SECTION/ SECTION/ UNCERT. C', & 'OLLISION/ UNCERT. '/ & ' UNCERT. UNCERT. U', & 'NCERT. '/ & ' (NUC./B) (KEV) (B)/(B) (B)/(B) ', & ' /(PERCENT) ', & ' (NUC./B) (NUC./B)'/) ! DETERMINATION OF EXTREME VALUES OF L FOR EACH PARITY AND J DO 36 I=1,NI LX=NL(I) ! TWICE EXTREME COMPOUND SPIN QUANTUM NUMBERS I2=int(2.*spin(I) + 0.01) JX2(I)=I2+2*LX-1 JN2(I)=I2-2*LX+1 IF(I2.GT.6)GO TO 26 IF(MOD(I2,2).EQ.0)JN2(I)=1 IF(MOD(I2,2).EQ.1)JN2(I)=0 ! JMX(I): NUMBER OF POSSIBLE J VALUES 26 JMX(I)=(JX2(I)-JN2(I))/2+1 JX=JMX(I) ! J: COUNTER FOR COMPOUND SPIN IN ASCENDING ORDER DO 35 J=1,JX J2=JN2(I)+2*J-2 ! M: PARITY LABEL, M=1 FOR PARITY OF TARGET GROUND STATE M=1 ! EXTREMAL L VALUES FOR GIVEN PARITY, J, AND ISOTOPE 27 IF(LX.GT.M+1)GO TO 30 IF(J2N(M,I).LE.J2.AND.J2X(M,I).GE.J2)GO TO 29 28 LJX(M,J,I)=-1 LJN(M,J,I)=-1 GO TO 34 29 LJX(M,J,I)=M LJN(M,J,I)=M GO TO 34 30 IF(J2N(M ,I).LE.J2.AND.J2X(M ,I).GE.J2)GO TO 31 IF(J2N(M+2,I).LE.J2.AND.J2X(M+2,I).GE.J2)GO TO 33 GO TO 28 31 IF(J2N(M+2,I).LE.J2.AND.J2X(M+2,I).GE.J2)GO TO 32 GO TO 29 32 LJX(M,J,I)=M+2 LJN(M,J,I)=M GO TO 34 33 LJX(M,J,I)=M+2 LJN(M,J,I)=M+2 34 IF(M.GE.2)GO TO 35 M=M+1 IF(M.LE.LX)GO TO 27 LJX(M,J,I)=-1 LJN(M,J,I)=-1 35 CONTINUE 36 CONTINUE ! BEGIN N-LOOP (SAMPLE THICKNESSES) 37 DO 46 NQ=1,NN N=NQ ! BEGIN K-LOOP (ENERGIES) DO 45 KQ=1,NE PRINT*, "Running energy bin ", KQ, " of ", NE K=KQ IF(ZH(KQ).EQ.0.)GO TO 45 IF(ZH(KQ).GT.0..AND.ZH(KQ).LT.100.)ZH(KQ)=100. IF(XN(N).GT.0.0 .AND. R_outer(N).NE.0.0 .AND. R_in(N).EQ.0.0)ITYPE=1 IF(XN(N).EQ.0.0 .AND. R_outer(N).NE.0.0 .AND. R_in(N).NE.0.0)ITYPE=2 IF(XN(N).GT.0.0 .AND. R_outer(N).EQ.0.0 .AND. R_in(N).EQ.0.0)ITYPE=3 IF(XN(N).GT.0.0 .AND. R_outer(N).NE.0.0 .AND. R_in(N).NE.0.0)ITYPE=4 IF(ITYPE.EQ.ITYPO)GO TO 38 IF(ITYPE.EQ.1)WRITE(8,113) IF(ITYPE.EQ.2)WRITE(8,114) IF(ITYPE.EQ.3)WRITE(8,115) IF(ITYPE.EQ.4)WRITE(8,116) ITYPO=ITYPE 38 CONTINUE GO TO (39,40,42,43),ITYPE ! 39 xx and 43 off to shut off multiple scattering (39, 40, 42, 43) ! CYLINDRICAL SAMPLE CAPTURE 39 CALL MUSC(AP,UM,A,abundance,BE,PE,eff_temp,NL,AA,chan_rad,GG, & R,det_effic,J2X,J2N,XN,E,SG,SP,G,GNR,GNRIN,DJL,SSCF,DSSCF, & N,K,I,L,J,NI,LX,R_in,R_outer,ZH,PO,PS,DPO,DPS,numResPairs,SGM,STM,DSGM,DSTM, & TMC,DTMC,SNC,ITYPE,IHIST,LJX,LJN,JN2,JMX, R_beam) ! GO TO 41 ! ! SPHERICAL SHELL CAPTURE 40 CALL MUSS(AP,UM,A,abundance,BE,PE,eff_temp,NL,AA,chan_rad,GG, & R,det_effic,J2X,J2N,E,SG,SP,G,GNR,GNRIN,DJL,SSCF,DSSCF, & N,K,I,L,J,NI,LX,R_in,R_outer,ZH,PO,PS,DPO,DPS,numResPairs,SGM,STM,DSGM,DSTM, & TMC,DTMC,SNC,XNSS,ITYPE,IHIST,LJX,LJN,JN2,JMX) ! 
XN(N)=R_outer(N)-R_in(N) 41 IF(K.EQ.1) & WRITE(8,117)XN(N),E(K),SGM,STM,PO,PS,SSCF,SNC, ZH(K),float(numResPairs),R_outer(N), & TMC ,DSGM,DSTM,DPO,DPS,DSSCF,DTMC IF(K.GT.1) & WRITE(8,118) E(K),SGM,STM,PO,PS,SSCF,SNC, ZH(K),float(numResPairs),R_outer(N), & TMC ,DSGM,DSTM,DPO,DPS,DSSCF,DTMC WRITE(12,312)E(K),STM,DSTM,SGM,DSGM,SSCF,DSSCF GO TO 44 ! ! TRANSMISSION 42 CALL MOCT(AP,UM,A,abundance,BE,PE,eff_temp,NL,AA,chan_rad,GG, & R,det_effic,J2X,J2N,XN,E,SG,SP,G,GNR,GNRIN,DJL, & N,K,I,L,J,NI,LX,ZH,numResPairs,SGM,STM,DSGM,DSTM, & TMC,DTMC,TAU,DTAU,IHIST,LJX,LJN,JN2,JMX) ! IF(K.EQ.1) & WRITE(8,119)XN(N),E(K),SGM,STM,TMC,TAU,ZH(K),float(numResPairs),DSGM,DSTM,DTMC,DTAU IF(K.GT.1) & WRITE(8,120) E(K),SGM,STM,TMC,TAU,ZH(K),float(numResPairs),DSGM,DSTM,DTMC,DTAU GO TO 44 ! ! SELF-INDICATION 43 CALL MUSC(AP,UM,A,abundance,BE,PE,eff_temp,NL,AA,chan_rad,GG, & R,det_effic,J2X,J2N,XN,E,SG,SP,G,GNR,GNRIN,DJL,SSCF,DSSCF, & N,K,I,L,J,NI,LX,R_in,R_outer,ZH,PO,PS,DPO,DPS,numResPairs,SGM,STM,DSGM,DSTM, & TMC,DTMC,SNC,ITYPE,IHIST,LJX,LJN,JN2,JMX, R_beam) ! IF(K.EQ.1) & WRITE(8,121)XN(N),E(K),SGM,STM,PO,PS,SSCF,SNC, ZH(K),numResPairs,R_outer(N), & R_in(N),DSGM,DSTM,DPO,DPS,DSSCF IF(K.GT.1) & WRITE(8,122) E(K),SGM,STM,PO,PS,SSCF,SNC, ZH(K),numResPairs,R_outer(N), & R_in(N),DSGM,DSTM,DPO,DPS,DSSCF 117 FORMAT(/ & 1PE10.3,0PF7.3,1PE11.3,2E10.3,3E13.3,0P,2F10.0,1X,1P,2E10.3/ & 17X,1PE11.3,2E10.3,2E13.3,44X ,1P E10.3/) 118 FORMAT(0PF17.3,1PE11.3,2E10.3,3E13.3,0P,2F10.0,1X,1P,2E10.3/ & 17X,1PE11.3,2E10.3,2E13.3,44X ,1P E10.3/) 119 FORMAT(/ & 1PE10.3,0PF7.3,1PE11.3,2E10.3,E13.4,0P,2F11.0/ & 17X,1PE11.3,2E10.3,E13.4/) 120 FORMAT(0PF17.3,1PE11.3,2E10.3,E13.4,0P,2F11.0/ & 17X,1PE11.3,2E10.3,E13.4/) 121 FORMAT(/ & 1PE10.3,0PF7.3,1PE11.3,2E10.3,3E13.3,0P,2F10.0,1X,1P,2E10.3/ & 17X,1PE11.3,2E10.3,2E13.3/) 122 FORMAT(0PF17.3,1PE11.3,2E10.3,3E13.3,0P,2F10.0,1X,1P,2E10.3/ & 17X,1PE11.3,2E10.3,2E13.3/) 312 FORMAT(0PF10.3,6E11.3) 44 CONTINUE 45 CONTINUE 46 CONTINUE ! GO TO 1 ! JMB: I don't know why this was here print *, new_line('a')," sesh is done!",new_line('a') 999 STOP end program sesh ! !--------------------------- END MAIN --------------------------------- !
{"hexsha": "2affe72ee8f4086375d8d4f6ebe92b4132d2d92a", "size": 26769, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/sesh.f90", "max_stars_repo_name": "brownjm1968/sesh", "max_stars_repo_head_hexsha": "6ae09fffedecd7f36e46392da75ca56004a74f26", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sesh.f90", "max_issues_repo_name": "brownjm1968/sesh", "max_issues_repo_head_hexsha": "6ae09fffedecd7f36e46392da75ca56004a74f26", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sesh.f90", "max_forks_repo_name": "brownjm1968/sesh", "max_forks_repo_head_hexsha": "6ae09fffedecd7f36e46392da75ca56004a74f26", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T12:59:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:59:45.000Z", "avg_line_length": 38.7956521739, "max_line_length": 129, "alphanum_fraction": 0.4725615451, "num_tokens": 9743}
*----------------------------------------------------------------------* subroutine prop_evaluate(ndens,rank,label_den,trplt, & env_type,op_info,str_info,orb_info) *----------------------------------------------------------------------* * * for a given list of densities (all have rank "rank") evaluate * all properties available in the environment * *----------------------------------------------------------------------* implicit none include 'opdim.h' include 'ioparam.h' include 'def_graph.h' include 'def_strinf.h' include 'def_orbinf.h' include 'mdef_operator_info.h' integer, intent(in) :: & ndens, rank logical, intent(in) :: & trplt character(*), intent(in) :: & label_den(ndens) character(*), intent(in) :: & env_type type(operator_info) :: & op_info type(strinf) :: & str_info type(orbinf) :: & orb_info integer :: & cmo_type, idens, idxden character :: & label*8 type(filinf) :: & ffcmo, ffdao, ffprop integer, external :: & idx_mel_list ! get MO-AO trafo from environment call file_init(ffcmo,'CMO',ftyp_da_unf,lblk_da) cmo_type = -1 call import_cmo(ffcmo,cmo_type,env_type,orb_info) do idens = 1, ndens idxden = idx_mel_list(label_den(idens),op_info) if (idxden.le.0) & call quit(1,'prop_evaluate', & 'label not found: '//trim(label_den(idens))) ! back-transform densities if (rank.eq.1) then call file_init(ffdao,'DAO',ftyp_da_unf,lblk_da) call btran_one(ffdao,ffcmo,trplt, & op_info%mel_arr(idxden)%mel,orb_info,str_info) else call quit(1,'prop_evaluate','only rank==1 supported') end if ! calculate trace with one-electron integrals ! provided by environment call oneprop_ao(ffdao,op_info%mel_arr(idxden)%mel, & env_type,orb_info) end do call file_delete(ffcmo) return end
{"hexsha": "0e34051df38e3b171bf870aab81dec0907a4d07c", "size": 2145, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "solve/prop_evaluate.f", "max_stars_repo_name": "ak-ustutt/GeCCo-public", "max_stars_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "solve/prop_evaluate.f", "max_issues_repo_name": "ak-ustutt/GeCCo-public", "max_issues_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "solve/prop_evaluate.f", "max_forks_repo_name": "ak-ustutt/GeCCo-public", "max_forks_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5, "max_line_length": 72, "alphanum_fraction": 0.5174825175, "num_tokens": 545}
# License: MIT
# Author: Karl Stelzner

import os
import sys

import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import numpy as np
from numpy.random import randint
from PIL import Image
from torch.utils.data._utils.collate import default_collate

import json


def progress_bar(count, total, status=''):
    bar_len = 60
    filled_len = int(round(bar_len * count / float(total)))

    percents = round(100.0 * count / float(total), 1)
    bar = '=' * filled_len + '-' * (bar_len - filled_len)

    sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
    sys.stdout.flush()


def make_sprites(n=50000, height=64, width=64):
    images = np.zeros((n, height, width, 3))
    counts = np.zeros((n,))
    print('Generating sprite dataset...')
    for i in range(n):
        # randint samples from [low, high); it replaces the deprecated
        # random_integers (inclusive upper bound), hence the +1 shifts below.
        num_sprites = randint(0, 3)
        counts[i] = num_sprites
        for j in range(num_sprites):
            pos_y = randint(0, height - 11)
            pos_x = randint(0, width - 11)
            scale = randint(12, min(16, height - pos_y, width - pos_x) + 1)
            cat = randint(0, 3)
            sprite = np.zeros((height, width, 3))

            if cat == 0:  # draw circle
                center_x = pos_x + scale // 2.0
                center_y = pos_y + scale // 2.0
                for x in range(height):
                    for y in range(width):
                        dist_center_sq = (x - center_x)**2 + (y - center_y)**2
                        if dist_center_sq < (scale // 2.0)**2:
                            sprite[x][y][cat] = 1.0
            elif cat == 1:  # draw square
                sprite[pos_x:pos_x + scale, pos_y:pos_y + scale, cat] = 1.0
            else:  # draw square turned by 45 degrees
                center_x = pos_x + scale // 2.0
                center_y = pos_y + scale // 2.0
                for x in range(height):
                    for y in range(width):
                        if abs(x - center_x) + abs(y - center_y) < (scale // 2.0):
                            sprite[x][y][cat] = 1.0

            images[i] += sprite
        if i % 100 == 0:
            progress_bar(i, n)

    images = np.clip(images, 0.0, 1.0)

    return {'x_train': images[:4 * n // 5],
            'count_train': counts[:4 * n // 5],
            'x_test': images[4 * n // 5:],
            'count_test': counts[4 * n // 5:]}


class Sprites(Dataset):
    def __init__(self, directory, n=50000, canvas_size=64,
                 train=True, transform=None):
        np_file = 'sprites_{}_{}.npz'.format(n, canvas_size)
        full_path = os.path.join(directory, np_file)
        if not os.path.isfile(full_path):
            # Create the target directory if needed (the previous version
            # hard-coded './data' and swallowed all errors with a bare except).
            os.makedirs(directory, exist_ok=True)
            gen_data = make_sprites(n, canvas_size, canvas_size)
            np.savez(full_path, **gen_data)

        data = np.load(full_path)
        self.transform = transform
        self.images = data['x_train'] if train else data['x_test']
        self.counts = data['count_train'] if train else data['count_test']

    def __len__(self):
        return self.images.shape[0]

    def __getitem__(self, idx):
        img = self.images[idx]
        if self.transform is not None:
            img = self.transform(img).float()
        return img, self.counts[idx]


class Clevr(Dataset):
    def __init__(self, directory, train=True, transform=None):
        self.images_path = directory + 'images/train/'
        self.filenames = os.listdir(self.images_path)
        json_path = directory + 'scenes/CLEVR_train_scenes.json'
        with open(json_path) as json_file:
            data = json.load(json_file)
        self.labels = data['scenes']
        self.n = len(self.filenames)
        self.transform = transform

    def __len__(self):
        return self.n

    def _name2idx(self, key, value):
        if key == 'shape':
            if value == 'cube':
                return 0
            elif value == 'cylinder':
                return 1
            elif value == 'sphere':
                return 2
        elif key == 'size':
            if value == 'small':
                return 0
            elif value == 'large':
                return 1
        elif key == 'material':
            if value == 'metal':
                return 0
            elif value == 'rubber':
                return 1
        elif key == 'color':
            if value == 'red':
                return 0
            elif value == 'blue':
                return 1
            elif value == 'purple':
                return 2
            elif value == 'gray':
                return 3
            elif value == 'cyan':
                return 4
elif value == 'brown': return 5 elif value == 'yellow': return 6 elif value == 'green': return 7 elif key == '3d_coords': return (np.array(value) + 3)/6 else: return value def __getitem__(self, idx): #Image imgpath = os.path.join(self.images_path, self.filenames[idx]) img = Image.open(imgpath) if self.transform is not None: img = self.transform(img).float() #Label image_idx = self.labels[idx]['image_index'] assert image_idx == idx objects = self.labels[idx]['objects'] num_objects = len(objects) assert num_objects != 0 keys = objects[0].keys() label = {k:[] for k in keys} for i in range(num_objects): for k in keys: label[k].append(self._name2idx(k, objects[i][k])) for k in keys: t = label[k] label[k] = torch.as_tensor(t) return img, label class ClevrRela(Dataset): def __init__(self, directory, train=True, transform=None): self.images_path = directory + 'images/' self.filenames = os.listdir(self.images_path) json_path = directory + 'CLEVR_scenes.json' with open(json_path) as json_file: data = json.load(json_file) self.labels = data['scenes'] self.n = len(self.filenames) self.transform = transform def __len__(self): return self.n def _name2idx(self, key, value): if key == 'shape': if value == 'cube': return 0 elif value == 'cylinder': return 1 elif value == 'sphere': return 2 elif key == 'size': if value == 'small': return 0 elif value == 'large': return 1 elif key == 'material': if value == 'metal': return 0 elif value == 'rubber': return 1 elif key == 'color': if value == 'red': return 0 elif value == 'blue': return 1 elif value == 'purple': return 2 elif value == 'gray': return 3 elif value == 'cyan': return 4 elif value == 'brown': return 5 elif value == 'yellow': return 6 elif value == 'green': return 7 elif key == '3d_coords': return (np.array(value) + 3)/6 else: return value def __getitem__(self, idx): #Image imgpath = os.path.join(self.images_path, self.filenames[idx]) img = Image.open(imgpath) if self.transform is not None: img = self.transform(img).float() #Label image_idx = self.labels[idx]['image_index'] assert image_idx == idx objects = self.labels[idx]['objects'] num_objects = len(objects) assert num_objects != 0 keys = objects[0].keys() label = {k:[] for k in keys} for i in range(num_objects): for k in keys: label[k].append(self._name2idx(k, objects[i][k])) for k in keys: t = label[k] label[k] = torch.as_tensor(t) label['relation'] = torch.as_tensor(self.labels[idx]['relationships']) return img, label ######################################################################## # # ADDED # ######################################################################### class MultiObjectDataset(Dataset): def __init__(self, data_path, train, split=0.9, transform = None): super().__init__() # Load data data = np.load(data_path, allow_pickle=True) # Rescale images and permute dimensions x = np.asarray(data['x'], dtype=np.float32) / 255 x = np.transpose(x, [0, 3, 1, 2]) # batch, channels, h, w # Get labels try: labels = data['labels'].item() except: labels = data['labels'] print(type(labels)) # Split train and test split = int(split * len(x)) if train: indices = range(split) else: indices = range(split, len(x)) # From numpy/ndarray to torch tensors (labels are lists of tensors as # they might have different sizes) self.x = torch.from_numpy(x[indices]) try: labels.pop('text', None) labels.pop('vertices', None) except: print("No text to pop !") self.labels = self._labels_to_tensorlist(labels, indices) @staticmethod def _labels_to_tensorlist(labels, indices): out = {k: [] for k in labels.keys()} 
for i in indices: for k in labels.keys(): t = labels[k][i] t = torch.as_tensor(t) out[k].append(t) return out def __getitem__(self, index): x = self.x[index] try: labels = {k: self.labels[k][index] for k in self.labels.keys()} except: labels = self.labels return x, labels def __len__(self): return self.x.size(0) class MultiObjectDataLoader(DataLoader): def __init__(self, *args, **kwargs): assert 'collate_fn' not in kwargs kwargs['collate_fn'] = self.collate_fn super().__init__(*args, **kwargs) @staticmethod def collate_fn(batch): # The input is a batch of (image, label_dict) _, item_labels = batch[0] keys = item_labels.keys() # Max label length in this batch # max_len[k] is the maximum length (in batch) of the label with name k # If at the end max_len[k] is -1, labels k are (probably all) scalars max_len = {k: -1 for k in keys} # If a label has more than 1 dimension, the padded tensor cannot simply # have size (batch, max_len). Whenever the length is >0 (i.e. the sequence # is not empty, store trailing dimensions. At the end if 1) all sequences # (in the batch, and for this label) are empty, or 2) this label is not # a sequence (scalar), then the trailing dims are None. trailing_dims = {k: None for k in keys} # Make first pass to get shape info for padding for _, labels in batch: for k in keys: try: max_len[k] = max(max_len[k], len(labels[k])) if len(labels[k]) > 0: trailing_dims[k] = labels[k].size()[1:] except TypeError: # scalar pass # For each item in the batch, take each key and pad the corresponding # value (label) so we can call the default collate function pad = MultiObjectDataLoader._pad_tensor for i in range(len(batch)): for k in keys: if trailing_dims[k] is None: continue if k == 'relation': size = [max_len[k], max_len[k]] + list(trailing_dims[k])[1:] batch[i][1][k] = MultiObjectDataLoader._pad_tensor_relation(batch[i][1][k], size, value = 0.) else: size = [max_len[k]] + list(trailing_dims[k]) batch[i][1][k] = pad(batch[i][1][k], size) return default_collate(batch) @staticmethod def _pad_tensor(x, size, value=None): assert isinstance(x, torch.Tensor) input_size = len(x) if value is None: value = float('nan') # Copy input tensor into a tensor filled with specified value # Convert everything to float, not ideal but it's robust out = torch.zeros(*size, dtype=torch.float) out.fill_(value) if input_size > 0: # only if at least one element in the sequence out[:input_size] = x.float() return out @staticmethod def _pad_tensor_relation(x, size, value=None): assert isinstance(x, torch.Tensor) input_size = x.shape[:2] if value is None: value = float('nan') # Copy input tensor into a tensor filled with specified value # Convert everything to float, not ideal but it's robust out = torch.zeros(*size, dtype=torch.float) out.fill_(value) #if input_size > 0: # only if at least one element in the sequence out[:input_size[0], :input_size[1],:] = x.float() return out
{"hexsha": "ec32efa0e5e4c32a4aa33caf37b12b35cdac995d", "size": 13647, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/datasets.py", "max_stars_repo_name": "GpNico/Relations_MONet", "max_stars_repo_head_hexsha": "a9c6cabc8f34819af15f55e9f7203a9e055d351d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-18T03:42:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-18T03:42:05.000Z", "max_issues_repo_path": "src/datasets.py", "max_issues_repo_name": "GpNico/Relations_Nets", "max_issues_repo_head_hexsha": "a9c6cabc8f34819af15f55e9f7203a9e055d351d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/datasets.py", "max_forks_repo_name": "GpNico/Relations_Nets", "max_forks_repo_head_hexsha": "a9c6cabc8f34819af15f55e9f7203a9e055d351d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4928571429, "max_line_length": 113, "alphanum_fraction": 0.5132996263, "include": true, "reason": "import numpy,from numpy", "num_tokens": 3193}
import numpy i = 122
{"hexsha": "79bc8127c0a8f6177f933f1c0994f76f469c9ea0", "size": 21, "ext": "py", "lang": "Python", "max_stars_repo_path": "1.py", "max_stars_repo_name": "fxphero/FIRST", "max_stars_repo_head_hexsha": "6350eca5a34c748e8d7f483c455be82966ae9d52", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "1.py", "max_issues_repo_name": "fxphero/FIRST", "max_issues_repo_head_hexsha": "6350eca5a34c748e8d7f483c455be82966ae9d52", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "1.py", "max_forks_repo_name": "fxphero/FIRST", "max_forks_repo_head_hexsha": "6350eca5a34c748e8d7f483c455be82966ae9d52", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 7.0, "max_line_length": 12, "alphanum_fraction": 0.7142857143, "include": true, "reason": "import numpy", "num_tokens": 8}
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.

import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

import mlflow
import mlflow.sklearn

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

with mlflow.start_run():
    X, y = load_diabetes(return_X_y=True)
    columns = ['age', 'gender', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
    data = {
        "train": {"X": X_train, "y": y_train},
        "test": {"X": X_test, "y": y_test}}

    mlflow.log_metric("Training samples", len(data['train']['X']))
    mlflow.log_metric("Test samples", len(data['test']['X']))

    # Log the algorithm parameter alpha to the run; alpha is a fixed
    # hyperparameter, so it belongs in log_param rather than log_metric.
    mlflow.log_param('alpha', 0.03)
    # Create, fit, and test the scikit-learn Ridge regression model
    regression_model = Ridge(alpha=0.03)
    regression_model.fit(data['train']['X'], data['train']['y'])
    preds = regression_model.predict(data['test']['X'])

    # Log mean squared error
    print('Mean Squared Error is', mean_squared_error(data['test']['y'], preds))
    mlflow.log_metric('mse', mean_squared_error(data['test']['y'], preds))

    # Save the model to the outputs directory for capture
    mlflow.sklearn.log_model(regression_model, "model")

    # Plot actuals vs predictions and save the plot within the run
    fig = plt.figure(1)
    idx = np.argsort(data['test']['y'])
    plt.plot(data['test']['y'][idx], preds[idx])
    fig.savefig("actuals_vs_predictions.png")
    mlflow.log_artifact("actuals_vs_predictions.png")
{"hexsha": "a89ab77638e16e06f55ec970594a42224c26a953", "size": 1770, "ext": "py", "lang": "Python", "max_stars_repo_path": "how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train_diabetes.py", "max_stars_repo_name": "lobrien/MachineLearningNotebooks", "max_stars_repo_head_hexsha": "a56b69448c070b243125b66c303ba670a5a157c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-08-04T18:37:21.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-21T20:09:31.000Z", "max_issues_repo_path": "how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train_diabetes.py", "max_issues_repo_name": "vijetajo/MachineLearningNotebooks", "max_issues_repo_head_hexsha": "7e2c1ca152e280dc544f3c9654e9906a7f17c89b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-08-14T23:21:54.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-14T23:34:35.000Z", "max_forks_repo_path": "how-to-use-azureml/track-and-monitor-experiments/using-mlflow/train-remote/train_diabetes.py", "max_forks_repo_name": "vijetajo/MachineLearningNotebooks", "max_forks_repo_head_hexsha": "7e2c1ca152e280dc544f3c9654e9906a7f17c89b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-05-03T20:20:53.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-04T13:01:49.000Z", "avg_line_length": 37.6595744681, "max_line_length": 92, "alphanum_fraction": 0.6892655367, "include": true, "reason": "import numpy", "num_tokens": 462}
import tensorflow as tf
import numpy as np
import os

# Utility functions to apply data augmentations.
# Some of the functions are directly borrowed from https://www.wouterbulten.nl/blog/tech/data-augmentation-using-tensorflow-data-dataset/

def flip(x):
    """Flip augmentation

    Args:
        x: Image to flip

    Returns:
        Augmented image
    """
    x = tf.image.random_flip_left_right(x)
    x = tf.image.random_flip_up_down(x)

    return x

def color(x):
    """Color augmentation

    Args:
        x: Image

    Returns:
        Augmented image
    """
    x = tf.image.random_hue(x, 0.08)
    x = tf.image.random_saturation(x, 0.6, 1.6)
    x = tf.image.random_brightness(x, 0.05)
    x = tf.image.random_contrast(x, 0.7, 1.3)
    return x

def rotate(x):
    """Rotation augmentation

    Args:
        x: Image

    Returns:
        Augmented image
    """

    return tf.image.rot90(x, tf.random_uniform(shape=[], minval=0, maxval=4, dtype=tf.int32))

def zoom(x):
    """Zoom augmentation

    Args:
        x: Image

    Returns:
        Augmented image
    """

    # Generate 20 crop settings, ranging from a 1% to 20% crop.
    scales = list(np.arange(0.8, 1.0, 0.01))
    boxes = np.zeros((len(scales), 4))

    for i, scale in enumerate(scales):
        x1 = y1 = 0.5 - (0.5 * scale)
        x2 = y2 = 0.5 + (0.5 * scale)
        boxes[i] = [x1, y1, x2, y2]

    def random_crop(img):
        # Create different crops for an image
        crops = tf.image.crop_and_resize([img], boxes=boxes, box_ind=np.zeros(len(scales)), crop_size=(32, 32))
        # Return a random crop
        return crops[tf.random_uniform(shape=[], minval=0, maxval=len(scales), dtype=tf.int32)]

    choice = tf.random_uniform(shape=[], minval=0., maxval=1., dtype=tf.float32)

    # Only apply cropping 25% of the time (choice < 0.75 keeps the image as-is)
    return tf.cond(choice < 0.75, lambda: x, lambda: random_crop(x))

def load_image(image_path, args, augment=False):
    label = encode_label(image_path, args["class_names"])
    img = tf.io.read_file(image_path)
    img = tf.image.decode_jpeg(img, channels=3)
    # convert_image_dtype already rescales uint8 pixels to [0, 1], so the
    # extra division by 255 that used to follow the resize has been dropped.
    img = tf.image.convert_image_dtype(img, tf.float32)
    if augment:
        augmentations = [flip, color, zoom, rotate]
        for f in augmentations:
            img = tf.cond(tf.random_uniform([], 0, 1) > 0.75, lambda: f(img), lambda: img)
        img = tf.clip_by_value(img, 0, 1)

    img = tf.image.resize(img, (args["input_size"], args["input_size"]))
    return img, label

def encode_label(filepath, classes):
    label = tf.strings.split(filepath, os.path.sep, result_type='RaggedTensor')[-2]
    label = label == classes
    return tf.cast(label, tf.int32)

def create_dataset(args):
    train_ds = tf.data.Dataset.list_files(os.path.join(args["train_dir"], '*', '*.jpg')).map(lambda x: load_image(x, args, augment=True), num_parallel_calls=8)
    val_ds = tf.data.Dataset.list_files(os.path.join(args["validation_dir"], '*', '*.jpg')).map(lambda x: load_image(x, args, augment=False), num_parallel_calls=8)
    return train_ds, val_ds
{"hexsha": "6048b3782becf547b8d21e3a2d8a318447d7386d", "size": 3083, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "monatis/aiplatform-workshop", "max_stars_repo_head_hexsha": "946ea6f931da1cd8f8e51f78d025d51b1a842ec7", "max_stars_repo_licenses": ["WTFPL"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-22T14:31:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-02T16:39:35.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "monatis/aiplatform-workshop", "max_issues_repo_head_hexsha": "946ea6f931da1cd8f8e51f78d025d51b1a842ec7", "max_issues_repo_licenses": ["WTFPL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "monatis/aiplatform-workshop", "max_forks_repo_head_hexsha": "946ea6f931da1cd8f8e51f78d025d51b1a842ec7", "max_forks_repo_licenses": ["WTFPL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.932038835, "max_line_length": 163, "alphanum_fraction": 0.6357444048, "include": true, "reason": "import numpy", "num_tokens": 854}
# Copyright 2019 The TensorNetwork Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import Optional, Any, Sequence, Tuple from tensornetwork.backends import base_backend from tensornetwork.backends.pytorch import decompositions import numpy as np # This might seem bad, but pytype treats tf.Tensor as Any anyway, so # we don't actually lose anything by doing this. Tensor = Any class PyTorchBackend(base_backend.BaseBackend): """See base_backend.BaseBackend for documentation.""" def __init__(self, dtype: Optional[Any] = None): super(PyTorchBackend, self).__init__() try: import torch except ImportError: raise ImportError("PyTorch not installed, please switch to a different " "backend or install PyTorch.") self.torch = torch self.name = "pytorch" self.dtype = dtype def tensordot(self, a: Tensor, b: Tensor, axes: Sequence[Sequence[int]]): return self.torch.tensordot(a, b, dims=axes) def reshape(self, tensor: Tensor, shape: Tensor): return self.torch.reshape(tensor, tuple(np.array(shape).astype(int))) def transpose(self, tensor, perm): return tensor.permute(perm) def svd_decomposition(self, tensor: Tensor, split_axis: int, max_singular_values: Optional[int] = None, max_truncation_error: Optional[float] = None ) -> Tuple[Tensor, Tensor, Tensor, Tensor]: return decompositions.svd_decomposition(self.torch, tensor, split_axis, max_singular_values, max_truncation_error) def qr_decomposition( self, tensor: Tensor, split_axis: int, ) -> Tuple[Tensor, Tensor]: return decompositions.qr_decomposition(self.torch, tensor, split_axis) def rq_decomposition( self, tensor: Tensor, split_axis: int, ) -> Tuple[Tensor, Tensor]: return decompositions.rq_decomposition(self.torch, tensor, split_axis) def concat(self, values: Tensor, axis: int) -> Tensor: return np.concatenate(values, axis) def shape(self, tensor: Tensor) -> Tensor: return self.torch.tensor(list(tensor.shape)) def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]: return tuple(tensor.shape) def prod(self, values: Tensor) -> int: return np.prod(np.array(values)) def sqrt(self, tensor: Tensor) -> Tensor: return self.torch.sqrt(tensor) def diag(self, tensor: Tensor) -> Tensor: return self.torch.diag(tensor) def convert_to_tensor(self, tensor: Tensor) -> Tensor: result = self.torch.as_tensor(tensor) if self.dtype is not None and result.dtype is not self.dtype: raise TypeError( "Backend '{}' cannot convert tensor of dtype {} to dtype {}".format( self.name, result.dtype, self.dtype)) return result def trace(self, tensor: Tensor) -> Tensor: return self.torch.einsum('...jj', tensor) def outer_product(self, tensor1: Tensor, tensor2: Tensor) -> Tensor: return self.torch.tensordot(tensor1, tensor2, dims=0) def einsum(self, expression: str, *tensors: Tensor) -> Tensor: return self.torch.einsum(expression, *tensors) def norm(self, tensor: Tensor) -> Tensor: return self.torch.norm(tensor) def eye(self, N: int, dtype: Optional[Any] = None, M: 
Optional[int] = None) -> Tensor: if not dtype: dtype = self.dtype if self.dtype is not None else self.torch.float64 if not M: M = N #torch crashes if one passes M = None with dtype!=None return self.torch.eye(n=N, m=M, dtype=dtype) def ones(self, shape: Tuple[int, ...], dtype: Optional[Any] = None) -> Tensor: if not dtype: dtype = self.dtype if self.dtype is not None else self.torch.float64 return self.torch.ones(shape, dtype=dtype) def zeros(self, shape: Tuple[int, ...], dtype: Optional[Any] = None) -> Tensor: if not dtype: dtype = self.dtype if self.dtype is not None else self.torch.float64 return self.torch.zeros(shape, dtype=dtype) def randn(self, shape: Tuple[int, ...], dtype: Optional[Any] = None, seed: Optional[int] = None) -> Tensor: if seed: self.torch.manual_seed(seed) if not dtype: dtype = self.dtype if self.dtype is not None else self.torch.float64 return self.torch.randn(shape, dtype=dtype) def conj(self, tensor: Tensor) -> Tensor: return tensor #pytorch does not support complex dtypes
{"hexsha": "4ff9216c569a741ad4097d5083124bf17c334264", "size": 5201, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensornetwork/backends/pytorch/pytorch_backend.py", "max_stars_repo_name": "esgantivar/TensorNetwork", "max_stars_repo_head_hexsha": "bf7092a677136356ee8bd48bc963cde3490ae1c7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensornetwork/backends/pytorch/pytorch_backend.py", "max_issues_repo_name": "esgantivar/TensorNetwork", "max_issues_repo_head_hexsha": "bf7092a677136356ee8bd48bc963cde3490ae1c7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensornetwork/backends/pytorch/pytorch_backend.py", "max_forks_repo_name": "esgantivar/TensorNetwork", "max_forks_repo_head_hexsha": "bf7092a677136356ee8bd48bc963cde3490ae1c7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-27T10:28:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-02T10:02:14.000Z", "avg_line_length": 35.6232876712, "max_line_length": 80, "alphanum_fraction": 0.6683330129, "include": true, "reason": "import numpy", "num_tokens": 1224}
[STATEMENT] lemma TBOUNDD: "TBOUND m t \<Longrightarrow> time m h \<le> t" [PROOF STATE] proof (prove) goal (1 subgoal): 1. TBOUND m t \<Longrightarrow> time m h \<le> t [PROOF STEP] by (auto simp: TBOUND_def)
{"llama_tokens": 87, "file": "Van_Emde_Boas_Trees_Time_Reasoning_Time_Reasoning", "length": 1}
function chi2 = chi_squared(y,fit,P,eb)

% returns *reduced* chi^2 value for use in data modelling
% "y" is a vector of data, "fit" is a vector of model values (size(fit)=size(y)), P is the number of
% parameters fit in the model, and eb is a vector of error bars (1-to-1 correspondence with y)
% Ref: John R. Taylor, "An Introduction to Error Analysis", (2nd ed., 1997)
% 11/11/01 Mike Scarpulla.  Please direct questions or comments to scarps@uclink.berkeley.edu

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

if nargin<3
   error('Wrong number of arguments passed to "chi_squared"')
end

% if error bars are not available, evaluate chi^2 by normalizing deviation^2 by magnitude of data.
% This assumes that the STDEV of a value scales as SQRT(value).  USE WITH THIS CAVEAT IN MIND
if nargin==3
   N = max(size(y));
   terms = ((y-fit).^2)./abs(y);
   chi2 = 1/(N-P)*sum(terms);
end

%if error bars are available, normalize the deviation to the expected error
if nargin==4
   N = max(size(y));
   terms = ((y-fit)./eb).^2;
   chi2 = 1/(N-P)*sum(terms);
end
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/1049-chisquared-m/chi_squared.m"}
import numpy as np import pandas as pd import itertools as it import matplotlib.pyplot as plt from sklearn.metrics import mean_absolute_error import funciones as f from InputsRevolvente import InputsRevolvente lista_nombres=['TSN'] ruta_real=['/Users/renzomartinch/Downloads/Comite_0622/TSN_Reales.csv'] ruta_teorico=['/Users/renzomartinch/Downloads/Comite_0622/TSN_Inputs.csv'] ruta_tmin=['/Users/renzomartinch/Downloads/Comite_0622/TSN_Precios.csv'] lista_cortes=[[],['C_LINEA'],['C_SUBSEGMENTO'],['C_PD']] for i in range(len(lista_nombres)): REAL = pd.read_csv(ruta_real[i]) TEORICO = pd.read_csv(ruta_teorico[i]) TMIN = pd.read_csv(ruta_tmin[i]) product = InputsRevolvente(REAL,TEORICO,mincosecha=201901,maxcosecha=201912) for j in range(len(lista_cortes)): cortes = lista_cortes[j] product.condensar(cortes) product.optimizar() product.impactoTmin(TMIN,impactoTIR=True) temp = pd.concat([product.promedios,product.stats,product.Tmin,product.TIR,product.curvas], axis=1) name=temp.columns[0] temp.rename(columns={name:"CORTE"}, inplace=True) if i==0 and j==0: imprimir = temp else: imprimir = imprimir.append(temp,ignore_index=True) print(imprimir) imprimir.to_excel("plancha3.xlsx")
{"hexsha": "e09c479471cd0901796b0cb9d9cf4984880939d4", "size": 1312, "ext": "py", "lang": "Python", "max_stars_repo_path": "ExtraccionRev.py", "max_stars_repo_name": "joelortizlt/Monitoreo", "max_stars_repo_head_hexsha": "31434871ec2e9345fb471fdc5ffcdf8737ff7b6d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-03-31T16:48:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-31T21:00:01.000Z", "max_issues_repo_path": "ExtraccionRev.py", "max_issues_repo_name": "joelortizlt/Monitoreo", "max_issues_repo_head_hexsha": "31434871ec2e9345fb471fdc5ffcdf8737ff7b6d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-02-02T22:35:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-25T15:00:57.000Z", "max_forks_repo_path": "ExtraccionRev.py", "max_forks_repo_name": "joelortizlt/Monitoreo", "max_forks_repo_head_hexsha": "31434871ec2e9345fb471fdc5ffcdf8737ff7b6d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-22T23:43:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-18T20:28:38.000Z", "avg_line_length": 36.4444444444, "max_line_length": 107, "alphanum_fraction": 0.7210365854, "include": true, "reason": "import numpy", "num_tokens": 380}
import pandas as pd import numpy as np from sklearn.linear_model import ARDRegression from sklearn.linear_model import HuberRegressor from sklearn.linear_model import LinearRegression from sklearn.neighbors import KNeighborsRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.svm import SVR def get_artif(): train = pd.read_csv('./artificial/artificial_2x_test.tsv', names=['x', 'target'], index_col=None, header=None, sep='\t') test = pd.read_csv('./artificial/artificial_2x_train.tsv', names=['x', 'target'], index_col=None, header=None, sep='\t') return (train, test) def get_prague(): names = ['area', 'construction', 'ownership', 'status', 'floor', 'equip', 'cellar', 'balcony', 'target', 'nth'] train = pd.read_csv('./pragueestateprices/pragueestateprices_train.tsv', index_col=None, names=names, header=None, sep='\t') test = pd.read_csv('./pragueestateprices/pragueestateprices_test.tsv', index_col=None, names=names, header=None, sep='\t') train_size = len(train) tog = train.append(test) for col in tog.columns[np.where(tog.dtypes == 'object')]: tog[col] = pd.Categorical(tog[col]) tog = tog.drop('nth', axis=1) train, test = (tog[:train_size], tog[train_size:]) return (train, test) def get_X(df): return pd.get_dummies(df[df.columns[:-1]]) def get_Y(df): return df[df.columns[-1]] def create_models(): return [ (ARDRegression(n_iter = 10000), "ARD"), (HuberRegressor(), "Huber"), (LinearRegression(normalize=False), "LR"), (KNeighborsRegressor(n_neighbors=5), "KNN"), (DecisionTreeRegressor(max_depth=10, min_samples_split=5,min_samples_leaf=3), "Tree"), #SVR() ] def R2ToMSE(r2, df_test): score = (1-r2) score *= ((get_Y(df_test) - get_Y(df_test).mean()) ** 2).sum() return score/len(df_test) def report(name, mse, r2): print(name + "\t" + "MSE: " + str(mse) + "\t sqrt(MSE): " + str(mse ** (1/2)) + "\t R2: " + str(r2)) def eval_dataset(data): df_train, df_test = data for (m, name) in create_models(): m = m.fit(get_X(df_train), get_Y(df_train)) r2 = m.score(get_X(df_test), get_Y(df_test)) report(name,R2ToMSE(r2, df_test), r2) if __name__ == "__main__": print("Prague:") eval_dataset(get_prague()) print("Artif:") eval_dataset(get_artif())
{"hexsha": "4cda04bae2f36be16ae87df87d96c8966dd1d814", "size": 2406, "ext": "py", "lang": "Python", "max_stars_repo_path": "hw/scikit-regression/regression_runner.py", "max_stars_repo_name": "petrroll/npfl104", "max_stars_repo_head_hexsha": "241646b02e91c14ac885dd6cc981b5bb63d4561c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hw/scikit-regression/regression_runner.py", "max_issues_repo_name": "petrroll/npfl104", "max_issues_repo_head_hexsha": "241646b02e91c14ac885dd6cc981b5bb63d4561c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw/scikit-regression/regression_runner.py", "max_forks_repo_name": "petrroll/npfl104", "max_forks_repo_head_hexsha": "241646b02e91c14ac885dd6cc981b5bb63d4561c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6578947368, "max_line_length": 132, "alphanum_fraction": 0.6512884456, "include": true, "reason": "import numpy", "num_tokens": 668}
(* * Copyright 2014, General Dynamics C4 Systems * * This software may be distributed and modified according to the terms of * the GNU General Public License version 2. Note that NO WARRANTY is provided. * See "LICENSE_GPLv2.txt" for details. * * @TAG(GD_GPL) *) theory LevityCatch imports Include "../../../lib/LemmaBucket" begin (* Try again, clagged from Include *) no_notation bind_drop (infixl ">>" 60) lemma no_fail_getCurThread: "no_fail \<top> getCurThread" by (clarsimp simp: getCurThread_def no_fail_def gets_def bind_def return_def get_def) lemma no_fail_getSchedulerAction: "no_fail \<top> getSchedulerAction" by (auto simp: getSchedulerAction_def) lemma projectKO_def2: "projectKO x = assert_opt (projectKO_opt x)" by (simp add: assert_opt_def projectKO_def) lemma magnitudeCheck_assert: "magnitudeCheck x y n = assert (case y of None \<Rightarrow> True | Some z \<Rightarrow> 1 << n \<le> z - x)" apply (simp add: magnitudeCheck_def assert_def when_def split: option.split) apply fastforce done context begin interpretation Arch . (*FIXME: arch_split*) lemmas makeObject_simps = makeObject_endpoint makeObject_notification makeObject_cte makeObject_tcb makeObject_user_data makeObject_pde makeObject_pte makeObject_asidpool end definition "diminished' cap cap' \<equiv> \<exists>R. cap = maskCapRights R cap'" lemma projectKO_inv : "\<lbrace>P\<rbrace> projectKO ko \<lbrace>\<lambda>rv. P\<rbrace>" by (simp add: projectKO_def fail_def valid_def return_def split: option.splits) (****** From GeneralLib *******) lemma alignCheck_assert: "alignCheck ptr n = assert (is_aligned ptr n)" by (simp add: is_aligned_mask alignCheck_def assert_def alignError_def unless_def when_def) lemma magnitudeCheck_inv: "\<lbrace>P\<rbrace> magnitudeCheck x y n \<lbrace>\<lambda>rv. P\<rbrace>" apply (clarsimp simp add: magnitudeCheck_def split: option.splits) apply (wp hoare_when_wp) apply simp done lemma alignCheck_inv: "\<lbrace>P\<rbrace> alignCheck x n \<lbrace>\<lambda>rv. P\<rbrace>" apply (simp add: alignCheck_def unless_def alignError_def) apply (wp hoare_when_wp) apply simp done lemma updateObject_default_inv: "\<lbrace>P\<rbrace> updateObject_default obj ko x y n \<lbrace>\<lambda>rv. P\<rbrace>" unfolding updateObject_default_def by (simp, wp magnitudeCheck_inv alignCheck_inv projectKO_inv, simp) context begin interpretation Arch . (*FIXME: arch_split*) lemma to_from_apiType [simp]: "toAPIType (fromAPIType x) = Some x" by (cases x) (auto simp add: fromAPIType_def ARM_H.fromAPIType_def toAPIType_def ARM_H.toAPIType_def) end end
{"author": "SEL4PROJ", "repo": "jormungand", "sha": "bad97f9817b4034cd705cd295a1f86af880a7631", "save_path": "github-repos/isabelle/SEL4PROJ-jormungand", "path": "github-repos/isabelle/SEL4PROJ-jormungand/jormungand-bad97f9817b4034cd705cd295a1f86af880a7631/case_study/l4v/proof/refine/ARM/LevityCatch.thy"}
# -*- coding: utf-8 -*-
"""
Created on Sun Aug  7 22:58:58 2016

@author: isaacdk
"""
from __future__ import division, print_function

import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
import scipy.optimize

#get data file
filename= 'scope2.csv'
xaxis_label = 'Time (s)'
yaxis_label = 'Voltage (V)'
titleName = 'Saturated Absorption'

#get data
#from our scope, the data was on the fourth and fifth columns of the csv
data = np.genfromtxt(filename,delimiter=',')
x_values = data[:,3]
y_values = data[:,4]
npts = np.size(x_values)

print('Click on the beginning and end of the fit range (left to right), then on the peak.')
print()

#show plot
plt.figure(1)
plt.clf()
plt.plot(x_values,y_values,'.')
plt.grid()
plt.xlabel(xaxis_label,fontsize=15)
plt.ylabel(yaxis_label,fontsize=15)
plt.title(titleName,fontsize=20)

#input from user
click = plt.ginput(3, timeout=-1)
x_1 = click[0][0]
x_2 = click[1][0]
x_3 = click[2][0]
y_1 = click[0][1]
y_2 = click[1][1]
y_3 = click[2][1]

x_c_g = x_3
y_0_g = (y_1 + y_2)/2
h_g = y_3 - y_0_g
midhght_g = y_0_g + (h_g/2)

#define the region of interest around the peak
beg = int(np.rint(np.interp(x_1, x_values, np.arange(npts))))
end = int(np.rint(np.interp(x_2, x_values, np.arange(npts))))
x_roi = x_values[beg:end]
y_roi = y_values[beg:end]

#find a guess for FWHM by inverse-interpolating each side of the peak at half height
half = int((beg + end)/2)
first_x = x_values[beg:half]
first_y = y_values[beg:half]
f_1 = interpolate.interp1d(first_y, first_x)
sec_x = x_values[half:end]
sec_y = y_values[half:end]
f_2 = interpolate.interp1d(sec_y, sec_x)
w_g = f_2(midhght_g) - f_1(midhght_g)

#the function itself
def Lorentzian(x_val, h, w, x_c, y_0):
    return ((h * w**2)/((w**2)+(4*(x_val - x_c)**2)) + y_0)
#h   = height
#w   = FWHM
#x_c = x value of the peak
#y_0 = y value of the baseline (asymptote)

#fit the model (initial guesses help convergence)
p_guess = [h_g, w_g, x_c_g, y_0_g]
peak, pcov = scipy.optimize.curve_fit(Lorentzian, x_roi, y_roi, p0 = p_guess)
perr = np.sqrt(np.diag(pcov))

#plot the initial guess (green) and the fitted curve (red) over the data
plt.plot(x_values, Lorentzian(x_values, *p_guess), 'g')
plt.plot(x_values, Lorentzian(x_values, *peak), 'r')

#reduced chi**2, with sigma = 0.003 V as the hard-coded measurement uncertainty
var = 0
roi_len = end - beg
model_vals = Lorentzian(x_roi, *peak)
for x in range(roi_len):
    model_val = model_vals[x]
    y_val = y_roi[x]
    chi2step = (y_val - model_val)**2/(.003**2)
    var = var + chi2step
varr = (1/(roi_len - 4)) * var
print("chi**2: ", varr, sep="")
s_val = 2/(np.sqrt(roi_len-4))
print("s = ", s_val, sep="")
print()

h_f = '%+.4e' % peak[0]
h_e = '%.4e' % perr[0]
h_p = '%.2g' % (np.abs(perr[0]/peak[0]) * 100)
w_f = '%+.4e' % peak[1]
w_e = '%.4e' % perr[1]
w_p = '%.2g' % (np.abs(perr[1]/peak[1]) * 100)
x_c_f = '%+.4e' % peak[2]
x_c_e = '%.4e' % perr[2]
x_c_p = '%.2g' % (np.abs(perr[2]/peak[2]) * 100)
y_0_f = '%+.4e' % peak[3]
y_0_e = '%.4e' % perr[3]
y_0_p = '%.2g' % (np.abs(perr[3]/peak[3]) * 100)

print("Our estimates:")
print("Height:", h_g)
print("FWHM:", w_g)
print("Center: x =", x_c_g)
print("Flatline: y =", y_0_g)
print()
print("Our exact fitted values:")
print("Height:", peak[0])
print("FWHM :", peak[1])
print("Center: x =", peak[2])
print("Flatline: y =", peak[3])
print()
print("Our rounded fits:")
print("Height: ", h_f, " ±", h_e, " (", h_p, "%)", sep="")
print("FWHM: ", w_f, " ±", w_e, " (", w_p, "%)", sep="")
print("Center: x = ", x_c_f, " ±", x_c_e, " (", x_c_p, "%)", sep="")
print("Flatline: y = ", y_0_f, " ±", y_0_e, " (", y_0_p, "%)", sep="")
print()
print("Fitted equation: (", h_f, " * (", w_f, ")^2) / ( (", w_f, ")^2 + 4*(x - ", x_c_f, ")^2) + ", y_0_f, sep="")
{"hexsha": "44c9a78743465fd9c5907b094a6583c066a25905", "size": 3540, "ext": "py", "lang": "Python", "max_stars_repo_path": "lorentz_clean.py", "max_stars_repo_name": "flamingh2o/236_py", "max_stars_repo_head_hexsha": "6c7da0f4bd0724b94a180630ab53d357a8db2e4d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lorentz_clean.py", "max_issues_repo_name": "flamingh2o/236_py", "max_issues_repo_head_hexsha": "6c7da0f4bd0724b94a180630ab53d357a8db2e4d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lorentz_clean.py", "max_forks_repo_name": "flamingh2o/236_py", "max_forks_repo_head_hexsha": "6c7da0f4bd0724b94a180630ab53d357a8db2e4d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6165413534, "max_line_length": 114, "alphanum_fraction": 0.6276836158, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1348}
import sys sys.path.append("..") import scipy import numpy as np from numpy.linalg import matrix_rank, matrix_power, cholesky, inv import torch from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm import util.geometry_util as geo_util from solvers.rigidity_solver.gradient import gradient_analysis from solvers.rigidity_solver.internal_structure import tetrahedron from solvers.rigidity_solver.algo_core import solve_rigidity, spring_energy_matrix from solvers.rigidity_solver.models import Beam, Model, Joint from solvers.rigidity_solver import gradient as gd from solvers.rigidity_solver.eigen_analysis import eigen_analysis from visualization.model_visualizer import visualize_3D from tests.testcases import tetra from matplotlib import pyplot as plt axes = np.array([ [0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1], ]) model = tetra.tetrahedron( np.array([ [0, 0, 0], [1 / 2, np.sqrt(3) / 2, 0], [1, 0, 0], [1 / 2, np.sqrt(3) / 6, np.sqrt(6) / 3], ]), axes ) pairs = eigen_analysis( model.point_matrix(), model.edge_matrix(), model.constraint_matrix(), fix_stiffness=True, ) zero_eigenvalues = [e for e, v in pairs if e < 1e-8] print("DoF:", len(zero_eigenvalues)) objectives = [] x_range = np.linspace(-1.5, 1.5, num=50) y_range = np.linspace(-0.5, 2.5, num=50) from itertools import product xy_range = product(x_range, y_range) for it, (x, y) in enumerate(tqdm(xy_range)): vertices = np.array([ [0, 0, 0], [1 / 2, np.sqrt(3) / 2, 0], [1, 0, 0], [x, y, np.sqrt(6) / 3], ]) model = tetra.tetrahedron(vertices=vertices, axes=axes) points = model.point_matrix() extra_constraints = geo_util.trivial_basis(points, 3) pairs = eigen_analysis( points, model.edge_matrix(), np.vstack(( model.constraint_matrix(), extra_constraints, )), fix_stiffness=True, ) seventh_eig, eigv = pairs[0] objectives.append(seventh_eig) Z = np.array(objectives).reshape(len(x_range), len(y_range)).transpose() ax = plt.gca() ax.set_aspect('equal') plt.contour(x_range, y_range, Z, levels=50) y_ind, x_ind = np.unravel_index(np.argmax(Z), Z.shape) plt.scatter([x_range[x_ind]], [y_range[y_ind]]) plt.show()
{"hexsha": "ca69a752dfd8694e331f6bff0afbc38bbf31875d", "size": 2322, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/optimization/tetrahedron_top_plane.py", "max_stars_repo_name": "Anthony102899/Lego-ImageGenerator", "max_stars_repo_head_hexsha": "52b19c8bb20f77a3394675e7c037c943a50c1e15", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-20T10:23:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T10:23:38.000Z", "max_issues_repo_path": "tests/optimization/tetrahedron_top_plane.py", "max_issues_repo_name": "Anthony102899/Lego-ImageGenerator", "max_issues_repo_head_hexsha": "52b19c8bb20f77a3394675e7c037c943a50c1e15", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/optimization/tetrahedron_top_plane.py", "max_forks_repo_name": "Anthony102899/Lego-ImageGenerator", "max_forks_repo_head_hexsha": "52b19c8bb20f77a3394675e7c037c943a50c1e15", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8, "max_line_length": 82, "alphanum_fraction": 0.6718346253, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 674}
""" Process iMaterialist Fashion 2019 https://www.kaggle.com/c/imaterialist-fashion-2019-FGVC6 """ import argparse import shutil from pathlib import Path import cv2 import numpy as np import pandas as pd from PIL import Image from tqdm import tqdm from iglovikov_helper_functions.utils.mask_utils import rle2mask def get_args(): parser = argparse.ArgumentParser() parser.add_argument("-i", "--image_folder", type=Path, help="Path to folder with images") parser.add_argument("-l", "--label_path", type=Path, help="Path to csv with labels") parser.add_argument("-o", "--output_folder", type=Path, help="Path to the output folder") return parser.parse_args() def main(): args = get_args() output_image_folder = args.output_folder / "images" output_image_folder.mkdir(exist_ok=True, parents=True) output_label_folder = args.output_folder / "labels" output_label_folder.mkdir(exist_ok=True, parents=True) df = pd.read_csv(args.label_path) for file_name, dft in tqdm(df.groupby("ImageId")): if not (args.image_folder / file_name).exists(): continue height = dft.iloc[0]["Height"] width = dft.iloc[0]["Width"] size = Image.open(args.image_folder / file_name).size if (width, height) != size: continue mask = np.zeros((height, width), dtype=np.uint8) for i in dft.index: seg = dft.loc[i, "EncodedPixels"] mask = mask | rle2mask(seg, (width, height)) if mask.sum() == 0: continue shutil.copy(str(args.image_folder / file_name), str(output_image_folder / file_name)) cv2.imwrite(str(output_label_folder / f"{Path(file_name).stem}.png"), mask * 255) if __name__ == "__main__": main()
{"hexsha": "403794f67f3a6ec6766bb7fcc1d8894cbfffb2dc", "size": 1788, "ext": "py", "lang": "Python", "max_stars_repo_path": "iglovikov_helper_functions/data_processing/prepare_cloths_segmentation/prepare_imaterialist2019.py", "max_stars_repo_name": "AIChuY/iglovikov_helper_functions", "max_stars_repo_head_hexsha": "46383c7a8b0f8dbdbf7907e119b6c2417877ad33", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2019-09-21T02:05:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T10:27:56.000Z", "max_issues_repo_path": "iglovikov_helper_functions/data_processing/prepare_cloths_segmentation/prepare_imaterialist2019.py", "max_issues_repo_name": "AIChuY/iglovikov_helper_functions", "max_issues_repo_head_hexsha": "46383c7a8b0f8dbdbf7907e119b6c2417877ad33", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-04-05T01:19:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-02T16:53:18.000Z", "max_forks_repo_path": "iglovikov_helper_functions/data_processing/prepare_cloths_segmentation/prepare_imaterialist2019.py", "max_forks_repo_name": "AIChuY/iglovikov_helper_functions", "max_forks_repo_head_hexsha": "46383c7a8b0f8dbdbf7907e119b6c2417877ad33", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2019-09-21T02:54:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T11:58:34.000Z", "avg_line_length": 26.6865671642, "max_line_length": 93, "alphanum_fraction": 0.6644295302, "include": true, "reason": "import numpy", "num_tokens": 433}
using Geotherm.Geometry: Point2D
using Geotherm.Integrate: runge_kutta_iter, runge_kutta

@testset "Test `runge_kutta`" begin
    @test runge_kutta_iter(
        Point2D(1.0, 1.0),
        (x, y) -> x * 2 + y * 3,
    ) == Point2D(1.01, 1.05085856375)
    @test runge_kutta(
        Point2D(1.0, 1.0),
        (x, y) -> x * 2 + y * 3,
        0.01,
        4,
    ) == [
        Point2D(1.0, 1.0),
        Point2D(1.01, 1.05085856375),
        Point2D(1.02, 1.103469031571201),
        Point2D(1.03, 1.157884756885266),
    ]
end
{"hexsha": "8a51fcf5a00ab252beba3a7ae2e30703877f83aa", "size": 530, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Integrate.jl", "max_stars_repo_name": "MineralsCloud/Geotherm.jl", "max_stars_repo_head_hexsha": "6c26803989558d8a9fe5f61c36c29dbdce234ade", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/Integrate.jl", "max_issues_repo_name": "MineralsCloud/Geotherm.jl", "max_issues_repo_head_hexsha": "6c26803989558d8a9fe5f61c36c29dbdce234ade", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-02-14T22:31:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-02T05:36:13.000Z", "max_forks_repo_path": "test/Integrate.jl", "max_forks_repo_name": "MineralsCloud/Geotherm.jl", "max_forks_repo_head_hexsha": "6c26803989558d8a9fe5f61c36c29dbdce234ade", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:04:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:04:02.000Z", "avg_line_length": 25.2380952381, "max_line_length": 55, "alphanum_fraction": 0.541509434, "num_tokens": 224}
test:main:zero
test:main:one:1
test:main:two:2:3
test:main:three:4:5:7
test:main:four:7:8:9:10
test:main:five:11:12:13:14:15
test:main:six:16:17:18:19:20:21
test:main:seven:22:23:24:25:26:27:28
test:main:eight:29:30:31:32:33:34:35:36
test:main:nine:37:38:39:40:41:42:43:44:45
test:main:ten:46:47:48:49:50:51:52:53:54:55
test:main:eleven:56:57:58:59:60:61:62:63:64:65
test:main:twelve:67:68:69:70:71:72:73:74:75:76
{"hexsha": "db6d18cb0541373e26dfd0dd42759ee032a3921a", "size": 415, "ext": "r", "lang": "R", "max_stars_repo_path": "test/unittest/usdt/tst.allargs.r", "max_stars_repo_name": "alan-maguire/dtrace-utils", "max_stars_repo_head_hexsha": "53b33a89ef7eaeba5ce06d50a4c73fe91c1fa99e", "max_stars_repo_licenses": ["UPL-1.0"], "max_stars_count": 66, "max_stars_repo_stars_event_min_datetime": "2018-04-16T14:28:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T14:36:05.000Z", "max_issues_repo_path": "test/unittest/usdt/tst.allargs.r", "max_issues_repo_name": "tjfontaine/dtrace-utils", "max_issues_repo_head_hexsha": "1bd5b3825ca0dd641694f795734b9bbbfd3f2ebb", "max_issues_repo_licenses": ["UPL-1.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-01-06T16:28:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-10T18:46:58.000Z", "max_forks_repo_path": "test/unittest/usdt/tst.allargs.r", "max_forks_repo_name": "tjfontaine/dtrace-utils", "max_forks_repo_head_hexsha": "1bd5b3825ca0dd641694f795734b9bbbfd3f2ebb", "max_forks_repo_licenses": ["UPL-1.0"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2018-07-23T22:35:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T01:04:36.000Z", "avg_line_length": 27.6666666667, "max_line_length": 46, "alphanum_fraction": 0.7228915663, "num_tokens": 230}
using Pkg
Pkg.activate(".")

using Optim
using ForwardDiff
using JLD
using LineSearches
using AdvancedMH
using AdaptiveMCMC
using MCMCChains
using Distributions
using StatsBase

using Base.Threads
nthreads()

using Printf
using Plots
using Plots.PlotMeasures
using LaTeXStrings
using Distributions
using StatsPlots

@time begin

###########################################################################
# User inputs
###########################################################################

# Goniometer settings (must be set before including the set_up.jl file as they enter into model definitions)
TwoDeltaTheta = 0
phi = 2.5E-3
chi = 0.015*pi/180

include("set_up.jl")

# Define lattice vector
miller_vec = [0;0;1]

# Define bounds of prior: prior_bounds[char,:] = (width of prior, height of prior) in nm in the lab frame
prior_bounds = [800 800; 500 1000; 500 1000; 800 800; 1500 1500; 1500 1500;
                1000 1500; 1000 1500; 500 1200; 1000 1000; 1000 1000; 500 1200]

# Set character of dislocation
char = 2 #parse(Int64,ARGS[1])
dis_lab = get_dis_lab(char, phi, chi)

# Set noise parameters
background_percent = 0.05 #parse(Float64,ARGS[2]) # magnitude of background noise as percentage of image intensity
noise_level = 0.01 #parse(Float64,ARGS[3]) # magnitude of electric readout noise as percentage of image intensity

# Set prior center in image frame
loc_center = [0.;0.]

# Define the prior based on the character
lower = loc_center - prior_bounds[char,:]/2
upper = loc_center + prior_bounds[char,:]/2
prior = Product(Uniform.(lower, upper))

# Define name for result files (plot generation at bottom of script)
save_str = "example_data//inference_example_char_$(char)_background_$(background_percent)_noise_$(noise_level)"
save_str = replace(save_str, "."=>"p")

# Grid sizes for computing the log-posterior contour plots
gridsize_1 = 50
gridsize_2 = 5

###########################################################################
# Generate a noisy image to act as an evaluation image
###########################################################################

# Draw the true location from the prior and transform it to the dislocation frame
true_loc_im = rand(prior)
true_loc_dis = image_to_dis(true_loc_im, dis_lab)

# Compute the noise-free image
im_true = compute_image(char, true_loc_dis, phi, chi, TwoDeltaTheta, miller_vec)

# Define noise model based on noise parameters
noise_std = noise_level*maximum(im_true)
noise_var = noise_std^2
background = background_percent*maximum(im_true)
pixel_var = im_true .+ background .+ noise_var

# Compute and save noisy evaluation image
im_obs = im_true .+ background + sqrt.(pixel_var).*randn(Npixels,Npixels)
save(save_str*"_im_obs.jld","im_obs", im_obs)

###########################################################################
# MAP optimization and Laplace approximation
###########################################################################

# Define negative log-likelihood function of dislocation position (in dislocation frame)
function nlogl(locs)
    -1*compute_loglikelihood(im_obs, noise_var, char, locs, phi, chi, TwoDeltaTheta, miller_vec, background)
end

# Define (unnormalized) log posterior through the log-likelihood (the prior is uniform and so does not contribute)
logpi(locs) = -nlogl(locs)
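# Note on the Laplace approximation computed below: with a flat prior the posterior
# is proportional to the likelihood, and the Laplace approximation models it as a
# Gaussian N(map_point, map_cov) with map_cov = -inv(Hessian of logpi at map_point).
# A toy illustration (added purely for exposition, not used by the script):
#   toy_logpi(x) = -0.5*sum(x.^2)                    # standard normal log-density
#   -inv(ForwardDiff.hessian(toy_logpi, zeros(2)))   # ≈ identity covariance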
# Optimization through the Optim routine
loc_opt = optimize(nlogl, loc_center,
    GradientDescent(linesearch=LineSearches.BackTracking(order=2)),
    Optim.Options(extended_trace=true, store_trace=true, allow_f_increases=true,
        iterations=50, show_trace=true, g_abstol = 1E-5, x_abstol = 0.01))

# Unpack optimization results we want to save
num_iter = size(hcat(Optim.f_trace(loc_opt)...),2)
map_point = Optim.minimizer(loc_opt)
f_trace = hcat(Optim.f_trace(loc_opt)...)
loc_trace = hcat(Optim.x_trace(loc_opt)...)
runtime = Optim.time_run(loc_opt)

# Compute the Laplace approximation covariance (the negative inverse hessian of the posterior log-density at the MAP point)
map_cov = -inv(ForwardDiff.hessian(logpi, map_point))

# Save the data
save(save_str*"_opt_data.jld",
    "true_loc_dis", true_loc_dis,
    "loc_trace", loc_trace,
    "f_trace", f_trace,
    "map_point", map_point,
    "num_iter", num_iter,
    "runtime", runtime,
    "map_cov", map_cov)

###########################################################################
# MCMC sampling
###########################################################################

# Define a distribution for drawing chain initialization points centered at the MAP point (alternatively one could start chains at the MAP point)
mcmc_init = Product([Uniform(map_point[1] - 5, map_point[1] + 5), Uniform(map_point[2] - 5, map_point[2] + 5)])

# Define number and length of chains to compute [a 1000-length chain takes my laptop ~7-8 minutes to run]
nchains = 1
n_samples = 100

adaptive_rwhm_chains = zeros(n_samples, 2, nchains)
times = zeros(nchains)
Threads.@threads for i in 1:nchains
    θ_init = rand(mcmc_init)
    samples = @timed adaptive_rwm(θ_init, logpi, n_samples; algorithm=:asm, b=1)
    adaptive_rwhm_chains[:,:,i] = Array(samples.value.X)'
    times[i] = samples.time
end
save(save_str*"_mcmc.jld","chains", adaptive_rwhm_chains)

adaptive_rwhm_chains = load(save_str*"_mcmc.jld","chains")
all_chains = Chains(adaptive_rwhm_chains)
all_samples = Array(all_chains)
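# The chains above can be inspected with MCMCChains before trusting the samples,
# e.g. `summarystats(all_chains)` (assumed available from MCMCChains) reports
# means, standard deviations and effective sample sizes per parameter.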
###########################################################################
# Computing log-posterior contour for plotting
###########################################################################

#####################################################################
# A coarse sampling of a region larger than the prior in image plane
#####################################################################
xs_im = 2*lower[1]:gridsize_1:2*upper[1]
ys_im = 2*lower[2]:gridsize_1:2*upper[2]

density_matrix_im = zeros(length(ys_im),length(xs_im))
dis_xyz = []
@threads for i = 1:length(ys_im)
    y = ys_im[i]
    for j = 1:length(xs_im)
        x = xs_im[j]
        loc = [x;y]
        append!(dis_xyz, image_to_dis(loc, dis_lab, dims=3))
        density_matrix_im[i,j] = nlogl(image_to_dis(loc,dis_lab))
    end
end
save(save_str*"_mat_im.jld","mat", density_matrix_im, "xs_im", xs_im, "ys_im", ys_im)

dis_xyz = reshape(dis_xyz, (3, Int(length(dis_xyz)/3)))
xs_dis = minimum(dis_xyz[1,:]):gridsize_1:maximum(dis_xyz[1,:])
ys_dis = minimum(dis_xyz[2,:]):gridsize_1:maximum(dis_xyz[2,:])

density_matrix_dis = zeros(length(ys_dis),length(xs_dis))
@threads for i = 1:length(ys_dis)
    y = ys_dis[i]
    for j = 1:length(xs_dis)
        x = xs_dis[j]
        loc = [x;y]
        density_matrix_dis[i,j] = nlogl(loc)
    end
end
save(save_str*"_mat_dis.jld","mat", density_matrix_dis, "xs_dis", xs_dis, "ys_dis", ys_dis)
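# The refined grids below shrink the evaluation window to a square that just
# covers the MCMC samples and the true location, padded by 10 nm on each side,
# so the fine gridsize_2 spacing stays affordable.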
d["mat"] d = load(load_name*"_mat_f_im.jld") xs_f_im = d["xs_f_im"] ys_f_im = d["ys_f_im"] density_matrix_f_im = d["mat"] # Plot evaluation image dfxm_image_plot_base(im_obs, char, true_loc_dis, figure_dir*"\\obs", prior=true, width=500) # Figure 5c contourf(xs_im, ys_im, density_matrix_im, levels=50, aspect_ratio=:equal, color=:reds) rect = prior_bounds[char,:] plot!(rectangle(rect...), opacity=.5, c=:gray, label=false, legend=:bottomright) for t = 1:num_iter - 1 plot!([loc_trace_im[1,t], loc_trace_im[1,t+1]], [loc_trace_im[2,t], loc_trace_im[2,t+1]],label=false,linewidth=3,c=:black) end scatter!(loc_trace_im[1,:],loc_trace_im[2,:], c=:orange, markershape = :utriangle, markersize = 4, label="iterates") scatter!([map_point_im[1]],[map_point_im[2]],c=:lightgreen, markershape = :star5, markersize = 8, label=L"\xi^{\mathrm{MAP}}") scatter!([true_loc_im[1]],[true_loc_im[2]], c=:cyan, markershape = :star7, markersize = 8, label=L"\xi^{\mathrm{true}}", legend=false)# outertop) xlims!(-500,500) ylims!(-600,600) xticks!([-500,0,500]) yticks!([-500,0,500]) xlabel!(L"y_{\ell} \ (\mathrm{nm})", xtickfontsize=12, xguidefontsize=18, margin=5mm) ylabel!(L"x_{\ell} \ (\mathrm{nm})", ytickfontsize=12, yguidefontsize=18) plot!(size=(450,450),right_margin=15mm) Plots.savefig(figure_dir*"\\iters.pdf") # Figure 5b prior_corners = [250; 500; -250; 500; -250; -500; 250; -500] prior_corners_dis = reshape(image_to_dis(prior_corners, dis_lab), (2,4)) s_prior = Shape(prior_corners_dis[1,:],prior_corners_dis[2,:]) contourf(xs_dis, ys_dis, density_matrix_dis, levels=10, aspect_ratio=:equal, color=cgrad(:grays,rev=true)) plot!(s_prior, opacity=.5, c=:gray, linecolor=:white, label=false) #plot!(s_region, c=false, linecolor=:white, label=false) for t = 1:num_iter - 1 plot!([loc_trace_dis[1,t], loc_trace_dis[1,t+1]], [loc_trace_dis[2,t], loc_trace_dis[2,t+1]],label=false,linewidth=3,c=:black) end scatter!(loc_trace_dis[1,:],loc_trace_dis[2,:], c=:orange, markershape = :utriangle, markersize = 4, label="iterates") scatter!([map_point_dis[1]],[map_point_dis[2]],c=:lightgreen, markershape = :star5, markersize = 8, label=L"\xi^{\mathrm{MAP}}") scatter!([true_loc_dis[1]],[true_loc_dis[2]], c=:cyan, markershape = :star7, markersize = 8, label=L"\xi^{\mathrm{true}}", legend=false)# outertop) xlabel!(L"x_{d} \mathrm{ \ (nm)}", xtickfontsize=12, xguidefontsize=18, margin=5mm) ylabel!(L"y_{d} \mathrm{ \ (nm)}", ytickfontsize=12, yguidefontsize=18) xlims!(xs_dis[1],xs_dis[end]) ylims!(ys_dis[1],ys_dis[end]) xticks!([-500,0,500]) yticks!([-400,0,400]) plot!(size=(450,450),right_margin=15mm) Plots.savefig(figure_dir*"\\iters_dis.pdf") # Figure 6a contourf(xs_f_dis,ys_f_dis,density_matrix_f_dis, levels=50, aspect_ratio=:equal, color=cgrad(:grays,rev=true)) scatter!(all_samples[:,1],all_samples[:,2],c=:plum1, alpha=.1, label="MCMC samples") covellipse!(vec(map_point_dis), map_cov, n_std=2, aspect_ratio=1, color=(:yellow), label=false, alpha=.5) scatter!([map_point_dis[1]],[map_point_dis[2]],c=:lightgreen, markershape = :star5, markersize = 8, label=L"\xi^{\mathrm{MAP}}") scatter!([true_loc_dis[1]],[true_loc_dis[2]], c=:cyan, markershape = :star7, markersize = 8, label=L"\xi^{\mathrm{true}}", legend=false)# outertop) xlabel!(L"x_{d} \mathrm{ \ (nm)}", xtickfontsize=14, xguidefontsize=18, margin=5mm) ylabel!(L"y_{d} \mathrm{ \ (nm)}", ytickfontsize=14, yguidefontsize=18) plot!(size=(450,450), right_margin=15mm) Plots.savefig(figure_dir*"\\uq_dis.pdf") # Figure 6b vals,vecs = eigen(map_cov) vecs_im = 
# Plot evaluation image
dfxm_image_plot_base(im_obs, char, true_loc_dis, figure_dir*"\\obs", prior=true, width=500)

# Figure 5c
contourf(xs_im, ys_im, density_matrix_im, levels=50, aspect_ratio=:equal, color=:reds)
rect = prior_bounds[char,:]
plot!(rectangle(rect...), opacity=.5, c=:gray, label=false, legend=:bottomright)
for t = 1:num_iter - 1
    plot!([loc_trace_im[1,t], loc_trace_im[1,t+1]], [loc_trace_im[2,t], loc_trace_im[2,t+1]], label=false, linewidth=3, c=:black)
end
scatter!(loc_trace_im[1,:], loc_trace_im[2,:], c=:orange, markershape = :utriangle, markersize = 4, label="iterates")
scatter!([map_point_im[1]], [map_point_im[2]], c=:lightgreen, markershape = :star5, markersize = 8, label=L"\xi^{\mathrm{MAP}}")
scatter!([true_loc_im[1]], [true_loc_im[2]], c=:cyan, markershape = :star7, markersize = 8, label=L"\xi^{\mathrm{true}}", legend=false)
xlims!(-500,500)
ylims!(-600,600)
xticks!([-500,0,500])
yticks!([-500,0,500])
xlabel!(L"y_{\ell} \ (\mathrm{nm})", xtickfontsize=12, xguidefontsize=18, margin=5mm)
ylabel!(L"x_{\ell} \ (\mathrm{nm})", ytickfontsize=12, yguidefontsize=18)
plot!(size=(450,450), right_margin=15mm)
Plots.savefig(figure_dir*"\\iters.pdf")

# Figure 5b
prior_corners = [250; 500; -250; 500; -250; -500; 250; -500]
prior_corners_dis = reshape(image_to_dis(prior_corners, dis_lab), (2,4))
s_prior = Shape(prior_corners_dis[1,:], prior_corners_dis[2,:])
contourf(xs_dis, ys_dis, density_matrix_dis, levels=10, aspect_ratio=:equal, color=cgrad(:grays,rev=true))
plot!(s_prior, opacity=.5, c=:gray, linecolor=:white, label=false)
#plot!(s_region, c=false, linecolor=:white, label=false)
for t = 1:num_iter - 1
    plot!([loc_trace_dis[1,t], loc_trace_dis[1,t+1]], [loc_trace_dis[2,t], loc_trace_dis[2,t+1]], label=false, linewidth=3, c=:black)
end
scatter!(loc_trace_dis[1,:], loc_trace_dis[2,:], c=:orange, markershape = :utriangle, markersize = 4, label="iterates")
scatter!([map_point_dis[1]], [map_point_dis[2]], c=:lightgreen, markershape = :star5, markersize = 8, label=L"\xi^{\mathrm{MAP}}")
scatter!([true_loc_dis[1]], [true_loc_dis[2]], c=:cyan, markershape = :star7, markersize = 8, label=L"\xi^{\mathrm{true}}", legend=false)
xlabel!(L"x_{d} \mathrm{ \ (nm)}", xtickfontsize=12, xguidefontsize=18, margin=5mm)
ylabel!(L"y_{d} \mathrm{ \ (nm)}", ytickfontsize=12, yguidefontsize=18)
xlims!(xs_dis[1], xs_dis[end])
ylims!(ys_dis[1], ys_dis[end])
xticks!([-500,0,500])
yticks!([-400,0,400])
plot!(size=(450,450), right_margin=15mm)
Plots.savefig(figure_dir*"\\iters_dis.pdf")

# Figure 6a
contourf(xs_f_dis, ys_f_dis, density_matrix_f_dis, levels=50, aspect_ratio=:equal, color=cgrad(:grays,rev=true))
scatter!(all_samples[:,1], all_samples[:,2], c=:plum1, alpha=.1, label="MCMC samples")
covellipse!(vec(map_point_dis), map_cov, n_std=2, aspect_ratio=1, color=(:yellow), label=false, alpha=.5)
scatter!([map_point_dis[1]], [map_point_dis[2]], c=:lightgreen, markershape = :star5, markersize = 8, label=L"\xi^{\mathrm{MAP}}")
scatter!([true_loc_dis[1]], [true_loc_dis[2]], c=:cyan, markershape = :star7, markersize = 8, label=L"\xi^{\mathrm{true}}", legend=false)
xlabel!(L"x_{d} \mathrm{ \ (nm)}", xtickfontsize=14, xguidefontsize=18, margin=5mm)
ylabel!(L"y_{d} \mathrm{ \ (nm)}", ytickfontsize=14, yguidefontsize=18)
plot!(size=(450,450), right_margin=15mm)
Plots.savefig(figure_dir*"\\uq_dis.pdf")

# Figure 6b
vals, vecs = eigen(map_cov)
vecs_im = reshape(dis_to_image(vecs,dis_lab),(2,2))
map_cov_im = vecs_im*diagm(vals)*vecs_im'

contourf(xs_f_im, ys_f_im, density_matrix_f_im, levels=50, aspect_ratio=:equal, color=:reds)
scatter!(all_samples_im[:,1], all_samples_im[:,2], alpha=.2, c=:plum1, label="MCMC samples")
covellipse!(vec(map_point_im), map_cov_im, n_std=2, aspect_ratio=1, color=(:yellow), label=false, alpha=.5)
scatter!([map_point_im[1]], [map_point_im[2]], c=:lightgreen, markershape = :star5, markersize = 8, label=L"\xi^{\mathrm{MAP}}")
scatter!([true_loc_im[1]], [true_loc_im[2]], c=:cyan, markershape = :star7, markersize = 8, label=L"\xi^{\mathrm{true}}", legend=false)
xlabel!(L"y_{\ell} \mathrm{ \ (nm)}", xtickfontsize=14, xguidefontsize=18, margin=5mm)
ylabel!(L"x_{\ell} \mathrm{ \ (nm)}", ytickfontsize=14, yguidefontsize=18)
plot!(size=(450,450), right_margin=15mm)
Plots.savefig(figure_dir*"\\uq_im.pdf")

end
{"hexsha": "7c17fd7ca5cdd9d37d6a9e0daa492a057cfa45ca", "size": 15848, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/inference_example.jl", "max_stars_repo_name": "michael-c-brennan/DFXMTools", "max_stars_repo_head_hexsha": "bf0bdef30045d0b8e05094e74243caaa28651d0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "julia/inference_example.jl", "max_issues_repo_name": "michael-c-brennan/DFXMTools", "max_issues_repo_head_hexsha": "bf0bdef30045d0b8e05094e74243caaa28651d0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "julia/inference_example.jl", "max_forks_repo_name": "michael-c-brennan/DFXMTools", "max_forks_repo_head_hexsha": "bf0bdef30045d0b8e05094e74243caaa28651d0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0344827586, "max_line_length": 189, "alphanum_fraction": 0.6176804644, "num_tokens": 4370}
function [w,run] = train_bfgs(x,w,lambda)
% TRAIN_BFGS   Train a logistic regression model by BFGS.
%
% W = TRAIN_BFGS(X,W) returns maximum-likelihood weights given data and a
% starting guess.
% Data is columns of X, each column already scaled by the output (+1 or -1).
% W is the starting guess for the parameters (a column).

% Written by Thomas P Minka

if nargin < 3
  lambda = 0;
end

flops(0);
[d,n] = size(x);
old_g = zeros(size(w));
ih = eye(d);
for iter = 1:1000
  old_w = w;
  % s1 = 1-sigma
  s1 = 1./(1+exp(w'*x));
  g = x*s1' - lambda*w;
  flops(flops + flops_mul(w',x) + n*(flops_exp+2) + flops_mul(x,s1') + 2*d);
  if iter > 1
    dw = w - prev_w;
    dg = g - old_g;
    dwdg = dw'*dg;
    ihdg = ih*dg;
    b = 1 + (dg'*ihdg)/dwdg;
    ihdgdw = ihdg*dw';
    ih = ih + (b*dw*dw' - ihdgdw' - ihdgdw)/dwdg;
    flops(flops + d + d + flops_mul(dw',dg) + flops_mul(ih,dg) + ...
          flops_mul(dg',ihdg)+2 + flops_mul(ihdg,dw') + ...
          flops_mul(b,dw) + flops_mul(dw,dw') + 4*d*d);
  end
  u = -ih*g;
  flops(flops + flops_mul(ih,g));
  prev_w = w;
  % line search along u
  ug = u'*g;
  ux = u'*x;
  a = s1.*(1-s1);
  uhu = (ux.^2)*a' + lambda*(u'*u);
  w = w + (ug/uhu)*u;
  old_g = g;
  flops(flops + flops_mul(u',g) + flops_mul(u',x) + 2*n + ...
        n+flops_mul(1,n,1) + 2*d+1);
  if lambda > 0
    flops(flops + 1+flops_mul(u',u));
  end
  run.w(:,iter) = w;
  run.flops(iter) = flops;
  run.e(iter) = logProb(x,w) -0.5*lambda*w'*w;
  if max(abs(w - old_w)) < 1e-5
    break
  end
end
figure(2)
plot(run.e)
if iter == 1000
  warning('not enough iters')
end
{"author": "FuzhenZhuang", "repo": "Transfer-Learning-Toolkit", "sha": "24b5323b354aee844b8b7df9fcad17fdfb191dc4", "save_path": "github-repos/MATLAB/FuzhenZhuang-Transfer-Learning-Toolkit", "path": "github-repos/MATLAB/FuzhenZhuang-Transfer-Learning-Toolkit/Transfer-Learning-Toolkit-24b5323b354aee844b8b7df9fcad17fdfb191dc4/utilities/TLLibrary64/LR/logreg/train_bfgs.m"}
All Boy Scout Troops and Venture Crews in Davis cooperate every year to run the Boy Scout Christmas Tree Lot http://davischristmastrees.com/. For decades the lot was downtown at the Boy Scout Cabin, but after leaving in 2002 they moved the lot to Madson Place, next to Center City Automotive Inc, which also donated electricity to the lot. In 2003, the lot moved to the Pole Line Road Baptist Church. Since 2010, the lot has been on the vacant lot on the corner of Mace Blvd. & Cowell Blvd., south of I-80.

Map link: http://maps.google.com/maps?f=q&source=s_q&hl=en&geocode=&q=480+Mace+Blvd,+Davis,+CA+95618&sll=37.0625,95.677068&sspn=47.838189,93.076172&ie=UTF8&hq=&hnear=480+Mace+Blvd,+Davis,+Yolo,+California+95618&z=16
{"hexsha": "e1835c0236e02a3d15be838769da1996c271f6f5", "size": 721, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Boy_Scout_Christmas_Tree_Lot.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Boy_Scout_Christmas_Tree_Lot.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Boy_Scout_Christmas_Tree_Lot.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 180.25, "max_line_length": 513, "alphanum_fraction": 0.7850208044, "num_tokens": 211}
from conformer_rl.utils import chem_utils
import numpy as np


def test_tfd_matrix(mocker):
    tf = mocker.patch('conformer_rl.utils.chem_utils.TorsionFingerprints')
    tf.GetTFDMatrix.return_value = [3, 5, 7, 9, 11, 13, 15, 17, 19, 21]
    mat = chem_utils.tfd_matrix('mol')
    assert np.array_equal(mat, np.array(
        [[0., 3, 5, 9, 15],
         [3, 0, 7, 11, 17],
         [5, 7, 0, 13, 19],
         [9, 11, 13, 0, 21],
         [15, 17, 19, 21, 0]]
    ))
{"hexsha": "c8ea54d6ef121dbc9058df6f66c5f5c54b1f1431", "size": 464, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/utils/test_chem_utils.py", "max_stars_repo_name": "ZimmermanGroup/conformer-rl", "max_stars_repo_head_hexsha": "beb98cbee6ba6efba686d7c6eebbf33fd737f279", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-09-03T18:46:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T05:47:20.000Z", "max_issues_repo_path": "tests/utils/test_chem_utils.py", "max_issues_repo_name": "ZimmermanGroup/conformer-rl", "max_issues_repo_head_hexsha": "beb98cbee6ba6efba686d7c6eebbf33fd737f279", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-07-15T03:57:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-03T06:27:28.000Z", "max_forks_repo_path": "tests/utils/test_chem_utils.py", "max_forks_repo_name": "ZimmermanGroup/conformer-rl", "max_forks_repo_head_hexsha": "beb98cbee6ba6efba686d7c6eebbf33fd737f279", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-17T01:59:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T01:59:36.000Z", "avg_line_length": 33.1428571429, "max_line_length": 74, "alphanum_fraction": 0.5797413793, "include": true, "reason": "import numpy", "num_tokens": 185}
\documentclass[a4paper]{article}

%import packages
\usepackage[utf8]{inputenc}
\usepackage{graphicx}
\usepackage{wrapfig}
\usepackage{float}
\usepackage{listings}
\usepackage{amsmath}
\usepackage{epigraph}
\usepackage{multicol}
\usepackage[a4paper, total={7in, 8in}]{geometry}

%define variables
\newcommand{\projectname}{0xchan}

%set up header
\title{\projectname}
\author{
sumpunk\\
\texttt{sumpunk@protonmail.com}
\and
ARitz Cracker\\
\texttt{aritz@aritzcracker.ca}
}

\begin{document}
\maketitle

\begin{figure}[H]
\centering
\texttt{WORKING DRAFT v1.0.6}
\end{figure}

\begin{abstract}
\projectname{} is a decentralized and immutable message board system on the Ethereum blockchain where users can post messages and media files. Storage of those messages is handled via IPFS, and a smart contract is used to store a ledger of all messages. A Proof of Stake style mechanism ensures quality content and protects the system against spam, while also incentivising users to stake their Ether. The immutable and decentralized nature of the system allows for free speech and makes it possible to create truly censorship resistant and self sustaining platforms.
\end{abstract}

\section*{Acknowledgement}
This project is a collaborative effort between sumpunk (full stack) and ARitz Cracker (Solidity). We thank ToCsIcK for cross-checking the Solidity codebase and ideas as well as helping out with full stack development. We thank our community staff Capex, Crypto McPump, Cryptoknight, EthRabbi, kadaz, Lil Stronghands, Ms Incognito and SciPanda for keeping the Discord server in check, and maojk for being the translating bridge to our Chinese community. We thank our voluntary community support and helpdesk staff for swiftly answering community questions. We also thank Vitalik Buterin for creating Ethereum in the first place.

\vspace*{\fill}
\epigraph{I disapprove of what you say, but I will defend to the death your right to say it.}{Evelyn Beatrice Hall}
\pagebreak

\section{What \projectname{} is}
Like many other dApps, \projectname{} consists of a website that interfaces with a smart contract. The contract serves as a decentralized ledger to write to and read from, while larger chunks of data are stored on the decentralized IPFS network. The smart contract and the storage are decentralized; the website, however, is not. Users can clone or fork the website repository and set up another interface by themselves, and either host it for others to use as well or run it on a local webserver for themselves.

\begin{figure}[H]
\centering
\includegraphics[width=0.25\textwidth]{0x_flow.png}
\caption{Simplified interaction flow between users, smart contract and IPFS}
\label{fig:flow}
\end{figure}

By crafting the necessary transaction data themselves, users could also send messages directly to the smart contract through the use of tools like MyEtherWallet or Etherscan, or even command line interfaces. There is no way to restrict a user from sending messages to the contract.

The messages sent to the system usually consist of at least a short or long comment made by the user, an optional subject and optional media attachments. This allows users to freely express their thoughts, discuss various topics and engage with ongoing discussions or start new ones. All of this happens free from censorship by government or other authorities.
\section{What IPFS is}
IPFS, or the ``InterPlanetary FileSystem'', is a self-described peer-to-peer hypermedia protocol to make the web faster, safer, and more open. It relies on users operating so-called nodes to distribute server load and content storage across multiple machines with a certain level of redundancy. Nodes will keep stored content as long as the node has it pinned. If content gets unpinned, it will be purged from the node once no more requests for that content are made, to preserve storage space. Users can set up their own nodes and require them to replicate data from another node they specify. That way a network can be established with redundancy.

Users can interact with IPFS either through the use of a command line interface, desktop or mobile applications, or by using web interfaces. More on IPFS can be read in their own whitepaper\footnote{IPFS Whitepaper https://github.com/ipfs/papers/raw/master/ipfs-cap2pfs/ipfs-p2p-file-system.pdf}.

\section{Decentralization and immutability}
As stated before, data on IPFS can \emph{vanish} if the content is unpinned and not enough requests are made for the content to be saved from the IPFS garbage collector tool, although the deletion propagates very slowly: most files remain inside a node's cache for a prolonged period of time and can be retrieved by requesting the file again. By default, 0xchan.net will operate its own IPFS node which other users can tap into and replicate to create redundant data storage. They can also choose to refuse garbage collection on their nodes, which makes it possible to keep files even after they are removed on the replicated node. If content is deleted on a node and is re-added to the same node directory, the generated hash will remain the same. This will always be the case if users use the 0xchan.net interface to upload their files, since it will always use the same directories.

The immutability stems from the fact that the smart contract can never be stopped at any time unless the entire Ethereum network is halted. As more users deploy their own nodes to replicate the 0xchan.net node, the level of decentralization rises, and by not allowing files to be arbitrarily deleted, the content distribution network becomes immutable as well. By hosting their own nodes and participating in the content distribution, users also strengthen the network against single points of failure and censorship by the operator of the 0xchan.net node.

\section{Using \projectname}
Users can either browse \projectname{} and just read the conversations or actively participate in them. Simply browsing the dApp for content comes at no extra cost to the user, while posting a comment will invoke a fee. This prevents spam bots from using \projectname{} as a dumping ground for malicious or otherwise irritating content, so users can focus on real human to human discussions.

\subsection{Proof of Stake}
The requirement for each user that wants to post on \projectname{} is to have a minimum of 0.5 Ether locked in the contract. This will be used as stake and collateral at the same time. The staking requirement for sending messages with attachments is an additional 1.5 Ether. The Ether sent as stake can be withdrawn by the users at any time, provided they did not post in the past 24 hours and there are no pending reports. The stake will also assign users a certain percentage in the distributed rewards system.

Stake can be deposited either by using the proper functions of the smart contract or by simply sending Ether to the contract's address. The default action for received Ether is to turn it into stake. If the maximum amount of 2 Ether that can be staked is reached, additional Ether will be used to purchase ZCH.
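To make the deposit rule concrete, the following minimal sketch models the intended bookkeeping in Python. The \texttt{Account} record and the fixed \texttt{ZCH\_PER\_ETH} rate are illustrative assumptions only, not contract code; the actual conversion happens through the contract's ZCH purchase logic.

\begin{lstlisting}[language=Python]
from dataclasses import dataclass

MAX_STAKE = 2.0        # Ether cap on stake, as described above
ZCH_PER_ETH = 100.0    # hypothetical fixed rate, for illustration only

@dataclass
class Account:
    stake: float = 0.0
    zch: float = 0.0

def on_receive(acct: Account, amount_eth: float) -> None:
    """Default action for received Ether: stake up to the cap,
    any overflow is used to purchase ZCH instead."""
    room = max(MAX_STAKE - acct.stake, 0.0)
    to_stake = min(amount_eth, room)
    acct.stake += to_stake
    acct.zch += (amount_eth - to_stake) * ZCH_PER_ETH
\end{lstlisting}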
\subsubsection{ZCH}
Each post sent to \projectname{} costs an additional 0.0001 Ether fee, which is distributed among all stakeholders (80\%) and used to purchase P3D (15\%), while a portion of it stays in the contract to be used to settle expenses (5\%). This fee should make it expensive enough for people to not spam the system with useless content, but low enough to allow for easy entry into free discussions. The price for a message can and should be adjusted to meet market conditions.

Users can also pay this fee in advance by purchasing ZCH, an ERC-20 token. This will also increase their overall percentage in the stake sharing pool. When users choose to use ZCH to pay for a post, they will only have to pay the Ethereum network fee, which is kept at the lowest minimum possible due to gas optimization in the smart contract. There is only a theoretical limit to the amount of ZCH that can ever be minted; the contract does not restrict the maximum supply of ZCH. The use of ZCH burns the token.

\subsubsection{\projectname{} and P3D}
\projectname{} will put the 15\% of the posting fee into a pool which will purchase P3D tokens\footnote{PoWH3D Wiki (2018) https://powh3d.hostedwiki.co/} from the P3D smart contract. The pool will be managed by the \projectname{} smart contract, and users can force a purchase whenever they like; however, it won't be possible to use a masternode\footnote{A so called ``masternode'' is a referral program used by P3D} during those purchases. This will generate dividends for the P3D network, and the contract will receive those dividends just the same. The dividends collected that way will be released to all ZCH token holders every time a new P3D purchase is made.

\subsection{Posting a message}
Users will make use of the website's input forms, similar to the one in figure \ref{fig:input}, to create their threads and replies, as well as attach media files to their posts.

\begin{figure}[H]
\centering
\includegraphics[width=0.6\textwidth]{0x_4chan_post.png}
\caption{Example for an input field as found on 4chan}
\label{fig:input}
\end{figure}

Since storing data on the Ethereum blockchain is quite expensive in terms of gas cost, we push the comment data and optional images to IPFS and only write a modified storage hash to our smart contract ledger. Since IPFS returns a hash that starts with ``Qm'' in its base configuration, we can decode the hash, strip its constant first two bytes, and use cheaper storage for a ``bytes32'' entity, since the remaining hash will be exactly 32 bytes long. We can safely assume that other node operators will not change the default hashing behavior, and thus reconstruct the hash on the interface.

\begin{figure}[H]
\centering
\includegraphics[width=0.6\textwidth]{0x_logic.png}
\caption{Disassembly of a message to 0xchan and storage procedure, content stored is subject to change}
\label{fig:logic}
\end{figure}

All text based key values will be transformed into a single JSON object; images will be added as they are to the IPFS node. The frontend will initiate the process, wait until the files have been added and pinned, and return their IPFS hashes accordingly so the transaction can be properly written to the blockchain.
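One common way to realize this hash compaction is to base58-decode the CIDv0 string and drop its constant two-byte multihash header (which is what renders as the ``Qm'' prefix); the remaining 32-byte digest fits a single \texttt{bytes32} storage slot. The sketch below uses the third-party \texttt{base58} Python package and is illustrative only:

\begin{lstlisting}[language=Python]
import base58  # third-party package, assumed installed

def cid_to_bytes32(cid: str) -> bytes:
    raw = base58.b58decode(cid)     # 34 bytes: 0x12 0x20 + 32-byte digest
    assert raw[:2] == b"\x12\x20"   # sha2-256 multihash header -> "Qm"
    return raw[2:]                  # exactly 32 bytes

def bytes32_to_cid(digest: bytes) -> str:
    return base58.b58encode(b"\x12\x20" + digest).decode()
\end{lstlisting}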
\subsubsection{Temporary content}
To improve the user experience, \projectname{} is going to make use of the user's browser localstorage, which allows safe and temporary storing of data similar to how cookies work. When a user creates a new post or thread, the data will not only be sent to IPFS but also stored in localstorage, and \projectname{} will use that to display the content that was sent to IPFS back to the user. Since localstorage is local to the browser, other users will not see the content; this will be made clear in a little notification attached to the message. Once the user's submitted transaction on the Ethereum network has been processed, other users will also be able to see the content and interact with it. The user's localstorage for temporary content will be purged regularly, either automatically or manually by the user.

\subsubsection{Interacting with posts}
Each post can be interacted with in many ways; replying to it is just one of those possibilities. Users can award each other \emph{(you)} tokens that can be collected and later be redeemed for cosmetic things. (you) don't account for any additional stake, nor can they be traded or sold. Another way to thank users for a post would be through an integrated \emph{donation} function that initiates a standard Ether transfer from the user clicking the button on 0xchan.net to the user that made the post, where users choose themselves how much Ether will be sent. No Ether will be transferred to the \projectname{} contract that way. Feature inclusion is to be decided for the final release of the whitepaper.

\subsubsection{Dropping of posts}
Each board can hold a maximum amount of pages ($A_p$) worth of threads. Each new thread pushes all other threads back in the hierarchy of ordering when first created; the order also changes with each reply to each individual thread. If a new thread would put the total amount of threads to $A_p+1$, the ``oldest'' thread in the storage array is dropped from the smart contract's index. Nodes can query this index to understand whether they can unpin content and free up storage for new content, as sketched below.
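As a toy model of this bump-ordered index (the page count and threads-per-page figures below are made up for illustration; the whitepaper itself does not fix them):

\begin{lstlisting}[language=Python]
from collections import OrderedDict

MAX_THREADS = 10 * 15  # hypothetical: A_p = 10 pages of 15 threads each

class Board:
    def __init__(self):
        self.threads = OrderedDict()  # thread id -> post hashes, oldest first

    def bump(self, thread_id):
        self.threads.move_to_end(thread_id)   # a reply moves a thread up

    def new_thread(self, thread_id, op_hash):
        if len(self.threads) >= MAX_THREADS:
            dropped, _ = self.threads.popitem(last=False)  # "oldest" falls off
            # nodes polling the index may now unpin the dropped content
        self.threads[thread_id] = [op_hash]
\end{lstlisting}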
\subsection{User profiles}
Each user will have an automatically populated profile page displaying their address, amount of posts made, amount of ZCH and stake, amount of (you) awarded, and some other things that may get added with future extensions. Since the profile page will be populated using the currently used Ethereum address, the page may only be viewed by the owner of the profile. The page also serves as a hub to manage ZCH balances, withdrawal of outstanding rewards, and stake deposit and withdrawal.

\subsection{Creation of custom boards}
Users will have the opportunity to create their own so-called \emph{boards}. Boards serve as a general filter for categories of various interests, e.g.\ automobiles, photography, drawing, but also adult themed content like pornography, fetishes and so on. To create a board, users can either send a creation transaction directly by calling the corresponding function from Etherscan or similar tools, or by using a form on the website. The creation process of a board will ask the user for a shortcode for the board that should not be larger than 4 letters or numbers (e.g.\ \emph{wsg}), a complete name (e.g.\ \emph{Work Safe GIF}) and a short description of the content the creator would like to see on that board (e.g.\ \emph{A collection of safe for work animated images}). The creation process will burn 100 ZCH from the user's account, or, if the user doesn't have 100 ZCH to burn, ask for 0.01 Ether, which will be distributed in the same way as a regular posting fee.

\projectname{} will provide a few default boards at launch to serve as a base; custom boards can be added post launch. The list of available boards will be automatically updated each time a new board has been added.

\subsubsection{Removal of custom boards}
To remove cruft and keep the smart contract performant, the removal of custom boards is a necessary step. Users can not remove the boards themselves; instead, the contract has to be queried from within the interface to check for unused boards. A board becomes unused if there has been no new post or thread in a timeframe of 1 month. After that period the board will be open to removal. The process is fairly simple: users can manually query the contract through the interface and will be presented with a list of boards that are deemed ``unused''. They can then initiate a transaction which removes those boards from storage.

\subsection{Moderation}
\begin{wrapfigure}{r}[32pt]{0.5\textwidth}
\includegraphics[width=0.4\textwidth]{0x_moderation.png}
\caption{Simplified flow of reporting process, moderation voting and appeal process}
\label{fig:mod}
\end{wrapfigure}

Since \projectname{} aims to give the power of moderation to its actual userbase instead of enabling despotism, no elaborate additional scheme like \emph{Delegated Proof of Stake} will take place, as those are regularly prone to abuse by accounts with enough funding and would subsequently turn a free speech and unregulated platform into a place of collusion and voting fraud.

The moderation process starts with a report from a user, which costs 0.001 Ether and will set in motion the following mechanics:
\begin{enumerate}
\item Freezing of the accused user's stake (Ether)
\item Suspension of transfer of the user's ZCH to addresses other than the contract
\item Letting the moderation contract select 21 randomly chosen accounts to vote on this issue
\end{enumerate}

Selection of voters will take place inside a smart contract to increase the level of transparency for the users. The first 11 votes will be accounted for; all other votes will be rejected. If the majority votes for a permanent hiding of the offending content, \emph{all participating voters and the reporting user} are granted 10 ZCH each, the reporting user will receive their reporting fee back, and the content will be hidden permanently on 0xchan.net; if the majority thinks the report was false, the reporting fee will be frozen and, depending on the outcome of a possible appeal, will be used to purchase P3D. The option to vote on a report will stay open for 7 days.

Voting results will be displayed on a page hosted with 0xchan.net, and users can file for appeals from within that page. Should they choose to appeal, 21 new users will be selected to vote and essentially repeat the previous process once more. Appealing does not incur additional fees. The option to appeal stays available for 7 days. Should the user choose not to appeal, the liquidation of their assets will take place; the ZCH will be burned and the staked Ether will be used to purchase P3D. If an appeal was successful, the accused user's assets will be unfrozen, the hidden content will be visible again, and the assets of the voters who initially voted to permanently hide the content will be liquidated instead; ZCH will be burned and the staked Ether will be used to purchase P3D.

A report will be discarded if fewer than 11 votes are cast on it, or if the maximum time to live for the report is reached (7 days in this example).
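The counting rule can be read as follows (a minimal sketch, assuming votes arrive in order and \texttt{True} means ``hide''):

\begin{lstlisting}[language=Python]
VOTES_COUNTED = 11  # only the first 11 of the 21 selected voters count

def tally(votes):
    counted = votes[:VOTES_COUNTED]
    if len(counted) < VOTES_COUNTED:
        return "discarded"  # fewer than 11 votes within the 7-day window
    return "hide" if sum(counted) > VOTES_COUNTED // 2 else "keep"
\end{lstlisting}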
The reporting fee will then be scheduled to be refunded to the reporter, and no action will take place regarding the content.

The moderation contract is a separately deployed contract which can be dynamically linked to the \projectname{} main contract. This allows us to update the codebase should any issues arise or new features be needed.

\subsection{Withdrawal or reinvesting of user funds}
Since users are subject to receiving a reward based on their staked amount and total amount of ZCH in possession, the website will also provide an easy to use withdrawal functionality where users can always see how much Ether has accumulated since the last withdrawal and then decide to move that to their wallet, or use the funds to directly purchase ZCH instead.

\section{Using your ZCH for other things}
ZCH is the main currency for \projectname{} and can be used for more than just pre-purchasing posting permissions.

\subsection{Purchasing wordfilters DRAFT}
A wordfilter is an automatic process which takes the original word and replaces it with a target word chosen by the user buying the rights to deploy the wordfilter. On 4chan, for example, wordfilters exist to change the abbreviated phrase ``tbh'' into ``desu'', following the ``desu-meme'' of around 2006\footnote{"Desu on Know Your Meme", https://knowyourmeme.com/memes/desu}. Users can purchase the right to enact their own wordfilters for all other users\footnote{Please note that this feature will only be visible on 0xchan.net and other access points hosting the same frontend codebase.} by using the supplied interface.

By making use of a slightly modified version of a Harberger Tax based system\footnote{"What is Harberger Tax \& Where Does The Blockchain Fit In?", Simon de la Rouviere (July 2018) https://medium.com/@simondlr/what-is-harberger-tax-where-does-the-blockchain-fit-in-1329046922c6}, the users can enjoy this feature as a kind of mini-game on top of \projectname{}. The price ($P$) of the wordfilter is determined by the user purchasing it; however, they will have to stake 50\% of the sales price as a liquidity pool ($L$), from which the system deducts a 2\% daily tax ($P_t$). The system will automatically remove the wordfilter again should the funding of the current owner run out. A new wordfilter can be bought to replace the currently registered filter by paying $P$.

The following example should make it easier to understand how the procedure of acquiring a wordfilter, funding it, and replacing an existing wordfilter works. Assume User A determines the wordfilter is worth $P=100$ ZCH. Taking the previous rules into account, User A would have to deposit $L$ amount of ZCH (in this case a total of 50 ZCH) to keep the liquidity pool funded. The system will deduct the daily tax in a manner of
\[ P_t = \frac{P}{100} \times 2. \]
If User B decides to buy the wordfilter, User A would receive the price of the filter $P$ as well as a refund ($R$) of the tax in a manner of
\[ R = P_t \times T, \]
where $T$ is the run time of the wordfilter in days. User B can now also determine a new price $P$ for the wordfilter (ideally at a higher price than what User B paid) and has to pay $P_t$ on the newly defined $P$, just as User A had to do before.

There can only be one active word filter at any given time per board.
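Reading these formulas with concrete numbers: for $P = 100$ ZCH the owner stakes $L = 50$ ZCH and pays $P_t = \frac{100}{100} \times 2 = 2$ ZCH per day, so the liquidity pool funds the filter for at most $L / P_t = 25$ days before the system removes it. If User B buys on day $T = 10$, User A receives
\[ P + R = 100 + 2 \times 10 = 120 \ \text{ZCH}. \]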
Users wanting to create new wordfilters will have to either be the first to activate them, or purchase the rights to existing wordfilters should a wordfilter already exist.

\section{Crowdfunding participants}
Users who participated in the crowdfunding campaign will be able to trade their ZCI tokens in a ratio of 1:1 with the smart contract and receive ZCH in return. This will be done by using a function on the \projectname{} smart contract which takes ZCI as payment and returns that payment with the ZCH token.

\pagebreak
\section*{Changelog}
\begin{itemize}
\item Oct.\ 02, 2019 (v1.0.6): Changelog permanently moved to Git, added section 4.2.1
\item Oct.\ 01, 2019 (v1.0.5): Added sections 4.2.2 and 4.4.1
\item Sep.\ 29, 2019 (v1.0.4): Changed draft section 5.1 from banners to wordfilters
\item Sep.\ 11, 2019 (v1.0.3): Removed mention of Team JUST from first page
\item Mar.\ 2, 2019 (v1.0.2): Added draft section of system to enable users to purchase site banners employing Harberger Tax, mainly inspired by Troopy
\item Jan.\ 10, 2019 (v1.0.1): Added Changelog; changed functionality of moderation process to not freeze ZCH of accused user, but instead only prohibit transfer to other addresses to still allow for posting
\item Jan.\ 2, 2019 (v1.0.0): Release of Working Draft
\end{itemize}

\end{document}
{"hexsha": "a6fc52c4d9bda6124ec4fcd2749f1dc106e986f4", "size": 22312, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "0xchan_whitepaper.tex", "max_stars_repo_name": "n4n0GH/0xchan-whitepaper", "max_stars_repo_head_hexsha": "c973540b75542174fc10f3d473f9487f53bc460b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-12T03:44:42.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-12T03:44:42.000Z", "max_issues_repo_path": "0xchan_whitepaper.tex", "max_issues_repo_name": "n4n0GH/0xchan-whitepaper", "max_issues_repo_head_hexsha": "c973540b75542174fc10f3d473f9487f53bc460b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "0xchan_whitepaper.tex", "max_forks_repo_name": "n4n0GH/0xchan-whitepaper", "max_forks_repo_head_hexsha": "c973540b75542174fc10f3d473f9487f53bc460b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 94.9446808511, "max_line_length": 817, "alphanum_fraction": 0.7939225529, "num_tokens": 5057}
/-
Copyright (c) 2022 Jujian Zhang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jujian Zhang, Scott Morrison

! This file was ported from Lean 3 source module category_theory.abelian.injective_resolution
! leanprover-community/mathlib commit 956af7c76589f444f2e1313911bad16366ea476d
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Algebra.Homology.QuasiIso
import Mathbin.CategoryTheory.Preadditive.InjectiveResolution
import Mathbin.CategoryTheory.Abelian.Homology
import Mathbin.Algebra.Homology.HomotopyCategory

/-!
# Main result

When the underlying category is abelian:
* `category_theory.InjectiveResolution.desc`: Given `I : InjectiveResolution X` and
  `J : InjectiveResolution Y`, any morphism `X ⟶ Y` admits a descent to a chain map
  `J.cocomplex ⟶ I.cocomplex`. It is a descent in the sense that `I.ι` intertwines the descent
  and the original morphism, see `category_theory.InjectiveResolution.desc_commutes`.
* `category_theory.InjectiveResolution.desc_homotopy`: Any two such descents are homotopic.
* `category_theory.InjectiveResolution.homotopy_equiv`: Any two injective resolutions of the same
  object are homotopy equivalent.
* `category_theory.injective_resolutions`: If every object admits an injective resolution, we can
  construct a functor `injective_resolutions C : C ⥤ homotopy_category C`.

* `category_theory.exact_f_d`: `f` and `injective.d f` are exact.
* `category_theory.InjectiveResolution.of`: Hence, starting from a monomorphism `X ⟶ J`, where `J`
  is injective, we can apply `injective.d` repeatedly to obtain an injective resolution of `X`.
-/


noncomputable section

open CategoryTheory

open CategoryTheory.Limits

universe v u

namespace CategoryTheory

variable {C : Type u} [Category.{v} C]

open Injective

namespace InjectiveResolution

section

variable [HasZeroMorphisms C] [HasZeroObject C] [HasEqualizers C] [HasImages C]

/-- Auxiliary construction for `desc`. -/
def descFZero {Y Z : C} (f : Z ⟶ Y) (I : InjectiveResolution Y) (J : InjectiveResolution Z) :
    J.cocomplex.pt 0 ⟶ I.cocomplex.pt 0 :=
  factorThru (f ≫ I.ι.f 0) (J.ι.f 0)
#align category_theory.InjectiveResolution.desc_f_zero CategoryTheory.InjectiveResolution.descFZero

end

section Abelian

variable [Abelian C]

/-- Auxiliary construction for `desc`. -/
def descFOne {Y Z : C} (f : Z ⟶ Y) (I : InjectiveResolution Y) (J : InjectiveResolution Z) :
    J.cocomplex.pt 1 ⟶ I.cocomplex.pt 1 :=
  Exact.desc (descFZero f I J ≫ I.cocomplex.d 0 1) (J.ι.f 0) (J.cocomplex.d 0 1)
    (Abelian.Exact.op _ _ J.exact₀) (by simp [← category.assoc, desc_f_zero])
#align category_theory.InjectiveResolution.desc_f_one CategoryTheory.InjectiveResolution.descFOne

@[simp]
theorem descFOne_zero_comm {Y Z : C} (f : Z ⟶ Y) (I : InjectiveResolution Y)
    (J : InjectiveResolution Z) :
    J.cocomplex.d 0 1 ≫ descFOne f I J = descFZero f I J ≫ I.cocomplex.d 0 1 := by
  simp [desc_f_zero, desc_f_one]
#align category_theory.InjectiveResolution.desc_f_one_zero_comm CategoryTheory.InjectiveResolution.descFOne_zero_comm
/-- Auxiliary construction for `desc`. -/
def descFSucc {Y Z : C} (I : InjectiveResolution Y) (J : InjectiveResolution Z) (n : ℕ)
    (g : J.cocomplex.pt n ⟶ I.cocomplex.pt n)
    (g' : J.cocomplex.pt (n + 1) ⟶ I.cocomplex.pt (n + 1))
    (w : J.cocomplex.d n (n + 1) ≫ g' = g ≫ I.cocomplex.d n (n + 1)) :
    Σ' g'' : J.cocomplex.pt (n + 2) ⟶ I.cocomplex.pt (n + 2),
      J.cocomplex.d (n + 1) (n + 2) ≫ g'' = g' ≫ I.cocomplex.d (n + 1) (n + 2) :=
  ⟨@Exact.desc C _ _ _ _ _ _ _ _ _ (g' ≫ I.cocomplex.d (n + 1) (n + 2)) (J.cocomplex.d n (n + 1))
      (J.cocomplex.d (n + 1) (n + 2)) (Abelian.Exact.op _ _ (J.exact _))
      (by simp [← category.assoc, w]),
    by simp⟩
#align category_theory.InjectiveResolution.desc_f_succ CategoryTheory.InjectiveResolution.descFSucc

/-- A morphism in `C` descends to a chain map between injective resolutions. -/
def desc {Y Z : C} (f : Z ⟶ Y) (I : InjectiveResolution Y) (J : InjectiveResolution Z) :
    J.cocomplex ⟶ I.cocomplex :=
  CochainComplex.mkHom _ _ (descFZero f _ _) (descFOne f _ _) (descFOne_zero_comm f I J).symm
    fun n ⟨g, g', w⟩ => ⟨(descFSucc I J n g g' w.symm).1, (descFSucc I J n g g' w.symm).2.symm⟩
#align category_theory.InjectiveResolution.desc CategoryTheory.InjectiveResolution.desc

/-- The resolution maps intertwine the descent of a morphism and that morphism. -/
@[simp, reassoc.1]
theorem desc_commutes {Y Z : C} (f : Z ⟶ Y) (I : InjectiveResolution Y)
    (J : InjectiveResolution Z) : J.ι ≫ desc f I J = (CochainComplex.single₀ C).map f ≫ I.ι := by
  ext n
  rcases n with (_ | _ | n) <;>
    · dsimp [desc, desc_f_one, desc_f_zero]
      simp
#align category_theory.InjectiveResolution.desc_commutes CategoryTheory.InjectiveResolution.desc_commutes

-- Now that we've checked this property of the descent,
-- we can seal away the actual definition.
/-- An auxiliary definition for `desc_homotopy_zero`. -/
def descHomotopyZeroZero {Y Z : C} {I : InjectiveResolution Y} {J : InjectiveResolution Z}
    (f : I.cocomplex ⟶ J.cocomplex) (comm : I.ι ≫ f = 0) :
    I.cocomplex.pt 1 ⟶ J.cocomplex.pt 0 :=
  Exact.desc (f.f 0) (I.ι.f 0) (I.cocomplex.d 0 1) (Abelian.Exact.op _ _ I.exact₀)
    (congr_fun (congr_arg HomologicalComplex.Hom.f comm) 0)
#align category_theory.InjectiveResolution.desc_homotopy_zero_zero CategoryTheory.InjectiveResolution.descHomotopyZeroZero

/-- An auxiliary definition for `desc_homotopy_zero`. -/
def descHomotopyZeroOne {Y Z : C} {I : InjectiveResolution Y} {J : InjectiveResolution Z}
    (f : I.cocomplex ⟶ J.cocomplex) (comm : I.ι ≫ f = (0 : _ ⟶ J.cocomplex)) :
    I.cocomplex.pt 2 ⟶ J.cocomplex.pt 1 :=
  Exact.desc (f.f 1 - descHomotopyZeroZero f comm ≫ J.cocomplex.d 0 1) (I.cocomplex.d 0 1)
    (I.cocomplex.d 1 2) (Abelian.Exact.op _ _ (I.exact _))
    (by simp [desc_homotopy_zero_zero, ← category.assoc])
#align category_theory.InjectiveResolution.desc_homotopy_zero_one CategoryTheory.InjectiveResolution.descHomotopyZeroOne
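-- `descHomotopyZeroSucc` below extends a homotopy one degree at a time: given
-- components `g` and `g'` satisfying the homotopy identity in degree `n + 1`,
-- exactness of `I.cocomplex` (used through `Exact.desc` on the opposite category)
-- produces the component in degree `n + 2`; `Homotopy.mkCoinductive` then
-- assembles these into `descHomotopyZero`.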
/-- An auxiliary definition for `desc_homotopy_zero`. -/
def descHomotopyZeroSucc {Y Z : C} {I : InjectiveResolution Y} {J : InjectiveResolution Z}
    (f : I.cocomplex ⟶ J.cocomplex) (n : ℕ) (g : I.cocomplex.pt (n + 1) ⟶ J.cocomplex.pt n)
    (g' : I.cocomplex.pt (n + 2) ⟶ J.cocomplex.pt (n + 1))
    (w : f.f (n + 1) = I.cocomplex.d (n + 1) (n + 2) ≫ g' + g ≫ J.cocomplex.d n (n + 1)) :
    I.cocomplex.pt (n + 3) ⟶ J.cocomplex.pt (n + 2) :=
  Exact.desc (f.f (n + 2) - g' ≫ J.cocomplex.d _ _) (I.cocomplex.d (n + 1) (n + 2))
    (I.cocomplex.d (n + 2) (n + 3)) (Abelian.Exact.op _ _ (I.exact _))
    (by
      simp [preadditive.comp_sub, ← category.assoc, preadditive.sub_comp,
        show I.cocomplex.d (n + 1) (n + 2) ≫ g' = f.f (n + 1) - g ≫ J.cocomplex.d n (n + 1) by
          rw [w]
          simp only [add_sub_cancel]])
#align category_theory.InjectiveResolution.desc_homotopy_zero_succ CategoryTheory.InjectiveResolution.descHomotopyZeroSucc

/-- Any descent of the zero morphism is homotopic to zero. -/
def descHomotopyZero {Y Z : C} {I : InjectiveResolution Y} {J : InjectiveResolution Z}
    (f : I.cocomplex ⟶ J.cocomplex) (comm : I.ι ≫ f = 0) : Homotopy f 0 :=
  Homotopy.mkCoinductive _ (descHomotopyZeroZero f comm) (by simp [desc_homotopy_zero_zero])
    (descHomotopyZeroOne f comm) (by simp [desc_homotopy_zero_one]) fun n ⟨g, g', w⟩ =>
    ⟨descHomotopyZeroSucc f n g g' (by simp only [w, add_comm]),
      by simp [desc_homotopy_zero_succ, w]⟩
#align category_theory.InjectiveResolution.desc_homotopy_zero CategoryTheory.InjectiveResolution.descHomotopyZero

/-- Two descents of the same morphism are homotopic. -/
def descHomotopy {Y Z : C} (f : Y ⟶ Z) {I : InjectiveResolution Y} {J : InjectiveResolution Z}
    (g h : I.cocomplex ⟶ J.cocomplex) (g_comm : I.ι ≫ g = (CochainComplex.single₀ C).map f ≫ J.ι)
    (h_comm : I.ι ≫ h = (CochainComplex.single₀ C).map f ≫ J.ι) : Homotopy g h :=
  Homotopy.equivSubZero.invFun (descHomotopyZero _ (by simp [g_comm, h_comm]))
#align category_theory.InjectiveResolution.desc_homotopy CategoryTheory.InjectiveResolution.descHomotopy

/-- The descent of the identity morphism is homotopic to the identity cochain map. -/
def descIdHomotopy (X : C) (I : InjectiveResolution X) :
    Homotopy (desc (𝟙 X) I I) (𝟙 I.cocomplex) := by apply desc_homotopy (𝟙 X) <;> simp
#align category_theory.InjectiveResolution.desc_id_homotopy CategoryTheory.InjectiveResolution.descIdHomotopy

/-- The descent of a composition is homotopic to the composition of the descents. -/
def descCompHomotopy {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) (I : InjectiveResolution X)
    (J : InjectiveResolution Y) (K : InjectiveResolution Z) :
    Homotopy (desc (f ≫ g) K I) (desc f J I ≫ desc g K J) := by
  apply desc_homotopy (f ≫ g) <;> simp
#align category_theory.InjectiveResolution.desc_comp_homotopy CategoryTheory.InjectiveResolution.descCompHomotopy

-- We don't care about the actual definitions of these homotopies.
-/ def homotopyEquiv {X : C} (I J : InjectiveResolution X) : HomotopyEquiv I.cocomplex J.cocomplex where Hom := desc (𝟙 X) J I inv := desc (𝟙 X) I J homotopyHomInvId := (descCompHomotopy (𝟙 X) (𝟙 X) I J I).symm.trans <| by simpa [category.id_comp] using desc_id_homotopy _ _ homotopyInvHomId := (descCompHomotopy (𝟙 X) (𝟙 X) J I J).symm.trans <| by simpa [category.id_comp] using desc_id_homotopy _ _ #align category_theory.InjectiveResolution.homotopy_equiv CategoryTheory.InjectiveResolution.homotopyEquiv @[simp, reassoc.1] theorem homotopyEquiv_hom_ι {X : C} (I J : InjectiveResolution X) : I.ι ≫ (homotopyEquiv I J).Hom = J.ι := by simp [HomotopyEquiv] #align category_theory.InjectiveResolution.homotopy_equiv_hom_ι CategoryTheory.InjectiveResolution.homotopyEquiv_hom_ι @[simp, reassoc.1] theorem homotopyEquiv_inv_ι {X : C} (I J : InjectiveResolution X) : J.ι ≫ (homotopyEquiv I J).inv = I.ι := by simp [HomotopyEquiv] #align category_theory.InjectiveResolution.homotopy_equiv_inv_ι CategoryTheory.InjectiveResolution.homotopyEquiv_inv_ι end Abelian end InjectiveResolution section variable [Abelian C] /-- An arbitrarily chosen injective resolution of an object. -/ abbrev injectiveResolution (Z : C) [HasInjectiveResolution Z] : CochainComplex C ℕ := (HasInjectiveResolution.out Z).some.cocomplex #align category_theory.injective_resolution CategoryTheory.injectiveResolution /-- The cochain map from cochain complex consisting of `Z` supported in degree `0` back to the arbitrarily chosen injective resolution `injective_resolution Z`. -/ abbrev injectiveResolution.ι (Z : C) [HasInjectiveResolution Z] : (CochainComplex.single₀ C).obj Z ⟶ injectiveResolution Z := (HasInjectiveResolution.out Z).some.ι #align category_theory.injective_resolution.ι CategoryTheory.injectiveResolution.ι /-- The descent of a morphism to a cochain map between the arbitrarily chosen injective resolutions. -/ abbrev injectiveResolution.desc {X Y : C} (f : X ⟶ Y) [HasInjectiveResolution X] [HasInjectiveResolution Y] : injectiveResolution X ⟶ injectiveResolution Y := InjectiveResolution.desc f _ _ #align category_theory.injective_resolution.desc CategoryTheory.injectiveResolution.desc variable (C) [HasInjectiveResolutions C] /-- Taking injective resolutions is functorial, if considered with target the homotopy category (`ℕ`-indexed cochain complexes and chain maps up to homotopy). -/ def injectiveResolutions : C ⥤ HomotopyCategory C (ComplexShape.up ℕ) where obj X := (HomotopyCategory.quotient _ _).obj (injectiveResolution X) map X Y f := (HomotopyCategory.quotient _ _).map (injectiveResolution.desc f) map_id' X := by rw [← (HomotopyCategory.quotient _ _).map_id] apply HomotopyCategory.eq_of_homotopy apply InjectiveResolution.desc_id_homotopy map_comp' X Y Z f g := by rw [← (HomotopyCategory.quotient _ _).map_comp] apply HomotopyCategory.eq_of_homotopy apply InjectiveResolution.desc_comp_homotopy #align category_theory.injective_resolutions CategoryTheory.injectiveResolutions end section variable [Abelian C] [EnoughInjectives C] theorem exact_f_d {X Y : C} (f : X ⟶ Y) : Exact f (d f) := (Abelian.exact_iff _ _).2 <| ⟨by simp, zero_of_comp_mono (ι _) <| by rw [category.assoc, kernel.condition]⟩ #align category_theory.exact_f_d CategoryTheory.exact_f_d end namespace InjectiveResolution /-! Our goal is to define `InjectiveResolution.of Z : InjectiveResolution Z`. The `0`-th object in this resolution will just be `injective.under Z`, i.e. an arbitrarily chosen injective object with a map from `Z`. 
After that, we build the `n+1`-st object as `injective.syzygies` applied to the previously constructed morphism, and the map from the `n`-th object as `injective.d`. -/ variable [Abelian C] [EnoughInjectives C] /-- Auxiliary definition for `InjectiveResolution.of`. -/ @[simps] def ofCocomplex (Z : C) : CochainComplex C ℕ := CochainComplex.mk' (Injective.under Z) (Injective.syzygies (Injective.ι Z)) (Injective.d (Injective.ι Z)) fun ⟨X, Y, f⟩ => ⟨Injective.syzygies f, Injective.d f, (exact_f_d f).w⟩ #align category_theory.InjectiveResolution.of_cocomplex CategoryTheory.InjectiveResolution.ofCocomplex /-- In any abelian category with enough injectives, `InjectiveResolution.of Z` constructs an injective resolution of the object `Z`. -/ irreducible_def of (Z : C) : InjectiveResolution Z := { cocomplex := ofCocomplex Z ι := CochainComplex.mkHom _ _ (Injective.ι Z) 0 (by simp only [of_cocomplex_d, eq_self_iff_true, eq_to_hom_refl, category.comp_id, dite_eq_ite, if_true, comp_zero] exact (exact_f_d (injective.ι Z)).w) fun n _ => ⟨0, by ext⟩ Injective := by rintro (_ | _ | _ | n) <;> · apply injective.injective_under exact₀ := by simpa using exact_f_d (injective.ι Z) exact := by rintro (_ | n) <;> · simp apply exact_f_d Mono := Injective.ι_mono Z } #align category_theory.InjectiveResolution.of CategoryTheory.InjectiveResolution.of instance (priority := 100) (Z : C) : HasInjectiveResolution Z where out := ⟨of Z⟩ instance (priority := 100) : HasInjectiveResolutions C where out _ := inferInstance end InjectiveResolution end CategoryTheory namespace HomologicalComplex.Hom variable {C : Type u} [Category.{v} C] [Abelian C] /-- If `X` is a cochain complex of injective objects and we have a quasi-isomorphism `f : Y[0] ⟶ X`, then `X` is an injective resolution of `Y.` -/ def HomologicalComplex.Hom.fromSingle₀InjectiveResolution (X : CochainComplex C ℕ) (Y : C) (f : (CochainComplex.single₀ C).obj Y ⟶ X) [QuasiIso f] (H : ∀ n, Injective (X.pt n)) : InjectiveResolution Y where cocomplex := X ι := f Injective := H exact₀ := f.from_single₀_exact_f_d_at_zero exact := f.from_single₀_exact_at_succ Mono := f.from_single₀_mono_at_zero #align homological_complex.hom.homological_complex.hom.from_single₀_InjectiveResolution HomologicalComplex.Hom.HomologicalComplex.Hom.fromSingle₀InjectiveResolution end HomologicalComplex.Hom
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/CategoryTheory/Abelian/InjectiveResolution.lean"}
from sympy import *

x = Symbol('x')
# Earlier test function: x**4 - 1/3*x**3 - 3/2*x**2
# Note: the two 1/2*x**2 terms below cancel, so f simplifies to x**4/4.
f = 1/2*x**2 + 1/4*x**4 - 1/2*x**2
fx = lambdify(x, f, modules=['numpy'])
df = diff(fx(x), x)
dfx = lambdify(x, df, modules=['numpy'])
raiz_primeira_df = solve(df)  # critical points: roots of f'(x)
segunda_df = diff(dfx(x), x)
seg_dfx = lambdify(x, segunda_df, modules=['numpy'])

print(f'Roots of the first derivative: {raiz_primeira_df} \n')

# Second-derivative test: f''(x) > 0 -> minimum, f''(x) < 0 -> maximum,
# f''(x) == 0 -> inconclusive.
for r in raiz_primeira_df:
    if seg_dfx(r) > 0:
        print(f"Minimum at x = {r}, f''({r}) = {seg_dfx(r)}")
    if seg_dfx(r) < 0:
        print(f"Maximum at x = {r}, f''({r}) = {seg_dfx(r)}")
    if seg_dfx(r) == 0:
        print(f"Inconclusive at x = {r}, f''({r}) = {seg_dfx(r)}")

raizes_segunda_df = solve(seg_dfx(x))
print('\nInflection points (where the concavity changes)')
#CARMEN CECÍLIA CENTENO
#99652-7542
for r in raizes_segunda_df:
    print(f'Point = ({r}, f({r}) = {fx(r)})')

print(solve(1/2*(1-x)**2*(1)**2))  # stray check: roots of (1/2)*(1-x)**2
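
# ---------------------------------------------------------------------------
# Appended sketch (not part of the original assignment): the same
# second-derivative test written with plain sympy objects instead of lambdify,
# run on a hypothetical example function so the output above is unchanged.
# ---------------------------------------------------------------------------
g = x**3 - 3*x  # hypothetical example: one local max and one local min
for r in solve(diff(g, x), x):
    curv = diff(g, x, 2).subs(x, r)
    kind = 'minimum' if curv > 0 else ('maximum' if curv < 0 else 'inconclusive')
    print(f'{kind} at x = {r}, g({r}) = {g.subs(x, r)}')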
{"hexsha": "fecde5d26ee964f03b530d2d6828aafa49e55934", "size": 1063, "ext": "py", "lang": "Python", "max_stars_repo_path": "auxilia/Max_Min_calc.py", "max_stars_repo_name": "HigorAnjos/Fundamentos-VI", "max_stars_repo_head_hexsha": "e0aa3cb37e4c54d24ac7123ea3bd8038196e0edb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "auxilia/Max_Min_calc.py", "max_issues_repo_name": "HigorAnjos/Fundamentos-VI", "max_issues_repo_head_hexsha": "e0aa3cb37e4c54d24ac7123ea3bd8038196e0edb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "auxilia/Max_Min_calc.py", "max_forks_repo_name": "HigorAnjos/Fundamentos-VI", "max_forks_repo_head_hexsha": "e0aa3cb37e4c54d24ac7123ea3bd8038196e0edb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3714285714, "max_line_length": 86, "alphanum_fraction": 0.6594543744, "include": true, "reason": "from sympy", "num_tokens": 398}
using OrdinaryDiffEq, ParameterizedFunctions, Plots, LSODA, DiffEqDevTools, Sundials
using LinearAlgebra
LinearAlgebra.BLAS.set_num_threads(1)

gr() #gr(fmt=:png)

# `DIAGRAMS` is assumed to be a Dict provided by the benchmark harness that
# includes this script; define it here if it is missing so that the script
# can also be run stand-alone.
if !@isdefined(DIAGRAMS)
    DIAGRAMS = Dict()
end

f_hires = @ode_def Hires begin
  dy1 = -1.71*y1 + 0.43*y2 + 8.32*y3 + 0.0007
  dy2 = 1.71*y1 - 8.75*y2
  dy3 = -10.03*y3 + 0.43*y4 + 0.035*y5
  dy4 = 8.32*y2 + 1.71*y3 - 1.12*y4
  dy5 = -1.745*y5 + 0.43*y6 + 0.43*y7
  dy6 = -280.0*y6*y8 + 0.69*y4 + 1.71*y5 - 0.43*y6 + 0.69*y7
  dy7 = 280.0*y6*y8 - 1.81*y7
  dy8 = -280.0*y6*y8 + 1.81*y7
end

u0 = zeros(8)
u0[1] = 1
u0[8] = 0.0057
prob = ODEProblem(f_hires,u0,(0.0,321.8122))

# Reference solution at very tight tolerances, used as the "exact" solution
# in the work-precision diagrams below.
sol = solve(prob,Rodas5(),abstol=1/10^14,reltol=1/10^14)
test_sol = TestSolution(sol)

# group 1
setups = [Dict(:alg=>Rosenbrock23()),
          Dict(:alg=>TRBDF2()),
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>lsoda())]

# High Tolerance
abstols = 1 ./ 10 .^ (5:8)
reltols = 1 ./ 10 .^ (1:4);

wp = WorkPrecisionSet(prob,abstols,reltols,setups;dense = false,verbose=false,
                      appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2)
DIAGRAMS["HIRES-HighTol-Group1"] = plot(wp)

# group 2
setups = [Dict(:alg=>Rodas4()),
          Dict(:alg=>Rodas5()),
          Dict(:alg=>KenCarp4()),
          Dict(:alg=>RadauIIA5()),
          Dict(:alg=>CVODE_BDF()),
          Dict(:alg=>lsoda())]

# Low Tolerance
abstols = 1 ./ 10 .^ (7:13)
reltols = 1 ./ 10 .^ (4:10)

wp = WorkPrecisionSet(prob,abstols,reltols,setups;verbose=false,
                      dense=false,appxsol=test_sol,maxiters=Int(1e5),error_estimate=:l2)
DIAGRAMS["HIRES-LowTol-Group2"] = plot(wp)
{"hexsha": "dc99957b348a965d96c00b54a1851cba00490ccf", "size": 1591, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "diagrams/hires.jl", "max_stars_repo_name": "ordicker/OrdinaryDiffEq.jl", "max_stars_repo_head_hexsha": "6fdc99fa4da79633e0161f0bb8aaff3f39cd39bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 216, "max_stars_repo_stars_event_min_datetime": "2020-04-09T12:02:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T01:51:07.000Z", "max_issues_repo_path": "diagrams/hires.jl", "max_issues_repo_name": "ordicker/OrdinaryDiffEq.jl", "max_issues_repo_head_hexsha": "6fdc99fa4da79633e0161f0bb8aaff3f39cd39bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 381, "max_issues_repo_issues_event_min_datetime": "2020-03-26T11:41:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T12:05:58.000Z", "max_forks_repo_path": "diagrams/hires.jl", "max_forks_repo_name": "ordicker/OrdinaryDiffEq.jl", "max_forks_repo_head_hexsha": "6fdc99fa4da79633e0161f0bb8aaff3f39cd39bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 66, "max_forks_repo_forks_event_min_datetime": "2020-03-30T11:07:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T23:01:12.000Z", "avg_line_length": 28.4107142857, "max_line_length": 88, "alphanum_fraction": 0.6015084852, "num_tokens": 668}
!
! Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
! See https://llvm.org/LICENSE.txt for license information.
! SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
!
! check that loops in contained routines don't update the loop counter
! of the containing program

program p
 integer i,j,k
 integer result(4)
 integer expect(4)
 data expect/5,50,15,150/
 j = 0
 result(2) = 0
 do i = 1,5
  j = j + 1
  call sub(k)
  result(2) = result(2) + k
 enddo
 result(1) = j
 j = 0
 result(4) = 0
 do i = 1,15
  j = j + 1
  call sub(k)
  result(4) = result(4) + k
 enddo
 result(3) = j
 !print *,result
 call check(result,expect,4)
contains
 subroutine sub(k)
  integer i,k
  k = 0
  do i = 1,10
   k = k + 1
  enddo
 end subroutine
end program
{"hexsha": "ed5b9021ad2bff2a0dabd13c1640c28d424cf19b", "size": 748, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/f90_correct/src/ip12.f90", "max_stars_repo_name": "abrahamtovarmob/flang", "max_stars_repo_head_hexsha": "bcd84b29df046b6d6574f0bfa34ea5059092615a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 716, "max_stars_repo_stars_event_min_datetime": "2017-05-17T17:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T11:20:58.000Z", "max_issues_repo_path": "test/f90_correct/src/ip12.f90", "max_issues_repo_name": "abrahamtovarmob/flang", "max_issues_repo_head_hexsha": "bcd84b29df046b6d6574f0bfa34ea5059092615a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 794, "max_issues_repo_issues_event_min_datetime": "2017-05-18T19:27:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:22:11.000Z", "max_forks_repo_path": "test/f90_correct/src/ip12.f90", "max_forks_repo_name": "abrahamtovarmob/flang", "max_forks_repo_head_hexsha": "bcd84b29df046b6d6574f0bfa34ea5059092615a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 157, "max_forks_repo_forks_event_min_datetime": "2017-05-17T18:50:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:06:45.000Z", "avg_line_length": 18.243902439, "max_line_length": 79, "alphanum_fraction": 0.6577540107, "num_tokens": 262}
from typing import Any, Optional, Callable, Tuple, List

import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np

from defaults import *

winit_funcs = {
    'normal':nn.init.normal_,
    'uniform':nn.init.uniform_,
    'xavier-normal':nn.init.xavier_normal_,
    'xavier-uniform':nn.init.xavier_uniform_,
    'kaiming-normal':lambda x :nn.init.kaiming_normal_(x,nonlinearity='relu'),
    'kaiming-uniform':lambda x :nn.init.kaiming_uniform_(x,nonlinearity='relu'),
    'leaky-kaiming-normal':nn.init.kaiming_normal_,
    'leaky-kaiming-uniform':nn.init.kaiming_uniform_
}

actv_funcs = {
    'sigmoid':torch.sigmoid,
    'softmax':F.softmax,
    'tanh':torch.tanh,
    'relu':F.relu,
    'elu':F.elu,
    'leaky-relu':F.leaky_relu,
    'rrelu':F.rrelu
}

def init_weights(m, fn):
    if hasattr(m,'weight'):
        fn(m.weight)

def get_out_dims(
        inputs: np.ndarray,
        padding: np.ndarray,
        dilation: np.ndarray,
        kernel: np.ndarray,
        stride: np.ndarray
)->np.ndarray:
    """
    calculate the output dimensions of a `nn.Conv3d` layer

    :param inputs: 3 element vector for shape of input
    :param padding: 3 element vector for padding parameters
    :param dilation: 3 element vector for dilation parameters
    :param kernel: 3 element vector for kernel parameters
    :param stride: 3 element vector for stride parameters
    """
    return np.floor(1+(inputs-1+(2*padding)-(dilation*(kernel-1)))/stride).astype(np.int32)

def get_even_padding(
        inputs: np.ndarray,
        dilation: np.ndarray,
        kernel: np.ndarray,
        stride: np.ndarray,
        preserve_inputs: bool = False
)->np.ndarray:
    """
    Calculate the padding vector necessary to ensure perfect overlap of
    kernel application with input tensor.

    :param inputs: 3 element vector for shape of input
    :param dilation: 3 element vector for dilation parameters
    :param kernel: 3 element vector for kernel parameters
    :param stride: 3 element vector for stride parameters
    :param preserve_inputs: require output dimensions to be unchanged
    """
    if preserve_inputs and np.sum(stride == 1) != 3:
        raise ValueError("If input dimension is to be preserved, stride should not be > 1.")
    if preserve_inputs:
        return (dilation*(kernel-1)).astype(np.int32)
    else:
        return ((dilation*(kernel-1)+1-inputs)%stride).astype(np.int32)

class MAPnet(nn.Module):

    def __init__(
            self,
            input_shape: Tuple[int,int,int],
            n_conv_layers: Optional[int]=CONV_LAYERS,
            padding: Optional[List[int]]=[PADDING],
            dilation: Optional[List[int]]=[DILATION],
            kernel: Optional[List[int]]=[KERNEL_SIZE],
            stride: Optional[List[int]]=[STRIDE],
            filters: Optional[List[int]]=FILTERS,
            input_channels: Optional[int]=1,
            conv_actv: Optional[List[Callable[[nn.Module],nn.Module]]]=[F.relu],
            fc_actv: Optional[List[Callable[[nn.Module],nn.Module]]]=[F.relu,F.tanh,F.relu],
            even_padding: Optional[bool]=False,
            pool: Optional[str]=None,
            output_size: Optional[int]=1
    ):
        """
        Initialize an instance of MAPnet.

        :param input_shape: shape of input image.
        :param n_conv_layers: number of `nn.Conv3d` layers to use.
        :param padding: `padding` parameter for `nn.Conv3d`.
        :param dilation: `dilation` parameter for `nn.Conv3d`.
        :param kernel: `kernel_size` parameter for `nn.Conv3d`.
        :param stride: `stride` parameter for `nn.Conv3d`.
        :param filters: List of filters per layer.
        :param input_channels: Number of input channels to the model.
        :param conv_actv: List of activation functions to be used in
            convolutional layers. If only one element is supplied, then this
            activation function will be used for all layers.
        :param fc_actv: List of activation functions to be used in fully
            connected layers.
            If only one element is supplied, then this activation function
            will be used for all layers.
        :param even_padding: setting this to True will result in the padding
            parameter being ignored. Padding will be added to the input of
            each convolutional layer to ensure convolutions line up exactly
            with the input. Furthermore, layers with stride == 1 will have
            their input dimensions preserved in the output.
        :param pool: which pooling method to apply ('max' or 'avg'). If None,
            no pooling will be applied (which is the default). Pooling will
            be performed with a kernel size and stride of 2, and padding will
            be added to ensure the whole of the input is used. If
            pool = 'avg', padding will not be used to calculate the average.
        :param output_size: number of nodes in the output layer
        """
        #######################################################################
        # Sanitizing input
        #######################################################################
        if len(input_shape) != 3:
            raise ValueError("Expected input_shape to have 3 dimensions not {}".format(len(input_shape)))
        elif not ((len(filters) == n_conv_layers) or (len(filters) == 1)):
            raise ValueError("Length of filters ({}) does not match n_conv_layers ({})".format(len(filters),n_conv_layers))
        elif not ((len(conv_actv) == 1) or (len(conv_actv) == n_conv_layers)):
            raise ValueError("conv_actv argument has incorrect length")
        elif not ((len(padding) == 1) or (len(padding) == n_conv_layers)):
            raise ValueError("padding argument has incorrect length")
        elif not ((len(dilation) == 1) or (len(dilation) == n_conv_layers)):
            raise ValueError("dilation argument has incorrect length")
        elif not ((len(kernel) == 1) or (len(kernel) == n_conv_layers)):
            raise ValueError("kernel argument has incorrect length")
        elif not ((len(stride) == 1) or (len(stride) == n_conv_layers)):
            raise ValueError("stride argument has incorrect length")
        elif not ((len(fc_actv) == 1) or (len(fc_actv) == 3)):
            # the message used to say "conv_actv", which misreported the
            # offending argument
            raise ValueError("fc_actv argument has incorrect length")
        elif not (output_size > 0):
            raise ValueError("Invalid output_size: {}".format(output_size))

        super(MAPnet,self).__init__()

        #######################################################################
        # Handle the case where only 1 number is supplied
        #######################################################################
        if len(filters) == 1:
            filters = list(np.repeat(filters,n_conv_layers))
        if len(padding) == 1:
            padding = list(np.repeat(padding,n_conv_layers))
        if len(dilation) == 1:
            dilation = list(np.repeat(dilation,n_conv_layers))
        if len(kernel) == 1:
            kernel = list(np.repeat(kernel,n_conv_layers))
        if len(stride) == 1:
            stride = list(np.repeat(stride,n_conv_layers))

        # note that conv_layer_sizes will have length n_conv_layers + 1
        # because it also holds the input shape
        self.conv_layer_sizes = list([np.array(input_shape)])
        self.even_padding = even_padding # We will need this later
        self.pool = False if pool is None else True
        self.pool_layer_sizes = list()

        #######################################################################
        # Calculate layer sizes and padding if needed
        #######################################################################
        for i in range(0,n_conv_layers):
            ###################################################################
            # *** This bit is a little complicated, so I might leave a detailed
            # note explaining this next little section of code ***
            #
            # When the normal `padding` parameter is used, padding will be
            # implemented through the Conv3d layers; HOWEVER, when
            # `even_padding` is set, we must be able to pad with an odd number
            # of zeros (i.e. only on one side). Thus we will need to use a
            # `torch.nn.ConstantPad3d` layer to get the necessary size.
            #
            # Furthermore there is an extra layer of messiness introduced by
            # my attempt to allow for pooling
            ###################################################################
            if even_padding:
                #preserve = True if stride[i] == 1 else False
                preserve = False
                # TODO:
                # This is really bad... I'm changing types from
                # int to list/array here. Find a better/more clear way!
                padding[i] = get_even_padding(
                    inputs = self.pool_layer_sizes[-1] if self.pool and i \
                             else self.conv_layer_sizes[-1],
                    dilation = np.repeat(dilation[i],3),
                    kernel = np.repeat(kernel[i],3),
                    stride = np.repeat(stride[i],3),
                    preserve_inputs = preserve
                )
                # was `if (self.pool is None) or (i == 0):` -- but self.pool
                # is a bool here, so the `is None` test could never be True
                if (not self.pool) or (i == 0):
                    self.conv_layer_sizes[-1] += padding[i]
                else:
                    self.pool_layer_sizes[-1] += padding[i]

            self.conv_layer_sizes.append(
                get_out_dims(
                    self.pool_layer_sizes[-1] if self.pool and i \
                    else self.conv_layer_sizes[-1],                 # input dimensions
                    np.repeat(0 if even_padding else padding[i],3), # *padding
                    np.repeat(dilation[i],3),                       # dilation
                    np.repeat(kernel[i],3),                         # kernel
                    np.repeat(stride[i],3)                          # stride
                )
            )

            if self.pool: # was `if self.pool is not None:`, always True for a bool
                self.pool_layer_sizes.append(
                    get_out_dims(
                        self.conv_layer_sizes[-1],
                        self.conv_layer_sizes[-1]%2,
                        np.repeat(1,3),
                        np.repeat(2,3),
                        np.repeat(2,3)
                    )
                )

        #######################################################################
        # Initialize Conv3d & Pooling layers
        #######################################################################
        conv_layers = list()
        pool_layers = list()
        pad_layers = list()
        self.conv_actv = list()
        self.n_channels=list([input_channels])
        for i in range(0,n_conv_layers):
            self.n_channels.append(self.n_channels[-1] * filters[i])
            conv_layers.append(
                nn.Conv3d(
                    in_channels=int(self.n_channels[-2]),
                    out_channels=int(self.n_channels[-1]),
                    kernel_size=kernel[i],
                    stride=stride[i],
                    padding=0 if even_padding else padding[i],
                    dilation=dilation[i],
                    groups=int(self.n_channels[-2]),
                    bias=True,
                    #padding_mode='zeros'
                )
            )
            if self.pool:
                if pool == 'max':
                    pool_layers.append(
                        nn.MaxPool3d(2, padding=tuple((self.conv_layer_sizes[i+1]%2).astype(np.int32)))
                    )
                else:
                    # was `self.conv_layer_sizes[+1]`, a typo for `[i+1]`
                    pool_layers.append(
                        nn.AvgPool3d(2, padding=tuple((self.conv_layer_sizes[i+1]%2).astype(np.int32)),
                                     count_include_pad=False)
                    )
            # Manage our activation functions
            self.conv_actv.append(conv_actv[i] if len(conv_actv) > 1 else conv_actv[0])
            # And if `even_padding` was set, we will need to generate these layers
            # but the complication here is that we now need to calculate
            # (P_Left,P_Right,P_Up,P_Down,P_Front,P_Back)
            if even_padding:
                d,h,w = padding[i]
                pad = (int(np.floor(d/2)), int(np.ceil(d/2)),
                       int(np.floor(h/2)), int(np.ceil(h/2)),
                       int(np.floor(w/2)), int(np.ceil(w/2)))
                pad_layers.append(nn.ConstantPad3d(pad,0))

        self.conv_layers = nn.ModuleList(conv_layers)
        self.pool_layers = nn.ModuleList(pool_layers)
        self.pad_layers = nn.ModuleList(pad_layers) if even_padding else None

        #######################################################################
        # Initialize Fully Connected layers
        #######################################################################
        # calculate the size of flattening out the last conv layer
        layer_size = self.pool_layer_sizes[-1] if self.pool \
                     else self.conv_layer_sizes[-1]
        self.fc_input_size = int(np.prod(layer_size))*self.n_channels[-1]
        """
        print(self.conv_layer_sizes)
        print(self.pool_layer_sizes)
        print(self.fc_input_size)
        """
        fc_layers = list()
        self.output_size = output_size
        fc_layers.append(nn.Linear(self.fc_input_size,int(self.fc_input_size/2)))
        fc_layers.append(nn.Linear(int(self.fc_input_size/2),100))
        fc_layers.append(nn.Linear(100,output_size))
        self.fc_layers = nn.ModuleList(fc_layers)
        self.fc_actv = fc_actv * 3 if len(fc_actv) == 1 else fc_actv
        self.d1 = nn.Dropout()

    def forward(self,x):
        for i,conv in enumerate(self.conv_layers):
            if self.even_padding:
                x = self.pad_layers[i](x)
            actv = self.conv_actv[i]
            x = actv(conv(x))
            if self.pool:
                x = self.pool_layers[i](x)
        x = x.view(-1,self.fc_input_size)
        x = self.d1(x)
        for i,fc in enumerate(self.fc_layers):
            actv = self.fc_actv[i]
            x = actv(fc(x))
        return x
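
if __name__ == '__main__':
    # Appended smoke test (a sketch, not part of the original module): build a
    # small MAPnet and push one random batch through it to sanity-check the
    # layer-size bookkeeping. All parameter values here are arbitrary choices.
    net = MAPnet(
        input_shape=(32, 32, 32),
        n_conv_layers=2,
        padding=[1],
        dilation=[1],
        kernel=[3],
        stride=[2],
        filters=[2],
        pool='max',
    )
    batch = torch.randn(4, 1, 32, 32, 32)  # (batch, channels, depth, height, width)
    out = net(batch)
    print(out.shape)  # expected: torch.Size([4, 1])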
{"hexsha": "183954e2a7128704e334fcfefdd2ca7843409ea4", "size": 14216, "ext": "py", "lang": "Python", "max_stars_repo_path": "mapnet/model.py", "max_stars_repo_name": "ForrestCKoch/MAPnet", "max_stars_repo_head_hexsha": "2d49f20ce3c29f6b97a5ae6260bea949e6e685b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mapnet/model.py", "max_issues_repo_name": "ForrestCKoch/MAPnet", "max_issues_repo_head_hexsha": "2d49f20ce3c29f6b97a5ae6260bea949e6e685b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mapnet/model.py", "max_forks_repo_name": "ForrestCKoch/MAPnet", "max_forks_repo_head_hexsha": "2d49f20ce3c29f6b97a5ae6260bea949e6e685b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.7044025157, "max_line_length": 123, "alphanum_fraction": 0.5453714125, "include": true, "reason": "import numpy", "num_tokens": 3005}
[Open this notebook in Google Colab](https://colab.research.google.com/github/aidenaislinn/python-for-text-analysis/blob/master/Kopie_van_glm.ipynb)

# Neuroimaging week 2: modeling fMRI with the GLM

This week will be all about how most fMRI analyses are done: using the **GLM**. We'll use example data for this notebook; the notebook assumes it's stored in a folder called `data` next to the notebook. You can download these data from Canvas, or the course GitHub page. When you upload these data to Colab, make sure the notebook's paths point to where you actually stored them. If you've saved the data to your `Colab Notebooks/data` folder, that is the following location: `/content/drive/My Drive/Colab Notebooks/data/`. The code below tries to set this up so that you don't need to worry about it.

```python
# this will ask you to authenticate with Google
from google.colab import drive
drive.mount('/content/drive')

import os
os.chdir('/content/drive/My Drive/Colab Notebooks/')
```

## About this week's lab
The GLM, or the General Linear Model, is a statistical model that underlies a range of statistical models that you're probably already familiar with: (M)ANOVA, t-test, F-test, and most importantly ordinary *linear regression*. Basically, the type of fMRI analysis you are going to learn in this course (often called 'univariate analysis' or 'Statistical Parametric Mapping') is just an ordinary linear regression model adapted to time-series data.

We are going to assume that you know the basics of linear regression, such as what a beta-parameter is, what R-squared means, and what residuals are (but we'll give you a short recap on these concepts). Given that you have some basic familiarity with these concepts, you will see during this tutorial that univariate fMRI analyses using the GLM are actually very straightforward. However, while relatively simple, it is **VERY** important that you understand all the concepts in this tutorial thoroughly, because they will be the basis for ALL the upcoming lectures and tutorials from this course. You will definitely use what you learn here in the Project.

As a consequence of the importance of the GLM, this week's lab is probably going to take quite a long time again (substantially longer than the labs of the upcoming weeks). So, you'll have to work hard this week, but it'll definitely pay off. Also, the material will seem quite mathematical, but the math mostly serves an illustrative purpose: to show you how results (e.g. t-statistics) are influenced by different parts of the formulas within the GLM. Moreover, after showing and explaining the formulas, we'll work them out in code examples (which are often *way* easier to understand!). Also, after explaining a certain aspect of the GLM, we'll ask you to think about it and practice with it in ToThink and ToDo questions.

That being said, by working through this tutorial and understanding the concepts it will introduce, you will have completed the most difficult and most elaborate part of this course; from here on, it will only get easier (and will take less time)!

## What you'll learn
After this week's lab ...

* you know how the GLM is applied to fMRI data
* you are able to implement a univariate (single-voxel) t-test from scratch in Python

**Estimated time needed to complete**: 8-12 hours

## 1. Recap of linear regression
To refresh your memory on linear regression, we'll walk you through a recap of the technique's most important concepts. We are going to work through a simple example.
In the code below, `y` will denote our *dependent variable* (the variable we try to model/explain) and `X` will denote our *independent variable(s)* (the variables we're using to try to explain `y`). Throughout the entire tutorial we will use `X` to refer to our matrix of independent variables (also called "predictors" or "regressors", or simply "design matrix") and use `y` to refer to our dependent variable (also sometimes called "target"). Moreover, the independent variables are often grouped in a single matrix (a 2D array, so to speak) - which is sometimes called the "design matrix" (because it 'designs' the way we want to model our dependent variable). As stated before, in this tutorial we store our design matrix - the set of our independent variables - in the variable `X` (or slight variations on that, like `X_new` or something).

Importantly, it is often assumed (e.g. by statistics functions/software) that the design matrix takes the shape of $N\ (observations) \times P\ (predictors)$. So, the rows refer to the sampled observations (also often called "samples", "instances", or simply "data points"). The columns refer to the separate independent variables that we use to model the dependent variable. For the dependent variable, it is often assumed that this is a single column vector of shape $N \times 1$.

### 1.1. Notation
Next, let's define some more conventions in notation. We will denote the total number of observations with **$N$**. Moreover, we'll denote **$i$** as the index of samples. To give an example, the formula below gives you the sum of our target variable:

\begin{align}
\mathrm{sum}(y) = \sum_{i=1}^{N} y_{i}
\end{align}

Lastly, we denote the total number of predictors with **$P$** and **$j$** as the index of our predictors. So, for example, if we wanted to sum over our predictors for a given sample **$i$**, we'd write:

\begin{align}
\mathrm{sum}(X_{i}) = \sum_{j=1}^{P} X_{ij}
\end{align}

To practice with this notation, let's do a ToDo!

```python
# First some imports
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```

<div class='alert alert-warning'>
<b>ToDo</b>
</div>

From the variable `arr` below (an array of shape $100 \times 25$), calculate the mean over all samples ($N$) for the fourth predictor ($j = 4$ in our math notation). Store the result in a variable named `mean_predictor_4`. Remember: the math notation starts counting at 1, but Python has 0-based indexing!

```python
np.random.seed(42)
arr = np.random.normal(0, 1, size=(100, 25))

# Implement your ToDo here
mean_predictor_4 = ...
```

```python
'''Tests the above ToDo. '''
np.testing.assert_almost_equal(mean_predictor_4, np.mean(arr[:, 3]))
```

Now, let's look at an example. Throughout the example below, we will gradually explain the components of linear regression. For the example, we will use randomly generated data to create a dependent variable with 30 observations ("samples"; $N = 30$) and a single independent variable ($P = 1$) with, of course, also 30 observations. So both the independent and dependent variable are of shape $30 \times 1$.

Alright, let's get started! First, we have to import a Python implementation of linear regression. We'll use the `lstsq` ("least squares") function from the `linalg` (linear algebra) subpackage of `numpy`, but we could have also used `scipy` or `sklearn` implementations - they're all the same under the hood.

```python
from numpy.linalg import lstsq
```

Now, for our example let's create some randomly generated data.
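(A brief aside, in case you've never seen it before: `np.random.multivariate_normal` draws samples of several variables at once; with unit variances on the diagonal of the covariance matrix, the off-diagonal entries are what give the variables their correlation. A minimal sketch, not needed for the exercises:)

```python
# Minimal sketch (an aside): with unit variances, the off-diagonal covariance
# equals the correlation between the two sampled columns.
demo = np.random.multivariate_normal(mean=[0, 0], cov=[[1, .8], [.8, 1]], size=5000)
print(np.corrcoef(demo[:, 0], demo[:, 1])[0, 1])  # should be close to 0.8
```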
As discussed, we'll create two variables (of shape $30\times 1$), which have a prespecified correlation of 0.8 (normally, you don't know this before doing the analysis of course, but we specify it here for the sake of the example). We'll denote our independent variable `X` and our dependent variable `y`.

```python
np.random.seed(1)
prespecified_covariance = np.array([[1, .8], [.8, 1]])
data = np.random.multivariate_normal(mean=[3, 7], cov=prespecified_covariance, size=30)

"""
By default, when you slice out a single column (or row), numpy returns an
array of shape (some_number,) instead of (some_number, 1). However, for our
examples, we often actually want shape (some_number, 1) so essentially we
want to "add" an extra axis. This is done by the np.newaxis command.
Mess around with it yourself to see how it works!
"""
X = data[:, 0, np.newaxis]  # Here, we slice the first column (0) and immediately add a new axis!
y = data[:, 1, np.newaxis]  # same here

print('The shape of X is: %s' % (X.shape,))
print('The shape of y is: %s' % (y.shape,))
```

### 1.2. Modeling the intercept (offset)
As you probably were told in your previous statistics classes, you should always "model the intercept" when running any (regression) model. Technically, the intercept models some of the signal using a constant term. The parameter corresponding to the intercept (as calculated by linear regression), then, refers to *the average value of your $y$ variable when all predictors in $X$ are 0*. So, conceptually, the intercept models the mean when controlling for our (other) predictors.

To "model the intercept", you should add an extra "constant predictor" to your design matrix (`X`). This "constant predictor" is simply an array of shape $N \times 1$ with a constant value, usually all ones. (You'll figure out *why* you should do this later in the tutorial.)

Remember from week 1 how to create an array with ones? We can just use `np.ones(shape_of_desired_array)`!

```python
n_obs = y.size
intercept = np.ones((n_obs, 1))  # creates intercept of shape (N, 1)
```

Now, we want to add it to our design matrix (`X`). We can do this using the numpy function `np.hstack` (which is short for "horizontal stack", i.e. "stacking columns horizontally"). This function takes a tuple of arrays which should have the same number of rows (for our data: both have 30 rows) and returns a new array in which the arrays from the tuple are stacked (the stacked shape should be $30 \times 2$):

```python
tuple_with_arrays = (intercept, X)
X_with_icept = np.hstack(tuple_with_arrays)

# Note: you could also simply do ...
# X_with_icept = np.hstack((np.ones((y.size, 1)), X))
# ... but arguably this is less 'readable' than the implementation above

print("Shape of X is now: %s" % (X_with_icept.shape,))
```

Let's take a look at the X matrix ("design matrix") we have now. As you'll see, we have two columns: the first one is our intercept-predictor, and the second one is our 'regular' predictor.

```python
print(X_with_icept)
```

Now, let's take a look at the data. We'll create a scatter-plot for this (we'll leave out the intercept):

```python
plt.figure(figsize=(10, 10))
plt.scatter(X_with_icept[:, 1], y)
plt.xlabel('X', fontsize=25)
plt.ylabel('y', fontsize=25)
plt.xlim((0, 5))
plt.ylim((0, 10))
plt.show()
```

### 1.3. Interpreting parameters in linear regression
As you can see, there seems to be some positive linear relationship between $X$ (just the independent variable without the intercept) and $y$.
In other words, an increase in $X$ will lead to an increase in $y$. But, at this moment, *how much exactly* $y$ changes for an increase in $X$ is unknown. By doing a linear regression with $X$ as our predictor of $y$, we can quantify this!

The parameter, i.e. the "thing" that quantifies the influence of $X$ on $y$, calculated by this model is often called the **beta-parameter(s)** (but sometimes they're denoted as theta, or any other Greek symbol/letter). The beta-parameter quantifies exactly how much $y$ changes if you increase $X$ by 1. Or, in other words, it quantifies how much influence $X$ has on $y$. In a formula ($\delta$ stands for "change in")\*:

\begin{align}
\beta_{j} = \frac{\delta y}{\delta X_{j}}
\end{align}

As you probably realize, each predictor in $X$ (i.e., $X_{j}$) has a parameter ($\beta_{j}$) that quantifies how much influence that predictor has on our target variable ($y$). This includes the intercept, our vector of ones (which is in textbooks often denoted by $\beta_{0}$; they often don't write out $\beta_{0}X_{0}$ because, if a vector of ones is used, $\beta_{0}\cdot 1$ simplifies to $\beta_{0}$). Thus, linear regression describes a model in which a set of beta-parameters are calculated to characterize the influence of each predictor in $X$ on $y$, such that together they explain $y$ as well as possible (but the model is usually not perfect, so there will be some *error*, or "unexplained variance"). As such, we can formulate the linear regression model as follows:

\begin{align}
y = \beta_{0} + X_{1}\beta_{1} + X_{2}\beta_{2} + \ldots + X_{P}\beta_{P} + \epsilon
\end{align}

which is often written out as (and, if we count the intercept as one of the $P$ predictors, is equivalent to the formula above):

\begin{align}
y = \sum_{j=1}^{P}X_{j}\beta_{j} + \epsilon
\end{align}

Here, $\epsilon$ is the variance of $y$ that cannot be explained by our predictors (i.e., the *error*). But how does linear regression calculate the beta-parameters? The method most often used is called **'ordinary least squares'** (OLS; or just 'least squares' - remember the "`from numpy.linalg import lstsq`"?). This method tries to find "weight(s)" for the independent variable(s) such that when you multiply the weight(s) with the independent variable(s), it produces an estimate of $y$ (often denoted as $\hat{y}$, or "y-hat") that is as 'close' to the true $y$ as possible. In other words, least squares tries to 'choose' the beta-parameter(s) such that the difference between $X$ multiplied with the beta(s) (i.e. our best guess of $y$, denoted as $\hat{y}$) and the true $y$ is minimized\*. Let's just formalize this formula for the 'best estimate of $y$' (i.e. $\hat{y}$):

\begin{align}
\hat{y}_{i} = \sum_{j=1}^{P}X_{ij}\beta_{j}
\end{align}

--------

\* Actually, least squares yields *an estimate* of the "true" (i.e. the population) beta-parameter. Usually, therefore, the beta-parameter is denoted with a "hat" ($\hat{\beta}$), to indicate that it is estimated, but because that clutters the formulas too much, we leave out the hat.

Before we go into the estimation of these beta-parameters, let's practice with calculating $\hat{y}$!

<div class='alert alert-warning'>
<b>ToDo</b>
</div>

Below, we've defined a design matrix with two predictors (`this_X`) and an array with beta-estimates (`these_betas`; just pretend that these betas were estimated by us beforehand). Now, given this data, can you calculate the predicted $y$-values (i.e., $\hat{y}$)? Store these predicted $y$-values in an array named `this_y_hat`.
Hint: your `this_y_hat` array should be of shape `(100,)`

```python
this_X = np.random.normal(0, 1, (100, 2))
these_betas = np.array([5, 3])

this_y_hat = ...
```

```python
''' Tests the above ToDo'''
np.testing.assert_array_almost_equal(this_X.dot(these_betas), this_y_hat)
```

In ordinary least squares, the difference that is to be minimized is expressed as the sum of squared differences (hence the name 'least squares'!):

\begin{align}
\min_{\hat{\beta}} \sum_{i=1}^{N}\left(y_{i} - \sum_{j=1}^{P}X_{ij}\hat{\beta}_{j}\right)^2
\end{align}

While it may look daunting, this formula simply says: "find the beta(s) that minimize the difference between my prediction of $y$ (calculated as $X \cdot \beta$) and the true $y$."

While the book describes how OLS finds beta-parameters (namely by the vectorized formula: $\beta = (\mathbf{X}'\mathbf{X})^{-1}\mathbf{X}'y$), we don't expect you to understand how this works exactly. But you should understand the objective of least squares (minimizing the difference between the prediction of $y$ and the true $y$) and what role the beta-parameters play in this process (i.e. a kind of weighting factor of the predictors).

Alright, that's a lot of text (and math, ugh...). Let's actually run least squares to get the beta-parameters of our model!

```python
# Note the inputs to lstsq: the design matrix (X) and the dependent variable (y)
# We also input "rcond=None"; this is only to silence a warning from numpy,
# it doesn't change the function itself (you can ignore this for now)
output_lstsq = lstsq(X_with_icept, y, rcond=None)
beta = output_lstsq[0]
print('The betas of my model are: %r' % beta.tolist())

# Also note that there are more outputs of the function lstsq().
# For now, we're only interested in the first output, which are the model's estimated betas.
# To get these, we immediately index the outputs by [0]
# We could have done this more concisely by (but didn't for clarity):
# beta = lstsq(X_with_icept, y, rcond=None)[0]
```

"What? Why are there two beta-parameters?", you might think. This is of course because you also use the intercept as a predictor, which also has an associated beta-value (weighting factor). Here, the first beta refers to the intercept of the model (because it's the first column in the design-matrix)! The second beta refers to our 'original' predictor. Thus, the model found by least squares for our generated data (i.e. the model that leads to our best estimate of $y$, $\hat{y}$) is:

\begin{align}
\hat{y} = X_{1} \cdot 4.259 + X_{2} \cdot 0.882
\end{align}

And since our intercept (here $X_{1}$) is a vector of ones, the formula simplifies to:

\begin{align}
\hat{y} = 4.259 + X_{2} \cdot 0.882
\end{align}

Now, let's calculate our predicted value of $y$ ($\hat{y}$) by implementing the above formula, multiplying our betas with the corresponding predictors (intercept and original predictor). Here, because we have two predictors, we simply add the two "`predictor * beta`" terms to get the final $\hat{y}$.

```python
y_hat = X_with_icept[:, 0] * beta[0] + X_with_icept[:, 1] * beta[1]
print('The predicted y-values are: \n\n%r' % y_hat)
```
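By the way, remember the vectorized formula $\beta = (\mathbf{X}'\mathbf{X})^{-1}\mathbf{X}'y$ from above? You can type it almost literally into numpy. The sketch below is just an aside (you won't need it for the exercises), showing that it reproduces the betas `lstsq` gave us:

```python
# An aside (sketch): solving the 'normal equations' beta = (X'X)^-1 X'y directly.
# This should match the betas from lstsq up to numerical precision.
XtX_inv = np.linalg.inv(X_with_icept.T.dot(X_with_icept))
beta_manual = XtX_inv.dot(X_with_icept.T).dot(y)
print('Betas via the normal equations: %r' % beta_manual.tolist())
```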
Actually, using matrix algebra (which is often used in the textbook), there is a 'trick' to quickly sum the results of two vector multiplications, called the dot-product. Check it out below:

```python
y_hat2 = X_with_icept.dot(beta)
print('The predicted y-values (using dot-product) are: \n\n%r' % y_hat2.T)
```

In this tutorial (and upcoming ones), you will probably see the dot-notation for matrix multiplication more often, so understand that it (in this context) simply multiplies the columns of X with the corresponding betas and (element-wise) sums these columns to get the $\hat{y}$ values! Thus, this notation:

\begin{align}
\hat{y}_{i} = \sum_{j=1}^{P}X_{ij}\hat{\beta}_{j}
\end{align}

... is exactly the same as the dot-product notation using matrix algebra:

\begin{align}
\hat{y}_{i} = \mathbf{X}_{i}\mathbf{\hat{\beta}}
\end{align}

You can usually recognize the matrix-algebra implementations of formulas by the use of bold variables (such as $\mathbf{X}$), as in the formula above.

*You will calculate `y_hat` quite a lot throughout this lab; please use the dot-product-method to calculate `y_hat`, because this will prevent errors in the future!*

So, use this ...

```python
y_hat = X.dot(betas)
```

instead of ...

```python
y_hat = X[:, 0] * betas[0] + X[:, 1] * betas[1]
```

Now, let's plot the predicted $y$ values ($\hat{y}$) against the true $y$ values ($y$).

```python
plt.figure(figsize=(10, 10))
plt.scatter(X, y)
plt.xlabel('X', fontsize=25)
plt.ylabel('y', fontsize=25)
x_lim = (0, 5)
plt.xlim(x_lim)
plt.ylim((0, 10))

y_hat = X_with_icept.dot(beta)  # using the matrix algebra approach!
plt.plot(X, y_hat, marker='.', c='tab:orange', markersize=10)
plt.legend(['Predicted y', 'True y'])
plt.show()
```

Actually, let's just plot the predicted y-values as a line (effectively interpolating between adjacent predictions) - this gives us the linear regression plot as you've probably seen many times in your statistics classes!

```python
plt.figure(figsize=(10, 10))
plt.scatter(X, y)
plt.xlabel('X', fontsize=25)
plt.ylabel('y', fontsize=25)
plt.xlim(x_lim)
plt.ylim((0, 10))

y_min_pred = beta[0] + beta[1] * x_lim[0]
y_max_pred = beta[0] + beta[1] * x_lim[1]
plt.plot(x_lim, [y_min_pred, y_max_pred], ls='-', c='tab:orange', lw=3)
plt.legend(['Predicted y', 'True y'])
plt.title('Linear regression of X onto y')
plt.show()
```

### 1.4. Residuals and model fit
Alright, so now we have established the beta-values that lead to the best prediction of $y$ - in other words, the best fit of our model. But how do we quantify the fit of our model? One way is to look at the difference between $\hat{y}$ and $y$, which is often referred to as the model's **residuals**. This difference between $\hat{y}$ and $y$ - the residuals - is the exact same thing as the $\epsilon$ in the linear regression model, i.e. the **error** of the model. Thus:

\begin{align}
residual = y - \hat{y} = \epsilon
\end{align}

To visualize the residuals (plotted as red dashed lines):

```python
plt.figure(figsize=(10, 10))
plt.scatter(X, y)
plt.xlabel('X')
plt.ylabel('y')
plt.xlim(x_lim)
plt.ylim((0, 10))

y_min_pred = beta[0] + beta[1] * x_lim[0]
y_max_pred = beta[0] + beta[1] * x_lim[1]
plt.plot(x_lim, [y_min_pred, y_max_pred], ls='-', c='orange')
plt.title('Linear regression of X onto y')

for i in range(y.size):
    plt.plot((X[i], X[i]), (y_hat[i], y[i]), linestyle='--', c='red', lw=2)

plt.legend(['Predicted y', 'True y', 'Residual'])
plt.show()
```

In fact, the model fit is often summarized as the **mean of the squared residuals** (also called the 'mean squared error' or MSE), which is thus simply the (length of the) red lines squared and averaged.
In other words, the MSE refers to the average squared difference between our predicted $y$ and the true $y$\*:

\begin{align}
MSE = \frac{1}{N}\sum_{i=1}^{N} (y_{i} - \hat{y}_{i})^2
\end{align}

\* The "$\frac{1}{N}\sum_{i=1}^{N}$" is just a different (but equally correct) way of writing "the average of all (squared) residuals from sample 1 to sample N".

<div class='alert alert-warning'>
<b>ToDo</b>
</div>

Calculate the MSE for our previous model predictions (`y_hat`) based on our linear regression model predicting `y` from `X_with_icept`. *Do not use a for-loop for this.* You know how to do this without a loop, using vectorized numpy array math. Store the result in a variable named `mse`.

```python
# Implement your ToDo here
mse = ...
```

```python
''' Tests the above ToDo. '''
np.testing.assert_almost_equal(mse, np.mean((y - y_hat) ** 2))
```

Another metric for model fit in linear regression is "R-squared" ($R^2$). R-squared is calculated as follows:

\begin{align}
R^2 = 1 - \frac{\sum_{i=1}^{N}(y_{i} - X_{i}\hat{\beta})^2}{\sum_{i=1}^{N}(y_{i} - \bar{y})^2}
\end{align}

where $\bar{y}$ represents the mean of $y$.

As you can see, the formula for R-squared consists of two parts: the numerator ($\sum_{i=1}^{N}(y_{i} - \hat{y}_{i})^2$) and the denominator ($\sum_{i=1}^{N}(y_{i} - \bar{y})^2$). The denominator represents the *total* amount of squared error of the actual values ($y$) relative to the mean ($\bar{y}$). The numerator represents the squared error that *remains* when incorporating knowledge from our (weighted) independent variables ($X_{i}\hat{\beta}$). So, in a way you can interpret R-squared as *how much better my model including `X` is versus a model that only uses the mean*. Another conventional interpretation of R-squared is the amount of variance in our target ($y$) that our predictors ($X$) together can explain.

As expected, the code is quite straightforward:

```python
numerator = np.sum((y - y_hat) ** 2)  # remember, y_hat equals X * beta
denominator = np.sum((y - np.mean(y)) ** 2)
r_squared = 1 - numerator / denominator
print('The R² value is: %.3f' % r_squared)
```

<div class='alert alert-warning'>
<b>ToDo</b>
</div>

Below, we've defined a design matrix (`X_test`, including an intercept) and a dependent variable (`y_test`). Run a linear regression model and calculate R-squared. Store the R-squared value (which should be a single number, a float) in a variable named `r_squared_test`.

```python
data_tmp = np.load('data/data_todo_rsquared.npz')
X_test, y_test = data_tmp['X'], data_tmp['y']

b = lstsq...
```

<div class='alert alert-info'>
<b>ToThink</b>
</div>

As discussed earlier, it's important to model the intercept in regression models. This is because it often greatly *improves model fit*! In this ToThink, you have to explain *why* modelling the intercept (usually) improves model fit.

To give you some clues, we re-did the linear regression computation from above, but now without the intercept in the design matrix. We plotted the data (`X_no_icept`, `y`) and the model fit to get some intuition about the use of an intercept in models. In the text-cell below the plot, explain (concisely!) why modelling the intercept (usually) improves model fit (this is not graded).
```python
X_no_icept = X_with_icept[:, 1, np.newaxis]
beta_no_icept = lstsq(X_no_icept, y, rcond=None)[0]
y_hat_no_icept = beta_no_icept * X_no_icept

plt.figure(figsize=(10, 10))
plt.scatter(X, y)
plt.xlabel('X', fontsize=25)
plt.ylabel('y', fontsize=25)
plt.xlim((0, 5))
plt.ylim((0, 10))

y_min_pred = beta_no_icept[0] * x_lim[0]
y_max_pred = beta_no_icept[0] * x_lim[1]
plt.plot(x_lim, [y_min_pred, y_max_pred], ls='-', c='orange')
plt.title('Linear regression of X (without intercept!) onto y', fontsize=20)

for i in range(y.size):
    plt.plot((X[i], X[i]), (y_hat_no_icept[i], y[i]), linestyle='--', c='red', lw=2)

plt.show()
```

A model without an intercept is "forced" to draw its line through the origin (0, 0), failing to explain much of the variance of targets (potential $y$ vectors) that have an offset/scale that is clearly far from 0 (which should be clear from the plot).

### Summary: linear regression
Alright, hopefully this short recap on linear regression has refreshed your knowledge and understanding of important concepts such as predictors/design matrix ($X$), target ($y$), least squares, beta-parameters, intercept, $\hat{y}$, residuals, MSE, and $R^2$.

In sum, for a linear regression analysis you need some predictors ($X$) to model some target ($y$). You perform ordinary least squares to find the beta-parameters that minimize the sum of squared residuals. To assess model fit, you can look at the mean squared error (mean of $(\hat{y} - y)^2$) or simply the squared correlation between the predicted and the actual $y$ values ($R^2 = corr(\hat{y}, y)^2$). If you understand the above sentence, you're good to go!

Before we go on to the really interesting stuff (modelling fMRI data with linear regression), let's test how well you understand linear regression so far.

<div class='alert alert-warning'>
<b>ToDo</b>
</div>

Now, you're going to implement your own linear regression on a new set of variables, but with a twist: you're going to use 5 predictors this time - we've generated the data for you already. You'll notice that the code isn't much different from when you'd implement linear regression for just a single predictor (+ intercept). In the end, you should have calculated MSE and $R^2$, which should be stored in variables named `mse_todo` and `r2_todo` respectively.

*Note, though, that it **isn't** possible to plot the data (either X, y, or y_hat) because we have more than one predictor now; X is 5-dimensional (6-dimensional if you include the intercept) - and it's impossible to plot data in 5 dimensions!*

To give you some handles on how to approach the problem, you can follow these steps:

1. Check the shape of your data: is the shape of X `(N, P)`? is the shape of y `(N, 1)`?
2. Add an intercept to the model using `np.hstack`;
3. Calculate the beta-parameters using `lstsq()`;
4. Calculate the MSE;
5. Calculate R-squared.

```python
# Here, we load the data
data = np.load('ToDo.npz')
X, y = data['X'], data['y']
```

```python
# 1. Check the shape of X and y
```

```python
# 2. Add the intercept (perhaps define N first, so that your code will be more clear?) using np.hstack()
```

```python
# 3. Calculate the betas using lstsq()
```

```python
# 4. Calculate the MSE (store it in a variable named mse_todo)
mse_todo = ...
```

```python
# 5. Calculate R-squared (store it in a variable named r2_todo)
r2_todo = ...
```

```python
''' Tests the ToDo above, MSE part (only hidden tests). '''
print("Your answer is tested later by hidden tests! "
      "(i.e., you can't see whether it's correct at this moment)")
assert(np.round(mse_todo, 3) == 0.656)
```

```python
''' Tests the ToDo above, R2 part (only hidden tests). '''
print("Your answer is tested later by hidden tests! "
      "(i.e., you can't see whether it's correct at this moment)")
assert(np.round(r2_todo, 4) == 0.3409)
```

<div class='alert alert-info'>
<b>ToThink</b>
</div>

Let's check whether you understand what a particular beta-parameter means.

- Some of the betas are negative (i.e., $< 0$); what does this tell you about the effect of that particular condition/predictor?
- The intercept-parameter (i.e., $\beta_{0}$) should be about 6.6. What does this value tell us about the signal?

Write your answers in the text-cells below.

Negative betas simply state that an increase in a particular predictor leads to a decrease in the target (and vice versa).

The intercept-parameter represents the 'baseline' of the signal, i.e., the average activity when there's no event (stimulus). In other words, it represents the average activity of $y$ when all predictors are at zero ($X_{j} = 0$ for every predictor $j$).

If you've finished the ToDo exercise and you're confident that you understand linear regression, you're ready to start with the fun part: applying linear regression to fMRI data!

## 2. GLM in fMRI analyses
Univariate fMRI analyses basically use the same linear regression model as we've explained above to model the activation of voxels (with some minor additions) based on some design-matrix.

### 2.1. The target ($y$)
However, compared to "regular" data, one major difference is that *the dependent variable ($y$) in fMRI analyses is timeseries data*, which means that the observations of the dependent variable (activation of voxels) vary across time. What does such time-series data look like? Let's look at a (simulated) time-series from a single voxel:

```python
# import some stuff if you haven't done that already
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import lstsq
%matplotlib inline
```

```python
voxel_signal = np.load('data/example_voxel_signal.npy')

plt.figure(figsize=(25, 5))
plt.plot(voxel_signal, 'o')
plt.xlabel('Time points (volumes)', fontsize=20)
plt.ylabel('Activity (arbitrary units)', fontsize=20)
x_lim, y_lim = (0, 400), (-2.5, 4)
plt.xlim(x_lim)
plt.ylim(y_lim)
plt.title('Example of voxel signal', fontsize=25)
plt.show()
```

So, the voxel timeseries (i.e. activation over time; often called 'signal') is our dependent variable ($y$). Thus, the different time points (with corresponding activity values) make up our observations/samples!

<div class='alert alert-info'>
<b>ToThink</b>
</div>

Suppose that the TR ("time to repetition", i.e. how long it takes to measure each volume) of our acquisition was 2 seconds and we acquired 400 volumes (measurements) in our fMRI run (as you can see on the x-axis in the plot above) -- then how long did the experiment take in *seconds*? (not graded, so you don't have to write anything down!)

So, in the plot above, the data points represent the activity (in arbitrary units) of a single voxel across time (measured in volumes). This visualization of the time-series data as discrete measurements is not really intuitive. Usually, we plot the data as a continuous line over time (but always remember: fMRI data is a discretely sampled signal -- *not* a continuous one).
Let's plot it as a line:

```python
plt.figure(figsize=(25, 5))
plt.plot(voxel_signal)
plt.xlabel('Time points (volumes)', fontsize=20)
plt.ylabel('Activity (arbitrary units)', fontsize=20)
plt.xlim(x_lim)
plt.ylim(y_lim)
plt.title('Example of voxel signal', fontsize=25)
plt.show()
```

Alright, this looks better. One important difference between time-series data and "regular" (non-time-series) data (as is common in most psychology research) is that *measurements in time-series data are often dependent*, while non-time-series data usually isn't. For example, suppose that I measure the height of 100 people (i.e., non-time-series data). My measurement of person 25 is not dependent on the measurement of person 24 or person 26. In other words, it does not matter if I shuffle the measurements (i.e., the vector with 100 height measurements) for my analyses (e.g., if I wanted to do a t-test between the height of men and women in my sample). This is basically what is meant by the statement that the observations are *independent*.

For time-series data, however, measurements are usually dependent from one observation to the next. For example, suppose that I observe the value of a certain stock on the stock market on a particular day, and that the next day the stock increases slightly in value. It is then (relatively) likely that the stock the day after that again increases in value. In other words, the *measurements are dependent* (in time; another term for this is that they are "autocorrelated"). Consequently, shuffling time-series data will usually mess up analyses. (You can actually check this dependence in our example voxel signal; see the quick sketch below.)
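As an aside, here is a quick sketch of that dependence in our own data: the correlation between the voxel signal and a copy of itself shifted by one volume (the 'lag-1 autocorrelation'), which is typically well above zero for fMRI data.

```python
# A quick sketch (an aside): lag-1 autocorrelation of the example voxel signal.
# A value well above 0 means that successive time points are dependent.
sig = voxel_signal.squeeze()  # make sure we have a flat (400,) array
lag1_r = np.corrcoef(sig[:-1], sig[1:])[0, 1]
print('Lag-1 autocorrelation: %.3f' % lag1_r)
```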
### 2.2. The predictors ($X$), or: what should we use to model our signal ($y$)?

So, we know what our target is (the time-series data), but what do we use to model/explain our signal? Well, in most neuroimaging research, your predictors are defined by your experimental design! In other words, your predictors consist of *whatever you think influenced your signal*.

This probably sounds nonsensical, which is likely because we derive our independent variables (predictors) differently in most (observational) psychological research. In (observational) psychological studies, *both the independent variables and the dependent variables are __measured__*. In other words, our predictors are just other variables that we measured in our study. In neuroimaging research, however, we often derive our predictors not from measured variables but from properties of the particular experiment that we run in the MRI-scanner (or during EEG/MEG acquisition, for that matter). In other words, we can use any property of the experiment that we believe explains our signal.

Alright, this probably still sounds vague. Let's imagine a (hypothetical) experiment in which we show subjects images of either circles or squares during fMRI acquisition, as depicted in the image below:

Note that the interstimulus interval (ISI, i.e., the time between consecutive stimuli) of 50 seconds, here, is quite unrealistic; often, fMRI experiments have a much shorter ISI (e.g., around 3 seconds). Here, we will use a hypothetical experiment with an ISI of 50 seconds because that simplifies things a bit and will make the figures easier to interpret.

Anyway, let's talk about what predictors we could use given our experimental paradigm. One straightforward suggestion is that our signal is influenced by the stimuli we show the participant during the experiment. As such, we could construct a predictor that predicts some response in the signal when a stimulus (here: a square or a circle) is present, and no response when a stimulus is absent. Fortunately, we kept track of the onsets (in seconds!) of our stimuli during the experiment:

```python
onsets_squares = np.array([10, 110, 210, 310, 410, 510, 610, 710])
onsets_circles = np.array([60, 160, 260, 360, 460, 560, 660, 760])
```

In other words, the first circle-stimulus was presented 60 seconds after the scan started and the last square-stimulus was presented 710 seconds after the scan started.

For now, we'll ignore the difference between square-stimuli and circle-stimuli by creating a predictor that lumps the onsets of these two types of stimuli together in one array. This predictor thus reflects the hypothesis that the signal is affected by the presence of a stimulus (regardless of whether this was a square or a circle). (Later in the tutorial, we'll explain how to *compare* the effects of different conditions.) We'll call this predictor simply `onsets_all`:

```python
onsets_all = np.concatenate((onsets_squares, onsets_circles))
print(onsets_all)
```

Now, we need to do one last thing: convert the `onsets_all` vector into a proper predictor. Right now, the variable contains only the onsets, but a predictor should be an array with the same shape as the target (here: $400 \times 1$). Given that our predictor should represent the hypothesis that the signal responds to the presence of a stimulus (and doesn't respond when a stimulus is absent), we can construct our predictor as a vector of all zeros, except at the indices corresponding to the onsets of our stimuli. We do this below:

```python
predictor_all = np.zeros((800, 1))

# indexing only works with an array/list of integers (we had floats), so we have to convert
# the datatype of values in onsets_all to int (using the method astype())
predictor_all[onsets_all.astype(int)] = 1
print("Shape of predictor: %s" % (predictor_all.shape,))
print("\nContents of our predictor array:\n%r" % predictor_all.T)
```

However, if you look back at the plot of the voxel signal, you might notice that there is a problem with our stimulus-predictor - it seems to be on a different scale than the signal. And that's true! The signal from the voxel is measured in volumes (in total 400) while the stimulus-onsets are defined in seconds (in total 800)! We can solve this by "downsampling" our onsets-array to represent the onsets on the scale of our TR. (Usually, you would downsample your onsets/predictors much later in your analysis, but for the sake of the example, we'll do it here already.) To downsample, we're simply going to keep only our "even" samples (i.e., timepoints 0, 2, 4, ... 798) using a fancy Python slice-operation:

```python
predictor_all_ds = predictor_all[0::2]
print("The downsampled predictor now has a shape of: %s" % (predictor_all_ds.shape,))
```

Awesome! Now, we have a predictor ($X$) and a target ($y$) of the same shape, so we can apply linear regression!
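One subtlety worth checking (our own aside, not part of the original tutorial): keeping only the even-numbered samples is safe here because every onset happens to fall on an even second; an onset at an odd second would be silently dropped by the `[0::2]` slice. A quick assertion makes this explicit:

```python
# Our own sanity check: all onsets fall on even seconds, so no "spikes"
# are lost by keeping only the even-numbered samples
assert np.all(onsets_all % 2 == 0), "Some onsets fall on odd seconds!"
print("Number of events before downsampling: %d" % int(predictor_all.sum()))
print("Number of events after downsampling:  %d" % int(predictor_all_ds.sum()))
```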
But before we do this, let's plot the predictor and the signal in the same plot:

```python
plt.figure(figsize=(25, 10))
plt.plot(voxel_signal)
plt.plot(predictor_all_ds, lw=2)  # note: the *downsampled* predictor, which is on the same scale as the signal
plt.xlim(x_lim)
plt.ylim(y_lim)
plt.xlabel('Time (in volumes)', fontsize=20)
plt.ylabel('Activity (A.U.)', fontsize=20)
plt.legend(['Voxel-timeseries', 'Predictor'], fontsize=15, loc='upper right')
plt.title("Signal and the associated design", fontsize=25)
plt.show()
```

Realize that plotting the predictor and the signal in the same plot is different from the case of non-timeseries data! For non-timeseries data, we would plot a scatterplot with the target ($y$) on the y-axis and the predictor (a column of $X$) on the x-axis. This is also possible for timeseries data, but because both the predictor and the target represent values across time, we can plot them "on the same axis". The nice thing about this is that for timeseries data, we can plot as many predictors in the same plot as we want!

Anyway, in the above plot the orange line represents our predictor, which embodies the hypothesis that the activity of the signal (the blue line) is significantly different when a stimulus is presented (the peaks in the orange line) than when no stimulus is presented (the flat parts of the orange line). Or, phrased differently (but mathematically equivalently): what is the effect of a unit increase in the predictor ($X = 0 = no\ stimulus \rightarrow X = 1 = stimulus$) on the target (the signal)? We can answer this question with linear regression, of course!

### 2.3. Regression on fMRI data & interpretation of parameters

As said before, applying regression analysis to fMRI data works largely the same as for regular non-timeseries data. As always, we first need to stack an intercept.

<div class='alert alert-warning'> <b>ToDo</b> </div>

Stack an intercept onto the predictor (`predictor_all_ds`) and store the result in a variable named `X_simple`. Then, run linear regression on the signal (`voxel_signal`) and save the beta-parameters in a new variable named `betas_simple`. Finally, calculate the MSE and $R^2$ for this model and store these values in new variables named `mse_simple` and `r2_simple`.

```python
# Implement the ToDo here
X_simple = ...
betas_simple = ...
y_hat_simple = ...
mse_simple = ...
r2_simple = ...
```

If you've done the ToDo correctly, you should have found the following beta-parameters: 0.229 for the intercept and 0.290 for our stimulus-predictor. This means that our linear regression model for that voxel is as follows:

\begin{align}
y_{voxel} = \beta_{intercept} + X_{stim}\beta_{stim} + \epsilon = 0.229 + X_{stim}0.290 + \epsilon
\end{align}

This simply means that for a unit increase in $X$ (i.e., $X = 0 \rightarrow X = 1$), $y$ increases by 0.290. In other words, on average the signal is 0.290 higher when a stimulus is present compared to when a stimulus is absent! To aid interpretation, let's plot the signal ($y$) and the predicted signal ($\hat{y} = X\hat{\beta}$) in the same plot.
```python
des = np.hstack((np.ones((400, 1)), predictor_all_ds))
betas_simple = np.linalg.lstsq(des, voxel_signal, rcond=None)[0]

plt.figure(figsize=(25, 10))
plt.plot(voxel_signal)
plt.plot(des.dot(betas_simple), lw=2)
plt.xlabel('Time (in volumes)', fontsize=20)
plt.ylabel('Activity (A.U.)', fontsize=20)
plt.xlim(x_lim)
plt.ylim(y_lim)
plt.legend(['True signal', 'Predicted signal'], loc='upper right', fontsize=15)
plt.title("Signal and predicted signal", fontsize=25)
```

The orange line represents the predicted signal, which is based on the original predictor ($X$) multiplied (or "scaled") by the associated beta-parameters ($\beta$). Graphically, you can interpret the beta-parameter of the stimulus-predictor ($\beta_{stim}$) as the height of the peaks in the orange line above its baseline\* and the beta-parameter of the intercept ($\beta_{intercept}$) as the difference between the flat portion of the orange line and 0 (i.e. the "offset" of the signal).

---
\* This holds true only when the maximum value of the original predictor is 1 (which is true in our case)

Let's zoom in on a portion of the data to show this:

```python
des = np.hstack((np.ones((400, 1)), predictor_all_ds))
betas_simple = np.linalg.lstsq(des, voxel_signal, rcond=None)[0]
des = des[20:60, :]

plt.figure(figsize=(10, 5))
plt.plot(voxel_signal[20:60])
plt.plot(des.dot(betas_simple), lw=3)
plt.xlabel('Time (in volumes)', fontsize=20)
plt.ylabel('Activity (A.U.)', fontsize=20)
plt.xlim(0, 40)
plt.ylim(0, 1.1)

plt.annotate('', xy=(10, betas_simple[0]), xytext=(10, betas_simple[1] + betas_simple[0]),
             arrowprops=dict(arrowstyle='<->', lw=3))
plt.text(10, betas_simple.sum() + 0.05, r'$\beta_{stim}$', horizontalalignment='center', fontsize=20)

plt.annotate('', xy=(35, betas_simple[0]), xytext=(35, betas_simple[1] + betas_simple[0]),
             arrowprops=dict(arrowstyle='<->', lw=3))
plt.text(35, betas_simple.sum() + 0.05, r'$\beta_{stim}$', horizontalalignment='center', fontsize=20)

plt.annotate('', xy=(12, 0), xytext=(12, betas_simple[0]),
             arrowprops=dict(arrowstyle='<->', lw=2))
plt.text(12.5, 0.05, r'$\beta_{intercept}$', fontsize=20)

plt.legend(['True signal', 'Predicted signal'], fontsize=15, loc='upper right')
plt.xticks(np.arange(0, 41, 5), np.arange(20, 61, 5));
```

Anyway, there seems to be an effect on voxel activity when we show a stimulus (an increase of 0.290 in the signal on average), but (if you've done the ToDo correctly) you've also seen that the model fit is quite bad ($R^2 = 0.006$, about 0.6% explained variance) ... What is happening here? Is our voxel just super noisy? Or is something wrong with our model? We'll talk about this in the next section!

### 2.4. Using the BOLD-response in GLM models

Let's go back to our original idea behind the predictor we created. We assumed that in order to model activity in response to our stimuli, our predictor should capture an increase/decrease in activity *at the moment of stimulus onset*. But this is, given our knowledge of the BOLD-response, an unrealistic assumption: it is impossible to measure instantaneous changes in neural activity in response to stimuli or tasks with fMRI, *because the BOLD-response is quite slow and usually peaks around 5-7 seconds **after** the 'true' neuronal activity (i.e., at the cellular level)*. In the above model, we have incorporated neither the lag (i.e. ~6 seconds) nor the shape of the BOLD-response: we simply modelled activity as a response to an instantaneous stimulus event.
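Before we formalize this in the next section, here is a quick illustrative check (our own addition, not an official part of the tutorial): if we artificially shift the unconvolved predictor forward in time and refit the model, the fit should improve for small positive shifts if the voxel indeed responds with a delay.

```python
# Our own illustrative sketch: shift the spike-predictor forward in time
# (1 volume = 2 seconds) and see how R-squared changes
ss_total = np.sum((voxel_signal - voxel_signal.mean()) ** 2)
for shift in range(5):
    shifted = np.roll(predictor_all_ds, shift, axis=0)
    X_shift = np.hstack((np.ones((400, 1)), shifted))
    b_shift = np.linalg.lstsq(X_shift, voxel_signal, rcond=None)[0]
    resid = voxel_signal - X_shift.dot(b_shift)
    r2_shift = 1 - np.sum(resid ** 2) / ss_total
    print("Shift of %d volumes (%d sec.): R-squared = %.4f" % (shift, shift * 2, r2_shift))
```

If the haemodynamic lag is real, the best-fitting shift should be somewhere around 2-3 volumes (4-6 seconds). But even a well-placed spike still ignores the *shape* of the response, which is exactly what the HRF (discussed next) takes care of.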
You can imagine that if we incorporate this knowledge about the BOLD-response into our model, the fit will likely get better! In this section, we'll investigate different ways to incorporate knowledge of the BOLD-response in our predictors.

#### 2.4.1. The canonical HRF

The easiest and most often-used approach to incorporating knowledge about the BOLD-response in univariate analyses of fMRI data is to assume that each voxel responds to a stimulus in a fixed way. In other words, that voxels always respond (activate/deactivate) to a stimulus in the same manner. This is known as using a "canonical haemodynamic response function (HRF)". Basically, an HRF is a formalization of how we think a voxel is going to respond to a stimulus. A *canonical* HRF is an implementation of an HRF in which you use the same HRF for each voxel, participant, and condition. There are other implementations of HRFs (apart from the canonical one) in which you can adjust the exact shape of the HRF based on the data you have; examples of these are *temporal basis sets* and *finite impulse response* (FIR) models, which we'll discuss later.

There are different types of (canonical) HRFs; each models the assumed shape of the BOLD-response slightly differently. For this course, we'll use the most often used canonical HRF: the double-gamma HRF (which is a combination of two gamma functions). The double-gamma HRF looks like this:

```python
def double_gamma(x, lag=6, a2=12, b1=0.9, b2=0.9, c=0.35, scale=True):
    a1 = lag
    d1 = a1 * b1
    d2 = a2 * b2
    hrf = np.array([(t/(d1))**a1 * np.exp(-(t-d1)/b1) - c*(t/(d2))**a2 * np.exp(-(t-d2)/b2)
                    for t in x])
    if scale:
        hrf = (1 - hrf.min()) * (hrf - hrf.min()) / (hrf.max() - hrf.min()) + hrf.min()
    return hrf

def single_gamma(x, lag=6, b=0.9, scale=True):
    a = lag
    d = a * b
    hrf = (x/d)**a * np.exp(-(x-d)/b)
    if scale:
        hrf = (1 - hrf.min()) * (hrf - hrf.min()) / (hrf.max() - hrf.min()) + hrf.min()
    return hrf

# Time-points refers to the desired length of the array
# representing the HRF. Does not matter too much (as long
# as it incorporates the full shape of the HRF, here: 25 seconds)
time_points = np.arange(25)
dg_hrf = double_gamma(time_points, lag=6)

plt.plot(dg_hrf)
plt.xlabel('Time (in seconds!) after stimulus onset')
plt.ylabel('Activity (A.U.)')
plt.title('Double gamma HRF');
```

Note that the output of the HRF is defined in seconds! That is, it's on the same scale as our stimulus-predictor (the one that's not yet downsampled). But how should we incorporate this HRF into our model? Traditionally, this is done using a mathematical operation called **convolution**. Basically, convolution "slides" the HRF across our 0-1 coded stimulus-vector from left to right, at each step elementwise multiplying the HRF with the stimulus-vector and summing the result. This is often denoted as:

\begin{align}
X_{conv} = \mathrm{HRF} * X_{original}
\end{align}

in which $*$ is the symbol for convolution, $X_{original}$ is the original stimulus-vector, and $X_{conv}$ is the result of the convolution. Let's plot an example to make it clearer. Suppose we have an onset-vector of length 100 (i.e., the experiment was 100 seconds long) with three stimulus presentations: at $t = 10$, $t = 40$, and $t = 70$.
The stimulus-vector (upper plot), the double-gamma HRF (right plot), and the result of the convolution of the stimulus-vector with the HRF (lower plot) look as follows:

```python
random_stimulus_onsets = [10, 40, 70]
random_stim_vector = np.zeros(100)  # length 100: the experiment was 100 seconds long
random_stim_vector[random_stimulus_onsets] = 1

plt.figure(figsize=(15, 6))
plt.subplot2grid((3, 3), (0, 0), colspan=2)
plt.plot(random_stim_vector)
plt.xlim((0, 100))
plt.ylim((0, 1))
plt.ylabel('Activity (A.U.)')
plt.xlabel('Time (seconds)')
plt.title('Stimulus events', fontsize=20)

plt.subplot2grid((3, 3), (0, 2), rowspan=2)
plt.plot(dg_hrf)
plt.title('HRF', fontsize=20)
plt.xlim(0, 24)
plt.xlabel("Time (seconds)")

convolved_stim_vector = np.convolve(random_stim_vector, dg_hrf, 'full')
plt.subplot2grid((3, 3), (1, 0), colspan=2)
plt.plot(convolved_stim_vector)
plt.title('Convolved stimulus-vector', fontsize=20)
plt.ylabel('Activity (A.U.)')
plt.xlabel('Time (seconds)')
plt.xlim(0, 100)

plt.tight_layout()
plt.show()
```

The result -- the convolved stimulus-vector -- is basically the output of the multiplication of the HRF with the stimulus-events as you "slide" the HRF across the stimulus-vector. As you can see, the convolved stimulus-vector correctly shows the to-be-expected lag and shape of the BOLD-response! Given that this new predictor incorporates this knowledge of the to-be-expected response, it will probably model the activity of our voxel way better.

Note that the temporal resolution of your convolved regressor is necessarily limited by the resolution of your data (i.e., the TR of your fMRI acquisition). That's why the convolved regressor doesn't look as "smooth" as the HRF.

As you can see in the code for the plot above, numpy provides us with a function to convolve two arrays:

```python
np.convolve(array_1, array_2)
```

Now, we can convolve the HRF with our stimulus-predictor. Importantly, we want to do this convolution operation at the resolution of our onsets (here: seconds), not at the resolution of our signal (TR) (the reason for this is explained clearly in Jeanette Mumford's [video on the HRF](https://www.youtube.com/watch?v=5JNX34gYG7Q)). Therefore, we need to perform the convolution on the variable `predictor_all` (*not* the downsampled variable `predictor_all_ds`)! We'll do this below (we'll reuse the `dg_hrf` variable defined earlier):

```python
'''We need to "squeeze" out the extra singleton axis, because that's what the
np.convolve function expects, i.e., arrays of shape (N,) and NOT (N, 1).
To go from (N, 1) --> (N,) we'll use the squeeze() method'''
predictor_conv = np.convolve(predictor_all.squeeze(), dg_hrf)
print("The shape of the convolved predictor after convolution: %s" % (predictor_conv.shape,))

# After convolution, we also need to "trim" off some excess values from
# the convolved signal (the reason for this is not important to understand)
predictor_conv = predictor_conv[:predictor_all.size]
print("After trimming, the shape is: %s" % (predictor_conv.shape,))

# And we have to add a new axis again to go from shape (N,) to (N, 1),
# which is important for stacking the intercept later
predictor_conv = predictor_conv[:, np.newaxis]
print("Shape after adding the new axis: %s" % (predictor_conv.shape,))
```

It's a bit of a hassle (squeezing out the singleton axis, trimming, adding the axis back ...), but now we have a predictor which includes information about the expected HRF!
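Since we'll repeat this squeeze-convolve-trim-reshape pattern a few more times, it may be convenient to bundle the steps in a small helper function (our own suggestion; the function name is hypothetical and not part of the tutorial's "official" code):

```python
# Our own convenience wrapper around the squeeze -> convolve -> trim ->
# add-axis steps shown above
def convolve_predictor(predictor, hrf):
    """Convolve an (N, 1) predictor with an HRF; returns an (N, 1) array."""
    conv = np.convolve(predictor.squeeze(), hrf)  # shape: (N + len(hrf) - 1,)
    conv = conv[:predictor.size]                  # trim off the excess values
    return conv[:, np.newaxis]                    # back to shape (N, 1)

# This should reproduce the manually computed predictor exactly
assert np.allclose(convolve_predictor(predictor_all, dg_hrf), predictor_conv)
```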
Let's look at the predictor before and after convolution in the same plot:

```python
plt.figure(figsize=(25, 5))
plt.plot(predictor_all)
plt.plot(predictor_conv)
plt.xlim(-1, 800)
plt.title("Predictor before and after convolution", fontsize=25)
plt.xlabel("Time (seconds!)", fontsize=20)
plt.ylabel("Activity (A.U.)", fontsize=20)
plt.legend(['Before', 'After'], loc='upper right', fontsize=15)
plt.show()
```

Great! Our predictor now includes the expected 'lag' and shape of the HRF, and we can start analyzing our signal with our new convolved predictor! But before we do this, there is one more concept that we'll demonstrate. Remember the concept of **linear scaling** of the BOLD-response? This property of the BOLD-response states that it scales linearly with the input it is given. Let's see how that works:

```python
plt.figure(figsize=(15, 5))

one_stim = np.zeros(100)
one_stim[5] = 1
one_stim_conv = np.convolve(one_stim, dg_hrf)[:100]

two_stim = np.zeros(100)
two_stim[[5, 7]] = 1
two_stim_conv = np.convolve(two_stim, dg_hrf)[:100]

three_stim = np.zeros(100)
three_stim[[5, 7, 9]] = 1
three_stim_conv = np.convolve(three_stim, dg_hrf)[:100]

plt.subplot2grid((2, 3), (0, 0))
plt.plot(one_stim)
plt.title("One stimulus", fontsize=25)

plt.subplot2grid((2, 3), (0, 1))
plt.plot(two_stim, c='tab:orange')
plt.title("Two stimuli", fontsize=25)

plt.subplot2grid((2, 3), (0, 2))
plt.plot(three_stim, c='tab:green')
plt.title("Three stimuli", fontsize=25)

plt.subplot2grid((2, 3), (1, 0), colspan=3)
plt.plot(one_stim_conv)
plt.plot(two_stim_conv)
plt.plot(three_stim_conv)
plt.legend(['One stim', 'Two stim', 'Three stim'])
plt.title('Linear scaling of HRF', fontsize=25)
plt.ylabel('Activity (A.U.)', fontsize=20)
plt.xlabel('Time (TR)', fontsize=20)
plt.xlim(0, 100)

plt.tight_layout()
plt.show()
```

Also, in our random stimulus-vector above (and in the example we showed earlier) we assumed that each image was only shown briefly (i.e., we only modelled the onset) - but what if a stimulus (or task) takes longer, say, 15 seconds? Let's see what happens.

```python
random_stimulus_onsets2 = list(range(10, 25)) + list(range(40, 55)) + list(range(70, 85))
random_stim_vector2 = np.zeros(100)
random_stim_vector2[random_stimulus_onsets2] = 1

plt.figure(figsize=(10, 6))
plt.subplot(2, 1, 1)
plt.plot(random_stim_vector2, c='tab:blue')
plt.xlim((0, 100))
plt.ylim((-.5, 1.2))
plt.ylabel('Activity (A.U.)', fontsize=15)
plt.title('Stimulus events', fontsize=20)

convolved_stim_vector2 = np.convolve(random_stim_vector2, dg_hrf)[:random_stim_vector2.size]
plt.subplot(2, 1, 2)
plt.plot(convolved_stim_vector2)
plt.title('Convolved stimulus-vector', fontsize=20)
plt.ylabel('Activity (A.U.)', fontsize=15)
plt.xlabel('Time (seconds)', fontsize=15)

plt.tight_layout()
plt.show()
```

As you can see, convolution models the shape of the BOLD-response according to how long you specify the stimulus to last!

<div class='alert alert-info'> <b>ToThink</b> </div>

Given the properties of the BOLD-response (and assuming linear-time invariance is not violated), would you expect the same or a different BOLD-response to 3 consecutive stimuli (of the same condition) of half a second each (which follow each other immediately, i.e. without interstimulus interval) versus 1 stimulus of 1.5 seconds? Why?
(Write your answer in the text-cell below)

Because the BOLD-response is so slow, it cannot distinguish between short consecutive stimuli and one longer stimulus (which is evident from the fact that these two hypothetical stimulus-vectors look virtually identical after convolution).

Actually, convolution can model *any* sequence of stimulus events, even stimuli with random onsets - just look at the plot below! (You can execute the cell below multiple times to see different random regressor shapes!)

```python
random_stimulus_onsets3 = np.random.randint(0, 100, 25)
random_stim_vector3 = np.zeros(100)
random_stim_vector3[random_stimulus_onsets3] = 1

plt.figure(figsize=(16, 5))
plt.subplot(2, 1, 1)
plt.axhline(0)
for i, event in enumerate(random_stim_vector3):
    if event != 0.0:
        plt.plot((i, i), (0, 1), c='tab:blue')

plt.xlim((0, 100))
plt.ylim((-0.1, 1.1))
plt.ylabel('Activity (A.U.)', fontsize=15)
plt.title('Stimulus events', fontsize=15)

convolved_stim_vector3 = np.convolve(random_stim_vector3 * .5, dg_hrf, 'full')[:random_stim_vector3.size]
plt.subplot(2, 1, 2)
plt.plot(convolved_stim_vector3)
plt.xlim(0, 100)
plt.title('Convolved stimulus-vector', fontsize=20)
plt.ylabel('Activity (A.U.)', fontsize=15)
plt.xlabel('Time (seconds)', fontsize=15)

plt.tight_layout()
plt.show()
```

So, in summary, convolving the stimulus-onsets (and their durations) with the HRF gives us (probably) a better predictor of the voxel signal than just the stimulus-onsets, because it (1) models the lag of the BOLD-response and (2) models the shape of the BOLD-response (accounting for the linear scaling principle).

Now, we're *almost* ready to start analyzing our signal with the convolved predictor! The problem at this moment, however, is that the convolved predictor and the signal are on different scales!

```python
print("Shape of convolved predictor: %s" % (predictor_conv.shape,))
print("Shape of signal: %s" % (voxel_signal.shape,))
```

To fix this, we need to downsample the predictor again, like we did earlier:

```python
predictor_conv_ds = predictor_conv[::2]

plt.figure(figsize=(25, 6))
plt.plot(predictor_conv_ds)
plt.xlim(x_lim)
plt.title("Downsampled convolved predictor", fontsize=25)
plt.xlabel("Time (in volumes!)", fontsize=20)
plt.ylabel("Activity (A.U.)", fontsize=20)
plt.show()

print("Shape of downsampled predictor is now: %s" % (predictor_conv_ds.shape,))
```

Finally ... we're ready to see whether the HRF-based predictor *actually* models our original voxel signal (`voxel_signal`, from earlier in the tutorial) more accurately! Let's create a proper design-matrix ($X$) by stacking an intercept with the stimulus-regressor, perform the regression analysis, and check out the results (by plotting the predicted signal against the true signal). For comparison, we'll plot the original (unconvolved) model as well!
```python
intercept = np.ones((predictor_conv_ds.size, 1))
X_conv = np.hstack((intercept, predictor_conv_ds))
betas_conv = lstsq(X_conv, voxel_signal, rcond=None)[0]

plt.figure(figsize=(19, 8))
plt.subplot(2, 1, 1)
plt.plot(voxel_signal)
plt.plot(X_conv.dot(betas_conv))
plt.xlim(x_lim)
plt.ylabel("Activity (A.U.)", fontsize=15)
plt.title("Model fit with *convolved* regressor", fontsize=20)
plt.legend(['True signal', 'Predicted signal'], fontsize=12, loc='upper right')

plt.subplot(2, 1, 2)
plt.plot(voxel_signal)
plt.plot(X_simple.dot(betas_simple))
plt.xlim(x_lim)
plt.ylabel("Activity (A.U.)", fontsize=15)
plt.title("Model fit with original (*unconvolved*) regressor", fontsize=20)
plt.legend(['True signal', 'Predicted signal'], fontsize=12, loc='upper right')
plt.xlabel("Time (volumes)", fontsize=15)

plt.tight_layout()
plt.show()
```

Wow, that looks much better, right? First, let's inspect the beta-parameters:

```python
print('The beta-parameter of our stimulus-predictor is now: %.3f' % betas_conv[1])
print('... which is %.3f times larger than our original beta '
      '(based on the unconvolved predictor)!' % (betas_conv[1] / 0.290))
```

Like we did before, we'll zoom in and show you how the estimated beta-parameters relate to the data:

```python
plt.figure(figsize=(10, 5))
plt.plot(voxel_signal[25:65])
plt.plot(X_conv[25:65, :].dot(betas_conv), lw=3)
plt.xlabel('Time (in volumes)', fontsize=20)
plt.ylabel('Activity (A.U.)', fontsize=20)
plt.xlim(0, 40)
plt.ylim(-.5, 2)

plt.annotate('', xy=(8, betas_conv[0]), xytext=(8, betas_conv[1]),
             arrowprops=dict(arrowstyle='<->', lw=3))
plt.text(8, betas_conv.sum() + 0.05, r'$\beta_{stim}$', horizontalalignment='center', fontsize=20)

plt.annotate('', xy=(33, betas_conv[0]), xytext=(33, betas_conv[1]),
             arrowprops=dict(arrowstyle='<->', lw=3))
plt.text(33, betas_conv.sum() + 0.05, r'$\beta_{stim}$', horizontalalignment='center', fontsize=20)

plt.annotate('', xy=(20, -0.03), xytext=(20, betas_conv[0] + 0.03),
             arrowprops=dict(arrowstyle='<->', lw=1))
plt.text(20.5, 0.0, r'$\beta_{intercept}$', fontsize=20)

plt.axhline(0, ls='--', c='k', lw=0.5)
plt.legend(['True signal', 'Predicted signal'], fontsize=15, loc='upper center')
plt.xticks(np.arange(0, 41, 5), np.arange(25, 65, 5))
plt.show()
```

Alright, so we seem to measure a way larger effect of our stimulus on the voxel activity, but is the model fit actually better as well? Let's find out.

```python
y_hat_conv = X_conv.dot(betas_conv)

des_tmp = np.hstack((np.ones((400, 1)), predictor_all_ds))
y_hat_orig = des_tmp.dot(lstsq(des_tmp, voxel_signal, rcond=None)[0])

MSE_conv = ((y_hat_conv - voxel_signal) ** 2).mean()
MSE_orig = ((y_hat_orig - voxel_signal) ** 2).mean()
print("MSE of model with convolution is %.3f while the MSE of the model without convolution is %.3f" % (MSE_conv, MSE_orig))

R2_conv = 1 - (np.sum((voxel_signal - y_hat_conv) ** 2) / np.sum((voxel_signal - voxel_signal.mean()) ** 2))
R2_orig = 1 - (np.sum((voxel_signal - y_hat_orig) ** 2) / np.sum((voxel_signal - voxel_signal.mean()) ** 2))
print("R-squared of model with convolution is %.5f and without convolution it is %.5f" % (R2_conv, R2_orig))
```

From the model fit metrics above, we can safely conclude that (at least for this voxel) a design ($X$) in which we include information about the expected lag/shape of the HRF is *way* better than an 'HRF-naive' design (i.e., an unconvolved design).
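Because we keep computing the same two model-fit metrics, a tiny helper function (again our own addition, with hypothetical names) can save some typing in the upcoming ToDos:

```python
# Our own helper: compute the MSE and R-squared for a prediction y_hat of data y
def model_fit_metrics(y, y_hat):
    mse = np.mean((y - y_hat) ** 2)
    ss_res = np.sum((y - y_hat) ** 2)
    ss_tot = np.sum((y - y.mean()) ** 2)
    return mse, 1 - ss_res / ss_tot

# Should reproduce MSE_conv and R2_conv from the cell above
print(model_fit_metrics(voxel_signal, y_hat_conv))
```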
<div class='alert alert-warning'> <b>ToDo</b> </div>

So far, our examples were based on the stimulus-onsets of the two conditions (circles and squares) lumped together. This tested the hypothesis that our voxel responded to *any kind* of stimulus -- regardless of the condition (squares/circles) of the stimulus. Usually, however, you want to estimate the betas for each condition separately (i.e., how much each condition on average activates a voxel) and test the influence of each condition on the voxel separately (but estimated in the same model)! This is what you're going to do in this ToDo.

We provide you with the predictors for circles (`predictor_circles`) and for squares (`predictor_squares`) below. You have to do the following:

- convolve each predictor with the double-gamma HRF (use `dg_hrf`) separately (don't forget to squeeze, trim, and add the axis back)
- downsample the convolved predictors
- stack an intercept and the two predictors **in a single design-matrix** ($X$) -- use `np.hstack((intercept, pred1, pred2))` for this
- calculate the beta-parameters (estimated in a single model!)
- calculate the MSE (store this in the variable `mse_new`) and $R^2$ (store this in the variable `r2_new`)

```python
predictor_circles = np.zeros((800, 1))
predictor_circles[onsets_circles] = 1

predictor_squares = np.zeros((800, 1))
predictor_squares[onsets_squares] = 1

# Implement your ToDo below
pred_circ_conv = ...
pred_squar_conv = ...
X_new = ...  # stack the intercept and the two convolved predictors
X_new = ...  # downsample
b_new = ...
y_hat_new = ...
mse_new = ...
r2_new = ...
```

<div class='alert alert-info'> <b>ToThink</b> </div>

If you've done the above ToDo correctly, you should have found that the design-matrix with the circles and squares predictors modelled separately (as you did in the ToDo) leads to a (somewhat) better model fit (lower MSE/higher $R^2$) than the design-matrix with the conditions lumped together in a single predictor (as we did earlier). Argue why you think this is the case here.

Because with separate predictors, the model can assign different effects to the two predictors. If the two conditions do in fact have a different effect on the voxel, this would be impossible to model in the lumped-together scenario, because that model can only explain a "common" effect of the conditions.

#### 2.4.2. Temporal basis functions

Most studies use a canonical HRF to convolve with their predictors. However, remember that using a canonical HRF assumes that the particular shape of that HRF is appropriate for each voxel, each condition, and each subject in your study. This is quite a strong assumption. In fact, studies have shown that the exact shape of the HRF often differs between voxels, conditions, and subjects (as is explained in detail in the [video on basis sets](https://www.youtube.com/watch?v=YfeMIcDWwko&index=21&list=PLcvMDPDk-dSmTBejANv7kY2mFo1ni_gkA) by Tor Wager). Indeed, this might also be the case in our data! If you've done the ToDo correctly, you might have seen that the predictions ($\hat{y}$) seem to "peak" too late for the circle-stimuli...
In fact, let's plot the data ($y$), the prediction based on the circles-predictor ($X_{circles}\beta_{1}$), and the prediction based on the squares-predictor ($X_{squares}\beta_{2}$) separately:

```python
dg_hrf = double_gamma(np.arange(50))
pred_circ_conv = np.convolve(predictor_circles.squeeze(), dg_hrf)[:800][:, np.newaxis]
pred_squar_conv = np.convolve(predictor_squares.squeeze(), dg_hrf)[:800][:, np.newaxis]

X_new = np.hstack((np.ones((800, 1)), pred_circ_conv, pred_squar_conv))
X_new = X_new[::2, :]
b_new = np.linalg.lstsq(X_new, voxel_signal, rcond=None)[0]

circ_hat = b_new[0] + b_new[1] * X_new[:, 1]
squar_hat = b_new[0] + b_new[2] * X_new[:, 2]
y_hat_new = X_new.dot(b_new)

plt.figure(figsize=(25, 8))
plt.plot(voxel_signal[250:])
plt.plot(circ_hat[250:], lw=3)
plt.plot(squar_hat[250:], lw=3)
plt.xticks(np.arange(0, 151, 50), np.arange(250, 401, 50))
plt.xlim(0, 150)
plt.xlabel("Time (volumes)", fontsize=20)
plt.ylabel("Activity (A.U.)", fontsize=20)
plt.title("Model fit per predictor (for last 150 volumes)", fontsize=25)
plt.legend(['signal', 'circles-predictor', 'squares-predictor'], fontsize=15)
```

So, what should we do about this? Well, one solution is to use *temporal basis functions* (also called *temporal basis sets*). Temporal basis functions model the HRF as *a combination of (haemodynamic response) functions*. In practice, this amounts to convolving your predictor with not one, but multiple HRFs. This results in multiple predictors per stimulus-condition! Each HRF measures a "part" (or property) of the total HRF. Together, these predictors aim to estimate the complete HRF for a given stimulus-vector (condition).

We're going to use *single-gamma basis functions* as an example of a temporal basis set (but there are other sets, like the *sine basis set* and the *finite impulse response* set). In this particular basis set, the original single-gamma HRF is used in combination with its first derivative (often called the 'temporal derivative') and its second derivative (the derivative of the derivative, so to say; often called the 'dispersion derivative').

Suppose we have only one stimulus condition. Then, the signal ($y$) is modelled not by one convolved predictor ($X\beta$) but by three predictors: a predictor convolved with the original HRF ($X_{orig}\beta_{1}$), a predictor convolved with the temporal derivative of the HRF ($X_{temp}\beta_{2}$), and a predictor convolved with the dispersion derivative of the HRF ($X_{disp}\beta_{3}$). Formally:

\begin{align}
y = \beta_{0} + X_{orig}\beta_{1} + X_{temp}\beta_{2} + X_{disp}\beta_{3} + \epsilon
\end{align}

Alright, but how do we compute these derivatives and what do they look like? Well, the derivatives are easily computed using the `np.diff` function, which takes an array and returns the value-by-value difference (i.e., for an array $x$, it returns for each value $x_{i}$ the difference $x_{i+1} - x_{i}$).
Let's calculate and plot the first (temporal) derivative and the second (dispersion) derivative:

```python
sg_hrf = single_gamma(np.arange(0, 50))
sg_hrf_temp = np.diff(sg_hrf)
sg_hrf_disp = np.diff(sg_hrf_temp)

# Differentiation trims off one value, so we need to add that back
sg_hrf_temp = np.append(sg_hrf_temp, 0)
sg_hrf_disp = np.append(sg_hrf_disp, [0, 0])

plt.figure(figsize=(20, 5))
plt.subplot(1, 3, 1)
plt.plot(sg_hrf, lw=3)
plt.ylim(-0.3, 1.1)
plt.ylabel("Activity (A.U.)", fontsize=25)
plt.xlabel("Time (seconds)", fontsize=15)
plt.title("Original HRF", fontsize=20)

plt.subplot(1, 3, 2)
plt.plot(sg_hrf_temp, c='tab:orange', lw=3)
plt.ylim(-0.3, 1.1)
plt.xlabel("Time (seconds)", fontsize=15)
plt.title("First (temporal) derivative", fontsize=20)

plt.subplot(1, 3, 3)
plt.plot(sg_hrf_disp, c='tab:green', lw=3)
plt.ylim(-0.3, 1.1)
plt.xlabel("Time (seconds)", fontsize=15)
plt.title("Second (dispersion) derivative", fontsize=20)

plt.tight_layout()
plt.show()
```

The cool thing about this single-gamma basis set is that the derivatives can (to a certain extent) correct for slight deviations in the lag and shape of the HRF based on the data! Specifically, the first (temporal) derivative can correct for slight differences in lag (compared to the canonical single-gamma HRF) and the second (dispersion) derivative can correct for slight differences in the width (or "dispersion") of the HRF (compared to the canonical single-gamma HRF).

"How does this 'correction' work, then?", you might ask. Well, think about it this way: the original (canonical) HRF measures the increase/decrease -- or amplitude -- of the BOLD-response. In a similar way, the temporal derivative measures the *onset* -- or lag -- of the BOLD-response. And finally, the dispersion derivative measures the *width* of the BOLD-response.

When we use our three predictors (one convolved with the canonical HRF, one with the temporal derivative, and one with the dispersion derivative) in a linear regression model, the model will assign each predictor (each part of the HRF) a beta-weight, as you know. These beta-weights are chosen such that they model the data -- some response of the voxel to a stimulus -- as well as possible. Basically, assigning a (relatively) high beta-weight to the predictor convolved with the temporal derivative will "shift" the HRF (increase/decrease the onset of the HRF). Assigning a (relatively) high beta-weight to the predictor convolved with the dispersion derivative will increase/decrease the width of the HRF.

Alright, let's visualize this. Suppose we have a voxel that we know does not conform to the specific assumptions about lag (onset) and width of the canonical (single-gamma) HRF.
We'll show below that the canonical HRF alone explains this voxel suboptimally:

```python
example_data = np.load('data/voxel_basissets_example.npz')
example_vox, onset_array = example_data['example_vox'], example_data['onset_array']

# Then make the design-matrix (by convolving the HRF with the onset-array)
predictor_hrf_canonical = np.convolve(onset_array, sg_hrf)[:example_vox.size]
design_mat = np.hstack((np.ones((example_vox.size, 1)), predictor_hrf_canonical[:, np.newaxis]))

# Do regression
beta1 = lstsq(design_mat, example_vox, rcond=None)[0]
yhat1 = design_mat.dot(beta1)

# Plot the data and the prediction (y_hat)
plt.figure(figsize=(15, 5))
plt.plot(example_vox)
plt.plot(yhat1)
plt.xlim(0, 98)
plt.xlabel("Time (seconds)", fontsize=12)
plt.ylabel("Activation (A.U.)", fontsize=12)
plt.annotate('', xy=(9, 0), xytext=(7, 0.2), arrowprops=dict(arrowstyle='->', lw=2))
plt.text(8, 0.22, 'Stim\nonset', horizontalalignment='right', fontsize=15)
plt.legend(['signal', 'predicted signal'], fontsize=15)
plt.title("Prediction with canonical HRF only", fontsize=20)
plt.show()
```

As you can see, the predicted signal (orange line) misses the peak of the BOLD-response and is also slightly too narrow. Now, let's see what happens if we add the temporal derivative to the model, and then both the temporal and the dispersion derivative:

```python
predictor_hrf_temporal = np.convolve(onset_array, sg_hrf_temp)[:example_vox.size]
design_mat2 = np.hstack((design_mat, predictor_hrf_temporal[:, np.newaxis]))

# Do regression with HRF + temporal derivative HRF
beta2 = lstsq(design_mat2, example_vox, rcond=None)[0]
yhat2 = design_mat2.dot(beta2)

# Replot the canonical HRF fit
plt.figure(figsize=(15, 10))
plt.subplot(3, 1, 1)
plt.plot(example_vox)
plt.plot(yhat1)
plt.xlim(0, 98)
plt.ylabel("Activation (A.U.)", fontsize=12)
plt.annotate('', xy=(9, 0), xytext=(7, 0.2), arrowprops=dict(arrowstyle='->', lw=2))
plt.text(8, 0.22, 'Stim\nonset', horizontalalignment='right', fontsize=15)
plt.title("Prediction with canonical HRF only", fontsize=20)
plt.legend(['signal', 'predicted signal'], fontsize=15)

# Plot model with temporal derivative HRF
plt.subplot(3, 1, 2)
plt.plot(example_vox)
plt.plot(yhat2)
plt.xlim(0, 98)
plt.ylabel("Activation (A.U.)", fontsize=12)
plt.annotate('', xy=(9, 0), xytext=(7, 0.2), arrowprops=dict(arrowstyle='->', lw=2))
plt.text(8, 0.22, 'Stim\nonset', horizontalalignment='right', fontsize=15)
plt.title("Prediction with canonical HRF + temporal deriv", fontsize=20)

# Make the dispersion HRF predictor and do regression
predictor_hrf_dispersion = np.convolve(onset_array, sg_hrf_disp)[:example_vox.size]
design_mat3 = np.hstack((design_mat2, predictor_hrf_dispersion[:, np.newaxis]))
beta3 = lstsq(design_mat3, example_vox, rcond=None)[0]
yhat3 = design_mat3.dot(beta3)

# Plot model with temporal derivative HRF + dispersion derivative HRF
plt.subplot(3, 1, 3)
plt.plot(example_vox)
plt.plot(yhat3)
plt.xlim(0, 98)
plt.xlabel("Time (seconds)", fontsize=12)
plt.ylabel("Activation (A.U.)", fontsize=12)
plt.annotate('', xy=(9, 0), xytext=(7, 0.2), arrowprops=dict(arrowstyle='->', lw=2))
plt.text(8, 0.22, 'Stim\nonset', horizontalalignment='right', fontsize=15)
plt.title("Prediction with canonical HRF + temporal deriv + dispersion deriv", fontsize=20)

plt.tight_layout()
plt.show()
```

As you can see, the prediction improves quite a bit when including the temporal derivative and (although to a lesser extent) the dispersion derivative! But how should we interpret the beta-parameters?
Well, usually people don't really interpret the temporal and dispersion derivative HRFs (unless they're interested in the lag/width of the HRF), because most researchers are interested in the activation/deactivation (the amplitude) of voxels in response to a stimulus, which corresponds to the beta-parameter associated with the canonical HRF. So, basically, the temporal and dispersion derivatives are only used to "correct" for deviations in lag/shape from the canonical HRF!

So, should you then always use a (gamma) basis set? To be honest, people are quite divided on the topic of whether to use basis sets or a canonical HRF. In our experience, derivatives (e.g., in the gamma basis sets) offer little improvement over a canonical HRF, but they don't hurt either (given that you have 'enough' degrees of freedom). Anyway, time for a ToDo!

<div class='alert alert-warning'> <b>ToDo: Large</b> </div>

Reanalyze the voxel signal with the separate conditions (like the last ToDo), but this time with a gamma basis set instead of the canonical HRF! Calculate the beta-parameters, MSE, and $R^2$. Store the MSE in a variable named `mse_gbf` and $R^2$ in a variable named `r2_gbf`. Please implement this ToDo in "steps", such that we can test intermediate output:

1. Convolve the circle predictor (`predictor_circles`) and the squares predictor (`predictor_squares`) with the three HRF basis functions (canonical, temporal deriv., dispersion deriv.) separately, giving you 6 predictors; stack them together and add an intercept (make sure the intercept is the first column). Store your design matrix in a variable named `X_gbf`; (2 points)
2. Run linear regression (your DV is the variable `voxel_signal`) and store your betas in a variable named `betas_gbf`; (1 point)
3. Calculate R-squared and store it in a variable named `r2_gbf`; (1 point)
4. Calculate the MSE and store it in a variable named `mse_gbf`; (1 point)

Some tips:

- you can use the definitions of the HRFs from earlier (`sg_hrf`, `sg_hrf_temp`, and `sg_hrf_disp`)
- make sure that your design-matrix has, eventually, 7 columns (3 predictors x 2 conditions + intercept)
- don't forget to trim and downsample your predictors/design matrix after convolution! (remember: our fMRI signal has 400 samples)

```python
# Step 1: convolve the predictors (don't forget to trim and downsample)!
# Hint: print the shape of your predictors after convolving, trimming, and downsampling -
# does this shape correspond to the number of datapoints of the experiment?

# We have created the binary predictors for you already
predictor_circles = np.zeros(800)
predictor_circles[onsets_circles] = 1

predictor_squares = np.zeros(800)
predictor_squares[onsets_squares] = 1

pred_ci_conv1 = ...
pred_ci_conv2 = ...
pred_ci_conv3 = ...

pred_sq_conv1 = ...
pred_sq_conv2 = ...
pred_sq_conv3 = ...

icept = np.ones((800, 1))
design_mat_todo = ...
X_gbf = design_mat_todo[::2, :]
```

```python
# Step 2: run linear regression
betas_gbf = ...
```

```python
''' Tests the above steps (hidden tests only)'''
betas_gbf_ans = ...
```

```python
# Step 3: calculate R-squared (and store it in a variable named r2_gbf)
y_hat_gbf = ...
r2_gbf = ...
```

```python
''' Tests the above ToDo (only hidden tests)'''
y_hat_gbf = ...
r2_gbf_ans = ...
```

```python
# Step 4: calculate the MSE (and store it in a variable named mse_gbf)
y_hat_gbf = ...
mse_gbf = ...
```

```python
''' Tests the above ToDo (only hidden tests)'''
y_hat_gbf = ...
mse_gbf_ans = ...
```

From what we've shown so far, hopefully you noticed that applying linear regression to model a voxel signal is not that much different from applying it to 'regular' data, except for the convolution/HRF part. At this moment, you already know 95% of how univariate analysis works! There are, however, still a couple of concepts we need to address, which we'll do in the next section: statistical inference of model parameters.

## 3. Statistical inference of model parameters

From your statistics classes, you might remember that many software packages (e.g. SPSS or R) do not only return the beta-parameters of linear regression models, but also t-values and p-values associated with those beta-parameters. These statistics evaluate whether a beta-parameter (or a combination of beta-parameters) differs significantly from 0 (or, in fMRI terms: whether a voxel activates/deactivates significantly in response to a stimulus).

"Why would you need t-values and p-values - can't you just look at the beta-parameters?", you might ask. Well, the problem is that **you should never interpret raw beta-values** - not in analyses of regular data nor in analyses of fMRI data. To illustrate the problem with this, let's look at an example. In this example, we try to predict someone's height (in meters; y) using someone's weight (in kilos; X). (Note that the data is not really representative of the true relationship between height and weight.) Anyway, let's run a linear regression using weight (in kilos) as a predictor for height (in meters).

```python
data = np.load('data/weight_height_data.npz')
X, y = data['X'], data['y']

plt.figure(figsize=(10, 6))
plt.scatter(X, y)
plt.title('Relation between weight and height (in meters)', y=1.05, fontsize=20)
plt.xlabel('Weight (kg)', fontsize=20)
plt.ylabel('Height (meters)', fontsize=20)

Xn = np.hstack((np.ones((y.size, 1)), X))
beta = lstsq(Xn, y, rcond=None)[0]
y_hat = Xn.dot(beta)
mse = np.mean((y_hat - y) ** 2)

plt.plot(X, Xn.dot(beta))
plt.xlim((X.min(), X.max()))
plt.text(70, 1.9, r'$\beta_{weight} = %.5f$' % beta[1], fontsize=18)
plt.text(70, 1.8, r'$MSE = %.5f$' % mse, fontsize=18)
plt.show()
```

Well, quite a modest beta-parameter on the one hand, but on the other hand the mean squared error is also quite low. Now, to illustrate the problem of interpreting 'raw' beta-weights, let's rephrase our objective of predicting height based on weight: we'll try to predict **height in centimeters** based on weight (still in kilos). So, what we'll do is just rescale the data points of y (height in meters) so that they reflect height in centimeters. We can simply do this by multiplying our y-variable by 100.

```python
y_cm = y * 100
```

Now, you wouldn't expect our model to change, right? We only rescaled our target ... As you'll see below, this actually changes a lot!

<div class='alert alert-warning'> <b>ToDo</b> </div>

Run linear regression like in the previous code block, but with `y_cm` instead of `y` as the target variable. You can use the same design (`Xn`). Calculate the beta-parameter and the MSE.

```python
# implement linear regression for y_cm using Xn here:
beta_cm = ...
y_hat_cm = ...
mse_cm = ...
```

If you did it correctly, when you compare the beta-parameters between the two models (one where y is in meters, and one where y is in centimeters), you see a massive difference - a 100-fold difference, to be exact\*! This is a nice example where you see that the (raw) value of the beta-parameter is completely dependent on the scale of your variables.
(Actually, you could rescale either X or y; both will have a similar effect on your estimated beta-parameter.)

-----------
\* Note that the MSE is 10,000 times larger in the model with y_cm compared to y (in meters). This is because the influence of the scale (a factor of 100) is squared when calculating the mean **squared** error!

### 3.1. How to compute statistics of the GLM

So, you've seen that interpreting beta-parameters by themselves is useless because their value depends very much on the scale of your variables. But how, then, should we interpret the effects of our predictors on our target-variable? From the plots above, you probably guessed already that it has something to do with the MSE of our model (or, more generally, the model fit). That is indeed the case. As you might have noticed, not only do the beta-parameters depend on the scale of your data, the errors (residuals) depend on the scale as well. In other words, not only the *effect* (beta-values) but also the *noise* (errors, MSE) depends on the scale of the variables!

#### 3.1.1. T-values

In fact, the key to getting interpretable effects of our predictors is to divide ("normalize") our beta-parameter(s) by some quantity that summarizes how well our model describes the data. This quantity is the **standard error of the beta-parameter**, usually denoted by $SE_{\hat{\beta}}$. The standard error of the beta-parameter can be computed by taking the square root of the **variance of the beta-parameter**. If we divide our beta-estimate by its standard error, we compute a statistic you are all familiar with: the t-statistic! Formally:

\begin{align}
t_{\hat{\beta}} = \frac{\hat{\beta}}{SE_{\hat{\beta}}} = \frac{\hat{\beta}}{\sqrt{\mathrm{variance}(\hat{\beta})}}
\end{align}

<div class='alert alert-info'> <b>ToThink</b> </div>

Suppose that I know the $SE$ of a particular beta-parameter. How can I derive the variance of that parameter (i.e., how do I go from the $SE$ to the variance)? And yes, the answer is as straightforward as you'd think.

Another way to think about it is that the t-value is the "effect" ($\hat{\beta}$) divided by your (un)certainty, or confidence, in the effect ($SE_{\hat{\beta}}$). In a way, you can think of t-values as "uncertainty-normalized" effects.

So, what drives (statistical) uncertainty about "effects" (here: $\hat{\beta}$ parameters)? To find out, let's dissect the uncertainty term, $SE_{\hat{\beta}}$, a little more. The standard error of a parameter can be interpreted conceptually as (the square root of) the "unexplained variance of the model" (or **noise**) multiplied by the "design variance" (or: **the variance of the parameter due to the design**). In this lab, we won't explain what *design variance* means or how to compute it, because that would complicate things too much for now. Next week will be all about this term. For now, we treat "design variance" as some known (constant) value.

So, with this information, we can construct a conceptual formula for the standard error of our parameter(s):

\begin{align}
SE_{\hat{\beta}} = \sqrt{\mathrm{noise} \cdot \mathrm{design\ variance}}
\end{align}

Now we can also create a "conceptual formula" for the t-statistic:

\begin{align}
t_{\hat{\beta}} = \frac{\hat{\beta}}{SE_{\hat{\beta}}} = \frac{\mathrm{effect}}{\sqrt{\mathrm{noise} \cdot \mathrm{design\ variance}}}
\end{align}

This (conceptual) formula involving effects, noise, and design variance is probably **the most important concept of this course**.
The effects (t-values) we measure in GLM analyses of fMRI data depend on two things: the measured effect ($\hat{\beta}$) and the (un)certainty of that effect ($SE_{\hat{\beta}}$), of which the latter term can be divided into the unexplained variance ("noise") and the design variance (the uncertainty of the parameter due to the design). These two terms (noise and design variance) will be central to the next couple of weeks of this course. In week 3 (topic: design of experiments), we'll focus on how to optimize our t-values by minimizing the "design variance" term. In week 4 (topic: preprocessing), we'll focus on how to optimize our t-values by minimizing the error.

While we're going to ignore the design variance for now, we *are* going to learn how to calculate the "noise" term. In fact, the noise term is *very* similar to the MSE, but instead of taking the *mean* of the squared residuals, we sum the squared residuals (the "sums of squared errors", SSE) and divide the sum by the model's degrees of freedom (df). People usually use the $\hat{\sigma}^{2}$ symbol for this noise term:

\begin{align}
\mathrm{noise} = \hat{\sigma}^{2} = \frac{\sum_{i=1}^{N}(\hat{y_{i}} - y_{i})^2}{\mathrm{df}}
\end{align}

where the degrees of freedom (df) are defined as the number of samples ($N$) minus the number of predictors *including the intercept* ($P$):

\begin{align}
\mathrm{df} = N - P
\end{align}

So, the formula of the t-statistic becomes:

\begin{align}
t_{\hat{\beta}} = \frac{\hat{\beta}}{\sqrt{\frac{\sum_{i=1}^{N}(\hat{y_{i}} - y_{i})^2}{\mathrm{df}} \cdot \mathrm{design\ variance}}}
\end{align}

Alright, enough formulas. Let's see how we can compute these terms in Python. We're going to calculate the t-statistic of the weight-predictor for both models (the meter-model and the centimeter-model) to see whether the (normalized) effect of weight on height in meters is essentially the same as the effect on height in centimeters; in other words, we are going to investigate whether the conversion to t-values "normalizes" the beta-parameters.

First, we'll create a function for you to calculate the design variance. You *don't* have to understand how this works; we're going to explain it to you in detail next week.

```python
def design_variance(X, which_predictor=1):
    ''' Returns the design variance of a predictor (or contrast) in X.

    Parameters
    ----------
    X : numpy array
        Array of shape (N, P)
    which_predictor : int or list/array
        The index of the predictor you want the design var from.
        Note that 0 refers to the intercept!
        Alternatively, "which_predictor" can be a contrast-vector
        (which will be discussed later this lab).

    Returns
    -------
    des_var : float
        Design variance of the specified predictor/contrast from X.
    '''
    is_single = isinstance(which_predictor, int)
    if is_single:
        idx = which_predictor
    else:
        idx = np.array(which_predictor) != 0

    c = np.zeros(X.shape[1])
    c[idx] = 1 if is_single else np.array(which_predictor)[idx]
    des_var = c.dot(np.linalg.pinv(X.T.dot(X))).dot(c.T)
    return des_var
```

So, if you want the design variance of the 'weight' parameter in the variable `Xn` from before, you do:

```python
# use which_predictor=1, because the weight-column in Xn is at index 1 (index 0 = intercept)
design_variance_weight_predictor = design_variance(Xn, which_predictor=1)
print("Design variance of weight predictor is: %.6f " % design_variance_weight_predictor)
```

Alright, now we only need to calculate our noise-term ($\hat{\sigma}^2$):

```python
# Let's just redo the linear regression (for clarity)
beta_meter = lstsq(Xn, y, rcond=None)[0]
y_hat_meter = Xn.dot(beta_meter)

N = y.size
P = Xn.shape[1]
df = (N - P)
print("Degrees of freedom: %i" % df)

sigma_hat = np.sum((y - y_hat_meter) ** 2) / df
print("Sigma-hat (noise) is: %.3f" % sigma_hat)
design_variance_weight = design_variance(Xn, 1)
```

Now we can calculate the t-value:

```python
t_meter = beta_meter[1] / np.sqrt(sigma_hat * design_variance_weight)
print("The t-value for the weight-parameter (beta = %.3f) is: %.3f" % (beta_meter[1], t_meter))
```

That's it! There's not much more to calculating t-values in linear regression. Now it's up to you to do the same thing: calculate the t-value for the model of height in centimeters, and check whether it is the same as the t-value for the weight-parameter in the model of height in meters.

<div class='alert alert-warning'> <b>ToDo</b> </div>

Calculate the t-statistic for the beta from the centimeter-model you calculated earlier. Store the value in a new variable named `t_centimeter`. Note: you don't have to calculate the design variance again (because `X` hasn't changed!) - you can reuse the variable `design_variance_weight`.

```python
# Calculate the t-value here!
```

### 3.2. P-values

As you can see, calculating t-values completely solves the 'problem' of uninterpretable beta-coefficients! So, remember never to interpret raw beta-coefficients (at least in fMRI), and always to convert them to t-values first! Now, the last thing you need to know is how to calculate the significance of your t-value, or in other words, how to calculate the corresponding p-value. You probably remember that the p-value corresponds to the area under the curve of the t-distribution associated with your t-value *and more extreme values*:

The function `t.sf(t_value, df)` from the `stats` module of the `scipy` package does exactly this. Importantly, this function ALWAYS returns the right-tailed p-value. For negative t-values, however, you'd want the left-tailed p-value. One way to remedy this is to always pass the absolute value of your t-value - `np.abs(t_value)` - to the `t.sf()` function. Also, the `t.sf()` function by default returns the one-sided p-value. In practice, you usually want the two-sided p-value, so you can simply multiply the returned p-value by two to get the corresponding two-sided p-value.
Let's see how we'd do that in practice:

```python
from scipy.stats import t

# take the absolute value with np.abs()
p_value = t.sf(np.abs(t_meter), df) * 2  # multiply by two to create a two-tailed p-value
print('The p-value corresponding to t(%i) = %.3f is: %.8f' % (df, t_meter, p_value))
```

<div class='alert alert-info'> <b>ToThink</b> </div>

So by now you understand why it is important **not** to interpret raw beta-parameters, because these depend heavily on the scale of your data. One could argue that this is not relevant for fMRI data because all data (i.e., the different voxels in the brain) measure the same type of signal, so their scale shouldn't differ that much. This, however, is a false assumption. Think of two reasons why voxels might differ in their scale and write them down in the text cell below.

*Some possible answers:*

1. Inhomogeneity of the signal at some spots (lower signal)
2. Type of scanner.
3. Different tissue types (white matter, gray matter, CSF, or a mix)
4. Closeness to the headcoil (subcortical structures, for example, generally have a lower SNR)

### 3.3. Contrasts in the GLM

We're almost done! We're really at 99% of what you should know about the GLM and fMRI analysis\*. The only thing left to discuss is **contrasts**. Contrasts are basically follow-up statistical tests of beta-parameter(s) -- and, most importantly, *between* beta-parameters -- to test hypotheses you might have about your predictors. Essentially, a contrast is just an extension of the t-test we've explained earlier.

---
\* Those who remember their intro statistics classes accurately might recall the assumptions of linear regression, which, as some might have noticed, could be violated in linear regression of fMRI data! This is because one of linear regression's assumptions is that of **independent errors**, meaning that there should be no temporal correlation ("autocorrelation") between the residuals of a linear regression model. The residuals of univariate fMRI models are often autocorrelated due to low-frequency drifts, which makes the inference of t-values in fMRI problematic. Fortunately, there are ways to deal with this autocorrelation issue. This will be explained next week (preprocessing). It is important to realize that, fundamentally, linear regression of univariate fMRI data (or regression of *any* temporal signal, really) likely violates an important assumption, but also that the "mechanics" and logic of linear regression as we have learned them thus far still hold!

There are two main types of contrasts for t-tests:

**1. Contrast of a beta-parameter 'against baseline'.** This type of contrast basically tests the hypothesis: "Does my predictor have *any* effect on my dependent variable?" In other words, it tests the following hypotheses:

* $H_{0}: \beta = 0$ (our null-hypothesis, i.e. no effect)
* $H_{a}: \beta \neq 0$ (our alternative hypothesis, i.e. *some* effect)

**2. Contrast between beta-parameters.** This type of contrast basically tests hypotheses such as "Does predictor 1 have a larger effect on my dependent variable than predictor 2?". In other words, it tests the following hypotheses:

* $H_{0}: \beta_{1} - \beta_{2} = 0$ (our null-hypothesis, i.e. there is no difference)
* $H_{a}: \beta_{1} - \beta_{2} \neq 0$ (our alternative hypothesis, i.e. there is some difference)

Let's look at an example of how we would evaluate the simple hypothesis that a beta has *some* effect on the dependent variable.
Say we'd have an experimental design with 6 conditions: * condition 1: images of male faces with a happy expression * condition 2: images of male faces with a sad expression * condition 3: images of male faces with a neutral expression * condition 4: images of female faces with a happy expression * condition 5: images of female faces with a sad expression * condition 6: images of female faces with a neutral expression Let's assume we have fMRI data from a run with 100 volumes. We then have a target-signal of shape ($100 \times 1$) and a design-matrix (after convolution with a canonical HRF) of shape ($100 \times 7$) (the first predictor is the intercept!). We load in this data below: ```python data = np.load('data/data_contrast_example.npz') X, y = data['X'], data['y'] print("Shape of X: %s" % (X.shape,)) print("Shape of y: %s" % (y.shape,)) ``` After performing linear regression with these 6 predictors (after convolving the stimulus-onset times with an HRF, etc. etc.), you end up with 7 beta values: ```python betas = lstsq(X, y, rcond=None)[0] betas = betas.squeeze() # this is important for later print("Betas corresponding to our 6 conditions (and intercept):\n%r" % betas.T) ``` The first beta corresponds to the intercept, the second beta to the male/happy predictor, the third beta to the male/sad predictor, etc. etc. Now, suppose that we'd like to test whether images of male faces with a sad expression have an influence on voxel activity (our dependent variable). The first thing you need to do is extract this particular beta value from the array with beta values (I know this sounds really trivial, but bear with me): ```python beta_male_sad = betas[2] print("The extracted beta is %.3f" % beta_male_sad) ``` In neuroimaging analyses, however, this is usually done slightly differently: using **contrast-vectors**. Basically, it specifies your specific hypothesis about your beta(s) of interest in a vector. Before explaining it in more detail, let's look at it in a code example: ```python # Again, we'd want to test whether the beta of "male_sad" is different from 0 contrast_vector = np.array([0, 0, 1, 0, 0, 0, 0]) contrast = (betas * contrast_vector).sum() # we simply elementwise multiply the contrast-vector with the betas and sum it! print('The beta-contrast is: %.3f' % contrast) ``` "Wow, what a tedious way to just select the third value of the beta-array", you might think. And, in a way, this is indeed somewhat tedious for a contrast against baseline. But let's look at a case where you would want to investigate whether two betas are different - let's say whether male sad faces have a larger effect on our voxel than male happy faces. Again, you *could* do this: ```python beta_difference = betas[2] - betas[1] print("Difference between betas: %.3f" % beta_difference) ``` ... but you could also use a contrast-vector: ```python contrast_vector = np.array([0, -1, 1, 0, 0, 0, 0]) contrast = (betas * contrast_vector).sum() print('The contrast between beta 2 and beta 1 is: %.3f' % contrast) print('This is exactly the same as beta[2] - beta[1]: %.3f' % (betas[2]-betas[1])) ``` "Alright, so using contrast-vectors is just a fancy way of extracting and subtracting betas from each other ...", you might think. In a way, that's true. But you have to realize that once the hypotheses you want to test become more complicated, using contrast-vectors actually starts to make sense. Let's look at some more elaborate hypotheses. 
First, let's test whether male faces lead to higher voxel activity than female faces, *regardless of emotion*:

```python
# male faces > female faces
contrast_vector = [0, 1, 1, 1, -1, -1, -1]
male_female_contrast = (contrast_vector * betas).sum()
print("Male - female contrast (regardless of expression): %.2f" % male_female_contrast)
```

... or whether emotional faces (regardless of *which* exact emotion) lead to higher activity than neutral faces:

```python
# Emotion (regardless of which emotion, i.e., regardless of sad/happy) - neutral
contrast_vector = [0, 1, 1, -2, 1, 1, -2]
emo_neutral_contrast = (contrast_vector * betas).sum()
print("Emotion - neutral contrast (regardless of which emotion): %.2f" % emo_neutral_contrast)
```

See how contrast-vectors come in handy when calculating (more intricate) comparisons? In the male-female contrast, for example, instead of 'manually' picking out the 'male' betas ('happy_male', 'sad_male', 'neutral_male'), averaging them, and subtracting the average of the 'female' betas ('happy_female', 'sad_female', 'neutral_female'), you can simply specify a contrast-vector, multiply it with your betas, and sum them. That's it.

<div class='alert alert-info'>
<b>ToThink</b>
</div>

In the last contrast (`emo_neutral_contrast`), we set all the "emotional" predictors (sad/happy) to 1, but the neutral predictors to minus *2* ... Why are these set to -2 and not -1? Write your answer below.

The weights have to sum to 0. If you'd use -1, you would "weigh" the emotional predictors twice as heavily as the neutral predictors.

<div class='alert alert-warning'>
<b>ToDo</b>
</div>

Create a contrast vector for the hypothesis: sad faces (regardless of whether they're male or female) activate this voxel more than neutral faces (regardless of whether they're male or female). Multiply this contrast vector with the betas and store the result in a variable named `contrast_todo`.

```python
# Implement the sad - neutral contrast here:
cvec = ...
contrast_todo = ...
```

```python
''' Tests the above ToDo (only hidden tests). '''
assert(np.round(contrast_todo, 3) == -0.521)
```

We're not only telling you about contrasts because we think it's an elegant way of computing beta-comparisons, but also because virtually every major neuroimaging software package uses them, so that you can specify exactly which hypotheses you want to test! You'll also see this when we're going to work with FSL (in week 5!) to perform automated whole-brain linear regression analyses.

Knowing how contrast-vectors work, we can now extend our formula for t-tests of beta-parameters such that it can describe **every possible test** (not only t-tests, but also ANOVAs, F-tests, etc.) of betas (against 'baseline') or between betas that you can think of:

Our 'old' formula of the t-test of a beta-parameter:

\begin{align}
t_{\hat{\beta}} = \frac{\hat{\beta}_{j}}{SE_{\hat{\beta}}}
\end{align}

And now our 'generalized' version of the t-test of *any* contrast/hypothesis:

\begin{align}
t_{\mathbf{c}\hat{\beta}} = \frac{\sum_{j=1}^{P}{c_{j}\hat{\beta}_{j}}}{SE_{\mathbf{c}\hat{\beta}}}
\end{align}

in which $\mathbf{c}$ represents the entire contrast-vector, and $c_{j}$ represents the $j^{th}$ value in our contrast vector.

By the way, we can simplify the (notation of the) numerator a little bit using a matrix algebra trick. Remember that multiplying two (equal length) vectors with each other and then summing the values together is the same thing as the (inner) "dot product" between the two vectors?
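As a quick numerical illustration of that equivalence (with made-up numbers, separate from the ToDo that follows):

```python
# Hypothetical example values, just to show that multiply-and-sum equals the dot product
b = np.array([0.5, -1.0, 2.0])
c = np.array([1, 0, -1])
print("multiply-and-sum: %.2f" % (c * b).sum())
print("dot product:      %.2f" % c.dot(b))  # both print -1.50
```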
Note that you can also write this elementwise multiplication and sum of the contrast-vector and the betas in a vectorized way (using the dot-product):

\begin{align}
t_{\mathbf{c}\hat{\beta}} = \frac{\mathbf{c}\hat{\beta}}{SE_{\mathbf{c}\hat{\beta}}}
\end{align}

<div class='alert alert-warning'>
<b>ToDo</b>
</div>

Convince yourself that the elementwise multiplication and sum is mathematically exactly the same as the dot product! Below, we initialized a hypothetical vector with beta-values (`some_betas`) and a hypothetical contrast-vector (`some_cvec`). First, implement the "multiply and sum" approach and then implement the "dot product" approach. You should find that both give you exactly the same value: -3.34

```python
some_betas = np.array([1.23, 2.95, 3.33, 4.19])
some_cvec = np.array([1, 1, -1, -1])

# Try to implement both approaches and convince yourself that it's
# mathematically the same!
```

So, you need the contrast vector in the *numerator* of the t-value formula (i.e., $\mathbf{c}\hat{\beta}$), but it turns out that you actually also need the contrast-vector in the denominator, because it's part of the calculation of the design variance. Again, we will discuss how this works exactly next week. In the function `design_variance`, it is also possible to calculate the design variance for a particular contrast (not just a single predictor) by passing a contrast vector to the `which_predictor` argument. We'll show this below:

```python
# E.g., get design-variance of happy/male - sad/male
c_vec = np.array([0, 1, -1, 0, 0, 0, 0])  # our contrast vector!
dvar = design_variance(X, which_predictor=c_vec)  # pass c_vec to which_predictor
print("Design variance of happy/male - sad/male: %.3f" % dvar)
```

For the rest of the ToDos in this lab (you're almost done, don't worry!), make sure to pass your contrast-vector to the `design_variance` function in order to calculate it correctly.

Now you know enough to do it yourself!

<div class='alert alert-warning'>
<b>ToDo</b>
</div>

Calculate the t-value and p-value for the hypothesis "sad faces have a larger effect than happy faces (regardless of gender) on our dependent variable" (i.e. voxel activity). In other words, test the hypothesis: $\beta_{sad} - \beta_{happy} \neq 0$ (note that this is a two-sided test!). Store the t-value and p-value in the variables `tval_todo` and `pval_todo` respectively.

We reload the variables below (we'll call them `X_new` and `y_new`) to make sure you're working with the correct data. Note that the `X_new` variable already contains an intercept; the other six columns correspond to the different predictors (male/happy, male/sad, etc.).

In summary, you have to do the following:

- you don't have to calculate the betas; this has already been done (they are stored in the variable `betas`)
- calculate "sigma-hat" ($SSE / \mathrm{df}$)
- calculate the design-variance (use the `design_variance` function with a proper contrast-vector)
- calculate the contrast ($\mathbf{c}\hat{\beta}$)
- calculate the t-value and p-value

```python
data = np.load('data_contrast_example.npz')
X_new, y_new = data['X'], data['y']
print("Shape of X: %s" % (X_new.shape,))
print("Shape of y: %s" % (y_new.shape,))

from scipy.stats import t

cvec = ...
this_dvar = ...
y_hat = ...
this_sse = ...
tval_todo = ...
pval_todo = ...
```

```python
''' Part 1 of testing the above ToDo (only hidden tests). '''
print("Only hidden tests!")
np.testing.assert_almost_equal(tval_todo, 1.2646, decimal=4)
```

```python
''' Part 2 of testing the above ToDo (only hidden tests).
''' print("Ony hidden tests!") np.testing.assert_almost_equal(pval_todo, 0.2092, decimal=4) ``` ### 3.4. F-tests on contrasts In the previous section we discussed how to calculate t-values for single contrasts. However, sometimes you might have an hypothesis about multiple contrasts at the same time. This may sound weird, but let's consider an experiment. Suppose you have data from an experiment in which you showed images circles which were either blue, red, or green. In that case, you have three predictors. Then, you could have very specific question, like "Do blue circles activate a voxel significantly compared to baseline", which corresponds to the following null and alternative hypothesis: * $H_{0}: \beta_{blue} = 0$ (our null-hypothesis, i.e. there is no activation compared to baseline) * $H_{a}: \beta_{blue} > 0$ (our alternative hypotehsis, i.e. blue activates relative to baseline) However, you can also have a more general question, like "Does the presentation of *any* circle (regardless of color) activate a voxel compared to baseline?". This question represents the following null and alternative hypothesis: * $H_{0}: \beta_{blue} = \beta_{red} = \beta_{green} = 0$ * $H_{a}: (\beta_{blue} > 0) \vee (\beta_{red} > 0) \vee (\beta_{green} > 0)$ The $\vee$ symbol in the alternative hypothesis means "or". So the alternative hypothesis nicely illustrates our question: is there *any* condition (circle) that activates a voxel more than baseline? This hypothesis-test might sound familiar, because it encompasses the **F-test**! In other words, an F-test tests *a collection of contrasts* together. In the example here, the F-test tests the following contrasts together (ignoring the intercept) of our beta-parameters: * `[1, 0, 0]` ($red > 0$) * `[0, 1, 0]` ($blue > 0$) * `[0, 0, 1]` ($green > 0$) Thus, a F-test basically tests this contrast-*matrix* all at once! Therefore, the F-tests is a type of "omnibus test"! Now, let's look at the math behind the F-statistic. The F-statistic for set of $K$ contrasts (i.e., the number of rows in the contrast-matrix) is defined as follows: \begin{align} F = (\mathbf{c}\hat{\beta})'[K\mathbf{c}((X'X)^{-1}\hat{\sigma}^{2})\mathbf{c}']^{-1}(\mathbf{c}\hat{\beta}) \end{align} With a little imagination, you can see how the F-test is an extension of the t-test of a single contrast to accomodate testing a set of contrasts together. Don't worry, you don't have to understand how the formula for the F-statistic works mathematically and you don't have to implement this in Python. But you **do** need to understand what type of hypothesis an F-test tests! Let's practice this in a ToDo! <div class='alert alert-warning'> <b>ToDo</b> </div> Remember the temporal basis sets from before? Suppose we have an experiment with two conditions ("A" and "B") and suppose we've created a design matrix based on convolution with a single-gamma basis set (with a canonical HRF, its temporal derivative, and its dispersion derivative). Together with the intercept, the design matrix thus has 7 columns (2 conditions * 3 HRF + intercept). The order of the columns is as follows: * column 1: intercept * column 2: canonical HRF "A" * column 3: temporal deriv "A" * column 4: dispersion deriv "A" * column 5: canonical HRF "B" * column 6: temporal deriv "B" * column 7: dispersion deriv "B" Suppose I want to test whether there is *any* difference in response to condition "A" ($A > 0$) compared to baseline, and *I don't care what element of the HRF caused it*. I can use an F-test for this. 
What would the corresponding contrast-*matrix* (in which each row represents a different contrast) look like? We've created an 'empty' (all-zeros) 2D matrix below with three rows. It's up to you to fill in the matrix such that it can be used to test the above question/hypothesis. ```python # Fill in the correct values! contrast_matrix = np.array([ [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0] ]) ```
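If you're curious anyway, the F-statistic formula from the previous section is only a few lines of NumPy. The sketch below is not an official solution to any of the exercises; it is a direct transcription of the formula, and it assumes that `betas`, the design matrix `X`, and the noise estimate `sigma_hat` all come from the *same* fitted model. The contrast-matrix shown is the blue/red/green example from the explanation above, purely as an illustration:

```python
# Minimal sketch of the F-statistic formula (a transcription, not an official solution)
def f_statistic(cmat, betas, X, sigma_hat):
    K = cmat.shape[0]                               # number of contrasts (rows)
    cB = cmat.dot(betas)                            # contrast of betas, shape (K,)
    middle = K * cmat.dot(np.linalg.pinv(X.T.dot(X))).dot(cmat.T) * sigma_hat
    return cB.T.dot(np.linalg.pinv(middle)).dot(cB)

# e.g., "any circle > baseline" for a model with [intercept, blue, red, green] columns
cmat_any_circle = np.array([[0, 1, 0, 0],
                            [0, 0, 1, 0],
                            [0, 0, 0, 1]])
```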
{"hexsha": "eccea2b739e090cc6f6c1e8883c3e2b957ed3be7", "size": 165919, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Kopie_van_glm.ipynb", "max_stars_repo_name": "aidenaislinn/python-for-text-analysis", "max_stars_repo_head_hexsha": "e27fb4a0a982246d2f5c7e9b40de9140f7956cd3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Kopie_van_glm.ipynb", "max_issues_repo_name": "aidenaislinn/python-for-text-analysis", "max_issues_repo_head_hexsha": "e27fb4a0a982246d2f5c7e9b40de9140f7956cd3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Kopie_van_glm.ipynb", "max_forks_repo_name": "aidenaislinn/python-for-text-analysis", "max_forks_repo_head_hexsha": "e27fb4a0a982246d2f5c7e9b40de9140f7956cd3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.7100511991, "max_line_length": 996, "alphanum_fraction": 0.5729964621, "converted": true, "num_tokens": 28282}
[STATEMENT] lemma is_subprob_densityI[intro]: "\<lbrakk>f \<in> borel_measurable M; \<And>x. x \<in> space M \<Longrightarrow> f x \<ge> 0; space M \<noteq> {}; (\<integral>\<^sup>+x. f x \<partial>M) \<le> 1\<rbrakk> \<Longrightarrow> is_subprob_density M f" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>f \<in> borel_measurable M; \<And>x. x \<in> space M \<Longrightarrow> 0 \<le> f x; space M \<noteq> {}; integral\<^sup>N M f \<le> 1\<rbrakk> \<Longrightarrow> is_subprob_density M f [PROOF STEP] unfolding is_subprob_density_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>f \<in> borel_measurable M; \<And>x. x \<in> space M \<Longrightarrow> 0 \<le> f x; space M \<noteq> {}; integral\<^sup>N M f \<le> 1\<rbrakk> \<Longrightarrow> f \<in> borel_measurable M \<and> space M \<noteq> {} \<and> (\<forall>x\<in>space M. 0 \<le> f x) \<and> integral\<^sup>N M f \<le> 1 [PROOF STEP] by simp
{"llama_tokens": 384, "file": "Density_Compiler_Density_Predicates", "length": 2}
import numpy as np

from utils.load_config import load_config
from models.RBF import RBF

"""
small script to test the RBF 2d convolution

run: python -m tests.RBF.t01_2d_rbf
"""

# define configuration
config_path = 'RBF_t01_2d_m0001.json'

# load config
config = load_config(config_path, path='configs/RBF')

# build a 7x7 test pattern with a small blob in the upper-left region
data = np.zeros((7, 7))
data[1, 1] = .2
data[1, 2] = .5
data[1, 3] = .2
data[2, 1] = .5
data[2, 2] = 1
data[2, 3] = .5
data[3, 1] = .2
data[3, 2] = .5
data[3, 3] = .2
print("data")
print(data)

# expand data for RBF (add batch and channel dimensions)
data = np.expand_dims(data, axis=0)
data = np.expand_dims(data, axis=3)
print("shape data", np.shape(data))
print()

# use the 3x3 blob itself as the template
template = data[:, 1:4, 1:4, :]
print("template")
print(template[0, ..., 0])
print()

rbf = RBF(config)

pred = rbf.fit2d(template)
print("pred", np.shape(pred))
print(pred[0, ..., 0])

test = rbf.predict2d(data)
print("test", np.shape(test))  # report the shape of the prediction, not the input
print(test[:, ..., 0])
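# (Hypothetical follow-up, not in the original script) a scalar summary of the response map;
# assumes predict2d returns an array-like of responses.
print("max response:", np.max(test))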
{"hexsha": "c24b23cf5dbde150ecf3b722ab72bc3a9cf2dcaa", "size": 906, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/RBF/t01_2d_rbf.py", "max_stars_repo_name": "michaelStettler/BVS", "max_stars_repo_head_hexsha": "947f1e505ccc3ebcf1926c8f52924d823bd1e101", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/RBF/t01_2d_rbf.py", "max_issues_repo_name": "michaelStettler/BVS", "max_issues_repo_head_hexsha": "947f1e505ccc3ebcf1926c8f52924d823bd1e101", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/RBF/t01_2d_rbf.py", "max_forks_repo_name": "michaelStettler/BVS", "max_forks_repo_head_hexsha": "947f1e505ccc3ebcf1926c8f52924d823bd1e101", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.2765957447, "max_line_length": 53, "alphanum_fraction": 0.6556291391, "include": true, "reason": "import numpy", "num_tokens": 331}
# (GEM) iJR904 (Reed et al., 2003) (download link: https://darwin.di.uminho.pt/models) # TODO: change to alternative link!!! # (GEM) Alternative link http://bigg.ucsd.edu/static/models/iJR904.mat module Chemostat_Heerden2013 import Chemostat const Ch = Chemostat import UtilsJL const UJL = UtilsJL UJL.gen_top_proj(@__MODULE__) include("Utils/Utils.jl") include("BegData/BegData.jl") include("HeerdenData/HeerdenData.jl") include("iJR904/iJR904.jl") # include("EColiCore/EColiCore.jl") function __init__() UJL.create_proj_dirs(@__MODULE__) end end # module
{"hexsha": "f358a8a82a574f0c5fb3d868dfef414ffbd83b76", "size": 618, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Chemostat_Heerden2013.jl", "max_stars_repo_name": "josePereiro/Chemostat_Heerden2013.jl", "max_stars_repo_head_hexsha": "00f97a57a64daf8d175b2eb22871c5fc83d2e6f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Chemostat_Heerden2013.jl", "max_issues_repo_name": "josePereiro/Chemostat_Heerden2013.jl", "max_issues_repo_head_hexsha": "00f97a57a64daf8d175b2eb22871c5fc83d2e6f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Chemostat_Heerden2013.jl", "max_forks_repo_name": "josePereiro/Chemostat_Heerden2013.jl", "max_forks_repo_head_hexsha": "00f97a57a64daf8d175b2eb22871c5fc83d2e6f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.75, "max_line_length": 86, "alphanum_fraction": 0.6957928803, "num_tokens": 199}
#!/usr/bin/env python
# coding: utf-8

# $\newcommand{\mb}[1]{\mathbf{ #1 }}$
# $\newcommand{\bs}[1]{\boldsymbol{ #1 }}$
# $\newcommand{\bb}[1]{\mathbb{ #1 }}$
#
# $\newcommand{\R}{\bb{R}}$
#
# $\newcommand{\ip}[2]{\left\langle #1, #2 \right\rangle}$
# $\newcommand{\norm}[1]{\left\Vert #1 \right\Vert}$
#
# $\newcommand{\der}[2]{\frac{\mathrm{d} #1 }{\mathrm{d} #2 }}$
# $\newcommand{\derp}[2]{\frac{\partial #1 }{\partial #2 }}$
#
# # Finite Dimensional Koopman Bilinear System
#
# Consider a nonlinear dynamical system that allows an exact finite dimensional Koopman canonical transform such that the control-affine dynamics can be transformed to a bilinear system. Consider the dynamical system
# \begin{equation}
# \mb{\dot{x}}=\mb{f}_0(\mb x) + \mb f_1 ( \mb x) u_1 + \mb f_2(\mb x) u_2,
# \end{equation}
# where we for this example choose $\mb f_0, \mb f_1$ as follows:
# \begin{equation}
# \mb f_0(\mb x) = \begin{bmatrix} x_3 \\ x_4 \\ \lambda x_3 \\ \mu x_4 + (2 \lambda - \mu) c x_3^2 \end{bmatrix}, \qquad
# \mb f_1(\mb x) = \begin{bmatrix} 0 \\ 0 \\ 1 \\ 0 \end{bmatrix}, \qquad
# \mb f_2(\mb x) = \begin{bmatrix} 0 \\ 0 \\ 0 \\ x_1+1 \end{bmatrix},
# \end{equation}
# and $\lambda, \mu, c \in \mathbb{R}$ are scalar parameters of the system. Setting $\mb x = [q_1 \, q_2 \, \dot{q}_1 \, \dot{q}_2]^T$,
# these equations of motion can be expressed as robotic dynamics of the form $\mb{D}(\mb{q})\ddot{\mb{q}} + \mb{C}(\mb{q}, \dot{\mb{q}})\dot{\mb{q}} + \mb{G}(\mb{q}) = \mb{B}\mb{u}$, where $\mb D$ is the inertia matrix, $\mb C$ is the matrix of Coriolis terms, $\mb G$ is the matrix of gravitational terms, and $\mb B$ is the static actuation matrix. Rewriting $\mb f_0, \mb f_1, \mb f_2$ in terms of $\mb D, \mb C, \mb G,$ and $\mb B$ yields
#
# \begin{equation}
# \mb D(\mb q) = \begin{bmatrix} 1 & 0\\ 0 & \frac{1}{q_1+1} \end{bmatrix},
# \qquad \mb C(\mb q, \mb{\dot{q}}) = -\begin{bmatrix} \lambda & 0 \\ \frac{1}{q_1 + 1}(2 \lambda - \mu) c \dot{q}_1 & \frac{1}{q_1 +1} \mu \end{bmatrix}, \qquad
# \mb G(\mb q) = \begin{bmatrix} 0 \\ 0 \end{bmatrix}
# \qquad \mb B = \begin{bmatrix}1 & 0 \\ 0 & 1 \end{bmatrix},
# \end{equation}
#
# As a result of the careful construction of this system, there exists a Koopman canonical transform, $\mb z = T(\mb x)$, that exactly transforms the control-affine dynamics into a bilinear system. Consider the transformation:
# \begin{equation}
# T(\mb q, \mb{\dot{q}}) = \begin{bmatrix}
# \phi_1(\mb q, \mb{\dot{q}})\\
# \phi_2(\mb q, \mb{\dot{q}})\\
# \phi_3(\mb q, \mb{\dot{q}})\\
# \phi_4(\mb q, \mb{\dot{q}})\\
# \phi_5(\mb q, \mb{\dot{q}})\\
# \phi_6(\mb q, \mb{\dot{q}})\\
# \end{bmatrix}
# = \begin{bmatrix}
# 1\\
# q_1 - \frac{1}{\lambda}\dot{q}_1\\
# q_2 - \frac{1}{\mu} \dot{q}_2 + \frac{(2 \lambda - \mu)c}{2\lambda \mu} \dot{q}_1^2\\
# \dot{q}_1\\
# \dot{q}_2 - c \dot{q}_1^2\\
# \dot{q}_1^2\\
# \end{bmatrix},
# \end{equation}
# where $\phi_1, \phi_2, \phi_3, \phi_4, \phi_5, \phi_6$ are eigenfunctions of the Koopman operator associated with the drift
# vector field $\mb f_0$. The matrix with the eigenvalue associated with the i-th eigenfunction on the i-th diagonal
# element is $F=\text{diag}(0, 0, 0, \lambda, \mu, 2 \lambda)$.
# Then, to reformulate the dynamics we have:
# \begin{equation}
# L_{\mb f_1} T(\mb q, \mb{\dot{q}}) = \begin{bmatrix} 0\\ -\frac{1}{\lambda}\\ \frac{(2\lambda - \mu)c}{\lambda \mu}\dot{q}_1\\ 1 \\ -2c\dot{q}_1 \\ 2\dot{q}_1 \end{bmatrix}, \qquad
# L_{\mb f_2} T(\mb q, \mb{\dot{q}}) = \begin{bmatrix} 0 \\ 0\\ -\frac{1}{\mu}(q_1 + 1)\\0 \\ q_1 + 1 \\ 0 \end{bmatrix}
# \end{equation}
# and the dynamics can be equivalently transformed to a bilinear form $\mb{\dot{z}} = F \mb z + G_1\mb z u_1 + G_2\mb z u_2$ with
# \begin{equation}
# F = \begin{bmatrix}
# 0 & 0 & 0 & 0 & 0 & 0\\
# 0 & 0 & 0 & 0 & 0 & 0\\
# 0 & 0 & 0 & 0 & 0 & 0\\
# 0 & 0 & 0 & \lambda & 0 & 0\\
# 0 & 0 & 0 & 0 & \mu & 0 \\
# 0 & 0 & 0 & 0 & 0 & 2 \lambda\\
# \end{bmatrix}, \qquad
# G_1 = \begin{bmatrix}
# 0 & 0 & 0 & 0 & 0 & 0\\
# -\frac{1}{\lambda} & 0 & 0 & 0 & 0 & 0\\
# 0 & 0 & 0 & \frac{(2\lambda - \mu)c}{\lambda \mu} & 0 & 0\\
# 1 & 0 & 0 & 0 & 0 & 0\\
# 0 & 0 & 0 & -2c & 0 & 0\\
# 0 & 0 & 0 & 2 & 0 & 0\\
# \end{bmatrix}, \qquad
# G_2 = \begin{bmatrix}
# 0 & 0 & 0 & 0 & 0 & 0\\
# 0 & 0 & 0 & 0 & 0 & 0\\
# -\frac{1}{\mu} & -\frac{1}{\mu} & 0 & -\frac{1}{\lambda \mu} & 0 & 0\\
# 0 & 0 & 0 & 0 & 0 & 0\\
# 1 & 1 & 0 & \frac{1}{\lambda} & 0 & 0\\
# 0 & 0 & 0 & 0 & 0 & 0\\
# \end{bmatrix}
# \end{equation}

# In[1]:


import numpy as np
import sys
sys.path.append('../../')


# # Define experiment parameters

# In[2]:


from core.dynamics import RoboticDynamics, ConfigurationDynamics

class KoopPdOutput(ConfigurationDynamics):
    def __init__(self, dynamics, xd, n, m):
        ConfigurationDynamics.__init__(self, dynamics, 1)
        self.xd = xd
        self.n = n
        self.m = m

    def proportional(self, x, t):
        q = x[:int(n/2)]
        q_d = self.xd[:int(n/2)]
        return q - q_d

    def derivative(self, x, t):
        q_dot = x[int(n/2):]
        q_dot_d = self.xd[int(n/2):]
        return q_dot - q_dot_d

class FiniteDimKoopSys(RoboticDynamics):
    def __init__(self, lambd, mu, c):
        RoboticDynamics.__init__(self, 2, 2)
        self.params = lambd, mu, c

    def D(self, q):
        return np.array([[1, 0], [0, (q[0]+1)**(-1)]])

    def C(self, q, q_dot):
        lambd, mu, c = self.params
        return -np.array([[lambd, 0], [(q[0]+1)**(-1)*(2*lambd - mu)*c*q_dot[0], (q[0]+1)**(-1)*mu]])

    def G(self, q):
        return np.array([0, 0])

    def B(self, q):
        return np.array([[1, 0], [0, 1]])

n, m = 4, 2
lambd, mu, c = .3, .2, -.5
sys_name = 'bilinearizable_sys'
system = FiniteDimKoopSys(lambd, mu, c)


# In[3]:


from koopman_core.dynamics import LinearLiftedDynamics

A_lin = np.array([[0, 0, 1, 0],
                  [0, 0, 0, 1],
                  [0, 0, lambd, 0],
                  [0, 0, 0, mu]])
B_lin = np.array([[0, 0],
                  [0, 0],
                  [1, 0],
                  [0, 1]])
dt = 1e-2
linearized_sys = LinearLiftedDynamics(A_lin, B_lin, np.eye(n), lambda x: x)


# # Collect data for learning

# In[4]:


import scipy as sc
import os

q_dc, r_dc = 5e2, 1                                 # State and actuation penalty values, data collection
Q_dc = q_dc * np.identity(n)                        # State penalty matrix, data collection
R_dc = r_dc*np.identity(m)                          # Actuation penalty matrix, data collection
P_dc = sc.linalg.solve_continuous_are(A_lin, B_lin, Q_dc, R_dc)  # Algebraic Ricatti equation solution, data collection
K_dc = np.linalg.inv(R_dc)@B_lin.T@P_dc             # LQR feedback gain matrix, data collection
K_dc_p = K_dc[:,:int(n/2)]                          # Proportional control gains, data collection
K_dc_d = K_dc[:,int(n/2):]                          # Derivative control gains, data collection

# Data collection parameters:
collect_data = False
dt = 1.0e-2                                         # Time step length
traj_length_dc = 2.
# Trajectory length, data collection n_pred_dc = int(traj_length_dc/dt) # Number of time steps, data collection t_eval = dt * np.arange(n_pred_dc + 1) # Simulation time points n_traj_train = 100 # Number of trajectories to execute, data collection n_traj_val = int(0.2*n_traj_train) noise_var = 5. # Exploration noise to perturb controller, data collection xmax = np.array([1., 1., 1., 1.]) # State constraints, trajectory generation xmin = -xmax umax = np.array([10., 10.]) # Actuation constraint, trajectory generation umin = -umax x0_max = xmax/2 # Initial value limits sub_sample_rate = 1 # Rate to subsample data for training model_fname = 'examples/' # Path to save learned models n_cols = 10 # Number of columns in training data plot directory = os.path.abspath("") # Path to save learned models # In[5]: from koopman_core.util import run_experiment import dill if collect_data: xs_train, us_train, t_train = run_experiment(system, n, n_traj_train, n_pred_dc, t_eval, x0_max, plot_experiment_data=True, m=m, K_p=K_dc_p, K_d=K_dc_d, noise_var=noise_var) xs_val, us_val, t_val = run_experiment(system, n, n_traj_val, n_pred_dc, t_eval, x0_max, m=m, K_p=K_dc_p, K_d=K_dc_d, noise_var=noise_var) data_list = [xs_train, us_train, t_train, n_traj_train, xs_val, us_val, t_val, n_traj_val] outfile = open(directory + '/data/' + sys_name + '_data.pickle', 'wb') dill.dump(data_list, outfile) outfile.close() else: infile = open(directory + '/data/' + sys_name + '_data.pickle', 'rb') xs_train, us_train, t_train, n_traj_train, xs_val, us_val, t_val, n_traj_val = dill.load(infile) infile.close() # # Learn Koopman-based models of the dynamics # ### Learn bilinear EDMD model # In[6]: #Bilinear EDMD parameters: alpha_bedmd = 2.4e-5 # Regularization strength (LASSO) bEDMD tune_mdl_bedmd = False # In[7]: from sklearn import preprocessing, linear_model from koopman_core.learning import BilinearEdmd from koopman_core.dynamics import BilinearLiftedDynamics bedmd_features = preprocessing.PolynomialFeatures(2) bedmd_features.fit(np.zeros((1,n))) n_lift_bedmd = bedmd_features.transform((np.zeros((1,n)))).shape[1] C_bedmd = np.zeros((n,n_lift_bedmd)) C_bedmd[:,1:n+1] = np.eye(n) basis_bedmd = lambda x: bedmd_features.transform(x) optimizer_bedmd = linear_model.MultiTaskLasso(alpha=alpha_bedmd, fit_intercept=False, selection='random') cv_bedmd = linear_model.MultiTaskLassoCV(fit_intercept=False, n_jobs=-1, cv=3, selection='random') #standardizer_bedmd = preprocessing.StandardScaler(with_mean=False) standardizer_bedmd = None model_bedmd = BilinearEdmd(n, m, basis_bedmd, n_lift_bedmd, n_traj_train, optimizer_bedmd, cv=cv_bedmd, standardizer=standardizer_bedmd, C=C_bedmd, continuous_mdl=False, dt=dt) X_bedmd, y_bedmd = model_bedmd.process(xs_train, us_train, np.tile(t_train,(n_traj_train,1)), downsample_rate=sub_sample_rate) model_bedmd.fit(X_bedmd, y_bedmd, cv=tune_mdl_bedmd, override_kinematics=True) sys_bedmd = BilinearLiftedDynamics(model_bedmd.n_lift, m, model_bedmd.A, model_bedmd.B, model_bedmd.C, model_bedmd.basis, continuous_mdl=False, dt=dt) if tune_mdl_bedmd: print('$\\alpha$ bilinear EDMD: ', model_bedmd.cv.alpha_) # ### Learn Koopman DNN model # In[8]: import dill, os, torch load_tuned_params = False if load_tuned_params: infile = open(os.path.abspath('') + '/data/analytic_koop_sys_best_params.pickle', 'rb') best_config, val_loss, test_loss, open_loop_mse, open_loop_std = dill.load(infile) infile.close() else: net_params = {} net_params['state_dim'] = n net_params['ctrl_dim'] = m net_params['encoder_hidden_width'] = 100 
    net_params['encoder_hidden_depth'] = 1
    net_params['encoder_output_dim'] = 1
    net_params['optimizer'] = 'adam'
    net_params['activation_type'] = 'relu'
    net_params['lr'] = 2e-3
    net_params['epochs'] = 100
    net_params['batch_size'] = 128
    net_params['lin_loss_penalty'] = 0.5
    net_params['l2_reg'] = 0
    net_params['l1_reg'] = 0
    net_params['first_obs_const'] = True
    net_params['override_kinematics'] = False  # TODO: Fix override kin...
    net_params['dt'] = dt

print(net_params)


# In[9]:


from koopman_core.learning import KoopDnn, KoopmanNetCtrl
from koopman_core.util import fit_standardizer

standardizer_x_kdnn = fit_standardizer(xs_train, preprocessing.StandardScaler())
standardizer_u_kdnn = fit_standardizer(us_train, preprocessing.StandardScaler())

n_tot = n + net_params['encoder_output_dim'] + int(net_params['first_obs_const'])

net = KoopmanNetCtrl(net_params, standardizer_x=standardizer_x_kdnn, standardizer_u=standardizer_u_kdnn)
model_koop_dnn = KoopDnn(net)
model_koop_dnn.set_datasets(xs_train, t_train, u_train=us_train, x_val=xs_val, u_val=us_val, t_val=t_val)
model_koop_dnn.model_pipeline(net_params)
model_koop_dnn.construct_koopman_model()
sys_koop_dnn = BilinearLiftedDynamics(n_tot, m, model_koop_dnn.A, model_koop_dnn.B, model_koop_dnn.C,
                                      model_koop_dnn.basis_encode, continuous_mdl=False, dt=dt,
                                      standardizer_x=standardizer_x_kdnn, standardizer_u=standardizer_u_kdnn)


# In[10]:


sys_koop_dnn.A


# # Evaluate open-loop prediction performance

# In[11]:


# Prediction performance evaluation parameters:
folder_plots = 'figures/'                           # Path to save plots
n_traj_ol = 50                                      # Number of trajectories to execute, open loop


# In[12]:


from koopman_core.util import evaluate_ol_pred
from tabulate import tabulate
import random as rand

xs_ol, us_ol, t_ol = run_experiment(system, n, n_traj_ol, n_pred_dc, t_eval, x0_max,
                                    m=m, K_p=K_dc_p, K_d=K_dc_d, noise_var=noise_var)

mdl_lst = [sys_koop_dnn, sys_bedmd]
mdl_names = ['Koop DNN', 'bEDMD']
error, mse, std = [], [], []

for sys in mdl_lst:
    err_tmp, mse_tmp, std_tmp = evaluate_ol_pred(sys, xs_ol, t_eval, us=us_ol)
    error.append(err_tmp)
    mse.append(mse_tmp)
    std.append(std_tmp)

print('\nOpen loop performance statistics:')
table_data = []
for name, mse_mdl, std_mdl in zip(mdl_names, mse, std):
    table_data.append([name, "{:.5f}".format(mse_mdl), "{:.5f}".format(std_mdl)])
print(tabulate(table_data, headers=['Model', 'Mean squared error', 'Standard deviation']))


# In[13]:


import matplotlib.pyplot as plt
import matplotlib

figwidth = 12
lw = 2
fs = 14
y_lim_gain = 1.2
row = 2
col = int(n/row)

# Plot open loop results:
plt.figure(figsize=(figwidth,4))
axs = [plt.subplot(row,col,jj+1) for jj in range(n)]

for ii, err in enumerate(error):
    err_mean = np.mean(err, axis=0)
    err_std = np.std(err, axis=0)

    for jj in range(n):
        axs[jj].plot(t_eval[1:], err_mean[:,jj], label=mdl_names[ii])
        axs[jj].fill_between(t_eval[1:], err_mean[:,jj]-err_std[:,jj], err_mean[:,jj]+err_std[:,jj], alpha=0.1)

for jj in range(n):
    axs[jj].grid()
    axs[jj].set_xlabel('Time (sec)', fontsize=fs)
    axs[jj].set_ylabel('$x_'+ str(jj+1) + '$', fontsize=fs)

plt.legend(frameon=False, fontsize=fs)
stitle=plt.suptitle('Open loop prediction performance of learned models', fontsize=fs+2)
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
plt.savefig(folder_plots + 'koop_sys_prediction.pdf', format='pdf', dpi=2400,
            bbox_extra_artists=(stitle,), bbox_inches="tight")
plt.show()


# In[14]:


print(standardizer_u_kdnn.mean_)
print(standardizer_u_kdnn.scale_)
print(standardizer_x_kdnn.mean_)
print(standardizer_x_kdnn.scale_)


# In[ ]:
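# In[ ]:


# Hypothetical sanity check (a sketch, not part of the original experiment): transcribe the
# analytic F, G_1, G_2 matrices from the markdown derivation above and verify that the bilinear
# dynamics z_dot = F z + G_1 z u_1 + G_2 z u_2 match a finite-difference derivative of the
# Koopman canonical transform T(x) along the true dynamics. All matrix entries are copied from
# the derivation; lambd, mu, c are the parameters defined earlier in this notebook.

F_analytic = np.zeros((6, 6))
F_analytic[3, 3], F_analytic[4, 4], F_analytic[5, 5] = lambd, mu, 2 * lambd

G_1 = np.zeros((6, 6))
G_1[1, 0] = -1 / lambd
G_1[2, 3] = (2 * lambd - mu) * c / (lambd * mu)
G_1[3, 0] = 1.
G_1[4, 3] = -2 * c
G_1[5, 3] = 2.

G_2 = np.zeros((6, 6))
G_2[2, 0] = G_2[2, 1] = -1 / mu
G_2[2, 3] = -1 / (lambd * mu)
G_2[4, 0] = G_2[4, 1] = 1.
G_2[4, 3] = 1 / lambd

def T_koop(x):
    # Koopman canonical transform from the derivation in the header
    q1, q2, dq1, dq2 = x
    return np.array([1.,
                     q1 - dq1 / lambd,
                     q2 - dq2 / mu + (2 * lambd - mu) * c / (2 * lambd * mu) * dq1 ** 2,
                     dq1,
                     dq2 - c * dq1 ** 2,
                     dq1 ** 2])

def f_true(x, u):
    # Control-affine dynamics f_0(x) + f_1(x) u_1 + f_2(x) u_2
    q1, q2, dq1, dq2 = x
    return np.array([dq1,
                     dq2,
                     lambd * dq1 + u[0],
                     mu * dq2 + (2 * lambd - mu) * c * dq1 ** 2 + (q1 + 1) * u[1]])

x_test = np.array([0.3, -0.2, 0.1, 0.4])
u_test = np.array([0.5, -0.7])
z = T_koop(x_test)
z_dot_bilinear = F_analytic @ z + G_1 @ z * u_test[0] + G_2 @ z * u_test[1]

eps = 1e-6
z_dot_fd = (T_koop(x_test + eps * f_true(x_test, u_test)) - T_koop(x_test)) / eps
print('max |bilinear - finite difference|:', np.max(np.abs(z_dot_bilinear - z_dot_fd)))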
{"hexsha": "4731a6e0e41551b3e94aafab36c006e655748843", "size": 15733, "ext": "py", "lang": "Python", "max_stars_repo_path": "working_files/bkeedmd/bilinearizable_sys_mpc_nn.py", "max_stars_repo_name": "Cafolkes/koopman_learning_and_control", "max_stars_repo_head_hexsha": "0152a2bd5079da4d672dbaac404b6c084410297d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-11-06T11:32:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T00:06:23.000Z", "max_issues_repo_path": "working_files/bkeedmd/bilinearizable_sys_mpc_nn.py", "max_issues_repo_name": "Cafolkes/koopman-learning-and-control", "max_issues_repo_head_hexsha": "0152a2bd5079da4d672dbaac404b6c084410297d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "working_files/bkeedmd/bilinearizable_sys_mpc_nn.py", "max_forks_repo_name": "Cafolkes/koopman-learning-and-control", "max_forks_repo_head_hexsha": "0152a2bd5079da4d672dbaac404b6c084410297d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-04T09:34:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T09:34:58.000Z", "avg_line_length": 36.3348729792, "max_line_length": 441, "alphanum_fraction": 0.6091654484, "include": true, "reason": "import numpy,import scipy", "num_tokens": 5120}
# SPDX-License-Identifier: Apache-2.0

"""
tf2onnx.tfjs_utils - utilities for parsing tfjs files into onnx graphs
Main functions of interest are graphs_from_tfjs and read_tfjs_graph
"""

import json
import os
import base64
import gzip
import struct
import logging

from onnx import numpy_helper, helper
import numpy as np
from google.protobuf.json_format import ParseDict
import tensorflow as tf
from tensorflow.python.framework import c_api_util
from tensorflow.core.framework import types_pb2, node_def_pb2

from tf2onnx import utils
from tf2onnx.graph import Graph
from tf2onnx import tf_utils

logger = logging.getLogger(__name__)

tf_api_def_map = c_api_util.ApiDefMap()


def read_tfjs_attr(attr, tf_dtypes=False):
    """
    Reads the value of a single tfjs node attribute.
    If tf_dtypes is True, tensorflow dtypes are returned instead of onnx dtypes
    """
    k = list(attr.keys())[0]
    return read_tfjs_attr_helper(k, attr[k], tf_dtypes)


def fix_string_attr(tfjs_node):
    """
    Older tfjs models store strings as lists of ints (representing byte values). This function finds
    and replaces those strings, so protobuf can correctly decode the json.
    """
    def fix(v):
        if isinstance(v, list):
            return base64.encodebytes(bytes(v)).decode()
        return v
    if 'attr' not in tfjs_node:
        return
    for v in tfjs_node['attr'].values():
        if 's' in v:
            v['s'] = fix(v['s'])
        if 'list' in v and 's' in v['list']:
            for i, x in enumerate(v['list']['s']):
                v['list']['s'][i] = fix(x)


def read_tfjs_attr_helper(k, v, tf_dtypes=False):
    """
    A tfjs attribute value is itself a dict with a single key specifying the type and a value with the
    actual data like { axis: { i: -1 }} or { pads: { list: { i: [1, 2, 3, 4] } } }. This helper takes the
    key specifying the type (like 'i' or 'list') and the value and decodes the attribute value.
    """
    supported_types = ['func', 'shape', 'type', 'list', 's', 'i', 'f', 'b']
    utils.make_sure(k in supported_types, "Unrecognized tfjs attribute type %s", k)
    if k == 'list':
        non_empty_keys = [k2 for k2, v2 in v.items() if len(v2) > 0]
        if len(non_empty_keys) == 0:
            return []
        k2 = non_empty_keys[0]
        return [read_tfjs_attr_helper(k2, v2, tf_dtypes) for v2 in v[k2]]
    if k == 'type':
        dtype = v
        if not isinstance(dtype, int):
            dtype = getattr(types_pb2, dtype)
        if not tf_dtypes:
            dtype = tf_utils.map_tf_dtype(dtype)
        return dtype
    if k == 'func':
        return v['name']
    if k == 'shape':
        return [int(d['size']) for d in v.get('dim', [])]
    if k == 's':
        return base64.decodebytes(v.encode())
    if k == 'i':
        # ints are stored in the tfjs json as strings
        return int(v)
    return v


def tfjs_node_to_tf_node_def(node):
    """Converts a tfjs node to a tf node_def for use in tf shape inferencing"""
    node_def = node_def_pb2.NodeDef()
    ParseDict(node, node_def)
    return node_def


def resolve_output(output, op_info, func_name=None):
    """
    Given an output name from a tfjs model and an op_info dict containing info about the nodes available,
    (and the function name if this is a subgraph), returns the canonical name to use as the output name
    in the onnx model. The resulting string is always "node_name:port_number"
    """
    cnt = output.count(':')
    # outputs in the tfjs model can use one of 3 different formats interchangeably.
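    # The three formats are:
    #   "node"                -> implicitly port 0 of "node" (or a bare graph input name)
    #   "node:0"              -> node name plus an explicit port number (the canonical form)
    #   "node:out_arg:index"  -> node name, tf output-arg name, and a sub-index into that arg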
if cnt == 0: # If no port is specified, it is referring to port 0 if output in op_info: return output + ':0' # Output isn't from an op and may be an input (no port number) return output if cnt == 1: # Already in our standard format return output # Format is node_name:output_name:subindex node, output_arg_name, index = output.split(':') if node not in op_info and func_name is not None: # In very rare cases, tfjs prepends the func_name to a node but forgets to fix the outputs long_node_name = func_name + "/" + node if long_node_name in op_info: node = long_node_name op_type, tf_attr, inp_dtypes = op_info[node] names, _ = get_output_names_and_dtypes(op_type, tf_attr, inp_dtypes) idx = names.index(output_arg_name) + int(index) return node + ':' + str(idx) def get_output_names_and_dtypes(op_type, tf_attr, inp_dtypes): """Parses the tf documentation to determine the names and dtypes of the outputs of the op""" # TODO: ['Prelu', 'Conv1D', 'DepthwiseConv2d', 'FusedDepthwiseConv2dNative', 'Ones', 'Zeros'] if op_type == 'Prelu': return ['activations'], [inp_dtypes[0]] try: tf_op_def = tf_api_def_map.get_op_def(op_type) except ValueError: raise ValueError("Failed to determine dtypes for op type %s. May be an unsupported op type." % op_type) dtypes = [] names = [] for arg in tf_op_def.output_arg: num_copies = 1 if arg.type_list_attr: dtypes += tf_attr[arg.type_list_attr] num_copies = len(tf_attr[arg.type_list_attr]) else: if arg.type_attr: dtype = tf_attr[arg.type_attr] else: dtype = arg.type if arg.number_attr: dtypes += [dtype] * tf_attr[arg.number_attr] num_copies = tf_attr[arg.number_attr] else: dtypes.append(dtype) names += [arg.name] * num_copies return names, dtypes def get_output_dtypes(op_type, tf_attr, inp_dtypes): """Returns a list of the tf dtypes for the op's outputs""" _, out_dtypes = get_output_names_and_dtypes(op_type, tf_attr, inp_dtypes) return out_dtypes def get_output_shapes(node_def, input_dtypes, input_shapes, inp_consts): """Returns a list of the output shapes of an op. 
input_dtypes should be tf dtypes.""" from tf2onnx.tf_loader import tf_session, tf_placeholder # pylint: disable=import-outside-toplevel if node_def.op in ["Prelu", "Enter"]: return [input_shapes[0]] if node_def.op == "Merge": # Find the first non-None shape (if it exists) and return it non_none = ([t for t in input_shapes if t is not None] + [None])[0] # The second output of merge is a scalar int indicating which input was selected return [non_none, []] del node_def.input[:] node_def.name = "node" if "_class" in node_def.attr: # Remove colocation information (list of nodes tf wants computed on same device) del node_def.attr["_class"] g = tf.Graph() with g.as_default(): for i, (dtype, shape, const) in enumerate(zip(input_dtypes, input_shapes, inp_consts)): inp = "input" + str(i) if const is None: if shape is not None and -1 in shape: shape = [d if d != -1 else None for d in shape] tf_placeholder(dtype, name=inp, shape=shape) else: tf.constant(const, dtype, name=inp) node_def.input.append(inp) mini_graph_def = g.as_graph_def() mini_graph_def.node.append(node_def) g2 = tf.Graph() with g2.as_default(): with tf_session() as sess: tf.import_graph_def(mini_graph_def, name='') node = sess.graph.get_operation_by_name("node") outputs_shapes = [tf_utils.get_tf_tensor_shape(out) for out in node.outputs] return outputs_shapes def sort_tfjs_functions(funcs): """Topologically sorts a list of tfjs functions""" dependencies = {} name_to_func = {} for f in funcs: name = f['signature']['name'] dependencies[name] = get_tfjs_func_dependencies(f) name_to_func[name] = f ordered = utils.topological_sort(dependencies) return [name_to_func[n] for n in ordered] def get_tfjs_func_dependencies(func): """Returns a list of names of functions the provided tfjs func depends on""" dependencies = set() for node in func.get('nodeDef', []): for v in node.get('attr', {}).values(): if list(v.keys())[0] == 'func': dependencies.add(read_tfjs_attr(v)) return list(dependencies) def read_model_json(model_path): """Given the path to a model.json file, parses the json and returns a dict (and flag indicating if the weights are compressed)""" zip_compressed = False with open(model_path, "rb") as f: magic_number = f.read(2) f.seek(0) if magic_number == b'\x1f\x8b': # Sometimes models from tfhub look normal but are gzip compressed without warning unziped_bytes = gzip.decompress(f.read()) model = json.loads(unziped_bytes) zip_compressed = True else: model = json.load(f) return model, zip_compressed def graphs_from_tfjs(model_path, input_names=None, output_names=None, shape_override=None, ignore_default=None, use_default=None): """Given the path to a model.json file, parses the model into onnx graphs and returns the main graph and a topologically sorted list of subgraphs.""" model, zip_compressed = read_model_json(model_path) model_format = model['modelTopology'].get('format') if model_format is None: if 'keras_version' in model['modelTopology']: model_format = 'layers-model' else: model_format = 'graph-model' utils.make_sure(model_format == 'graph-model', "tf2onnx only supports conversion from tfjs graph models, " "not format %s. 
Use Google's tfjs converter to convert to a graph model, then try again.", model_format) weights_manifest = model['weightsManifest'][0] sharded_data = [] for path in weights_manifest["paths"]: with open(os.path.join(os.path.dirname(model_path), path), "rb") as f: shard_bytes = f.read() if zip_compressed: shard_bytes = gzip.decompress(shard_bytes) sharded_data.append(shard_bytes) weights_data = b''.join(sharded_data) weights = {} i = 0 for weight in weights_manifest['weights']: weight_name, np_arr, num_bytes = read_tfjs_weight(weight, weights_data, offset=i) weights[weight_name] = np_arr i += num_bytes utils.make_sure(len(weights_data) == i, "Total weight bytes %d doesn't match read bytes %d", len(weights_data), i) topology = model['modelTopology'] if output_names is None and 'signature' in model: output_names = list(model['signature']['outputs'].keys()) main_g = read_tfjs_graph(topology['node'], weights, None, input_names, output_names, shape_override, ignore_default, use_default) subgraphs = [] funcs = sort_tfjs_functions(topology.get('library', {}).get('function', [])) for func in funcs: sub_g = read_tfjs_graph(func.get('nodeDef', []), weights, func, None, None, shape_override, ignore_default, use_default) subgraphs.append(sub_g) return main_g, subgraphs def read_tfjs_weight(weight, weights_data, offset): """Returns the name, numpy array, and number of bytes for a tfjs weight""" name = weight['name'] count = np.product(weight['shape'], dtype=np.int64) if weight['dtype'] == 'string': num_strings = np.product(weight['shape']) string_list, num_bytes = read_string_weight(weights_data, offset, num_strings) np_arr = np.array(string_list).reshape(weight['shape']) return name, np_arr, num_bytes np_dtype = np.dtype(weight['dtype']) if 'quantization' in weight: q_info = weight['quantization'] q_dtype = np.dtype(q_info['dtype']) np_arr = np.frombuffer(weights_data, dtype=q_dtype, count=count, offset=offset) num_bytes = np_arr.nbytes if 'scale' in q_info: np_arr = np_arr.astype(np_dtype) * q_info['scale'] + q_info['min'] else: np_arr = np_arr.astype(np_dtype) else: np_arr = np.frombuffer(weights_data, dtype=np_dtype, count=count, offset=offset) num_bytes = np_arr.nbytes np_arr = np_arr.reshape(weight['shape']) return name, np_arr, num_bytes def read_string_weight(weights_data, offset, num_strings): """Decodes binary weight data for a tfjs string""" string_list = [] j = offset for _ in range(num_strings): # TFJS strings start with a 4 byte unsigned int indicating their length, followed by the bytes of the string length = struct.unpack('<I', weights_data[j:j + 4])[0] j += 4 string_list.append(weights_data[j:j + length]) j += length return string_list, j - offset def read_tfjs_function(func): """Parses properties of a tfjs function.""" tf_dtypes = {} output_shapes = {} signature = func['signature'] inputs = [] for i, inp in enumerate(signature['inputArg']): inp_name = inp['name'] inputs.append(inp_name) tf_dtypes[inp_name] = getattr(types_pb2, inp['type']) out_shapes_attr = func.get('argAttr', {}).get(str(i), {}).get('attr', {}).get('_output_shapes') if out_shapes_attr is not None: output_shapes[inp_name] = read_tfjs_attr(out_shapes_attr)[0] else: output_shapes[inp_name] = None ret_map = func['ret'] outputs = [ret_map[out['name']] for out in signature['outputArg']] name = signature['name'] return tf_dtypes, output_shapes, inputs, outputs, name def read_tfjs_graph(nodes, weights, func=None, graph_inputs=None, graph_outputs=None, shape_override=None, ignore_default=None, use_default=None): """Creates an 
onnx graph from the provided tfjs nodes"""
    if shape_override is None:
        shape_override = {}
    onnx_nodes = []
    output_shapes = {}
    tf_dtypes = {}
    op_info = {}
    graph_name = 'tfjs_model'
    func_name = None

    def update_shapes(new_shapes):
        if isinstance(new_shapes, dict):
            new_shapes = new_shapes.items()
        for k, v in new_shapes:
            output_shapes[k] = shape_override.get(k, v)

    if func is not None:
        tf_dtypes, fn_input_shapes, graph_inputs, graph_outputs, func_name = read_tfjs_function(func)
        update_shapes(fn_input_shapes)
        graph_name = func_name
        for inp in graph_inputs:
            onnx_nodes.append(helper.make_node("Placeholder", [], outputs=[inp], name=inp))

    if graph_inputs is None:
        placeholder_ops = ["Placeholder", "PlaceholderWithDefault", "PlaceholderV2"]
        graph_inputs = [n['name'] + ':0' for n in nodes if n['op'] in placeholder_ops]

    for node in nodes:
        if node['op'] == "NextIteration":
            # NextIteration nodes can violate the topological sort with cyclic dependencies, so we do them first.
            node_name = node['name']
            output_name = node_name + ':0'
            output_shapes[output_name] = None
            tf_dtypes[output_name] = read_tfjs_attr(node['attr']['T'], tf_dtypes=True)
            op_info[node_name] = (node['op'], {'dtype': tf_dtypes[output_name]}, [tf_dtypes[output_name]])

    for node in nodes:
        op_type = node['op']
        node_name = node['name']
        if op_type == "Const":
            np_arr = weights[node_name]
            out_name = node_name + ':0'
            tf_dtype = read_tfjs_attr(node['attr']['dtype'], tf_dtypes=True)
            onnx_dtype = tf_utils.map_tf_dtype(tf_dtype)
            # The dtype of a Const in tfjs can differ from that of the weight used to get its value
            np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)
            onnx_tensor = numpy_helper.from_array(np_arr.astype(np_dtype), out_name)
            onnx_node = helper.make_node("Const", [], outputs=[out_name], name=node_name, value=onnx_tensor)
            onnx_nodes.append(onnx_node)
            output_shapes[out_name] = shape_override.get(out_name, list(np_arr.shape))
            tf_dtypes[out_name] = tf_dtype
            op_info[node_name] = (op_type, {'dtype': tf_dtypes[out_name]}, [])
            continue
        tf_attr = {}
        onnx_attr = {}
        fix_string_attr(node)
        node_def = tfjs_node_to_tf_node_def(node)
        for k, v in node.get('attr', {}).items():
            tf_attr[k] = read_tfjs_attr(v, tf_dtypes=True)
            if k in tf_utils.TF_IGNORED_NODE_ATTRS:
                continue
            if k == 'DstT':
                k = 'to'
            onnx_attr[k] = read_tfjs_attr(v)
        if op_type == "FusedDepthwiseConv2dNative":
            # This op isn't in tensorflow but can be converted to a TF op
            op_type = "_FusedDepthwiseConv2dNative"
            err_msg = "explicit_paddings not supported for _FusedDepthwiseConv2dNative"
            utils.make_sure(len(tf_attr['explicit_paddings']) == 0, err_msg)
            del tf_attr['explicit_paddings']
            del onnx_attr['explicit_paddings']
            del node_def.attr['explicit_paddings']
            node_def.op = op_type
        input_names = [inp for inp in node.get('input', []) if not inp.startswith('^')]
        input_names = [resolve_output(inp, op_info, func_name) for inp in input_names]
        inp_dtypes = [tf_dtypes[inp] for inp in input_names]
        inp_shapes = [output_shapes[inp] for inp in input_names]
        inp_consts = [weights.get(inp.split(':')[0]) for inp in input_names]
        out_dtypes = get_output_dtypes(op_type, tf_attr, inp_dtypes)
        out_shapes = get_output_shapes(node_def, inp_dtypes, inp_shapes, inp_consts)
        op_info[node_name] = (op_type, tf_attr, inp_dtypes)
        output_names = [node_name + ":" + str(i) for i in range(len(out_dtypes))]
        tf_dtypes.update(zip(output_names, out_dtypes))
        update_shapes(zip(output_names, out_shapes))
        if op_type == "PlaceholderWithDefault":
            remove = False
            if ignore_default and node_name in ignore_default:
                op_type = 'Placeholder'
                input_names = []
elif use_default and node_name in use_default: remove = True elif node_name.endswith('keras_learning_phase'): logger.warning("Removing optional input %s that appears to be a keras learning phase parameter. " "Use --ignore_default to force this into an input.", node_name) remove = True if remove: op_type = 'Identity' graph_inputs = [inp for inp in graph_inputs if inp != node_name + ":0"] onnx_node = helper.make_node(op_type, input_names, output_names, name=node_name, **onnx_attr) onnx_nodes.append(onnx_node) dtypes = {k: tf_utils.map_tf_dtype(v) for k, v in tf_dtypes.items()} if graph_outputs is None: output_to_node = {out: node.name for node in onnx_nodes for out in node.output} node_to_outputs = {node.name: list(node.output) for node in onnx_nodes} used_nodes = set(output_to_node[out] for node in onnx_nodes for out in node.input) unused_nodes = [node for node in onnx_nodes if node.name not in used_nodes] graph_outputs = [out for node in unused_nodes for out in node_to_outputs[node.name]] graph_outputs_mapped = [resolve_output(out, op_info, func_name) for out in graph_outputs] g = Graph(onnx_nodes, output_shapes, dtypes, input_names=graph_inputs, output_names=graph_outputs_mapped, is_subgraph=func is not None, graph_name=graph_name) g.rename_tensors(dict(zip(graph_outputs_mapped, graph_outputs))) return g
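# A hypothetical usage sketch (not part of the module): parse a tfjs graph model from disk.
# The path below is illustrative only; graphs_from_tfjs expects the model.json of a tfjs
# *graph* model, with its weight shard files sitting next to it.
if __name__ == "__main__":
    main_g, subgraphs = graphs_from_tfjs("path/to/model.json")
    print("parsed main graph and %d subgraph(s)" % len(subgraphs))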
{"hexsha": "58fdd5d38f3b36c794dfc11e74a866e4c03d72f2", "size": 19828, "ext": "py", "lang": "Python", "max_stars_repo_path": "tf2onnx/tfjs_utils.py", "max_stars_repo_name": "guschmue/tensorflow-onnx", "max_stars_repo_head_hexsha": "186b9540d705188de34faffd119aa6a4f5b150c0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tf2onnx/tfjs_utils.py", "max_issues_repo_name": "guschmue/tensorflow-onnx", "max_issues_repo_head_hexsha": "186b9540d705188de34faffd119aa6a4f5b150c0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tf2onnx/tfjs_utils.py", "max_forks_repo_name": "guschmue/tensorflow-onnx", "max_forks_repo_head_hexsha": "186b9540d705188de34faffd119aa6a4f5b150c0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2224532225, "max_line_length": 118, "alphanum_fraction": 0.6397014323, "include": true, "reason": "import numpy", "num_tokens": 4783}
from pathlib import Path
from tempfile import TemporaryDirectory
import json
import os
import re
import unittest

from pandas import DataFrame
from schematics.exceptions import DataError, ValidationError
from schematics.models import Model
from schematics.types import StringType
import numpy as np
import OpenEXR as openexr

import hidebound.core.tools as hbt
# ------------------------------------------------------------------------------


class ToolsTests(unittest.TestCase):
    def create_files(self, root):
        filepaths = [
            'a/1.foo',
            'a/b/2.json',
            'a/b/3.txt',
            'a/b/c/4.json',
            'a/b/c/5.txt',
        ]
        filepaths = [Path(root, x) for x in filepaths]
        for filepath in filepaths:
            os.makedirs(filepath.parent, exist_ok=True)
            with open(filepath, 'w') as f:
                f.write('')
        return filepaths

    def make_file_or_dir(self, filepath):
        filepath = Path(filepath)
        if '.' in filepath.as_posix():
            os.makedirs(filepath.parent, exist_ok=True)
            with open(filepath, 'w') as f:
                f.write('test')
        else:
            os.makedirs(filepath, exist_ok=True)

    # --------------------------------------------------------------------------

    def test_error_to_string(self):
        error = KeyError('Foo')
        expected = 'KeyError( Foo )'
        result = hbt.error_to_string(error)
        self.assertEqual(result, expected)

        error = ValidationError(['foo', 'bar'])
        expected = 'ValidationError(\nfoo\nbar\n)'
        result = hbt.error_to_string(error)
        self.assertEqual(result, expected)

        class Foo(Model):
            bar = StringType(required=True)
            baz = StringType(required=True)

        try:
            Foo({}).validate()
        except DataError as e:
            result = hbt.error_to_string(e)
            expected = r'DataError\(\n.*(bar|baz).*\n.*(bar|baz).*\n\)'
            self.assertRegex(result, expected)

    def test_to_prototype(self):
        dicts = [
            dict(a=1, b=2, c=3),
            dict(a=1, b=2, d=3),
            dict(a=1, b=2, e=3),
        ]
        expected = dict(a=[1, 1, 1], b=[2, 2, 2], c=[3], d=[3], e=[3])
        result = hbt.to_prototype(dicts)
        self.assertEqual(result, expected)

    def test_list_all_files(self):
        expected = '/foo/bar is not a directory or does not exist.'
        with self.assertRaisesRegex(FileNotFoundError, expected):
            next(hbt.list_all_files('/foo/bar'))

        expected = '/foo.bar is not a directory or does not exist.'
        with self.assertRaisesRegex(FileNotFoundError, expected):
            next(hbt.list_all_files('/foo.bar'))

        with TemporaryDirectory() as root:
            expected = sorted(self.create_files(root))
            result = sorted(list(hbt.list_all_files(root)))
            self.assertEqual(result, expected)

    def test_list_all_files_include(self):
        with TemporaryDirectory() as root:
            regex = r'\.txt'
            self.create_files(root)
            expected = [
                Path(root, 'a/b/3.txt'),
                Path(root, 'a/b/c/5.txt'),
            ]
            result = hbt.list_all_files(root, include_regex=regex)
            result = sorted(list(result))
            self.assertEqual(result, expected)

    def test_list_all_files_exclude(self):
        with TemporaryDirectory() as root:
            regex = r'\.txt'
            self.create_files(root)
            expected = [
                Path(root, 'a/1.foo'),
                Path(root, 'a/b/2.json'),
                Path(root, 'a/b/c/4.json'),
            ]
            result = hbt.list_all_files(root, exclude_regex=regex)
            result = sorted(list(result))
            self.assertEqual(result, expected)

    def test_list_all_files_include_exclude(self):
        with TemporaryDirectory() as root:
            i_regex = r'/a/b'
            e_regex = r'\.json'
            self.create_files(root)
            expected = [
                Path(root, 'a/b/3.txt'),
                Path(root, 'a/b/c/5.txt'),
            ]
            result = hbt.list_all_files(
                root,
                include_regex=i_regex,
                exclude_regex=e_regex,
            )
            result = sorted(list(result))
            self.assertEqual(result, expected)

    def test_delete_empty_directories(self):
        with TemporaryDirectory() as root:
            paths = [
                Path(root, 'a0/b0/c0/l0.txt'),
                Path(root, 'a0/b0/c1'),
                Path(root, 'a0/b0/c2/.DS_Store'),
                Path(root, 'a0/b0/c3/.DS_Store'),
                Path(root, 'a0/b0/c3/d0/e0/l1.txt'),
                Path(root, 'a0/b0/c3/d1/e0'),
                Path(root, 'a0/b0/c4/d1/e0/l2.txt'),
                Path(root, 'a0/b0/c4/d2'),
                Path(root, 'a0/b0/c4/d3/e0/l3.txt'),
                Path(root, 'a0/b0/c5'),
                Path(root, 'a1/b0/c0'),
                Path(root, 'a1/b0/.DS_Store'),
                Path(root, 'a1/b1/l4.txt'),
            ]
            list(map(self.make_file_or_dir, paths))

            hbt.delete_empty_directories(root)
            result = sorted([x[0] for x in os.walk(root)])
            expected = [
                Path(root),
                Path(root, 'a0'),
                Path(root, 'a0/b0'),
                Path(root, 'a0/b0/c0'),        # parent dir of file
                Path(root, 'a0/b0/c3'),
                Path(root, 'a0/b0/c3/d0'),
                Path(root, 'a0/b0/c3/d0/e0'),  # parent dir of file
                Path(root, 'a0/b0/c4'),
                Path(root, 'a0/b0/c4/d1'),
                Path(root, 'a0/b0/c4/d1/e0'),  # parent dir of file
                Path(root, 'a0/b0/c4/d3'),
                Path(root, 'a0/b0/c4/d3/e0'),  # parent dir of file
                Path(root, 'a1'),
                Path(root, 'a1/b1'),           # parent dir of file
            ]
            expected = sorted([x.as_posix() for x in expected])
            self.assertEqual(result, expected)

    def test_delete_empty_directories_empty(self):
        with TemporaryDirectory() as root:
            hbt.delete_empty_directories(root)
            result = [x[0] for x in os.walk(root)]
            self.assertEqual(result, [root])

    def test_delete_empty_directories_errors(self):
        expected = '/foo/bar is not a directory or does not exist.'
        with self.assertRaisesRegex(FileNotFoundError, expected):
            next(hbt.delete_empty_directories('/foo/bar'))

    def test_directory_to_dataframe(self):
        with TemporaryDirectory() as root:
            self.create_files(root)
            filepaths = [
                Path(root, 'a/b/3.txt'),
                Path(root, 'a/b/c/5.txt'),
            ]
            expected = DataFrame()
            expected['filepath'] = filepaths
            expected['filename'] = expected.filepath.apply(lambda x: x.name)
            expected['extension'] = 'txt'
            expected.filepath = expected.filepath.apply(lambda x: x.as_posix())

            result = hbt.directory_to_dataframe(
                root,
                include_regex=r'/a/b',
                exclude_regex=r'\.json',
            )
            cols = ['filepath', 'filename', 'extension']
            for col in cols:
                self.assertEqual(result[col].tolist(), expected[col].tolist())

    def test_read_exr_header(self):
        with TemporaryDirectory() as root:
            header = openexr.Header(5, 10)
            exr = root + '/foo.exr'
            data = dict(
                R=np.ones((10, 5, 1), dtype=np.float32).tobytes(),
                G=np.zeros((10, 5, 1), dtype=np.float32).tobytes(),
                B=np.zeros((10, 5, 1), dtype=np.float32).tobytes(),
            )
            output = openexr.OutputFile(exr, header)
            output.writePixels(data)

            result = hbt.read_exr_header(exr)
            win = result['dataWindow']
            x = (win.max.x - win.min.x) + 1
            y = (win.max.y - win.min.y) + 1
            self.assertEqual(x, 5)
            self.assertEqual(y, 10)

            result = sorted(result['channels'].keys())
            self.assertEqual(result, list('BGR'))

    def test_read_exr_header_error(self):
        with TemporaryDirectory() as root:
            exr = root + '/foo.exr'
            with open(exr, 'w') as f:
                f.write('taco')

            expected = f'{exr} is not an EXR file.'
            with self.assertRaisesRegex(IOError, expected):
                hbt.read_exr_header(exr)

    def test_time_string(self):
        result = re.search(
            r'\d\d\d\d-\d\d-\d\dT-\d\d-\d\d-\d\d',
            hbt.time_string()
        )
        self.assertIsNotNone(result)

    def test_write_json(self):
        with TemporaryDirectory() as root:
            filepath = Path(root, 'test.json')

            # dict
            expected = dict(a='b', c='d')
            hbt.write_json(expected, filepath)
            with open(filepath) as f:
                result = json.load(f)
            self.assertEqual(result, expected)

            # list
            expected = [
                dict(a='b', c='d'),
                dict(e='f', g='h'),
                dict(i='j', k='l'),
            ]
            hbt.write_json(expected, filepath)
            with open(filepath) as f:
                result = json.load(f)
            self.assertEqual(result, expected)

            with open(filepath) as f:
                result = f.read()
            expected = map(json.dumps, expected)
            expected = '[\n' + ',\n'.join(expected) + '\n]'
            self.assertEqual(result, expected)

    def test_read_json(self):
        with TemporaryDirectory() as root:
            filepath = Path(root, 'test.json')
            with open(filepath, 'w') as f:
                f.write('''
                    // a comment
                    {
                        "a": "b",
                        "c": "d",
                    }''')
            result = hbt.read_json(filepath)
            expected = {"a": "b", "c": "d"}
            self.assertEqual(result, expected)

    def test_read_json_error(self):
        with TemporaryDirectory() as root:
            filepath = Path(root, 'test.json')
            with open(filepath, 'w') as f:
                f.write('''
                    {
                        "a": "b",  // encoding error
                        "c": "d",
                    }''')
            expected = 'No JSON data could be decoded from .*/test.json. '
            expected += 'Please remove any inline comments.'
            with self.assertRaisesRegex(json.JSONDecodeError, expected):
                hbt.read_json(filepath)
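The list_all_files tests above fully pin down the function's observable behavior: it is a lazy generator, it raises FileNotFoundError with a specific message for a missing root (only when first advanced, hence the next(...) calls), and the include/exclude regexes filter on the file's POSIX path. A minimal sketch satisfying those tests (an illustration inferred from the assertions; hidebound's actual implementation may differ internally):

from pathlib import Path
import os
import re


def list_all_files(directory, include_regex='', exclude_regex=''):
    # Hypothetical reference implementation, not hidebound's real code.
    # Because this is a generator function, the directory check below is
    # deferred until the first next() call, matching the tests above.
    directory = Path(directory)
    if not directory.is_dir():
        msg = f'{directory.as_posix()} is not a directory or does not exist.'
        raise FileNotFoundError(msg)
    for root, _, files in os.walk(directory):
        for name in files:
            filepath = Path(root, name)
            posix = filepath.as_posix()
            # include keeps only matching paths; exclude drops matching paths
            if include_regex and not re.search(include_regex, posix):
                continue
            if exclude_regex and re.search(exclude_regex, posix):
                continue
            yield filepath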
{"hexsha": "7ef728c534c4d483dad53cfc864e86200ec5f178", "size": 10710, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/hidebound/core/tools_test.py", "max_stars_repo_name": "theNewFlesh/nerve", "max_stars_repo_head_hexsha": "4e430db20cf69cb065318806edf13656b5b035c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/hidebound/core/tools_test.py", "max_issues_repo_name": "theNewFlesh/nerve", "max_issues_repo_head_hexsha": "4e430db20cf69cb065318806edf13656b5b035c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/hidebound/core/tools_test.py", "max_forks_repo_name": "theNewFlesh/nerve", "max_forks_repo_head_hexsha": "4e430db20cf69cb065318806edf13656b5b035c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6601941748, "max_line_length": 80, "alphanum_fraction": 0.5151260504, "include": true, "reason": "import numpy", "num_tokens": 2465}
#!/usr/bin/python
#==========================================================================
# summary.py
#==========================================================================
#
#  -h --help        Display this message
#  -v --verbose     Verbose mode
#  -p --prefetcher  Type of prefetcher
#  -s --stride      Number of strides
#  -1 --l1-size     Size of L1 cache in KB
#  -2 --l2-size     Size of L2 cache in KB
#  -d --directory   Name of the directory to save the run
#
# Author : Matheus Ogleari
# Date   : May 5, 2015

import optparse
import fileinput
import re
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

reports = [
    # order matters !!!
    #"sht64k2To14.log", #"sht64k2To16.log", #"sht64k2To18.log", #"sht64k2To20.log", #"sht64k2To22.log",
    #"sht128k2To14.log", #"sht128k2To16.log", #"sht128k2To18.log", #"sht128k2To20.log", #"sht128k2To22.log",
    #"sht256k2To14.log", #"sht256k2To16.log", #"sht256k2To18.log", #"sht256k2To20.log", #"sht256k2To22.log",
    #"sht512k2To14.log", #"sht512k2To16.log", #"sht512k2To18.log", #"sht512k2To20.log", #"sht512k2To22.log",
    #"sht1M2To14.log", #"sht1M2To16.log", #"sht1M2To18.log", #"sht1M2To20.log", #"sht1M2To22.log",
    #"sht2M2To14.log", #"sht2M2To16.log", #"sht2M2To18.log", #"sht2M2To20.log", #"sht2M2To22.log",
    #"sht4M2To14.log", #"sht4M2To16.log", #"sht4M2To18.log", #"sht4M2To20.log", #"sht4M2To22.log",
    ## another order
    #"sht64k2To09.log", #"sht128k2To09.log", #"sht256k2To09.log", #"sht512k2To09.log", #"sht1M2To09.log", #"sht2M2To09.log", #"sht4M2To09.log", #"sht8M2To09.log", #"sht16M2To09.log",
    #"sht64k2To13.log", #"sht128k2To13.log", #"sht256k2To13.log", #"sht512k2To13.log", #"sht1M2To13.log", #"sht2M2To13.log", #"sht4M2To13.log", #"sht8M2To13.log", #"sht16M2To13.log",
    #"sht64k2To14.log", #"sht128k2To14.log", #"sht256k2To14.log", #"sht512k2To14.log", #"sht1M2To14.log", #"sht2M2To14.log", #"sht4M2To14.log", #"sht8M2To14.log",
    #"sht64k2To16.log", #"sht128k2To16.log", #"sht256k2To16.log", #"sht512k2To16.log", #"sht1M2To16.log", #"sht2M2To16.log", #"sht4M2To16.log",
    #"sht64k2To18.log", #"sht128k2To18.log", #"sht256k2To18.log", #"sht512k2To18.log", #"sht1M2To18.log", #"sht2M2To18.log", #"sht4M2To18.log", #"sht8M2To18.log",
    #"sht64k2To20.log", #"sht128k2To20.log", #"sht256k2To20.log", #"sht512k2To20.log", #"sht1M2To20.log", #"sht2M2To20.log", #"sht4M2To20.log",
    #"sht64k2To22.log", #"sht128k2To22.log", #"sht256k2To22.log", #"sht512k2To22.log", #"sht1M2To22.log", #"sht2M2To22.log", #"sht4M2To22.log",
    #"sht1w2To18.log", #"sht2w2To18.log", #"sht4w2To18.log", #"sht8w2To18.log", #"sht16w2To18.log",
    "stm64k2To18.log",
    "stm128k2To18.log",
    "stm256k2To18.log",
    "stm512k2To18.log",
    "stm1M2To18.log",
    "stm2M2To18.log",
    "stm4M2To18.log",
    "stm8M2To18.log",
]

#-------------------------------------------------------------------------------
# Command line processing
#-------------------------------------------------------------------------------

class OptionParserWithCustomError(optparse.OptionParser):
    def error(self, msg=""):
        # Print the usage header embedded in this script's leading comments,
        # then exit (status 1 if there was an actual error message).
        if msg:
            print("\n ERROR: %s" % msg)
            print("")
        for line in fileinput.input(sys.argv[0]):
            if not re.match("#", line):
                sys.exit(msg != "")
            if (fileinput.lineno() == 3) or (fileinput.lineno() > 4):
                print(re.sub("^#", "", line.rstrip("\n")))


def parse_cmdline():
    p = OptionParserWithCustomError(add_help_option=False)
    # -h was documented in the header but never registered, and the original
    # check compared the `help` builtin to True, so --help could never fire.
    # Only --help is consumed here; the other documented flags are unused.
    p.add_option("-h", "--help", action="store_true", dest="help")
    (opts, args) = p.parse_args()
    if opts.help:
        p.error()
    if args:
        p.error("found extra positional arguments")
    return opts
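Each entry in `reports` encodes its configuration in the filename: a benchmark tag, an L2 cache size, and `2To<k>` for a 2**k-entry hash table. A hypothetical helper (not part of the original script) that decodes the same fields `Experiment.print_results` later slices out by hand:

def decode_report_name(name):
    # e.g. "stm64k2To18.log" -> ('stm', '64k', 262144)
    stem = name[:-4]                    # drop ".log"
    table_size = 2 ** int(stem[-2:])    # "18" -> 2**18 entries
    cache = stem[3:-5]                  # "64k", "1M", ... between tag and "2To"
    tag = stem[:3]                      # benchmark tag, e.g. "stm" or "sht"
    return tag, cache, table_size

print(decode_report_name("stm64k2To18.log"))  # ('stm', '64k', 262144)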
#-------------------------------------------------------------------------------
# Helper Classes
#-------------------------------------------------------------------------------

class Result:
    def __init__(self, l1_RDmr, l1_WRmr, l2_RDmr, l2_WRmr, ipc, cycles):
        self.l1_RDmr = l1_RDmr  # L1 RD miss rate
        self.l1_WRmr = l1_WRmr  # L1 WR miss rate
        self.l2_RDmr = l2_RDmr  # L2 RD miss rate
        self.l2_WRmr = l2_WRmr  # L2 WR miss rate
        self.ipc = ipc
        self.cycles = cycles


class Experiment:
    # bmark:       the name of the benchmark (e.g., hash, sps, rbtree)
    # l1_missrate: miss rate for the L1 cache
    # l2_missrate: miss rate for the L2 cache
    # ipc:         instructions-per-cycle result

    def __init__(self, bmark):
        self.bmark = bmark
        self.result = None

    def print_results(self):
        # Decode the benchmark tag from the first two characters of the name.
        choice = self.bmark[:2]
        bmarks = {
            'ht': 'hashtable',
            'ph': 'p-hashtable',
            'ps': 'parsec:blackscholes',
            'sh': 'hashtable-str',
            'da': 'double apps',
        }
        bm = bmarks.get(choice, 'benchmark')

        # Decode the L2 cache size from the characters before "2To<k>".
        choice = self.bmark[-7:-5]
        cacheSize = {'4k': '64k', '8k': '128k', '6k': '256k', '2k': '512k'}
        cs = cacheSize.get(choice, choice)

        ht_sz = 2 ** int(self.bmark[-2:])
        # Note: the loop count column is hard-coded to 1024.
        output = "%12s\t%4s\t%4s\t%4s\t%4s\t%7s\t%7s\t%.2f\t%.2f\t%10s" % (
            bm, cs, 1024, ht_sz, self.result.ipc,
            self.result.l1_RDmr, self.result.l1_WRmr,
            self.result.l2_RDmr, self.result.l2_WRmr,
            self.result.cycles)
        print(output)

    def set_result(self, l1_RDmr, l1_WRmr, l2_RDmr, l2_WRmr, ipc, cycles):
        self.result = Result(l1_RDmr, l1_WRmr, l2_RDmr, l2_WRmr, ipc, cycles)


def drawGraph(array, numX):
    xs = list(range(numX))
    # Generate y coordinates, one row per curve. Integer division is required
    # here; the original used "/", which yields floats under Python 3.
    ys = [[0 for x in range(numX)] for x in range(len(array) // numX)]
    for i in range(len(array)):
        ys[i // numX][i % numX] = array[i]

    ind = np.arange(numX)
    xLab = ['64k', '128k', '256k', '512k', '1M', '2M', '4M']
    #plt.figure(1)
    #plt.figure(figsize=(20,5))
    plt.plot(xs, ys[0], label='2**14')
    plt.plot(xs, ys[1], label='2**16')
    plt.plot(xs, ys[2], label='2**18')
    plt.plot(xs, ys[3], label='2**20')
    plt.xlabel('Cache Size')
    plt.ylabel('IPC')
    plt.xticks(ind, xLab)
    plt.legend()
    plt.title('IPC')
    plt.show()
    #pp = PdfPages('htIPC.pdf')
    #pp.savefig()
    #plt.close()


#-------------------------------------------------------------------------------
# Main
#-------------------------------------------------------------------------------

def main():
    opts = parse_cmdline()

    header = "%12s\t%4s\t%4s\t%4s\t%4s\t%7s\t%7s\t%7s\t%7s\t%10s" % (
        "benchmark", "L2Sz", "loops", "ht_sz", "IPC",
        "L1_RDMR", "L1_WRMR", "L2_RDMR", "L2_WRMR", "cycles")
    print(header)

    # a list to store the results
    res = []
    for report in reports:
        experiment = Experiment("%s" % (report[:-4]))
        if os.path.exists(report):
            report_file = open(report)
        else:
            continue
        report_txt = report_file.read()
        report_file.close()

        #===================================================
        # extract instruction count and IPC
        #===================================================
        re1 = '(total)'          # Word 1
        re2 = '( )'              # White Space 1
        re3 = '(number)'         # Word 2
        re4 = '( )'              # White Space 2
        re5 = '(of)'             # Word 3
        re6 = '( )'              # White Space 3
        re7 = '(fetched)'        # Word 4
        re8 = '( )'              # White Space 4
        re9 = '(instructions)'   # Word 5
        re10 = '.*?'             # Non-greedy match on filler
        re11 = '(\\d+)'          # Integer Number 1
        re12 = '.*?'             # Non-greedy match on filler
        re13 = '([+-]?\\d*\\.\\d+)(?![-+0-9\\.])'  # Float 1
        rg = re.compile(re1 + re2 + re3 + re4 + re5 + re6 + re7 + re8 + re9
                        + re10 + re11 + re12 + re13,
                        re.IGNORECASE | re.DOTALL)
        ipc = rg.findall(report_txt)
        # group 10 is the instruction count; group 11 is the IPC float
        ninst = ipc[0][9]
        ipc = ipc[0][10]
        ncycles = int(int(ninst) / float(ipc))

        #===================================================
        # count L1 RD miss rate
        #===================================================
        re1 = '(L1)'             # cache level
        re2 = '(\\$)'            # literal '$'
        re3 = '(D)'              # 'D' for the data cache
        re4 = '(\\[.*?\\])'
        re5 = '( )'
        re6 = '(:)'
        re7 = '( )'
        re8 = '(RD)'             # access type
        re9 = '( )'
        re10 = '(\\(.*?\\))'
        re11 = '(=)'
        re12 = '(\\(.*?\\))'
        re13 = '(=)'
        re14 = '(\\s+)'
        re15 = '([+-]?\\d*\\.\\d+)(?![-+0-9\\.])'  # Float 1
        rg = re.compile(re1 + re2 + re3 + re4 + re5 + re6 + re7 + re8 + re9
                        + re10 + re11 + re12 + re13 + re14 + re15,
                        re.IGNORECASE | re.DOTALL)
        l1_results = rg.findall(report_txt)
        l1_RDmr = l1_results[0][14]  # group 15: the miss-rate float
        L1_RDMR = float(l1_RDmr)

        #===================================================
        # count L1 WR miss rate
        #===================================================
        re8 = '(WR)'             # access type; all other groups as above
        rg = re.compile(re1 + re2 + re3 + re4 + re5 + re6 + re7 + re8 + re9
                        + re10 + re11 + re12 + re13 + re14 + re15,
                        re.IGNORECASE | re.DOTALL)
        l1_results = rg.findall(report_txt)
        l1_WRmr = l1_results[0][14]
        L1_WRMR = float(l1_WRmr)

        #===================================================
        # count L2 RD miss rate
        #===================================================
        re3 = '( )'              # L2 lines have a space where L1 has 'D'
        re1 = '(L2)'
        re8 = '(RD)'
        rg = re.compile(re1 + re2 + re3 + re4 + re5 + re6 + re7 + re8 + re9
                        + re10 + re11 + re12 + re13 + re14 + re15,
                        re.IGNORECASE | re.DOTALL)
        l2_results = rg.findall(report_txt)
        l2_RDmr = l2_results[0][14]
        L2_RDMR = float(l2_RDmr)
        # convert the local L2 miss rate into a global miss rate
        L2_RDMR = L2_RDMR * L1_RDMR / 100

        #===================================================
        # count L2 WR miss rate
        #===================================================
        re8 = '(WR)'
        rg = re.compile(re1 + re2 + re3 + re4 + re5 + re6 + re7 + re8 + re9
                        + re10 + re11 + re12 + re13 + re14 + re15,
                        re.IGNORECASE | re.DOTALL)
        l2_results = rg.findall(report_txt)
        l2_WRmr = l2_results[0][14]
        # convert the local L2 miss rate into a global miss rate
        L2_WRMR = float(l2_WRmr)
        L2_WRMR = L2_WRMR * L1_WRMR / 100

        res.append(ipc)

        # record and print the result for this experiment
        experiment.set_result(L1_RDMR, L1_WRMR, L2_RDMR, L2_WRMR, ipc, ncycles)
        experiment.print_results()

    #drawGraph(res, 7)


main()
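The four miss-rate extractions above differ only in the cache prefix ('L1$D' vs 'L2$ ') and the access type ('RD' vs 'WR'). A hedged sketch of a single helper that could replace them, assuming the same simulator log-line layout implied by the patterns (hypothetical; not part of the original script):

import re

def miss_rate(report_txt, level, op):
    # level is a regex-escaped cache prefix, e.g. r'L1\$D' or r'L2\$ ';
    # op is 'RD' or 'WR'. Matches log lines shaped like:
    #   L1$D[...] : RD (...)=(...)=   12.34
    pattern = (level + r'\[.*?\] : ' + op +
               r' \(.*?\)=\(.*?\)=\s+([+-]?\d*\.\d+)(?![-+0-9.])')
    match = re.search(pattern, report_txt, re.IGNORECASE | re.DOTALL)
    return float(match.group(1))

# Usage sketch:
#   L1_RDMR = miss_rate(report_txt, r'L1\$D', 'RD')
#   L2_RDMR = miss_rate(report_txt, r'L2\$ ', 'RD') * L1_RDMR / 100  # global rate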
{"hexsha": "762fce0e272c5684ba72b3bec2614c070e9ff56a", "size": 12746, "ext": "py", "lang": "Python", "max_stars_repo_path": "multithreading/sum_sin.py", "max_stars_repo_name": "cmpArch207/benchmarks", "max_stars_repo_head_hexsha": "22c6d38880f14bf41ce809638c8a1de9368e8017", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-26T09:34:36.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-26T09:34:36.000Z", "max_issues_repo_path": "multithreading/sum_sin.py", "max_issues_repo_name": "cmpArch207/benchmarks", "max_issues_repo_head_hexsha": "22c6d38880f14bf41ce809638c8a1de9368e8017", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "multithreading/sum_sin.py", "max_forks_repo_name": "cmpArch207/benchmarks", "max_forks_repo_head_hexsha": "22c6d38880f14bf41ce809638c8a1de9368e8017", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6485900217, "max_line_length": 108, "alphanum_fraction": 0.505570375, "include": true, "reason": "import numpy", "num_tokens": 4591}
[STATEMENT]
lemma ev_alw_shift[iff]: "ev (alw P) (u @- v) \<longleftrightarrow> ev (alw P) v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. ev (alw P) (u @- v) = ev (alw P) v
[PROOF STEP]
by (induct u) (auto)
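Read in standard LTL notation (a restatement for orientation, not part of the original theory; here $u \mathbin{@-} v$ denotes the finite word $u$ prepended to the stream $v$):

\[
  (u \mathbin{@-} v) \models \Diamond\,\Box\, P \;\Longleftrightarrow\; v \models \Diamond\,\Box\, P
\]

That is, "eventually always $P$" is insensitive to any finite prefix, which is why induction on $u$ followed by `auto` discharges the goal.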
{"llama_tokens": 103, "file": "Transition_Systems_and_Automata_Basic_Sequence_LTL", "length": 1}
/-
Copyright (c) 2019 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.finset.basic
import Mathlib.data.multiset.nat_antidiagonal
import Mathlib.PostPort

namespace Mathlib

/-!
# The "antidiagonal" {(0,n), (1,n-1), ..., (n,0)} as a finset.
-/

namespace finset

namespace nat

/-- The antidiagonal of a natural number `n` is
    the finset of pairs `(i,j)` such that `i+j = n`. -/
def antidiagonal (n : ℕ) : finset (ℕ × ℕ) :=
  mk (multiset.nat.antidiagonal n) (multiset.nat.nodup_antidiagonal n)

/-- A pair (i,j) is contained in the antidiagonal of `n` if and only if `i+j=n`. -/
@[simp] theorem mem_antidiagonal {n : ℕ} {x : ℕ × ℕ} :
    x ∈ antidiagonal n ↔ prod.fst x + prod.snd x = n :=
  sorry

/-- The cardinality of the antidiagonal of `n` is `n+1`. -/
@[simp] theorem card_antidiagonal (n : ℕ) : card (antidiagonal n) = n + 1 :=
  sorry

/-- The antidiagonal of `0` is the list `[(0,0)]` -/
@[simp] theorem antidiagonal_zero : antidiagonal 0 = singleton (0, 0) :=
  rfl

theorem antidiagonal_succ {n : ℕ} :
    antidiagonal (n + 1) =
      insert (0, n + 1)
        (map
          (function.embedding.prod_map
            (function.embedding.mk Nat.succ nat.succ_injective)
            (function.embedding.refl ℕ))
          (antidiagonal n)) :=
  sorry

theorem map_swap_antidiagonal {n : ℕ} :
    map
        (function.embedding.mk prod.swap
          (function.right_inverse.injective prod.swap_right_inverse))
        (antidiagonal n) =
      antidiagonal n :=
  sorry
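A small sanity check (hypothetical; it is not part of the ported file, and it assumes the declarations above elaborate despite the `sorry` placeholders):

-- Hypothetical usage example: for n = 2 the antidiagonal is
-- {(0, 2), (1, 1), (2, 0)}, so `card_antidiagonal` gives its size directly.
example : card (antidiagonal 2) = 2 + 1 :=
  card_antidiagonal 2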
{"author": "AurelienSaue", "repo": "Mathlib4_auto", "sha": "590df64109b08190abe22358fabc3eae000943f2", "save_path": "github-repos/lean/AurelienSaue-Mathlib4_auto", "path": "github-repos/lean/AurelienSaue-Mathlib4_auto/Mathlib4_auto-590df64109b08190abe22358fabc3eae000943f2/Mathlib/data/finset/nat_antidiagonal.lean"}