text (stringlengths 0–1.25M)
meta (stringlengths 47–1.89k)
[STATEMENT]
lemma hoare_weaken_left[trans]: \<open>A \<le> B \<Longrightarrow> hoare B p C \<Longrightarrow> hoare A p C\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>A \<le> B; hoare B p C\<rbrakk> \<Longrightarrow> hoare A p C
[PROOF STEP]
unfolding hoare_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. \<lbrakk>A \<le> B; \<forall>\<psi>\<in>space_as_set B. program p *\<^sub>V \<psi> \<in> space_as_set C\<rbrakk> \<Longrightarrow> \<forall>\<psi>\<in>space_as_set A. program p *\<^sub>V \<psi> \<in> space_as_set C
[PROOF STEP]
by (meson in_mono less_eq_ccsubspace.rep_eq)
{"llama_tokens": 250, "file": "Registers_QHoare", "length": 2}
#KRM import numpy as np from math import * import scipy.io import scipy as spy from netCDF4 import Dataset import pandas as pd import pylab as pl import os import sys lib_path = os.path.abspath('../../Building_canyon/BuildCanyon/PythonModulesMITgcm') # Add absolute path to my python scripts sys.path.append(lib_path) import ReadOutTools_MITgcm as rout import savitzky_golay as sg #--------------------- Functions------------------------------------------------------------------------------------- def a_weight_mean(ConcArea,Area): sumNum = np.sum(np.sum(ConcArea,axis=1),axis=1) sumDen = np.sum(Area) awmean = sumNum/sumDen return awmean def mask2DCanyon(bathy, sbdepth=-152.5): '''Mask out the canyon from the shelf. bathy : depths 2D array from the grid file sbdepth: shelf depth, always negative float Returns mask''' bathyMasked = np.ma.masked_less(-bathy, -152.5) return(bathyMasked.mask) def ConcArea(Tr, hfac, ra, bathy, sbdepth=-152.5): '''Tr: tracer field (nt,nz,ny,nx) hfac: fraction of open cell at center (nz,ny,nx) ra: array of cell horizontal areas (ny,nx) bathy : depths 2D array from the grid file (ny,nx) sbdepth: shelf break depth (negative value) RETURNS: ConcArea = concentration at cell closest to bottom times its area (nt,ny,nx) Conc = cocnetration near bottom (nt,ny,nx)''' ConcArea = np.empty((19,360,360)) Conc = np.empty((19,360,360)) ConcFiltered = np.empty((19,360,360)) ConcAreaFiltered = np.empty((19,360,360)) Area = np.empty((360,360)) BottomInd = np.argmax(hfac[::-1,:,:]>0.0,axis=0) # start looking for first no-land cell from the bottom up. BottomInd = np.ones(np.shape(BottomInd))*89 - BottomInd # Get index of unreversed z axis print(np.shape(BottomInd)) for tt in range(19): #print(tt) for j in range(360): for i in range(360): TrBottom = Tr[tt,BottomInd[i,j],i,j] ConcArea[tt,i,j] = TrBottom*ra[i,j] Conc[tt,i,j] = TrBottom Area[i,j] = ra[i,j] # Filter step noise ConcFiltered[tt,:,j] = sg.savitzky_golay(Conc[tt,:,j], 7,3) ConcAreaFiltered[tt,:,j] = sg.savitzky_golay(ConcArea[tt,:,j], 7,3) print(np.shape(ConcArea)) maskShelf2D = mask2DCanyon(bathy, sbdepth) maskShelf = np.expand_dims(maskShelf2D,0) # expand along time dimension maskShelf = maskShelf + np.zeros(Conc.shape) return (np.ma.masked_array(ConcAreaFiltered, mask=maskShelf), np.ma.masked_array(ConcFiltered, mask=maskShelf), np.ma.masked_array(Area, mask=maskShelf2D), ) #---------------------------------------------------------------------------------------------------------- NoCanyonGrid='/ocean/kramosmu/MITgcm/TracerExperiments/BARKLEY/run02/gridGlob.nc' NoCanyonGridOut = Dataset(NoCanyonGrid) CanyonGrid='/ocean/kramosmu/MITgcm/TracerExperiments/BARKLEY/run01/gridGlob.nc' CanyonGridOut = Dataset(CanyonGrid) CanyonState='/ocean/kramosmu/MITgcm/TracerExperiments/BARKLEY/run01/stateGlob.nc' CanyonStateOut = Dataset(CanyonState) nx = 360 ny = 360 nz = 90 hFacCNoC = rout.getField(NoCanyonGrid, 'HFacC') MaskCNoC = rout.getMask(NoCanyonGrid, 'HFacC') rANoC = rout.getField(NoCanyonGrid, 'rA') bathyNoC = rout.getField(NoCanyonGrid, 'Depth') hFacC = rout.getField(CanyonGrid, 'HFacC') MaskC = rout.getMask(CanyonGrid, 'HFacC') rA = rout.getField(CanyonGrid, 'rA') bathy = rout.getField(CanyonGrid, 'Depth') z = CanyonStateOut.variables['Z'] time = CanyonStateOut.variables['T'] ptracerCanyon = '/ocean/kramosmu/MITgcm/TracerExperiments/BARKLEY/run01/ptracersGlob.nc' ptracerFlat = '/ocean/kramosmu/MITgcm/TracerExperiments/BARKLEY/run02/ptracersGlob.nc' labelsListCanyon = ['Linear', 'Salt', 'Oxygen', 'Nitrate', 'Silicate', 'Phosphate', 
'Nitrous_Acid', 'Methane', ] labelsListFlat = ['Linear', 'Salt', 'Oxygen', 'Nitrate', 'Silicate', 'Phosphate', 'Nitrous_Acid', 'Methane', ] tracerListCanyon = ['Tr01','Tr02','Tr03', 'Tr04','Tr05','Tr06', 'Tr07','Tr08', ] tracerListFlat = ['Tr01','Tr02','Tr03', 'Tr04','Tr05','Tr06', 'Tr07','Tr08', ] nt = len(time) CACanyon = np.empty((nt,len(tracerListCanyon))) # Concentration * area integrated over shelf bottom CAFlat = np.empty((nt,len(tracerListFlat))) # Concentration * area integrated over shelf bottom ii = 0 for tracerID in tracerListCanyon: Tr = rout.getField(ptracerCanyon, tracerID) print(ptracerCanyon) concArea,conc,area=ConcArea(Tr, hFacC, rA, bathy) CACanyon[:,ii] = a_weight_mean(concArea,area) ii = ii + 1 raw_data = {'time':time[:], 'ConcAreaLin':CACanyon[:,0], 'ConcAreaSlt':CACanyon[:,1], 'ConcAreaOxy':CACanyon[:,2], 'ConcAreaNit':CACanyon[:,3], 'ConcAreaSil':CACanyon[:,4], 'ConcAreaPho':CACanyon[:,5], 'ConcAreaNAc':CACanyon[:,6], 'ConcAreaMet':CACanyon[:,7], } df = pd.DataFrame(raw_data, columns = ['time', 'ConcAreaLin', 'ConcAreaSlt', 'ConcAreaOxy', 'ConcAreaNit', 'ConcAreaSil', 'ConcAreaPho', 'ConcAreaNAc', 'ConcAreaMet', ]) filename1 = ('results/metricsDataFrames/bottomConcentrationAreaFiltCanyonRunsBarkley.csv' ) df.to_csv(filename1) print(filename1) ii = 0 for tracerID in tracerListFlat: Tr = rout.getField(ptracerFlat, tracerID) print(ptracerFlat) concArea,conc,area=ConcArea(Tr, hFacCNoC, rANoC, bathyNoC) CAFlat[:,ii] = a_weight_mean(concArea,area) ii = ii + 1 raw_data = {'time':time[:], 'ConcAreaFlatLin':CAFlat[:,0], 'ConcAreaFlatSlt':CAFlat[:,1], 'ConcAreaFlatOxy':CAFlat[:,2], 'ConcAreaFlatNit':CAFlat[:,3], 'ConcAreaFlatSil':CAFlat[:,4], 'ConcAreaFlatPho':CAFlat[:,5], 'ConcAreaFlatNAc':CAFlat[:,6], 'ConcAreaFlatMet':CAFlat[:,7], } dfFlat = pd.DataFrame(raw_data, columns = ['time', 'ConcAreaFlatLin', 'ConcAreaFlatSlt', 'ConcAreaFlatOxy', 'ConcAreaFlatNit', 'ConcAreaFlatSil', 'ConcAreaFlatPho', 'ConcAreaFlatNAc', 'ConcAreaFlatMet', ]) filename2 = ('results/metricsDataFrames/bottomConcentrationAreaFiltFlatRunsBarkley.csv' ) dfFlat.to_csv(filename2) print(filename2)
{"hexsha": "518e268359686390f37dee7661238402371dcf65", "size": 7554, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonScripts/bottomConcentrationBARKLEY.py", "max_stars_repo_name": "UBC-MOAD/outputanalysisnotebooks", "max_stars_repo_head_hexsha": "50839cde3832d26bac6641427fed03c818fbe170", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PythonScripts/bottomConcentrationBARKLEY.py", "max_issues_repo_name": "UBC-MOAD/outputanalysisnotebooks", "max_issues_repo_head_hexsha": "50839cde3832d26bac6641427fed03c818fbe170", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonScripts/bottomConcentrationBARKLEY.py", "max_forks_repo_name": "UBC-MOAD/outputanalysisnotebooks", "max_forks_repo_head_hexsha": "50839cde3832d26bac6641427fed03c818fbe170", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7073170732, "max_line_length": 124, "alphanum_fraction": 0.5390521578, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2031}
from dash import dcc, html, Input, Output, callback, dash_table import dash_bootstrap_components as dbc import pandas as pd import plotly.express as px import numpy as np import scipy.stats as stats from pages.style import PADDING_STYLE THRESHOLD = 0.5 TEXT_STYLE = { 'textAlign':'center', 'width': '70%', 'margin': '0 auto', 'background-color': 'AliceBlue', 'color': 'Blue' } PASS_TEST = """ The p-value obtained is {pvalue:.2f}. The p-value is less than our significance level of 0.10. **Hence, we conclude that there `is` enough evidence to support that the comments are not relevant to their posts.** """ FAIL_TEST = """ The p-value obtained is {pvalue:.2f}. The p-value is greater than our significance level of 0.10. **Hence, we conclude that there `is not` enough evidence to support that the comments are not relevant to their posts.** """ layout =html.Div([ html.H1('Relevance',style={'textAlign':'center'}), html.Div([ html.H3("Are the comments in discussions relevant to the submission?", className="display-6 text-center"), html.P(id='relevancesubredditprinter',className='fs-4 text-center'), html.Hr(), ]), ### Comment Relevance Histogram Distribution and T-Test dbc.Card([ html.H5("Does this Subreddit have relevant discussion?", className = 'card-title'), html.P('This histogram shows us the frequency distribution of relevance scores across all comments in this subreddit. We included a dotted line at the 0.5 relevance mark as we found that to be a good indicator of having good relevance to the parent post in multiple subreddits. Any score >0.5 can be deemed as satisfactorily relevant. *NOTE: Negative relevance score may occur if the comment is too small to compare with its original post!', className = 'card-subtitle'), dcc.Loading(children=[ dcc.Graph(id='relevance1'), ]), ], style=PADDING_STYLE), ### End Comment Relevence Histogram ### Comment Relevance Table dbc.Card([ html.H5("Comment Relevance Preview", className = 'card-title'), html.P('Check out how relevance scores reflect on a more granular level by looking at relevance scores for each comment with respect to their posts.', className = 'card-subtitle'), html.P("Click on a comment to see which post it refers to below.", style=TEXT_STYLE), dcc.Loading(children=[ dash_table.DataTable(id="reltable", page_size=10, style_header={'font-weight': 'bold'}, style_data={'whiteSpace': 'normal'}, columns=[{'name': 'Comment', 'id': 'Comment'}, {'name': 'Comment Relevance', 'id': 'Comment Relevance'}], style_cell={ 'font-family':'sans-serif', 'textAlign': 'left', 'font-size': '14px', 'padding-top': '3px', 'padding-bottom': '8px', 'padding-left': '8px', 'padding-right': '8px', }, style_data_conditional=[ { 'if': { 'filter_query': '{Comment Relevance} >= 0.5', }, 'backgroundColor': '#80ff59', }, { 'if': { 'filter_query': '{Comment Relevance} < 0.5', }, 'backgroundColor': '#ff6e6e', } ], css=[{ 'selector': '.dash-spreadsheet td div', 'rule': ''' line-height: 15px; max-height: 75px; min-height: 33px; display: block; overflow-y: auto; ''' }] ) ]), dcc.Loading(children=[ html.Div(id='relposttable') ]), ], style=PADDING_STYLE), ### End Comment Relevance Table ### T-Test dbc.Card([ html.H5("So, are the comments in this subreddit relevant to their posts?"), dcc.Loading(children=[ html.P(f"We use the Wilcoxon test for difference of medians to determine this. 
(Alternate Hypothesis: subreddit median relevance < {THRESHOLD}):"), html.P(f"*Conducting test with 0.10 significance level"), dcc.Markdown(id='relttest'), ]) ], style=PADDING_STYLE) ### End T-Test ]), @callback( Output('relevancesubredditprinter', 'children'), Output('relevance1', 'figure'), Output('relttest', 'children'), Output('reltable', 'data'), Input('session', 'data') ) def update_graph(data): try: df = pd.DataFrame(data) subreddit = df.at[0, 'subreddit'] # Generate Comment Relevance Histogram Distribution plot df["color"] = np.select( [df["comment_relevance"].gt(THRESHOLD), df["comment_relevance"].lt(THRESHOLD)], ["green", "red"], "orange") comm_relevance_dist = px.histogram(df, x="comment_relevance", title='Distribution of Comment Relevance', labels={'comment_relevance':'Comment Relevance Score', 'count':'Number of Comments', 'color':'Comment Relevance Score'}, color="color", color_discrete_map={ "green": "#80ff59", "red": "#ff6e6e", "orange": "orange"}) comm_relevance_dist.update_layout(yaxis_title="Number of Comments", showlegend=False) comm_relevance_dist.add_vline(x=THRESHOLD, line_width=3, line_dash="dash", line_color="black") # Hypothesis Test # test = stats.ttest_1samp(a=df.comment_relevance, popmean=THRESHOLD, alternative='less') comment_relevance_scores = df.comment_relevance.to_numpy() comment_relevance_scores = comment_relevance_scores - THRESHOLD test = stats.wilcoxon(x=comment_relevance_scores, alternative='less') if test.pvalue > 0.1: test_output = FAIL_TEST.format(pvalue=test.pvalue) else: test_output = PASS_TEST.format(pvalue=test.pvalue) # Comment Relevance Table comment_df = df[['comment', 'comment_relevance', 'post_id']].copy() comment_df['id'] = comment_df.post_id comment_df.rename(columns={'comment': 'Comment', 'comment_relevance': 'Comment Relevance'}, inplace=True) return f'For r/{subreddit}, we calculated relevance scores on a real number scale from 0 to 1 to see how relevant comments were to their original posts, 0 showing no relevance at all and 1 meaning extremely relevant. We believe relevance to be an important factor in deciding if a discussion is propagating in the right direction.', comm_relevance_dist, test_output, comment_df.to_dict('records') except KeyError as e: print(e) return 'No data loaded! Go to Home Page first!', {}, "", [] @callback( Output('relposttable', 'children'), Input('session', 'data'), Input('reltable', 'active_cell') ) def display_post(data, active_cell): if active_cell is None: return "" df = pd.DataFrame(data) selected = df[df['post_id'] == active_cell['row_id']] selected = selected[['post_id', 'post_title', 'post_body']].groupby('post_id').first() selected.rename(columns={'post_title': 'Post Title', 'post_body': 'Post Body'}, inplace=True) table = dash_table.DataTable(selected.to_dict('records'), page_size=5, style_header={'font-weight': 'bold'}, style_data={'whiteSpace': 'normal'}, style_cell={ 'font-family':'sans-serif', 'textAlign': 'left', 'font-size': '14px', 'padding-top': '3px', 'padding-bottom': '8px', 'padding-left': '8px', 'padding-right': '8px', }, css=[{ 'selector': '.dash-spreadsheet td div', 'rule': ''' line-height: 15px; max-height: 75px; min-height: 33px; display: block; overflow-y: auto; ''' }] ), return table
{"hexsha": "3a669387b01d64c166841624076936b6393035e9", "size": 10177, "ext": "py", "lang": "Python", "max_stars_repo_path": "pages/relevance_page.py", "max_stars_repo_name": "reddit-conflicting-viewpoints/Reddit", "max_stars_repo_head_hexsha": "7d531f8cb826cf2d8196cf126d1e11dacc144155", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pages/relevance_page.py", "max_issues_repo_name": "reddit-conflicting-viewpoints/Reddit", "max_issues_repo_head_hexsha": "7d531f8cb826cf2d8196cf126d1e11dacc144155", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pages/relevance_page.py", "max_forks_repo_name": "reddit-conflicting-viewpoints/Reddit", "max_forks_repo_head_hexsha": "7d531f8cb826cf2d8196cf126d1e11dacc144155", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.0052083333, "max_line_length": 487, "alphanum_fraction": 0.4750908912, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1845}
import pickle
import numpy as np

# with open("./output/cifar_inception2.pkl", 'rb') as f:
#     dat = pickle.load(f)
# is_dict = dict({})
# for item in dat:
#     allis = dat[item]
#     allis = [x[0] for x in allis]
#     is_dict[item] = np.array(allis)
#     print(item, np.max(is_dict[item]),
#           np.where(is_dict[item] == np.max(is_dict[item])))

# with open("./output/cifar_inception5.pkl", 'rb') as f:
#     dat = pickle.load(f)
# is_dict = dict({})
# for item in dat:
#     allis = dat[item]
#     allis = [x[0] for x in allis]
#     is_dict[item] = np.array(allis)
#     print(item, np.max(is_dict[item]),
#           np.where(is_dict[item] == np.max(is_dict[item])))

# with open("./output/cifar_inception10.pkl", 'rb') as f:
#     dat = pickle.load(f)
# is_dict = dict({})
# for item in dat:
#     allis = dat[item]
#     allis = [x[0] for x in allis]
#     is_dict[item] = np.array(allis)
#     print(item, np.max(is_dict[item]),
#           np.where(is_dict[item] == np.max(is_dict[item])))

# with open("./output/cifar_inceptionhinge.pkl", 'rb') as f:
#     dat = pickle.load(f)
# is_dict = dict({})
# for item in dat:
#     allis = dat[item]
#     allis = [x[0] for x in allis]
#     is_dict[item] = np.array(allis)
#     print(item, np.max(is_dict[item]),
#           np.where(is_dict[item] == np.max(is_dict[item])))

with open("./output/cifar_inception_plot.pkl", 'rb') as f:
    dat = pickle.load(f)
is_dict = dict({})
for item in dat:
    allis = dat[item]
    allis = [x[0] for x in allis]
    is_dict[item] = np.array(allis)
    print(item, np.max(is_dict[item]),
          np.where(is_dict[item] == np.max(is_dict[item])))
{"hexsha": "f425a726fc8c81f77c12d517609868d22c60eb62", "size": 1794, "ext": "py", "lang": "Python", "max_stars_repo_path": "Summary/summary_results.py", "max_stars_repo_name": "taufikxu/GAN_PID", "max_stars_repo_head_hexsha": "96565d8181e9b42eb30b4b11a946fefc88bfb8f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-05-27T05:13:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T09:25:24.000Z", "max_issues_repo_path": "Summary/summary_results.py", "max_issues_repo_name": "taufikxu/GAN_PID", "max_issues_repo_head_hexsha": "96565d8181e9b42eb30b4b11a946fefc88bfb8f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Summary/summary_results.py", "max_forks_repo_name": "taufikxu/GAN_PID", "max_forks_repo_head_hexsha": "96565d8181e9b42eb30b4b11a946fefc88bfb8f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-23T01:57:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-23T01:57:24.000Z", "avg_line_length": 33.8490566038, "max_line_length": 65, "alphanum_fraction": 0.5412486065, "include": true, "reason": "import numpy", "num_tokens": 514}
""" Test that builds the 5 DOF model presented in the article: [1]: Branlard, Flexible multibody dynamics using joint coordinates and the Rayleigh-Ritz approximation: the general framework behind and beyond Flex, Wind Energy, 2019 """ ## import numpy as np import copy import unittest from welib.yams.bodies import FlexibleBody from welib.yams.yams import * from welib.yams.TNSB import manual_assembly def main(DEBUG=False,main_axis='x',nShapes_twr=1,bInit=1): # Main Parameters nSpan_twr = 101 nSpan_bld = 61 nShapes_bld = 1 # 0,1,2 bCompat =False bSftMass = 1 bHubMass = 1 bNacMass = 1 bBldMass = 1 nB = 2 # 2 or 3 main_axis =main_axis nDOF = 1 + nShapes_twr + nShapes_bld * nB q = np.zeros((nDOF,1)) if bInit: q[:] = 1 # Define some kind of initial conditions ## --- Strucural and geometrical Inputs L_twr = 100 EI_twr = 2*10**12 m_twr = 9*10**3 L_bld = 60 EI_bld = 2*10**10 m_bld = 5*10**2 GKt_bld = 7*10**11 jxx_bld = 10*5 r_ET_inE = np.array([[0] ,[0],[0]] ) if main_axis=='x': r_TN_inT = np.array([[L_twr],[0],[0]] ) r_NGnac_inN = np.array([[0] ,[0],[2.0]]) r_NS_inN = np.array([[0] ,[0],[-10]]) elif main_axis=='z': r_TN_inT = np.array([[0],[0],[L_twr]] ) r_NGnac_inN = np.array([[1.0],[0],[0]]) r_NS_inN = np.array([[-10],[0],[0]]) r_SGhub_inS = np.array([[0] ,[0],[0]] ) r_SR_inS = np.array([[0] ,[0],[0]] ) r_RGhub_inS = np.array([[0] ,[0],[0]] ) M_hub=10**5 IR_hub = np.zeros((3,3)) IR_hub[0,0] = 2*10**5 IR_hub[1,1] = 2*10**5 IR_hub[2,2] = 3*10**5 IR_hub = IR_hub M_nac = 4*10**5 I0_nac=np.zeros((3,3)) I0_nac[0,0]=7*10**6 I0_nac[1,1]=3*10**6 I0_nac[2,2]=1*10**6 # Inertias not at COG... IG_hub = translateInertiaMatrix(IR_hub, M_hub, np.array([0,0,0]), r_RGhub_inS) IG_nac = translateInertiaMatrixToCOG(I0_nac,M_nac, r_NGnac_inN) ## Derived parameters iPsi = nShapes_twr # Index of DOF corresponding to azimuth # --------------------------------------------------------------------------------} ## --- Creating bodies # --------------------------------------------------------------------------------{ Yaw=RigidBody('YawBearing',0,(0,0,0),(0,0,0)); # Bld # TODO # TODO - THIS HAS SOME INITIAL CONDITION IN IT #Bld=UniformBeamBody('Blade', nShapes_bld, nSpan_bld, L_bld, EI_bld , m_bld, Mtop=0, jxxG=jxx_bld, GKt=GKt_bld, bCompatibility=bCompat) Blds=[] Blds.append(Body('B1')) #Blds[0].MM = np.array([ # [ 3.0000E+04, 0.0000E+00, 0.0000E+00, 0.0000E+00, 5.2444E+03, 0.0000E+00, -2.4905E+02, -1.1333E+03], # [ 0.0000E+00, 3.0000E+04, 0.0000E+00, -5.2401E+03, 0.0000E+00, 9.0000E+05, 0.0000E+00, 0.0000E+00], # [ 0.0000E+00, 0.0000E+00, 3.0000E+04, 0.0000E+00, -9.0000E+05, 0.0000E+00, 1.1746E+04, -6.5057E+03], # [ 0.0000E+00, -5.2401E+03, 0.0000E+00, 6.0150E+06, 0.0000E+00, -4.3043E+05, 0.0000E+00, 0.0000E+00], # [ 5.2444E+03, 0.0000E+00, -9.0000E+05, 0.0000E+00, 3.6015E+07, 0.0000E+00, -5.1196E+05, 8.1533E+04], # [ 0.0000E+00, 9.0000E+05, 0.0000E+00, -4.3043E+05, 0.0000E+00, 3.6000E+07, 0.0000E+00, 0.0000E+00], # [ -2.4905E+02, 0.0000E+00, 1.1746E+04, 0.0000E+00, -5.1196E+05, 0.0000E+00, 7.5019E+03, 4.2759E+00], # [ -1.1333E+03, 0.0000E+00, -6.5057E+03, 0.0000E+00, 8.1533E+04, 0.0000E+00, 4.2759E+00, 7.5066E+03]]) Blds[0].MM = np.array([ [ 3.0000E+04, 0.0000E+00, 0.0000E+00, 0.0000E+00, 1.1741E+04, 0.0000E+00,-1.9634E+02], [ 0.0000E+00, 3.0000E+04, 0.0000E+00,-1.1746E+04, 0.0000E+00, 9.0000E+05, 0.0000E+00], [ 0.0000E+00, 0.0000E+00, 3.0000E+04, 0.0000E+00,-9.0000E+05, 0.0000E+00, 1.1746E+04], [ 0.0000E+00,-1.1746E+04, 0.0000E+00, 6.0075E+06, 0.0000E+00,-5.1196E+05, 0.0000E+00], [ 1.1741E+04, 0.0000E+00,-9.0000E+05, 
0.0000E+00, 3.6008E+07, 0.0000E+00,-5.1196E+05], [ 0.0000E+00, 9.0000E+05, 0.0000E+00,-5.1196E+05, 0.0000E+00, 3.6000E+07, 0.0000E+00], [-1.9634E+02, 0.0000E+00, 1.1746E+04, 0.0000E+00,-5.1196E+05, 0.0000E+00, 7.5019E+03]]) Blds[0].PhiU =np.zeros(nShapes_bld) # cannot access nf Blds[0].KK = np.zeros((8, 8)) Blds[0].KK[6,6:]= np.array([ 2.8624E+05, -1.0224E+03]) Blds[0].KK[7,6:]= np.array([-1.0224E+03, 1.1249E+07]) Blds[0].MM = Blds[0].MM[:6+nShapes_bld,:6+nShapes_bld] Blds[0].KK = Blds[0].KK[:6+nShapes_bld,:6+nShapes_bld] Blds[0].MM *=bBldMass Blds[0].DD = np.zeros((6+nShapes_bld, 6+nShapes_bld)) for iB in range(nB-1): Blds.append(copy.deepcopy(Blds[0])) # Generator only Gen=RigidBody('Gen', 0, IG_hub, r_SGhub_inS) # ShaftHub Body Sft=RigidBody('ShaftHubGen',M_hub,IG_hub,r_SGhub_inS); Sft.MM*=bSftMass # Nacelle Body Nac=RigidBody('Nacelle',M_nac,IG_nac,r_NGnac_inN); Nac.MM*=bNacMass # Tower Body # TODO # TODO - THIS HAS SOME INITIAL CONDITION IN IT Mtop=sum([B.Mass for B in Blds]) + Sft.Mass + Nac.Mass; Twr=UniformBeamBody('Tower', nShapes_twr, nSpan_twr, L_twr, EI_twr , m_twr, Mtop=Mtop, bAxialCorr=False, bStiffening=False, main_axis=main_axis) # Temporary x_0=np.array([[0],[0],[0]]) R_0b=np.eye(3) gz=q[0:nShapes_twr,0] v_0 = np.zeros(6+nShapes_twr) a_v_0 = np.zeros(6+nShapes_twr) # TODO: to fully match matlab code, need "UseShapeIntegral" implemented Twr.updateKinematics(x_0,R_0b,gz,v_0,a_v_0) Twr.computeMassMatrix() # --------------------------------------------------------------------------------} # --- Manual assembly # --------------------------------------------------------------------------------{ Struct = manual_assembly(Twr,Yaw,Nac,Gen,Sft,Blds,q,r_ET_inE,r_TN_inT,r_NS_inN,r_SR_inS,main_axis=main_axis,DEBUG=DEBUG) return Struct class TestTNSB(unittest.TestCase): def test_TNSB_article(self): Struct=main() MM=Struct.MM KK=Struct.KK np.testing.assert_allclose(MM[0,0],7.86e5 ,rtol = 1e-3) np.testing.assert_allclose(MM[1,1],7.23e7 ,rtol = 1e-3) np.testing.assert_allclose(MM[2,2],7.50e3 ,rtol = 1e-3) np.testing.assert_allclose(MM[3,3],7.50e3 ,rtol = 1e-3) np.testing.assert_allclose(MM[0,2],7.71e3 ,rtol = 1e-3) np.testing.assert_allclose(MM[0,3],1.578e4,rtol = 1e-3 ) np.testing.assert_allclose(KK[0,0],6.01e6 ,rtol = 1e-3) np.testing.assert_allclose(KK[1,1],0.00e0 ,rtol = 1e-3) np.testing.assert_allclose(KK[2,2],2.86e5 ,rtol = 1e-3) np.testing.assert_allclose(KK[2,2],2.86e5 ,rtol = 1e-3) Twr=Struct.Twr Twr.gravity=9.81 Twr.bStiffening=True Twr.computeStiffnessMatrix() np.testing.assert_allclose(Twr.Mtop,560000.0 ,rtol = 1e-10) np.testing.assert_allclose(Twr.KKg[6,6],-98815.131096 ,rtol = 1e-10) #print(Twr.Mtop) #print(Twr.KKg) if __name__=='__main__': np.set_printoptions(linewidth=500) unittest.main() #Struct=main(DEBUG=True,main_axis='x',nShapes_twr=1,bInit=1)
{"hexsha": "736bddd6ac3bc61aac284fc975158be58e1cf8db", "size": 7353, "ext": "py", "lang": "Python", "max_stars_repo_path": "welib/yams/tests/test_TNSB_article.py", "max_stars_repo_name": "moonieann/welib", "max_stars_repo_head_hexsha": "0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2019-07-24T23:37:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T20:40:40.000Z", "max_issues_repo_path": "welib/yams/tests/test_TNSB_article.py", "max_issues_repo_name": "moonieann/welib", "max_issues_repo_head_hexsha": "0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "welib/yams/tests/test_TNSB_article.py", "max_forks_repo_name": "moonieann/welib", "max_forks_repo_head_hexsha": "0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2019-03-14T13:47:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T15:47:27.000Z", "avg_line_length": 41.5423728814, "max_line_length": 172, "alphanum_fraction": 0.5667074663, "include": true, "reason": "import numpy", "num_tokens": 3188}
# -*- coding: utf-8 -*- """ The script to demo the feature extraction procedure Objective: 1. Read in the image from the resampled nii files (generated by the resize_volume.py) the lung lobe segmentation files the lesion segmentation files 2. Segment the lesion regions within certain lung lobe 3. Extract radiomic features of the lung lobe lesion region 4. Run the manuscript in a multi-processing way Input: 1. Resampled CT images 2. Lung lobe masks () 3. Lesion masks () Output: Radiomic feature list of each lung lobe of each patient/image """ from __future__ import print_function import numpy as np import SimpleITK as sitk from radiomics import featureextractor import sys import os import os.path as osp import shutil from multiprocessing import Pool from multiprocessing import TimeoutError as MP_TimeoutError from time import sleep import csv START = "START" FINISH = "FINISH" WARNING = "WARNING" ERROR = "ERROR" LOG = "LOG" # file locations lobe_mask_loc = 'data_location' # lobe segmentation files lesion_mask_loc = 'data_location' # lesion segmentation files image_loc = 'data_location' # CT images save_root_path = 'data_location' # file save location # feature extractor settings # First define the settings settings = {} settings['binWidth'] = 20 settings['sigma'] = [0.5, 1.5, 2.5, 3.5, 4.5] # Instantiate the extractor extractor = featureextractor.RadiomicsFeatureExtractor(**settings) extractor.disableAllImageTypes() extractor.enableImageTypeByName('Original') extractor.enableImageTypeByName('Square') extractor.enableImageTypeByName('SquareRoot') extractor.enableImageTypeByName('Logarithm') extractor.enableImageTypeByName('Exponential') extractor.enableImageTypeByName('Wavelet') extractor.enableImageTypeByName('LoG') # the lists of features we need extractor.enableAllFeatures() extractor.enableFeaturesByName(shape=['Compactness1', 'Compactness2', 'Elongation', 'Flatness', 'LeastAxisLength', 'MajorAxisLength', 'Maximum2DDiameterColumn', 'Maximum2DDiameterRow', 'Maximum2DDiameterSlice', 'Maximum3DDiameter', 'MeshVolume', 'MinorAxisLength', 'SphericalDisproportion', 'Sphericity', 'SurfaceArea', 'SurfaceVolumeRatio', 'VoxelVolume']) print('Extraction parameters:\n\t', extractor.settings) # read nii file def read_nii_file(file_loc): reader = sitk.ImageFileReader() reader.SetImageIO("NiftiImageIO") reader.SetFileName(file_loc) image = reader.Execute() return image # read dicom files def read_dcm_file(file_loc): reader = sitk.ImageSeriesReader() dicom_names = reader.GetGDCMSeriesFileNames(file_loc) reader.SetFileNames(dicom_names) pcr_img = reader.Execute() return pcr_img def log_print(pid, comment, logs): if type(logs) is str: logs = [logs] for log in logs: print("# JOB %d : --%s-- %s" % (pid, comment, log)) sys.stdout.flush() def result_scan(results): unfinish = 1 while unfinish > 0: unfinish = 0 for i, res in enumerate(results): try: res.get() except Exception as e: if type(e) == MP_TimeoutError: unfinish += 1 continue else: print("\n\n\nERROR OCCUR: PID ##%d##, ERRORTYPE: %s\n\n\n" %(i, type(e))) raise e # feature extraction def extract_radiomic_features(input_file_loc, mask_lesion_loc, output_file_name, pid, title_flag = 1): ''' # debug only input_file_loc = input_file mask_lobe_loc = mask_lobe_file mask_lesion_loc = mask_lesion_file ''' # read input image pcr_img = read_nii_file(input_file_loc) # read lesion seg mask_lesion = read_nii_file(mask_lesion_loc) pcr_img.SetDirection(mask_lesion.GetDirection()) log_print(pid, START, 'Vol:%s' % input_file_loc) with open(output_file_name, 'w', 
newline='') as outfile: # extract radiomic features from whole lung lobe lesion region result = extractor.execute(pcr_img, mask_lesion) keys, values = [],[] if title_flag == 1: title_flag += 1 for key, value in result.items(): keys.append(key) values.append(value) csvwriter = csv.writer(outfile) csvwriter.writerow(keys) csvwriter.writerow(values) else: for key, value in result.items(): values.append(value) csvwriter = csv.writer(outfile) csvwriter.writerow(values) log_print(pid, FINISH, 'Vol:%s' % input_file_loc) _ = None while _ not in ['y', 'n']: _ = input('About to remove dir %s, START? [y/n]' % save_root_path).lower() if _ == 'n': exit() shutil.rmtree(save_root_path, ignore_errors=True) os.makedirs(save_root_path) processes = 8 pool = Pool(processes) results = list() pid = 0 datasetList = os.listdir(image_loc) datasetList.sort() for filename in datasetList: file = os.path.splitext(filename)[0] # image location input_file = os.path.join(image_loc, filename) # lesion mask location mask_lesion_file = os.path.join(lesion_mask_loc, filename) save_file = osp.join(save_root_path, file+'.csv') results.append( pool.apply_async( extract_radiomic_features, args=(input_file, mask_lesion_file, save_file, pid))) pid += 1 pool.close() result_scan(results) pool.join()
{"hexsha": "ab4d39a926433687b78204b7555bcb8668320db1", "size": 6096, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_generator/WLR_generator.py", "max_stars_repo_name": "DIAL-RPI/COVID19-ICUPrediction", "max_stars_repo_head_hexsha": "b79a9c53987bd71d8df7dae554eafaafa592ca9a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-08-27T03:13:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T05:17:46.000Z", "max_issues_repo_path": "feature_generator/WLR_generator.py", "max_issues_repo_name": "DIAL-RPI/COVID19-ICUPrediction", "max_issues_repo_head_hexsha": "b79a9c53987bd71d8df7dae554eafaafa592ca9a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "feature_generator/WLR_generator.py", "max_forks_repo_name": "DIAL-RPI/COVID19-ICUPrediction", "max_forks_repo_head_hexsha": "b79a9c53987bd71d8df7dae554eafaafa592ca9a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0295566502, "max_line_length": 102, "alphanum_fraction": 0.6130249344, "include": true, "reason": "import numpy", "num_tokens": 1369}
[STATEMENT]
lemma (in group) exp_of_derived_is_subgroup':
  assumes "H \<subseteq> carrier G"
  shows "subgroup ((derived G ^^ (Suc n)) H) G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. subgroup ((derived G ^^ Suc n) H) G
[PROOF STEP]
using assms derived_is_subgroup[OF subgroup.subset] derived_is_subgroup
[PROOF STATE]
proof (prove)
using this:
  H \<subseteq> carrier G
  subgroup ?H G \<Longrightarrow> subgroup (derived G ?H) G
  ?H \<subseteq> carrier G \<Longrightarrow> subgroup (derived G ?H) G
goal (1 subgoal):
 1. subgroup ((derived G ^^ Suc n) H) G
[PROOF STEP]
by (induct n) (auto)
{"llama_tokens": 229, "file": null, "length": 2}
import numpy as np
from sklearn.cluster import KMeans

raw_data = []
fitness = [0, 0, 0, 0, 0, 0]
Population = [[1, 2], [1, 4], [1, 0], [4, 2], [4, 4], [4, 0]]

for i in range(len(Population)):
    raw_data.append(Population[i])
raw_data = np.array(raw_data)

num_cluster = int(2)
kmeans = KMeans(n_clusters=num_cluster, random_state=0).fit(raw_data)
distances = kmeans.transform(raw_data).sum(axis=1)
labels = kmeans.labels_
print(distances)
print(labels)

# for each cluster, find the member with the smallest summed distance
raw_data_1, closet_item_idx = [], []
for clst in range(num_cluster):
    min_idx, min_dist = -1, -1
    for idx in range(len(distances)):
        if labels[idx] == clst:
            if min_idx < 0 and min_dist < 0:  # first member seen for this cluster
                min_idx = idx
                min_dist = distances[idx]
            elif min_dist > distances[idx]:
                min_idx = idx
                min_dist = distances[idx]
    raw_data_1.append((raw_data[min_idx], min_idx))
    closet_item_idx.append(min_idx)
print(raw_data_1)
print(closet_item_idx)

# rank the representative items, then propagate each cluster's rank to its members
for i in range(len(closet_item_idx)):
    fitness[closet_item_idx[i]] = i + 1
print(fitness)

for i in range(len(Population)):
    if i not in closet_item_idx:
        fitness[i] = fitness[closet_item_idx[labels[i]]]
print(fitness)
{"hexsha": "64aadc1da97ca244e36ab5d7b11dfc54182677b4", "size": 1209, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/test.py", "max_stars_repo_name": "xijunlee/SPC-POSM", "max_stars_repo_head_hexsha": "d5b831445437f93d00cb5fe7eb7ac462512feb13", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/test.py", "max_issues_repo_name": "xijunlee/SPC-POSM", "max_issues_repo_head_hexsha": "d5b831445437f93d00cb5fe7eb7ac462512feb13", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/test.py", "max_forks_repo_name": "xijunlee/SPC-POSM", "max_forks_repo_head_hexsha": "d5b831445437f93d00cb5fe7eb7ac462512feb13", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7857142857, "max_line_length": 69, "alphanum_fraction": 0.6559139785, "include": true, "reason": "import numpy", "num_tokens": 343}
# -*- coding: UTF8 -*-
"""
translate between evo and Pandas types
author: Michael Grupp

This file is part of evo (github.com/MichaelGrupp/evo).

evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""

import os

import numpy as np
import pandas as pd

from evo.core import trajectory, result


def trajectory_to_df(traj):
    if not isinstance(traj, trajectory.PosePath3D):
        raise TypeError("trajectory.PosePath3D or derived required")
    poses_dict = {
        "x": traj.positions_xyz[:, 0],
        "y": traj.positions_xyz[:, 1],
        "z": traj.positions_xyz[:, 2],
        "qw": traj.orientations_quat_wxyz[:, 0],
        "qx": traj.orientations_quat_wxyz[:, 1],
        "qy": traj.orientations_quat_wxyz[:, 2],
        "qz": traj.orientations_quat_wxyz[:, 3],
    }
    if type(traj) is trajectory.PoseTrajectory3D:
        index = traj.timestamps
    else:
        index = np.arange(0, traj.num_poses)
    return pd.DataFrame(data=poses_dict, index=index)


def result_to_df(result_obj, label=None):
    if not isinstance(result_obj, result.Result):
        raise TypeError("result.Result or derived required")
    data = {
        "info": result_obj.info,
        "stats": result_obj.stats,
        "np_arrays": {},
        "trajectories": {}
    }
    for name, array in result_obj.np_arrays.items():
        data["np_arrays"][name] = array
    if label is None and "est_name" in data["info"]:
        label = os.path.splitext(os.path.basename(data["info"]["est_name"]))[0]
    elif label is None:
        label = "unnamed_result"
    return pd.DataFrame(data=data).T.stack().to_frame(name=label)
{"hexsha": "930be587463e4dfe5d3238e84bad4012ee176926", "size": 2150, "ext": "py", "lang": "Python", "max_stars_repo_path": "ros/src/simu_tools/scripts/evo/tools/pandas_bridge.py", "max_stars_repo_name": "yx0123/AirSim", "max_stars_repo_head_hexsha": "49e332b27f50c031bc2085726dd10c65bc8be7fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2020-08-25T22:44:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T23:55:50.000Z", "max_issues_repo_path": "ros/src/simu_tools/scripts/evo/tools/pandas_bridge.py", "max_issues_repo_name": "yx0123/AirSim", "max_issues_repo_head_hexsha": "49e332b27f50c031bc2085726dd10c65bc8be7fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-06-18T05:23:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-07T08:07:50.000Z", "max_forks_repo_path": "ros/src/simu_tools/scripts/evo/tools/pandas_bridge.py", "max_forks_repo_name": "yx0123/AirSim", "max_forks_repo_head_hexsha": "49e332b27f50c031bc2085726dd10c65bc8be7fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-11-12T08:55:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-28T08:58:19.000Z", "avg_line_length": 33.0769230769, "max_line_length": 79, "alphanum_fraction": 0.6818604651, "include": true, "reason": "import numpy", "num_tokens": 531}
import Mathbin import ZkSNARK.GeneralLemmas.MvDivisibility import ZkSNARK.GeneralLemmas.PolynomialMvSvCast noncomputable section namespace KnowledgeSoundness open Finset Polynomial /- The finite field parameter of our SNARK -/ variable {F : Type u} [Field F] /- The naturals representing: m - the number of gates in the circuit, n_stmt - the statement size, n_wit - the witness size -/ /-- An inductive type from which to index the variables of the 3-variable polynomials the proof manages -/ inductive Vars : Type | X : Vars | Y : Vars | Z : Vars instance : DecidableEq Vars := fun a b => match a, b with | .X, .X => isTrue rfl | .X, .Y => isFalse (fun h => Vars.noConfusion h) | .X, .Z => isFalse (fun h => Vars.noConfusion h) | .Y, .X => isFalse (fun h => Vars.noConfusion h) | .Y, .Y => isTrue rfl | .Y, .Z => isFalse (fun h => Vars.noConfusion h) | .Z, .X => isFalse (fun h => Vars.noConfusion h) | .Z, .Y => isFalse (fun h => Vars.noConfusion h) | .Z, .Z => isTrue rfl variable {m n_stmt n_wit : ℕ} def n := n_stmt + n_wit /- u_stmt and u_wit are fin-indexed collections of polynomials from the square span program -/ variable (u_stmt : Finₓ n_stmt → F[X]) variable (u_wit : Finₓ n_wit → F[X]) /- The roots of the polynomial t -/ variable (r : Finₓ m → F) /-- t is the polynomial divisibility by which is used to verify satisfaction of the SSP -/ def t : Polynomial F := ∏ i in finRange m, (x : F[X]) - c (r i) lemma t_def : (t r) = ∏ i in finRange m, (x : F[X]) - c (r i) := rfl /- t has degree m -/ lemma nat_degree_t : (t r).natDegree = m := by rw [t, Polynomial.nat_degree_prod] have h1 : ∀ x : F, RingHom.toFun Polynomial.c x = coeFn Polynomial.c x · intro rw [RingHom.to_fun_eq_coe] conv_lhs => · congr skip ext simp_rw [h1 (r _), Polynomial.nat_degree_X_sub_C] rw [Finset.sum_const, Finset.fin_range_card, Algebra.id.smul_eq_mul, mul_oneₓ] intros apply Polynomial.X_sub_C_ne_zero lemma monic_t : Polynomial.Monic (t r) := by rw [t] apply Polynomial.monic_prod_of_monic intros exact Polynomial.monic_X_sub_C (r _) lemma degree_t_pos (hm : 0 < m) : 0 < (t r).degree := by suffices h : (t r).degree = some m · rw [h] apply WithBot.some_lt_some.2 exact hm have h := nat_degree_t r rw [Polynomial.natDegree] at h revert h -- this is needed because degree (t r) is not substituted in h otherwise induction degree (t r) · intro h rw [Option.get_or_else_none] at h rw [eq_comm] rw [← h] at hm exfalso simp at hm intro h rw [Option.get_or_else_some] at h rw [h] -- Single variable form of V_wit def V_wit_sv (a_wit : Finₓ n_wit → F) : Polynomial F := ∑ i in finRange n_wit, a_wit i • u_wit i /- The statement polynomial that the verifier computes from the statement bits, as a single variable polynomial -/ def V_stmt_sv (a_stmt : Finₓ n_stmt → F) : Polynomial F := ∑ i in finRange n_stmt, a_stmt i • u_stmt i /- Checks whether a statement witness pair satisfies the SSP -/ def satisfying (a_stmt : Finₓ n_stmt → F) (a_wit : Finₓ n_wit → F) := ((∑ i in finRange n_stmt, a_stmt i • u_stmt i) + ∑ i in finRange n_wit, a_wit i • u_wit i) ^ 2 %ₘ (t r) = 1 /- Multivariable polynomial definititons and ultilities -/ /- Helper for converting MvPolynomial to single -/ def singlify : Vars → Polynomial F | Vars.X => Polynomial.x | Vars.Y => 1 | Vars.Z => 1 variable (F) /- Helpers for representing X, Y, Z as 3-variable polynomials -/ def X_poly : MvPolynomial Vars F := MvPolynomial.x Vars.X def Y_poly : MvPolynomial Vars F := MvPolynomial.x Vars.Y def Z_poly : MvPolynomial Vars F := MvPolynomial.x Vars.Z variable {F} /- Multivariable version of t -/ 
def t_mv : MvPolynomial Vars F := (t r).eval₂ MvPolynomial.c (X_poly F) /- V_stmt as a multivariable polynomial of Vars.X -/ def V_stmt_mv (a_stmt : Finₓ n_stmt → F) : MvPolynomial Vars F := (V_stmt_sv u_stmt a_stmt).eval₂ MvPolynomial.c (X_poly F) /-- Converting a single variable polynomial to a multivariable polynomial and back yields the same polynomial -/ lemma my_multivariable_to_single_variable (p : Polynomial F) : ((p.eval₂ MvPolynomial.c (X_poly F)).eval₂ Polynomial.c singlify) = p := by apply multivariable_to_single_variable simp_rw [singlify] variable (F) /-- The crs elements as multivariate polynomials of the toxic waste samples -/ def crs_powers_of_τ (i : Finₓ m) : (MvPolynomial Vars F) := X_poly F ^ (i : ℕ) def crs_γ : MvPolynomial Vars F := Z_poly F def crs_γβ : MvPolynomial Vars F := (Z_poly F) * Y_poly F def crs_β_ssps (i : Finₓ n_wit) : (MvPolynomial Vars F) := (Y_poly F) * (u_wit i).eval₂ MvPolynomial.c (X_poly F) variable {F} /- The coefficients of the CRS elements in the algebraic adversary's representation -/ variable (b v h : Finₓ m → F) variable (b_γ v_γ h_γ b_γβ v_γβ h_γβ : F) variable (b' v' h' : Finₓ n_wit → F) /-- Polynomial forms of the adversary's proof representation -/ def B_wit : MvPolynomial Vars F := (∑ i in finRange m, b i • crs_powers_of_τ F i) + b_γ • crs_γ F + b_γβ • crs_γβ F + ∑ i in finRange n_wit, b' i • crs_β_ssps F u_wit i def V_wit : MvPolynomial Vars F := (∑ i in finRange m, v i • crs_powers_of_τ F i) + v_γ • crs_γ F + v_γβ • crs_γβ F + ∑ i in finRange n_wit, v' i • crs_β_ssps F u_wit i def H : MvPolynomial Vars F := (∑ i in finRange m, h i • crs_powers_of_τ F i) + h_γ • crs_γ F + h_γβ • crs_γβ F + ∑ i in finRange n_wit, h' i • crs_β_ssps F u_wit i /-- V as a multivariable polynomial -/ def V (a_stmt : Finₓ n_stmt → F) : MvPolynomial Vars F := V_stmt_mv u_stmt a_stmt + V_wit u_wit v v_γ v_γβ v' /- Lemmas for proof -/ lemma eq_helper (x j : ℕ) : x = j ∨ (x = Zero.zero ∧ j = Zero.zero) ↔ x = j := by apply Iff.intro · intro h apply Or.elim h id · rintro ⟨h, h'⟩ rw [h, h'] · intro h left exact h lemma h2_1 (j : Finₓ m) : (B_wit u_wit b b_γ b_γβ b').coeff (Finsupp.single Vars.X j) = b j := by rw [B_wit, MvPolynomial.coeff_add, MvPolynomial.coeff_add, MvPolynomial.coeff_add] simp only [B_wit, crs_powers_of_τ, crs_γ, crs_γβ, crs_β_ssps, X_poly, Y_poly, Z_poly, Finsupp.single_eq_single_iff, eq_helper, true_and, Nat.one_ne_zero, mul_boole, add_zero, Algebra.id.smul_eq_mul, MvPolynomial.coeff_add, eq_self_iff_true, not_true, Finsupp.mem_support_iff, if_false, Ne.def, not_false_iff, Finset.sum_const_zero, MvPolynomial.coeff_smul, _root_.mul_zero, Finsupp.single_eq_of_ne, false_and, or_self, MvPolynomial.coeff_mul] rw [MvPolynomial.coeff_sum] simp_rw [MvPolynomial.coeff_smul] have h1 : (@Zero.zero F (AddZeroClassₓ.toHasZero F) : F) = (@OfNat.ofNat F 0 Zero.toOfNat0 : F) · simp only [OfNat.ofNat] have : b j = b j + 0 + 0 + 0 · rw [← h1, add_zeroₓ (b j), add_zeroₓ (b j), add_zeroₓ (b j)] rw [this] apply congr_arg2ₓ apply congr_arg2ₓ apply congr_arg2ₓ · conv_lhs => · congr skip ext rw [MvPolynomial.coeff_smul, MvPolynomial.coeff_X_pow] simp_rw [smul_ite _ (b _) _ _, smul_zero (b _), Finsupp.single_eq_single_iff, true_and, eq_helper] rw [Finset.sum_ite, Finset.sum_const_zero, add_zeroₓ] conv_lhs => · congr skip ext rw [smul_eq_mul, mul_oneₓ] rw [Finset.sum_eq_single] · intros x hx h exfalso apply h rw [Finset.mem_filter] at hx rw [Finₓ.eq_iff_veq, hx.2] · intro h exfalso apply h simp_rw [Finset.mem_filter, and_true, Finset.mem_fin_range] · rw 
[MvPolynomial.coeff_smul, (@smul_eq_zero F F _ _ _ _ _ _).2] · rw [h1] · right rw [MvPolynomial.coeff_X', if_neg] intro H rw [Finsupp.single_eq_single_iff] at H simp only [false_and] at H · rw [MvPolynomial.coeff_smul, (@smul_eq_zero F F _ _ _ _ _ _).2, h1] simp only right rw [MvPolynomial.coeff_mul] apply Finset.sum_eq_zero intros x hx simp_rw [MvPolynomial.coeff_X'] rw [boole_mul _ _, ite_eq_right_iff, ite_eq_right_iff] intros f1 f2 exfalso simp only [Finsupp.mem_antidiagonal] at hx rw [← f1, ← f2, Finsupp.ext_iff] at hx simp only [Pi.add_apply, Finsupp.coe_add] at hx specialize hx Vars.Z simp only [Finsupp.single_eq_same] at hx rw [Finsupp.single_eq_of_ne, Finsupp.single_eq_of_ne, add_zeroₓ] at hx apply Nat.one_ne_zero · simp simp at hx simp only simp only · rw [MvPolynomial.coeff_sum] apply Finset.sum_eq_zero intros x hx rw [MvPolynomial.coeff_smul] rw [(@smul_eq_zero F F _ _ _ _ _ _).2] right rw [mul_comm (MvPolynomial.x Vars.Y) _] rw [MvPolynomial.coeff_mul_X', if_neg] rw [Finsupp.mem_support_iff, not_not, Finsupp.single_eq_of_ne] simp only lemma h3_1 : (B_wit u_wit b b_γ b_γβ b').coeff (Finsupp.single Vars.Z 1) = b_γ := by sorry -- rw B_wit, -- simp [crs_powers_of_τ, crs_γ, crs_γβ, crs_β_ssps], -- simp [X_poly, Y_poly, Z_poly], -- simp with coeff_simp, -- simp [finsupp.single_eq_single_iff], -- -- simp? [-finsupp.single_nat_sub], -- simp?, -- ite_finsupp_simplify, -- simp only with coeff_simp, -- ite_finsupp_simplify, -- this is pretty useless lemma h4_1 (hb : ∀ i, b i = 0) : (λ i : Finₓ m => b i • crs_powers_of_τ F i) = λ i : Finₓ m => (0 : MvPolynomial Vars F) := by funext i rw [hb i] change Zero.zero • crs_powers_of_τ F i = Zero.zero rw [zero_smul] lemma h5_1 : b_γβ • ((Z_poly F) * Y_poly F) = (Y_poly F) * b_γβ • Z_poly F := by sorry -- rw MvPolynomial.smul_eq_C_mul -- rw MvPolynomial.smul_eq_C_mul -- ring lemma h6_2 : ((H u_wit h h_γ h_γβ h') * t_mv r + (MvPolynomial.c 1 : MvPolynomial Vars F)).coeff (Finsupp.single Vars.Z 2) = 0 := by sorry -- rw MvPolynomial.coeff_add -- rw MvPolynomial.coeff_C -- rw if_neg -- rw MvPolynomial.coeff_mul -- rw single_2_antidiagonal -- rw Finset.sum_insert -- rw Finset.sum_insert -- rw Finset.sum_singleton -- simp [H, t_mv, crs_powers_of_τ, crs_γ, crs_γβ, crs_β_ssps, X_poly, Y_poly, Z_poly] -- simp only with coeff_simp polynomial_nf -- unfold_coes, -- ite_finsupp_simplify, -- simp only with coeff_simp, -- simp [-finsupp.single_zero, finsupp.single_eq_single_iff] -- rw finset.mem_singleton -- simp -- simp [finset.mem_insert, finset.mem_singleton] -- simp -- -- dec_trivial, -- rw finsupp.eq_single_iff, -- dec_trivial, -- TODO check all lemmas are used lemma h6_3 (a_stmt : Finₓ n_stmt → F) : ((b_γβ • Z_poly F + (∑ i in finRange n_stmt, a_stmt i • Polynomial.eval₂ MvPolynomial.c (X_poly F) (u_stmt i)) + ∑ i in finRange n_wit, b' i • Polynomial.eval₂ MvPolynomial.c (X_poly F) (u_wit i : F[X])) ^ 2 : MvPolynomial Vars F).coeff (Finsupp.single Vars.Z 2) = b_γβ ^ 2 := by sorry -- rw pow_succ -- rw pow_one -- rw MvPolynomial.coeff_mul -- rw single_2_antidiagonal -- rw Finset.sum_insert -- rw Finset.sum_insert -- rw Finset.sum_singleton -- -- NOTE The simp only with coeff_simp and ite_finsupp_simplify tactic work here -- -- But I used to get deterministic timeout - i fixed this by making simp only with coeff_simp do simp only instead -- -- -- simp [X_poly, Y_poly, Z_poly] -- simp only with coeff_simp polynomial_nf -- -- simp only with coeff_simp, -- -- ite_finsupp_simplify, -- rw pow_succ -- rw pow_one -- simp -- simp [-finsupp.single_zero, 
finsupp.single_eq_single_iff] -- simp [finset.mem_insert, finset.mem_singleton] -- simp /- This function represents the exctractor in the AGM. -/ -- This makes no sense -- def extractor : Finₓ n_wit → F := b' lemma zero_eq_zero : (0 : F) = Zero.zero := rfl /-- Show that if the adversary polynomials obey the equations, then the coefficients give a satisfying witness. This theorem appears in the Baby SNARK paper as Theorem 1 case 1. -/ theorem case_1 (a_stmt : Finₓ n_stmt → F ) (hm : 0 < m) (eqnI : B_wit u_wit b b_γ b_γβ b' = (V_wit u_wit v v_γ v_γβ v') * Y_poly F) (eqnII : (H u_wit h h_γ h_γβ h') * t_mv r + (MvPolynomial.c 1 : MvPolynomial Vars F) = (V u_stmt u_wit v v_γ v_γβ v' a_stmt) ^ 2) : (satisfying u_stmt u_wit r a_stmt b') := by -- TODO eqnI should have a Z term on both sides -- "B_wit only has terms with a Y component" have h1 : (∀ m : Vars →₀ ℕ, m Vars.Y = 0 → (B_wit u_wit b b_γ b_γβ b').coeff m = 0) · rw [eqnI] sorry -- "b_0 b_1, ..., b_m, ... are all zero" have h2 : ∀ i : Finₓ m, b i = 0 · intro i rw [← (h2_1 u_wit b b_γ b_γβ b' i), eqnI] apply coeff_mul_X_eq_zero rw [← coefn_funlike, Finsupp.single_eq_of_ne] · rfl · decide -- b_γ = 0 have h3 : b_γ = 0 · rw [← h3_1 u_wit b b_γ b_γβ b', eqnI] apply coeff_mul_X_eq_zero rw [← coefn_funlike, Finsupp.single_eq_of_ne] · rfl · decide -- "We can write B_wit as ..." have h4 : B_wit u_wit b b_γ b_γβ b' = b_γβ • crs_γβ F + ∑ i in finRange n_wit, b' i • crs_β_ssps F u_wit i · simp_rw [B_wit, h2, h3] have : (∑ i in finRange m, (Zero.zero : F) • (crs_powers_of_τ F i : MvPolynomial Vars F)) = ∑ i in finRange m, Zero.zero · apply Finset.sum_congr rfl (fun i hi => ?_) rw [zero_smul] rw [zero_eq_zero, zero_smul, this, Finset.sum_const_zero, add_zeroₓ, zero_addₓ] -- "... we also see that V_wit must not have any Y terms at all" have h5 : V_wit u_wit v v_γ v_γβ v' = b_γβ • Z_poly F + ∑ i in finRange n_wit, b' i • (u_wit i).eval₂ MvPolynomial.c (X_poly F) · apply left_cancel_X_mul Vars.Y rw [← Y_poly, _root_.mul_comm, ← eqnI, h4, crs_γβ] simp_rw [crs_β_ssps] rw [mul_addₓ, h5_1, Finset.mul_sum, add_right_injₓ] apply Finset.sum_congr rfl (fun i hi => ?_) rw [MvPolynomial.smul_eq_C_mul, MvPolynomial.smul_eq_C_mul, mul_left_commₓ] -- -- "... write V(.) as follows ..." have h6 : V u_stmt u_wit v v_γ v_γβ v' a_stmt = b_γβ • Z_poly F + (∑ i in finRange n_stmt, a_stmt i • (u_stmt i).eval₂ MvPolynomial.c (X_poly F)) + ∑ i in finRange n_wit, b' i • (u_wit i).eval₂ MvPolynomial.c (X_poly F) · rw [V, V_stmt_mv, h5, V_stmt_sv, eval₂_finset_sum, add_left_commₓ] rw [add_assocₓ, add_right_injₓ, add_left_injₓ] apply Finset.sum_congr rfl (fun i hi => ?_) rw [eval₂_smul, MvPolynomial.smul_eq_C_mul] -- ... we can conclude that b_γβ = 0. have h7 : b_γβ = 0 · let eqnII' := eqnII rw [h6] at eqnII' have h6_1 := congr_arg (MvPolynomial.coeff (Finsupp.single Vars.Z 2)) eqnII' rw [h6_2, h6_3] at h6_1 exact pow_eq_zero (Eq.symm h6_1) -- Finally, we arrive at the conclusion that the coefficients define a satisfying witness such that ... 
have h8 : V u_stmt u_wit v v_γ v_γβ v' a_stmt = (∑ i in finRange n_stmt, a_stmt i • (u_stmt i).eval₂ MvPolynomial.c (X_poly F)) + ∑ i in finRange n_wit, b' i • (u_wit i).eval₂ MvPolynomial.c (X_poly F) · rw [h6, h7, add_assocₓ] change (Zero.zero : F) • Z_poly F + _ = _ rw [zero_smul, zero_addₓ] -- Treat both sides of this a single variable polynomial have h9 := congr_arg (MvPolynomial.eval₂ (@Polynomial.c F _) singlify) h8 rw [MvPolynomial.eval₂_add, MvPolynomial.eval₂_sum, MvPolynomial.eval₂_sum] at h9 conv at h9 => · rhs congr congr skip intro i rw [MvPolynomial.smul_eq_C_mul, ← eval₂_smul] rw [my_multivariable_to_single_variable] skip congr skip intro i rw [MvPolynomial.smul_eq_C_mul, ← eval₂_smul] rw [my_multivariable_to_single_variable] have wat : 0 = 0 := rfl -- if this isn't here, the `conv` makes everything break? clear wat rw [satisfying, ← h9] clear h8 h9 sorry -- -- TODO is there a more efficient way to simply say (evaluate f on both sides of this hypothesis)? Yes the congr tactic does this -- have h10 : ((H * t_mv + MvPolynomial.C 1).eval₂ Polynomial.C singlify) %ₘ t = (((V a_stmt)^2).eval₂ Polynomial.C singlify) %ₘ t -- rw eqnII -- -- h10 done -- rw MvPolynomial.eval₂_add at h10 -- rw MvPolynomial.eval₂_mul at h10 -- rw ←MvPolynomial.eval₂_pow -- rw ←h10 -- rw t_mv -- rw my_multivariable_to_single_variable t -- have h12: MvPolynomial.C 1 = (Polynomial.C 1 : Polynomial F).eval₂ MvPolynomial.C X_poly -- rw Polynomial.eval₂_C -- -- h12 done -- rw h12 -- rw my_multivariable_to_single_variable -- have h13 : (MvPolynomial.eval₂ Polynomial.C singlify H * t + Polynomial.C 1 : Polynomial F) /ₘ t = (MvPolynomial.eval₂ Polynomial.C singlify H : Polynomial F) ∧ (MvPolynomial.eval₂ Polynomial.C singlify H * t + Polynomial.C 1 : Polynomial F) %ₘ t = (Polynomial.C 1 : Polynomial F) -- apply Polynomial.div_mod_by_monic_unique -- exact monic_t -- split -- rw [add_comm, mul_comm] -- rw Polynomial.degree_C -- exact degree_t_pos hm -- exact one_ne_zero -- -- h13 done -- rw h13.2 -- simp -- end end KnowledgeSoundness
{"author": "lurk-lab", "repo": "ZKSnark.lean", "sha": "a92ff01fac8e59ffb0de13a41eac6461af6d7cf0", "save_path": "github-repos/lean/lurk-lab-ZKSnark.lean", "path": "github-repos/lean/lurk-lab-ZKSnark.lean/ZKSnark.lean-a92ff01fac8e59ffb0de13a41eac6461af6d7cf0/ZkSNARK/BabyZkSNARK/KnowledgeSoundness.lean"}
""" This is is a part of the DeepLearning.AI TensorFlow Developer Professional Certificate offered on Coursera. All copyrights belong to them. I am sharing this work here to showcase the projects I have worked on Course: Introduction to TensorFlow for Artificial Intelligence, Machine Learning, and Deep Learning Week 2: Deep Neural Networks for Time Series Aim: Creating Batch datasets """ import tensorflow as tf import numpy as np import matplotlib.pyplot as plt print(tf.__version__) dataset = tf.data.Dataset.range(10) for val in dataset: print(val.numpy()) dataset = tf.data.Dataset.range(10) dataset = dataset.window(5, shift=1) for window_dataset in dataset: for val in window_dataset: print(val.numpy(), end=" ") print() dataset = tf.data.Dataset.range(10) dataset = dataset.window(5, shift=1, drop_remainder=True) for window_dataset in dataset: for val in window_dataset: print(val.numpy(), end=" ") print() dataset = tf.data.Dataset.range(10) dataset = dataset.window(5, shift=1, drop_remainder=True) dataset = dataset.flat_map(lambda window: window.batch(5)) for window in dataset: print(window.numpy()) dataset = tf.data.Dataset.range(10) dataset = dataset.window(5, shift=1, drop_remainder=True) dataset = dataset.flat_map(lambda window: window.batch(5)) dataset = dataset.map(lambda window: (window[:-1], window[-1:])) for x,y in dataset: print(x.numpy(), y.numpy()) dataset = tf.data.Dataset.range(10) dataset = dataset.window(5, shift=1, drop_remainder=True) dataset = dataset.flat_map(lambda window: window.batch(5)) dataset = dataset.map(lambda window: (window[:-1], window[-1:])) dataset = dataset.shuffle(buffer_size=10) for x,y in dataset: print(x.numpy(), y.numpy()) dataset = tf.data.Dataset.range(10) dataset = dataset.window(5, shift=1, drop_remainder=True) dataset = dataset.flat_map(lambda window: window.batch(5)) dataset = dataset.map(lambda window: (window[:-1], window[-1:])) dataset = dataset.shuffle(buffer_size=10) dataset = dataset.batch(2).prefetch(1) for x,y in dataset: print("x = ", x.numpy()) print("y = ", y.numpy())
{"hexsha": "72ff21c194557ab30ff249201a36fe127d3eb9ad", "size": 2167, "ext": "py", "lang": "Python", "max_stars_repo_path": "Course_4_Week_2_Project_1.py", "max_stars_repo_name": "Vivek9Chavan/DeepLearning.AI-TensorFlow-Developer-Professional-Certificate", "max_stars_repo_head_hexsha": "c48f2040631a87d973ea8cbe534af9cd8f715d4a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Course_4_Week_2_Project_1.py", "max_issues_repo_name": "Vivek9Chavan/DeepLearning.AI-TensorFlow-Developer-Professional-Certificate", "max_issues_repo_head_hexsha": "c48f2040631a87d973ea8cbe534af9cd8f715d4a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Course_4_Week_2_Project_1.py", "max_forks_repo_name": "Vivek9Chavan/DeepLearning.AI-TensorFlow-Developer-Professional-Certificate", "max_forks_repo_head_hexsha": "c48f2040631a87d973ea8cbe534af9cd8f715d4a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3432835821, "max_line_length": 108, "alphanum_fraction": 0.7143516382, "include": true, "reason": "import numpy", "num_tokens": 506}
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 14:52:57 2018

@author: amaity

Generate a synthetic PDF distribution that is
dependent upon the number of core allocations,
you might ignore the workload for now
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from scipy import special as sp

def construct_pdf(D,M,var,plot=False):
    """
    Creates a mu and var
    for a normal distribution
    """
    synth_risk = np.linspace(1e-5,0.999999,M)
    mu = []
    vr = []
    m = M
    for s in synth_risk:
        tmu = estimate_mu(D,s,var)
        mu.append(tmu)
        vr.append(var)

        # Create Plots
        if plot:
            if m % 16 == 0:
                dist = stats.norm(loc=tmu,scale=np.sqrt(var))
                x = np.linspace(dist.ppf(0.001),dist.ppf(0.999),1000)
                y = dist.pdf(x)
                plt.plot(x,y,label="m=%d"%m)
        m = m-1

    if plot:
        plt.legend()
        plt.xlabel("Execution Time")
        plt.title("Synthetically Constructed Execution Time Distribution")
        plt.savefig("constructed-dist.pdf")
        plt.close()

    mu.reverse()
    vr.reverse()
    return (mu,vr)

def estimate_mu(D,s,var):
    """
    Given the deadline, risk (probability) and variance of the distribution
    Returns the value of mu
    """
    mu = D - (np.sqrt(2*var) * sp.erfinv(1 - 2*s))
    return mu

def main_test2():
    D = 2500
    var = 4.00
    s = np.linspace(0.01,0.99,100)
    mu = estimate_mu(D,s,var)
    plt.plot(s,mu)

if __name__=="__main__":
    #main_test2()
    construct_pdf(2500,4,4.00,True)
{"hexsha": "558fefc1491501361b4f070434deace9dc0141f2", "size": 1676, "ext": "py", "lang": "Python", "max_stars_repo_path": "ptss_synthpdf.py", "max_stars_repo_name": "Arka2009/ecopmcpoc", "max_stars_repo_head_hexsha": "4325026d11b6b72716d96072dde9665983be4dbb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ptss_synthpdf.py", "max_issues_repo_name": "Arka2009/ecopmcpoc", "max_issues_repo_head_hexsha": "4325026d11b6b72716d96072dde9665983be4dbb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ptss_synthpdf.py", "max_forks_repo_name": "Arka2009/ecopmcpoc", "max_forks_repo_head_hexsha": "4325026d11b6b72716d96072dde9665983be4dbb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.0526315789, "max_line_length": 73, "alphanum_fraction": 0.5710023866, "include": true, "reason": "import numpy,from scipy", "num_tokens": 471}
syntax "foo" : tactic macro_rules | `(tactic| foo) => `(tactic| assumption) macro_rules | `(tactic| foo) => `(tactic| apply Nat.pred_lt; assumption) macro_rules | `(tactic| foo) => `(tactic| contradiction) example (i : Nat) (h : i - 1 < i) : i - 1 < i := by foo example (i : Nat) (h : i ≠ 0) : i - 1 < i := by foo example (i : Nat) (h : False) : i - 1 < i := by foo
{"author": "leanprover", "repo": "lean4", "sha": "742d053a97bdd109a41a921facd1cd6a55e89bc7", "save_path": "github-repos/lean/leanprover-lean4", "path": "github-repos/lean/leanprover-lean4/lean4-742d053a97bdd109a41a921facd1cd6a55e89bc7/tests/lean/run/evalTacticBug.lean"}
# multivariate multi-step encoder-decoder lstm example
from numpy import array
from numpy import hstack
from keras.models import load_model
import tensorflow_hub as hub
import numpy as np
import tensorflow_text
from sklearn.neighbors import NearestCentroid
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import pandas as pd
import itertools
from random import sample

embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")

df = pd.read_csv("data/data.csv", encoding= 'unicode_escape')
df = df.dropna()
df_transactions = df[df['Description'].str.len().gt(3)].dropna().groupby('InvoiceNo')['Description'].agg({'size': len, 'set': lambda x: list(set(x))})

n_steps_in, n_steps_out = 3, 2
n_features = 512

descriptions = np.load('data/descriptions.data.npy')
values = np.load('data/values.data.npy')

clf = NearestCentroid()
clf.fit(values, descriptions)

model = load_model('data/model.h5')

def pad_list(s, n):
    s = [string for string in s if string != ""]
    return [''] * (n - len(s)) + s

def next(x_in):
    x_input = array([embed(x_in)])
    x_input = x_input.reshape((1, n_steps_in, n_features))
    yhat = model.predict(x_input, verbose=0)
    return list(set(clf.predict(yhat[0,:])))

def predictIndex(i):
    input = pad_list(df_transactions['set'][i], n_steps_in)[:n_steps_in]
    print(input, '=>', next(input))

for i in sample(range(1, 1000), 20):
    predictIndex(i)
{"hexsha": "07b01f36b9d9e6781b9b82b7871407bcd22fd407", "size": 1451, "ext": "py", "lang": "Python", "max_stars_repo_path": "testmodel.py", "max_stars_repo_name": "bgokden/lstm-recommender-example", "max_stars_repo_head_hexsha": "77c82fc57e6cf27816d0417dd38327e16fd0541a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-03T00:07:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-03T00:07:30.000Z", "max_issues_repo_path": "testmodel.py", "max_issues_repo_name": "bgokden/lstm-recommender-example", "max_issues_repo_head_hexsha": "77c82fc57e6cf27816d0417dd38327e16fd0541a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:41:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:17:35.000Z", "max_forks_repo_path": "testmodel.py", "max_forks_repo_name": "bgokden/lstm-recommender-example", "max_forks_repo_head_hexsha": "77c82fc57e6cf27816d0417dd38327e16fd0541a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-21T06:41:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T05:31:06.000Z", "avg_line_length": 30.2291666667, "max_line_length": 150, "alphanum_fraction": 0.7181254307, "include": true, "reason": "import numpy,from numpy", "num_tokens": 390}
import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data import matplotlib.pyplot as plt import os import numpy as np # mnist = input_data.read_data_sets("MNIST_data", one_hot=True) # print "basic information of mnist dataset" # print "mnist training data size: ", mnist.train.num_examples # print "mnist validating data size: ", mnist.validation.num_examples # print "mnist testing data size: ", mnist.test.num_examples # print "mnist example training data: ", mnist.train.images[0] # print "mnist example training data label", mnist.train.labels[0] # define input and output data size INPUT_NODE = 784 OUTPUT_NODE = 10 # params for neural network LAYER1_NODE = 500 BATCH_SIZE = 1000 LEARNING_RATE_BASE = 0.8 LEARNING_RATE_DECAY = 0.999 REGULARIZATION_RATE = 0.0001 TRAINING_STEPS = 100000 MOVING_AVERAGE_DECAY = 0.99 # calc the result of forward propagation, # ***original method*** def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2): # use current value when there's no moving average model if avg_class is None: layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1) return tf.matmul(layer1, weights2) + biases2 else: layer1 = tf.nn.relu( tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1)) return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2) # training process def train(mnist): x = tf.placeholder(tf.float32, [None, INPUT_NODE], name="x-input") y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name="y-input") # generate hidden layer params weight1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1)) biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE])) # generate output layer params weight2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1)) biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE])) # forward propagation y = inference(x, None, weight1, biases1, weight2, biases2) # used to store training cycles global_step = tf.Variable(0, trainable=False) # define EMA function to increase robustness when predict variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step) variable_averages_op = variable_averages.apply(tf.trainable_variables()) # # forward propagation with moving average function # average_y = inference(x, variable_averages, weight1, biases1, weight2, biases2) average_y = inference(x, variable_averages, weight1, biases1, weight2, biases2) # cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.arg_max(y_, 1)) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.arg_max(y_, 1)) # calc cross_entropy mean for current batch cross_entropy_mean = tf.reduce_mean(cross_entropy) # calc L2 regularization loss function regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE) regularization = regularizer(weight1) + regularizer(weight2) loss = cross_entropy_mean + regularization # learning rate = learning rate * LEARNING_RATE_DECAY ^ (global_step / decay_step) learning_rate = tf.train.exponential_decay( LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY) train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step) # combine backward propagation and EMA value modification with tf.control_dependencies([train_step, variable_averages_op]): train_op = tf.no_op(name="train") correct_prediction = tf.equal(tf.arg_max(average_y, 1), tf.arg_max(y_, 1)) 
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # prepare validation dataset to stop optimization validation_feed = {x: mnist.validation.images, y_: mnist.validation.labels} # define test dataset for final evaluation test_feed = {x: mnist.test.images, y_: mnist.test.labels} validation_result = range(TRAINING_STEPS / 1000) test_result = range(TRAINING_STEPS / 1000) for i in range(TRAINING_STEPS): if i % 1000 == 0: # print "average_y: ", average_y, sess.run(average_y, feed_dict=validation_feed) # print "y_: ", y_, sess.run(y_, feed_dict=validation_feed) validate_acc = sess.run(accuracy, feed_dict=validation_feed) validation_result[i / 1000] = validate_acc # print "after %d training step(s), validation accuracy using average model is %g " % (i, validate_acc) xs, ys = mnist.train.next_batch(BATCH_SIZE) sess.run(train_op, feed_dict={x: xs, y_: ys}) test_acc = sess.run(accuracy, feed_dict=test_feed) test_result[i / 1000] = test_acc # print "after %d training step(s), test accuracy using average model is %g " % (i, test_acc) print validation_result print test_result # draw a graph of accuracy using matplotlib iteration_count = range(0, TRAINING_STEPS, 1000) plt.figure(num=1, figsize=(15, 8)) plt.title("Plot accuracy", size=20) plt.xlabel("iteration count", size=14) plt.ylabel("accuracy/%", size=14) validation_note = [TRAINING_STEPS - 1000, validation_result[TRAINING_STEPS / 1000 - 1]] test_note = [TRAINING_STEPS - 1000, test_result[TRAINING_STEPS / 1000 - 1]] plt.annotate('validate-' + str(validation_note), xy=(test_note[0], test_note[1]), xytext=(test_note[0] - 1000, test_note[1] - 0.1), arrowprops=dict(facecolor='black', shrink=0.05)) plt.annotate('test-' + str(test_note), xy=(test_note[0], test_note[1]), xytext=(test_note[0] + 1000, test_note[1] - 0.07), arrowprops=dict(facecolor='black', shrink=0.05)) plt.grid(True) plt.plot(iteration_count, validation_result, color='b', linestyle='-', marker='o', label='validation data') plt.plot(iteration_count, test_result, linestyle='-.', marker='X', label='test data') plt.legend(loc="upper left") try: os.mkdir('images/') except: print("directory already exist") plt.savefig('images/mnist_accuracy_evaluation.png', format='png') img_vector = mnist.train.images[5] img_length = int(np.sqrt(INPUT_NODE)) img = np.ndarray([img_length, img_length]) # print "image size: ", img_length, "*", img_length for c in range(INPUT_NODE): # print "image indices: ", c / img_length, "*", c % img_length img[c / img_length][c % img_length] = img_vector[c] plt.figure(num=2, figsize=(15, 8)) plt.imshow(img) plt.show() def main(argv=None): mnist = input_data.read_data_sets("MNIST_data", one_hot=True) print "basic information of mnist dataset" print "mnist training data size: ", mnist.train.num_examples print "mnist validating data size: ", mnist.validation.num_examples print "mnist testing data size: ", mnist.test.num_examples train(mnist) if __name__ == '__main__': tf.app.run()
{"hexsha": "55cbbdb12daa23d9f27d1c8ca4966b6c07fed124", "size": 7311, "ext": "py", "lang": "Python", "max_stars_repo_path": "mnist_number_recognition/mnist_dataset.py", "max_stars_repo_name": "carbo-T/TF", "max_stars_repo_head_hexsha": "56ebfc253615b22fc3a55ba5e952837c47bf85cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-01T04:16:58.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-01T04:16:58.000Z", "max_issues_repo_path": "mnist_number_recognition/mnist_dataset.py", "max_issues_repo_name": "carbo-T/TF", "max_issues_repo_head_hexsha": "56ebfc253615b22fc3a55ba5e952837c47bf85cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mnist_number_recognition/mnist_dataset.py", "max_forks_repo_name": "carbo-T/TF", "max_forks_repo_head_hexsha": "56ebfc253615b22fc3a55ba5e952837c47bf85cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.7784431138, "max_line_length": 119, "alphanum_fraction": 0.6959376282, "include": true, "reason": "import numpy", "num_tokens": 1822}
#include "Visualizer.h" #include <boost/format.hpp> #include "affdex_small_logo.h" #include <algorithm> Visualizer::Visualizer(): GREEN_COLOR_CLASSIFIERS({ "joy" }), RED_COLOR_CLASSIFIERS({ "anger", "disgust", "sadness", "fear", "contempt" }) { logo_resized = false; logo = cv::imdecode(cv::InputArray(small_logo), CV_LOAD_IMAGE_UNCHANGED); EXPRESSIONS = { "smile", "innerBrowRaise", "browRaise", "browFurrow", "noseWrinkle", "upperLipRaise", "lipCornerDepressor", "chinRaise", "lipPucker", "lipPress", "lipSuck", "mouthOpen", "smirk", "eyeClosure", "attention", "eyeWiden", "cheekRaise", "lidTighten", "dimpler", "lipStretch", "jawDrop" }; EMOTIONS = { "joy", "fear", "disgust", "sadness", "anger", "surprise", "contempt", "valence", "engagement" }; HEAD_ANGLES = { "pitch", "yaw", "roll" }; EMOJIS = std::vector<std::string> { "relaxed", "smiley", "laughing", "kissing", "disappointed", "rage", "smirk", "wink", "stuckOutTongueWinkingEye", "stuckOutTongue", "flushed", "scream" }; GENDER_MAP = std::map<affdex::Gender, std::string> { { affdex::Gender::Male, "male" }, { affdex::Gender::Female, "female" }, { affdex::Gender::Unknown, "unknown" }, }; GLASSES_MAP = std::map<affdex::Glasses, std::string> { { affdex::Glasses::Yes, "yes" }, { affdex::Glasses::No, "no" } }; AGE_MAP = std::map<affdex::Age, std::string> { { affdex::Age::AGE_UNKNOWN, "unknown"}, { affdex::Age::AGE_UNDER_18, "under 18" }, { affdex::Age::AGE_18_24, "18-24" }, { affdex::Age::AGE_25_34, "25-34" }, { affdex::Age::AGE_35_44, "35-44" }, { affdex::Age::AGE_45_54, "45-54" }, { affdex::Age::AGE_55_64, "55-64" }, { affdex::Age::AGE_65_PLUS, "65 plus" } }; ETHNICITY_MAP = std::map<affdex::Ethnicity, std::string> { { affdex::Ethnicity::UNKNOWN, "unknown"}, { affdex::Ethnicity::CAUCASIAN, "caucasian" }, { affdex::Ethnicity::BLACK_AFRICAN, "black african" }, { affdex::Ethnicity::SOUTH_ASIAN, "south asian" }, { affdex::Ethnicity::EAST_ASIAN, "east asian" }, { affdex::Ethnicity::HISPANIC, "hispanic" } }; } void Visualizer::drawFaceMetrics(affdex::Face face, std::vector<cv::Point2f> bounding_box) { cv::Scalar white_color = cv::Scalar(255, 255, 255); //Draw Right side metrics int padding = bounding_box[0].y; //Top left Y drawValues((float *)&face.expressions, EXPRESSIONS, bounding_box[2].x + spacing, padding, white_color, false); padding = bounding_box[2].y; //Top left Y //Draw Head Angles drawHeadOrientation(face.measurements.orientation, bounding_box[0].x - spacing, padding); //Draw Appearance drawAppearance(face.appearance, bounding_box[0].x - spacing, padding); //Draw Left side metrics drawValues((float *)&face.emotions, EMOTIONS, bounding_box[0].x - spacing, padding, white_color, true); } void Visualizer::drawValues(const float * first, const std::vector<std::string> names, const int x, int &padding, const cv::Scalar clr, const bool align_right) { for (std::string name : names) { drawClassifierOutput(name, (*first), cv::Point(x, padding += spacing), align_right); first++; } } void Visualizer::updateImage(cv::Mat output_img) { img = output_img; if (!logo_resized) { double logo_width = (logo.size().width > img.size().width*0.25 ? 
img.size().width*0.25 : logo.size().width); double logo_height = ((double)logo_width) * ((double)logo.size().height / logo.size().width); cv::resize(logo, logo, cv::Size(logo_width, logo_height)); logo_resized = true; } cv::Mat roi = img(cv::Rect(img.cols - logo.cols - 10, 10, logo.cols, logo.rows)); overlayImage(logo, roi, cv::Point(0, 0)); } void Visualizer::drawPoints(affdex::VecFeaturePoint points) { for (auto& point : points) //Draw face feature points. { cv::circle(img, cv::Point(point.x, point.y), 2.0f, cv::Scalar(255, 255, 255)); } } void Visualizer::drawBoundingBox(cv::Point2f top_left, cv::Point2f bottom_right, float valence) { //Draw bounding box const ColorgenRedGreen valence_color_generator( -100, 100 ); cv::rectangle( img, top_left, bottom_right, valence_color_generator(valence), 3); } /** @brief DrawText prints text on screen either right or left justified at the anchor location (loc) * @param output_img -- Image we are plotting on * @param name -- Name of the classifier * @param value -- Value we are trying to display * @param loc -- Exact location. When aligh_right is (true/false) this should be the (upper-right, upper-left) * @param align_right -- Whether to right or left justify the text * @param color -- Color */ void Visualizer::drawText(const std::string& name, const std::string& value, const cv::Point2f loc, bool align_right, cv::Scalar color) { const int block_width = 8; const int margin = 2; const int block_size = 10; const int max_blocks = 100/block_size; cv::Point2f display_loc = loc; const std::string label = name+": "; if( align_right ) { display_loc.x -= (margin+block_width) * max_blocks; int baseline=0; cv::Size txtSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5f, 5,&baseline); display_loc.x -= txtSize.width; } cv::putText(img, label+value, display_loc, cv::FONT_HERSHEY_SIMPLEX, 0.5f, color, 1); } /** @brief DrawClassifierOutput handles choosing between equalizer or text as well as defining the colors * @param name -- Name of the classifier * @param value -- Value we are trying to display * @param loc -- Exact location. When aligh_right is (true/false) this should be the (upper-right, upper-left) * @param align_right -- Whether to right or left justify the text */ void Visualizer::drawClassifierOutput(const std::string& classifier, const float value, const cv::Point2f& loc, bool align_right) { static const ColorgenLinear white_yellow_generator( 0, 100, cv::Scalar(255,255,255), cv::Scalar(0, 255, 255)); static const ColorgenRedGreen valence_color_generator( -100, 100 ); // Determine the display color cv::Scalar color = cv::Scalar(255, 255, 255); if( classifier == "valence") { color = valence_color_generator( value ); } else if( RED_COLOR_CLASSIFIERS.count(classifier) ) { color = cv::Scalar(0, 0, 255); } else if( GREEN_COLOR_CLASSIFIERS.count(classifier) ) { color = cv::Scalar(0, 255, 0); } float equalizer_magnitude = value; if( classifier == "valence" ) { equalizer_magnitude = std::fabs(value); } drawEqualizer(classifier, equalizer_magnitude, loc, align_right, color ); } void Visualizer::drawEqualizer(const std::string& name, const float value, const cv::Point2f& loc, bool align_right, cv::Scalar color) { const int block_width = 8; const int block_height = 10; const int margin = 2; const int block_size = 10; const int max_blocks = 100/block_size; int blocks = round(value / block_size); int i = loc.x, j = loc.y - 10; cv::Point2f display_loc = loc; const std::string label = align_right? 
name+": " : " :"+name; for (int x = 0 ; x < (100/block_size) ; x++) { cv::Scalar scalar_clr = color; float alpha = 0.8; const int ii = (std::max)( float(i), 0.0f); const int jj = (std::max)( float(j), 0.0f); const int width = (std::min)(float(block_width), float(img.size().width-ii)); const int height = (std::min)(float(block_height), float(img.size().height-jj)); if (height < 0 || width < 0) continue; cv::Mat roi = img(cv::Rect(ii, jj, width, height)); if (x >= blocks) { alpha = 0.3; scalar_clr = cv::Scalar(186, 186, 186); } cv::Mat color(roi.size(), CV_8UC3, scalar_clr); cv::addWeighted(color, alpha, roi, 1.0 - alpha , 0.0, roi); i += align_right? -(margin+block_width):(margin+block_width); } display_loc.x += align_right? -(margin+block_width) * max_blocks : (margin+block_width) * max_blocks; if( align_right ) { int baseline=0; cv::Size txtSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5f, 5,&baseline); display_loc.x -= txtSize.width; } cv::putText(img, label, display_loc, cv::FONT_HERSHEY_SIMPLEX, 0.5f, cv::Scalar(50,50,50), 5); cv::putText(img, label, display_loc, cv::FONT_HERSHEY_SIMPLEX, 0.5f, cv::Scalar(255, 255, 255), 1); } void Visualizer::drawHeadOrientation(affdex::Orientation headAngles, const int x, int &padding, bool align_right, cv::Scalar color) { std::string valueStr = boost::str(boost::format("%3.1f") % headAngles.pitch); drawText("pitch", valueStr, cv::Point(x, padding += spacing), align_right, color ); valueStr = boost::str(boost::format("%3.1f") % headAngles.yaw); drawText("yaw", valueStr, cv::Point(x, padding += spacing), align_right, color ); valueStr = boost::str(boost::format("%3.1f") % headAngles.roll); drawText("roll", valueStr, cv::Point(x, padding += spacing), align_right, color ); } void Visualizer::drawAppearance(affdex::Appearance appearance, const int x, int &padding, bool align_right, cv::Scalar color) { drawText("gender", GENDER_MAP[appearance.gender], cv::Point(x, padding += spacing), align_right, color ); drawText("age", AGE_MAP[appearance.age], cv::Point(x, padding += spacing), align_right, color ); drawText("ethnicity", ETHNICITY_MAP[appearance.ethnicity], cv::Point(x, padding += spacing), align_right, color ); } void Visualizer::showImage() { cv::imshow("analyze video", img); cv::waitKey(5); } void Visualizer::overlayImage(const cv::Mat &foreground, cv::Mat &background, cv::Point2i location) { // start at the row indicated by location, or at row 0 if location.y is negative. for(int y = (std::max)(location.y , 0); y < background.rows; ++y) { int fY = y - location.y; // because of the translation // we are done of we have processed all rows of the foreground image. if(fY >= foreground.rows) break; // start at the column indicated by location, // or at column 0 if location.x is negative. for(int x = (std::max)(location.x, 0); x < background.cols; ++x) { int fX = x - location.x; // because of the translation. // we are done with this row if the column is outside of the foreground image. if(fX >= foreground.cols) break; // determine the opacity of the foregrond pixel, using its fourth (alpha) channel. double opacity = ((double)foreground.data[fY * foreground.step + fX * foreground.channels() + (foreground.channels()-1)]) / 255.; // and now combine the background and foreground pixel, using the opacity, // but only if opacity > 0. 
for(int c = 0; opacity > 0 && c < background.channels(); ++c) { unsigned char foregroundPx = foreground.data[fY * foreground.step + fX * foreground.channels() + c]; unsigned char backgroundPx = background.data[y * background.step + x * background.channels() + c]; background.data[y*background.step + background.channels()*x + c] = backgroundPx * (1.-opacity) + foregroundPx * opacity; } } } } cv::Scalar ColorgenRedGreen::operator()( const float val ) const { float norm_val = ( val - red_val_ ) / ( green_val_ - red_val_ ); norm_val = norm_val < 0.0 ? 0.0 : norm_val; norm_val = norm_val > 1.0 ? 1.0 : norm_val; const int B = 0; const int G = norm_val * 255; const int R = ( 1.0 - norm_val ) * 255; return cv::Scalar( B, G, R ); } cv::Scalar ColorgenLinear::operator()( const float val ) const { float norm_val = ( val - val1_ ) / ( val2_ - val1_ ); const int B = color1_.val[0] * (1.0f-norm_val) + color2_.val[0]*norm_val; const int G = color1_.val[1] * (1.0f-norm_val) + color2_.val[1]*norm_val; const int R = color1_.val[2] * (1.0f-norm_val) + color2_.val[2]*norm_val; return cv::Scalar( B, G, R ); }
{"hexsha": "ca4be41266c8678fd1b340344392047bd18181aa", "size": 12815, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "common/Visualizer.cpp", "max_stars_repo_name": "nbonfire/cpp-sdk-samples", "max_stars_repo_head_hexsha": "e85b169301bf4737a4e720646774140087701a07", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "common/Visualizer.cpp", "max_issues_repo_name": "nbonfire/cpp-sdk-samples", "max_issues_repo_head_hexsha": "e85b169301bf4737a4e720646774140087701a07", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "common/Visualizer.cpp", "max_forks_repo_name": "nbonfire/cpp-sdk-samples", "max_forks_repo_head_hexsha": "e85b169301bf4737a4e720646774140087701a07", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2529069767, "max_line_length": 124, "alphanum_fraction": 0.6097541943, "num_tokens": 3569}
-- Andreas, 2012-02-14. No short-circuit conversion test for sizes!
{-# OPTIONS --sized-types --show-implicit #-}
-- {-# OPTIONS -v tc.size.solve:20 -v tc.conv.size:20 -v tc.term.con:50 -v tc.term.args:50 #-}
module Issue298b where

open import Common.Size

data BTree : {size : Size} → Set where
  leaf : {i : Size} → BTree {↑ i}
  node : {i : Size} → BTree {i} → BTree {i} → BTree {↑ i}

works : ∀ {i} → BTree {i} → BTree
works (node (node t1 t2) t3) = node (works t1) (node t2 t3)
works t = t
{"hexsha": "8f38873acc1755f8f315a10b0ebe455cb62b97fc", "size": 498, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/succeed/Issue298b.agda", "max_stars_repo_name": "asr/agda-kanso", "max_stars_repo_head_hexsha": "aa10ae6a29dc79964fe9dec2de07b9df28b61ed5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-27T04:41:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-27T04:41:05.000Z", "max_issues_repo_path": "test/succeed/Issue298b.agda", "max_issues_repo_name": "masondesu/agda", "max_issues_repo_head_hexsha": "70c8a575c46f6a568c7518150a1a64fcd03aa437", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/succeed/Issue298b.agda", "max_forks_repo_name": "masondesu/agda", "max_forks_repo_head_hexsha": "70c8a575c46f6a568c7518150a1a64fcd03aa437", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.125, "max_line_length": 94, "alphanum_fraction": 0.6184738956, "num_tokens": 180}
The http://www.hr.ucdavis.edu/Administration/UCD_Community_Interest_Groups/AAFSA African American Faculty and Staff Association meets on the third Wednesday of the month in 3201 Hart Hall.
{"hexsha": "532823b778ed40effae041fe79f70e23644956a3", "size": 190, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/African_American_Faculty_and_Staff_Association.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/African_American_Faculty_and_Staff_Association.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/African_American_Faculty_and_Staff_Association.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 63.3333333333, "max_line_length": 188, "alphanum_fraction": 0.8368421053, "num_tokens": 44}
library(derivr)
library(dplyr)
library(plotly)

# Plot PnL for three options:
spot_price <- seq(100, 200)

call1 <- 10 * bs_call(180, spot_price, 7, 0.25, 0.001)
call2 <- -10 * bs_call(170, spot_price, 7, 0.30, 0.001)
put1 <- 10 * bs_put(140, spot_price, 7, 0.40, 0.001)
sum <- call1 + call2 + put1

data <- data.frame(spot_price)
atm <- 150
max_val <- max(call1, call2, put1, sum)
min_val <- min(call1, call2, put1, sum)

plotly::plot_ly(data, x= ~spot_price, y = ~sum, name='Sum', type='scatter', mode='lines') %>%
  plotly::add_trace(y = ~call1, name='CALL1', line = list(dash = 'dash')) %>%
  plotly::add_trace(y = ~call2, name='CALL2', line = list(dash = 'dash')) %>%
  plotly::add_trace(y = ~put1, name='PUT1', line = list(dash = 'dash')) %>%
  plotly::add_segments(x = atm, xend = atm, y = max_val, yend = min_val, name='ATM', line=list(dash = 'dot'))
{"hexsha": "334acfb9e025bc1b97a86382ff83140daa1f2fdb", "size": 864, "ext": "r", "lang": "R", "max_stars_repo_path": "examples/example_plot.r", "max_stars_repo_name": "beccau/derivr", "max_stars_repo_head_hexsha": "529bd42926b2f582f23880473ba32960762c7788", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/example_plot.r", "max_issues_repo_name": "beccau/derivr", "max_issues_repo_head_hexsha": "529bd42926b2f582f23880473ba32960762c7788", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/example_plot.r", "max_forks_repo_name": "beccau/derivr", "max_forks_repo_head_hexsha": "529bd42926b2f582f23880473ba32960762c7788", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.56, "max_line_length": 109, "alphanum_fraction": 0.6331018519, "num_tokens": 326}
import pprint

import numpy as np

from core.net_errors import NetIsNotInitialized


def calculate_average_neighboring(net_object):
    if net_object.net is None:
        raise NetIsNotInitialized()

    net = net_object.net

    zero_weights = np.zeros((net_object.config[0]))

    weights = np.ma.array(np.reshape(net[-1]['w'], (net_object.m, net_object.n, zero_weights.shape[0])), mask=False)
    weights = np.insert(weights, (0, weights.shape[1]), 0, axis=1)
    weights = np.insert(weights, (0, weights.shape[0]), 0, axis=0)
    weights.mask = True
    weights.mask[1:-1, 1:-1] = False

    result = np.zeros((net_object.m, net_object.n))

    for i, j in np.ndindex(weights.shape[:2]):
        if not weights.mask[i, j].all():
            a = [[i - 1, i - 1, i, i, i + 1, i + 1], [j - 1, j, j - 1, j + 1, j - 1, j]]
            w = weights[a]
            d = []
            for weight in w:
                if not np.all(weight.mask):
                    d.append(net_object.d(weights[i, j], weight))
            result[i - 1, j - 1] = np.nanmean(d)

    return result
{"hexsha": "03066320eb5b39ad4014dd573d7e932e301d5fb3", "size": 1075, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/net_neighboring_calculator.py", "max_stars_repo_name": "nikon-petr/kohonen", "max_stars_repo_head_hexsha": "c23ae3032c58681040fe023bfa395d1ff9989876", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/net_neighboring_calculator.py", "max_issues_repo_name": "nikon-petr/kohonen", "max_issues_repo_head_hexsha": "c23ae3032c58681040fe023bfa395d1ff9989876", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/net_neighboring_calculator.py", "max_forks_repo_name": "nikon-petr/kohonen", "max_forks_repo_head_hexsha": "c23ae3032c58681040fe023bfa395d1ff9989876", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7142857143, "max_line_length": 116, "alphanum_fraction": 0.576744186, "include": true, "reason": "import numpy", "num_tokens": 311}
// Boost.Geometry (aka GGL, Generic Geometry Library) // Copyright (c) 2007-2014 Barend Gehrels, Amsterdam, the Netherlands. // Copyright (c) 2008-2014 Bruno Lalande, Paris, France. // Copyright (c) 2009-2014 Mateusz Loskot, London, UK. // Copyright (c) 2014 Adam Wulkiewicz, Lodz, Poland. // This file was modified by Oracle on 2014. // Modifications copyright (c) 2014, Oracle and/or its affiliates. // Contributed and/or modified by Menelaos Karavelas, on behalf of Oracle // Parts of Boost.Geometry are redesigned from Geodan's Geographic Library // (geolib/GGL), copyright (c) 1995-2010 Geodan, Amsterdam, the Netherlands. // Use, modification and distribution is subject to the Boost Software License, // Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #ifndef BOOST_GEOMETRY_ALGORITHMS_DETAIL_COUNTING_HPP #define BOOST_GEOMETRY_ALGORITHMS_DETAIL_COUNTING_HPP #include <cstddef> #include <boost/range.hpp> #include <boost/geometry/core/exterior_ring.hpp> #include <boost/geometry/core/interior_rings.hpp> #include <boost/geometry/util/range.hpp> #include <boost/geometry/algorithms/detail/interior_iterator.hpp> namespace boost { namespace geometry { #ifndef DOXYGEN_NO_DETAIL namespace detail { namespace counting { template <std::size_t D> struct other_count { template <typename Geometry> static inline std::size_t apply(Geometry const&) { return D; } template <typename Geometry> static inline std::size_t apply(Geometry const&, bool) { return D; } }; template <typename RangeCount> struct polygon_count { template <typename Polygon> static inline std::size_t apply(Polygon const& poly) { std::size_t n = RangeCount::apply(exterior_ring(poly)); typename interior_return_type<Polygon const>::type rings = interior_rings(poly); for (typename detail::interior_iterator<Polygon const>::type it = boost::begin(rings); it != boost::end(rings); ++it) { n += RangeCount::apply(*it); } return n; } }; template <typename SingleCount> struct multi_count { template <typename MultiGeometry> static inline std::size_t apply(MultiGeometry const& geometry) { std::size_t n = 0; for (typename boost::range_iterator<MultiGeometry const>::type it = boost::begin(geometry); it != boost::end(geometry); ++it) { n += SingleCount::apply(*it); } return n; } }; }} // namespace detail::counting #endif // DOXYGEN_NO_DETAIL }} // namespace boost::geometry #endif // BOOST_GEOMETRY_ALGORITHMS_DETAIL_COUNTING_HPP
{"hexsha": "dc5bb26c10b405a96248df62a2737d6af784ad47", "size": 2746, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/geometry/algorithms/detail/counting.hpp", "max_stars_repo_name": "cpp-pm/boost", "max_stars_repo_head_hexsha": "38c6c8c07f2fcc42d573b10807fef27ec14930f8", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 12278.0, "max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z", "max_issues_repo_path": "ios/Pods/boost-for-react-native/boost/geometry/algorithms/detail/counting.hpp", "max_issues_repo_name": "c7yrus/alyson-v3", "max_issues_repo_head_hexsha": "5ad95a8f782f5f5d2fd543d44ca6a8b093395965", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9469.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T05:33:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:21.000Z", "max_forks_repo_path": "ios/Pods/boost-for-react-native/boost/geometry/algorithms/detail/counting.hpp", "max_forks_repo_name": "c7yrus/alyson-v3", "max_forks_repo_head_hexsha": "5ad95a8f782f5f5d2fd543d44ca6a8b093395965", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1343.0, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 25.4259259259, "max_line_length": 79, "alphanum_fraction": 0.6864530226, "num_tokens": 665}
! ******************************************************************************************************************************** ! ! atchem_data_netCDF.f90 ! ATCHEM netCDF ROUTINES ! ******************************************************************************************************************************** ! MODULE atchem_data_netCDF USE gem_netcdf USE atchem_lib IMPLICIT NONE SAVE CONTAINS ! ****************************************************************************************************************************** ! ! SAVE NETCDF RESTART DATA ! ****************************************************************************************************************************** ! SUBROUTINE sub_data_netCDF_ncrstsave(dum_name,dum_yr,dum_iou) ! -------------------------------------------------------- ! ! DUMMY ARGUMENTS ! -------------------------------------------------------- ! character(LEN=*),INTENT(IN)::dum_name ! REAL,INTENT(in)::dum_yr ! INTEGER,INTENT(OUT)::dum_iou ! ! -------------------------------------------------------- ! ! DEFINE LOCAL VARIABLES ! -------------------------------------------------------- ! integer::ia,l integer::loc_ntrec,loc_iou integer::loc_id_lonm,loc_id_latm,loc_id_lon_e,loc_id_lat_e integer,dimension(1:2)::loc_it_1 integer,dimension(1:3)::loc_it_2 character(127)::loc_title,loc_timunit character(7)::loc_string_year real::loc_c0,loc_c1 real,dimension(0:n_i)::loc_lon_e real,dimension(0:n_j)::loc_lat_e real,dimension(n_i,n_j)::loc_ij,loc_ij_mask ! -------------------------------------------------------- ! ! INITIALIZE LOCAL VARIABLES ! -------------------------------------------------------- ! loc_c0 = 0.0 loc_c1 = 1.0 loc_lon_e = 0.0 ; loc_lat_e = 0.0 loc_ij = 0.0 ; loc_ij_mask = 0.0 ! -------------------------------------------------------- ! ! WRITE TO FILE ! -------------------------------------------------------- ! ! -------------------------------------------------------- ! open file call sub_opennew(dum_name,loc_iou) ! -------------------------------------------------------- ! start definitions call sub_redef(loc_iou) ! -------------------------------------------------------- ! set global attributes loc_string_year = fun_conv_num_char_n(8,int(dum_yr)) loc_title = 'ATCHEM restart @ year '//loc_string_year call sub_putglobal(loc_iou,dum_name,loc_title,string_ncrunid,loc_timunit) ! -------------------------------------------------------- ! define dimensions call sub_defdim ('lon', loc_iou, n_i, loc_id_lonm) call sub_defdim ('lat', loc_iou, n_j, loc_id_latm) call sub_defdim ('lon_edges', loc_iou, n_i+1, loc_id_lon_e) call sub_defdim ('lat_edges', loc_iou, n_j+1, loc_id_lat_e) ! -------------------------------------------------------- ! define 1d data (t) loc_it_1(1) = loc_id_lonm call sub_defvar ('lon',loc_iou,1,loc_it_1,loc_c0,loc_c0,'X','D','longitude of the t grid','longitude','degrees_east') loc_it_1(1) = loc_id_latm call sub_defvar ('lat',loc_iou,1,loc_it_1,loc_c0,loc_c0,'Y','D','latitude of the t grid','latitude','degrees_north') loc_it_1(1) = loc_id_lon_e call sub_defvar ('lon_edges',loc_iou,1,loc_it_1,loc_c0,loc_c0,' ','D' ,'longitude of t grid edges',' ','degrees') loc_it_1(1) = loc_id_lat_e call sub_defvar ('lat_edges',loc_iou,1,loc_it_1,loc_c0,loc_c0,' ','D','latitude of t grid edges',' ','degrees') loc_it_2(1) = loc_id_lonm loc_it_2(2) = loc_id_latm ! -------------------------------------------------------- ! 
define (2D) tracer variables DO l=1,n_l_atm ia = conv_iselected_ia(l) call sub_defvar('atm_'//trim(string_atm(ia)),loc_iou,2,loc_it_2,loc_c0,loc_c0,' ','F', & & string_longname_atm(ia),'Atmosphere tracer - '//trim(string_atm(ia)),' ') end DO ! -------------------------------------------------------- ! end definitions call sub_enddef (loc_iou) call sub_sync(loc_iou) ! -------------------------------------------------------- ! loc_ntrec = 1 ! -------------------------------------------------------- ! write 1D variables call sub_putvar1d ('lon',loc_iou,n_i,loc_ntrec,n_i,phys_atm(ipa_lon,:,1),loc_c1,loc_c0) call edge_maker (1,loc_lon_e,phys_atm(ipa_lon,:,1),phys_atm(ipa_lone,:,1),phys_atm(ipa_dlon,:,1),n_i) call sub_putvar1d ('lon_edges',loc_iou,n_i+1,loc_ntrec,n_i+1,loc_lon_e,loc_c1,loc_c0) call sub_putvar1d ('lat',loc_iou,n_j,loc_ntrec,n_j,phys_atm(ipa_lat,1,:),loc_c1,loc_c0) call edge_maker (1,loc_lat_e, phys_atm(ipa_lat,1,:),phys_atm(ipa_latn,1,:),phys_atm(ipa_dlat,1,:), n_j) call sub_putvar1d ('lat_edges',loc_iou,n_j+1,loc_ntrec,n_j+1,loc_lat_e,loc_c1,loc_c0) ! -------------------------------------------------------- ! write (2D) tracer variables loc_ij_mask(:,:) = 1.0 loc_ij(:,:)= 0.0 DO l=1,n_l_atm ia = conv_iselected_ia(l) loc_ij(:,:) = atm(ia,:,:) call sub_putvar2d('atm_'//trim(string_atm(ia)),loc_iou,n_i,n_j,loc_ntrec,loc_ij(:,:),loc_ij_mask(:,:)) end DO ! -------------------------------------------------------- ! close file and return IOU call sub_closefile(loc_iou) dum_iou = loc_iou ! -------------------------------------------------------- ! ! END ! -------------------------------------------------------- ! END SUBROUTINE sub_data_netCDF_ncrstsave ! ****************************************************************************************************************************** ! END MODULE atchem_data_netCDF
{"hexsha": "bd70842860be537c34706374591b06b38ad97dd0", "size": 5743, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/atchem/atchem_data_netCDF.f90", "max_stars_repo_name": "derpycode/cgenie", "max_stars_repo_head_hexsha": "ca5f66a857e3ba7ed60785052d19f92abb7ffc00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2016-05-10T01:51:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T10:14:32.000Z", "max_issues_repo_path": "src/atchem/atchem_data_netCDF.f90", "max_issues_repo_name": "derpycode/cgenie", "max_issues_repo_head_hexsha": "ca5f66a857e3ba7ed60785052d19f92abb7ffc00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-05-10T17:00:54.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-10T19:33:11.000Z", "max_forks_repo_path": "src/atchem/atchem_data_netCDF.f90", "max_forks_repo_name": "derpycode/cgenie", "max_forks_repo_head_hexsha": "ca5f66a857e3ba7ed60785052d19f92abb7ffc00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2015-05-15T19:54:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-12T15:25:56.000Z", "avg_line_length": 50.8230088496, "max_line_length": 132, "alphanum_fraction": 0.4461083058, "num_tokens": 1451}
from mod_create import * import yaml import sys import numpy as np from math import * import transformations as tr from PyQt5 import QtCore, QtGui def SaveParameter(interfaceobj): filer = '/home/themarkofaspur/catkin_ws/src/cdpr3/sdf/cube.yaml' yamlObject = DictToObj(filer) #interfaceobj = interfaceobj._widget #filepath if (interfaceobj.YAMLFILE.isModified()): Text = interfaceobj.model.YAMLFILE.text() Text = str(Text) yamlObject.filepath = Text # Anchorpoints if (interfaceobj.anchorpoint1.isModified()): Text = interfaceobj.anchorpoint1.text() Text = (str(Text)).split(',') yamlObject.model.points[0].frame = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.anchorpoint2.isModified()): Text = interfaceobj.anchorpoint2.text() Text = (str(Text)).split(',') yamlObject.model.points[1].frame = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.anchorpoint3.isModified()): Text = interfaceobj.anchorpoint3.text() Text = (str(Text)).split(',') yamlObject.model.points[2].frame = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.anchorpoint4.isModified()): Text = interfaceobj.anchorpoint4.text() Text = (str(Text)).split(',') yamlObject.model.points[3].frame = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.anchorpoint5.isModified()): Text = interfaceobj.anchorpoint5.text() Text = (str(Text)).split(',') yamlObject.model.points[4].frame = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.anchorpoint6.isModified()): Text = interfaceobj.anchorpoint6.text() Text = (str(Text)).split(',') yamlObject.model.points[5].frame = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.anchorpoint7.isModified()): Text = interfaceobj.anchorpoint7.text() Text = (str(Text)).split(',') yamlObject.model.points[6].frame = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.anchorpoint8.isModified()): Text = interfaceobj.anchorpoint8.text() Text = (str(Text)).split(',') yamlObject.model.points[7].frame = [float(Text[0]), float(Text[1]), float(Text[2])] #Exit Points if (interfaceobj.exitpoint1.isModified()): Text = interfaceobj.exitpoint1.text() Text = (str(Text)).split(',') yamlObject.model.points[0].platform = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.exitpoint2.isModified()): Text = interfaceobj.exitpoint2.text() Text = (str(Text)).split(',') yamlObject.model.points[1].platform = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.exitpoint3.isModified()): Text = interfaceobj.exitpoint3.text() Text = (str(Text)).split(',') yamlObject.model.points[2].platform = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.exitpoint4.isModified()): Text = interfaceobj.exitpoint4.text() Text = (str(Text)).split(',') yamlObject.model.points[3].platform = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.exitpoint5.isModified()): Text = interfaceobj.exitpoint5.text() Text = (str(Text)).split(',') yamlObject.model.points[4].platform = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.exitpoint6.isModified()): Text = interfaceobj.exitpoint6.text() Text = (str(Text)).split(',') yamlObject.model.points[5].platform = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.exitpoint7.isModified()): Text = interfaceobj.exitpoint7.text() Text = (str(Text)).split(',') yamlObject.model.points[6].platform = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.exitpoint8.isModified()): Text = interfaceobj.exitpoint8.text() Text = 
(str(Text)).split(',') yamlObject.model.points[7].platform = [float(Text[0]), float(Text[1]), float(Text[2])] #End-Effector Parameters if (interfaceobj.endeffMass.isModified()): Text = interfaceobj.endeffMass.text() yamlObject.model.platform.mass = float(Text) if (interfaceobj.endEffShape.currentIndex() != -1): Text = interfaceobj.endEffShape.currentText() yamlObject.model.platform.type = str(Text) yamlObject.model.platform.visual = str(Text) if (interfaceobj.endEffShape_2.currentIndex() != -1): Text = interfaceobj.endEffShape_2.currentText() yamlObject.model.platform.color = str(Text) if (interfaceobj.endeffPosition.isModified()): Text = interfaceobj.endeffPosition.text() Text = (str(Text)).split(',') yamlObject.model.platform.position.xyz = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.endeffRPY.isModified()): Text = interfaceobj.endeffRPY.text() Text = (str(Text)).split(',') yamlObject.model.platform.position.rpy = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.Inertialxx.isModified()): Text = interfaceobj.Inertialxx.text() yamlObject.model.platform.inertial.xx = float(Text) if (interfaceobj.Inertialxy.isModified()): Text = interfaceobj.Inertialxy.text() yamlObject.model.platform.inertial.xy = float(Text) if (interfaceobj.Inertialxz.isModified()): Text = interfaceobj.Inertialxz.text() yamlObject.model.platform.inertial.xz = float(Text) if (interfaceobj.Inertialyy.isModified()): Text = interfaceobj.Inertialyy.text() yamlObject.model.platform.inertial.yy = float(Text) if (interfaceobj.Inertialyz.isModified()): Text = interfaceobj.Inertialyz.text() yamlObject.model.platform.inertial.yz = float(Text) if (interfaceobj.Inertialzz.isModified()): Text = interfaceobj.Inertialzz.text() yamlObject.model.platform.inertial.zz = float(Text) #Individual Cable Parameters if (interfaceobj.CableMass.isModified()): Text = interfaceobj.CableMass.text() yamlObject.model.cable.mass = float(Text) if (interfaceobj.CableRadius.isModified()): Text = interfaceobj.CableRadius.text() yamlObject.model.cable.radius = float(Text) # Bounding Frame Parameters if (interfaceobj.endeffMass_3.isModified()): Text = interfaceobj.endeffMass_3.text() yamlObject.model.frame.mass = float(Text) if (interfaceobj.lowerframe.isModified()): Text = interfaceobj.lowerframe.text() Text = (str(Text)).split(',') yamlObject.model.frame.lower = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.upperframe.isModified()): Text = interfaceobj.upperframe.text() Text = (str(Text)).split(',') yamlObject.model.frame.upper = [float(Text[0]), float(Text[1]), float(Text[2])] if (interfaceobj.endEffShape_6.currentIndex() != -1): Text = interfaceobj.endEffShape_6.currentText() yamlObject.model.frame.type = str(Text) if (interfaceobj.endEffShape_5.currentIndex() != -1): Text = interfaceobj.endEffShape_5.currentText() yamlObject.model.frame.color = str(Text) # Minimum and Maximum Forces if (interfaceobj.fmax.isModified()): Text = interfaceobj.fmax.text() yamlObject.model.joints.actuated.effort = float(Text) if (interfaceobj.fmin.isModified()): Text = interfaceobj.fmin.text() yamlObject.model.joints.actuated.min = float(Text) dictObject = ObjectToDict(yamlObject) WriteYAML(dictObject, filer)
{"hexsha": "3d7e533a8b96a4b9cf4cf7f54940ec62efce3bca", "size": 7795, "ext": "py", "lang": "Python", "max_stars_repo_path": "rqt_cdpr/src/rqt_cdpr/saveparam.py", "max_stars_repo_name": "siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling", "max_stars_repo_head_hexsha": "4e8d991d55ae7da91b3c90773c679f3369a4dafa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-06-01T12:19:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T12:30:09.000Z", "max_issues_repo_path": "rqt_cdpr/build/lib.linux-x86_64-2.7/rqt_cdpr/saveparam.py", "max_issues_repo_name": "siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling", "max_issues_repo_head_hexsha": "4e8d991d55ae7da91b3c90773c679f3369a4dafa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-27T12:24:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-27T12:24:50.000Z", "max_forks_repo_path": "rqt_cdpr/build/lib.linux-x86_64-2.7/rqt_cdpr/saveparam.py", "max_forks_repo_name": "siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling", "max_forks_repo_head_hexsha": "4e8d991d55ae7da91b3c90773c679f3369a4dafa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-02T00:48:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-02T00:48:11.000Z", "avg_line_length": 38.0243902439, "max_line_length": 97, "alphanum_fraction": 0.6474663246, "include": true, "reason": "import numpy", "num_tokens": 1887}
[STATEMENT] lemma pow_mono_exp: assumes a: "a \<ge> (1 :: 'a :: ordered_semiring_1)" shows "n \<ge> m \<Longrightarrow> a ^ n \<ge> a ^ m" [PROOF STATE] proof (prove) goal (1 subgoal): 1. m \<le> n \<Longrightarrow> a ^ m \<le> a ^ n [PROOF STEP] proof (induct m arbitrary: n) [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>n. 0 \<le> n \<Longrightarrow> a ^ 0 \<le> a ^ n 2. \<And>m n. \<lbrakk>\<And>n. m \<le> n \<Longrightarrow> a ^ m \<le> a ^ n; Suc m \<le> n\<rbrakk> \<Longrightarrow> a ^ Suc m \<le> a ^ n [PROOF STEP] case 0 [PROOF STATE] proof (state) this: 0 \<le> n goal (2 subgoals): 1. \<And>n. 0 \<le> n \<Longrightarrow> a ^ 0 \<le> a ^ n 2. \<And>m n. \<lbrakk>\<And>n. m \<le> n \<Longrightarrow> a ^ m \<le> a ^ n; Suc m \<le> n\<rbrakk> \<Longrightarrow> a ^ Suc m \<le> a ^ n [PROOF STEP] show ?case [PROOF STATE] proof (prove) goal (1 subgoal): 1. a ^ 0 \<le> a ^ n [PROOF STEP] using pow_mono_one[OF a] [PROOF STATE] proof (prove) using this: (1::'a) \<le> a ^ ?n goal (1 subgoal): 1. a ^ 0 \<le> a ^ n [PROOF STEP] by auto [PROOF STATE] proof (state) this: a ^ 0 \<le> a ^ n goal (1 subgoal): 1. \<And>m n. \<lbrakk>\<And>n. m \<le> n \<Longrightarrow> a ^ m \<le> a ^ n; Suc m \<le> n\<rbrakk> \<Longrightarrow> a ^ Suc m \<le> a ^ n [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>m n. \<lbrakk>\<And>n. m \<le> n \<Longrightarrow> a ^ m \<le> a ^ n; Suc m \<le> n\<rbrakk> \<Longrightarrow> a ^ Suc m \<le> a ^ n [PROOF STEP] case (Suc m nn) [PROOF STATE] proof (state) this: m \<le> ?n \<Longrightarrow> a ^ m \<le> a ^ ?n Suc m \<le> nn goal (1 subgoal): 1. \<And>m n. \<lbrakk>\<And>n. m \<le> n \<Longrightarrow> a ^ m \<le> a ^ n; Suc m \<le> n\<rbrakk> \<Longrightarrow> a ^ Suc m \<le> a ^ n [PROOF STEP] then [PROOF STATE] proof (chain) picking this: m \<le> ?n \<Longrightarrow> a ^ m \<le> a ^ ?n Suc m \<le> nn [PROOF STEP] obtain n where nn: "nn = Suc n" [PROOF STATE] proof (prove) using this: m \<le> ?n \<Longrightarrow> a ^ m \<le> a ^ ?n Suc m \<le> nn goal (1 subgoal): 1. (\<And>n. nn = Suc n \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by (cases nn, auto) [PROOF STATE] proof (state) this: nn = Suc n goal (1 subgoal): 1. \<And>m n. \<lbrakk>\<And>n. m \<le> n \<Longrightarrow> a ^ m \<le> a ^ n; Suc m \<le> n\<rbrakk> \<Longrightarrow> a ^ Suc m \<le> a ^ n [PROOF STEP] note Suc = Suc[unfolded nn] [PROOF STATE] proof (state) this: m \<le> ?n \<Longrightarrow> a ^ m \<le> a ^ ?n Suc m \<le> Suc n goal (1 subgoal): 1. \<And>m n. \<lbrakk>\<And>n. m \<le> n \<Longrightarrow> a ^ m \<le> a ^ n; Suc m \<le> n\<rbrakk> \<Longrightarrow> a ^ Suc m \<le> a ^ n [PROOF STEP] hence rec: "a ^ n \<ge> a ^ m" [PROOF STATE] proof (prove) using this: m \<le> ?n \<Longrightarrow> a ^ m \<le> a ^ ?n Suc m \<le> Suc n goal (1 subgoal): 1. a ^ m \<le> a ^ n [PROOF STEP] by auto [PROOF STATE] proof (state) this: a ^ m \<le> a ^ n goal (1 subgoal): 1. \<And>m n. \<lbrakk>\<And>n. m \<le> n \<Longrightarrow> a ^ m \<le> a ^ n; Suc m \<le> n\<rbrakk> \<Longrightarrow> a ^ Suc m \<le> a ^ n [PROOF STEP] show ?case [PROOF STATE] proof (prove) goal (1 subgoal): 1. a ^ Suc m \<le> a ^ nn [PROOF STEP] unfolding nn power_Suc [PROOF STATE] proof (prove) goal (1 subgoal): 1. a * a ^ m \<le> a * a ^ n [PROOF STEP] by (rule times_right_mono[OF ge_trans[OF a one_ge_zero] rec]) [PROOF STATE] proof (state) this: a ^ Suc m \<le> a ^ nn goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 1601, "file": "Abstract-Rewriting_SN_Orders", "length": 17}
module Tests
using SparseRegressionAlgorithms
S = SparseRegressionAlgorithms
using Base.Test

macro display(ex)
    :(display($ex))
end

@testset "sweep" begin
    n, p = 1000, 5
    x = randn(n, p)
    y = x * collect(1:p) + randn(n)
    w = rand(n)

    @display S.sweepreg(x, y)
end

end
{"hexsha": "87ff215854cd8b63f76fd5e330c004efb8de1203", "size": 289, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "joshday/SparseRegressionAlgorithms", "max_stars_repo_head_hexsha": "86eda4625ac0273cc4dfa6283357ccf68ea99f27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-08-14T12:26:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T17:24:46.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "joshday/SparseRegressionAlgorithms", "max_issues_repo_head_hexsha": "86eda4625ac0273cc4dfa6283357ccf68ea99f27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "joshday/SparseRegressionAlgorithms", "max_forks_repo_head_hexsha": "86eda4625ac0273cc4dfa6283357ccf68ea99f27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:05:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:05:58.000Z", "avg_line_length": 16.0555555556, "max_line_length": 35, "alphanum_fraction": 0.6470588235, "num_tokens": 97}
from typing import Union import numpy as np import tensorflow.keras as keras from numpy import ndarray from pandas import DataFrame from .model_helpers import make_tensorboard_callback, make_save_path from ..utils import naming class SimpleModel: def __init__(self, directory_name: str, n_input: int, n_output: int): self.directory_name = directory_name self.model = keras.models.Sequential() self.model.add(keras.layers.Dense(400, input_dim=n_input)) self.model.add(keras.layers.Activation('relu')) self.model.add(keras.layers.Dense(800)) self.model.add(keras.layers.Activation('relu')) self.model.add(keras.layers.Dense(400)) self.model.add(keras.layers.Activation('relu')) self.model.add(keras.layers.Dense(300)) self.model.add(keras.layers.Activation('relu')) self.model.add(keras.layers.Dense(200)) self.model.add(keras.layers.Activation('relu')) self.model.add(keras.layers.Dense(100)) self.model.add(keras.layers.Activation('relu')) self.model.add(keras.layers.Dense(n_output)) self.model.add(keras.layers.Activation('relu')) self.model.summary() adam_optimizer = keras.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0, amsgrad=False) self.model.compile(loss='mean_squared_error', optimizer=adam_optimizer) def train(self, input_train: ndarray, output_train: Union[DataFrame, ndarray], input_val: ndarray, output_val: Union[DataFrame, ndarray], epochs=60): *callbacks, time_stamp = self._setup_training() output_train = np.array(output_train) output_val = np.array(output_val) hist = self.model.fit(input_train, output_train, validation_data=(input_val, output_val), epochs=epochs, batch_size=256, verbose=1, callbacks=callbacks) return hist, time_stamp def train_with_generator(self, data_generator_train, data_generator_val, epochs=60): *callbacks, time_stamp = self._setup_training() hist = self.model.fit(data_generator_train, validation_data=data_generator_val, epochs=epochs, verbose=1, callbacks=callbacks) return hist, time_stamp def _setup_training(self): time_stamp = naming.make_timestamp() tb_callback = make_tensorboard_callback(self.directory_name, time_stamp) save_path = make_save_path(self.directory_name, time_stamp) checkpoint = keras.callbacks.ModelCheckpoint(filepath=save_path, monitor='val_loss', verbose=1, save_best_only=True) lr_reduction = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5) return tb_callback, checkpoint, lr_reduction, time_stamp
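# --- Editorial sketch (not part of mlreflect): shapes, sizes and the directory name below
# --- are invented, purely to illustrate the SimpleModel constructor and train signature
# --- defined above; real training data comes from mlreflect's own generators.
def _demo_simple_model():
    x = np.random.rand(1000, 64).astype("float32")   # 1000 samples, 64 input features
    y = np.random.rand(1000, 8).astype("float32")    # 8 regression targets
    demo = SimpleModel(directory_name="demo_run", n_input=64, n_output=8)
    history, stamp = demo.train(x[:900], y[:900], x[900:], y[900:], epochs=2)
    return history, stamp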
{"hexsha": "462aecb8a5ed8a09ae34a53857540485a0ce9cc8", "size": 2900, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlreflect/models/simple_model.py", "max_stars_repo_name": "schreiber-lab/mlreflect", "max_stars_repo_head_hexsha": "88a80ccac48461cc8934a46041726b70e469c6b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mlreflect/models/simple_model.py", "max_issues_repo_name": "schreiber-lab/mlreflect", "max_issues_repo_head_hexsha": "88a80ccac48461cc8934a46041726b70e469c6b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlreflect/models/simple_model.py", "max_forks_repo_name": "schreiber-lab/mlreflect", "max_forks_repo_head_hexsha": "88a80ccac48461cc8934a46041726b70e469c6b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1891891892, "max_line_length": 113, "alphanum_fraction": 0.675862069, "include": true, "reason": "import numpy,from numpy", "num_tokens": 612}
using Juxta using Test ja = JuxtArray(randn(5,10), ["x","y"], Dict("x"=>collect(1:5),"y"=>collect(1:10))) ja1 = JuxtArray(randn(5,10), ["x","y"], Dict("x"=>collect(1:5),"y"=>collect(1:10) .* 2)) ja2 = JuxtArray(randn(5,10), ["x","y"], Dict("x"=>collect(1:5),"y"=>collect(1:10) .* 2)) ja3 = JuxtArray(randn(5,10), ["x","y"], Dict("x"=>collect(1:5),"y"=>collect(1:10) .* 2)) ja4 = JuxtArray(randn(5,10,1), ["x","y","za"], Dict("x"=>collect(1:5),"y"=>collect(1:10) .* 2,"za"=>[2])) ja5 = JuxtArray(randn(5,10,1), ["x","y","za"], Dict("x"=>collect(1:5),"y"=>collect(1:10) .* 2,"za"=>[2])) @testset "juxta.jl" begin @test typeof(ja) == JuxtArray @test ja.indices["x"] == 1:5 @test ja.indices["y"] == 1:10 @test ja1.indices["x"] == 1:5 @test ja1.indices["y"] == 1:10 @test isel!(ja, x=2:4).indices["x"] == 1:3 @test isel!(ja, y=2).indices["y"] == 1:1 @test ja.coords["y"][1] == 2 @test size(ja.array,1) == 3 @test isel!(ja1, x=2:4, y=3:2:8).indices["x"] == 1:3 @test ja1.indices["y"] == 1:3 @test ja1.coords["y"] == [3,5,7] .* 2 @test ja1.coords["x"] == collect(2:4) @test sel!(ja2, x=1.1:0.2:4.1).indices["x"] == 1:3 @test ja2.coords["x"] == collect(2:4) @test sel!(ja3, "nearest", x=1.1:0.2:4.1).indices["x"] == 1:4 @test ja3.coords["x"] == collect(1:4) @test sel!(ja3, "nearest", x=0.8).indices["x"] == 1:1 @test ja3.coords["x"] == Number[1] @test sel!(ja3, y=10.3).indices["y"] == 1:2 @test ja3.coords["y"] == Number[10,12] @test size(ja3) == (1,2) @test size(ja3, "x") == 1 @test size(ja3, "y") == 2 @test size(dropdims(ja3, ["x"])) == (2,) @test ja3.dims == ["y"] @test (ja4 |> j->isel!(j,x=2) |> j->dropdims(j,["x","za"]) |> j->size(j)) == (10,) @test ja4.dims == ["y"] @test typeof(show(ja4)) == Nothing @test ja5(x=1.1:0.2:4.1).indices["x"] == 1:3 end
{"hexsha": "f41fbb092ef0e0208583e8485f4379cc84462a16", "size": 2035, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "suyashbire1/Juxta.jl", "max_stars_repo_head_hexsha": "be8ce1f5024afc10535ac0ca95a1faaff0d309a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "suyashbire1/Juxta.jl", "max_issues_repo_head_hexsha": "be8ce1f5024afc10535ac0ca95a1faaff0d309a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "suyashbire1/Juxta.jl", "max_forks_repo_head_hexsha": "be8ce1f5024afc10535ac0ca95a1faaff0d309a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9019607843, "max_line_length": 79, "alphanum_fraction": 0.4742014742, "num_tokens": 855}
#!/usr/bin/python2 """Downloads forecasted streamflow for rivers of interest. This script downloads forecasted streamflow for rivers of interest. Run as a scheduled task or cron job to keep your database current. """ import gzip import json import os import tempfile from urllib import urlretrieve from netCDF4 import Dataset import numpy as np from pynwm.noaa import noaa_latest from pynwm.nwm_data import get_schema from pynwm.nwm_subset import combine_files def _unzip(filename): """If zip file, unzips and deletes the zip.""" if filename[-3:] == '.gz': tmpdir = tempfile.gettempdir() nc_file = os.path.join(tmpdir, os.path.basename(filename[:-3])) with gzip.open(filename, 'rb') as z, open(nc_file, 'wb') as uz: uz.write(z.read()) os.remove(filename) return nc_file else: return filename def _add_max_q(nc_filename): """Adds a variable storing maximum streamflow for each river.""" with Dataset(nc_filename, 'a') as nc: schema = get_schema(nc) streamflow_var = nc.variables['streamflow'] max_q = np.amax(streamflow_var[:], axis=0) var = nc.createVariable('max_streamflow', 'i', (schema['id_dim'],)) var.long_name = 'Maximum River Flow' var.units = streamflow_var.units var.scale_factor = 0.01 var.add_offset = 0.0 var[:] = max_q def main(): # Config tells us what we want and where to put it with open('config.json') as f: cfg = json.load(f) with open(cfg['river_id_file']) as f: ids = [x.strip() for x in f.readlines()] output_folder = cfg['output_folder'] product = cfg['product'] existing_files = [f for f in os.listdir(output_folder) if f.endswith('.nc')] current_files = [] tmpdir = tempfile.gettempdir() # Get the latest simulation. 'long_range' may have more than one. sims = noaa_latest.find_latest_simulation(product) for key, sim in sims.iteritems(): filename = key + '.nc' if filename in existing_files: print(filename + ' is current.') current_files.append(filename) else: # We don't have it yet. Download, subset and merge files. print('\nRetrieving files to build ' + filename) dl_files = [] for index, nwm_file in enumerate(sim['files']): dl_file = os.path.join(tmpdir, nwm_file) print('Downloading ' + nwm_file) urlretrieve(sim['links'][index], dl_file) nc_file = _unzip(dl_file) dl_files.append(nc_file) print('Building cube') filepath = os.path.join(output_folder, filename) combine_files(dl_files, filepath, ids) # Max streamflow can be useful to quickly identify floods _add_max_q(filepath) current_files.append(filepath) # With our subset, now we don't need the downloaded files for nc_file in dl_files: os.remove(nc_file) if current_files: # Delete obsolete files. You may want to archive or manage differently. old_files = [x for x in existing_files if x not in current_files] for old_file in old_files: filename = os.path.join(output_folder, old_file) if os.path.isfile(filename): os.remove(filename) print('\nFinished') if __name__ == '__main__': main()
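# --- Editorial note (not part of pynwm): config.json, loaded in main() above, is a JSON
# --- object with the three fields below. The values here are placeholders; river_id_file
# --- should point at a plain text file listing one river/feature ID per line.
EXAMPLE_CONFIG = {
    'river_id_file': 'rivers_of_interest.txt',  # one NWM feature ID per line
    'output_folder': 'nwm_forecasts',           # where the subsetted NetCDF cubes are written
    'product': 'long_range',                    # forecast product, e.g. the 'long_range' case handled above
}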
{"hexsha": "e7aba33e46468ee9798f2f1b8d5ea9981dff0b09", "size": 3500, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/get_latest_forecast/get_latest_forecast.py", "max_stars_repo_name": "tyjmadsen/pynwm_v1", "max_stars_repo_head_hexsha": "3e2e3b8f65ca3c0a21dbe57e3cd00e054957b794", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2016-09-01T17:00:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T12:36:35.000Z", "max_issues_repo_path": "examples/get_latest_forecast/get_latest_forecast.py", "max_issues_repo_name": "tyjmadsen/pynwm_v1", "max_issues_repo_head_hexsha": "3e2e3b8f65ca3c0a21dbe57e3cd00e054957b794", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2016-06-23T15:21:55.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-17T21:42:44.000Z", "max_forks_repo_path": "examples/get_latest_forecast/get_latest_forecast.py", "max_forks_repo_name": "tyjmadsen/pynwm_v1", "max_forks_repo_head_hexsha": "3e2e3b8f65ca3c0a21dbe57e3cd00e054957b794", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2016-07-04T15:12:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-18T16:20:24.000Z", "avg_line_length": 34.3137254902, "max_line_length": 79, "alphanum_fraction": 0.6234285714, "include": true, "reason": "import numpy", "num_tokens": 791}
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for dm_pix._src.augment.""" import functools from absl.testing import absltest from absl.testing import parameterized from dm_pix._src import augment import jax import numpy as np import tensorflow as tf _IMG_SHAPE = (131, 111, 3) _RAND_FLOATS_IN_RANGE = list( np.random.uniform(0., 1., size=(10,) + _IMG_SHAPE).astype(np.float32)) _RAND_FLOATS_OUT_OF_RANGE = list( np.random.uniform(-0.5, 1.5, size=(10,) + _IMG_SHAPE).astype(np.float32)) _KERNEL_SIZE = _IMG_SHAPE[0] / 10. class _ImageAugmentationTest(parameterized.TestCase): """Runs tests for the various augments with the correct arguments.""" def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range): pass def _test_fn(self, images_list, jax_fn, tf_fn): pass def assertAllCloseTolerant(self, x, y): # Increase tolerance on TPU due to lower precision. tol = 1e-2 if jax.local_devices()[0].platform == "tpu" else 1e-4 np.testing.assert_allclose(x, y, rtol=tol, atol=tol) self.assertEqual(x.dtype, y.dtype) @parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE), ("out_of_range", _RAND_FLOATS_OUT_OF_RANGE)) def test_adjust_brightness(self, images_list): self._test_fn_with_random_arg( images_list, jax_fn=augment.adjust_brightness, tf_fn=tf.image.adjust_brightness, delta=(-0.5, 0.5)) key = jax.random.PRNGKey(0) self._test_fn_with_random_arg( images_list, jax_fn=functools.partial(augment.random_brightness, key), tf_fn=None, max_delta=(0, 0.5)) @parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE), ("out_of_range", _RAND_FLOATS_OUT_OF_RANGE)) def test_adjust_contrast(self, images_list): self._test_fn_with_random_arg( images_list, jax_fn=augment.adjust_contrast, tf_fn=tf.image.adjust_contrast, factor=(0.5, 1.5)) key = jax.random.PRNGKey(0) self._test_fn_with_random_arg( images_list, jax_fn=functools.partial(augment.random_contrast, key, upper=1), tf_fn=None, lower=(0, 0.9)) # Doesn't make sense outside of [0, 1]. @parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE)) def test_adjust_gamma(self, images_list): self._test_fn_with_random_arg( images_list, jax_fn=augment.adjust_gamma, tf_fn=tf.image.adjust_gamma, gamma=(0.5, 1.5)) @parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE), ("out_of_range", _RAND_FLOATS_OUT_OF_RANGE)) def test_adjust_saturation(self, images_list): # tf.image.adjust_saturation has a buggy implementation when the green and # blue channels have very close values that don't match the red channel. # This is due to a rounding error in http://shortn/_ETSJsEwUj5 # if (g - b) < 0 but small enough that (hh + 1) == 1. # Eg: tf.image.adjust_saturation([[[0.75, 0.0369078, 0.0369079]]], 1.0) # -> [[[0.03690779, 0.03690779, 0.03690779]]] # Perturb the inputs slightly so that this doesn't happen. 
def perturb(rgb): rgb_new = np.copy(rgb) rgb_new[..., 1] += 0.001 * (np.abs(rgb[..., 2] - rgb[..., 1]) < 1e-3) return rgb_new images_list = list(map(perturb, images_list)) self._test_fn_with_random_arg( images_list, jax_fn=augment.adjust_saturation, tf_fn=tf.image.adjust_saturation, factor=(0.5, 1.5)) key = jax.random.PRNGKey(0) self._test_fn_with_random_arg( images_list, jax_fn=functools.partial(augment.random_saturation, key, upper=1), tf_fn=None, lower=(0, 0.9)) # CPU TF uses a different hue adjustment method outside of the [0, 1] range. # Disable out-of-range tests. @parameterized.named_parameters( ("in_range", _RAND_FLOATS_IN_RANGE),) def test_adjust_hue(self, images_list): self._test_fn_with_random_arg( images_list, jax_fn=augment.adjust_hue, tf_fn=tf.image.adjust_hue, delta=(-0.5, 0.5)) key = jax.random.PRNGKey(0) self._test_fn_with_random_arg( images_list, jax_fn=functools.partial(augment.random_hue, key), tf_fn=None, max_delta=(0, 0.5)) @parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE), ("out_of_range", _RAND_FLOATS_OUT_OF_RANGE)) def test_rot90(self, images_list): self._test_fn( images_list, jax_fn=lambda img: augment.rot90(img, k=1), tf_fn=lambda img: tf.image.rot90(img, k=1)) self._test_fn( images_list, jax_fn=lambda img: augment.rot90(img, k=2), tf_fn=lambda img: tf.image.rot90(img, k=2)) self._test_fn( images_list, jax_fn=lambda img: augment.rot90(img, k=3), tf_fn=lambda img: tf.image.rot90(img, k=3)) # The functions below don't have a TF equivalent to compare to, we just check # that they run. @parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE), ("out_of_range", _RAND_FLOATS_OUT_OF_RANGE)) def test_flip(self, images_list): self._test_fn( images_list, jax_fn=augment.flip_left_right, tf_fn=tf.image.flip_left_right) self._test_fn( images_list, jax_fn=augment.flip_up_down, tf_fn=tf.image.flip_up_down) key = jax.random.PRNGKey(0) self._test_fn( images_list, jax_fn=functools.partial(augment.random_flip_left_right, key), tf_fn=None) self._test_fn( images_list, jax_fn=functools.partial(augment.random_flip_up_down, key), tf_fn=None) @parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE), ("out_of_range", _RAND_FLOATS_OUT_OF_RANGE)) def test_solarize(self, images_list): self._test_fn_with_random_arg( images_list, jax_fn=augment.solarize, tf_fn=None, threshold=(0., 1.)) @parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE), ("out_of_range", _RAND_FLOATS_OUT_OF_RANGE)) def test_gaussian_blur(self, images_list): blur_fn = functools.partial(augment.gaussian_blur, kernel_size=_KERNEL_SIZE) self._test_fn_with_random_arg( images_list, jax_fn=blur_fn, tf_fn=None, sigma=(0.1, 2.0)) @parameterized.named_parameters(("in_range", _RAND_FLOATS_IN_RANGE), ("out_of_range", _RAND_FLOATS_OUT_OF_RANGE)) def test_random_crop(self, images_list): key = jax.random.PRNGKey(43) crop_fn = lambda img: augment.random_crop(key, img, (100, 100, 3)) self._test_fn(images_list, jax_fn=crop_fn, tf_fn=None) class TestMatchTensorflow(_ImageAugmentationTest): def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range): if tf_fn is None: return assert len(kw_range) == 1 kw_name, (random_min, random_max) = list(kw_range.items())[0] for image_rgb in images_list: argument = np.random.uniform(random_min, random_max, size=()) adjusted_jax = jax_fn(image_rgb, **{kw_name: argument}) adjusted_tf = tf_fn(image_rgb, argument).numpy() self.assertAllCloseTolerant(adjusted_jax, adjusted_tf) def _test_fn(self, images_list, jax_fn, tf_fn): if tf_fn is None: 
return for image_rgb in images_list: adjusted_jax = jax_fn(image_rgb) adjusted_tf = tf_fn(image_rgb).numpy() self.assertAllCloseTolerant(adjusted_jax, adjusted_tf) class TestVmap(_ImageAugmentationTest): def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range): del tf_fn # unused. assert len(kw_range) == 1 kw_name, (random_min, random_max) = list(kw_range.items())[0] arguments = [ np.random.uniform(random_min, random_max, size=()) for _ in images_list ] fn_vmap = jax.vmap(jax_fn) outputs_vmaped = list( fn_vmap(np.stack(images_list, axis=0), np.stack(arguments, axis=0))) assert len(images_list) == len(outputs_vmaped) assert len(images_list) == len(arguments) for image_rgb, argument, adjusted_vmap in zip(images_list, arguments, outputs_vmaped): adjusted_jax = jax_fn(image_rgb, **{kw_name: argument}) self.assertAllCloseTolerant(adjusted_jax, adjusted_vmap) def _test_fn(self, images_list, jax_fn, tf_fn): del tf_fn # unused. fn_vmap = jax.vmap(jax_fn) outputs_vmaped = list(fn_vmap(np.stack(images_list, axis=0))) assert len(images_list) == len(outputs_vmaped) for image_rgb, adjusted_vmap in zip(images_list, outputs_vmaped): adjusted_jax = jax_fn(image_rgb) self.assertAllCloseTolerant(adjusted_jax, adjusted_vmap) class TestJit(_ImageAugmentationTest): def _test_fn_with_random_arg(self, images_list, jax_fn, tf_fn, **kw_range): del tf_fn # unused. assert len(kw_range) == 1 kw_name, (random_min, random_max) = list(kw_range.items())[0] jax_fn_jitted = jax.jit(jax_fn) for image_rgb in images_list: argument = np.random.uniform(random_min, random_max, size=()) adjusted_jax = jax_fn(image_rgb, argument) adjusted_jit = jax_fn_jitted(image_rgb, **{kw_name: argument}) self.assertAllCloseTolerant(adjusted_jax, adjusted_jit) def _test_fn(self, images_list, jax_fn, tf_fn): del tf_fn # unused. jax_fn_jitted = jax.jit(jax_fn) for image_rgb in images_list: adjusted_jax = jax_fn(image_rgb) adjusted_jit = jax_fn_jitted(image_rgb) self.assertAllCloseTolerant(adjusted_jax, adjusted_jit) if __name__ == "__main__": absltest.main()
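# --- Editorial sketch (not part of dm_pix): the batching pattern that TestVmap checks,
# --- i.e. jax.vmap mapping an (image, scalar) op over a batch with one scalar per image.
# --- scale_brightness is a toy stand-in for the augmentation functions tested above.
def _vmap_pattern_demo():
    import jax.numpy as jnp
    def scale_brightness(image, delta):
        return jnp.clip(image + delta, 0., 1.)
    images = np.stack(_RAND_FLOATS_IN_RANGE, axis=0)
    deltas = np.random.uniform(-0.5, 0.5, size=(images.shape[0],)).astype(np.float32)
    batched = jax.vmap(scale_brightness)(images, deltas)  # one call over the whole batch
    single = scale_brightness(images[3], deltas[3])       # equivalent per-image call
    np.testing.assert_allclose(batched[3], single, rtol=1e-4, atol=1e-4)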
{"hexsha": "866586365dfc27be9563799388e577ccf2078bcb", "size": 10330, "ext": "py", "lang": "Python", "max_stars_repo_path": "dm_pix/_src/augment_test.py", "max_stars_repo_name": "SupreethRao99/dm_pix", "max_stars_repo_head_hexsha": "6accc9643b1ebfc2e6a036629cec042bc6f5325d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dm_pix/_src/augment_test.py", "max_issues_repo_name": "SupreethRao99/dm_pix", "max_issues_repo_head_hexsha": "6accc9643b1ebfc2e6a036629cec042bc6f5325d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dm_pix/_src/augment_test.py", "max_forks_repo_name": "SupreethRao99/dm_pix", "max_forks_repo_head_hexsha": "6accc9643b1ebfc2e6a036629cec042bc6f5325d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8345864662, "max_line_length": 80, "alphanum_fraction": 0.6836398838, "include": true, "reason": "import numpy,import jax", "num_tokens": 2802}
theory Dioid imports Lattice begin (* +------------------------------------------------------------------------+ *) section {* Dioids *} (* +------------------------------------------------------------------------+ *) record 'a dioid = "'a partial_object" + plus :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "+\<index>" 70) mult :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<cdot>\<index>" 80) one :: "'a" ("1\<index>") zero :: "'a" ("0\<index>") locale dioid = fixes A (structure) assumes add_closed: "\<lbrakk>(x::'a) \<in> carrier A; y \<in> carrier A\<rbrakk> \<Longrightarrow> x + y \<in> carrier A" and mult_closed: "\<lbrakk>x \<in> carrier A; y \<in> carrier A\<rbrakk> \<Longrightarrow> x\<cdot>y \<in> carrier A" and one_closed: "(1::'a) \<in> carrier A" and zero_closed: "(0::'a) \<in> carrier A" and mult_assoc: "\<lbrakk>x \<in> carrier A; y \<in> carrier A; z \<in> carrier A\<rbrakk> \<Longrightarrow> (x\<cdot>y)\<cdot>z = x\<cdot>(y\<cdot>z)" and add_assoc: "\<lbrakk>(x::'a) \<in> carrier A; y \<in> carrier A; z \<in> carrier A\<rbrakk> \<Longrightarrow> (x + y) + z = x + (y + z)" and add_comm: "\<lbrakk>(x::'a) \<in> carrier A; y \<in> carrier A\<rbrakk> \<Longrightarrow> x + y = y + x" and add_idem: "(x::'a) \<in> carrier A \<Longrightarrow> x + x = x" and distl: "\<lbrakk>(x::'a) \<in> carrier A; y \<in> carrier A; z \<in> carrier A\<rbrakk> \<Longrightarrow> x\<cdot>(y + z) = x\<cdot>y + x\<cdot>z" and distr: "\<lbrakk>(x::'a) \<in> carrier A; y \<in> carrier A; z \<in> carrier A\<rbrakk> \<Longrightarrow> (x + y)\<cdot>z = x\<cdot>z + y\<cdot>z" and mult_onel: "(x::'a) \<in> carrier A \<Longrightarrow> 1\<cdot>x = x" and mult_oner: "(x::'a) \<in> carrier A \<Longrightarrow> x\<cdot>1 = x" and add_zerol: "(x::'a) \<in> carrier A \<Longrightarrow> 0 + x = x" and mult_zerol: "(x::'a) \<in> carrier A \<Longrightarrow> 0\<cdot>x = 0" and mult_zeror: "(x::'a) \<in> carrier A \<Longrightarrow> x\<cdot>0 = 0" begin definition nat_order :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infixl "\<sqsubseteq>" 50) where "x \<sqsubseteq> y \<equiv> x + y = y" abbreviation natural :: "'a ord" where "natural \<equiv> \<lparr>carrier = carrier A, le = nat_order\<rparr>" lemma natural: "order natural" by (default, auto simp add: nat_order_def, (metis add_idem add_assoc add_comm)+) end sublocale dioid \<subseteq> order "\<lparr>carrier = carrier A, le = nat_order\<rparr>" using natural . 
sublocale dioid \<subseteq> join_semilattice "\<lparr>carrier = carrier A, le = nat_order\<rparr>" apply (default, auto simp add: order.is_lub_simp[OF natural]) by (smt add_assoc add_closed add_comm add_idem nat_order_def) context dioid begin lemma add_zeror: "x \<in> carrier A \<Longrightarrow> x + 0 = x" by (metis add_comm add_zerol zero_closed) lemma add_lub: "\<lbrakk>x \<in> carrier A; y \<in> carrier A; z \<in> carrier A\<rbrakk> \<Longrightarrow> x \<sqsubseteq> z \<and> y \<sqsubseteq> z \<longleftrightarrow> x + y \<sqsubseteq> z" by (metis add_assoc add_closed add_comm add_idem nat_order_def) lemma add_iso: "\<lbrakk>x \<in> carrier A; y \<in> carrier A; z \<in> carrier A\<rbrakk> \<Longrightarrow> x \<sqsubseteq> y \<longrightarrow> z + x \<sqsubseteq> z + y" by (metis add_assoc add_closed add_comm add_idem nat_order_def) lemma mult_isol: "\<lbrakk>x \<in> carrier A; y \<in> carrier A; z \<in> carrier A\<rbrakk> \<Longrightarrow> x \<sqsubseteq> y \<longrightarrow> z\<cdot>x \<sqsubseteq> z\<cdot>y" by (metis distl nat_order_def) lemma mult_isor: "\<lbrakk>x \<in> carrier A; y \<in> carrier A; z \<in> carrier A\<rbrakk> \<Longrightarrow> x \<sqsubseteq> y \<longrightarrow> x\<cdot>z \<sqsubseteq> y\<cdot>z" by (metis distr nat_order_def) lemma mult_double_iso: "\<lbrakk>w \<in> carrier A; x \<in> carrier A; y \<in> carrier A; z \<in> carrier A\<rbrakk> \<Longrightarrow> w \<sqsubseteq> x \<and> y \<sqsubseteq> z \<longrightarrow> w\<cdot>y \<sqsubseteq> x\<cdot>z" by (smt add_assoc distl distr add_idem nat_order_def mult_closed) lemma subdistl: "\<lbrakk>x \<in> carrier A; y \<in> carrier A; z \<in> carrier A\<rbrakk> \<Longrightarrow> z\<cdot>x \<sqsubseteq> z\<cdot>(x + y)" by (metis add_assoc add_closed add_idem nat_order_def mult_double_iso) lemma zero_min: "x \<in> carrier A \<Longrightarrow> 0 \<sqsubseteq> x" by (metis (lifting) add_zerol nat_order_def) lemma no_trivial_inverse: "\<forall>x\<in>carrier A.(x \<noteq> 0 \<longrightarrow> \<not>(\<exists>y\<in>carrier A.(x+y = 0)))" by (metis add_lub add_zeror nat_order_def zero_min) lemmas nat_antisym = order_antisym[simplified] and nat_refl = order_refl[simplified] and nat_trans = order_trans[simplified] end (* +------------------------------------------------------------------------+ *) subsection {* Free Dioid *} (* +------------------------------------------------------------------------+ *) datatype 'a dioid_term = DioidAtom 'a | DioidPlus "'a dioid_term" "'a dioid_term" | DioidMult "'a dioid_term" "'a dioid_term" | DioidOne | DioidZero primrec (in dioid) term_unfold :: "'a dioid_term \<Rightarrow> 'a" where "term_unfold (DioidAtom x) = x" | "term_unfold (DioidPlus x y) = (term_unfold x) + (term_unfold y)" | "term_unfold (DioidMult x y) = (term_unfold x) \<cdot> (term_unfold y)" | "term_unfold DioidOne = 1" | "term_unfold DioidZero = 0" lemma (in dioid) term_fold_atom: "x \<in> carrier A \<Longrightarrow> x = term_unfold (DioidAtom x)" by (rule term_unfold.simps(1)[symmetric]) primrec (in dioid) term_atoms :: "'a dioid_term \<Rightarrow> 'a set" where "term_atoms (DioidAtom x) = {x}" | "term_atoms (DioidPlus x y) = (term_atoms x) \<union> (term_atoms y)" | "term_atoms (DioidMult x y) = (term_atoms x) \<union> (term_atoms y)" | "term_atoms DioidOne = {}" | "term_atoms DioidZero = {}" lemma (in dioid) term_closure: "term_atoms x \<subseteq> carrier A \<Longrightarrow> term_unfold x \<in> carrier A" by (induct x, simp_all add: add_closed mult_closed one_closed zero_closed) primrec dioid_term_map :: "('a \<Rightarrow> 'b) 
\<Rightarrow> 'a dioid_term \<Rightarrow> 'b dioid_term" where "dioid_term_map f (DioidAtom x) = DioidAtom (f x)" | "dioid_term_map f (DioidPlus x y) = DioidPlus (dioid_term_map f x) (dioid_term_map f y)" | "dioid_term_map f (DioidMult x y) = DioidMult (dioid_term_map f x) (dioid_term_map f y)" | "dioid_term_map f DioidOne = DioidOne" | "dioid_term_map f DioidZero = DioidZero" inductive dioid_con :: "'a dioid_term \<Rightarrow> 'a dioid_term \<Rightarrow> bool" where refl [intro]: "dioid_con x x" | sym [sym]: "dioid_con x y \<Longrightarrow> dioid_con y x" | trans [trans]: "dioid_con x y \<Longrightarrow> dioid_con y z \<Longrightarrow> dioid_con x z" | add_lift: "dioid_con x1 x2 \<Longrightarrow> dioid_con y1 y2 \<Longrightarrow> dioid_con (DioidPlus x1 y1) (DioidPlus x2 y2)" | mult_lift: "dioid_con x1 x2 \<Longrightarrow> dioid_con y1 y2 \<Longrightarrow> dioid_con (DioidMult x1 y1) (DioidMult x2 y2)" | mult_assoc: "dioid_con (DioidMult (DioidMult x y) z) (DioidMult x (DioidMult y z))" | add_assoc: "dioid_con (DioidPlus (DioidPlus x y) z) (DioidPlus x (DioidPlus y z))" | add_comm: "dioid_con (DioidPlus x y) (DioidPlus y x)" | add_idem: "dioid_con (DioidPlus x x) x" | distl: "dioid_con (DioidMult x (DioidPlus y z)) (DioidPlus (DioidMult x y) (DioidMult x z))" | distr: "dioid_con (DioidMult (DioidPlus x y) z) (DioidPlus (DioidMult x z) (DioidMult y z))" | mult_onel: "dioid_con (DioidMult DioidOne x) x" | mult_oner: "dioid_con (DioidMult x DioidOne) x" | add_zerol: "dioid_con (DioidPlus DioidZero x) x" | mult_zerol: "dioid_con (DioidMult DioidZero x) DioidZero" | mult_zeror: "dioid_con (DioidMult x DioidZero) DioidZero" quotient_type 'a dioid_expr = "'a dioid_term" / dioid_con by (metis (lifting) dioid_con.refl dioid_con.sym dioid_con.trans equivpI reflpI sympI transpI) lift_definition term_mult :: "'a dioid_expr \<Rightarrow> 'a dioid_expr \<Rightarrow> 'a dioid_expr" is DioidMult by (rule mult_lift, assumption+) lift_definition term_plus :: "'a dioid_expr \<Rightarrow> 'a dioid_expr \<Rightarrow> 'a dioid_expr" is DioidPlus by (rule add_lift, assumption+) lift_definition term_one :: "'a dioid_expr" is DioidOne by (rule dioid_con.refl) lift_definition term_zero :: "'a dioid_expr" is DioidZero by (rule dioid_con.refl) definition free_dioid :: "'a dioid_expr dioid" where "free_dioid = \<lparr>carrier = UNIV, plus = term_plus, mult = term_mult, one = term_one, zero = term_zero\<rparr>" lemma "dioid free_dioid" proof (unfold_locales, simp_all add: free_dioid_def) fix x y z show "term_mult (term_mult x y) z = term_mult x (term_mult y z)" by (transfer, rule dioid_con.mult_assoc) show "term_plus (term_plus x y) z = term_plus x (term_plus y z)" by (transfer, rule dioid_con.add_assoc) show "term_plus x y = term_plus y x" by (transfer, rule dioid_con.add_comm) show "term_plus x x = x" by (transfer, rule dioid_con.add_idem) show "term_mult x (term_plus y z) = term_plus (term_mult x y) (term_mult x z)" by (transfer, rule dioid_con.distl) show "term_mult (term_plus x y) z = term_plus (term_mult x z) (term_mult y z)" by (transfer, rule dioid_con.distr) show "term_mult term_one x = x" by (transfer, rule dioid_con.mult_onel) show "term_mult x term_one = x" by (transfer, rule dioid_con.mult_oner) show "term_plus term_zero x = x" by (transfer, rule dioid_con.add_zerol) show "term_mult term_zero x = term_zero" by (transfer, rule dioid_con.mult_zerol) show "term_mult x term_zero = term_zero" by (transfer, rule dioid_con.mult_zeror) qed ML {* fun term_fold_tac fold_atom folds closure leaves = Subgoal.FOCUS (fn 
{context, prems, ...} => let val witnesses = Locale.get_witnesses context val subst_thm = hd (witnesses RL [fold_atom]) val subst_thms = prems RL [subst_thm] val folds = witnesses RL folds val to_leaves_thm = hd (witnesses RL [closure]) in DETERM (Method.insert_tac subst_thms 1 THEN REPEAT (etac @{thm ssubst} 1) THEN asm_full_simp_tac (HOL_basic_ss addsimps folds) 1) THEN (if leaves then rtac to_leaves_thm 1 else all_tac) end) val dioid_term_fold_tac = term_fold_tac @{thm dioid.term_fold_atom} @{thms dioid.term_unfold.simps[symmetric]} @{thm dioid.term_closure} *} method_setup dioid_closure = {* Scan.succeed (fn ctxt => let val witnesses = Locale.get_witnesses ctxt val unfolds = witnesses RL @{thms dioid.term_atoms.simps} in METHOD (fn _ => dioid_term_fold_tac true ctxt 1 THEN asm_full_simp_tac (@{simpset} addsimps unfolds) 1) end) *} lemma (in dioid) "\<lbrakk>y\<cdot>y \<in> carrier A; x \<in> carrier A\<rbrakk> \<Longrightarrow> x + x + y\<cdot>y \<in> carrier A" by dioid_closure end
{"author": "Alasdair", "repo": "Thesis", "sha": "8face4b62adfd73803b387e95c24f06e09736e30", "save_path": "github-repos/isabelle/Alasdair-Thesis", "path": "github-repos/isabelle/Alasdair-Thesis/Thesis-8face4b62adfd73803b387e95c24f06e09736e30/WorkingSKAT/Dioid.thy"}
//================================================================================================== /*! @file @copyright 2016 NumScale SAS @copyright 2016 J.T. Lapreste Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) */ //================================================================================================== #ifndef BOOST_SIMD_FUNCTION_SCALAR_IS_LESSGREATER_HPP_INCLUDED #define BOOST_SIMD_FUNCTION_SCALAR_IS_LESSGREATER_HPP_INCLUDED #include <boost/simd/function/definition/is_lessgreater.hpp> #include <boost/simd/arch/common/scalar/function/is_lessgreater.hpp> #endif
{"hexsha": "ba161fa28baa2e6da4af514f1d027d34491e2ff4", "size": 690, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/function/scalar/is_lessgreater.hpp", "max_stars_repo_name": "yaeldarmon/boost.simd", "max_stars_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/simd/function/scalar/is_lessgreater.hpp", "max_issues_repo_name": "yaeldarmon/boost.simd", "max_issues_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/function/scalar/is_lessgreater.hpp", "max_forks_repo_name": "yaeldarmon/boost.simd", "max_forks_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5, "max_line_length": 100, "alphanum_fraction": 0.5710144928, "num_tokens": 127}
[STATEMENT] lemma hd_if: "hd (if p then xs else ys) = (if p then hd xs else hd ys)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. hd (if p then xs else ys) = (if p then hd xs else hd ys) [PROOF STEP] by auto
{"llama_tokens": 96, "file": "Word_Lib_Many_More", "length": 1}
import os import math from collections import OrderedDict import numpy as np import torch import tensorflow as tf from utils import fill_layer, _gather_token_embedding, _get_encode_output_mapping_dict from transformer_pb2 import Transformer from transformers import BartForConditionalGeneration os.environ["CUDA_VISIBLE_DEVICES"] = "-1" """ For the mapping dictionary: key is the value of the proto parameter, value is a powerful expression, each && split tensor name of the matching path or expression. The sub-pattern of the path is separated by spaces, and the expression starts with a expression_. You can operate separately on each tensor and support multiple expressions. Multiple matching paths and the expression will finally be concatenated on axis = -1. """ enc_layer_mapping_dict = OrderedDict( { "multihead_norm_scale": "self_attn_layer_norm weight", "multihead_norm_bias": "self_attn_layer_norm bias", "multihead_project_kernel_qkv": "self_attn q_proj weight&&self_attn k_proj weight&&self_attn v_proj weight&&expression_.transpose(0, 1)", "multihead_project_bias_qkv": "self_attn q_proj bias&&self_attn k_proj bias&&self_attn v_proj bias", "multihead_project_kernel_output": "self_attn out_proj weight&&expression_.transpose(0, 1)", "multihead_project_bias_output": "self_attn out_proj bias", "ffn_norm_scale": "final_layer_norm weight", "ffn_norm_bias": "final_layer_norm bias", "ffn_first_kernel": "fc1 weight&&expression_.transpose(0, 1)", "ffn_first_bias": "fc1 bias", "ffn_second_kernel": "fc2 weight&&expression_.transpose(0, 1)", "ffn_second_bias": "fc2 bias", } ) dec_layer_mapping_dict = OrderedDict( { "self_norm_scale": "self_attn_layer_norm weight", "self_norm_bias": "self_attn_layer_norm bias", "self_project_kernel_qkv": "self_attn q_proj weight&&self_attn k_proj weight&&self_attn v_proj weight&&expression_.transpose(0, 1)", "self_project_bias_qkv": "self_attn q_proj bias&&self_attn k_proj bias&&self_attn v_proj bias", "self_project_kernel_output": "self_attn out_proj weight&&expression_.transpose(0, 1)", "self_project_bias_output": "self_attn out_proj bias", "encdec_norm_scale": "encoder_attn_layer_norm weight", "encdec_norm_bias": "encoder_attn_layer_norm bias", "encdec_project_kernel_q": "encoder_attn q_proj weight&&expression_.transpose(0, 1)", "encdec_project_bias_q": "encoder_attn q_proj bias", "encdec_project_kernel_output": "encoder_attn out_proj weight&&expression_.transpose(0, 1)", "encdec_project_bias_output": "encoder_attn out_proj bias", "ffn_norm_scale": "final_layer_norm weight", "ffn_norm_bias": "final_layer_norm bias", "ffn_first_kernel": "fc1 weight&&expression_.transpose(0, 1)", "ffn_first_bias": "fc1 bias", "ffn_second_kernel": "fc2 weight&&expression_.transpose(0, 1)", "ffn_second_bias": "fc2 bias", } ) src_emb_mapping_dict = OrderedDict( { "norm_scale": "layernorm_embedding weight", "norm_bias": "layernorm_embedding bias", } ) trg_emb_mapping_dict = OrderedDict( { "norm_scale": "layernorm_embedding weight", "norm_bias": "layernorm_embedding bias", "shared_bias": "final_logits_bias", } ) def extract_transformer_weights( output_file, model_dir, head_num, generation_method, max_step, extra_decode_length=50, beam_size=4, length_penalty: float = 0, topk=1, topp=0.75, lang="en", only_decoder=True, ): transformer = Transformer() # load var names reloaded = BartForConditionalGeneration.from_pretrained(model_dir).state_dict() encoder_state_dict = {} decoder_state_dict = {} for k in reloaded: if k.startswith("model.encoder."): encoder_state_dict[k] = reloaded[k] if 
k.startswith("model.decoder."): decoder_state_dict[k] = reloaded[k] if k == "model.shared.weight": encoder_state_dict[k] = reloaded[k] decoder_state_dict[k] = reloaded[k] if k == "final_logits_bias": decoder_state_dict[k] = reloaded[k] dec_var_name_list = list(decoder_state_dict.keys()) enc_var_name_list = list(encoder_state_dict.keys()) # fill each encoder layer's params if not only_decoder: enc_tensor_names = {} for name in enc_var_name_list: name_split = name.split(".") if len(name_split) <= 3 or not name_split[3].isdigit(): continue layer_id = int(name_split[3]) enc_tensor_names.setdefault(layer_id, []).append(name) for layer_id in sorted(enc_tensor_names.keys()): fill_layer( enc_tensor_names[layer_id], encoder_state_dict, transformer.encoder_stack.add(), enc_layer_mapping_dict, ) # fill each decoder layer's params dec_tensor_names = {} for name in dec_var_name_list: name_split = name.split(".") if len(name_split) <= 3 or not name.split(".")[3].isdigit(): continue layer_id = int(name.split(".")[3]) dec_tensor_names.setdefault(layer_id, []).append(name) for layer_id in sorted(dec_tensor_names.keys()): fill_layer( dec_tensor_names[layer_id], decoder_state_dict, transformer.decoder_stack.add(), dec_layer_mapping_dict, ) # fill src_embedding if not only_decoder: fill_layer( enc_var_name_list, encoder_state_dict, transformer.src_embedding, src_emb_mapping_dict, ) # bart position index starts from 2 # https://github.com/huggingface/transformers/blob/master/src/transformers/models/bart/configuration_bart.py#L208 # https://github.com/huggingface/transformers/blob/master/src/transformers/models/bart/modeling_bart.py#L821 pos_emb_list = ( encoder_state_dict["model.encoder.embed_positions.weight"] .numpy()[ 2 : 2 + max_step, : ] # because in huggingface bart, the position embedding starts from 2 .reshape([-1]) .tolist() ) transformer.src_embedding.position_embedding[:] = pos_emb_list print( "model.encoder.embed_positions.weight -> src_embedding.position_embedding, shape: {}, conversion finished!".format( encoder_state_dict["model.encoder.embed_positions.weight"] .numpy()[2 : 2 + max_step, :] .shape ) ) src_tb = _gather_token_embedding( enc_var_name_list, encoder_state_dict, "shared" ) transformer.src_embedding.token_embedding[:] = src_tb.flatten().tolist() # fill trg_embedding encode_output_mapping_dict = _get_encode_output_mapping_dict(len(dec_tensor_names)) trg_emb_mapping_dict.update(encode_output_mapping_dict) fill_layer( dec_var_name_list, decoder_state_dict, transformer.trg_embedding, trg_emb_mapping_dict, ) pos_emb_list = ( decoder_state_dict["model.decoder.embed_positions.weight"] .numpy()[2 : 2 + max_step, :] .reshape([-1]) .tolist() ) transformer.trg_embedding.position_embedding[:] = pos_emb_list print( "model.decoder.embed_positions.weight -> trg_embedding.position_embedding, shape: {}, conversion finished!".format( decoder_state_dict["model.decoder.embed_positions.weight"] .numpy()[:max_step, :] .shape ) ) # assert lang in LANG2ID trg_tb = _gather_token_embedding( dec_var_name_list, decoder_state_dict, "shared", lang=lang ) transformer.trg_embedding.token_embedding[:] = trg_tb.transpose().flatten().tolist() print( "token_embedding.weight -> trg_embedding.token_embedding, shape: {}, conversion finished!".format( trg_tb.transpose().shape ) ) # change encoder layer norm scale&bias position tmp_scale, tmp_bias = ( transformer.src_embedding.norm_scale, transformer.src_embedding.norm_bias, ) for i, encoder in enumerate(transformer.encoder_stack): print("***Fix encoder layer {} LayerNorm 
scale and bias***".format(i)) new_tmp_scale, new_tmp_bias = ( encoder.multihead_norm_scale[:], encoder.multihead_norm_bias[:], ) encoder.multihead_norm_scale[:], encoder.multihead_norm_bias[:] = ( tmp_scale, tmp_bias, ) print( "multihead_norm_scale: {} -> {}\nmultihead_norm_bias: {} -> {}".format( new_tmp_scale[:3], encoder.multihead_norm_scale[:3], new_tmp_bias[:3], encoder.multihead_norm_bias[:3], ) ) tmp_scale, tmp_bias = new_tmp_scale[:], new_tmp_bias[:] new_tmp_scale, new_tmp_bias = ( encoder.ffn_norm_scale[:], encoder.ffn_norm_bias[:], ) encoder.ffn_norm_scale[:], encoder.ffn_norm_bias[:] = ( tmp_scale, tmp_bias, ) print( "ffn_norm_scale: {} -> {}\nffn_norm_bias: {} -> {}".format( new_tmp_scale[:3], encoder.ffn_norm_scale[:3], new_tmp_bias[:3], encoder.ffn_norm_bias[:3], ) ) tmp_scale, tmp_bias = new_tmp_scale[:], new_tmp_bias[:] transformer.src_embedding.norm_scale[:], transformer.src_embedding.norm_bias[:] = ( tmp_scale, tmp_bias, ) # change decoder layer norm scale&bias position tmp_scale, tmp_bias = ( transformer.trg_embedding.norm_scale, transformer.trg_embedding.norm_bias, ) for i, decoder in enumerate(transformer.decoder_stack): print("***Fix decoder layer {} LayerNorm scale and bias***".format(i)) new_tmp_scale, new_tmp_bias = ( decoder.self_norm_scale[:], decoder.self_norm_bias[:], ) decoder.self_norm_scale[:], decoder.self_norm_bias[:] = tmp_scale, tmp_bias print( "self_norm_scale: {} -> {}\nself_norm_bias: {} -> {}".format( new_tmp_scale[:3], decoder.self_norm_scale[:3], new_tmp_bias[:3], decoder.self_norm_bias[:3], ) ) tmp_scale, tmp_bias = new_tmp_scale[:], new_tmp_bias[:] new_tmp_scale, new_tmp_bias = ( decoder.encdec_norm_scale[:], decoder.encdec_norm_bias[:], ) decoder.encdec_norm_scale[:], decoder.encdec_norm_bias[:] = tmp_scale, tmp_bias print( "encdec_norm_scale: {} -> {}\nencdec_norm_bias: {} -> {}".format( new_tmp_scale[:3], decoder.encdec_norm_scale[:3], new_tmp_bias[:3], decoder.encdec_norm_bias[:3], ) ) tmp_scale, tmp_bias = new_tmp_scale[:], new_tmp_bias[:] new_tmp_scale, new_tmp_bias = ( decoder.ffn_norm_scale[:], decoder.ffn_norm_bias[:], ) decoder.ffn_norm_scale[:], decoder.ffn_norm_bias[:] = ( tmp_scale, tmp_bias, ) print( "ffn_norm_scale: {} -> {}\nffn_norm_bias: {} -> {}".format( new_tmp_scale[:3], decoder.ffn_norm_scale[:3], new_tmp_bias[:3], decoder.ffn_norm_bias[:3], ) ) tmp_scale, tmp_bias = new_tmp_scale[:], new_tmp_bias[:] transformer.trg_embedding.norm_scale[:], transformer.trg_embedding.norm_bias[:] = ( tmp_scale, tmp_bias, ) # fill in conf transformer.model_conf.head_num = head_num transformer.model_conf.beam_size = beam_size transformer.model_conf.length_penalty = length_penalty transformer.model_conf.extra_decode_length = extra_decode_length transformer.model_conf.src_padding_id = 1 transformer.model_conf.trg_start_id = 2 transformer.model_conf.trg_end_id = 2 transformer.model_conf.sampling_method = generation_method transformer.model_conf.topk = topk transformer.model_conf.topp = topp transformer.model_conf.diverse_lambda = 0 transformer.model_conf.is_post_ln = True transformer.model_conf.no_scale_embedding = True transformer.model_conf.use_gelu = True print("Wrting to {0}".format(output_file)) with tf.io.gfile.GFile(output_file, "wb") as fout: fout.write(transformer.SerializeToString()) transformer = Transformer() with tf.io.gfile.GFile(output_file, "rb") as fin: transformer.ParseFromString(fin.read()) print(transformer.model_conf) if __name__ == "__main__": output_lightseq_model_name = "lightseq_bart_base.pb" input_huggingface_bart_model = 
( "facebook/bart-base" # Example: you can try "facebook/bart-large" as well ) head_number = 12 # for bart-large, we have 16 generation_method = "beam_search" # in order to get score, we should use `beam_search` inference method beam_size = 4 max_step = 50 # max step for generation, it decides GPU memory occupancy extra_decode_length = 50 # maximum_generation_length = min(src_length + extra_decode_length, max_step) length_penalty = 1.0 extract_transformer_weights( output_lightseq_model_name, input_huggingface_bart_model, head_num=head_number, # layer number generation_method=generation_method, beam_size=beam_size, max_step=max_step, extra_decode_length=extra_decode_length, only_decoder=False, length_penalty=length_penalty, )
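# --- Editorial sketch (not part of lightseq): utils.fill_layer, imported above but not
# --- shown here, is what actually interprets the mapping dictionaries. The toy resolver
# --- below only makes the '&&' convention described at the top of this file concrete,
# --- and approximates rather than reproduces the real behaviour.
def _resolve_mapping_rule_sketch(rule, named_tensors):
    """rule: '&&'-separated items; named_tensors: dict of parameter name -> torch.Tensor."""
    parts, expression = [], None
    for item in rule.split("&&"):
        if item.startswith("expression_"):
            expression = item[len("expression_"):]        # e.g. ".transpose(0, 1)"
            continue
        subpatterns = item.split(" ")                      # all subpatterns must occur in the name
        for name, tensor in named_tensors.items():
            if all(p in name for p in subpatterns):
                parts.append(tensor)
                break
    out = torch.cat(parts, dim=-1) if len(parts) > 1 else parts[0]
    if expression == ".transpose(0, 1)":                   # the only expression used in this file
        out = out.transpose(0, 1)
    return out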
{"hexsha": "82162c5aa78752800e9b312a01cc80911c1c0076", "size": 13992, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/inference/python/hf_bart_export.py", "max_stars_repo_name": "godweiyang/lightseq", "max_stars_repo_head_hexsha": "a23f6807c354410fbd3bb11a17918e99c7af6272", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-04-13T15:48:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-15T08:49:45.000Z", "max_issues_repo_path": "examples/inference/python/hf_bart_export.py", "max_issues_repo_name": "godweiyang/lightseq", "max_issues_repo_head_hexsha": "a23f6807c354410fbd3bb11a17918e99c7af6272", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/inference/python/hf_bart_export.py", "max_forks_repo_name": "godweiyang/lightseq", "max_forks_repo_head_hexsha": "a23f6807c354410fbd3bb11a17918e99c7af6272", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-15T12:17:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-12T15:37:41.000Z", "avg_line_length": 37.8162162162, "max_line_length": 197, "alphanum_fraction": 0.6407947399, "include": true, "reason": "import numpy", "num_tokens": 3127}
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests of the FunctionCallOp lowering compilation pass.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import tensorflow as tf from tensorflow_probability.python.internal.auto_batching import allocation_strategy from tensorflow_probability.python.internal.auto_batching import lowering from tensorflow_probability.python.internal.auto_batching import numpy_backend from tensorflow_probability.python.internal.auto_batching import test_programs from tensorflow_probability.python.internal.auto_batching import virtual_machine as vm NP_BACKEND = numpy_backend.NumpyBackend() def _fibonacci_lowered_execute(inputs, backend): prog = test_programs.fibonacci_function_calls() alloc = allocation_strategy.optimize(prog) lowered = lowering.lower_function_calls(alloc) return list(vm.execute( lowered, [inputs], max_stack_depth=15, backend=backend)) def _is_even_lowered_execute(inputs, backend): prog = test_programs.is_even_function_calls() alloc = allocation_strategy.optimize(prog) lowered = lowering.lower_function_calls(alloc) return list(vm.execute( lowered, [inputs], max_stack_depth=int(max(inputs)) + 3, backend=backend)) class LoweringTest(tf.test.TestCase): def testLoweringFibonacciNumpy(self): self.assertEqual([8], _fibonacci_lowered_execute([5], NP_BACKEND)) self.assertEqual( [8, 13, 34, 55], _fibonacci_lowered_execute([5, 6, 8, 9], NP_BACKEND)) def testLoweringIsEvenNumpy(self): self.assertEqual([True], _is_even_lowered_execute([0], NP_BACKEND)) self.assertEqual([False], _is_even_lowered_execute([1], NP_BACKEND)) self.assertEqual([True], _is_even_lowered_execute([2], NP_BACKEND)) self.assertEqual( [False, True, True, False, True], _is_even_lowered_execute([5, 6, 8, 9, 0], NP_BACKEND)) if __name__ == '__main__': tf.test.main()
{"hexsha": "fc204cef5f7cd0821f20b9d4023b5bf55cc47f9b", "size": 2618, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_probability/python/internal/auto_batching/lowering_test.py", "max_stars_repo_name": "nxdao2000/probability", "max_stars_repo_head_hexsha": "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensorflow_probability/python/internal/auto_batching/lowering_test.py", "max_issues_repo_name": "nxdao2000/probability", "max_issues_repo_head_hexsha": "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow_probability/python/internal/auto_batching/lowering_test.py", "max_forks_repo_name": "nxdao2000/probability", "max_forks_repo_head_hexsha": "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9420289855, "max_line_length": 86, "alphanum_fraction": 0.7524828113, "include": true, "reason": "import numpy", "num_tokens": 573}
[STATEMENT] lemma [bm_simps]: " bin_mismatch_pref x y (y \<cdot> v)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. bin_mismatch_pref x y (y \<cdot> v) [PROOF STEP] unfolding bin_mismatch_pref_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<exists>k. x \<^sup>@ k \<cdot> y \<le>p y \<cdot> v [PROOF STEP] using append_Nil pow_zero[of x] [PROOF STATE] proof (prove) using this: \<epsilon> \<cdot> ?ys = ?ys x \<^sup>@ 0 = \<epsilon> goal (1 subgoal): 1. \<exists>k. x \<^sup>@ k \<cdot> y \<le>p y \<cdot> v [PROOF STEP] by fast
{"llama_tokens": 253, "file": "Combinatorics_Words_Submonoids", "length": 3}
import numpy as np import os import sys import json import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D #sys.path.append(os.path.join(os.path.dirname(__file__),"../")) from crowdsourcing.interfaces.mechanical_turk import * from crowdsourcing.interfaces.local_webserver import * from crowdsourcing.util.image_search import * from crowdsourcing.annotation_types.classification import * from crowdsourcing.annotation_types.bbox import * from crowdsourcing.annotation_types.part import * # directory containing the images we want to annotate IMAGE_DIR = 'data/bbox/pedestrians_small/images' OUTPUT_FOLDER = 'pedestrians_small2' USE_MTURK = False  # True ONLINE = True WORKERS_PER_IMAGE = 0 with open('keys.json') as f: keys = json.load(f) # Amazon account information for paying for mturk tasks, see # http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.htm AWS_ACCESS_KEY = keys['AWS_ACCESS_KEY'] AWS_SECRET_ACCESS_KEY = keys['AWS_SECRET_ACCESS_KEY'] SANDBOX = False HOST = 'sbranson.no-ip.org' PARAMS = {'instructions':'Draw a box around each pedestrian in the image', 'example_url':'', 'object_name':'pedestrian'} dataset = CrowdDatasetBBox(name='pedestrians') dataset.scan_image_directory(IMAGE_DIR) if USE_MTURK: crowdsource = MTurkCrowdsourcer(dataset, AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY, HOST, OUTPUT_FOLDER, sandbox=SANDBOX, hit_params = PARAMS, online = ONLINE, thumbnail_size = (100,100), initial_assignments_per_image=WORKERS_PER_IMAGE) else: crowdsource = LocalCrowdsourcer(dataset, HOST, OUTPUT_FOLDER, hit_params = PARAMS, online = ONLINE, thumbnail_size = (100,100), initial_assignments_per_image=WORKERS_PER_IMAGE, port=8080) crowdsource.run()
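# --- Editorial note (not part of this repo): keys.json, loaded above, is expected to be a
# --- JSON object with the two fields used to pay for MTurk tasks; the values below are
# --- placeholders only.
EXAMPLE_KEYS_JSON = {
    'AWS_ACCESS_KEY': 'AKIA...your-access-key-id...',
    'AWS_SECRET_ACCESS_KEY': 'your-secret-access-key',
}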
{"hexsha": "08cb8a6c9dbadf044fb7fdbdc7cf6d84929802a3", "size": 1753, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/collect_annotations_bbox.py", "max_stars_repo_name": "sbranson/online_crowdsourcing", "max_stars_repo_head_hexsha": "d1f7c814bb60aae9cf5e76e0b299713246f98ce3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-08-14T21:14:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-04T09:32:37.000Z", "max_issues_repo_path": "experiments/collect_annotations_bbox.py", "max_issues_repo_name": "sbranson/online_crowdsourcing", "max_issues_repo_head_hexsha": "d1f7c814bb60aae9cf5e76e0b299713246f98ce3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/collect_annotations_bbox.py", "max_forks_repo_name": "sbranson/online_crowdsourcing", "max_forks_repo_head_hexsha": "d1f7c814bb60aae9cf5e76e0b299713246f98ce3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-09T08:20:27.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-09T08:20:27.000Z", "avg_line_length": 37.2978723404, "max_line_length": 189, "alphanum_fraction": 0.7923559612, "include": true, "reason": "import numpy", "num_tokens": 422}
import numpy as np import pandas as pd import argparse from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error import mlflow from models import Model_RFR from utils import Trainer def main(): mlflow.start_run() df = pd.read_csv(args.input) #print(df.head()) X = df.drop('y', axis=1).values y = df['y'].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) params_dict = {#'objective': args.objective, } model = Model_RFR(params=params_dict) trainer = Trainer(exp_name=args.exp_name, model=model, model_name=args.model_name, cv=args.cv) train_pred = trainer.predict_cv(X_train, y_train) train_score = mean_squared_error(y_train, train_pred) print(train_score) mlflow.log_metric('Validation Mean Squared Error', train_score) mlflow.end_run() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input') parser.add_argument('--exp_name') parser.add_argument('--model_name', default='RFR') parser.add_argument('--cv', type=int, default=5) #parser.add_argument('--objective', default='regression') #parser.add_argument('--metric', default='mean_squared_error') args = parser.parse_args() main()
{"hexsha": "756f8f214c2872d821f1a2c7ea8d9fec140c69b5", "size": 1342, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/train.py", "max_stars_repo_name": "yktsnd/titanic", "max_stars_repo_head_hexsha": "60805236d6260170894b0a6501eba6f464ff60ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/train.py", "max_issues_repo_name": "yktsnd/titanic", "max_issues_repo_head_hexsha": "60805236d6260170894b0a6501eba6f464ff60ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/train.py", "max_forks_repo_name": "yktsnd/titanic", "max_forks_repo_head_hexsha": "60805236d6260170894b0a6501eba6f464ff60ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8222222222, "max_line_length": 98, "alphanum_fraction": 0.7004470939, "include": true, "reason": "import numpy", "num_tokens": 320}
import numpy as np import cv2 import torch from torch.multiprocessing import Pool, Process, set_start_method import itertools import matplotlib.pyplot as plt import torch.multiprocessing as mp def cut_empty(img, padding=30): rows, cols = img.shape e_r = img.max(1).nonzero()[0] min_non_e_r = e_r[0] max_non_e_r = e_r[-1] e_c = img.max(0).nonzero()[0] min_non_e_c = e_c[0] max_non_e_c = e_c[-1] crop_rows = (max(0, min_non_e_r - padding), min(rows, max_non_e_r + padding)) crop_cols = (max(0, min_non_e_c - padding), min(cols, max_non_e_c + padding)) r_img = img[crop_rows[0]: crop_rows[1], crop_cols[0]: crop_cols[1]] return r_img, (crop_rows, crop_cols) def overlap_score(args): """ :param full_map: :param point_cloud: :param rotation_orig: (row, col) of point cloud :param pos: (row, col) :param orientation: degrees :return: """ full_map, point_cloud, rotation_orig, pos, orientation, max_dim = args point_cloud = point_cloud.numpy() rows, cols = point_cloud.shape # Rotate crop from map out_d = max_dim * 2 + 1 map_overlap = full_map[ pos[0] - max_dim: pos[0] + max_dim + 1, pos[1] - max_dim: pos[1] + max_dim + 1] map_overlap = map_overlap.numpy() rotation_m = cv2.getRotationMatrix2D((max_dim, max_dim), orientation, 1.) map_overlap = cv2.warpAffine(map_overlap, rotation_m, (out_d, out_d)) # Select point cloud intersection zone start_row = max_dim - rotation_orig[0] start_col = max_dim - rotation_orig[1] map_overlap = map_overlap[start_row: start_row + rows, start_col: start_col + cols] # img = map_overlap # # img = cv2.resize(img, (0, 0), fx=0.8, fy=0.8) # cv2.imshow("test", img*255 ) # # img = dst # # img = cv2.resize(img, (0, 0), fx=0.8, fy=0.8) # cv2.imshow("test2", img*255 ) # cv2.waitKey(0) # cv2.destroyAllWindows() # Multiply intensities of overlapping pixels # overlap_score = 0 overlap_score = (point_cloud * map_overlap).sum() # # Subtract not matched pixels from map # overlap_score -= map_overlap[dst == 0].sum() return overlap_score def overlap_score_test(args): return 0 class LocateMap: def __init__(self, map_cfg, localization_cfg): occupancy_map = map_cfg.occupancy_map semantic_map = map_cfg.semantic_map lane_map = map_cfg.lane_map scale_px_cm = map_cfg.scale_px_cm map_labels = map_cfg.map_labels # Transform variables map_labels = dict({x: np.array(y, dtype=np.uint8) for x, y in map_labels}) # Localization cfg self.normal_orientation = localization_cfg.normal_orientation self.no_particles = localization_cfg.no_particles self.no_workers = no_workers = localization_cfg.no_workers # Should be with 0 - Free space/ 255 occupancy occupancy_map = cv2.imread(occupancy_map, cv2.IMREAD_UNCHANGED) semantic_map = cv2.imread(semantic_map) lane_map = cv2.imread(lane_map, cv2.IMREAD_UNCHANGED) self.road_map = ((semantic_map == map_labels["road"]).sum(2) == 3) assert len(lane_map.shape) == 3, "Wrong shape for lane_map" lane_map = cv2.cvtColor(lane_map, cv2.COLOR_BGR2GRAY) # lane_map = lane_map.astype(np.float32) self.shared_lane_map = torch.from_numpy(lane_map) # self.shared_lane_map.div_(255.) self.shared_lane_map.share_memory_() self.point_cloud_mesh_size = None self.point_cloud_mesh = None self.log = True def locate(self, point_cloud_view, p_pos, p_orientation, pos_std, orientation_std): """ :param point_cloud_view: Point_cloud_img_view (Scaled to map) :param p_pos: Predicted location position Pixels (row, col) :param p_orientation: The heading in degrees relative to the geographic North Pole. 
:param pos_acc: Pos acc in px :param orientation_acc: in degrees :return: """ # # -- Test data # point_cloud_view = gt_port_img # p_pos = data_pos # p_orientation = data_orientation # # pos_std = sim_noise_pos # orientation_std = sim_noise_direction # # ----------------------------------- no_particles = self.no_particles road_map = self.road_map normal_orientation = self.normal_orientation point_cloud_mesh_size = self.point_cloud_mesh_size shared_lane_map = self.shared_lane_map # -- Generate Positions & orientations x_offset = np.random.normal(0, pos_std, no_particles).astype(np.int) y_offset = np.random.normal(0, pos_std, no_particles).astype(np.int) possible_x = x_offset + p_pos[0] possible_y = y_offset + p_pos[1] # -- Filter points positions # on_road = road_map[possible_x, possible_y].nonzero()[0] # possible_pos = list(zip(possible_x[on_road], possible_y[on_road])) possible_pos = list(zip(possible_x, possible_y)) p_orientation = p_orientation - normal_orientation # adjust for std orientation possible_o = p_orientation + np.random.normal(0, orientation_std, len(possible_pos)) # Fill map for rotation -> rows_p, cols_p = point_cloud_view.shape max_dim = int(np.sqrt(rows_p ** 2 + cols_p ** 2) + 1) # # center to -> # r_start = max_dim - rows_p // 2 point_cloud_mesh = torch.from_numpy(point_cloud_view) point_cloud_mesh.share_memory_() center_rotation = (rows_p // 2, 0) # point_cloud_mesh[r_start: r_start + rows_p, max_dim: max_dim + cols_p] = point_cloud_view point_cloud_mesh.div_(255.) # get pool args_list = zip(itertools.cycle([shared_lane_map]), itertools.cycle([point_cloud_mesh]), itertools.cycle([center_rotation]), possible_pos, possible_o, itertools.cycle([max_dim])) # Seems slow :( # multi_pool = Pool(processes=self.no_workers) # predictions = multi_pool.map(overlap_score, args_list) # multi_pool.close() # multi_pool.join() predictions = [overlap_score(args) for args in args_list] predictions = torch.tensor(predictions, dtype=torch.float32) predictions = (predictions - predictions.min()) predictions = predictions / predictions.max() # predictions = torch.nn.Softmax()(predictions) if self.log: # View prob map lane_map_view = (shared_lane_map * 255.).numpy().astype(np.uint8) cv2.imwrite("test2.png", lane_map_view) lane_map_view = cv2.cvtColor(lane_map_view, cv2.COLOR_GRAY2BGR) global ground_pos dist_to_ground = [] possible_pos = np.array(possible_pos) for ix, (x, y) in enumerate(possible_pos): d = np.linalg.norm(np.array([x, y]) - np.array(ground_pos)) dist_to_ground.append(d) lane_map_view[x, y, :2] = 0 lane_map_view[x, y, 2] += int(predictions[ix] * 50.) 
# print(f"Dist: {d} _ prob: {predictions[ix]}") # crop zone around lane_map_view = lane_map_view[p_pos[0]-max_dim: p_pos[0]+max_dim, p_pos[1] - max_dim: p_pos[1] + max_dim ] select = predictions.sort()[1][-3:] plt.scatter(possible_pos[select, 1], possible_pos[select, 0], s=predictions[select]*10) plt.scatter([ground_pos[1]], [ground_pos[0]]) plt.show() # for x, y in possible_pos: # occupancy_map_view[x, y # img = ((occupancy_map > 0) * 255).astype(np.uint8) img = lane_map_view # img = cv2.resize(img, (0, 0), fx=0.8, fy=0.8) cv2.imshow("test", img) cv2.waitKey(0) cv2.destroyAllWindows() class TestPointCloud: def __init__(self, full_cfg): test_cfg = full_cfg.test self.sim_noise_pos = test_cfg.sim_noise_pos self.sim_noise_direction = test_cfg.sim_noise_direction self.accuracy = test_cfg.accuracy self.scale_px_cm = full_cfg.map.scale_px_cm ground_truth_point_path = test_cfg.ground_truth_point_path ground_truth_point_path_img = ground_truth_point_path + ".png" ground_truth_point_path_corners = ground_truth_point_path + ".log" point_cloud = cv2.imread(ground_truth_point_path_img, cv2.IMREAD_UNCHANGED) point_cloud = ((point_cloud.sum(2) > 0) * 255).astype(np.uint8) # Reduce to 1 channel self.point_cloud = point_cloud with open(ground_truth_point_path_corners, "r") as f: ground_pos = eval(f.readline()) ground_orientation = eval(f.readline()) # center left corner self.ground_pos = ground_pos self.ground_orientation = ground_orientation # rotation center rows, cols = point_cloud.shape self.max_dim = int(np.sqrt(rows**2 + cols**2) + 1) self.ground_truth_pos = [rows//2, 0] # img = occupancy_map[corners[0][0]:corners[1][0], corners[0][1]:corners[1][1]] def get_test(self): sim_noise_pos = self.sim_noise_pos / self.scale_px_cm sim_noise_direction = self.sim_noise_direction ground_pos = self.ground_pos ground_orientation = self.ground_orientation point_cloud = self.point_cloud max_dim = self.max_dim # ADD space for rotation rows, cols = point_cloud.shape new_demo_size = (max_dim*2, max_dim*2) demo = np.zeros(new_demo_size, np.uint8) # center to -> r_start = max_dim - rows//2 demo[r_start: r_start+rows, max_dim: max_dim+cols] = point_cloud center_rotation = (max_dim, max_dim) # TODO calculate another ground truth viewport # # Rotate # # conv_matrix = cv2.getRotationMatrix2D(center_rotation, error_angle, 1) # dst = cv2.warpAffine(demo, conv_matrix, new_demo_size) # # # Crop & recalculate ground truth point (rows/2, 0) # crop, margins = cut_empty(dst) # cv2.imshow("test", crop) # cv2.waitKey(0) # cv2.destroyAllWindows() gt_port_img = point_cloud gt_port_pos = ground_pos gt_port_orientation = ground_orientation # Fake gps & orientation error_pos = np.random.uniform(-sim_noise_pos, sim_noise_pos, 2).astype(np.int) x = ground_pos[0] + error_pos[0] y = ground_pos[1] + error_pos[1] error_angle = np.random.uniform(-sim_noise_direction, sim_noise_direction, 1)[0] data_orientation = ground_orientation + error_angle data_pos = [x, y] return (gt_port_img, [data_pos, data_orientation], [gt_port_pos, gt_port_orientation], [sim_noise_pos, sim_noise_direction]) if __name__ == "__main__": try: set_start_method('spawn') except RuntimeError: pass from utils import read_cfg import time config_file = "configs/default.yaml" full_cfg = read_cfg(config_file) map_cfg = full_cfg.map localization_cfg = full_cfg.localization test_generator = TestPointCloud(full_cfg) (gt_port_img, [data_pos, data_orientation], [gt_port_pos, gt_port_orientation], [sim_noise_pos, sim_noise_direction]) = test_generator.get_test() global ground_pos 
ground_pos = gt_port_pos print(sim_noise_pos) localization = LocateMap(map_cfg, localization_cfg) t = [] for i in range(30): st = time.time() localization.locate(gt_port_img, data_pos, data_orientation, sim_noise_pos, sim_noise_direction) t.append(time.time() - st) print(t[-1]) print("---mean---") print(np.mean(t)) # # imshow # img = ((occupancy_map > 0) * 255).astype(np.uint8) # img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5) # cv2.imshow("test", r_img) # cv2.waitKey(0) # cv2.destroyAllWindows()
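# ---------------------------------------------------------------------------
# Editor's note (appended sketch, not part of the original module): each
# particle hypothesis is scored by overlap_score(), which takes one packed
# tuple (full_map, point_cloud, rotation_orig, pos, orientation, max_dim),
# exactly as LocateMap.locate() builds it with itertools.cycle. A minimal
# standalone call with made-up toy tensors would look like this:
#
#   lane_map = torch.zeros((500, 500))   # shared map, intensities in [0, 1]
#   cloud = torch.zeros((40, 60))        # point-cloud view, intensities in [0, 1]
#   max_dim = int(np.sqrt(40 ** 2 + 60 ** 2) + 1)
#   score = overlap_score((lane_map, cloud, (20, 0), (250, 250), 30.0, max_dim))
#
# Higher scores mean the rotated, translated point cloud overlaps brighter
# lane pixels; LocateMap.locate() then shifts and rescales these scores over
# all sampled particles before using them.
# ---------------------------------------------------------------------------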
{"hexsha": "ae60b78b5ae76b419983c715cb1aecf224b1addf", "size": 12236, "ext": "py", "lang": "Python", "max_stars_repo_path": "localization.py", "max_stars_repo_name": "nemodrive/ImprovedLocalization", "max_stars_repo_head_hexsha": "613298f01d152f12aa0c7c3b5f33612a1175b5d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "localization.py", "max_issues_repo_name": "nemodrive/ImprovedLocalization", "max_issues_repo_head_hexsha": "613298f01d152f12aa0c7c3b5f33612a1175b5d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "localization.py", "max_forks_repo_name": "nemodrive/ImprovedLocalization", "max_forks_repo_head_hexsha": "613298f01d152f12aa0c7c3b5f33612a1175b5d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-26T20:36:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T20:36:22.000Z", "avg_line_length": 35.1609195402, "max_line_length": 99, "alphanum_fraction": 0.627983001, "include": true, "reason": "import numpy", "num_tokens": 3068}
using SailRoute, Test @testset "Distances" begin dist, bearing = SailRoute.haversine(-99.436554, 41.507483, -98.315949, 38.504048) @test dist ≈ 187.595 atol=0.01 dist, bearing = SailRoute.euclidean(0.0, 0.0, 10.0, 10.0) @test dist ≈ 14.142 atol=0.01 @test bearing ≈ 45.0 end
{"hexsha": "6d6973337aa6202fc2bbcf27e430fade64aed232", "size": 297, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_domain.jl", "max_stars_repo_name": "TAJD/sail_route.jl", "max_stars_repo_head_hexsha": "8f8548188719564f6634868cbe2687a88df1d4a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-08-03T15:34:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T18:05:16.000Z", "max_issues_repo_path": "test/test_domain.jl", "max_issues_repo_name": "TAJD/SailRoute.jl", "max_issues_repo_head_hexsha": "8f8548188719564f6634868cbe2687a88df1d4a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_domain.jl", "max_forks_repo_name": "TAJD/SailRoute.jl", "max_forks_repo_head_hexsha": "8f8548188719564f6634868cbe2687a88df1d4a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7, "max_line_length": 85, "alphanum_fraction": 0.6632996633, "num_tokens": 122}
"""Tests for epi_forecast_stat_mech.mechanistic_models.observables.""" from absl.testing import absltest from absl.testing import parameterized from epi_forecast_stat_mech.mechanistic_models import mechanistic_models from epi_forecast_stat_mech.mechanistic_models import observables import numpy as np from jax import numpy as jnp from jax.config import config config.parse_flags_with_absl() # Necessary for running on TPU. class ObservablesTest(parameterized.TestCase): # jnp's can't be in arguments, so we cast the expected result after the fact. @parameterized.parameters( dict( observable=observables.ObserveSpecified(['log_K']), mech_model=mechanistic_models.ViboudChowellModel(), mech_params=np.asarray([1., 1., 1., 10.], dtype=np.float32), epidemic=None, expected_result={'log_K': np.float32([10.])}), dict( observable=observables.InternalParams(), mech_model=mechanistic_models.ViboudChowellModel(), mech_params=np.asarray([1., 1., 1., 10.], dtype=np.float32), epidemic=None, expected_result={'log_r': np.float32([1.]), 'log_a': np.float32([1.]), 'log_p': np.float32([1.]), 'log_K': np.float32([10.])}), ) def test_expected(self, observable, mech_model, mech_params, epidemic, expected_result): jnp_expected_result = { key: jnp.asarray(value) for key, value in expected_result.items() } result = observable.observables(mech_model, mech_params, epidemic) self.assertSameStructure(result, jnp_expected_result) for key, value in result.items(): expected_value = jnp_expected_result[key] self.assertAlmostEqual(np.asarray(value), expected_value, places=3) if __name__ == '__main__': absltest.main()
{"hexsha": "95cf75d952d450d82d17f1cc8bb8564c44bf2d38", "size": 1874, "ext": "py", "lang": "Python", "max_stars_repo_path": "epi_forecast_stat_mech/mechanistic_models/observables_test.py", "max_stars_repo_name": "HopkinsIDD/EpiForecastStatMech", "max_stars_repo_head_hexsha": "4ba57edff1ece0c56ec6dfa41eac4cfe4a1c66cb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-04-11T17:24:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T09:28:03.000Z", "max_issues_repo_path": "epi_forecast_stat_mech/mechanistic_models/observables_test.py", "max_issues_repo_name": "HopkinsIDD/EpiForecastStatMech", "max_issues_repo_head_hexsha": "4ba57edff1ece0c56ec6dfa41eac4cfe4a1c66cb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-05-03T23:48:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T12:15:58.000Z", "max_forks_repo_path": "epi_forecast_stat_mech/mechanistic_models/observables_test.py", "max_forks_repo_name": "HopkinsIDD/EpiForecastStatMech", "max_forks_repo_head_hexsha": "4ba57edff1ece0c56ec6dfa41eac4cfe4a1c66cb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-13T18:39:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-14T02:02:54.000Z", "avg_line_length": 39.0416666667, "max_line_length": 79, "alphanum_fraction": 0.6760939168, "include": true, "reason": "import numpy,from jax", "num_tokens": 440}
[STATEMENT] lemma set_to_map_simp : assumes inj_on_fst: "inj_on fst S" shows "(set_to_map S k = Some v) \<longleftrightarrow> (k, v) \<in> S" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] proof (cases "\<exists>v. (k, v) \<in> S") [PROOF STATE] proof (state) goal (2 subgoals): 1. \<exists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) 2. \<nexists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] case True [PROOF STATE] proof (state) this: \<exists>v. (k, v) \<in> S goal (2 subgoals): 1. \<exists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) 2. \<nexists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] note kv_ex = this [PROOF STATE] proof (state) this: \<exists>v. (k, v) \<in> S goal (2 subgoals): 1. \<exists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) 2. \<nexists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<exists>v. (k, v) \<in> S [PROOF STEP] obtain v' where kv'_in: "(k, v') \<in> S" [PROOF STATE] proof (prove) using this: \<exists>v. (k, v) \<in> S goal (1 subgoal): 1. (\<And>v'. (k, v') \<in> S \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by blast [PROOF STATE] proof (state) this: (k, v') \<in> S goal (2 subgoals): 1. \<exists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) 2. \<nexists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] with inj_on_fst [PROOF STATE] proof (chain) picking this: inj_on fst S (k, v') \<in> S [PROOF STEP] have kv''_in: "\<And>v''. (k, v'') \<in> S \<longleftrightarrow> v' = v''" [PROOF STATE] proof (prove) using this: inj_on fst S (k, v') \<in> S goal (1 subgoal): 1. \<And>v''. ((k, v'') \<in> S) = (v' = v'') [PROOF STEP] unfolding inj_on_def Ball_def [PROOF STATE] proof (prove) using this: \<forall>x. x \<in> S \<longrightarrow> (\<forall>xa. xa \<in> S \<longrightarrow> fst x = fst xa \<longrightarrow> x = xa) (k, v') \<in> S goal (1 subgoal): 1. \<And>v''. ((k, v'') \<in> S) = (v' = v'') [PROOF STEP] by auto [PROOF STATE] proof (state) this: ((k, ?v'') \<in> S) = (v' = ?v'') goal (2 subgoals): 1. \<exists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) 2. \<nexists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) goal (1 subgoal): 1. (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] unfolding set_to_map_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. (Eps_Opt (\<lambda>v. (k, v) \<in> S) = Some v) = ((k, v) \<in> S) [PROOF STEP] by (simp add: kv_ex kv''_in) [PROOF STATE] proof (state) this: (set_to_map S k = Some v) = ((k, v) \<in> S) goal (1 subgoal): 1. \<nexists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<nexists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] case False [PROOF STATE] proof (state) this: \<nexists>v. (k, v) \<in> S goal (1 subgoal): 1. \<nexists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] hence kv''_nin: "\<And>v''. 
(k, v'') \<notin> S" [PROOF STATE] proof (prove) using this: \<nexists>v. (k, v) \<in> S goal (1 subgoal): 1. \<And>v''. (k, v'') \<notin> S [PROOF STEP] by simp [PROOF STATE] proof (state) this: (k, ?v'') \<notin> S goal (1 subgoal): 1. \<nexists>v. (k, v) \<in> S \<Longrightarrow> (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] thus ?thesis [PROOF STATE] proof (prove) using this: (k, ?v'') \<notin> S goal (1 subgoal): 1. (set_to_map S k = Some v) = ((k, v) \<in> S) [PROOF STEP] by (simp add: set_to_map_def) [PROOF STATE] proof (state) this: (set_to_map S k = Some v) = ((k, v) \<in> S) goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 2006, "file": "Automatic_Refinement_Lib_Misc", "length": 20}
""" test_util_misc.py Author: Jordan Mirocha Affiliation: McGill Created on: Tue 24 Mar 2020 21:31:12 EDT Description: """ import ares import numpy as np def test(): """ Run through miscellaneous functions and make sure they run to completion for a variety of cases. """ fake_sys_argv = ['script_name', 'param1=a_string', 'param2=2', 'param3=4.0', 'param4=1e10', 'param5=True', 'param6=None', 'param7=[1,2,3]'] kw = ares.util.Misc.get_cmd_line_kwargs(fake_sys_argv) kw_types = [type(kw[key]) for key in kw.keys()] # Check these somehow? ares_rev = ares.util.Misc.get_rev() n_nu_1 = ares.util.Misc.num_freq_bins(100) n_nu_2 = ares.util.Misc.num_freq_bins(200) assert n_nu_2 > n_nu_1 #class FakeClass(object): # self.a = 1 # self.b = 2 # #result = ares.util.Misc.get_attribute('t) #assert s1 x = np.arange(100) y = np.sin(x) xch, ych = ares.util.Misc.split_by_sign(x, y) ct = 0 for element in xch: ct += len(element) assert ct == len(x) if __name__ == '__main__': test()
{"hexsha": "8ee874e6b56e73195e7142069a534c4e3dd38bc7", "size": 1144, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_util_misc.py", "max_stars_repo_name": "eklem1/ares", "max_stars_repo_head_hexsha": "df39056065f0493e3c922fb50ced2dc6d1bc79a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_util_misc.py", "max_issues_repo_name": "eklem1/ares", "max_issues_repo_head_hexsha": "df39056065f0493e3c922fb50ced2dc6d1bc79a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_util_misc.py", "max_forks_repo_name": "eklem1/ares", "max_forks_repo_head_hexsha": "df39056065f0493e3c922fb50ced2dc6d1bc79a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1851851852, "max_line_length": 81, "alphanum_fraction": 0.6048951049, "include": true, "reason": "import numpy", "num_tokens": 353}
#!/usr/bin/python3 # -*- coding:utf-8 -*- # Project: http://cloudedbats.org, https://github.com/cloudedbats # Copyright (c) 2021-present Arnold Andreasson # License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit). import asyncio import logging import numpy import alsaaudio class SoundCapture: """ """ def __init__(self, logger="DefaultLogger"): """ """ self.config = None self.out_queue_list = [] self.card_index = None self.sampling_freq_hz = None self.channels = None self.buffer_size = None self.period_size = None self.main_loop = None self.capture_executor = None self.logger = logging.getLogger(logger) def setup(self, card_index, config): """ """ self.config = config self.card_index = int(card_index) # List of out data queues. self.out_queue_list = [] # Setup for sound capture. self.sampling_freq_hz = int(self.config["sampling_freq_hz"]) self.channels = self.config["channels"] self.buffer_size = int(self.config["buffer_size"]) self.period_size = int(self.config["period_size"]) def add_out_queue(self, out_queue): """ """ self.out_queue_list.append(out_queue) async def start(self): """ """ # Use executor for the IO-blocking part. self.main_loop = asyncio.get_event_loop() self.capture_executor = self.main_loop.run_in_executor(None, self.run_capture) async def stop(self): """ """ self.capture_active = False if self.capture_executor: self.capture_executor.cancel() self.capture_executor = None def run_capture(self): """ """ pmc_capture = None self.capture_active = True channels = 1 if self.channels.upper() == "STEREO": channels = 2 try: pmc_capture = alsaaudio.PCM( alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, channels=channels, rate=self.sampling_freq_hz, format=alsaaudio.PCM_FORMAT_S16_LE, periodsize=self.period_size, device="sysdefault", cardindex=self.card_index, ) # Empty numpy buffer. in_buffer_int16 = numpy.array([], dtype=numpy.int16) while self.capture_active: # Read from capture device. length, data = pmc_capture.read() if length < 0: self.logger.debug("Sound capture overrun: " + str(length)) elif len(data) > 0: # Convert from string-byte array to int16 array. in_data_int16 = numpy.frombuffer(data, dtype=numpy.int16) # print("CAPTURE: length: ", length, " data-len: ", len(in_data_int16)) # Convert stereo to mono by using either left or right channel. if self.channels.upper() == "MONO-LEFT": in_data_int16 = in_data_int16[0::2].copy() if self.channels.upper() == "MONO-RIGHT": in_data_int16 = in_data_int16[1::2].copy() # Concatenate in_buffer_int16 = numpy.concatenate( (in_buffer_int16, in_data_int16) ) while len(in_buffer_int16) >= self.buffer_size: # Copy "buffer_size" part and save remaining part. data_int16 = in_buffer_int16[0 : self.buffer_size] in_buffer_int16 = in_buffer_int16[self.buffer_size :] # Put data on queues in the queue list. for data_queue in self.out_queue_list: # Copy data. data_int16_copy = data_int16.copy() # Put together. data_dict = { "status": "data", "data": data_int16_copy, } try: if not data_queue.full(): self.main_loop.call_soon_threadsafe( data_queue.put_nowait, data_dict ) else: self.logger.debug("Sound capture: Queue full.") # except Exception as e: # Logging error. message = ( "Failed to put captured sound on queue: " + str(e) ) self.logger.error(message) if not self.main_loop.is_running(): # Terminate. self.capture_active = False break # except Exception as e: self.logger.error("EXCEPTION Sound capture: " + str(e)) finally: self.capture_active = False if pmc_capture: pmc_capture.close() self.logger.debug("Sound capture ended.")
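# ---------------------------------------------------------------------------
# Editor's usage sketch (appended, not part of the original module; the card
# index and config values below are illustrative only). setup() reads exactly
# the keys used above from a plain dict, and every queue registered with
# add_out_queue() receives blocks of the form
# {"status": "data", "data": <int16 numpy array of length buffer_size>}:
#
#   async def demo():
#       capture = SoundCapture()
#       capture.setup(card_index=0, config={
#           "sampling_freq_hz": 384000, "channels": "MONO-LEFT",
#           "buffer_size": 38400, "period_size": 2400})
#       out_queue = asyncio.Queue(maxsize=100)
#       capture.add_out_queue(out_queue)
#       await capture.start()
#       item = await out_queue.get()   # first captured block
#       await capture.stop()
#
#   # asyncio.run(demo())
# ---------------------------------------------------------------------------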
{"hexsha": "c1c083c20bd00c96c703883607922564ceb5492f", "size": 5469, "ext": "py", "lang": "Python", "max_stars_repo_path": "pathfinder/sound_capture.py", "max_stars_repo_name": "cloudedbats/cloudedbats_pathfinder", "max_stars_repo_head_hexsha": "c28ec8df0674e4e9bdf6d60a2ee658ae6615d398", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pathfinder/sound_capture.py", "max_issues_repo_name": "cloudedbats/cloudedbats_pathfinder", "max_issues_repo_head_hexsha": "c28ec8df0674e4e9bdf6d60a2ee658ae6615d398", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pathfinder/sound_capture.py", "max_forks_repo_name": "cloudedbats/cloudedbats_pathfinder", "max_forks_repo_head_hexsha": "c28ec8df0674e4e9bdf6d60a2ee658ae6615d398", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6304347826, "max_line_length": 93, "alphanum_fraction": 0.4905832876, "include": true, "reason": "import numpy", "num_tokens": 1009}
#ifndef pcw_HocrPageParser_hpp__ #define pcw_HocrPageParser_hpp__ #include <boost/filesystem/path.hpp> #include <memory> #include "pugixml.hpp" #include "PageParser.hpp" #include "Xml.hpp" namespace pcw { class Box; class ParserPage; class XmlParserPage; using ParserPagePtr = std::shared_ptr<ParserPage>; using XmlParserPagePtr = std::shared_ptr<XmlParserPage>; class HocrPageParser: public PageParser, public pugi::xml_tree_walker { public: HocrPageParser(Xml xml); HocrPageParser(const Path& path); virtual ~HocrPageParser() noexcept override = default; virtual bool has_next() const noexcept override { return page_node_; } virtual ParserPagePtr parse() override; virtual bool begin(Xml::Node& node) override; virtual bool for_each(Xml::Node& node) override; private: void next_page(); Xml xml_; Path path_; Xml::Node page_node_; XmlParserPagePtr page_; }; } #endif // pcw_HocrPageParser_hpp__
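// ---------------------------------------------------------------------------
// Editor's usage sketch (appended, not part of the original header; based
// only on the interface declared above, so the details are assumptions): the
// has_next()/parse() pair suggests page-by-page iteration over an hOCR file,
// roughly:
//
//   pcw::HocrPageParser parser(path);    // path: a boost::filesystem::path
//   while (parser.has_next()) {
//       pcw::ParserPagePtr page = parser.parse();
//       // ... consume the parsed page ...
//   }
// ---------------------------------------------------------------------------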
{"hexsha": "a5a0eced45260eb004f42d7561495c81477bc0fa", "size": 951, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "rest/src/parser/HocrPageParser.hpp", "max_stars_repo_name": "cisocrgroup/pocoweb", "max_stars_repo_head_hexsha": "93546d026321744602f6ee90fd82503da56da3b7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-04-09T20:46:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-07T17:29:02.000Z", "max_issues_repo_path": "rest/src/parser/HocrPageParser.hpp", "max_issues_repo_name": "cisocrgroup/pocoweb", "max_issues_repo_head_hexsha": "93546d026321744602f6ee90fd82503da56da3b7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 61.0, "max_issues_repo_issues_event_min_datetime": "2018-01-03T09:49:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T12:26:11.000Z", "max_forks_repo_path": "rest/src/parser/HocrPageParser.hpp", "max_forks_repo_name": "cisocrgroup/pocoweb", "max_forks_repo_head_hexsha": "93546d026321744602f6ee90fd82503da56da3b7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-01-10T15:44:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-19T13:39:53.000Z", "avg_line_length": 23.1951219512, "max_line_length": 57, "alphanum_fraction": 0.7518401682, "num_tokens": 243}
************************************ * Friction and mobility * * for a hard-sphere configuration * * Module RIGID * * * * K. Hinsen * * Last revision: June 16, 1994 * ************************************ * Calculate connection matrix from rigid body list subroutine cnctm(cl) implicit none integer cl(*) integer npart parameter (npart = _NP_) double precision c(0:2,npart) character*1 cnct(npart,npart) common /conf/ c,cnct integer i,j do 100 i = 1,npart do 101 j = 1,npart cnct(i,j) = ' ' if (cl(i) .eq. cl(j)) cnct(i,j) = '+' 101 continue 100 continue end * construct list of rigid bodies from connection matrix subroutine rgdbd(cl,ncl) implicit none integer cl(*),ncl integer npart parameter (npart = _NP_) double precision c(0:2,npart) character*1 cnct(npart,npart) common /conf/ c,cnct integer i,j,k,no,nn do 100 i = 1,npart cl(i) = 0 100 continue ncl = 0 do 110 i = 1,npart if (cl(i) .eq. 0) then cl(i) = i ncl = ncl + 1 endif do 111 j = i+1,npart if (cnct(i,j) .ne. ' ' .and. cl(i) .ne. cl(j)) then if (cl(j) .eq. 0) then cl(j) = cl(i) else no = cl(j) nn = cl(i) do 112 k = 1,npart if (cl(k) .eq. no) cl(k) = nn 112 continue ncl = ncl - 1 endif endif 111 continue 110 continue call cnctm(cl) end * calculate rigid body velocity matrix subroutine velmat implicit none integer npart parameter (npart = _NP_) double precision c(0:2,npart) character*1 cnct(npart,npart) common /conf/ c,cnct double precision vrb(6*npart,6*npart) integer nrb,irb(npart) common /rb/ vrb,nrb,irb integer cl(npart),np(npart) double precision re(0:2,npart) integer i,j,k double precision x,y,z call rgdbd(cl,nrb) do 100 i = 1,npart np(i) = 0 re(0,i) = 0.d0 re(1,i) = 0.d0 re(2,i) = 0.d0 100 continue do 110 i = 1,npart j = cl(i) np(j) = np(j) + 1 re(0,j) = re(0,j) + c(0,i) re(1,j) = re(1,j) + c(1,i) re(2,j) = re(2,j) + c(2,i) 110 continue do 120 i = 1,npart j = cl(i) re(0,i) = re(0,j) re(1,i) = re(1,j) re(2,i) = re(2,j) 120 continue do 130 i = 1,npart j = cl(i) re(0,i) = re(0,i)/np(j) re(1,i) = re(1,i)/np(j) re(2,i) = re(2,i)/np(j) 130 continue j = 1 do 140 i = 1,npart if (np(i) .gt. 0) then np(i) = j j = j + 1 endif 140 continue do 150 i = 1,npart irb(i) = np(cl(i)) 150 continue do 200 i = 1,6*npart do 201 j = 1,6*nrb vrb(i,j) = 0.d0 201 continue 200 continue do 210 i = 0,npart-1 j = irb(i+1)-1 do 211 k = 1,6 vrb(6*i+k,6*j+k) = 1.d0 211 continue x = c(0,i+1)-re(0,i+1) y = c(1,i+1)-re(1,i+1) z = c(2,i+1)-re(2,i+1) vrb(6*i+2,6*j+6) = x vrb(6*i+3,6*j+5) = -x vrb(6*i+3,6*j+4) = y vrb(6*i+1,6*j+6) = -y vrb(6*i+1,6*j+5) = z vrb(6*i+2,6*j+4) = -z 210 continue end * calculate reduced friction and mobility matrices subroutine evalfr implicit none integer npart parameter (npart = _NP_) double precision vrb(6*npart,6*npart) integer nrb,irb(npart) common /rb/ vrb,nrb,irb double precision fr(6*npart,6*npart) common /frict/ fr double precision frrb(6*npart,6*npart) common frrb integer i,j,k,l call velmat #ifdef MOBILITY_DIRECT if (nrb .eq. npart) then call evalm else #endif call evalf if (nrb .ne. npart) then do 100 i = 1,6*nrb do 101 j = 1,6*nrb frrb(i,j) = 0.d0 do 102 k = 1,6*npart do 103 l = 1,6*npart frrb(i,j) = frrb(i,j) + vrb(k,i)*fr(k,l)*vrb(l,j) 103 continue 102 continue 101 continue 100 continue do 110 i = 1,6*nrb do 111 j = 1,6*nrb fr(i,j) = frrb(i,j) 111 continue 110 continue endif #ifdef VELOCITIES_RB call solvev #endif #ifdef MOBILITY_RB call inv(nrb) #endif #ifdef NORMALIZE_RB call norm(nrb) #endif #ifdef MOBILITY_DIRECT endif #endif end
{"hexsha": "1f50121f9ecd2af6731c1d90bbcfeb4860d85502", "size": 5057, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "hydrolib/rigid_dp.f", "max_stars_repo_name": "khinsen/HYDROLIB", "max_stars_repo_head_hexsha": "c0e7834a17c7046a6e4c0a27b980aaf5919aecd5", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-12T22:57:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T12:52:41.000Z", "max_issues_repo_path": "hydrolib/rigid_dp.f", "max_issues_repo_name": "khinsen/HYDROLIB", "max_issues_repo_head_hexsha": "c0e7834a17c7046a6e4c0a27b980aaf5919aecd5", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hydrolib/rigid_dp.f", "max_forks_repo_name": "khinsen/HYDROLIB", "max_forks_repo_head_hexsha": "c0e7834a17c7046a6e4c0a27b980aaf5919aecd5", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-12T22:57:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-12T22:57:46.000Z", "avg_line_length": 21.7974137931, "max_line_length": 70, "alphanum_fraction": 0.4449278228, "num_tokens": 1639}
import ggg t : Int t = yyy
{"hexsha": "486aa1e35123da7398df5c0ee5fabb173e96d694", "size": 28, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "tests/idris2/perror008/Issue1224b.idr", "max_stars_repo_name": "ska80/idris-jvm", "max_stars_repo_head_hexsha": "66223d026d034578876b325e9fcd95874faa6052", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 396, "max_stars_repo_stars_event_min_datetime": "2016-07-17T08:00:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T22:47:13.000Z", "max_issues_repo_path": "tests/idris2/perror008/Issue1224b.idr", "max_issues_repo_name": "ska80/idris-jvm", "max_issues_repo_head_hexsha": "66223d026d034578876b325e9fcd95874faa6052", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 54, "max_issues_repo_issues_event_min_datetime": "2016-08-04T06:13:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-03T04:00:31.000Z", "max_forks_repo_path": "tests/idris2/perror008/Issue1224b.idr", "max_forks_repo_name": "ska80/idris-jvm", "max_forks_repo_head_hexsha": "66223d026d034578876b325e9fcd95874faa6052", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2016-09-15T15:19:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T13:05:48.000Z", "avg_line_length": 5.6, "max_line_length": 10, "alphanum_fraction": 0.6071428571, "num_tokens": 13}
#--------------------------------------------------------------------------- # # BinaryStrings.py: a module to manipulate binary strings as integers # # by Lidia Yamamoto, Belgium, July 2013 # # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # # Copyright (C) 2015 Lidia A. R. Yamamoto # Contact: http://www.artificial-chemistries.org/ # # This file is part of PyCellChemistry. # # PyCellChemistry is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License # version 3, as published by the Free Software Foundation. # # PyCellChemistry is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with PyCellChemistry, see file COPYING. If not, see # http://www.gnu.org/licenses/ # import numpy as np import sys import time def randprob(): """ random floating point number between 0 and 1 """ return np.random.random() def dice(prob): """ throw a dice: true if the dice falls within the given probability """ if prob == 1.0: return True if prob <= 0.0: return False d = randprob() return (d <= prob) def randbin( nbits, p1=0.5 ): """ random integer with a given number of bits 'nbits' bits are uniformly distributed, with a possible bias: p1 = probability of a one p0 = 1 - p1 = probability of a 0 """ if p1 == 0.5: # unbiased coin if (nbits <= 16): return np.random.randint(2**nbits) n = 0 while (nbits > 0): b = min(nbits, 16) x = np.random.randint(2**b) n = (n << b) + x nbits -= b else: # biased coin: CAUTION! can be over 10 times slower than unbiased! 
n = 0 for i in range(nbits): r = np.random.random() if (r < p1): b = 1 else: b = 0 n = (n << 1) + b return n def getbitvalue(binstr, pos): """ get value of the bit at position 'pos' in binary string 'binstr' """ return (binstr >> pos) & 1 def flipbit( binstr, pos, nbits ): """ flip a given bit in the string """ mask = (1 << pos) return (binstr ^ mask) def getsegment(binstr, p0, p1): """ get value of segment from p0 to p1 in binary string 'binstr' """ seg = 0 for p in range(p1 - 1, p0 - 1, -1): b = getbitvalue(binstr, p) seg = (seg << 1) | b return seg def mutate1( binstr, nbits ): """ mutate a random bit in the string """ pos = np.random.randint(nbits) return flipbit(binstr, pos, nbits) def mutatep( binstr, nbits, prob ): """ mutate each bit in the string with a given probability caution: this method is computationally much slower than mutate1() """ if prob <= 0 or nbits <= 0: return binstr mask = 1 newb = binstr for i in range(nbits): if dice(prob): newb = (newb ^ mask) mask = (mask << 1) return newb def crossover( s1, s2, nbits ): """ crossover between two binary strings s1 and s2 at a random position; returns two children c1 and c2 """ pos = np.random.randint(1, nbits) # cut in 2 segments at position pos m2 = 2**nbits - 1 m1 = 2**pos - 1 # mask for lower segment [0;pos-1] m2 = (m2 & ~ m1) # mask for upper segment [pos;len-1] #print "pos=", pos, "m1=", bin(m1), "m2=", bin(m2) c1 = ((s1 & m2) | (s2 & m1)) # child1 = upper(s1) + lower(s2) c2 = ((s2 & m2) | (s1 & m1)) # child2 = upper(s2) + lower(s1) return (c1, c2) def count_ones( binstr ): """ number of ones in a binary string """ n = 0 while (binstr != 0): binstr = (binstr & (binstr - 1)) n += 1 return n def hamming( x, y ): """ Hamming distance between two integers x and y """ n = x ^ y return count_ones(n) def entropy( binstr, nbits ): """ entropy of a binary string CAUTION: for a string of nbits, there are only nbits/2 + 1 different entropy levels 0 <= entropy <= 1 including 0.0 (for p0 = 0 or p0 = 1) and 1.0 (for p0=p1=0.5) """ n1 = count_ones(binstr) if n1 > nbits: raise ValueError('bad bit count for entropy calculation') p1 = 1.0 * n1 / nbits p0 = 1 - p1 if p0 == 0 or p1 == 0: return 0.0 else: return - p0 * np.log2(p0) - p1 * np.log2(p1) def int2str( binstr, nbits ): """ converts an integer to a string containing the binary number in ascii form, without prefix '0b' and filled with leading zeros up to nbits """ return str(bin(binstr))[2:].zfill(nbits)
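# ---------------------------------------------------------------------------
# Editor's illustration (appended, not part of the original module): small
# worked examples for the deterministic helpers above, with concrete values.
#
#   >>> count_ones(0b10110)
#   3
#   >>> hamming(0b1010, 0b0110)    # xor = 0b1100, i.e. two differing bits
#   2
#   >>> entropy(0b0011, 4)         # two ones in four bits, p0 = p1 = 0.5
#   1.0
#   >>> int2str(5, 8)
#   '00000101'
#
# randbin(), mutate1(), mutatep() and crossover() are stochastic, so their
# outputs depend on the numpy random state.
# ---------------------------------------------------------------------------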
{"hexsha": "ab0e47f81102dae07e272d2c9923898775c42ffa", "size": 4897, "ext": "py", "lang": "Python", "max_stars_repo_path": "frameworks/pycellchem-2.0/src/artchem/BinaryStrings.py", "max_stars_repo_name": "danielrcardenas/ac-course-2017", "max_stars_repo_head_hexsha": "515512f27b5fd2ff8693eba1b3d8050b1580ec93", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "frameworks/pycellchem-2.0/src/artchem/BinaryStrings.py", "max_issues_repo_name": "danielrcardenas/ac-course-2017", "max_issues_repo_head_hexsha": "515512f27b5fd2ff8693eba1b3d8050b1580ec93", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "frameworks/pycellchem-2.0/src/artchem/BinaryStrings.py", "max_forks_repo_name": "danielrcardenas/ac-course-2017", "max_forks_repo_head_hexsha": "515512f27b5fd2ff8693eba1b3d8050b1580ec93", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9936708861, "max_line_length": 77, "alphanum_fraction": 0.5662650602, "include": true, "reason": "import numpy", "num_tokens": 1474}
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.tf.Cholesky.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf class CholeskyOpTest(tf.test.TestCase): def _verifyCholesky(self, x): with self.test_session() as sess: # Verify that LL^T == x. if x.ndim == 2: chol = tf.cholesky(x) verification = tf.matmul(chol, chol, transpose_a=False, transpose_b=True) else: chol = tf.batch_cholesky(x) verification = tf.batch_matmul(chol, chol, adj_x=False, adj_y=True) chol_np, verification_np = sess.run([chol, verification]) self.assertAllClose(x, verification_np) self.assertShapeEqual(x, chol) # Check that the cholesky is lower triangular, and has positive diagonal # elements. if chol_np.shape[-1] > 0: chol_reshaped = np.reshape(chol_np, (-1, chol_np.shape[-2], chol_np.shape[-1])) for chol_matrix in chol_reshaped: self.assertAllClose(chol_matrix, np.tril(chol_matrix)) self.assertTrue((np.diag(chol_matrix) > 0.0).all()) def testBasic(self): self._verifyCholesky(np.array([[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]])) def testBatch(self): simple_array = np.array([[[1., 0.], [0., 5.]]]) # shape (1, 2, 2) self._verifyCholesky(simple_array) self._verifyCholesky(np.vstack((simple_array, simple_array))) odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]]) self._verifyCholesky(np.vstack((odd_sized_array, odd_sized_array))) # Generate random positive-definite matrices. matrices = np.random.rand(10, 5, 5) for i in xrange(10): matrices[i] = np.dot(matrices[i].T, matrices[i]) self._verifyCholesky(matrices) def testNonSquareMatrix(self): with self.assertRaises(ValueError): tf.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]])) def testWrongDimensions(self): tensor3 = tf.constant([1., 2.]) with self.assertRaises(ValueError): tf.cholesky(tensor3) def testNotInvertible(self): # The input should be invertible. with self.test_session(): with self.assertRaisesOpError("LLT decomposition was not successful. The " "input might not be valid."): # All rows of the matrix below add to zero self._verifyCholesky(np.array([[1., -1., 0.], [-1., 1., -1.], [0., -1., 1.]])) def testEmpty(self): self._verifyCholesky(np.empty([0, 2, 2])) self._verifyCholesky(np.empty([2, 0, 0])) if __name__ == "__main__": tf.test.main()
{"hexsha": "e0c075f9e56cf677f3976990385c5a60bea5f609", "size": 3547, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/python/kernel_tests/cholesky_op_test.py", "max_stars_repo_name": "jylinman/tensorflow", "max_stars_repo_head_hexsha": "5248d111c3aeaf9f560cd77bff0f183f38e31e0b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2016-03-10T11:55:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-03T02:49:11.000Z", "max_issues_repo_path": "tensorflow/python/kernel_tests/cholesky_op_test.py", "max_issues_repo_name": "jylinman/tensorflow", "max_issues_repo_head_hexsha": "5248d111c3aeaf9f560cd77bff0f183f38e31e0b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow/python/kernel_tests/cholesky_op_test.py", "max_forks_repo_name": "jylinman/tensorflow", "max_forks_repo_head_hexsha": "5248d111c3aeaf9f560cd77bff0f183f38e31e0b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 39, "max_forks_repo_forks_event_min_datetime": "2016-03-25T05:13:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-16T01:30:53.000Z", "avg_line_length": 38.1397849462, "max_line_length": 80, "alphanum_fraction": 0.6137581054, "include": true, "reason": "import numpy", "num_tokens": 911}
import pytest import numpy as np from keras.utils.test_utils import layer_test from keras import layers def test_flatten(): def test_4d(): np_inp_channels_last = np.arange(24, dtype='float32').reshape((1, 4, 3, 2)) np_output_cl = layer_test(layers.Flatten, kwargs={'data_format': 'channels_last'}, input_data=np_inp_channels_last) np_inp_channels_first = np.transpose(np_inp_channels_last, [0, 3, 1, 2]) np_output_cf = layer_test(layers.Flatten, kwargs={'data_format': 'channels_first'}, input_data=np_inp_channels_first, expected_output=np_output_cl) def test_3d(): np_inp_channels_last = np.arange(12, dtype='float32').reshape( (1, 4, 3)) np_output_cl = layer_test(layers.Flatten, kwargs={'data_format': 'channels_last'}, input_data=np_inp_channels_last) np_inp_channels_first = np.transpose(np_inp_channels_last, [0, 2, 1]) np_output_cf = layer_test(layers.Flatten, kwargs={'data_format': 'channels_first'}, input_data=np_inp_channels_first, expected_output=np_output_cl) def test_5d(): np_inp_channels_last = np.arange(120, dtype='float32').reshape( (1, 5, 4, 3, 2)) np_output_cl = layer_test(layers.Flatten, kwargs={'data_format': 'channels_last'}, input_data=np_inp_channels_last) np_inp_channels_first = np.transpose(np_inp_channels_last, [0, 4, 1, 2, 3]) np_output_cf = layer_test(layers.Flatten, kwargs={'data_format': 'channels_first'}, input_data=np_inp_channels_first, expected_output=np_output_cl) test_3d() test_4d() test_5d() if __name__ == '__main__': pytest.main([__file__])
{"hexsha": "51ba0f7ac9df9e3b01107547afeb1950308cdb54", "size": 2555, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/587/test_flattening.py", "max_stars_repo_name": "CSUN-COMP587-F18/keras", "max_stars_repo_head_hexsha": "5beed1cae876f570f92ee49b532aef2f97176bb2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/587/test_flattening.py", "max_issues_repo_name": "CSUN-COMP587-F18/keras", "max_issues_repo_head_hexsha": "5beed1cae876f570f92ee49b532aef2f97176bb2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2018-09-27T23:03:18.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-05T23:32:33.000Z", "max_forks_repo_path": "tests/587/test_flattening.py", "max_forks_repo_name": "CSUN-COMP587-F18/keras", "max_forks_repo_head_hexsha": "5beed1cae876f570f92ee49b532aef2f97176bb2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5, "max_line_length": 83, "alphanum_fraction": 0.4598825832, "include": true, "reason": "import numpy", "num_tokens": 452}
#!pip install grpcio==1.24.3 #!pip install tensorflow==2.2.0 import tensorflow as tf if not tf.__version__ == '2.2.0': print(tf.__version__) raise ValueError('please upgrade to TensorFlow 2.2.0, or restart your Kernel (Kernel->Restart & Clear Output)') tf.executing_eagerly() from tensorflow.python.framework.ops import disable_eager_execution disable_eager_execution() tf.executing_eagerly() import numpy as np a = tf.constant(np.array([1., 2., 3.])) type(a) b = tf.constant(np.array([4.,5.,6.])) c = tf.tensordot(a, b, 1) type(c) print(c) session = tf.compat.v1.Session() output = session.run(c) session.close() print(output) import tensorflow as tf import numpy as np from tensorflow.python.framework.ops import enable_eager_execution enable_eager_execution() x = [[4]] m = tf.matmul(x, x) print("Result, {}".format(m)) a = tf.constant(np.array([1., 2., 3.])) type(a) print(a.numpy()) b = tf.constant(np.array([4.,5.,6.])) c = tf.tensordot(a, b,1) type(c) print(c.numpy()) def fizzbuzz(max_num): counter = tf.constant(0) max_num = tf.convert_to_tensor(max_num) for num in range(1, max_num.numpy()+1): num = tf.constant(num) if int(num % 3) == 0 and int(num % 5) == 0: print('FizzBuzz') elif int(num % 3) == 0: print('Fizz') elif int(num % 5) == 0: print('Buzz') else: print(num.numpy()) counter += 1 fizzbuzz(15)
{"hexsha": "7847a78cfca08fa09519856970481771f5dceab4", "size": 1394, "ext": "py", "lang": "Python", "max_stars_repo_path": "tf_eager.py", "max_stars_repo_name": "indervirbanipal/building-deep-learning-models-with-tensorflow", "max_stars_repo_head_hexsha": "6317f5d96bfe4e88f3c357c43a20d86a87a4a35e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tf_eager.py", "max_issues_repo_name": "indervirbanipal/building-deep-learning-models-with-tensorflow", "max_issues_repo_head_hexsha": "6317f5d96bfe4e88f3c357c43a20d86a87a4a35e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tf_eager.py", "max_forks_repo_name": "indervirbanipal/building-deep-learning-models-with-tensorflow", "max_forks_repo_head_hexsha": "6317f5d96bfe4e88f3c357c43a20d86a87a4a35e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.8059701493, "max_line_length": 115, "alphanum_fraction": 0.6614060258, "include": true, "reason": "import numpy", "num_tokens": 416}
#' State-Dependent Memory-Less Adaptive Transition Kernel #' #' @param x Current state #' @param f Objective function #' @param gr (optional) Gradient of the objective function. #' @param rz Random number function. #' @param dz Fensity function of `z`. #' @param rz.args List of parameters passed to `rz`. #' @param ... Further arguments passed to `f` and, if specified, `gr` #' @param alpha Number in `[0, 1)`. #' @param eps If no `gr` is specified, epsilon used to approximate the gradient. #' @noRd # sdml_adaptive_kernel <- function( # x, # f, # gr, # rz = stats::rnorm, # dz = stats::dnorm, # rz.args = list(mean=0, sd=1), # ..., # alpha = .9, # eps = rep(1e-5, length(x)) # ) { # # # Number of parameters # k <- length(x) # # # Evaluating functions # fx <- f(x, ...) # # # Gradient function # gx <- if (missing(gr)) # (f(x + eps, ...) - fx)/ # (2 * eps) # else # gr(x, ...) # # # # Proposal # beta_x <- 1 - stats::plogis(gx)*alpha # x_new <- x + do.call(rz, c(list(k), rz.args))*beta_x # fx_new <- f(x_new, ...) # # # New gradient # gx_new <- if (missing(gr)) # (f(x_new + eps, ...) - fx_new)/ # (2 * eps) # else # gr(x_new, ...) # # # Hastings ratio # beta_x_new <- 1 - stats::plogis(gx_new)*alpha # fz_x_x_new <- do.call(dz, c(list((x_new - x)/beta_x), rz.args))/beta_x # fz_x_new_x <- do.call(dz, c(list((x - x_new)/beta_x_new), rz.args))/beta_x_new # # list( # x = x_new, # h = min(1, fx_new * fz_x_x_new/(fx * fz_x_new_x + 1e-15)) # ) # # }
{"hexsha": "1510332a75d2d459a00d31a1c69c265fe4ceae2e", "size": 1585, "ext": "r", "lang": "R", "max_stars_repo_path": "R/kernel_sdml.r", "max_stars_repo_name": "arokem/fmcmc", "max_stars_repo_head_hexsha": "2b897e6978d1b23107d3e88d39abb68f4ad3ab97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-20T17:37:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-20T17:37:05.000Z", "max_issues_repo_path": "R/kernel_sdml.r", "max_issues_repo_name": "arokem/fmcmc", "max_issues_repo_head_hexsha": "2b897e6978d1b23107d3e88d39abb68f4ad3ab97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/kernel_sdml.r", "max_forks_repo_name": "arokem/fmcmc", "max_forks_repo_head_hexsha": "2b897e6978d1b23107d3e88d39abb68f4ad3ab97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1587301587, "max_line_length": 82, "alphanum_fraction": 0.5520504732, "num_tokens": 543}
from openchem.models.SiameseModel import SiameseModel from openchem.modules.embeddings.basic_embedding import Embedding from openchem.modules.encoders.rnn_encoder import RNNEncoder from openchem.modules.encoders.gcn_encoder import GraphCNNEncoder from openchem.modules.mlp.openchem_mlp import OpenChemMLP, OpenChemMLPSimple from openchem.data.siamese_data_layer import SiameseDataset from openchem.utils.utils import identity import torch import torch.nn as nn import numpy as np from torch.optim import RMSprop, Adam from torch.optim.lr_scheduler import ExponentialLR, StepLR import torch.nn.functional as F from sklearn.metrics import roc_auc_score, mean_squared_error, r2_score, f1_score from openchem.data.utils import read_smiles_property_file data = read_smiles_property_file('./benchmark_datasets/reactions/4_11_with_y2.csv', cols_to_read=[11, 12, 14], keep_header=False) reactant1 = data[0] reactant2 = data[1] labels = np.array(data[2], dtype="float").reshape(-1, 1) reactants = [reactant1[i] + " " + reactant2[i] for i in range(len(reactant2))] from openchem.data.utils import get_tokens tokens, _, _ = get_tokens(reactants) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(reactants, labels, test_size=0.2, random_state=42) y_mean = np.mean(y_train) y_std = np.std(y_train) y_train = (y_train - y_mean) / y_std y_test = (y_test - y_mean) / y_std from openchem.data.utils import save_smiles_property_file save_smiles_property_file('./benchmark_datasets/reactions/train.smi', X_train, y_train, delimiter=" ") save_smiles_property_file('./benchmark_datasets/reactions/test.smi', X_test, y_test, delimiter=" ") from openchem.data.smiles_data_layer import SmilesDataset head1_arguments = { "tokens": tokens, "delimiter": " ", "sanitize": False } head2_arguments = { "tokens": tokens, "delimiter": " ", "sanitize": False } train_dataset = SiameseDataset('./benchmark_datasets/reactions/train.smi', head1_type='smiles', head2_type='smiles', cols_to_read=[0, 1, 2], head1_arguments=head1_arguments, head2_arguments=head2_arguments ) test_dataset = SiameseDataset('./benchmark_datasets/reactions/test.smi', head1_type='smiles', head2_type='smiles', cols_to_read=[0, 1, 2], head1_arguments=head1_arguments, head2_arguments=head2_arguments ) model = SiameseModel model_params = { 'use_cuda': True, 'task': 'classification', 'random_seed': 5, 'use_clip_grad': True, 'max_grad_norm': 10.0, 'batch_size': 256, 'num_epochs': 51, 'logdir': './logs/reactions', 'print_every': 1, 'save_every': 1, 'train_data_layer': train_dataset, 'val_data_layer': test_dataset, 'eval_metrics': f1_score, 'criterion': nn.CrossEntropyLoss(),#nn.MSELoss(), 'optimizer': Adam, 'optimizer_params': { 'lr': 0.001, }, 'lr_scheduler': StepLR, 'lr_scheduler_params': { 'step_size': 10, 'gamma': 0.8 }, 'head1_embedding': Embedding, 'head1_embedding_params': { 'num_embeddings': len(tokens), 'embedding_dim': 128, 'padding_idx': tokens.index(' ') }, 'head2_embedding': Embedding, 'head2_embedding_params': { 'num_embeddings': len(tokens), 'embedding_dim': 128, 'padding_idx': tokens.index(' ') }, 'head1_encoder': RNNEncoder, 'head1_encoder_params': { 'input_size': 128, 'layer': "LSTM", 'encoder_dim': 128, 'n_layers': 4, 'dropout': 0.8, 'is_bidirectional': False }, 'head2_encoder': RNNEncoder, 'head2_encoder_params': { 'input_size': 128, 'layer': "LSTM", 'encoder_dim': 128, 'n_layers': 4, 'dropout': 0.8, 'is_bidirectional': False }, 'merge': "mul", 'mlp': OpenChemMLPSimple, 'mlp_params': { 'input_size': 128, 'n_layers': 
2, 'hidden_size': [128, 2], 'activation': [F.relu, nn.Softmax(dim=1)], } }
{"hexsha": "4151452cb58e0c1ec78425027b7a820b3da9092d", "size": 4445, "ext": "py", "lang": "Python", "max_stars_repo_path": "example_configs/siamese_reactions.py", "max_stars_repo_name": "jmhayesesq/Open-Chem", "max_stars_repo_head_hexsha": "e612d5cd471079c64e61ceda946c3dc7cf095bd8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 466, "max_stars_repo_stars_event_min_datetime": "2018-08-10T04:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T07:04:39.000Z", "max_issues_repo_path": "example_configs/siamese_reactions.py", "max_issues_repo_name": "jmhayesesq/Open-Chem", "max_issues_repo_head_hexsha": "e612d5cd471079c64e61ceda946c3dc7cf095bd8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2019-08-06T08:23:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-23T22:39:52.000Z", "max_forks_repo_path": "example_configs/siamese_reactions.py", "max_forks_repo_name": "jmhayesesq/Open-Chem", "max_forks_repo_head_hexsha": "e612d5cd471079c64e61ceda946c3dc7cf095bd8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 91, "max_forks_repo_forks_event_min_datetime": "2018-08-19T00:37:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T11:15:31.000Z", "avg_line_length": 34.1923076923, "max_line_length": 103, "alphanum_fraction": 0.6249718785, "include": true, "reason": "import numpy", "num_tokens": 1117}
from malaya_speech.path import ( PATH_TTS_TACOTRON2, S3_PATH_TTS_TACOTRON2, PATH_TTS_FASTSPEECH2, S3_PATH_TTS_FASTSPEECH2, PATH_TTS_FASTPITCH, S3_PATH_TTS_FASTPITCH, PATH_TTS_GLOWTTS, S3_PATH_TTS_GLOWTTS, ) from malaya_speech.utils.text import ( convert_to_ascii, collapse_whitespace, put_spacing_num, tts_encode, TextIDS, ) from malaya_speech.supervised import tts import numpy as np import re _tacotron2_availability = { 'male': { 'Size (MB)': 104, 'Quantized Size (MB)': 26.3, 'Combined loss': 0.1838, }, 'female': { 'Size (MB)': 104, 'Quantized Size (MB)': 26.3, 'Combined loss': 0.1887, }, 'husein': { 'Size (MB)': 104, 'Quantized Size (MB)': 26.3, 'Combined loss': 0.1165, }, 'haqkiem': { 'Size (MB)': 104, 'Quantized Size (MB)': 26.3, 'Combined loss': 0.1375, }, 'female-singlish': { 'Size (MB)': 104, 'Quantized Size (MB)': 26.3, 'Combined loss': 0.0923, }, } _fastspeech2_availability = { 'male': { 'Size (MB)': 125, 'Quantized Size (MB)': 31.7, 'Combined loss': 1.8, }, 'female': { 'Size (MB)': 125, 'Quantized Size (MB)': 31.7, 'Combined loss': 1.932, }, 'husein': { 'Size (MB)': 125, 'Quantized Size (MB)': 31.7, 'Combined loss': 0.5832, }, 'haqkiem': { 'Size (MB)': 125, 'Quantized Size (MB)': 31.7, 'Combined loss': 0.5663, }, 'female-singlish': { 'Size (MB)': 125, 'Quantized Size (MB)': 31.7, 'Combined loss': 0.5112, }, } _fastpitch_availability = { 'male': { 'Size (MB)': 123, 'Quantized Size (MB)': 31.1, 'Combined loss': 1.614, }, 'female': { 'Size (MB)': 123, 'Quantized Size (MB)': 31.1, 'Combined loss': 1.669, }, 'husein': { 'Size (MB)': 123, 'Quantized Size (MB)': 31.1, 'Combined loss': 0.52515, }, 'haqkiem': { 'Size (MB)': 123, 'Quantized Size (MB)': 31.1, 'Combined loss': 0.5186, }, } _glowtts_availability = { 'male': { 'Size (MB)': 119, 'Quantized Size (MB)': 27.6, 'Combined loss': -1.429, }, 'female': { 'Size (MB)': 119, 'Quantized Size (MB)': 27.6, 'Combined loss': -1.464, }, 'haqkiem': { 'Size (MB)': 119, 'Quantized Size (MB)': 27.6, 'Combined loss': -1.649, }, 'female-singlish': { 'Size (MB)': 119, 'Quantized Size (MB)': 27.6, 'Combined loss': -1.728, }, 'multispeaker': { 'Size (MB)': 404, 'Quantized Size (MB)': 79.9, 'Combined loss': -1.882, } } def load_text_ids( pad_to: int = 8, understand_punct: bool = True, true_case_model=None, **kwargs ): try: import malaya except BaseException: raise ModuleNotFoundError( 'malaya not installed. Please install it by `pip install malaya` and try again.' ) normalizer = malaya.normalize.normalizer(date=False, time=False) sentence_tokenizer = malaya.text.function.split_into_sentences return TextIDS( pad_to=pad_to, understand_punct=understand_punct, normalizer=normalizer, sentence_tokenizer=sentence_tokenizer, true_case_model=true_case_model, ) def available_tacotron2(): """ List available Tacotron2, Text to Mel models. """ from malaya_speech.utils import describe_availability return describe_availability( _tacotron2_availability, text='`husein`, `haqkiem` and `female-singlish` combined loss from training set', ) def available_fastspeech2(): """ List available FastSpeech2, Text to Mel models. """ from malaya_speech.utils import describe_availability return describe_availability( _fastspeech2_availability, text='`husein`, `haqkiem` and `female-singlish` combined loss from training set', ) def available_fastpitch(): """ List available FastPitch, Text to Mel models. 
""" from malaya_speech.utils import describe_availability return describe_availability( _fastpitch_availability, text='`husein` and `haqkiem` combined loss from training set', ) def available_glowtts(): """ List available GlowTTS, Text to Mel models. """ from malaya_speech.utils import describe_availability return describe_availability( _glowtts_availability, text='`haqkiem` and `female-singlish` combined loss from training set', ) def tacotron2( model: str = 'male', quantized: bool = False, pad_to: int = 8, true_case_model=None, **kwargs ): """ Load Tacotron2 TTS model. Parameters ---------- model : str, optional (default='male') Model architecture supported. Allowed values: * ``'female'`` - Tacotron2 trained on female voice. * ``'male'`` - Tacotron2 trained on male voice. * ``'husein'`` - Tacotron2 trained on Husein voice, https://www.linkedin.com/in/husein-zolkepli/ * ``'haqkiem'`` - Tacotron2 trained on Haqkiem voice, https://www.linkedin.com/in/haqkiem-daim/ * ``'female-singlish'`` - Tacotron2 trained on female Singlish voice, https://www.imda.gov.sg/programme-listing/digital-services-lab/national-speech-corpus quantized : bool, optional (default=False) if True, will load 8-bit quantized model. Quantized model not necessary faster, totally depends on the machine. pad_to : int, optional (default=8) size of pad character with 0. Increase can stable up prediction on short sentence, we trained on 8. true_case_model: str, optional (default=None) load any true case model, eg, malaya true case model from https://malaya.readthedocs.io/en/latest/load-true-case.html the interface must accept a string, return a string, eg, string = true_case_model(string) Returns ------- result : malaya_speech.model.tf.Tacotron class """ model = model.lower() if model not in _tacotron2_availability: raise ValueError( 'model not supported, please check supported models from `malaya_speech.tts.available_tacotron2()`.' ) text_ids = load_text_ids( pad_to=pad_to, true_case_model=true_case_model, quantized=quantized, **kwargs ) return tts.tacotron_load( path=PATH_TTS_TACOTRON2, s3_path=S3_PATH_TTS_TACOTRON2, model=model, name='text-to-speech', normalizer=text_ids, quantized=quantized, **kwargs ) def fastspeech2( model: str = 'male', quantized: bool = False, pad_to: int = 8, true_case_model=None, **kwargs ): """ Load Fastspeech2 TTS model. Parameters ---------- model : str, optional (default='male') Model architecture supported. Allowed values: * ``'female'`` - Fastspeech2 trained on female voice. * ``'male'`` - Fastspeech2 trained on male voice. * ``'husein'`` - Fastspeech2 trained on Husein voice, https://www.linkedin.com/in/husein-zolkepli/ * ``'haqkiem'`` - Fastspeech2 trained on Haqkiem voice, https://www.linkedin.com/in/haqkiem-daim/ * ``'female-singlish'`` - Fastspeech2 trained on female Singlish voice, https://www.imda.gov.sg/programme-listing/digital-services-lab/national-speech-corpus quantized : bool, optional (default=False) if True, will load 8-bit quantized model. Quantized model not necessary faster, totally depends on the machine. pad_to : int, optional (default=8) size of pad character with 0. Increase can stable up prediction on short sentence, we trained on 8. 
true_case_model: str, optional (default=None) load any true case model, eg, malaya true case model from https://malaya.readthedocs.io/en/latest/load-true-case.html the interface must accept a string, return a string, eg, string = true_case_model(string) Returns ------- result : malaya_speech.model.tf.Fastspeech class """ model = model.lower() if model not in _fastspeech2_availability: raise ValueError( 'model not supported, please check supported models from `malaya_speech.tts.available_fastspeech2()`.' ) text_ids = load_text_ids( pad_to=pad_to, true_case_model=true_case_model, quantized=quantized, **kwargs ) return tts.fastspeech_load( path=PATH_TTS_FASTSPEECH2, s3_path=S3_PATH_TTS_FASTSPEECH2, model=model, name='text-to-speech', normalizer=text_ids, quantized=quantized, **kwargs ) def fastpitch( model: str = 'male', quantized: bool = False, pad_to: int = 8, true_case_model=None, **kwargs ): """ Load Fastspitch TTS model. Parameters ---------- model : str, optional (default='male') Model architecture supported. Allowed values: * ``'female'`` - Fastpitch trained on female voice. * ``'male'`` - Fastpitch trained on male voice. * ``'husein'`` - Fastpitch trained on Husein voice, https://www.linkedin.com/in/husein-zolkepli/ * ``'haqkiem'`` - Fastpitch trained on Haqkiem voice, https://www.linkedin.com/in/haqkiem-daim/ quantized : bool, optional (default=False) if True, will load 8-bit quantized model. Quantized model not necessary faster, totally depends on the machine. pad_to : int, optional (default=8) size of pad character with 0. Increase can stable up prediction on short sentence, we trained on 8. true_case_model: str, optional (default=None) load any true case model, eg, malaya true case model from https://malaya.readthedocs.io/en/latest/load-true-case.html the interface must accept a string, return a string, eg, string = true_case_model(string) Returns ------- result : malaya_speech.model.tf.Fastpitch class """ model = model.lower() if model not in _fastpitch_availability: raise ValueError( 'model not supported, please check supported models from `malaya_speech.tts.available_fastpitch()`.' ) text_ids = load_text_ids( pad_to=pad_to, true_case_model=true_case_model, quantized=quantized, **kwargs ) return tts.fastpitch_load( path=PATH_TTS_FASTPITCH, s3_path=S3_PATH_TTS_FASTPITCH, model=model, name='text-to-speech', normalizer=text_ids, quantized=quantized, **kwargs ) def glowtts(model: str = 'male', quantized: bool = False, pad_to: int = 2, true_case_model=None, **kwargs): """ Load GlowTTS TTS model. Parameters ---------- model : str, optional (default='male') Model architecture supported. Allowed values: * ``'female'`` - GlowTTS trained on female voice. * ``'male'`` - GlowTTS trained on male voice. * ``'haqkiem'`` - GlowTTS trained on Haqkiem voice, https://www.linkedin.com/in/haqkiem-daim/ * ``'female-singlish'`` - GlowTTS trained on female Singlish voice, https://www.imda.gov.sg/programme-listing/digital-services-lab/national-speech-corpus * ``'multispeaker'`` - Multispeaker GlowTTS trained on male, female, husein and haqkiem voices, also able to do voice conversion. quantized : bool, optional (default=False) if True, will load 8-bit quantized model. Quantized model not necessary faster, totally depends on the machine. pad_to : int, optional (default=2) size of pad character with 0. Increase can stable up prediction on short sentence, we trained on 2. 
true_case_model: str, optional (default=None) load any true case model, eg, malaya true case model from https://malaya.readthedocs.io/en/latest/load-true-case.html the interface must accept a string, return a string, eg, string = true_case_model(string) Returns ------- result : malaya_speech.model.tf.GlowTTS class """ model = model.lower() if model not in _glowtts_availability: raise ValueError( 'model not supported, please check supported models from `malaya_speech.tts.available_glowtts()`.' ) text_ids = load_text_ids( pad_to=pad_to, true_case_model=true_case_model, quantized=quantized, **kwargs ) return tts.glowtts_load( path=PATH_TTS_GLOWTTS, s3_path=S3_PATH_TTS_GLOWTTS, model=model, name='text-to-speech', normalizer=text_ids, quantized=quantized, **kwargs )
{"hexsha": "a844f88e2b6b7da10f8540ea2dc6ebe1a95db030", "size": 12958, "ext": "py", "lang": "Python", "max_stars_repo_path": "malaya_speech/tts.py", "max_stars_repo_name": "ishine/malaya-speech", "max_stars_repo_head_hexsha": "fd34afc7107af1656dff4b3201fa51dda54fde18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "malaya_speech/tts.py", "max_issues_repo_name": "ishine/malaya-speech", "max_issues_repo_head_hexsha": "fd34afc7107af1656dff4b3201fa51dda54fde18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "malaya_speech/tts.py", "max_forks_repo_name": "ishine/malaya-speech", "max_forks_repo_head_hexsha": "fd34afc7107af1656dff4b3201fa51dda54fde18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9260969977, "max_line_length": 165, "alphanum_fraction": 0.618305294, "include": true, "reason": "import numpy", "num_tokens": 3464}
# -*- coding: utf-8 -*-
from abc import ABC, abstractmethod

import numpy as np


class Border(ABC):
    def __init__(self, length, origin=np.zeros((2, 1))):
        """Build a new border.

        Args:
            length (numpy.ndarray): The side lengths of the environment.
            origin (numpy.ndarray): The center position of the environment.

        """
        self.length = length
        self.origin = origin

    @abstractmethod
    def wrap(self, point2d):
        """Wrap a point so that it stays within the border.

        Args:
            point2d (numpy.ndarray): The point to wrap.

        Returns:
            numpy.ndarray: The wrapped point.

        """
        pass

    @abstractmethod
    def vector(self, point2d_from, point2d_to):
        """Compute the vector that connects the given points.

        Args:
            point2d_from (numpy.ndarray): The start point.
            point2d_to (numpy.ndarray): The end point.

        Returns:
            numpy.ndarray: The vector between these points with respect to the border type.

        """
        pass
{"hexsha": "607c7b68979c44d109cd200a4ab6e9f4dda5ccbf", "size": 1056, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sim/borders/border.py", "max_stars_repo_name": "Bacmel/Boids-Boids-Boids", "max_stars_repo_head_hexsha": "f66954359373b34c2a493ed025773306a280c701", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sim/borders/border.py", "max_issues_repo_name": "Bacmel/Boids-Boids-Boids", "max_issues_repo_head_hexsha": "f66954359373b34c2a493ed025773306a280c701", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sim/borders/border.py", "max_forks_repo_name": "Bacmel/Boids-Boids-Boids", "max_forks_repo_head_hexsha": "f66954359373b34c2a493ed025773306a280c701", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4666666667, "max_line_length": 79, "alphanum_fraction": 0.5785984848, "include": true, "reason": "import numpy", "num_tokens": 231}
''' code_snippet_poisson_busiest30min.py This snippet is the code we used to compute the arrival rate of potentially-race-relevant messages for each 30-minute sessions of the trading day. It produces the statistics for the busiest 30 minutes Poisson exercise (Table 4.5). The code is specific to the LSE settings and may not be applicable to other exchange message data directly. We provide this code for the purpose of disclosing how we did the analysis and helping our user reproduce the related results. Note that the coding convention (e.g., naming) in this script is slightly different from the code in /PythonCode, because this is part of an earlier version of our code. To reproduce the busiest 30 minutes Poisson exercise (Busiest 30 Min columns in Table 4.5 and Appendix Table B.7), users need to 1. Go through the code snippet carefully and understand the logic. 2. Adapt/rewrite the code to their context depending on the specific details of their setting. Users can make use of this code snippet in this step. 3. Execute the code to obtain the arrival rate of potentially-race-relevant messages for each 30-minute sessions of the trading day. 4. Find the busiest 30 minutes arrival rate for each symbol-date. Users have to do this by themselves. 5. Compute the expected number of potential race activities based on the busiest 30 minutes arrival rate. Users can refer to write.poisson.tables in /RCode/Functions.R. ''' ################################################################################### # Import packages import pandas as pd import numpy as np import datetime import logging import pickle import sys import os import importlib from LatencyArbitrageAnalysis.utils.Dtypes import dtypes_msgs, dtypes_top base = '/data/workspace2/gatewaydata/workingarea/proc/' sys.path.insert(1, base + '/code/') PrepData = importlib.import_module('04a_Prep_Race_Data') # Collect arguments runtime, date, sym = sys.argv[1], sys.argv[2], sys.argv[3] # Initialize log logpath = base + '/code/logs/%s/' % runtime if not os.path.exists(logpath): os.makedirs(logpath) class LoggerWriter(object): def __init__(self, level): self.level = level def write(self, message): for line in message.rstrip().splitlines(): self.level(line.rstrip()) def flush(self): self.level(sys.stderr) logger = logging.getLogger(__name__) sys.stdout = LoggerWriter(logger.warning) sys.stderr = LoggerWriter(logger.error) logger.setLevel(logging.INFO) handler = logging.FileHandler(logpath + 'Temp_Counter_%s_%s_%s.log' % (runtime, date, sym)) handler.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) ### INITIALIZE ### logger.info('Processing: %s %s' % (date, sym)) # Specify paths infile_msgs = base + '/data/clean/%s/CleanMsgData_%s_%s.csv.gz' % (date, date, sym) infile_depth = base + '/data/book/%s/DepthInfo_%s_%s.pkl' % (date, date, sym) infile_top = base + '/data/book/%s/BBO_%s_%s.csv.gz' % (date, date, sym) outfile_stats = base + '/output/poisson_by_session/%s/Counters_%s_%s.pkl' % (date, date, sym) # Start timer timer_st = datetime.datetime.now() # Add info to log logger.info('Timer Start: %s' % str(timer_st)) ### LOAD DATA ### logger.info('Loading data...') # Load symbol-date info info = pd.read_csv(base + '/reference_data/Symbol_Date_Info.csv') info = info.loc[(info['InstrumentID'].astype('str') == sym) & (info['Date'] == date)] # Load ticktables ticktables = pd.read_pickle(base + '/reference_data/Ticktables.pkl') ticktable = 
ticktables[info['Segment_ID'].item()][info['TickTable'].astype('int').item()][info['Curr'].item()] # Load message data df = pd.read_csv(infile_msgs, dtype = dtypes_msgs, parse_dates=['MessageTimestamp', 'Timestamp', 'ExpireDateTime', 'TransactTime']) # Load top-of-book data top = pd.read_csv(infile_top, index_col = 0, dtype = dtypes_top, parse_dates = ['MessageTimestamp', 't_last_chg_MidPt']) # Load other order book data depth = pickle.load(open(infile_depth, 'rb')) # Clean timestamps df['MessageTimestamp'] = pd.to_datetime(df['MessageTimestamp']) # Restrict to regular hours # Start with all False and then for each group of session messages update reg_hours by index to True # if the messages belong to the session. Outcome is a True/False flag for every message on whether they # belong to a session in regular hours reg_hours = pd.Series(False, index=df.index) sess_id = pd.Series(np.nan, index=df.index) i = 1 while i <= info['Sess_Max_N'].item(): sess_st = pd.to_datetime(info['Sess_St_%s' % i].item()) sess_end = pd.to_datetime(info['Sess_End_%s' % i].item()) # Session starts on first inbound in New Order / New Quote in regular hours sess_st_id = df.index[(df['MessageTimestamp'] > sess_st) & (df['MessageType'].isin({'D', 'S'}))][0] # Session ends on last outbound in regular hours sess_end_id = df.index[(df['MessageTimestamp'] < sess_end) & (df['MessageType'].isin({'8'}))][-1] sess_msgs = ((df.index >= sess_st_id) & (df.index <= sess_end_id)) sess_id[sess_msgs] = i reg_hours = reg_hours | sess_msgs i += 1 # Keep regular hours df, top = df.loc[reg_hours], top.loc[reg_hours] # Keep post open auction post_open_auction = df.PostOpenAuction df, top = df.loc[post_open_auction], top.loc[post_open_auction] # Currency conversion (FTSE 350 is in GBX) price_factor, to_GBX, to_GBP = 100000000, 1, .01 reaction_time = np.timedelta64(29, 'us') df_races, top_races = PrepData.prepare_data(df, top, ticktable, price_factor, sess_id, reaction_time) df_races['N_Inbound_NBBO'] = 0 df_races.loc[((df_races['AskRaceRlvtType'] == 'Take Attempt') & (df_races['AskRaceRlvtPriceLvlSigned'] >= top_races['BestAskSigned'])) | ((df_races['BidRaceRlvtType'] == 'Take Attempt') & (df_races['BidRaceRlvtPriceLvlSigned'] >= top_races['BestBidSigned'])) | ((df_races['AskRaceRlvtType'] == 'Cancel Attempt') & (df_races['AskRaceRlvtPriceLvlSigned'] == top_races['BestAskSigned'])) | ((df_races['BidRaceRlvtType'] == 'Cancel Attempt') & (df_races['BidRaceRlvtPriceLvlSigned'] == top_races['BestBidSigned'])), 'N_Inbound_NBBO'] = 1 df_races['N_Inbound'] = 0 df_races.loc[df_races['MessageType'].isin(['D','F', 'q', 'G', 'S', 'C', 'H', 'Z', 's', 'u']), 'N_Inbound'] = 1 df_races['N_Msgs'] = 1 ## Count # relevant msgs by session df_races = df_races.set_index('MessageTimestamp') stats = df_races[['N_Msgs','N_Inbound','N_Inbound_NBBO']].resample('30Min', how='sum') stats = stats.reset_index() stats['Session'] = stats['MessageTimestamp'].dt.time # Metadata stats['Date'] = date stats['InstrumentID'] = sym pickle.dump(stats, open(outfile_stats, 'wb')) # End timer timer_end = datetime.datetime.now() # Add info to log logger.info('Complete.') logger.info('Timer End: %s' % str(timer_end)) logger.info('Time Elapsed: %s' % str(timer_end - timer_st))
{"hexsha": "6ac70ec774bb88d24fc98024db980dcb8e0697f3", "size": 6986, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonCode/MiscABOCodeSnippets/code_snippet_poisson_busiest30min.py", "max_stars_repo_name": "ericbudish/HFT-Races", "max_stars_repo_head_hexsha": "fe9ffc2da98b529e43e25800695aad698b46b10a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-09-16T10:05:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T00:18:26.000Z", "max_issues_repo_path": "PythonCode/MiscABOCodeSnippets/code_snippet_poisson_busiest30min.py", "max_issues_repo_name": "ericbudish/HFT-Races", "max_issues_repo_head_hexsha": "fe9ffc2da98b529e43e25800695aad698b46b10a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonCode/MiscABOCodeSnippets/code_snippet_poisson_busiest30min.py", "max_forks_repo_name": "ericbudish/HFT-Races", "max_forks_repo_head_hexsha": "fe9ffc2da98b529e43e25800695aad698b46b10a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-09-23T13:41:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-11T18:10:13.000Z", "avg_line_length": 41.0941176471, "max_line_length": 159, "alphanum_fraction": 0.7124248497, "include": true, "reason": "import numpy", "num_tokens": 1878}
/** * Swaggy Jenkins * Jenkins API clients generated from Swagger / Open API specification * * OpenAPI spec version: 1.1.1 * Contact: blah@cliffano.com * * NOTE: This class is auto generated by OpenAPI-Generator 3.2.1-SNAPSHOT. * https://openapi-generator.tech * Do not edit the class manually. */ #include "StringParameterValue.h" #include <string> #include <sstream> #include <boost/property_tree/ptree.hpp> #include <boost/property_tree/json_parser.hpp> using boost::property_tree::ptree; using boost::property_tree::read_json; using boost::property_tree::write_json; namespace org { namespace openapitools { namespace server { namespace model { StringParameterValue::StringParameterValue() { m__class = ""; m_Name = ""; m_Value = ""; } StringParameterValue::~StringParameterValue() { } std::string StringParameterValue::toJsonString() { std::stringstream ss; ptree pt; pt.put("_class", m__class); pt.put("Name", m_Name); pt.put("Value", m_Value); write_json(ss, pt, false); return ss.str(); } void StringParameterValue::fromJsonString(std::string const& jsonString) { std::stringstream ss(jsonString); ptree pt; read_json(ss,pt); m__class = pt.get("_class", ""); m_Name = pt.get("Name", ""); m_Value = pt.get("Value", ""); } std::string StringParameterValue::getClass() const { return m__class; } void StringParameterValue::setClass(std::string value) { m__class = value; } std::string StringParameterValue::getName() const { return m_Name; } void StringParameterValue::setName(std::string value) { m_Name = value; } std::string StringParameterValue::getValue() const { return m_Value; } void StringParameterValue::setValue(std::string value) { m_Value = value; } } } } }
{"hexsha": "2c22c032d1156233a9660329339d8de520be668a", "size": 1753, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "clients/cpp-restbed-server/generated/model/StringParameterValue.cpp", "max_stars_repo_name": "PankTrue/swaggy-jenkins", "max_stars_repo_head_hexsha": "aca35a7cca6e1fcc08bd399e05148942ac2f514b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23.0, "max_stars_repo_stars_event_min_datetime": "2017-08-01T12:25:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T03:44:11.000Z", "max_issues_repo_path": "clients/cpp-restbed-server/generated/model/StringParameterValue.cpp", "max_issues_repo_name": "PankTrue/swaggy-jenkins", "max_issues_repo_head_hexsha": "aca35a7cca6e1fcc08bd399e05148942ac2f514b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 35.0, "max_issues_repo_issues_event_min_datetime": "2017-06-14T03:28:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T10:25:54.000Z", "max_forks_repo_path": "clients/cpp-restbed-server/generated/model/StringParameterValue.cpp", "max_forks_repo_name": "PankTrue/swaggy-jenkins", "max_forks_repo_head_hexsha": "aca35a7cca6e1fcc08bd399e05148942ac2f514b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11.0, "max_forks_repo_forks_event_min_datetime": "2017-08-31T19:00:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-19T12:04:12.000Z", "avg_line_length": 18.6489361702, "max_line_length": 74, "alphanum_fraction": 0.7107815174, "num_tokens": 423}
# 3. Import libraries and modules import numpy as np np.random.seed(123) # for reproducibility import tensorflow as tf tf.set_random_seed(123) from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Convolution2D, MaxPooling2D from keras.utils import np_utils from keras.datasets import mnist batch_size = 128 # 4. Load pre-shuffled MNIST data into train and test sets (X_train, y_train), (X_test, y_test) = mnist.load_data() # 5. Preprocess input data X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) X_test = X_test.reshape(X_test.shape[0], 28, 28, 1) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test /= 255 # 6. Preprocess class labels Y_train = np_utils.to_categorical(y_train, 10) Y_test = np_utils.to_categorical(y_test, 10) # 7. Define model architecture model = Sequential() model.add(Convolution2D(32, (6, 6), activation='relu', input_shape=(28,28,1))) model.add(Convolution2D(20, (6, 6), activation='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.40)) model.add(Dense(10, activation='softmax')) # 8. Compile model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # 9. Fit model on training data model.fit(X_train, Y_train, batch_size=batch_size, epochs=15, verbose=1, validation_data=(X_test, Y_test)) model.save('model.h5') # 10. Evaluate model on test data score = model.evaluate(X_test, Y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1])
{"hexsha": "182395286aeab2fb00504e615658941852b583c0", "size": 1704, "ext": "py", "lang": "Python", "max_stars_repo_path": "tf-learn/example-4-Kera/example4-cnn.py", "max_stars_repo_name": "liufuyang/kaggle-youtube-8m", "max_stars_repo_head_hexsha": "1cfbbf92ec9b5ead791f98f7a09463ee165a8120", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2017-04-09T09:51:00.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-29T11:29:32.000Z", "max_issues_repo_path": "tf-learn/example-4-Kera/example4-cnn.py", "max_issues_repo_name": "liufuyang/kaggle-youtube-8m", "max_issues_repo_head_hexsha": "1cfbbf92ec9b5ead791f98f7a09463ee165a8120", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tf-learn/example-4-Kera/example4-cnn.py", "max_forks_repo_name": "liufuyang/kaggle-youtube-8m", "max_forks_repo_head_hexsha": "1cfbbf92ec9b5ead791f98f7a09463ee165a8120", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2017-03-12T18:17:21.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-16T21:52:24.000Z", "avg_line_length": 29.8947368421, "max_line_length": 78, "alphanum_fraction": 0.7300469484, "include": true, "reason": "import numpy", "num_tokens": 476}
import numpy as np
import itertools
import os
from utils import close_pair_utils, parallel_utils, BSMC_utils
import config


def compute_pileup_for_clusters(cluster_dict, get_run_start_end, genome_len, thresholds):
    """
    General function for computing pileup curves; close genomes will be clustered
    according to cluster_dict
    :param cluster_dict: precomputed clusters using pairwise divergence matrix
    (using function in close_pair_utils.py)
    :param get_run_start_end: function that computes the list of event starts and ends
    for each threshold. Use get_event_start_end_BSMC for BSMC simulated data.
    Use Pileup_Helper.get_event_start_end for real species data
    :param genome_len: rough length of the genome; sets the length of the pileup array
    :param thresholds: list of run-length thresholds (in number of sites) used to
    filter sharing events
    :return: np array of shape (genome length, number of thresholds)
    """
    genome_len = int(genome_len)
    cumu_runs = np.zeros([genome_len, len(thresholds)])

    num_clusters = len(cluster_dict)
    num_comparisons = 0
    for i, j in itertools.combinations(range(1, num_clusters + 1), 2):
        # i, j are cluster ids
        num_comparisons += 1
        num_pairs = 0.
        tmp_runs = np.zeros(cumu_runs.shape)
        for l, m in itertools.product(cluster_dict[i], cluster_dict[j]):
            # l, m are sample ids
            num_pairs += 1
            all_start_end = get_run_start_end(l, m, thresholds)
            for k, dat in enumerate(all_start_end):
                # k indexes the threshold
                for start, end in dat:
                    tmp_runs[start:end, k] += 1
        tmp_runs /= num_pairs
        cumu_runs += tmp_runs
    cumu_runs /= num_comparisons
    return cumu_runs


def get_event_start_end_BSMC(sim_data, genome_len, idx1, idx2, thresholds):
    """
    Function for obtaining all sharing events over thresholds for genome pairs (idx1, idx2)
    :param sim_data: np array loaded with relevant BSMC_util function
    :param genome_len: Same length that supplied to BSMC program
    :param idx1: index of the first genome (0 indexed)
    :param idx2: index of the second genome
    :param thresholds: List of threshold lengths to filter events.
In number of sites :return: A list containing a list of (start loc, end loc) for each threshold """ site_locations, runs = BSMC_utils.compare_two_samples(idx1, idx2, sim_data, genome_len) site_locations = np.array(site_locations * genome_len).astype(int) runs = site_locations[1:] - site_locations[:-1] all_dat = [] for k in range(len(thresholds)): threshold = thresholds[k] event_starts = site_locations[:-1][runs > threshold] event_ends = site_locations[1:][runs > threshold] all_dat.append(zip(event_starts, event_ends)) return all_dat class Pileup_Helper: def __init__(self, species_name, allowed_variants=["4D"], clade_cutoff=None, close_pair_cutoff=1e-3): """ Wrapper over DataHoarder to provide pileup specific functions :param species_name: :param allowed_variants: :param clade_cutoff: Only for B vulgatus to select the major clade :param close_pair_cutoff: For reducing overcounting of sharing """ self.dh = parallel_utils.DataHoarder(species_name, mode='QP', allowed_variants=allowed_variants) self.good_chromo = self.dh.chromosomes[self.dh.general_mask] div_dir = os.path.join(config.analysis_directory, 'pairwise_divergence', 'between_hosts', '%s.csv' % species_name) self.div_mat = np.loadtxt(div_dir, delimiter=',') clade_cutoff = clade_cutoff if clade_cutoff else 1 # form first order clusters using clade divergence cutoff d = close_pair_utils.get_clusters_from_pairwise_matrix(self.div_mat, threshold=clade_cutoff) clade_cluster = 1 + np.argmax(map(len, d.values())) # keep the largest clade clade_samples = d[clade_cluster] single_subject_samples = self.dh.get_single_subject_idxs() self.good_samples = np.intersect1d(single_subject_samples, clade_samples) self.close_pair_cutoff = close_pair_cutoff self.cluster_dict = close_pair_utils.get_clusters_from_pairwise_matrix( self.div_mat[self.good_samples, :][:, self.good_samples], threshold=close_pair_cutoff) def update_close_pair_cutoff(self, new_cutoff): self.close_pair_cutoff = new_cutoff self.cluster_dict = close_pair_utils.get_clusters_from_pairwise_matrix( self.div_mat[self.good_samples, :][:, self.good_samples], threshold=new_cutoff) def cluster_id_to_sample_id(self, cluster_id): # sample id is used by self.dh, indexes all samples # cluster id indexes only the good samples return self.good_samples[cluster_id] def get_event_start_end(self, idx1, idx2, thresholds): i1 = self.cluster_id_to_sample_id(idx1) i2 = self.cluster_id_to_sample_id(idx2) pair = (i1, i2) # get the snp data snp_vec, coverage_arr = self.dh.get_snp_vector(pair) # get the location in the full array snp_to_core = np.nonzero(coverage_arr)[0] snp_genome_locs = snp_to_core[np.nonzero(snp_vec)[0]] runs = parallel_utils.compute_runs_all_chromosomes(snp_vec, self.good_chromo[coverage_arr]) all_dat = [] for i in range(len(thresholds)): threshold = thresholds[i] event_starts = snp_genome_locs[:-1][runs > threshold] event_ends = snp_genome_locs[1:][runs > threshold] all_dat.append(zip(event_starts, event_ends)) return all_dat
{"hexsha": "a4311582f33c1f994b0437f8726f3e2d84558405", "size": 5843, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/pileup_utils.py", "max_stars_repo_name": "zhiru-liu/microbiome_evolution", "max_stars_repo_head_hexsha": "5a08fbf41357d845236e3ff46c31315929d2b649", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/pileup_utils.py", "max_issues_repo_name": "zhiru-liu/microbiome_evolution", "max_issues_repo_head_hexsha": "5a08fbf41357d845236e3ff46c31315929d2b649", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/pileup_utils.py", "max_forks_repo_name": "zhiru-liu/microbiome_evolution", "max_forks_repo_head_hexsha": "5a08fbf41357d845236e3ff46c31315929d2b649", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.5040650407, "max_line_length": 118, "alphanum_fraction": 0.7010097553, "include": true, "reason": "import numpy", "num_tokens": 1368}
[STATEMENT] lemma equivI[intro?]: "\<lbrakk> \<And>s t \<pi>. \<pi>:(c,s) \<Rightarrow> t \<Longrightarrow> \<pi>:(c',s) \<Rightarrow> t; \<And>s t \<pi>. \<pi>:(c',s) \<Rightarrow> t \<Longrightarrow> \<pi>:(c,s) \<Rightarrow> t\<rbrakk> \<Longrightarrow> c \<sim> c'" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>\<And>s t \<pi>. \<pi>: (c, s) \<Rightarrow> t \<Longrightarrow> \<pi>: (c', s) \<Rightarrow> t; \<And>s t \<pi>. \<pi>: (c', s) \<Rightarrow> t \<Longrightarrow> \<pi>: (c, s) \<Rightarrow> t\<rbrakk> \<Longrightarrow> c \<sim> c' [PROOF STEP] by (auto simp: equiv_c_def)
{"llama_tokens": 245, "file": "IMP2_basic_Semantics", "length": 1}
import numpy as np import SimpleITK as sitk import six from radiomics import featureextractor def read_dcm_series(dcm_dir): """ Args: dcm_dir: Str. Path to dicom series directory Returns: sitk_image: SimpleITK object of 3D CT volume. """ reader = sitk.ImageSeriesReader() series_file_names = reader.GetGDCMSeriesFileNames(dcm_dir) reader.SetFileNames(series_file_names) sitk_image = reader.Execute() return sitk_image def extract_feature_unit(sub_img, p, q, padding=2): """ Args: sub_img: Numpy array. The tumor area defined by mask p,q: Int. The index of central pixel padding: Int. Number of pixels padded on each side after extracting tumor Returns: features_temp: Dict. A dictionary contains all the radiomic features with keys used in "pyradiomics" """ # p and q are used to index the central pixel mask = np.copy(sub_img) mask[:, :] = 0 mask[p - padding:p + padding + 1, q - padding:q + padding + 1] = 1 img_ex = sitk.GetImageFromArray([sub_img]) mask_ex = sitk.GetImageFromArray([mask]) extractor = featureextractor.RadiomicsFeatureExtractor() radio_result = extractor.execute(img_ex, mask_ex) features_temp = {} features_temp["first"] = [] features_temp["shape"] = [] features_temp["glcm"] = [] features_temp["gldm"] = [] features_temp["glrlm"] = [] features_temp["glszm"] = [] features_temp["ngtdm"] = [] for key, val in six.iteritems(radio_result): if (key.startswith('original_firstorder')): features_temp["first"].append(val) elif (key.startswith('original_shape')): features_temp["shape"].append(val) elif (key.startswith('original_glcm')): features_temp["glcm"].append(val) elif (key.startswith('original_gldm')): features_temp["gldm"].append(val) elif (key.startswith('original_glrlm')): features_temp["glrlm"].append(val) elif (key.startswith('original_glszm')): features_temp["glszm"].append(val) elif (key.startswith('original_ngtdm')): features_temp["ngtdm"].append(val) else: pass return features_temp
{"hexsha": "61d18475df1280776827a98c6e9223cc26565dd5", "size": 2258, "ext": "py", "lang": "Python", "max_stars_repo_path": "ITHscore/utils.py", "max_stars_repo_name": "LiJiaqi96/ITHscore", "max_stars_repo_head_hexsha": "d7281a427305c2839f089b1ea9935fd1b2aeb641", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ITHscore/utils.py", "max_issues_repo_name": "LiJiaqi96/ITHscore", "max_issues_repo_head_hexsha": "d7281a427305c2839f089b1ea9935fd1b2aeb641", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ITHscore/utils.py", "max_forks_repo_name": "LiJiaqi96/ITHscore", "max_forks_repo_head_hexsha": "d7281a427305c2839f089b1ea9935fd1b2aeb641", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.28125, "max_line_length": 108, "alphanum_fraction": 0.6470327724, "include": true, "reason": "import numpy", "num_tokens": 555}
'''
Binomial probability calculation: sum the Binomial(n=50, p=0.4) probability
mass over k = 26..40 using combinations from scipy.
'''
from scipy.special import comb


def helper():
    '''
    Return P(26 <= X <= 40) for X ~ Binomial(n=50, p=0.4),
    i.e. the sum of C(50, k) * 0.4**k * 0.6**(50 - k) for k in 26..40.
    '''
    res = 0
    for i in range(26, 41):
        res += comb(50, i) * (0.4 ** i) * (0.6 ** (50 - i))
    return res


print(helper())
{"hexsha": "01875b1c010114fc4579c58fa23b5244b226e1b7", "size": 280, "ext": "py", "lang": "Python", "max_stars_repo_path": "Machine Learning Basic Principles/quiz1/1.py", "max_stars_repo_name": "JayWu7/Machine-Learning-Courses-Study-Record", "max_stars_repo_head_hexsha": "7586c3429514bc21c7cfe42f85ca8c0fcf8f072b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-04T12:03:11.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-04T12:03:11.000Z", "max_issues_repo_path": "Machine Learning Basic Principles/quiz1/1.py", "max_issues_repo_name": "JayWu7/Machine-Learning-Courses-Study-Record", "max_issues_repo_head_hexsha": "7586c3429514bc21c7cfe42f85ca8c0fcf8f072b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Machine Learning Basic Principles/quiz1/1.py", "max_forks_repo_name": "JayWu7/Machine-Learning-Courses-Study-Record", "max_forks_repo_head_hexsha": "7586c3429514bc21c7cfe42f85ca8c0fcf8f072b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-18T11:20:58.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-18T11:20:58.000Z", "avg_line_length": 15.5555555556, "max_line_length": 59, "alphanum_fraction": 0.5535714286, "include": true, "reason": "from scipy", "num_tokens": 80}
"""Collect information from the NY State Board of Elections website (http://www.elections.ny.gov/2016ElectionResults.html) and publish in a way more sensible format. Code is given for 2016 and 2014 below, but could easily be extended. Note the NY State Board of Elections uses inconsistent names on their website, and data from before 2014 is only published in PDF format, so an intermediary XLS must be produced first. Use `df_from_raw_pdf(raw)` to process data from these intermediary XLS files, as the line spacing is slightly different. Tabula (http://tabula.technology) can be used to manually extract tables from PDFs. Enrollment data is similar, in that it is only published in PDF format. This script assumes you have used Tabula to extract this and it is is available in file `../Data/tabula-congress_nov16.csv`, `../Data/tabula-senate_nov16.csv` and `../Data/tabula-assembly_nov16.csv`. INPUT: Elections data from web in non-sensible Excel format, enrollment data from scraped PDFs. OUTPUT: ../Data/NY State Senate Election Details.xls, an Excel sheet of elections data in sensible summary format. """ import pandas as pd import numpy as np import re from collections import defaultdict def df_from_raw(raw): """Read an Excel file and output a dataframe for further use.""" row_num = 0 candidates = [] forenames, surnames, parties = (), (), () district_name = "" for series in raw.iterrows(): cols = series[1] if "DISTRICT" in str( cols.iloc[0]): # If col 0 contains word DISTRICT then initialize row_num, store name district_name = cols.iloc[0] row_num = 1 continue if row_num == 2: # Forenames forenames = cols row_num += 1 continue if row_num == 3: # Surnames surnames = cols row_num += 1 continue if row_num == 4: # Party parties = cols row_num += 1 continue if "RECAP" in str(cols.iloc[0]): # Recap totals for i in range(1, cols.shape[0]): if pd.notnull(cols.iloc[i]) and int(cols.iloc[i]) != 0: # Recap total is not null candidate = {} candidate['District'] = district_name candidate['Name'] = " ".join( [forenames.iloc[i], surnames.iloc[i]]) candidate['Party'] = parties.iloc[i] candidate['Votes'] = cols.iloc[i] candidates.append(candidate) else: # Skip rows with no data or irrelevant data row_num += 1 continue df = pd.DataFrame(candidates) df['Won'] = df.groupby(['District'])['Votes'].transform(max) == df['Votes'] return df def df_from_raw_pdf(raw): row_num = 0 candidates = [] forenames, surnames, parties = (), (), () district_name = "" for series in raw.iterrows(): cols = series[1] if "DISTRICT" in str( cols.iloc[0]): # If col 0 contains word DISTRICT then initialize row_num, store name district_name = cols.iloc[0] row_num = 1 continue if row_num == 1: # Forenames forenames = cols row_num += 1 continue if row_num == 2: # Surnames surnames = cols row_num += 1 continue if row_num == 3: # Party parties = cols row_num += 1 continue if "RECAP" in str(cols.iloc[0]): # Recap totals for i in range(1, cols.shape[0]): if pd.notnull(cols.iloc[i]) and int(cols.iloc[i]) != 0: # Recap total is not null candidate = {} candidate['District'] = district_name candidate['Name'] = " ".join([forenames.iloc[i], surnames.iloc[i]]) candidate['Party'] = parties.iloc[i] candidate['Votes'] = cols.iloc[i] candidates.append(candidate) else: # Skip rows with no data or irrelevant data row_num += 1 continue df = pd.DataFrame(candidates) df['Won'] = df.groupby(['District'])['Votes'].transform(max) == df['Votes'] return df if __name__ == "__main__": o = open("../Data/2014-2016-elections-raw.csv", "w") # 2016 election data. 
raw = pd.read_excel("http://www.elections.ny.gov/NYSBOE/elections/2016/General/2016Senate.xls") df = df_from_raw(raw) df['Year'] = "2016" df.to_csv(o, header=True) raw = pd.read_excel("http://www.elections.ny.gov/NYSBOE/elections/2016/General/2016Assembly.xls") df = df_from_raw(raw) df['Year'] = "2016" df.to_csv(o, header=False) raw = pd.read_excel("http://www.elections.ny.gov/NYSBOE/elections/2016/General/2016Congress.xls") df = df_from_raw(raw) df['Year'] = "2016" df.to_csv(o, header=False) # 2014 election data. raw = pd.read_excel("http://www.elections.ny.gov/NYSBOE/elections/2014/general/2014Senate.xlsx") df = df_from_raw(raw) df['Year'] = "2014" df.head() df.to_csv(o, header=False) raw = pd.read_excel("http://www.elections.ny.gov/NYSBOE/elections/2014/General/2014Assembly.xlsx") df = df_from_raw(raw) df['Year'] = "2014" df.to_csv(o, header=False) raw = pd.read_excel("http://www.elections.ny.gov/NYSBOE/elections/2014/General/2014Congress.xlsx") df = df_from_raw(raw) df['Year'] = "2014" df.to_csv(o, header=False) o.close() # Enrollment data. cols = [ 'District', 'County', 'Status', 'DEM', 'REP', 'CON', 'GRE', 'WOR', 'IND', 'WEP', 'REF', 'OTH', 'BLANK', 'TOTAL'] coltypes = { 'District': str, 'County': str, 'Status': str, 'DEM': np.int32, 'REP': np.int32, 'CON': np.int32, 'GRE': np.int32, 'WOR': np.int32, 'IND': np.int32, 'WEP': np.int32, 'REF': np.int32, 'OTH': np.int32, 'BLANK': np.int32, 'TOTAL': np.int32} district_class_labels = [ 'Very Conservative', 'Conservative', 'Neutral', 'Liberal', 'Very Liberal'] senate = pd.read_csv( '../Data/tabula-senate_enrollment_nov16.csv', header=0, names=cols, dtype=coltypes, thousands=',') senate['DemPct'] = senate['DEM'] / senate['TOTAL'] senate['DistrictClass'] = pd.qcut( senate['DemPct'], 5, labels=district_class_labels) assembly = pd.read_csv( '../Data/tabula-assembly_enrollment_nov16.csv', header=0, names=cols, dtype=coltypes, thousands=',') assembly['DemPct'] = assembly['DEM'] / assembly['TOTAL'] assembly['DistrictClass'] = pd.qcut( assembly['DemPct'], 5, labels=district_class_labels) congress = pd.read_csv( '../Data/tabula-congress_enrollment_nov16.csv', header=0, names=cols, dtype=coltypes, thousands=',') congress['DemPct'] = congress['DEM'] / congress['TOTAL'] congress['DistrictClass'] = pd.qcut( congress['DemPct'], 5, labels=district_class_labels) df = pd.read_csv("../Data/2014-2016-elections-raw.csv") df.drop('Unnamed: 0', axis=1, inplace=True) districts = defaultdict(list) for row in df.itertuples(): districts[row.District].append(row) regexp = re.compile('^(\d+).*(CONGRESS|ASSEMBLY|SENATE).*') clean = [] for district in districts.keys(): new = {} total_votes_2014 = 0 total_votes_2016 = 0 m = regexp.match(district) if m: district_num = m.group(1) district_type = m.group(2) else: raise ValueError("Cannot match district " + district) for row in districts[district]: new['DistrictNum'] = district_num new['DistrictType'] = district_type if row.Year == 2016: enroll_data = { 'CONGRESS': congress, 'ASSEMBLY': assembly, 'SENATE': senate}[district_type] new['2016TotalEnrollment'] = enroll_data.loc[ (enroll_data['County'] == 'District Total') & ( enroll_data['Status'] == 'Total') & ( enroll_data['District'] == district_num), 'TOTAL'].tolist()[0] new['2016DemEnrollment'] = enroll_data.loc[ (enroll_data['County'] == 'District Total') & ( enroll_data['Status'] == 'Total') & ( enroll_data['District'] == district_num), 'DEM'].tolist()[0] new['2016RepEnrollment'] = enroll_data.loc[ (enroll_data['County'] == 'District Total') & ( enroll_data['Status'] == 
'Total') & (
                        enroll_data['District'] == district_num),
                    'REP'].tolist()[0]
                new['DistrictClass'] = enroll_data.loc[
                    (enroll_data['County'] == 'District Total') & (
                        enroll_data['Status'] == 'Total') & (
                        enroll_data['District'] == district_num),
                    'DistrictClass'].tolist()[0]
                total_votes_2016 += row.Votes
                if row.Party in 'DEM':
                    new['2016DemName'] = row.Name
                    new['2016DemVotes'] = row.Votes
                    new['2016DemWin'] = row.Won
                if row.Party in 'REP + TRPREP/TRP':
                    new['2016RepName'] = row.Name
                    new['2016RepVotes'] = row.Votes
                    new['2016RepWin'] = row.Won
            if row.Year == 2014:
                total_votes_2014 += row.Votes
                if row.Party in 'DEM':
                    new['2014DemName'] = row.Name
                    new['2014DemVotes'] = row.Votes
                    new['2014DemWin'] = row.Won
                if row.Party in 'REP + TRPREP/TRP':
                    new['2014RepName'] = row.Name
                    new['2014RepVotes'] = row.Votes
                    new['2014RepWin'] = row.Won
        new['2016TotalVotes'] = total_votes_2016
        new['2014TotalVotes'] = total_votes_2014
        clean.append(new)

    df2 = pd.DataFrame(clean)
    df2['2016DemMargin'] = df2['2016DemVotes'] - df2['2016RepVotes']
    df2['2016DemMarginPct'] = 100 * df2['2016DemMargin'] / (df2['2016TotalVotes'])
    df2['2014DemMargin'] = df2['2014DemVotes'] - df2['2014RepVotes']
    df2['2014DemMarginPct'] = 100 * df2['2014DemMargin'] / (df2['2014TotalVotes'])
    df2.to_excel("../Data/NY State Senate Election Details.xls")
{"hexsha": "79f52d778f4e57f218fd05fe195e6b1074423615", "size": 10988, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tools/ElectionData.py", "max_stars_repo_name": "UnitedThruAction/Data", "max_stars_repo_head_hexsha": "c589df73eb1c5f3a466357f863d63f0f8e209105", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tools/ElectionData.py", "max_issues_repo_name": "UnitedThruAction/Data", "max_issues_repo_head_hexsha": "c589df73eb1c5f3a466357f863d63f0f8e209105", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2017-03-04T01:41:05.000Z", "max_issues_repo_issues_event_max_datetime": "2017-04-01T18:44:06.000Z", "max_forks_repo_path": "Tools/ElectionData.py", "max_forks_repo_name": "UnitedThruAction/Data", "max_forks_repo_head_hexsha": "c589df73eb1c5f3a466357f863d63f0f8e209105", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6678700361, "max_line_length": 461, "alphanum_fraction": 0.5465962869, "include": true, "reason": "import numpy", "num_tokens": 2771}
using BinDeps @BinDeps.setup libbnet = library_dependency("libbnet") libdir = BinDeps.libdir(libbnet) srcdir = joinpath(BinDeps.srcdir(libbnet), "binary_networks") provides(Sources, URI("https://raw.githubusercontent.com/afternone/CommunityDetection.jl/master/deps/binary_networks.tar.gz"), libbnet) provides(BuildProcess, (@build_steps begin GetSources(libbnet) CreateDirectory(libdir) @build_steps begin ChangeDirectory(srcdir) FileRule(joinpath(libdir, "libbnet.so"), @build_steps begin `make` `cp libbnet.so $libdir` end) end end), libbnet) @BinDeps.install Dict(:libbnet => :libbnet)
{"hexsha": "2c94f8082a73d3f495992466ca8d799700496db0", "size": 722, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "deps/build.jl", "max_stars_repo_name": "afternone/LFR.jl", "max_stars_repo_head_hexsha": "74fa3d8de8fb9da4c3be884789522aedbacd319b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-03-31T07:05:25.000Z", "max_stars_repo_stars_event_max_datetime": "2017-03-31T07:05:25.000Z", "max_issues_repo_path": "deps/build.jl", "max_issues_repo_name": "afternone/LFR.jl", "max_issues_repo_head_hexsha": "74fa3d8de8fb9da4c3be884789522aedbacd319b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deps/build.jl", "max_forks_repo_name": "afternone/LFR.jl", "max_forks_repo_head_hexsha": "74fa3d8de8fb9da4c3be884789522aedbacd319b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3913043478, "max_line_length": 136, "alphanum_fraction": 0.6509695291, "num_tokens": 187}
import cv2 import numpy as np from easygraphics import * import qimage2ndarray def main(): init_graph(800,600) set_render_mode(RenderMode.RENDER_MANUAL) set_background_color("white") print("init_camera") success, frame = cameraCapture.read() print("init_camera_ok") while is_run() and success: height, width, bytesPerComponent = frame.shape img : np.ndarray = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) image = Image(qimage2ndarray.array2qimage(img)) draw_image(0,0,image) image.close() delay_fps(30) success, frame = cameraCapture.read() cameraCapture = cv2.VideoCapture(0) # force the camera to use MJPG format, or it will be very slow to capture cameraCapture.set(cv2.CAP_PROP_FOURCC,cv2.VideoWriter_fourcc('M','J','P','G')) easy_run(main) cameraCapture.release()
{"hexsha": "471ee2f00d2193c21489c81e97fd3baf4de0f397", "size": 848, "ext": "py", "lang": "Python", "max_stars_repo_path": "References/opencv/chap_02/camera_easygraphics.py", "max_stars_repo_name": "royqh1979/python_libs_usage", "max_stars_repo_head_hexsha": "57546d5648d8a6b7aca7d7ff9481aa7cd4d8f511", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "References/opencv/chap_02/camera_easygraphics.py", "max_issues_repo_name": "royqh1979/python_libs_usage", "max_issues_repo_head_hexsha": "57546d5648d8a6b7aca7d7ff9481aa7cd4d8f511", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "References/opencv/chap_02/camera_easygraphics.py", "max_forks_repo_name": "royqh1979/python_libs_usage", "max_forks_repo_head_hexsha": "57546d5648d8a6b7aca7d7ff9481aa7cd4d8f511", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4074074074, "max_line_length": 78, "alphanum_fraction": 0.7087264151, "include": true, "reason": "import numpy", "num_tokens": 214}
# r^2 based on the latest measured y-values import numpy as np # Calculate r^2 based on the latest measured y-values # measured_y and estimated_y must be vectors. def r2lm(measured_y, estimated_y): measured_y = np.array(measured_y).flatten() estimated_y = np.array(estimated_y).flatten() return float(1 - sum((measured_y - estimated_y) ** 2) / sum((measured_y[1:] - measured_y[:-1]) ** 2))
{"hexsha": "26d451467747756127fb560030ec5fc36fac74e6", "size": 414, "ext": "py", "lang": "Python", "max_stars_repo_path": "r2lm.py", "max_stars_repo_name": "hkaneko1985/r2lm", "max_stars_repo_head_hexsha": "a04458c27a662ea11ba04433bbf9e675bbea330b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-17T08:07:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-17T08:07:06.000Z", "max_issues_repo_path": "r2lm.py", "max_issues_repo_name": "hkaneko1985/r2lm", "max_issues_repo_head_hexsha": "a04458c27a662ea11ba04433bbf9e675bbea330b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "r2lm.py", "max_forks_repo_name": "hkaneko1985/r2lm", "max_forks_repo_head_hexsha": "a04458c27a662ea11ba04433bbf9e675bbea330b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6363636364, "max_line_length": 106, "alphanum_fraction": 0.6956521739, "include": true, "reason": "import numpy", "num_tokens": 113}
import tensorflow as tf
import numpy as np
from tqdm import tqdm

from data import ablate_interactions


def compile_model(model, learning_rate=0.005):
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    loss = tf.keras.losses.MeanSquaredError()
    metrics = [tf.keras.metrics.MeanSquaredError()]
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)


def get_interactions(x_train, x_test, model, interaction_function):
    interactions = interaction_function(model, x_test, baseline=x_train)
    # Return the computed interaction attributions so callers can use them.
    return interactions


def get_default_model(num_features):
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Input(shape=(num_features,)))
    model.add(tf.keras.layers.Dense(units=64, activation=tf.keras.activations.relu))
    model.add(tf.keras.layers.Dense(units=64, activation=tf.keras.activations.relu))
    model.add(tf.keras.layers.Dense(units=64, activation=tf.keras.activations.relu))
    model.add(tf.keras.layers.Dense(units=1, activation=None))
    return model


def get_performance(x_train, x_test, model, random_weights, spec_df,
                    interactions_train, interactions_test,
                    k=0, num_iters=25, batch_size=128, epochs=200,
                    use_random_draw=False):
    test_performances = []
    for _ in tqdm(range(num_iters)):
        y_train = ablate_interactions(x_train, interactions_train, spec_df, k,
                                      using_random_draw=use_random_draw)
        y_test = ablate_interactions(x_test, interactions_test, spec_df, k,
                                     using_random_draw=use_random_draw)

        model.set_weights(random_weights)
        callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, mode='min')
        model.fit(x=x_train,
                  y=y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  verbose=0,
                  validation_split=0.2,
                  callbacks=[callback])
        _, test_perf = model.evaluate(x=x_test, y=y_test, batch_size=batch_size, verbose=0)
        test_performances.append(test_perf)

    mean_test_performance = np.mean(test_performances)
    sd_test_performance = np.std(test_performances)
    print('Finished training ({:.3f} +- {:.3f})'.format(mean_test_performance, sd_test_performance))
    return mean_test_performance, sd_test_performance
{"hexsha": "29b5703dd3d6934569b4e1de68fcd1bff3680423", "size": 3074, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarking/remove_and_retrain/utils.py", "max_stars_repo_name": "Locust2520/path_explain", "max_stars_repo_head_hexsha": "45d1cd6690060c8c9a7b57f72bff3b7c66dbd815", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 145, "max_stars_repo_stars_event_min_datetime": "2020-02-10T23:55:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T18:05:57.000Z", "max_issues_repo_path": "benchmarking/remove_and_retrain/utils.py", "max_issues_repo_name": "Locust2520/path_explain", "max_issues_repo_head_hexsha": "45d1cd6690060c8c9a7b57f72bff3b7c66dbd815", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-09-10T11:53:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-11T17:53:23.000Z", "max_forks_repo_path": "benchmarking/remove_and_retrain/utils.py", "max_forks_repo_name": "Locust2520/path_explain", "max_forks_repo_head_hexsha": "45d1cd6690060c8c9a7b57f72bff3b7c66dbd815", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2020-02-19T14:18:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T01:57:44.000Z", "avg_line_length": 39.9220779221, "max_line_length": 100, "alphanum_fraction": 0.5318802863, "include": true, "reason": "import numpy", "num_tokens": 528}
""" Adapted from: https://github.com/ChihebTrabelsi/deep_complex_networks/blob/master/musicnet/scripts/resample.py Instructions: wget https://homes.cs.washington.edu/~thickstn/media/musicnet.npz python3 -u resample.py musicnet.npz musicnet_11khz.npz 44100 11000 """ from __future__ import print_function import argparse import numpy from intervaltree import Interval, IntervalTree from resampy import resample def resample_musicnet(file_in, file_out, frame_rate, frame_rate_out): ratio = frame_rate_out / float(frame_rate) print('.. resampling {} ({}Hz) into {} ({}Hz)'.format( file_in, frame_rate, file_out, frame_rate_out)) print('.. sampling with ratio {}'.format(ratio)) resampled_data = {} with open(file_in, 'rb') as f_in: np_load_old = numpy.load numpy.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k) data_in = numpy.load(file_in, encoding='latin1') numpy.load = np_load_old n_files = len(data_in.keys()) for i, key in enumerate(data_in): print('.. aggregating {} ({} / {})'.format(key, i, n_files)) data = data_in[key] data[0] = resample(data[0], frame_rate, frame_rate_out) resampled_intervals = [] for interval in data[1]: resampled_begin = int(interval.begin * ratio) resampled_end = int(interval.end * ratio) resampled_interval = Interval( resampled_begin, resampled_end, interval.data) resampled_intervals.append(resampled_interval) data[1] = IntervalTree(resampled_intervals) resampled_data[key] = data print('.. saving output') with open(file_out, 'wb') as f_out: numpy.savez(f_out, **resampled_data) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('file_in') parser.add_argument('file_out') parser.add_argument('frame_rate', type=int) parser.add_argument('frame_rate_out', type=int) resample_musicnet(**parser.parse_args().__dict__)
{"hexsha": "5a09e2bd8fcee5688b323cf964d71b4254125180", "size": 2172, "ext": "py", "lang": "Python", "max_stars_repo_path": "resample.py", "max_stars_repo_name": "chiuhans111/RSE", "max_stars_repo_head_hexsha": "152639e51b76120f1006bb64eb3689a054e01b92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2020-04-24T01:03:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T18:19:30.000Z", "max_issues_repo_path": "resample.py", "max_issues_repo_name": "chiuhans111/RSE", "max_issues_repo_head_hexsha": "152639e51b76120f1006bb64eb3689a054e01b92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-09-09T13:26:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:37:05.000Z", "max_forks_repo_path": "resample.py", "max_forks_repo_name": "chiuhans111/RSE", "max_forks_repo_head_hexsha": "152639e51b76120f1006bb64eb3689a054e01b92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-11-25T14:26:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T10:18:40.000Z", "avg_line_length": 36.2, "max_line_length": 111, "alphanum_fraction": 0.6376611418, "include": true, "reason": "import numpy", "num_tokens": 508}
"""Test module for loss functions.""" from typing import Tuple import numpy as np import pytest import pandas as pd import tensorflow as tf from .. import datasets from ..keras import losses, models from ..keras import layers as pypsps_layers from .. import utils, inference from ..keras import metrics from pypress.keras import layers as press_layers from pypress.keras import regularizers tfk = tf.keras def _test_data() -> Tuple[np.ndarray, np.ndarray]: y_true = np.array([0.0, 1.0, 2.0]) y_pred = np.array([[0.0, 1.0], [-1, 0.1], [0.1, 0.5]]) return y_true, y_pred @pytest.mark.parametrize( "reduction,expected_len", [("auto", 1), ("sum", 1), ("sum_over_batch_size", 1), ("none", 3)], ) def test_negloglik_normal_loss(reduction, expected_len): y_true, y_pred = _test_data() loss = losses.NegloglikNormal(reduction=reduction)( y_true=y_true.astype("float32"), y_pred=y_pred.astype("float32") ) if expected_len == 1: assert not len(loss.numpy().shape) else: assert loss.shape[0] == expected_len def test_psps_model_and_causal_loss(): pypsps_outcome_loss = losses.OutcomeLoss( loss=losses.NegloglikNormal(reduction="none"), reduction="auto" ) pypsps_treat_loss = losses.TreatmentLoss( loss=tf.keras.losses.BinaryCrossentropy(reduction="none"), reduction="auto" ) pypsps_causal_loss = losses.CausalLoss( outcome_loss=pypsps_outcome_loss, treatment_loss=pypsps_treat_loss, alpha=1.0, outcome_loss_weight=0.0, predictive_states_regularizer=tf.keras.regularizers.l2(0.1), reduction="auto", ) ks_data = datasets.KangSchafer(n_samples=1000, true_ate=10).run() inputs, outputs = ks_data.to_keras_inputs_outputs() assert outputs.shape == (1000, 2) tf.random.set_seed(10) model = models.build_toy_model( n_states=3, n_features=ks_data.n_features, compile=True ) preds = model.predict(inputs) outcome_pred, const_scale, propensity_score, weights = utils.split_y_pred(preds) assert outcome_pred.shape == (1000, 3) # (obs, states) assert const_scale.shape == (1000, 3) assert propensity_score.shape[0] == 1000 assert weights.shape == (1000, 3) causal_loss = pypsps_causal_loss(outputs, preds) assert causal_loss.numpy() == pytest.approx(34.08, 0.1) def test_end_to_end_dataset_model_fit(): np.random.seed(10) ks_data = datasets.KangSchafer(n_samples=1000, true_ate=10).run() tf.random.set_seed(10) model = models.build_toy_model( n_states=3, n_features=ks_data.features.shape[1], compile=True ) inputs, outputs = ks_data.to_keras_inputs_outputs() history = model.fit( inputs, outputs, epochs=2, batch_size=64, verbose=2, validation_split=0.2, ) l = history.history["loss"] assert l[0] > l[-1] preds = model.predict(inputs) assert preds.shape[0] == ks_data.n_samples ate = inference.predict_ate(model, inputs[0]) assert ate > 0
{"hexsha": "a4654647cd22678a663c53bbf9b5e1243814f1de", "size": 3066, "ext": "py", "lang": "Python", "max_stars_repo_path": "pypsps/tests/test_losses.py", "max_stars_repo_name": "gmgeorg/pypsps", "max_stars_repo_head_hexsha": "39fe4299772c569bd33b94d10ebc7f3883815756", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-03T13:27:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T14:02:12.000Z", "max_issues_repo_path": "pypsps/tests/test_losses.py", "max_issues_repo_name": "gmgeorg/pypsps", "max_issues_repo_head_hexsha": "39fe4299772c569bd33b94d10ebc7f3883815756", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pypsps/tests/test_losses.py", "max_forks_repo_name": "gmgeorg/pypsps", "max_forks_repo_head_hexsha": "39fe4299772c569bd33b94d10ebc7f3883815756", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2, "max_line_length": 84, "alphanum_fraction": 0.6797129811, "include": true, "reason": "import numpy", "num_tokens": 842}
def configuration(parent_package="", top_path=None):
    from numpy.distutils.misc_util import Configuration

    config = Configuration("em", parent_package, top_path)

    config.add_subpackage("fdem")
    config.add_subpackage("tdem")

    return config
{"hexsha": "1e328e4c7c65a2ee23365091ae39e8425fb43deb", "size": 257, "ext": "py", "lang": "Python", "max_stars_repo_path": "geoana/em/setup.py", "max_stars_repo_name": "simpeg/geoana", "max_stars_repo_head_hexsha": "417e23a0a689da19112e5fd361f823a2abd8785a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2017-11-14T12:29:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T18:36:28.000Z", "max_issues_repo_path": "geoana/em/setup.py", "max_issues_repo_name": "simpeg/geoana", "max_issues_repo_head_hexsha": "417e23a0a689da19112e5fd361f823a2abd8785a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2016-09-02T02:44:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:41:33.000Z", "max_forks_repo_path": "geoana/em/setup.py", "max_forks_repo_name": "simpeg/geoana", "max_forks_repo_head_hexsha": "417e23a0a689da19112e5fd361f823a2abd8785a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-03-07T22:07:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-14T20:08:33.000Z", "avg_line_length": 25.7, "max_line_length": 58, "alphanum_fraction": 0.7431906615, "include": true, "reason": "from numpy", "num_tokens": 54}
\section{Options, Part 2}

\subsection*{Binomial model: risk-neutral pricing}
Solve by replication: hold $\delta$ shares of the stock and $b$ dollars of the riskless bond.

$\delta u S_0 + b (1+r) = C_u$ \\
$\delta d S_0 + b (1+r) = C_d$

Solution: $\delta = \frac{C_u-C_d}{(u-d)S_0}$ , $b=\frac{1}{1+r}\frac{uC_d-dC_u}{u-d}$ \\
Then: $C_0 = \delta S_0 + b$ \\
$ C_0 = \frac{C_u-C_d}{u-d} + \frac{1}{1+r} \frac{uC_d-dC_u}{u-d}$ \\

\subsection*{Risk neutral probability}
$q_u=\frac{(1+r)-d}{u-d}$, $q_d = \frac{u-(1+r)}{u-d}$

Then $C_0=\frac{q_u C_u + q_d C_d}{1+r} = \frac{E^Q[C_T]}{1+r}$

where $E^Q[\cdot]$ is the expectation under the probability $Q=(q,1-q)$ with $q=q_u$, which is called the risk-neutral probability.

\subsection*{State prices and risk-neutral probabilities}
$\Phi_u = \frac{q}{1+r}$ , $\Phi_d = \frac{1-q}{1+r}$

$\Phi_{uu} = \frac{q^2}{(1+r)^2}$ , $\Phi_{ud} = \Phi_{du} = \frac{q(1-q)}{(1+r)^2}$ , $\Phi_{dd} = \frac{(1-q)^2}{(1+r)^2}$

With state prices, we can price any state-contingent payoff as a portfolio of state-contingent claims: mathematically equivalent to the risk-neutral valuation formula.

\subsection*{Implementing the binomial model}
\begin{itemize}
\item As we reduce the length of the time step, holding the maturity fixed, the binomial distribution of log returns converges to the Normal distribution.
\item Key model parameters $u$ and $d$ need to be chosen to reflect the distribution of the stock return.
\end{itemize}

One choice is: $u=\exp(\sigma \sqrt{T/n})$, $d=\frac{1}{u}$, $p=\frac{1}{2} + \frac{1}{2} \frac{\mu}{\sigma} \sqrt{\frac{T}{n}}$

\subsection*{Black-Scholes-Merton formula}
$C_0 = S_0 N(x) - K e^{-rT} N (x-\sigma \sqrt{T})$ \\
$x = \frac{\ln(\frac{S_0}{K e^{-rT}})}{\sigma \sqrt{T}} + \frac{1}{2} \sigma \sqrt{T}$ \\
In Excel: $N(x)$ = \texttt{NORM.S.DIST(x,TRUE)}

The call is equivalent to a levered long position in the stock; $S_0 N(x)$ is the amount invested in the stock; $K e^{-rT} N (x-\sigma \sqrt{T})$ is the dollar amount borrowed.

Equivalent formulation: $ C(S_t, t) = N(d_1)S_t - N(d_2)Ke^{-r(T - t)} $ \\
$ d_1 = \frac{1}{\sigma\sqrt{T - t}}\left[\ln\left(\frac{S_t}{K}\right) + \left(r + \frac{\sigma^2}{2}\right)(T - t)\right] $\\
$ d_2 = d_1 - \sigma\sqrt{T - t} $

The price of the corresponding put option, by put-call parity, is: $ P(S_t, t) = Ke^{-r(T - t)} - S_t + C(S_t, t) = N(-d_2) Ke^{-r(T - t)} - N(-d_1) S_t $

\subsection*{Option Greeks}
Delta: $\delta = \frac{\partial C}{\partial S}$

Omega: $\Omega = \frac{\partial C}{\partial S} \frac{S}{C}$

Gamma: $\Gamma = \frac{\partial \delta}{\partial S} = \frac{\partial^2 C}{\partial S^2} $

Theta: $\Theta = \frac{\partial C}{\partial t}$

Vega: $\mathcal{V} = \frac{\partial C}{\partial \sigma}$
{"hexsha": "20458728cbe20c398c63c87f5fa4a5970f2400c8", "size": 2712, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "15.415.2x/assets/week_13.tex", "max_stars_repo_name": "j053g/cheatsheets", "max_stars_repo_head_hexsha": "22f7a84879c04d44de40467ddcc0f6e551b812c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-12-14T08:49:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T17:26:15.000Z", "max_issues_repo_path": "15.415.2x/assets/week_13.tex", "max_issues_repo_name": "j053g/cheatsheets", "max_issues_repo_head_hexsha": "22f7a84879c04d44de40467ddcc0f6e551b812c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "15.415.2x/assets/week_13.tex", "max_forks_repo_name": "j053g/cheatsheets", "max_forks_repo_head_hexsha": "22f7a84879c04d44de40467ddcc0f6e551b812c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3043478261, "max_line_length": 128, "alphanum_fraction": 0.6286873156, "num_tokens": 1015}
[STATEMENT] lemma finite_set_sum: assumes "finite A" and "\<forall>i\<in>A. finite (B i)" shows "finite (\<Sum>i\<in>A. B i)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. finite (sum B A) [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: finite A \<forall>i\<in>A. finite (B i) goal (1 subgoal): 1. finite (sum B A) [PROOF STEP] by (induct set: finite, simp, simp add: finite_set_plus)
{"llama_tokens": 172, "file": null, "length": 2}
import datetime
import pandas as pd
import numpy as np
import os
from tqdm import tqdm


def interact_feature_engineer(samples, data, uid, iid, time_col):
    date_ths = str(data[time_col].max())

    last_3months = 90
    last_3months_date = datetime.datetime.strptime(date_ths, '%Y-%m-%d %H:%M:%S') - datetime.timedelta(
        days=last_3months)
    data_l3m = data[data[time_col] >= last_3months_date]

    last_month = 30
    last_month_date = datetime.datetime.strptime(date_ths, '%Y-%m-%d %H:%M:%S') - datetime.timedelta(days=last_month)
    data_lm = data[data[time_col] >= last_month_date]

    last_week = 7
    last_week_date = datetime.datetime.strptime(date_ths, '%Y-%m-%d %H:%M:%S') - datetime.timedelta(days=last_week)
    data_lw = data[data[time_col] >= last_week_date]

    data_ = data[data[uid].isin(samples[uid].unique())]

    # Days since the candidate item was last purchased
    tmp = data.groupby([uid, iid])[time_col].agg('max').reset_index()
    tmp['purchase_corr_item_max_time'] = (
            datetime.datetime.strptime(date_ths, '%Y-%m-%d %H:%M:%S') - tmp[time_col]).dt.days
    samples = samples.merge(tmp[[uid, iid, 'purchase_corr_item_max_time']], on=[uid, iid], how='left')

    # Number of past purchases of the candidate item
    tmp = data.groupby([uid, iid])[time_col].agg('count').reset_index()
    tmp.columns = [uid, iid, 'purchase_corr_item_cnt']
    samples = samples.merge(tmp, on=[uid, iid], how='left')

    cols = ['count']

    # Number of purchases of the item in the last 3 days
    last_3days = 3  # 30
    last_3days_date = datetime.datetime.strptime(date_ths, '%Y-%m-%d %H:%M:%S') - datetime.timedelta(days=last_3days)
    tmp = data_lw[data_lw[time_col] >= last_3days_date].groupby([uid, iid])[iid].agg(
        cols).reset_index()
    new_col = ['user_item_last_3days_{}'.format(col) for col in cols]
    tmp.columns = [uid, iid] + new_col
    samples = samples.merge(tmp, on=[uid, iid], how='left')

    # Number of purchases of the item in the last 2 weeks
    last_2weeks = 14
    last_2weeks_date = datetime.datetime.strptime(date_ths, '%Y-%m-%d %H:%M:%S') - datetime.timedelta(days=last_2weeks)
    tmp = data_lm[data_lm[time_col] >= last_2weeks_date].groupby([uid, iid])[iid].agg(
        cols).reset_index()
    new_col = ['user_item_last_2weeks_{}'.format(col) for col in cols]
    tmp.columns = [uid, iid] + new_col
    samples = samples.merge(tmp, on=[uid, iid], how='left')

    # Number of purchases of the item in the last month
    tmp = data_lm.groupby([uid, iid])[iid].agg(cols).reset_index()
    new_col = ['user_item_last_month_{}'.format(col) for col in cols]
    tmp.columns = [uid, iid] + new_col
    samples = samples.merge(tmp, on=[uid, iid], how='left')

    # Number of purchases of the item in the last 3 months
    tmp = data_l3m.groupby([uid, iid])[iid].agg(cols).reset_index()
    new_col = ['user_item_last_3months_{}'.format(col) for col in cols]
    tmp.columns = [uid, iid] + new_col
    samples = samples.merge(tmp, on=[uid, iid], how='left')

    # Number of purchases of the item in the last week
    tmp = data_lw.groupby([uid, iid])[iid].agg(cols).reset_index()
    new_col = ['user_item_last_week_{}'.format(col) for col in cols]
    tmp.columns = [uid, iid] + new_col
    samples = samples.merge(tmp, on=[uid, iid], how='left')

    # Number of purchases of the item on the most recent day
    tmp = data_lw[data_lw[time_col] == data_lw[time_col].max()].groupby([uid, iid])[iid].agg(
        cols).reset_index()
    new_col = ['user_item_last_day_{}'.format(col) for col in cols]
    tmp.columns = [uid, iid] + new_col
    samples = samples.merge(tmp, on=[uid, iid], how='left')

    # Days since the user's most recent interaction in the history
    tmp = data_.groupby(uid)[time_col].agg('max').reset_index()
    tmp['latest_purchase_time_sub'] = (
            datetime.datetime.strptime(date_ths, '%Y-%m-%d %H:%M:%S') - tmp[time_col]).dt.days
    samples = samples.merge(tmp[[uid, 'latest_purchase_time_sub']], on=uid, how='left')

    del data_, tmp

    return samples
{"hexsha": "c78482e197260f2ca36266aeb8fb5b77a461cc79", "size": 3765, "ext": "py", "lang": "Python", "max_stars_repo_path": "autox/autox_recommend/recall_and_rank/feature_engineer/interact_feature_engineer.py", "max_stars_repo_name": "OneToolsCollection/4paradigm-AutoX", "max_stars_repo_head_hexsha": "f8e838021354de17f5bb9bc44e9d68d12dda6427", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "autox/autox_recommend/recall_and_rank/feature_engineer/interact_feature_engineer.py", "max_issues_repo_name": "OneToolsCollection/4paradigm-AutoX", "max_issues_repo_head_hexsha": "f8e838021354de17f5bb9bc44e9d68d12dda6427", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autox/autox_recommend/recall_and_rank/feature_engineer/interact_feature_engineer.py", "max_forks_repo_name": "OneToolsCollection/4paradigm-AutoX", "max_forks_repo_head_hexsha": "f8e838021354de17f5bb9bc44e9d68d12dda6427", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9239130435, "max_line_length": 119, "alphanum_fraction": 0.6544488712, "include": true, "reason": "import numpy", "num_tokens": 1219}
#Import packages
import pandas as pd
import numpy as np
from SyntheticControlMethods import Synth, DiffSynth

#Import data
data = pd.read_csv("/Users/oscarengelbrektson/Documents/test_dataset.csv")

#Fit Synthetic Control
sc = Synth(data, "y", "ID", "Time", 10, "A", n_optim=30, pen=1)

sc.plot(["original", "pointwise", "cumulative"], treated_label="California",
        synth_label="Synthetic California", treatment_label="Proposal 99")

#Plot validity tests
sc.in_space_placebo()
#np.savetxt("pre_post_rmspe_ratio.csv", synth.pre_post_rmspe_ratio, delimiter=",")
sc.plot(['rmspe ratio'], in_space_exclusion_multiple=None)
{"hexsha": "7ab76e56914b160aac120f49d79521b79ee758bb", "size": 632, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/test_dataset.py", "max_stars_repo_name": "OscarEngelbrektson/SyntheticControl", "max_stars_repo_head_hexsha": "3f496b36ed46c4e5c1e08ce6e903013e6eeb29df", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 68, "max_stars_repo_stars_event_min_datetime": "2020-10-19T12:22:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T04:09:07.000Z", "max_issues_repo_path": "examples/test_dataset.py", "max_issues_repo_name": "hayeszhou/SyntheticControlMethods", "max_issues_repo_head_hexsha": "3f496b36ed46c4e5c1e08ce6e903013e6eeb29df", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2021-02-12T05:12:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-15T00:19:40.000Z", "max_forks_repo_path": "examples/test_dataset.py", "max_forks_repo_name": "hayeszhou/SyntheticControlMethods", "max_forks_repo_head_hexsha": "3f496b36ed46c4e5c1e08ce6e903013e6eeb29df", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-11-20T10:39:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-26T22:58:24.000Z", "avg_line_length": 30.0952380952, "max_line_length": 82, "alphanum_fraction": 0.7515822785, "include": true, "reason": "import numpy", "num_tokens": 168}
/*! * @author Shin'ichiro Nakaoka * @author Hisashi Ikari */ #include <boost/python.hpp> #include <boost/filesystem.hpp> #include <cnoid/ExecutablePath> #include <cnoid/FloatingNumberString> #include <cnoid/FileUtil> #include <cnoid/AbstractSeq> #include <cnoid/MultiSeq> #include <cnoid/MultiValueSeq> #include <cnoid/MultiSE3Seq> #include <cnoid/MultiAffine3Seq> #include <cnoid/Array2D> #include <cnoid/EigenTypes> //#include <boost/python/cross_module.hpp> using namespace boost::python; using namespace cnoid; /*! * @brief Provides the Util package of Choreonoid. * And we want to provide this classes to the other so(dll) like CNOID_EXPORT. * @note Util is used from so many others. Therefore, export_module(like CNOID_EXPORT) is needed. * * @reference http://goo.gl/eNOVqh * @reference http://goo.gl/nXYCSR */ BOOST_PYTHON_MODULE(Util) { /*! * @brief It will provide the file utility. */ def("shareDirectory", &cnoid::shareDirectory, return_value_policy<copy_const_reference>()); /*! * @brief It will provide the file utility for getting executable path. */ def("executablePath", &cnoid::executablePath, return_value_policy<copy_const_reference>()); /*! * @brief It will provide the file utility for getting executable path. */ def("executableBasename", &cnoid::executableBasename, return_value_policy<copy_const_reference>()); /*! * @brief It will provide the file utility for getting executable path. */ def("executableTopDirectory", &cnoid::executableTopDirectory, return_value_policy<copy_const_reference>()); /*! * @brief It will provide the float utility. */ class_ <FloatingNumberString, boost::noncopyable>("FloatingNumberString", init<const std::string&>()) .def("set", &FloatingNumberString::set) .def("setPositiveValue", &FloatingNumberString::setPositiveValue, return_value_policy<return_by_value>()) .def("setNonNegativeValue", &FloatingNumberString::setNonNegativeValue, return_value_policy<return_by_value>()) .def("value", &FloatingNumberString::value, return_value_policy<return_by_value>()); /*! * @brief This is a definition for SE3. */ void (SE3::*set)(const Vector3 translation, const Matrix3& R) = &SE3::set; Vector3& (SE3::*translation)() = &SE3::translation; Quat& (SE3::*rotation)() = &SE3::rotation; class_<SE3, boost::noncopyable>("SE3", init<>()) .def("set", set) .def("translation", translation, return_value_policy<return_by_value>()) .def("rotation", rotation, return_value_policy<return_by_value>()); /*! * @brief It will provide the access of the Frame and Part (Array2D) in the BodyMotion. * Only definition of the type. Please access in the wrapper. 
*/ class_<AbstractSeq, boost::noncopyable>("AbstractSeq", no_init) .def("seqType", &AbstractSeq::seqType, return_value_policy<return_by_value>()) .def("getFrameRate", &AbstractSeq::getFrameRate, return_value_policy<return_by_value>()) .def("setFrameRate", &AbstractSeq::setFrameRate) .def("getTimeStep", &AbstractSeq::getTimeStep, return_value_policy<return_by_value>()) .def("setTimeStep", &AbstractSeq::setTimeStep) .def("getTimeOfFrame", &AbstractSeq::getTimeOfFrame, return_value_policy<return_by_value>()) .def("getNumFrames", &AbstractSeq::getNumFrames, return_value_policy<return_by_value>()) .def("setNumFrames", &AbstractSeq::setNumFrames, (args("n"), args("clearNewElements") = false)) .def("setTimeLength", &AbstractSeq::setTimeLength, (args("n"), args("clearNewElements") = false)) .def("getTimeLength", &AbstractSeq::getTimeLength, return_value_policy<return_by_value>()) .def("seqContentName", &AbstractSeq::seqContentName, return_value_policy<return_by_value>()) .def("setSeqContentName", &AbstractSeq::setSeqContentName) .def("seqMessage", &AbstractSeq::seqMessage, return_value_policy<return_by_value>()); /*! * @brief It will provide the access of the Frame and Part (Array2D) in the BodyMotion. * Only definition of the type. Please access in the wrapper. */ class_ <AbstractMultiSeq, bases<AbstractSeq>, boost::noncopyable >("AbstractMultiSeq", no_init) .def("setDimension", &AbstractMultiSeq::setDimension, (args("numFrames"), args("numParts"), args("clearNewElements") = false)) .def("setNumParts", &AbstractMultiSeq::setNumParts, (args("numParts"), args("clearNewElements") = false)) .def("getNumParts", &AbstractMultiSeq::getNumParts, return_value_policy<return_by_value>()) .def("partIndex", &AbstractMultiSeq::partIndex, return_value_policy<return_by_value>()) .def("partLabel", &AbstractMultiSeq::partLabel, return_value_policy<return_by_value>()); /*! * @brief It will provide the access of the Frame and Part (Array2D) in the BodyMotion. * Only definition of the type. Please access in the wrapper. */ typedef MultiSeq< double, std::allocator<double> > MultiSeqDouble; #ifdef _INVALID_VERSION typedef MultiSeq< SE3, std::allocator<SE3> > MultiSeqSE3; typedef MultiSeq< Affine3, std::allocator<Affine3> > MultiSeqAffine; #endif // for double concrete type. class_<MultiSeqDouble::Frame, boost::noncopyable>("FrameDouble", no_init) .def("empty", &MultiSeqDouble::Frame::empty) .def("size", &MultiSeqDouble::Frame::size) .def("__getitem__", &MultiSeqDouble::Frame::at, return_value_policy<return_by_value>()); class_<MultiSeqDouble::Part, boost::noncopyable>("PartDouble", no_init) .def("empty", &MultiSeqDouble::Part::empty) .def("size", &MultiSeqDouble::Part::size) .def("__getitem__", &MultiSeqDouble::Part::at, return_value_policy<return_by_value>()); #ifdef _INVALID_VERSION // for SE3 concrete type. class_<MultiSeqSE3::Frame, boost::noncopyable>("FrameSE3", no_init) .def("empty", &MultiSeqSE3::Frame::empty) .def("size", &MultiSeqSE3::Frame::size) .def("__getitem__", &MultiSeqSE3::Frame::at, return_value_policy<return_by_value>()); class_<MultiSeqSE3::Part, boost::noncopyable>("PartSE3", no_init) .def("empty", &MultiSeqSE3::Part::empty) .def("size", &MultiSeqSE3::Part::size) .def("__getitem__", &MultiSeqSE3::Part::at, return_value_policy<return_by_value>()); #endif #ifdef _INVALID_VERSION // for Affine concrete type. 
class_<MultiSeqAffine::Frame, boost::noncopyable>("FrameAffine", no_init) .def("empty", &MultiSeqAffine::Frame::empty) .def("size", &MultiSeqAffine::Frame::size) .def("__getitem__", &MultiSeqAffine::Frame::at, return_value_policy<return_by_value>()); class_<MultiSeqAffine::Part, boost::noncopyable>("PartAffine", no_init) .def("empty", &MultiSeqAffine::Part::empty) .def("size", &MultiSeqAffine::Part::size) .def("__getitem__", &MultiSeqAffine::Part::at, return_value_policy<return_by_value>()); #endif /*! * @brief It will provide the access of the Frame and Part (Array2D) in the BodyMotion. * Only definition of the type. Please access in the wrapper. */ // for double concrete type. const MultiSeqDouble::Frame (MultiSeqDouble::*frameDouble)(int index) const = &MultiSeqDouble::frame; const MultiSeqDouble::Part (MultiSeqDouble::*partDouble)(int index) const = &MultiSeqDouble::part; #ifdef _INVALID_VERSION // for SE3 concrete type. const MultiSeqSE3::Frame (MultiSeqSE3::*frameSE3)(int index) const = &MultiSeqSE3::frame; const MultiSeqSE3::Part (MultiSeqSE3::*partSE3)(int index) const = &MultiSeqSE3::part; // for Affine concrete type. const MultiSeqAffine::Frame (MultiSeqAffine::*frameAffine)(int index) const = &MultiSeqAffine::frame; const MultiSeqAffine::Part (MultiSeqAffine::*partAffine)(int index) const = &MultiSeqAffine::part; #endif /*! * @note MultiSeq is a template class. The template class is not the instance. * Create a concrete class by providing a template parameter to the this MultiSeq. * However, We define as another concrete class and this class (such as MultiValueSeq). * The reason is because the strong type is different. * And please forgive the presence of multiple same method. */ // for double concrete type. const double& (MultiSeqDouble::*atDouble)(int rowIndex, int colIndex) const = &MultiSeqDouble::at; const MultiSeqDouble::Row (MultiSeqDouble::*rowDouble)(int rowIndex) const = &MultiSeqDouble::row; const MultiSeqDouble::Column (MultiSeqDouble::*columnDouble)(int colIndex) const = &MultiSeqDouble::column; #ifdef _INVALID_VERSION // for SE3 concrete type. const SE3& (MultiSeqDouble::*atSE3)(int rowIndex, int colIndex) const = &MultiSeqSE3::at; const MultiSeqSE3::Row (MultiSeqSE3::*rowSE3)(int rowIndex) const = &MultiSeqSE3::row; const MultiSeqSE3::Column (MultiSeqSE3::*columnSE3)(int colIndex) const = &MultiSeqSE3::column; // for Affine concrete type. const Affine3& (MultiSeqAffine::*atAffine)(int rowIndex, int colIndex) const = &MultiSeqAffine::at; const MultiSeqAffine::Row (MultiSeqAffine::*rowAffine)(int rowIndex) const = &MultiSeqAffine::row; const MultiSeqAffine::Column (MultiSeqAffine::*columnAffine)(int colIndex) const = &MultiSeqAffine::column; #endif // for double concrete type. 
class_ <MultiSeqDouble, bases<AbstractMultiSeq>, boost::noncopyable >("AbstractMultiSeqDouble", no_init) .def("frameRate", &MultiSeqDouble::frameRate, return_value_policy<return_by_value>()) .def("timeStep", &MultiSeqDouble::timeStep, return_value_policy<return_by_value>()) .def("numFrames", &MultiSeqDouble::numFrames, return_value_policy<return_by_value>()) .def("numParts", &MultiSeqDouble::numParts, return_value_policy<return_by_value>()) .def("timeLength", &MultiSeqDouble::timeLength, return_value_policy<return_by_value>()) .def("frameOfTime", &MultiSeqDouble::frameOfTime, return_value_policy<return_by_value>()) .def("timeOfFrame", &MultiSeqDouble::timeOfFrame, return_value_policy<return_by_value>()) .def("clampFrameIndex", &MultiSeqDouble::clampFrameIndex) .def("frame", frameDouble, return_value_policy<return_by_value>()) .def("part", partDouble, return_value_policy<return_by_value>()) .def("empty", &MultiSeqDouble::empty, return_value_policy<return_by_value>()) .def("resize", &MultiSeqDouble::resize) .def("resizeColumn", &MultiSeqDouble::resizeColumn) .def("rowSize", &MultiSeqDouble::rowSize, return_value_policy<return_by_value>()) .def("resizeRow", &MultiSeqDouble::resizeRow) .def("colSize", &MultiSeqDouble::colSize, return_value_policy<return_by_value>()) .def("at", atDouble, return_value_policy<return_by_value>()) .def("row", rowDouble, return_value_policy<return_by_value>()) .def("column", columnDouble, return_value_policy<return_by_value>()); #ifdef _INVALID_VERSION // for SE3 concrete type. class_ <MultiSeqSE3, bases<AbstractMultiSeq>, boost::noncopyable >("AbstractMultiSeqSE3", no_init) .def("frameRate", &MultiSeqDouble::frameRate, return_value_policy<return_by_value>()) .def("timeStep", &MultiSeqDouble::timeStep, return_value_policy<return_by_value>()) .def("numFrames", &MultiSeqDouble::numFrames, return_value_policy<return_by_value>()) .def("numParts", &MultiSeqDouble::numParts, return_value_policy<return_by_value>()) .def("timeLength", &MultiSeqDouble::timeLength, return_value_policy<return_by_value>()) .def("frameOfTime", &MultiSeqDouble::frameOfTime, return_value_policy<return_by_value>()) .def("timeOfFrame", &MultiSeqDouble::timeOfFrame, return_value_policy<return_by_value>()) .def("clampFrameIndex", &MultiSeqDouble::clampFrameIndex) .def("frame", frameSE3, return_value_policy<return_by_value>()) .def("part", partSE3, return_value_policy<return_by_value>()) .def("empty", &MultiSeqDouble::empty, return_value_policy<return_by_value>()) .def("resize", &MultiSeqDouble::resize) .def("resizeColumn", &MultiSeqDouble::resizeColumn) .def("rowSize", &MultiSeqDouble::rowSize, return_value_policy<return_by_value>()) .def("resizeRow", &MultiSeqDouble::resizeRow) .def("colSize", &MultiSeqDouble::colSize, return_value_policy<return_by_value>()) .def("at", atSE3, return_value_policy<return_by_value>()) .def("row", rowSE3, return_value_policy<return_by_value>()) .def("column", columnSE3, return_value_policy<return_by_value>()); #endif #ifdef _INVALID_VERSION // for Affine concrete type. 
class_ <MultiSeqAffine, bases<AbstractMultiSeq>, boost::noncopyable >("AbstractMultiSeqAffine", no_init) .def("frameRate", &MultiSeqDouble::frameRate, return_value_policy<return_by_value>()) .def("timeStep", &MultiSeqDouble::timeStep, return_value_policy<return_by_value>()) .def("numFrames", &MultiSeqDouble::numFrames, return_value_policy<return_by_value>()) .def("numParts", &MultiSeqDouble::numParts, return_value_policy<return_by_value>()) .def("timeLength", &MultiSeqDouble::timeLength, return_value_policy<return_by_value>()) .def("frameOfTime", &MultiSeqDouble::frameOfTime, return_value_policy<return_by_value>()) .def("timeOfFrame", &MultiSeqDouble::timeOfFrame, return_value_policy<return_by_value>()) .def("clampFrameIndex", &MultiSeqDouble::clampFrameIndex) .def("frame", frameAffine, return_value_policy<return_by_value>()) .def("part", partAffine, return_value_policy<return_by_value>()) .def("empty", &MultiSeqDouble::empty, return_value_policy<return_by_value>()) .def("resize", &MultiSeqDouble::resize) .def("resizeColumn", &MultiSeqDouble::resizeColumn) .def("rowSize", &MultiSeqDouble::rowSize, return_value_policy<return_by_value>()) .def("resizeRow", &MultiSeqDouble::resizeRow) .def("colSize", &MultiSeqDouble::colSize, return_value_policy<return_by_value>()) .def("at", atAffine, return_value_policy<return_by_value>()) .def("row", rowAffine, return_value_policy<return_by_value>()) .def("column", columnAffine, return_value_policy<return_by_value>()); #endif /*! * @brief Sequence basic information such as body motion. */ class_< MultiValueSeq, bases< MultiSeqDouble >, boost::noncopyable >("MultiValueSeq", init<>()); // Parent class has all method about this method of class. // But we can use all parent methods by this class. #ifdef _INVALID_VERSION /*! * @brief Sequence basic information such as body motion. */ class_< MultiSE3Seq, bases< MultiSeqSE3 >, boost::noncopyable >("MultiSE3Seq", init<>()); /*! * @brief Sequence basic information such as body motion. */ class_< MultiAffine3Seq, bases< MultiSeqAffine >, boost::noncopyable >("MultiAffineSeq", init<>()); #endif }
{"hexsha": "7878f66d602f7b1498a62d448d3acc923d6dcfd2", "size": 15249, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Util/python/PyUtil.cpp", "max_stars_repo_name": "snozawa/choreonoid", "max_stars_repo_head_hexsha": "12ab42ccbf287d68216637e55ddae8412771c752", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Util/python/PyUtil.cpp", "max_issues_repo_name": "snozawa/choreonoid", "max_issues_repo_head_hexsha": "12ab42ccbf287d68216637e55ddae8412771c752", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Util/python/PyUtil.cpp", "max_forks_repo_name": "snozawa/choreonoid", "max_forks_repo_head_hexsha": "12ab42ccbf287d68216637e55ddae8412771c752", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.1611842105, "max_line_length": 134, "alphanum_fraction": 0.7018820906, "num_tokens": 3946}
""" TODO: npy_void """ from __future__ import absolute_import import numpy from .capi import sctypebits scalar = dict( c_char = dict(\ ctype = 'signed char', init = ' = 0', argument_format = 'b', return_format = 'b', argument_title = 'a python integer (converting to C signed char)', return_title = 'a python integer (converting from C signed char)', ), c_short = dict(\ ctype = 'short int', init = ' = 0', argument_format = 'h', return_format = 'h', argument_title = 'a python integer (converting to C short int)', return_title = 'a python integer (converting from C short int)', ), c_int = dict(\ ctype = 'int', init = ' = 0', argument_format = 'i', return_format = 'i', argument_title = 'a python integer (converting to C int)', return_title = 'a python integer (converting from C int)', ), c_long = dict(\ ctype = 'long', init = ' = 0', argument_format = 'l', return_format = 'l', argument_title = 'a python integer (converting to C long int)', return_title = 'a python integer (converting from C long int)', ), c_long_long = dict(\ ctype = 'PY_LONG_LONG', init = ' = 0', argument_format = 'L', return_format = 'L', argument_title = 'a python integer (converting to C PY_LONG_LONG)', return_title = 'a python integer (converting from C PY_LONG_LONG)', ), c_unsigned_char = dict(\ ctype = 'unsigned char', init = ' = 0', argument_format = 'B', return_format = 'B', argument_title = 'a python integer (converting to C unsigned char)', return_title = 'a python integer (converting from C unsigned char)', ), c_unsigned_short = dict(\ ctype = 'unsigned short int', init = ' = 0', argument_format = 'H', return_format = 'H', argument_title = 'a python integer (converting to C unsigned short int)', return_title = 'a python integer (converting from C unsigned short int)', ), c_unsigned_int = dict(\ ctype = 'unsigned int', init = ' = 0', argument_format = 'I', return_format = 'I', argument_title = 'a python integer (converting to C unsigned int)', return_title = 'a python integer (converting from C unsigned int)', ), c_unsigned_long = dict(\ ctype = 'unsigned long', init = ' = 0', argument_format = 'k', return_format = 'k', argument_title = 'a python integer (converting to C unsigned long int)', return_title = 'a python integer (converting from C unsigned long int)', ), c_unsigned_long_long = dict(\ ctype = 'unsigned PY_LONG_LONG', init = ' = 0', argument_format = 'K', return_format = 'K', argument_title = 'a python integer (converting to C unsigned PY_LONG_LONG)', return_title = 'a python integer (converting from C unsigned PY_LONG_LONG)', ), c_float = dict(\ ctype = 'float', init = ' = 0.0', argument_format = 'f', return_format = 'f', argument_title = 'a python floating point number (converting to C float)', return_title = 'a python floating point number (converting from C float)', ), c_double = dict(\ ctype = 'double', init = ' = 0.0', argument_format = 'd', return_format = 'd', argument_title = 'a python floating point number (converting to C double)', return_title = 'a python floating point number (converting from C double)', ), c_Py_complex = dict(\ ctype = 'Py_complex', argument_format = 'D', return_format = 'D', init = ' = {0.0, 0.0}', argument_title = 'a python complex number (converting to C Py_complex structure)', return_title = 'a python complex number (converting from C Py_complex structure)', ), c_Py_ssize_t = dict(\ ctype = 'Py_ssize_t', argument_format = 'n', return_format = 'n', init = ' = 0', argument_title = 'a python integer (converting to C Py_ssize_t)', return_title = 'a python integer (converting 
from C Py_ssize_t)', ), c_char1 = dict(\ ctype = 'char', argument_format = 'c', return_format = 'c', init = " = '\\0'", argument_title = 'a python character (converting to C char)', return_title = 'a python character (converting from C char)', ), c_const_char_ptr = dict(\ ctype = 'const char *', argument_format = 'z', return_format = 'z', init = ' = NULL', argument_title = 'a python string or Unicode or None object (converting to C const char *)', return_title = 'a python string or None (converting from C char *)', ), c_char_ptr = dict(\ ctype = 'char *', argument_format = 'O&', argument_converter = 'pyobj_to_char_ptr', clean_argument_converter = 'clean_pyobj_to_char_ptr', return_format = 'z', init = ' = NULL', argument_title = 'a python string (converting to C char *)', return_title = 'a python string or None (converting from C char *)', ), c_Py_UNICODE_ptr = dict(\ ctype = 'Py_UNICODE*', argument_format ='u', return_format = 'u', init = ' = NULL', argument_title = 'a python Unicode object (converting to C Py_UNICODE*)', return_title = 'a python Unicode object or None (converting from C Py_UNICODE*)' ), py_bool = dict(\ ctype = 'PyBoolObject*', init = ' = NULL', pyctype = 'PyBool_Type', argument_format = 'O!', return_format = 'N', title = 'a python bool' ), py_int = dict(\ ctype = 'PyIntObject*', init = ' = NULL', pyctype = 'PyInt_Type', argument_format = 'O!', return_format = 'N', title = 'a python integer' ), py_long = dict(\ ctype = 'PyLongObject*', init = ' = NULL', pyctype = 'PyLong_Type', argument_format = 'O!', return_format = 'N', title = 'a python long integer' ), py_float = dict(\ ctype = 'PyFloatObject*', init = ' = NULL', pyctype = 'PyFloat_Type', argument_format = 'O!', return_format = 'N', title = 'a python floating point number' ), py_complex = dict(\ ctype = 'PyComplexObject*', init = ' = NULL', pyctype = 'PyComplex_Type', argument_format = 'O!', return_format = 'N', title = 'a python complex number' ), py_str = dict(\ ctype = 'PyStringObject*', init = ' = NULL', argument_format = 'S', return_format = 'N', title = 'a python string' ), py_unicode = dict(\ ctype = 'PyUnicodeObject*', init = ' = NULL', argument_format = 'U', return_format = 'N', title = 'a python Unicode object' ), py_buffer = dict(\ pyctype = 'PyBuffer_Type', ctype = 'PyBufferObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python buffer'), py_tuple = dict(\ pyctype = 'PyTuple_Type', ctype = 'PyTupleObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python tuple'), py_list = dict(\ pyctype = 'PyList_Type', ctype = 'PyListObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python list'), py_dict = dict(\ pyctype = 'PyDict_Type', ctype = 'PyDictObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python dictionary'), py_file = dict(\ pyctype = 'PyFile_Type', ctype = 'PyFileObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python file object'), py_instance = dict(\ pyctype = 'PyInstance_Type', ctype = 'PyObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python instance object'), py_function = dict(\ pyctype = 'PyFunction_Type', ctype = 'PyFunctionObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python function object'), py_method = dict(\ pyctype = 'PyMethod_Type', ctype = 'PyObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python instance method 
object'), py_module = dict(\ pyctype = 'PyModule_Type', ctype = 'PyObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python module object'), py_iter = dict(\ pyctype = 'PySeqIter_Type', ctype = 'PyObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python iterator'), py_property = dict(\ pyctype = 'PyProperty_Type', ctype = 'PyObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python property attribute'), py_slice = dict(\ pyctype = 'PySlice_Type', ctype = 'PyObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python slice object'), py_cell = dict(\ pyctype = 'PyCell_Type', ctype = 'PyCellObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL'), py_generator = dict(\ pyctype = 'PyGen_Type', ctype = 'PyGenObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL'), py_set = dict(\ pyctype = 'PySet_Type', ctype = 'PySetObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python set object'), py_frozenset = dict(\ pyctype = 'PyFrozenSet_Type', ctype = 'PySetObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python frozenset object'), py_cobject = dict(\ ctype = 'PyCObject*', argument_format = 'O', return_format = 'N', init = ' = NULL', title = 'a PyCObject object'), py_type = dict(\ pyctype = 'PyType_Type', ctype = 'PyTypeObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a python type object'), py_object = dict(\ ctype = 'PyObject*', argument_format = 'O', return_format = 'N', init = ' = NULL', title = 'a python object'), numeric_array = dict(\ pyctype = 'PyArray_Type', ctype = 'PyArrayObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a Numeric array', require_numeric = True, ), numpy_ndarray = dict(\ pyctype = 'PyArray_Type', ctype = 'PyArrayObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a numpy array', require_numpy = True, ), numpy_descr = dict(\ pyctype = 'PyArrayDescr_Type', ctype = 'PyArray_Descr*', argument_format = 'O!', return_format = 'N', init = ' = NULL', require_numpy = True, ), numpy_ufunc = dict(\ pyctype = 'PyUFunc_Type', ctype = 'PyUFuncObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', title = 'a numpy universal function', require_numpy = True, ), numpy_iter = dict(\ pyctype = 'PyArrayIter_Type', ctype = 'PyArrayIterObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', require_numpy = True, ), numpy_multiiter = dict(\ pyctype = 'PyArrayMultiIter_Type', ctype = 'PyArrayMultiIterObject*', argument_format = 'O!', return_format = 'N', init = ' = NULL', require_numpy = True, ), npy_bool = dict(\ ctype = 'npy_bool', init = ' = 0', argument_format = 'O&', argument_converter = 'pyobj_to_npy_bool', return_format = 'O&', return_converter = 'pyobj_from_npy_bool', argument_title = 'a python truth value (converting to C npy_bool)', return_title = 'a numpy bool', require_numpy = True, ), numpy_bool = dict(\ ctype = 'PyBoolScalarObject*', init = ' = NULL', argument_format = 'O&', argument_converter = 'pyobj_to_numpy_bool', return_format = 'N', require_numpy = True, argument_title = 'a python bool (converting to C PyBoolScalarObject*)', return_title = 'a numpy bool', ), numpy_string = dict(\ ctype = 'PyStringScalarObject*', init = ' = NULL', argument_format = 'O&', argument_converter = 'pyobj_to_numpy_string', return_format = 'N', 
require_numpy = True, argument_title = 'a python string (converting to C PyStringScalarObject*)', return_title = 'a numpy string', ), numpy_unicode = dict(\ ctype = 'PyUnicodeScalarObject*', init = ' = NULL', argument_format = 'O&', argument_converter = 'pyobj_to_numpy_unicode', return_format = 'N', require_numpy = True, argument_title = 'a python string (converting to C PyUnicodeScalarObject*)', return_title = 'a numpy unicode', ), npy_string = dict(\ typedef = 'npy_string', ctype = 'npy_string', init = ' = {NULL, 0}', argument_format = 'O&', argument_converter = 'pyobj_to_npy_string', clean_argument_converter = 'clean_pyobj_to_npy_string', return_format = 'O&', return_converter = 'pyobj_from_npy_string', require_numpy = True, argument_title = 'a python string (converting to C npy_string)', return_title = 'a numpy string', ), npy_unicode = dict(\ typedef = 'npy_unicode', ctype = 'npy_unicode', init = ' = {NULL, 0}', argument_format = 'O&', argument_converter = 'pyobj_to_npy_unicode', clean_argument_converter = 'clean_pyobj_to_npy_unicode', return_format = 'O&', return_converter = 'pyobj_from_npy_unicode', require_numpy = True, argument_title = 'a python string (converting to C npy_unicode)', return_title = 'a numpy unicode', ), numpy_void = dict(\ ctype = 'PyVoidScalarObject*', init = ' = NULL', argument_format = 'O&', argument_converter = 'pyobj_to_numpy_void', return_format = 'N', require_numpy = True, argument_title = 'a python string (converting to C PyVoidScalarObject*)', return_title = 'a numpy void', ), ) scalar['c_PY_LONG_LONG'] = scalar['c_long_long'] scalar['c_unsigned_PY_LONG_LONG'] = scalar['c_unsigned_long_long'] scalar['numpy_bool_'] = scalar['numpy_bool'] scalar['numpy_str_'] = scalar['numpy_str'] = scalar['numpy_string0'] \ = scalar['numpy_string_'] = scalar['numpy_string'] scalar['numpy_unicode0'] = scalar['numpy_unicode_'] = scalar['numpy_unicode'] scalar['npy_str'] = scalar['npy_string'] scalar['numpy_void0'] = scalar['numpy_void'] for Cls_name, bits_list in list(sctypebits.items()): if Cls_name=='Complex': init = ' = {0.0, 0.0}' t = 'complex' elif Cls_name=='Float': init = ' = 0.0' t = 'floating point number' else: init = ' = 0' t = 'integer' for bits in bits_list: n = Cls_name.lower() + str(bits) Cls = Cls_name + str(bits) ctype = 'npy_' + n scalar[ctype] = dict( ctype = ctype, pycype = None, init = init, argument_format = 'O&', argument_converter = 'pyobj_to_'+ctype, return_format = 'O&', return_converter = 'pyobj_from_'+ctype, require_numpy = True, argument_title = 'a python %s (converting to C %s)' % (t,ctype), return_title = 'a numpy %s-bit %s' % (bits, t) ) ctype = 'Py%sScalarObject*' % (Cls) ctype_name = 'numpy_' + n scalar[ctype_name] = dict( ctype = ctype, pyctype = None, init = ' = NULL', argument_format = 'O&', argument_converter = 'pyobj_to_'+ctype_name, return_format = 'N', require_numpy = True, argument_title = 'a python %s (converting to C %s)' % (t,ctype), return_title = 'a numpy %s-bit %s' % (bits, t) ) scalar['npy_intp'] = scalar['npy_'+numpy.intp.__name__] scalar['npy_int'] = scalar['npy_'+numpy.int_.__name__] scalar['npy_float'] = scalar['npy_'+numpy.float_.__name__] scalar['npy_complex'] = scalar['npy_'+numpy.complex_.__name__] array = dict( numpy_int64 = dict(\ typenum = 'PyArray_INT64', ctype = 'PyArrayObject*', init = ' = NULL', title = 'a numpy array of 64-bit integers', argument_format = 'O&', argument_converter = 'pyobj_to_numpy_array_int64', return_format = 'N', require_numpy = True, ), c_int = dict(\ ctype='int*', init=' = NULL', title='a C 
int array', input_title = 'a python integer sequence (converting to C int*)', input_format = 'O', input_object = '&%(varname)s_py', input_frompyobj = dict(\ required = '%(varname)s_arr = PyArray_FROMANY(%(varname)s_py, NPY_INT, %(rank)s, %(rank)s, %(requirements)s);\n' 'if (%(varname)s_arr != NULL) {\n' ' %(varname)s = PyArray_DATA(%(varname)s_arr);', ), input_cleanfrompyobj = dict(\ required = '} /*if (%(varname)s_arr != NULL)*/' ), output_title = 'a python integer sequence (converting from C int*)', output_format = 'N', output_object = '%(varname)s_arr' ), numpy_int8 = dict(\ ctype='npy_int8*', init=' = NULL', title='a C npy_int8 array' ) )
{"hexsha": "150da86ef034e4faed2c5db74d15ca68d0973d29", "size": 17058, "ext": "py", "lang": "Python", "max_stars_repo_path": "extgen/type_rules.py", "max_stars_repo_name": "maedoc/inducer-f2py", "max_stars_repo_head_hexsha": "fce51e6603f7474632d452437dbb8b194d6c879d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 57, "max_stars_repo_stars_event_min_datetime": "2015-10-20T15:58:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T08:52:04.000Z", "max_issues_repo_path": "extgen/type_rules.py", "max_issues_repo_name": "maedoc/inducer-f2py", "max_issues_repo_head_hexsha": "fce51e6603f7474632d452437dbb8b194d6c879d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2016-03-14T16:02:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T11:15:50.000Z", "max_forks_repo_path": "extgen/type_rules.py", "max_forks_repo_name": "maedoc/inducer-f2py", "max_forks_repo_head_hexsha": "fce51e6603f7474632d452437dbb8b194d6c879d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-12-30T15:41:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T17:14:42.000Z", "avg_line_length": 27.7817589577, "max_line_length": 118, "alphanum_fraction": 0.6014186892, "include": true, "reason": "import numpy", "num_tokens": 4654}
''' Pedagogical example realization of seq2seq recurrent neural networks, using TensorFlow and TFLearn. ''' from __future__ import division, print_function import os import sys import tflearn import argparse import json import numpy as np import tensorflow as tf from pattern import SequencePattern #from tensorflow.python.ops import seq2seq #import tensorflow.contrib.seq2seq as seq2seq import tensorflow.contrib.legacy_seq2seq as seq2seq from tensorflow.python.ops import rnn_cell #----------------------------------------------------------------------------- class TFLearnSeq2Seq(object): ''' seq2seq recurrent neural network, implemented using TFLearn. ''' AVAILABLE_MODELS = ["embedding_rnn", "embedding_attention"] def __init__(self, sequence_pattern, seq2seq_model=None, verbose=None, name=None, data_dir=None): ''' sequence_pattern_class = a SequencePattern class instance, which defines pattern parameters (input, output lengths, name, generating function) seq2seq_model = string specifying which seq2seq model to use, e.g. "embedding_rnn" ''' self.sequence_pattern = sequence_pattern self.seq2seq_model = seq2seq_model or "embedding_rnn" assert self.seq2seq_model in self.AVAILABLE_MODELS self.in_seq_len = self.sequence_pattern.INPUT_SEQUENCE_LENGTH self.out_seq_len = self.sequence_pattern.OUTPUT_SEQUENCE_LENGTH self.in_max_int = self.sequence_pattern.INPUT_MAX_INT self.out_max_int = self.sequence_pattern.OUTPUT_MAX_INT self.verbose = verbose or 0 self.n_input_symbols = self.in_max_int + 1 self.n_output_symbols = self.out_max_int + 2 # extra one for GO symbol self.model_instance = None self.name = name self.data_dir = data_dir def generate_trainig_data(self, num_points): ''' Generate training dataset. Produce random (integer) sequences X, and corresponding expected output sequences Y = generate_output_sequence(X). 
Return xy_data, y_data (both of type uint32) xy_data = numpy array of shape [num_points, in_seq_len + out_seq_len], with each point being X + Y y_data = numpy array of shape [num_points, out_seq_len] ''' # all_x = np.load("input_histograms.npy") # all_y = np.load("output_histograms.npy") # print(all_y) # print(all_y.shape) # x_data = all_x[0:num_points, 0:8].astype(np.uint32) # #y_data = all_y[0:num_points, 0:8].astype(np.uint32) # y_data = x_data + 100 # #print(y_data) # #y_data = np.ones(x_data.shape).astype(np.uint32) # #print(y_data) # print(y_data.shape) # x_data = np.random.randint(0, self.in_max_int, size=(num_points, self.in_seq_len)) # shape [num_points, in_seq_len] # #x_data = x_data.astype(np.uint32) # ensure integer type # y_data = [ self.sequence_pattern.generate_output_sequence(x) for x in x_data ] # print(y_data) # y_data = np.array(y_data) # xy_data = np.append(x_data, y_data, axis=1) # shape [num_points, 2*seq_len] # print(x_data) # print(y_data) # print(xy_data) # print(x_data.shape) # #print(y_data.shape) # print(xy_data.shape) # print(x_data.dtype) # #print(y_data.dtype) # print(xy_data.dtype) # print(type(x_data)) # print(type(y_data)) # print(type(xy_data)) # return xy_data, y_data # all_x = np.load("input_histograms.npy").astype(np.uint32) # all_y = np.load("output_histograms.npy").astype(np.uint32) # x_data = np.random.randint(0, self.in_max_int, size=(num_points, self.in_seq_len)) # shape [num_points, in_seq_len] # x_data = x_data.astype(np.uint32) # ensure integer type # y_data = [ self.sequence_pattern.generate_output_sequence(x) for x in x_data ] # y_data = np.array(y_data) # print(x_data) # print("in x_data") # print(y_data) # print(type(x_data), x_data.shape) # print(type(y_data), y_data.shape) # print(y_data.shape) # print("ghesmate aval=============") # shape1 , shape2 = y_data.shape # print(x_data.shape) # for i in range(shape1): # for j in range(shape2): # #print(x_data[i,j]) # x_data[i,j] = all_x[i,j] # y_data[i,j] = all_y[i,j] # print("ghesmate dovom=============") # print(x_data) # print(y_data) # print(type(x_data), x_data.shape) # print(type(y_data), y_data.shape) # y_data = [ self.sequence_pattern.generate_output_sequence(x) for x in x_data ] x_data = np.random.randint(0, self.in_max_int, size=(num_points, self.in_seq_len)) # shape [num_points, in_seq_len] x_data = x_data.astype(np.uint32) # ensure integer type y_data = [ self.sequence_pattern.generate_output_sequence(x) for x in x_data ] y_data = np.array(y_data) xy_data = np.append(x_data, y_data, axis=1) # shape [num_points, 2*seq_len] return xy_data, y_data def sequence_loss(self, y_pred, y_true): ''' Loss function for the seq2seq RNN. Reshape predicted and true (label) tensors, generate dummy weights, then use seq2seq.sequence_loss to actually compute the loss function. 
''' if self.verbose > 2: print ("my_sequence_loss y_pred=%s, y_true=%s" % (y_pred, y_true)) logits = tf.unstack(y_pred, axis=1) # list of [-1, num_decoder_synbols] elements targets = tf.unstack(y_true, axis=1) # y_true has shape [-1, self.out_seq_len]; unpack to list of self.out_seq_len [-1] elements if self.verbose > 2: print ("my_sequence_loss logits=%s" % (logits,)) print ("my_sequence_loss targets=%s" % (targets,)) weights = [tf.ones_like(yp, dtype=tf.float32) for yp in targets] if self.verbose > 4: print ("my_sequence_loss weights=%s" % (weights,)) sl = seq2seq.sequence_loss(logits, targets, weights) if self.verbose > 2: print ("my_sequence_loss return = %s" % sl) return sl def accuracy(self, y_pred, y_true, x_in): # y_pred is [-1, self.out_seq_len, num_decoder_symbols]; y_true is [-1, self.out_seq_len] ''' Compute accuracy of the prediction, based on the true labels. Use the average number of equal values. ''' pred_idx = tf.to_int32(tf.argmax(y_pred, 2)) # [-1, self.out_seq_len] if self.verbose > 2: print ("my_accuracy pred_idx = %s" % pred_idx) accuracy = tf.reduce_mean(tf.cast(tf.equal(pred_idx, y_true), tf.float32), name='acc') return accuracy def model(self, mode="train", num_layers=1, cell_size=32, cell_type="BasicLSTMCell", embedding_size=20, learning_rate=0.0001, tensorboard_verbose=0, checkpoint_path=None): ''' Build tensor specifying graph of operations for the seq2seq neural network model. mode = string, either "train" or "predict" cell_type = attribute of rnn_cell specifying which RNN cell type to use cell_size = size for the hidden layer in the RNN cell num_layers = number of RNN cell layers to use Return TFLearn model instance. Use DNN model for this. ''' assert mode in ["train", "predict"] checkpoint_path = checkpoint_path or ("%s%ss2s_checkpoint.tfl" % (self.data_dir or "", "/" if self.data_dir else "")) GO_VALUE = self.out_max_int + 1 # unique integer value used to trigger decoder outputs in the seq2seq RNN network = tflearn.input_data(shape=[None, self.in_seq_len + self.out_seq_len], dtype=tf.int32, name="XY") encoder_inputs = tf.slice(network, [0, 0], [-1, self.in_seq_len], name="enc_in") # get encoder inputs encoder_inputs = tf.unstack(encoder_inputs, axis=1) # transform into list of self.in_seq_len elements, each [-1] decoder_inputs = tf.slice(network, [0, self.in_seq_len], [-1, self.out_seq_len], name="dec_in") # get decoder inputs decoder_inputs = tf.unstack(decoder_inputs, axis=1) # transform into list of self.out_seq_len elements, each [-1] go_input = tf.multiply( tf.ones_like(decoder_inputs[0], dtype=tf.int32), GO_VALUE ) # insert "GO" symbol as the first decoder input; drop the last decoder input decoder_inputs = [go_input] + decoder_inputs[: self.out_seq_len-1] # insert GO as first; drop last decoder input feed_previous = not (mode=="train") if self.verbose > 3: print ("feed_previous = %s" % str(feed_previous)) print ("encoder inputs: %s" % str(encoder_inputs)) print ("decoder inputs: %s" % str(decoder_inputs)) print ("len decoder inputs: %s" % len(decoder_inputs)) self.n_input_symbols = self.in_max_int + 1 # default is integers from 0 to 9 self.n_output_symbols = self.out_max_int + 2 # extra "GO" symbol for decoder inputs single_cell = getattr(rnn_cell, cell_type)(cell_size, state_is_tuple=True) if num_layers==1: cell = single_cell else: cell = rnn_cell.MultiRNNCell([single_cell] * num_layers) if self.seq2seq_model=="embedding_rnn": model_outputs, states = seq2seq.embedding_rnn_seq2seq(encoder_inputs, # encoder_inputs: A list of 2D Tensors [batch_size, 
input_size]. decoder_inputs, cell, num_encoder_symbols=self.n_input_symbols, num_decoder_symbols=self.n_output_symbols, embedding_size=embedding_size, feed_previous=feed_previous) elif self.seq2seq_model=="embedding_attention": model_outputs, states = seq2seq.embedding_attention_seq2seq(encoder_inputs, # encoder_inputs: A list of 2D Tensors [batch_size, input_size]. decoder_inputs, cell, num_encoder_symbols=self.n_input_symbols, num_decoder_symbols=self.n_output_symbols, embedding_size=embedding_size, num_heads=1, initial_state_attention=False, feed_previous=feed_previous) else: raise Exception('[TFLearnSeq2Seq] Unknown seq2seq model %s' % self.seq2seq_model) tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + "seq2seq_model", model_outputs) # for TFLearn to know what to save and restore # model_outputs: list of the same length as decoder_inputs of 2D Tensors with shape [batch_size x output_size] containing the generated outputs. if self.verbose > 2: print ("model outputs: %s" % model_outputs) network = tf.stack(model_outputs, axis=1) # shape [-1, n_decoder_inputs (= self.out_seq_len), num_decoder_symbols] if self.verbose > 2: print ("packed model outputs: %s" % network) if self.verbose > 3: all_vars = tf.get_collection(tf.GraphKeys.VARIABLES) print ("all_vars = %s" % all_vars) with tf.name_scope("TargetsData"): # placeholder for target variable (i.e. trainY input) targetY = tf.placeholder(shape=[None, self.out_seq_len], dtype=tf.int32, name="Y") network = tflearn.regression(network, placeholder=targetY, optimizer='adam', learning_rate=learning_rate, loss=self.sequence_loss, metric=self.accuracy, name="Y") model = tflearn.DNN(network, tensorboard_verbose=tensorboard_verbose, checkpoint_path=checkpoint_path) return model def train(self, num_epochs=20, num_points=10, model=None, model_params=None, weights_input_fn=None, validation_set=0.1, snapshot_step=5000, batch_size=32, weights_output_fn=None): ''' Train model, with specified number of epochs, and dataset size. Use specified model, or create one if not provided. Load initial weights from file weights_input_fn, if provided. validation_set specifies what to use for the validation. Returns logits for prediction, as an numpy array of shape [out_seq_len, n_output_symbols]. ''' trainXY, trainY = self.generate_trainig_data(num_points) print ("[TFLearnSeq2Seq] Training on %d point dataset , with %d epochs" % (num_points, num_epochs)) print (" model parameters: %s" % json.dumps(model_params, indent=4)) model_params = model_params or {} model = model or self.setup_model("train", model_params, weights_input_fn) model.fit(trainXY, trainY, n_epoch=num_epochs, validation_set=validation_set, batch_size=batch_size, shuffle=True, show_metric=True, snapshot_step=snapshot_step, snapshot_epoch=False, run_id="TFLearnSeq2Seq" ) print ("Done!") if weights_output_fn is not None: weights_output_fn = self.canonical_weights_fn(weights_output_fn) model.save(weights_output_fn) print ("Saved %s" % weights_output_fn) self.weights_output_fn = weights_output_fn return model def canonical_weights_fn(self, iteration_num=0): ''' Construct canonical weights filename, based on model and pattern names. 
''' if not type(iteration_num)==int: try: iteration_num = int(iteration_num) except Exception as err: return iteration_num model_name = self.name or "basic" wfn = "ts2s__%s__%s_%s.tfl" % (model_name, self.sequence_pattern.PATTERN_NAME, iteration_num) if self.data_dir: wfn = "%s/%s" % (self.data_dir, wfn) self.weights_filename = wfn return wfn def setup_model(self, mode, model_params=None, weights_input_fn=None): ''' Setup a model instance, using the specified mode and model parameters. Load the weights from the specified file, if it exists. If weights_input_fn is an integer, use that the model name, and the pattern name, to construct a canonical filename. ''' model_params = model_params or {} model = self.model_instance or self.model(mode=mode, **model_params) self.model_instance = model if weights_input_fn is not None: model.load(weights_input_fn) print(model) #if weights_input_fn: #if type(weights_input_fn)==int: #weights_input_fn = self.canonical_weights_fn(weights_input_fn) #if os.path.exists(weights_input_fn): #model.load(weights_input_fn) #print ("[TFLearnSeq2Seq] model weights loaded from %s" % weights_input_fn) #else: #print(weights_input_fn) #print ("[TFLearnSeq2Seq] MISSING model weights file %s" % weights_input_fn) return model def predict(self, Xin, model=None, model_params=None, weights_input_fn=None): ''' Make a prediction, using the seq2seq model, for the given input sequence Xin. If model is not provided, create one (or use last created instance). Return prediction, y prediction = array of integers, giving output prediction. Length = out_seq_len y = array of shape [out_seq_len, out_max_int], giving logits for output prediction ''' if not model: model = self.model_instance or self.setup_model("predict", model_params, weights_input_fn) if self.verbose: print ("Xin = %s" % str(Xin)) X = np.array(Xin).astype(np.uint32) print("X", X, type(X)) print(len(X)) assert len(X)==self.in_seq_len if self.verbose: print ("X Input shape=%s, data=%s" % (X.shape, X)) print ("Expected output = %s" % str(self.sequence_pattern.generate_output_sequence(X))) Yin = [0]*self.out_seq_len XY = np.append(X, np.array(Yin).astype(np.float32)) XY = XY.reshape([-1, self.in_seq_len + self.out_seq_len]) # batch size 1 if self.verbose > 1: print ("XY Input shape=%s, data=%s" % (XY.shape, XY)) res = model.predict(XY) res = np.array(res) if self.verbose > 1: print ("prediction shape = %s" % str(res.shape)) y = res.reshape(self.out_seq_len, self.n_output_symbols) prediction = np.argmax(y, axis=1) if self.verbose: print ("Predicted output sequence: %s" % str(prediction)) return prediction, y #----------------------------------------------------------------------------- class VAction(argparse.Action): def __call__(self, parser, args, values, option_string=None): curval = getattr(args, self.dest, 0) or 0 values=values.count('v')+1 setattr(args, self.dest, values + curval) #----------------------------------------------------------------------------- def CommandLine(args=None, arglist=None): ''' Main command line. Accepts args, to allow for simple unit testing. 
''' help_text = """ Commands: train - give size of training set to use, as argument predict - give input sequence as argument (or specify inputs via --from-file <filename>) """ parser = argparse.ArgumentParser(description=help_text, formatter_class=argparse.RawTextHelpFormatter) #parser.add_argument("cmd", help="command") #parser.add_argument("cmd_input", nargs='*', help="input to command") #parser.add_argument('-v', "--verbose", nargs=0, help="increase output verbosity (add more -v to increase versbosity)", action=VAction, dest='verbose') #parser.add_argument("-m", "--model", help="seq2seq model name: either embedding_rnn (default) or embedding_attention", default=None) #parser.add_argument("-r", "--learning-rate", type=float, help="learning rate (default 0.0001)", default=0.0001) #parser.add_argument("-e", "--epochs", type=int, help="number of trainig epochs", default=10) #parser.add_argument("-i", "--input-weights", type=str, help="tflearn file with network weights to load", default=None) #parser.add_argument("-o", "--output-weights", type=str, help="new tflearn file where network weights are to be saved", default=None) #parser.add_argument("-p", "--pattern-name", type=str, help="name of pattern to use for sequence", default=None) #parser.add_argument("-n", "--name", type=str, help="name of model, used when generating default weights filenames", default=None) #parser.add_argument("--in-len", type=int, help="input sequence length (default 10)", default=None) #parser.add_argument("--out-len", type=int, help="output sequence length (default 10)", default=None) #parser.add_argument("--from-file", type=str, help="name of file to take input data sequences from (json format)", default=None) parser.add_argument("--iter-num", type=int, help="training iteration number; specify instead of input- or output-weights to use generated filenames", default=None) #parser.add_argument("--data-dir", help="directory to use for storing checkpoints (also used when generating default weights filenames)", default=None) # model parameters #parser.add_argument("-L", "--num-layers", type=int, help="number of RNN layers to use in the model (default 1)", default=1) #parser.add_argument("--cell-size", type=int, help="size of RNN cell to use (default 32)", default=32) #parser.add_argument("--cell-type", type=str, help="type of RNN cell to use (default BasicLSTMCell)", default="BasicLSTMCell") #parser.add_argument("--embedding-size", type=int, help="size of embedding to use (default 20)", default=20) #parser.add_argument("--tensorboard-verbose", type=int, help="tensorboard verbosity level (default 0)", default=0) if not args: args = parser.parse_args(arglist) p_num_layers = 1 p_cell_size = 32 p_cell_type = 'BasicLSTMCell' p_embedding_size = 32 p_learning_rate = 0.0001 operation = "train" p_train_data_size = 10000 p_pattern_name = "sorted" p_in_len = 32 p_out_len = 32 p_model = "embedding_rnn" p_data_dir = "models" p_name = "test1" p_epochs = 50 #p_input_weights = "/share/users/bsamadi/seq2seq/tflearn_seq2seq/sort_32_orig_input" p_input_weights = None #p_ouput_weights = "test_hame_yek" #p_ouput_weights = "sort_256" #p_ouput_weights = "sort_256_orig_input" #p_ouput_weights = "" p_ouput_weights = "orig_32" #p_ouput_weights = "try_on_hist" #p_ouput_weights = None A = np.load("input_histograms_32.npy").astype(np.uint32) B = np.load("output_histograms_32.npy").astype(np.uint32) max_input = np.max(A) max_output = np.max(A) #max_input = 3000 #max_output = 3000 if args.iter_num is not None: args.input_weights = args.iter_num 
args.output_weights = args.iter_num + 1 model_params = dict(num_layers=p_num_layers, cell_size=p_cell_size, cell_type=p_cell_type, embedding_size=p_embedding_size, learning_rate=p_learning_rate, ) if operation=="train": num_points = p_train_data_size sp = SequencePattern(p_pattern_name, in_seq_len=p_in_len, out_seq_len=p_out_len, max_input = max_input, max_output =max_output) ts2s = TFLearnSeq2Seq(sp, seq2seq_model=p_model, data_dir=p_data_dir, name=p_name) ts2s.train(num_epochs=p_epochs, num_points=num_points, weights_output_fn=p_ouput_weights, weights_input_fn=p_input_weights, model_params=model_params, batch_size = 8) return ts2s elif operation=="predict": A = np.load("input_histograms_32.npy").astype(np.uint32) A = A[0:1, :] A = np.array(A) print(A) inputs = A print(inputs) #if args.from_file: #inputs = json.loads(args.from_file) #try: #input_x = list(map(int, args.cmd_input)) #inputs = [input_x] #except: #raise Exception("Please provide a space-delimited input sequence as the argument") sp = SequencePattern(p_pattern_name, in_seq_len=p_in_len, out_seq_len=p_out_len, max_input = max_input, max_output =max_output) ts2s = TFLearnSeq2Seq(sp, seq2seq_model=p_model, data_dir=p_data_dir, name=p_name) results = [] print("inputs", inputs, A) for x in inputs: prediction, y = ts2s.predict(x, weights_input_fn=p_input_weights, model_params=model_params) #print("==> For input %s, prediction=%s (expected=%s)" % (x, prediction, sp.generate_output_sequence(x))) results.append([prediction]) print(results) exit() ts2s.prediction_results = results return ts2s else: print("Unknown command %s" % args.cmd) #----------------------------------------------------------------------------- # unit tests # def test_sp1(): # ''' # Test two different SequencePattern instances # ''' # sp = SequencePattern("maxmin_dup") # y = sp.generate_output_sequence(range(10)) # assert all(y==np.array([9, 0, 2, 3, 4, 5, 6, 7, 8, 9])) # sp = SequencePattern("sorted") # y = sp.generate_output_sequence([5,6,1,2,9]) # assert all(y==np.array([1, 2, 5, 6, 9])) # sp = SequencePattern("reversed") # y = sp.generate_output_sequence(range(10)) # assert all(y==np.array([9, 8, 7, 6, 5, 4, 3, 2, 1, 0])) # def test_sp2(): # ''' # Test two SequencePattern instance with lengths different from default # ''' # sp = SequencePattern("sorted", in_seq_len=20, out_seq_len=5) # x = np.random.randint(0, 9, 20) # y = sp.generate_output_sequence(x) # assert len(y)==5 # y_exp = sorted(x)[:5] # assert all(y==y_exp) # def test_train1(): # ''' # Test simple training of an embedding_rnn seq2seq model # ''' # sp = SequencePattern() # ts2s = TFLearnSeq2Seq(sp) # ofn = "test_%s" % ts2s.canonical_weights_fn(0) # print ("using weights filename %s" % ofn) # if os.path.exists(ofn): # os.unlink(ofn) # tf.reset_default_graph() # ts2s.train(num_epochs=1, num_points= 10, weights_output_fn=ofn) # assert os.path.exists(ofn) # def test_predict1(): # ''' # Test simple preductions using weights just produced (in test_train1) # ''' # sp = SequencePattern() # ts2s = TFLearnSeq2Seq(sp, verbose=1) # wfn = "test_%s" % ts2s.canonical_weights_fn(0) # print ("using weights filename %s" % wfn) # tf.reset_default_graph() # prediction, y = ts2s.predict(Xin=range(10), weights_input_fn=wfn) # assert len(prediction==10) # def test_train_predict2(): # ''' # Test that the embedding_attention model works, with saving and loading of weights # ''' # import tempfile # sp = SequencePattern() # tempdir = tempfile.mkdtemp() # ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention", 
data_dir=tempdir, name="attention") # tf.reset_default_graph() # ts2s.train(num_epochs=1, num_points=10, weights_output_fn=1, weights_input_fn=0) # assert os.path.exists(ts2s.weights_output_fn) # tf.reset_default_graph() # ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention", data_dir="DATA", name="attention", verbose=1) # prediction, y = ts2s.predict(Xin=range(10), weights_input_fn=1) # assert len(prediction==10) # os.system("rm -rf %s" % tempdir) # def test_train_predict3(): # ''' # Test that a model trained on sequencees of one length can be used for predictions on other sequence lengths # ''' # import tempfile # sp = SequencePattern("sorted", in_seq_len=10, out_seq_len=10) # tempdir = tempfile.mkdtemp() # ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention", data_dir=tempdir, name="attention") # tf.reset_default_graph() # ts2s.train(num_epochs=1, num_points=10, weights_output_fn=1, weights_input_fn=0) # assert os.path.exists(ts2s.weights_output_fn) # tf.reset_default_graph() # sp = SequencePattern("sorted", in_seq_len=20, out_seq_len=8) # tf.reset_default_graph() # ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention", data_dir="DATA", name="attention", verbose=1) # x = np.random.randint(0, 9, 20) # prediction, y = ts2s.predict(x, weights_input_fn=1) # assert len(prediction==8) # os.system("rm -rf %s" % tempdir) # def test_main1(): # ''' # Integration test - training # ''' # import tempfile # tempdir = tempfile.mkdtemp() # arglist = "--data-dir %s -e 2 --iter-num=1 -v -v --tensorboard-verbose=1 train 5000" % tempdir # arglist = arglist.split(' ') # tf.reset_default_graph() # ts2s = CommandLine(arglist=arglist) # assert os.path.exists(ts2s.weights_output_fn) # os.system("rm -rf %s" % tempdir) # def test_main2(): # ''' # Integration test - training then prediction # ''' # import tempfile # tempdir = tempfile.mkdtemp() # arglist = "--data-dir %s -e 2 --iter-num=1 -v -v --tensorboard-verbose=1 train 5000" % tempdir # arglist = arglist.split(' ') # tf.reset_default_graph() # ts2s = CommandLine(arglist=arglist) # wfn = ts2s.weights_output_fn # assert os.path.exists(wfn) # arglist = "-i %s predict 1 2 3 4 5 6 7 8 9 0" % wfn # arglist = arglist.split(' ') # tf.reset_default_graph() # ts2s = CommandLine(arglist=arglist) # assert len(ts2s.prediction_results[0][0])==10 # os.system("rm -rf %s" % tempdir) # def test_main3(): # ''' # Integration test - training then prediction: attention model # ''' # import tempfile # wfn = "tmp_weights.tfl" # if os.path.exists(wfn): # os.unlink(wfn) # arglist = "-e 2 -o tmp_weights.tfl -v -v -v -v -m embedding_attention train 5000" # arglist = arglist.split(' ') # tf.reset_default_graph() # ts2s = CommandLine(arglist=arglist) # assert os.path.exists(wfn) # arglist = "-i tmp_weights.tfl -v -v -v -v -m embedding_attention predict 1 2 3 4 5 6 7 8 9 0" # arglist = arglist.split(' ') # tf.reset_default_graph() # ts2s = CommandLine(arglist=arglist) # assert len(ts2s.prediction_results[0][0])==10 #----------------------------------------------------------------------------- if __name__=="__main__": CommandLine()
{"hexsha": "8aa11720bd00e1c37be4b550199ced1940f06552", "size": 30016, "ext": "py", "lang": "Python", "max_stars_repo_path": "tflearn_seq2seq.py", "max_stars_repo_name": "behnam-samadi/tflearn_seq2seq", "max_stars_repo_head_hexsha": "88517d2a9d27b9cf053b0e91346a2e27344e1ba9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tflearn_seq2seq.py", "max_issues_repo_name": "behnam-samadi/tflearn_seq2seq", "max_issues_repo_head_hexsha": "88517d2a9d27b9cf053b0e91346a2e27344e1ba9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tflearn_seq2seq.py", "max_forks_repo_name": "behnam-samadi/tflearn_seq2seq", "max_forks_repo_head_hexsha": "88517d2a9d27b9cf053b0e91346a2e27344e1ba9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.9472913616, "max_line_length": 168, "alphanum_fraction": 0.6098747335, "include": true, "reason": "import numpy", "num_tokens": 7229}
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# The following import configures Matplotlib for 3D plotting.
from mpl_toolkits.mplot3d import Axes3D
from scipy.special import sph_harm, genlaguerre, factorial, lpmv

plt.rc('text', usetex=True)

a0 = 0.5292  # Bohr radius in Angstrom
points = 400


def L(n, l, rho):
    # Laguerre-polynomial part of the radial wavefunction, written as an explicit sum.
    # n is now passed in explicitly instead of being read from the module-level globals.
    acc = 0
    for k in range(0, n - l):
        arr = [
            (-1.0) ** (k + 1),
            factorial(n + l) ** 2.0,
            factorial(n - l - 1 - k),
            factorial(2 * l + 1 + k),
            factorial(k),
            rho ** k,
        ]
        acc += arr[0] * arr[1] / (arr[2] * arr[3] * arr[4]) * arr[5]
    return acc


def R(r, n, l):
    # Squared radial wavefunction |R_nl(r)|^2, including its normalization factor.
    rho = 2 * r / (n * a0)
    poly = L(n, l, rho)
    arr = [
        (2.0 / (n * a0)) ** 3.0,
        factorial(n - l - 1),
        2.0 * n,
        factorial(n + l) ** 3.0,
    ]
    normTermSquared = arr[0] * arr[1] / (arr[2] * arr[3])
    radialTerm = np.exp(-rho / 2.0) * (rho ** l)
    return (poly * radialTerm) ** 2 * normTermSquared


def angDF(l, m, theta, phi):
    # Angular distribution built from the associated Legendre function
    # (not used in the current plotting path, kept for reference).
    absm = np.abs(m)
    normRoot = factorial(l - absm) / factorial(l + absm)
    normRoot = normRoot * (2.0 * l + 1.0) / (4.0 * np.pi)
    legendre = lpmv(absm, l, np.cos(theta))
    return normRoot * legendre ** 2.0 * np.exp(m * 2.0j * phi)


def plot_Y(fig, ax, n, l, m, scale):
    """Plot a map of the (n, l, m) orbital probability density in the z = 0 plane on Axes ax."""
    r0 = scale
    x = np.linspace(-r0, r0, points)
    y = np.linspace(r0, -r0, points)
    WF = np.zeros((points, points))
    s = 0
    for dx in range(points):
        for dy in range(points):
            kx = x[dx]
            ky = y[dy]
            phi = math.atan2(ky, kx)
            theta = math.atan2(np.hypot(kx, ky), 0)
            r = np.hypot(kx, ky)
            # theta and phi are passed the other way around (scipy's sph_harm takes
            # the azimuthal angle before the polar one)
            # Y = angDF(l, m, phi, theta)
            Y = sph_harm(m, l, theta, phi)
            # if m < 0:
            #     Y = np.sqrt(2) * (-1)**m * Y.imag
            # elif m > 0:
            #     Y = np.sqrt(2) * (-1)**m * Y.real
            # k = np.abs(R(r, n, l) * Y)**2
            # wf = Y * R(r, n, l)
            wf = Y ** 2 * R(r, n, l)
            prob = np.abs(wf)
            s += prob
            WF[dx, dy] = prob
    # WF = np.log10(WF + 1)
    # cmap.set_clim(0, .5)

    # plot R
    """
    kx = np.linspace(0, r0, 100)
    ky = list(map(lambda r: R(r, n, l), kx))
    ax.plot(kx, ky)
    ax.set_xticks(np.linspace(0, r0, 6))
    ax.set_xticklabels(range(0, 6))
    """

    # plot the probability-density map
    wf_min = np.min(WF)
    wf_max = np.max(WF)
    print('min: {}, max: {}, sum: {}'.format(wf_min, wf_max, s))
    # plt.grid(True)
    im = ax.imshow(WF, cmap='binary')
    ax.tick_params(direction='in')
    ax.set_xticks(np.linspace(0, points, 5))
    ax.set_yticks(np.linspace(0, points, 5))
    ax.set_xticklabels(map(lambda x: r'$\mathbf{{{}}}$'.format(int(x)), np.linspace(-scale, scale, 5)))
    ax.set_yticklabels(map(lambda x: r'$\mathbf{{{}}}$'.format(int(x)), np.linspace(scale, -scale, 5)))
    ax.set_xlabel(r'$\bm{x \left(\mathrm{\AA}\right)}$')
    ax.set_ylabel(r'$\bm{y \left(\mathrm{\AA}\right)}$')
    ax.set_title(r'$\bm{{({}, {}, {})}}$'.format(n, l, m))
    plt.setp(ax.spines.values(), linewidth=2)
    # fig.colorbar(im, ax=ax)


def plotblablabla(n, l, m, scale):
    fig = plt.figure(figsize=(2.5, 2.5), dpi=400)
    plt.rc('text.latex', preamble=r'\usepackage{bm}')
    fig.tight_layout(pad=0.0)
    ax = plt.axes()
    plot_Y(fig, ax, n, l, m, scale)
    plt.savefig('Y_{}_{}_{}.png'.format(n, l, m), transparent=False, bbox_inches='tight')
    plt.close()


plot_all = False
n, l, m = (10, 8, 2)
plotblablabla(n, l, m, 150)

if plot_all:
    for n in range(1, 7):
        for l in range(0, n):
            for m in range(-l, l + 1):
                print('n: {}, l: {}, m: {}'.format(n, l, m))
                plotblablabla(n, l, m, 20)
{"hexsha": "18994a126cf727df65a14b7bc9737034fe5bacb0", "size": 4027, "ext": "py", "lang": "Python", "max_stars_repo_path": "mode2.py", "max_stars_repo_name": "ViniciusGiroto/hydrogen_orbitals", "max_stars_repo_head_hexsha": "354ce1cd04e4b981224b95eb3e1f57d8ddf02cc6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mode2.py", "max_issues_repo_name": "ViniciusGiroto/hydrogen_orbitals", "max_issues_repo_head_hexsha": "354ce1cd04e4b981224b95eb3e1f57d8ddf02cc6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mode2.py", "max_forks_repo_name": "ViniciusGiroto/hydrogen_orbitals", "max_forks_repo_head_hexsha": "354ce1cd04e4b981224b95eb3e1f57d8ddf02cc6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5602836879, "max_line_length": 103, "alphanum_fraction": 0.5073255525, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1389}
program test_blas ! use magma use omp_lib implicit none INTEGER,PARAMETER::Ns(3)=(/5000,10000,15000/) INTEGER,PARAMETER::M=1000 COMPLEX,ALLOCATABLE :: A(:,:),B(:,:),z(:,:) REAL,ALLOCATABLE :: R(:,:,:),eig(:) COMPLEX,ALLOCATABLE :: WORK(:) REAL,ALLOCATABLE ::rwork(:) INTEGER,ALLOCATABLE ::iwork(:),ifail(:) INTEGER :: i,j,lwork,lrwork,liwork,ne,info,nn,n REAL :: time1,time2,wtemp(1) DO nn=1,size(Ns) N=Ns(nn) time1=omp_get_wtime() ALLOCATE (eig(N),ifail(N)) Allocate(R(N,N,4),A(N,N),b(N,N),z(N,N)) CALL random_number(r) A=cmplx(R(:,:,1),R(:,:,2)) call zherk("U","N",N,N,1.0,cmplx(R(:,:,3),R(:,:,4)),N,0.0,B,N) DO i=1,N DO j=1,i A(i,j)=conjg(A(j,i)) B(i,j)=conjg(B(j,i)) ENDDO A(i,i)=real(A(i,i)) ENDDO time2=omp_get_wtime() print *, "Init:",time2-time1 time1=omp_get_wtime() allocate(iwork(5*N),rwork(7*N)) CALL zhegvx (1, "V", "I", "U", N, A, N, B, N, 0.0, 0.0, 1, M, 1E-8, ne, eig, Z, N, Wtemp, -1, RWORK, IWORK, IFAIL, INFO) lwork=wtemp(1) print *,"lwork:",lwork allocate(work(lwork)) CALL zhegvx (1, "V", "I", "U", N, A, N, B, N, 0.0, 0.0, 1, M, 1E-8, ne, eig, Z, N, WORK, LWORK, RWORK, IWORK, IFAIL, INFO) time2=omp_get_wtime() print *, N,"MKL:",time2-time1 deallocate(work,iwork,rwork,A,B,z,r,eig,ifail) enddo end
{"hexsha": "bc36e6649a001e01c122b0d0fcd8cecbbd69cb03", "size": 1351, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/small-testcodes/mkl_performance.f90", "max_stars_repo_name": "MRedies/FLEUR", "max_stars_repo_head_hexsha": "84234831c55459a7539e78600e764ff4ca2ec4b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/small-testcodes/mkl_performance.f90", "max_issues_repo_name": "MRedies/FLEUR", "max_issues_repo_head_hexsha": "84234831c55459a7539e78600e764ff4ca2ec4b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/small-testcodes/mkl_performance.f90", "max_forks_repo_name": "MRedies/FLEUR", "max_forks_repo_head_hexsha": "84234831c55459a7539e78600e764ff4ca2ec4b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5714285714, "max_line_length": 124, "alphanum_fraction": 0.570688379, "num_tokens": 584}
from keras import Sequential from keras.layers import Dense import numpy as np from ABC.Agents import Agent, AgentFactory class NeuralAgent(Agent): """ Simple dense neural agent. """ def __init__(self, size_list, activation="tanh"): """"The shape of the layers are one dimensional and taken as a list, from input size to output size.""" assert(len(size_list) > 1) model = Sequential() for i in range(1, len(size_list)-1): model.add(Dense(size_list[i], input_shape=(size_list[i-1],))) model.add(Dense(size_list[len(size_list)-1], activation=activation)) model.build(input_shape=(size_list[0],)) self.model = model self.size_list = size_list self.opt_state = [] def randomize(self): wei = self.get_weights() wei = np.random.uniform(-1, 1, wei.shape) self.set_weights(wei) def get_weights(self): # Return weights as a flattened array wei = self.model.get_weights() res = np.array([]) for w in wei: res = np.hstack((res, w.flatten())) return res def set_weights(self, weights): # Set weights from a flattened array wei = self.model.get_weights() res = [] count = 0 for w in wei: res.append(np.array(weights[count:count + w.size]).reshape(w.shape)) count += w.size self.model.set_weights(res) def choose_action(self, state): state = np.array([state, ]) return self.model.predict(state)[0] def __str__(self): stringlist = [] self.model.summary(print_fn=lambda x: stringlist.append(x)) return "\n".join(stringlist) def get_opt_state(self): return self.opt_state def set_opt_state(self, state): self.opt_state = state def __getstate__(self): dic = dict() dic["as_vector"] = self.get_weights() dic["size_list"] = self.size_list return dic def __setstate__(self, state): self.__init__(state["size_list"]) self.set_weights(state["as_vector"]) class NeuralAgentFactory(AgentFactory): def __init__(self, size_list, activation="tanh"): assert (len(size_list) > 1) self.size_list = size_list self.activation = activation def new(self): return NeuralAgent(self.size_list, self.activation)
{"hexsha": "e5b2a19a9e87146293dca42fe1df0fbeb182b172", "size": 2415, "ext": "py", "lang": "Python", "max_stars_repo_path": "Objects/Agents/KerasAgent.py", "max_stars_repo_name": "MessireToaster/CoEvolution", "max_stars_repo_head_hexsha": "965050f0374bbe6f6d33b371c582a5485bd22410", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-09T16:28:21.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-29T08:07:19.000Z", "max_issues_repo_path": "Objects/Agents/KerasAgent.py", "max_issues_repo_name": "JeremyF-141592/CoEvolution", "max_issues_repo_head_hexsha": "965050f0374bbe6f6d33b371c582a5485bd22410", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Objects/Agents/KerasAgent.py", "max_forks_repo_name": "JeremyF-141592/CoEvolution", "max_forks_repo_head_hexsha": "965050f0374bbe6f6d33b371c582a5485bd22410", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1875, "max_line_length": 111, "alphanum_fraction": 0.6132505176, "include": true, "reason": "import numpy", "num_tokens": 555}
import numpy as np from tqdm import tqdm from keras_cv_attention_models.imagenet import data def eval(model, data_name="imagenet2012", input_shape=None, batch_size=64, central_fraction=1.0, mode='tf'): input_shape = model.input_shape[1:-1] if input_shape is None else input_shape _, test_dataset, _, _, _ = data.init_dataset(data_name, input_shape=input_shape, batch_size=batch_size, central_fraction=central_fraction, mode=mode) y_true, y_pred_top_1, y_pred_top_5 = [], [], [] for img_batch, true_labels in tqdm(test_dataset, "Evaluating", total=len(test_dataset)): predicts = model(img_batch).numpy() pred_args = predicts.argsort(-1) y_pred_top_1.extend(pred_args[:, -1]) y_pred_top_5.extend(pred_args[:, -5:]) y_true.extend(np.array(true_labels).argmax(-1)) y_true, y_pred_top_1, y_pred_top_5 = np.array(y_true), np.array(y_pred_top_1), np.array(y_pred_top_5) accuracy_1 = np.sum(y_true == y_pred_top_1) / y_true.shape[0] accuracy_5 = np.sum([ii in jj for ii, jj in zip(y_true, y_pred_top_5)]) / y_true.shape[0] print(">>>> Accuracy top1:", accuracy_1, "top5:", accuracy_5) return y_true, y_pred_top_1, y_pred_top_5
{"hexsha": "f397dbd468fe37c2265b771e458ae50e51374877", "size": 1197, "ext": "py", "lang": "Python", "max_stars_repo_path": "keras_cv_attention_models/imagenet/eval.py", "max_stars_repo_name": "awsaf49/keras_cv_attention_models", "max_stars_repo_head_hexsha": "242aaf02fd46f68d57f710b9e805afe96e3067e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "keras_cv_attention_models/imagenet/eval.py", "max_issues_repo_name": "awsaf49/keras_cv_attention_models", "max_issues_repo_head_hexsha": "242aaf02fd46f68d57f710b9e805afe96e3067e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "keras_cv_attention_models/imagenet/eval.py", "max_forks_repo_name": "awsaf49/keras_cv_attention_models", "max_forks_repo_head_hexsha": "242aaf02fd46f68d57f710b9e805afe96e3067e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 57.0, "max_line_length": 153, "alphanum_fraction": 0.7201336675, "include": true, "reason": "import numpy", "num_tokens": 338}
import argparse

import numpy as np
import open3d as o3d

parser = argparse.ArgumentParser()
parser.add_argument('--file', type=str, default='../carla_results/auto_pilot_v3_42/eval_routes_06_12_23_30_25/lidar_360/0000.npy', help='npy point cloud')


def main(args):
    # Load the saved lidar sweep and keep only the x, y, z columns for display.
    pcd_npy = np.load(args.file)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(pcd_npy[:, 0:3])
    print(np.asarray(pcd.points))
    o3d.visualization.draw_geometries([pcd])


if __name__ == '__main__':
    main(parser.parse_args())
{"hexsha": "7c7d992e1e74f12b9e45686331dc8daf91c0d637", "size": 570, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/point_cloud_vis.py", "max_stars_repo_name": "sisl/neat", "max_stars_repo_head_hexsha": "42758d910f453686366eddfd1aed440e34c94828", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 183, "max_stars_repo_stars_event_min_datetime": "2021-08-18T13:22:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:40:48.000Z", "max_issues_repo_path": "tools/point_cloud_vis.py", "max_issues_repo_name": "sisl/neat", "max_issues_repo_head_hexsha": "42758d910f453686366eddfd1aed440e34c94828", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-09-24T15:30:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T11:19:23.000Z", "max_forks_repo_path": "tools/point_cloud_vis.py", "max_forks_repo_name": "sisl/neat", "max_forks_repo_head_hexsha": "42758d910f453686366eddfd1aed440e34c94828", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2021-09-11T13:32:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T16:55:53.000Z", "avg_line_length": 31.6666666667, "max_line_length": 155, "alphanum_fraction": 0.7035087719, "include": true, "reason": "import numpy", "num_tokens": 162}
using IterativeSolvers, KrylovKit, Arpack

# In this file, we group the different ways of providing eigen solvers
abstract type AbstractEigenSolver end
abstract type AbstractMFEigenSolver <: AbstractEigenSolver end
abstract type AbstractFloquetSolver <: AbstractEigenSolver end

# the following functions return the n-th eigenvector computed by an eigen solver; they are needed because each eigensolver has a different return type
geteigenvector(eigsolve::ES, vecs, n::Int) where {ES <: AbstractEigenSolver} = vecs[:, n]
geteigenvector(eigsolve::ES, vecs, I::Array{Int64,1}) where {ES <: AbstractEigenSolver} = vecs[:, I]

####################################################################################################
# Default eigen solver, based on a dense factorization
####################################################################################################
"""
The struct `DefaultEig` provides the default eigen solver, based on the dense `eigen` factorization.
"""
@with_kw struct DefaultEig{Tby} <: AbstractEigenSolver
	which::Tby = real # how do we sort the computed eigenvalues
end

function (l::DefaultEig)(J, nev::Int64)
	# we convert to Array so this can also be called on small sparse matrices
	F = eigen(Array(J))
	I = sortperm(F.values, by = l.which, rev = true)
	nev2 = min(nev, length(I))
	return Complex.(F.values[I[1:nev2]]), F.vectors[:, I[1:nev2]], true, 1
end

# case of sparse matrices or matrix-free methods
@with_kw struct EigArpack{T, Tby, Tw} <: AbstractEigenSolver
	sigma::T = nothing
	which::Symbol = :LR
	by::Tby = real # how do we sort the computed eigenvalues.
	kwargs::Tw = nothing
end

EigArpack(sigma = nothing, which = :LR; kwargs...) = EigArpack(sigma, which, real, kwargs)

function (l::EigArpack)(J, nev::Int64)
	if J isa AbstractMatrix
		λ, ϕ, ncv = Arpack.eigs(J; nev = nev, which = l.which, sigma = l.sigma, l.kwargs...)
	else
		N = length(l.kwargs[:v0])
		T = eltype(l.kwargs[:v0])
		Jmap = LinearMap{T}(J, N, N; ismutating = false)
		λ, ϕ, ncv, = Arpack.eigs(Jmap; nev = nev, which = l.which, sigma = l.sigma, l.kwargs...)
end I = sortperm(λ, by = l.by, rev = true) ncv < nev && @warn "$ncv eigenvalues have converged using Arpack.eigs, you requested $nev" return λ[I], ϕ[:, I], true, 1 end #################################################################################################### # Solvers for KrylovKit #################################################################################################### @with_kw struct EigKrylovKit{T, vectype} <: AbstractMFEigenSolver dim::Int64 = KrylovDefaults.krylovdim # Krylov Dimension tol::T = 1e-4 # tolerance for solver restart::Int64 = 200 # number of restarts maxiter::Int64 = KrylovDefaults.maxiter verbose::Int = 0 which::Symbol = :LR issymmetric::Bool = false # if the linear map is symmetric, only meaningful if T<:Real ishermitian::Bool = false # if the linear map is hermitian x₀::vectype = nothing # example of vector in case of a matrix-free operator end function (l::EigKrylovKit{T, vectype})(J, nev::Int64) where {T, vectype} if J isa AbstractMatrix && isnothing(l.x₀) vals, vec, info = KrylovKit.eigsolve(J, nev, l.which; verbosity = l.verbose, krylovdim = l.dim, maxiter = l.maxiter, tol = l.tol, issymmetric = l.issymmetric, ishermitian = l.ishermitian) else vals, vec, info = KrylovKit.eigsolve(J, l.x₀, nev, l.which; verbosity = l.verbose, krylovdim = l.dim, maxiter = l.maxiter, tol = l.tol, issymmetric = l.issymmetric, ishermitian = l.ishermitian) end info.converged == 0 && (@warn "KrylovKit.eigsolve solver did not converge") return vals, vec, true, info.numops end geteigenvector(eigsolve::EigKrylovKit{T, vectype}, vecs, n::Int) where {T, vectype} = vecs[n] geteigenvector(eigsolve::EigKrylovKit{T, vectype}, vecs, I::Array{Int64,1}) where {T, vectype} = vecs[I]
{"hexsha": "e382bb8239c0de97304e34ea911f2fc78eb7d52a", "size": 3805, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/EigSolver.jl", "max_stars_repo_name": "oxinabox/PseudoArcLengthContinuation.jl", "max_stars_repo_head_hexsha": "98bae695df0e7c0521680dae7d786843078e730b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/EigSolver.jl", "max_issues_repo_name": "oxinabox/PseudoArcLengthContinuation.jl", "max_issues_repo_head_hexsha": "98bae695df0e7c0521680dae7d786843078e730b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/EigSolver.jl", "max_forks_repo_name": "oxinabox/PseudoArcLengthContinuation.jl", "max_forks_repo_head_hexsha": "98bae695df0e7c0521680dae7d786843078e730b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.164556962, "max_line_length": 196, "alphanum_fraction": 0.6375821288, "num_tokens": 1123}
#include <StdInc.h> #include "ConvertServiceImpl.h" #include "SparrowFrontend/Helpers/SprTypeTraits.h" #include "SparrowFrontend/Helpers/DeclsHelpers.h" #include "SparrowFrontend/Helpers/StdDef.h" #include "SparrowFrontend/NodeCommonsCpp.h" #include "SparrowFrontend/SparrowFrontendTypes.hpp" #include "SparrowFrontend/Nodes/SprProperties.h" #include "SparrowFrontend/Services/IOverloadService.h" #include "SparrowFrontend/Services/IConceptsService.h" #include "Feather/Api/Feather.h" #include "Feather/Utils/FeatherUtils.hpp" #include "Feather/Utils/cppif/FeatherTypes.hpp" #include "Nest/Utils/Tuple.hpp" #include "Nest/Utils/cppif/SmallVector.hpp" #include "Nest/Utils/Profiling.h" #include <utility> #include <boost/range/adaptor/reversed.hpp> namespace SprFrontend { using namespace Feather; using Nest::SmallVector; ConversionResult ConvertServiceImpl::checkConversion( CompilationContext* context, Type srcType, Type destType, ConversionFlags flags) { return cachedCheckConversion(context, flags, srcType, destType); } ConversionResult ConvertServiceImpl::checkConversion( Node* arg, Type destType, ConversionFlags flags) { ASSERT(arg); Type srcType = Nest_computeType(arg); if (!srcType) return {}; ASSERT(destType); return cachedCheckConversion(arg->context, flags, srcType, destType); } ConversionResult ConvertServiceImpl::checkConversionImpl( CompilationContext* context, int flags, Type src, Type dest) { ASSERT(src); ASSERT(dest); // If the types are the same, then we are happy if (src == dest) return convDirect; // If any of the types doesn't have storage, conversion is invalid if (!src.hasStorage() || !dest.hasStorage()) return convNone; TypeWithStorage srcS = TypeWithStorage(src); TypeWithStorage destS = TypeWithStorage(dest); ConversionResult res{convDirect}; // Check for null conversions if (StdDef::clsNull && srcS.referredNode() == StdDef::clsNull) { if (isDataLikeType(destS) && destS.numReferences() > 0 && destS.referredNode() != StdDef::clsNull) { res.addConversion(convImplicit, ConvAction(ActionType::makeNull, destS)); return res; } } // Do the types have the same mode? 
if (srcS.mode() == destS.mode()) { if (!checkConversionSameMode(res, context, flags, srcS, destS)) return {}; } else { // The only supported mode conversion is CT->RT if (srcS.mode() != modeCt || destS.mode() != modeRt) return {}; // For datatypes conversion, the source type must be usable at RT // TODO (types): check MyRange/ct -> #Range, where MyRange is ct-only if (!isConceptType(destS) && !srcS.canBeUsedAtRt()) return {}; // Disallow conversion of references // @T/ct -> @T is disallowed // Allowed: T/ct -> @T, @T/ct -> T, T/ct -> T mut, T/ct mut -> T int srcRefsBase = srcS.numReferences(); int destRefsBase = destS.numReferences(); if (isCategoryType(srcS)) --srcRefsBase; if (isCategoryType(destS)) --destRefsBase; if (srcRefsBase != 0 && destRefsBase != 0) return {}; // If the types have different modes, then we split our conversion in the following: // a) convert 'src' to reach zero-references // b) apply CT-to-RT conversion // c) covert result to 'dest' TypeWithStorage src0; // same as 'src', with with zero references if (isDataLikeType(srcS)) src0 = removeAllRefs(srcS); else if (isConceptType(srcS)) { src0 = ConceptType::get(ConceptType(srcS).decl(), srcS.mode()); } else return {}; // a) Remove all references from source if (srcS != src0) if (!checkConversionSameMode(res, context, flags, srcS, src0)) return {}; // b) remove CT src0 = src0.changeMode(modeRt, NOLOC); res.addConversion(convDirect, ConvAction(ActionType::modeCast, src0)); // c) convert src0 to dest if (src0 != destS) if (!checkConversionSameMode(res, context, flags, src0, destS)) return {}; } return res; } bool ConvertServiceImpl::checkConversionSameMode(ConversionResult& res, CompilationContext* context, int flags, TypeWithStorage src, TypeWithStorage dest) { ASSERT(src); ASSERT(dest); TypeWithStorage destBase = baseType(dest); // Is the destination is a concept? 
if (isConceptType(destBase)) { return checkConversionToConcept(res, context, flags, src, dest); } // Treat data-like to data-like conversions if (isDataLikeType(dest) && isDataLikeType(src)) { return checkDataConversion( res, context, flags, TypeWithStorage(src), TypeWithStorage(dest)); } return false; } bool ConvertServiceImpl::checkConversionToConcept(ConversionResult& res, CompilationContext* context, int flags, TypeWithStorage src, TypeWithStorage dest) { ASSERT(src); ASSERT(dest); TypeWithStorage srcBase = baseType(src); TypeWithStorage destBase = baseType(dest); // Case 1: concept -> concept if (isConceptType(srcBase)) { // Check wrapper types if (!checkWrapperTypes(res, src, dest)) return false; // Iteratively search the base concept to find our dest type src = srcBase; while (src != destBase) { ConceptDecl conceptNode = ConceptDecl(ConceptType(src).decl()); if (!conceptNode) return false; ConceptType baseType = g_ConceptsService->baseConceptType(conceptNode); if (!baseType || baseType == src) return false; // Not found; cannot convert src = baseType.changeMode(src.mode(), conceptNode.location()); } return true; } // Case 2: data-like -> concept (concept) else if (Feather::isDataLikeType(src)) { // Treat the destination type kind as data-like int destTypeKind = dest.kind(); if (destTypeKind == typeKindConcept) destTypeKind = typeKindData; // Check wrapper types if (!checkWrapperTypes(res, src, dest)) return false; bool isOk = false; Nest::NodeHandle concept = dest.referredNode(); if (!concept) isOk = true; // If we have a concept, check if the type fulfills the concept else if (concept.kind() == nkSparrowDeclSprConcept) { isOk = g_ConceptsService->conceptIsFulfilled(ConceptDecl(concept), src); } // If we have a generic, check if the type is generated from the generic GenericDatatype genericDatatype = concept.kindCast<GenericDatatype>(); if (genericDatatype) { isOk = g_ConceptsService->typeGeneratedFromGeneric(genericDatatype, src); } if (!isOk) return false; // Conversion is possible res.addConversion(convConcept); return true; } return false; } bool ConvertServiceImpl::checkDataConversion(ConversionResult& res, CompilationContext* context, int flags, TypeWithStorage src, TypeWithStorage dest) { // Precondition: we only support datatype-like conversions if (!isDataLikeType(removeCategoryIfPresent(src))) return false; // Case 1: The datatypes have the same decl if (dest.referredNode() == src.referredNode()) { // Check wrapper types if (!checkWrapperTypes(res, src, dest)) return false; res.addConversion(convDirect); return true; } // Case 2: Custom conversions else if (0 == (flags & flagDontCallConversionCtor)) { Node* destClass = dest.referredNode(); if (!destClass) return false; // If the destination class is not marked as convert, bail out // This saves us some processing time if (!Nest_hasProperty(destClass, propConvert)) return false; if (!Nest_computeType(destClass)) return false; // Try to convert srcType to mut destClass if (!g_OverloadService->selectConversionCtor(context, destClass, dest.mode(), src)) return false; // Check access if (!canAccessNode(destClass, context->sourceCode)) return false; // If the class is not public, store the current source code for this conversion // This conversion is not ok in all contexts Nest_SourceCode* sourceCode = nullptr; if (!isPublic(destClass)) sourceCode = context->sourceCode; TypeWithStorage t = destClass->type; EvalMode destMode = t.mode(); if (destMode == modeRt) destMode = src.mode(); t = t.changeMode(destMode, NOLOC); 
TypeWithStorage resType = Feather::isCategoryType(t) ? t : MutableType::get(t); res.addConversion(convCustom, ConvAction(ActionType::customCvt, resType), sourceCode); // Finally, check the wrapper types if (!checkWrapperTypes(res, resType, dest)) return false; return true; } return false; } namespace { //! A logical type wrapper enum TypeWrapper { twPlain = 0, //!< T twPtr, //!< Ptr(T), T != cat twConst, //!< Const(T) twMutable, //!< Mutable(T) twTemp, //!< Temp(T) twPtrConst, //!< Ptr(Const(T)) twPtrMutable, //!< Ptr(Mutable(T)) twPtrTemp, //!< Ptr(Temp(T)) }; ostream& operator<<(ostream& os, TypeWrapper tw) { switch (tw) { case twPlain: os << "Plain"; break; case twPtr: os << "Ptr"; break; case twConst: os << "Const"; break; case twMutable: os << "Mutable"; break; case twTemp: os << "Temp"; break; case twPtrConst: os << "PtrConst"; break; case twPtrMutable: os << "PtrMutable"; break; case twPtrTemp: os << "PtrTemp"; break; default: os << "UnknownWrapper"; } return os; } //! Checks if the type wrapper is a ptr-like (with or without cat) bool isPtr(TypeWrapper t) { return t == twPtr || t == twPtrConst || t == twPtrMutable || t == twPtrTemp; } //! Decompose the type between a base type and a set of wrapper types //! Note: we have distinct wrappers for ptr, ptr(const), ptr(mutable) and ptr(temp) void analyzeType(TypeWithStorage type, TypeWithStorage& base, SmallVector<TypeWrapper>& wrappers) { wrappers.clear(); wrappers.reserve(type.numReferences()); TypeWrapper twPtrDefault = twPtrMutable; while (true) { TypeWithStorage nextType; TypeWrapper tw; if (type.kind() == typeKindPtr) { tw = twPtrDefault; nextType = PtrType(type).base(); if (nextType.kind() == typeKindConst) { tw = twPtrConst; nextType = ConstType(nextType).base(); } else if (nextType.kind() == typeKindMutable) { tw = twPtrMutable; nextType = MutableType(nextType).base(); } else if (nextType.kind() == typeKindTemp) { tw = twPtrTemp; nextType = TempType(nextType).base(); } } else if (type.kind() == typeKindConst) { tw = twConst; // twPtrDefault = twPtrConst; // transient category types? nextType = ConstType(type).base(); } else if (type.kind() == typeKindMutable) { tw = twMutable; // twPtrDefault = twPtrMutable; // transient category types? nextType = MutableType(type).base(); } else if (type.kind() == typeKindTemp) { tw = twTemp; // twPtrDefault = twPtrTemp; // transient category types? nextType = TempType(type).base(); } else break; wrappers.push_back(tw); ASSERT(nextType); type = nextType; } std::reverse(wrappers.begin(), wrappers.end()); base = type; } //! 
Replace the base type from the given type; all the other wrappers remain exactly the same TypeWithStorage replaceBaseType(TypeWithStorage type, TypeWithStorage newBase) { SmallVector<int> kinds; kinds.reserve(type.numReferences()); while (true) { auto kind = type.kind(); if (type.kind() == typeKindPtr) { type = PtrType(type).base(); } else if (type.kind() == typeKindConst) { type = ConstType(type).base(); } else if (type.kind() == typeKindMutable) { type = MutableType(type).base(); } else if (type.kind() == typeKindTemp) { type = TempType(type).base(); } else break; kinds.push_back(kind); } TypeWithStorage res = newBase; for (auto k : boost::adaptors::reverse(kinds)) { if (k == typeKindPtr) res = PtrType::get(res); else if (k == typeKindConst) res = ConstType::get(res); else if (k == typeKindMutable) res = MutableType::get(res); else if (k == typeKindTemp) res = TempType::get(res); } return res; } enum ElemConvType { none = 0, // no conversion possible direct, // same type addPtr, // add pointer removePtr, // remove pointer catCast, // cast between categories (with extra refs) addCat, // plain -> category removeCat, // category -> plain ptr2Cat, // ptr -> category cat2Ptr, // category -> ptr }; ostream& operator<<(ostream& os, ElemConvType tw) { switch (tw) { case none: os << "none"; break; case direct: os << "direct"; break; case addPtr: os << "addPtr"; break; case removePtr: os << "removePtr"; break; case catCast: os << "catCast"; break; case addCat: os << "addCat"; break; case removeCat: os << "removeCat"; break; case ptr2Cat: os << "ptr2Cat"; break; case cat2Ptr: os << "cat2Ptr"; break; } return os; } //! Check an elementary casts; looks only at the kinds of the top-most types. ElemConvType checkElementaryCast(TypeWrapper src, TypeWrapper dest) { // clang-format off constexpr ElemConvType conversions[8][8] = { {direct, addPtr, addCat, none, addCat, addPtr, addPtr, addPtr },// from plain {removePtr, direct, ptr2Cat, ptr2Cat, ptr2Cat, catCast, direct, none },// from ptr {removeCat, cat2Ptr, direct, none, none, cat2Ptr, none, none },// from const {removeCat, cat2Ptr, catCast, direct, none, cat2Ptr, cat2Ptr, none },// from mutable {removeCat, cat2Ptr, catCast, catCast, direct, cat2Ptr, cat2Ptr, cat2Ptr},// from temp {removePtr, none, ptr2Cat, none, none, direct, none, none },// from ptr const {removePtr, direct, ptr2Cat, ptr2Cat, none, catCast, direct, none },// from ptr mutable {removePtr, catCast, catCast, catCast, catCast, catCast, catCast, direct },// from ptr temp }; // to: plain, ptr, const, mutable, temp, p-const, p-mut, p-temp // clang-format on return conversions[src][dest]; } //! Check cast for cat types wrapped by ptr (both on src and dest) ElemConvType checkInPtrCast(TypeWrapper src, TypeWrapper dest) { ASSERT(isPtr(src)); ASSERT(isPtr(dest)); // clang-format off constexpr ElemConvType conversions[8][8] = { {none, none, none, none, none, none, none, none },// from plain {none, direct, none, none, none, catCast, direct, none },// from ptr {none, none, none, none, none, none, none, none },// from const {none, none, none, none, none, none, none, none },// from mutable {none, none, none, none, none, none, none, none },// from temp {none, none, none, none, none, direct, none, none },// from ptr const {none, direct, none, none, none, catCast, direct, none },// from ptr mutable {none, catCast, none, none, none, catCast, catCast, direct },// from ptr temp }; // to: plain, ptr, const,mut, temp, p-const, p-mut, p-temp // clang-format on return conversions[src][dest]; } //! 
A stack of node kinds, from which we can pop the base kinds. struct WrappersStack { SmallVector<TypeWrapper> wrappers; int cur{0}; //! Get the index kind at the given index; returns twPlain if going over bounds TypeWrapper operator[](int idx) const { return cur + idx < wrappers.size() ? wrappers[cur + idx] : twPlain; } //! Is the stack empty bool empty() const { return cur >= wrappers.size(); } //! The number of elements remaining in the stack int size() const { return int(wrappers.size()) - cur; } //! Consume the first kind from the stack TypeWrapper pop() { assert(!empty()); auto res = wrappers[cur]; cur++; return res; } }; } // namespace bool ConvertServiceImpl::checkWrapperTypes( ConversionResult& res, TypeWithStorage src, TypeWithStorage dest) { // Analyze the two types: figure our their base type and all the wrappers WrappersStack srcWrappers; WrappersStack destWrappers; TypeWithStorage srcBase, destBase; analyzeType(src, srcBase, srcWrappers.wrappers); analyzeType(dest, destBase, destWrappers.wrappers); bool doDebug = false; // StringRef(src.description()) == "i8/ct ptr ptr ptr mut";// && // StringRef(dest.description()) == "i8 mut ptr"; if (doDebug) { cerr << src << " -> " << dest << "\n"; cerr << " src: base=" << srcBase << " wrappers ="; for (auto w : srcWrappers.wrappers) cerr << " " << w; cerr << "\n"; cerr << " dest: base=" << destBase << " wrappers ="; for (auto w : destWrappers.wrappers) cerr << " " << w; cerr << "\n"; } // Handle the case where the destination is a concept // Apply the dest shape on the base source type if (destBase.kind() == typeKindConcept) { dest = replaceBaseType(dest, srcBase); destBase = srcBase; } // Check origin if (srcBase != destBase) return false; // First clear up the pointers from both sides -- advance in tandem // We match pointer to pointer // We check categories at every iteration // Note: between pointers we may have at most one cat type, but nothing else. 
bool needsCast = false; bool needsImplicit = false; while (!srcWrappers.empty() && !destWrappers.empty()) { TypeWrapper srcW = srcWrappers[0]; TypeWrapper destW = destWrappers[0]; bool srcIsPtr = isPtr(srcW); bool destIsPtr = isPtr(destW); if (destIsPtr && !srcIsPtr) return false; // cannot add ptr if (!destIsPtr || !srcIsPtr) break; srcWrappers.pop(); destWrappers.pop(); // Check elementary casts between possible category types auto conv = checkInPtrCast(srcW, destW); if (doDebug) cerr << " iter check: " << srcW << " -> " << destW << " = " << conv << "\n"; if (conv == none) return false; // TODO (now): Check possible values of ElemConvType if (conv != direct) { needsCast = true; needsImplicit = true; } } // Ensure there are no more pointers on the dest side if (!destWrappers.empty() && isPtr(destWrappers[0])) return false; // Now the middle part, after the common pointers // dest may or may not have a cat left // Try to consume everything from dest bool shouldAddCat = false; bool needsDeref = false; auto srcW = srcWrappers[0]; auto destW = destWrappers[0]; auto conv = checkElementaryCast(srcW, destW); if (doDebug) cerr << " top check: " << srcW << " -> " << destW << " = " << conv << "\n"; switch (conv) { case none: return false; case direct: // T <cat> <others>* -> U <cat> (where T and U are ref-equivalent) if (!srcWrappers.empty()) srcWrappers.pop(); break; case catCast: // T <cat1> <others>* -> U <cat2> (where T and U are ref-equivalent) needsCast = true; needsImplicit = true; srcWrappers.pop(); break; case addCat: // T -> U <cat> (where T and U are ref-equivalent) // the source doesn't have anymore ptrs if (srcWrappers.cur > 0 && destW == twTemp) return false; // Forbid adding 'temp' on ptr types ASSERT(srcWrappers.empty()); needsImplicit = true; shouldAddCat = true; break; case ptr2Cat: // T ptr <others>* -> U <cat> needsImplicit = true; needsCast = true; srcWrappers.pop(); break; case removeCat: // T <cat> <others>* -> U // needsImplicit = true; needsDeref = true; if (!srcWrappers.empty()) srcWrappers.pop(); break; case removePtr: // T ptr <others>* -> U // Don't do anything. Treat this withing general deref break; case cat2Ptr: return false; case addPtr: default: ASSERT(false); return false; } // Dest should be empty now if (!destWrappers.empty()) { destWrappers.pop(); ASSERT(destWrappers.empty()); } // Do we just need to add a category? if (shouldAddCat) { ASSERT(srcWrappers.empty()); res.addConversion(convDirect, ConvAction(ActionType::addRef, dest)); return true; } // We may still have some wrapper types in src. // Process them in reverse order now int numDerefs = srcWrappers.size() - destWrappers.size(); if (doDebug) cerr << " num derefs=" << numDerefs << "\n"; for (int i = 0; i < numDerefs; i++) { src = dereferenceType(src); res.addConversion(convImplicit, ConvAction(ActionType::dereference, src)); } // Handle cases like T mut -> T or T ptr mut -> T ptr if (Feather::isCategoryType(src) && src.numReferences() > dest.numReferences()) { src = removeCategoryIfPresent(src); res.addConversion(convDirect, ConvAction(ActionType::dereference, src)); } if (doDebug) cerr << " src=" << src << " dest=" << dest << "\n"; if (src != dest) { if (needsCast) res.addConversion(needsImplicit ? 
convImplicit : convDirect, ConvAction(ActionType::bitcast, dest)); else if (src.numReferences() > dest.numReferences()) res.addConversion(convImplicit, ConvAction(ActionType::dereference, dest)); else if (src.numReferences() == dest.numReferences()) { res.addConversion(convImplicit, ConvAction(ActionType::bitcast, dest)); } else REP_INTERNAL(NOLOC, "Invalid conversion between %1% and %2%") % src % dest; } if (doDebug) { cerr << " conv ok:\n"; for (auto p : res.convertActions()) cerr << " " << p.first << " => " << p.second << "\n"; } return true; } const ConversionResult& ConvertServiceImpl::cachedCheckConversion( CompilationContext* context, int flags, Type srcType, Type destType) { PROFILING_ZONE(); // Try to find the conversion in the map -- first, try without a source code KeyType key(srcType, destType, flags, nullptr); auto it = conversionMap_.find(key); if (it != conversionMap_.end()) return it->second; // Now try with a source code key = KeyType(srcType, destType, flags, context->sourceCode); it = conversionMap_.find(key); if (it != conversionMap_.end()) return it->second; // Compute the value normally ConversionResult res = checkConversionImpl(context, flags, srcType, destType); // cout << srcType << " -> " << destType << " : " << res << endl; // cout << srcType << " -> " << destType << " (" << flags << ") :" << res << endl; // Put the result in the cache, if not context dependent std::get<3>(key) = res.sourceCode(); auto r = conversionMap_.insert(make_pair(key, res)); return r.first->second; } } // namespace SprFrontend
{"hexsha": "2b2041f748aba367ef4a82349b737e2ffbac7899", "size": 24704, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/SparrowFrontend/Services/Convert/ConvertServiceImpl.cpp", "max_stars_repo_name": "Sparrow-lang/sparrow", "max_stars_repo_head_hexsha": "b1cf41f79b52665d8208f8fb5a7539d764286daa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 80.0, "max_stars_repo_stars_event_min_datetime": "2015-05-05T12:21:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:38:48.000Z", "max_issues_repo_path": "src/SparrowFrontend/Services/Convert/ConvertServiceImpl.cpp", "max_issues_repo_name": "Sparrow-lang/sparrow", "max_issues_repo_head_hexsha": "b1cf41f79b52665d8208f8fb5a7539d764286daa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 51.0, "max_issues_repo_issues_event_min_datetime": "2016-09-09T13:44:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-28T07:03:02.000Z", "max_forks_repo_path": "src/SparrowFrontend/Services/Convert/ConvertServiceImpl.cpp", "max_forks_repo_name": "Sparrow-lang/sparrow", "max_forks_repo_head_hexsha": "b1cf41f79b52665d8208f8fb5a7539d764286daa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2015-07-28T11:34:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-01T21:54:06.000Z", "avg_line_length": 33.9807427785, "max_line_length": 102, "alphanum_fraction": 0.59375, "num_tokens": 6152}
# Copyright 2022 Reuben Owen-Williams # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mysql.connector from mysql.connector import Error from dbLogin import configprivate import pandas as pd import numpy as np """ Manipulates the "uk_gov_data_sparse" and "uk_gov_data_dense" table to produce "...preproc" tables, while addressing the outliers identified in dataCleanAnalyse.py. """ def create_database_connection(host_name, user_name, user_password, database): """ Returns a connection to the database "vehicles" in the local MySQL server. """ connection = None try: connection = mysql.connector.connect( host=host_name, user=user_name, passwd=user_password, database=database ) print("MySQL Database connection successful") except Error as err: print(f"Error: '{err}'") return connection def create_table(connection, query): """ Creates a table in the "vehicles" database in the local MySQL server. """ cursor = connection.cursor() try: cursor.execute(query) print("Table created successfully") except Error as err: print(f"Error: '{err}'") def insert_table(connection, query, df): """ Performs queries, e.g. INSERT, in the "vehicles" database. """ cursor = connection.cursor() try: for i in range(0, df.shape[0]): cursor.execute(query, tuple(df.iloc[i].values.flatten().tolist())) connection.commit() print("Table edited successfully") except Error as err: print(f"Error: '{err}'") def main(): """ Manipulates the "uk_gov_data_sparse" and "uk_gov_data_dense" table to produce "...preproc" tables, while addressing the outliers identified in dataCleanAnalyse.py. """ connection = create_database_connection("localhost", configprivate.username, configprivate.password, "vehicles") # Read the UK gov data from the "vehicles" database using pandas. Convert "car_id" from int64 (a numpy type) to float as MySQL cannot convert: # https://stackoverflow.com/questions/56731036/interfaceerror-failed-executing-the-operation-python-type-numpy-int64-cannot-b govDataSparse = pd.read_sql("SELECT * FROM uk_gov_data_sparse", connection) govDataSparse = govDataSparse.astype(dtype = {"car_id": float}, copy=True) # Create the table "uk_gov_data_sparse_preproc". create_govtablesparse_query = """ USE vehicles; CREATE TABLE uk_gov_data_sparse_preproc LIKE uk_gov_data_sparse; """ create_table(connection, create_govtablesparse_query) # (1) Replace "model" = "G-Class MY 201.5" (ICE outlier) emissions with "377.0". govDataSparse.loc[(govDataSparse["powertrain"]=="Internal Combustion Engine (ICE)")&(govDataSparse["co2_emissions_gPERkm"]<90), "co2_emissions_gPERkm"] = 377 # (3) Replace "model" = "Galaxy Model Year Post 2021" (HEV outlier) emissions with "148.0". govDataSparse.loc[(govDataSparse["model"]=="Galaxy Model Year Post 2021")&(govDataSparse["description"]=="2.5 Duratec (FHEV)")&(govDataSparse["co2_emissions_gPERkm"]>300), "co2_emissions_gPERkm"] = 148 # Populate the sparse preprocessed table "uk_gov_data_sparse_preproc". 
connection = create_database_connection("localhost", configprivate.username, configprivate.password, "vehicles") govDataSparseImport = govDataSparse.replace({np.nan: None}, inplace=False) query = """INSERT INTO uk_gov_data_sparse_preproc VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""" insert_table(connection, query, govDataSparseImport) # Save this preprocessed sparse data as a csv to "data\processed". govDataSparseImport.to_csv('./data/processed/uk_gov_data_sparse_preproc.csv', index=False, encoding="ISO-8859-1") connection = create_database_connection("localhost", configprivate.username, configprivate.password, "vehicles") # Read the UK gov data from the "vehicles" database using pandas. Convert "car_id" from int64 (a numpy type) to float as MySQL cannot convert: # https://stackoverflow.com/questions/56731036/interfaceerror-failed-executing-the-operation-python-type-numpy-int64-cannot-b govDataDense = pd.read_sql("SELECT * FROM uk_gov_data_dense", connection) govDataDense = govDataDense.astype(dtype = {"car_id": float, "engine_size_cm3": float, "power_ps": float}, copy=True) # Create the table "uk_gov_data_dense_preproc". create_govtabledense_query = """ USE vehicles; CREATE TABLE uk_gov_data_dense_preproc LIKE uk_gov_data_dense; """ create_table(connection, create_govtabledense_query) # (1) Replace "model" = "G-Class MY 201.5" (ICE outlier) emissions with "377.0". govDataDense.loc[(govDataDense["powertrain"]=="Internal Combustion Engine (ICE)")&(govDataDense["co2_emissions_gPERkm"]<90), "co2_emissions_gPERkm"] = 377 # (3) Replace "model" = "Galaxy Model Year Post 2021" (HEV outlier) emissions with "148.0". govDataDense.loc[(govDataDense["model"]=="Galaxy Model Year Post 2021")&(govDataDense["description"]=="2.5 Duratec (FHEV)")&(govDataDense["co2_emissions_gPERkm"]>300), "co2_emissions_gPERkm"] = 148 # Populate the dense table "uk_gov_data_dense_preproc". connection = create_database_connection("localhost", configprivate.username, configprivate.password, "vehicles") govDataDenseImport = govDataDense query = """INSERT INTO uk_gov_data_dense_preproc VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""" insert_table(connection, query, govDataDenseImport) # Save this cleaned dense data as a csv to "data\processed". govDataDenseImport.to_csv('./data/processed/uk_gov_data_dense_preproc.csv', index=False, encoding="ISO-8859-1") main()
{"hexsha": "30feb68a9932f2cecc93dc474ca651626461fede", "size": 6298, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data/dataPreprocess.py", "max_stars_repo_name": "ReubenGitHub/MachineLearning-Vehicle-Emissions", "max_stars_repo_head_hexsha": "5a6d5366d15cb918de5464c48e0067efceda4149", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data/dataPreprocess.py", "max_issues_repo_name": "ReubenGitHub/MachineLearning-Vehicle-Emissions", "max_issues_repo_head_hexsha": "5a6d5366d15cb918de5464c48e0067efceda4149", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data/dataPreprocess.py", "max_forks_repo_name": "ReubenGitHub/MachineLearning-Vehicle-Emissions", "max_forks_repo_head_hexsha": "5a6d5366d15cb918de5464c48e0067efceda4149", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.203125, "max_line_length": 205, "alphanum_fraction": 0.7127659574, "include": true, "reason": "import numpy", "num_tokens": 1588}
from __future__ import print_function, division from .vector import Vector, _check_vector from .frame import _check_frame __all__ = ['Point'] class Point(object): """This object represents a point in a dynamic system. It stores the: position, velocity, and acceleration of a point. The position is a vector defined as the vector distance from a parent point to this point. Parameters ========== name : string The display name of the Point Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols >>> from sympy.physics.vector import init_vprinting >>> init_vprinting(pretty_print=False) >>> N = ReferenceFrame('N') >>> O = Point('O') >>> P = Point('P') >>> u1, u2, u3 = dynamicsymbols('u1 u2 u3') >>> O.set_vel(N, u1 * N.x + u2 * N.y + u3 * N.z) >>> O.acc(N) u1'*N.x + u2'*N.y + u3'*N.z symbols() can be used to create multiple Points in a single step, for example: >>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols >>> from sympy.physics.vector import init_vprinting >>> init_vprinting(pretty_print=False) >>> from sympy import symbols >>> N = ReferenceFrame('N') >>> u1, u2 = dynamicsymbols('u1 u2') >>> A, B = symbols('A B', cls=Point) >>> type(A) <class 'sympy.physics.vector.point.Point'> >>> A.set_vel(N, u1 * N.x + u2 * N.y) >>> B.set_vel(N, u2 * N.x + u1 * N.y) >>> A.acc(N) - B.acc(N) (u1' - u2')*N.x + (-u1' + u2')*N.y """ def __init__(self, name): """Initialization of a Point object. """ self.name = name self._pos_dict = {} self._vel_dict = {} self._acc_dict = {} self._pdlist = [self._pos_dict, self._vel_dict, self._acc_dict] def __str__(self): return self.name __repr__ = __str__ def _check_point(self, other): if not isinstance(other, Point): raise TypeError('A Point must be supplied') def _pdict_list(self, other, num): """Returns a list of points that gives the shortest path with respect to position, velocity, or acceleration from this point to the provided point. Parameters ========== other : Point A point that may be related to this point by position, velocity, or acceleration. num : integer 0 for searching the position tree, 1 for searching the velocity tree, and 2 for searching the acceleration tree. Returns ======= list of Points A sequence of points from self to other. Notes ===== It isn't clear if num = 1 or num = 2 actually works because the keys to ``_vel_dict`` and ``_acc_dict`` are :class:`ReferenceFrame` objects which do not have the ``_pdlist`` attribute. """ outlist = [[self]] oldlist = [[]] while outlist != oldlist: oldlist = outlist[:] for i, v in enumerate(outlist): templist = v[-1]._pdlist[num].keys() for i2, v2 in enumerate(templist): if not v.__contains__(v2): littletemplist = v + [v2] if not outlist.__contains__(littletemplist): outlist.append(littletemplist) for i, v in enumerate(oldlist): if v[-1] != other: outlist.remove(v) outlist.sort(key=len) if len(outlist) != 0: return outlist[0] raise ValueError('No Connecting Path found between ' + other.name + ' and ' + self.name) def a1pt_theory(self, otherpoint, outframe, interframe): """Sets the acceleration of this point with the 1-point theory. The 1-point theory for point acceleration looks like this: ^N a^P = ^B a^P + ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B x r^OP) + 2 ^N omega^B x ^B v^P where O is a point fixed in B, P is a point moving in B, and B is rotating in frame N. 
Parameters ========== otherpoint : Point The first point of the 1-point theory (O) outframe : ReferenceFrame The frame we want this point's acceleration defined in (N) fixedframe : ReferenceFrame The intermediate frame in this calculation (B) Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame >>> from sympy.physics.vector import dynamicsymbols >>> from sympy.physics.vector import init_vprinting >>> init_vprinting(pretty_print=False) >>> q = dynamicsymbols('q') >>> q2 = dynamicsymbols('q2') >>> qd = dynamicsymbols('q', 1) >>> q2d = dynamicsymbols('q2', 1) >>> N = ReferenceFrame('N') >>> B = ReferenceFrame('B') >>> B.set_ang_vel(N, 5 * B.y) >>> O = Point('O') >>> P = O.locatenew('P', q * B.x) >>> P.set_vel(B, qd * B.x + q2d * B.y) >>> O.set_vel(N, 0) >>> P.a1pt_theory(O, N, B) (-25*q + q'')*B.x + q2''*B.y - 10*q'*B.z """ _check_frame(outframe) _check_frame(interframe) self._check_point(otherpoint) dist = self.pos_from(otherpoint) v = self.vel(interframe) a1 = otherpoint.acc(outframe) a2 = self.acc(interframe) omega = interframe.ang_vel_in(outframe) alpha = interframe.ang_acc_in(outframe) self.set_acc(outframe, a2 + 2 * (omega ^ v) + a1 + (alpha ^ dist) + (omega ^ (omega ^ dist))) return self.acc(outframe) def a2pt_theory(self, otherpoint, outframe, fixedframe): """Sets the acceleration of this point with the 2-point theory. The 2-point theory for point acceleration looks like this: ^N a^P = ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B x r^OP) where O and P are both points fixed in frame B, which is rotating in frame N. Parameters ========== otherpoint : Point The first point of the 2-point theory (O) outframe : ReferenceFrame The frame we want this point's acceleration defined in (N) fixedframe : ReferenceFrame The frame in which both points are fixed (B) Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols >>> from sympy.physics.vector import init_vprinting >>> init_vprinting(pretty_print=False) >>> q = dynamicsymbols('q') >>> qd = dynamicsymbols('q', 1) >>> N = ReferenceFrame('N') >>> B = N.orientnew('B', 'Axis', [q, N.z]) >>> O = Point('O') >>> P = O.locatenew('P', 10 * B.x) >>> O.set_vel(N, 5 * N.x) >>> P.a2pt_theory(O, N, B) - 10*q'**2*B.x + 10*q''*B.y """ _check_frame(outframe) _check_frame(fixedframe) self._check_point(otherpoint) dist = self.pos_from(otherpoint) a = otherpoint.acc(outframe) omega = fixedframe.ang_vel_in(outframe) alpha = fixedframe.ang_acc_in(outframe) self.set_acc(outframe, a + (alpha ^ dist) + (omega ^ (omega ^ dist))) return self.acc(outframe) def acc(self, frame): """The acceleration Vector of this Point in a ReferenceFrame. Parameters ========== frame : ReferenceFrame The frame in which the returned acceleration vector will be defined in Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame >>> N = ReferenceFrame('N') >>> p1 = Point('p1') >>> p1.set_acc(N, 10 * N.x) >>> p1.acc(N) 10*N.x """ _check_frame(frame) if not (frame in self._acc_dict): if self._vel_dict[frame] != 0: return (self._vel_dict[frame]).dt(frame) else: return Vector(0) return self._acc_dict[frame] def locatenew(self, name, value): """Creates a new point with a position defined from this point. 
Parameters ========== name : str The name for the new point value : Vector The position of the new point relative to this point Examples ======== >>> from sympy.physics.vector import ReferenceFrame, Point >>> N = ReferenceFrame('N') >>> P1 = Point('P1') >>> P2 = P1.locatenew('P2', 10 * N.x) """ if not isinstance(name, str): raise TypeError('Must supply a valid name') if value == 0: value = Vector(0) value = _check_vector(value) p = Point(name) p.set_pos(self, value) self.set_pos(p, -value) return p def pos_from(self, otherpoint): """Returns a Vector distance between this Point and the other Point. Parameters ========== otherpoint : Point The otherpoint we are locating this one relative to Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame >>> N = ReferenceFrame('N') >>> p1 = Point('p1') >>> p2 = Point('p2') >>> p1.set_pos(p2, 10 * N.x) >>> p1.pos_from(p2) 10*N.x """ outvec = Vector(0) plist = self._pdict_list(otherpoint, 0) for i in range(len(plist) - 1): outvec += plist[i]._pos_dict[plist[i + 1]] return outvec def set_acc(self, frame, value): """Used to set the acceleration of this Point in a ReferenceFrame. Parameters ========== frame : ReferenceFrame The frame in which this point's acceleration is defined value : Vector The vector value of this point's acceleration in the frame Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame >>> N = ReferenceFrame('N') >>> p1 = Point('p1') >>> p1.set_acc(N, 10 * N.x) >>> p1.acc(N) 10*N.x """ if value == 0: value = Vector(0) value = _check_vector(value) _check_frame(frame) self._acc_dict.update({frame: value}) def set_pos(self, otherpoint, value): """Used to set the position of this point w.r.t. another point. Parameters ========== otherpoint : Point The other point which this point's location is defined relative to value : Vector The vector which defines the location of this point Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame >>> N = ReferenceFrame('N') >>> p1 = Point('p1') >>> p2 = Point('p2') >>> p1.set_pos(p2, 10 * N.x) >>> p1.pos_from(p2) 10*N.x """ if value == 0: value = Vector(0) value = _check_vector(value) self._check_point(otherpoint) self._pos_dict.update({otherpoint: value}) otherpoint._pos_dict.update({self: -value}) def set_vel(self, frame, value): """Sets the velocity Vector of this Point in a ReferenceFrame. Parameters ========== frame : ReferenceFrame The frame in which this point's velocity is defined value : Vector The vector value of this point's velocity in the frame Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame >>> N = ReferenceFrame('N') >>> p1 = Point('p1') >>> p1.set_vel(N, 10 * N.x) >>> p1.vel(N) 10*N.x """ if value == 0: value = Vector(0) value = _check_vector(value) _check_frame(frame) self._vel_dict.update({frame: value}) def v1pt_theory(self, otherpoint, outframe, interframe): """Sets the velocity of this point with the 1-point theory. The 1-point theory for point velocity looks like this: ^N v^P = ^B v^P + ^N v^O + ^N omega^B x r^OP where O is a point fixed in B, P is a point moving in B, and B is rotating in frame N. 
Parameters ========== otherpoint : Point The first point of the 2-point theory (O) outframe : ReferenceFrame The frame we want this point's velocity defined in (N) interframe : ReferenceFrame The intermediate frame in this calculation (B) Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame >>> from sympy.physics.vector import dynamicsymbols >>> from sympy.physics.vector import init_vprinting >>> init_vprinting(pretty_print=False) >>> q = dynamicsymbols('q') >>> q2 = dynamicsymbols('q2') >>> qd = dynamicsymbols('q', 1) >>> q2d = dynamicsymbols('q2', 1) >>> N = ReferenceFrame('N') >>> B = ReferenceFrame('B') >>> B.set_ang_vel(N, 5 * B.y) >>> O = Point('O') >>> P = O.locatenew('P', q * B.x) >>> P.set_vel(B, qd * B.x + q2d * B.y) >>> O.set_vel(N, 0) >>> P.v1pt_theory(O, N, B) q'*B.x + q2'*B.y - 5*q*B.z """ _check_frame(outframe) _check_frame(interframe) self._check_point(otherpoint) dist = self.pos_from(otherpoint) v1 = self.vel(interframe) v2 = otherpoint.vel(outframe) omega = interframe.ang_vel_in(outframe) self.set_vel(outframe, v1 + v2 + (omega ^ dist)) return self.vel(outframe) def v2pt_theory(self, otherpoint, outframe, fixedframe): """Sets the velocity of this point with the 2-point theory. The 2-point theory for point velocity looks like this: ^N v^P = ^N v^O + ^N omega^B x r^OP where O and P are both points fixed in frame B, which is rotating in frame N. Parameters ========== otherpoint : Point The first point of the 2-point theory (O) outframe : ReferenceFrame The frame we want this point's velocity defined in (N) fixedframe : ReferenceFrame The frame in which both points are fixed (B) Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols >>> from sympy.physics.vector import init_vprinting >>> init_vprinting(pretty_print=False) >>> q = dynamicsymbols('q') >>> qd = dynamicsymbols('q', 1) >>> N = ReferenceFrame('N') >>> B = N.orientnew('B', 'Axis', [q, N.z]) >>> O = Point('O') >>> P = O.locatenew('P', 10 * B.x) >>> O.set_vel(N, 5 * N.x) >>> P.v2pt_theory(O, N, B) 5*N.x + 10*q'*B.y """ _check_frame(outframe) _check_frame(fixedframe) self._check_point(otherpoint) dist = self.pos_from(otherpoint) v = otherpoint.vel(outframe) omega = fixedframe.ang_vel_in(outframe) self.set_vel(outframe, v + (omega ^ dist)) return self.vel(outframe) def vel(self, frame): """The velocity Vector of this Point in the ReferenceFrame. Parameters ========== frame : ReferenceFrame The frame in which the returned velocity vector will be defined in Examples ======== >>> from sympy.physics.vector import Point, ReferenceFrame >>> N = ReferenceFrame('N') >>> p1 = Point('p1') >>> p1.set_vel(N, 10 * N.x) >>> p1.vel(N) 10*N.x """ _check_frame(frame) if not (frame in self._vel_dict): raise ValueError('Velocity of point ' + self.name + ' has not been' ' defined in ReferenceFrame ' + frame.name) return self._vel_dict[frame] def partial_velocity(self, frame, *gen_speeds): """Returns the partial velocities of the linear velocity vector of this point in the given frame with respect to one or more provided generalized speeds. Parameters ========== frame : ReferenceFrame The frame with which the velocity is defined in. gen_speeds : functions of time The generalized speeds. Returns ======= partial_velocities : tuple of Vector The partial velocity vectors corresponding to the provided generalized speeds. 
Examples ======== >>> from sympy.physics.vector import ReferenceFrame, Point >>> from sympy.physics.vector import dynamicsymbols >>> N = ReferenceFrame('N') >>> A = ReferenceFrame('A') >>> p = Point('p') >>> u1, u2 = dynamicsymbols('u1, u2') >>> p.set_vel(N, u1 * N.x + u2 * A.y) >>> p.partial_velocity(N, u1) N.x >>> p.partial_velocity(N, u1, u2) (N.x, A.y) """ partials = [self.vel(frame).diff(speed, frame, var_in_dcm=False) for speed in gen_speeds] if len(partials) == 1: return partials[0] else: return tuple(partials)
{"hexsha": "d4e3cfcc90cc359a8288befb2bc96162f9bdb0ef", "size": 17502, "ext": "py", "lang": "Python", "max_stars_repo_path": "sympy/physics/vector/point.py", "max_stars_repo_name": "alexmalins/sympy", "max_stars_repo_head_hexsha": "6acf7eadb39677fa728ac6437339f4d90c33d961", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sympy/physics/vector/point.py", "max_issues_repo_name": "alexmalins/sympy", "max_issues_repo_head_hexsha": "6acf7eadb39677fa728ac6437339f4d90c33d961", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sympy/physics/vector/point.py", "max_forks_repo_name": "alexmalins/sympy", "max_forks_repo_head_hexsha": "6acf7eadb39677fa728ac6437339f4d90c33d961", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8677248677, "max_line_length": 82, "alphanum_fraction": 0.5466232431, "include": true, "reason": "from sympy", "num_tokens": 4348}
! ! Note: This isn't intended to be a comprehensive PRNG test suite, but is ! merely intended to highlight any serious flaws in the coding rather than ! numerical design. ! program random use spral_random implicit none integer, parameter :: long = selected_int_kind(18) integer, parameter :: wp = kind(0d0) integer, parameter :: nsamples = 50000 integer, parameter :: nbins = 100 real, parameter :: require_confidence = 0.99 integer :: errors errors = 0 call test_real_dist() call test_integer32_dist() call test_integer64_dist() call test_logical_dist() if(errors.eq.0) then write(*, "(/a)") "===================" write(*, "(a)") "All tests suceeeded" write(*, "(a)") "===================" else write(*, "(/a)") "===================" write(*, "(a, i4)") "Failed ", errors write(*, "(a)") "===================" endif contains subroutine test_real_dist type(random_state) :: state integer :: i, j integer :: bin(nbins) real(wp) :: sample, chisq write(*, "(/a)") "=====================================" write(*, "(a)") "Testing random_real()" write(*, "(a)") "=====================================" ! ! Test (-1,1) distribution ! write(*, "(a)", advance="no") "Sampling Unif(-1,1)...... " ! Acquire sample bin(:) = 0 do i = 1, nsamples sample = random_real(state) j = int(nbins * ((sample+1.0_wp)/2.0_wp)) + 1 bin(j) = bin(j) + 1 end do chisq = 1.0_wp/nsamples * sum( (bin(:)**2.0_wp) * nbins ) - nsamples if(chisq < chisq_pval(nbins-1, require_confidence)) then write(*, "(a)") "pass" else write(*, "(a)") "fail" write(*, "(a,es12.4)") "chisq statistic = ", chisq write(*, "(a,es12.4)") "chisq required < ", & chisq_pval(nbins-1, require_confidence) errors = errors + 1 endif ! ! Test (0,1) distribution ! write(*, "(a)", advance="no") "Sampling Unif(0,1)....... " ! Acquire sample bin(:) = 0 do i = 1, nsamples sample = random_real(state, positive=.true.) j = int(nbins * sample) + 1 bin(j) = bin(j) + 1 end do chisq = 1.0_wp/nsamples * sum( (bin(:)**2.0_wp) * nbins ) - nsamples if(chisq < chisq_pval(nbins-1, require_confidence)) then write(*, "(a)") "pass" else write(*, "(a)") "fail" write(*, "(a,es12.4)") "chisq statistic = ", chisq write(*, "(a,es12.4)") "chisq required < ", & chisq_pval(nbins-1, require_confidence) errors = errors + 1 endif end subroutine test_real_dist subroutine test_integer32_dist type(random_state) :: state integer :: i integer :: bin(nbins) real(wp) :: chisq integer :: sample write(*, "(/a)") "=====================================" write(*, "(a)") "Testing random_integer() 32-bit" write(*, "(a)") "=====================================" ! ! Test (1,...,n) distribution ! write(*, "(a)", advance="no") "Sampling Unif(1,...,n)... " ! Acquire sample bin(:) = 0 do i = 1, nsamples sample = random_integer(state,nbins) bin(sample) = bin(sample) + 1 end do chisq = 1.0_wp/nsamples * sum( (bin(:)**2.0_wp) * nbins ) - nsamples if(chisq < chisq_pval(nbins-1, require_confidence)) then write(*, "(a)") "pass" else write(*, "(a)") "fail" write(*, "(a,es12.4)") "chisq statistic = ", chisq write(*, "(a,es12.4)") "chisq required < ", & chisq_pval(nbins-1, require_confidence) errors = errors + 1 endif end subroutine test_integer32_dist subroutine test_integer64_dist type(random_state) :: state integer :: i integer :: bin(nbins) real(wp) :: chisq integer(long) :: sample write(*, "(/a)") "=====================================" write(*, "(a)") "Testing random_integer() 64-bit" write(*, "(a)") "=====================================" ! ! Test (1,...,n) distribution ! write(*, "(a)", advance="no") "Sampling Unif(1,...,n)... 
" ! Acquire sample bin(:) = 0 do i = 1, nsamples sample = random_integer(state,int(nbins,long)) bin(sample) = bin(sample) + 1 end do chisq = 1.0_wp/nsamples * sum( (bin(:)**2.0_wp) * nbins ) - nsamples if(chisq < chisq_pval(nbins-1, require_confidence)) then write(*, "(a)") "pass" else write(*, "(a)") "fail" write(*, "(a,es12.4)") "chisq statistic = ", chisq write(*, "(a,es12.4)") "chisq required < ", & chisq_pval(nbins-1, require_confidence) errors = errors + 1 endif end subroutine test_integer64_dist subroutine test_logical_dist type(random_state) :: state integer :: i, j integer :: bin(2) real(wp) :: chisq logical :: sample write(*, "(/a)") "=====================================" write(*, "(a)") "Testing random_logical()" write(*, "(a)") "=====================================" ! ! Test (1,...,n) distribution ! write(*, "(a)", advance="no") "Sampling B(1,0.5)........ " ! Acquire sample bin(:) = 0 do i = 1, nsamples sample = random_logical(state) if(sample) then j = 1 else j = 2 endif bin(j) = bin(j) + 1 end do chisq = 1.0_wp/nsamples * sum( (bin(:)**2.0_wp) * 2 ) - nsamples if(chisq < chisq_pval(1, require_confidence)) then write(*, "(a)") "pass" else write(*, "(a)") "fail" write(*, "(a,es12.4)") "chisq statistic = ", chisq write(*, "(a,es12.4)") "chisq required < ", & chisq_pval(1, require_confidence) errors = errors + 1 endif end subroutine test_logical_dist real(wp) function chisq_pval(dof, p) integer, intent(in) :: dof real, intent(in) :: p real(wp) :: xp if(p.eq.0.99) then xp = 2.33 else write(*, "(a)") "Uncoded pval for chisq_pval" stop endif chisq_pval = dof + sqrt(2.0_wp*dof)*xp + (2*xp**2)/3 - 2/3.0_wp end function chisq_pval end program random
{"hexsha": "eb6d0617b147b16d1cae7813ace01b924c1a388a", "size": 5986, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/random.f90", "max_stars_repo_name": "mjacobse/spral", "max_stars_repo_head_hexsha": "9bf003b2cc199928ec18c967ce0e009d98790898", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 76, "max_stars_repo_stars_event_min_datetime": "2016-10-03T13:58:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T00:11:34.000Z", "max_issues_repo_path": "tests/random.f90", "max_issues_repo_name": "mjacobse/spral", "max_issues_repo_head_hexsha": "9bf003b2cc199928ec18c967ce0e009d98790898", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 51, "max_issues_repo_issues_event_min_datetime": "2016-09-20T19:01:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T12:52:21.000Z", "max_forks_repo_path": "tests/random.f90", "max_forks_repo_name": "mjacobse/spral", "max_forks_repo_head_hexsha": "9bf003b2cc199928ec18c967ce0e009d98790898", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2016-09-30T20:52:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T14:58:37.000Z", "avg_line_length": 26.963963964, "max_line_length": 74, "alphanum_fraction": 0.5250584698, "num_tokens": 1945}
# Python code adapted from authors: https://arxiv.org/abs/1611.05666 from __future__ import print_function, division import argparse import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler from torch.autograd import Variable import numpy as np import torchvision from torchvision import datasets, models, transforms import torch.backends.cudnn as cudnn import matplotlib matplotlib.use('agg') import matplotlib.pyplot as plt import copy from PIL import Image import time import os from model import ft_net, ft_net_dense, PCB, verif_net from random_erasing import RandomErasing from tripletfolder import TripletFolder import yaml from shutil import copyfile def train_model(model, model_verif, criterion, optimizer, scheduler, num_epochs=25): since = time.time() best_model_wts = model.state_dict() best_acc = 0.0 last_margin = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train']: if phase == 'train': scheduler.step() model.train(True) # Set model to training mode else: model.train(False) # Set model to evaluate mode running_loss = 0.0 running_verif_loss = 0.0 running_corrects = 0.0 running_verif_corrects = 0.0 for data in dataloaders[phase]: inputs, labels, pos, neg = data now_batch_size,c,h,w = inputs.shape if now_batch_size<opt.batchsize: continue if use_gpu: inputs = Variable(inputs.cuda()) pos = Variable(pos.cuda()) neg = Variable(neg.cuda()) labels = Variable(labels.cuda()) else: inputs, labels = Variable(inputs), Variable(labels) optimizer.zero_grad() outputs, f = model(inputs) _, pf = model(pos) _, nf = model(neg) pscore = model_verif(pf * f) nscore = model_verif(nf * f) labels_0 = torch.zeros(now_batch_size).long() labels_1 = torch.ones(now_batch_size).long() labels_0 = Variable(labels_0.cuda()) labels_1 = Variable(labels_1.cuda()) _, preds = torch.max(outputs.data, 1) _, p_preds = torch.max(pscore.data, 1) _, n_preds = torch.max(nscore.data, 1) loss_id = criterion(outputs, labels) loss_verif = (criterion(pscore, labels_0) + criterion(nscore , labels_1)) * 0.5 * opt.alpha loss = loss_id + loss_verif if phase == 'train': loss.backward() optimizer.step() if int(version[0]) > 0 or int(version[2]) > 3: running_loss += loss.item() running_verif_loss += loss_verif.item() else: running_loss += loss.data[0] running_verif_loss += loss_verif.data[0] running_corrects += float(torch.sum(preds == labels.data)) running_verif_corrects += float(torch.sum(p_preds == 0)) + float(torch.sum(n_preds == 1)) datasize = dataset_sizes['train']//opt.batchsize * opt.batchsize epoch_loss = running_loss / datasize epoch_verif_loss = running_verif_loss / datasize epoch_acc = running_corrects / datasize epoch_verif_acc = running_verif_corrects / (2*datasize) print('{} Loss: {:.4f} Loss_verif: {:.4f} Acc: {:.4f} Verif_Acc: {:.4f} '.format( phase, epoch_loss, epoch_verif_loss, epoch_acc, epoch_verif_acc )) y_loss[phase].append(epoch_loss) y_err[phase].append(1.0 - epoch_acc) if epoch % 10 == 9: save_network(model, epoch) draw_curve(epoch) last_model_wts = model.state_dict() print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) model.load_state_dict(last_model_wts) save_network(model, 'last') return model def save_network(network, epoch_label): save_filename = 'net_%s.pth'% epoch_label save_path = os.path.join('./model',name,save_filename) torch.save(network.cpu().state_dict(), 
save_path) if torch.cuda.is_available: network.cuda(gpu_ids[0]) if __name__ == '__main__': version = torch.__version__ parser = argparse.ArgumentParser(description='Training') parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2') parser.add_argument('--name',default='ft_ResNet50', type=str, help='output model name') parser.add_argument('--data_dir',default='../Market/pytorch',type=str, help='training dir path') parser.add_argument('--train_all', action='store_true', help='use all training data' ) parser.add_argument('--color_jitter', action='store_true', help='use color jitter in training' ) parser.add_argument('--batchsize', default=32, type=int, help='batchsize') parser.add_argument('--lr', default=0.1, type=float, help='learning rate') parser.add_argument('--alpha', default=1.0, type=float, help='alpha') parser.add_argument('--erasing_p', default=0, type=float, help='Random Erasing probability, in [0,1]') parser.add_argument('--use_dense', action='store_true', help='use densenet121' ) parser.add_argument('--PCB', action='store_true', help='use PCB+ResNet50' ) opt = parser.parse_args() data_dir = opt.data_dir name = opt.name str_ids = opt.gpu_ids.split(',') gpu_ids = [] for str_id in str_ids: gid = int(str_id) if gid >= 0: gpu_ids.append(gid) if len(gpu_ids) > 0: torch.cuda.set_device(gpu_ids[0]) cudnn.benchmark = True transform_train_list = [ transforms.Resize((256,128), interpolation=3), transforms.Pad(10), transforms.RandomCrop((256,128)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ] transform_val_list = [ transforms.Resize(size=(256,128),interpolation=3), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ] if opt.PCB: transform_train_list = [ transforms.Resize((384,192), interpolation=3), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ] transform_val_list = [ transforms.Resize(size=(384,192),interpolation=3), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ] if opt.erasing_p > 0: transform_train_list = transform_train_list + [RandomErasing(probability = opt.erasing_p, mean=[0.0, 0.0, 0.0])] if opt.color_jitter: transform_train_list = [transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0)] + transform_train_list print(transform_train_list) data_transforms = { 'train': transforms.Compose( transform_train_list ), 'val': transforms.Compose(transform_val_list), } train_all = '' if opt.train_all: train_all = '_all' image_datasets = {} image_datasets['train'] = TripletFolder(os.path.join(data_dir, 'train_all'), data_transforms['train']) image_datasets['val'] = TripletFolder(os.path.join(data_dir, 'val'), data_transforms['val']) batch = {} class_names = image_datasets['train'].classes class_vector = [s[1] for s in image_datasets['train'].samples] dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize, shuffle=True, num_workers=8) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} use_gpu = torch.cuda.is_available() since = time.time() print(time.time()-since) y_loss = {} y_loss['train'] = [] y_loss['val'] = [] y_err = {} y_err['train'] = [] y_err['val'] = [] x_epoch = [] fig = plt.figure() ax0 = fig.add_subplot(121, title="triplet_loss") ax1 = fig.add_subplot(122, title="top1err") def draw_curve(current_epoch): 
x_epoch.append(current_epoch) ax0.plot(x_epoch, y_loss['train'], 'bo-', label='train') #ax0.plot(x_epoch, y_loss['val'], 'ro-', label='val') ax1.plot(x_epoch, y_err['train'], 'bo-', label='train') #ax1.plot(x_epoch, y_err['val'], 'ro-', label='val') if current_epoch == 0: ax0.legend() ax1.legend() fig.savefig( os.path.join('./model',name,'train.jpg')) if opt.use_dense: model = ft_net_dense(len(class_names)) else: model = ft_net(len(class_names)) if opt.PCB: model = PCB(len(class_names)) model_verif = verif_net() print(model) print(model_verif) if use_gpu: model = model.cuda() model_verif = model_verif.cuda() criterion = nn.CrossEntropyLoss() if not opt.PCB: ignored_params = list(map(id, model.model.fc.parameters() )) + list(map(id, model.classifier.parameters() )) base_params = filter(lambda p: id(p) not in ignored_params, model.parameters()) optimizer_ft = optim.SGD([ {'params': base_params, 'lr': 0.1*opt.lr}, {'params': model.model.fc.parameters(), 'lr': opt.lr}, {'params': model.classifier.parameters(), 'lr': opt.lr}, {'params': model_verif.classifier.parameters(), 'lr': opt.lr} ], weight_decay=5e-4, momentum=0.9, nesterov=True) else: ignored_params = list(map(id, model.model.fc.parameters() )) ignored_params += (list(map(id, model.classifier0.parameters() )) +list(map(id, model.classifier1.parameters() )) +list(map(id, model.classifier2.parameters() )) +list(map(id, model.classifier3.parameters() )) +list(map(id, model.classifier4.parameters() )) +list(map(id, model.classifier5.parameters() )) #+list(map(id, model.classifier6.parameters() )) #+list(map(id, model.classifier7.parameters() )) ) base_params = filter(lambda p: id(p) not in ignored_params, model.parameters()) optimizer_ft = optim.SGD([ {'params': base_params, 'lr': 0.001}, {'params': model.model.fc.parameters(), 'lr': 0.01}, {'params': model.classifier0.parameters(), 'lr': 0.01}, {'params': model.classifier1.parameters(), 'lr': 0.01}, {'params': model.classifier2.parameters(), 'lr': 0.01}, {'params': model.classifier3.parameters(), 'lr': 0.01}, {'params': model.classifier4.parameters(), 'lr': 0.01}, {'params': model.classifier5.parameters(), 'lr': 0.01}, #{'params': model.classifier6.parameters(), 'lr': 0.01}, #{'params': model.classifier7.parameters(), 'lr': 0.01} ], weight_decay=5e-4, momentum=0.9, nesterov=True) exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer_ft, milestones=[40,60], gamma=0.1) dir_name = os.path.join('./model',name) if not os.path.isdir(dir_name): os.mkdir(dir_name) copyfile('./train.py', dir_name+'/train.py') copyfile('./model.py', dir_name+'/model.py') copyfile('./tripletfolder.py', dir_name+'/tripletfolder.py') with open('%s/opts.yaml'%dir_name,'w') as fp: yaml.dump(vars(opt), fp, default_flow_style=False) model = train_model(model, model_verif, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=60)
{"hexsha": "4e220d452d14eceb2947f56bdfaf8bbd5bd26307", "size": 12569, "ext": "py", "lang": "Python", "max_stars_repo_path": "reid_verif/train.py", "max_stars_repo_name": "hthieu166/cs547-final-proj-image-ranking", "max_stars_repo_head_hexsha": "81c6d3ebccae77df1d2e8421927a1a2684f80c98", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reid_verif/train.py", "max_issues_repo_name": "hthieu166/cs547-final-proj-image-ranking", "max_issues_repo_head_hexsha": "81c6d3ebccae77df1d2e8421927a1a2684f80c98", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reid_verif/train.py", "max_forks_repo_name": "hthieu166/cs547-final-proj-image-ranking", "max_forks_repo_head_hexsha": "81c6d3ebccae77df1d2e8421927a1a2684f80c98", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7753164557, "max_line_length": 131, "alphanum_fraction": 0.5818283077, "include": true, "reason": "import numpy", "num_tokens": 2997}
# -*- coding: utf-8 -*- from __future__ import absolute_import import numpy as np import scipy.signal from keras.models import Sequential from keras.layers.convolutional import Convolution1D from keras.layers import Input, Lambda, merge, Permute, Reshape from keras.models import Model from keras import backend as K def _get_stft_kernels(n_dft, keras_ver='new'): '''Return dft kernels for real/imagnary parts assuming the input signal is real. An asymmetric hann window is used (scipy.signal.hann). Parameters ---------- n_dft : int > 0 and power of 2 [scalar] Number of dft components. keras_ver : string, 'new' or 'old' It determines the reshaping strategy. Returns ------- dft_real_kernels : np.ndarray [shape=(nb_filter, 1, 1, n_win)] dft_imag_kernels : np.ndarray [shape=(nb_filter, 1, 1, n_win)] * nb_filter = n_dft/2 + 1 * n_win = n_dft ''' assert n_dft > 1 and ((n_dft & (n_dft - 1)) == 0), \ ('n_dft should be > 1 and power of 2, but n_dft == %d' % n_dft) nb_filter = n_dft / 2 + 1 # prepare DFT filters timesteps = range(n_dft) w_ks = [(2 * np.pi * k) / float(n_dft) for k in xrange(n_dft)] dft_real_kernels = np.array([[np.cos(w_k * n) for n in timesteps] for w_k in w_ks]) dft_imag_kernels = np.array([[np.sin(w_k * n) for n in timesteps] for w_k in w_ks]) # windowing DFT filters dft_window = scipy.signal.hann(n_dft, sym=False) dft_window = dft_window.reshape((1, -1)) dft_real_kernels = np.multiply(dft_real_kernels, dft_window) dft_imag_kernels = np.multiply(dft_imag_kernels, dft_window) if keras_ver == 'old': # 1.0.6: reshape filter e.g. (5, 8) -> (5, 1, 8, 1) dft_real_kernels = dft_real_kernels[:nb_filter] dft_imag_kernels = dft_imag_kernels[:nb_filter] dft_real_kernels = dft_real_kernels[:, np.newaxis, :, np.newaxis] dft_imag_kernels = dft_imag_kernels[:, np.newaxis, :, np.newaxis] else: dft_real_kernels = dft_real_kernels[:nb_filter].transpose() dft_imag_kernels = dft_imag_kernels[:nb_filter].transpose() dft_real_kernels = dft_real_kernels[:, np.newaxis, np.newaxis, :] dft_imag_kernels = dft_imag_kernels[:, np.newaxis, np.newaxis, :] return dft_real_kernels, dft_imag_kernels def Logam_layer(name='log_amplitude'): '''Return a keras layer for log-amplitude. The computation is simplified from librosa.logamplitude by not having parameters such as ref_power, amin, tob_db. Parameters ---------- name : string Name of the logamplitude layer Returns ------- a Keras layer : Keras's Lambda layer for log-amplitude-ing. ''' def logam(x): log_spec = 10 * K.log(K.maximum(x, 1e-10))/K.log(10) log_spec = log_spec - K.max(log_spec) # [-?, 0] log_spec = K.maximum(log_spec, -80.0) # [-80, 0] return log_spec def logam_shape(shapes): '''shapes: shape of input(s) of the layer''' # print('output shape of logam:', shapes) return shapes return Lambda(lambda x: logam(x), name=name, output_shape=logam_shape) def get_spectrogram_model(n_dft, input_shape, trainable=False, n_hop=None, border_mode='same', logamplitude=True): '''Returns two tensors, x as input, stft_magnitude as result. x(input) and STFT_magnitude(tensor) (#freq, #time shape) It assumes mono input. These tensors can be use to build a Keras model using Functional API, `e.g., model = keras.models.Model(x, STFT_magnitude)` to build a model that does STFT. It uses two `Convolution1D` to compute real/imaginary parts of STFT and sum(real**2, imag**2). Parameters ---------- n_dft : int > 0 and power of 2 [scalar] number of dft components. input_shape : tuple (length=2), Input shape of raw audio input. It should (num_audio_samples, 1), e.g. 
(441000, 1) trainable : boolean If it is `True`, the STFT kernels (=weights of two 1d conv layer) is set as `trainable`, therefore they are initiated with STFT kernels but then updated. n_hop : int > 0 [scalar] number of samples between successive frames. border_mode : 'valid' or 'same'. if 'valid' the edges of input signal are ignored. logamplitude : boolean whether logamplitude to stft or not this is then used in Keras - Functional model API STFT_real and STFT_imag is set as non_trainable Returns ------- x : input tensor STFT_magnitude : STFT magnitude, either in shape: (None, 1, n_freq, n_frame) or (None, n_freq, n_frame, 1) ''' assert trainable in (True, False) if n_hop is None: n_hop = n_dft / 2 n_channel = input_shape[1] # get DFT kernels dft_real_kernels, dft_imag_kernels = _get_stft_kernels(n_dft) nb_filter = n_dft / 2 + 1 # layers - one for the real, one for the imaginary x = Input(shape=input_shape, name='audio_input', dtype='float32') STFT_real = Convolution1D(nb_filter, n_dft, subsample_length=n_hop, border_mode=border_mode, weights=[dft_real_kernels], bias=False, name='dft_real', input_shape=input_shape)(x) STFT_imag = Convolution1D(nb_filter, n_dft, subsample_length=n_hop, border_mode=border_mode, weights=[dft_imag_kernels], bias=False, name='dft_imag', input_shape=input_shape)(x) STFT_real.trainable = trainable STFT_imag.trainable = trainable STFT_real = Lambda(lambda x: x ** 2, name='real_pow')(STFT_real) STFT_imag = Lambda(lambda x: x ** 2, name='imag_pow')(STFT_imag) STFT_magnitude = merge([STFT_real, STFT_imag], mode='sum', name='sum') if logamplitude: STFT_magnitude = Logam_layer()(STFT_magnitude) STFT_magnitude = Permute((2, 1))(STFT_magnitude) # (sample, freq, time) model_conv1d = Model(input=x, output=STFT_magnitude, name='stft_conv1d') model_stft = Sequential(name='stft_model') model_stft.add(model_conv1d) if K.image_dim_ordering() == 'th': model_stft.add(Reshape((1, ) + model_conv1d.output_shape[1:])) else: model_stft.add(Reshape(model_conv1d.output_shape[1:] + (1, ))) return model_stft def Spectrogram(n_dft, input_shape, trainable=False, n_hop=None, border_mode='same', logamplitude=True): '''A keras model for Spectrogram using STFT Parameters ---------- n_dft : int > 0 and power of 2 [scalar] number of dft components. input_shape : tuple (length=2), Input shape of raw audio input. It should (num_audio_samples, n_ch), e.g. (441000, 1), (16000, 2) trainable : boolean If it is `True`, the STFT kernels (=weights of two 1d conv layer) is set as `trainable`, therefore they are initiated with STFT kernels but then updated. n_hop : int > 0 [scalar] number of audio samples between successive frames. border_mode : 'valid' or 'same'. if 'valid' the edges of input signal are ignored. logamplitude : boolean whether logamplitude to stft or not Returns ------- A keras model that has output shape of (None, n_ch, n_freq, n_frame) (if `img_dim_ordering() == 'th'`) or (None, n_freq, n_frame, n_ch) (if `img_dim_ordering() == 'tf'`). ''' model = get_spectrogram_model(n_dft, input_shape=input_shape, trainable=trainable, n_hop=n_hop, border_mode=border_mode, logamplitude=logamplitude) model.trainable = trainable return model
{"hexsha": "66a3b27e36f12e2b67fab1d998ba3b2428945e56", "size": 8274, "ext": "py", "lang": "Python", "max_stars_repo_path": "stft.py", "max_stars_repo_name": "keunwoochoi/keras_STFT_layer", "max_stars_repo_head_hexsha": "7d99459651d78c182578b8aec679061f6ce3780d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2016-09-11T16:23:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T12:29:31.000Z", "max_issues_repo_path": "stft.py", "max_issues_repo_name": "keunwoochoi/keras_STFT_layer", "max_issues_repo_head_hexsha": "7d99459651d78c182578b8aec679061f6ce3780d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2016-10-22T23:56:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-07T15:09:53.000Z", "max_forks_repo_path": "stft.py", "max_forks_repo_name": "keunwoochoi/keras_STFT_layer", "max_forks_repo_head_hexsha": "7d99459651d78c182578b8aec679061f6ce3780d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2016-09-11T16:23:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-05T02:40:59.000Z", "avg_line_length": 34.049382716, "max_line_length": 79, "alphanum_fraction": 0.6063572637, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2189}
import logging import numpy as np from causalml.propensity import compute_propensity_score from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier from sklearn.model_selection import cross_val_predict, KFold from sklearn.tree import DecisionTreeClassifier logger = logging.getLogger("causalml") class PolicyLearner(object): """ A Learner that learns a treatment assignment policy with observational data using doubly robust estimator of causal effect for binary treatment. Details of the policy learner are available at Athey and Wager (2018) (https://arxiv.org/abs/1702.02896). """ def __init__( self, outcome_learner=GradientBoostingRegressor(), treatment_learner=GradientBoostingClassifier(), policy_learner=DecisionTreeClassifier(), clip_bounds=(1e-3, 1 - 1e-3), n_fold=5, random_state=None, calibration=False, ): """Initialize a treatment assignment policy learner. Args: outcome_learner (optional): a regression model to estimate outcomes policy_learner (optional): a classification model to estimate treatment assignment. It needs to take `sample_weight` as an input argument for `fit()` clip_bounds (tuple, optional): lower and upper bounds for clipping propensity scores to avoid division by zero in PolicyLearner.fit() n_fold (int, optional): the number of cross validation folds for outcome_learner random_state (int or RandomState, optional): a seed (int) or random number generator (RandomState) """ self.model_mu = outcome_learner self.model_w = treatment_learner self.model_pi = policy_learner self.clip_bounds = clip_bounds self.cv = KFold(n_splits=n_fold, shuffle=True, random_state=random_state) self.calibration = calibration self._y_pred, self._tau_pred, self._w_pred, self._dr_score = ( None, None, None, None, ) def __repr__(self): return ( "{}(model_mu={},\n" "\tmodel_w={},\n" "\model_pi={})".format( self.__class__.__name__, self.model_mu.__repr__(), self.model_w.__repr__(), self.model_pi.__repr__(), ) ) def _outcome_estimate(self, X, w, y): self._y_pred = np.zeros(len(y)) self._tau_pred = np.zeros(len(y)) for train_index, test_index in self.cv.split(y): X_train, X_test = X[train_index], X[test_index] w_train, w_test = w[train_index], w[test_index] y_train, y_test = y[train_index], y[test_index] self.model_mu.fit( np.concatenate([X_train, w_train.reshape(-1, 1)], axis=1), y_train ) self._y_pred[test_index] = self.model_mu.predict( np.concatenate([X_test, w_test.reshape(-1, 1)], axis=1) ) self._tau_pred[test_index] = self.model_mu.predict( np.concatenate([X_test, np.ones((len(w_test), 1))], axis=1) ) - self.model_mu.predict( np.concatenate([X_test, np.zeros((len(w_test), 1))], axis=1) ) def _treatment_estimate(self, X, w): self._w_pred = np.zeros(len(w)) for train_index, test_index in self.cv.split(w): X_train, X_test = X[train_index], X[test_index] w_train, w_test = w[train_index], w[test_index] self._w_pred[test_index], _ = compute_propensity_score( X=X_train, treatment=w_train, X_pred=X_test, treatment_pred=w_test, calibrate_p=self.calibration, ) self._w_pred = np.clip( self._w_pred, a_min=self.clip_bounds[0], a_max=self.clip_bounds[1] ) def fit(self, X, treatment, y, p=None, dhat=None): """Fit the treatment assignment policy learner. Args: X (np.matrix): a feature matrix treatment (np.array): a treatment vector (1 if treated, otherwise 0) y (np.array): an outcome vector p (optional, np.array): user provided propensity score vector between 0 and 1 dhat (optinal, np.array): user provided predicted treatment effect vector Returns: self: returns an instance of self. 
""" logger.info( "generating out-of-fold CV outcome estimates with {}".format(self.model_mu) ) self._outcome_estimate(X, treatment, y) if dhat is not None: self._tau_pred = dhat if p is None: self._treatment_estimate(X, treatment) else: self._w_pred = np.clip(p, self.clip_bounds[0], self.clip_bounds[1]) # Doubly Robust Modification self._dr_score = self._tau_pred + (treatment - self._w_pred) / self._w_pred / ( 1 - self._w_pred ) * (y - self._y_pred) target = self._dr_score.copy() target = np.sign(target) logger.info("training the treatment assignment model, {}".format(self.model_pi)) self.model_pi.fit(X, target, sample_weight=abs(self._dr_score)) return self def predict(self, X): """Predict treatment assignment that optimizes the outcome. Args: X (np.matrix): a feature matrix Returns: (numpy.ndarray): predictions of treatment assignment. """ return self.model_pi.predict(X) def predict_proba(self, X): """Predict treatment assignment score that optimizes the outcome. Args: X (np.matrix): a feature matrix Returns: (numpy.ndarray): predictions of treatment assignment score. """ pi_hat = self.model_pi.predict_proba(X)[:, 1] return pi_hat
{"hexsha": "004b5eaaa5a00a12c35f82a44290ac1782ef2418", "size": 6002, "ext": "py", "lang": "Python", "max_stars_repo_path": "causalml/optimize/policylearner.py", "max_stars_repo_name": "rainfireliang/causalml", "max_stars_repo_head_hexsha": "d58024d8de4ab6136c5519949b58a22dd885df29", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2919, "max_stars_repo_stars_event_min_datetime": "2019-08-12T23:02:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:59:34.000Z", "max_issues_repo_path": "causalml/optimize/policylearner.py", "max_issues_repo_name": "rainfireliang/causalml", "max_issues_repo_head_hexsha": "d58024d8de4ab6136c5519949b58a22dd885df29", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 317, "max_issues_repo_issues_event_min_datetime": "2019-08-13T14:16:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T08:44:06.000Z", "max_forks_repo_path": "causalml/optimize/policylearner.py", "max_forks_repo_name": "rainfireliang/causalml", "max_forks_repo_head_hexsha": "d58024d8de4ab6136c5519949b58a22dd885df29", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 466, "max_forks_repo_forks_event_min_datetime": "2019-08-18T01:45:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:11:53.000Z", "avg_line_length": 34.4942528736, "max_line_length": 119, "alphanum_fraction": 0.6046317894, "include": true, "reason": "import numpy", "num_tokens": 1326}
"""Helper methods for model interpretation.""" import numpy from gewittergefahr.gg_utils import radar_utils from gewittergefahr.gg_utils import error_checking from gewittergefahr.deep_learning import cnn from gewittergefahr.deep_learning import input_examples from gewittergefahr.deep_learning import deep_learning_utils as dl_utils from gewittergefahr.deep_learning import training_validation_io as trainval_io LARGE_INTEGER = int(1e10) CLASS_COMPONENT_TYPE_STRING = 'class' NEURON_COMPONENT_TYPE_STRING = 'neuron' CHANNEL_COMPONENT_TYPE_STRING = 'channel' VALID_COMPONENT_TYPE_STRINGS = [ CLASS_COMPONENT_TYPE_STRING, NEURON_COMPONENT_TYPE_STRING, CHANNEL_COMPONENT_TYPE_STRING ] PREDICTOR_MATRICES_KEY = 'denorm_predictor_matrices' SOUNDING_PRESSURES_KEY = 'sounding_pressure_matrix_pa' MODEL_FILE_KEY = 'model_file_name' FULL_STORM_IDS_KEY = 'full_storm_id_strings' STORM_TIMES_KEY = 'storm_times_unix_sec' MEAN_PREDICTOR_MATRICES_KEY = 'mean_denorm_predictor_matrices' MEAN_SOUNDING_PRESSURES_KEY = 'mean_sounding_pressures_pa' PMM_MAX_PERCENTILE_KEY = 'pmm_max_percentile_level' NON_PMM_FILE_KEY = 'non_pmm_file_name' def check_component_type(component_type_string): """Ensures that model-component type is valid. :param component_type_string: Component type. :raises: ValueError: if `component_type_string not in VALID_COMPONENT_TYPE_STRINGS`. """ error_checking.assert_is_string(component_type_string) if component_type_string not in VALID_COMPONENT_TYPE_STRINGS: error_string = ( '\n\n{0:s}\nValid component types (listed above) do not include ' '"{1:s}".' ).format(str(VALID_COMPONENT_TYPE_STRINGS), component_type_string) raise ValueError(error_string) def check_component_metadata( component_type_string, target_class=None, layer_name=None, neuron_indices=None, channel_index=None): """Checks metadata for model component. :param component_type_string: Component type (must be accepted by `check_component_type`). :param target_class: [used only if component_type_string = "class"] Target class. Integer from 0...(K - 1), where K = number of classes. :param layer_name: [used only if component_type_string = "neuron" or "channel"] Name of layer containing neuron or channel. :param neuron_indices: [used only if component_type_string = "neuron"] 1-D numpy array with indices of neuron. :param channel_index: [used only if component_type_string = "channel"] Index of channel. """ check_component_type(component_type_string) if component_type_string == CLASS_COMPONENT_TYPE_STRING: error_checking.assert_is_integer(target_class) error_checking.assert_is_geq(target_class, 0) if component_type_string in [NEURON_COMPONENT_TYPE_STRING, CHANNEL_COMPONENT_TYPE_STRING]: error_checking.assert_is_string(layer_name) if component_type_string == NEURON_COMPONENT_TYPE_STRING: error_checking.assert_is_integer_numpy_array(neuron_indices) error_checking.assert_is_geq_numpy_array(neuron_indices, 0) error_checking.assert_is_numpy_array(neuron_indices, num_dimensions=1) if component_type_string == CHANNEL_COMPONENT_TYPE_STRING: error_checking.assert_is_integer(channel_index) error_checking.assert_is_geq(channel_index, 0) def model_component_to_string( component_type_string, target_class=None, layer_name=None, neuron_indices=None, channel_index=None): """Returns string descriptions for model component (class/neuron/channel). Specifically, this method creates two strings: - verbose string (to use in figure legends) - abbreviation (to use in file names) :param component_type_string: See doc for `check_component_metadata`. 
:param target_class: Same. :param layer_name: Same. :param neuron_indices: Same. :param channel_index: Same. :return: verbose_string: See general discussion above. :return: abbrev_string: See general discussion above. """ check_component_metadata( component_type_string=component_type_string, target_class=target_class, layer_name=layer_name, neuron_indices=neuron_indices, channel_index=channel_index) if component_type_string == CLASS_COMPONENT_TYPE_STRING: verbose_string = 'Class {0:d}'.format(target_class) abbrev_string = 'class{0:d}'.format(target_class) else: verbose_string = 'Layer "{0:s}"'.format(layer_name) abbrev_string = 'layer={0:s}'.format(layer_name.replace('_', '-')) if component_type_string == CHANNEL_COMPONENT_TYPE_STRING: verbose_string += ', channel {0:d}'.format(channel_index) abbrev_string += '_channel{0:d}'.format(channel_index) if component_type_string == NEURON_COMPONENT_TYPE_STRING: this_neuron_string = ', '.join( ['{0:d}'.format(i) for i in neuron_indices]) verbose_string += '; neuron ({0:s})'.format(this_neuron_string) this_neuron_string = ','.join( ['{0:d}'.format(i) for i in neuron_indices]) abbrev_string += '_neuron{0:s}'.format(this_neuron_string) return verbose_string, abbrev_string def sort_neurons_by_weight(model_object, layer_name): """Sorts neurons of the given layer in descending order by weight. K = number of dimensions in `weight_matrix` W = number of values in `weight_matrix` :param model_object: Instance of `keras.models.Model`. :param layer_name: Name of layer whose neurons are to be sorted. :return: weight_matrix: numpy array of weights, with the same dimensions as `model_object.get_layer(name=layer_name).get_weights()[0]`. If the layer is convolutional, dimensions of `weight_matrix` are as follows: - Last dimension = output channel - Second-last dimension = input channel - First dimensions = spatial dimensions For example, if the conv layer has a 3-by-5 kernel with 16 input channels and 32 output channels, `weight_matrix` will be 3 x 5 x 16 x 32. If the layer is dense (fully connected), `weight_matrix` is 1-D. :return: sort_indices_as_tuple: length-K tuple. sort_indices_as_tuple[k] is a length-W numpy array, containing indices for the [k]th dimension of `weight_matrix`. When these indices are applied to all dimensions of `weight_matrix` -- i.e., when sort_indices_as_tuple[k] is applied for k = 0...(K - 1) -- `weight_matrix` has been sorted in descending order. :raises: TypeError: if the given layer is neither dense nor convolutional. """ layer_type_string = type(model_object.get_layer(name=layer_name)).__name__ valid_layer_type_strings = ['Dense', 'Conv1D', 'Conv2D', 'Conv3D'] if layer_type_string not in valid_layer_type_strings: error_string = ( '\n\n{0:s}\nLayer "{1:s}" has type "{2:s}", which is not in the ' 'above list.' ).format(str(valid_layer_type_strings), layer_name, layer_type_string) raise TypeError(error_string) weight_matrix = model_object.get_layer(name=layer_name).get_weights()[0] sort_indices_linear = numpy.argsort( -numpy.reshape(weight_matrix, weight_matrix.size)) sort_indices_as_tuple = numpy.unravel_index( sort_indices_linear, weight_matrix.shape) return weight_matrix, sort_indices_as_tuple def denormalize_data(list_of_input_matrices, model_metadata_dict): """Denormalizes input data for a Keras model. E = number of examples (storm objects) H = number of height levels per sounding :param list_of_input_matrices: length-T list of input matrices (numpy arrays), where T = number of input tensors to the model. 
:param model_metadata_dict: Dictionary with metadata for the relevant model, created by `cnn.read_model_metadata`. :return: list_of_input_matrices: Denormalized version of input (same dimensions). """ training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY] if model_metadata_dict[cnn.CONV_2D3D_KEY]: list_of_input_matrices[0] = dl_utils.denormalize_radar_images( radar_image_matrix=list_of_input_matrices[0], field_names=[radar_utils.REFL_NAME], normalization_type_string=training_option_dict[ trainval_io.NORMALIZATION_TYPE_KEY], normalization_param_file_name=training_option_dict[ trainval_io.NORMALIZATION_FILE_KEY], min_normalized_value=training_option_dict[ trainval_io.MIN_NORMALIZED_VALUE_KEY], max_normalized_value=training_option_dict[ trainval_io.MAX_NORMALIZED_VALUE_KEY]) list_of_input_matrices[1] = dl_utils.denormalize_radar_images( radar_image_matrix=list_of_input_matrices[1], field_names=training_option_dict[trainval_io.RADAR_FIELDS_KEY], normalization_type_string=training_option_dict[ trainval_io.NORMALIZATION_TYPE_KEY], normalization_param_file_name=training_option_dict[ trainval_io.NORMALIZATION_FILE_KEY], min_normalized_value=training_option_dict[ trainval_io.MIN_NORMALIZED_VALUE_KEY], max_normalized_value=training_option_dict[ trainval_io.MAX_NORMALIZED_VALUE_KEY]) else: list_of_layer_operation_dicts = model_metadata_dict[ cnn.LAYER_OPERATIONS_KEY] if list_of_layer_operation_dicts is None: radar_field_names = training_option_dict[ trainval_io.RADAR_FIELDS_KEY] else: radar_field_names = [ d[input_examples.RADAR_FIELD_KEY] for d in list_of_layer_operation_dicts ] list_of_input_matrices[0] = dl_utils.denormalize_radar_images( radar_image_matrix=list_of_input_matrices[0], field_names=radar_field_names, normalization_type_string=training_option_dict[ trainval_io.NORMALIZATION_TYPE_KEY], normalization_param_file_name=training_option_dict[ trainval_io.NORMALIZATION_FILE_KEY], min_normalized_value=training_option_dict[ trainval_io.MIN_NORMALIZED_VALUE_KEY], max_normalized_value=training_option_dict[ trainval_io.MAX_NORMALIZED_VALUE_KEY]) if training_option_dict[trainval_io.SOUNDING_FIELDS_KEY] is not None: list_of_input_matrices[-1] = dl_utils.denormalize_soundings( sounding_matrix=list_of_input_matrices[-1], field_names=training_option_dict[trainval_io.SOUNDING_FIELDS_KEY], normalization_type_string=training_option_dict[ trainval_io.NORMALIZATION_TYPE_KEY], normalization_param_file_name=training_option_dict[ trainval_io.NORMALIZATION_FILE_KEY], min_normalized_value=training_option_dict[ trainval_io.MIN_NORMALIZED_VALUE_KEY], max_normalized_value=training_option_dict[ trainval_io.MAX_NORMALIZED_VALUE_KEY]) return list_of_input_matrices
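# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The layer name and
# neuron indices are made-up values, used only to show what the component-
# description helpers defined above return.
if __name__ == '__main__':
    this_verbose_string, this_abbrev_string = model_component_to_string(
        component_type_string=NEURON_COMPONENT_TYPE_STRING,
        layer_name='dense_3',
        neuron_indices=numpy.array([0, 4], dtype=int))

    print(this_verbose_string)   # Layer "dense_3"; neuron (0, 4)
    print(this_abbrev_string)    # layer=dense-3_neuron0,4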
{"hexsha": "4612ce90e544412e13ada97192f31e8cfbdcf4fb", "size": 11293, "ext": "py", "lang": "Python", "max_stars_repo_path": "gewittergefahr/deep_learning/model_interpretation.py", "max_stars_repo_name": "dopplerchase/GewitterGefahr", "max_stars_repo_head_hexsha": "4415b08dd64f37eba5b1b9e8cc5aa9af24f96593", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2018-10-04T01:07:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T08:49:32.000Z", "max_issues_repo_path": "gewittergefahr/deep_learning/model_interpretation.py", "max_issues_repo_name": "liuximarcus/GewitterGefahr", "max_issues_repo_head_hexsha": "d819874d616f98a25187bfd3091073a2e6d5279e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-12-25T02:01:08.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-19T01:54:21.000Z", "max_forks_repo_path": "gewittergefahr/deep_learning/model_interpretation.py", "max_forks_repo_name": "liuximarcus/GewitterGefahr", "max_forks_repo_head_hexsha": "d819874d616f98a25187bfd3091073a2e6d5279e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2017-12-10T23:05:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T08:49:33.000Z", "avg_line_length": 42.7765151515, "max_line_length": 80, "alphanum_fraction": 0.7244310635, "include": true, "reason": "import numpy", "num_tokens": 2414}
/* Copyright 2014 Rogier van Dalen. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /** \file Give an example, and test, of how to use the classes defined in \c utility::storage. */ #define BOOST_TEST_MODULE test_utility_storage #include "utility/test/boost_unit_test.hpp" #include "utility/storage.hpp" #include <type_traits> #include <string> #include <boost/mpl/assert.hpp> #include "utility/test/tracked.hpp" BOOST_AUTO_TEST_SUITE(test_suite_utility_storage_example) namespace storage = utility::storage; template <class Type> class simple_container { public: simple_container (typename storage::pass <Type>::type content) : content_ (content) {} simple_container (typename storage::pass_rvalue <Type>::type content) : content_ (std::move (content)) {} typename storage::get <Type, simple_container &>::type content() { return content_; } typename storage::get <Type, simple_container const &>::type content() const { return content_; } typename storage::get_pointer <Type, simple_container &>::type pointer() { return & content(); } typename storage::get_pointer <Type, simple_container const &>::type pointer() const { return & content(); } void replace_with (typename storage::pass <Type>::type new_content) { content_ = new_content; } void replace_with (typename storage::pass_rvalue <Type>::type new_content) { content_ = std::move (new_content); } private: typename storage::store <Type>::type content_; }; BOOST_AUTO_TEST_CASE (test_utility_storage_example_simple) { { simple_container <int> c (7); BOOST_CHECK_EQUAL (c.content(), 7); c.content() = 8; BOOST_CHECK_EQUAL (c.content(), 8); } // const container. { simple_container <int> const c (-7); BOOST_CHECK_EQUAL (c.content(), -7); BOOST_MPL_ASSERT (( std::is_same <decltype (c.content()), int const &>)); } // Contain a reference. { int i; simple_container <int &> c (i); i = 5; BOOST_CHECK_EQUAL (c.content(), 5); c.content() = 15; BOOST_CHECK_EQUAL (i, 15); // Replace the original object. int i2 = 4; c.replace_with (i2); BOOST_CHECK_EQUAL (c.content(), 4); BOOST_CHECK_EQUAL (i, 15); c.content() = 13; BOOST_CHECK_EQUAL (i2, 13); BOOST_CHECK_EQUAL (i, 15); } // const container to a reference. 
{ int i; simple_container <int &> const c (i); i = 5; BOOST_CHECK_EQUAL (c.content(), 5); c.content() = 15; BOOST_CHECK_EQUAL (i, 15); } // Arrays { int is [5] {5, 3, 5, 7}; simple_container <int [5]> c (is); BOOST_CHECK_EQUAL (c.content() [0], 5); BOOST_CHECK_EQUAL (c.content() [1], 3); BOOST_CHECK_EQUAL (c.content() [2], 5); BOOST_CHECK_EQUAL (c.content() [3], 7); BOOST_CHECK_EQUAL (c.content() [4], 0); BOOST_CHECK_EQUAL ((*c.pointer())[0], 5); BOOST_CHECK_EQUAL ((*c.pointer())[3], 7); } { int const is [2] {27}; simple_container <int [2]> c (is); BOOST_CHECK_EQUAL (c.content() [0], 27); BOOST_CHECK_EQUAL (c.content() [1], 0); int is2 [2] {31, 33}; c = is2; BOOST_CHECK_EQUAL (c.content() [0], 31); BOOST_CHECK_EQUAL (c.content() [1], 33); is2 [0] = 4; is2 [1] = 7; simple_container <int [2]> c2 (is2); c = c2; BOOST_CHECK_EQUAL (c2.content() [0], 4); BOOST_CHECK_EQUAL (c2.content() [1], 7); } { int is [2] {27}; simple_container <int [2]> const c (is); BOOST_CHECK_EQUAL (c.content() [0], 27); BOOST_CHECK_EQUAL (c.content() [1], 0); } { int const is [2] {27}; simple_container <int [2]> const c (is); BOOST_CHECK_EQUAL (c.content() [0], 27); BOOST_CHECK_EQUAL (c.content() [1], 0); } { int is [2] {27}; simple_container <int const [2]> const c (is); BOOST_CHECK_EQUAL (c.content() [0], 27); BOOST_CHECK_EQUAL (c.content() [1], 0); } } BOOST_AUTO_TEST_CASE (test_utility_storage_example_extensive) { { utility::tracked_registry registry; simple_container <utility::tracked <int>> c ( utility::tracked <int> (registry, 9)); BOOST_CHECK_EQUAL (c.content().content(), 9); // value_construct, copy, move, copy_assign, move_assign, swap, // destruct, destruct_moved) registry.check_counts (1, 0, 1, 0, 0, 0, 0, 1); c.content() = utility::tracked <int> (registry, 91); BOOST_CHECK_EQUAL (c.content().content(), 91); registry.check_counts (2, 0, 1, 0, 1, 0, 0, 2); utility::tracked <int> t (registry, 89); c.replace_with (t); BOOST_CHECK_EQUAL (c.content().content(), 89); registry.check_counts (3, 0, 1, 1, 1, 0, 0, 2); } // Initialise with lvalue reference. { utility::tracked_registry registry; utility::tracked <int> t (registry, 78); simple_container <utility::tracked <int>> c (t); BOOST_CHECK_EQUAL (c.content().content(), 78); // value_construct, copy, move, copy_assign, move_assign, swap, // destruct, destruct_moved) registry.check_counts (1, 1, 0, 0, 0, 0, 0, 0); } // Const container. { utility::tracked_registry registry; simple_container <utility::tracked <int>> const c ( utility::tracked <int> (registry, 9)); BOOST_CHECK_EQUAL (c.content().content(), 9); BOOST_MPL_ASSERT ((std::is_same <decltype (c.content()), utility::tracked <int> const &>)); } // Const contents. 
{ utility::tracked_registry registry; simple_container <utility::tracked <int> const> c ( utility::tracked <int> (registry, 9)); BOOST_CHECK_EQUAL (c.content().content(), 9); // value_construct, copy, move, copy_assign, move_assign, swap, // destruct, destruct_moved) registry.check_counts (1, 0, 1, 0, 0, 0, 0, 1); c.replace_with (utility::tracked <int> (registry, 91)); BOOST_CHECK_EQUAL (c.content().content(), 91); registry.check_counts (2, 0, 1, 0, 1, 0, 0, 2); } } int example_function_1 (double, std::string) { return 0; } int example_function_2 (double, std::string) { return 1; } struct example_struct { int example_function_1 (std::string, double) { return 0; } int example_function_2 (std::string, double) { return 1; } int example_function_3 (std::string, double) const { return 0; } int example_function_4 (std::string, double) const { return 1; } int example_function_5 (std::string, double) volatile { return 0; } int example_function_6 (std::string, double) volatile { return 1; } int example_function_7 (std::string, double) const volatile { return 0; } int example_function_8 (std::string, double) const volatile { return 1; } std::string example_member_1; std::string example_member_2; }; // The C++ faq recommends using this. // https://isocpp.org/wiki/faq/pointers-to-members #define CALL_MEMBER_FUNCTION(object, member_pointer) \ ((object).*(member_pointer)) BOOST_AUTO_TEST_CASE (test_utility_storage_example_functions) { // Function. { simple_container <int (double, std::string)> c (example_function_1); BOOST_CHECK_EQUAL (&c.content(), &example_function_1); c.replace_with (example_function_2); BOOST_CHECK_EQUAL (&c.content(), &example_function_2); } // Function reference. { simple_container <int (&) (double, std::string)> c (example_function_1); BOOST_CHECK_EQUAL (&c.content(), &example_function_1); BOOST_CHECK_EQUAL (c.content() (1., ""), 0); c.replace_with (example_function_2); BOOST_CHECK_EQUAL (&c.content(), &example_function_2); BOOST_CHECK_EQUAL (c.content() (1., ""), 1); } // Function pointer. { simple_container <int (*) (double, std::string)> c (example_function_1); BOOST_CHECK_EQUAL (c.content(), &example_function_1); BOOST_CHECK_EQUAL (c.content() (1., ""), 0); c.replace_with (example_function_2); BOOST_CHECK_EQUAL (c.content(), &example_function_2); BOOST_CHECK_EQUAL (c.content() (1., ""), 1); } // Member functions. 
example_struct s; { simple_container <int (example_struct::*) (std::string, double)> c ( &example_struct::example_function_1); BOOST_CHECK_EQUAL (c.content(), &example_struct::example_function_1); auto member_function = c.content(); BOOST_CHECK_EQUAL (CALL_MEMBER_FUNCTION (s, member_function) ("", 1.), 0); c.replace_with (&example_struct::example_function_2); BOOST_CHECK_EQUAL (c.content(), &example_struct::example_function_2); member_function = c.content(); BOOST_CHECK_EQUAL (CALL_MEMBER_FUNCTION (s, member_function) ("", 1.), 1); } { simple_container < int (example_struct::*) (std::string, double) const> c ( &example_struct::example_function_3); BOOST_CHECK_EQUAL (c.content(), &example_struct::example_function_3); auto member_function = c.content(); BOOST_CHECK_EQUAL (CALL_MEMBER_FUNCTION (s, member_function) ("", 1.), 0); c.replace_with (&example_struct::example_function_4); BOOST_CHECK_EQUAL (c.content(), &example_struct::example_function_4); member_function = c.content(); BOOST_CHECK_EQUAL (CALL_MEMBER_FUNCTION (s, member_function) ("", 1.), 1); } { simple_container < int (example_struct::*) (std::string, double) volatile> c ( &example_struct::example_function_5); BOOST_CHECK_EQUAL (c.content(), &example_struct::example_function_5); auto member_function = c.content(); BOOST_CHECK_EQUAL (CALL_MEMBER_FUNCTION (s, member_function) ("", 1.), 0); c.replace_with (&example_struct::example_function_6); BOOST_CHECK_EQUAL (c.content(), &example_struct::example_function_6); member_function = c.content(); BOOST_CHECK_EQUAL (CALL_MEMBER_FUNCTION (s, member_function) ("", 1.), 1); } { simple_container < int (example_struct::*) (std::string, double) const volatile> c ( &example_struct::example_function_7); BOOST_CHECK_EQUAL (c.content(), &example_struct::example_function_7); auto member_function = c.content(); BOOST_CHECK_EQUAL (CALL_MEMBER_FUNCTION (s, member_function) ("", 1.), 0); c.replace_with (&example_struct::example_function_8); BOOST_CHECK_EQUAL (c.content(), &example_struct::example_function_8); member_function = c.content(); BOOST_CHECK_EQUAL (CALL_MEMBER_FUNCTION (s, member_function) ("", 1.), 1); } { simple_container <std::string (example_struct::*)> c ( &example_struct::example_member_1); BOOST_CHECK_EQUAL (c.content(), &example_struct::example_member_1); c.replace_with (&example_struct::example_member_2); BOOST_CHECK_EQUAL (c.content(), &example_struct::example_member_2); } } BOOST_AUTO_TEST_SUITE_END()
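/* Hedged usage sketch, not part of the original test file: a minimal check of
   the pointer() accessor for an ordinary value type. The test name is made up,
   and the case is deliberately declared after BOOST_AUTO_TEST_SUITE_END(), so
   it registers with the master test suite rather than the suite above. */
BOOST_AUTO_TEST_CASE (test_utility_storage_example_pointer_sketch) {
    simple_container <std::string> c (std::string ("hello"));
    BOOST_CHECK_EQUAL (c.content(), "hello");
    c.pointer()->append (" world");
    BOOST_CHECK_EQUAL (c.content(), "hello world");
}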
{"hexsha": "18284a9207dfe10e1accfa50017fda776d555aba", "size": 11968, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/utility/test-storage-example.cpp", "max_stars_repo_name": "rogiervd/utility", "max_stars_repo_head_hexsha": "10a3f45ee5bdedd4bbe6fca973b5bb44e142c80f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/utility/test-storage-example.cpp", "max_issues_repo_name": "rogiervd/utility", "max_issues_repo_head_hexsha": "10a3f45ee5bdedd4bbe6fca973b5bb44e142c80f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/utility/test-storage-example.cpp", "max_forks_repo_name": "rogiervd/utility", "max_forks_repo_head_hexsha": "10a3f45ee5bdedd4bbe6fca973b5bb44e142c80f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3038348083, "max_line_length": 80, "alphanum_fraction": 0.6204044118, "num_tokens": 3030}
!< Jiang-Shu and Gerolymos-Senechal-Vallet weights. module wenoof_weights_int_js !< Jiang-Shu and Gerolymos-Senechal-Vallet weights. !< !< @note The provided WENO weights implements the weights defined in *Efficient Implementation of Weighted ENO !< Schemes*, Guang-Shan Jiang, Chi-Wang Shu, JCP, 1996, vol. 126, pp. 202--228, doi:10.1006/jcph.1996.0130 and !< *Very-high-order weno schemes*, G. A. Gerolymos, D. Senechal, I. Vallet, JCP, 2009, vol. 228, pp. 8481-8524, !< doi:10.1016/j.jcp.2009.07.039 use penf, only : I_P, R_P, str use wenoof_alpha_factory, only : alpha_factory use wenoof_alpha_object, only : alpha_object, alpha_object_constructor use wenoof_base_object, only : base_object, base_object_constructor use wenoof_beta_factory, only : beta_factory use wenoof_beta_object, only : beta_object, beta_object_constructor use wenoof_kappa_factory, only : kappa_factory use wenoof_kappa_int_js, only : kappa_int_js use wenoof_kappa_object, only : kappa_object, kappa_object_constructor use wenoof_weights_object, only : weights_object, weights_object_constructor implicit none private public :: weights_int_js public :: weights_int_js_constructor type, extends(weights_object_constructor) :: weights_int_js_constructor !< Jiang-Shu and Gerolymos-Senechal-Vallet optimal weights object constructor. class(alpha_object_constructor), allocatable :: alpha_constructor !< Alpha coefficients (non linear weights) constructor. class(beta_object_constructor), allocatable :: beta_constructor !< Beta coefficients (smoothness indicators) constructor. class(kappa_object_constructor), allocatable :: kappa_constructor !< kappa coefficients (optimal, linear weights) constructor. contains ! public deferred methods procedure, pass(lhs) :: constr_assign_constr !< `=` operator. endtype weights_int_js_constructor type, extends(weights_object):: weights_int_js !< Jiang-Shu and Gerolymos-Senechal-Vallet weights object. !< !< @note The provided WENO weights implements the weights defined in *Efficient Implementation of Weighted ENO !< Schemes*, Guang-Shan Jiang, Chi-Wang Shu, JCP, 1996, vol. 126, pp. 202--228, doi:10.1006/jcph.1996.0130 and !< *Very-high-order weno schemes*, G. A. Gerolymos, D. Senechal, I. Vallet, JCP, 2009, vol. 228, pp. 8481-8524, !< doi:10.1016/j.jcp.2009.07.039 class(alpha_object), allocatable :: alpha !< Alpha coefficients (non linear weights). class(beta_object), allocatable :: beta !< Beta coefficients (smoothness indicators). class(kappa_object), allocatable :: kappa !< kappa coefficients (optimal, linear weights). contains ! deferred public methods procedure, pass(self) :: create !< Create weights. procedure, pass(self) :: compute_int !< Compute weights (interpolate). procedure, pass(self) :: compute_rec !< Compute weights (reconstruct). procedure, pass(self) :: description !< Return object string-description. procedure, pass(self) :: destroy !< Destroy weights. procedure, pass(self) :: smoothness_indicators_int !< Return smoothness indicators (interpolate). procedure, pass(self) :: smoothness_indicators_rec !< Return smoothness indicators (reconstrcut). procedure, pass(lhs) :: object_assign_object !< `=` operator. endtype weights_int_js contains ! constructor ! deferred public methods subroutine constr_assign_constr(lhs, rhs) !< `=` operator. class(weights_int_js_constructor), intent(inout) :: lhs !< Left hand side. class(base_object_constructor), intent(in) :: rhs !< Right hand side. 
call lhs%assign_(rhs=rhs) select type(rhs) type is(weights_int_js_constructor) if (allocated(rhs%alpha_constructor)) then if (.not.allocated(lhs%alpha_constructor)) allocate(lhs%alpha_constructor, mold=rhs%alpha_constructor) lhs%alpha_constructor = rhs%alpha_constructor else if (allocated(lhs%alpha_constructor)) deallocate(lhs%alpha_constructor) endif if (allocated(rhs%beta_constructor)) then if (.not.allocated(lhs%beta_constructor)) allocate(lhs%beta_constructor, mold=rhs%beta_constructor) lhs%beta_constructor = rhs%beta_constructor else if (allocated(lhs%beta_constructor)) deallocate(lhs%beta_constructor) endif if (allocated(rhs%kappa_constructor)) then if (.not.allocated(lhs%kappa_constructor)) allocate(lhs%kappa_constructor, mold=rhs%kappa_constructor) lhs%kappa_constructor = rhs%kappa_constructor else if (allocated(lhs%kappa_constructor)) deallocate(lhs%kappa_constructor) endif endselect endsubroutine constr_assign_constr ! deferred public methods subroutine create(self, constructor) !< Create reconstructor. class(weights_int_js), intent(inout) :: self !< Weights. class(base_object_constructor), intent(in) :: constructor !< Constructor. type(alpha_factory) :: a_factory !< Alpha factory. type(beta_factory) :: b_factory !< Beta factory. type(kappa_factory) :: k_factory !< Kappa factory. call self%destroy call self%create_(constructor=constructor) select type(constructor) type is(weights_int_js_constructor) associate(alpha_constructor=>constructor%alpha_constructor, & beta_constructor=>constructor%beta_constructor, & kappa_constructor=>constructor%kappa_constructor) call a_factory%create(constructor=alpha_constructor, object=self%alpha) call b_factory%create(constructor=beta_constructor, object=self%beta) call k_factory%create(constructor=kappa_constructor, object=self%kappa) endassociate endselect endsubroutine create pure subroutine compute_int(self, stencil, values) !< Compute weights. class(weights_int_js), intent(in) :: self !< Weights. real(R_P), intent(in) :: stencil(1-self%S:) !< Stencil used for the interpolation, [1-S:-1+S]. real(R_P), intent(out) :: values(0:) !< Weights values. real(R_P) :: alpha(0:self%S-1) !< Aplha values. real(R_P) :: beta(0:self%S-1) !< Beta values. real(R_P) :: alpha_sum !< Sum of aplha values. integer(I_P) :: s !< Counters. call self%beta%compute(stencil=stencil, values=beta) select type(kappa => self%kappa) class is(kappa_int_js) call self%alpha%compute(beta=beta, kappa=kappa%values, values=alpha) endselect alpha_sum = sum(alpha) do s=0, self%S - 1 ! stencils loop values(s) = alpha(s) / alpha_sum enddo endsubroutine compute_int pure subroutine compute_rec(self, stencil, values) !< Compute weights. class(weights_int_js), intent(in) :: self !< Weights. real(R_P), intent(in) :: stencil(1:,1-self%S:) !< Stencil used for the interpolation, [1:2, 1-S:-1+S]. real(R_P), intent(out) :: values(1:,0:) !< Weights values of stencil interpolations. ! empty procedure endsubroutine compute_rec pure function description(self, prefix) result(string) !< Return object string-descripition. class(weights_int_js), intent(in) :: self !< Weights. character(len=*), intent(in), optional :: prefix !< Prefixing string. character(len=:), allocatable :: string !< String-description. character(len=:), allocatable :: prefix_ !< Prefixing string, local variable. character(len=1), parameter :: NL=new_line('a') !< New line char. 
prefix_ = '' ; if (present(prefix)) prefix_ = prefix string = prefix_//'Jiang-Shu weights object for interpolation:'//NL string = string//prefix_//' - S = '//trim(str(self%S))//NL string = string//prefix_//self%alpha%description(prefix=prefix_//' ') endfunction description elemental subroutine destroy(self) !< Destroy weights. class(weights_int_js), intent(inout) :: self !< Weights. call self%destroy_ if (allocated(self%alpha)) deallocate(self%alpha) if (allocated(self%beta)) deallocate(self%beta) if (allocated(self%kappa)) deallocate(self%kappa) endsubroutine destroy pure subroutine smoothness_indicators_int(self, si) !< Return smoothness indicators (interpolate). class(weights_int_js), intent(in) :: self !< Weights. real(R_P), intent(out) :: si(:) !< Smoothness indicators. ! TODO implement this endsubroutine smoothness_indicators_int pure subroutine smoothness_indicators_rec(self, si) !< Return smoothness indicators (reconstruct). class(weights_int_js), intent(in) :: self !< Weights. real(R_P), intent(out) :: si(:,:) !< Smoothness indicators. ! empty procedure endsubroutine smoothness_indicators_rec pure subroutine object_assign_object(lhs, rhs) !< `=` operator. class(weights_int_js), intent(inout) :: lhs !< Left hand side. class(base_object), intent(in) :: rhs !< Right hand side. call lhs%assign_(rhs=rhs) select type(rhs) type is(weights_int_js) if (allocated(rhs%alpha)) then if (.not.allocated(lhs%alpha)) allocate(lhs%alpha, mold=rhs%alpha) lhs%alpha = rhs%alpha else if (allocated(lhs%alpha)) deallocate(lhs%alpha) endif if (allocated(rhs%beta)) then if (.not.allocated(lhs%beta)) allocate(lhs%beta, mold=rhs%beta) lhs%beta = rhs%beta else if (allocated(lhs%beta)) deallocate(lhs%beta) endif if (allocated(rhs%kappa)) then if (.not.allocated(lhs%kappa)) allocate(lhs%kappa, mold=rhs%kappa) lhs%kappa = rhs%kappa else if (allocated(lhs%kappa)) deallocate(lhs%kappa) endif endselect endsubroutine object_assign_object endmodule wenoof_weights_int_js
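! ----------------------------------------------------------------------------
! Illustrative note, not part of the original module: compute_int above builds
! the classic Jiang-Shu convex combination
!   omega_s = alpha_s / (alpha_0 + ... + alpha_(S-1)),  s = 0, ..., S-1
! where the injected alpha object turns the smoothness indicators beta_s and
! the optimal (linear) weights kappa_s into the non-linear alpha_s (for the
! original JS scheme, alpha_s = kappa_s / (eps + beta_s)**2); the kappa values
! are taken from the kappa_int_js member as shown in the select type block.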
{"hexsha": "e1e5aac0a24cea2136505dc08f1deeba89ed2554", "size": 9957, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/lib/concrete_objects/wenoof_weights_int_js.f90", "max_stars_repo_name": "Fortran-FOSS-Programmers/WenOOF", "max_stars_repo_head_hexsha": "7f53d1b7e6e026407c8e48ef2f8a3755d90f3ae2", "max_stars_repo_licenses": ["MIT", "BSD-2-Clause", "BSD-3-Clause"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2015-07-30T03:45:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T02:40:05.000Z", "max_issues_repo_path": "src/lib/concrete_objects/wenoof_weights_int_js.f90", "max_issues_repo_name": "Fortran-FOSS-Programmers/WenOOF", "max_issues_repo_head_hexsha": "7f53d1b7e6e026407c8e48ef2f8a3755d90f3ae2", "max_issues_repo_licenses": ["MIT", "BSD-2-Clause", "BSD-3-Clause"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2015-07-30T03:59:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T07:54:21.000Z", "max_forks_repo_path": "src/lib/concrete_objects/wenoof_weights_int_js.f90", "max_forks_repo_name": "Fortran-FOSS-Programmers/WenOOF", "max_forks_repo_head_hexsha": "7f53d1b7e6e026407c8e48ef2f8a3755d90f3ae2", "max_forks_repo_licenses": ["MIT", "BSD-2-Clause", "BSD-3-Clause"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2015-11-16T18:30:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-20T00:19:15.000Z", "avg_line_length": 47.4142857143, "max_line_length": 128, "alphanum_fraction": 0.6896655619, "num_tokens": 2544}
from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys try: from numpy import arccos from numpy import array from numpy import cross from numpy import int64 from numpy import isnan from numpy import mean from numpy import newaxis from numpy import sin from numpy import sum from numpy import tile from numpy import zeros except ImportError: if 'ironpython' not in sys.version.lower(): raise from compas.numerical import connectivity_matrix from compas.numerical import mass_matrix from compas.numerical import normrow from compas.numerical import uvw_lengths from time import time __author__ = ['Andrew Liew <liew@arch.ethz.ch>'] __copyright__ = 'Copyright 2017, Block Research Group - ETH Zurich' __license__ = 'MIT License' __email__ = 'liew@arch.ethz.ch' __all__ = [ 'drx_numpy' ] def drx_numpy(network, factor=1.0, tol=0.1, steps=10000, refresh=0, update=False, callback=None, **kwargs): """Run dynamic relaxation analysis. Parameters ---------- network : Network Network to analyse. factor : float Convergence factor. tol : float Tolerance value. steps : int Maximum number of steps. refresh : int Update progress every n steps. update : bool Update the co-ordinates of the Network. callback : callable Callback function. Returns ------- array Vertex co-ordinates. array Edge forces. array Edge lengths. Examples -------- .. plot:: :include-source: import compas from compas.datastructures import Network from compas.plotters import NetworkPlotter from compas.numerical import drx_numpy from compas.utilities import i_to_rgb from numpy import linspace L0 = 1 L = 1.5 n = 40 EI = 0.2 pins = [0, 5, 20, n - 5] # Network vertices = [[i, i, 0] for i in list(linspace(0, L0, n))] edges = [[i, i + 1] for i in range(n - 1)] network = Network.from_vertices_and_edges(vertices=vertices, edges=edges) network.update_default_vertex_attributes({'is_fixed': False, 'P': [1, -2, 0], 'EIx': EI, 'EIy': EI}) network.update_default_edge_attributes({'E': 50, 'A': 1, 'l0': L / n}) network.set_vertices_attributes(pins, {'B': [0, 0, 0], 'is_fixed': True}) network.beams = {'beam': {'nodes': list(range(n))}} # Plotter plotter = NetworkPlotter(network) lines = [] for u, v in network.edges(): lines.append({ 'start': network.vertex_coordinates(u, 'xy'), 'end' : network.vertex_coordinates(v, 'xy'), 'color': '#cccccc', 'width': 1.0}) plotter.draw_lines(lines) # Solver drx_numpy(network=network, tol=0.01, refresh=10, factor=30, update=True) # Result plotter.draw_vertices(radius=0.005, facecolor={key: '#ff0000' for key in pins}) plotter.draw_edges() plotter.show() See Also -------- * """ # Setup tic1 = time() X, B, P, Pn, S, V, E, A, C, Ct, f0, l0, ind_c, ind_t, u, v, M, ks = _create_arrays(network) try: inds, indi, indf, EIx, EIy = _beam_data(network) beams = network.beams except AttributeError: beams = inds = indi = indf = EIx = EIy = None toc1 = time() - tic1 # Solver tic2 = time() X, f, l = drx_solver(tol, steps, factor, C, Ct, X, ks, l0, f0, ind_c, ind_t, P, S, B, M, V, refresh, beams, inds, indi, indf, EIx, EIy, callback, **kwargs) toc2 = time() - tic2 # Summary if refresh: print('\n\nNumPy-SciPy DR -------------------') print('Setup time: {0:.3g}s'.format(toc1)) print('Solver time: {0:.3g}s'.format(toc2)) print('----------------------------------') # Update if update: i_k = network.index_key() for i in sorted(list(network.vertices()), key=int): x, y, z = X[i, :] network.set_vertex_attributes(i_k[i], {'x': x, 'y': y, 'z': z}) uv_i = network.uv_index() for edge in 
network.edges(): i = uv_i[edge] network.set_edge_attribute(edge, 'f', float(f[i])) return X, f, l def drx_solver(tol, steps, factor, C, Ct, X, ks, l0, f0, ind_c, ind_t, P, S, B, M, V, refresh, beams, inds, indi, indf, EIx, EIy, callback, **kwargs): """ NumPy and SciPy dynamic relaxation solver. Parameters: tol (float): Tolerance limit. steps (int): Maximum number of steps. factor (float): Convergence factor. C (array): Connectivity matrix. Ct (array): Transposed connectivity matrix. X (array): Nodal co-ordinates. ks (array): Initial edge axial stiffnesses. l0 (array): Initial edge lengths. f0 (array): Initial edge forces. ind_c (list): Indices of compression only edges. ind_t (list): Indices of tension only edges. P (array): Nodal loads Px, Py, Pz. S (array): Shear forces Sx, Sy, Sz. B (array): Constraint conditions. M (array): Mass matrix. V (array): Nodal velocities Vx, Vy, Vz. refresh (int): Update progress every n steps. beams (bool): Dictionary of beam information. inds (list): Indices of beam element start nodes. indi (list): Indices of beam element intermediate nodes. indf (list): Indices of beam element finish nodes beams. EIx (array): Nodal EIx flexural stiffnesses. EIy (array): Nodal EIy flexural stiffnesses. callback (obj): Callback function. Returns: array: Updated nodal co-ordinates. array: Updated forces. array: Updated lengths. """ res = 1000 * tol ts, Uo = 0, 0 M = factor * tile(M, (1, 3)) while (ts <= steps) and (res > tol): uvw, l = uvw_lengths(C, X) f = f0 + ks * (l - l0) if ind_t: f[ind_t] *= f[ind_t] > 0 if ind_c: f[ind_c] *= f[ind_c] < 0 if beams: S = _beam_shear(S, X, inds, indi, indf, EIx, EIy) q = f / l qt = tile(q, (1, 3)) R = (P - S - Ct.dot(uvw * qt)) * B res = mean(normrow(R)) V += R / M Un = sum(M * V**2) if Un < Uo: V *= 0 Uo = Un X += V if refresh: if (ts % refresh == 0) or (res < tol): print('Step:{0} Residual:{1:.3g}'.format(ts, res)) if callback: callback(X, **kwargs) ts += 1 return X, f, l def _beam_data(network): """ Create data for beam element calculations. Parameters: network (obj): Network to be analysed. Returns: list: Indices of beam element start nodes. list: Indices of beam element intermediate nodes. list: Indices of beam element finish nodes beams. array: Nodal EIx flexural stiffnesses of all beams. array: Nodal EIy flexural stiffnesses of all beams. """ beams = network.beams inds, indi, indf, EIx, EIy = [], [], [], [], [] for _, beam in beams.items(): nodes = beam['nodes'] inds.extend(nodes[:-2]) indi.extend(nodes[1:-1]) indf.extend(nodes[2:]) EIx.extend([network.vertex[i]['EIx'] for i in nodes[1:-1]]) EIy.extend([network.vertex[i]['EIy'] for i in nodes[1:-1]]) EIx = array(EIx)[:, newaxis] EIy = array(EIy)[:, newaxis] return inds, indi, indf, EIx, EIy def _beam_shear(S, X, inds, indi, indf, EIx, EIy): """ Generate the beam nodal shear forces Sx, Sy and Sz. Parameters: S (array): Nodal shear force array. X (array): Co-ordinates of nodes. inds (list): Indices of beam element start nodes. indi (list): Indices of beam element intermediate nodes. indf (list): Indices of beam element finish nodes beams. EIx (array): Nodal EIx flexural stiffnesses. EIy (array): Nodal EIy flexural stiffnesses. Returns: array: Updated beam nodal shears. 
""" S *= 0 Xs = X[inds, :] Xi = X[indi, :] Xf = X[indf, :] Qa = Xi - Xs Qb = Xf - Xi Qc = Xf - Xs Qn = cross(Qa, Qb) mu = 0.5 * (Xf - Xs) La = normrow(Qa) Lb = normrow(Qb) Lc = normrow(Qc) LQn = normrow(Qn) Lmu = normrow(mu) a = arccos((La**2 + Lb**2 - Lc**2) / (2 * La * Lb)) k = 2 * sin(a) / Lc ex = Qn / tile(LQn, (1, 3)) # temporary simplification ez = mu / tile(Lmu, (1, 3)) ey = cross(ez, ex) K = tile(k / LQn, (1, 3)) * Qn Kx = tile(sum(K * ex, 1)[:, newaxis], (1, 3)) * ex Ky = tile(sum(K * ey, 1)[:, newaxis], (1, 3)) * ey Mc = EIx * Kx + EIy * Ky cma = cross(Mc, Qa) cmb = cross(Mc, Qb) ua = cma / tile(normrow(cma), (1, 3)) ub = cmb / tile(normrow(cmb), (1, 3)) c1 = cross(Qa, ua) c2 = cross(Qb, ub) Lc1 = normrow(c1) Lc2 = normrow(c2) Ms = sum(Mc**2, 1)[:, newaxis] Sa = ua * tile(Ms * Lc1 / (La * sum(Mc * c1, 1)[:, newaxis]), (1, 3)) Sb = ub * tile(Ms * Lc2 / (Lb * sum(Mc * c2, 1)[:, newaxis]), (1, 3)) Sa[isnan(Sa)] = 0 Sb[isnan(Sb)] = 0 S[inds, :] += Sa S[indi, :] -= Sa + Sb S[indf, :] += Sb # Add node junction duplication for when elements cross each other # mu[0, :] = -1.25*x[0, :] + 1.5*x[1, :] - 0.25*x[2, :] # mu[-1, :] = 0.25*x[-3, :] - 1.5*x[-2, :] + 1.25*x[-1, :] return S def _create_arrays(network): """ Create arrays for dynamic relaxation solver. Parameters: network (obj): Network to analyse. Returns: array: Nodal co-ordinates x, y, z. array: Constraint conditions Bx, By, Bz. array: Nodal loads Px, Py, Pz. array: Resultant nodal loads. array: Shear force components Sx, Sy, Sz. array: Nodal velocities Vx, Vy, Vz. array: Edge Young's moduli. array: Edge areas. array: Connectivity matrix. array: Transposed connectivity matrix. array: Edge initial forces. array: Edge initial lengths. list: Compression only edges indices. list: Tension only edges indices. array: Network edges' start points. array: Network edges' end points. array: Mass matrix. array: Edge axial stiffnesses. 
""" # Vertices n = network.number_of_vertices() B = zeros((n, 3)) P = zeros((n, 3)) X = zeros((n, 3)) S = zeros((n, 3)) V = zeros((n, 3)) k_i = network.key_index() for key in network.vertices(): i = k_i[key] vertex = network.vertex[key] B[i, :] = vertex.get('B', [1, 1, 1]) P[i, :] = vertex.get('P', [0, 0, 0]) X[i, :] = [vertex[j] for j in 'xyz'] Pn = normrow(P) # Edges uv_i = network.uv_index() edges = list(network.edges()) m = len(edges) u = zeros(m, dtype=int64) v = zeros(m, dtype=int64) E = zeros((m, 1)) A = zeros((m, 1)) s0 = zeros((m, 1)) l0 = zeros((m, 1)) ind_c = [] ind_t = [] for c, uv in enumerate(edges): ui, vi = uv i = uv_i[(ui, vi)] edge = network.edge[ui][vi] E[i] = edge.get('E', 0) A[i] = edge.get('A', 0) l0[i] = edge.get('l0', network.edge_length(ui, vi)) s0[i] = edge.get('s0', 0) u[c] = k_i[ui] v[c] = k_i[vi] ct = edge.get('ct', None) if ct == 'c': ind_c.append(i) elif ct == 't': ind_t.append(i) f0 = s0 * A ks = E * A / l0 q0 = f0 / l0 # Faces (testing) # if network.face: # for face in faces: # fdata = network.facedata[face] # Eh = fdata.get('E', 0) # th = fdata.get('t', 0) # Ah = network.face_area(face) # for ui, vi in network.face_edges(face): # i = uv_i[(ui, vi)] # ks[i] += 1.5 * Eh * Ah * th / l0[i]**2 # Arrays C = connectivity_matrix([[k_i[ui], k_i[vi]] for ui, vi in edges], 'csr') Ct = C.transpose() M = mass_matrix(Ct=Ct, ks=ks, q=q0, c=1, tiled=False) return X, B, P, Pn, S, V, E, A, C, Ct, f0, l0, ind_c, ind_t, u, v, M, ks # ============================================================================== # Main # ============================================================================== if __name__ == "__main__": import compas from compas.datastructures import Network from compas.plotters import NetworkPlotter from compas.utilities import i_to_rgb from numpy import linspace def plot_iterations(X, radius=0.005): for i in network.vertices(): x, y, z = X[i, :] network.set_vertex_attributes(i, {'x': x, 'y': y, 'z': z}) plotter.update_vertices(radius) plotter.update_edges() plotter.update(pause=0.01) # ========================================================================== # Example 1 # ========================================================================== # # Load Network # network = Network.from_obj(compas.get('lines.obj')) # network.update_default_vertex_attributes({'is_fixed': False, 'P': [1, 1, 0]}) # network.update_default_edge_attributes({'E': 10, 'A': 1, 'ct': 't'}) # network.set_vertices_attributes(network.leaves(), {'B': [0, 0, 0], 'is_fixed': True}) # # Plotter # plotter = NetworkPlotter(network, figsize=(10, 7)) # lines = [] # for u, v in network.edges(): # lines.append({ # 'start': network.vertex_coordinates(u, 'xy'), # 'end' : network.vertex_coordinates(v, 'xy'), # 'color': '#cccccc', # 'width': 1.0}) # plotter.draw_lines(lines) # plotter.draw_vertices(facecolor={key: '#ff0000' for key in network.vertices_where({'is_fixed': True})}) # plotter.draw_edges() # # Solver # X, f, l = drx_numpy(network=network, tol=0.001, refresh=2, update=True, callback=plot_iterations, radius=0.1) # # Forces # fmax = max(network.get_edges_attribute('f')) # plotter.draw_edges( # color={(u, v): i_to_rgb(attr['f'] / fmax) for u, v, attr in network.edges(True)}, # width={(u, v): 10 * attr['f'] / fmax for u, v, attr in network.edges(True)}) # plotter.update() # plotter.show() # ========================================================================== # Example 2 # ========================================================================== # Input L0 = 1 L = 1.5 n = 40 EI = 
0.2 pins = [0, 5, 20, n - 5] # Network vertices = [[i, i, 0] for i in list(linspace(0, L0, n))] edges = [[i, i + 1] for i in range(n - 1)] network = Network.from_vertices_and_edges(vertices=vertices, edges=edges) network.update_default_vertex_attributes({'is_fixed': False, 'P': [1, -2, 0], 'EIx': EI, 'EIy': EI}) network.update_default_edge_attributes({'E': 50, 'A': 1, 'l0': L / n}) network.set_vertices_attributes(pins, {'B': [0, 0, 0], 'is_fixed': True}) network.beams = {'beam': {'nodes': list(range(n))}} # Plotter plotter = NetworkPlotter(network, figsize=(10, 7)) lines = [] for u, v in network.edges(): lines.append({ 'start': network.vertex_coordinates(u, 'xy'), 'end' : network.vertex_coordinates(v, 'xy'), 'color': '#cccccc', 'width': 1.0}) plotter.draw_lines(lines) plotter.draw_vertices(radius=0.005, facecolor={key: '#ff0000' for key in network.vertices_where({'is_fixed': True})}) plotter.draw_edges() # Solver drx_numpy(network=network, tol=0.01, refresh=10, factor=30, update=True, callback=plot_iterations) plotter.show()
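    # --------------------------------------------------------------------------
    # Hedged sketch (not part of the original module): the kinetic damping
    # scheme used by drx_solver above, reduced to a single spring-mass degree
    # of freedom. The stiffness, mass, and load values are illustrative
    # assumptions only.
    def _kinetic_damping_sketch(k=5.0, m=10.0, p=1.0, steps=500):
        x, v, u_old = 0.0, 0.0, 0.0
        for _ in range(steps):
            r = p - k * x        # residual force, cf. R = (P - S - Ct.dot(uvw * qt)) * B
            v += r / m           # cf. V += R / M
            u_new = m * v ** 2   # kinetic energy, cf. Un = sum(M * V**2)
            if u_new < u_old:    # kinetic energy peak passed: kill the velocity
                v = 0.0          # cf. V *= 0
            u_old = u_new        # cf. Uo = Un
            x += v               # cf. X += V
        return x                 # approaches the static solution p / k = 0.2

    print(_kinetic_damping_sketch())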
{"hexsha": "b1449a5a0653785e35579000955d6c7fb47f858b", "size": 16579, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/compas/numerical/algorithms/drx_numpy.py", "max_stars_repo_name": "gonzalocasas/compas", "max_stars_repo_head_hexsha": "2fabc7e5c966a02d823fa453564151e1a1e7e3c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/compas/numerical/algorithms/drx_numpy.py", "max_issues_repo_name": "gonzalocasas/compas", "max_issues_repo_head_hexsha": "2fabc7e5c966a02d823fa453564151e1a1e7e3c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/compas/numerical/algorithms/drx_numpy.py", "max_forks_repo_name": "gonzalocasas/compas", "max_forks_repo_head_hexsha": "2fabc7e5c966a02d823fa453564151e1a1e7e3c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8159851301, "max_line_length": 122, "alphanum_fraction": 0.5173412148, "include": true, "reason": "from numpy", "num_tokens": 4747}
[STATEMENT] lemma has_white_path_to_induct[consumes 1, case_names refl step, induct set: has_white_path_to]: assumes "(x has_white_path_to y) s" assumes "\<And>x. P x x" assumes "\<And>x y z. \<lbrakk>(x has_white_path_to y) s; P x y; (y points_to z) s; white z s\<rbrakk> \<Longrightarrow> P x z" shows "P x y" [PROOF STATE] proof (prove) goal (1 subgoal): 1. P x y [PROOF STEP] using assms [PROOF STATE] proof (prove) using this: (x has_white_path_to y) s P ?x ?x \<lbrakk>(?x has_white_path_to ?y) s; P ?x ?y; (?y points_to ?z) s; white ?z s\<rbrakk> \<Longrightarrow> P ?x ?z goal (1 subgoal): 1. P x y [PROOF STEP] unfolding has_white_path_to_def [PROOF STATE] proof (prove) using this: (\<lambda>x y. (x points_to y) s \<and> white y s)\<^sup>*\<^sup>* x y P ?x ?x \<lbrakk>(\<lambda>x y. (x points_to y) s \<and> white y s)\<^sup>*\<^sup>* ?x ?y; P ?x ?y; (?y points_to ?z) s; white ?z s\<rbrakk> \<Longrightarrow> P ?x ?z goal (1 subgoal): 1. P x y [PROOF STEP] by (rule rtranclp.induct; blast)
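(* Illustrative note, not part of the original theory: the lemma above packages
   a tailored induction principle for has_white_path_to. To derive P x y from
   (x has_white_path_to y) s, it suffices to handle the reflexive case P x x and
   the step that extends an already established white path from x to y by one
   edge (y points_to z) s into a white vertex z. *)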
{"llama_tokens": 466, "file": "ConcurrentGC_Global_Invariants_Lemmas", "length": 3}
# The following commented code was an attempt (along with setting the # PYTHONHASHSEED environment variable to 0 in the PyDev run configuration # for this script) to make classifier training reproducible, but it didn't # work. # # import random # random.seed(1) # # import numpy as np # np.random.seed(1) # # import tensorflow as tf # tf.set_random_seed(1) from pathlib import Path import sys import time from tensorflow import keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from matplotlib.backends.backend_pdf import PdfPages from matplotlib.ticker import MultipleLocator import matplotlib.pyplot as plt import numpy as np from vesper.mpg_ranch.nfc_coarse_classifier_2_1.feature_computer import \ FeatureComputer from vesper.util.binary_classification_stats import BinaryClassificationStats from vesper.util.bunch import Bunch from vesper.util.clip_hdf5_file import ClipHdf5File from vesper.util.conditional_printer import ConditionalPrinter from vesper.util.settings import Settings import vesper.mpg_ranch.nfc_coarse_classifier_2_1.classifier_utils as \ classifier_utils import vesper.util.numpy_utils as numpy_utils import vesper.util.open_mp_utils as open_mp_utils import vesper.util.yaml_utils as yaml_utils # TODO: Offer reproducible training option. # TODO: Balance data in training epochs. # TODO: Try using longer thrush waveforms. # TODO: Try adding convolutional layers. # TODO: Try learning a filter bank instead of using a spectrogram. # TODO: Try lots of random sets of hyperparameter values. # TODO: Try training several networks and using majority vote of best three. DATASET_DIR_PATH = Path( '/Users/harold/Desktop/NFC/Data/Vesper ML/Datasets/' 'Coarse Classification/2017') DATASET_FILE_NAME_FORMAT = '2017 {} Clips 22050.h5' RESULTS_DIR_PATH = Path('/Users/harold/Desktop/ML Results') PR_PLOT_FILE_NAME_FORMAT = '{} 2017 PR.pdf' ROC_PLOT_FILE_NAME_FORMAT = '{} 2017 ROC.pdf' PR_CSV_FILE_NAME_FORMAT = '{} 2017 PR.csv' PR_CSV_FILE_HEADER = ( 'Threshold,' 'Training Recall,' 'Training Precision,' 'Validation Recall,' 'Validation Precision\n') PR_CSV_FILE_ROW_FORMAT = '{:.2f},{:.3f},{:.3f},{:.3f},{:.3f}\n' VERBOSE = True # Progress notification period for clip reading and spectrogram computation # when output is verbose, in clips. NOTIFICATION_PERIOD = 10000 SETTINGS = { 'Tseep': Settings( clip_type='Tseep', waveform_start_time=.080, waveform_duration=.150, spectrogram_window_size=.005, spectrogram_hop_size=.0025, spectrogram_start_freq=4000, spectrogram_end_freq=10000, spectrogram_power_clipping_fraction=.001, spectrogram_normalization_enabled=True, min_recall=.98, training_set_size=120000, validation_set_size=5000, test_set_size=5000, num_epochs=40, batch_size=128, # Sizes in units of the hidden layers of the classification # neural network. All of the hidden layers are dense, and all # use the RELU activation function. The final layer of the # network comprises a single unit with a sigmoid activation # function. Setting this to the empty list yields a logistic # regression classifier. 
hidden_layer_sizes=[16], regularization_beta=.002, precision_recall_plot_lower_axis_limit=.80, precision_recall_plot_major_tick_interval=.05, precision_recall_plot_minor_tick_interval=.01 ), 'Thrush': Settings( clip_type='Thrush', waveform_start_time=.150, waveform_duration=.175, spectrogram_window_size=.005, spectrogram_hop_size=.0025, spectrogram_start_freq=2000, spectrogram_end_freq=5000, spectrogram_power_clipping_fraction=.001, spectrogram_normalization_enabled=True, min_recall=.97, training_set_size=None, validation_set_size=5000, test_set_size=5000, num_epochs=40, batch_size=128, # Sizes in units of the hidden layers of the classification # neural network. All of the hidden layers are dense, and all # use the RELU activation function. The final layer of the # network comprises a single unit with a sigmoid activation # function. Setting this to the empty list yields a logistic # regression classifier. hidden_layer_sizes=[16], # Got the following results for training networks of various # sizes on 2017-11-15: # # [8] 0.07 0.971 0.801 # [10] 0.14 0.971 0.865 # [12] 0.12 0.970 0.819 # [14] 0.08 0.971 0.801 # [16] 0.06 0.972 0.805 # [18] 0.15 0.970 0.866 # [20] 0.09 0.973 0.815 # [22] 0.11 0.970 0.849 # [24] 0.11 0.972 0.858 # # [8] 0.09 0.971 0.833 # [10] 0.08 0.972 0.790 # [12] 0.11 0.971 0.851 # [14] 0.11 0.971 0.841 # [16] 0.09 0.972 0.834 # [18] 0.16 0.970 0.819 # [20] 0.13 0.971 0.852 # [22] 0.11 0.971 0.847 # [24] 0.12 0.971 0.833 # # [8] 0.09 0.970 0.842 # [10] 0.09 0.970 0.846 # [12] 0.12 0.972 0.838 # [14] 0.11 0.971 0.833 # [16] 0.10 0.971 0.824 # [18] 0.08 0.972 0.837 # [20] 0.11 0.970 0.838 # [22] 0.08 0.971 0.850 # [24] 0.10 0.971 0.823 # hidden_layer_sizes=[ # [8], [10], [12], [14], [16], [18], [20], [22], [24] # ], regularization_beta=.002, precision_recall_plot_lower_axis_limit=.80, precision_recall_plot_major_tick_interval=.05, precision_recall_plot_minor_tick_interval=.01 ) } def main(): open_mp_utils.work_around_multiple_copies_issue() clip_type = sys.argv[1] settings = SETTINGS[clip_type] clips = get_clips(clip_type, settings) if not VERBOSE: print('Computing features...') features = compute_features(clips, settings) print('Getting targets from classifications...') targets = get_targets(clips) print('Creating training, validation, and test data sets...') train_set, val_set, _ = create_data_sets(features, targets, settings) # print('Training classifiers...') # _train_classifiers(train_set, val_set, settings) print('Training classifier...') model = train_classifier(train_set, settings) print('Testing classifier...') train_stats = test_classifier(model, train_set) val_stats = test_classifier(model, val_set) save_results(clip_type, train_stats, val_stats, settings) print('Saving classifier...') save_classifier(model, settings, val_stats) print() def get_clips(clip_type, settings): file_path = create_dataset_file_path(clip_type) file_ = ClipHdf5File(file_path) num_file_clips = file_.get_num_clips() num_clips = get_num_read_clips(num_file_clips, settings) if num_clips != num_file_clips: s = '{} of {}'.format(num_clips, num_file_clips) else: s = '{}'.format(num_clips) print('Reading {} clips from file "{}"...'.format(s, file_path)) if VERBOSE: start_time = time.time() listener = (lambda n: print(' {}'.format(n))) if VERBOSE else None clips = file_.read_clips(num_clips, NOTIFICATION_PERIOD, listener) if VERBOSE: elapsed_time = time.time() - start_time elapsed_time = int(round(10 * elapsed_time)) / 10 if elapsed_time != 0: rate = num_clips / elapsed_time s = ', an average of 
{:.1f} clips per second'.format(rate) else: s = '' print('Read {} clips in {:.1f} seconds{}.'.format( len(clips), elapsed_time, s)) num_calls = len( [c for c in clips if c.classification.startswith('Call')]) num_noises = num_clips - num_calls print('Clips include {} calls and {} noises.'.format( num_calls, num_noises)) settings.waveform_sample_rate = file_.get_sample_rate() return clips def create_dataset_file_path(clip_type): file_name = DATASET_FILE_NAME_FORMAT.format(clip_type) return DATASET_DIR_PATH / file_name def get_num_read_clips(num_file_clips, settings): train_size = settings.training_set_size val_size = settings.validation_set_size test_size = settings.test_set_size if train_size is None: if num_file_clips <= val_size + test_size: raise ValueError(( 'File contains {} clips, fewer than required ' 'with the specified validation and test set sizes ' 'of {} and {} clips, respectively.').format( num_file_clips, val_size, test_size)) return num_file_clips else: num_clips = train_size + val_size + test_size if num_clips > num_file_clips: raise ValueError(( 'File contains {} clips, too few for the ' 'specified training, validation, and test set ' 'sizes of {}, {}, and {} clips, respectively.').format( num_file_clips, train_size, val_size, test_size)) return num_clips def compute_features(clips, settings): vprint = ConditionalPrinter(VERBOSE) vprint('Collecting waveforms...') waveforms = collect_waveforms(clips) num_waveforms = len(waveforms) fc = FeatureComputer(settings) vprint('Trimming waveforms...') waveforms = fc.trim_waveforms(waveforms) def show_clip_count(n): vprint(' {}'.format(n)) vprint('Computing spectrograms...') start_time = time.time() spectrograms = fc.compute_spectrograms( waveforms, NOTIFICATION_PERIOD, show_clip_count) elapsed_time = time.time() - start_time spectrogram_rate = num_waveforms / elapsed_time spectrum_rate = spectrogram_rate * spectrograms[0].shape[0] vprint(( 'Computed {} spectrograms of shape {} in {:.1f} seconds, an ' 'average of {:.1f} spectrograms and {:.1f} spectra per ' 'second.').format( num_waveforms, spectrograms[0].shape, elapsed_time, spectrogram_rate, spectrum_rate)) vprint('Trimming spectrogram frequencies...') vprint(' input shape {}'.format(spectrograms.shape)) spectrograms = fc.trim_spectrograms(spectrograms) vprint(' output shape {}'.format(spectrograms.shape)) fc.configure_spectrogram_power_clipping(spectrograms) if settings.spectrogram_min_power is not None: vprint('Clipping spectrogram powers to {}...'.format( (settings.spectrogram_min_power, settings.spectrogram_max_power))) fc.clip_spectrogram_powers(spectrograms) fc.configure_spectrogram_normalization(spectrograms) if settings.spectrogram_mean is not None: vprint('Normalizing spectrograms with {}...'.format( (settings.spectrogram_mean, settings.spectrogram_standard_dev))) fc.normalize_spectrograms(spectrograms) vprint('Flattening spectrograms...') features = fc.flatten_spectrograms(spectrograms) return features def collect_waveforms(clips): num_clips = len(clips) num_samples = len(clips[0].waveform) waveforms = np.zeros((num_clips, num_samples)) for i, clip in enumerate(clips): waveforms[i] = clip.waveform return waveforms def get_targets(clips): targets = np.array([get_target(c) for c in clips]) targets.shape = (len(targets), 1) return targets def get_target(clip): return 1 if clip.classification.startswith('Call') else 0 def create_data_sets(features, targets, settings): num_examples = len(features) assert(len(targets) == num_examples) train_size = settings.training_set_size 
val_size = settings.validation_set_size test_size = settings.test_set_size assert(val_size + test_size < num_examples) if train_size is None: train_size = num_examples - val_size - test_size assert(train_size + val_size + test_size <= num_examples) # Shuffle examples. permutation = numpy_utils.reproducible_permutation(num_examples) features = features[permutation] targets = targets[permutation] test_start = num_examples - test_size val_start = test_start - val_size train_set = Bunch( name='training', features=features[:val_start], targets=targets[:val_start]) val_set = Bunch( name='validation', features=features[val_start:test_start], targets=targets[val_start:test_start]) test_set = Bunch( name='test', features=features[test_start:], targets=targets[test_start:]) return train_set, val_set, test_set def train_classifiers(train_set, val_set, settings): results = [] input_length = train_set.features.shape[1] for hidden_layer_sizes in settings.hidden_layer_sizes: print('Training classifier with hidden layer sizes {}...'.format( hidden_layer_sizes)) model = create_classifier_model( input_length, hidden_layer_sizes, settings.regularization_beta) verbose = 2 if VERBOSE else 0 model.fit( train_set.features, train_set.targets, epochs=settings.num_epochs, batch_size=settings.batch_size, verbose=verbose) stats = test_classifier(model, val_set) i = find_classification_threshold_index(stats, settings.min_recall) results.append( (hidden_layer_sizes, stats.threshold[i], stats.recall[i], stats.precision[i])) print( 'Classifier (hidden layer sizes, threshold, recall, precision) ' 'tuples:') for r in results: print(' {} {:.2f} {:.3f} {:.3f}'.format(*r)) def create_classifier_model( input_length, hidden_layer_sizes, regularization_beta): layer_sizes = hidden_layer_sizes + [1] num_layers = len(layer_sizes) regularizer = keras.regularizers.l2(regularization_beta) model = Sequential() for i in range(num_layers): kwargs = { 'activation': 'sigmoid' if i == num_layers - 1 else 'relu', 'kernel_regularizer': regularizer } if i == 0: kwargs['input_dim'] = input_length model.add(Dense(layer_sizes[i], **kwargs)) model.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) return model def test_classifier(model, data_set, num_thresholds=101): features = data_set.features targets = data_set.targets values = model.predict(features, batch_size=len(features)) thresholds = np.arange(num_thresholds) / float(num_thresholds - 1) return BinaryClassificationStats(targets, values, thresholds) def find_classification_threshold_index(stats, min_recall): recall = stats.recall i = 0 while recall[i] >= min_recall: i += 1 return i - 1 def train_classifier(train_set, settings): input_length = train_set.features.shape[1] model = create_classifier_model( input_length, settings.hidden_layer_sizes, settings.regularization_beta) verbose = 2 if VERBOSE else 0 model.fit( train_set.features, train_set.targets, epochs=settings.num_epochs, batch_size=settings.batch_size, verbose=verbose) return model def save_results(clip_type, train_stats, val_stats, settings): plot_precision_recall_curves(clip_type, train_stats, val_stats, settings) plot_roc_curves(clip_type, train_stats, val_stats) write_precision_recall_csv_file(clip_type, train_stats, val_stats) def plot_precision_recall_curves(clip_type, train_stats, val_stats, settings): file_path = create_results_file_path(PR_PLOT_FILE_NAME_FORMAT, clip_type) with PdfPages(file_path) as pdf: plt.figure(figsize=(6, 6)) # Plot training and validation curves. 
plt.plot( train_stats.recall, train_stats.precision, 'b', val_stats.recall, val_stats.precision, 'g') # Set title, legend, and axis labels. plt.title('{} Precision vs. Recall'.format(clip_type)) plt.legend(['Training', 'Validation']) plt.xlabel('Recall') plt.ylabel('Precision') # Set axis limits. lower_limit = settings.precision_recall_plot_lower_axis_limit plt.xlim((lower_limit, 1)) plt.ylim((lower_limit, 1)) # Configure grid. major_locator = MultipleLocator( settings.precision_recall_plot_major_tick_interval) minor_locator = MultipleLocator( settings.precision_recall_plot_minor_tick_interval) axes = plt.gca() axes.xaxis.set_major_locator(major_locator) axes.xaxis.set_minor_locator(minor_locator) axes.yaxis.set_major_locator(major_locator) axes.yaxis.set_minor_locator(minor_locator) plt.grid(which='both') plt.grid(which='minor', alpha=.4) pdf.savefig() plt.close() def create_results_file_path(file_name_format, clip_type): file_name = file_name_format.format(clip_type) return RESULTS_DIR_PATH / file_name def plot_roc_curves(clip_type, train_stats, val_stats): file_path = create_results_file_path( ROC_PLOT_FILE_NAME_FORMAT, clip_type) with PdfPages(file_path) as pdf: plt.figure(figsize=(6, 6)) # Plot training and validation curves. plt.plot( train_stats.false_positive_rate, train_stats.true_positive_rate, 'b', val_stats.false_positive_rate, val_stats.true_positive_rate, 'g') # Set title, legend, and axis labels. plt.title('{} ROC'.format(clip_type)) plt.legend(['Training', 'Validation']) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') # Set axis limits. plt.xlim((0, 1)) plt.ylim((0, 1)) # Configure grid. major_locator = MultipleLocator(.25) minor_locator = MultipleLocator(.05) axes = plt.gca() axes.xaxis.set_major_locator(major_locator) axes.xaxis.set_minor_locator(minor_locator) axes.yaxis.set_major_locator(major_locator) axes.yaxis.set_minor_locator(minor_locator) plt.grid(which='both') plt.grid(which='minor', alpha=.4) pdf.savefig() plt.close() def write_precision_recall_csv_file(clip_type, train_stats, val_stats): file_path = create_results_file_path(PR_CSV_FILE_NAME_FORMAT, clip_type) with open(file_path, 'w') as csv_file: csv_file.write(PR_CSV_FILE_HEADER) columns = ( train_stats.threshold, train_stats.recall, train_stats.precision, val_stats.recall, val_stats.precision ) for row in zip(*columns): csv_file.write(PR_CSV_FILE_ROW_FORMAT.format(*row)) def save_classifier(model, settings, stats): clip_type = settings.clip_type path = classifier_utils.get_model_file_path(clip_type) path.parent.mkdir(exist_ok=True) model.save(str(path)) settings = create_classifier_settings(settings, stats) text = yaml_utils.dump(settings, default_flow_style=False) path = classifier_utils.get_settings_file_path(clip_type) path.write_text(text) def create_classifier_settings(s, stats): return dict( clip_type=s.clip_type, waveform_sample_rate=float(s.waveform_sample_rate), waveform_start_time=s.waveform_start_time, waveform_duration=s.waveform_duration, spectrogram_window_size=s.spectrogram_window_size, spectrogram_hop_size=s.spectrogram_hop_size, spectrogram_start_freq=s.spectrogram_start_freq, spectrogram_end_freq=s.spectrogram_end_freq, spectrogram_min_power=float(s.spectrogram_min_power), spectrogram_max_power=float(s.spectrogram_max_power), spectrogram_mean=float(s.spectrogram_mean), spectrogram_standard_dev=float(s.spectrogram_standard_dev), classification_threshold=find_classification_threshold( stats, s.min_recall) ) def find_classification_threshold(stats, min_recall): i = 
find_classification_threshold_index(stats, min_recall) return float(stats.threshold[i]) if __name__ == '__main__': main()
{"hexsha": "1ad98dd91e2715b5f16dedec9925950c6a184419", "size": 21652, "ext": "py", "lang": "Python", "max_stars_repo_path": "vesper/mpg_ranch/nfc_coarse_classifier_2_1/train_classifier.py", "max_stars_repo_name": "HaroldMills/NFC", "max_stars_repo_head_hexsha": "356b2234dc3c7d180282a597fa1e039ae79e03c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vesper/mpg_ranch/nfc_coarse_classifier_2_1/train_classifier.py", "max_issues_repo_name": "HaroldMills/NFC", "max_issues_repo_head_hexsha": "356b2234dc3c7d180282a597fa1e039ae79e03c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-01-12T12:41:29.000Z", "max_issues_repo_issues_event_max_datetime": "2015-01-12T12:41:29.000Z", "max_forks_repo_path": "vesper/mpg_ranch/nfc_coarse_classifier_2_1/train_classifier.py", "max_forks_repo_name": "HaroldMills/NFC", "max_forks_repo_head_hexsha": "356b2234dc3c7d180282a597fa1e039ae79e03c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.79943101, "max_line_length": 78, "alphanum_fraction": 0.6360613338, "include": true, "reason": "import numpy", "num_tokens": 5034}
import torch from torch.utils.data import Dataset import pandas as pd import os import numpy as np from torch.utils.data.dataloader import default_collate from util import tokenize class HowToVQA_Dataset(Dataset): def __init__( self, csv_path, caption, features_path, qmax_words=20, amax_words=20, train=True, n_pair=32, max_feats=20, bert_tokenizer=None, ): """ :param csv_path: path to a csv with video_id and video_path columns :param caption: dictionary mapping video_id to a dictionary mapping start, end, question and answer to corresponding lists :param features_path: path to the directory of features :param qmax_words: maximum number of words in the question :param amax_words: maximum number of words in the answer :param train: whether to train or validate :param n_pair: number of clips to sample from each video :param max_feats: maximum number of video features :param bert_tokenizer: BERT tokenizer """ self.data = pd.read_csv(csv_path) self.caption = caption self.feature_path = features_path self.qmax_words = qmax_words self.amax_words = amax_words self.train = train self.n_pair = n_pair self.max_feats = max_feats self.bert_tokenizer = bert_tokenizer def __len__(self): return len(self.data) def _get_text(self, caption, n_pair_max, train=True): n_caption = len(caption["start"]) n_pair_max = min(n_caption, n_pair_max) start = np.zeros(n_pair_max) end = np.zeros(n_pair_max) atxt = [""] * n_pair_max qtxt = [""] * n_pair_max r_ind = ( np.random.choice(range(n_caption), n_pair_max, replace=False) if train else np.arange(n_pair_max) ) # sample clips for i in range(n_pair_max): ind = r_ind[i] atxt[i], qtxt[i], start[i], end[i] = ( str(caption["answer"][ind]), str(caption["question"][ind]), caption["start"][ind], caption["end"][ind], ) question = tokenize( qtxt, self.bert_tokenizer, add_special_tokens=True, max_length=self.qmax_words, dynamic_padding=True, truncation=True, ) answer = tokenize( atxt, self.bert_tokenizer, add_special_tokens=True, max_length=self.amax_words, dynamic_padding=True, truncation=True, ) return start, end, atxt, answer, qtxt, question def _get_video(self, vid_path, start, end): feature_path = os.path.join(self.feature_path, vid_path) video = torch.from_numpy(np.load(feature_path)).float() video_len = np.zeros(len(start)) feature = torch.zeros(len(start), self.max_feats, video.shape[-1]) for i in range(len(start)): s = int(start[i]) e = int(end[i]) + 1 slice = video[s:e] video_len[i] = min(self.max_feats, len(slice)) if len(slice) < self.max_feats: padded_slice = torch.cat( [slice, torch.zeros(self.max_feats - len(slice), slice.shape[1])] ) else: padded_slice = slice[: self.max_feats] feature[i] = padded_slice return feature, video_len def __getitem__(self, idx): video_id = self.data["video_id"].values[idx] video_path = self.data["video_path"].values[idx] start, end, atxt, answer, qtxt, question = self._get_text( self.caption[video_id], self.n_pair, train=self.train ) video, video_len = self._get_video(video_path, start, end) return { "video_id": video_id, "video_path": video_path, "atxt": atxt, "qtxt": qtxt, "start": start, "end": end, "video": video, "video_len": video_len, "answer": answer, "question": question, } def howtovqa_collate_fn(batch): """ :param batch: [dataset[i] for i in N] :return: tensorized batch with the question and the ans candidates padded to the max length of the batch """ bs = len(batch) video_id = default_collate([batch[i]["video_id"] for i in range(bs)]) video_path = default_collate([batch[i]["video_path"] for i in range(bs)]) atxt = 
[batch[i]["atxt"] for i in range(bs)] atxt = [x for y in atxt for x in y] qtxt = [batch[i]["qtxt"] for i in range(bs)] qtxt = [x for y in qtxt for x in y] start = torch.cat([torch.from_numpy(batch[i]["start"]) for i in range(bs)], 0) end = torch.cat([torch.from_numpy(batch[i]["end"]) for i in range(bs)], 0) video = torch.cat([batch[i]["video"] for i in range(bs)], 0) video_len = torch.cat( [torch.from_numpy(batch[i]["video_len"]) for i in range(bs)], 0 ) ans = [batch[i]["answer"] for i in range(bs)] maxalen = max([x.shape[1] for x in ans]) answer = torch.zeros(sum(x.shape[0] for x in ans), maxalen).long() idx = 0 for i, tensor in enumerate(ans): n, l = tensor.shape answer[idx : idx + n, :l] = tensor idx += n que = [batch[i]["question"] for i in range(bs)] maxquelen = max([x.shape[1] for x in que]) question = torch.zeros(sum(x.shape[0] for x in que), maxquelen).long() idx = 0 for i, tensor in enumerate(que): n, l = tensor.shape question[idx : idx + n, :l] = tensor idx += n return { "video_id": video_id, "video_path": video_path, "atxt": atxt, "qtxt": qtxt, "start": start, "end": end, "video": video, "video_len": video_len, "answer": answer, "question": question, }
{"hexsha": "ecbd42fb6da25b37db0799ed6aa145b8d9f61e23", "size": 6224, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/howtovqa_loader.py", "max_stars_repo_name": "Tiamat-Tech/just-ask", "max_stars_repo_head_hexsha": "80725161e12ad0682b4c2091f61a5889a335ba21", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 59, "max_stars_repo_stars_event_min_datetime": "2021-07-22T22:53:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T06:58:17.000Z", "max_issues_repo_path": "data/howtovqa_loader.py", "max_issues_repo_name": "Tiamat-Tech/just-ask", "max_issues_repo_head_hexsha": "80725161e12ad0682b4c2091f61a5889a335ba21", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-10-13T11:24:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-23T06:54:38.000Z", "max_forks_repo_path": "data/howtovqa_loader.py", "max_forks_repo_name": "Tiamat-Tech/just-ask", "max_forks_repo_head_hexsha": "80725161e12ad0682b4c2091f61a5889a335ba21", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-07-22T23:40:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-01T02:42:29.000Z", "avg_line_length": 34.3867403315, "max_line_length": 131, "alphanum_fraction": 0.5536632391, "include": true, "reason": "import numpy", "num_tokens": 1493}
#!/usr/bin/env python3 import argparse import hashlib import numpy as np import os import pandas as pd import pkg_resources import pyarrow as pa import re import sys import traceback from datetime import datetime, timedelta, timezone from pyarrow import csv, feather from eccodes import * def getHash(out_file): md5 = hashlib.md5() with open(out_file, 'rb') as out_f: for chunk in iter(lambda: out_f.read(4096 * md5.block_size), b''): md5.update(chunk) checksum = md5.hexdigest() return checksum def is_bufr_matched(in_file, bufr_descriptor): warno = 184 rc = False with open(in_file, 'rb') as in_file_stream: while True: bufr = None try: bufr = codes_bufr_new_from_file(in_file_stream) if bufr is None: break unexpanded_descriptors = codes_get_array(bufr, 'unexpandedDescriptors') if bufr_descriptor in unexpanded_descriptors: rc = True codes_release(bufr) except: print('Warning', warno, ':', 'BUFR decode error on', in_file, 'has occurred. The file is not created', file=sys.stderr) rc = False return rc def get_ttaaii_cccc_ddhhmm_bbb_data_date_list(message, in_file, debug): warno = 185 ttaaii_cccc_ddhhmm_bbb_data_date_list = [] word = '' header_num = 0 message_counter = 0 data_date = '' while message_counter < len(message): if message[message_counter] == 10 or message[message_counter] == 13: if word: ttaaii_cccc_ddhhmm_bbb_data_date_list.append(word) break elif message[message_counter] == 32: if word: ttaaii_cccc_ddhhmm_bbb_data_date_list.append(word) word = '' header_num += 1 else: try: word += message[message_counter].to_bytes(1, 'little').decode() except: return [] message_counter += 1 if len(ttaaii_cccc_ddhhmm_bbb_data_date_list) == 3: ttaaii_cccc_ddhhmm_bbb_data_date_list.append('') if len(ttaaii_cccc_ddhhmm_bbb_data_date_list) == 4: in_file_mtime = datetime.utcfromtimestamp(os.path.getmtime(in_file)) ddhhmm = ttaaii_cccc_ddhhmm_bbb_data_date_list[2] if ddhhmm[0:2] == in_file_mtime.strftime('%d'): data_date = in_file_mtime.strftime('%Y%m%d') else: for timedelta_day in range(1, 28): data_date = (in_file_mtime + timedelta(days=-timedelta_day)).strftime('%Y%m%d') if ddhhmm[0:2] == data_date[6:8]: break else: data_date = '' if not data_date: for timedelta_day in range(1, 7): data_date = (in_file_mtime + timedelta(days=timedelta_day)).strftime('%Y%m%d') if ddhhmm[0:2] == data_date[6:8]: break ttaaii_cccc_ddhhmm_bbb_data_date_list.append(data_date) if debug and len(ttaaii_cccc_ddhhmm_bbb_data_date_list) == 5: print('Debug', ':', 'ttaaii_cccc_ddhhmm_bbb_data_date =', ttaaii_cccc_ddhhmm_bbb_data_date_list, file=sys.stderr) return ttaaii_cccc_ddhhmm_bbb_data_date_list def get_grib_subdir_list(grib_file): warno = 186 subdir_list = [] with open(grib_file, 'rb') as grib_file_stream: is_grib = True while 1: try: gid = codes_grib_new_from_file(grib_file_stream) if gid is None: break i_size = codes_get(gid, 'iDirectionIncrementInDegrees') j_size = codes_get(gid, 'jDirectionIncrementInDegrees') if i_size <= 0 and j_size > 0: i_size = j_size elif i_size > 0 and j_size <= 0: j_size = i_size subdir_list.append(str(i_size) + '_' + str(j_size) + '_' + str(codes_get(gid, 'latitudeOfFirstGridPointInDegrees')) + '_' + str(codes_get(gid, 'longitudeOfFirstGridPointInDegrees')) + '_' + str(codes_get(gid, 'latitudeOfLastGridPointInDegrees')) + '_' + str(codes_get(gid, 'longitudeOfLastGridPointInDegrees'))) subdir_list.append(str(codes_get(gid, 'dataDate')).zfill(8) + str(codes_get(gid, 'dataTime')).zfill(4)[0:4]) codes_release(gid) except: print('Warning', warno, ':', 'GRIB decode error on', grib_file, 'has occurred. 
The file is not created', file=sys.stderr) is_grib = False if not is_grib: return [] return subdir_list def create_file(in_file, my_cccc, message, start_char4, out_dir, conf_list, debug): warno = 187 in_file_name = os.path.basename(in_file) for conf_row in conf_list: if re.match(r'' + conf_row.file_name_pattern, in_file_name): ttaaii = '' cccc = '' ddhhmm = '' bbb = '' data_date = '' out_directory_list = [] out_directory_list.append(out_dir) ttaaii_cccc_ddhhmm_bbb_data_date_list = [] if re.match(r'^[A-Z][A-Z][A-Z][A-Z]$', start_char4): ttaaii_cccc_ddhhmm_bbb_data_date_list = get_ttaaii_cccc_ddhhmm_bbb_data_date_list(message, in_file, debug) if len(ttaaii_cccc_ddhhmm_bbb_data_date_list) == 5: ttaaii = ttaaii_cccc_ddhhmm_bbb_data_date_list[0] cccc = ttaaii_cccc_ddhhmm_bbb_data_date_list[1] ddhhmm = ttaaii_cccc_ddhhmm_bbb_data_date_list[2] bbb = ttaaii_cccc_ddhhmm_bbb_data_date_list[3] data_date = ttaaii_cccc_ddhhmm_bbb_data_date_list[4] out_directory_list.append(cccc) out_directory_list.append(conf_row.format) out_directory_list.append(conf_row.category) elif conf_row.cccc: cccc = conf_row.cccc out_directory_list.append(cccc) out_directory_list.append(conf_row.format) out_directory_list.append(conf_row.category) if conf_row.cccc and conf_row.cccc != cccc: continue if conf_row.file_extension == 'txt' and conf_row.text_pattern and not re.search(r'' + conf_row.text_pattern, message.decode("ascii", errors="ignore").replace(ttaaii, '', 1).replace(cccc, '', 1).replace('\r', ' ').replace('\n', ' ')): continue if conf_row.format == 'bufr' and not np.isnan(conf_row.bufr_descriptor) and not is_bufr_matched(in_file, conf_row.bufr_descriptor): continue if not re.match(r'^[A-Z][A-Z][A-Z][A-Z]$', cccc): print('Warning', warno, ':', 'cccc of', ttaaii, cccc, ddhhmm, bbb, 'on', in_file, 'is invalid. The file is not created', file=sys.stderr) return '' if conf_row.format == 'grib' or re.match(r'^GRIB$', start_char4): subdir_list = get_grib_subdir_list(in_file) if len(subdir_list) == 2: out_directory_list.extend(subdir_list) data_date = subdir_list[1][0:8] else: return '' elif not data_date and re.match(r'^BUFR$', start_char4): out_directory_list.append(conf_row.subcategory) is_bufr = True with open(in_file, 'rb') as bufr_file_stream: while True: try: bufr = codes_bufr_new_from_file(bufr_file_stream) if bufr is None: break year = codes_get_array(bufr, 'typicalYear')[0] month = codes_get_array(bufr, 'typicalMonth')[0] day = codes_get_array(bufr, 'typicalDay')[0] hour = codes_get_array(bufr, 'typicalHour')[0] minute = codes_get_array(bufr, 'typicalMinute')[0] codes_release(bufr) if month > 0 and month < 13 and day > 0 and day < 32 and hour > -1 and hour < 24 and minute > -1 and minute <60: data_date = str(year).zfill(4) + str(month).zfill(2) + str(day).zfill(2) out_directory_list.append(data_date + str(hour).zfill(2) + str(minute).zfill(2)) else: is_bufr = False print('Warning', warno, ':', 'BUFR on', in_file, 'is invalid datetime. The file is not created', file=sys.stderr) except: is_bufr = False print('Warning', warno, ':', 'BUFR decode error on', in_file, 'has occurred. 
The file is not created', file=sys.stderr) if not is_bufr: return '' else: out_directory_list.append(conf_row.subcategory) out_directory_list.append(data_date + ddhhmm[2:6]) out_directory = '/'.join(out_directory_list) os.makedirs(out_directory, exist_ok=True) if ttaaii: out_file_list = [] out_file_list.append(out_directory) out_file_list.append('/A_') out_file_list.append(ttaaii) out_file_list.append(cccc) out_file_list.append(ddhhmm) out_file_list.append(bbb) out_file_list.append('_C_') out_file_list.append(my_cccc) out_file_list.append('_') out_file_list.append(datetime.now(timezone.utc).strftime('%Y%m%d%H%M%S%f')) out_file_list.append('.') out_file_list.append(conf_row.file_extension) out_file = ''.join(out_file_list) else: out_file_list = [] out_file_list.append(out_directory) out_file_list.append(os.path.basename(in_file)) out_file = '/'.join(out_file_list) with open(out_file, 'wb') as out_file_stream: out_file_stream.write(message) return out_file print('Warning', warno, ':', in_file, 'is not matched on configuration file. The file is not created', file=sys.stderr) return '' def create_file_from_batch(in_file, my_cccc, message, out_dir, tmp_grib_file, tmp_bufr_file, conf_list, debug): warno = 188 ttaaii_cccc_ddhhmm_bbb_data_date_list = get_ttaaii_cccc_ddhhmm_bbb_data_date_list(message, in_file, debug) if len(ttaaii_cccc_ddhhmm_bbb_data_date_list) != 5: print('Warning', warno, ':', 'header of', ttaaii_cccc_ddhhmm_bbb_data_date_list, 'on', in_file, 'is invalid. The file is not created', file=sys.stderr) return '' ttaaii = ttaaii_cccc_ddhhmm_bbb_data_date_list[0] cccc = ttaaii_cccc_ddhhmm_bbb_data_date_list[1] ddhhmm = ttaaii_cccc_ddhhmm_bbb_data_date_list[2] bbb = ttaaii_cccc_ddhhmm_bbb_data_date_list[3] data_date = ttaaii_cccc_ddhhmm_bbb_data_date_list[4] for conf_row in conf_list: if re.match(r'' + conf_row.ttaaii_pattern, ttaaii) and re.match(r'' + conf_row.file_name_pattern, os.path.basename(in_file)): if conf_row.cccc and conf_row.cccc != cccc: continue if conf_row.file_extension == 'txt' and conf_row.text_pattern and not re.search(r'' + conf_row.text_pattern, message.decode("ascii", errors="ignore").replace(ttaaii, '', 1).replace(cccc, '', 1).replace('\r', ' ').replace('\n', ' ')): continue if conf_row.format == 'bufr' and not np.isnan(conf_row.bufr_descriptor): with open(tmp_bufr_file, 'wb') as tmp_bufr_file_stream: tmp_bufr_file_stream.write(message) if not is_bufr_matched(tmp_bufr_file, conf_row.bufr_descriptor): continue if not re.match(r'^[A-Z][A-Z][A-Z][A-Z]$', cccc): print('Warning', warno, ':', 'cccc of', ttaaii, cccc, ddhhmm, bbb, 'on', in_file, 'is invalid. 
The file is not created', file=sys.stderr) return '' if data_date and re.match(r'([0-1][0-9]|2[0-4])', ddhhmm[2:4]) and re.match(r'[0-5][0-9]', ddhhmm[4:6]): out_directory_list = [] out_directory_list.append(out_dir) out_directory_list.append(cccc) out_directory_list.append(conf_row.format) out_directory_list.append(conf_row.category) if conf_row.format == 'grib': with open(tmp_grib_file, 'wb') as tmp_grib_file_stream: tmp_grib_file_stream.write(message) subdir_list = get_grib_subdir_list(in_file) if len(subdir_list) == 2: out_directory_list.extend(subdir_list) data_date = subdir_list[1][0:8] else: return '' else: out_directory_list.append(conf_row.subcategory) out_directory_list.append(data_date + ddhhmm[2:6]) out_directory = '/'.join(out_directory_list) os.makedirs(out_directory, exist_ok=True) out_file_list = [] out_file_list.append(out_directory) out_file_list.append('/A_') out_file_list.append(ttaaii) out_file_list.append(cccc) out_file_list.append(ddhhmm) out_file_list.append(bbb) out_file_list.append('_C_') out_file_list.append(my_cccc) out_file_list.append('_') out_file_list.append(datetime.now(timezone.utc).strftime('%Y%m%d%H%M%S%f')) out_file_list.append('.') out_file_list.append(conf_row.file_extension) out_file = ''.join(out_file_list) with open(out_file, 'wb') as out_file_stream: out_file_stream.write(message) return out_file else: print('Warning', warno, ':', 'ddhhmm of', ttaaii, cccc, ddhhmm, bbb, 'on', in_file, 'is invalid. The file is not created', file=sys.stderr) return '' print('Warning', warno, ':', in_file, 'is not matched on configuration file. The file is not created', file=sys.stderr) return '' def convert_to_cache(my_cccc, input_file_list, out_dir, checksum_arrow_file, out_list_file, tmp_grib_file, tmp_bufr_file, conf_list, debug): warno = 189 checksum_df = feather.read_feather(checksum_arrow_file) checksum_list = [] now = datetime.utcnow() for in_file in input_file_list: if debug: print('Debug', ':', 'in_file =', in_file, file=sys.stderr) with open(in_file, 'rb') as in_file_stream: batch_type = 0 message_length = 0 start_byte4 = None start_char4 = None try: start_byte4 = in_file_stream.read(4) if len(start_byte4) < 4: break start_char4 = start_byte4.decode() except: print('Warning', warno, ':', 'The start 4 bytes of', in_file, 'are not strings.', file=sys.stderr) while start_char4: if debug: print('Debug', ':', 'start_char4 =', start_char4, file=sys.stderr) message = bytearray() if re.match(r'\d\d\d\d', start_char4): batch_type = 1 message_length = int(start_char4 + in_file_stream.read(4).decode()) try: if message_length == 0: break format_identifier = int(in_file_stream.read(2).decode()) if format_identifier == 0: in_file_stream.read(10) # skip message_length -= 10 elif format_identifier == 1: in_file_stream.read(3) # skip message_length -= 3 else: print('Warning', warno, ':', 'The format identifier of', in_file, 'is not 00 or 01.', file=sys.stderr) break except: print('Warning', warno, ':', 'The bytes of message length on', in_file, 'are not strings.', file=sys.stderr) break elif start_char4 == '####': try: batch_type = 2 in_file_stream.read(3) # skip '018' message_length = int(in_file_stream.read(6).decode()) in_file_stream.read(5) # skip ####\n except: print('Warning', warno, ':', 'The bytes of message length on', in_file, 'are not strings.', file=sys.stderr) break elif start_char4 == '****': try: batch_type = 3 message_length = int(in_file_stream.read(10).decode()) in_file_stream.read(5) # skip ****\n except: print('Warning', warno, ':', 'The bytes of 
message length on', in_file, 'are not strings.', file=sys.stderr) break else: try: message.extend(start_char4.encode()) message.extend(in_file_stream.read()) except: print('Warning', warno, ':', 'can not encode or read', in_file, file=sys.stderr) break out_file = create_file(in_file, my_cccc, message, start_char4, out_dir, conf_list, debug) if out_file: out_file_checksum = getHash(out_file) if len(checksum_df[checksum_df['checksum'] == out_file_checksum].index) == 0 and not out_file_checksum in checksum_list: checksum_list.append(out_file_checksum) print(out_file, file=out_list_file) else: os.remove(out_file) break if message_length <= 0: if debug: print('Debug', ':', 'The message length of', in_file, 'is invalid. (<=0)', file=sys.stderr) break if debug: print('Debug', ':', 'batch_type =', batch_type, ', message_length =', message_length, file=sys.stderr) if batch_type == 1: message = bytearray(in_file_stream.read(message_length)) elif batch_type == 2 or batch_type == 3: message = bytearray(in_file_stream.read(message_length)) message_counter = len(message) - 1 while message_counter > -1: if message[message_counter] == 3 or message[message_counter] == 10 or message[message_counter] == 13 or message[message_counter] == 32: message.pop(message_counter) else: break message_counter -= 1 message_counter = 0 while message_counter < len(message): if message[0] == 10 or message[0] == 13 or message[0] == 32: message.pop(0) else: break message_counter += 1 out_file = create_file_from_batch(in_file, my_cccc, message, out_dir, tmp_grib_file, tmp_bufr_file, conf_list, debug) if out_file: out_file_checksum = getHash(out_file) if len(checksum_df[checksum_df['checksum'] == out_file_checksum].index) == 0 and not out_file_checksum in checksum_list: checksum_list.append(out_file_checksum) print(out_file, file=out_list_file) else: os.remove(out_file) try: byte4 = in_file_stream.read(4) if len(byte4) < 4: break start_char4 = byte4.decode() except: start_char4 = None print('Warning', warno, ':', 'The start 4 bytes of the message on', in_file, 'are not strings.', file=sys.stderr) if len(checksum_list) > 0: td = timedelta(days=1) new_checksum_df = pd.concat([checksum_df[(checksum_df['mtime'] >= now - td) & (checksum_df['mtime'] <= now + td)], pd.DataFrame({"mtime": [now] * len(checksum_list), "checksum": checksum_list})]) with open(checksum_arrow_file, 'bw') as checksum_arrow_f: feather.write_feather(new_checksum_df, checksum_arrow_f, compression='zstd') def main(): errno=198 parser = argparse.ArgumentParser() parser.add_argument('my_cccc', type=str, metavar='my_cccc') parser.add_argument('input_directory_or_list_file', type=str, metavar='input_directory_or_list_file') parser.add_argument('output_directory', type=str, metavar='output_directory') parser.add_argument('checksum_arrow_file', type=str, metavar='checksum.arrow') parser.add_argument('--output_list_file', type=argparse.FileType('w'), metavar='output_list_file', default=sys.stdout) parser.add_argument('--tmp_grib_file', type=str, metavar='tmp_grib_file', default='tmp_grib_file.bin') parser.add_argument('--tmp_bufr_file', type=str, metavar='tmp_bufr_file', default='tmp_bufr_file.bin') parser.add_argument("--config", type=str, metavar='conf_batch_to_cache.csv', default=pkg_resources.resource_filename(__name__, 'conf_batch_to_cache.csv')) parser.add_argument("--debug", action='store_true') args = parser.parse_args() input_file_list = [] if not re.match(r'^[A-Z][A-Z0-9]{3}$', args.my_cccc): print('Error', errno, ':', 'CCCC of', args.my_cccc, 'is invalid 
(!=^[A-Z][A-Z0-9]{3}$).', file=sys.stderr) sys.exit(errno) if not os.access(args.input_directory_or_list_file, os.F_OK): print('Error', errno, ':', args.input_directory_or_list_file, 'does not exist.', file=sys.stderr) sys.exit(errno) if not os.access(args.output_directory, os.F_OK): os.makedirs(args.output_directory, exist_ok=True) if not os.access(args.config, os.F_OK): print('Error', errno, ':', args.config, 'does not exist.', file=sys.stderr) sys.exit(errno) if os.path.isdir(args.input_directory_or_list_file) and os.access(args.input_directory_or_list_file, os.R_OK) and os.access(args.input_directory_or_list_file, os.X_OK): in_dir_entry_list = [f for f in os.scandir(args.input_directory_or_list_file) if os.path.isfile(f) and os.access(f, os.R_OK) and not re.match(r'(^.*\.tmp$|^\..*$)', f.name) and os.path.getsize(f) > 4] input_file_list = [in_dir_entry.path for in_dir_entry in sorted(in_dir_entry_list, key=os.path.getmtime)] elif os.path.isfile(args.input_directory_or_list_file) and os.access(args.input_directory_or_list_file, os.R_OK): with open(args.input_directory_or_list_file, 'r') as in_list_file_stream: input_file_list = [in_file.rstrip('\n') for in_file in in_list_file_stream.readlines()] else: print('Error', errno, ':', args.input_directory_or_list_file, 'is not directory/file/readable/executable.', file=sys.stderr) sys.exit(errno) if not os.path.isdir(args.output_directory): print('Error', errno, ':', args.output_directory, 'is not directory.', file=sys.stderr) sys.exit(errno) if not os.path.exists(args.checksum_arrow_file): with open(args.checksum_arrow_file, 'bw') as out_f: property_batch = pa.record_batch([[], []], names=['mtime', 'checksum']) property_table = pa.Table.from_batches([property_batch]) feather.write_feather(property_table, out_f, compression='zstd') if not os.path.isfile(args.checksum_arrow_file): print('Error', errno, ':', args.checksum_arrow_file, 'is not file.', file=sys.stderr) sys.exit(errno) if not os.path.isfile(args.config): print('Error', errno, ':', args.config, 'is not file.', file=sys.stderr) sys.exit(errno) if not (os.access(args.output_directory, os.R_OK) and os.access(args.output_directory, os.W_OK) and os.access(args.output_directory, os.X_OK)): print('Error', errno, ':', args.output_directory, 'is not readable/writable/executable.', file=sys.stderr) sys.exit(errno) if not os.access(args.checksum_arrow_file, os.R_OK) or not os.access(args.checksum_arrow_file, os.W_OK): print('Error', errno, ':', args.checksum_arrow_file, 'is not readable and writable.', file=sys.stderr) sys.exit(errno) if not os.access(args.config, os.R_OK): print('Error', errno, ':', args.config, 'is not readable.', file=sys.stderr) sys.exit(errno) try: conf_list = list(csv.read_csv(args.config).to_pandas().itertuples()) convert_to_cache(args.my_cccc, input_file_list, args.output_directory, args.checksum_arrow_file, args.output_list_file, args.tmp_grib_file, args.tmp_bufr_file, conf_list, args.debug) except: traceback.print_exc(file=sys.stderr) sys.exit(199) if __name__ == '__main__': main()
{"hexsha": "dc23c67861dedd785f83f8d624383f7c80641956", "size": 25822, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/meteorological_preprocessor/met_pre_batch_to_cache.py", "max_stars_repo_name": "public-tatsuya-noyori/meteorological_preprocessor", "max_stars_repo_head_hexsha": "fc90bbb5c63b917b6d3ec6fb69a392801a63220e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-05T09:44:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-05T09:44:24.000Z", "max_issues_repo_path": "src/meteorological_preprocessor/met_pre_batch_to_cache.py", "max_issues_repo_name": "public-tatsuya-noyori/meteorological_preprocessor", "max_issues_repo_head_hexsha": "fc90bbb5c63b917b6d3ec6fb69a392801a63220e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/meteorological_preprocessor/met_pre_batch_to_cache.py", "max_forks_repo_name": "public-tatsuya-noyori/meteorological_preprocessor", "max_forks_repo_head_hexsha": "fc90bbb5c63b917b6d3ec6fb69a392801a63220e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-06T11:04:22.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-06T11:04:22.000Z", "avg_line_length": 54.4767932489, "max_line_length": 327, "alphanum_fraction": 0.5752459143, "include": true, "reason": "import numpy", "num_tokens": 5988}
#!/usr/bin/env python ''' Data structure utils module for generic use. Has functions that would be really nice to add to string, dictionary or DataFrame types. Could extend some of these to add capability. Some of the functions need some heavy testing and debug. ''' import re import pandas as pd import numpy as np import logging import datetime as dt _ANYOP_ = re.compile(r'[><=&|]') logger = logging.getLogger(__name__) ## =================================================================== ## ## ROUNDING FUNCTIONS ## =================================================================== ## # Simple round off to 2 & 3 dec places wrappers R2 = lambda x: round(x, 2) R3 = lambda x: round(x, 3) ## ------------------------------------------------------------------- ## def RN(num, places=2): """Simple round off to 2 dec places by default wrapper. Fails silently on non floats""" try: f = round(num, places) except: f = num return f ## ------------------------------------------------------------------- ## def roundoff_list(alist, places=2): """Rounds off contents in a list to '2' places by default. Fails silently on non floats. NOTE. list is modified INPLACE""" for i, v in enumerate(alist): alist[i] = RN(v, places) return ## ------------------------------------------------------------------- ## def roundoff_dict(adict, places=2): """Rounds off contents in a dictionary table to '2' places by default. Can handle one level deep of list of nums. Fails silently on non floats. NOTE. Dictionary is modified INPLACE""" for k,v in list(adict.items()): if type(v) == list: roundoff_list(v, places) else: adict[k] = RN(v, places) return ## ------------------------------------------------------------------- ## def roundoff_df(df, places=0, columns=None, indices=None): """Round off all entries in DataFrame. If no specific columns or indices are provided all DataFrame elements are rounded. Returns a DataFrame with rounding applied places : number of decimal places to round columns: None or list of columns to apply rounding indices: None or list of indices to apply rounding """ tmp = df.copy() if columns==None and indices==None: # round all for j in tmp.columns: tmp[j] = tmp[j].round(places) elif columns!=None and indices==None: # round specific columns for j in columns: tmp[j] = tmp[j].round(places) elif columns==None and indices!=None: # round specific rows for i in indices: tmp.ix[i] = tmp.ix[i].round(places) else: # specific rows & columns (slow at the moment) for i in indices: tmp.ix[i, columns] = tmp.ix[i, columns].round(places) return tmp ## =================================================================== ## ## DATAFRAME BUILDING FUNCTIONS ## =================================================================== ## def parse2df(fname): '''Returns & generates a DataFrame from a file that has free from spacing BUT has the header position specified with "|". Currently not changeable but may be modified in the future. Helps deal with non-CSV format but that has maligned tabs and spaces. ''' # NOTE. 
This is currently done in two passes to make use of exisiting # function txt2df & is a little slow, but ok for small files to parse try: fin = open (fname) except: print("Could not open {} for reading!".format(fname)) txtlist = fin.readlines() # slurp whole thing fin.close() sep_pragma = '#<pr:sep>' txtlist = [t.replace(sep_pragma,' '*len(sep_pragma)) if t.lstrip().startswith(sep_pragma) else t for t in txtlist] df = txt2df(''.join(txtlist), header_sep='|', header=True, skip_comment=True, index='default') return df ## ------------------------------------------------------------------- ## def txt2df(strtxt, header_sep='|', header=True, skip_comment=True, index='default', strip_pct=True): '''Returns & generates a DataFrame from a string that has free form spacing BUT has the header position specified with "|". Currently not changeable but may be modified in the future. Helps deal with non-CSV format but that has maligned tabs and spaces. strtxt: blob of text that needs to be parsed header: [True|False] - has a header or not header_sep: character marking positions of string separation skip_comment: [True|False] - determine if comments are to be skipped over. Empty lines are always ignored. currently only '#' is treated as comment index: use 'default', None, or any other valid column strip_pct: If True, removes '%' characters from txt. Returns: A pd.DataFrame object ''' positions = [] lol = [] sep_found = False for line in strtxt.split('\n'): if len(line.strip()) == 0: # skip blank continue elif not sep_found and line.lstrip().startswith(header_sep): positions = [i for (i, c) in enumerate(line) if c == header_sep] continue elif (line.lstrip()).startswith('#'): # skip comment continue line = line.replace('\t', ' ') if len(positions) == 0: # use split by space by default, basically csv type lol.append( line.strip().split() ) else: startpos = [0] + positions[:] endpos = positions[:] + [len(line)] tmp = [line[a:b].strip() for a,b in zip(startpos,endpos)] if strip_pct: tmp[1:] = [t.replace('%','') for t in tmp[1:]] # Remove % symbols from data text lol.append(tmp) index_name = lol[0][0] if index == 'default' else index df = pd.DataFrame(lol[1:], columns =lol[0]) if index_name != None: df.set_index(index_name, inplace=True) return df ## =================================================================== ## ## PRINTING FUNCTIONS ## =================================================================== ## def pprint_dict(adict, orderlist=[]): """Prints out a dictionary (eg stock quote) nicely in desired print orderlist.""" if adict == {}: return keyset = set(adict.keys()) orderset = set(orderlist) if not orderset.issubset(keyset): logger.warn( "Specified order is mismatched. 
Bad fields will be ignored") for k in orderset: if k not in keyset: orderlist.remove(k) # now print in clean order remainset = keyset.symmetric_difference(orderlist) maxlen = max( [len(k) for k in keyset] ) printsets = orderlist + list(remainset) for k in printsets: v = adict[k] if type(v) == pd.Series or type(v) == pd.DataFrame: print('-' * 80) print(v) print('-' * 80) continue if type(v) == dt.datetime: v = dt.datetime.ctime(v) print(k.ljust(maxlen), ':', v) ## =================================================================== ## ## OTHER UTILITIES ## =================================================================== ## def isnumeric(a): """Returns True if 'a' is an int, float, or a string that could be converted to an int or float""" if type(a) == int or type(a) == float: return True elif type(a) == str: if a.replace('.','').isdigit() and a.count('.') < 2: return True elif np.dtype(a) == int or np.dtype(a) == float: return True return False ## ------------------------------------------------------------------- ## def reorder_list(origlist, orderlist, qualifier='any'): """ origlist: Given list of labels eg: ['Strike', 'Exp', 'Ask', 'Bid'] orderlist: List of labels in desired order, eg: ['Exp', 'Strike'] qualifier: One of ['any'|'before'|'after'|'begin'|'end'] any - any position as longer as order is satisfied before - insert labels before position of the last entry after - insert labels after position of first entry begin - set the order to beginning of list of all labels end - set the order to end of the list of all labels NOTE1. origlist is expected to have UNIQUE entries ONLY NOTE2. All positions of given orderlist will be adjacent to each other.""" if len(origlist) < 2: return origlist if len(orderlist) < 2: return origlist keylist = origlist[:] # need copy to avoid messing up input deslist = orderlist[:] # need copy to avoid messing up input keyset = set(keylist) orderset = set(deslist) if not orderset.issubset(keyset): logger.warn( "Specified order is mismatched. Bad fields will be ignored") for k in orderset: if k not in keyset: deslist.remove(k) if qualifier in ('any'): idx = keylist.index(deslist[0]) for k in deslist: keylist.remove(k) keylist = keylist[:idx] + deslist + keylist[idx:] if qualifier in ('after'): idx = keylist.index(deslist[0]) firstleg, lastleg = keylist[:idx], keylist[idx+1:] for k in deslist[1:]: lastleg.remove(k) keylist = firstleg + deslist + lastleg if qualifier == 'before': idx = keylist.index(deslist[-1]) firstleg, lastleg = keylist[:idx], keylist[idx+1:] for k in deslist[:-1]: firstleg.remove(k) keylist = firstleg + deslist + lastleg if qualifier == 'begin': for k in deslist: keylist.remove(k) keylist = deslist + keylist if qualifier == 'end': for k in deslist: keylist.remove(k) keylist = keylist + deslist return keylist ## ------------------------------------------------------------------- ## def df_reorder_columns(df, orderlist=[], qualifier='any'): """Sets the order of columns of a dataframe in desired order. Note that behavior of qualifier is same as reorder_list() function. 
""" keylist = reorder_list(list(df.columns), orderlist, qualifier) return df.reindex(keylist, axis=1) # --------------------------------------------------------------- # def filter_column(df, qr, col=None): """Filters out indices from an a DataFrame using the same symantics as DataFrame indexing, and returns a new DataFrame df: DataFrame input (unmodified) qr: Simple query string that supports following forms: int, float, range, relational, unary & binary logical ops col: if None, applies the query on the index, else matching col Eg, qr could be: 2, 2:3, <1, !=2, 10.5:10.75, <3 | >5, >7 & <8 They ALL MUST be strings though. Returns a an empty DataFrame if it fails. """ opset = _ANYOP_.findall(qr) inner_eval = False col = 'index' if col==None else col if opset: inner_eval = True if '&' in opset: qs = qr.split('&') qs = '(df.{} {}) & (df.{} {})'.format(col, qs[0], col, qs[1]) elif '|' in opset: qs = qr.split('|') qs = '(df.{} {}) | (df.{} {})'.format(col, qs[0], col, qs[1]) else: qs = '(df.{} {})'.format(col, qr) print('DEBUG', qs) else: if col == 'index': qs = 'df.ix[{}]'.format(qr) else: logger.error("Could not process evaluation of '{}. Returning empty DF'".format(qs)) return pd.DataFrame( {} ) logger.debug('Evaluating {}'.format(qs)) try: dfout = df[eval(qs)] if inner_eval else eval(qs) except: logger.error("Could not process evaluation of '{}. Returning empty DF'".format(qs)) dfout = pd.DataFrame( {} ) return dfout # either a dataframe or a series ## ------------------------------------------------------------------- ## def columnize(tbl, strip_header=True): """Wrapper for converting a table in list of row (vector) format into list of column (vector) format by slicing it. M rows of N-elem list/tuple would return N-rows of M-elem lists eg: 100 rows x (1x3) tuples => 3 rows of (100x1) vectors. So: [['Strike', 'Bid', 'Ask', [np.array(73.0, 73.5, 74.0), [73.0 , 2.65, 2.70, => np.array(2.65, 2.47, 2.30), [73.5 , 2.47, 2.52, np.array(2.70, 2.52, 2.36)] [74.0 , 2.52, 2.36]] NOTE the conversion of floats to strs in the transposed format is not kept by default IF the header is included ie, strip_header=False. Some massaging needed then. """ if len(tbl)==0 or (len(tbl)==1 and strip_header): logger.error('Empty table entered. No action taken') return [] if strip_header: amat = np.matrix( tbl[1:] ) # MUST be a consistent 2-D table else: amat = np.matrix( tbl ) # n=inject to preserve native data types - except int->float row_types = [type(a) for a in tbl[1]] row_types = [float if a==int else a for a in row_types] clist = amat.transpose().tolist() return [np.array(c, dtype=ntype) for c,ntype in zip(clist,row_types)] ## ------------------------------------------------------------------- ## def broadcast(alist, n, axis=1): """Broadcasts a list like object along columns (axis=1) or along rows (axis=0) alist: list like object, pandas pandas.Series, or numpy.array n : int - number of times to replicate axis : 0|1 - 0: work down rows, 1: work along columns Works better with numpy.ndarray & pd.Series objects. NOTE. 
if list has non-homogenous types, typecasting will be performed as per numpy rules Eg: broadcast([1 2 3 4], 3, axis=0) -> [[1, 2, 3, 4],[1, 2, 3, 4],[1, 2, 3, 4]] - 3x4 broadcast([1 2 3 4], 3, axis=1) -> [[1, 1, 1],[2, 2, 2],[3, 3, 3],[4, 4, 4]] - 4x3 A list|narray|matrix type returns the same type back but a Series returns a DataFrame """ aa = ma = sa = alist # just for convenience if type(alist)==list: if axis==0: return [alist]*n # easy else: return (np.array([alist]).T*np.ones(n)).tolist() #pylint: disable=no-member elif type(aa)==np.ndarray: if axis==0: return np.array( np.mat(np.ones(n)).T*aa ) else: return np.array([aa]).T*np.ones(n) #pylint: disable=no-member elif type(ma)==np.matrix: if axis==0: return np.mat(np.ones(n)).T*alist else: return alist.T*np.ones(n) elif type(sa)==pd.Series: if axis==0: return pd.DataFrame( np.mat(np.ones(n)).T*np.mat(sa) ) else: return pd.DataFrame( np.mat(aa).T*np.ones(n) ) else: print('Unrecognized list like object', type(alist) , 'entered') return None ## =================================================================== ## if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) df = parse2df('../testdir/test_parse2df.txt') print(df)
{"hexsha": "b78df6f2c1e1f0f1911d7913e006c13f23092ac0", "size": 15006, "ext": "py", "lang": "Python", "max_stars_repo_path": "mhut/datautils.py", "max_stars_repo_name": "mhassan1900/MHut", "max_stars_repo_head_hexsha": "bec1cb0573fdd4f0404aec9ecd116026d748d977", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mhut/datautils.py", "max_issues_repo_name": "mhassan1900/MHut", "max_issues_repo_head_hexsha": "bec1cb0573fdd4f0404aec9ecd116026d748d977", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-01-14T16:14:44.000Z", "max_issues_repo_issues_event_max_datetime": "2017-06-21T03:12:04.000Z", "max_forks_repo_path": "mhut/datautils.py", "max_forks_repo_name": "mhassan1900/MHut", "max_forks_repo_head_hexsha": "bec1cb0573fdd4f0404aec9ecd116026d748d977", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0862944162, "max_line_length": 101, "alphanum_fraction": 0.5539784086, "include": true, "reason": "import numpy", "num_tokens": 3700}
""" Creating a simulation: Simulation class ======================================= Both initialization and running the simulation is done by interacting with an instance of :py:class:`polychrom.simulation.Simulation` class. Overall parameters ------------------ Overall technical parameters of a simulation are generally initialized in the constructor of the Simulation class. :py:meth:`polychrom.simulation.Simulation.__init__` . This includes **Techcnical parameters not affecting the output of simulations** * Platform (cuda (usually), opencl, or CPU (slow)) * GPU index * reporter (where to save results): see :py:mod`polychrom.hdf5_reporter` **Parameters affecting the simulation** * number of particles * integrator (we usually use variable Langevin) + error tolerance of integrator * collision rate * Whether to use periodic boundary conditions (PBC) * timestep (if using non-variable integrator) **Parameters that are changed rarely, but may be useful** * particle mass, temperature and length scale * kinetic energy at which to raise an error * OpenMM precision * Rounding before saving (default is to 0.01) Starting conformation is loaded using :meth:`polychrom.simulation.Simulation.set_data` method. Many tools for creating starting conformations are in :mod:`polychrom.starting_conformations` Adding forces ------------- **Forces** define the main aspects of a given simulation. Polymer connectivity, confinement, crosslinks, tethering monomers, etc. are all defined as different forces acting on the particles. Typicall used forces are listed in :py:mod:`polychrom.forces` module. Forces out of there can be added using :py:meth:`polychrom.simulation.Simulation.add_force` method. Forces and their parameters are an essential part of nearly any polymer simulations. Some forces have just a few paramters (e.g. spherical confinement just needs a radius), while other forces may have lots of parameters and can define complex structures. For example, harmonidBondForce with a specially-created bond list was used to create a backbone-plectoneme conformation in Caulobacter simulations (Le et al, Science 2013). Same harmonic bonds that change over time are used to simulate loop extrusion as in (Fudenberg, 2016). Some forces need to be added together. Those include forces defining polymer connectivity. Those forces are combined into **forcekits**. Forcekits are defined in :py:mod:`polychrom.forcekits` module. The only example of a forcekit for now is defining polymer connectivity using bonds, polymer stiffness, and inter-monomer interaction ("nonbonded force"). Some forces were written for openmm-polymer library and were not fully ported/tested into the polychrom library. Those forces reside in :py:mod:`polychrom.legacy.forces` module. Some of them can be used as is, and some of them would need to be copied to your code and potentially conformed to the new style of defining forces. This includes accepting simulation object as a parameter, and having a ``.name`` attribute. Defining your own forces ------------------------ Each force in :py:mod:`polychrom.forces` is a simple function that wraps creation of an openmm force object. Users can create new forces in the script defining their simulation and add them using add_force method. Good examples of forces are in :py:mod:`polychrom.forces` - all but harmonic bond force use custom forces, and provide explanations of why particular energy function was chosen. Description of the module :py:mod:`polychrom.forces` has some important information about adding new forces. 
Running a simulation -------------------- To run a simulation, you call :py:meth:`polychrom.simulation.Simulation.doBlock` method in a loop. Unless specified otherwise, this would save a conformation into a defined reporter. Terminating a simulation is not necessary; however, terminating a reporter using reporter.dump_data() is needed for the hdf5 reporter. This all can be viewed in the example script. """ from __future__ import absolute_import, division, print_function import numpy as np import sys import os import time import tempfile import logging from six import string_types from collections.abc import Iterable import simtk.openmm as openmm import simtk.unit from . import forces logging.basicConfig(level=logging.INFO) class IntegrationFailError(Exception): pass class EKExceedsError(Exception): pass class Simulation(object): """ This is a base class for creating a Simulation and interacting with it. All general simulation parameters are defined in the constructor. Forces are defined in :py:mod:`polychrom.forces` module, and are added using :py:meth:`polychrom.simulation.Simulation.add_force` method. """ def __init__(self, **kwargs): """ All numbers here are floats. Units specified in a parameter. Parameters ---------- N : int number of particles error_tol : float, optional Error tolerance parameter for variableLangevin integrator Values of around 0.01 are reasonable for a "nice" simulation (i.e. simulation with soft forces etc). Simulations with strong forces may need 0.001 or less OpenMM manual recommends 0.001, but our forces tend to be "softer" than theirs timestep : number timestep in femtoseconds. Mandatory for non-variable integrators. Ignored for variableLangevin integrator. Value of 70-80 are appropriate collision_rate : number collision rate in inverse picoseconds. values of 0.01 or 0.05 are often used. Consult with lab members on values. In brief, equilibrium simulations likely do not care about the exact dynamics you're using, and therefore can be simulated in a "ballistic" dynamics with col_rate of around 0.001-0.01. Dynamical simulations and active simulations may be more sensitive to col_rate, though this is still under discussion/investigation. Johannes converged on using 0.1 for loop extrusion simulations, just to be safe. PBCbox : (float,float,float) or False; default:False Controls periodic boundary conditions If PBCbox is False, do not use periodic boundary conditions If intending to use PBC, then set PBCbox to (x,y,z) where x,y,z are dimensions of the bounding box for PBC GPU : GPU index as a string ("0" for first, "1" for second etc.) Machines with 1 GPU automatically select their GPU. integrator : "langevin", "variableLangevin", "verlet", "variableVerlet", "brownian", optional Integrator to use (see Openmm class reference) mass : number or np.array Particle mass (default 100 amu) temperature : simtk.units.quantity(units.kelvin), optional Temperature of the simulation. Devault value is 300 K. verbose : bool, optional If True, prints a lot of stuff in the command line. length_scale : float, optional The geometric scaling factor of the system. By default, length_scale=1.0 and harmonic bonds and repulsive forces have the scale of 1 nm. 
max_Ek: float, optional raise error if kinetic energy in (kT/particle) exceeds this value platform : string, optional Platform to use: CUDA (preferred fast GPU platform) OpenCL (maybe slower GPU platofrm, does not need CUDA installed) CPU (medium speed parallelized CPU platform) reference (slow CPU platform for debug) verbose : bool, optional Shout out loud about every change. precision: str, optional (not recommended to change) mixed is optimal for most situations. If you are using double precision, it will be slower by a factor of 10 or so. save_decimals: int or False, optional Round to this number of decimals before saving. ``False`` is no rounding. Default is 2. It gives maximum error of 0.005, which is nearly always harmless but saves up to 40% of storage space (0.6 of the original) Using one decimal is safe most of the time, and reduces storage to 40% of int32. NOTE that using periodic boundary conditions will make storage advantage less. """ default_args = { "platform": "CUDA", "GPU": "0", "integrator": "variablelangevin", "temperature": 300, "PBCbox": False, "length_scale": 1.0, "mass": 100, "reporters": [], "max_Ek": 10, "precision": "mixed", "save_decimals": 2, "verbose": False, } valid_names = list(default_args.keys()) + [ "N", "error_tol", "collision_rate", "timestep", ] for i in kwargs.keys(): if i not in valid_names: raise ValueError( "incorrect argument provided: {0}. Allowed are {1}".format( i, valid_names ) ) if None in kwargs.values(): raise ValueError("None is not allowed in arguments due to HDF5 incompatiliblity. Use False instead.") default_args.update(kwargs) kwargs = default_args self.kwargs = kwargs platform = kwargs["platform"] self.GPU = kwargs["GPU"] # setting default GPU properties = {} if self.GPU.lower() != "default": if platform.lower() in ["cuda", "opencl"]: properties["DeviceIndex"] = str(self.GPU) properties["Precision"] = kwargs["precision"] self.properties = properties if platform.lower() == "opencl": platform_object = openmm.Platform.getPlatformByName("OpenCL") elif platform.lower() == "reference": platform_object = openmm.Platform.getPlatformByName("Reference") elif platform.lower() == "cuda": platform_object = openmm.Platform.getPlatformByName("CUDA") elif platform.lower() == "cpu": platform_object = openmm.Platform.getPlatformByName("CPU") else: raise RuntimeError("Undefined platform: {0}".format(platform)) self.platform = platform_object self.temperature = kwargs["temperature"] self.collisionRate = kwargs["collision_rate"] * (1 / simtk.unit.picosecond) self.integrator_type = kwargs["integrator"] if isinstance(self.integrator_type, string_types): self.integrator_type = str(self.integrator_type) if self.integrator_type.lower() == "langevin": self.integrator = openmm.LangevinIntegrator( self.temperature, kwargs["collision_rate"] * (1 / simtk.unit.picosecond), kwargs["timestep"] * simtk.unit.femtosecond, ) elif self.integrator_type.lower() == "variablelangevin": self.integrator = openmm.VariableLangevinIntegrator( self.temperature, kwargs["collision_rate"] * (1 / simtk.unit.picosecond), kwargs["error_tol"], ) elif self.integrator_type.lower() == "verlet": self.integrator = openmm.VariableVerletIntegrator( kwargs["timestep"] * simtk.unit.femtosecond ) elif self.integrator_type.lower() == "variableverlet": self.integrator = openmm.VariableVerletIntegrator(kwargs["error_tol"]) elif self.integrator_type.lower() == "brownian": self.integrator = openmm.BrownianIntegrator( self.temperature, kwargs["collision_rate"] * (1 / simtk.unit.picosecond), 
kwargs["timestep"] * simtk.unit.femtosecond, ) else: logging.info("Using the provided integrator object") self.integrator = self.integrator_type self.integrator_type = "UserDefined" kwargs["integrator"] = "user_defined" self.N = kwargs["N"] self.verbose = kwargs["verbose"] self.reporters = kwargs["reporters"] self.forces_applied = False self.length_scale = kwargs["length_scale"] self.eK_critical = kwargs["max_Ek"] # Max allowed kinetic energy self.step = 0 self.block = 0 self.time = 0 self.nm = simtk.unit.nanometer self.kB = simtk.unit.BOLTZMANN_CONSTANT_kB * simtk.unit.AVOGADRO_CONSTANT_NA self.kT = self.kB * self.temperature * simtk.unit.kelvin # thermal energy # All masses are the same, # unless individual mass multipliers are specified in self.load() self.conlen = 1.0 * simtk.unit.nanometer * self.length_scale self.kbondScalingFactor = float( (2 * self.kT / self.conlen ** 2) / (simtk.unit.kilojoule_per_mole / simtk.unit.nanometer ** 2) ) self.system = openmm.System() # adding PBC self.PBC = False if kwargs["PBCbox"] is not False: self.PBC = True PBCbox = np.array(kwargs["PBCbox"]) self.system.setDefaultPeriodicBoxVectors( [float(PBCbox[0]), 0.0, 0.0], [0.0, float(PBCbox[1]), 0.0], [0.0, 0.0, float(PBCbox[2])], ) self.force_dict = {} # Dictionary to store forces # saving arguments - not trying to save reporters because they are not serializable kwCopy = {i: j for i, j in kwargs.items() if i != "reporters"} for reporter in self.reporters: reporter.report("initArgs", kwCopy) def get_data(self): "Returns an Nx3 array of positions" return np.asarray(self.data / simtk.unit.nanometer, dtype=np.float32) def get_scaled_data(self): """Returns data, scaled back to PBC box """ if not self.PBC: return self.get_data() alldata = self.get_data() boxsize = np.array(self.kwargs["PBCbox"]) mults = np.floor(alldata / boxsize[None, :]) toRet = alldata - mults * boxsize[None, :] assert toRet.min() >= 0 return toRet def set_data(self, data, center=False, random_offset=1e-5, report=True): """Sets particle positions Parameters ---------- data : Nx3 array-line Array of positions center : bool or "zero", optional Move center of mass to zero before starting the simulation if center == "zero", then center the data such as all positions are positive and start at zero random_offset: float or None add random offset to each particle Recommended for integer starting conformations and in general report : bool, optional If set to False, will not report this action to reporters. """ data = np.asarray(data, dtype="float") if len(data) != self.N: raise ValueError(f"length of data, {len(data)} does not match N, {self.N}") if data.shape[1] != 3: raise ValueError( "Data is not shaped correctly. Needs (N,3), provided: {0}".format( data.shape ) ) if np.isnan(data).any(): raise ValueError("Data contains NANs") if random_offset: data = data + (np.random.random(data.shape) * 2 - 1) * random_offset if center is True: av = np.mean(data, axis=0) data -= av elif center == "zero": minvalue = np.min(data, axis=0) data -= minvalue self.data = simtk.unit.Quantity(data, simtk.unit.nanometer) if report: for reporter in self.reporters: reporter.report( "starting_conformation", {"pos": data, "time": self.time, "block": self.block}, ) if hasattr(self, "context"): self.init_positions() def RG(self): """ Returns ------- Gyration ratius in units of length (bondlength). 
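        (Computed as sqrt(Var(x) + Var(y) + Var(z)) over all particles, i.e. the
        root-mean-square distance of particles from the center of mass.)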
""" data = self.get_scaled_data() data = data - np.mean(data, axis=0)[None, :] return np.sqrt(np.sum(np.var(np.array(data), 0))) def dist(self, i, j): """ Calculates distance between particles i and j. Added for convenience, and not for production code. Not for use in large for-loops. """ data = self.get_data() dif = data[i] - data[j] return np.sqrt(sum(dif ** 2)) def add_force(self, force): """ Adds a force or a forcekit to the system. """ if isinstance(force, Iterable): for f in force: self.add_force(f) else: if force.name in self.force_dict: raise ValueError( "A force named {} was added to the system twice!".format(force.name) ) forces._prepend_force_name_to_params(force) self.force_dict[force.name] = force if self.forces_applied: raise RuntimeError("Cannot add force after the context has been created") def _apply_forces(self): """ Adds all particles and forces to the system. Then applies all the forces in the forcedict. Forces should not be modified after that, unless you do it carefully (see openmm reference). This method is called automatically when you run energy minimization, or run your first block. On rare occasions, you would need to run it manually, but then you probably know what you're doing. One example when this method is used is a loop extrusion code (extrusion_3d.ipynb). In that case, you restart a simulation, but don't do energy minimization. However, before doing the first block, you just need to advance the integrator. This requires manually creating context/etc which would be normally done by the do_block method. """ if self.forces_applied: return self.masses = np.zeros(self.N, dtype=float) + self.kwargs["mass"] for mass in self.masses: self.system.addParticle(mass) for i in list(self.force_dict.keys()): # Adding forces force = self.force_dict[i] if hasattr(force, "CutoffNonPeriodic") and hasattr(force, "CutoffPeriodic"): if self.PBC: force.setNonbondedMethod(force.CutoffPeriodic) logging.info("Using periodic boundary conditions") else: force.setNonbondedMethod(force.CutoffNonPeriodic) logging.info( "adding force {} {}".format(i, self.system.addForce(self.force_dict[i])) ) for reporter in self.reporters: reporter.report( "applied_forces", {i: j.__getstate__() for i, j in self.force_dict.items()}, ) self.context = openmm.Context( self.system, self.integrator, self.platform, self.properties ) self.init_positions() self.init_velocities() self.forces_applied = True def init_velocities(self, temperature="current"): """Initializes particles velocities Parameters ---------- temperature: temperature to set velocities (default: temerature of the simulation) """ try: self.context except: raise ValueError( "No context, cannot set velocs." "Initialize context before that" ) if temperature == "current": temperature = self.temperature self.context.setVelocitiesToTemperature(temperature) def init_positions(self): """Sends particle coordinates to OpenMM system. If system has exploded, this is used in the code to reset coordinates. """ try: self.context except: raise ValueError( "No context, cannot set positions." " Initialize context before that" ) self.context.setPositions(self.data) eP = ( self.context.getState(getEnergy=True).getPotentialEnergy() / self.N / self.kT ) logging.info("Particles loaded. Potential energy is %lf" % eP) def reinitialize(self): """Reinitializes the OpenMM context object. 
This should be called if low-level parameters, such as parameters of forces, have changed """ self.context.reinitialize() self.init_positions() self.init_velocities() def local_energy_minimization( self, tolerance=0.3, maxIterations=0, random_offset=0.02 ): """ A wrapper to the build-in OpenMM Local Energy Minimization See caveat below Parameters ---------- tolerance: float It is something like a value of force below which the minimizer is trying to minimize energy to. see openmm documentation for description Value of 0.3 seems to be fine for most normal forces. maxIterations: int Maximum # of iterations for minimization to do. default: 0 means there is no limit This is relevant especially if your simulation does not have a well-defined energy minimum (e.g. you want to simulate a collapse of a chain in some potential). In that case, if you don't limit energy minimization, it will attempt to do a whole simulation for you. In that case, setting a limit to the # of iterations will just stop energy minimization manually when it reaches this # of iterations. random_offset: float A random offset to introduce after energy minimization. Should ideally make your forces have realistic values. For example, if your stiffest force is polymer bond force with "wiggle_dist" of 0.05, setting this to 0.02 will make separation between monomers realistic, and therefore will make force values realistic. See why do we need it in the caveat below. Caveat ------ If using variable langevin integrator after minimization, a big error may happen in the first timestep. The reason is that enregy minimization makes all the forces basically 0. Variable langevin integrator measures the forces and assumes that they are all small - so it makes the timestep very large, and at the first timestep it overshoots completely and energy goes up a lot. The workaround for now is to randomize positions after energy minimization """ logging.info("Performing local energy minimization") self._apply_forces() self.state = self.context.getState(getPositions=False, getEnergy=True) eK = self.state.getKineticEnergy() / self.N / self.kT eP = self.state.getPotentialEnergy() / self.N / self.kT locTime = self.state.getTime() logging.info( "before minimization eK={0}, eP={1}, time={2}".format(eK, eP, locTime) ) openmm.LocalEnergyMinimizer.minimize(self.context, tolerance, maxIterations) self.state = self.context.getState(getPositions=True, getEnergy=True) eK = self.state.getKineticEnergy() / self.N / self.kT eP = self.state.getPotentialEnergy() / self.N / self.kT coords = self.state.getPositions(asNumpy=True) self.data = coords self.set_data(self.get_data(), random_offset=random_offset, report=False) for reporter in self.reporters: reporter.report( "energy_minimization", {"pos": self.get_data(), "time": self.time, "block": self.block}, ) locTime = self.state.getTime() logging.info( "after minimization eK={0}, eP={1}, time={2}".format(eK, eP, locTime) ) def do_block( self, steps=None, check_functions=[], get_velocities=False, save=True, save_extras={}, ): """performs one block of simulations, doing steps timesteps, or steps_per_block if not specified. Parameters ---------- steps : int or None Number of timesteps to perform. increment : bool, optional If true, will not increment self.block and self.steps counters """ if not self.forces_applied: if self.verbose: logging.info("applying forces") sys.stdout.flush() self._apply_forces() self.forces_applied = True a = time.time() self.integrator.step(steps) # integrate! 
self.state = self.context.getState( getPositions=True, getVelocities=get_velocities, getEnergy=True ) b = time.time() coords = self.state.getPositions(asNumpy=True) newcoords = coords / simtk.unit.nanometer newcoords = np.array(newcoords, dtype=np.float32) if self.kwargs["save_decimals"] is not False: newcoords = np.round(newcoords, self.kwargs["save_decimals"]) self.time = self.state.getTime() / simtk.unit.picosecond # calculate energies in KT/particle eK = self.state.getKineticEnergy() / self.N / self.kT eP = self.state.getPotentialEnergy() / self.N / self.kT curtime = self.state.getTime() / simtk.unit.picosecond msg = "block %4s " % int(self.block) msg += "pos[1]=[%.1lf %.1lf %.1lf] " % tuple(newcoords[0]) check_fail = False for check_function in check_functions: if not check_function(newcoords): check_fail = True if np.isnan(newcoords).any(): raise IntegrationFailError("Coordinates are NANs") if eK > self.eK_critical: raise EKExceedsError("Ek={1} exceeds {0}".format(self.eK_critical, eK)) if (np.isnan(eK)) or (np.isnan(eP)): raise IntegrationFailError("Energy is NAN)") if check_fail: raise IntegrationFailError("Custom checks failed") dif = np.sqrt(np.mean(np.sum((newcoords - self.get_data()) ** 2, axis=1))) msg += "dr=%.2lf " % (dif,) self.data = coords msg += "t=%2.1lfps " % (self.state.getTime() / simtk.unit.picosecond) msg += "kin=%.2lf pot=%.2lf " % (eK, eP) msg += "Rg=%.3lf " % self.RG() msg += "SPS=%.0lf " % (steps / (float(b - a))) if ( self.integrator_type.lower() == "variablelangevin" or self.integrator_type.lower() == "variableverlet" ): dt = self.integrator.getStepSize() msg += "dt=%.1lffs " % (dt / simtk.unit.femtosecond) mass = self.system.getParticleMass(1) dx = simtk.unit.sqrt(2.0 * eK * self.kT / mass) * dt msg += "dx=%.2lfpm " % (dx / simtk.unit.nanometer * 1000.0) logging.info(msg) result = { "pos": newcoords, "potentialEnergy": eP, "kineticEnergy": eK, "time": curtime, "block": self.block, } if get_velocities: result["vel"] = self.state.getVelocities() / ( simtk.unit.nanometer / simtk.unit.picosecond ) result.update(save_extras) if save: for reporter in self.reporters: reporter.report("data", result) self.block += 1 self.step += steps return result def print_stats(self): """Prints detailed statistics of a system. 
Will be run every 50 steps """ state = self.context.getState( getPositions=True, getVelocities=True, getEnergy=True ) eP = state.getPotentialEnergy() pos = np.array(state.getPositions() / simtk.unit.nanometer) bonds = np.sqrt(np.sum(np.diff(pos, axis=0) ** 2, axis=1)) sbonds = np.sort(bonds) vel = state.getVelocities() mass = self.system.getParticleMass(1) vkT = np.array(vel / simtk.unit.sqrt(self.kT / mass), dtype=float) self.velocs = vkT EkPerParticle = 0.5 * np.sum(vkT ** 2, axis=1) cm = np.mean(pos, axis=0) centredPos = pos - cm[None, :] dists = np.sqrt(np.sum(centredPos ** 2, axis=1)) per95 = np.percentile(dists, 95) den = (0.95 * self.N) / ((4.0 * np.pi * per95 ** 3) / 3) per5 = np.percentile(dists, 5) den5 = (0.05 * self.N) / ((4.0 * np.pi * per5 ** 3) / 3) x, y, z = pos[:, 0], pos[:, 1], pos[:, 2] minmedmax = lambda x: (x.min(), np.median(x), x.mean(), x.max()) print("\n Statistics: number of particles: %d\n" % (self.N,)) print("Statistics for particle position") print(" mean position is: ", np.mean(pos, axis=0), " Rg = ", self.RG()) print(" median bond size is ", np.median(bonds)) print( " three shortest/longest (<10)/ bonds are ", sbonds[:3], " ", sbonds[sbonds < 10][-3:], ) if (sbonds > 10).sum() > 0: print("longest 10 bonds are", sbonds[-10:]) print(" 95 percentile of distance to center is: ", per95) print(" density of closest 95% monomers is: ", den) print(" density of the 5% closest to CoM monomers is: ", den5) print(" min/median/mean/max coordinates are: ") print(" x: %.2lf, %.2lf, %.2lf, %.2lf" % minmedmax(x)) print(" y: %.2lf, %.2lf, %.2lf, %.2lf" % minmedmax(y)) print(" z: %.2lf, %.2lf, %.2lf, %.2lf" % minmedmax(z)) print() print("Statistics for velocities:") print( " mean kinetic energy is: ", np.mean(EkPerParticle), "should be:", 1.5 ) print(" fastest particles are (in kT): ", np.sort(EkPerParticle)[-5:]) print() print("Statistics for the system:") print(" Forces are: ", list(self.force_dict.keys())) print() print("Potential Energy Ep = ", eP / self.N / self.kT) def show(self, shifts=[0.0, 0.2, 0.4, 0.6, 0.8], scale="auto"): """shows system in rasmol by drawing spheres draws 4 spheres in between any two points (5 * N spheres total) """ # if you want to change positions of the spheres along each segment, # change these numbers: e.g. [0,.1, .2 ... .9] will draw 10 spheres, # and this will look better data = self.get_data() if len(data[0]) != 3: data = np.transpose(data) if len(data[0]) != 3: logging.error("wrong data!") return # determining the 95 percentile distance between particles, if scale == "auto": meandist = np.percentile( np.sqrt(np.sum(np.diff(data, axis=0) ** 2, axis=1)), 95 ) # rescaling the data, so that bonds are of the order of 1. # This is because rasmol spheres are of the fixed diameter. data /= meandist else: data /= scale if self.N > 1000: # system is sufficiently large count = 0 for _ in range(100): a, b = np.random.randint(0, self.N, 2) dist = np.sqrt(np.sum((data[a] - data[b]) ** 2)) if dist < 1.3: count += 1 if count > 100: raise RuntimeError( "Too many particles are close together. " "This will cause rasmol to choke" ) rascript = tempfile.NamedTemporaryFile() # writing the rasmol script. Spacefill controls radius of the sphere. 
        rascript.write(
            b"""wireframe off
color temperature
spacefill 100
background white
"""
        )
        rascript.flush()

        # creating the array, linearly changing from -225 to 225
        # to serve as an array of colors
        colors = np.array(
            [int((j * 450.0) / (len(data))) - 225 for j in range(len(data))]
        )

        # creating spheres along the trajectory
        newData = np.zeros((len(data) * len(shifts) - (len(shifts) - 1), 4))
        for i in range(len(shifts)):
            newData[i : -1 : len(shifts), :3] = data[:-1] * shifts[i] + data[1:] * (
                1 - shifts[i]
            )
            newData[i : -1 : len(shifts), 3] = colors[:-1]
        newData[-1, :3] = data[-1]
        newData[-1, 3] = colors[-1]

        towrite = tempfile.NamedTemporaryFile()
        towrite.write(("{:d}\n\n".format(int(len(newData))).encode("utf-8")))
        # number of atoms and a blank line after is a requirement of rasmol

        for i in newData:
            towrite.write(
                (
                    "CA\t{:f}\t{:f}\t{:f}\t{:d}\n".format(i[0], i[1], i[2], int(i[3]))
                ).encode("utf-8")
            )
        towrite.flush()

        # TODO: rewrite using subprocess.Popen
        if os.name == "posix":  # if linux
            os.system("rasmol -xyz %s -script %s" % (towrite.name, rascript.name))
        else:  # if windows
            os.system(
                "C:/RasWin/raswin.exe -xyz %s -script %s"
                % (towrite.name, rascript.name)
            )

        rascript.close()
        towrite.close()
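A minimal driver sketch, following the workflow laid out in the module docstring at the top of simulation.py: build a Simulation, set a starting conformation, add forces, call do_block in a loop, and dump the HDF5 reporter at the end. The Simulation calls are taken from the class above; the HDF5Reporter import path and the two force-constructor names/signatures are assumptions about the rest of the polychrom package and may need adjusting.

import numpy as np
from polychrom.simulation import Simulation
from polychrom import forces
from polychrom.hdf5_format import HDF5Reporter  # assumed reporter location

N = 1000
reporter = HDF5Reporter(folder="trajectory", overwrite=True)  # assumed signature

sim = Simulation(
    N=N,
    platform="CPU",              # use "CUDA" on a GPU machine
    integrator="variableLangevin",
    error_tol=0.01,
    collision_rate=0.03,
    reporters=[reporter],
)

# simple random-walk starting conformation; any (N, 3) float array works with set_data
start = np.cumsum(np.random.choice([-1.0, 1.0], size=(N, 3)), axis=0)
sim.set_data(start, center=True)

# harmonic_bonds / spherical_confinement are assumed names from polychrom.forces
sim.add_force(forces.harmonic_bonds(sim, bonds=[(i, i + 1) for i in range(N - 1)]))
sim.add_force(forces.spherical_confinement(sim, density=0.3))

sim.local_energy_minimization()
for _ in range(10):              # each call integrates one block and reports it
    sim.do_block(steps=1000)

reporter.dump_data()             # required for the HDF5 reporter, per the module docstring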
{"hexsha": "71868f06ea91650bad687801146c985aba6925a3", "size": 34524, "ext": "py", "lang": "Python", "max_stars_repo_path": "polychrom/simulation.py", "max_stars_repo_name": "nchowder/polychrom", "max_stars_repo_head_hexsha": "cfd1344d9d59f84cc237b24a3b2ab2241e219214", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "polychrom/simulation.py", "max_issues_repo_name": "nchowder/polychrom", "max_issues_repo_head_hexsha": "cfd1344d9d59f84cc237b24a3b2ab2241e219214", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "polychrom/simulation.py", "max_forks_repo_name": "nchowder/polychrom", "max_forks_repo_head_hexsha": "cfd1344d9d59f84cc237b24a3b2ab2241e219214", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4882943144, "max_line_length": 531, "alphanum_fraction": 0.5898505388, "include": true, "reason": "import numpy", "num_tokens": 7829}
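The periodic-boundary wrapping used by Simulation.get_scaled_data above reduces to pos - floor(pos / box) * box. This standalone snippet (made-up box size and coordinates, for illustration only) shows the mapping putting every coordinate into [0, box).

import numpy as np

box = np.array([10.0, 10.0, 10.0])
pos = np.array([[12.3, -0.7, 5.0],
                [25.0, 9.9, -13.2]])

mults = np.floor(pos / box[None, :])
wrapped = pos - mults * box[None, :]

print(wrapped)  # every coordinate now lies in [0, box)
assert (wrapped >= 0).all() and (wrapped < box).all()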
import numpy as np
import matplotlib.pyplot as plt


def tmp_calc_ld(speciome, plot=False):
    #TODO: 02-01-20:
    # - debug what I wrote below to keep only seg sites
    # - again change scape size in the sweep params file to 20,20
    # - try this out and see if it fixes my issues
    # - if so, tweak linkage plot so that it labels axes by pos numbers for
    #   seg sites

    #TODO: I should also include (either as an alternative within this fn,
    #or as separate fn) the option to calculate D'

    #TODO: I keep getting warnings like the following, which could just be
    #due to division of small floating-point numbers, but I should figure out
    #exactly what's going on and be sure everything checks out. WARNING:
    # stats.py:117: RuntimeWarning: invalid value encountered in double_scalars

    #speciome = _get_speciome(spp)
    n = np.shape(speciome)[0]  # num individs
    x = np.shape(speciome)[2]  # ploidy
    N = n * x
    #L = spp.gen_arch.L
    L = speciome.shape[1]
    assert L == np.shape(speciome)[1], ("The length of the 1st dimension "
                                        "of speciome doesn't equal spp.genomic_arch.L")

    # get rid of fixed sites, which create problems for linkage calculation;
    # allele frequencies are per locus, so sum over individuals and ploidy (axes 0 and 2)
    freqs = np.sum(speciome, axis=(0, 2)) / N
    segregating = np.int8([f != 0 and f != 1 for f in freqs])
    keep_inds = [i for i in range(L) if segregating[i] == 1]
    # keep only the segregating loci (loci live on axis 1)
    speciome = speciome[:, keep_inds, :]
    seg_L = len(keep_inds)

    r2_mat = np.zeros([seg_L] * 2) * np.nan  # set NaN as the "no data" value

    for i in range(seg_L):
        for j in range(i + 1, seg_L):
            # calculate freq of allele 1 at locus i
            f1_i = np.sum(speciome[:, i, :], axis=None) / N
            # calculate freq of allele 1 at locus j
            f1_j = np.sum(speciome[:, j, :], axis=None) / N
            # calculate freq of chroms with 1_1 haplotype at loci i and j
            f11_ij = float(np.sum(speciome[:, [i, j], :].sum(axis=1) == 2,
                                  axis=None)) / N

            D_1_1 = f11_ij - (f1_i * f1_j)
            r2 = (D_1_1 ** 2) / (f1_i * (1 - f1_i) * f1_j * (1 - f1_j))

            if not 0 <= r2 <= 1:
                print("\t r2 was", r2)
                print("\t f1_i was", f1_i)
                print("\t f1_j was", f1_j)
                print("\t f11_ij was", f11_ij)
                print("\t D11 was ", D_1_1)
            else:
                print("r2 was", r2)
                print("f1_i was", f1_i)
                print("f1_j was", f1_j)
                print("f11_ij was", f11_ij)
                print("D11 was ", D_1_1)

            r2_mat[i, j] = r2
            r2_mat[j, i] = r2

    return r2_mat


def plot_linkage_vs_dist(r2_mat, recomb_rates, L=1001, sweep_loc=500):
    # TODO: Do I need to account for possibility of even number of recombination
    # events between loci when calculating distance? In other words, should
    # I convert the distance to a true cM distance, or just leave it as the sum
    # of the interlocus recombination rates?
    r2s = []
    dists = []
    for i in range(L - 1):
        for j in range(i, L - 1):
            r2 = r2_mat[i, j]
            if not np.isnan(r2):
                dist = np.sum(recomb_rates[i + 1:j + 1])
                r2s.append(r2)
                dists.append(dist)
    fig = plt.figure()
    plt.plot(dists, r2s, '.r')
    plt.xlabel("recombination distance (sum of interlocus recomb. rates)")
    plt.ylabel("linkage ($R^{2}$)")
    plt.show()
    return r2s, dists
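One of the TODOs above mentions adding D' as an alternative to r². This is a hedged sketch of that statistic (the standard normalization of D by its theoretical maximum), kept separate from the function rather than wired into it.

import numpy as np

def d_prime(p, q, f11):
    """D' for two biallelic loci with allele-1 freqs p, q and 1-1 haplotype freq f11."""
    D = f11 - p * q
    # normalize by the maximum |D| attainable given the allele frequencies
    Dmax = min(p * (1 - q), (1 - p) * q) if D >= 0 else min(p * q, (1 - p) * (1 - q))
    return np.nan if Dmax == 0 else D / Dmax

# worked check: p = q = 0.5 and f11 = 0.5 means the two loci are perfectly coupled
print(d_prime(0.5, 0.5, 0.5))  # -> 1.0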
{"hexsha": "8976315fc4497e8828e2e83113e6f0a6a445fec3", "size": 3632, "ext": "py", "lang": "Python", "max_stars_repo_path": "scratch/tmp_calc_ld_2.py", "max_stars_repo_name": "AnushaPB/geonomics-1", "max_stars_repo_head_hexsha": "deee0c377e81f509463eaf6f9d0b2f0809f2ddc3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-08-27T17:06:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-17T22:55:07.000Z", "max_issues_repo_path": "scratch/tmp_calc_ld_2.py", "max_issues_repo_name": "AnushaPB/geonomics-1", "max_issues_repo_head_hexsha": "deee0c377e81f509463eaf6f9d0b2f0809f2ddc3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scratch/tmp_calc_ld_2.py", "max_forks_repo_name": "AnushaPB/geonomics-1", "max_forks_repo_head_hexsha": "deee0c377e81f509463eaf6f9d0b2f0809f2ddc3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-28T23:45:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-25T21:47:40.000Z", "avg_line_length": 39.0537634409, "max_line_length": 80, "alphanum_fraction": 0.5655286344, "include": true, "reason": "import numpy", "num_tokens": 1047}
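A hypothetical driver for the two functions above, run on a small random speciome (individuals x loci x ploidy array of 0/1 alleles) just to exercise them; the import path and the uniform recombination rates are assumptions made for illustration only.

import numpy as np
from tmp_calc_ld_2 import tmp_calc_ld, plot_linkage_vs_dist  # assumed import path

rng = np.random.default_rng(0)
n_individs, n_loci, ploidy = 50, 20, 2
speciome = rng.integers(0, 2, size=(n_individs, n_loci, ploidy))

r2_mat = tmp_calc_ld(speciome)

# r2_mat only covers the segregating loci, so size the rate array to match
seg_L = r2_mat.shape[0]
recomb_rates = np.full(seg_L, 0.05)
r2s, dists = plot_linkage_vs_dist(r2_mat, recomb_rates, L=seg_L)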