idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
22,000 | def compute_frame ( self , ** kwargs ) : r if self . G . N > 2000 : _logger . warning ( 'Creating a big matrix. ' 'You should prefer the filter method.' ) s = np . identity ( self . G . N ) return self . filter ( s , ** kwargs ) . T . reshape ( - 1 , self . G . N ) | r Compute the associated frame . |
22,001 | def complement ( self , frame_bound = None ) : r def kernel ( x , * args , ** kwargs ) : y = self . evaluate ( x ) np . power ( y , 2 , out = y ) y = np . sum ( y , axis = 0 ) if frame_bound is None : bound = y . max ( ) elif y . max ( ) > frame_bound : raise ValueError ( 'The chosen bound is not feasible. ' 'Choose at least {}.' . format ( y . max ( ) ) ) else : bound = frame_bound return np . sqrt ( bound - y ) return Filter ( self . G , kernel ) | r Return the filter that makes the frame tight . |
22,002 | def inverse ( self ) : r A , _ = self . estimate_frame_bounds ( ) if A == 0 : _logger . warning ( 'The filter bank is not invertible as it is not ' 'a frame (lower frame bound A=0).' ) def kernel ( g , i , x ) : y = g . evaluate ( x ) . T z = np . linalg . pinv ( np . expand_dims ( y , axis = - 1 ) ) . squeeze ( axis = - 2 ) return z [ : , i ] kernels = [ partial ( kernel , self , i ) for i in range ( self . n_filters ) ] return Filter ( self . G , kernels ) | r Return the pseudo - inverse filter bank . |
22,003 | def grad ( self , x ) : r x = self . _check_signal ( x ) return self . D . T . dot ( x ) | r Compute the gradient of a signal defined on the vertices . |
22,004 | def div ( self , y ) : r y = np . asanyarray ( y ) if y . shape [ 0 ] != self . Ne : raise ValueError ( 'First dimension must be the number of edges ' 'G.Ne = {}, got {}.' . format ( self . Ne , y . shape ) ) return self . D . dot ( y ) | r Compute the divergence of a signal defined on the edges . |
22,005 | def gft ( self , s ) : r s = self . _check_signal ( s ) U = np . conjugate ( self . U ) return np . tensordot ( U , s , ( [ 0 ] , [ 0 ] ) ) | r Compute the graph Fourier transform . |
22,006 | def igft ( self , s_hat ) : r s_hat = self . _check_signal ( s_hat ) return np . tensordot ( self . U , s_hat , ( [ 1 ] , [ 0 ] ) ) | r Compute the inverse graph Fourier transform . |
22,007 | def compute_cheby_coeff ( f , m = 30 , N = None , * args , ** kwargs ) : r G = f . G i = kwargs . pop ( 'i' , 0 ) if not N : N = m + 1 a_arange = [ 0 , G . lmax ] a1 = ( a_arange [ 1 ] - a_arange [ 0 ] ) / 2 a2 = ( a_arange [ 1 ] + a_arange [ 0 ] ) / 2 c = np . zeros ( m + 1 ) tmpN = np . arange ( N ) num = np . cos ( np . pi * ( tmpN + 0.5 ) / N ) for o in range ( m + 1 ) : c [ o ] = 2. / N * np . dot ( f . _kernels [ i ] ( a1 * num + a2 ) , np . cos ( np . pi * o * ( tmpN + 0.5 ) / N ) ) return c | r Compute Chebyshev coefficients for a Filterbank . |
22,008 | def cheby_op ( G , c , signal , ** kwargs ) : r if not isinstance ( c , np . ndarray ) : c = np . array ( c ) c = np . atleast_2d ( c ) Nscales , M = c . shape if M < 2 : raise TypeError ( "The coefficients have an invalid shape" ) try : Nv = np . shape ( signal ) [ 1 ] r = np . zeros ( ( G . N * Nscales , Nv ) ) except IndexError : r = np . zeros ( ( G . N * Nscales ) ) a_arange = [ 0 , G . lmax ] a1 = float ( a_arange [ 1 ] - a_arange [ 0 ] ) / 2. a2 = float ( a_arange [ 1 ] + a_arange [ 0 ] ) / 2. twf_old = signal twf_cur = ( G . L . dot ( signal ) - a2 * signal ) / a1 tmpN = np . arange ( G . N , dtype = int ) for i in range ( Nscales ) : r [ tmpN + G . N * i ] = 0.5 * c [ i , 0 ] * twf_old + c [ i , 1 ] * twf_cur factor = 2 / a1 * ( G . L - a2 * sparse . eye ( G . N ) ) for k in range ( 2 , M ) : twf_new = factor . dot ( twf_cur ) - twf_old for i in range ( Nscales ) : r [ tmpN + G . N * i ] += c [ i , k ] * twf_new twf_old = twf_cur twf_cur = twf_new return r | r Chebyshev polynomial of graph Laplacian applied to vector . |
22,009 | def cheby_rect ( G , bounds , signal , ** kwargs ) : r if not ( isinstance ( bounds , ( list , np . ndarray ) ) and len ( bounds ) == 2 ) : raise ValueError ( 'Bounds of wrong shape.' ) bounds = np . array ( bounds ) m = int ( kwargs . pop ( 'order' , 30 ) + 1 ) try : Nv = np . shape ( signal ) [ 1 ] r = np . zeros ( ( G . N , Nv ) ) except IndexError : r = np . zeros ( ( G . N ) ) b1 , b2 = np . arccos ( 2. * bounds / G . lmax - 1. ) factor = 4. / G . lmax * G . L - 2. * sparse . eye ( G . N ) T_old = signal T_cur = factor . dot ( signal ) / 2. r = ( b1 - b2 ) / np . pi * signal + 2. / np . pi * ( np . sin ( b1 ) - np . sin ( b2 ) ) * T_cur for k in range ( 2 , m ) : T_new = factor . dot ( T_cur ) - T_old r += 2. / ( k * np . pi ) * ( np . sin ( k * b1 ) - np . sin ( k * b2 ) ) * T_new T_old = T_cur T_cur = T_new return r | r Fast filtering using Chebyshev polynomial for a perfect rectangle filter . |
22,010 | def compute_jackson_cheby_coeff ( filter_bounds , delta_lambda , m ) : r if delta_lambda [ 0 ] > filter_bounds [ 0 ] or delta_lambda [ 1 ] < filter_bounds [ 1 ] : _logger . error ( "Bounds of the filter are out of the lambda values" ) raise ( ) elif delta_lambda [ 0 ] > delta_lambda [ 1 ] : _logger . error ( "lambda_min is greater than lambda_max" ) raise ( ) a1 = ( delta_lambda [ 1 ] - delta_lambda [ 0 ] ) / 2 a2 = ( delta_lambda [ 1 ] + delta_lambda [ 0 ] ) / 2 filter_bounds [ 0 ] = ( filter_bounds [ 0 ] - a2 ) / a1 filter_bounds [ 1 ] = ( filter_bounds [ 1 ] - a2 ) / a1 ch = np . empty ( m + 1 , dtype = float ) ch [ 0 ] = ( 2 / ( np . pi ) ) * ( np . arccos ( filter_bounds [ 0 ] ) - np . arccos ( filter_bounds [ 1 ] ) ) for i in range ( 1 , len ( ch ) ) : ch [ i ] = ( 2 / ( np . pi * i ) ) * ( np . sin ( i * np . arccos ( filter_bounds [ 0 ] ) ) - np . sin ( i * np . arccos ( filter_bounds [ 1 ] ) ) ) jch = np . empty ( m + 1 , dtype = float ) alpha = ( np . pi / ( m + 2 ) ) for i in range ( len ( jch ) ) : jch [ i ] = ( 1 / np . sin ( alpha ) ) * ( ( 1 - i / ( m + 2 ) ) * np . sin ( alpha ) * np . cos ( i * alpha ) + ( 1 / ( m + 2 ) ) * np . cos ( alpha ) * np . sin ( i * alpha ) ) jch = ch * jch return ch , jch | r To compute the m + 1 coefficients of the polynomial approximation of an ideal band - pass between a and b between a range of values defined by lambda_min and lambda_max . |
22,011 | def lanczos_op ( f , s , order = 30 ) : r G = f . G Nf = len ( f . g ) try : Nv = np . shape ( s ) [ 1 ] is2d = True c = np . zeros ( ( G . N * Nf , Nv ) ) except IndexError : Nv = 1 is2d = False c = np . zeros ( ( G . N * Nf ) ) tmpN = np . arange ( G . N , dtype = int ) for j in range ( Nv ) : if is2d : V , H , _ = lanczos ( G . L . toarray ( ) , order , s [ : , j ] ) else : V , H , _ = lanczos ( G . L . toarray ( ) , order , s ) Eh , Uh = np . linalg . eig ( H ) Eh [ Eh < 0 ] = 0 fe = f . evaluate ( Eh ) V = np . dot ( V , Uh ) for i in range ( Nf ) : if is2d : c [ tmpN + i * G . N , j ] = np . dot ( V , fe [ : ] [ i ] * np . dot ( V . T , s [ : , j ] ) ) else : c [ tmpN + i * G . N ] = np . dot ( V , fe [ : ] [ i ] * np . dot ( V . T , s ) ) return c | r Perform the lanczos approximation of the signal s . |
22,012 | def interpolate ( G , f_subsampled , keep_inds , order = 100 , reg_eps = 0.005 , ** kwargs ) : r L_reg = G . L + reg_eps * sparse . eye ( G . N ) K_reg = getattr ( G . mr , 'K_reg' , kron_reduction ( L_reg , keep_inds ) ) green_kernel = getattr ( G . mr , 'green_kernel' , filters . Filter ( G , lambda x : 1. / ( reg_eps + x ) ) ) alpha = K_reg . dot ( f_subsampled ) try : Nv = np . shape ( f_subsampled ) [ 1 ] f_interpolated = np . zeros ( ( G . N , Nv ) ) except IndexError : f_interpolated = np . zeros ( ( G . N ) ) f_interpolated [ keep_inds ] = alpha return _analysis ( green_kernel , f_interpolated , order = order , ** kwargs ) | r Interpolate a graph signal . |
22,013 | def kron_reduction ( G , ind ) : r if isinstance ( G , graphs . Graph ) : if G . lap_type != 'combinatorial' : msg = 'Unknown reduction for {} Laplacian.' . format ( G . lap_type ) raise NotImplementedError ( msg ) if G . is_directed ( ) : msg = 'This method only work for undirected graphs.' raise NotImplementedError ( msg ) L = G . L else : L = G N = np . shape ( L ) [ 0 ] ind_comp = np . setdiff1d ( np . arange ( N , dtype = int ) , ind ) L_red = L [ np . ix_ ( ind , ind ) ] L_in_out = L [ np . ix_ ( ind , ind_comp ) ] L_out_in = L [ np . ix_ ( ind_comp , ind ) ] . tocsc ( ) L_comp = L [ np . ix_ ( ind_comp , ind_comp ) ] . tocsc ( ) Lnew = L_red - L_in_out . dot ( linalg . spsolve ( L_comp , L_out_in ) ) if np . abs ( Lnew - Lnew . T ) . sum ( ) < np . spacing ( 1 ) * np . abs ( Lnew ) . sum ( ) : Lnew = ( Lnew + Lnew . T ) / 2. if isinstance ( G , graphs . Graph ) : Wnew = sparse . diags ( Lnew . diagonal ( ) , 0 ) - Lnew Snew = Lnew . diagonal ( ) - np . ravel ( Wnew . sum ( 0 ) ) if np . linalg . norm ( Snew , 2 ) >= np . spacing ( 1000 ) : Wnew = Wnew + sparse . diags ( Snew , 0 ) Wnew = Wnew - Wnew . diagonal ( ) coords = G . coords [ ind , : ] if len ( G . coords . shape ) else np . ndarray ( None ) Gnew = graphs . Graph ( Wnew , coords = coords , lap_type = G . lap_type , plotting = G . plotting ) else : Gnew = Lnew return Gnew | r Compute the Kron reduction . |
22,014 | def pyramid_analysis ( Gs , f , ** kwargs ) : r if np . shape ( f ) [ 0 ] != Gs [ 0 ] . N : raise ValueError ( "PYRAMID ANALYSIS: The signal to analyze should have the same dimension as the first graph." ) levels = len ( Gs ) - 1 h_filters = kwargs . pop ( 'h_filters' , lambda x : 1. / ( 2 * x + 1 ) ) if not isinstance ( h_filters , list ) : if hasattr ( h_filters , '__call__' ) : logger . warning ( 'Converting filters into a list.' ) h_filters = [ h_filters ] else : logger . error ( 'Filters must be a list of functions.' ) if len ( h_filters ) == 1 : h_filters = h_filters * levels elif len ( h_filters ) != levels : message = 'The number of filters must be one or equal to {}.' . format ( levels ) raise ValueError ( message ) ca = [ f ] pe = [ ] for i in range ( levels ) : s_low = _analysis ( filters . Filter ( Gs [ i ] , h_filters [ i ] ) , ca [ i ] , ** kwargs ) ca . append ( s_low [ Gs [ i + 1 ] . mr [ 'idx' ] ] ) s_pred = interpolate ( Gs [ i ] , ca [ i + 1 ] , Gs [ i + 1 ] . mr [ 'idx' ] , ** kwargs ) pe . append ( ca [ i ] - s_pred ) return ca , pe | r Compute the graph pyramid transform coefficients . |
22,015 | def pyramid_synthesis ( Gs , cap , pe , order = 30 , ** kwargs ) : r least_squares = bool ( kwargs . pop ( 'least_squares' , False ) ) def_ul = Gs [ 0 ] . N > 3000 or Gs [ 0 ] . _e is None or Gs [ 0 ] . _U is None use_landweber = bool ( kwargs . pop ( 'use_landweber' , def_ul ) ) reg_eps = float ( kwargs . get ( 'reg_eps' , 0.005 ) ) if least_squares and 'h_filters' not in kwargs : ValueError ( 'h-filters not provided.' ) levels = len ( Gs ) - 1 if len ( pe ) != levels : ValueError ( 'Gs and pe have different shapes.' ) ca = [ cap ] for i in range ( levels ) : if not least_squares : s_pred = interpolate ( Gs [ levels - i - 1 ] , ca [ i ] , Gs [ levels - i ] . mr [ 'idx' ] , order = order , reg_eps = reg_eps , ** kwargs ) ca . append ( s_pred + pe [ levels - i - 1 ] ) else : ca . append ( _pyramid_single_interpolation ( Gs [ levels - i - 1 ] , ca [ i ] , pe [ levels - i - 1 ] , h_filters [ levels - i - 1 ] , use_landweber = use_landweber , ** kwargs ) ) ca . reverse ( ) reconstruction = ca [ 0 ] return reconstruction , ca | r Synthesize a signal from its pyramid coefficients . |
22,016 | def tree_multiresolution ( G , Nlevel , reduction_method = 'resistance_distance' , compute_full_eigen = False , root = None ) : r if not root : if hasattr ( G , 'root' ) : root = G . root else : root = 1 Gs = [ G ] if compute_full_eigen : Gs [ 0 ] . compute_fourier_basis ( ) subsampled_vertex_indices = [ ] depths , parents = _tree_depths ( G . A , root ) old_W = G . W for lev in range ( Nlevel ) : down_odd = round ( depths ) % 2 down_even = np . ones ( ( Gs [ lev ] . N ) ) - down_odd keep_inds = np . where ( down_even == 1 ) [ 0 ] subsampled_vertex_indices . append ( keep_inds ) non_root_keep_inds , new_non_root_inds = np . setdiff1d ( keep_inds , root ) old_parents_of_non_root_keep_inds = parents [ non_root_keep_inds ] old_grandparents_of_non_root_keep_inds = parents [ old_parents_of_non_root_keep_inds ] old_W_i_inds , old_W_j_inds , old_W_weights = sparse . find ( old_W ) i_inds = np . concatenate ( ( new_non_root_inds , new_non_root_parents ) ) j_inds = np . concatenate ( ( new_non_root_parents , new_non_root_inds ) ) new_N = np . sum ( down_even ) if reduction_method == "unweighted" : new_weights = np . ones ( np . shape ( i_inds ) ) elif reduction_method == "sum" : old_weights_to_parents = old_W_weights [ old_weights_to_parents_inds ] old_weights_parents_to_grandparents = old_W_weights [ old_weights_parents_to_grandparents_inds ] new_weights = old_weights_to_parents + old_weights_parents_to_grandparents new_weights = np . concatenate ( ( new_weights . new_weights ) ) elif reduction_method == "resistance_distance" : old_weights_to_parents = old_W_weight [ sold_weights_to_parents_inds ] old_weights_parents_to_grandparents = old_W_weights [ old_weights_parents_to_grandparents_inds ] new_weights = 1. / ( 1. / old_weights_to_parents + 1. / old_weights_parents_to_grandparents ) new_weights = np . concatenate ( ( [ new_weights , new_weights ] ) ) else : raise ValueError ( 'Unknown graph reduction method.' ) new_W = sparse . csc_matrix ( ( new_weights , ( i_inds , j_inds ) ) , shape = ( new_N , new_N ) ) new_root = np . where ( keep_inds == root ) [ 0 ] parents = np . zeros ( np . shape ( keep_inds ) [ 0 ] , np . shape ( keep_inds ) [ 0 ] ) parents [ : new_root - 1 , new_root : ] = new_non_root_parents depths = depths [ keep_inds ] depths = depths / 2. Gtemp = graphs . Graph ( new_W , coords = Gs [ lev ] . coords [ keep_inds ] , limits = G . limits , root = new_root ) if compute_full_eigen : Gs [ lev + 1 ] . compute_fourier_basis ( ) Gs . append ( Gtemp ) old_W = new_W root = new_root return Gs , subsampled_vertex_indices | r Compute a multiresolution of trees |
22,017 | def close_all ( ) : r global _qtg_windows for window in _qtg_windows : window . close ( ) _qtg_windows = [ ] global _qtg_widgets for widget in _qtg_widgets : widget . close ( ) _qtg_widgets = [ ] global _plt_figures for fig in _plt_figures : _ , plt , _ = _import_plt ( ) plt . close ( fig ) _plt_figures = [ ] | r Close all opened windows . |
22,018 | def _plot_filter ( filters , n , eigenvalues , sum , title , ax , ** kwargs ) : r if eigenvalues is None : eigenvalues = ( filters . G . _e is not None ) if sum is None : sum = filters . n_filters > 1 if title is None : title = repr ( filters ) return _plt_plot_filter ( filters , n = n , eigenvalues = eigenvalues , sum = sum , title = title , ax = ax , ** kwargs ) | r Plot the spectral response of a filter bank . |
22,019 | def _plot_spectrogram ( G , node_idx ) : r from pygsp import features qtg , _ , _ = _import_qtg ( ) if not hasattr ( G , 'spectr' ) : features . compute_spectrogram ( G ) M = G . spectr . shape [ 1 ] spectr = G . spectr [ node_idx , : ] if node_idx is not None else G . spectr spectr = np . ravel ( spectr ) min_spec , max_spec = spectr . min ( ) , spectr . max ( ) pos = np . array ( [ 0. , 0.25 , 0.5 , 0.75 , 1. ] ) color = [ [ 20 , 133 , 212 , 255 ] , [ 53 , 42 , 135 , 255 ] , [ 48 , 174 , 170 , 255 ] , [ 210 , 184 , 87 , 255 ] , [ 249 , 251 , 14 , 255 ] ] color = np . array ( color , dtype = np . ubyte ) cmap = qtg . ColorMap ( pos , color ) spectr = ( spectr . astype ( float ) - min_spec ) / ( max_spec - min_spec ) w = qtg . GraphicsWindow ( ) w . setWindowTitle ( "Spectrogram of {}" . format ( G . __repr__ ( limit = 4 ) ) ) label = 'frequencies {}:{:.2f}:{:.2f}' . format ( 0 , G . lmax / M , G . lmax ) v = w . addPlot ( labels = { 'bottom' : 'nodes' , 'left' : label } ) v . setAspectLocked ( ) spi = qtg . ScatterPlotItem ( np . repeat ( np . arange ( G . N ) , M ) , np . ravel ( np . tile ( np . arange ( M ) , ( 1 , G . N ) ) ) , pxMode = False , symbol = 's' , size = 1 , brush = cmap . map ( spectr , 'qcolor' ) ) v . addItem ( spi ) global _qtg_windows _qtg_windows . append ( w ) | r Plot the graph s spectrogram . |
22,020 | def classification_tikhonov ( G , y , M , tau = 0 ) : r y [ M == False ] = 0 Y = _to_logits ( y . astype ( np . int ) ) return regression_tikhonov ( G , Y , M , tau ) | r Solve a classification problem on graph via Tikhonov minimization . |
22,021 | def regression_tikhonov ( G , y , M , tau = 0 ) : r if tau > 0 : y [ M == False ] = 0 if sparse . issparse ( G . L ) : def Op ( x ) : return ( M * x . T ) . T + tau * ( G . L . dot ( x ) ) LinearOp = sparse . linalg . LinearOperator ( [ G . N , G . N ] , Op ) if y . ndim > 1 : sol = np . empty ( shape = y . shape ) res = np . empty ( shape = y . shape [ 1 ] ) for i in range ( y . shape [ 1 ] ) : sol [ : , i ] , res [ i ] = sparse . linalg . cg ( LinearOp , y [ : , i ] ) else : sol , res = sparse . linalg . cg ( LinearOp , y ) return sol else : if type ( G . L ) . __module__ == np . __name__ : LinearOp = np . diag ( M * 1 ) + tau * G . L return np . linalg . solve ( LinearOp , M * y ) else : if np . prod ( M . shape ) != G . n_vertices : raise ValueError ( "M should be of size [G.n_vertices,]" ) indl = M indu = ( M == False ) Luu = G . L [ indu , : ] [ : , indu ] Wul = - G . L [ indu , : ] [ : , indl ] if sparse . issparse ( G . L ) : sol_part = sparse . linalg . spsolve ( Luu , Wul . dot ( y [ indl ] ) ) else : sol_part = np . linalg . solve ( Luu , np . matmul ( Wul , y [ indl ] ) ) sol = y . copy ( ) sol [ indu ] = sol_part return sol | r Solve a regression problem on graph via Tikhonov minimization . |
22,022 | def set_signal ( self , signal , name ) : r signal = self . _check_signal ( signal ) self . signals [ name ] = signal | r Attach a signal to the graph . |
22,023 | def subgraph ( self , vertices ) : r adjacency = self . W [ vertices , : ] [ : , vertices ] try : coords = self . coords [ vertices ] except AttributeError : coords = None graph = Graph ( adjacency , self . lap_type , coords , self . plotting ) for name , signal in self . signals . items ( ) : graph . set_signal ( signal [ vertices ] , name ) return graph | r Create a subgraph from a list of vertices . |
22,024 | def extract_components ( self ) : r if self . A . shape [ 0 ] != self . A . shape [ 1 ] : self . logger . error ( 'Inconsistent shape to extract components. ' 'Square matrix required.' ) return None if self . is_directed ( ) : raise NotImplementedError ( 'Directed graphs not supported yet.' ) graphs = [ ] visited = np . zeros ( self . A . shape [ 0 ] , dtype = bool ) while not visited . all ( ) : stack = set ( np . nonzero ( ~ visited ) [ 0 ] [ [ 0 ] ] ) comp = [ ] while len ( stack ) : v = stack . pop ( ) if not visited [ v ] : comp . append ( v ) visited [ v ] = True stack . update ( set ( [ idx for idx in self . A [ v , : ] . nonzero ( ) [ 1 ] if not visited [ idx ] ] ) ) comp = sorted ( comp ) self . logger . info ( ( 'Constructing subgraph for component of ' 'size {}.' ) . format ( len ( comp ) ) ) G = self . subgraph ( comp ) G . info = { 'orig_idx' : comp } graphs . append ( G ) return graphs | r Split the graph into connected components . |
22,025 | def compute_laplacian ( self , lap_type = 'combinatorial' ) : r if lap_type != self . lap_type : self . _lmax = None self . _U = None self . _e = None self . _coherence = None self . _D = None self . lap_type = lap_type if not self . is_directed ( ) : W = self . W else : W = utils . symmetrize ( self . W , method = 'average' ) if lap_type == 'combinatorial' : D = sparse . diags ( self . dw ) self . L = D - W elif lap_type == 'normalized' : d = np . zeros ( self . n_vertices ) disconnected = ( self . dw == 0 ) np . power ( self . dw , - 0.5 , where = ~ disconnected , out = d ) D = sparse . diags ( d ) self . L = sparse . identity ( self . n_vertices ) - D * W * D self . L [ disconnected , disconnected ] = 0 self . L . eliminate_zeros ( ) else : raise ValueError ( 'Unknown Laplacian type {}' . format ( lap_type ) ) | r Compute a graph Laplacian . |
22,026 | def _check_signal ( self , s ) : r s = np . asanyarray ( s ) if s . shape [ 0 ] != self . n_vertices : raise ValueError ( 'First dimension must be the number of vertices ' 'G.N = {}, got {}.' . format ( self . N , s . shape ) ) return s | r Check if signal is valid . |
22,027 | def dirichlet_energy ( self , x ) : r x = self . _check_signal ( x ) return x . T . dot ( self . L . dot ( x ) ) | r Compute the Dirichlet energy of a signal defined on the vertices . |
22,028 | def dw ( self ) : r if self . _dw is None : if not self . is_directed ( ) : self . _dw = np . ravel ( self . W . sum ( axis = 0 ) ) else : degree_in = np . ravel ( self . W . sum ( axis = 0 ) ) degree_out = np . ravel ( self . W . sum ( axis = 1 ) ) self . _dw = ( degree_in + degree_out ) / 2 return self . _dw | r The weighted degree of vertices . |
22,029 | def lmax ( self ) : r if self . _lmax is None : self . logger . warning ( 'The largest eigenvalue G.lmax is not ' 'available, we need to estimate it. ' 'Explicitly call G.estimate_lmax() or ' 'G.compute_fourier_basis() ' 'once beforehand to suppress the warning.' ) self . estimate_lmax ( ) return self . _lmax | r Largest eigenvalue of the graph Laplacian . |
22,030 | def _get_upper_bound ( self ) : r if self . lap_type == 'normalized' : return 2 elif self . lap_type == 'combinatorial' : bounds = [ ] bounds += [ self . n_vertices * np . max ( self . W ) ] bounds += [ 2 * np . max ( self . dw ) ] if self . n_edges > 0 : sources , targets , _ = self . get_edge_list ( ) bounds += [ np . max ( self . dw [ sources ] + self . dw [ targets ] ) ] if not self . is_directed ( ) : W = self . W else : W = utils . symmetrize ( self . W , method = 'average' ) m = W . dot ( self . dw ) / self . dw bounds += [ np . max ( self . dw + m ) ] return min ( bounds ) else : raise ValueError ( 'Unknown Laplacian type ' '{}' . format ( self . lap_type ) ) | r Return an upper bound on the eigenvalues of the Laplacian . |
22,031 | def get_edge_list ( self ) : r if self . is_directed ( ) : W = self . W . tocoo ( ) else : W = sparse . triu ( self . W , format = 'coo' ) sources = W . row targets = W . col weights = W . data assert self . n_edges == sources . size == targets . size == weights . size return sources , targets , weights | r Return an edge list an alternative representation of the graph . |
22,032 | def prox_tv ( x , gamma , G , A = None , At = None , nu = 1 , tol = 10e-4 , maxit = 200 , use_matrix = True ) : r if A is None : def A ( x ) : return x if At is None : def At ( x ) : return x tight = 0 l1_nu = 2 * G . lmax * nu if use_matrix : def l1_a ( x ) : return G . Diff * A ( x ) def l1_at ( x ) : return G . Diff * At ( D . T * x ) else : def l1_a ( x ) : return G . grad ( A ( x ) ) def l1_at ( x ) : return G . div ( x ) functions , _ = _import_pyunlocbox ( ) functions . norm_l1 ( x , gamma , A = l1_a , At = l1_at , tight = tight , maxit = maxit , verbose = verbose , tol = tol ) | r Total Variation proximal operator for graphs . |
22,033 | def is_regular ( self ) : r warn = False msg = 'The given matrix' if np . abs ( self . A - self . A . T ) . sum ( ) > 0 : warn = True msg = '{} is not symmetric,' . format ( msg ) if self . A . max ( axis = None ) > 1 : warn = True msg = '{} has parallel edges,' . format ( msg ) if np . min ( self . d ) != np . max ( self . d ) : warn = True msg = '{} is not d-regular,' . format ( msg ) if self . A . diagonal ( ) . any ( ) : warn = True msg = '{} has self loop.' . format ( msg ) if warn : self . logger . warning ( '{}.' . format ( msg [ : - 1 ] ) ) | r Troubleshoot a given regular graph . |
22,034 | def _break_signals ( self ) : r for name in list ( self . signals . keys ( ) ) : if self . signals [ name ] . ndim == 2 : for i , signal_1d in enumerate ( self . signals [ name ] . T ) : self . signals [ name + '_' + str ( i ) ] = signal_1d del self . signals [ name ] | r Break N - dimensional signals into N 1D signals . |
22,035 | def _join_signals ( self ) : r joined = dict ( ) for name in self . signals : name_base = name . rsplit ( '_' , 1 ) [ 0 ] names = joined . get ( name_base , list ( ) ) names . append ( name ) joined [ name_base ] = names for name_base , names in joined . items ( ) : if len ( names ) > 1 : names = sorted ( names ) signal_nd = np . stack ( [ self . signals [ n ] for n in names ] , axis = 1 ) self . signals [ name_base ] = signal_nd for name in names : del self . signals [ name ] | r Join N 1D signals into one N - dimensional signal . |
22,036 | def to_networkx ( self ) : r nx = _import_networkx ( ) def convert ( number ) : if issubclass ( number . dtype . type , ( np . integer , np . bool_ ) ) : return int ( number ) else : return float ( number ) def edges ( ) : for source , target , weight in zip ( * self . get_edge_list ( ) ) : yield int ( source ) , int ( target ) , { 'weight' : convert ( weight ) } def nodes ( ) : for vertex in range ( self . n_vertices ) : signals = { name : convert ( signal [ vertex ] ) for name , signal in self . signals . items ( ) } yield vertex , signals self . _break_signals ( ) graph = nx . DiGraph ( ) if self . is_directed ( ) else nx . Graph ( ) graph . add_nodes_from ( nodes ( ) ) graph . add_edges_from ( edges ( ) ) graph . name = self . __class__ . __name__ return graph | r Export the graph to NetworkX . |
22,037 | def to_graphtool ( self ) : r convert = { np . bool_ : 'bool' , np . int8 : 'int8_t' , np . int16 : 'int16_t' , np . int32 : 'int32_t' , np . int64 : 'int64_t' , np . short : 'short' , np . intc : 'int' , np . uintc : 'unsigned int' , np . long : 'long' , np . longlong : 'long long' , np . uint : 'unsigned long' , np . single : 'float' , np . double : 'double' , np . longdouble : 'long double' , } gt = _import_graphtool ( ) graph = gt . Graph ( directed = self . is_directed ( ) ) sources , targets , weights = self . get_edge_list ( ) graph . add_edge_list ( np . asarray ( ( sources , targets ) ) . T ) try : dtype = convert [ weights . dtype . type ] except KeyError : raise TypeError ( "Type {} of the edge weights is not supported." . format ( weights . dtype ) ) prop = graph . new_edge_property ( dtype ) prop . get_array ( ) [ : ] = weights graph . edge_properties [ 'weight' ] = prop self . _break_signals ( ) for name , signal in self . signals . items ( ) : try : dtype = convert [ signal . dtype . type ] except KeyError : raise TypeError ( "Type {} of signal {} is not supported." . format ( signal . dtype , name ) ) prop = graph . new_vertex_property ( dtype ) prop . get_array ( ) [ : ] = signal graph . vertex_properties [ name ] = prop return graph | r Export the graph to graph - tool . |
22,038 | def from_networkx ( cls , graph , weight = 'weight' ) : r nx = _import_networkx ( ) from . graph import Graph adjacency = nx . to_scipy_sparse_matrix ( graph , weight = weight ) graph_pg = Graph ( adjacency ) for i , node in enumerate ( graph . nodes ( ) ) : for name in graph . nodes [ node ] . keys ( ) : try : signal = graph_pg . signals [ name ] except KeyError : signal = np . full ( graph_pg . n_vertices , np . nan ) graph_pg . set_signal ( signal , name ) try : signal [ i ] = graph . nodes [ node ] [ name ] except KeyError : pass graph_pg . _join_signals ( ) return graph_pg | r Import a graph from NetworkX . |
22,039 | def from_graphtool ( cls , graph , weight = 'weight' ) : r gt = _import_graphtool ( ) import graph_tool . spectral from . graph import Graph weight = graph . edge_properties . get ( weight , None ) adjacency = gt . spectral . adjacency ( graph , weight = weight ) graph_pg = Graph ( adjacency . T ) for name , signal in graph . vertex_properties . items ( ) : graph_pg . set_signal ( signal . get_array ( ) , name ) graph_pg . _join_signals ( ) return graph_pg | r Import a graph from graph - tool . |
22,040 | def load ( cls , path , fmt = None , backend = None ) : r if fmt is None : fmt = os . path . splitext ( path ) [ 1 ] [ 1 : ] if fmt not in [ 'graphml' , 'gml' , 'gexf' ] : raise ValueError ( 'Unsupported format {}.' . format ( fmt ) ) def load_networkx ( path , fmt ) : nx = _import_networkx ( ) load = getattr ( nx , 'read_' + fmt ) graph = load ( path ) return cls . from_networkx ( graph ) def load_graphtool ( path , fmt ) : gt = _import_graphtool ( ) graph = gt . load_graph ( path , fmt = fmt ) return cls . from_graphtool ( graph ) if backend == 'networkx' : return load_networkx ( path , fmt ) elif backend == 'graph-tool' : return load_graphtool ( path , fmt ) elif backend is None : try : return load_networkx ( path , fmt ) except ImportError : try : return load_graphtool ( path , fmt ) except ImportError : raise ImportError ( 'Cannot import networkx nor graph-tool.' ) else : raise ValueError ( 'Unknown backend {}.' . format ( backend ) ) | r Load a graph from a file . |
22,041 | def save ( self , path , fmt = None , backend = None ) : r if fmt is None : fmt = os . path . splitext ( path ) [ 1 ] [ 1 : ] if fmt not in [ 'graphml' , 'gml' , 'gexf' ] : raise ValueError ( 'Unsupported format {}.' . format ( fmt ) ) def save_networkx ( graph , path , fmt ) : nx = _import_networkx ( ) graph = graph . to_networkx ( ) save = getattr ( nx , 'write_' + fmt ) save ( graph , path ) def save_graphtool ( graph , path , fmt ) : graph = graph . to_graphtool ( ) graph . save ( path , fmt = fmt ) if backend == 'networkx' : save_networkx ( self , path , fmt ) elif backend == 'graph-tool' : save_graphtool ( self , path , fmt ) elif backend is None : try : save_networkx ( self , path , fmt ) except ImportError : try : save_graphtool ( self , path , fmt ) except ImportError : raise ImportError ( 'Cannot import networkx nor graph-tool.' ) else : raise ValueError ( 'Unknown backend {}.' . format ( backend ) ) | r Save the graph to a file . |
22,042 | def loadmat ( path ) : r data = pkgutil . get_data ( 'pygsp' , 'data/' + path + '.mat' ) data = io . BytesIO ( data ) return scipy . io . loadmat ( data ) | r Load a matlab data file . |
22,043 | def distanz ( x , y = None ) : r try : x . shape [ 1 ] except IndexError : x = x . reshape ( 1 , x . shape [ 0 ] ) if y is None : y = x else : try : y . shape [ 1 ] except IndexError : y = y . reshape ( 1 , y . shape [ 0 ] ) rx , cx = x . shape ry , cy = y . shape if rx != ry : raise ValueError ( "The sizes of x and y do not fit" ) xx = ( x * x ) . sum ( axis = 0 ) yy = ( y * y ) . sum ( axis = 0 ) xy = np . dot ( x . T , y ) d = abs ( np . kron ( np . ones ( ( cy , 1 ) ) , xx ) . T + np . kron ( np . ones ( ( cx , 1 ) ) , yy ) - 2 * xy ) return np . sqrt ( d ) | r Calculate the distance between two colon vectors . |
22,044 | def resistance_distance ( G ) : r if sparse . issparse ( G ) : L = G . tocsc ( ) else : if G . lap_type != 'combinatorial' : raise ValueError ( 'Need a combinatorial Laplacian.' ) L = G . L . tocsc ( ) try : pseudo = sparse . linalg . inv ( L ) except RuntimeError : pseudo = sparse . lil_matrix ( np . linalg . pinv ( L . toarray ( ) ) ) N = np . shape ( L ) [ 0 ] d = sparse . csc_matrix ( pseudo . diagonal ( ) ) rd = sparse . kron ( d , sparse . csc_matrix ( np . ones ( ( N , 1 ) ) ) ) . T + sparse . kron ( d , sparse . csc_matrix ( np . ones ( ( N , 1 ) ) ) ) - pseudo - pseudo . T return rd | r Compute the resistance distances of a graph . |
22,045 | def symmetrize ( W , method = 'average' ) : r if W . shape [ 0 ] != W . shape [ 1 ] : raise ValueError ( 'Matrix must be square.' ) if method == 'average' : return ( W + W . T ) / 2 elif method == 'maximum' : if sparse . issparse ( W ) : bigger = ( W . T > W ) return W - W . multiply ( bigger ) + W . T . multiply ( bigger ) else : return np . maximum ( W , W . T ) elif method == 'fill' : A = ( W > 0 ) if sparse . issparse ( W ) : mask = ( A + A . T ) - A W = W + mask . multiply ( W . T ) else : mask = np . logical_xor ( np . logical_or ( A , A . T ) , A ) W = W + mask * W . T return symmetrize ( W , method = 'average' ) elif method in [ 'tril' , 'triu' ] : if sparse . issparse ( W ) : tri = getattr ( sparse , method ) else : tri = getattr ( np , method ) W = tri ( W ) return symmetrize ( W , method = 'maximum' ) else : raise ValueError ( 'Unknown symmetrization method {}.' . format ( method ) ) | r Symmetrize a square matrix . |
22,046 | def compute_log_scales ( lmin , lmax , Nscales , t1 = 1 , t2 = 2 ) : r scale_min = t1 / lmax scale_max = t2 / lmin return np . exp ( np . linspace ( np . log ( scale_max ) , np . log ( scale_min ) , Nscales ) ) | r Compute logarithm scales for wavelets . |
22,047 | def import_modules ( names , src , dst ) : for name in names : module = importlib . import_module ( src + '.' + name ) setattr ( sys . modules [ dst ] , name , module ) | Import modules in package . |
22,048 | def import_classes ( names , src , dst ) : for name in names : module = importlib . import_module ( 'pygsp.' + src + '.' + name . lower ( ) ) setattr ( sys . modules [ 'pygsp.' + dst ] , name , getattr ( module , name ) ) | Import classes in package from their implementation modules . |
22,049 | def import_functions ( names , src , dst ) : for name in names : module = importlib . import_module ( 'pygsp.' + src ) setattr ( sys . modules [ 'pygsp.' + dst ] , name , getattr ( module , name ) ) | Import functions in package from their implementation modules . |
22,050 | def _handle_api_result ( result : Optional [ Dict [ str , Any ] ] ) -> Any : if isinstance ( result , dict ) : if result . get ( 'status' ) == 'failed' : raise ActionFailed ( retcode = result . get ( 'retcode' ) ) return result . get ( 'data' ) | Retrieve data field from the API result object . |
22,051 | def reduce ( self ) -> None : idx = 0 while idx < len ( self ) : if idx > 0 and self [ idx - 1 ] . type == 'text' and self [ idx ] . type == 'text' : self [ idx - 1 ] . data [ 'text' ] += self [ idx ] . data [ 'text' ] del self [ idx ] else : idx += 1 | Remove redundant segments . |
22,052 | def extract_plain_text ( self , reduce : bool = False ) -> str : if reduce : self . reduce ( ) result = '' for seg in self : if seg . type == 'text' : result += ' ' + seg . data [ 'text' ] if result : result = result [ 1 : ] return result | Extract text segments from the message joined by single space . |
22,053 | def send_html_mail ( subject , message , message_html , from_email , recipient_list , priority = None , fail_silently = False , auth_user = None , auth_password = None , headers = { } ) : from django . utils . encoding import force_text from django . core . mail import EmailMultiAlternatives from mailer . models import make_message priority = get_priority ( priority ) subject = force_text ( subject ) message = force_text ( message ) msg = make_message ( subject = subject , body = message , from_email = from_email , to = recipient_list , priority = priority ) email = msg . email email = EmailMultiAlternatives ( email . subject , email . body , email . from_email , email . to , headers = headers ) email . attach_alternative ( message_html , "text/html" ) msg . email = email msg . save ( ) return 1 | Function to queue HTML e - mails |
22,054 | def make_message ( subject = "" , body = "" , from_email = None , to = None , bcc = None , attachments = None , headers = None , priority = None ) : to = filter_recipient_list ( to ) bcc = filter_recipient_list ( bcc ) core_msg = EmailMessage ( subject = subject , body = body , from_email = from_email , to = to , bcc = bcc , attachments = attachments , headers = headers ) db_msg = Message ( priority = priority ) db_msg . email = core_msg return db_msg | Creates a simple message for the email parameters supplied . The to and bcc lists are filtered using DontSendEntry . |
22,055 | def has_address ( self , address ) : queryset = self . filter ( to_address__iexact = address ) return queryset . exists ( ) | is the given address on the don t send list? |
22,056 | def prioritize ( ) : while True : hp_qs = Message . objects . high_priority ( ) . using ( 'default' ) mp_qs = Message . objects . medium_priority ( ) . using ( 'default' ) lp_qs = Message . objects . low_priority ( ) . using ( 'default' ) while hp_qs . count ( ) or mp_qs . count ( ) : while hp_qs . count ( ) : for message in hp_qs . order_by ( "when_added" ) : yield message while hp_qs . count ( ) == 0 and mp_qs . count ( ) : yield mp_qs . order_by ( "when_added" ) [ 0 ] while hp_qs . count ( ) == 0 and mp_qs . count ( ) == 0 and lp_qs . count ( ) : yield lp_qs . order_by ( "when_added" ) [ 0 ] if Message . objects . non_deferred ( ) . using ( 'default' ) . count ( ) == 0 : break | Yield the messages in the queue in the order they should be sent . |
22,057 | def send_all ( ) : EMAIL_BACKEND = getattr ( settings , "MAILER_EMAIL_BACKEND" , "django.core.mail.backends.smtp.EmailBackend" ) acquired , lock = acquire_lock ( ) if not acquired : return start_time = time . time ( ) deferred = 0 sent = 0 try : connection = None for message in prioritize ( ) : try : if connection is None : connection = get_connection ( backend = EMAIL_BACKEND ) logging . info ( "sending message '{0}' to {1}" . format ( message . subject , ", " . join ( message . to_addresses ) ) ) email = message . email if email is not None : email . connection = connection if not hasattr ( email , 'reply_to' ) : email . reply_to = [ ] ensure_message_id ( email ) email . send ( ) email . connection = None message . email = email MessageLog . objects . log ( message , RESULT_SUCCESS ) sent += 1 else : logging . warning ( "message discarded due to failure in converting from DB. Added on '%s' with priority '%s'" % ( message . when_added , message . priority ) ) message . delete ( ) except ( socket_error , smtplib . SMTPSenderRefused , smtplib . SMTPRecipientsRefused , smtplib . SMTPDataError , smtplib . SMTPAuthenticationError ) as err : message . defer ( ) logging . info ( "message deferred due to failure: %s" % err ) MessageLog . objects . log ( message , RESULT_FAILURE , log_message = str ( err ) ) deferred += 1 connection = None if _limits_reached ( sent , deferred ) : break _throttle_emails ( ) finally : release_lock ( lock ) logging . info ( "" ) logging . info ( "%s sent; %s deferred;" % ( sent , deferred ) ) logging . info ( "done in %.2f seconds" % ( time . time ( ) - start_time ) ) | Send all eligible messages in the queue . |
22,058 | def send_loop ( ) : while True : while not Message . objects . all ( ) : logging . debug ( "sleeping for %s seconds before checking queue again" % EMPTY_QUEUE_SLEEP ) time . sleep ( EMPTY_QUEUE_SLEEP ) send_all ( ) | Loop indefinitely checking queue at intervals of EMPTY_QUEUE_SLEEP and sending messages if any are on queue . |
22,059 | def import_name ( name ) : if isinstance ( name , str ) : components = name . split ( '.' ) mod = __import__ ( '.' . join ( components [ 0 : - 1 ] ) , globals ( ) , locals ( ) , [ components [ - 1 ] ] ) return getattr ( mod , components [ - 1 ] ) else : return name | import module given by str or pass the module if it is not str |
22,060 | def copy_plan ( modeladmin , request , queryset ) : for plan in queryset : plan_copy = deepcopy ( plan ) plan_copy . id = None plan_copy . available = False plan_copy . default = False plan_copy . created = None plan_copy . save ( force_insert = True ) for pricing in plan . planpricing_set . all ( ) : pricing . id = None pricing . plan = plan_copy pricing . save ( force_insert = True ) for quota in plan . planquota_set . all ( ) : quota . id = None quota . plan = plan_copy quota . save ( force_insert = True ) | Admin command for duplicating plans preserving quotas and pricings . |
22,061 | def recalculate ( self , amount , billing_info ) : order = Order ( pk = - 1 ) order . amount = amount order . currency = self . get_currency ( ) country = getattr ( billing_info , 'country' , None ) if not country is None : country = country . code tax_number = getattr ( billing_info , 'tax_number' , None ) tax_session_key = "tax_%s_%s" % ( tax_number , country ) tax = self . request . session . get ( tax_session_key ) if tax is None : taxation_policy = getattr ( settings , 'PLANS_TAXATION_POLICY' , None ) if not taxation_policy : raise ImproperlyConfigured ( 'PLANS_TAXATION_POLICY is not set' ) taxation_policy = import_name ( taxation_policy ) tax = str ( taxation_policy . get_tax_rate ( tax_number , country ) ) self . request . session [ tax_session_key ] = tax order . tax = Decimal ( tax ) if tax != 'None' else None return order | Calculates and return pre - filled Order |
22,062 | def get_all_context ( self ) : self . plan_pricing = get_object_or_404 ( PlanPricing . objects . all ( ) . select_related ( 'plan' , 'pricing' ) , Q ( pk = self . kwargs [ 'pk' ] ) & Q ( plan__available = True ) & ( Q ( plan__customized = self . request . user ) | Q ( plan__customized__isnull = True ) ) ) if not self . request . user . userplan . is_expired ( ) and self . request . user . userplan . plan != self . plan_pricing . plan : raise Http404 self . plan = self . plan_pricing . plan self . pricing = self . plan_pricing . pricing | Retrieves Plan and Pricing for current order creation |
22,063 | def create_proforma_invoice ( sender , instance , created , ** kwargs ) : if created : Invoice . create ( instance , Invoice . INVOICE_TYPES [ 'PROFORMA' ] ) | For every Order if there are defined billing_data creates invoice proforma which is an order confirmation document |
22,064 | def get_quota_value ( self , user , quota_dict = None ) : if quota_dict is None : quota_dict = get_user_quota ( user ) return quota_dict . get ( self . code , self . default_quota_value ) | Returns quota value for a given user |
22,065 | def send_template_email ( recipients , title_template , body_template , context , language ) : send_emails = getattr ( settings , 'SEND_PLANS_EMAILS' , True ) if not send_emails : return site_name = getattr ( settings , 'SITE_NAME' , 'Please define settings.SITE_NAME' ) domain = getattr ( settings , 'SITE_URL' , None ) if domain is None : try : Site = apps . get_model ( 'sites' , 'Site' ) current_site = Site . objects . get_current ( ) site_name = current_site . name domain = current_site . domain except LookupError : pass context . update ( { 'site_name' : site_name , 'site_domain' : domain } ) if language is not None : translation . activate ( language ) mail_title_template = loader . get_template ( title_template ) mail_body_template = loader . get_template ( body_template ) title = mail_title_template . render ( context ) body = mail_body_template . render ( context ) try : email_from = getattr ( settings , 'DEFAULT_FROM_EMAIL' ) except AttributeError : raise ImproperlyConfigured ( 'DEFAULT_FROM_EMAIL setting needed for sending e-mails' ) mail . send_mail ( title , body , email_from , recipients ) if language is not None : translation . deactivate ( ) email_logger . info ( u"Email (%s) sent to %s\nTitle: %s\n%s\n\n" % ( language , recipients , title , body ) ) | Sends e - mail using templating system |
22,066 | def _calculate_day_cost ( self , plan , period ) : plan_pricings = plan . planpricing_set . order_by ( '-pricing__period' ) . select_related ( 'pricing' ) selected_pricing = None for plan_pricing in plan_pricings : selected_pricing = plan_pricing if plan_pricing . pricing . period <= period : break if selected_pricing : return ( selected_pricing . price / selected_pricing . pricing . period ) . quantize ( Decimal ( '1.00' ) ) raise ValueError ( 'Plan %s has no pricings.' % plan ) | Finds most fitted plan pricing for a given period and calculate day cost |
22,067 | def get_change_price ( self , plan_old , plan_new , period ) : if period is None or period < 1 : return None plan_old_day_cost = self . _calculate_day_cost ( plan_old , period ) plan_new_day_cost = self . _calculate_day_cost ( plan_new , period ) if plan_new_day_cost <= plan_old_day_cost : return self . _calculate_final_price ( period , None ) else : return self . _calculate_final_price ( period , plan_new_day_cost - plan_old_day_cost ) | Calculates total price of plan change . Returns None if no payment is required . |
22,068 | def comparator ( operator ) : @ wraps ( operator ) def wrapper ( self , other ) : if not isinstance ( other , ( VersionInfo , dict ) ) : return NotImplemented return operator ( self , other ) return wrapper | Wrap a VersionInfo binary op method in a type - check |
22,069 | def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics ) | Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . |
22,070 | def dt_comp ( self , sampled_topics ) : samples = sampled_topics . shape [ 0 ] dt = np . zeros ( ( self . D , self . K , samples ) ) for s in range ( samples ) : dt [ : , : , s ] = samplers_lda . dt_comp ( self . docid , sampled_topics [ s , : ] , self . N , self . K , self . D , self . alpha ) return dt | Compute document - topic matrix from sampled_topics . |
22,071 | def tt_comp ( self , sampled_topics ) : samples = sampled_topics . shape [ 0 ] tt = np . zeros ( ( self . V , self . K , samples ) ) for s in range ( samples ) : tt [ : , : , s ] = samplers_lda . tt_comp ( self . tokens , sampled_topics [ s , : ] , self . N , self . V , self . K , self . beta ) return tt | Compute term - topic matrix from sampled_topics . |
22,072 | def topic_content ( self , W , output_file = "topic_description.csv" ) : topic_top_probs = [ ] topic_top_words = [ ] tt = self . tt_avg ( False ) for t in range ( self . K ) : top_word_indices = list ( tt [ : , t ] . argsort ( ) [ - W : ] [ : : - 1 ] ) topic_top_probs . append ( np . round ( np . sort ( tt [ : , t ] ) [ - W : ] [ : : - 1 ] , 3 ) ) topic_top_words . append ( [ list ( self . token_key . keys ( ) ) [ list ( self . token_key . values ( ) ) . index ( i ) ] for i in top_word_indices ] ) with codecs . open ( output_file , "w" , "utf-8" ) as f : for t in range ( self . K ) : words = ',' . join ( topic_top_words [ t ] ) probs = ',' . join ( [ str ( i ) for i in topic_top_probs [ t ] ] ) f . write ( "topic" + str ( t ) + ',' ) f . write ( "%s\n" % words ) f . write ( " " + ',' ) f . write ( "%s\n" % probs ) | Print top W words in each topic to file . |
22,073 | def perplexity ( self ) : return samplers_lda . perplexity_comp ( self . docid , self . tokens , self . tt , self . dt , self . N , self . K , self . samples ) | Compute perplexity for each sample . |
22,074 | def samples_keep ( self , index ) : try : if isinstance ( index , ( int , long ) ) : index = range ( self . samples ) [ - index : ] except ( NameError ) : if isinstance ( index , int ) : index = range ( self . samples ) [ - index : ] self . sampled_topics = np . take ( self . sampled_topics , index , axis = 0 ) self . tt = np . take ( self . tt , index , axis = 2 ) self . dt = np . take ( self . dt , index , axis = 2 ) self . samples = len ( index ) | Keep subset of samples . If index is an integer keep last N = index samples . If index is a list keep the samples corresponding to the index values in the list . |
22,075 | def tt_avg ( self , print_output = True , output_file = "tt.csv" ) : avg = self . tt . mean ( axis = 2 ) if print_output : np . savetxt ( output_file , avg , delimiter = "," ) return avg | Compute average term - topic matrix and print to file if print_output = True . |
22,076 | def dict_print ( self , output_file = "dict.csv" ) : with codecs . open ( output_file , "w" , encoding = 'utf-8' ) as f : for ( v , k ) in self . token_key . items ( ) : f . write ( "%s,%d\n" % ( v , k ) ) | Print mapping from tokens to numeric indices . |
22,077 | def query ( self , query_samples ) : self . sampled_topics = np . zeros ( ( self . samples , self . N ) , dtype = np . int ) for s in range ( self . samples ) : self . sampled_topics [ s , : ] = samplers_lda . sampler_query ( self . docid , self . tokens , self . topic_seed , np . ascontiguousarray ( self . tt [ : , : , s ] , dtype = np . float ) , self . N , self . K , self . D , self . alpha , query_samples ) print ( "Sample %d queried" % s ) self . dt = np . zeros ( ( self . D , self . K , self . samples ) ) for s in range ( self . samples ) : self . dt [ : , : , s ] = samplers_lda . dt_comp ( self . docid , self . sampled_topics [ s , : ] , self . N , self . K , self . D , self . alpha ) | Query docs with query_samples number of Gibbs sampling iterations . |
22,078 | def dt_avg ( self , print_output = True , output_file = "dt_query.csv" ) : avg = self . dt . mean ( axis = 2 ) if print_output : np . savetxt ( output_file , avg , delimiter = "," ) return avg | Compute average document - topic matrix and print to file if print_output = True . |
22,079 | def phrase_replace ( self , replace_dict ) : def r ( tokens ) : text = ' ' + ' ' . join ( tokens ) for k , v in replace_dict . items ( ) : text = text . replace ( " " + k + " " , " " + v + " " ) return text . split ( ) self . stems = list ( map ( r , self . stems ) ) | Replace phrases with single token mapping defined in replace_dict |
22,080 | def stem ( self ) : def s ( tokens ) : return [ PorterStemmer ( ) . stem ( t ) for t in tokens ] self . stems = list ( map ( s , self . tokens ) ) | Stem tokens with Porter Stemmer . |
22,081 | def bigram ( self , items ) : def bigram_join ( tok_list ) : text = nltk . bigrams ( tok_list ) return list ( map ( lambda x : x [ 0 ] + '.' + x [ 1 ] , text ) ) if items == "tokens" : self . bigrams = list ( map ( bigram_join , self . tokens ) ) elif items == "stems" : self . bigrams = list ( map ( bigram_join , self . stems ) ) else : raise ValueError ( "Items must be either \'tokens\' or \'stems\'." ) | generate bigrams of either items = tokens or stems |
22,082 | def E_step ( self ) : if not hasattr ( self , 'type_prob' ) : self . type_prob = np . empty ( ( self . N , self . K ) ) temp_probs = np . zeros ( ( self . N , self . K ) ) for i in range ( self . N ) : for k in range ( self . K ) : temp_probs [ i , k ] = np . log ( self . rho [ k ] ) + np . dot ( self . feature_counts [ i , : ] , np . log ( self . mu [ k , : ] ) ) temp_probsZ = temp_probs - np . max ( temp_probs , axis = 1 ) [ : , np . newaxis ] self . type_prob = np . exp ( temp_probsZ ) / np . exp ( temp_probsZ ) . sum ( axis = 1 ) [ : , np . newaxis ] return np . log ( np . exp ( temp_probsZ ) . sum ( axis = 1 ) ) . sum ( ) + np . max ( temp_probs , axis = 1 ) . sum ( ) | compute type probabilities given current parameter estimates . |
22,083 | def M_step ( self ) : for k in range ( self . K ) : self . rho [ k ] = self . type_prob [ : , k ] . sum ( ) / self . N for k in range ( self . K ) : for m in range ( self . M ) : temp_prob = np . dot ( self . type_prob [ : , k ] , self . feature_counts [ : , m ] ) if temp_prob < 1e-99 : temp_prob = 1e-99 self . mu [ k , m ] = temp_prob / np . dot ( self . type_prob [ : , k ] , self . observations ) | generate new parameter estimates given updated type distribution |
22,084 | def estimate ( self , maxiter = 250 , convergence = 1e-7 ) : self . loglik = np . zeros ( maxiter ) iter = 0 while iter < maxiter : self . loglik [ iter ] = self . E_step ( ) if np . isnan ( self . loglik [ iter ] ) : print ( "undefined log-likelihood" ) break self . M_step ( ) if self . loglik [ iter ] - self . loglik [ iter - 1 ] < 0 and iter > 0 : print ( "log-likelihood decreased by %f at iteration %d" % ( self . loglik [ iter ] - self . loglik [ iter - 1 ] , iter ) ) elif self . loglik [ iter ] - self . loglik [ iter - 1 ] < convergence and iter > 0 : print ( "convergence at iteration %d, loglik = %f" % ( iter , self . loglik [ iter ] ) ) self . loglik = self . loglik [ self . loglik < 0 ] break iter += 1 | run EM algorithm until convergence or until maxiter reached |
22,085 | def get_content ( path ) : with codecs . open ( abs_path ( path ) , encoding = 'utf-8' ) as f : return f . read ( ) | Get content of file . |
22,086 | def sanitize_dataframe ( df ) : import pandas as pd import numpy as np df = df . copy ( ) if isinstance ( df . index , pd . core . index . MultiIndex ) : raise ValueError ( 'Hierarchical indices not supported' ) if isinstance ( df . columns , pd . core . index . MultiIndex ) : raise ValueError ( 'Hierarchical indices not supported' ) def to_list_if_array ( val ) : if isinstance ( val , np . ndarray ) : return val . tolist ( ) else : return val for col_name , dtype in df . dtypes . iteritems ( ) : if str ( dtype ) == 'category' : df [ col_name ] = df [ col_name ] . astype ( str ) elif str ( dtype ) == 'bool' : df [ col_name ] = df [ col_name ] . astype ( object ) elif np . issubdtype ( dtype , np . integer ) : df [ col_name ] = df [ col_name ] . astype ( object ) elif np . issubdtype ( dtype , np . floating ) : col = df [ col_name ] bad_values = col . isnull ( ) | np . isinf ( col ) df [ col_name ] = col . astype ( object ) . where ( ~ bad_values , None ) elif str ( dtype ) . startswith ( 'datetime' ) : df [ col_name ] = df [ col_name ] . astype ( str ) . replace ( 'NaT' , '' ) elif dtype == object : col = df [ col_name ] . apply ( to_list_if_array , convert_dtype = False ) df [ col_name ] = col . where ( col . notnull ( ) , None ) return df | Sanitize a DataFrame to prepare it for serialization . |
22,087 | def prepare_spec ( spec , data = None ) : import pandas as pd if isinstance ( data , pd . DataFrame ) : data = sanitize_dataframe ( data ) spec [ 'data' ] = { 'values' : data . to_dict ( orient = 'records' ) } elif data is None : pass else : data = pd . DataFrame ( data ) data = sanitize_dataframe ( data ) spec [ 'data' ] = { 'values' : data . to_dict ( orient = 'records' ) } return spec | Prepare a Vega - Lite spec for sending to the frontend . |
22,088 | def _repr_mimebundle_ ( self , include = None , exclude = None ) : id = uuid . uuid4 ( ) return ( { 'application/javascript' : self . _generate_js ( id ) } , { 'jupyter-vega' : '#{0}' . format ( id ) } , ) | Display the visualization in the Jupyter notebook . |
22,089 | def pass_creds_to_nylas ( ) : if not google . authorized : return "Error: not yet connected with Google!" , 400 if "refresh_token" not in google . token : return ( ( "Error: missing Google refresh token. " "Uncomment the `reprompt_consent` line in the code to fix this." ) , 500 , ) google_resp = google . get ( "/oauth2/v2/userinfo?fields=name,email" ) assert google_resp . ok , "Received failure response from Google userinfo API" google_userinfo = google_resp . json ( ) nylas_authorize_data = { "client_id" : app . config [ "NYLAS_OAUTH_CLIENT_ID" ] , "name" : google_userinfo [ "name" ] , "email_address" : google_userinfo [ "email" ] , "provider" : "gmail" , "settings" : { "google_client_id" : app . config [ "GOOGLE_OAUTH_CLIENT_ID" ] , "google_client_secret" : app . config [ "GOOGLE_OAUTH_CLIENT_SECRET" ] , "google_refresh_token" : google . token [ "refresh_token" ] , } , } nylas_authorize_resp = requests . post ( "https://api.nylas.com/connect/authorize" , json = nylas_authorize_data ) assert nylas_authorize_resp . ok , "Received failure response from Nylas authorize API" nylas_code = nylas_authorize_resp . json ( ) [ "code" ] nylas_token_data = { "client_id" : app . config [ "NYLAS_OAUTH_CLIENT_ID" ] , "client_secret" : app . config [ "NYLAS_OAUTH_CLIENT_SECRET" ] , "code" : nylas_code , } nylas_token_resp = requests . post ( "https://api.nylas.com/connect/token" , json = nylas_token_data ) assert nylas_token_resp . ok , "Received failure response from Nylas token API" nylas_access_token = nylas_token_resp . json ( ) [ "access_token" ] session [ "nylas_access_token" ] = nylas_access_token return redirect ( url_for ( "index" ) ) | This view loads the credentials from Google and passes them to Nylas to set up native authentication . |
22,090 | def pass_creds_to_nylas ( name , email , password , server_host = None ) : nylas_authorize_data = { "client_id" : app . config [ "NYLAS_OAUTH_CLIENT_ID" ] , "name" : name , "email_address" : email , "provider" : "exchange" , "settings" : { "username" : email , "password" : password } , } if server_host : nylas_authorize_data [ "settings" ] [ "eas_server_host" ] = server_host nylas_authorize_resp = requests . post ( "https://api.nylas.com/connect/authorize" , json = nylas_authorize_data ) if not nylas_authorize_resp . ok : message = nylas_authorize_resp . json ( ) [ "message" ] raise APIError ( message ) nylas_code = nylas_authorize_resp . json ( ) [ "code" ] nylas_token_data = { "client_id" : app . config [ "NYLAS_OAUTH_CLIENT_ID" ] , "client_secret" : app . config [ "NYLAS_OAUTH_CLIENT_SECRET" ] , "code" : nylas_code , } nylas_token_resp = requests . post ( "https://api.nylas.com/connect/token" , json = nylas_token_data ) if not nylas_token_resp . ok : message = nylas_token_resp . json ( ) [ "message" ] raise APIError ( message ) nylas_access_token = nylas_token_resp . json ( ) [ "access_token" ] session [ "nylas_access_token" ] = nylas_access_token return redirect ( url_for ( "success" ) ) | Passes Exchange credentials to Nylas to set up native authentication . |
22,091 | def _get_resource_raw ( self , cls , id , extra = None , headers = None , stream = False , ** filters ) : headers = headers or { } headers . update ( self . session . headers ) postfix = "/{}" . format ( extra ) if extra else "" if cls . api_root != "a" : url = "{}/{}/{}{}" . format ( self . api_server , cls . collection_name , id , postfix ) else : url = "{}/a/{}/{}/{}{}" . format ( self . api_server , self . app_id , cls . collection_name , id , postfix ) converted_filters = convert_datetimes_to_timestamps ( filters , cls . datetime_filter_attrs ) url = str ( URLObject ( url ) . add_query_params ( converted_filters . items ( ) ) ) response = self . _get_http_session ( cls . api_root ) . get ( url , headers = headers , stream = stream ) return _validate ( response ) | Get an individual REST resource |
22,092 | def convert_datetimes_to_timestamps ( data , datetime_attrs ) : if not data : return data new_data = { } for key , value in data . items ( ) : if key in datetime_attrs and isinstance ( value , datetime ) : new_key = datetime_attrs [ key ] new_data [ new_key ] = timestamp_from_dt ( value ) else : new_data [ key ] = value return new_data | Given a dictionary of data and a dictionary of datetime attributes return a new dictionary that converts any datetime attributes that may be present to their timestamped equivalent . |
22,093 | def verify_signature ( message , key , signature ) : digest = hmac . new ( key , msg = message , digestmod = hashlib . sha256 ) . hexdigest ( ) return digest == signature | This function will verify the authenticity of a digital signature . For security purposes Nylas includes a digital signature in the headers of every webhook notification so that clients can verify that the webhook request came from Nylas and no one else . The signing key is your OAuth client secret which only you and Nylas know . |
22,094 | def process_delta ( delta ) : kwargs = { "type" : delta [ "type" ] , "date" : datetime . datetime . utcfromtimestamp ( delta [ "date" ] ) , "object_id" : delta [ "object_data" ] [ "id" ] , } print ( " * {type} at {date} with ID {object_id}" . format ( ** kwargs ) ) | This is the part of the code where you would process the information from the webhook notification . Each delta is one change that happened and might require fetching message IDs updating your database and so on . |
22,095 | def error_respond ( self ) : response = JSONRPCErrorResponse ( ) response . error = self . message response . unique_id = None response . _jsonrpc_error_code = self . jsonrpc_error_code if hasattr ( self , 'data' ) : response . data = self . data return response | Converts the error to an error response object . |
22,096 | def error_respond ( self , error ) : if self . unique_id is None : return None response = JSONRPCErrorResponse ( ) response . unique_id = None if self . one_way else self . unique_id code , msg , data = _get_code_message_and_data ( error ) response . error = msg response . _jsonrpc_error_code = code if data : response . data = data return response | Create an error response to this request . |
22,097 | def respond ( self , result ) : if self . one_way or self . unique_id is None : return None response = JSONRPCSuccessResponse ( ) response . result = result response . unique_id = self . unique_id return response | Create a response to this request . |
22,098 | def parse_reply ( self , data ) : if isinstance ( data , bytes ) : data = data . decode ( ) try : rep = json . loads ( data ) except Exception as e : raise InvalidReplyError ( e ) for k in rep . keys ( ) : if not k in self . _ALLOWED_REPLY_KEYS : raise InvalidReplyError ( 'Key not allowed: %s' % k ) if 'jsonrpc' not in rep : raise InvalidReplyError ( 'Missing jsonrpc (version) in response.' ) if rep [ 'jsonrpc' ] != self . JSON_RPC_VERSION : raise InvalidReplyError ( 'Wrong JSONRPC version' ) if 'id' not in rep : raise InvalidReplyError ( 'Missing id in response' ) if ( 'error' in rep ) and ( 'result' in rep ) : raise InvalidReplyError ( 'Reply must contain exactly one of result and error.' ) if 'error' in rep : response = JSONRPCErrorResponse ( ) error = rep [ 'error' ] response . error = error [ "message" ] response . _jsonrpc_error_code = error [ "code" ] if "data" in error : response . data = error [ "data" ] else : response = JSONRPCSuccessResponse ( ) response . result = rep . get ( 'result' , None ) response . unique_id = rep [ 'id' ] return response | Deserializes and validates a response . |
22,099 | def parse_request ( self , data ) : if isinstance ( data , bytes ) : data = data . decode ( ) try : req = json . loads ( data ) except Exception as e : raise JSONRPCParseError ( ) if isinstance ( req , list ) : requests = JSONRPCBatchRequest ( ) for subreq in req : try : requests . append ( self . _parse_subrequest ( subreq ) ) except RPCError as e : requests . append ( e ) except Exception as e : requests . append ( JSONRPCInvalidRequestError ( ) ) if not requests : raise JSONRPCInvalidRequestError ( ) return requests else : return self . _parse_subrequest ( req ) | Deserializes and validates a request . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.