idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
27,300
def check_matching_blocks ( * arrays ) : if len ( arrays ) <= 1 : return if all ( isinstance ( x , da . Array ) for x in arrays ) : chunks = arrays [ 0 ] . chunks for array in arrays [ 1 : ] : if array . chunks != chunks : raise ValueError ( "Mismatched chunks. {} != {}" . format ( chunks , array . chunks ) ) elif all ...
Check that the partitioning structure for many arrays matches .
27,301
def check_chunks ( n_samples , n_features , chunks = None ) : if chunks is None : chunks = ( max ( 100 , n_samples // cpu_count ( ) ) , n_features ) elif isinstance ( chunks , Integral ) : chunks = ( max ( 100 , n_samples // chunks ) , n_features ) elif isinstance ( chunks , Sequence ) : chunks = tuple ( chunks ) if le...
Validate and normalize the chunks argument for a dask . array
27,302
def get_scorer ( scoring , compute = True ) : if isinstance ( scoring , six . string_types ) : try : scorer , kwargs = SCORERS [ scoring ] except KeyError : raise ValueError ( "{} is not a valid scoring value. " "Valid options are {}" . format ( scoring , sorted ( SCORERS ) ) ) else : scorer = scoring kwargs = { } kwar...
Get a scorer from string
27,303
def to_indexable ( * args , ** kwargs ) : if kwargs . get ( "allow_scalars" , False ) : indexable = _maybe_indexable else : indexable = _indexable for x in args : if x is None or isinstance ( x , ( da . Array , dd . DataFrame ) ) : yield x elif is_dask_collection ( x ) : yield delayed ( indexable , pure = True ) ( x ) ...
Ensure that all args are an indexable type .
27,304
def _index_param_value ( num_samples , v , indices ) : if not _is_arraylike ( v ) or _num_samples ( v ) != num_samples : return v if sp . issparse ( v ) : v = v . tocsr ( ) return safe_indexing ( v , indices )
Private helper function for parameter value indexing .
27,305
def add_warning ( self , key , * args , ** kwargs ) : self . _deprecations [ key ] = ( args , kwargs )
Add a warning to be triggered when the specified key is read
27,306
def _construct ( x , categories ) : data = np . ones ( len ( x ) ) rows = np . arange ( len ( x ) ) columns = x . ravel ( ) return scipy . sparse . csr_matrix ( ( data , ( rows , columns ) ) , shape = ( len ( x ) , len ( categories ) ) )
Make a sparse matrix from an encoded array .
27,307
def _encode_dask_array ( values , uniques = None , encode = False , onehot_dtype = None ) : if uniques is None : if encode and onehot_dtype : raise ValueError ( "Cannot use 'encode` and 'onehot_dtype' simultaneously." ) if encode : uniques , encoded = da . unique ( values , return_inverse = True ) return uniques , enco...
One - hot or label encode a dask array .
27,308
def pipeline ( names , steps ) : steps , times = zip ( * map ( _maybe_timed , steps ) ) fit_time = sum ( times ) if any ( s is FIT_FAILURE for s in steps ) : fit_est = FIT_FAILURE else : fit_est = Pipeline ( list ( zip ( names , steps ) ) ) return fit_est , fit_time
Reconstruct a Pipeline from names and steps
27,309
def feature_union ( names , steps , weights ) : steps , times = zip ( * map ( _maybe_timed , steps ) ) fit_time = sum ( times ) if any ( s is FIT_FAILURE for s in steps ) : fit_est = FIT_FAILURE else : fit_est = FeatureUnion ( list ( zip ( names , steps ) ) , transformer_weights = weights ) return fit_est , fit_time
Reconstruct a FeatureUnion from names steps and weights
27,310
def feature_union_concat ( Xs , nsamples , weights ) : if any ( x is FIT_FAILURE for x in Xs ) : return FIT_FAILURE Xs = [ X if w is None else X * w for X , w in zip ( Xs , weights ) if X is not None ] if not Xs : return np . zeros ( ( nsamples , 0 ) ) if any ( sparse . issparse ( f ) for f in Xs ) : return sparse . hs...
Apply weights and concatenate outputs from a FeatureUnion
27,311
def _generate_idx ( n , seed , n_train , n_test ) : idx = check_random_state ( seed ) . permutation ( n ) ind_test = idx [ : n_test ] ind_train = idx [ n_test : n_train + n_test ] return ind_train , ind_test
Generate train test indices for a length - n array .
27,312
def _blockwise_slice ( arr , idx ) : objs = [ ] offsets = np . hstack ( [ 0 , np . cumsum ( arr . chunks [ 0 ] ) [ : - 1 ] ] ) for i , ( x , idx2 ) in enumerate ( zip ( arr . to_delayed ( ) . ravel ( ) , idx . to_delayed ( ) . ravel ( ) ) ) : idx3 = idx2 - offsets [ i ] objs . append ( x [ idx3 ] ) shapes = idx . chunk...
Slice an array that is blockwise - aligned with idx .
27,313
def transform ( self , X ) : msg = "'X' should be a 1-dimensional array with length 'num_samples'." if not dask . is_dask_collection ( X ) : return super ( HashingVectorizer , self ) . transform ( X ) if isinstance ( X , db . Bag ) : bag2 = X . map_partitions ( _transform , estimator = self ) objs = bag2 . to_delayed (...
Transform a sequence of documents to a document - term matrix .
27,314
def correct_peaks ( sig , peak_inds , search_radius , smooth_window_size , peak_dir = 'compare' ) : sig_len = sig . shape [ 0 ] n_peaks = len ( peak_inds ) sig = sig - smooth ( sig = sig , window_size = smooth_window_size ) if peak_dir == 'up' : shifted_peak_inds = shift_peaks ( sig = sig , peak_inds = peak_inds , sear...
Adjust a set of detected peaks to coincide with local signal maxima and
27,315
def shift_peaks ( sig , peak_inds , search_radius , peak_up ) : sig_len = sig . shape [ 0 ] n_peaks = len ( peak_inds ) shift_inds = np . zeros ( n_peaks , dtype = 'int' ) for i in range ( n_peaks ) : ind = peak_inds [ i ] local_sig = sig [ max ( 0 , ind - search_radius ) : min ( ind + search_radius , sig_len - 1 ) ] i...
Helper function for correct_peaks . Return the shifted peaks to local maxima or minima within a radius .
27,316
def get_plot_dims ( signal , ann_samp ) : "Figure out the number of plot channels" if signal is not None : if signal . ndim == 1 : sig_len = len ( signal ) n_sig = 1 else : sig_len = signal . shape [ 0 ] n_sig = signal . shape [ 1 ] else : sig_len = 0 n_sig = 0 if ann_samp is not None : n_annot = len ( ann_samp ) else ...
Figure out the number of plot channels
27,317
def create_figure ( n_subplots , figsize ) : "Create the plot figure and subplot axes" fig = plt . figure ( figsize = figsize ) axes = [ ] for i in range ( n_subplots ) : axes . append ( fig . add_subplot ( n_subplots , 1 , i + 1 ) ) return fig , axes
Create the plot figure and subplot axes
27,318
def plot_signal ( signal , sig_len , n_sig , fs , time_units , sig_style , axes ) : "Plot signal channels" if len ( sig_style ) == 1 : sig_style = n_sig * sig_style if time_units == 'samples' : t = np . linspace ( 0 , sig_len - 1 , sig_len ) else : downsample_factor = { 'seconds' : fs , 'minutes' : fs * 60 , 'hours' : ...
Plot signal channels
27,319
def plot_annotation ( ann_samp , n_annot , ann_sym , signal , n_sig , fs , time_units , ann_style , axes ) : "Plot annotations, possibly overlaid on signals" if len ( ann_style ) == 1 : ann_style = n_annot * ann_style if time_units == 'samples' : downsample_factor = 1 else : downsample_factor = { 'seconds' : float ( fs...
Plot annotations possibly overlaid on signals
27,320
def plot_ecg_grids ( ecg_grids , fs , units , time_units , axes ) : "Add ecg grids to the axes" if ecg_grids == 'all' : ecg_grids = range ( 0 , len ( axes ) ) for ch in ecg_grids : auto_xlims = axes [ ch ] . get_xlim ( ) auto_ylims = axes [ ch ] . get_ylim ( ) ( major_ticks_x , minor_ticks_x , major_ticks_y , minor_tic...
Add ecg grids to the axes
27,321
def calc_ecg_grids ( minsig , maxsig , sig_units , fs , maxt , time_units ) : if time_units == 'samples' : majorx = 0.2 * fs minorx = 0.04 * fs elif time_units == 'seconds' : majorx = 0.2 minorx = 0.04 elif time_units == 'minutes' : majorx = 0.2 / 60 minorx = 0.04 / 60 elif time_units == 'hours' : majorx = 0.2 / 3600 m...
Calculate tick intervals for ecg grids
27,322
def label_figure ( axes , n_subplots , time_units , sig_name , sig_units , ylabel , title ) : "Add title, and axes labels" if title : axes [ 0 ] . set_title ( title ) if not ylabel : ylabel = [ ] if not sig_name : sig_name = [ 'ch_' + str ( i ) for i in range ( n_subplots ) ] if not sig_units : sig_units = n_subplots *...
Add title and axes labels
27,323
def get_wfdb_plot_items ( record , annotation , plot_sym ) : if record : if record . p_signal is not None : signal = record . p_signal elif record . d_signal is not None : signal = record . d_signal else : raise ValueError ( 'The record has no signal to plot' ) fs = record . fs sig_name = record . sig_name sig_units = ...
Get items to plot from wfdb objects
27,324
def _remote_file_size ( url = None , file_name = None , pb_dir = None ) : if file_name and pb_dir : url = posixpath . join ( config . db_index_url , pb_dir , file_name ) response = requests . head ( url , headers = { 'Accept-Encoding' : 'identity' } ) response . raise_for_status ( ) remote_file_size = int ( response . ...
Get the remote file size in bytes
27,325
def _stream_header ( file_name , pb_dir ) : url = posixpath . join ( config . db_index_url , pb_dir , file_name ) response = requests . get ( url ) response . raise_for_status ( ) filelines = response . content . decode ( 'iso-8859-1' ) . splitlines ( ) header_lines = [ ] comment_lines = [ ] for line in filelines : lin...
Stream the lines of a remote header file .
27,326
def _stream_dat ( file_name , pb_dir , byte_count , start_byte , dtype ) : url = posixpath . join ( config . db_index_url , pb_dir , file_name ) end_byte = start_byte + byte_count - 1 headers = { "Range" : "bytes=%d-%d" % ( start_byte , end_byte ) , 'Accept-Encoding' : '*' } response = requests . get ( url , headers = ...
Stream data from a remote dat file into a 1d numpy array .
27,327
def _stream_annotation ( file_name , pb_dir ) : url = posixpath . join ( config . db_index_url , pb_dir , file_name ) response = requests . get ( url ) response . raise_for_status ( ) ann_data = np . fromstring ( response . content , dtype = np . dtype ( '<u1' ) ) return ann_data
Stream an entire remote annotation file from physiobank
27,328
def get_dbs ( ) : url = posixpath . join ( config . db_index_url , 'DBS' ) response = requests . get ( url ) dbs = response . content . decode ( 'ascii' ) . splitlines ( ) dbs = [ re . sub ( '\t{2,}' , '\t' , line ) . split ( '\t' ) for line in dbs ] return dbs
Get a list of all the Physiobank databases available .
27,329
def get_record_list ( db_dir , records = 'all' ) : db_url = posixpath . join ( config . db_index_url , db_dir ) if records == 'all' : response = requests . get ( posixpath . join ( db_url , 'RECORDS' ) ) if response . status_code == 404 : raise ValueError ( 'The database %s has no WFDB files to download' % db_url ) rec...
Get a list of records belonging to a database .
27,330
def make_local_dirs ( dl_dir , dl_inputs , keep_subdirs ) : if not os . path . isdir ( dl_dir ) : os . makedirs ( dl_dir ) print ( 'Created local base download directory: %s' % dl_dir ) if keep_subdirs : dl_dirs = set ( [ os . path . join ( dl_dir , d [ 1 ] ) for d in dl_inputs ] ) for d in dl_dirs : if not os . path ....
Make any required local directories to prepare for downloading
27,331
def dl_pb_file ( inputs ) : basefile , subdir , db , dl_dir , keep_subdirs , overwrite = inputs url = posixpath . join ( config . db_index_url , db , subdir , basefile ) remote_file_size = _remote_file_size ( url ) if keep_subdirs : dldir = os . path . join ( dl_dir , subdir ) else : dldir = dl_dir local_file = os . pa...
Download a file from physiobank .
27,332
def dl_full_file ( url , save_file_name ) : response = requests . get ( url ) with open ( save_file_name , 'wb' ) as writefile : writefile . write ( response . content ) return
Download a file . No checks are performed .
27,333
def dl_files ( db , dl_dir , files , keep_subdirs = True , overwrite = False ) : db_url = posixpath . join ( config . db_index_url , db ) response = requests . get ( db_url ) response . raise_for_status ( ) dl_inputs = [ ( os . path . split ( file ) [ 1 ] , os . path . split ( file ) [ 0 ] , db , dl_dir , keep_subdirs ...
Download specified files from a Physiobank database .
27,334
def label_triplets_to_df ( triplets ) : label_df = pd . DataFrame ( { 'label_store' : np . array ( [ t [ 0 ] for t in triplets ] , dtype = 'int' ) , 'symbol' : [ t [ 1 ] for t in triplets ] , 'description' : [ t [ 2 ] for t in triplets ] } ) label_df . set_index ( label_df [ 'label_store' ] . values , inplace = True ) ...
Get a pd dataframe from a tuple triplets used to define annotation labels .
27,335
def wrann ( record_name , extension , sample , symbol = None , subtype = None , chan = None , num = None , aux_note = None , label_store = None , fs = None , custom_labels = None , write_dir = '' ) : annotation = Annotation ( record_name = record_name , extension = extension , sample = sample , symbol = symbol , subtyp...
Write a WFDB annotation file .
27,336
def rdann ( record_name , extension , sampfrom = 0 , sampto = None , shift_samps = False , pb_dir = None , return_label_elements = [ 'symbol' ] , summarize_labels = False ) : return_label_elements = check_read_inputs ( sampfrom , sampto , return_label_elements ) filebytes = load_byte_pairs ( record_name , extension , p...
Read a WFDB annotation file record_name . extension and return an Annotation object .
27,337
def update_extra_fields ( subtype , chan , num , aux_note , update ) : if update [ 'subtype' ] : subtype . append ( 0 ) if update [ 'chan' ] : if chan == [ ] : chan . append ( 0 ) else : chan . append ( chan [ - 1 ] ) if update [ 'num' ] : if num == [ ] : num . append ( 0 ) else : num . append ( num [ - 1 ] ) if update...
Update the field if the current annotation did not provide a value .
27,338
def get_special_inds ( sample , label_store , aux_note ) : s0_inds = np . where ( sample == np . int64 ( 0 ) ) [ 0 ] note_inds = np . where ( label_store == np . int64 ( 22 ) ) [ 0 ] potential_definition_inds = set ( s0_inds ) . intersection ( note_inds ) notann_inds = np . where ( label_store == np . int64 ( 0 ) ) [ 0...
Get the indices of annotations that hold definition information about the entire annotation file and other empty annotations to be removed .
27,339
def rm_empty_indices ( * args ) : rm_inds = args [ 0 ] if not rm_inds : return args [ 1 : ] keep_inds = [ i for i in range ( len ( args [ 1 ] ) ) if i not in rm_inds ] return [ [ a [ i ] for i in keep_inds ] for a in args [ 1 : ] ]
Remove unwanted list indices . First argument is the list of indices to remove . Other elements are the lists to trim .
27,340
def apply_range ( self , sampfrom = 0 , sampto = None ) : sampto = sampto or self . sample [ - 1 ] kept_inds = np . intersect1d ( np . where ( self . sample >= sampfrom ) , np . where ( self . sample <= sampto ) ) for field in [ 'sample' , 'label_store' , 'subtype' , 'chan' , 'num' ] : setattr ( self , field , getattr ...
Filter the annotation attributes to keep only items between the desired sample values
27,341
def wrann ( self , write_fs = False , write_dir = '' ) : for field in [ 'record_name' , 'extension' ] : if getattr ( self , field ) is None : raise Exception ( 'Missing required field for writing annotation file: ' , field ) present_label_fields = self . get_label_fields ( ) if not present_label_fields : raise Exceptio...
Write a WFDB annotation file from this object .
27,342
def get_label_fields ( self ) : present_label_fields = [ ] for field in ann_label_fields : if getattr ( self , field ) is not None : present_label_fields . append ( field ) return present_label_fields
Get the present label fields in the object
27,343
def check_field_cohesion ( self , present_label_fields ) : nannots = len ( self . sample ) for field in [ 'sample' , 'num' , 'subtype' , 'chan' , 'aux_note' ] + present_label_fields : if getattr ( self , field ) is not None : if len ( getattr ( self , field ) ) != nannots : raise ValueError ( "The lengths of the 'sampl...
Check that the content and structure of different fields are consistent with one another .
27,344
def get_available_label_stores ( self , usefield = 'tryall' ) : if usefield == 'tryall' : if self . label_store is not None : usefield = 'label_store' elif self . symbol is not None : usefield = 'symbol' elif self . description is not None : usefield = 'description' else : raise ValueError ( 'No label fields are define...
Get the label store values that may be used for writing this annotation .
27,345
def get_custom_label_attribute ( self , attribute ) : if attribute not in ann_label_fields : raise ValueError ( 'Invalid attribute specified' ) if isinstance ( self . custom_labels , pd . DataFrame ) : if 'label_store' not in list ( self . custom_labels ) : raise ValueError ( 'label_store not defined in custom_labels' ...
Get a list of the custom_labels attribute . ie . label_store symbol or description .
27,346
def create_label_map ( self , inplace = True ) : label_map = ann_label_table . copy ( ) if self . custom_labels is not None : self . standardize_custom_labels ( ) for i in self . custom_labels . index : label_map . loc [ i ] = self . custom_labels . loc [ i ] if inplace : self . __label_map__ = label_map else : return ...
Creates mapping df based on ann_label_table and self . custom_labels .
27,347
def wr_ann_file ( self , write_fs , write_dir = '' ) : if write_fs : fs_bytes = self . calc_fs_bytes ( ) else : fs_bytes = [ ] cl_bytes = self . calc_cl_bytes ( ) core_bytes = self . calc_core_bytes ( ) if fs_bytes == [ ] and cl_bytes == [ ] : end_special_bytes = [ ] else : end_special_bytes = [ 0 , 236 , 255 , 255 , 2...
Calculate the bytes used to encode an annotation set and write them to an annotation file
27,348
def calc_core_bytes ( self ) : if len ( self . sample ) == 1 : sampdiff = np . array ( [ self . sample [ 0 ] ] ) else : sampdiff = np . concatenate ( ( [ self . sample [ 0 ] ] , np . diff ( self . sample ) ) ) compact_annotation = copy . deepcopy ( self ) compact_annotation . compact_fields ( ) extra_write_fields = [ ]...
Convert all used annotation fields into bytes to write
27,349
def get_contained_labels ( self , inplace = True ) : if self . custom_labels is not None : self . check_field ( 'custom_labels' ) label_map = ann_label_table . copy ( ) if isinstance ( self . custom_labels , ( list , tuple ) ) : custom_labels = label_triplets_to_df ( self . custom_labels ) elif isinstance ( self . cust...
Get the set of unique labels contained in this annotation . Returns a pandas dataframe or sets the contained_labels attribute of the object .
27,350
def set_label_elements ( self , wanted_label_elements ) : if isinstance ( wanted_label_elements , str ) : wanted_label_elements = [ wanted_label_elements ] missing_elements = [ e for e in wanted_label_elements if getattr ( self , e ) is None ] contained_elements = [ e for e in ann_label_fields if getattr ( self , e ) i...
Set one or more label elements based on at least one of the others
27,351
def _dat_read_params ( fmt , sig_len , byte_offset , skew , tsamps_per_frame , sampfrom , sampto ) : start_flat_sample = sampfrom * tsamps_per_frame if ( sampto + max ( skew ) ) > sig_len : end_flat_sample = sig_len * tsamps_per_frame extra_flat_samples = ( sampto + max ( skew ) - sig_len ) * tsamps_per_frame else : en...
Calculate the parameters used to read and process a dat file given its layout and the desired sample range .
27,352
def _required_byte_num ( mode , fmt , n_samp ) : if fmt == '212' : n_bytes = math . ceil ( n_samp * 1.5 ) elif fmt in [ '310' , '311' ] : n_extra = n_samp % 3 if n_extra == 2 : if fmt == '310' : n_bytes = upround ( n_samp * 4 / 3 , 4 ) else : if mode == 'read' : n_bytes = math . ceil ( n_samp * 4 / 3 ) else : n_bytes =...
Determine how many signal bytes are needed to read or write a number of desired samples from a dat file .
27,353
def _rd_dat_file ( file_name , dir_name , pb_dir , fmt , start_byte , n_samp ) : if fmt == '212' : byte_count = _required_byte_num ( 'read' , '212' , n_samp ) element_count = byte_count elif fmt in [ '310' , '311' ] : byte_count = _required_byte_num ( 'read' , fmt , n_samp ) element_count = byte_count else : element_co...
Read data from a dat file either local or remote into a 1d numpy array .
27,354
def _skew_sig ( sig , skew , n_sig , read_len , fmt , nan_replace , samps_per_frame = None ) : if max ( skew ) > 0 : if isinstance ( sig , list ) : for ch in range ( n_sig ) : if skew [ ch ] > 0 : sig [ ch ] [ : read_len * samps_per_frame [ ch ] ] = sig [ ch ] [ skew [ ch ] * samps_per_frame [ ch ] : ] for ch in range ...
Skew the signal insert nans and shave off end of array if needed .
27,355
def _check_sig_dims ( sig , read_len , n_sig , samps_per_frame ) : if isinstance ( sig , np . ndarray ) : if sig . shape != ( read_len , n_sig ) : raise ValueError ( 'Samples were not loaded correctly' ) else : if len ( sig ) != n_sig : raise ValueError ( 'Samples were not loaded correctly' ) for ch in range ( n_sig ) ...
Integrity check of a signal s shape after reading .
27,356
def _digi_bounds ( fmt ) : if isinstance ( fmt , list ) : return [ _digi_bounds ( f ) for f in fmt ] if fmt == '80' : return ( - 128 , 127 ) elif fmt == '212' : return ( - 2048 , 2047 ) elif fmt == '16' : return ( - 32768 , 32767 ) elif fmt == '24' : return ( - 8388608 , 8388607 ) elif fmt == '32' : return ( - 21474836...
Return min and max digital values for each format type . Accepts lists .
27,357
def _digi_nan ( fmt ) : if isinstance ( fmt , list ) : return [ _digi_nan ( f ) for f in fmt ] if fmt == '80' : return - 128 if fmt == '310' : return - 512 if fmt == '311' : return - 512 elif fmt == '212' : return - 2048 elif fmt == '16' : return - 32768 elif fmt == '61' : return - 32768 elif fmt == '160' : return - 32...
Return the wfdb digital value used to store nan for the format type .
27,358
def est_res ( signals ) : res_levels = np . power ( 2 , np . arange ( 0 , 33 ) ) if isinstance ( signals , list ) : n_sig = len ( signals ) else : if signals . ndim == 1 : n_sig = 1 else : n_sig = signals . shape [ 1 ] res = [ ] for ch in range ( n_sig ) : if isinstance ( signals , list ) : sorted_sig = np . sort ( np ...
Estimate the resolution of each signal in a multi - channel signal in bits . Maximum of 32 bits .
27,359
def _np_dtype ( bit_res , discrete ) : bit_res = min ( bit_res , 64 ) for np_res in [ 8 , 16 , 32 , 64 ] : if bit_res <= np_res : break if discrete is True : return 'int' + str ( np_res ) else : return 'float' + str ( max ( np_res , 16 ) )
Given the bit resolution of a signal return the minimum numpy dtype used to store it .
27,360
def _infer_sig_len ( file_name , fmt , n_sig , dir_name , pb_dir = None ) : if pb_dir is None : file_size = os . path . getsize ( os . path . join ( dir_name , file_name ) ) else : file_size = download . _remote_file_size ( file_name = file_name , pb_dir = pb_dir ) sig_len = int ( file_size / ( BYTES_PER_SAMPLE [ fmt ]...
Infer the length of a signal from a dat file .
27,361
def adc ( self , expanded = False , inplace = False ) : d_nans = _digi_nan ( self . fmt ) intdtype = 'int64' if inplace : if expanded : for ch in range ( self . n_sig ) : ch_nanlocs = np . isnan ( self . e_p_signal [ ch ] ) np . multiply ( self . e_p_signal [ ch ] , self . adc_gain [ ch ] , self . e_p_signal [ ch ] ) n...
Performs analogue to digital conversion of the physical signal stored in p_signal if expanded is False or e_p_signal if expanded is True .
27,362
def dac ( self , expanded = False , return_res = 64 , inplace = False ) : d_nans = _digi_nan ( self . fmt ) if return_res == 64 : floatdtype = 'float64' elif return_res == 32 : floatdtype = 'float32' else : floatdtype = 'float16' if inplace : if expanded : for ch in range ( self . n_sig ) : ch_nanlocs = self . e_d_sign...
Performs the digital to analogue conversion of the signal stored in d_signal if expanded is False or e_d_signal if expanded is True .
27,363
def calc_adc_params ( self ) : adc_gains = [ ] baselines = [ ] if np . where ( np . isinf ( self . p_signal ) ) [ 0 ] . size : raise ValueError ( 'Signal contains inf. Cannot perform adc.' ) minvals = np . nanmin ( self . p_signal , axis = 0 ) maxvals = np . nanmax ( self . p_signal , axis = 0 ) for ch in range ( np . ...
Compute appropriate adc_gain and baseline parameters for adc conversion given the physical signal and the fmts .
27,364
def wr_dat_files ( self , expanded = False , write_dir = '' ) : file_names , dat_channels = describe_list_indices ( self . file_name ) DAT_FMTS = { } dat_offsets = { } for fn in file_names : DAT_FMTS [ fn ] = self . fmt [ dat_channels [ fn ] [ 0 ] ] if self . byte_offset is None : dat_offsets [ fn ] = 0 else : dat_offs...
Write each of the specified dat files
27,365
def wfdb_strptime ( time_string ) : n_colons = time_string . count ( ':' ) if n_colons == 0 : time_fmt = '%S' elif n_colons == 1 : time_fmt = '%M:%S' elif n_colons == 2 : time_fmt = '%H:%M:%S' if '.' in time_string : time_fmt += '.%f' return datetime . datetime . strptime ( time_string , time_fmt ) . time ( )
Given a time string in an acceptable wfdb format return a datetime . time object .
27,366
def _read_header_lines ( base_record_name , dir_name , pb_dir ) : file_name = base_record_name + '.hea' if pb_dir is None : with open ( os . path . join ( dir_name , file_name ) , 'r' ) as fp : header_lines = [ ] comment_lines = [ ] for line in fp : line = line . strip ( ) if line . startswith ( '#' ) : comment_lines ....
Read the lines in a local or remote header file .
27,367
def _parse_record_line ( record_line ) : record_fields = { } ( record_fields [ 'record_name' ] , record_fields [ 'n_seg' ] , record_fields [ 'n_sig' ] , record_fields [ 'fs' ] , record_fields [ 'counter_freq' ] , record_fields [ 'base_counter' ] , record_fields [ 'sig_len' ] , record_fields [ 'base_time' ] , record_fie...
Extract fields from a record line string into a dictionary
27,368
def _parse_signal_lines ( signal_lines ) : n_sig = len ( signal_lines ) signal_fields = { } for field in SIGNAL_SPECS . index : signal_fields [ field ] = n_sig * [ None ] for ch in range ( n_sig ) : ( signal_fields [ 'file_name' ] [ ch ] , signal_fields [ 'fmt' ] [ ch ] , signal_fields [ 'samps_per_frame' ] [ ch ] , si...
Extract fields from a list of signal line strings into a dictionary .
27,369
def _read_segment_lines ( segment_lines ) : segment_fields = { } for field in SEGMENT_SPECS . index : segment_fields [ field ] = [ None ] * len ( segment_lines ) for i in range ( len ( segment_lines ) ) : ( segment_fields [ 'seg_name' ] [ i ] , segment_fields [ 'seg_len' ] [ i ] ) = _rx_segment . findall ( segment_line...
Extract fields from segment line strings into a dictionary
27,370
def get_write_subset ( self , spec_type ) : if spec_type == 'record' : write_fields = [ ] record_specs = RECORD_SPECS . copy ( ) if not hasattr ( self , 'n_seg' ) : record_specs . drop ( 'n_seg' , inplace = True ) for field in record_specs . index [ - 1 : : - 1 ] : if field in write_fields : continue if ( record_specs ...
Get a set of fields used to write the header ; either record or signal specification fields . Helper function for get_write_fields . Gets the default required fields the user defined fields and their dependencies .
27,371
def set_defaults ( self ) : rfields , sfields = self . get_write_fields ( ) for f in rfields : self . set_default ( f ) for f in sfields : self . set_default ( f )
Set defaults for fields needed to write the header if they have defaults .
27,372
def get_write_fields ( self ) : rec_write_fields = self . get_write_subset ( 'record' ) if self . comments != None : rec_write_fields . append ( 'comments' ) self . check_field ( 'n_sig' ) if self . n_sig > 0 : sig_write_fields = self . get_write_subset ( 'signal' ) else : sig_write_fields = None return rec_write_field...
Get the list of fields used to write the header separating record and signal specification fields . Returns the default required fields the user defined fields and their dependencies .
27,373
def set_default ( self , field ) : if field in RECORD_SPECS . index : if RECORD_SPECS . loc [ field , 'write_default' ] is None or getattr ( self , field ) is not None : return setattr ( self , field , RECORD_SPECS . loc [ field , 'write_default' ] ) elif field in SIGNAL_SPECS . index : if field == 'file_name' and self...
Set the object s attribute to its default value if it is missing and there is a default .
27,374
def check_field_cohesion ( self , rec_write_fields , sig_write_fields ) : if self . n_sig > 0 : for f in sig_write_fields : if len ( getattr ( self , f ) ) != self . n_sig : raise ValueError ( 'The length of field: ' + f + ' must match field n_sig.' ) datfmts = { } for ch in range ( self . n_sig ) : if self . file_name...
Check the cohesion of fields used to write the header
27,375
def wr_header_file ( self , rec_write_fields , sig_write_fields , write_dir ) : record_line = '' for field in RECORD_SPECS . index : if field in rec_write_fields : string_field = str ( getattr ( self , field ) ) if field == 'fs' and isinstance ( self . fs , float ) : if round ( self . fs , 8 ) == float ( int ( self . f...
Write a header file using the specified fields . Converts Record attributes into appropriate wfdb format strings .
27,376
def get_write_fields ( self ) : write_fields = self . get_write_subset ( 'record' ) write_fields = write_fields + [ 'seg_name' , 'seg_len' ] if self . comments != None : write_fields . append ( 'comments' ) return write_fields
Get the list of fields used to write the multi - segment header . Returns the default required fields the user defined fields and their dependencies .
27,377
def wr_header_file ( self , write_fields , write_dir ) : record_line = '' for field in RECORD_SPECS . index : if field in write_fields : record_line += RECORD_SPECS . loc [ field , 'delimiter' ] + str ( getattr ( self , field ) ) header_lines = [ record_line ] segment_lines = self . n_seg * [ '' ] for field in SEGMENT_...
Write a header file using the specified fields
27,378
def resample_ann ( resampled_t , ann_sample ) : tmp = np . zeros ( len ( resampled_t ) , dtype = 'int16' ) j = 0 tprec = resampled_t [ j ] for i , v in enumerate ( ann_sample ) : while True : d = False if v < tprec : j -= 1 tprec = resampled_t [ j ] if j + 1 == len ( resampled_t ) : tmp [ j ] += 1 break tnow = resample...
Compute the new annotation indices
27,379
def resample_sig ( x , fs , fs_target ) : t = np . arange ( x . shape [ 0 ] ) . astype ( 'float64' ) if fs == fs_target : return x , t new_length = int ( x . shape [ 0 ] * fs_target / fs ) resampled_x , resampled_t = signal . resample ( x , num = new_length , t = t ) assert resampled_x . shape == resampled_t . shape an...
Resample a signal to a different frequency .
27,380
def resample_singlechan ( x , ann , fs , fs_target ) : resampled_x , resampled_t = resample_sig ( x , fs , fs_target ) new_sample = resample_ann ( resampled_t , ann . sample ) assert ann . sample . shape == new_sample . shape resampled_ann = Annotation ( record_name = ann . record_name , extension = ann . extension , s...
Resample a single - channel signal with its annotations
27,381
def resample_multichan ( xs , ann , fs , fs_target , resamp_ann_chan = 0 ) : assert resamp_ann_chan < xs . shape [ 1 ] lx = [ ] lt = None for chan in range ( xs . shape [ 1 ] ) : resampled_x , resampled_t = resample_sig ( xs [ : , chan ] , fs , fs_target ) lx . append ( resampled_x ) if chan == resamp_ann_chan : lt = r...
Resample multiple channels with their annotations
27,382
def normalize_bound ( sig , lb = 0 , ub = 1 ) : mid = ub - ( ub - lb ) / 2 min_v = np . min ( sig ) max_v = np . max ( sig ) mid_v = max_v - ( max_v - min_v ) / 2 coef = ( ub - lb ) / ( max_v - min_v ) return sig * coef - ( mid_v * coef ) + mid
Normalize a signal between the lower and upper bound
27,383
def smooth ( sig , window_size ) : box = np . ones ( window_size ) / window_size return np . convolve ( sig , box , mode = 'same' )
Apply a uniform moving average filter to a signal
27,384
def get_filter_gain(b, a, f_gain, fs):
    """
    Compute a digital filter's gain magnitude at a particular frequency.

    Parameters
    ----------
    b : list
        Numerator coefficients of the filter.
    a : list
        Denominator coefficients of the filter.
    f_gain : int or float
        Frequency, in Hz, at which to evaluate the gain.
    fs : int or float
        Sampling frequency of the system, in Hz.

    Returns
    -------
    float
        Magnitude of the frequency response at the first evaluated
        frequency point at or above `f_gain`.
    """
    # Express the target frequency in rad/sample.
    w_target = f_gain * 2 * np.pi / fs
    w, h = signal.freqz(b, a)
    # First frequency sample at or above the target; raises IndexError
    # if f_gain lies beyond the evaluated band (above Nyquist).
    idx = np.flatnonzero(w >= w_target)[0]
    return abs(h[idx])
Given filter coefficients return the gain at a particular frequency .
27,385
def _check_item_type ( item , field_name , allowed_types , expect_list = False , required_channels = 'all' ) : if expect_list : if not isinstance ( item , list ) : raise TypeError ( 'Field `%s` must be a list.' % field_name ) if required_channels == 'all' : required_channels = list ( range ( len ( item ) ) ) for ch in ...
Check the item's type against a set of allowed types. Vary the error message depending on whether the item is allowed to be None. Helper to BaseRecord.check_field.
27,386
def check_np_array ( item , field_name , ndim , parent_class , channel_num = None ) : if item . ndim != ndim : error_msg = 'Field `%s` must have ndim == %d' % ( field_name , ndim ) if channel_num is not None : error_msg = ( 'Channel %d of f' % channel_num ) + error_msg [ 1 : ] raise TypeError ( error_msg ) if not np . ...
Check a numpy array s shape and dtype against required specifications .
27,387
def rdheader ( record_name , pb_dir = None , rd_segments = False ) : dir_name , base_record_name = os . path . split ( record_name ) dir_name = os . path . abspath ( dir_name ) header_lines , comment_lines = _header . _read_header_lines ( base_record_name , dir_name , pb_dir ) record_fields = _header . _parse_record_li...
Read a WFDB header file and return a Record or MultiRecord object with the record descriptors as attributes .
27,388
def rdsamp ( record_name , sampfrom = 0 , sampto = None , channels = None , pb_dir = None , channel_names = None , warn_empty = False ) : record = rdrecord ( record_name = record_name , sampfrom = sampfrom , sampto = sampto , channels = channels , physical = True , pb_dir = pb_dir , m2s = True , channel_names = channel...
Read a WFDB record and return the physical signals and a few important descriptor fields .
27,389
def _get_wanted_channels ( wanted_sig_names , record_sig_names , pad = False ) : if pad : return [ record_sig_names . index ( s ) if s in record_sig_names else None for s in wanted_sig_names ] else : return [ record_sig_names . index ( s ) for s in wanted_sig_names if s in record_sig_names ]
Given some wanted signal names and the signal names contained in a record return the indices of the record channels that intersect .
27,390
def wrsamp ( record_name , fs , units , sig_name , p_signal = None , d_signal = None , fmt = None , adc_gain = None , baseline = None , comments = None , base_time = None , base_date = None , write_dir = '' ) : if p_signal is not None and d_signal is not None : raise Exception ( 'Must only give one of the inputs: p_sig...
Write a single segment WFDB record creating a WFDB header file and any associated dat files .
27,391
def is_monotonic(full_list):
    """
    Determine whether a list's distinct elements are clustered together,
    i.e. no value reappears after a different value has been seen.
    For example, [1, 1, 2, 2] -> True, but [1, 2, 1] -> False.

    Parameters
    ----------
    full_list : list
        The list to inspect. Elements must be hashable.

    Returns
    -------
    bool
        True if every distinct value occurs in a single contiguous run
        (vacuously True for an empty list — the previous implementation
        raised IndexError on empty input), False otherwise.
    """
    from itertools import groupby
    # Collapse consecutive duplicates into one representative per run;
    # values are clustered iff every run representative is distinct.
    run_values = [value for value, _ in groupby(full_list)]
    return len(run_values) == len(set(run_values))
Determine whether elements in a list are monotonic, i.e. unique elements are clustered together.
27,392
def _adjust_datetime ( self , sampfrom ) : if sampfrom : dt_seconds = sampfrom / self . fs if self . base_date and self . base_time : self . base_datetime = datetime . datetime . combine ( self . base_date , self . base_time ) self . base_datetime += datetime . timedelta ( seconds = dt_seconds ) self . base_date = self...
Adjust date and time fields to reflect user input if possible .
27,393
def wrsamp(self, expanded=False, write_dir=''):
    """
    Write a WFDB header file and any associated dat files from this
    object.

    Parameters
    ----------
    expanded : bool, optional
        Forwarded to `wr_dats`; presumably selects writing the expanded
        (per-channel sample-rate) signal representation — confirm
        against `wr_dats`. NOTE(review)
    write_dir : str, optional
        The directory in which to write the files.

    Returns
    -------
    N/A
    """
    # The header is written first so the dat files have a record
    # description to belong to.
    self.wrheader(write_dir=write_dir)
    if self.n_sig <= 0:
        # No signals: the header alone is the whole record.
        return
    self.wr_dats(expanded=expanded, write_dir=write_dir)
Write a wfdb header file and any associated dat files from this object .
27,394
def wrsamp(self, write_dir=''):
    """
    Write a multi-segment header, along with headers and dat files for
    all segments, from this object.

    Parameters
    ----------
    write_dir : str, optional
        The directory in which to write the files.

    Returns
    -------
    N/A
    """
    # Top-level multi-segment header first.
    self.wrheader(write_dir=write_dir)
    # Each segment then writes its own header and dat files.
    for segment in self.segments:
        segment.wrsamp(write_dir=write_dir)
Write a multi - segment header along with headers and dat files for all segments from this object .
27,395
def _check_segment_cohesion ( self ) : if self . n_seg != len ( self . segments ) : raise ValueError ( "Length of segments must match the 'n_seg' field" ) for i in range ( n_seg ) : s = self . segments [ i ] if i == 0 and self . seg_len [ 0 ] == 0 : for file_name in s . file_name : if file_name != '~' : raise ValueErro...
Check the cohesion of the segments field with other fields used to write the record
27,396
def _required_segments ( self , sampfrom , sampto ) : if self . layout == 'fixed' : startseg = 0 else : startseg = 1 cumsumlengths = list ( np . cumsum ( self . seg_len [ startseg : ] ) ) seg_numbers = [ [ sampfrom < cs for cs in cumsumlengths ] . index ( True ) ] if sampto == cumsumlengths [ len ( cumsumlengths ) - 1 ...
Determine the segments and the samples within each segment in a multi - segment record that lie within a sample range .
27,397
def _required_channels ( self , seg_numbers , channels , dir_name , pb_dir ) : if self . layout == 'fixed' : required_channels = [ channels ] * len ( seg_numbers ) else : required_channels = [ ] l_sig_names = self . segments [ 0 ] . sig_name w_sig_names = [ l_sig_names [ c ] for c in channels ] for i in range ( len ( s...
Get the channel numbers to be read from each specified segment given the channel numbers specified for the entire record .
27,398
def rdtff ( file_name , cut_end = False ) : file_size = os . path . getsize ( file_name ) with open ( file_name , 'rb' ) as fp : fields , file_fields = _rdheader ( fp ) signal , markers , triggers = _rdsignal ( fp , file_size = file_size , header_size = file_fields [ 'header_size' ] , n_sig = file_fields [ 'n_sig' ] , ...
Read values from a tff file
27,399
def _rdheader ( fp ) : tag = None while tag != 2 : tag = struct . unpack ( '>H' , fp . read ( 2 ) ) [ 0 ] data_size = struct . unpack ( '>H' , fp . read ( 2 ) ) [ 0 ] pad_len = ( 4 - ( data_size % 4 ) ) % 4 pos = fp . tell ( ) if tag == 1001 : storage_method = fs = struct . unpack ( 'B' , fp . read ( 1 ) ) [ 0 ] storag...
Read header info of the windaq file