func_code_string
stringlengths
52
1.94M
func_documentation_string
stringlengths
1
47.2k
def network_from_edgelist(self, edgelist): teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist') if len(edgelist[0]) == 4: colnames = ['i', 'j', 't', 'weight'] elif len(edgelist[0]) == 3: colnames = ['i', 'j', 't'] self.network = pd.DataFrame(edg...
Defines a network from an array. Parameters ---------- edgelist : list of lists. A list of lists which are 3 or 4 in length. For binary networks each sublist should be [i, j ,t] where i and j are node indicies and t is the temporal index. For weighted networks each subli...
def _drop_duplicate_ij(self): self.network['ij'] = list(map(lambda x: tuple(sorted(x)), list( zip(*[self.network['i'].values, self.network['j'].values])))) self.network.drop_duplicates(['ij', 't'], inplace=True) self.network.reset_index(inplace=True, drop=True) self....
Drops duplicate entries from the network dataframe.
def _drop_diagonal(self): self.network = self.network.where( self.network['i'] != self.network['j']).dropna() self.network.reset_index(inplace=True, drop=True)
Drops self-contacts from the network dataframe.
def add_edge(self, edgelist): if not isinstance(edgelist[0], list): edgelist = [edgelist] teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist') if len(edgelist[0]) == 4: colnames = ['i', 'j', 't', 'weight'] elif len(edgelist[0]) == 3: ...
Adds an edge to the network. Parameters ---------- edgelist : list a list (or list of lists) containing the i,j and t indices to be added. For weighted networks list should also contain a 'weight' key. Returns -------- Updates TenetoBIDS.network datafram...
def drop_edge(self, edgelist): if not isinstance(edgelist[0], list): edgelist = [edgelist] teneto.utils.check_TemporalNetwork_input(edgelist, 'edgelist') if self.hdf5: with pd.HDFStore(self.network) as hdf: for e in edgelist: h...
Removes an edge from the network. Parameters ---------- edgelist : list a list (or list of lists) containing the i,j and t indices to be removed. Returns -------- Updates TenetoBIDS.network dataframe
def calc_networkmeasure(self, networkmeasure, **measureparams): availablemeasures = [f for f in dir( teneto.networkmeasures) if not f.startswith('__')] if networkmeasure not in availablemeasures: raise ValueError( 'Unknown network measure. Available netwo...
Calculate network measure. Parameters ----------- networkmeasure : str Function to call. Functions available are in teneto.networkmeasures measureparams : kwargs kwargs for teneto.networkmeasure.[networkmeasure]
def generatenetwork(self, networktype, **networkparams): availabletypes = [f for f in dir( teneto.generatenetwork) if not f.startswith('__')] if networktype not in availabletypes: raise ValueError( 'Unknown network measure. Available networks to generate ...
Generate a network Parameters ----------- networktype : str Function to call. Functions available are in teneto.generatenetwork measureparams : kwargs kwargs for teneto.generatenetwork.[networktype] Returns -------- TenetoBIDS.network is...
def save_aspickle(self, fname):
    """Saves the object as a pickle file.

    Parameters
    ----------
    fname : str
        file path; a '.pkl' extension is appended when missing.
    """
    if not fname.endswith('.pkl'):
        fname = fname + '.pkl'
    with open(fname, 'wb') as fileobj:
        pickle.dump(self, fileobj, pickle.HIGHEST_PROTOCOL)
Saves object as pickle. fname : str file path.
def postpro_fisher(data, report=None): if not report: report = {} # Due to rounding errors data[data < -0.99999999999999] = -1 data[data > 0.99999999999999] = 1 fisher_data = 0.5 * np.log((1 + data) / (1 - data)) report['fisher'] = {} report['fisher']['performed'] = 'yes' #r...
Performs fisher transform on everything in data. If report variable is passed, this is added to the report.
def postpro_boxcox(data, report=None): if not report: report = {} # Note the min value of all time series will now be at least 1. mindata = 1 - np.nanmin(data) data = data + mindata ind = np.triu_indices(data.shape[0], k=1) boxcox_list = np.array([sp.stats.boxcox(np.squeeze( ...
Performs box cox transform on everything in data. If report variable is passed, this is added to the report.
def postpro_standardize(data, report=None): if not report: report = {} # First make dim 1 = time. data = np.transpose(data, [2, 0, 1]) standardized_data = (data - data.mean(axis=0)) / data.std(axis=0) standardized_data = np.transpose(standardized_data, [1, 2, 0]) report['standardize...
Standardizes everything in data (along axis -1). If report variable is passed, this is added to the report.
def derive_temporalnetwork(data, params): report = {} if 'dimord' not in params.keys(): params['dimord'] = 'node,time' if 'report' not in params.keys(): params['report'] = False if 'analysis_id' not in params.keys(): params['analysis_id'] = '' if 'postpro' not in params....
Derives connectivity from the data. A lot of data is inherently built with edges (e.g. communication between two individuals). However other networks are derived from the covariance of time series (e.g. brain networks between two regions). Covariance based metrics deriving time-resolved networks can ...
def _weightfun_jackknife(T, report): weights = np.ones([T, T]) np.fill_diagonal(weights, 0) report['method'] = 'jackknife' report['jackknife'] = '' return weights, report
Creates the weights for the jackknife method. See func: teneto.derive.derive.
def _weightfun_sliding_window(T, params, report): weightat0 = np.zeros(T) weightat0[0:params['windowsize']] = np.ones(params['windowsize']) weights = np.array([np.roll(weightat0, i) for i in range(0, T + 1 - params['windowsize'])]) report['method'] = 'slidingwindow' repo...
Creates the weights for the sliding window method. See func: teneto.derive.derive.
def _weightfun_tapered_sliding_window(T, params, report): x = np.arange(-(params['windowsize'] - 1) / 2, (params['windowsize']) / 2) distribution_parameters = ','.join(map(str, params['distribution_params'])) taper = eval('sps.' + params['distribution'] + '.pdf(x,' + distri...
Creates the weights for the tapered method. See func: teneto.derive.derive.
def _weightfun_spatial_distance(data, params, report): distance = getDistanceFunction(params['distance']) weights = np.array([distance(data[n, :], data[t, :]) for n in np.arange( 0, data.shape[0]) for t in np.arange(0, data.shape[0])]) weights = np.reshape(weights, [data.shape[0], data.shape[0]...
Creates the weights for the spatial distance method. See func: teneto.derive.derive.
def _temporal_derivative(data, params, report): # Data should be timexnode report = {} # Derivative tdat = data[1:, :] - data[:-1, :] # Normalize tdat = tdat / np.std(tdat, axis=0) # Coupling coupling = np.array([tdat[:, i] * tdat[:, j] for i in np.arange(0, ...
Performs mtd method. See func: teneto.derive.derive.
def binarize_percent(netin, level, sign='pos', axis='time'): netin, netinfo = process_input(netin, ['C', 'G', 'TO']) # Set diagonal to 0 netin = set_diagonal(netin, 0) if axis == 'graphlet' and netinfo['nettype'][-1] == 'u': triu = np.triu_indices(netinfo['netshape'][0], k=1) netin ...
Binarizes a network proportionally. When axis='time' (only one available at the moment) then the top values for each edge time series are considered. Parameters ---------- netin : array or dict network (graphlet or contact representation), level : float Percent to keep (expressed as dec...
def binarize_rdp(netin, level, sign='pos', axis='time'): netin, netinfo = process_input(netin, ['C', 'G', 'TO']) trajectory = rdp(netin, level) contacts = [] # Use the trajectory points as threshold for n in range(trajectory['index'].shape[0]): if sign == 'pos': sel = trajec...
Binarizes a network based on RDP compression. Parameters ---------- netin : array or dict Network (graphlet or contact representation), level : float Delta parameter which is the tolerated error in RDP compression. sign : str, default='pos' States the sign of the thresholdi...
def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time'): if threshold_type == 'percent': netout = binarize_percent(netin, threshold_level, sign, axis) elif threshold_type == 'magnitude': netout = binarize_magnitude(netin, threshold_level, sign) elif threshold_type ...
Binarizes a network, returning the network. General wrapper function for different binarization functions. Parameters ---------- netin : array or dict Network (graphlet or contact representation), threshold_type : str What type of thresholds to make binarization. Options: 'rdp', 'perce...
def process_input(netIn, allowedformats, outputformat='G'): inputtype = checkInput(netIn) # Convert TN to G representation if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN': G = netIn.df_to_array() netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape} ...
Takes input network and checks what the input is. Parameters ---------- netIn : array, dict, or TemporalNetwork Network (graphlet, contact or object) allowedformats : str Which format of network objects that are allowed. Options: 'C', 'TN', 'G'. outputformat: str, default=G ...
def clean_community_indexes(communityID): communityID = np.array(communityID) cid_shape = communityID.shape if len(cid_shape) > 1: communityID = communityID.flatten() new_communityID = np.zeros(len(communityID)) for i, n in enumerate(np.unique(communityID)): new_communityID[comm...
Takes input of community assignments. Returns reindexed community assignment by using smallest numbers possible. Parameters ---------- communityID : array-like list or array of integers. Output from community detection algorithms. Returns ------- new_communityID : array clea...
def multiple_contacts_get_values(C): d = collections.OrderedDict() for c in C['contacts']: ct = tuple(c) if ct in d: d[ct] += 1 else: d[ct] = 1 new_contacts = [] new_values = [] for (key, value) in d.items(): new_values.append(value) ...
Given an contact representation with repeated contacts, this function removes duplicates and creates a value Parameters ---------- C : dict contact representation with multiple repeated contacts. Returns ------- :C_out: dict Contact representation with duplicate contacts re...
def df_to_array(df, netshape, nettype): if len(df) > 0: idx = np.array(list(map(list, df.values))) G = np.zeros([netshape[0], netshape[0], netshape[1]]) if idx.shape[1] == 3: if nettype[-1] == 'u': idx = np.vstack([idx, idx[:, [1, 0, 2]]]) idx = i...
Returns a numpy array (snapshot representation) from the dataframe contact list Parameters: df : pandas df pandas df with columns, i,j,t. netshape : tuple network shape, format: (node, time) nettype : str 'wu', 'wd', 'bu', 'bd' Returns: -------- ...
def check_distance_funciton_input(distance_func_name, netinfo): if distance_func_name == 'default' and netinfo['nettype'][0] == 'b': print('Default distance funciton specified. As network is binary, using Hamming') distance_func_name = 'hamming' elif distance_func_name == 'default' and neti...
Function checks distance_func_name, if it is specified as 'default'. Then given the type of the network selects a default distance function. Parameters ---------- distance_func_name : str distance function name. netinfo : dict the output of utils.process_input Returns -------...
def load_parcellation_coords(parcellation_name):
    """Loads coordinates of included parcellations.

    Parameters
    ----------
    parcellation_name : str
        options: 'gordon2014_333', 'power2012_264', 'shen2013_278'.

    Returns
    -------
    parc : array
        parcellation coordinates (columns 1-3 of the bundled csv).
    """
    csv_path = tenetopath[0] + '/data/parcellation/' + parcellation_name + '.csv'
    return np.loadtxt(csv_path, skiprows=1, delimiter=',', usecols=[1, 2, 3])
Loads coordinates of included parcellations. Parameters ---------- parcellation_name : str options: 'gordon2014_333', 'power2012_264', 'shen2013_278'. Returns ------- parc : array parcellation coordinates
def make_parcellation(data_path, parcellation, parc_type=None, parc_params=None): if isinstance(parcellation, str): parcin = '' if '+' in parcellation: parcin = parcellation parcellation = parcellation.split('+')[0] if '+OH' in parcin: subcortical = T...
Performs a parcellation which reduces voxel space to regions of interest (brain data). Parameters ---------- data_path : str Path to .nii image. parcellation : str Specify which parcellation that you would like to use. For MNI: 'gordon2014_333', 'power2012_264', For TAL: 'shen2013_278'...
def create_traj_ranges(start, stop, N):
    """Fills in the trajectory range between start and stop over N points.

    Accepts scalar endpoints (returns a 1-D ramp) or per-row array
    endpoints (returns one ramp per row).
    # Adapted from https://stackoverflow.com/a/40624614
    """
    steps = (1.0 / (N - 1)) * (stop - start)
    ramp = np.arange(N)
    if np.isscalar(steps):
        return steps * ramp + start
    return steps[:, None] * ramp + start[:, None]
Fills in the trajectory range. # Adapted from https://stackoverflow.com/a/40624614
def get_dimord(measure, calc=None, community=None): if not calc: calc = '' else: calc = '_' + calc if not community: community = '' else: community = 'community' if 'community' in calc and 'community' in community: community = '' if calc == 'community...
Get the dimension order of a network measure. Parameters ---------- measure : str Name of function in teneto.networkmeasures. calc : str, default=None Calc parameter for the function community : bool, default=None If not null, then community property is assumed to be believ...
def get_network_when(tnet, i=None, j=None, t=None, ij=None, logic='and', copy=False, asarray=False): if isinstance(tnet, pd.DataFrame): network = tnet hdf5 = False # Can add hdfstore elif isinstance(tnet, object): network = tnet.network hdf5 = tnet.hdf5 if ij is not ...
Returns subset of dataframe that matches index Parameters ---------- tnet : df or TemporalNetwork TemporalNetwork object or pandas dataframe edgelist i : list or int get nodes in column i (source nodes in directed networks) j : list or int get nodes in column j (target nodes...
def create_supraadjacency_matrix(tnet, intersliceweight=1): newnetwork = tnet.network.copy() newnetwork['i'] = (tnet.network['i']) + \ ((tnet.netshape[0]) * (tnet.network['t'])) newnetwork['j'] = (tnet.network['j']) + \ ((tnet.netshape[0]) * (tnet.network['t'])) if 'weight' not in n...
Returns a supraadjacency matrix from a temporal network structure Parameters -------- tnet : TemporalNetwork Temporal network (any network type) intersliceweight : int Weight that links the same node from adjacent time-points Returns -------- supranet : dataframe Su...
def tnet_to_nx(df, t=None):
    """Creates undirected networkx object from an edge dataframe.

    Parameters are an edge dataframe with columns i, j (and optionally
    weight) plus an optional time index t used to subset edges first.
    """
    if t is not None:
        df = get_network_when(df, t=t)
    kwargs = {'edge_attr': 'weight'} if 'weight' in df.columns else {}
    return nx.from_pandas_edgelist(df, source='i', target='j', **kwargs)
Creates undirected networkx object
def temporal_louvain(tnet, resolution=1, intersliceweight=1, n_iter=100, negativeedge='ignore', randomseed=None, consensus_threshold=0.5, temporal_consensus=True, njobs=1): r tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN') # Divide resolution by the number of timepoints resolution = resolution / tnet...
r""" Louvain clustering for a temporal network. Parameters ----------- tnet : array, dict, TemporalNetwork Input network resolution : int resolution of Louvain clustering ($\gamma$) intersliceweight : int interslice weight of multilayer clustering ($\omega$). Must be pos...
def make_consensus_matrix(com_membership, th=0.5): r com_membership = np.array(com_membership) D = [] for i in range(com_membership.shape[0]): for j in range(i+1, com_membership.shape[0]): con = np.sum((com_membership[i, :] - com_membership[j, :]) == 0, axis=...
r""" Makes the consensus matrix . Parameters ---------- com_membership : array Shape should be node, time, iteration. th : float threshold to cancel noisey edges Returns ------- D : array consensus matrix
def make_temporal_consensus(com_membership): r com_membership = np.array(com_membership) # make first indicies be between 0 and 1. com_membership[:, 0] = clean_community_indexes(com_membership[:, 0]) # loop over all timepoints, get jacccard distance in greedy manner for largest community to time per...
r""" Matches community labels accross time-points Jaccard matching is in a greedy fashiong. Matching the largest community at t with the community at t-1. Parameters ---------- com_membership : array Shape should be node, time. Returns ------- D : array temporal cons...
def flexibility(communities): # Preallocate flex = np.zeros(communities.shape[0]) # Go from the second time point to last, compare with time-point before for t in range(1, communities.shape[1]): flex[communities[:, t] != communities[:, t-1]] += 1 # Normalize flex = flex / (communiti...
Amount a node changes community Parameters ---------- communities : array Community array of shape (node,time) Returns -------- flex : array Size with the flexibility of each node. Notes ----- Flexbility calculates the number of times a node switches its community ...
def slice_plot(netin, ax, nodelabels=None, timelabels=None, communities=None, plotedgeweights=False, edgeweightscalar=1, timeunit='', linestyle='k-', cmap=None, nodesize=100, nodekwargs=None, edgekwargs=None): r # Get input type (C or G) inputType = checkInput(netin) # Convert C representation to G ...
r''' Fuction draws "slice graph" and exports axis handles Parameters ---------- netin : array, dict temporal network input (graphlet or contact) ax : matplotlib figure handles. nodelabels : list nodes labels. List of strings. timelabels : list labels of dimension ...
def local_variation(data): r ict = 0 # are ict present if isinstance(data, dict): # This could be done better if [k for k in list(data.keys()) if k == 'intercontacttimes'] == ['intercontacttimes']: ict = 1 # if shortest paths are not calculated, calculate them if ict == ...
r""" Calculates the local variaiont of inter-contact times. [LV-1]_, [LV-2]_ Parameters ---------- data : array, dict This is either (1) temporal network input (graphlet or contact) with nettype: 'bu', 'bd'. (2) dictionary of ICTs (output of *intercontacttimes*). Returns ------- ...
def drop_bids_suffix(fname): if '/' in fname: split = fname.split('/') dirnames = '/'.join(split[:-1]) + '/' fname = split[-1] else: dirnames = '' tags = [tag for tag in fname.split('_') if '-' in tag] fname_head = '_'.join(tags) fileformat = '.' + '.'.join(fname...
Given a filename sub-01_run-01_preproc.nii.gz, it will return ['sub-01_run-01', '.nii.gz'] Parameters ---------- fname : str BIDS filename with suffice. Directories should not be included. Returns ------- fname_head : str BIDS filename with fileformat : str The fil...
def load_tabular_file(fname, return_meta=False, header=True, index_col=True): if index_col: index_col = 0 else: index_col = None if header: header = 0 else: header = None df = pd.read_csv(fname, header=header, index_col=index_col, sep='\t') if return_meta: ...
Given a file name loads as a pandas data frame Parameters ---------- fname : str file name and path. Must be tsv. return_meta : header : bool (default True) if there is a header in the tsv file, true will use first row in file. index_col : bool (default None) if there i...
def get_sidecar(fname, allowedfileformats='default'): if allowedfileformats == 'default': allowedfileformats = ['.tsv', '.nii.gz'] for f in allowedfileformats: fname = fname.split(f)[0] fname += '.json' if os.path.exists(fname): with open(fname) as fs: sidecar = ...
Loads sidecar or creates one
def process_exclusion_criteria(exclusion_criteria): relfun = [] threshold = [] for ec in exclusion_criteria: if ec[0:2] == '>=': relfun.append(np.greater_equal) threshold.append(float(ec[2:])) elif ec[0:2] == '<=': relfun.append(np.less_equal) ...
Parses an exclusion critera string to get the function and threshold. Parameters ---------- exclusion_criteria : list list of strings where each string is of the format [relation][threshold]. E.g. \'<0.5\' or \'>=1\' Returns ------- relfun : list list of numpy f...
def reachability_latency(tnet=None, paths=None, rratio=1, calc='global'): if tnet is not None and paths is not None: raise ValueError('Only network or path input allowed.') if tnet is None and paths is None: raise ValueError('No input.') # if shortest paths are not calculated, calculate...
Reachability latency. This is the r-th longest temporal path. Parameters --------- data : array or dict Can either be a network (graphlet or contact), binary unidrected only. Alternative can be a paths dictionary (output of teneto.networkmeasure.shortest_temporal_path) rratio: float (default...
def fluctuability(netin, calc='global'): r # Get input type (C or G) netin, _ = process_input(netin, ['C', 'G', 'TN']) netin[netin != 0] = 1 unique_edges = np.sum(netin, axis=2) unique_edges[unique_edges > 0] = 1 unique_edges[unique_edges == 0] = 0 fluct = (np.sum(unique_edges)) / np.sum...
r""" Fluctuability of temporal networks. This is the variation of the network's edges over time. [fluct-1]_ This is the unique number of edges through time divided by the overall number of edges. Parameters ---------- netin : array or dict Temporal network input (graphlet or contact) (net...
def topological_overlap(tnet, calc='time'): r tnet = process_input(tnet, ['C', 'G', 'TN'])[0] numerator = np.sum(tnet[:, :, :-1] * tnet[:, :, 1:], axis=1) denominator = np.sqrt( np.sum(tnet[:, :, :-1], axis=1) * np.sum(tnet[:, :, 1:], axis=1)) topo_overlap = numerator / denominator topo_...
r""" Topological overlap quantifies the persistency of edges through time. If two consequtive time-points have similar edges, this becomes high (max 1). If there is high change, this becomes 0. References: [topo-1]_, [topo-2]_ Parameters ---------- tnet : array, dict graphlet or contact se...
def recruitment(temporalcommunities, staticcommunities): # make sure the static and temporal communities have the same number of nodes if staticcommunities.shape[0] != temporalcommunities.shape[0]: raise ValueError( 'Temporal and static communities have different dimensions') all...
Calculates recruitment coefficient for each node. Recruitment coefficient is the average probability of nodes from the same static communities being in the same temporal communities at other time-points or during different tasks. Parameters: ------------ temporalcommunities : array temporal ...
def circle_plot(netIn, ax, nodelabels=None, linestyle='k-', nodesize=1000, cmap='Set2'): r # Get input type (C or G) inputType = checkInput(netIn, conMat=1) if nodelabels is None: nodelabels = [] # Convert C representation to G if inputType == 'M': shape = np.shape(netIn) ...
r''' Function draws "circle plot" and exports axis handles Parameters ------------- netIn : temporal network input (graphlet or contact) ax : matplotlib ax handles. nodelabels : list nodes labels. List of strings linestyle : str line style nodesize : int size of...
def integration(temporalcommunities, staticcommunities): # make sure the static and temporal communities have the same number of nodes if staticcommunities.shape[0] != temporalcommunities.shape[0]: raise ValueError( 'Temporal and static communities have different dimensions') alleg ...
Calculates the integration coefficient for each node. Measures the average probability that a node is in the same community as nodes from other systems. Parameters: ------------ temporalcommunities : array temporal communities vector (node,time) staticcommunities : array ...
def intercontacttimes(tnet): # Process input tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN') if tnet.nettype[0] == 'w': print('WARNING: assuming connections to be binary when computing intercontacttimes') # Each time series is padded with a 0 at the start and end. Then t[0:-1]-[t:]. ...
Calculates the intercontacttimes of each edge in a network. Parameters ----------- tnet : array, dict Temporal network (graphlet or contact). Nettype: 'bu', 'bd' Returns --------- contacts : dict Intercontact times as numpy array in dictionary. contacts['intercontacttimes'] 
def gen_report(report, sdir='./', report_name='report.html'): # Create report directory if not os.path.exists(sdir): os.makedirs(sdir) # Add a slash to file directory if not included to avoid DirNameFleName # instead of DirName/FileName being creaated if sdir[-1] != '/': sdir +=...
Generates report of derivation and postprocess steps in teneto.derive
def add_history(self, fname, fargs, init=0):
    """Adds a processing step to TenetoBIDS.history.

    Parameters
    ----------
    fname : str
        name of the function being recorded.
    fargs : dict
        arguments the function was called with.
    init : int, default=0
        when 1, the history list is (re)initialized before appending.
    """
    if init == 1:
        self.history = []
    entry = [fname, fargs]
    self.history.append(entry)
Adds a processing step to TenetoBIDS.history.
def export_history(self, dirname): mods = [(m.__name__, m.__version__) for m in sys.modules.values() if m if hasattr(m, '__version__')] with open(dirname + '/requirements.txt', 'w') as f: for m in mods: m = list(m) if not isinstance(m[...
Exports TenetoBIDShistory.py, tenetoinfo.json, requirements.txt (modules currently imported) to dirname Parameters --------- dirname : str directory to export entire TenetoBIDS history.
def derive_temporalnetwork(self, params, update_pipeline=True, tag=None, njobs=1, confound_corr_report=True): if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) files = self.get_selected_files(quiet=1) confound_files = self.get_sele...
Derive time-varying connectivity on the selected files. Parameters ---------- params : dict. See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary. Assumes dimord is time,node (output of other TenetoBIDS functions) update_pipeline : bool 
def _derive_temporalnetwork(self, f, i, tag, params, confounds_exist, confound_files): data = load_tabular_file(f, index_col=True, header=True) fs, _ = drop_bids_suffix(f) save_name, save_dir, _ = self._save_namepaths_bids_derivatives( fs, tag, 'tvc', 'tvcconn') if '...
Function called by TenetoBIDS.derive_temporalnetwork for concurrent processing.
def make_functional_connectivity(self, njobs=None, returngroup=False, file_hdr=None, file_idx=None): if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) files = self.get_selected_files(quiet=1) R_group = [] with ProcessPoolEx...
Makes connectivity matrix for each of the subjects. Parameters ---------- returngroup : bool, default=False If true, returns the group average connectivity matrix. njobs : int How many parallel jobs to run file_idx : bool Default False, true i...
def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None): file_name = f.split('/')[-1].split('.')[0] if tag != '': tag = '_' + tag if suffix: file_name, _ = drop_bids_suffix(file_name) save_name = file_name + tag sav...
Creates output directory and output name Parameters --------- f : str input files, includes the file bids_suffix tag : str what should be added to f in the output file. save_directory : str additional directory that the output file should go in...
def get_tags(self, tag, quiet=1): if not self.pipeline: print('Please set pipeline first.') self.get_pipeline_alternatives(quiet) else: if tag == 'sub': datapath = self.BIDS_dir + '/derivatives/' + self.pipeline + '/' tag_alter...
Returns which tag alternatives can be identified in the BIDS derivatives structure.
def get_pipeline_alternatives(self, quiet=0): if not os.path.exists(self.BIDS_dir + '/derivatives/'): print('Derivative directory not found. Is the data preprocessed?') else: pipeline_alternatives = os.listdir(self.BIDS_dir + '/derivatives/') if quiet == 0: ...
The pipeline are the different outputs that are placed in the ./derivatives directory. get_pipeline_alternatives gets those which are found in the specified BIDS directory structure.
def get_pipeline_subdir_alternatives(self, quiet=0): if not self.pipeline: print('Please set pipeline first.') self.get_pipeline_alternatives() else: pipeline_subdir_alternatives = [] for s in self.bids_tags['sub']: derdir_files = ...
Note ----- This function currently returns the wrong folders and will be fixed in the future. This function should return ./derivatives/pipeline/sub-xx/[ses-yy/][func/]/pipeline_subdir But it does not care about ses-yy at the moment.
def get_selected_files(self, pipeline='pipeline', forfile=None, quiet=0, allowedfileformats='default'): # This could be mnade better file_dict = dict(self.bids_tags) if allowedfileformats == 'default': allowedfileformats = ['.tsv', '.nii.gz'] if forfile: ...
Parameters ---------- pipeline : string can be \'pipeline\' (main analysis pipeline, self in tnet.set_pipeline) or \'confound\' (where confound files are, set in tnet.set_confound_pipeline()), \'functionalconnectivity\' quiet: int If 1, prints results. If 0, n...
def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean'): self.add_history(inspect.stack()[0][3], locals(), 1) if isinstance(confound, str): confound = [confound] if isinstance(exclusion_criteria, str): exclusion_criteria = [exclusion_crit...
Excludes subjects given a certain exclusion criteria. Parameters ---------- confound : str or list string or list of confound name(s) from confound files exclusion_criteria : str or list for each confound, an exclusion_criteria should be expresse...
def set_exclusion_timepoint(self, confound, exclusion_criteria, replace_with, tol=1, overwrite=True, desc=None): self.add_history(inspect.stack()[0][3], locals(), 1) if isinstance(confound, str): confound = [confound] if isinstance(exclusion_criteria, str): exclu...
Excludes subjects given a certain exclusion criteria. Does not work on nifti files, only csv, numpy or tsc. Assumes data is node,time Parameters ---------- confound : str or list string or list of confound name(s) from confound files. Assumes data is node,time ex...
def make_parcellation(self, parcellation, parc_type=None, parc_params=None, network='defaults', update_pipeline=True, removeconfounds=False, tag=None, njobs=None, clean_params=None, yeonetworkn=None): if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(),...
Reduces the data from voxel to parcellation space. Files get saved in a teneto folder in the derivatives with a roi tag at the end. Parameters ----------- parcellation : str specify which parcellation that you would like to use. For MNI: 'power2012_264', 'gordon2014_333'. TAL: 'she...
def communitydetection(self, community_detection_params, community_type='temporal', tag=None, file_hdr=False, file_idx=False, njobs=None): if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) if not tag: tag = '' else: ...
Calls temporal_louvain_with_consensus on connectivity data Parameters ---------- community_detection_params : dict kwargs for detection. See teneto.communitydetection.louvain.temporal_louvain_with_consensus community_type : str Either 'temporal' or 'static'. If ...
def removeconfounds(self, confounds=None, clean_params=None, transpose=None, njobs=None, update_pipeline=True, overwrite=True, tag=None): if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) if not self.confounds and not confounds: ...
Removes specified confounds using nilearn.signal.clean Parameters ---------- confounds : list List of confounds. Can be prespecified in set_confounds clean_params : dict Dictionary of kawgs to pass to nilearn.signal.clean transpose : bool (default False) ...
def networkmeasures(self, measure=None, measure_params=None, tag=None, njobs=None): if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) # measure can be string or list if isinstance(measure, str): measure = [measure] ...
Calculates a network measure For available functions see: teneto.networkmeasures Parameters ---------- measure : str or list Name of function(s) from teneto.networkmeasures that will be run. measure_params : dict or list of dictionaries Containing kwar...
def set_confound_pipeline(self, confound_pipeline): self.add_history(inspect.stack()[0][3], locals(), 1) if not os.path.exists(self.BIDS_dir + '/derivatives/' + confound_pipeline): print('Specified direvative directory not found.') self.get_pipeline_alternatives() ...
There may be times when the pipeline is updated (e.g. teneto) but you want the confounds from the preprocessing pipeline (e.g. fmriprep). To do this, you set the confound_pipeline to be the preprocessing pipeline where the confound files are. Parameters ---------- confound_pipeline : 
def set_bids_suffix(self, bids_suffix):
    """Set the bids_suffix — the last analysis step tag present in file names.

    Parameters
    ----------
    bids_suffix : str
        suffix to record on the object.

    NOTE(review): locals() is handed straight to add_history, so the
    recorded arguments are exactly {self, bids_suffix}; adding any local
    variable here would change what gets logged. Body left untouched.
    """
    self.add_history(inspect.stack()[0][3], locals(), 1)
    self.bids_suffix = bids_suffix
The last analysis step is the final tag that is present in files.
def set_pipeline(self, pipeline): self.add_history(inspect.stack()[0][3], locals(), 1) if not os.path.exists(self.BIDS_dir + '/derivatives/' + pipeline): print('Specified direvative directory not found.') self.get_pipeline_alternatives() else: # Todo:...
Specify the pipeline. See get_pipeline_alternatives to see what is available. Input should be a string.
def print_dataset_summary(self): print('--- DATASET INFORMATION ---') print('--- Subjects ---') if self.raw_data_exists: if self.BIDS.get_subjects(): print('Number of subjects (in dataset): ' + str(len(self.BIDS.get_subjects()))) ...
Prints information about the BIDS data and the files currently selected.
def load_frompickle(cls, fname, reload_object=False): if fname[-4:] != '.pkl': fname += '.pkl' with open(fname, 'rb') as f: tnet = pickle.load(f) if reload_object: reloadnet = teneto.TenetoBIDS(tnet.BIDS_dir, pipeline=tnet.pipeline, pipeline_subdir=tn...
Loaded saved instance of fname : str path to pickle object (output of TenetoBIDS.save_aspickle) reload_object : bool (default False) reloads object by calling teneto.TenetoBIDS (some information lost, for development) Returns ------- self : ...
def load_data(self, datatype='tvc', tag=None, measure=''): if datatype == 'temporalnetwork' and not measure: raise ValueError( 'When datatype is temporalnetwork, \'measure\' must also be specified.') self.add_history(inspect.stack()[0][3], locals(), 1) data_l...
Function loads time-varying connectivity estimates created by the TenetoBIDS.derive function. The default grabs all data (in numpy arrays) in the teneto/../func/tvc/ directory. Data is placed in teneto.tvc_data_ Parameters ---------- datatype : str \'tvc\', \'parcel...
def temporal_closeness_centrality(tnet=None, paths=None): if tnet is not None and paths is not None: raise ValueError('Only network or path input allowed.') if tnet is None and paths is None: raise ValueError('No input.') # if shortest paths are not calculated, calculate them if tne...
Returns temporal closeness centrality per node. Parameters ----------- Input should be *either* tnet or paths. data : array or dict Temporal network input (graphlet or contact). nettype: 'bu', 'bd'. paths : pandas dataframe Output of TenetoBIDS.networkmeasure.shortest_temporal_...
def sid(tnet, communities, axis=0, calc='global', decay=0): r tnet, netinfo = utils.process_input(tnet, ['C', 'G', 'TN']) D = temporal_degree_centrality( tnet, calc='time', communities=communities, decay=decay) # Check network output (order of communitiesworks) network_ids = np.unique(commun...
r""" Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength.[sid-1]_ Parameters ---------- tnet: array, dict Temporal network input (graphlet or contact). Allowerd nettype: 'bu', 'bd', 'wu', 'wd' communit...
def bursty_coeff(data, calc='edge', nodes='all', communities=None, threshold_type=None, threshold_level=None, threshold_params=None): r if threshold_type is not None: if threshold_params is None: threshold_params = {} data = binarize(data, threshold_type, thr...
r""" Calculates the bursty coefficient.[1][2] Parameters ---------- data : array, dict This is either (1) temporal network input (graphlet or contact) with nettype: 'bu', 'bd'. (2) dictionary of ICTs (output of *intercontacttimes*). A weighted network can be applied if you specify thre...
def flatten(d, reducer='tuple', inverse=False): if isinstance(reducer, str): reducer = REDUCER_DICT[reducer] flat_dict = {} def _flatten(d, parent=None): for key, value in six.viewitems(d): flat_key = reducer(parent, key) if isinstance(value, Mapping): ...
Flatten dict-like object. Parameters ---------- d: dict-like object The dict that will be flattened. reducer: {'tuple', 'path', function} (default: 'tuple') The key joining method. If a function is given, the function will be used to reduce. 'tuple': The resulting key wi...
def nested_set_dict(d, keys, value):
    """Set *value* in *d* at the position given by the sequence of nested keys.

    Intermediate dicts are created as needed via ``setdefault``.

    Parameters
    ----------
    d : Mapping
        The dict to write into (modified in place).
    keys : Sequence[str]
        Non-empty sequence of nested keys.
    value : Any
        The value stored under the innermost key.

    Raises
    ------
    ValueError
        If ``keys`` is empty, or if the innermost key already exists in
        its containing dict.
    """
    # Explicit check instead of `assert`: asserts are stripped under -O,
    # which would silently allow an empty key sequence through.
    if not keys:
        raise ValueError('keys must be a non-empty sequence')
    key = keys[0]
    if len(keys) == 1:
        if key in d:
            raise ValueError("duplicated key '{}'".format(key))
        d[key] = value
        return
    d = d.setdefault(key, {})
    nested_set_dict(d, keys[1:], value)
Set a value to a sequence of nested keys Parameters ---------- d: Mapping keys: Sequence[str] value: Any
def unflatten(d, splitter='tuple', inverse=False): if isinstance(splitter, str): splitter = SPLITTER_DICT[splitter] unflattened_dict = {} for flat_key, value in six.viewitems(d): if inverse: flat_key, value = value, flat_key key_tuple = splitter(flat_key) nes...
Unflatten dict-like object. Parameters ---------- d: dict-like object The dict that will be unflattened. splitter: {'tuple', 'path', function} (default: 'tuple') The key splitting method. If a function is given, the function will be used to split. 'tuple': Use each eleme...
def plot_pianoroll(ax, pianoroll, is_drum=False, beat_resolution=None, downbeats=None, preset='default', cmap='Blues', xtick='auto', ytick='octave', xticklabel=True, yticklabel='auto', tick_loc=None, tick_direction='in', label='both', grid='bot...
Plot a pianoroll given as a numpy array. Parameters ---------- ax : matplotlib.axes.Axes object A :class:`matplotlib.axes.Axes` object where the pianoroll will be plotted on. pianoroll : np.ndarray A pianoroll to be plotted. The values should be in [0, 1] when data type ...
def plot_track(track, filename=None, beat_resolution=None, downbeats=None, preset='default', cmap='Blues', xtick='auto', ytick='octave', xticklabel=True, yticklabel='auto', tick_loc=None, tick_direction='in', label='both', grid='both', grid_linestyle=':', grid...
Plot the pianoroll or save a plot of the pianoroll. Parameters ---------- filename : The filename to which the plot is saved. If None, save nothing. beat_resolution : int The number of time steps used to represent a beat. Required and only effective when `xtick` is 'beat'. d...
def plot_multitrack(multitrack, filename=None, mode='separate', track_label='name', preset='default', cmaps=None, xtick='auto', ytick='octave', xticklabel=True, yticklabel='auto', tick_loc=None, tick_direction='in', label='both', grid='both...
Plot the pianorolls or save a plot of them. Parameters ---------- filename : str The filename to which the plot is saved. If None, save nothing. mode : {'separate', 'stacked', 'hybrid'} A string that indicate the plotting mode to use. Defaults to 'separate'. - In 'separ...
def save_animation(filename, pianoroll, window, hop=1, fps=None, is_drum=False, beat_resolution=None, downbeats=None, preset='default', cmap='Blues', xtick='auto', ytick='octave', xticklabel=True, yticklabel='auto', tick_loc=None, tick_direction='in', ...
Save a pianoroll to an animation in video or GIF format. Parameters ---------- filename : str The filename to which the animation is saved. pianoroll : np.ndarray A pianoroll to be plotted. The values should be in [0, 1] when data type is float, and in [0, 127] when data type is...
def append_track(self, track=None, pianoroll=None, program=0, is_drum=False, name='unknown'): if track is not None: if not isinstance(track, Track): raise TypeError("`track` must be a pypianoroll.Track instance.") track.check_validity() ...
Append a multitrack.Track instance to the track list or create a new multitrack.Track object and append it to the track list. Parameters ---------- track : pianoroll.Track A :class:`pypianoroll.Track` instance to be appended to the track list. pianoroll :...
def check_validity(self): # tracks for track in self.tracks: if not isinstance(track, Track): raise TypeError("`tracks` must be a list of " "`pypianoroll.Track` instances.") track.check_validity() # tempo if...
Raise an error if any invalid attribute found. Raises ------ TypeError If an attribute has an invalid type. ValueError If an attribute has an invalid value (of the correct type).
def clip(self, lower=0, upper=127):
    """Clip the pianorolls of all tracks to the range [lower, upper].

    Parameters
    ----------
    lower : int or float
        The lower bound passed to each track's ``clip``. Defaults to 0.
    upper : int or float
        The upper bound passed to each track's ``clip``. Defaults to 127.
    """
    # Delegate clipping to every track in turn.
    for current_track in self.tracks:
        current_track.clip(lower, upper)
Clip the pianorolls of all tracks by the given lower and upper bounds. Parameters ---------- lower : int or float The lower bound to clip the pianorolls. Defaults to 0. upper : int or float The upper bound to clip the pianorolls. Defaults to 127.
def get_active_length(self):
    """Return the maximum active length (i.e., without trailing silence)
    among the pianorolls of all tracks, in time steps.

    Returns
    -------
    active_length : int
        The maximum active length among all tracks; 0 if there are no
        tracks.
    """
    # The original loop called track.get_active_length() twice per track;
    # built-in max() with default=0 evaluates it once and preserves the
    # 0 result for an empty track list.
    return max((track.get_active_length() for track in self.tracks),
               default=0)
Return the maximum active length (i.e., without trailing silence) among the pianorolls of all tracks. The unit is time step. Returns ------- active_length : int The maximum active length (i.e., without trailing silence) among the pianorolls of all tracks. The uni...
def get_active_pitch_range(self): lowest, highest = self.tracks[0].get_active_pitch_range() if len(self.tracks) > 1: for track in self.tracks[1:]: low, high = track.get_active_pitch_range() if low < lowest: lowest = low ...
Return the active pitch range of the pianorolls of all tracks as a tuple (lowest, highest). Returns ------- lowest : int The lowest active pitch among the pianorolls of all tracks. highest : int The highest active pitch among the pianorolls of all tracks.
def get_downbeat_steps(self):
    """Return the indices of time steps that contain downbeats.

    Returns
    -------
    downbeat_steps : list
        The indices of time steps that contain downbeats; empty when no
        downbeat array is set.
    """
    # Guard clause: no downbeat information recorded on this object.
    if self.downbeat is None:
        return []
    return np.nonzero(self.downbeat)[0].tolist()
Return the indices of time steps that contain downbeats. Returns ------- downbeat_steps : list The indices of time steps that contain downbeats.
def get_empty_tracks(self):
    """Return the indices of tracks with empty pianorolls.

    Returns
    -------
    empty_track_indices : list
        The indices of tracks whose pianorolls contain no nonzero entry.
    """
    return [i for i, trk in enumerate(self.tracks)
            if not np.any(trk.pianoroll)]
Return the indices of tracks with empty pianorolls. Returns ------- empty_track_indices : list The indices of tracks with empty pianorolls.
def get_max_length(self):
    """Return the maximum length of the pianorolls along the time axis
    (in time steps).

    Returns
    -------
    max_length : int
        The maximum pianoroll length among all tracks; 0 if there are no
        tracks.
    """
    # Hand-rolled max-finding loop replaced with built-in max();
    # default=0 preserves the original result for an empty track list.
    return max((track.pianoroll.shape[0] for track in self.tracks),
               default=0)
Return the maximum length of the pianorolls along the time axis (in time step). Returns ------- max_length : int The maximum length of the pianorolls along the time axis (in time step).
def get_merged_pianoroll(self, mode='sum'): stacked = self.get_stacked_pianoroll() if mode == 'any': merged = np.any(stacked, axis=2) elif mode == 'sum': merged = np.sum(stacked, axis=2) elif mode == 'max': merged = np.max(stacked, axis=2) ...
Return the merged pianoroll. Parameters ---------- mode : {'sum', 'max', 'any'} A string that indicates the merging strategy to apply along the track axis. Default to 'sum'. - In 'sum' mode, the merged pianoroll is the sum of all the pianorolls...
def get_stacked_pianoroll(self):
    """Return all track pianorolls stacked along a new last axis.

    The tracks are first padded to a common length on a deep copy, so
    the current object is left untouched.

    Returns
    -------
    stacked : np.ndarray, shape=(n_time_steps, 128, n_tracks)
        The stacked pianoroll.
    """
    padded = deepcopy(self)
    padded.pad_to_same()
    rolls = [track.pianoroll for track in padded.tracks]
    return np.stack(rolls, axis=-1)
Return a stacked multitrack pianoroll. The shape of the return array is (n_time_steps, 128, n_tracks). Returns ------- stacked : np.ndarray, shape=(n_time_steps, 128, n_tracks) The stacked pianoroll.
def load(self, filename): def reconstruct_sparse(target_dict, name): return csc_matrix((target_dict[name+'_csc_data'], target_dict[name+'_csc_indices'], target_dict[name+'_csc_indptr']), ...
Load a npz file. Supports only files previously saved by :meth:`pypianoroll.Multitrack.save`. Notes ----- Attribute values will all be overwritten. Parameters ---------- filename : str The name of the npz file to be loaded.
def merge_tracks(self, track_indices=None, mode='sum', program=0, is_drum=False, name='merged', remove_merged=False): if mode not in ('max', 'sum', 'any'): raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.") merged = self[track_indices].get_merged_p...
Merge pianorolls of the tracks specified by `track_indices`. The merged track will have program number as given by `program` and drum indicator as given by `is_drum`. The merged track will be appended at the end of the track list. Parameters ---------- track_indices : li...
def pad_to_same(self):
    """Zero-pad every shorter pianoroll at the end (along the time axis)
    so that all tracks end up with the maximum pianoroll length."""
    target = self.get_max_length()
    for track in self.tracks:
        shortfall = target - track.pianoroll.shape[0]
        # Only tracks shorter than the target need padding.
        if shortfall > 0:
            track.pad(shortfall)
Pad shorter pianorolls with zeros at the end along the time axis to make the resulting pianoroll lengths the same as the maximum pianoroll length among all the tracks.
def parse_midi(self, filename, **kwargs):
    """Parse a MIDI file into this object.

    Parameters
    ----------
    filename : str
        The name of the MIDI file to be parsed.
    **kwargs :
        Forwarded to :meth:`pypianoroll.Multitrack.parse_pretty_midi`;
        see that method for full documentation.
    """
    midi_data = pretty_midi.PrettyMIDI(filename)
    self.parse_pretty_midi(midi_data, **kwargs)
Parse a MIDI file. Parameters ---------- filename : str The name of the MIDI file to be parsed. **kwargs: See :meth:`pypianoroll.Multitrack.parse_pretty_midi` for full documentation.
def parse_pretty_midi(self, pm, mode='max', algorithm='normal', binarized=False, skip_empty_tracks=True, collect_onsets_only=False, threshold=0, first_beat_time=None): if mode not in ('max', 'sum'): raise ValueErr...
Parse a :class:`pretty_midi.PrettyMIDI` object. The data type of the resulting pianorolls is automatically determined (int if 'mode' is 'sum', np.uint8 if `mode` is 'max' and `binarized` is False, bool if `mode` is 'max' and `binarized` is True). Parameters ---------- pm...
def remove_tracks(self, track_indices):
    """Remove tracks specified by `track_indices`.

    Parameters
    ----------
    track_indices : int or list
        The index (or indices) of the tracks to be removed.
    """
    if isinstance(track_indices, int):
        track_indices = [track_indices]
    # Use a set so membership testing is O(1) per track instead of
    # scanning the index list each time.
    to_drop = set(track_indices)
    self.tracks = [track for idx, track in enumerate(self.tracks)
                   if idx not in to_drop]
Remove tracks specified by `track_indices`. Parameters ---------- track_indices : list The indices of the tracks to be removed.
def save(self, filename, compressed=True): def update_sparse(target_dict, sparse_matrix, name): csc = csc_matrix(sparse_matrix) target_dict[name+'_csc_data'] = csc.data target_dict[name+'_csc_indices'] = csc.indices target_dict[name+'_csc_ind...
Save the multitrack pianoroll to a (compressed) npz file, which can be later loaded by :meth:`pypianoroll.Multitrack.load`. Notes ----- To reduce the file size, the pianorolls are first converted to instances of scipy.sparse.csc_matrix, whose component arrays are then collected ...
def to_pretty_midi(self, constant_tempo=None, constant_velocity=100): self.check_validity() pm = pretty_midi.PrettyMIDI(initial_tempo=self.tempo[0]) # TODO: Add downbeat support -> time signature change events # TODO: Add tempo support -> tempo change events if constant_...
Convert to a :class:`pretty_midi.PrettyMIDI` instance. Notes ----- - Only constant tempo is supported by now. - The velocities of the converted pianorolls are clipped to [0, 127], i.e. values below 0 and values beyond 127 are replaced by 127 and 0, respectively. ...