id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
235,100
dask/dask-ml
dask_ml/cluster/k_means.py
init_pp
def init_pp(X, n_clusters, random_state): """K-means initialization using k-means++ This uses scikit-learn's implementation. """ x_squared_norms = row_norms(X, squared=True).compute() logger.info("Initializing with k-means++") with _timer("initialization of %2d centers" % n_clusters, _logger=logger): centers = sk_k_means._k_init( X, n_clusters, random_state=random_state, x_squared_norms=x_squared_norms ) return centers
python
def init_pp(X, n_clusters, random_state): x_squared_norms = row_norms(X, squared=True).compute() logger.info("Initializing with k-means++") with _timer("initialization of %2d centers" % n_clusters, _logger=logger): centers = sk_k_means._k_init( X, n_clusters, random_state=random_state, x_squared_norms=x_squared_norms ) return centers
[ "def", "init_pp", "(", "X", ",", "n_clusters", ",", "random_state", ")", ":", "x_squared_norms", "=", "row_norms", "(", "X", ",", "squared", "=", "True", ")", ".", "compute", "(", ")", "logger", ".", "info", "(", "\"Initializing with k-means++\"", ")", "wi...
K-means initialization using k-means++ This uses scikit-learn's implementation.
[ "K", "-", "means", "initialization", "using", "k", "-", "means", "++" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L372-L384
235,101
dask/dask-ml
dask_ml/cluster/k_means.py
init_random
def init_random(X, n_clusters, random_state): """K-means initialization using randomly chosen points""" logger.info("Initializing randomly") idx = sorted(draw_seed(random_state, 0, len(X), size=n_clusters)) centers = X[idx].compute() return centers
python
def init_random(X, n_clusters, random_state): logger.info("Initializing randomly") idx = sorted(draw_seed(random_state, 0, len(X), size=n_clusters)) centers = X[idx].compute() return centers
[ "def", "init_random", "(", "X", ",", "n_clusters", ",", "random_state", ")", ":", "logger", ".", "info", "(", "\"Initializing randomly\"", ")", "idx", "=", "sorted", "(", "draw_seed", "(", "random_state", ",", "0", ",", "len", "(", "X", ")", ",", "size",...
K-means initialization using randomly chosen points
[ "K", "-", "means", "initialization", "using", "randomly", "chosen", "points" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L388-L393
235,102
dask/dask-ml
dask_ml/cluster/k_means.py
init_scalable
def init_scalable( X, n_clusters, random_state=None, max_iter=None, oversampling_factor=2 ): """K-Means initialization using k-means|| This is algorithm 2 in Scalable K-Means++ (2012). """ logger.info("Initializing with k-means||") # Step 1: Initialize Centers idx = 0 centers = da.compute(X[idx, np.newaxis])[0] c_idx = {idx} # Step 2: Initialize cost cost, = compute(evaluate_cost(X, centers)) if cost == 0: n_iter = 0 else: n_iter = int(np.round(np.log(cost))) if max_iter is not None: n_iter = min(max_iter, n_iter) # Steps 3 - 6: update candidate Centers for i in range(n_iter): with _timer( "init iteration %2d/%2d , %2d centers" % (i + 1, n_iter, len(c_idx)), _logger=logger, ): new_idxs = _sample_points(X, centers, oversampling_factor, random_state) new_idxs = set(*compute(new_idxs)) c_idx |= new_idxs # Sort before slicing, for better performance / memory # usage with the scheduler. # See https://github.com/dask/dask-ml/issues/39 centers = X[sorted(c_idx)].compute() # XXX: scikit-learn doesn't have weighted k-means. # The paper weights each center by the number of points closest to it. # https://stackoverflow.com/a/37198799/1889400 claims you can scale the # features before clustering, but that doesn't seem right. # I think that replicating the *points*, proportional to the number of # original points closest to the candidate centers, would be a better way # to do that. 
if len(centers) < n_clusters: logger.warning("Found fewer than %d clusters in init.", n_clusters) # supplement with random need = n_clusters - len(centers) locs = sorted( random_state.choice( np.arange(0, len(X)), size=need, replace=False, chunks=len(X) ) ) extra = X[locs].compute() return np.vstack([centers, extra]) else: # Step 7, 8 without weights # dask RandomState objects aren't valid for scikit-learn rng2 = ( random_state.randint(0, np.iinfo("i4").max - 1, chunks=()) .compute(scheduler="single-threaded") .item() ) km = sk_k_means.KMeans(n_clusters, random_state=rng2) km.fit(centers) return km.cluster_centers_
python
def init_scalable( X, n_clusters, random_state=None, max_iter=None, oversampling_factor=2 ): logger.info("Initializing with k-means||") # Step 1: Initialize Centers idx = 0 centers = da.compute(X[idx, np.newaxis])[0] c_idx = {idx} # Step 2: Initialize cost cost, = compute(evaluate_cost(X, centers)) if cost == 0: n_iter = 0 else: n_iter = int(np.round(np.log(cost))) if max_iter is not None: n_iter = min(max_iter, n_iter) # Steps 3 - 6: update candidate Centers for i in range(n_iter): with _timer( "init iteration %2d/%2d , %2d centers" % (i + 1, n_iter, len(c_idx)), _logger=logger, ): new_idxs = _sample_points(X, centers, oversampling_factor, random_state) new_idxs = set(*compute(new_idxs)) c_idx |= new_idxs # Sort before slicing, for better performance / memory # usage with the scheduler. # See https://github.com/dask/dask-ml/issues/39 centers = X[sorted(c_idx)].compute() # XXX: scikit-learn doesn't have weighted k-means. # The paper weights each center by the number of points closest to it. # https://stackoverflow.com/a/37198799/1889400 claims you can scale the # features before clustering, but that doesn't seem right. # I think that replicating the *points*, proportional to the number of # original points closest to the candidate centers, would be a better way # to do that. if len(centers) < n_clusters: logger.warning("Found fewer than %d clusters in init.", n_clusters) # supplement with random need = n_clusters - len(centers) locs = sorted( random_state.choice( np.arange(0, len(X)), size=need, replace=False, chunks=len(X) ) ) extra = X[locs].compute() return np.vstack([centers, extra]) else: # Step 7, 8 without weights # dask RandomState objects aren't valid for scikit-learn rng2 = ( random_state.randint(0, np.iinfo("i4").max - 1, chunks=()) .compute(scheduler="single-threaded") .item() ) km = sk_k_means.KMeans(n_clusters, random_state=rng2) km.fit(centers) return km.cluster_centers_
[ "def", "init_scalable", "(", "X", ",", "n_clusters", ",", "random_state", "=", "None", ",", "max_iter", "=", "None", ",", "oversampling_factor", "=", "2", ")", ":", "logger", ".", "info", "(", "\"Initializing with k-means||\"", ")", "# Step 1: Initialize Centers",...
K-Means initialization using k-means|| This is algorithm 2 in Scalable K-Means++ (2012).
[ "K", "-", "Means", "initialization", "using", "k", "-", "means||" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L397-L467
235,103
dask/dask-ml
dask_ml/cluster/k_means.py
_sample_points
def _sample_points(X, centers, oversampling_factor, random_state): r""" Sample points independently with probability .. math:: p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})} """ # re-implement evaluate_cost here, to avoid redundant computation distances = pairwise_distances(X, centers).min(1) ** 2 denom = distances.sum() p = oversampling_factor * distances / denom draws = random_state.uniform(size=len(p), chunks=p.chunks) picked = p > draws new_idxs, = da.where(picked) return new_idxs
python
def _sample_points(X, centers, oversampling_factor, random_state): r""" Sample points independently with probability .. math:: p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})} """ # re-implement evaluate_cost here, to avoid redundant computation distances = pairwise_distances(X, centers).min(1) ** 2 denom = distances.sum() p = oversampling_factor * distances / denom draws = random_state.uniform(size=len(p), chunks=p.chunks) picked = p > draws new_idxs, = da.where(picked) return new_idxs
[ "def", "_sample_points", "(", "X", ",", "centers", ",", "oversampling_factor", ",", "random_state", ")", ":", "# re-implement evaluate_cost here, to avoid redundant computation", "distances", "=", "pairwise_distances", "(", "X", ",", "centers", ")", ".", "min", "(", "...
r""" Sample points independently with probability .. math:: p_x = \frac{\ell \cdot d^2(x, \mathcal{C})}{\phi_X(\mathcal{C})}
[ "r", "Sample", "points", "independently", "with", "probability" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/k_means.py#L476-L495
235,104
dask/dask-ml
dask_ml/preprocessing/data.py
RobustScaler.transform
def transform(self, X): """Center and scale the data. Can be called on sparse input, provided that ``RobustScaler`` has been fitted to dense input and ``with_centering=False``. Parameters ---------- X : {array-like, sparse matrix} The data used to scale along the specified axis. This implementation was copied and modified from Scikit-Learn. See License information here: https://github.com/scikit-learn/scikit-learn/blob/master/README.rst """ if self.with_centering: check_is_fitted(self, "center_") if self.with_scaling: check_is_fitted(self, "scale_") X = self._check_array(X, self.copy) # if sparse.issparse(X): # if self.with_scaling: # inplace_column_scale(X, 1.0 / self.scale_) # else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X
python
def transform(self, X): if self.with_centering: check_is_fitted(self, "center_") if self.with_scaling: check_is_fitted(self, "scale_") X = self._check_array(X, self.copy) # if sparse.issparse(X): # if self.with_scaling: # inplace_column_scale(X, 1.0 / self.scale_) # else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X
[ "def", "transform", "(", "self", ",", "X", ")", ":", "if", "self", ".", "with_centering", ":", "check_is_fitted", "(", "self", ",", "\"center_\"", ")", "if", "self", ".", "with_scaling", ":", "check_is_fitted", "(", "self", ",", "\"scale_\"", ")", "X", "...
Center and scale the data. Can be called on sparse input, provided that ``RobustScaler`` has been fitted to dense input and ``with_centering=False``. Parameters ---------- X : {array-like, sparse matrix} The data used to scale along the specified axis. This implementation was copied and modified from Scikit-Learn. See License information here: https://github.com/scikit-learn/scikit-learn/blob/master/README.rst
[ "Center", "and", "scale", "the", "data", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L168-L198
235,105
dask/dask-ml
dask_ml/preprocessing/data.py
RobustScaler.inverse_transform
def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like The data used to scale along the specified axis. This implementation was copied and modified from Scikit-Learn. See License information here: https://github.com/scikit-learn/scikit-learn/blob/master/README.rst """ check_is_fitted(self, "center_", "scale_") # if sparse.issparse(X): # if self.with_scaling: # inplace_column_scale(X, self.scale_) # else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X
python
def inverse_transform(self, X): check_is_fitted(self, "center_", "scale_") # if sparse.issparse(X): # if self.with_scaling: # inplace_column_scale(X, self.scale_) # else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X
[ "def", "inverse_transform", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "\"center_\"", ",", "\"scale_\"", ")", "# if sparse.issparse(X):", "# if self.with_scaling:", "# inplace_column_scale(X, self.scale_)", "# else:", "if", "self", "...
Scale back the data to the original representation Parameters ---------- X : array-like The data used to scale along the specified axis. This implementation was copied and modified from Scikit-Learn. See License information here: https://github.com/scikit-learn/scikit-learn/blob/master/README.rst
[ "Scale", "back", "the", "data", "to", "the", "original", "representation" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L200-L223
235,106
dask/dask-ml
dask_ml/preprocessing/data.py
Categorizer.fit
def fit(self, X, y=None): """Find the categorical columns. Parameters ---------- X : pandas.DataFrame or dask.DataFrame y : ignored Returns ------- self """ X = self._check_array(X) if self.categories is not None: # some basic validation columns = pd.Index(self.categories) categories = self.categories elif isinstance(X, pd.DataFrame): columns, categories = self._fit(X) else: columns, categories = self._fit_dask(X) self.columns_ = columns self.categories_ = categories return self
python
def fit(self, X, y=None): X = self._check_array(X) if self.categories is not None: # some basic validation columns = pd.Index(self.categories) categories = self.categories elif isinstance(X, pd.DataFrame): columns, categories = self._fit(X) else: columns, categories = self._fit_dask(X) self.columns_ = columns self.categories_ = categories return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "X", "=", "self", ".", "_check_array", "(", "X", ")", "if", "self", ".", "categories", "is", "not", "None", ":", "# some basic validation", "columns", "=", "pd", ".", "Index", "(...
Find the categorical columns. Parameters ---------- X : pandas.DataFrame or dask.DataFrame y : ignored Returns ------- self
[ "Find", "the", "categorical", "columns", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L401-L427
235,107
dask/dask-ml
dask_ml/preprocessing/data.py
Categorizer.transform
def transform(self, X, y=None): """Transform the columns in ``X`` according to ``self.categories_``. Parameters ---------- X : pandas.DataFrame or dask.DataFrame y : ignored Returns ------- X_trn : pandas.DataFrame or dask.DataFrame Same type as the input. The columns in ``self.categories_`` will be converted to categorical dtype. """ check_is_fitted(self, "categories_") X = self._check_array(X).copy() categories = self.categories_ for k, dtype in categories.items(): if _HAS_CTD: if not isinstance(dtype, pd.api.types.CategoricalDtype): dtype = pd.api.types.CategoricalDtype(*dtype) X[k] = X[k].astype(dtype) else: cat, ordered = dtype X[k] = X[k].astype("category").cat.set_categories(cat, ordered) return X
python
def transform(self, X, y=None): check_is_fitted(self, "categories_") X = self._check_array(X).copy() categories = self.categories_ for k, dtype in categories.items(): if _HAS_CTD: if not isinstance(dtype, pd.api.types.CategoricalDtype): dtype = pd.api.types.CategoricalDtype(*dtype) X[k] = X[k].astype(dtype) else: cat, ordered = dtype X[k] = X[k].astype("category").cat.set_categories(cat, ordered) return X
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "check_is_fitted", "(", "self", ",", "\"categories_\"", ")", "X", "=", "self", ".", "_check_array", "(", "X", ")", ".", "copy", "(", ")", "categories", "=", "self", ".", "c...
Transform the columns in ``X`` according to ``self.categories_``. Parameters ---------- X : pandas.DataFrame or dask.DataFrame y : ignored Returns ------- X_trn : pandas.DataFrame or dask.DataFrame Same type as the input. The columns in ``self.categories_`` will be converted to categorical dtype.
[ "Transform", "the", "columns", "in", "X", "according", "to", "self", ".", "categories_", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L455-L482
235,108
dask/dask-ml
dask_ml/preprocessing/data.py
DummyEncoder.fit
def fit(self, X, y=None): """Determine the categorical columns to be dummy encoded. Parameters ---------- X : pandas.DataFrame or dask.dataframe.DataFrame y : ignored Returns ------- self """ self.columns_ = X.columns columns = self.columns if columns is None: columns = X.select_dtypes(include=["category"]).columns else: for column in columns: assert is_categorical_dtype(X[column]), "Must be categorical" self.categorical_columns_ = columns self.non_categorical_columns_ = X.columns.drop(self.categorical_columns_) if _HAS_CTD: self.dtypes_ = {col: X[col].dtype for col in self.categorical_columns_} else: self.dtypes_ = { col: (X[col].cat.categories, X[col].cat.ordered) for col in self.categorical_columns_ } left = len(self.non_categorical_columns_) self.categorical_blocks_ = {} for col in self.categorical_columns_: right = left + len(X[col].cat.categories) if self.drop_first: right -= 1 self.categorical_blocks_[col], left = slice(left, right), right if isinstance(X, pd.DataFrame): sample = X.iloc[:1] else: sample = X._meta_nonempty self.transformed_columns_ = pd.get_dummies( sample, drop_first=self.drop_first ).columns return self
python
def fit(self, X, y=None): self.columns_ = X.columns columns = self.columns if columns is None: columns = X.select_dtypes(include=["category"]).columns else: for column in columns: assert is_categorical_dtype(X[column]), "Must be categorical" self.categorical_columns_ = columns self.non_categorical_columns_ = X.columns.drop(self.categorical_columns_) if _HAS_CTD: self.dtypes_ = {col: X[col].dtype for col in self.categorical_columns_} else: self.dtypes_ = { col: (X[col].cat.categories, X[col].cat.ordered) for col in self.categorical_columns_ } left = len(self.non_categorical_columns_) self.categorical_blocks_ = {} for col in self.categorical_columns_: right = left + len(X[col].cat.categories) if self.drop_first: right -= 1 self.categorical_blocks_[col], left = slice(left, right), right if isinstance(X, pd.DataFrame): sample = X.iloc[:1] else: sample = X._meta_nonempty self.transformed_columns_ = pd.get_dummies( sample, drop_first=self.drop_first ).columns return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "self", ".", "columns_", "=", "X", ".", "columns", "columns", "=", "self", ".", "columns", "if", "columns", "is", "None", ":", "columns", "=", "X", ".", "select_dtypes", "(", "...
Determine the categorical columns to be dummy encoded. Parameters ---------- X : pandas.DataFrame or dask.dataframe.DataFrame y : ignored Returns ------- self
[ "Determine", "the", "categorical", "columns", "to", "be", "dummy", "encoded", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L570-L617
235,109
dask/dask-ml
dask_ml/preprocessing/data.py
DummyEncoder.transform
def transform(self, X, y=None): """Dummy encode the categorical columns in X Parameters ---------- X : pd.DataFrame or dd.DataFrame y : ignored Returns ------- transformed : pd.DataFrame or dd.DataFrame Same type as the input """ if not X.columns.equals(self.columns_): raise ValueError( "Columns of 'X' do not match the training " "columns. Got {!r}, expected {!r}".format(X.columns, self.columns) ) if isinstance(X, pd.DataFrame): return pd.get_dummies(X, drop_first=self.drop_first, columns=self.columns) elif isinstance(X, dd.DataFrame): return dd.get_dummies(X, drop_first=self.drop_first, columns=self.columns) else: raise TypeError("Unexpected type {}".format(type(X)))
python
def transform(self, X, y=None): if not X.columns.equals(self.columns_): raise ValueError( "Columns of 'X' do not match the training " "columns. Got {!r}, expected {!r}".format(X.columns, self.columns) ) if isinstance(X, pd.DataFrame): return pd.get_dummies(X, drop_first=self.drop_first, columns=self.columns) elif isinstance(X, dd.DataFrame): return dd.get_dummies(X, drop_first=self.drop_first, columns=self.columns) else: raise TypeError("Unexpected type {}".format(type(X)))
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "if", "not", "X", ".", "columns", ".", "equals", "(", "self", ".", "columns_", ")", ":", "raise", "ValueError", "(", "\"Columns of 'X' do not match the training \"", "\"columns. Got ...
Dummy encode the categorical columns in X Parameters ---------- X : pd.DataFrame or dd.DataFrame y : ignored Returns ------- transformed : pd.DataFrame or dd.DataFrame Same type as the input
[ "Dummy", "encode", "the", "categorical", "columns", "in", "X" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L619-L642
235,110
dask/dask-ml
dask_ml/preprocessing/data.py
DummyEncoder.inverse_transform
def inverse_transform(self, X): """Inverse dummy-encode the columns in `X` Parameters ---------- X : array or dataframe Either the NumPy, dask, or pandas version Returns ------- data : DataFrame Dask array or dataframe will return a Dask DataFrame. Numpy array or pandas dataframe will return a pandas DataFrame """ if isinstance(X, np.ndarray): X = pd.DataFrame(X, columns=self.transformed_columns_) elif isinstance(X, da.Array): # later on we concat(..., axis=1), which requires # known divisions. Suboptimal, but I think unavoidable. unknown = np.isnan(X.chunks[0]).any() if unknown: lengths = blockwise(len, "i", X[:, 0], "i", dtype="i8").compute() X = X.copy() chunks = (tuple(lengths), X.chunks[1]) X._chunks = chunks X = dd.from_dask_array(X, columns=self.transformed_columns_) big = isinstance(X, dd.DataFrame) if big: chunks = np.array(X.divisions) chunks[-1] = chunks[-1] + 1 chunks = tuple(chunks[1:] - chunks[:-1]) non_cat = X[list(self.non_categorical_columns_)] cats = [] for col in self.categorical_columns_: slice_ = self.categorical_blocks_[col] if _HAS_CTD: dtype = self.dtypes_[col] categories, ordered = dtype.categories, dtype.ordered else: categories, ordered = self.dtypes_[col] # use .values to avoid warning from pandas cols_slice = list(X.columns[slice_]) if big: inds = X[cols_slice].to_dask_array(lengths=chunks) else: inds = X[cols_slice].values codes = inds.argmax(1) if self.drop_first: codes += 1 codes[(inds == 0).all(1)] = 0 if big: # dask codes._chunks = (chunks,) # Need a Categorical.from_codes for dask series = ( dd.from_dask_array(codes, columns=col) .astype("category") .cat.set_categories(np.arange(len(categories)), ordered=ordered) .cat.rename_categories(categories) ) # Bug in pandas <= 0.20.3 lost name if series.name is None: series.name = col series.divisions = X.divisions else: # pandas series = pd.Series( pd.Categorical.from_codes(codes, categories, ordered=ordered), name=col, ) cats.append(series) if big: df = dd.concat([non_cat] + cats, 
axis=1)[list(self.columns_)] else: df = pd.concat([non_cat] + cats, axis=1)[self.columns_] return df
python
def inverse_transform(self, X): if isinstance(X, np.ndarray): X = pd.DataFrame(X, columns=self.transformed_columns_) elif isinstance(X, da.Array): # later on we concat(..., axis=1), which requires # known divisions. Suboptimal, but I think unavoidable. unknown = np.isnan(X.chunks[0]).any() if unknown: lengths = blockwise(len, "i", X[:, 0], "i", dtype="i8").compute() X = X.copy() chunks = (tuple(lengths), X.chunks[1]) X._chunks = chunks X = dd.from_dask_array(X, columns=self.transformed_columns_) big = isinstance(X, dd.DataFrame) if big: chunks = np.array(X.divisions) chunks[-1] = chunks[-1] + 1 chunks = tuple(chunks[1:] - chunks[:-1]) non_cat = X[list(self.non_categorical_columns_)] cats = [] for col in self.categorical_columns_: slice_ = self.categorical_blocks_[col] if _HAS_CTD: dtype = self.dtypes_[col] categories, ordered = dtype.categories, dtype.ordered else: categories, ordered = self.dtypes_[col] # use .values to avoid warning from pandas cols_slice = list(X.columns[slice_]) if big: inds = X[cols_slice].to_dask_array(lengths=chunks) else: inds = X[cols_slice].values codes = inds.argmax(1) if self.drop_first: codes += 1 codes[(inds == 0).all(1)] = 0 if big: # dask codes._chunks = (chunks,) # Need a Categorical.from_codes for dask series = ( dd.from_dask_array(codes, columns=col) .astype("category") .cat.set_categories(np.arange(len(categories)), ordered=ordered) .cat.rename_categories(categories) ) # Bug in pandas <= 0.20.3 lost name if series.name is None: series.name = col series.divisions = X.divisions else: # pandas series = pd.Series( pd.Categorical.from_codes(codes, categories, ordered=ordered), name=col, ) cats.append(series) if big: df = dd.concat([non_cat] + cats, axis=1)[list(self.columns_)] else: df = pd.concat([non_cat] + cats, axis=1)[self.columns_] return df
[ "def", "inverse_transform", "(", "self", ",", "X", ")", ":", "if", "isinstance", "(", "X", ",", "np", ".", "ndarray", ")", ":", "X", "=", "pd", ".", "DataFrame", "(", "X", ",", "columns", "=", "self", ".", "transformed_columns_", ")", "elif", "isinst...
Inverse dummy-encode the columns in `X` Parameters ---------- X : array or dataframe Either the NumPy, dask, or pandas version Returns ------- data : DataFrame Dask array or dataframe will return a Dask DataFrame. Numpy array or pandas dataframe will return a pandas DataFrame
[ "Inverse", "dummy", "-", "encode", "the", "columns", "in", "X" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L644-L729
235,111
dask/dask-ml
dask_ml/preprocessing/data.py
OrdinalEncoder.fit
def fit(self, X, y=None): """Determine the categorical columns to be encoded. Parameters ---------- X : pandas.DataFrame or dask.dataframe.DataFrame y : ignored Returns ------- self """ self.columns_ = X.columns columns = self.columns if columns is None: columns = X.select_dtypes(include=["category"]).columns else: for column in columns: assert is_categorical_dtype(X[column]), "Must be categorical" self.categorical_columns_ = columns self.non_categorical_columns_ = X.columns.drop(self.categorical_columns_) if _HAS_CTD: self.dtypes_ = {col: X[col].dtype for col in self.categorical_columns_} else: self.dtypes_ = { col: (X[col].cat.categories, X[col].cat.ordered) for col in self.categorical_columns_ } return self
python
def fit(self, X, y=None): self.columns_ = X.columns columns = self.columns if columns is None: columns = X.select_dtypes(include=["category"]).columns else: for column in columns: assert is_categorical_dtype(X[column]), "Must be categorical" self.categorical_columns_ = columns self.non_categorical_columns_ = X.columns.drop(self.categorical_columns_) if _HAS_CTD: self.dtypes_ = {col: X[col].dtype for col in self.categorical_columns_} else: self.dtypes_ = { col: (X[col].cat.categories, X[col].cat.ordered) for col in self.categorical_columns_ } return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "self", ".", "columns_", "=", "X", ".", "columns", "columns", "=", "self", ".", "columns", "if", "columns", "is", "None", ":", "columns", "=", "X", ".", "select_dtypes", "(", "...
Determine the categorical columns to be encoded. Parameters ---------- X : pandas.DataFrame or dask.dataframe.DataFrame y : ignored Returns ------- self
[ "Determine", "the", "categorical", "columns", "to", "be", "encoded", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L804-L835
235,112
dask/dask-ml
dask_ml/preprocessing/data.py
OrdinalEncoder.transform
def transform(self, X, y=None): """Ordinal encode the categorical columns in X Parameters ---------- X : pd.DataFrame or dd.DataFrame y : ignored Returns ------- transformed : pd.DataFrame or dd.DataFrame Same type as the input """ if not X.columns.equals(self.columns_): raise ValueError( "Columns of 'X' do not match the training " "columns. Got {!r}, expected {!r}".format(X.columns, self.columns) ) if not isinstance(X, (pd.DataFrame, dd.DataFrame)): raise TypeError("Unexpected type {}".format(type(X))) X = X.copy() for col in self.categorical_columns_: X[col] = X[col].cat.codes return X
python
def transform(self, X, y=None): if not X.columns.equals(self.columns_): raise ValueError( "Columns of 'X' do not match the training " "columns. Got {!r}, expected {!r}".format(X.columns, self.columns) ) if not isinstance(X, (pd.DataFrame, dd.DataFrame)): raise TypeError("Unexpected type {}".format(type(X))) X = X.copy() for col in self.categorical_columns_: X[col] = X[col].cat.codes return X
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "if", "not", "X", ".", "columns", ".", "equals", "(", "self", ".", "columns_", ")", ":", "raise", "ValueError", "(", "\"Columns of 'X' do not match the training \"", "\"columns. Got ...
Ordinal encode the categorical columns in X Parameters ---------- X : pd.DataFrame or dd.DataFrame y : ignored Returns ------- transformed : pd.DataFrame or dd.DataFrame Same type as the input
[ "Ordinal", "encode", "the", "categorical", "columns", "in", "X" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L837-L861
235,113
dask/dask-ml
dask_ml/preprocessing/data.py
OrdinalEncoder.inverse_transform
def inverse_transform(self, X): """Inverse ordinal-encode the columns in `X` Parameters ---------- X : array or dataframe Either the NumPy, dask, or pandas version Returns ------- data : DataFrame Dask array or dataframe will return a Dask DataFrame. Numpy array or pandas dataframe will return a pandas DataFrame """ if isinstance(X, np.ndarray): X = pd.DataFrame(X, columns=self.columns_) elif isinstance(X, da.Array): # later on we concat(..., axis=1), which requires # known divisions. Suboptimal, but I think unavoidable. unknown = np.isnan(X.chunks[0]).any() if unknown: lengths = blockwise(len, "i", X[:, 0], "i", dtype="i8").compute() X = X.copy() chunks = (tuple(lengths), X.chunks[1]) X._chunks = chunks X = dd.from_dask_array(X, columns=self.columns_) big = isinstance(X, dd.DataFrame) if big: chunks = np.array(X.divisions) chunks[-1] = chunks[-1] + 1 chunks = tuple(chunks[1:] - chunks[:-1]) X = X.copy() for col in self.categorical_columns_: if _HAS_CTD: dtype = self.dtypes_[col] categories, ordered = dtype.categories, dtype.ordered else: categories, ordered = self.dtypes_[col] # use .values to avoid warning from pandas codes = X[col].values if big: # dask codes._chunks = (chunks,) # Need a Categorical.from_codes for dask series = ( dd.from_dask_array(codes, columns=col) .astype("category") .cat.set_categories(np.arange(len(categories)), ordered=ordered) .cat.rename_categories(categories) ) # Bug in pandas <= 0.20.3 lost name if series.name is None: series.name = col series.divisions = X.divisions else: # pandas series = pd.Series( pd.Categorical.from_codes(codes, categories, ordered=ordered), name=col, ) X[col] = series return X
python
def inverse_transform(self, X): if isinstance(X, np.ndarray): X = pd.DataFrame(X, columns=self.columns_) elif isinstance(X, da.Array): # later on we concat(..., axis=1), which requires # known divisions. Suboptimal, but I think unavoidable. unknown = np.isnan(X.chunks[0]).any() if unknown: lengths = blockwise(len, "i", X[:, 0], "i", dtype="i8").compute() X = X.copy() chunks = (tuple(lengths), X.chunks[1]) X._chunks = chunks X = dd.from_dask_array(X, columns=self.columns_) big = isinstance(X, dd.DataFrame) if big: chunks = np.array(X.divisions) chunks[-1] = chunks[-1] + 1 chunks = tuple(chunks[1:] - chunks[:-1]) X = X.copy() for col in self.categorical_columns_: if _HAS_CTD: dtype = self.dtypes_[col] categories, ordered = dtype.categories, dtype.ordered else: categories, ordered = self.dtypes_[col] # use .values to avoid warning from pandas codes = X[col].values if big: # dask codes._chunks = (chunks,) # Need a Categorical.from_codes for dask series = ( dd.from_dask_array(codes, columns=col) .astype("category") .cat.set_categories(np.arange(len(categories)), ordered=ordered) .cat.rename_categories(categories) ) # Bug in pandas <= 0.20.3 lost name if series.name is None: series.name = col series.divisions = X.divisions else: # pandas series = pd.Series( pd.Categorical.from_codes(codes, categories, ordered=ordered), name=col, ) X[col] = series return X
[ "def", "inverse_transform", "(", "self", ",", "X", ")", ":", "if", "isinstance", "(", "X", ",", "np", ".", "ndarray", ")", ":", "X", "=", "pd", ".", "DataFrame", "(", "X", ",", "columns", "=", "self", ".", "columns_", ")", "elif", "isinstance", "("...
Inverse ordinal-encode the columns in `X` Parameters ---------- X : array or dataframe Either the NumPy, dask, or pandas version Returns ------- data : DataFrame Dask array or dataframe will return a Dask DataFrame. Numpy array or pandas dataframe will return a pandas DataFrame
[ "Inverse", "ordinal", "-", "encode", "the", "columns", "in", "X" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/data.py#L863-L933
235,114
dask/dask-ml
dask_ml/_partial.py
fit
def fit(model, x, y, compute=True, shuffle_blocks=True, random_state=None, **kwargs): """ Fit scikit learn model against dask arrays Model must support the ``partial_fit`` interface for online or batch learning. Ideally your rows are independent and identically distributed. By default, this function will step through chunks of the arrays in random order. Parameters ---------- model: sklearn model Any model supporting partial_fit interface x: dask Array Two dimensional array, likely tall and skinny y: dask Array One dimensional array with same chunks as x's rows compute : bool Whether to compute this result shuffle_blocks : bool Whether to shuffle the blocks with ``random_state`` or not random_state : int or numpy.random.RandomState Random state to use when shuffling blocks kwargs: options to pass to partial_fit Examples -------- >>> import dask.array as da >>> X = da.random.random((10, 3), chunks=(5, 3)) >>> y = da.random.randint(0, 2, 10, chunks=(5,)) >>> from sklearn.linear_model import SGDClassifier >>> sgd = SGDClassifier() >>> sgd = da.learn.fit(sgd, X, y, classes=[1, 0]) >>> sgd # doctest: +SKIP SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0, fit_intercept=True, l1_ratio=0.15, learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5, random_state=None, shuffle=False, verbose=0, warm_start=False) This passes all of X and y through the classifier sequentially. 
We can use the classifier as normal on in-memory data >>> import numpy as np >>> sgd.predict(np.random.random((4, 3))) # doctest: +SKIP array([1, 0, 0, 1]) Or predict on a larger dataset >>> z = da.random.random((400, 3), chunks=(100, 3)) >>> da.learn.predict(sgd, z) # doctest: +SKIP dask.array<x_11, shape=(400,), chunks=((100, 100, 100, 100),), dtype=int64> """ if not hasattr(x, "chunks") and hasattr(x, "to_dask_array"): x = x.to_dask_array() assert x.ndim == 2 if y is not None: if not hasattr(y, "chunks") and hasattr(y, "to_dask_array"): y = y.to_dask_array() assert y.ndim == 1 assert x.chunks[0] == y.chunks[0] assert hasattr(model, "partial_fit") if len(x.chunks[1]) > 1: x = x.rechunk(chunks=(x.chunks[0], sum(x.chunks[1]))) nblocks = len(x.chunks[0]) order = list(range(nblocks)) if shuffle_blocks: rng = sklearn.utils.check_random_state(random_state) rng.shuffle(order) name = "fit-" + dask.base.tokenize(model, x, y, kwargs, order) dsk = {(name, -1): model} dsk.update( { (name, i): ( _partial_fit, (name, i - 1), (x.name, order[i], 0), (getattr(y, "name", ""), order[i]), kwargs, ) for i in range(nblocks) } ) graphs = {x.name: x.__dask_graph__(), name: dsk} if hasattr(y, "__dask_graph__"): graphs[y.name] = y.__dask_graph__() try: from dask.highlevelgraph import HighLevelGraph new_dsk = HighLevelGraph.merge(*graphs.values()) except ImportError: from dask import sharedict new_dsk = sharedict.merge(*graphs.values()) value = Delayed((name, nblocks - 1), new_dsk) if compute: return value.compute() else: return value
python
def fit(model, x, y, compute=True, shuffle_blocks=True, random_state=None, **kwargs): if not hasattr(x, "chunks") and hasattr(x, "to_dask_array"): x = x.to_dask_array() assert x.ndim == 2 if y is not None: if not hasattr(y, "chunks") and hasattr(y, "to_dask_array"): y = y.to_dask_array() assert y.ndim == 1 assert x.chunks[0] == y.chunks[0] assert hasattr(model, "partial_fit") if len(x.chunks[1]) > 1: x = x.rechunk(chunks=(x.chunks[0], sum(x.chunks[1]))) nblocks = len(x.chunks[0]) order = list(range(nblocks)) if shuffle_blocks: rng = sklearn.utils.check_random_state(random_state) rng.shuffle(order) name = "fit-" + dask.base.tokenize(model, x, y, kwargs, order) dsk = {(name, -1): model} dsk.update( { (name, i): ( _partial_fit, (name, i - 1), (x.name, order[i], 0), (getattr(y, "name", ""), order[i]), kwargs, ) for i in range(nblocks) } ) graphs = {x.name: x.__dask_graph__(), name: dsk} if hasattr(y, "__dask_graph__"): graphs[y.name] = y.__dask_graph__() try: from dask.highlevelgraph import HighLevelGraph new_dsk = HighLevelGraph.merge(*graphs.values()) except ImportError: from dask import sharedict new_dsk = sharedict.merge(*graphs.values()) value = Delayed((name, nblocks - 1), new_dsk) if compute: return value.compute() else: return value
[ "def", "fit", "(", "model", ",", "x", ",", "y", ",", "compute", "=", "True", ",", "shuffle_blocks", "=", "True", ",", "random_state", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "hasattr", "(", "x", ",", "\"chunks\"", ")", "and", ...
Fit scikit learn model against dask arrays Model must support the ``partial_fit`` interface for online or batch learning. Ideally your rows are independent and identically distributed. By default, this function will step through chunks of the arrays in random order. Parameters ---------- model: sklearn model Any model supporting partial_fit interface x: dask Array Two dimensional array, likely tall and skinny y: dask Array One dimensional array with same chunks as x's rows compute : bool Whether to compute this result shuffle_blocks : bool Whether to shuffle the blocks with ``random_state`` or not random_state : int or numpy.random.RandomState Random state to use when shuffling blocks kwargs: options to pass to partial_fit Examples -------- >>> import dask.array as da >>> X = da.random.random((10, 3), chunks=(5, 3)) >>> y = da.random.randint(0, 2, 10, chunks=(5,)) >>> from sklearn.linear_model import SGDClassifier >>> sgd = SGDClassifier() >>> sgd = da.learn.fit(sgd, X, y, classes=[1, 0]) >>> sgd # doctest: +SKIP SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0, fit_intercept=True, l1_ratio=0.15, learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5, random_state=None, shuffle=False, verbose=0, warm_start=False) This passes all of X and y through the classifier sequentially. We can use the classifier as normal on in-memory data >>> import numpy as np >>> sgd.predict(np.random.random((4, 3))) # doctest: +SKIP array([1, 0, 0, 1]) Or predict on a larger dataset >>> z = da.random.random((400, 3), chunks=(100, 3)) >>> da.learn.predict(sgd, z) # doctest: +SKIP dask.array<x_11, shape=(400,), chunks=((100, 100, 100, 100),), dtype=int64>
[ "Fit", "scikit", "learn", "model", "against", "dask", "arrays" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/_partial.py#L109-L217
235,115
dask/dask-ml
dask_ml/_partial.py
predict
def predict(model, x): """ Predict with a scikit learn model Parameters ---------- model : scikit learn classifier x : dask Array See docstring for ``da.learn.fit`` """ if not hasattr(x, "chunks") and hasattr(x, "to_dask_array"): x = x.to_dask_array() assert x.ndim == 2 if len(x.chunks[1]) > 1: x = x.rechunk(chunks=(x.chunks[0], sum(x.chunks[1]))) func = partial(_predict, model) xx = np.zeros((1, x.shape[1]), dtype=x.dtype) dt = model.predict(xx).dtype return x.map_blocks(func, chunks=(x.chunks[0], (1,)), dtype=dt).squeeze()
python
def predict(model, x): if not hasattr(x, "chunks") and hasattr(x, "to_dask_array"): x = x.to_dask_array() assert x.ndim == 2 if len(x.chunks[1]) > 1: x = x.rechunk(chunks=(x.chunks[0], sum(x.chunks[1]))) func = partial(_predict, model) xx = np.zeros((1, x.shape[1]), dtype=x.dtype) dt = model.predict(xx).dtype return x.map_blocks(func, chunks=(x.chunks[0], (1,)), dtype=dt).squeeze()
[ "def", "predict", "(", "model", ",", "x", ")", ":", "if", "not", "hasattr", "(", "x", ",", "\"chunks\"", ")", "and", "hasattr", "(", "x", ",", "\"to_dask_array\"", ")", ":", "x", "=", "x", ".", "to_dask_array", "(", ")", "assert", "x", ".", "ndim",...
Predict with a scikit learn model Parameters ---------- model : scikit learn classifier x : dask Array See docstring for ``da.learn.fit``
[ "Predict", "with", "a", "scikit", "learn", "model" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/_partial.py#L224-L242
235,116
dask/dask-ml
dask_ml/cluster/spectral.py
_slice_mostly_sorted
def _slice_mostly_sorted(array, keep, rest, ind=None): """Slice dask array `array` that is almost entirely sorted already. We perform approximately `2 * len(keep)` slices on `array`. This is OK, since `keep` is small. Individually, each of these slices is entirely sorted. Parameters ---------- array : dask.array.Array keep : ndarray[Int] This must be sorted. rest : ndarray[Bool] ind : ndarray[Int], optional Returns ------- sliced : dask.array.Array """ if ind is None: ind = np.arange(len(array)) idx = np.argsort(np.concatenate([keep, ind[rest]])) slices = [] if keep[0] > 0: # avoid creating empty slices slices.append(slice(None, keep[0])) slices.append([keep[0]]) windows = zip(keep[:-1], keep[1:]) for l, r in windows: if r > l + 1: # avoid creating empty slices slices.append(slice(l + 1, r)) slices.append([r]) if keep[-1] < len(array) - 1: # avoid creating empty slices slices.append(slice(keep[-1] + 1, None)) result = da.concatenate([array[idx[slice_]] for slice_ in slices]) return result
python
def _slice_mostly_sorted(array, keep, rest, ind=None): if ind is None: ind = np.arange(len(array)) idx = np.argsort(np.concatenate([keep, ind[rest]])) slices = [] if keep[0] > 0: # avoid creating empty slices slices.append(slice(None, keep[0])) slices.append([keep[0]]) windows = zip(keep[:-1], keep[1:]) for l, r in windows: if r > l + 1: # avoid creating empty slices slices.append(slice(l + 1, r)) slices.append([r]) if keep[-1] < len(array) - 1: # avoid creating empty slices slices.append(slice(keep[-1] + 1, None)) result = da.concatenate([array[idx[slice_]] for slice_ in slices]) return result
[ "def", "_slice_mostly_sorted", "(", "array", ",", "keep", ",", "rest", ",", "ind", "=", "None", ")", ":", "if", "ind", "is", "None", ":", "ind", "=", "np", ".", "arange", "(", "len", "(", "array", ")", ")", "idx", "=", "np", ".", "argsort", "(", ...
Slice dask array `array` that is almost entirely sorted already. We perform approximately `2 * len(keep)` slices on `array`. This is OK, since `keep` is small. Individually, each of these slices is entirely sorted. Parameters ---------- array : dask.array.Array keep : ndarray[Int] This must be sorted. rest : ndarray[Bool] ind : ndarray[Int], optional Returns ------- sliced : dask.array.Array
[ "Slice", "dask", "array", "array", "that", "is", "almost", "entirely", "sorted", "already", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/cluster/spectral.py#L339-L376
235,117
dask/dask-ml
dask_ml/datasets.py
make_counts
def make_counts( n_samples=1000, n_features=100, n_informative=2, scale=1.0, chunks=100, random_state=None, ): """ Generate a dummy dataset for modeling count data. Parameters ---------- n_samples : int number of rows in the output array n_features : int number of columns (features) in the output array n_informative : int number of features that are correlated with the outcome scale : float Scale the true coefficient array by this chunks : int Number of rows per dask array block. random_state : int, RandomState instance or None (default) Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : dask.array, size ``(n_samples, n_features)`` y : dask.array, size ``(n_samples,)`` array of non-negative integer-valued data Examples -------- >>> X, y = make_counts() """ rng = dask_ml.utils.check_random_state(random_state) X = rng.normal(0, 1, size=(n_samples, n_features), chunks=(chunks, n_features)) informative_idx = rng.choice(n_features, n_informative, chunks=n_informative) beta = (rng.random(n_features, chunks=n_features) - 1) * scale informative_idx, beta = dask.compute(informative_idx, beta) z0 = X[:, informative_idx].dot(beta[informative_idx]) rate = da.exp(z0) y = rng.poisson(rate, size=1, chunks=(chunks,)) return X, y
python
def make_counts( n_samples=1000, n_features=100, n_informative=2, scale=1.0, chunks=100, random_state=None, ): rng = dask_ml.utils.check_random_state(random_state) X = rng.normal(0, 1, size=(n_samples, n_features), chunks=(chunks, n_features)) informative_idx = rng.choice(n_features, n_informative, chunks=n_informative) beta = (rng.random(n_features, chunks=n_features) - 1) * scale informative_idx, beta = dask.compute(informative_idx, beta) z0 = X[:, informative_idx].dot(beta[informative_idx]) rate = da.exp(z0) y = rng.poisson(rate, size=1, chunks=(chunks,)) return X, y
[ "def", "make_counts", "(", "n_samples", "=", "1000", ",", "n_features", "=", "100", ",", "n_informative", "=", "2", ",", "scale", "=", "1.0", ",", "chunks", "=", "100", ",", "random_state", "=", "None", ",", ")", ":", "rng", "=", "dask_ml", ".", "uti...
Generate a dummy dataset for modeling count data. Parameters ---------- n_samples : int number of rows in the output array n_features : int number of columns (features) in the output array n_informative : int number of features that are correlated with the outcome scale : float Scale the true coefficient array by this chunks : int Number of rows per dask array block. random_state : int, RandomState instance or None (default) Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- X : dask.array, size ``(n_samples, n_features)`` y : dask.array, size ``(n_samples,)`` array of non-negative integer-valued data Examples -------- >>> X, y = make_counts()
[ "Generate", "a", "dummy", "dataset", "for", "modeling", "count", "data", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/datasets.py#L24-L73
235,118
dask/dask-ml
dask_ml/datasets.py
make_blobs
def make_blobs( n_samples=100, n_features=2, centers=None, cluster_std=1.0, center_box=(-10.0, 10.0), shuffle=True, random_state=None, chunks=None, ): """ Generate isotropic Gaussian blobs for clustering. This can be used to generate very large Dask arrays on a cluster of machines. When using Dask in distributed mode, the client machine only needs to allocate a single block's worth of data. Parameters ---------- n_samples : int or array-like, optional (default=100) If int, it is the total number of points equally divided among clusters. If array-like, each element of the sequence indicates the number of samples per cluster. n_features : int, optional (default=2) The number of features for each sample. centers : int or array of shape [n_centers, n_features], optional (default=None) The number of centers to generate, or the fixed center locations. If n_samples is an int and centers is None, 3 centers are generated. If n_samples is array-like, centers must be either None or an array of length equal to the length of n_samples. cluster_std : float or sequence of floats, optional (default=1.0) The standard deviation of the clusters. center_box : pair of floats (min, max), optional (default=(-10.0, 10.0)) The bounding box for each cluster center when centers are generated at random. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None (default) Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. chunks : int, tuple How to chunk the array. Must be one of the following forms: - A blocksize like 1000. - A blockshape like (1000, 1000). - Explicit sizes of all blocks along all dimensions like ((1000, 1000, 500), (400, 400)). Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for cluster membership of each sample. 
Examples -------- >>> from dask_ml.datasets import make_blobs >>> X, y = make_blobs(n_samples=100000, chunks=10000) >>> X dask.array<..., shape=(100000, 2), dtype=float64, chunksize=(10000, 2)> >>> y dask.array<concatenate, shape=(100000,), dtype=int64, chunksize=(10000,)> See Also -------- make_classification: a more intricate variant """ chunks = da.core.normalize_chunks(chunks, (n_samples, n_features)) _check_axis_partitioning(chunks, n_features) if centers is None: # TODO: non-int n_samples? centers = 3 if isinstance(centers, numbers.Integral): # Make a prototype n_centers = centers X, y = sklearn.datasets.make_blobs( n_samples=chunks[0][0], n_features=n_features, centers=centers, shuffle=shuffle, cluster_std=cluster_std, center_box=center_box, random_state=random_state, ) centers = [] centers = np.zeros((n_centers, n_features)) for i in range(n_centers): centers[i] = X[y == i].mean(0) objs = [ dask.delayed(sklearn.datasets.make_blobs, nout=2)( n_samples=n_samples_per_block, n_features=n_features, centers=centers, cluster_std=cluster_std, shuffle=shuffle, center_box=center_box, random_state=i, ) for i, n_samples_per_block in enumerate(chunks[0]) ] Xobjs, yobjs = zip(*objs) Xarrs = [ da.from_delayed(arr, shape=(n, n_features), dtype="f8") for arr, n in zip(Xobjs, chunks[0]) ] X_big = da.vstack(Xarrs) yarrs = [ da.from_delayed(arr, shape=(n,), dtype=np.dtype("int")) for arr, n in zip(yobjs, chunks[0]) ] y_big = da.hstack(yarrs) return X_big, y_big
python
def make_blobs( n_samples=100, n_features=2, centers=None, cluster_std=1.0, center_box=(-10.0, 10.0), shuffle=True, random_state=None, chunks=None, ): chunks = da.core.normalize_chunks(chunks, (n_samples, n_features)) _check_axis_partitioning(chunks, n_features) if centers is None: # TODO: non-int n_samples? centers = 3 if isinstance(centers, numbers.Integral): # Make a prototype n_centers = centers X, y = sklearn.datasets.make_blobs( n_samples=chunks[0][0], n_features=n_features, centers=centers, shuffle=shuffle, cluster_std=cluster_std, center_box=center_box, random_state=random_state, ) centers = [] centers = np.zeros((n_centers, n_features)) for i in range(n_centers): centers[i] = X[y == i].mean(0) objs = [ dask.delayed(sklearn.datasets.make_blobs, nout=2)( n_samples=n_samples_per_block, n_features=n_features, centers=centers, cluster_std=cluster_std, shuffle=shuffle, center_box=center_box, random_state=i, ) for i, n_samples_per_block in enumerate(chunks[0]) ] Xobjs, yobjs = zip(*objs) Xarrs = [ da.from_delayed(arr, shape=(n, n_features), dtype="f8") for arr, n in zip(Xobjs, chunks[0]) ] X_big = da.vstack(Xarrs) yarrs = [ da.from_delayed(arr, shape=(n,), dtype=np.dtype("int")) for arr, n in zip(yobjs, chunks[0]) ] y_big = da.hstack(yarrs) return X_big, y_big
[ "def", "make_blobs", "(", "n_samples", "=", "100", ",", "n_features", "=", "2", ",", "centers", "=", "None", ",", "cluster_std", "=", "1.0", ",", "center_box", "=", "(", "-", "10.0", ",", "10.0", ")", ",", "shuffle", "=", "True", ",", "random_state", ...
Generate isotropic Gaussian blobs for clustering. This can be used to generate very large Dask arrays on a cluster of machines. When using Dask in distributed mode, the client machine only needs to allocate a single block's worth of data. Parameters ---------- n_samples : int or array-like, optional (default=100) If int, it is the total number of points equally divided among clusters. If array-like, each element of the sequence indicates the number of samples per cluster. n_features : int, optional (default=2) The number of features for each sample. centers : int or array of shape [n_centers, n_features], optional (default=None) The number of centers to generate, or the fixed center locations. If n_samples is an int and centers is None, 3 centers are generated. If n_samples is array-like, centers must be either None or an array of length equal to the length of n_samples. cluster_std : float or sequence of floats, optional (default=1.0) The standard deviation of the clusters. center_box : pair of floats (min, max), optional (default=(-10.0, 10.0)) The bounding box for each cluster center when centers are generated at random. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None (default) Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. chunks : int, tuple How to chunk the array. Must be one of the following forms: - A blocksize like 1000. - A blockshape like (1000, 1000). - Explicit sizes of all blocks along all dimensions like ((1000, 1000, 500), (400, 400)). Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for cluster membership of each sample. 
Examples -------- >>> from dask_ml.datasets import make_blobs >>> X, y = make_blobs(n_samples=100000, chunks=10000) >>> X dask.array<..., shape=(100000, 2), dtype=float64, chunksize=(10000, 2)> >>> y dask.array<concatenate, shape=(100000,), dtype=int64, chunksize=(10000,)> See Also -------- make_classification: a more intricate variant
[ "Generate", "isotropic", "Gaussian", "blobs", "for", "clustering", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/datasets.py#L76-L203
235,119
dask/dask-ml
dask_ml/datasets.py
make_regression
def make_regression( n_samples=100, n_features=100, n_informative=10, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None, chunks=None, ): """ Generate a random regression problem. The input set can either be well conditioned (by default) or have a low rank-fat tail singular profile. See :func:`sklearn.datasets.make_low_rank_matrix` for more details. This can be used to generate very large Dask arrays on a cluster of machines. When using Dask in distributed mode, the client machine only needs to allocate a single block's worth of data. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. n_informative : int, optional (default=10) The number of informative features, i.e., the number of features used to build the linear model used to generate the output. n_targets : int, optional (default=1) The number of regression targets, i.e., the dimension of the y output vector associated with a sample. By default, the output is a scalar. bias : float, optional (default=0.0) The bias term in the underlying linear model. effective_rank : int or None, optional (default=None) if not None: The approximate number of singular vectors required to explain most of the input data by linear combinations. Using this kind of singular spectrum in the input allows the generator to reproduce the correlations often observed in practice. if None: The input set is well conditioned, centered and gaussian with unit variance. tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile if `effective_rank` is not None. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. shuffle : boolean, optional (default=True) Shuffle the samples and the features. 
coef : boolean, optional (default=False) If True, the coefficients of the underlying linear model are returned. random_state : int, RandomState instance or None (default) Determines random number generation for dataset creation. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. chunks : int, tuple How to chunk the array. Must be one of the following forms: - A blocksize like 1000. - A blockshape like (1000, 1000). - Explicit sizes of all blocks along all dimensions like ((1000, 1000, 500), (400, 400)). Returns ------- X : Dask array of shape [n_samples, n_features] The input samples. y : Dask array of shape [n_samples] or [n_samples, n_targets] The output values. coef : array of shape [n_features] or [n_features, n_targets], optional The coefficient of the underlying linear model. It is returned only if coef is True. """ chunks = da.core.normalize_chunks(chunks, (n_samples, n_features)) _check_axis_partitioning(chunks, n_features) rng = sklearn.utils.check_random_state(random_state) return_coef = coef is True if chunks[1][0] != n_features: raise ValueError( "Can only generate arrays partitioned along the " "first axis. Specifying a larger chunksize for " "the second axis." ) _, _, coef = sklearn.datasets.make_regression( n_samples=chunks[0][0], n_features=n_features, n_informative=n_informative, n_targets=n_targets, bias=bias, effective_rank=effective_rank, tail_strength=tail_strength, noise=noise, shuffle=shuffle, coef=True, # hardcode here random_state=rng, ) seed = da.random.random_state_data(1, random_state=rng) da_rng = da.random.RandomState(seed[0]) X_big = da_rng.normal(size=(n_samples, n_features), chunks=(chunks[0], n_features)) y_big = da.dot(X_big, coef) + bias if noise > 0: y_big = y_big + da_rng.normal( scale=noise, size=y_big.shape, chunks=y_big.chunks ) y_big = y_big.squeeze() if return_coef: return X_big, y_big, coef else: return X_big, y_big
python
def make_regression( n_samples=100, n_features=100, n_informative=10, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None, chunks=None, ): chunks = da.core.normalize_chunks(chunks, (n_samples, n_features)) _check_axis_partitioning(chunks, n_features) rng = sklearn.utils.check_random_state(random_state) return_coef = coef is True if chunks[1][0] != n_features: raise ValueError( "Can only generate arrays partitioned along the " "first axis. Specifying a larger chunksize for " "the second axis." ) _, _, coef = sklearn.datasets.make_regression( n_samples=chunks[0][0], n_features=n_features, n_informative=n_informative, n_targets=n_targets, bias=bias, effective_rank=effective_rank, tail_strength=tail_strength, noise=noise, shuffle=shuffle, coef=True, # hardcode here random_state=rng, ) seed = da.random.random_state_data(1, random_state=rng) da_rng = da.random.RandomState(seed[0]) X_big = da_rng.normal(size=(n_samples, n_features), chunks=(chunks[0], n_features)) y_big = da.dot(X_big, coef) + bias if noise > 0: y_big = y_big + da_rng.normal( scale=noise, size=y_big.shape, chunks=y_big.chunks ) y_big = y_big.squeeze() if return_coef: return X_big, y_big, coef else: return X_big, y_big
[ "def", "make_regression", "(", "n_samples", "=", "100", ",", "n_features", "=", "100", ",", "n_informative", "=", "10", ",", "n_targets", "=", "1", ",", "bias", "=", "0.0", ",", "effective_rank", "=", "None", ",", "tail_strength", "=", "0.5", ",", "noise...
Generate a random regression problem. The input set can either be well conditioned (by default) or have a low rank-fat tail singular profile. See :func:`sklearn.datasets.make_low_rank_matrix` for more details. This can be used to generate very large Dask arrays on a cluster of machines. When using Dask in distributed mode, the client machine only needs to allocate a single block's worth of data. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. n_informative : int, optional (default=10) The number of informative features, i.e., the number of features used to build the linear model used to generate the output. n_targets : int, optional (default=1) The number of regression targets, i.e., the dimension of the y output vector associated with a sample. By default, the output is a scalar. bias : float, optional (default=0.0) The bias term in the underlying linear model. effective_rank : int or None, optional (default=None) if not None: The approximate number of singular vectors required to explain most of the input data by linear combinations. Using this kind of singular spectrum in the input allows the generator to reproduce the correlations often observed in practice. if None: The input set is well conditioned, centered and gaussian with unit variance. tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile if `effective_rank` is not None. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. shuffle : boolean, optional (default=True) Shuffle the samples and the features. coef : boolean, optional (default=False) If True, the coefficients of the underlying linear model are returned. random_state : int, RandomState instance or None (default) Determines random number generation for dataset creation. 
Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. chunks : int, tuple How to chunk the array. Must be one of the following forms: - A blocksize like 1000. - A blockshape like (1000, 1000). - Explicit sizes of all blocks along all dimensions like ((1000, 1000, 500), (400, 400)). Returns ------- X : Dask array of shape [n_samples, n_features] The input samples. y : Dask array of shape [n_samples] or [n_samples, n_targets] The output values. coef : array of shape [n_features] or [n_features, n_targets], optional The coefficient of the underlying linear model. It is returned only if coef is True.
[ "Generate", "a", "random", "regression", "problem", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/datasets.py#L206-L338
235,120
dask/dask-ml
dask_ml/decomposition/truncated_svd.py
TruncatedSVD.fit_transform
def fit_transform(self, X, y=None): """Fit model to X and perform dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : Ignored Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array, of the same type as the input array. If ``X`` was a ``dask.array``, then ``X_new`` will be a ``dask.array`` with the same chunks along the first dimension. """ X = self._check_array(X) if self.algorithm not in {"tsqr", "randomized"}: raise ValueError() if self.algorithm == "tsqr": u, s, v = da.linalg.svd(X) u = u[:, : self.n_components] s = s[: self.n_components] v = v[: self.n_components] else: u, s, v = da.linalg.svd_compressed( X, self.n_components, self.n_iter, seed=self.random_state ) u, v = svd_flip(u, v) X_transformed = u * s explained_var = X_transformed.var(axis=0) full_var = X.var(axis=0).sum() explained_variance_ratio = explained_var / full_var components, ev, evr, sv = compute(v, explained_var, explained_variance_ratio, s) self.components_ = components self.explained_variance_ = ev self.explained_variance_ratio_ = evr self.singular_values_ = sv return X_transformed
python
def fit_transform(self, X, y=None): X = self._check_array(X) if self.algorithm not in {"tsqr", "randomized"}: raise ValueError() if self.algorithm == "tsqr": u, s, v = da.linalg.svd(X) u = u[:, : self.n_components] s = s[: self.n_components] v = v[: self.n_components] else: u, s, v = da.linalg.svd_compressed( X, self.n_components, self.n_iter, seed=self.random_state ) u, v = svd_flip(u, v) X_transformed = u * s explained_var = X_transformed.var(axis=0) full_var = X.var(axis=0).sum() explained_variance_ratio = explained_var / full_var components, ev, evr, sv = compute(v, explained_var, explained_variance_ratio, s) self.components_ = components self.explained_variance_ = ev self.explained_variance_ratio_ = evr self.singular_values_ = sv return X_transformed
[ "def", "fit_transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "X", "=", "self", ".", "_check_array", "(", "X", ")", "if", "self", ".", "algorithm", "not", "in", "{", "\"tsqr\"", ",", "\"randomized\"", "}", ":", "raise", "ValueError"...
Fit model to X and perform dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : Ignored Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array, of the same type as the input array. If ``X`` was a ``dask.array``, then ``X_new`` will be a ``dask.array`` with the same chunks along the first dimension.
[ "Fit", "model", "to", "X", "and", "perform", "dimensionality", "reduction", "on", "X", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/truncated_svd.py#L144-L186
235,121
dask/dask-ml
dask_ml/compose/_column_transformer.py
ColumnTransformer._hstack
def _hstack(self, Xs): """ Stacks X horizontally. Supports input types (X): list of numpy arrays, sparse arrays and DataFrames """ types = set(type(X) for X in Xs) if self.sparse_output_: return sparse.hstack(Xs).tocsr() elif dd.Series in types or dd.DataFrame in types: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Concatenating", UserWarning) return dd.concat(Xs, axis="columns") elif da.Array in types: # To allow compatibility with dask core 1.0.0, this is the `else` # part of the definition of the dask.array.hstack inlined. # The `then` branch is removed because _validate_output in # sklearn.compose.ColumnTransformer ensures ndim == 2, so the # check `all(x.ndim == 1 for x in Xs)` should always fail. # # Once dask.array.hstack supports allow_unknown_chunksizes, # changed this to da.hstack(Xs, allow_unknown_chunksizes=True) return da.concatenate(Xs, axis=1, allow_unknown_chunksizes=True) elif self.preserve_dataframe and (pd.Series in types or pd.DataFrame in types): return pd.concat(Xs, axis="columns") else: return np.hstack(Xs)
python
def _hstack(self, Xs): types = set(type(X) for X in Xs) if self.sparse_output_: return sparse.hstack(Xs).tocsr() elif dd.Series in types or dd.DataFrame in types: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "Concatenating", UserWarning) return dd.concat(Xs, axis="columns") elif da.Array in types: # To allow compatibility with dask core 1.0.0, this is the `else` # part of the definition of the dask.array.hstack inlined. # The `then` branch is removed because _validate_output in # sklearn.compose.ColumnTransformer ensures ndim == 2, so the # check `all(x.ndim == 1 for x in Xs)` should always fail. # # Once dask.array.hstack supports allow_unknown_chunksizes, # changed this to da.hstack(Xs, allow_unknown_chunksizes=True) return da.concatenate(Xs, axis=1, allow_unknown_chunksizes=True) elif self.preserve_dataframe and (pd.Series in types or pd.DataFrame in types): return pd.concat(Xs, axis="columns") else: return np.hstack(Xs)
[ "def", "_hstack", "(", "self", ",", "Xs", ")", ":", "types", "=", "set", "(", "type", "(", "X", ")", "for", "X", "in", "Xs", ")", "if", "self", ".", "sparse_output_", ":", "return", "sparse", ".", "hstack", "(", "Xs", ")", ".", "tocsr", "(", ")...
Stacks X horizontally. Supports input types (X): list of numpy arrays, sparse arrays and DataFrames
[ "Stacks", "X", "horizontally", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/compose/_column_transformer.py#L172-L200
235,122
dask/dask-ml
dask_ml/decomposition/pca.py
PCA.transform
def transform(self, X): """Apply dimensionality reduction on X. X is projected on the first principal components previous extracted from a training set. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ check_is_fitted(self, ["mean_", "components_"], all_or_any=all) # X = check_array(X) if self.mean_ is not None: X = X - self.mean_ X_transformed = da.dot(X, self.components_.T) if self.whiten: X_transformed /= np.sqrt(self.explained_variance_) return X_transformed
python
def transform(self, X): check_is_fitted(self, ["mean_", "components_"], all_or_any=all) # X = check_array(X) if self.mean_ is not None: X = X - self.mean_ X_transformed = da.dot(X, self.components_.T) if self.whiten: X_transformed /= np.sqrt(self.explained_variance_) return X_transformed
[ "def", "transform", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "[", "\"mean_\"", ",", "\"components_\"", "]", ",", "all_or_any", "=", "all", ")", "# X = check_array(X)", "if", "self", ".", "mean_", "is", "not", "None", ":", "X"...
Apply dimensionality reduction on X. X is projected on the first principal components previous extracted from a training set. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components)
[ "Apply", "dimensionality", "reduction", "on", "X", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/pca.py#L319-L344
235,123
dask/dask-ml
dask_ml/decomposition/pca.py
PCA.fit_transform
def fit_transform(self, X, y=None): """Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. y : Ignored Returns ------- X_new : array-like, shape (n_samples, n_components) """ # X = check_array(X) U, S, V = self._fit(X) U = U[:, : self.n_components_] if self.whiten: # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) U *= np.sqrt(X.shape[0] - 1) else: # X_new = X * V = U * S * V^T * V = U * S U *= S[: self.n_components_] return U
python
def fit_transform(self, X, y=None): # X = check_array(X) U, S, V = self._fit(X) U = U[:, : self.n_components_] if self.whiten: # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) U *= np.sqrt(X.shape[0] - 1) else: # X_new = X * V = U * S * V^T * V = U * S U *= S[: self.n_components_] return U
[ "def", "fit_transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "# X = check_array(X)", "U", ",", "S", ",", "V", "=", "self", ".", "_fit", "(", "X", ")", "U", "=", "U", "[", ":", ",", ":", "self", ".", "n_components_", "]", "if"...
Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. y : Ignored Returns ------- X_new : array-like, shape (n_samples, n_components)
[ "Fit", "the", "model", "with", "X", "and", "apply", "the", "dimensionality", "reduction", "on", "X", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/pca.py#L346-L373
235,124
dask/dask-ml
dask_ml/decomposition/pca.py
PCA.inverse_transform
def inverse_transform(self, X): """Transform data back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples in the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform does not compute the exact inverse operation of transform. """ check_is_fitted(self, "mean_") if self.whiten: return ( da.dot( X, np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_, ) + self.mean_ ) else: return da.dot(X, self.components_) + self.mean_
python
def inverse_transform(self, X): check_is_fitted(self, "mean_") if self.whiten: return ( da.dot( X, np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_, ) + self.mean_ ) else: return da.dot(X, self.components_) + self.mean_
[ "def", "inverse_transform", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "\"mean_\"", ")", "if", "self", ".", "whiten", ":", "return", "(", "da", ".", "dot", "(", "X", ",", "np", ".", "sqrt", "(", "self", ".", "explained_vari...
Transform data back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples in the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform does not compute the exact inverse operation of transform.
[ "Transform", "data", "back", "to", "its", "original", "space", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/pca.py#L375-L406
235,125
dask/dask-ml
dask_ml/decomposition/pca.py
PCA.score_samples
def score_samples(self, X): """Return the log-likelihood of each sample. See. "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf Parameters ---------- X : array, shape(n_samples, n_features) The data. Returns ------- ll : array, shape (n_samples,) Log-likelihood of each sample under the current model """ check_is_fitted(self, "mean_") # X = check_array(X) Xr = X - self.mean_ n_features = X.shape[1] precision = self.get_precision() # [n_features, n_features] log_like = -0.5 * (Xr * (da.dot(Xr, precision))).sum(axis=1) log_like -= 0.5 * (n_features * da.log(2.0 * np.pi) - fast_logdet(precision)) return log_like
python
def score_samples(self, X): check_is_fitted(self, "mean_") # X = check_array(X) Xr = X - self.mean_ n_features = X.shape[1] precision = self.get_precision() # [n_features, n_features] log_like = -0.5 * (Xr * (da.dot(Xr, precision))).sum(axis=1) log_like -= 0.5 * (n_features * da.log(2.0 * np.pi) - fast_logdet(precision)) return log_like
[ "def", "score_samples", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "\"mean_\"", ")", "# X = check_array(X)", "Xr", "=", "X", "-", "self", ".", "mean_", "n_features", "=", "X", ".", "shape", "[", "1", "]", "precision", "=", "s...
Return the log-likelihood of each sample. See. "Pattern Recognition and Machine Learning" by C. Bishop, 12.2.1 p. 574 or http://www.miketipping.com/papers/met-mppca.pdf Parameters ---------- X : array, shape(n_samples, n_features) The data. Returns ------- ll : array, shape (n_samples,) Log-likelihood of each sample under the current model
[ "Return", "the", "log", "-", "likelihood", "of", "each", "sample", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/decomposition/pca.py#L408-L433
235,126
dask/dask-ml
dask_ml/utils.py
assert_estimator_equal
def assert_estimator_equal(left, right, exclude=None, **kwargs): """Check that two Estimators are equal Parameters ---------- left, right : Estimators exclude : str or sequence of str attributes to skip in the check kwargs : dict Passed through to the dask `assert_eq` method. """ left_attrs = [x for x in dir(left) if x.endswith("_") and not x.startswith("_")] right_attrs = [x for x in dir(right) if x.endswith("_") and not x.startswith("_")] if exclude is None: exclude = set() elif isinstance(exclude, str): exclude = {exclude} else: exclude = set(exclude) assert (set(left_attrs) - exclude) == set(right_attrs) - exclude for attr in set(left_attrs) - exclude: l = getattr(left, attr) r = getattr(right, attr) _assert_eq(l, r, **kwargs)
python
def assert_estimator_equal(left, right, exclude=None, **kwargs): left_attrs = [x for x in dir(left) if x.endswith("_") and not x.startswith("_")] right_attrs = [x for x in dir(right) if x.endswith("_") and not x.startswith("_")] if exclude is None: exclude = set() elif isinstance(exclude, str): exclude = {exclude} else: exclude = set(exclude) assert (set(left_attrs) - exclude) == set(right_attrs) - exclude for attr in set(left_attrs) - exclude: l = getattr(left, attr) r = getattr(right, attr) _assert_eq(l, r, **kwargs)
[ "def", "assert_estimator_equal", "(", "left", ",", "right", ",", "exclude", "=", "None", ",", "*", "*", "kwargs", ")", ":", "left_attrs", "=", "[", "x", "for", "x", "in", "dir", "(", "left", ")", "if", "x", ".", "endswith", "(", "\"_\"", ")", "and"...
Check that two Estimators are equal Parameters ---------- left, right : Estimators exclude : str or sequence of str attributes to skip in the check kwargs : dict Passed through to the dask `assert_eq` method.
[ "Check", "that", "two", "Estimators", "are", "equal" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/utils.py#L58-L84
235,127
dask/dask-ml
dask_ml/utils.py
check_matching_blocks
def check_matching_blocks(*arrays): """Check that the partitioning structure for many arrays matches. Parameters ---------- *arrays : Sequence of array-likes This includes * Dask Array * Dask DataFrame * Dask Series """ if len(arrays) <= 1: return if all(isinstance(x, da.Array) for x in arrays): # TODO: unknown chunks, ensure blocks match, or just raise (configurable) chunks = arrays[0].chunks for array in arrays[1:]: if array.chunks != chunks: raise ValueError( "Mismatched chunks. {} != {}".format(chunks, array.chunks) ) elif all(isinstance(x, (dd.Series, dd.DataFrame)) for x in arrays): divisions = arrays[0].divisions for array in arrays[1:]: if array.divisions != divisions: raise ValueError( "Mismatched divisions. {} != {}".format(divisions, array.divisions) ) else: raise ValueError("Unexpected types {}.".format({type(x) for x in arrays}))
python
def check_matching_blocks(*arrays): if len(arrays) <= 1: return if all(isinstance(x, da.Array) for x in arrays): # TODO: unknown chunks, ensure blocks match, or just raise (configurable) chunks = arrays[0].chunks for array in arrays[1:]: if array.chunks != chunks: raise ValueError( "Mismatched chunks. {} != {}".format(chunks, array.chunks) ) elif all(isinstance(x, (dd.Series, dd.DataFrame)) for x in arrays): divisions = arrays[0].divisions for array in arrays[1:]: if array.divisions != divisions: raise ValueError( "Mismatched divisions. {} != {}".format(divisions, array.divisions) ) else: raise ValueError("Unexpected types {}.".format({type(x) for x in arrays}))
[ "def", "check_matching_blocks", "(", "*", "arrays", ")", ":", "if", "len", "(", "arrays", ")", "<=", "1", ":", "return", "if", "all", "(", "isinstance", "(", "x", ",", "da", ".", "Array", ")", "for", "x", "in", "arrays", ")", ":", "# TODO: unknown ch...
Check that the partitioning structure for many arrays matches. Parameters ---------- *arrays : Sequence of array-likes This includes * Dask Array * Dask DataFrame * Dask Series
[ "Check", "that", "the", "partitioning", "structure", "for", "many", "arrays", "matches", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/utils.py#L190-L221
235,128
dask/dask-ml
dask_ml/utils.py
check_chunks
def check_chunks(n_samples, n_features, chunks=None): """Validate and normalize the chunks argument for a dask.array Parameters ---------- n_samples, n_features : int Give the shape of the array chunks : int, sequence, optional, default None * For 'chunks=None', this picks a "good" default number of chunks based on the number of CPU cores. The default results in a block structure with one block per core along the first dimension (of roughly equal lengths) and a single block along the second dimension. This may or may not be appropriate for your use-case. The chunk size will be at least 100 along the first dimension. * When chunks is an int, we split the ``n_samples`` into ``chunks`` blocks along the first dimension, and a single block along the second. Again, the chunksize will be at least 100 along the first dimension. * When chunks is a sequence, we validate that it's length two and turn it into a tuple. Returns ------- chunks : tuple """ if chunks is None: chunks = (max(100, n_samples // cpu_count()), n_features) elif isinstance(chunks, Integral): chunks = (max(100, n_samples // chunks), n_features) elif isinstance(chunks, Sequence): chunks = tuple(chunks) if len(chunks) != 2: raise AssertionError("Chunks should be a 2-tuple.") else: raise ValueError("Unknown type of chunks: '{}'".format(type(chunks))) return chunks
python
def check_chunks(n_samples, n_features, chunks=None): if chunks is None: chunks = (max(100, n_samples // cpu_count()), n_features) elif isinstance(chunks, Integral): chunks = (max(100, n_samples // chunks), n_features) elif isinstance(chunks, Sequence): chunks = tuple(chunks) if len(chunks) != 2: raise AssertionError("Chunks should be a 2-tuple.") else: raise ValueError("Unknown type of chunks: '{}'".format(type(chunks))) return chunks
[ "def", "check_chunks", "(", "n_samples", ",", "n_features", ",", "chunks", "=", "None", ")", ":", "if", "chunks", "is", "None", ":", "chunks", "=", "(", "max", "(", "100", ",", "n_samples", "//", "cpu_count", "(", ")", ")", ",", "n_features", ")", "e...
Validate and normalize the chunks argument for a dask.array Parameters ---------- n_samples, n_features : int Give the shape of the array chunks : int, sequence, optional, default None * For 'chunks=None', this picks a "good" default number of chunks based on the number of CPU cores. The default results in a block structure with one block per core along the first dimension (of roughly equal lengths) and a single block along the second dimension. This may or may not be appropriate for your use-case. The chunk size will be at least 100 along the first dimension. * When chunks is an int, we split the ``n_samples`` into ``chunks`` blocks along the first dimension, and a single block along the second. Again, the chunksize will be at least 100 along the first dimension. * When chunks is a sequence, we validate that it's length two and turn it into a tuple. Returns ------- chunks : tuple
[ "Validate", "and", "normalize", "the", "chunks", "argument", "for", "a", "dask", ".", "array" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/utils.py#L224-L261
235,129
dask/dask-ml
dask_ml/metrics/scorer.py
get_scorer
def get_scorer(scoring, compute=True): """Get a scorer from string Parameters ---------- scoring : str | callable scoring method as string. If callable it is returned as is. Returns ------- scorer : callable The scorer. """ # This is the same as sklearns, only we use our SCORERS dict, # and don't have back-compat code if isinstance(scoring, six.string_types): try: scorer, kwargs = SCORERS[scoring] except KeyError: raise ValueError( "{} is not a valid scoring value. " "Valid options are {}".format(scoring, sorted(SCORERS)) ) else: scorer = scoring kwargs = {} kwargs["compute"] = compute return make_scorer(scorer, **kwargs)
python
def get_scorer(scoring, compute=True): # This is the same as sklearns, only we use our SCORERS dict, # and don't have back-compat code if isinstance(scoring, six.string_types): try: scorer, kwargs = SCORERS[scoring] except KeyError: raise ValueError( "{} is not a valid scoring value. " "Valid options are {}".format(scoring, sorted(SCORERS)) ) else: scorer = scoring kwargs = {} kwargs["compute"] = compute return make_scorer(scorer, **kwargs)
[ "def", "get_scorer", "(", "scoring", ",", "compute", "=", "True", ")", ":", "# This is the same as sklearns, only we use our SCORERS dict,", "# and don't have back-compat code", "if", "isinstance", "(", "scoring", ",", "six", ".", "string_types", ")", ":", "try", ":", ...
Get a scorer from string Parameters ---------- scoring : str | callable scoring method as string. If callable it is returned as is. Returns ------- scorer : callable The scorer.
[ "Get", "a", "scorer", "from", "string" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/metrics/scorer.py#L22-L51
235,130
dask/dask-ml
dask_ml/model_selection/utils.py
to_indexable
def to_indexable(*args, **kwargs): """Ensure that all args are an indexable type. Conversion runs lazily for dask objects, immediately otherwise. Parameters ---------- args : array_like or scalar allow_scalars : bool, optional Whether to allow scalars in args. Default is False. """ if kwargs.get("allow_scalars", False): indexable = _maybe_indexable else: indexable = _indexable for x in args: if x is None or isinstance(x, (da.Array, dd.DataFrame)): yield x elif is_dask_collection(x): yield delayed(indexable, pure=True)(x) else: yield indexable(x)
python
def to_indexable(*args, **kwargs): if kwargs.get("allow_scalars", False): indexable = _maybe_indexable else: indexable = _indexable for x in args: if x is None or isinstance(x, (da.Array, dd.DataFrame)): yield x elif is_dask_collection(x): yield delayed(indexable, pure=True)(x) else: yield indexable(x)
[ "def", "to_indexable", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "\"allow_scalars\"", ",", "False", ")", ":", "indexable", "=", "_maybe_indexable", "else", ":", "indexable", "=", "_indexable", "for", "x", "in",...
Ensure that all args are an indexable type. Conversion runs lazily for dask objects, immediately otherwise. Parameters ---------- args : array_like or scalar allow_scalars : bool, optional Whether to allow scalars in args. Default is False.
[ "Ensure", "that", "all", "args", "are", "an", "indexable", "type", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/utils.py#L33-L54
235,131
dask/dask-ml
dask_ml/model_selection/utils.py
_index_param_value
def _index_param_value(num_samples, v, indices): """Private helper function for parameter value indexing. This determines whether a fit parameter `v` to a SearchCV.fit should be indexed along with `X` and `y`. Note that this differs from the scikit-learn version. They pass `X` and compute num_samples. We pass `num_samples` instead. """ if not _is_arraylike(v) or _num_samples(v) != num_samples: # pass through: skip indexing return v if sp.issparse(v): v = v.tocsr() return safe_indexing(v, indices)
python
def _index_param_value(num_samples, v, indices): if not _is_arraylike(v) or _num_samples(v) != num_samples: # pass through: skip indexing return v if sp.issparse(v): v = v.tocsr() return safe_indexing(v, indices)
[ "def", "_index_param_value", "(", "num_samples", ",", "v", ",", "indices", ")", ":", "if", "not", "_is_arraylike", "(", "v", ")", "or", "_num_samples", "(", "v", ")", "!=", "num_samples", ":", "# pass through: skip indexing", "return", "v", "if", "sp", ".", ...
Private helper function for parameter value indexing. This determines whether a fit parameter `v` to a SearchCV.fit should be indexed along with `X` and `y`. Note that this differs from the scikit-learn version. They pass `X` and compute num_samples. We pass `num_samples` instead.
[ "Private", "helper", "function", "for", "parameter", "value", "indexing", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/utils.py#L57-L70
235,132
dask/dask-ml
dask_ml/model_selection/utils.py
DeprecationDict.add_warning
def add_warning(self, key, *args, **kwargs): """Add a warning to be triggered when the specified key is read Parameters ---------- key : any hashable object The key """ self._deprecations[key] = (args, kwargs)
python
def add_warning(self, key, *args, **kwargs): self._deprecations[key] = (args, kwargs)
[ "def", "add_warning", "(", "self", ",", "key", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_deprecations", "[", "key", "]", "=", "(", "args", ",", "kwargs", ")" ]
Add a warning to be triggered when the specified key is read Parameters ---------- key : any hashable object The key
[ "Add", "a", "warning", "to", "be", "triggered", "when", "the", "specified", "key", "is", "read" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/utils.py#L141-L149
235,133
dask/dask-ml
dask_ml/preprocessing/label.py
_construct
def _construct(x, categories): """Make a sparse matrix from an encoded array. >>> construct(np.array([0, 1, 0]), np.array([0, 1])).toarray() array([[1., 0.], [0., 1.], [1., 0.]]) """ # type: (np.ndarray, np.ndarray) -> scipy.sparse.csr_matrix data = np.ones(len(x)) rows = np.arange(len(x)) columns = x.ravel() return scipy.sparse.csr_matrix( (data, (rows, columns)), shape=(len(x), len(categories)) )
python
def _construct(x, categories): # type: (np.ndarray, np.ndarray) -> scipy.sparse.csr_matrix data = np.ones(len(x)) rows = np.arange(len(x)) columns = x.ravel() return scipy.sparse.csr_matrix( (data, (rows, columns)), shape=(len(x), len(categories)) )
[ "def", "_construct", "(", "x", ",", "categories", ")", ":", "# type: (np.ndarray, np.ndarray) -> scipy.sparse.csr_matrix", "data", "=", "np", ".", "ones", "(", "len", "(", "x", ")", ")", "rows", "=", "np", ".", "arange", "(", "len", "(", "x", ")", ")", "...
Make a sparse matrix from an encoded array. >>> construct(np.array([0, 1, 0]), np.array([0, 1])).toarray() array([[1., 0.], [0., 1.], [1., 0.]])
[ "Make", "a", "sparse", "matrix", "from", "an", "encoded", "array", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/label.py#L224-L238
235,134
dask/dask-ml
dask_ml/preprocessing/label.py
_encode_dask_array
def _encode_dask_array(values, uniques=None, encode=False, onehot_dtype=None): """One-hot or label encode a dask array. Parameters ---------- values : da.Array, shape [n_samples,] unqiques : np.ndarray, shape [n_uniques,] encode : bool, default False Whether to encode the values (True) or just discover the uniques. onehot_dtype : np.dtype, optional Optional dtype for the resulting one-hot encoded array. This changes the shape, dtype, and underlying storage of the returned dask array. ======= ================= ========================= thing onehot_dtype=None onehot_dtype=onehot_dtype ======= ================= ========================= shape (n_samples,) (n_samples, len(uniques)) dtype np.intp onehot_dtype storage np.ndarray scipy.sparse.csr_matrix ======= ================= ========================= Returns ------- uniques : ndarray The discovered uniques (uniques=None) or just `uniques` encoded : da.Array, optional The encoded values. Only returend when ``encode=True``. """ if uniques is None: if encode and onehot_dtype: raise ValueError("Cannot use 'encode` and 'onehot_dtype' simultaneously.") if encode: uniques, encoded = da.unique(values, return_inverse=True) return uniques, encoded else: return da.unique(values) if encode: if onehot_dtype: dtype = onehot_dtype new_axis = 1 chunks = values.chunks + (len(uniques),) else: dtype = np.dtype("int") new_axis = None chunks = values.chunks return ( uniques, values.map_blocks( _check_and_search_block, uniques, onehot_dtype=onehot_dtype, dtype=dtype, new_axis=new_axis, chunks=chunks, ), ) else: return uniques
python
def _encode_dask_array(values, uniques=None, encode=False, onehot_dtype=None): if uniques is None: if encode and onehot_dtype: raise ValueError("Cannot use 'encode` and 'onehot_dtype' simultaneously.") if encode: uniques, encoded = da.unique(values, return_inverse=True) return uniques, encoded else: return da.unique(values) if encode: if onehot_dtype: dtype = onehot_dtype new_axis = 1 chunks = values.chunks + (len(uniques),) else: dtype = np.dtype("int") new_axis = None chunks = values.chunks return ( uniques, values.map_blocks( _check_and_search_block, uniques, onehot_dtype=onehot_dtype, dtype=dtype, new_axis=new_axis, chunks=chunks, ), ) else: return uniques
[ "def", "_encode_dask_array", "(", "values", ",", "uniques", "=", "None", ",", "encode", "=", "False", ",", "onehot_dtype", "=", "None", ")", ":", "if", "uniques", "is", "None", ":", "if", "encode", "and", "onehot_dtype", ":", "raise", "ValueError", "(", ...
One-hot or label encode a dask array. Parameters ---------- values : da.Array, shape [n_samples,] unqiques : np.ndarray, shape [n_uniques,] encode : bool, default False Whether to encode the values (True) or just discover the uniques. onehot_dtype : np.dtype, optional Optional dtype for the resulting one-hot encoded array. This changes the shape, dtype, and underlying storage of the returned dask array. ======= ================= ========================= thing onehot_dtype=None onehot_dtype=onehot_dtype ======= ================= ========================= shape (n_samples,) (n_samples, len(uniques)) dtype np.intp onehot_dtype storage np.ndarray scipy.sparse.csr_matrix ======= ================= ========================= Returns ------- uniques : ndarray The discovered uniques (uniques=None) or just `uniques` encoded : da.Array, optional The encoded values. Only returend when ``encode=True``.
[ "One", "-", "hot", "or", "label", "encode", "a", "dask", "array", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/preprocessing/label.py#L241-L301
235,135
dask/dask-ml
dask_ml/model_selection/methods.py
pipeline
def pipeline(names, steps): """Reconstruct a Pipeline from names and steps""" steps, times = zip(*map(_maybe_timed, steps)) fit_time = sum(times) if any(s is FIT_FAILURE for s in steps): fit_est = FIT_FAILURE else: fit_est = Pipeline(list(zip(names, steps))) return fit_est, fit_time
python
def pipeline(names, steps): steps, times = zip(*map(_maybe_timed, steps)) fit_time = sum(times) if any(s is FIT_FAILURE for s in steps): fit_est = FIT_FAILURE else: fit_est = Pipeline(list(zip(names, steps))) return fit_est, fit_time
[ "def", "pipeline", "(", "names", ",", "steps", ")", ":", "steps", ",", "times", "=", "zip", "(", "*", "map", "(", "_maybe_timed", ",", "steps", ")", ")", "fit_time", "=", "sum", "(", "times", ")", "if", "any", "(", "s", "is", "FIT_FAILURE", "for", ...
Reconstruct a Pipeline from names and steps
[ "Reconstruct", "a", "Pipeline", "from", "names", "and", "steps" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/methods.py#L183-L191
235,136
dask/dask-ml
dask_ml/model_selection/methods.py
feature_union
def feature_union(names, steps, weights): """Reconstruct a FeatureUnion from names, steps, and weights""" steps, times = zip(*map(_maybe_timed, steps)) fit_time = sum(times) if any(s is FIT_FAILURE for s in steps): fit_est = FIT_FAILURE else: fit_est = FeatureUnion(list(zip(names, steps)), transformer_weights=weights) return fit_est, fit_time
python
def feature_union(names, steps, weights): steps, times = zip(*map(_maybe_timed, steps)) fit_time = sum(times) if any(s is FIT_FAILURE for s in steps): fit_est = FIT_FAILURE else: fit_est = FeatureUnion(list(zip(names, steps)), transformer_weights=weights) return fit_est, fit_time
[ "def", "feature_union", "(", "names", ",", "steps", ",", "weights", ")", ":", "steps", ",", "times", "=", "zip", "(", "*", "map", "(", "_maybe_timed", ",", "steps", ")", ")", "fit_time", "=", "sum", "(", "times", ")", "if", "any", "(", "s", "is", ...
Reconstruct a FeatureUnion from names, steps, and weights
[ "Reconstruct", "a", "FeatureUnion", "from", "names", "steps", "and", "weights" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/methods.py#L194-L202
235,137
dask/dask-ml
dask_ml/model_selection/methods.py
feature_union_concat
def feature_union_concat(Xs, nsamples, weights): """Apply weights and concatenate outputs from a FeatureUnion""" if any(x is FIT_FAILURE for x in Xs): return FIT_FAILURE Xs = [X if w is None else X * w for X, w in zip(Xs, weights) if X is not None] if not Xs: return np.zeros((nsamples, 0)) if any(sparse.issparse(f) for f in Xs): return sparse.hstack(Xs).tocsr() return np.hstack(Xs)
python
def feature_union_concat(Xs, nsamples, weights): if any(x is FIT_FAILURE for x in Xs): return FIT_FAILURE Xs = [X if w is None else X * w for X, w in zip(Xs, weights) if X is not None] if not Xs: return np.zeros((nsamples, 0)) if any(sparse.issparse(f) for f in Xs): return sparse.hstack(Xs).tocsr() return np.hstack(Xs)
[ "def", "feature_union_concat", "(", "Xs", ",", "nsamples", ",", "weights", ")", ":", "if", "any", "(", "x", "is", "FIT_FAILURE", "for", "x", "in", "Xs", ")", ":", "return", "FIT_FAILURE", "Xs", "=", "[", "X", "if", "w", "is", "None", "else", "X", "...
Apply weights and concatenate outputs from a FeatureUnion
[ "Apply", "weights", "and", "concatenate", "outputs", "from", "a", "FeatureUnion" ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/methods.py#L205-L214
235,138
dask/dask-ml
dask_ml/model_selection/_split.py
_generate_idx
def _generate_idx(n, seed, n_train, n_test): """Generate train, test indices for a length-n array. Parameters ---------- n : int The length of the array seed : int Seed for a RandomState n_train, n_test : int, 0 < n_train, n_test < n Number of samples to use for the train or test index. Notes ----- """ idx = check_random_state(seed).permutation(n) ind_test = idx[:n_test] ind_train = idx[n_test : n_train + n_test] return ind_train, ind_test
python
def _generate_idx(n, seed, n_train, n_test): idx = check_random_state(seed).permutation(n) ind_test = idx[:n_test] ind_train = idx[n_test : n_train + n_test] return ind_train, ind_test
[ "def", "_generate_idx", "(", "n", ",", "seed", ",", "n_train", ",", "n_test", ")", ":", "idx", "=", "check_random_state", "(", "seed", ")", ".", "permutation", "(", "n", ")", "ind_test", "=", "idx", "[", ":", "n_test", "]", "ind_train", "=", "idx", "...
Generate train, test indices for a length-n array. Parameters ---------- n : int The length of the array seed : int Seed for a RandomState n_train, n_test : int, 0 < n_train, n_test < n Number of samples to use for the train or test index. Notes -----
[ "Generate", "train", "test", "indices", "for", "a", "length", "-", "n", "array", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_split.py#L67-L87
235,139
dask/dask-ml
dask_ml/model_selection/_split.py
_blockwise_slice
def _blockwise_slice(arr, idx): """Slice an array that is blockwise-aligned with idx. Parameters ---------- arr : Dask array idx : Dask array Should have the following properties * Same blocks as `arr` along the first dimension * Contains only integers * Each block's values should be between ``[0, len(block))`` Returns ------- sliced : dask.Array """ objs = [] offsets = np.hstack([0, np.cumsum(arr.chunks[0])[:-1]]) for i, (x, idx2) in enumerate( zip(arr.to_delayed().ravel(), idx.to_delayed().ravel()) ): idx3 = idx2 - offsets[i] objs.append(x[idx3]) shapes = idx.chunks[0] if arr.ndim == 2: P = arr.shape[1] shapes = [(x, P) for x in shapes] else: shapes = [(x,) for x in shapes] sliced = da.concatenate( [ da.from_delayed(x, shape=shape, dtype=arr.dtype) for x, shape in zip(objs, shapes) ] ) return sliced
python
def _blockwise_slice(arr, idx): objs = [] offsets = np.hstack([0, np.cumsum(arr.chunks[0])[:-1]]) for i, (x, idx2) in enumerate( zip(arr.to_delayed().ravel(), idx.to_delayed().ravel()) ): idx3 = idx2 - offsets[i] objs.append(x[idx3]) shapes = idx.chunks[0] if arr.ndim == 2: P = arr.shape[1] shapes = [(x, P) for x in shapes] else: shapes = [(x,) for x in shapes] sliced = da.concatenate( [ da.from_delayed(x, shape=shape, dtype=arr.dtype) for x, shape in zip(objs, shapes) ] ) return sliced
[ "def", "_blockwise_slice", "(", "arr", ",", "idx", ")", ":", "objs", "=", "[", "]", "offsets", "=", "np", ".", "hstack", "(", "[", "0", ",", "np", ".", "cumsum", "(", "arr", ".", "chunks", "[", "0", "]", ")", "[", ":", "-", "1", "]", "]", "...
Slice an array that is blockwise-aligned with idx. Parameters ---------- arr : Dask array idx : Dask array Should have the following properties * Same blocks as `arr` along the first dimension * Contains only integers * Each block's values should be between ``[0, len(block))`` Returns ------- sliced : dask.Array
[ "Slice", "an", "array", "that", "is", "blockwise", "-", "aligned", "with", "idx", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_split.py#L317-L356
235,140
dask/dask-ml
dask_ml/feature_extraction/text.py
HashingVectorizer.transform
def transform(self, X): """Transform a sequence of documents to a document-term matrix. Transformation is done in parallel, and correctly handles dask collections. Parameters ---------- X : dask.Bag of raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. Returns ------- X : dask.array.Array, shape = (n_samples, self.n_features) Document-term matrix. Each block of the array is a scipy sparse matrix. Notes ----- The returned dask Array is composed scipy sparse matricies. If you need to compute on the result immediately, you may need to convert the individual blocks to ndarrays or pydata/sparse matricies. >>> import sparse >>> X.map_blocks(sparse.COO.from_scipy_sparse, dtype=X.dtype) # doctest: +SKIP See the :doc:`examples/text-vectorization` for more. """ msg = "'X' should be a 1-dimensional array with length 'num_samples'." if not dask.is_dask_collection(X): return super(HashingVectorizer, self).transform(X) if isinstance(X, db.Bag): bag2 = X.map_partitions(_transform, estimator=self) objs = bag2.to_delayed() arrs = [ da.from_delayed(obj, (np.nan, self.n_features), self.dtype) for obj in objs ] result = da.concatenate(arrs, axis=0) elif isinstance(X, dd.Series): result = X.map_partitions(_transform, self) elif isinstance(X, da.Array): # dask.Array chunks = ((np.nan,) * X.numblocks[0], (self.n_features,)) if X.ndim == 1: result = X.map_blocks( _transform, estimator=self, dtype="f8", chunks=chunks, new_axis=1 ) else: raise ValueError(msg) else: raise ValueError(msg) return result
python
def transform(self, X): msg = "'X' should be a 1-dimensional array with length 'num_samples'." if not dask.is_dask_collection(X): return super(HashingVectorizer, self).transform(X) if isinstance(X, db.Bag): bag2 = X.map_partitions(_transform, estimator=self) objs = bag2.to_delayed() arrs = [ da.from_delayed(obj, (np.nan, self.n_features), self.dtype) for obj in objs ] result = da.concatenate(arrs, axis=0) elif isinstance(X, dd.Series): result = X.map_partitions(_transform, self) elif isinstance(X, da.Array): # dask.Array chunks = ((np.nan,) * X.numblocks[0], (self.n_features,)) if X.ndim == 1: result = X.map_blocks( _transform, estimator=self, dtype="f8", chunks=chunks, new_axis=1 ) else: raise ValueError(msg) else: raise ValueError(msg) return result
[ "def", "transform", "(", "self", ",", "X", ")", ":", "msg", "=", "\"'X' should be a 1-dimensional array with length 'num_samples'.\"", "if", "not", "dask", ".", "is_dask_collection", "(", "X", ")", ":", "return", "super", "(", "HashingVectorizer", ",", "self", ")"...
Transform a sequence of documents to a document-term matrix. Transformation is done in parallel, and correctly handles dask collections. Parameters ---------- X : dask.Bag of raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. Returns ------- X : dask.array.Array, shape = (n_samples, self.n_features) Document-term matrix. Each block of the array is a scipy sparse matrix. Notes ----- The returned dask Array is composed scipy sparse matricies. If you need to compute on the result immediately, you may need to convert the individual blocks to ndarrays or pydata/sparse matricies. >>> import sparse >>> X.map_blocks(sparse.COO.from_scipy_sparse, dtype=X.dtype) # doctest: +SKIP See the :doc:`examples/text-vectorization` for more.
[ "Transform", "a", "sequence", "of", "documents", "to", "a", "document", "-", "term", "matrix", "." ]
cc4837c2c2101f9302cac38354b55754263cd1f3
https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/feature_extraction/text.py#L10-L67
235,141
MIT-LCP/wfdb-python
wfdb/processing/peaks.py
correct_peaks
def correct_peaks(sig, peak_inds, search_radius, smooth_window_size, peak_dir='compare'): """ Adjust a set of detected peaks to coincide with local signal maxima, and Parameters ---------- sig : numpy array The 1d signal array peak_inds : np array Array of the original peak indices max_gap : int The radius within which the original peaks may be shifted. smooth_window_size : int The window size of the moving average filter applied on the signal. Peak distance is calculated on the difference between the original and smoothed signal. peak_dir : str, optional The expected peak direction: 'up' or 'down', 'both', or 'compare'. - If 'up', the peaks will be shifted to local maxima - If 'down', the peaks will be shifted to local minima - If 'both', the peaks will be shifted to local maxima of the rectified signal - If 'compare', the function will try both 'up' and 'down' options, and choose the direction that gives the largest mean distance from the smoothed signal. Returns ------- corrected_peak_inds : numpy array Array of the corrected peak indices Examples -------- """ sig_len = sig.shape[0] n_peaks = len(peak_inds) # Subtract the smoothed signal from the original sig = sig - smooth(sig=sig, window_size=smooth_window_size) # Shift peaks to local maxima if peak_dir == 'up': shifted_peak_inds = shift_peaks(sig=sig, peak_inds=peak_inds, search_radius=search_radius, peak_up=True) elif peak_dir == 'down': shifted_peak_inds = shift_peaks(sig=sig, peak_inds=peak_inds, search_radius=search_radius, peak_up=False) elif peak_dir == 'both': shifted_peak_inds = shift_peaks(sig=np.abs(sig), peak_inds=peak_inds, search_radius=search_radius, peak_up=True) else: shifted_peak_inds_up = shift_peaks(sig=sig, peak_inds=peak_inds, search_radius=search_radius, peak_up=True) shifted_peak_inds_down = shift_peaks(sig=sig, peak_inds=peak_inds, search_radius=search_radius, peak_up=False) # Choose the direction with the biggest deviation up_dist = np.mean(np.abs(sig[shifted_peak_inds_up])) down_dist 
= np.mean(np.abs(sig[shifted_peak_inds_down])) if up_dist >= down_dist: shifted_peak_inds = shifted_peak_inds_up else: shifted_peak_inds = shifted_peak_inds_down return shifted_peak_inds
python
def correct_peaks(sig, peak_inds, search_radius, smooth_window_size, peak_dir='compare'): sig_len = sig.shape[0] n_peaks = len(peak_inds) # Subtract the smoothed signal from the original sig = sig - smooth(sig=sig, window_size=smooth_window_size) # Shift peaks to local maxima if peak_dir == 'up': shifted_peak_inds = shift_peaks(sig=sig, peak_inds=peak_inds, search_radius=search_radius, peak_up=True) elif peak_dir == 'down': shifted_peak_inds = shift_peaks(sig=sig, peak_inds=peak_inds, search_radius=search_radius, peak_up=False) elif peak_dir == 'both': shifted_peak_inds = shift_peaks(sig=np.abs(sig), peak_inds=peak_inds, search_radius=search_radius, peak_up=True) else: shifted_peak_inds_up = shift_peaks(sig=sig, peak_inds=peak_inds, search_radius=search_radius, peak_up=True) shifted_peak_inds_down = shift_peaks(sig=sig, peak_inds=peak_inds, search_radius=search_radius, peak_up=False) # Choose the direction with the biggest deviation up_dist = np.mean(np.abs(sig[shifted_peak_inds_up])) down_dist = np.mean(np.abs(sig[shifted_peak_inds_down])) if up_dist >= down_dist: shifted_peak_inds = shifted_peak_inds_up else: shifted_peak_inds = shifted_peak_inds_down return shifted_peak_inds
[ "def", "correct_peaks", "(", "sig", ",", "peak_inds", ",", "search_radius", ",", "smooth_window_size", ",", "peak_dir", "=", "'compare'", ")", ":", "sig_len", "=", "sig", ".", "shape", "[", "0", "]", "n_peaks", "=", "len", "(", "peak_inds", ")", "# Subtrac...
Adjust a set of detected peaks to coincide with local signal maxima, and Parameters ---------- sig : numpy array The 1d signal array peak_inds : np array Array of the original peak indices max_gap : int The radius within which the original peaks may be shifted. smooth_window_size : int The window size of the moving average filter applied on the signal. Peak distance is calculated on the difference between the original and smoothed signal. peak_dir : str, optional The expected peak direction: 'up' or 'down', 'both', or 'compare'. - If 'up', the peaks will be shifted to local maxima - If 'down', the peaks will be shifted to local minima - If 'both', the peaks will be shifted to local maxima of the rectified signal - If 'compare', the function will try both 'up' and 'down' options, and choose the direction that gives the largest mean distance from the smoothed signal. Returns ------- corrected_peak_inds : numpy array Array of the corrected peak indices Examples --------
[ "Adjust", "a", "set", "of", "detected", "peaks", "to", "coincide", "with", "local", "signal", "maxima", "and" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/processing/peaks.py#L106-L188
235,142
MIT-LCP/wfdb-python
wfdb/processing/peaks.py
shift_peaks
def shift_peaks(sig, peak_inds, search_radius, peak_up): """ Helper function for correct_peaks. Return the shifted peaks to local maxima or minima within a radius. peak_up : bool Whether the expected peak direction is up """ sig_len = sig.shape[0] n_peaks = len(peak_inds) # The indices to shift each peak ind by shift_inds = np.zeros(n_peaks, dtype='int') # Iterate through peaks for i in range(n_peaks): ind = peak_inds[i] local_sig = sig[max(0, ind - search_radius):min(ind + search_radius, sig_len-1)] if peak_up: shift_inds[i] = np.argmax(local_sig) else: shift_inds[i] = np.argmin(local_sig) # May have to adjust early values for i in range(n_peaks): ind = peak_inds[i] if ind >= search_radius: break shift_inds[i] -= search_radius - ind shifted_peak_inds = peak_inds + shift_inds - search_radius return shifted_peak_inds
python
def shift_peaks(sig, peak_inds, search_radius, peak_up): sig_len = sig.shape[0] n_peaks = len(peak_inds) # The indices to shift each peak ind by shift_inds = np.zeros(n_peaks, dtype='int') # Iterate through peaks for i in range(n_peaks): ind = peak_inds[i] local_sig = sig[max(0, ind - search_radius):min(ind + search_radius, sig_len-1)] if peak_up: shift_inds[i] = np.argmax(local_sig) else: shift_inds[i] = np.argmin(local_sig) # May have to adjust early values for i in range(n_peaks): ind = peak_inds[i] if ind >= search_radius: break shift_inds[i] -= search_radius - ind shifted_peak_inds = peak_inds + shift_inds - search_radius return shifted_peak_inds
[ "def", "shift_peaks", "(", "sig", ",", "peak_inds", ",", "search_radius", ",", "peak_up", ")", ":", "sig_len", "=", "sig", ".", "shape", "[", "0", "]", "n_peaks", "=", "len", "(", "peak_inds", ")", "# The indices to shift each peak ind by", "shift_inds", "=", ...
Helper function for correct_peaks. Return the shifted peaks to local maxima or minima within a radius. peak_up : bool Whether the expected peak direction is up
[ "Helper", "function", "for", "correct_peaks", ".", "Return", "the", "shifted", "peaks", "to", "local", "maxima", "or", "minima", "within", "a", "radius", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/processing/peaks.py#L191-L223
235,143
MIT-LCP/wfdb-python
wfdb/plot/plot.py
get_plot_dims
def get_plot_dims(signal, ann_samp): "Figure out the number of plot channels" if signal is not None: if signal.ndim == 1: sig_len = len(signal) n_sig = 1 else: sig_len = signal.shape[0] n_sig = signal.shape[1] else: sig_len = 0 n_sig = 0 if ann_samp is not None: n_annot = len(ann_samp) else: n_annot = 0 return sig_len, n_sig, n_annot, max(n_sig, n_annot)
python
def get_plot_dims(signal, ann_samp): "Figure out the number of plot channels" if signal is not None: if signal.ndim == 1: sig_len = len(signal) n_sig = 1 else: sig_len = signal.shape[0] n_sig = signal.shape[1] else: sig_len = 0 n_sig = 0 if ann_samp is not None: n_annot = len(ann_samp) else: n_annot = 0 return sig_len, n_sig, n_annot, max(n_sig, n_annot)
[ "def", "get_plot_dims", "(", "signal", ",", "ann_samp", ")", ":", "if", "signal", "is", "not", "None", ":", "if", "signal", ".", "ndim", "==", "1", ":", "sig_len", "=", "len", "(", "signal", ")", "n_sig", "=", "1", "else", ":", "sig_len", "=", "sig...
Figure out the number of plot channels
[ "Figure", "out", "the", "number", "of", "plot", "channels" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L125-L143
235,144
MIT-LCP/wfdb-python
wfdb/plot/plot.py
create_figure
def create_figure(n_subplots, figsize): "Create the plot figure and subplot axes" fig = plt.figure(figsize=figsize) axes = [] for i in range(n_subplots): axes.append(fig.add_subplot(n_subplots, 1, i+1)) return fig, axes
python
def create_figure(n_subplots, figsize): "Create the plot figure and subplot axes" fig = plt.figure(figsize=figsize) axes = [] for i in range(n_subplots): axes.append(fig.add_subplot(n_subplots, 1, i+1)) return fig, axes
[ "def", "create_figure", "(", "n_subplots", ",", "figsize", ")", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "axes", "=", "[", "]", "for", "i", "in", "range", "(", "n_subplots", ")", ":", "axes", ".", "append", "(", "...
Create the plot figure and subplot axes
[ "Create", "the", "plot", "figure", "and", "subplot", "axes" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L146-L154
235,145
MIT-LCP/wfdb-python
wfdb/plot/plot.py
plot_signal
def plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes): "Plot signal channels" # Extend signal style if necesary if len(sig_style) == 1: sig_style = n_sig * sig_style # Figure out time indices if time_units == 'samples': t = np.linspace(0, sig_len-1, sig_len) else: downsample_factor = {'seconds':fs, 'minutes':fs * 60, 'hours':fs * 3600} t = np.linspace(0, sig_len-1, sig_len) / downsample_factor[time_units] # Plot the signals if signal.ndim == 1: axes[0].plot(t, signal, sig_style[0], zorder=3) else: for ch in range(n_sig): axes[ch].plot(t, signal[:,ch], sig_style[ch], zorder=3)
python
def plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes): "Plot signal channels" # Extend signal style if necesary if len(sig_style) == 1: sig_style = n_sig * sig_style # Figure out time indices if time_units == 'samples': t = np.linspace(0, sig_len-1, sig_len) else: downsample_factor = {'seconds':fs, 'minutes':fs * 60, 'hours':fs * 3600} t = np.linspace(0, sig_len-1, sig_len) / downsample_factor[time_units] # Plot the signals if signal.ndim == 1: axes[0].plot(t, signal, sig_style[0], zorder=3) else: for ch in range(n_sig): axes[ch].plot(t, signal[:,ch], sig_style[ch], zorder=3)
[ "def", "plot_signal", "(", "signal", ",", "sig_len", ",", "n_sig", ",", "fs", ",", "time_units", ",", "sig_style", ",", "axes", ")", ":", "# Extend signal style if necesary", "if", "len", "(", "sig_style", ")", "==", "1", ":", "sig_style", "=", "n_sig", "*...
Plot signal channels
[ "Plot", "signal", "channels" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L157-L177
235,146
MIT-LCP/wfdb-python
wfdb/plot/plot.py
plot_annotation
def plot_annotation(ann_samp, n_annot, ann_sym, signal, n_sig, fs, time_units, ann_style, axes): "Plot annotations, possibly overlaid on signals" # Extend annotation style if necesary if len(ann_style) == 1: ann_style = n_annot * ann_style # Figure out downsample factor for time indices if time_units == 'samples': downsample_factor = 1 else: downsample_factor = {'seconds':float(fs), 'minutes':float(fs)*60, 'hours':float(fs)*3600}[time_units] # Plot the annotations for ch in range(n_annot): if ann_samp[ch] is not None and len(ann_samp[ch]): # Figure out the y values to plot on a channel basis # 1 dimensional signals if n_sig > ch: if signal.ndim == 1: y = signal[ann_samp[ch]] else: y = signal[ann_samp[ch], ch] else: y = np.zeros(len(ann_samp[ch])) axes[ch].plot(ann_samp[ch] / downsample_factor, y, ann_style[ch]) # Plot the annotation symbols if any if ann_sym is not None and ann_sym[ch] is not None: for i, s in enumerate(ann_sym[ch]): axes[ch].annotate(s, (ann_samp[ch][i] / downsample_factor, y[i]))
python
def plot_annotation(ann_samp, n_annot, ann_sym, signal, n_sig, fs, time_units, ann_style, axes): "Plot annotations, possibly overlaid on signals" # Extend annotation style if necesary if len(ann_style) == 1: ann_style = n_annot * ann_style # Figure out downsample factor for time indices if time_units == 'samples': downsample_factor = 1 else: downsample_factor = {'seconds':float(fs), 'minutes':float(fs)*60, 'hours':float(fs)*3600}[time_units] # Plot the annotations for ch in range(n_annot): if ann_samp[ch] is not None and len(ann_samp[ch]): # Figure out the y values to plot on a channel basis # 1 dimensional signals if n_sig > ch: if signal.ndim == 1: y = signal[ann_samp[ch]] else: y = signal[ann_samp[ch], ch] else: y = np.zeros(len(ann_samp[ch])) axes[ch].plot(ann_samp[ch] / downsample_factor, y, ann_style[ch]) # Plot the annotation symbols if any if ann_sym is not None and ann_sym[ch] is not None: for i, s in enumerate(ann_sym[ch]): axes[ch].annotate(s, (ann_samp[ch][i] / downsample_factor, y[i]))
[ "def", "plot_annotation", "(", "ann_samp", ",", "n_annot", ",", "ann_sym", ",", "signal", ",", "n_sig", ",", "fs", ",", "time_units", ",", "ann_style", ",", "axes", ")", ":", "# Extend annotation style if necesary", "if", "len", "(", "ann_style", ")", "==", ...
Plot annotations, possibly overlaid on signals
[ "Plot", "annotations", "possibly", "overlaid", "on", "signals" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L180-L214
235,147
MIT-LCP/wfdb-python
wfdb/plot/plot.py
plot_ecg_grids
def plot_ecg_grids(ecg_grids, fs, units, time_units, axes): "Add ecg grids to the axes" if ecg_grids == 'all': ecg_grids = range(0, len(axes)) for ch in ecg_grids: # Get the initial plot limits auto_xlims = axes[ch].get_xlim() auto_ylims= axes[ch].get_ylim() (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y) = calc_ecg_grids(auto_ylims[0], auto_ylims[1], units[ch], fs, auto_xlims[1], time_units) min_x, max_x = np.min(minor_ticks_x), np.max(minor_ticks_x) min_y, max_y = np.min(minor_ticks_y), np.max(minor_ticks_y) for tick in minor_ticks_x: axes[ch].plot([tick, tick], [min_y, max_y], c='#ededed', marker='|', zorder=1) for tick in major_ticks_x: axes[ch].plot([tick, tick], [min_y, max_y], c='#bababa', marker='|', zorder=2) for tick in minor_ticks_y: axes[ch].plot([min_x, max_x], [tick, tick], c='#ededed', marker='_', zorder=1) for tick in major_ticks_y: axes[ch].plot([min_x, max_x], [tick, tick], c='#bababa', marker='_', zorder=2) # Plotting the lines changes the graph. Set the limits back axes[ch].set_xlim(auto_xlims) axes[ch].set_ylim(auto_ylims)
python
def plot_ecg_grids(ecg_grids, fs, units, time_units, axes): "Add ecg grids to the axes" if ecg_grids == 'all': ecg_grids = range(0, len(axes)) for ch in ecg_grids: # Get the initial plot limits auto_xlims = axes[ch].get_xlim() auto_ylims= axes[ch].get_ylim() (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y) = calc_ecg_grids(auto_ylims[0], auto_ylims[1], units[ch], fs, auto_xlims[1], time_units) min_x, max_x = np.min(minor_ticks_x), np.max(minor_ticks_x) min_y, max_y = np.min(minor_ticks_y), np.max(minor_ticks_y) for tick in minor_ticks_x: axes[ch].plot([tick, tick], [min_y, max_y], c='#ededed', marker='|', zorder=1) for tick in major_ticks_x: axes[ch].plot([tick, tick], [min_y, max_y], c='#bababa', marker='|', zorder=2) for tick in minor_ticks_y: axes[ch].plot([min_x, max_x], [tick, tick], c='#ededed', marker='_', zorder=1) for tick in major_ticks_y: axes[ch].plot([min_x, max_x], [tick, tick], c='#bababa', marker='_', zorder=2) # Plotting the lines changes the graph. Set the limits back axes[ch].set_xlim(auto_xlims) axes[ch].set_ylim(auto_ylims)
[ "def", "plot_ecg_grids", "(", "ecg_grids", ",", "fs", ",", "units", ",", "time_units", ",", "axes", ")", ":", "if", "ecg_grids", "==", "'all'", ":", "ecg_grids", "=", "range", "(", "0", ",", "len", "(", "axes", ")", ")", "for", "ch", "in", "ecg_grids...
Add ecg grids to the axes
[ "Add", "ecg", "grids", "to", "the", "axes" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L217-L251
235,148
MIT-LCP/wfdb-python
wfdb/plot/plot.py
calc_ecg_grids
def calc_ecg_grids(minsig, maxsig, sig_units, fs, maxt, time_units): """ Calculate tick intervals for ecg grids - 5mm 0.2s major grids, 0.04s minor grids - 0.5mV major grids, 0.125 minor grids 10 mm is equal to 1mV in voltage. """ # Get the grid interval of the x axis if time_units == 'samples': majorx = 0.2 * fs minorx = 0.04 * fs elif time_units == 'seconds': majorx = 0.2 minorx = 0.04 elif time_units == 'minutes': majorx = 0.2 / 60 minorx = 0.04/60 elif time_units == 'hours': majorx = 0.2 / 3600 minorx = 0.04 / 3600 # Get the grid interval of the y axis if sig_units.lower()=='uv': majory = 500 minory = 125 elif sig_units.lower()=='mv': majory = 0.5 minory = 0.125 elif sig_units.lower()=='v': majory = 0.0005 minory = 0.000125 else: raise ValueError('Signal units must be uV, mV, or V to plot ECG grids.') major_ticks_x = np.arange(0, upround(maxt, majorx) + 0.0001, majorx) minor_ticks_x = np.arange(0, upround(maxt, majorx) + 0.0001, minorx) major_ticks_y = np.arange(downround(minsig, majory), upround(maxsig, majory) + 0.0001, majory) minor_ticks_y = np.arange(downround(minsig, majory), upround(maxsig, majory) + 0.0001, minory) return (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y)
python
def calc_ecg_grids(minsig, maxsig, sig_units, fs, maxt, time_units): # Get the grid interval of the x axis if time_units == 'samples': majorx = 0.2 * fs minorx = 0.04 * fs elif time_units == 'seconds': majorx = 0.2 minorx = 0.04 elif time_units == 'minutes': majorx = 0.2 / 60 minorx = 0.04/60 elif time_units == 'hours': majorx = 0.2 / 3600 minorx = 0.04 / 3600 # Get the grid interval of the y axis if sig_units.lower()=='uv': majory = 500 minory = 125 elif sig_units.lower()=='mv': majory = 0.5 minory = 0.125 elif sig_units.lower()=='v': majory = 0.0005 minory = 0.000125 else: raise ValueError('Signal units must be uV, mV, or V to plot ECG grids.') major_ticks_x = np.arange(0, upround(maxt, majorx) + 0.0001, majorx) minor_ticks_x = np.arange(0, upround(maxt, majorx) + 0.0001, minorx) major_ticks_y = np.arange(downround(minsig, majory), upround(maxsig, majory) + 0.0001, majory) minor_ticks_y = np.arange(downround(minsig, majory), upround(maxsig, majory) + 0.0001, minory) return (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y)
[ "def", "calc_ecg_grids", "(", "minsig", ",", "maxsig", ",", "sig_units", ",", "fs", ",", "maxt", ",", "time_units", ")", ":", "# Get the grid interval of the x axis", "if", "time_units", "==", "'samples'", ":", "majorx", "=", "0.2", "*", "fs", "minorx", "=", ...
Calculate tick intervals for ecg grids - 5mm 0.2s major grids, 0.04s minor grids - 0.5mV major grids, 0.125 minor grids 10 mm is equal to 1mV in voltage.
[ "Calculate", "tick", "intervals", "for", "ecg", "grids" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L253-L297
235,149
MIT-LCP/wfdb-python
wfdb/plot/plot.py
label_figure
def label_figure(axes, n_subplots, time_units, sig_name, sig_units, ylabel, title): "Add title, and axes labels" if title: axes[0].set_title(title) # Determine y label # Explicit labels take precedence if present. Otherwise, construct labels # using signal names and units if not ylabel: ylabel = [] # Set default channel and signal names if needed if not sig_name: sig_name = ['ch_'+str(i) for i in range(n_subplots)] if not sig_units: sig_units = n_subplots * ['NU'] ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)] # If there are annotations with channels outside of signal range # put placeholders n_missing_labels = n_subplots - len(ylabel) if n_missing_labels: ylabel = ylabel + ['ch_%d/NU' % i for i in range(len(ylabel), n_subplots)] for ch in range(n_subplots): axes[ch].set_ylabel(ylabel[ch]) axes[-1].set_xlabel('/'.join(['time', time_units[:-1]]))
python
def label_figure(axes, n_subplots, time_units, sig_name, sig_units, ylabel, title): "Add title, and axes labels" if title: axes[0].set_title(title) # Determine y label # Explicit labels take precedence if present. Otherwise, construct labels # using signal names and units if not ylabel: ylabel = [] # Set default channel and signal names if needed if not sig_name: sig_name = ['ch_'+str(i) for i in range(n_subplots)] if not sig_units: sig_units = n_subplots * ['NU'] ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)] # If there are annotations with channels outside of signal range # put placeholders n_missing_labels = n_subplots - len(ylabel) if n_missing_labels: ylabel = ylabel + ['ch_%d/NU' % i for i in range(len(ylabel), n_subplots)] for ch in range(n_subplots): axes[ch].set_ylabel(ylabel[ch]) axes[-1].set_xlabel('/'.join(['time', time_units[:-1]]))
[ "def", "label_figure", "(", "axes", ",", "n_subplots", ",", "time_units", ",", "sig_name", ",", "sig_units", ",", "ylabel", ",", "title", ")", ":", "if", "title", ":", "axes", "[", "0", "]", ".", "set_title", "(", "title", ")", "# Determine y label", "# ...
Add title, and axes labels
[ "Add", "title", "and", "axes", "labels" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L300-L329
235,150
MIT-LCP/wfdb-python
wfdb/plot/plot.py
get_wfdb_plot_items
def get_wfdb_plot_items(record, annotation, plot_sym): """ Get items to plot from wfdb objects """ # Get record attributes if record: if record.p_signal is not None: signal = record.p_signal elif record.d_signal is not None: signal = record.d_signal else: raise ValueError('The record has no signal to plot') fs = record.fs sig_name = record.sig_name sig_units = record.units record_name = 'Record: %s' % record.record_name ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)] else: signal = fs = ylabel = record_name = None # Get annotation attributes if annotation: # Get channels ann_chans = set(annotation.chan) n_ann_chans = max(ann_chans) + 1 # Indices for each channel chan_inds = n_ann_chans * [np.empty(0, dtype='int')] for chan in ann_chans: chan_inds[chan] = np.where(annotation.chan == chan)[0] ann_samp = [annotation.sample[ci] for ci in chan_inds] if plot_sym: ann_sym = n_ann_chans * [None] for ch in ann_chans: ann_sym[ch] = [annotation.symbol[ci] for ci in chan_inds[ch]] else: ann_sym = None # Try to get fs from annotation if not already in record if fs is None: fs = annotation.fs record_name = record_name or annotation.record_name else: ann_samp = None ann_sym = None # Cleaning: remove empty channels and set labels and styles. # Wrangle together the signal and annotation channels if necessary if record and annotation: # There may be instances in which the annotation `chan` # attribute has non-overlapping channels with the signal. # In this case, omit empty middle channels. This function should # already process labels and arrangements before passing into # `plot_items` sig_chans = set(range(signal.shape[1])) all_chans = sorted(sig_chans.union(ann_chans)) # Need to update ylabels and annotation values if sig_chans != all_chans: compact_ann_samp = [] if plot_sym: compact_ann_sym = [] else: compact_ann_sym = None ylabel = [] for ch in all_chans: # ie. 
0, 1, 9 if ch in ann_chans: compact_ann_samp.append(ann_samp[ch]) if plot_sym: compact_ann_sym.append(ann_sym[ch]) if ch in sig_chans: ylabel.append(''.join([sig_name[ch], sig_units[ch]])) else: ylabel.append('ch_%d/NU' % ch) ann_samp = compact_ann_samp ann_sym = compact_ann_sym # Signals encompass annotations else: ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)] # Remove any empty middle channels from annotations elif annotation: ann_samp = [a for a in ann_samp if a.size] if ann_sym is not None: ann_sym = [a for a in ann_sym if a] ylabel = ['ch_%d/NU' % ch for ch in ann_chans] return signal, ann_samp, ann_sym, fs, ylabel, record_name
python
def get_wfdb_plot_items(record, annotation, plot_sym): # Get record attributes if record: if record.p_signal is not None: signal = record.p_signal elif record.d_signal is not None: signal = record.d_signal else: raise ValueError('The record has no signal to plot') fs = record.fs sig_name = record.sig_name sig_units = record.units record_name = 'Record: %s' % record.record_name ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)] else: signal = fs = ylabel = record_name = None # Get annotation attributes if annotation: # Get channels ann_chans = set(annotation.chan) n_ann_chans = max(ann_chans) + 1 # Indices for each channel chan_inds = n_ann_chans * [np.empty(0, dtype='int')] for chan in ann_chans: chan_inds[chan] = np.where(annotation.chan == chan)[0] ann_samp = [annotation.sample[ci] for ci in chan_inds] if plot_sym: ann_sym = n_ann_chans * [None] for ch in ann_chans: ann_sym[ch] = [annotation.symbol[ci] for ci in chan_inds[ch]] else: ann_sym = None # Try to get fs from annotation if not already in record if fs is None: fs = annotation.fs record_name = record_name or annotation.record_name else: ann_samp = None ann_sym = None # Cleaning: remove empty channels and set labels and styles. # Wrangle together the signal and annotation channels if necessary if record and annotation: # There may be instances in which the annotation `chan` # attribute has non-overlapping channels with the signal. # In this case, omit empty middle channels. This function should # already process labels and arrangements before passing into # `plot_items` sig_chans = set(range(signal.shape[1])) all_chans = sorted(sig_chans.union(ann_chans)) # Need to update ylabels and annotation values if sig_chans != all_chans: compact_ann_samp = [] if plot_sym: compact_ann_sym = [] else: compact_ann_sym = None ylabel = [] for ch in all_chans: # ie. 
0, 1, 9 if ch in ann_chans: compact_ann_samp.append(ann_samp[ch]) if plot_sym: compact_ann_sym.append(ann_sym[ch]) if ch in sig_chans: ylabel.append(''.join([sig_name[ch], sig_units[ch]])) else: ylabel.append('ch_%d/NU' % ch) ann_samp = compact_ann_samp ann_sym = compact_ann_sym # Signals encompass annotations else: ylabel = ['/'.join(pair) for pair in zip(sig_name, sig_units)] # Remove any empty middle channels from annotations elif annotation: ann_samp = [a for a in ann_samp if a.size] if ann_sym is not None: ann_sym = [a for a in ann_sym if a] ylabel = ['ch_%d/NU' % ch for ch in ann_chans] return signal, ann_samp, ann_sym, fs, ylabel, record_name
[ "def", "get_wfdb_plot_items", "(", "record", ",", "annotation", ",", "plot_sym", ")", ":", "# Get record attributes", "if", "record", ":", "if", "record", ".", "p_signal", "is", "not", "None", ":", "signal", "=", "record", ".", "p_signal", "elif", "record", ...
Get items to plot from wfdb objects
[ "Get", "items", "to", "plot", "from", "wfdb", "objects" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/plot/plot.py#L417-L510
235,151
MIT-LCP/wfdb-python
wfdb/io/download.py
_remote_file_size
def _remote_file_size(url=None, file_name=None, pb_dir=None): """ Get the remote file size in bytes Parameters ---------- url : str, optional The full url of the file. Use this option to explicitly state the full url. file_name : str, optional The base file name. Use this argument along with pb_dir if you want the full url to be constructed. pb_dir : str, optional The base file name. Use this argument along with file_name if you want the full url to be constructed. Returns ------- remote_file_size : int Size of the file in bytes """ # Option to construct the url if file_name and pb_dir: url = posixpath.join(config.db_index_url, pb_dir, file_name) response = requests.head(url, headers={'Accept-Encoding': 'identity'}) # Raise HTTPError if invalid url response.raise_for_status() # Supposed size of the file remote_file_size = int(response.headers['content-length']) return remote_file_size
python
def _remote_file_size(url=None, file_name=None, pb_dir=None): # Option to construct the url if file_name and pb_dir: url = posixpath.join(config.db_index_url, pb_dir, file_name) response = requests.head(url, headers={'Accept-Encoding': 'identity'}) # Raise HTTPError if invalid url response.raise_for_status() # Supposed size of the file remote_file_size = int(response.headers['content-length']) return remote_file_size
[ "def", "_remote_file_size", "(", "url", "=", "None", ",", "file_name", "=", "None", ",", "pb_dir", "=", "None", ")", ":", "# Option to construct the url", "if", "file_name", "and", "pb_dir", ":", "url", "=", "posixpath", ".", "join", "(", "config", ".", "d...
Get the remote file size in bytes Parameters ---------- url : str, optional The full url of the file. Use this option to explicitly state the full url. file_name : str, optional The base file name. Use this argument along with pb_dir if you want the full url to be constructed. pb_dir : str, optional The base file name. Use this argument along with file_name if you want the full url to be constructed. Returns ------- remote_file_size : int Size of the file in bytes
[ "Get", "the", "remote", "file", "size", "in", "bytes" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L35-L69
235,152
MIT-LCP/wfdb-python
wfdb/io/download.py
_stream_header
def _stream_header(file_name, pb_dir): """ Stream the lines of a remote header file. Parameters ---------- file_name : str pb_dir : str The Physiobank database directory from which to find the required header file. eg. For file '100.hea' in 'http://physionet.org/physiobank/database/mitdb', pb_dir='mitdb'. """ # Full url of header location url = posixpath.join(config.db_index_url, pb_dir, file_name) response = requests.get(url) # Raise HTTPError if invalid url response.raise_for_status() # Get each line as a string filelines = response.content.decode('iso-8859-1').splitlines() # Separate content into header and comment lines header_lines = [] comment_lines = [] for line in filelines: line = str(line.strip()) # Comment line if line.startswith('#'): comment_lines.append(line) # Non-empty non-comment line = header line. elif line: # Look for a comment in the line ci = line.find('#') if ci > 0: header_lines.append(line[:ci]) # comment on same line as header line comment_lines.append(line[ci:]) else: header_lines.append(line) return (header_lines, comment_lines)
python
def _stream_header(file_name, pb_dir): # Full url of header location url = posixpath.join(config.db_index_url, pb_dir, file_name) response = requests.get(url) # Raise HTTPError if invalid url response.raise_for_status() # Get each line as a string filelines = response.content.decode('iso-8859-1').splitlines() # Separate content into header and comment lines header_lines = [] comment_lines = [] for line in filelines: line = str(line.strip()) # Comment line if line.startswith('#'): comment_lines.append(line) # Non-empty non-comment line = header line. elif line: # Look for a comment in the line ci = line.find('#') if ci > 0: header_lines.append(line[:ci]) # comment on same line as header line comment_lines.append(line[ci:]) else: header_lines.append(line) return (header_lines, comment_lines)
[ "def", "_stream_header", "(", "file_name", ",", "pb_dir", ")", ":", "# Full url of header location", "url", "=", "posixpath", ".", "join", "(", "config", ".", "db_index_url", ",", "pb_dir", ",", "file_name", ")", "response", "=", "requests", ".", "get", "(", ...
Stream the lines of a remote header file. Parameters ---------- file_name : str pb_dir : str The Physiobank database directory from which to find the required header file. eg. For file '100.hea' in 'http://physionet.org/physiobank/database/mitdb', pb_dir='mitdb'.
[ "Stream", "the", "lines", "of", "a", "remote", "header", "file", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L71-L115
235,153
MIT-LCP/wfdb-python
wfdb/io/download.py
_stream_dat
def _stream_dat(file_name, pb_dir, byte_count, start_byte, dtype): """ Stream data from a remote dat file, into a 1d numpy array. Parameters ---------- file_name : str The name of the dat file to be read. pb_dir : str The physiobank directory where the dat file is located. byte_count : int The number of bytes to be read. start_byte : int The starting byte number to read from. dtype : str The numpy dtype to load the data into. Returns ------- sig_data : numpy array The data read from the dat file. """ # Full url of dat file url = posixpath.join(config.db_index_url, pb_dir, file_name) # Specify the byte range end_byte = start_byte + byte_count - 1 headers = {"Range":"bytes=%d-%d" % (start_byte, end_byte), 'Accept-Encoding': '*'} # Get the content response = requests.get(url, headers=headers, stream=True) # Raise HTTPError if invalid url response.raise_for_status() # Convert to numpy array sig_data = np.fromstring(response.content, dtype=dtype) return sig_data
python
def _stream_dat(file_name, pb_dir, byte_count, start_byte, dtype): # Full url of dat file url = posixpath.join(config.db_index_url, pb_dir, file_name) # Specify the byte range end_byte = start_byte + byte_count - 1 headers = {"Range":"bytes=%d-%d" % (start_byte, end_byte), 'Accept-Encoding': '*'} # Get the content response = requests.get(url, headers=headers, stream=True) # Raise HTTPError if invalid url response.raise_for_status() # Convert to numpy array sig_data = np.fromstring(response.content, dtype=dtype) return sig_data
[ "def", "_stream_dat", "(", "file_name", ",", "pb_dir", ",", "byte_count", ",", "start_byte", ",", "dtype", ")", ":", "# Full url of dat file", "url", "=", "posixpath", ".", "join", "(", "config", ".", "db_index_url", ",", "pb_dir", ",", "file_name", ")", "# ...
Stream data from a remote dat file, into a 1d numpy array. Parameters ---------- file_name : str The name of the dat file to be read. pb_dir : str The physiobank directory where the dat file is located. byte_count : int The number of bytes to be read. start_byte : int The starting byte number to read from. dtype : str The numpy dtype to load the data into. Returns ------- sig_data : numpy array The data read from the dat file.
[ "Stream", "data", "from", "a", "remote", "dat", "file", "into", "a", "1d", "numpy", "array", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L118-L159
235,154
MIT-LCP/wfdb-python
wfdb/io/download.py
_stream_annotation
def _stream_annotation(file_name, pb_dir): """ Stream an entire remote annotation file from physiobank Parameters ---------- file_name : str The name of the annotation file to be read. pb_dir : str The physiobank directory where the annotation file is located. """ # Full url of annotation file url = posixpath.join(config.db_index_url, pb_dir, file_name) # Get the content response = requests.get(url) # Raise HTTPError if invalid url response.raise_for_status() # Convert to numpy array ann_data = np.fromstring(response.content, dtype=np.dtype('<u1')) return ann_data
python
def _stream_annotation(file_name, pb_dir): # Full url of annotation file url = posixpath.join(config.db_index_url, pb_dir, file_name) # Get the content response = requests.get(url) # Raise HTTPError if invalid url response.raise_for_status() # Convert to numpy array ann_data = np.fromstring(response.content, dtype=np.dtype('<u1')) return ann_data
[ "def", "_stream_annotation", "(", "file_name", ",", "pb_dir", ")", ":", "# Full url of annotation file", "url", "=", "posixpath", ".", "join", "(", "config", ".", "db_index_url", ",", "pb_dir", ",", "file_name", ")", "# Get the content", "response", "=", "requests...
Stream an entire remote annotation file from physiobank Parameters ---------- file_name : str The name of the annotation file to be read. pb_dir : str The physiobank directory where the annotation file is located.
[ "Stream", "an", "entire", "remote", "annotation", "file", "from", "physiobank" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L162-L185
235,155
MIT-LCP/wfdb-python
wfdb/io/download.py
get_dbs
def get_dbs(): """ Get a list of all the Physiobank databases available. Examples -------- >>> dbs = get_dbs() """ url = posixpath.join(config.db_index_url, 'DBS') response = requests.get(url) dbs = response.content.decode('ascii').splitlines() dbs = [re.sub('\t{2,}', '\t', line).split('\t') for line in dbs] return dbs
python
def get_dbs(): url = posixpath.join(config.db_index_url, 'DBS') response = requests.get(url) dbs = response.content.decode('ascii').splitlines() dbs = [re.sub('\t{2,}', '\t', line).split('\t') for line in dbs] return dbs
[ "def", "get_dbs", "(", ")", ":", "url", "=", "posixpath", ".", "join", "(", "config", ".", "db_index_url", ",", "'DBS'", ")", "response", "=", "requests", ".", "get", "(", "url", ")", "dbs", "=", "response", ".", "content", ".", "decode", "(", "'asci...
Get a list of all the Physiobank databases available. Examples -------- >>> dbs = get_dbs()
[ "Get", "a", "list", "of", "all", "the", "Physiobank", "databases", "available", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L188-L203
235,156
MIT-LCP/wfdb-python
wfdb/io/download.py
get_record_list
def get_record_list(db_dir, records='all'): """ Get a list of records belonging to a database. Parameters ---------- db_dir : str The database directory, usually the same as the database slug. The location to look for a RECORDS file. records : list, optional A Option used when this function acts as a helper function. Leave as default 'all' to get all records. Examples -------- >>> wfdb.get_record_list('mitdb') """ # Full url physiobank database db_url = posixpath.join(config.db_index_url, db_dir) # Check for a RECORDS file if records == 'all': response = requests.get(posixpath.join(db_url, 'RECORDS')) if response.status_code == 404: raise ValueError('The database %s has no WFDB files to download' % db_url) # Get each line as a string record_list = response.content.decode('ascii').splitlines() # Otherwise the records are input manually else: record_list = records return record_list
python
def get_record_list(db_dir, records='all'): # Full url physiobank database db_url = posixpath.join(config.db_index_url, db_dir) # Check for a RECORDS file if records == 'all': response = requests.get(posixpath.join(db_url, 'RECORDS')) if response.status_code == 404: raise ValueError('The database %s has no WFDB files to download' % db_url) # Get each line as a string record_list = response.content.decode('ascii').splitlines() # Otherwise the records are input manually else: record_list = records return record_list
[ "def", "get_record_list", "(", "db_dir", ",", "records", "=", "'all'", ")", ":", "# Full url physiobank database", "db_url", "=", "posixpath", ".", "join", "(", "config", ".", "db_index_url", ",", "db_dir", ")", "# Check for a RECORDS file", "if", "records", "==",...
Get a list of records belonging to a database. Parameters ---------- db_dir : str The database directory, usually the same as the database slug. The location to look for a RECORDS file. records : list, optional A Option used when this function acts as a helper function. Leave as default 'all' to get all records. Examples -------- >>> wfdb.get_record_list('mitdb')
[ "Get", "a", "list", "of", "records", "belonging", "to", "a", "database", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L208-L241
235,157
MIT-LCP/wfdb-python
wfdb/io/download.py
make_local_dirs
def make_local_dirs(dl_dir, dl_inputs, keep_subdirs): """ Make any required local directories to prepare for downloading """ # Make the local download dir if it doesn't exist if not os.path.isdir(dl_dir): os.makedirs(dl_dir) print('Created local base download directory: %s' % dl_dir) # Create all required local subdirectories # This must be out of dl_pb_file to # avoid clash in multiprocessing if keep_subdirs: dl_dirs = set([os.path.join(dl_dir, d[1]) for d in dl_inputs]) for d in dl_dirs: if not os.path.isdir(d): os.makedirs(d) return
python
def make_local_dirs(dl_dir, dl_inputs, keep_subdirs): # Make the local download dir if it doesn't exist if not os.path.isdir(dl_dir): os.makedirs(dl_dir) print('Created local base download directory: %s' % dl_dir) # Create all required local subdirectories # This must be out of dl_pb_file to # avoid clash in multiprocessing if keep_subdirs: dl_dirs = set([os.path.join(dl_dir, d[1]) for d in dl_inputs]) for d in dl_dirs: if not os.path.isdir(d): os.makedirs(d) return
[ "def", "make_local_dirs", "(", "dl_dir", ",", "dl_inputs", ",", "keep_subdirs", ")", ":", "# Make the local download dir if it doesn't exist", "if", "not", "os", ".", "path", ".", "isdir", "(", "dl_dir", ")", ":", "os", ".", "makedirs", "(", "dl_dir", ")", "pr...
Make any required local directories to prepare for downloading
[ "Make", "any", "required", "local", "directories", "to", "prepare", "for", "downloading" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L277-L294
235,158
MIT-LCP/wfdb-python
wfdb/io/download.py
dl_pb_file
def dl_pb_file(inputs): """ Download a file from physiobank. The input args are to be unpacked for the use of multiprocessing map, because python2 doesn't have starmap... """ basefile, subdir, db, dl_dir, keep_subdirs, overwrite = inputs # Full url of file url = posixpath.join(config.db_index_url, db, subdir, basefile) # Supposed size of the file remote_file_size = _remote_file_size(url) # Figure out where the file should be locally if keep_subdirs: dldir = os.path.join(dl_dir, subdir) else: dldir = dl_dir local_file = os.path.join(dldir, basefile) # The file exists locally. if os.path.isfile(local_file): # Redownload regardless if overwrite: dl_full_file(url, local_file) # Process accordingly. else: local_file_size = os.path.getsize(local_file) # Local file is smaller than it should be. Append it. if local_file_size < remote_file_size: print('Detected partially downloaded file: %s Appending file...' % local_file) headers = {"Range": "bytes="+str(local_file_size)+"-", 'Accept-Encoding': '*'} r = requests.get(url, headers=headers, stream=True) print('headers: ', headers) print('r content length: ', len(r.content)) with open(local_file, 'ba') as writefile: writefile.write(r.content) print('Done appending.') # Local file is larger than it should be. Redownload. elif local_file_size > remote_file_size: dl_full_file(url, local_file) # If they're the same size, do nothing. # The file doesn't exist. Download it. else: dl_full_file(url, local_file) return
python
def dl_pb_file(inputs): basefile, subdir, db, dl_dir, keep_subdirs, overwrite = inputs # Full url of file url = posixpath.join(config.db_index_url, db, subdir, basefile) # Supposed size of the file remote_file_size = _remote_file_size(url) # Figure out where the file should be locally if keep_subdirs: dldir = os.path.join(dl_dir, subdir) else: dldir = dl_dir local_file = os.path.join(dldir, basefile) # The file exists locally. if os.path.isfile(local_file): # Redownload regardless if overwrite: dl_full_file(url, local_file) # Process accordingly. else: local_file_size = os.path.getsize(local_file) # Local file is smaller than it should be. Append it. if local_file_size < remote_file_size: print('Detected partially downloaded file: %s Appending file...' % local_file) headers = {"Range": "bytes="+str(local_file_size)+"-", 'Accept-Encoding': '*'} r = requests.get(url, headers=headers, stream=True) print('headers: ', headers) print('r content length: ', len(r.content)) with open(local_file, 'ba') as writefile: writefile.write(r.content) print('Done appending.') # Local file is larger than it should be. Redownload. elif local_file_size > remote_file_size: dl_full_file(url, local_file) # If they're the same size, do nothing. # The file doesn't exist. Download it. else: dl_full_file(url, local_file) return
[ "def", "dl_pb_file", "(", "inputs", ")", ":", "basefile", ",", "subdir", ",", "db", ",", "dl_dir", ",", "keep_subdirs", ",", "overwrite", "=", "inputs", "# Full url of file", "url", "=", "posixpath", ".", "join", "(", "config", ".", "db_index_url", ",", "d...
Download a file from physiobank. The input args are to be unpacked for the use of multiprocessing map, because python2 doesn't have starmap...
[ "Download", "a", "file", "from", "physiobank", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L297-L349
235,159
MIT-LCP/wfdb-python
wfdb/io/download.py
dl_full_file
def dl_full_file(url, save_file_name): """ Download a file. No checks are performed. Parameters ---------- url : str The url of the file to download save_file_name : str The name to save the file as """ response = requests.get(url) with open(save_file_name, 'wb') as writefile: writefile.write(response.content) return
python
def dl_full_file(url, save_file_name): response = requests.get(url) with open(save_file_name, 'wb') as writefile: writefile.write(response.content) return
[ "def", "dl_full_file", "(", "url", ",", "save_file_name", ")", ":", "response", "=", "requests", ".", "get", "(", "url", ")", "with", "open", "(", "save_file_name", ",", "'wb'", ")", "as", "writefile", ":", "writefile", ".", "write", "(", "response", "."...
Download a file. No checks are performed. Parameters ---------- url : str The url of the file to download save_file_name : str The name to save the file as
[ "Download", "a", "file", ".", "No", "checks", "are", "performed", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L352-L368
235,160
MIT-LCP/wfdb-python
wfdb/io/download.py
dl_files
def dl_files(db, dl_dir, files, keep_subdirs=True, overwrite=False): """ Download specified files from a Physiobank database. Parameters ---------- db : str The Physiobank database directory to download. eg. For database: 'http://physionet.org/physiobank/database/mitdb', db='mitdb'. dl_dir : str The full local directory path in which to download the files. files : list A list of strings specifying the file names to download relative to the database base directory. keep_subdirs : bool, optional Whether to keep the relative subdirectories of downloaded files as they are organized in Physiobank (True), or to download all files into the same base directory (False). overwrite : bool, optional If True, all files will be redownloaded regardless. If False, existing files with the same name and relative subdirectory will be checked. If the local file is the same size as the online file, the download is skipped. If the local file is larger, it will be deleted and the file will be redownloaded. If the local file is smaller, the file will be assumed to be partially downloaded and the remaining bytes will be downloaded and appended. Examples -------- >>> wfdb.dl_files('ahadb', os.getcwd(), ['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea', 'data/001a.dat']) """ # Full url physiobank database db_url = posixpath.join(config.db_index_url, db) # Check if the database is valid response = requests.get(db_url) response.raise_for_status() # Construct the urls to download dl_inputs = [(os.path.split(file)[1], os.path.split(file)[0], db, dl_dir, keep_subdirs, overwrite) for file in files] # Make any required local directories make_local_dirs(dl_dir, dl_inputs, keep_subdirs) print('Downloading files...') # Create multiple processes to download files. # Limit to 2 connections to avoid overloading the server pool = multiprocessing.Pool(processes=2) pool.map(dl_pb_file, dl_inputs) print('Finished downloading files') return
python
def dl_files(db, dl_dir, files, keep_subdirs=True, overwrite=False): # Full url physiobank database db_url = posixpath.join(config.db_index_url, db) # Check if the database is valid response = requests.get(db_url) response.raise_for_status() # Construct the urls to download dl_inputs = [(os.path.split(file)[1], os.path.split(file)[0], db, dl_dir, keep_subdirs, overwrite) for file in files] # Make any required local directories make_local_dirs(dl_dir, dl_inputs, keep_subdirs) print('Downloading files...') # Create multiple processes to download files. # Limit to 2 connections to avoid overloading the server pool = multiprocessing.Pool(processes=2) pool.map(dl_pb_file, dl_inputs) print('Finished downloading files') return
[ "def", "dl_files", "(", "db", ",", "dl_dir", ",", "files", ",", "keep_subdirs", "=", "True", ",", "overwrite", "=", "False", ")", ":", "# Full url physiobank database", "db_url", "=", "posixpath", ".", "join", "(", "config", ".", "db_index_url", ",", "db", ...
Download specified files from a Physiobank database. Parameters ---------- db : str The Physiobank database directory to download. eg. For database: 'http://physionet.org/physiobank/database/mitdb', db='mitdb'. dl_dir : str The full local directory path in which to download the files. files : list A list of strings specifying the file names to download relative to the database base directory. keep_subdirs : bool, optional Whether to keep the relative subdirectories of downloaded files as they are organized in Physiobank (True), or to download all files into the same base directory (False). overwrite : bool, optional If True, all files will be redownloaded regardless. If False, existing files with the same name and relative subdirectory will be checked. If the local file is the same size as the online file, the download is skipped. If the local file is larger, it will be deleted and the file will be redownloaded. If the local file is smaller, the file will be assumed to be partially downloaded and the remaining bytes will be downloaded and appended. Examples -------- >>> wfdb.dl_files('ahadb', os.getcwd(), ['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea', 'data/001a.dat'])
[ "Download", "specified", "files", "from", "a", "Physiobank", "database", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/download.py#L371-L425
235,161
MIT-LCP/wfdb-python
wfdb/io/annotation.py
label_triplets_to_df
def label_triplets_to_df(triplets): """ Get a pd dataframe from a tuple triplets used to define annotation labels. The triplets should come in the form: (label_store, symbol, description) """ label_df = pd.DataFrame({'label_store':np.array([t[0] for t in triplets], dtype='int'), 'symbol':[t[1] for t in triplets], 'description':[t[2] for t in triplets]}) label_df.set_index(label_df['label_store'].values, inplace=True) label_df = label_df[list(ann_label_fields)] return label_df
python
def label_triplets_to_df(triplets): label_df = pd.DataFrame({'label_store':np.array([t[0] for t in triplets], dtype='int'), 'symbol':[t[1] for t in triplets], 'description':[t[2] for t in triplets]}) label_df.set_index(label_df['label_store'].values, inplace=True) label_df = label_df[list(ann_label_fields)] return label_df
[ "def", "label_triplets_to_df", "(", "triplets", ")", ":", "label_df", "=", "pd", ".", "DataFrame", "(", "{", "'label_store'", ":", "np", ".", "array", "(", "[", "t", "[", "0", "]", "for", "t", "in", "triplets", "]", ",", "dtype", "=", "'int'", ")", ...
Get a pd dataframe from a tuple triplets used to define annotation labels. The triplets should come in the form: (label_store, symbol, description)
[ "Get", "a", "pd", "dataframe", "from", "a", "tuple", "triplets", "used", "to", "define", "annotation", "labels", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L925-L942
235,162
MIT-LCP/wfdb-python
wfdb/io/annotation.py
wrann
def wrann(record_name, extension, sample, symbol=None, subtype=None, chan=None, num=None, aux_note=None, label_store=None, fs=None, custom_labels=None, write_dir=''): """ Write a WFDB annotation file. Specify at least the following: - The record name of the WFDB record (record_name) - The annotation file extension (extension) - The annotation locations in samples relative to the beginning of the record (sample) - Either the numerical values used to store the labels (`label_store`), or more commonly, the display symbols of each label (`symbol`). Parameters ---------- record_name : str The string name of the WFDB record to be written (without any file extensions). extension : str The string annotation file extension. sample : numpy array A numpy array containing the annotation locations in samples relative to the beginning of the record. symbol : list, or numpy array, optional The symbols used to display the annotation labels. List or numpy array. If this field is present, `label_store` must not be present. subtype : numpy array, optional A numpy array containing the marked class/category of each annotation. chan : numpy array, optional A numpy array containing the signal channel associated with each annotation. num : numpy array, optional A numpy array containing the labelled annotation number for each annotation. aux_note : list, optional A list containing the auxiliary information string (or None for annotations without notes) for each annotation. label_store : numpy array, optional A numpy array containing the integer values used to store the annotation labels. If this field is present, `symbol` must not be present. fs : int, or float, optional The numerical sampling frequency of the record to be written to the file. custom_labels : pandas dataframe, optional The map of custom defined annotation labels used for this annotation, in addition to the standard WFDB annotation labels. 
Custom labels are defined by two or three fields: - The integer values used to store custom annotation labels in the file (optional) - Their short display symbols - Their long descriptions. This input argument may come in four formats: 1. A pandas.DataFrame object with columns: ['label_store', 'symbol', 'description'] 2. A pandas.DataFrame object with columns: ['symbol', 'description'] If this option is chosen, label_store values are automatically chosen. 3. A list or tuple of tuple triplets, with triplet elements representing: (label_store, symbol, description). 4. A list or tuple of tuple pairs, with pair elements representing: (symbol, description). If this option is chosen, label_store values are automatically chosen. If the `label_store` field is given for this function, and `custom_labels` is defined, `custom_labels` must contain `label_store` in its mapping. ie. it must come in format 1 or 3 above. write_dir : str, optional The directory in which to write the annotation file Notes ----- This is a gateway function, written as a simple way to write WFDB annotation files without needing to explicity create an Annotation object. You may also create an Annotation object, manually set its attributes, and call its `wrann` instance method. Each annotation stored in a WFDB annotation file contains a sample field and a label field. All other fields may or may not be present. 
Examples -------- >>> # Read an annotation as an Annotation object >>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb') >>> # Write a copy of the annotation file >>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol) """ # Create Annotation object annotation = Annotation(record_name=record_name, extension=extension, sample=sample, symbol=symbol, subtype=subtype, chan=chan, num=num, aux_note=aux_note, label_store=label_store, fs=fs, custom_labels=custom_labels) # Find out which input field describes the labels if symbol is None: if label_store is None: raise Exception("Either the 'symbol' field or the 'label_store' field must be set") else: if label_store is None: annotation.sym_to_aux() else: raise Exception("Only one of the 'symbol' and 'label_store' fields may be input, for describing annotation labels") # Perform field checks and write the annotation file annotation.wrann(write_fs=True, write_dir=write_dir)
python
def wrann(record_name, extension, sample, symbol=None, subtype=None, chan=None, num=None, aux_note=None, label_store=None, fs=None, custom_labels=None, write_dir=''): # Create Annotation object annotation = Annotation(record_name=record_name, extension=extension, sample=sample, symbol=symbol, subtype=subtype, chan=chan, num=num, aux_note=aux_note, label_store=label_store, fs=fs, custom_labels=custom_labels) # Find out which input field describes the labels if symbol is None: if label_store is None: raise Exception("Either the 'symbol' field or the 'label_store' field must be set") else: if label_store is None: annotation.sym_to_aux() else: raise Exception("Only one of the 'symbol' and 'label_store' fields may be input, for describing annotation labels") # Perform field checks and write the annotation file annotation.wrann(write_fs=True, write_dir=write_dir)
[ "def", "wrann", "(", "record_name", ",", "extension", ",", "sample", ",", "symbol", "=", "None", ",", "subtype", "=", "None", ",", "chan", "=", "None", ",", "num", "=", "None", ",", "aux_note", "=", "None", ",", "label_store", "=", "None", ",", "fs",...
Write a WFDB annotation file. Specify at least the following: - The record name of the WFDB record (record_name) - The annotation file extension (extension) - The annotation locations in samples relative to the beginning of the record (sample) - Either the numerical values used to store the labels (`label_store`), or more commonly, the display symbols of each label (`symbol`). Parameters ---------- record_name : str The string name of the WFDB record to be written (without any file extensions). extension : str The string annotation file extension. sample : numpy array A numpy array containing the annotation locations in samples relative to the beginning of the record. symbol : list, or numpy array, optional The symbols used to display the annotation labels. List or numpy array. If this field is present, `label_store` must not be present. subtype : numpy array, optional A numpy array containing the marked class/category of each annotation. chan : numpy array, optional A numpy array containing the signal channel associated with each annotation. num : numpy array, optional A numpy array containing the labelled annotation number for each annotation. aux_note : list, optional A list containing the auxiliary information string (or None for annotations without notes) for each annotation. label_store : numpy array, optional A numpy array containing the integer values used to store the annotation labels. If this field is present, `symbol` must not be present. fs : int, or float, optional The numerical sampling frequency of the record to be written to the file. custom_labels : pandas dataframe, optional The map of custom defined annotation labels used for this annotation, in addition to the standard WFDB annotation labels. Custom labels are defined by two or three fields: - The integer values used to store custom annotation labels in the file (optional) - Their short display symbols - Their long descriptions. This input argument may come in four formats: 1. 
A pandas.DataFrame object with columns: ['label_store', 'symbol', 'description'] 2. A pandas.DataFrame object with columns: ['symbol', 'description'] If this option is chosen, label_store values are automatically chosen. 3. A list or tuple of tuple triplets, with triplet elements representing: (label_store, symbol, description). 4. A list or tuple of tuple pairs, with pair elements representing: (symbol, description). If this option is chosen, label_store values are automatically chosen. If the `label_store` field is given for this function, and `custom_labels` is defined, `custom_labels` must contain `label_store` in its mapping. ie. it must come in format 1 or 3 above. write_dir : str, optional The directory in which to write the annotation file Notes ----- This is a gateway function, written as a simple way to write WFDB annotation files without needing to explicity create an Annotation object. You may also create an Annotation object, manually set its attributes, and call its `wrann` instance method. Each annotation stored in a WFDB annotation file contains a sample field and a label field. All other fields may or may not be present. Examples -------- >>> # Read an annotation as an Annotation object >>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb') >>> # Write a copy of the annotation file >>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)
[ "Write", "a", "WFDB", "annotation", "file", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L1057-L1168
235,163
MIT-LCP/wfdb-python
wfdb/io/annotation.py
rdann
def rdann(record_name, extension, sampfrom=0, sampto=None, shift_samps=False, pb_dir=None, return_label_elements=['symbol'], summarize_labels=False): """ Read a WFDB annotation file record_name.extension and return an Annotation object. Parameters ---------- record_name : str The record name of the WFDB annotation file. ie. for file '100.atr', record_name='100'. extension : str The annotatator extension of the annotation file. ie. for file '100.atr', extension='atr'. sampfrom : int, optional The minimum sample number for annotations to be returned. sampto : int, optional The maximum sample number for annotations to be returned. shift_samps : bool, optional Specifies whether to return the sample indices relative to `sampfrom` (True), or sample 0 (False). pb_dir : str, optional Option used to stream data from Physiobank. The Physiobank database directory from which to find the required annotation file. eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb': pb_dir='mitdb'. return_label_elements : list, optional The label elements that are to be returned from reading the annotation file. A list with at least one of the following options: 'symbol', 'label_store', 'description'. summarize_labels : bool, optional If True, assign a summary table of the set of annotation labels contained in the file to the 'contained_labels' attribute of the returned object. This table will contain the columns: ['label_store', 'symbol', 'description', 'n_occurrences'] Returns ------- annotation : Annotation The Annotation object. Call help(wfdb.Annotation) for the attribute descriptions. Notes ----- For every annotation sample, the annotation file explictly stores the 'sample' and 'symbol' fields, but not necessarily the others. When reading annotation files using this function, fields which are not stored in the file will either take their default values of 0 or None, or will be carried over from their previous values if any. 
Examples -------- >>> ann = wfdb.rdann('sample-data/100', 'atr', sampto=300000) """ return_label_elements = check_read_inputs(sampfrom, sampto, return_label_elements) # Read the file in byte pairs filebytes = load_byte_pairs(record_name, extension, pb_dir) # Get wfdb annotation fields from the file bytes (sample, label_store, subtype, chan, num, aux_note) = proc_ann_bytes(filebytes, sampto) # Get the indices of annotations that hold definition information about # the entire annotation file, and other empty annotations to be removed. potential_definition_inds, rm_inds = get_special_inds(sample, label_store, aux_note) # Try to extract information describing the annotation file (fs, custom_labels) = interpret_defintion_annotations(potential_definition_inds, aux_note) # Remove annotations that do not store actual sample and label information (sample, label_store, subtype, chan, num, aux_note) = rm_empty_indices(rm_inds, sample, label_store, subtype, chan, num, aux_note) # Convert lists to numpy arrays dtype='int' (sample, label_store, subtype, chan, num) = lists_to_int_arrays(sample, label_store, subtype, chan, num) # Try to get fs from the header file if it is not contained in the # annotation file if fs is None: try: rec = record.rdheader(record_name, pb_dir) fs = rec.fs except: pass # Create the annotation object annotation = Annotation(record_name=os.path.split(record_name)[1], extension=extension, sample=sample, label_store=label_store, subtype=subtype, chan=chan, num=num, aux_note=aux_note, fs=fs, custom_labels=custom_labels) # Apply the desired index range if sampfrom > 0 and sampto is not None: annotation.apply_range(sampfrom=sampfrom, sampto=sampto) # If specified, obtain annotation samples relative to the starting # index if shift_samps and len(sample) > 0 and sampfrom: annotation.sample = annotation.sample - sampfrom # Get the set of unique label definitions contained in this # annotation if summarize_labels: annotation.get_contained_labels(inplace=True) # 
Set/unset the desired label values annotation.set_label_elements(return_label_elements) return annotation
python
def rdann(record_name, extension, sampfrom=0, sampto=None, shift_samps=False, pb_dir=None, return_label_elements=['symbol'], summarize_labels=False): return_label_elements = check_read_inputs(sampfrom, sampto, return_label_elements) # Read the file in byte pairs filebytes = load_byte_pairs(record_name, extension, pb_dir) # Get wfdb annotation fields from the file bytes (sample, label_store, subtype, chan, num, aux_note) = proc_ann_bytes(filebytes, sampto) # Get the indices of annotations that hold definition information about # the entire annotation file, and other empty annotations to be removed. potential_definition_inds, rm_inds = get_special_inds(sample, label_store, aux_note) # Try to extract information describing the annotation file (fs, custom_labels) = interpret_defintion_annotations(potential_definition_inds, aux_note) # Remove annotations that do not store actual sample and label information (sample, label_store, subtype, chan, num, aux_note) = rm_empty_indices(rm_inds, sample, label_store, subtype, chan, num, aux_note) # Convert lists to numpy arrays dtype='int' (sample, label_store, subtype, chan, num) = lists_to_int_arrays(sample, label_store, subtype, chan, num) # Try to get fs from the header file if it is not contained in the # annotation file if fs is None: try: rec = record.rdheader(record_name, pb_dir) fs = rec.fs except: pass # Create the annotation object annotation = Annotation(record_name=os.path.split(record_name)[1], extension=extension, sample=sample, label_store=label_store, subtype=subtype, chan=chan, num=num, aux_note=aux_note, fs=fs, custom_labels=custom_labels) # Apply the desired index range if sampfrom > 0 and sampto is not None: annotation.apply_range(sampfrom=sampfrom, sampto=sampto) # If specified, obtain annotation samples relative to the starting # index if shift_samps and len(sample) > 0 and sampfrom: annotation.sample = annotation.sample - sampfrom # Get the set of unique label definitions contained in this # annotation if 
summarize_labels: annotation.get_contained_labels(inplace=True) # Set/unset the desired label values annotation.set_label_elements(return_label_elements) return annotation
[ "def", "rdann", "(", "record_name", ",", "extension", ",", "sampfrom", "=", "0", ",", "sampto", "=", "None", ",", "shift_samps", "=", "False", ",", "pb_dir", "=", "None", ",", "return_label_elements", "=", "[", "'symbol'", "]", ",", "summarize_labels", "="...
Read a WFDB annotation file record_name.extension and return an Annotation object. Parameters ---------- record_name : str The record name of the WFDB annotation file. ie. for file '100.atr', record_name='100'. extension : str The annotatator extension of the annotation file. ie. for file '100.atr', extension='atr'. sampfrom : int, optional The minimum sample number for annotations to be returned. sampto : int, optional The maximum sample number for annotations to be returned. shift_samps : bool, optional Specifies whether to return the sample indices relative to `sampfrom` (True), or sample 0 (False). pb_dir : str, optional Option used to stream data from Physiobank. The Physiobank database directory from which to find the required annotation file. eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb': pb_dir='mitdb'. return_label_elements : list, optional The label elements that are to be returned from reading the annotation file. A list with at least one of the following options: 'symbol', 'label_store', 'description'. summarize_labels : bool, optional If True, assign a summary table of the set of annotation labels contained in the file to the 'contained_labels' attribute of the returned object. This table will contain the columns: ['label_store', 'symbol', 'description', 'n_occurrences'] Returns ------- annotation : Annotation The Annotation object. Call help(wfdb.Annotation) for the attribute descriptions. Notes ----- For every annotation sample, the annotation file explictly stores the 'sample' and 'symbol' fields, but not necessarily the others. When reading annotation files using this function, fields which are not stored in the file will either take their default values of 0 or None, or will be carried over from their previous values if any. Examples -------- >>> ann = wfdb.rdann('sample-data/100', 'atr', sampto=300000)
[ "Read", "a", "WFDB", "annotation", "file", "record_name", ".", "extension", "and", "return", "an", "Annotation", "object", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L1196-L1315
235,164
MIT-LCP/wfdb-python
wfdb/io/annotation.py
update_extra_fields
def update_extra_fields(subtype, chan, num, aux_note, update): """ Update the field if the current annotation did not provide a value. - aux_note and sub are set to default values if missing. - chan and num copy over previous value if missing. """ if update['subtype']: subtype.append(0) if update['chan']: if chan == []: chan.append(0) else: chan.append(chan[-1]) if update['num']: if num == []: num.append(0) else: num.append(num[-1]) if update['aux_note']: aux_note.append('') return subtype, chan, num, aux_note
python
def update_extra_fields(subtype, chan, num, aux_note, update): if update['subtype']: subtype.append(0) if update['chan']: if chan == []: chan.append(0) else: chan.append(chan[-1]) if update['num']: if num == []: num.append(0) else: num.append(num[-1]) if update['aux_note']: aux_note.append('') return subtype, chan, num, aux_note
[ "def", "update_extra_fields", "(", "subtype", ",", "chan", ",", "num", ",", "aux_note", ",", "update", ")", ":", "if", "update", "[", "'subtype'", "]", ":", "subtype", ".", "append", "(", "0", ")", "if", "update", "[", "'chan'", "]", ":", "if", "chan...
Update the field if the current annotation did not provide a value. - aux_note and sub are set to default values if missing. - chan and num copy over previous value if missing.
[ "Update", "the", "field", "if", "the", "current", "annotation", "did", "not", "provide", "a", "value", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L1471-L1497
235,165
MIT-LCP/wfdb-python
wfdb/io/annotation.py
get_special_inds
def get_special_inds(sample, label_store, aux_note): """ Get the indices of annotations that hold definition information about the entire annotation file, and other empty annotations to be removed. Note: There is no need to deal with SKIP annotations (label_store=59) which were already dealt with in proc_core_fields and hence not included here. """ s0_inds = np.where(sample == np.int64(0))[0] note_inds = np.where(label_store == np.int64(22))[0] # sample = 0 with aux_note means there should be an fs or custom label definition. # Either way, they are to be removed. potential_definition_inds = set(s0_inds).intersection(note_inds) # Other indices which are not actual annotations. notann_inds = np.where(label_store == np.int64(0))[0] rm_inds = potential_definition_inds.union(set(notann_inds)) return potential_definition_inds, rm_inds
python
def get_special_inds(sample, label_store, aux_note): s0_inds = np.where(sample == np.int64(0))[0] note_inds = np.where(label_store == np.int64(22))[0] # sample = 0 with aux_note means there should be an fs or custom label definition. # Either way, they are to be removed. potential_definition_inds = set(s0_inds).intersection(note_inds) # Other indices which are not actual annotations. notann_inds = np.where(label_store == np.int64(0))[0] rm_inds = potential_definition_inds.union(set(notann_inds)) return potential_definition_inds, rm_inds
[ "def", "get_special_inds", "(", "sample", ",", "label_store", ",", "aux_note", ")", ":", "s0_inds", "=", "np", ".", "where", "(", "sample", "==", "np", ".", "int64", "(", "0", ")", ")", "[", "0", "]", "note_inds", "=", "np", ".", "where", "(", "lab...
Get the indices of annotations that hold definition information about the entire annotation file, and other empty annotations to be removed. Note: There is no need to deal with SKIP annotations (label_store=59) which were already dealt with in proc_core_fields and hence not included here.
[ "Get", "the", "indices", "of", "annotations", "that", "hold", "definition", "information", "about", "the", "entire", "annotation", "file", "and", "other", "empty", "annotations", "to", "be", "removed", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L1504-L1526
235,166
MIT-LCP/wfdb-python
wfdb/io/annotation.py
rm_empty_indices
def rm_empty_indices(*args): """ Remove unwanted list indices. First argument is the list of indices to remove. Other elements are the lists to trim. """ rm_inds = args[0] if not rm_inds: return args[1:] keep_inds = [i for i in range(len(args[1])) if i not in rm_inds] return [[a[i] for i in keep_inds] for a in args[1:]]
python
def rm_empty_indices(*args): rm_inds = args[0] if not rm_inds: return args[1:] keep_inds = [i for i in range(len(args[1])) if i not in rm_inds] return [[a[i] for i in keep_inds] for a in args[1:]]
[ "def", "rm_empty_indices", "(", "*", "args", ")", ":", "rm_inds", "=", "args", "[", "0", "]", "if", "not", "rm_inds", ":", "return", "args", "[", "1", ":", "]", "keep_inds", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "args", "[", ...
Remove unwanted list indices. First argument is the list of indices to remove. Other elements are the lists to trim.
[ "Remove", "unwanted", "list", "indices", ".", "First", "argument", "is", "the", "list", "of", "indices", "to", "remove", ".", "Other", "elements", "are", "the", "lists", "to", "trim", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L1567-L1580
235,167
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.apply_range
def apply_range(self, sampfrom=0, sampto=None): """ Filter the annotation attributes to keep only items between the desired sample values """ sampto = sampto or self.sample[-1] kept_inds = np.intersect1d(np.where(self.sample>=sampfrom), np.where(self.sample<=sampto)) for field in ['sample', 'label_store', 'subtype', 'chan', 'num']: setattr(self, field, getattr(self, field)[kept_inds]) self.aux_note = [self.aux_note[i] for i in kept_inds] self.ann_len = len(self.sample)
python
def apply_range(self, sampfrom=0, sampto=None): sampto = sampto or self.sample[-1] kept_inds = np.intersect1d(np.where(self.sample>=sampfrom), np.where(self.sample<=sampto)) for field in ['sample', 'label_store', 'subtype', 'chan', 'num']: setattr(self, field, getattr(self, field)[kept_inds]) self.aux_note = [self.aux_note[i] for i in kept_inds] self.ann_len = len(self.sample)
[ "def", "apply_range", "(", "self", ",", "sampfrom", "=", "0", ",", "sampto", "=", "None", ")", ":", "sampto", "=", "sampto", "or", "self", ".", "sample", "[", "-", "1", "]", "kept_inds", "=", "np", ".", "intersect1d", "(", "np", ".", "where", "(", ...
Filter the annotation attributes to keep only items between the desired sample values
[ "Filter", "the", "annotation", "attributes", "to", "keep", "only", "items", "between", "the", "desired", "sample", "values" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L134-L151
235,168
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.wrann
def wrann(self, write_fs=False, write_dir=''): """ Write a WFDB annotation file from this object. Parameters ---------- write_fs : bool, optional Whether to write the `fs` attribute to the file. """ for field in ['record_name', 'extension']: if getattr(self, field) is None: raise Exception('Missing required field for writing annotation file: ',field) present_label_fields = self.get_label_fields() if not present_label_fields: raise Exception('At least one annotation label field is required to write the annotation: ', ann_label_fields) # Check the validity of individual fields self.check_fields() # Standardize the format of the custom_labels field self.standardize_custom_labels() # Create the label map used in this annotaion self.create_label_map() # Check the cohesion of fields self.check_field_cohesion(present_label_fields) # Calculate the label_store field if necessary if 'label_store' not in present_label_fields: self.convert_label_attribute(source_field=present_label_fields[0], target_field='label_store') # Write the header file using the specified fields self.wr_ann_file(write_fs=write_fs, write_dir=write_dir) return
python
def wrann(self, write_fs=False, write_dir=''): for field in ['record_name', 'extension']: if getattr(self, field) is None: raise Exception('Missing required field for writing annotation file: ',field) present_label_fields = self.get_label_fields() if not present_label_fields: raise Exception('At least one annotation label field is required to write the annotation: ', ann_label_fields) # Check the validity of individual fields self.check_fields() # Standardize the format of the custom_labels field self.standardize_custom_labels() # Create the label map used in this annotaion self.create_label_map() # Check the cohesion of fields self.check_field_cohesion(present_label_fields) # Calculate the label_store field if necessary if 'label_store' not in present_label_fields: self.convert_label_attribute(source_field=present_label_fields[0], target_field='label_store') # Write the header file using the specified fields self.wr_ann_file(write_fs=write_fs, write_dir=write_dir) return
[ "def", "wrann", "(", "self", ",", "write_fs", "=", "False", ",", "write_dir", "=", "''", ")", ":", "for", "field", "in", "[", "'record_name'", ",", "'extension'", "]", ":", "if", "getattr", "(", "self", ",", "field", ")", "is", "None", ":", "raise", ...
Write a WFDB annotation file from this object. Parameters ---------- write_fs : bool, optional Whether to write the `fs` attribute to the file.
[ "Write", "a", "WFDB", "annotation", "file", "from", "this", "object", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L153-L191
235,169
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.get_label_fields
def get_label_fields(self): """ Get the present label fields in the object """ present_label_fields = [] for field in ann_label_fields: if getattr(self, field) is not None: present_label_fields.append(field) return present_label_fields
python
def get_label_fields(self): present_label_fields = [] for field in ann_label_fields: if getattr(self, field) is not None: present_label_fields.append(field) return present_label_fields
[ "def", "get_label_fields", "(", "self", ")", ":", "present_label_fields", "=", "[", "]", "for", "field", "in", "ann_label_fields", ":", "if", "getattr", "(", "self", ",", "field", ")", "is", "not", "None", ":", "present_label_fields", ".", "append", "(", "...
Get the present label fields in the object
[ "Get", "the", "present", "label", "fields", "in", "the", "object" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L193-L202
235,170
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.check_field_cohesion
def check_field_cohesion(self, present_label_fields): """ Check that the content and structure of different fields are consistent with one another. """ # Ensure all written annotation fields have the same length nannots = len(self.sample) for field in ['sample', 'num', 'subtype', 'chan', 'aux_note']+present_label_fields: if getattr(self, field) is not None: if len(getattr(self, field)) != nannots: raise ValueError("The lengths of the 'sample' and '"+field+"' fields do not match") # Ensure all label fields are defined by the label map. This has to be checked because # it is possible the user defined (or lack of) custom_labels does not capture all the # labels present. for field in present_label_fields: defined_values = self.__label_map__[field].values if set(getattr(self, field)) - set(defined_values) != set(): raise ValueError('\n'.join(['\nThe '+field+' field contains elements not encoded in the stardard WFDB annotation labels, or this object\'s custom_labels field', '- To see the standard WFDB annotation labels, call: show_ann_labels()', '- To transfer non-encoded symbol items into the aux_note field, call: self.sym_to_aux()', '- To define custom labels, set the custom_labels field as a list of tuple triplets with format: (label_store, symbol, description)'])) return
python
def check_field_cohesion(self, present_label_fields): # Ensure all written annotation fields have the same length nannots = len(self.sample) for field in ['sample', 'num', 'subtype', 'chan', 'aux_note']+present_label_fields: if getattr(self, field) is not None: if len(getattr(self, field)) != nannots: raise ValueError("The lengths of the 'sample' and '"+field+"' fields do not match") # Ensure all label fields are defined by the label map. This has to be checked because # it is possible the user defined (or lack of) custom_labels does not capture all the # labels present. for field in present_label_fields: defined_values = self.__label_map__[field].values if set(getattr(self, field)) - set(defined_values) != set(): raise ValueError('\n'.join(['\nThe '+field+' field contains elements not encoded in the stardard WFDB annotation labels, or this object\'s custom_labels field', '- To see the standard WFDB annotation labels, call: show_ann_labels()', '- To transfer non-encoded symbol items into the aux_note field, call: self.sym_to_aux()', '- To define custom labels, set the custom_labels field as a list of tuple triplets with format: (label_store, symbol, description)'])) return
[ "def", "check_field_cohesion", "(", "self", ",", "present_label_fields", ")", ":", "# Ensure all written annotation fields have the same length", "nannots", "=", "len", "(", "self", ".", "sample", ")", "for", "field", "in", "[", "'sample'", ",", "'num'", ",", "'subt...
Check that the content and structure of different fields are consistent with one another.
[ "Check", "that", "the", "content", "and", "structure", "of", "different", "fields", "are", "consistent", "with", "one", "another", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L361-L386
235,171
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.get_available_label_stores
def get_available_label_stores(self, usefield='tryall'): """ Get the label store values that may be used for writing this annotation. Available store values include: - the undefined values in the standard wfdb labels - the store values not used in the current annotation object. - the store values whose standard wfdb symbols/descriptions match those of the custom labels (if custom_labels exists) If 'usefield' is explicitly specified, the function will use that field to figure out available label stores. If 'usefield' is set to 'tryall', the function will choose one of the contained attributes by checking availability in the order: label_store, symbol, description """ # Figure out which field to use to get available labels stores. if usefield == 'tryall': if self.label_store is not None: usefield = 'label_store' elif self.symbol is not None: usefield = 'symbol' elif self.description is not None: usefield = 'description' else: raise ValueError('No label fields are defined. At least one of the following is required: ', ann_label_fields) return self.get_available_label_stores(usefield = usefield) # Use the explicitly stated field to get available stores. 
else: # If usefield == 'label_store', there are slightly fewer/different steps # compared to if it were another option contained_field = getattr(self, usefield) # Get the unused label_store values if usefield == 'label_store': unused_label_stores = set(ann_label_table['label_store'].values) - contained_field else: # the label_store values from the standard wfdb annotation labels # whose symbols are not contained in this annotation unused_field = set(ann_label_table[usefield].values) - contained_field unused_label_stores = ann_label_table.loc[ann_label_table[usefield] in unused_field, 'label_store'].values # Get the standard wfdb label_store values overwritten by the # custom_labels if any if self.custom_symbols is not None: custom_field = set(self.get_custom_label_attribute(usefield)) if usefield == 'label_store': overwritten_label_stores = set(custom_field).intersection(set(ann_label_table['label_store'])) else: overwritten_fields = set(custom_field).intersection(set(ann_label_table[usefield])) overwritten_label_stores = ann_label_table.loc[ann_label_table[usefield] in overwritten_fields, 'label_store'].values else: overwritten_label_stores = set() # The undefined values in the standard wfdb labels undefined_label_stores = self.get_undefined_label_stores() # Final available label stores = undefined + unused + overwritten available_label_stores = set(undefined_label_stores).union(set(unused_label_stores)).union(overwritten_label_stores) return available_label_stores
python
def get_available_label_stores(self, usefield='tryall'): # Figure out which field to use to get available labels stores. if usefield == 'tryall': if self.label_store is not None: usefield = 'label_store' elif self.symbol is not None: usefield = 'symbol' elif self.description is not None: usefield = 'description' else: raise ValueError('No label fields are defined. At least one of the following is required: ', ann_label_fields) return self.get_available_label_stores(usefield = usefield) # Use the explicitly stated field to get available stores. else: # If usefield == 'label_store', there are slightly fewer/different steps # compared to if it were another option contained_field = getattr(self, usefield) # Get the unused label_store values if usefield == 'label_store': unused_label_stores = set(ann_label_table['label_store'].values) - contained_field else: # the label_store values from the standard wfdb annotation labels # whose symbols are not contained in this annotation unused_field = set(ann_label_table[usefield].values) - contained_field unused_label_stores = ann_label_table.loc[ann_label_table[usefield] in unused_field, 'label_store'].values # Get the standard wfdb label_store values overwritten by the # custom_labels if any if self.custom_symbols is not None: custom_field = set(self.get_custom_label_attribute(usefield)) if usefield == 'label_store': overwritten_label_stores = set(custom_field).intersection(set(ann_label_table['label_store'])) else: overwritten_fields = set(custom_field).intersection(set(ann_label_table[usefield])) overwritten_label_stores = ann_label_table.loc[ann_label_table[usefield] in overwritten_fields, 'label_store'].values else: overwritten_label_stores = set() # The undefined values in the standard wfdb labels undefined_label_stores = self.get_undefined_label_stores() # Final available label stores = undefined + unused + overwritten available_label_stores = 
set(undefined_label_stores).union(set(unused_label_stores)).union(overwritten_label_stores) return available_label_stores
[ "def", "get_available_label_stores", "(", "self", ",", "usefield", "=", "'tryall'", ")", ":", "# Figure out which field to use to get available labels stores.", "if", "usefield", "==", "'tryall'", ":", "if", "self", ".", "label_store", "is", "not", "None", ":", "usefi...
Get the label store values that may be used for writing this annotation. Available store values include: - the undefined values in the standard wfdb labels - the store values not used in the current annotation object. - the store values whose standard wfdb symbols/descriptions match those of the custom labels (if custom_labels exists) If 'usefield' is explicitly specified, the function will use that field to figure out available label stores. If 'usefield' is set to 'tryall', the function will choose one of the contained attributes by checking availability in the order: label_store, symbol, description
[ "Get", "the", "label", "store", "values", "that", "may", "be", "used", "for", "writing", "this", "annotation", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L463-L527
235,172
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.get_custom_label_attribute
def get_custom_label_attribute(self, attribute): """ Get a list of the custom_labels attribute. ie. label_store, symbol, or description. The custom_labels variable could be in a number of formats """ if attribute not in ann_label_fields: raise ValueError('Invalid attribute specified') if isinstance(self.custom_labels, pd.DataFrame): if 'label_store' not in list(self.custom_labels): raise ValueError('label_store not defined in custom_labels') a = list(self.custom_labels[attribute].values) else: if len(self.custom_labels[0]) == 2: if attribute == 'label_store': raise ValueError('label_store not defined in custom_labels') elif attribute == 'symbol': a = [l[0] for l in self.custom_labels] elif attribute == 'description': a = [l[1] for l in self.custom_labels] else: if attribute == 'label_store': a = [l[0] for l in self.custom_labels] elif attribute == 'symbol': a = [l[1] for l in self.custom_labels] elif attribute == 'description': a = [l[2] for l in self.custom_labels] return a
python
def get_custom_label_attribute(self, attribute): if attribute not in ann_label_fields: raise ValueError('Invalid attribute specified') if isinstance(self.custom_labels, pd.DataFrame): if 'label_store' not in list(self.custom_labels): raise ValueError('label_store not defined in custom_labels') a = list(self.custom_labels[attribute].values) else: if len(self.custom_labels[0]) == 2: if attribute == 'label_store': raise ValueError('label_store not defined in custom_labels') elif attribute == 'symbol': a = [l[0] for l in self.custom_labels] elif attribute == 'description': a = [l[1] for l in self.custom_labels] else: if attribute == 'label_store': a = [l[0] for l in self.custom_labels] elif attribute == 'symbol': a = [l[1] for l in self.custom_labels] elif attribute == 'description': a = [l[2] for l in self.custom_labels] return a
[ "def", "get_custom_label_attribute", "(", "self", ",", "attribute", ")", ":", "if", "attribute", "not", "in", "ann_label_fields", ":", "raise", "ValueError", "(", "'Invalid attribute specified'", ")", "if", "isinstance", "(", "self", ".", "custom_labels", ",", "pd...
Get a list of the custom_labels attribute. ie. label_store, symbol, or description. The custom_labels variable could be in a number of formats
[ "Get", "a", "list", "of", "the", "custom_labels", "attribute", ".", "ie", ".", "label_store", "symbol", "or", "description", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L530-L562
235,173
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.create_label_map
def create_label_map(self, inplace=True): """ Creates mapping df based on ann_label_table and self.custom_labels. Table composed of entire WFDB standard annotation table, overwritten/appended with custom_labels if any. Sets __label_map__ attribute, or returns value. """ label_map = ann_label_table.copy() if self.custom_labels is not None: self.standardize_custom_labels() for i in self.custom_labels.index: label_map.loc[i] = self.custom_labels.loc[i] if inplace: self.__label_map__ = label_map else: return label_map
python
def create_label_map(self, inplace=True): label_map = ann_label_table.copy() if self.custom_labels is not None: self.standardize_custom_labels() for i in self.custom_labels.index: label_map.loc[i] = self.custom_labels.loc[i] if inplace: self.__label_map__ = label_map else: return label_map
[ "def", "create_label_map", "(", "self", ",", "inplace", "=", "True", ")", ":", "label_map", "=", "ann_label_table", ".", "copy", "(", ")", "if", "self", ".", "custom_labels", "is", "not", "None", ":", "self", ".", "standardize_custom_labels", "(", ")", "fo...
Creates mapping df based on ann_label_table and self.custom_labels. Table composed of entire WFDB standard annotation table, overwritten/appended with custom_labels if any. Sets __label_map__ attribute, or returns value.
[ "Creates", "mapping", "df", "based", "on", "ann_label_table", "and", "self", ".", "custom_labels", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L565-L583
235,174
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.wr_ann_file
def wr_ann_file(self, write_fs, write_dir=''): """ Calculate the bytes used to encode an annotation set and write them to an annotation file """ # Calculate the fs bytes to write if present and desired to write if write_fs: fs_bytes = self.calc_fs_bytes() else: fs_bytes = [] # Calculate the custom_labels bytes to write if present cl_bytes = self.calc_cl_bytes() # Calculate the core field bytes to write core_bytes = self.calc_core_bytes() # Mark the end of the special annotation types if needed if fs_bytes == [] and cl_bytes == []: end_special_bytes = [] else: end_special_bytes = [0, 236, 255, 255, 255, 255, 1, 0] # Write the file with open(os.path.join(write_dir, self.record_name+'.'+self.extension), 'wb') as f: # Combine all bytes to write: fs (if any), custom annotations (if any), main content, file terminator np.concatenate((fs_bytes, cl_bytes, end_special_bytes, core_bytes, np.array([0,0]))).astype('u1').tofile(f) return
python
def wr_ann_file(self, write_fs, write_dir=''): # Calculate the fs bytes to write if present and desired to write if write_fs: fs_bytes = self.calc_fs_bytes() else: fs_bytes = [] # Calculate the custom_labels bytes to write if present cl_bytes = self.calc_cl_bytes() # Calculate the core field bytes to write core_bytes = self.calc_core_bytes() # Mark the end of the special annotation types if needed if fs_bytes == [] and cl_bytes == []: end_special_bytes = [] else: end_special_bytes = [0, 236, 255, 255, 255, 255, 1, 0] # Write the file with open(os.path.join(write_dir, self.record_name+'.'+self.extension), 'wb') as f: # Combine all bytes to write: fs (if any), custom annotations (if any), main content, file terminator np.concatenate((fs_bytes, cl_bytes, end_special_bytes, core_bytes, np.array([0,0]))).astype('u1').tofile(f) return
[ "def", "wr_ann_file", "(", "self", ",", "write_fs", ",", "write_dir", "=", "''", ")", ":", "# Calculate the fs bytes to write if present and desired to write", "if", "write_fs", ":", "fs_bytes", "=", "self", ".", "calc_fs_bytes", "(", ")", "else", ":", "fs_bytes", ...
Calculate the bytes used to encode an annotation set and write them to an annotation file
[ "Calculate", "the", "bytes", "used", "to", "encode", "an", "annotation", "set", "and", "write", "them", "to", "an", "annotation", "file" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L586-L615
235,175
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.calc_core_bytes
def calc_core_bytes(self): """ Convert all used annotation fields into bytes to write """ # The difference sample to write if len(self.sample) == 1: sampdiff = np.array([self.sample[0]]) else: sampdiff = np.concatenate(([self.sample[0]], np.diff(self.sample))) # Create a copy of the annotation object with a # compact version of fields to write compact_annotation = copy.deepcopy(self) compact_annotation.compact_fields() # The optional fields to be written. Write if they are not None or all empty extra_write_fields = [] for field in ['num', 'subtype', 'chan', 'aux_note']: if not isblank(getattr(compact_annotation, field)): extra_write_fields.append(field) data_bytes = [] # Iterate across all fields one index at a time for i in range(len(sampdiff)): # Process the samp (difference) and sym items data_bytes.append(field2bytes('samptype', [sampdiff[i], self.symbol[i]])) # Process the extra optional fields for field in extra_write_fields: value = getattr(compact_annotation, field)[i] if value is not None: data_bytes.append(field2bytes(field, value)) # Flatten and convert to correct format data_bytes = np.array([item for sublist in data_bytes for item in sublist]).astype('u1') return data_bytes
python
def calc_core_bytes(self): # The difference sample to write if len(self.sample) == 1: sampdiff = np.array([self.sample[0]]) else: sampdiff = np.concatenate(([self.sample[0]], np.diff(self.sample))) # Create a copy of the annotation object with a # compact version of fields to write compact_annotation = copy.deepcopy(self) compact_annotation.compact_fields() # The optional fields to be written. Write if they are not None or all empty extra_write_fields = [] for field in ['num', 'subtype', 'chan', 'aux_note']: if not isblank(getattr(compact_annotation, field)): extra_write_fields.append(field) data_bytes = [] # Iterate across all fields one index at a time for i in range(len(sampdiff)): # Process the samp (difference) and sym items data_bytes.append(field2bytes('samptype', [sampdiff[i], self.symbol[i]])) # Process the extra optional fields for field in extra_write_fields: value = getattr(compact_annotation, field)[i] if value is not None: data_bytes.append(field2bytes(field, value)) # Flatten and convert to correct format data_bytes = np.array([item for sublist in data_bytes for item in sublist]).astype('u1') return data_bytes
[ "def", "calc_core_bytes", "(", "self", ")", ":", "# The difference sample to write", "if", "len", "(", "self", ".", "sample", ")", "==", "1", ":", "sampdiff", "=", "np", ".", "array", "(", "[", "self", ".", "sample", "[", "0", "]", "]", ")", "else", ...
Convert all used annotation fields into bytes to write
[ "Convert", "all", "used", "annotation", "fields", "into", "bytes", "to", "write" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L676-L716
235,176
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.get_contained_labels
def get_contained_labels(self, inplace=True): """ Get the set of unique labels contained in this annotation. Returns a pandas dataframe or sets the contained_labels attribute of the object. Requires the label_store field to be set. Function will try to use attributes contained in the order: 1. label_store 2. symbol 3. description This function should also be called to summarize information about an annotation after it has been read. Should not be a helper function to others except rdann. """ if self.custom_labels is not None: self.check_field('custom_labels') # Create the label map label_map = ann_label_table.copy() # Convert the tuple triplets into a pandas dataframe if needed if isinstance(self.custom_labels, (list, tuple)): custom_labels = label_triplets_to_df(self.custom_labels) elif isinstance(self.custom_labels, pd.DataFrame): # Set the index just in case it doesn't already match the label_store self.custom_labels.set_index( self.custom_labels['label_store'].values, inplace=True) custom_labels = self.custom_labels else: custom_labels = None # Merge the standard wfdb labels with the custom labels. # custom labels values overwrite standard wfdb if overlap. if custom_labels is not None: for i in custom_labels.index: label_map.loc[i] = custom_labels.loc[i] # This doesn't work... 
# label_map.loc[custom_labels.index] = custom_labels.loc[custom_labels.index] # Get the labels using one of the features if self.label_store is not None: index_vals = set(self.label_store) reset_index = False counts = np.unique(self.label_store, return_counts=True) elif self.symbol is not None: index_vals = set(self.symbol) label_map.set_index(label_map['symbol'].values, inplace=True) reset_index = True counts = np.unique(self.symbol, return_counts=True) elif self.description is not None: index_vals = set(self.description) label_map.set_index(label_map['description'].values, inplace=True) reset_index = True counts = np.unique(self.description, return_counts=True) else: raise Exception('No annotation labels contained in object') contained_labels = label_map.loc[index_vals, :] # Add the counts for i in range(len(counts[0])): contained_labels.loc[counts[0][i], 'n_occurrences'] = counts[1][i] contained_labels['n_occurrences'] = pd.to_numeric(contained_labels['n_occurrences'], downcast='integer') if reset_index: contained_labels.set_index(contained_labels['label_store'].values, inplace=True) if inplace: self.contained_labels = contained_labels return else: return contained_labels
python
def get_contained_labels(self, inplace=True): if self.custom_labels is not None: self.check_field('custom_labels') # Create the label map label_map = ann_label_table.copy() # Convert the tuple triplets into a pandas dataframe if needed if isinstance(self.custom_labels, (list, tuple)): custom_labels = label_triplets_to_df(self.custom_labels) elif isinstance(self.custom_labels, pd.DataFrame): # Set the index just in case it doesn't already match the label_store self.custom_labels.set_index( self.custom_labels['label_store'].values, inplace=True) custom_labels = self.custom_labels else: custom_labels = None # Merge the standard wfdb labels with the custom labels. # custom labels values overwrite standard wfdb if overlap. if custom_labels is not None: for i in custom_labels.index: label_map.loc[i] = custom_labels.loc[i] # This doesn't work... # label_map.loc[custom_labels.index] = custom_labels.loc[custom_labels.index] # Get the labels using one of the features if self.label_store is not None: index_vals = set(self.label_store) reset_index = False counts = np.unique(self.label_store, return_counts=True) elif self.symbol is not None: index_vals = set(self.symbol) label_map.set_index(label_map['symbol'].values, inplace=True) reset_index = True counts = np.unique(self.symbol, return_counts=True) elif self.description is not None: index_vals = set(self.description) label_map.set_index(label_map['description'].values, inplace=True) reset_index = True counts = np.unique(self.description, return_counts=True) else: raise Exception('No annotation labels contained in object') contained_labels = label_map.loc[index_vals, :] # Add the counts for i in range(len(counts[0])): contained_labels.loc[counts[0][i], 'n_occurrences'] = counts[1][i] contained_labels['n_occurrences'] = pd.to_numeric(contained_labels['n_occurrences'], downcast='integer') if reset_index: contained_labels.set_index(contained_labels['label_store'].values, inplace=True) if inplace: self.contained_labels = 
contained_labels return else: return contained_labels
[ "def", "get_contained_labels", "(", "self", ",", "inplace", "=", "True", ")", ":", "if", "self", ".", "custom_labels", "is", "not", "None", ":", "self", ".", "check_field", "(", "'custom_labels'", ")", "# Create the label map", "label_map", "=", "ann_label_table...
Get the set of unique labels contained in this annotation. Returns a pandas dataframe or sets the contained_labels attribute of the object. Requires the label_store field to be set. Function will try to use attributes contained in the order: 1. label_store 2. symbol 3. description This function should also be called to summarize information about an annotation after it has been read. Should not be a helper function to others except rdann.
[ "Get", "the", "set", "of", "unique", "labels", "contained", "in", "this", "annotation", ".", "Returns", "a", "pandas", "dataframe", "or", "sets", "the", "contained_labels", "attribute", "of", "the", "object", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L781-L859
235,177
MIT-LCP/wfdb-python
wfdb/io/annotation.py
Annotation.set_label_elements
def set_label_elements(self, wanted_label_elements): """ Set one or more label elements based on at least one of the others """ if isinstance(wanted_label_elements, str): wanted_label_elements = [wanted_label_elements] # Figure out which desired label elements are missing missing_elements = [e for e in wanted_label_elements if getattr(self, e) is None] contained_elements = [e for e in ann_label_fields if getattr(self, e )is not None] if not contained_elements: raise Exception('No annotation labels contained in object') for e in missing_elements: self.convert_label_attribute(contained_elements[0], e) unwanted_label_elements = list(set(ann_label_fields) - set(wanted_label_elements)) self.rm_attributes(unwanted_label_elements) return
python
def set_label_elements(self, wanted_label_elements): if isinstance(wanted_label_elements, str): wanted_label_elements = [wanted_label_elements] # Figure out which desired label elements are missing missing_elements = [e for e in wanted_label_elements if getattr(self, e) is None] contained_elements = [e for e in ann_label_fields if getattr(self, e )is not None] if not contained_elements: raise Exception('No annotation labels contained in object') for e in missing_elements: self.convert_label_attribute(contained_elements[0], e) unwanted_label_elements = list(set(ann_label_fields) - set(wanted_label_elements)) self.rm_attributes(unwanted_label_elements) return
[ "def", "set_label_elements", "(", "self", ",", "wanted_label_elements", ")", ":", "if", "isinstance", "(", "wanted_label_elements", ",", "str", ")", ":", "wanted_label_elements", "=", "[", "wanted_label_elements", "]", "# Figure out which desired label elements are missing"...
Set one or more label elements based on at least one of the others
[ "Set", "one", "or", "more", "label", "elements", "based", "on", "at", "least", "one", "of", "the", "others" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/annotation.py#L861-L885
235,178
MIT-LCP/wfdb-python
wfdb/io/_signal.py
_dat_read_params
def _dat_read_params(fmt, sig_len, byte_offset, skew, tsamps_per_frame, sampfrom, sampto): """ Calculate the parameters used to read and process a dat file, given its layout, and the desired sample range. Parameters ---------- fmt : str The format of the dat file sig_len : int The signal length (per channel) of the dat file byte_offset : int The byte offset of the dat file skew : list The skew for the signals of the dat file tsamps_per_frame : int The total samples/frame for all channels of the dat file sampfrom : int The starting sample number to be read from the signals sampto : int The final sample number to be read from the signals Returns ------- start_byte : int The starting byte to read the dat file from. Always points to the start of a byte block for special formats. n_read_samples : int The number of flat samples to read from the dat file. block_floor_samples : int The extra samples read prior to the first desired sample, for special formats, in order to ensure entire byte blocks are read. extra_flat_samples : int The extra samples desired beyond what is contained in the file. nan_replace : list The number of samples to replace with nan at the end of each signal, due to skew wanting samples beyond the file. Examples -------- sig_len=100, t = 4 (total samples/frame), skew = [0, 2, 4, 5] sampfrom=0, sampto=100 --> read_len = 100, n_sampread = 100*t, extralen = 5, nan_replace = [0, 2, 4, 5] sampfrom=50, sampto=100 --> read_len = 50, n_sampread = 50*t, extralen = 5, nan_replace = [0, 2, 4, 5] sampfrom=0, sampto=50 --> read_len = 50, n_sampread = 55*t, extralen = 0, nan_replace = [0, 0, 0, 0] sampfrom=95, sampto=99 --> read_len = 4, n_sampread = 5*t, extralen = 4, nan_replace = [0, 1, 3, 4] """ # First flat sample number to read (if all channels were flattened) start_flat_sample = sampfrom * tsamps_per_frame # Calculate the last flat sample number to read. # Cannot exceed sig_len * tsamps_per_frame, the number of samples # stored in the file. 
If extra 'samples' are desired by the skew, # keep track. # Where was the -sampfrom derived from? Why was it in the formula? if (sampto + max(skew)) > sig_len: end_flat_sample = sig_len * tsamps_per_frame extra_flat_samples = (sampto + max(skew) - sig_len) * tsamps_per_frame else: end_flat_sample = (sampto + max(skew)) * tsamps_per_frame extra_flat_samples = 0 # Adjust the starting sample number to read from start of blocks for special fmts. # Keep track of how many preceeding samples are read, to be discarded later. if fmt == '212': # Samples come in groups of 2, in 3 byte blocks block_floor_samples = start_flat_sample % 2 start_flat_sample = start_flat_sample - block_floor_samples elif fmt in ['310', '311']: # Samples come in groups of 3, in 4 byte blocks block_floor_samples = start_flat_sample % 3 start_flat_sample = start_flat_sample - block_floor_samples else: block_floor_samples = 0 # The starting byte to read from start_byte = byte_offset + int(start_flat_sample * BYTES_PER_SAMPLE[fmt]) # The number of samples to read n_read_samples = end_flat_sample - start_flat_sample # The number of samples to replace with nan at the end of each signal # due to skew wanting samples beyond the file nan_replace = [max(0, sampto + s - sig_len) for s in skew] return (start_byte, n_read_samples, block_floor_samples, extra_flat_samples, nan_replace)
python
def _dat_read_params(fmt, sig_len, byte_offset, skew, tsamps_per_frame,
                     sampfrom, sampto):
    """
    Calculate the parameters used to read and process a dat file, given
    its layout and the desired sample range.

    Parameters
    ----------
    fmt : str
        The format of the dat file
    sig_len : int
        The signal length (per channel) of the dat file
    byte_offset : int
        The byte offset of the dat file
    skew : list
        The skew for the signals of the dat file
    tsamps_per_frame : int
        The total samples/frame for all channels of the dat file
    sampfrom : int
        The starting sample number to be read from the signals
    sampto : int
        The final sample number to be read from the signals

    Returns
    -------
    start_byte : int
        The starting byte to read the dat file from. Always points to
        the start of a byte block for special formats.
    n_read_samples : int
        The number of flat samples to read from the dat file.
    block_floor_samples : int
        The extra samples read prior to the first desired sample, for
        special formats, in order to ensure entire byte blocks are read.
    extra_flat_samples : int
        The extra samples desired beyond what is contained in the file.
    nan_replace : list
        The number of samples to replace with nan at the end of each
        signal, due to skew wanting samples beyond the file.

    """
    max_skew = max(skew)

    # First flat sample number to read (if all channels were flattened).
    start_flat_sample = sampfrom * tsamps_per_frame

    # Last flat sample number to read. Cannot exceed the number of
    # samples stored in the file; skew-requested samples beyond the end
    # are tracked separately as extra samples.
    if sampto + max_skew > sig_len:
        end_flat_sample = sig_len * tsamps_per_frame
        extra_flat_samples = (sampto + max_skew - sig_len) * tsamps_per_frame
    else:
        end_flat_sample = (sampto + max_skew) * tsamps_per_frame
        extra_flat_samples = 0

    # Special formats pack several samples into fixed byte blocks; round
    # the starting sample down to a block boundary and remember how many
    # preceding samples are read, so they can be discarded later.
    if fmt == '212':
        # Samples come in groups of 2, in 3 byte blocks
        block_floor_samples = start_flat_sample % 2
    elif fmt in ('310', '311'):
        # Samples come in groups of 3, in 4 byte blocks
        block_floor_samples = start_flat_sample % 3
    else:
        block_floor_samples = 0
    start_flat_sample -= block_floor_samples

    # The starting byte to read from
    start_byte = byte_offset + int(start_flat_sample * BYTES_PER_SAMPLE[fmt])

    # The number of samples to read
    n_read_samples = end_flat_sample - start_flat_sample

    # Samples to overwrite with nan at the end of each channel, where
    # the skew requested samples past the end of the file.
    nan_replace = [max(0, sampto + s - sig_len) for s in skew]

    return (start_byte, n_read_samples, block_floor_samples,
            extra_flat_samples, nan_replace)
[ "def", "_dat_read_params", "(", "fmt", ",", "sig_len", ",", "byte_offset", ",", "skew", ",", "tsamps_per_frame", ",", "sampfrom", ",", "sampto", ")", ":", "# First flat sample number to read (if all channels were flattened)", "start_flat_sample", "=", "sampfrom", "*", "...
Calculate the parameters used to read and process a dat file, given its layout, and the desired sample range. Parameters ---------- fmt : str The format of the dat file sig_len : int The signal length (per channel) of the dat file byte_offset : int The byte offset of the dat file skew : list The skew for the signals of the dat file tsamps_per_frame : int The total samples/frame for all channels of the dat file sampfrom : int The starting sample number to be read from the signals sampto : int The final sample number to be read from the signals Returns ------- start_byte : int The starting byte to read the dat file from. Always points to the start of a byte block for special formats. n_read_samples : int The number of flat samples to read from the dat file. block_floor_samples : int The extra samples read prior to the first desired sample, for special formats, in order to ensure entire byte blocks are read. extra_flat_samples : int The extra samples desired beyond what is contained in the file. nan_replace : list The number of samples to replace with nan at the end of each signal, due to skew wanting samples beyond the file. Examples -------- sig_len=100, t = 4 (total samples/frame), skew = [0, 2, 4, 5] sampfrom=0, sampto=100 --> read_len = 100, n_sampread = 100*t, extralen = 5, nan_replace = [0, 2, 4, 5] sampfrom=50, sampto=100 --> read_len = 50, n_sampread = 50*t, extralen = 5, nan_replace = [0, 2, 4, 5] sampfrom=0, sampto=50 --> read_len = 50, n_sampread = 55*t, extralen = 0, nan_replace = [0, 0, 0, 0] sampfrom=95, sampto=99 --> read_len = 4, n_sampread = 5*t, extralen = 4, nan_replace = [0, 1, 3, 4]
[ "Calculate", "the", "parameters", "used", "to", "read", "and", "process", "a", "dat", "file", "given", "its", "layout", "and", "the", "desired", "sample", "range", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1035-L1123
235,179
MIT-LCP/wfdb-python
wfdb/io/_signal.py
_required_byte_num
def _required_byte_num(mode, fmt, n_samp): """ Determine how many signal bytes are needed to read or write a number of desired samples from a dat file. Parameters ---------- mode : str Whether the file is to be read or written: 'read' or 'write'. fmt : str The wfdb dat format. n_samp : int The number of samples wanted. Returns ------- n_bytes : int The number of bytes required to read or write the file Notes ----- Read and write require the same number in most cases. An exception is fmt 311 for n_extra==2. """ if fmt == '212': n_bytes = math.ceil(n_samp*1.5) elif fmt in ['310', '311']: n_extra = n_samp % 3 if n_extra == 2: if fmt == '310': n_bytes = upround(n_samp * 4/3, 4) # 311 else: if mode == 'read': n_bytes = math.ceil(n_samp * 4/3) # Have to write more bytes for wfdb c to work else: n_bytes = upround(n_samp * 4/3, 4) # 0 or 1 else: n_bytes = math.ceil(n_samp * 4/3 ) else: n_bytes = n_samp * BYTES_PER_SAMPLE[fmt] return int(n_bytes)
python
def _required_byte_num(mode, fmt, n_samp): if fmt == '212': n_bytes = math.ceil(n_samp*1.5) elif fmt in ['310', '311']: n_extra = n_samp % 3 if n_extra == 2: if fmt == '310': n_bytes = upround(n_samp * 4/3, 4) # 311 else: if mode == 'read': n_bytes = math.ceil(n_samp * 4/3) # Have to write more bytes for wfdb c to work else: n_bytes = upround(n_samp * 4/3, 4) # 0 or 1 else: n_bytes = math.ceil(n_samp * 4/3 ) else: n_bytes = n_samp * BYTES_PER_SAMPLE[fmt] return int(n_bytes)
[ "def", "_required_byte_num", "(", "mode", ",", "fmt", ",", "n_samp", ")", ":", "if", "fmt", "==", "'212'", ":", "n_bytes", "=", "math", ".", "ceil", "(", "n_samp", "*", "1.5", ")", "elif", "fmt", "in", "[", "'310'", ",", "'311'", "]", ":", "n_extra...
Determine how many signal bytes are needed to read or write a number of desired samples from a dat file. Parameters ---------- mode : str Whether the file is to be read or written: 'read' or 'write'. fmt : str The wfdb dat format. n_samp : int The number of samples wanted. Returns ------- n_bytes : int The number of bytes required to read or write the file Notes ----- Read and write require the same number in most cases. An exception is fmt 311 for n_extra==2.
[ "Determine", "how", "many", "signal", "bytes", "are", "needed", "to", "read", "or", "write", "a", "number", "of", "desired", "samples", "from", "a", "dat", "file", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1126-L1173
235,180
MIT-LCP/wfdb-python
wfdb/io/_signal.py
_rd_dat_file
def _rd_dat_file(file_name, dir_name, pb_dir, fmt, start_byte, n_samp): """ Read data from a dat file, either local or remote, into a 1d numpy array. This is the lowest level dat reading function (along with `_stream_dat` which this function may call), and is called by `_rd_dat_signals`. Parameters ---------- start_byte : int The starting byte number to read from. n_samp : int The total number of samples to read. Does NOT need to create whole blocks for special format. Any number of samples should be readable. * other params See docstring for `_rd_dat_signals` Returns ------- sig_data : numpy array The data read from the dat file. The dtype varies depending on fmt. Byte aligned fmts are read in their final required format. Unaligned formats are read as uint8 to be further processed. Notes ----- See docstring notes for `_rd_dat_signals` """ # element_count is the number of elements to read using np.fromfile # for local files # byte_count is the number of bytes to read for streaming files if fmt == '212': byte_count = _required_byte_num('read', '212', n_samp) element_count = byte_count elif fmt in ['310', '311']: byte_count = _required_byte_num('read', fmt, n_samp) element_count = byte_count else: element_count = n_samp byte_count = n_samp * BYTES_PER_SAMPLE[fmt] # Local dat file if pb_dir is None: with open(os.path.join(dir_name, file_name), 'rb') as fp: fp.seek(start_byte) sig_data = np.fromfile(fp, dtype=np.dtype(DATA_LOAD_TYPES[fmt]), count=element_count) # Stream dat file from physiobank else: sig_data = download._stream_dat(file_name, pb_dir, byte_count, start_byte, np.dtype(DATA_LOAD_TYPES[fmt])) return sig_data
python
def _rd_dat_file(file_name, dir_name, pb_dir, fmt, start_byte, n_samp):
    """
    Read data from a dat file, either local or remote, into a 1d numpy
    array.

    This is the lowest level dat reading function (along with
    `_stream_dat` which this function may call), and is called by
    `_rd_dat_signals`.

    Parameters
    ----------
    start_byte : int
        The starting byte number to read from.
    n_samp : int
        The total number of samples to read. Does NOT need to create
        whole blocks for special formats.
    * other params
        See docstring for `_rd_dat_signals`.

    Returns
    -------
    sig_data : numpy array
        The data read from the dat file. Byte aligned fmts are read in
        their final required dtype; unaligned formats are read as uint8
        to be further processed.

    """
    # element_count: elements for np.fromfile on local files.
    # byte_count: bytes to request when streaming remote files.
    if fmt in ('212', '310', '311'):
        # Unaligned formats are read byte-by-byte (uint8), so the
        # element count equals the byte count.
        byte_count = _required_byte_num('read', fmt, n_samp)
        element_count = byte_count
    else:
        element_count = n_samp
        byte_count = n_samp * BYTES_PER_SAMPLE[fmt]

    load_dtype = np.dtype(DATA_LOAD_TYPES[fmt])

    if pb_dir is None:
        # Local dat file: seek to the start byte and read with numpy.
        with open(os.path.join(dir_name, file_name), 'rb') as fp:
            fp.seek(start_byte)
            sig_data = np.fromfile(fp, dtype=load_dtype,
                                   count=element_count)
    else:
        # Remote dat file: stream the byte range from physiobank.
        sig_data = download._stream_dat(file_name, pb_dir, byte_count,
                                        start_byte, load_dtype)

    return sig_data
[ "def", "_rd_dat_file", "(", "file_name", ",", "dir_name", ",", "pb_dir", ",", "fmt", ",", "start_byte", ",", "n_samp", ")", ":", "# element_count is the number of elements to read using np.fromfile", "# for local files", "# byte_count is the number of bytes to read for streaming ...
Read data from a dat file, either local or remote, into a 1d numpy array. This is the lowest level dat reading function (along with `_stream_dat` which this function may call), and is called by `_rd_dat_signals`. Parameters ---------- start_byte : int The starting byte number to read from. n_samp : int The total number of samples to read. Does NOT need to create whole blocks for special format. Any number of samples should be readable. * other params See docstring for `_rd_dat_signals` Returns ------- sig_data : numpy array The data read from the dat file. The dtype varies depending on fmt. Byte aligned fmts are read in their final required format. Unaligned formats are read as uint8 to be further processed. Notes ----- See docstring notes for `_rd_dat_signals`
[ "Read", "data", "from", "a", "dat", "file", "either", "local", "or", "remote", "into", "a", "1d", "numpy", "array", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1176-L1234
235,181
MIT-LCP/wfdb-python
wfdb/io/_signal.py
_skew_sig
def _skew_sig(sig, skew, n_sig, read_len, fmt, nan_replace, samps_per_frame=None): """ Skew the signal, insert nans and shave off end of array if needed. Parameters ---------- sig : numpy array The original signal skew : list List of samples to skew for each signal n_sig : int The number of signals Notes ----- `fmt` is just for the correct nan value. `samps_per_frame` is only used for skewing expanded signals. """ if max(skew)>0: # Expanded frame samples. List of arrays. if isinstance(sig, list): # Shift the channel samples for ch in range(n_sig): if skew[ch]>0: sig[ch][:read_len*samps_per_frame[ch]] = sig[ch][skew[ch]*samps_per_frame[ch]:] # Shave off the extra signal length at the end for ch in range(n_sig): sig[ch] = sig[ch][:read_len*samps_per_frame[ch]] # Insert nans where skewed signal overran dat file for ch in range(n_sig): if nan_replace[ch]>0: sig[ch][-nan_replace[ch]:] = _digi_nan(fmt) # Uniform array else: # Shift the channel samples for ch in range(n_sig): if skew[ch]>0: sig[:read_len, ch] = sig[skew[ch]:, ch] # Shave off the extra signal length at the end sig = sig[:read_len, :] # Insert nans where skewed signal overran dat file for ch in range(n_sig): if nan_replace[ch]>0: sig[-nan_replace[ch]:, ch] = _digi_nan(fmt) return sig
python
def _skew_sig(sig, skew, n_sig, read_len, fmt, nan_replace, samps_per_frame=None): if max(skew)>0: # Expanded frame samples. List of arrays. if isinstance(sig, list): # Shift the channel samples for ch in range(n_sig): if skew[ch]>0: sig[ch][:read_len*samps_per_frame[ch]] = sig[ch][skew[ch]*samps_per_frame[ch]:] # Shave off the extra signal length at the end for ch in range(n_sig): sig[ch] = sig[ch][:read_len*samps_per_frame[ch]] # Insert nans where skewed signal overran dat file for ch in range(n_sig): if nan_replace[ch]>0: sig[ch][-nan_replace[ch]:] = _digi_nan(fmt) # Uniform array else: # Shift the channel samples for ch in range(n_sig): if skew[ch]>0: sig[:read_len, ch] = sig[skew[ch]:, ch] # Shave off the extra signal length at the end sig = sig[:read_len, :] # Insert nans where skewed signal overran dat file for ch in range(n_sig): if nan_replace[ch]>0: sig[-nan_replace[ch]:, ch] = _digi_nan(fmt) return sig
[ "def", "_skew_sig", "(", "sig", ",", "skew", ",", "n_sig", ",", "read_len", ",", "fmt", ",", "nan_replace", ",", "samps_per_frame", "=", "None", ")", ":", "if", "max", "(", "skew", ")", ">", "0", ":", "# Expanded frame samples. List of arrays.", "if", "isi...
Skew the signal, insert nans and shave off end of array if needed. Parameters ---------- sig : numpy array The original signal skew : list List of samples to skew for each signal n_sig : int The number of signals Notes ----- `fmt` is just for the correct nan value. `samps_per_frame` is only used for skewing expanded signals.
[ "Skew", "the", "signal", "insert", "nans", "and", "shave", "off", "end", "of", "array", "if", "needed", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1343-L1394
235,182
MIT-LCP/wfdb-python
wfdb/io/_signal.py
_check_sig_dims
def _check_sig_dims(sig, read_len, n_sig, samps_per_frame): """ Integrity check of a signal's shape after reading. """ if isinstance(sig, np.ndarray): if sig.shape != (read_len, n_sig): raise ValueError('Samples were not loaded correctly') else: if len(sig) != n_sig: raise ValueError('Samples were not loaded correctly') for ch in range(n_sig): if len(sig[ch]) != samps_per_frame[ch] * read_len: raise ValueError('Samples were not loaded correctly')
python
def _check_sig_dims(sig, read_len, n_sig, samps_per_frame): if isinstance(sig, np.ndarray): if sig.shape != (read_len, n_sig): raise ValueError('Samples were not loaded correctly') else: if len(sig) != n_sig: raise ValueError('Samples were not loaded correctly') for ch in range(n_sig): if len(sig[ch]) != samps_per_frame[ch] * read_len: raise ValueError('Samples were not loaded correctly')
[ "def", "_check_sig_dims", "(", "sig", ",", "read_len", ",", "n_sig", ",", "samps_per_frame", ")", ":", "if", "isinstance", "(", "sig", ",", "np", ".", "ndarray", ")", ":", "if", "sig", ".", "shape", "!=", "(", "read_len", ",", "n_sig", ")", ":", "rai...
Integrity check of a signal's shape after reading.
[ "Integrity", "check", "of", "a", "signal", "s", "shape", "after", "reading", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1397-L1410
235,183
MIT-LCP/wfdb-python
wfdb/io/_signal.py
_digi_bounds
def _digi_bounds(fmt): """ Return min and max digital values for each format type. Accepts lists. Parmeters --------- fmt : str, or list The wfdb dat format, or a list of them. """ if isinstance(fmt, list): return [_digi_bounds(f) for f in fmt] if fmt == '80': return (-128, 127) elif fmt == '212': return (-2048, 2047) elif fmt == '16': return (-32768, 32767) elif fmt == '24': return (-8388608, 8388607) elif fmt == '32': return (-2147483648, 2147483647)
python
def _digi_bounds(fmt): if isinstance(fmt, list): return [_digi_bounds(f) for f in fmt] if fmt == '80': return (-128, 127) elif fmt == '212': return (-2048, 2047) elif fmt == '16': return (-32768, 32767) elif fmt == '24': return (-8388608, 8388607) elif fmt == '32': return (-2147483648, 2147483647)
[ "def", "_digi_bounds", "(", "fmt", ")", ":", "if", "isinstance", "(", "fmt", ",", "list", ")", ":", "return", "[", "_digi_bounds", "(", "f", ")", "for", "f", "in", "fmt", "]", "if", "fmt", "==", "'80'", ":", "return", "(", "-", "128", ",", "127",...
Return min and max digital values for each format type. Accepts lists. Parameters ---------- fmt : str, or list The wfdb dat format, or a list of them.
[ "Return", "min", "and", "max", "digital", "values", "for", "each", "format", "type", ".", "Accepts", "lists", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1416-L1439
235,184
MIT-LCP/wfdb-python
wfdb/io/_signal.py
_digi_nan
def _digi_nan(fmt): """ Return the wfdb digital value used to store nan for the format type. Parmeters --------- fmt : str, or list The wfdb dat format, or a list of them. """ if isinstance(fmt, list): return [_digi_nan(f) for f in fmt] if fmt == '80': return -128 if fmt == '310': return -512 if fmt == '311': return -512 elif fmt == '212': return -2048 elif fmt == '16': return -32768 elif fmt == '61': return -32768 elif fmt == '160': return -32768 elif fmt == '24': return -8388608 elif fmt == '32': return -2147483648
python
def _digi_nan(fmt): if isinstance(fmt, list): return [_digi_nan(f) for f in fmt] if fmt == '80': return -128 if fmt == '310': return -512 if fmt == '311': return -512 elif fmt == '212': return -2048 elif fmt == '16': return -32768 elif fmt == '61': return -32768 elif fmt == '160': return -32768 elif fmt == '24': return -8388608 elif fmt == '32': return -2147483648
[ "def", "_digi_nan", "(", "fmt", ")", ":", "if", "isinstance", "(", "fmt", ",", "list", ")", ":", "return", "[", "_digi_nan", "(", "f", ")", "for", "f", "in", "fmt", "]", "if", "fmt", "==", "'80'", ":", "return", "-", "128", "if", "fmt", "==", "...
Return the wfdb digital value used to store nan for the format type. Parameters ---------- fmt : str, or list The wfdb dat format, or a list of them.
[ "Return", "the", "wfdb", "digital", "value", "used", "to", "store", "nan", "for", "the", "format", "type", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1442-L1472
235,185
MIT-LCP/wfdb-python
wfdb/io/_signal.py
est_res
def est_res(signals): """ Estimate the resolution of each signal in a multi-channel signal in bits. Maximum of 32 bits. Parameters ---------- signals : numpy array, or list A 2d numpy array representing a uniform multichannel signal, or a list of 1d numpy arrays representing multiple channels of signals with different numbers of samples per frame. Returns ------- bit_res : list A list of estimated integer resolutions for each channel """ res_levels = np.power(2, np.arange(0, 33)) # Expanded sample signals. List of numpy arrays if isinstance(signals, list): n_sig = len(signals) # Uniform numpy array else: if signals.ndim ==1: n_sig = 1 else: n_sig = signals.shape[1] res = [] for ch in range(n_sig): # Estimate the number of steps as the range divided by the # minimum increment. if isinstance(signals, list): sorted_sig = np.sort(np.unique(signals[ch])) else: if signals.ndim == 1: sorted_sig = np.sort(np.unique(signals)) else: sorted_sig = np.sort(np.unique(signals[:,ch])) min_inc = min(np.diff(sorted_sig)) if min_inc == 0: # Case where signal is flat. Resolution is 0. res.append(0) else: nlevels = 1 + (sorted_sig[-1]-sorted_sig[0]) / min_inc if nlevels >= res_levels[-1]: res.append(32) else: res.append(np.where(res_levels>=nlevels)[0][0]) return res
python
def est_res(signals):
    """
    Estimate the resolution of each signal in a multi-channel signal in
    bits. Maximum of 32 bits.

    Parameters
    ----------
    signals : numpy array, or list
        A 2d numpy array representing a uniform multichannel signal, or
        a list of 1d numpy arrays representing multiple channels of
        signals with different numbers of samples per frame.

    Returns
    -------
    res : list
        A list of estimated integer resolutions for each channel.

    """
    res_levels = np.power(2, np.arange(0, 33))

    # Determine the number of channels.
    if isinstance(signals, list):
        n_sig = len(signals)
    elif signals.ndim == 1:
        n_sig = 1
    else:
        n_sig = signals.shape[1]

    res = []
    for ch in range(n_sig):
        # Unique channel values. np.unique already returns a sorted
        # array, so no extra sort is needed.
        if isinstance(signals, list):
            sorted_sig = np.unique(signals[ch])
        elif signals.ndim == 1:
            sorted_sig = np.unique(signals)
        else:
            sorted_sig = np.unique(signals[:, ch])

        if sorted_sig.size < 2:
            # Flat (or empty) channel: resolution is 0. Bug fix: the
            # original called min() on the empty np.diff result here,
            # which raised ValueError, so its "flat signal" branch was
            # never reachable.
            res.append(0)
            continue

        # Estimate the number of levels as the range divided by the
        # minimum increment between distinct values. The increment is
        # always > 0 since the values are unique.
        min_inc = np.min(np.diff(sorted_sig))
        nlevels = 1 + (sorted_sig[-1] - sorted_sig[0]) / min_inc
        if nlevels >= res_levels[-1]:
            res.append(32)
        else:
            res.append(np.where(res_levels >= nlevels)[0][0])

    return res
[ "def", "est_res", "(", "signals", ")", ":", "res_levels", "=", "np", ".", "power", "(", "2", ",", "np", ".", "arange", "(", "0", ",", "33", ")", ")", "# Expanded sample signals. List of numpy arrays", "if", "isinstance", "(", "signals", ",", "list", ")", ...
Estimate the resolution of each signal in a multi-channel signal in bits. Maximum of 32 bits. Parameters ---------- signals : numpy array, or list A 2d numpy array representing a uniform multichannel signal, or a list of 1d numpy arrays representing multiple channels of signals with different numbers of samples per frame. Returns ------- bit_res : list A list of estimated integer resolutions for each channel
[ "Estimate", "the", "resolution", "of", "each", "signal", "in", "a", "multi", "-", "channel", "signal", "in", "bits", ".", "Maximum", "of", "32", "bits", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1475-L1528
235,186
MIT-LCP/wfdb-python
wfdb/io/_signal.py
_np_dtype
def _np_dtype(bit_res, discrete): """ Given the bit resolution of a signal, return the minimum numpy dtype used to store it. Parameters ---------- bit_res : int The bit resolution. discrete : bool Whether the dtype is to be int or float. Returns ------- dtype : str String numpy dtype used to store the signal of the given resolution """ bit_res = min(bit_res, 64) for np_res in [8, 16, 32, 64]: if bit_res <= np_res: break if discrete is True: return 'int' + str(np_res) else: # No float8 dtype return 'float' + str(max(np_res, 16))
python
def _np_dtype(bit_res, discrete): bit_res = min(bit_res, 64) for np_res in [8, 16, 32, 64]: if bit_res <= np_res: break if discrete is True: return 'int' + str(np_res) else: # No float8 dtype return 'float' + str(max(np_res, 16))
[ "def", "_np_dtype", "(", "bit_res", ",", "discrete", ")", ":", "bit_res", "=", "min", "(", "bit_res", ",", "64", ")", "for", "np_res", "in", "[", "8", ",", "16", ",", "32", ",", "64", "]", ":", "if", "bit_res", "<=", "np_res", ":", "break", "if",...
Given the bit resolution of a signal, return the minimum numpy dtype used to store it. Parameters ---------- bit_res : int The bit resolution. discrete : bool Whether the dtype is to be int or float. Returns ------- dtype : str String numpy dtype used to store the signal of the given resolution
[ "Given", "the", "bit", "resolution", "of", "a", "signal", "return", "the", "minimum", "numpy", "dtype", "used", "to", "store", "it", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1599-L1628
235,187
MIT-LCP/wfdb-python
wfdb/io/_signal.py
_infer_sig_len
def _infer_sig_len(file_name, fmt, n_sig, dir_name, pb_dir=None): """ Infer the length of a signal from a dat file. Parameters ---------- file_name : str Name of the dat file fmt : str WFDB fmt of the dat file n_sig : int Number of signals contained in the dat file Notes ----- sig_len * n_sig * bytes_per_sample == file_size """ if pb_dir is None: file_size = os.path.getsize(os.path.join(dir_name, file_name)) else: file_size = download._remote_file_size(file_name=file_name, pb_dir=pb_dir) sig_len = int(file_size / (BYTES_PER_SAMPLE[fmt] * n_sig)) return sig_len
python
def _infer_sig_len(file_name, fmt, n_sig, dir_name, pb_dir=None):
    """
    Infer the length of a signal from a dat file.

    Parameters
    ----------
    file_name : str
        Name of the dat file
    fmt : str
        WFDB fmt of the dat file
    n_sig : int
        Number of signals contained in the dat file

    Notes
    -----
    sig_len * n_sig * bytes_per_sample == file_size

    """
    # Get the file size locally or from the remote server.
    if pb_dir is None:
        file_size = os.path.getsize(os.path.join(dir_name, file_name))
    else:
        file_size = download._remote_file_size(file_name=file_name,
                                               pb_dir=pb_dir)

    # Invert: sig_len = file_size / (bytes_per_sample * n_sig).
    return int(file_size / (BYTES_PER_SAMPLE[fmt] * n_sig))
[ "def", "_infer_sig_len", "(", "file_name", ",", "fmt", ",", "n_sig", ",", "dir_name", ",", "pb_dir", "=", "None", ")", ":", "if", "pb_dir", "is", "None", ":", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "os", ".", "path", ".", "join", ...
Infer the length of a signal from a dat file. Parameters ---------- file_name : str Name of the dat file fmt : str WFDB fmt of the dat file n_sig : int Number of signals contained in the dat file Notes ----- sig_len * n_sig * bytes_per_sample == file_size
[ "Infer", "the", "length", "of", "a", "signal", "from", "a", "dat", "file", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L1798-L1824
235,188
MIT-LCP/wfdb-python
wfdb/io/_signal.py
SignalMixin.adc
def adc(self, expanded=False, inplace=False): """ Performs analogue to digital conversion of the physical signal stored in p_signal if expanded is False, or e_p_signal if expanded is True. The p_signal/e_p_signal, fmt, gain, and baseline fields must all be valid. If inplace is True, the adc will be performed inplace on the variable, the d_signal/e_d_signal attribute will be set, and the p_signal/e_p_signal field will be set to None. Parameters ---------- expanded : bool, optional Whether to transform the `e_p_signal` attribute (True) or the `p_signal` attribute (False). inplace : bool, optional Whether to automatically set the object's corresponding digital signal attribute and set the physical signal attribute to None (True), or to return the converted signal as a separate variable without changing the original physical signal attribute (False). Returns ------- d_signal : numpy array, optional The digital conversion of the signal. Either a 2d numpy array or a list of 1d numpy arrays. Examples: --------- >>> import wfdb >>> record = wfdb.rdsamp('sample-data/100') >>> d_signal = record.adc() >>> record.adc(inplace=True) >>> record.dac(inplace=True) """ # The digital nan values for each channel d_nans = _digi_nan(self.fmt) # To do: choose the minimum return res needed intdtype = 'int64' # Do inplace conversion and set relevant variables. 
if inplace: if expanded: for ch in range(self.n_sig): # nan locations for the channel ch_nanlocs = np.isnan(self.e_p_signal[ch]) np.multiply(self.e_p_signal[ch], self.adc_gain[ch], self.e_p_signal[ch]) np.add(e_p_signal[ch], self.baseline[ch], self.e_p_signal[ch]) self.e_p_signal[ch] = self.e_p_signal[ch].astype(intdtype, copy=False) self.e_p_signal[ch][ch_nanlocs] = d_nans[ch] self.e_d_signal = self.e_p_signal self.e_p_signal = None else: nanlocs = np.isnan(self.p_signal) np.multiply(self.p_signal, self.adc_gain, self.p_signal) np.add(self.p_signal, self.baseline, self.p_signal) self.p_signal = self.p_signal.astype(intdtype, copy=False) self.d_signal = self.p_signal self.p_signal = None # Return the variable else: if expanded: d_signal = [] for ch in range(self.n_sig): # nan locations for the channel ch_nanlocs = np.isnan(self.e_p_signal[ch]) ch_d_signal = self.e_p_signal.copy() np.multiply(ch_d_signal, self.adc_gain[ch], ch_d_signal) np.add(ch_d_signal, self.baseline[ch], ch_d_signal) ch_d_signal = ch_d_signal.astype(intdtype, copy=False) ch_d_signal[ch_nanlocs] = d_nans[ch] d_signal.append(ch_d_signal) else: nanlocs = np.isnan(self.p_signal) # Cannot cast dtype to int now because gain is float. d_signal = self.p_signal.copy() np.multiply(d_signal, self.adc_gain, d_signal) np.add(d_signal, self.baseline, d_signal) d_signal = d_signal.astype(intdtype, copy=False) if nanlocs.any(): for ch in range(d_signal.shape[1]): if nanlocs[:,ch].any(): d_signal[nanlocs[:,ch],ch] = d_nans[ch] return d_signal
python
def adc(self, expanded=False, inplace=False):
    """
    Performs analogue to digital conversion of the physical signal
    stored in p_signal if expanded is False, or e_p_signal if expanded
    is True.

    The p_signal/e_p_signal, fmt, gain, and baseline fields must all be
    valid.

    If inplace is True, the adc will be performed inplace on the
    variable, the d_signal/e_d_signal attribute will be set, and the
    p_signal/e_p_signal field will be set to None.

    Parameters
    ----------
    expanded : bool, optional
        Whether to transform the `e_p_signal` attribute (True) or
        the `p_signal` attribute (False).
    inplace : bool, optional
        Whether to automatically set the object's corresponding
        digital signal attribute and set the physical signal
        attribute to None (True), or to return the converted
        signal as a separate variable without changing the original
        physical signal attribute (False).

    Returns
    -------
    d_signal : numpy array, optional
        The digital conversion of the signal. Either a 2d numpy
        array or a list of 1d numpy arrays.

    Examples:
    ---------
    >>> import wfdb
    >>> record = wfdb.rdsamp('sample-data/100')
    >>> d_signal = record.adc()
    >>> record.adc(inplace=True)
    >>> record.dac(inplace=True)

    """
    # The digital nan values for each channel
    d_nans = _digi_nan(self.fmt)

    # To do: choose the minimum return res needed
    intdtype = 'int64'

    # Do inplace conversion and set relevant variables.
    if inplace:
        if expanded:
            for ch in range(self.n_sig):
                # nan locations for the channel
                ch_nanlocs = np.isnan(self.e_p_signal[ch])
                np.multiply(self.e_p_signal[ch], self.adc_gain[ch],
                            self.e_p_signal[ch])
                # Bug fix: the original referenced the bare name
                # `e_p_signal` here, raising NameError.
                np.add(self.e_p_signal[ch], self.baseline[ch],
                       self.e_p_signal[ch])
                self.e_p_signal[ch] = self.e_p_signal[ch].astype(
                    intdtype, copy=False)
                self.e_p_signal[ch][ch_nanlocs] = d_nans[ch]
            self.e_d_signal = self.e_p_signal
            self.e_p_signal = None
        else:
            nanlocs = np.isnan(self.p_signal)
            np.multiply(self.p_signal, self.adc_gain, self.p_signal)
            np.add(self.p_signal, self.baseline, self.p_signal)
            self.p_signal = self.p_signal.astype(intdtype, copy=False)
            # Bug fix: the original computed nanlocs but never wrote the
            # digital nan values back, leaving garbage from casting nan
            # to int. Mirror the non-inplace branch below.
            if nanlocs.any():
                for ch in range(self.p_signal.shape[1]):
                    if nanlocs[:, ch].any():
                        self.p_signal[nanlocs[:, ch], ch] = d_nans[ch]
            self.d_signal = self.p_signal
            self.p_signal = None

    # Return the variable
    else:
        if expanded:
            d_signal = []
            for ch in range(self.n_sig):
                # nan locations for the channel
                ch_nanlocs = np.isnan(self.e_p_signal[ch])
                # Bug fix: the original copied the entire e_p_signal
                # list instead of the single channel array.
                ch_d_signal = self.e_p_signal[ch].copy()
                np.multiply(ch_d_signal, self.adc_gain[ch], ch_d_signal)
                np.add(ch_d_signal, self.baseline[ch], ch_d_signal)
                ch_d_signal = ch_d_signal.astype(intdtype, copy=False)
                ch_d_signal[ch_nanlocs] = d_nans[ch]
                d_signal.append(ch_d_signal)
        else:
            nanlocs = np.isnan(self.p_signal)
            # Cannot cast dtype to int now because gain is float.
            d_signal = self.p_signal.copy()
            np.multiply(d_signal, self.adc_gain, d_signal)
            np.add(d_signal, self.baseline, d_signal)
            d_signal = d_signal.astype(intdtype, copy=False)

            if nanlocs.any():
                for ch in range(d_signal.shape[1]):
                    if nanlocs[:, ch].any():
                        d_signal[nanlocs[:, ch], ch] = d_nans[ch]

        return d_signal
[ "def", "adc", "(", "self", ",", "expanded", "=", "False", ",", "inplace", "=", "False", ")", ":", "# The digital nan values for each channel", "d_nans", "=", "_digi_nan", "(", "self", ".", "fmt", ")", "# To do: choose the minimum return res needed", "intdtype", "=",...
Performs analogue to digital conversion of the physical signal stored in p_signal if expanded is False, or e_p_signal if expanded is True. The p_signal/e_p_signal, fmt, gain, and baseline fields must all be valid. If inplace is True, the adc will be performed inplace on the variable, the d_signal/e_d_signal attribute will be set, and the p_signal/e_p_signal field will be set to None. Parameters ---------- expanded : bool, optional Whether to transform the `e_p_signal` attribute (True) or the `p_signal` attribute (False). inplace : bool, optional Whether to automatically set the object's corresponding digital signal attribute and set the physical signal attribute to None (True), or to return the converted signal as a separate variable without changing the original physical signal attribute (False). Returns ------- d_signal : numpy array, optional The digital conversion of the signal. Either a 2d numpy array or a list of 1d numpy arrays. Examples: --------- >>> import wfdb >>> record = wfdb.rdsamp('sample-data/100') >>> d_signal = record.adc() >>> record.adc(inplace=True) >>> record.dac(inplace=True)
[ "Performs", "analogue", "to", "digital", "conversion", "of", "the", "physical", "signal", "stored", "in", "p_signal", "if", "expanded", "is", "False", "or", "e_p_signal", "if", "expanded", "is", "True", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L320-L416
235,189
MIT-LCP/wfdb-python
wfdb/io/_signal.py
SignalMixin.dac
def dac(self, expanded=False, return_res=64, inplace=False): """ Performs the digital to analogue conversion of the signal stored in `d_signal` if expanded is False, or `e_d_signal` if expanded is True. The d_signal/e_d_signal, fmt, gain, and baseline fields must all be valid. If inplace is True, the dac will be performed inplace on the variable, the p_signal/e_p_signal attribute will be set, and the d_signal/e_d_signal field will be set to None. Parameters ---------- expanded : bool, optional Whether to transform the `e_d_signal attribute` (True) or the `d_signal` attribute (False). inplace : bool, optional Whether to automatically set the object's corresponding physical signal attribute and set the digital signal attribute to None (True), or to return the converted signal as a separate variable without changing the original digital signal attribute (False). Returns ------- p_signal : numpy array, optional The physical conversion of the signal. Either a 2d numpy array or a list of 1d numpy arrays. Examples -------- >>> import wfdb >>> record = wfdb.rdsamp('sample-data/100', physical=False) >>> p_signal = record.dac() >>> record.dac(inplace=True) >>> record.adc(inplace=True) """ # The digital nan values for each channel d_nans = _digi_nan(self.fmt) # Get the appropriate float dtype if return_res == 64: floatdtype = 'float64' elif return_res == 32: floatdtype = 'float32' else: floatdtype = 'float16' # Do inplace conversion and set relevant variables. 
if inplace: if expanded: for ch in range(self.n_sig): # nan locations for the channel ch_nanlocs = self.e_d_signal[ch] == d_nans[ch] self.e_d_signal[ch] = self.e_d_signal[ch].astype(floatdtype, copy=False) np.subtract(self.e_d_signal[ch], self.baseline[ch], self.e_d_signal[ch]) np.divide(self.e_d_signal[ch], self.adc_gain[ch], self.e_d_signal[ch]) self.e_d_signal[ch][ch_nanlocs] = np.nan self.e_p_signal = self.e_d_signal self.e_d_signal = None else: nanlocs = self.d_signal == d_nans # Do float conversion immediately to avoid potential under/overflow # of efficient int dtype self.d_signal = self.d_signal.astype(floatdtype, copy=False) np.subtract(self.d_signal, self.baseline, self.d_signal) np.divide(self.d_signal, self.adc_gain, self.d_signal) self.d_signal[nanlocs] = np.nan self.p_signal = self.d_signal self.d_signal = None # Return the variable else: if expanded: p_signal = [] for ch in range(self.n_sig): # nan locations for the channel ch_nanlocs = self.e_d_signal[ch] == d_nans[ch] ch_p_signal = self.e_d_signal[ch].astype(floatdtype, copy=False) np.subtract(ch_p_signal, self.baseline[ch], ch_p_signal) np.divide(ch_p_signal, self.adc_gain[ch], ch_p_signal) ch_p_signal[ch_nanlocs] = np.nan p_signal.append(ch_p_signal) else: nanlocs = self.d_signal == d_nans p_signal = self.d_signal.astype(floatdtype, copy=False) np.subtract(p_signal, self.baseline, p_signal) np.divide(p_signal, self.adc_gain, p_signal) p_signal[nanlocs] = np.nan return p_signal
python
def dac(self, expanded=False, return_res=64, inplace=False): # The digital nan values for each channel d_nans = _digi_nan(self.fmt) # Get the appropriate float dtype if return_res == 64: floatdtype = 'float64' elif return_res == 32: floatdtype = 'float32' else: floatdtype = 'float16' # Do inplace conversion and set relevant variables. if inplace: if expanded: for ch in range(self.n_sig): # nan locations for the channel ch_nanlocs = self.e_d_signal[ch] == d_nans[ch] self.e_d_signal[ch] = self.e_d_signal[ch].astype(floatdtype, copy=False) np.subtract(self.e_d_signal[ch], self.baseline[ch], self.e_d_signal[ch]) np.divide(self.e_d_signal[ch], self.adc_gain[ch], self.e_d_signal[ch]) self.e_d_signal[ch][ch_nanlocs] = np.nan self.e_p_signal = self.e_d_signal self.e_d_signal = None else: nanlocs = self.d_signal == d_nans # Do float conversion immediately to avoid potential under/overflow # of efficient int dtype self.d_signal = self.d_signal.astype(floatdtype, copy=False) np.subtract(self.d_signal, self.baseline, self.d_signal) np.divide(self.d_signal, self.adc_gain, self.d_signal) self.d_signal[nanlocs] = np.nan self.p_signal = self.d_signal self.d_signal = None # Return the variable else: if expanded: p_signal = [] for ch in range(self.n_sig): # nan locations for the channel ch_nanlocs = self.e_d_signal[ch] == d_nans[ch] ch_p_signal = self.e_d_signal[ch].astype(floatdtype, copy=False) np.subtract(ch_p_signal, self.baseline[ch], ch_p_signal) np.divide(ch_p_signal, self.adc_gain[ch], ch_p_signal) ch_p_signal[ch_nanlocs] = np.nan p_signal.append(ch_p_signal) else: nanlocs = self.d_signal == d_nans p_signal = self.d_signal.astype(floatdtype, copy=False) np.subtract(p_signal, self.baseline, p_signal) np.divide(p_signal, self.adc_gain, p_signal) p_signal[nanlocs] = np.nan return p_signal
[ "def", "dac", "(", "self", ",", "expanded", "=", "False", ",", "return_res", "=", "64", ",", "inplace", "=", "False", ")", ":", "# The digital nan values for each channel", "d_nans", "=", "_digi_nan", "(", "self", ".", "fmt", ")", "# Get the appropriate float dt...
Performs the digital to analogue conversion of the signal stored in `d_signal` if expanded is False, or `e_d_signal` if expanded is True. The d_signal/e_d_signal, fmt, gain, and baseline fields must all be valid. If inplace is True, the dac will be performed inplace on the variable, the p_signal/e_p_signal attribute will be set, and the d_signal/e_d_signal field will be set to None. Parameters ---------- expanded : bool, optional Whether to transform the `e_d_signal attribute` (True) or the `d_signal` attribute (False). inplace : bool, optional Whether to automatically set the object's corresponding physical signal attribute and set the digital signal attribute to None (True), or to return the converted signal as a separate variable without changing the original digital signal attribute (False). Returns ------- p_signal : numpy array, optional The physical conversion of the signal. Either a 2d numpy array or a list of 1d numpy arrays. Examples -------- >>> import wfdb >>> record = wfdb.rdsamp('sample-data/100', physical=False) >>> p_signal = record.dac() >>> record.dac(inplace=True) >>> record.adc(inplace=True)
[ "Performs", "the", "digital", "to", "analogue", "conversion", "of", "the", "signal", "stored", "in", "d_signal", "if", "expanded", "is", "False", "or", "e_d_signal", "if", "expanded", "is", "True", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L419-L513
235,190
MIT-LCP/wfdb-python
wfdb/io/_signal.py
SignalMixin.calc_adc_params
def calc_adc_params(self): """ Compute appropriate adc_gain and baseline parameters for adc conversion, given the physical signal and the fmts. Returns ------- adc_gains : list List of calculated `adc_gain` values for each channel. baselines : list List of calculated `baseline` values for each channel. Notes ----- This is the mapping equation: `digital - baseline / adc_gain = physical` `physical * adc_gain + baseline = digital` The original WFDB library stores `baseline` as int32. Constrain abs(adc_gain) <= 2**31 == 2147483648 This function does carefully deal with overflow for calculated int32 `baseline` values, but does not consider over/underflow for calculated float `adc_gain` values. """ adc_gains = [] baselines = [] if np.where(np.isinf(self.p_signal))[0].size: raise ValueError('Signal contains inf. Cannot perform adc.') # min and max ignoring nans, unless whole channel is nan. # Should suppress warning message. minvals = np.nanmin(self.p_signal, axis=0) maxvals = np.nanmax(self.p_signal, axis=0) for ch in range(np.shape(self.p_signal)[1]): # Get the minimum and maximum (valid) storage values dmin, dmax = _digi_bounds(self.fmt[ch]) # add 1 because the lowest value is used to store nans dmin = dmin + 1 pmin = minvals[ch] pmax = maxvals[ch] # Figure out digital samples used to store physical samples # If the entire signal is nan, gain/baseline won't be used if pmin == np.nan: adc_gain = 1 baseline = 1 # If the signal is just one value, store one digital value. elif pmin == pmax: if pmin == 0: adc_gain = 1 baseline = 1 else: # All digital values are +1 or -1. Keep adc_gain > 0 adc_gain = abs(1 / pmin) baseline = 0 # Regular varied signal case. else: # The equation is: p = (d - b) / g # Approximately, pmax maps to dmax, and pmin maps to # dmin. Gradient will be equal to, or close to # delta(d) / delta(p), since intercept baseline has # to be an integer. 
# Constraint: baseline must be between +/- 2**31 adc_gain = (dmax-dmin) / (pmax-pmin) baseline = dmin - adc_gain*pmin # Make adjustments for baseline to be an integer # This up/down round logic of baseline is to ensure # there is no overshoot of dmax. Now pmax will map # to dmax or dmax-1 which is also fine. if pmin > 0: baseline = int(np.ceil(baseline)) else: baseline = int(np.floor(baseline)) # After baseline is set, adjust gain correspondingly.Set # the gain to map pmin to dmin, and p==0 to baseline. # In the case where pmin == 0 and dmin == baseline, # adc_gain is already correct. Avoid dividing by 0. if dmin != baseline: adc_gain = (dmin - baseline) / pmin # Remap signal if baseline exceeds boundaries. # This may happen if pmax < 0 if baseline > MAX_I32: # pmin maps to dmin, baseline maps to 2**31 - 1 # pmax will map to a lower value than before adc_gain = (MAX_I32) - dmin / abs(pmin) baseline = MAX_I32 # This may happen if pmin > 0 elif baseline < MIN_I32: # pmax maps to dmax, baseline maps to -2**31 + 1 adc_gain = (dmax - MIN_I32) / pmax baseline = MIN_I32 adc_gains.append(adc_gain) baselines.append(baseline) return (adc_gains, baselines)
python
def calc_adc_params(self): adc_gains = [] baselines = [] if np.where(np.isinf(self.p_signal))[0].size: raise ValueError('Signal contains inf. Cannot perform adc.') # min and max ignoring nans, unless whole channel is nan. # Should suppress warning message. minvals = np.nanmin(self.p_signal, axis=0) maxvals = np.nanmax(self.p_signal, axis=0) for ch in range(np.shape(self.p_signal)[1]): # Get the minimum and maximum (valid) storage values dmin, dmax = _digi_bounds(self.fmt[ch]) # add 1 because the lowest value is used to store nans dmin = dmin + 1 pmin = minvals[ch] pmax = maxvals[ch] # Figure out digital samples used to store physical samples # If the entire signal is nan, gain/baseline won't be used if pmin == np.nan: adc_gain = 1 baseline = 1 # If the signal is just one value, store one digital value. elif pmin == pmax: if pmin == 0: adc_gain = 1 baseline = 1 else: # All digital values are +1 or -1. Keep adc_gain > 0 adc_gain = abs(1 / pmin) baseline = 0 # Regular varied signal case. else: # The equation is: p = (d - b) / g # Approximately, pmax maps to dmax, and pmin maps to # dmin. Gradient will be equal to, or close to # delta(d) / delta(p), since intercept baseline has # to be an integer. # Constraint: baseline must be between +/- 2**31 adc_gain = (dmax-dmin) / (pmax-pmin) baseline = dmin - adc_gain*pmin # Make adjustments for baseline to be an integer # This up/down round logic of baseline is to ensure # there is no overshoot of dmax. Now pmax will map # to dmax or dmax-1 which is also fine. if pmin > 0: baseline = int(np.ceil(baseline)) else: baseline = int(np.floor(baseline)) # After baseline is set, adjust gain correspondingly.Set # the gain to map pmin to dmin, and p==0 to baseline. # In the case where pmin == 0 and dmin == baseline, # adc_gain is already correct. Avoid dividing by 0. if dmin != baseline: adc_gain = (dmin - baseline) / pmin # Remap signal if baseline exceeds boundaries. 
# This may happen if pmax < 0 if baseline > MAX_I32: # pmin maps to dmin, baseline maps to 2**31 - 1 # pmax will map to a lower value than before adc_gain = (MAX_I32) - dmin / abs(pmin) baseline = MAX_I32 # This may happen if pmin > 0 elif baseline < MIN_I32: # pmax maps to dmax, baseline maps to -2**31 + 1 adc_gain = (dmax - MIN_I32) / pmax baseline = MIN_I32 adc_gains.append(adc_gain) baselines.append(baseline) return (adc_gains, baselines)
[ "def", "calc_adc_params", "(", "self", ")", ":", "adc_gains", "=", "[", "]", "baselines", "=", "[", "]", "if", "np", ".", "where", "(", "np", ".", "isinf", "(", "self", ".", "p_signal", ")", ")", "[", "0", "]", ".", "size", ":", "raise", "ValueEr...
Compute appropriate adc_gain and baseline parameters for adc conversion, given the physical signal and the fmts. Returns ------- adc_gains : list List of calculated `adc_gain` values for each channel. baselines : list List of calculated `baseline` values for each channel. Notes ----- This is the mapping equation: `digital - baseline / adc_gain = physical` `physical * adc_gain + baseline = digital` The original WFDB library stores `baseline` as int32. Constrain abs(adc_gain) <= 2**31 == 2147483648 This function does carefully deal with overflow for calculated int32 `baseline` values, but does not consider over/underflow for calculated float `adc_gain` values.
[ "Compute", "appropriate", "adc_gain", "and", "baseline", "parameters", "for", "adc", "conversion", "given", "the", "physical", "signal", "and", "the", "fmts", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L516-L622
235,191
MIT-LCP/wfdb-python
wfdb/io/_signal.py
SignalMixin.wr_dat_files
def wr_dat_files(self, expanded=False, write_dir=''): """ Write each of the specified dat files """ # Get the set of dat files to be written, and # the channels to be written to each file. file_names, dat_channels = describe_list_indices(self.file_name) # Get the fmt and byte offset corresponding to each dat file DAT_FMTS = {} dat_offsets = {} for fn in file_names: DAT_FMTS[fn] = self.fmt[dat_channels[fn][0]] # byte_offset may not be present if self.byte_offset is None: dat_offsets[fn] = 0 else: dat_offsets[fn] = self.byte_offset[dat_channels[fn][0]] # Write the dat files if expanded: for fn in file_names: wr_dat_file(fn, DAT_FMTS[fn], None , dat_offsets[fn], True, [self.e_d_signal[ch] for ch in dat_channels[fn]], self.samps_per_frame, write_dir=write_dir) else: # Create a copy to prevent overwrite dsig = self.d_signal.copy() for fn in file_names: wr_dat_file(fn, DAT_FMTS[fn], dsig[:, dat_channels[fn][0]:dat_channels[fn][-1]+1], dat_offsets[fn], write_dir=write_dir)
python
def wr_dat_files(self, expanded=False, write_dir=''): # Get the set of dat files to be written, and # the channels to be written to each file. file_names, dat_channels = describe_list_indices(self.file_name) # Get the fmt and byte offset corresponding to each dat file DAT_FMTS = {} dat_offsets = {} for fn in file_names: DAT_FMTS[fn] = self.fmt[dat_channels[fn][0]] # byte_offset may not be present if self.byte_offset is None: dat_offsets[fn] = 0 else: dat_offsets[fn] = self.byte_offset[dat_channels[fn][0]] # Write the dat files if expanded: for fn in file_names: wr_dat_file(fn, DAT_FMTS[fn], None , dat_offsets[fn], True, [self.e_d_signal[ch] for ch in dat_channels[fn]], self.samps_per_frame, write_dir=write_dir) else: # Create a copy to prevent overwrite dsig = self.d_signal.copy() for fn in file_names: wr_dat_file(fn, DAT_FMTS[fn], dsig[:, dat_channels[fn][0]:dat_channels[fn][-1]+1], dat_offsets[fn], write_dir=write_dir)
[ "def", "wr_dat_files", "(", "self", ",", "expanded", "=", "False", ",", "write_dir", "=", "''", ")", ":", "# Get the set of dat files to be written, and", "# the channels to be written to each file.", "file_names", ",", "dat_channels", "=", "describe_list_indices", "(", "...
Write each of the specified dat files
[ "Write", "each", "of", "the", "specified", "dat", "files" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_signal.py#L667-L700
235,192
MIT-LCP/wfdb-python
wfdb/io/_header.py
wfdb_strptime
def wfdb_strptime(time_string): """ Given a time string in an acceptable wfdb format, return a datetime.time object. Valid formats: SS, MM:SS, HH:MM:SS, all with and without microsec. """ n_colons = time_string.count(':') if n_colons == 0: time_fmt = '%S' elif n_colons == 1: time_fmt = '%M:%S' elif n_colons == 2: time_fmt = '%H:%M:%S' if '.' in time_string: time_fmt += '.%f' return datetime.datetime.strptime(time_string, time_fmt).time()
python
def wfdb_strptime(time_string): n_colons = time_string.count(':') if n_colons == 0: time_fmt = '%S' elif n_colons == 1: time_fmt = '%M:%S' elif n_colons == 2: time_fmt = '%H:%M:%S' if '.' in time_string: time_fmt += '.%f' return datetime.datetime.strptime(time_string, time_fmt).time()
[ "def", "wfdb_strptime", "(", "time_string", ")", ":", "n_colons", "=", "time_string", ".", "count", "(", "':'", ")", "if", "n_colons", "==", "0", ":", "time_fmt", "=", "'%S'", "elif", "n_colons", "==", "1", ":", "time_fmt", "=", "'%M:%S'", "elif", "n_col...
Given a time string in an acceptable wfdb format, return a datetime.time object. Valid formats: SS, MM:SS, HH:MM:SS, all with and without microsec.
[ "Given", "a", "time", "string", "in", "an", "acceptable", "wfdb", "format", "return", "a", "datetime", ".", "time", "object", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_header.py#L653-L672
235,193
MIT-LCP/wfdb-python
wfdb/io/_header.py
_read_header_lines
def _read_header_lines(base_record_name, dir_name, pb_dir): """ Read the lines in a local or remote header file. Parameters ---------- base_record_name : str The base name of the WFDB record to be read, without any file extensions. dir_name : str The local directory location of the header file. This parameter is ignored if `pb_dir` is set. pb_dir : str Option used to stream data from Physiobank. The Physiobank database directory from which to find the required record files. eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb' pb_dir='mitdb'. Returns ------- header_lines : list List of strings corresponding to the header lines. comment_lines : list List of strings corresponding to the comment lines. """ file_name = base_record_name + '.hea' # Read local file if pb_dir is None: with open(os.path.join(dir_name, file_name), 'r') as fp: # Record line followed by signal/segment lines if any header_lines = [] # Comment lines comment_lines = [] for line in fp: line = line.strip() # Comment line if line.startswith('#'): comment_lines.append(line) # Non-empty non-comment line = header line. elif line: # Look for a comment in the line ci = line.find('#') if ci > 0: header_lines.append(line[:ci]) # comment on same line as header line comment_lines.append(line[ci:]) else: header_lines.append(line) # Read online header file else: header_lines, comment_lines = download._stream_header(file_name, pb_dir) return header_lines, comment_lines
python
def _read_header_lines(base_record_name, dir_name, pb_dir): file_name = base_record_name + '.hea' # Read local file if pb_dir is None: with open(os.path.join(dir_name, file_name), 'r') as fp: # Record line followed by signal/segment lines if any header_lines = [] # Comment lines comment_lines = [] for line in fp: line = line.strip() # Comment line if line.startswith('#'): comment_lines.append(line) # Non-empty non-comment line = header line. elif line: # Look for a comment in the line ci = line.find('#') if ci > 0: header_lines.append(line[:ci]) # comment on same line as header line comment_lines.append(line[ci:]) else: header_lines.append(line) # Read online header file else: header_lines, comment_lines = download._stream_header(file_name, pb_dir) return header_lines, comment_lines
[ "def", "_read_header_lines", "(", "base_record_name", ",", "dir_name", ",", "pb_dir", ")", ":", "file_name", "=", "base_record_name", "+", "'.hea'", "# Read local file", "if", "pb_dir", "is", "None", ":", "with", "open", "(", "os", ".", "path", ".", "join", ...
Read the lines in a local or remote header file. Parameters ---------- base_record_name : str The base name of the WFDB record to be read, without any file extensions. dir_name : str The local directory location of the header file. This parameter is ignored if `pb_dir` is set. pb_dir : str Option used to stream data from Physiobank. The Physiobank database directory from which to find the required record files. eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb' pb_dir='mitdb'. Returns ------- header_lines : list List of strings corresponding to the header lines. comment_lines : list List of strings corresponding to the comment lines.
[ "Read", "the", "lines", "in", "a", "local", "or", "remote", "header", "file", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_header.py#L675-L730
235,194
MIT-LCP/wfdb-python
wfdb/io/_header.py
_parse_record_line
def _parse_record_line(record_line): """ Extract fields from a record line string into a dictionary """ # Dictionary for record fields record_fields = {} # Read string fields from record line (record_fields['record_name'], record_fields['n_seg'], record_fields['n_sig'], record_fields['fs'], record_fields['counter_freq'], record_fields['base_counter'], record_fields['sig_len'], record_fields['base_time'], record_fields['base_date']) = re.findall(_rx_record, record_line)[0] for field in RECORD_SPECS.index: # Replace empty strings with their read defaults (which are # mostly None) if record_fields[field] == '': record_fields[field] = RECORD_SPECS.loc[field, 'read_default'] # Typecast non-empty strings for non-string (numerical/datetime) # fields else: if RECORD_SPECS.loc[field, 'allowed_types'] == int_types: record_fields[field] = int(record_fields[field]) elif RECORD_SPECS.loc[field, 'allowed_types'] == float_types: record_fields[field] = float(record_fields[field]) # cast fs to an int if it is close if field == 'fs': fs = float(record_fields['fs']) if round(fs, 8) == float(int(fs)): fs = int(fs) record_fields['fs'] = fs elif field == 'base_time': record_fields['base_time'] = wfdb_strptime(record_fields['base_time']) elif field == 'base_date': record_fields['base_date'] = datetime.datetime.strptime( record_fields['base_date'], '%d/%m/%Y').date() # This is not a standard wfdb field, but is useful to set. if record_fields['base_date'] and record_fields['base_time']: record_fields['base_datetime'] = datetime.datetime.combine( record_fields['base_date'], record_fields['base_time']) return record_fields
python
def _parse_record_line(record_line): # Dictionary for record fields record_fields = {} # Read string fields from record line (record_fields['record_name'], record_fields['n_seg'], record_fields['n_sig'], record_fields['fs'], record_fields['counter_freq'], record_fields['base_counter'], record_fields['sig_len'], record_fields['base_time'], record_fields['base_date']) = re.findall(_rx_record, record_line)[0] for field in RECORD_SPECS.index: # Replace empty strings with their read defaults (which are # mostly None) if record_fields[field] == '': record_fields[field] = RECORD_SPECS.loc[field, 'read_default'] # Typecast non-empty strings for non-string (numerical/datetime) # fields else: if RECORD_SPECS.loc[field, 'allowed_types'] == int_types: record_fields[field] = int(record_fields[field]) elif RECORD_SPECS.loc[field, 'allowed_types'] == float_types: record_fields[field] = float(record_fields[field]) # cast fs to an int if it is close if field == 'fs': fs = float(record_fields['fs']) if round(fs, 8) == float(int(fs)): fs = int(fs) record_fields['fs'] = fs elif field == 'base_time': record_fields['base_time'] = wfdb_strptime(record_fields['base_time']) elif field == 'base_date': record_fields['base_date'] = datetime.datetime.strptime( record_fields['base_date'], '%d/%m/%Y').date() # This is not a standard wfdb field, but is useful to set. if record_fields['base_date'] and record_fields['base_time']: record_fields['base_datetime'] = datetime.datetime.combine( record_fields['base_date'], record_fields['base_time']) return record_fields
[ "def", "_parse_record_line", "(", "record_line", ")", ":", "# Dictionary for record fields", "record_fields", "=", "{", "}", "# Read string fields from record line", "(", "record_fields", "[", "'record_name'", "]", ",", "record_fields", "[", "'n_seg'", "]", ",", "record...
Extract fields from a record line string into a dictionary
[ "Extract", "fields", "from", "a", "record", "line", "string", "into", "a", "dictionary" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_header.py#L733-L777
235,195
MIT-LCP/wfdb-python
wfdb/io/_header.py
_parse_signal_lines
def _parse_signal_lines(signal_lines): """ Extract fields from a list of signal line strings into a dictionary. """ n_sig = len(signal_lines) # Dictionary for signal fields signal_fields = {} # Each dictionary field is a list for field in SIGNAL_SPECS.index: signal_fields[field] = n_sig * [None] # Read string fields from signal line for ch in range(n_sig): (signal_fields['file_name'][ch], signal_fields['fmt'][ch], signal_fields['samps_per_frame'][ch], signal_fields['skew'][ch], signal_fields['byte_offset'][ch], signal_fields['adc_gain'][ch], signal_fields['baseline'][ch], signal_fields['units'][ch], signal_fields['adc_res'][ch], signal_fields['adc_zero'][ch], signal_fields['init_value'][ch], signal_fields['checksum'][ch], signal_fields['block_size'][ch], signal_fields['sig_name'][ch]) = _rx_signal.findall(signal_lines[ch])[0] for field in SIGNAL_SPECS.index: # Replace empty strings with their read defaults (which are mostly None) # Note: Never set a field to None. [None]* n_sig is accurate, indicating # that different channels can be present or missing. if signal_fields[field][ch] == '': signal_fields[field][ch] = SIGNAL_SPECS.loc[field, 'read_default'] # Special case: missing baseline defaults to ADCzero if present if field == 'baseline' and signal_fields['adc_zero'][ch] != '': signal_fields['baseline'][ch] = int(signal_fields['adc_zero'][ch]) # Typecast non-empty strings for numerical fields else: if SIGNAL_SPECS.loc[field, 'allowed_types'] is int_types: signal_fields[field][ch] = int(signal_fields[field][ch]) elif SIGNAL_SPECS.loc[field, 'allowed_types'] is float_types: signal_fields[field][ch] = float(signal_fields[field][ch]) # Special case: adc_gain of 0 means 200 if field == 'adc_gain' and signal_fields['adc_gain'][ch] == 0: signal_fields['adc_gain'][ch] = 200. return signal_fields
python
def _parse_signal_lines(signal_lines): n_sig = len(signal_lines) # Dictionary for signal fields signal_fields = {} # Each dictionary field is a list for field in SIGNAL_SPECS.index: signal_fields[field] = n_sig * [None] # Read string fields from signal line for ch in range(n_sig): (signal_fields['file_name'][ch], signal_fields['fmt'][ch], signal_fields['samps_per_frame'][ch], signal_fields['skew'][ch], signal_fields['byte_offset'][ch], signal_fields['adc_gain'][ch], signal_fields['baseline'][ch], signal_fields['units'][ch], signal_fields['adc_res'][ch], signal_fields['adc_zero'][ch], signal_fields['init_value'][ch], signal_fields['checksum'][ch], signal_fields['block_size'][ch], signal_fields['sig_name'][ch]) = _rx_signal.findall(signal_lines[ch])[0] for field in SIGNAL_SPECS.index: # Replace empty strings with their read defaults (which are mostly None) # Note: Never set a field to None. [None]* n_sig is accurate, indicating # that different channels can be present or missing. if signal_fields[field][ch] == '': signal_fields[field][ch] = SIGNAL_SPECS.loc[field, 'read_default'] # Special case: missing baseline defaults to ADCzero if present if field == 'baseline' and signal_fields['adc_zero'][ch] != '': signal_fields['baseline'][ch] = int(signal_fields['adc_zero'][ch]) # Typecast non-empty strings for numerical fields else: if SIGNAL_SPECS.loc[field, 'allowed_types'] is int_types: signal_fields[field][ch] = int(signal_fields[field][ch]) elif SIGNAL_SPECS.loc[field, 'allowed_types'] is float_types: signal_fields[field][ch] = float(signal_fields[field][ch]) # Special case: adc_gain of 0 means 200 if field == 'adc_gain' and signal_fields['adc_gain'][ch] == 0: signal_fields['adc_gain'][ch] = 200. return signal_fields
[ "def", "_parse_signal_lines", "(", "signal_lines", ")", ":", "n_sig", "=", "len", "(", "signal_lines", ")", "# Dictionary for signal fields", "signal_fields", "=", "{", "}", "# Each dictionary field is a list", "for", "field", "in", "SIGNAL_SPECS", ".", "index", ":", ...
Extract fields from a list of signal line strings into a dictionary.
[ "Extract", "fields", "from", "a", "list", "of", "signal", "line", "strings", "into", "a", "dictionary", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_header.py#L780-L824
235,196
MIT-LCP/wfdb-python
wfdb/io/_header.py
_read_segment_lines
def _read_segment_lines(segment_lines): """ Extract fields from segment line strings into a dictionary """ # Dictionary for segment fields segment_fields = {} # Each dictionary field is a list for field in SEGMENT_SPECS.index: segment_fields[field] = [None] * len(segment_lines) # Read string fields from signal line for i in range(len(segment_lines)): (segment_fields['seg_name'][i], segment_fields['seg_len'][i]) = _rx_segment.findall(segment_lines[i])[0] # Typecast strings for numerical field if field == 'seg_len': segment_fields['seg_len'][i] = int(segment_fields['seg_len'][i]) return segment_fields
python
def _read_segment_lines(segment_lines): # Dictionary for segment fields segment_fields = {} # Each dictionary field is a list for field in SEGMENT_SPECS.index: segment_fields[field] = [None] * len(segment_lines) # Read string fields from signal line for i in range(len(segment_lines)): (segment_fields['seg_name'][i], segment_fields['seg_len'][i]) = _rx_segment.findall(segment_lines[i])[0] # Typecast strings for numerical field if field == 'seg_len': segment_fields['seg_len'][i] = int(segment_fields['seg_len'][i]) return segment_fields
[ "def", "_read_segment_lines", "(", "segment_lines", ")", ":", "# Dictionary for segment fields", "segment_fields", "=", "{", "}", "# Each dictionary field is a list", "for", "field", "in", "SEGMENT_SPECS", ".", "index", ":", "segment_fields", "[", "field", "]", "=", "...
Extract fields from segment line strings into a dictionary
[ "Extract", "fields", "from", "segment", "line", "strings", "into", "a", "dictionary" ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_header.py#L827-L847
235,197
MIT-LCP/wfdb-python
wfdb/io/_header.py
BaseHeaderMixin.get_write_subset
def get_write_subset(self, spec_type): """ Get a set of fields used to write the header; either 'record' or 'signal' specification fields. Helper function for `get_write_fields`. Gets the default required fields, the user defined fields, and their dependencies. Parameters ---------- spec_type : str The set of specification fields desired. Either 'record' or 'signal'. Returns ------- write_fields : list or dict For record fields, returns a list of all fields needed. For signal fields, it returns a dictionary of all fields needed, with keys = field and value = list of channels that must be present for the field. """ if spec_type == 'record': write_fields = [] record_specs = RECORD_SPECS.copy() # Remove the n_seg requirement for single segment items if not hasattr(self, 'n_seg'): record_specs.drop('n_seg', inplace=True) for field in record_specs.index[-1::-1]: # Continue if the field has already been included if field in write_fields: continue # If the field is required by default or has been # defined by the user if (record_specs.loc[field, 'write_required'] or getattr(self, field) is not None): req_field = field # Add the field and its recursive dependencies while req_field is not None: write_fields.append(req_field) req_field = record_specs.loc[req_field, 'dependency'] # Add comments if any if getattr(self, 'comments') is not None: write_fields.append('comments') # signal spec field. Need to return a potentially different list for each channel. 
elif spec_type == 'signal': # List of lists for each channel write_fields = [] signal_specs = SIGNAL_SPECS.copy() for ch in range(self.n_sig): # The fields needed for this channel write_fields_ch = [] for field in signal_specs.index[-1::-1]: if field in write_fields_ch: continue item = getattr(self, field) # If the field is required by default or has been defined by the user if signal_specs.loc[field, 'write_required'] or (item is not None and item[ch] is not None): req_field = field # Add the field and its recursive dependencies while req_field is not None: write_fields_ch.append(req_field) req_field = signal_specs.loc[req_field, 'dependency'] write_fields.append(write_fields_ch) # Convert the list of lists to a single dictionary. # keys = field and value = list of channels in which the # field is required. dict_write_fields = {} # For fields present in any channel: for field in set([i for write_fields_ch in write_fields for i in write_fields_ch]): dict_write_fields[field] = [] for ch in range(self.n_sig): if field in write_fields[ch]: dict_write_fields[field].append(ch) write_fields = dict_write_fields return write_fields
python
def get_write_subset(self, spec_type): if spec_type == 'record': write_fields = [] record_specs = RECORD_SPECS.copy() # Remove the n_seg requirement for single segment items if not hasattr(self, 'n_seg'): record_specs.drop('n_seg', inplace=True) for field in record_specs.index[-1::-1]: # Continue if the field has already been included if field in write_fields: continue # If the field is required by default or has been # defined by the user if (record_specs.loc[field, 'write_required'] or getattr(self, field) is not None): req_field = field # Add the field and its recursive dependencies while req_field is not None: write_fields.append(req_field) req_field = record_specs.loc[req_field, 'dependency'] # Add comments if any if getattr(self, 'comments') is not None: write_fields.append('comments') # signal spec field. Need to return a potentially different list for each channel. elif spec_type == 'signal': # List of lists for each channel write_fields = [] signal_specs = SIGNAL_SPECS.copy() for ch in range(self.n_sig): # The fields needed for this channel write_fields_ch = [] for field in signal_specs.index[-1::-1]: if field in write_fields_ch: continue item = getattr(self, field) # If the field is required by default or has been defined by the user if signal_specs.loc[field, 'write_required'] or (item is not None and item[ch] is not None): req_field = field # Add the field and its recursive dependencies while req_field is not None: write_fields_ch.append(req_field) req_field = signal_specs.loc[req_field, 'dependency'] write_fields.append(write_fields_ch) # Convert the list of lists to a single dictionary. # keys = field and value = list of channels in which the # field is required. 
dict_write_fields = {} # For fields present in any channel: for field in set([i for write_fields_ch in write_fields for i in write_fields_ch]): dict_write_fields[field] = [] for ch in range(self.n_sig): if field in write_fields[ch]: dict_write_fields[field].append(ch) write_fields = dict_write_fields return write_fields
[ "def", "get_write_subset", "(", "self", ",", "spec_type", ")", ":", "if", "spec_type", "==", "'record'", ":", "write_fields", "=", "[", "]", "record_specs", "=", "RECORD_SPECS", ".", "copy", "(", ")", "# Remove the n_seg requirement for single segment items", "if", ...
Get a set of fields used to write the header; either 'record' or 'signal' specification fields. Helper function for `get_write_fields`. Gets the default required fields, the user defined fields, and their dependencies. Parameters ---------- spec_type : str The set of specification fields desired. Either 'record' or 'signal'. Returns ------- write_fields : list or dict For record fields, returns a list of all fields needed. For signal fields, it returns a dictionary of all fields needed, with keys = field and value = list of channels that must be present for the field.
[ "Get", "a", "set", "of", "fields", "used", "to", "write", "the", "header", ";", "either", "record", "or", "signal", "specification", "fields", ".", "Helper", "function", "for", "get_write_fields", ".", "Gets", "the", "default", "required", "fields", "the", "...
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_header.py#L147-L233
235,198
MIT-LCP/wfdb-python
wfdb/io/_header.py
HeaderMixin.set_defaults
def set_defaults(self): """ Set defaults for fields needed to write the header if they have defaults. Notes ----- - This is NOT called by `rdheader`. It is only automatically called by the gateway `wrsamp` for convenience. - This is also not called by `wrheader` since it is supposed to be an explicit function. - This is not responsible for initializing the attributes. That is done by the constructor. See also `set_p_features` and `set_d_features`. """ rfields, sfields = self.get_write_fields() for f in rfields: self.set_default(f) for f in sfields: self.set_default(f)
python
def set_defaults(self): rfields, sfields = self.get_write_fields() for f in rfields: self.set_default(f) for f in sfields: self.set_default(f)
[ "def", "set_defaults", "(", "self", ")", ":", "rfields", ",", "sfields", "=", "self", ".", "get_write_fields", "(", ")", "for", "f", "in", "rfields", ":", "self", ".", "set_default", "(", "f", ")", "for", "f", "in", "sfields", ":", "self", ".", "set_...
Set defaults for fields needed to write the header if they have defaults. Notes ----- - This is NOT called by `rdheader`. It is only automatically called by the gateway `wrsamp` for convenience. - This is also not called by `wrheader` since it is supposed to be an explicit function. - This is not responsible for initializing the attributes. That is done by the constructor. See also `set_p_features` and `set_d_features`.
[ "Set", "defaults", "for", "fields", "needed", "to", "write", "the", "header", "if", "they", "have", "defaults", "." ]
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_header.py#L241-L262
235,199
MIT-LCP/wfdb-python
wfdb/io/_header.py
HeaderMixin.get_write_fields
def get_write_fields(self): """ Get the list of fields used to write the header, separating record and signal specification fields. Returns the default required fields, the user defined fields, and their dependencies. Does NOT include `d_signal` or `e_d_signal`. Returns ------- rec_write_fields : list Record specification fields to be written. Includes 'comment' if present. sig_write_fields : dict Dictionary of signal specification fields to be written, with values equal to the channels that need to be present for each field. """ # Record specification fields rec_write_fields = self.get_write_subset('record') # Add comments if any if self.comments != None: rec_write_fields.append('comments') # Get required signal fields if signals are present. self.check_field('n_sig') if self.n_sig > 0: sig_write_fields = self.get_write_subset('signal') else: sig_write_fields = None return rec_write_fields, sig_write_fields
python
def get_write_fields(self): # Record specification fields rec_write_fields = self.get_write_subset('record') # Add comments if any if self.comments != None: rec_write_fields.append('comments') # Get required signal fields if signals are present. self.check_field('n_sig') if self.n_sig > 0: sig_write_fields = self.get_write_subset('signal') else: sig_write_fields = None return rec_write_fields, sig_write_fields
[ "def", "get_write_fields", "(", "self", ")", ":", "# Record specification fields", "rec_write_fields", "=", "self", ".", "get_write_subset", "(", "'record'", ")", "# Add comments if any", "if", "self", ".", "comments", "!=", "None", ":", "rec_write_fields", ".", "ap...
Get the list of fields used to write the header, separating record and signal specification fields. Returns the default required fields, the user defined fields, and their dependencies. Does NOT include `d_signal` or `e_d_signal`. Returns ------- rec_write_fields : list Record specification fields to be written. Includes 'comment' if present. sig_write_fields : dict Dictionary of signal specification fields to be written, with values equal to the channels that need to be present for each field.
[ "Get", "the", "list", "of", "fields", "used", "to", "write", "the", "header", "separating", "record", "and", "signal", "specification", "fields", ".", "Returns", "the", "default", "required", "fields", "the", "user", "defined", "fields", "and", "their", "depen...
cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c
https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_header.py#L306-L342