_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def feature_selection(feat_select, X, y):
    """ Implements various kinds of feature selection.

    Args:
        feat_select (str): Selection method: 'N-best' (top N features by
            univariate score via SelectKBest) or 'N-randombest' (N features
            chosen at random), where N is the integer prefix (e.g. '10-best').
        X: 2D array of samples x features.
        y: 1D array of class labels.

    Returns:
        Indices of the selected features (array for '-best', list for
        '-randombest').

    Raises:
        ValueError: If feat_select matches no known selection method.
    """
    # K-best univariate selection
    if re.match('.*-best', feat_select) is not None:
        n = int(feat_select.split('-')[0])
        selector = SelectKBest(k=n)
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UserWarning)
            # Bug fix: get_support() returns a boolean ARRAY; the original
            # compared it with `is True`, which is always False and produced
            # wrong indices. Use the boolean mask directly.
            features_selected = np.where(
                selector.fit(X, y).get_support())[0]
    elif re.match('.*-randombest', feat_select) is not None:
        n = int(feat_select.split('-')[0])
        from random import shuffle
        # Bug fix: range() is not shuffleable in Python 3; materialize first.
        features = list(range(0, X.shape[1]))
        shuffle(features)
        features_selected = features[:n]
    else:
        # Previously an unrecognized method fell through to an
        # UnboundLocalError; fail fast with a clear message instead.
        raise ValueError(
            "Unrecognized feature selection method: %s" % feat_select)
    return features_selected
"resource": ""
} |
def get_studies_by_regions(dataset, masks, threshold=0.08, remove_overlap=True,
                           studies=None, features=None,
                           regularization="scale"):
    """ Set up data for a classification task given a set of masks.

    Given a set of masks, retrieves studies associated with each mask at
    the specified threshold, optionally removes overlap and filters by
    studies and features, and returns a studies by feature matrix (X) and
    class labels (y).

    Args:
        dataset: a Neurosynth dataset.
        masks: a list of paths to Nifti masks.
        threshold: proportion of voxels that must be active within a mask
            for a study to be included.
        remove_overlap: A boolean indicating if studies that appear in
            more than one mask should be excluded.
        studies: An optional list of study names used to constrain the set
            used in classification. If None, all studies are used.
        features: An optional list of feature names used to constrain the
            set used in classification. If None, all features are used.
        regularization: Optional string passed to regularize() describing
            how X should be regularized; falsy to skip.

    Returns:
        A tuple (X, y) of np arrays, where X is a studies x features
        matrix and y is a vector of class labels.

    Raises:
        OSError: If any of the mask files cannot be loaded.
    """
    import nibabel as nib
    import os
    # Load masks using NiBabel
    try:
        loaded_masks = [nib.load(os.path.relpath(m)) for m in masks]
    except OSError:
        # Bug fix: the original only printed a message and fell through,
        # causing a confusing NameError on loaded_masks below. Re-raise.
        print('Error loading masks. Check the path')
        raise
    # Get a list of studies that activate for each mask file--i.e., a list
    # of lists
    grouped_ids = [dataset.get_studies(mask=m, activation_threshold=threshold)
                   for m in loaded_masks]
    # Flattened ids
    flat_ids = reduce(lambda a, b: a + b, grouped_ids)
    # Remove studies that appear in more than one mask
    if remove_overlap:
        import collections
        counts = collections.Counter(flat_ids)
        # Use a set for O(1) membership tests (the original filtered
        # against a list, which is quadratic).
        unique_ids = {id for (id, count) in counts.items() if count == 1}
        grouped_ids = [[x for x in m if x in unique_ids]
                       for m in grouped_ids]
    # Create class labels (y): each mask's position is its class index
    y = [[idx] * len(ids) for (idx, ids) in enumerate(grouped_ids)]
    y = reduce(lambda a, b: a + b, y)  # Flatten
    y = np.array(y)
    # Extract the feature set for each class separately, then stack
    X = [dataset.get_feature_data(ids=group_ids, features=features)
         for group_ids in grouped_ids]
    X = np.vstack(tuple(X))
    if regularization:
        X = regularize(X, method=regularization)
    return (X, y)
"resource": ""
} |
def get_feature_order(dataset, features):
    """ Return the positions at which the requested features appear in the
    dataset's full feature list. """
    names = dataset.get_feature_names()
    order = []
    for feature in features:
        order.append(names.index(feature))
    return order
"resource": ""
} |
def classify_regions(dataset, masks, method='ERF', threshold=0.08,
                     remove_overlap=True, regularization='scale',
                     output='summary', studies=None, features=None,
                     class_weight='auto', classifier=None,
                     cross_val='4-Fold', param_grid=None, scoring='accuracy'):
    """ Perform classification on specified regions.

    Retrieves the studies associated with each mask at the given threshold
    (optionally removing overlap and filtering by studies and features),
    then trains an algorithm to classify studies based on features and
    tests its performance.

    Args:
        dataset: a Neurosynth dataset.
        masks: a list of paths to Nifti masks.
        method: which classifier to use when `classifier` is None:
            'SVM' (support vector classifier, rbf kernel),
            'ERF' (extremely randomized forest), or
            'Dummy' (stratified-class dummy predictor).
        threshold: proportion of voxels that must be active within a mask
            for a study to be included.
        remove_overlap: when True, studies appearing in more than one mask
            are excluded.
        regularization: type of regularization applied to X ('scale' for
            unit scaling without demeaning), or None for none.
        output: 'summary' (dict with score and n), 'summary_clf' (summary
            plus classifier), or 'clf' (classifier only; note that using
            cross-validation without a grid returns an untrained
            classifier).
        studies: optional list of study names to restrict the analysis.
        features: optional list of feature names to restrict the analysis.
        class_weight: class-weighting scheme passed to the classifier.
        classifier: optional scikit-learn classifier overriding `method`.
        cross_val: cross-validation spec (e.g. '4-Fold'); may also be a
            scikit-learn cross-validator instance.
        param_grid: optional parameter grid for GridSearchCV; None
            disables grid search.
        scoring: scoring metric used during evaluation.

    Returns:
        Whatever classify() returns for the requested `output` mode.
    """
    X, y = get_studies_by_regions(dataset, masks, threshold, remove_overlap,
                                  studies, features,
                                  regularization=regularization)
    return classify(X, y, method, classifier, output, cross_val,
                    class_weight, scoring=scoring, param_grid=param_grid)
"resource": ""
} |
def classify(X, y, clf_method='ERF', classifier=None, output='summary_clf',
             cross_val=None, class_weight=None, regularization=None,
             param_grid=None, scoring='accuracy', refit_all=True,
             feat_select=None):
    """ Wrapper for scikit-learn classification functions.

    Implements various types of classification and cross validation.

    Args:
        X: 2D array of samples x features.
        y: 1D array of class labels.
        clf_method: name of a pre-configured classifier ('SVM', 'ERF',
            'Dummy') used when `classifier` is None.
        classifier: optional scikit-learn classifier to use instead.
        output: 'clf', 'summary', or 'summary_clf' (see Returns).
        cross_val: cross-validation spec (e.g. '4-Fold') or None to fit
            and score on the full data (no separate scoring function
            supported in that case).
        class_weight: class-weighting scheme passed to the classifier.
        regularization: currently unused here; regularize X beforehand.
        param_grid: optional grid for GridSearchCV.
        scoring: scoring metric used with cross-validation.
        refit_all: currently unused.
        feat_select: optional feature-selection spec (e.g. '10-best').

    Returns:
        'clf': the fitted Classifier wrapper.
        'summary': dict with 'score' and per-class counts 'n'.
        'summary_clf' (default): summary plus the classifier, the selected
            features and the predictions (None when not applicable).
    """
    # Build classifier wrapper
    clf = Classifier(clf_method, classifier, param_grid)
    # Fit & test model with or without cross-validation
    if cross_val is not None:
        score = clf.cross_val_fit(X, y, cross_val, scoring=scoring,
                                  feat_select=feat_select,
                                  class_weight=class_weight)
    else:
        # Does not support a custom scoring function
        score = clf.fit(X, y, class_weight=class_weight).score(X, y)
    from collections import Counter
    if output == 'clf':
        return clf
    result = {'score': score, 'n': dict(Counter(y))}
    if output == 'summary_clf':
        result['clf'] = clf
        # Robustness fix: these attributes only exist when feature
        # selection / prediction tracking was actually used; the original
        # raised AttributeError here. Default to None instead.
        result['features_selected'] = getattr(clf, 'features_selected', None)
        result['predictions'] = getattr(clf, 'predictions', None)
    return result
"resource": ""
} |
def fit(self, X, y, cv=None, class_weight='auto'):
    """ Fit the wrapped classifier to X and outcomes y.

    Stores X and y on the instance, applies the requested class
    weighting, then fits and returns the underlying estimator.
    """
    self.X, self.y = X, y
    self.set_class_weight(class_weight=class_weight, y=y)
    self.clf = self.clf.fit(X, y)
    return self.clf
"resource": ""
} |
def set_class_weight(self, class_weight='auto', y=None):
    """ Set the underlying classifier's class_weight to match y.

    With class_weight=None the weight is cleared; with 'auto', weights
    are derived from the label frequencies in y (classes paired with the
    reversed frequency vector). Classifiers that reject class_weight are
    left untouched (a warning is emitted in the 'auto' case).
    """
    if class_weight is None:
        try:
            self.clf.set_params(class_weight=None)
        except ValueError:
            pass
    elif class_weight == 'auto':
        freqs = np.bincount(y)
        present = np.nonzero(freqs)[0]
        freqs = freqs / float(freqs.sum())
        weights = dict(zip(present[::-1], freqs[present]))
        try:
            self.clf.set_params(class_weight=weights)
        except ValueError:
            import warnings
            warnings.warn(
                "Tried to set class_weight, but failed. The classifier "
                "probably doesn't support it")
"resource": ""
} |
def cross_val_fit(self, X, y, cross_val='4-Fold', scoring='accuracy',
                  feat_select=None, class_weight='auto'):
    """ Fit X to outcomes y using the wrapped classifier, evaluated with
    cross-validation.

    Args:
        X: 2D array of samples x features.
        y: 1D array of class labels.
        cross_val: Either a string of the form 'N-Fold' (stratified
            N-fold CV) or an already-built scikit-learn cross-validator.
        scoring: Scoring metric name passed to scikit-learn.
        feat_select: Optional feature-selection spec (e.g. '10-best')
            understood by feature_selection(); ignored (with a warning)
            when the wrapped classifier is a GridSearchCV.
        class_weight: Class weighting passed to set_class_weight().

    Returns:
        The mean cross-validated score.

    Raises:
        Exception: If cross_val is a string that is not of the form
            'N-Fold'.
    """
    from sklearn import cross_validation
    self.X = X
    self.y = y
    self.set_class_weight(class_weight=class_weight, y=y)
    # Set cross validator
    if isinstance(cross_val, string_types):
        if re.match('.*-Fold', cross_val) is not None:
            n = int(cross_val.split('-')[0])
            self.cver = cross_validation.StratifiedKFold(self.y, n)
        else:
            raise Exception('Unrecognized cross validation method')
    else:
        # Assume the caller passed a ready-made cross-validator object.
        self.cver = cross_val
    if feat_select is not None:
        # Per-fold selections are accumulated here by feat_select_cvs().
        self.features_selected = []
    # Perform cross-validated classification
    from sklearn.grid_search import GridSearchCV
    if isinstance(self.clf, GridSearchCV):
        import warnings
        if feat_select is not None:
            warnings.warn(
                "Cross-validated feature selection not supported with "
                "GridSearchCV")
        self.clf.set_params(cv=self.cver, scoring=scoring)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UserWarning)
            self.clf = self.clf.fit(X, y)
        # best_score_ is already the mean CV score of the best params.
        self.cvs = self.clf.best_score_
    else:
        self.cvs = self.feat_select_cvs(
            feat_select=feat_select, scoring=scoring)
    if feat_select is not None:
        # NOTE(review): selection is re-run here on ALL of X before the
        # final refit below -- presumably intentional for the final
        # model, but it is not fold-independent; confirm.
        fs = feature_selection(
            feat_select, X, y)
        self.features_selected.append(fs)
        X = X[:, fs]
    # Refit on the full dataset; the mean CV score is what gets returned.
    self.clf.fit(X, y)
    return self.cvs.mean()
"resource": ""
} |
def fit_dataset(self, dataset, y, features=None,
                feature_type='features'):
    """ Fit the classifier to y using either features or voxels pulled
    from the given dataset. """
    # Select the source table, then orient its dense matrix with rot90
    # exactly as before.
    if feature_type == 'features':
        source = dataset.feature_table
    elif feature_type == 'voxels':
        source = dataset.image_table
    X = np.rot90(source.data.toarray())
    self.sk_classifier.fit(X, y)
"resource": ""
} |
def average_within_regions(dataset, regions, masker=None, threshold=None,
                           remove_zero=True):
    """ Aggregate over all voxels within each ROI in the input image.

    Takes a Dataset (or a voxels x studies numpy array) and an image that
    defines distinct regions, and returns a numpy matrix of ROIs x
    mappables, where the value at each ROI is the proportion of active
    voxels in that ROI. Each distinct ROI must have a unique value in the
    image; non-contiguous voxels with the same value are assigned to the
    same ROI.

    Args:
        dataset: Either a Dataset instance from which image data are
            extracted, or a numpy array with voxels in rows and
            features/studies in columns. The number of voxels must equal
            the length of the vectorized image mask in the regions image.
        regions: An image defining the boundaries of the regions to use.
            Can be one of:
            1) A string name of the NIFTI or Analyze-format image
            2) A NiBabel SpatialImage
            3) A list of NiBabel images
            4) A 1D numpy array of the same length as the mask vector in
               the Dataset's current Masker.
        masker: Optional masker used to load the image when regions is
            not a numpy array. Must be passed if dataset is a numpy
            array.
        threshold: An optional float in [0, 1] or integer. When passed,
            the output is binarized: a float is the minimum proportion of
            active voxels in an ROI for a study to count as active, an
            integer the minimum number of active voxels.
        remove_zero: When True, voxels with value 0 are not treated as a
            separate ROI and are ignored.

    Returns:
        A 2D numpy array with ROIs in rows and mappables in columns.

    Raises:
        ValueError: If dataset is a numpy array, regions is not, and no
            masker was supplied.
    """
    if masker is None:
        if isinstance(dataset, Dataset):
            masker = dataset.masker
        elif not type(regions).__module__.startswith('numpy'):
            raise ValueError(
                "If dataset is a numpy array and regions is not a numpy "
                "array, a masker must be provided.")
    if not type(regions).__module__.startswith('numpy'):
        regions = masker.mask(regions)
    if isinstance(dataset, Dataset):
        dataset = dataset.get_image_data(dense=False)
    # If multiple images are passed (2D), give each column a unique value
    if regions.ndim == 2:
        # Bug fix: copy before writing weights, so the caller's array is
        # not silently mutated (the original assigned m = regions and
        # then wrote into it in place).
        m = regions.copy()
        for i in range(m.shape[1]):
            _nz = np.nonzero(m[:, i])[0]
            if isinstance(threshold, int):
                m[_nz, i] = 1.0
            else:
                # NOTE(review): writing 1/count into an integer-dtype
                # array would truncate to 0 -- presumably regions is
                # float here; confirm with callers.
                m[_nz, i] = 1.0 / np.count_nonzero(m[:, i])
    # Otherwise create an ROI-coding matrix from the label values
    else:
        labels = np.unique(regions)
        if remove_zero:
            labels = labels[np.nonzero(labels)]
        n_regions = labels.size
        m = np.zeros((regions.size, n_regions))
        for i in range(n_regions):
            if isinstance(threshold, int):
                m[regions == labels[i], i] = 1.0
            else:
                m[regions == labels[i], i] = 1.0 / \
                    np.sum(regions == labels[i])
    # Call dot() on the array itself as this will use sparse matrix
    # multiplication if possible.
    result = dataset.T.dot(m).T
    if threshold is not None:
        result[result < threshold] = 0.0
        result = result.astype(bool)
    return result
"resource": ""
} |
def get_random_voxels(dataset, n_voxels):
    """ Return mappable data for a random subset of voxels.

    May be useful as a baseline in predictive analyses--e.g., to compare
    the performance of a principled feature selection method with simple
    random selection.

    Args:
        dataset: A Dataset instance.
        n_voxels: An integer specifying the number of random voxels to
            select.

    Returns:
        A 2D numpy array with (randomly-selected) voxels in rows and
        mappables in columns.
    """
    # permutation(n) shuffles arange(n) with the same RNG stream the
    # original arange-then-shuffle sequence consumed.
    chosen = np.random.permutation(dataset.masker.n_vox_in_vol)[:n_voxels]
    return dataset.get_image_data(voxels=chosen)
"resource": ""
} |
def _get_top_words(model, feature_names, n_top_words=40):
    """ Return the top n_top_words words for each topic in a trained topic
    model, ordered from highest to lowest weight. """
    return [[feature_names[i]
             for i in topic.argsort()[:-n_top_words - 1:-1]]
            for topic in model.components_]
"resource": ""
} |
def pearson(x, y):
    """ Correlate row vector x with each row vector in 2D array y.

    Returns a 1D array of Pearson r values, one per row of y.
    """
    stacked = np.vstack((x, y))
    centered = stacked - stacked.mean(axis=1)[:, None]
    norms = np.sqrt(np.sum(centered ** 2, axis=1))
    covs = np.dot(centered[1:], centered[0].T)
    return covs / (norms[1:] * norms[0])
"resource": ""
} |
def fdr(p, q=.05):
    """ Determine the (Benjamini-Hochberg) FDR threshold given a p-value
    array and desired false discovery rate q. Returns -1 when no p-value
    survives. """
    nvox = p.shape[0]
    sorted_p = np.sort(p)
    cutoffs = np.arange(1, nvox + 1, dtype='float') * q / nvox
    passing = np.where(sorted_p <= cutoffs)[0]
    return sorted_p[max(passing)] if len(passing) else -1
"resource": ""
} |
def _load_activations(self, filename):
    """ Load activation data from a tab-delimited text file.

    Args:
        filename (str): a string pointing to the location of the txt file
            to read from. Must contain (at least, case-insensitively) the
            columns x, y, z, id, and space.

    Returns:
        A pandas DataFrame with coordinates transformed into the target
        space and matrix indices (i, j, k) appended, or None when any
        mandatory column is missing (an error is logged).
    """
    logger.info("Loading activation data from %s..." % filename)
    activations = pd.read_csv(filename, sep='\t')
    # Normalize column names to lower case for the checks below.
    activations.columns = [col.lower()
                           for col in list(activations.columns)]
    # Make sure all mandatory columns exist
    mc = ['x', 'y', 'z', 'id', 'space']
    if (set(mc) - set(list(activations.columns))):
        logger.error(
            "At least one of mandatory columns (x, y, z, id, and space) "
            "is missing from input file.")
        return
    # Transform to target space where needed
    spaces = activations['space'].unique()
    xyz = activations[['x', 'y', 'z']].values
    for s in spaces:
        if s != self.transformer.target:
            # Transform only the rows reported in this non-target space.
            inds = activations['space'] == s
            xyz[inds] = self.transformer.apply(s, xyz[inds])
    activations[['x', 'y', 'z']] = xyz
    # xyz (world coordinates) --> ijk (matrix indices)
    ijk = pd.DataFrame(
        transformations.xyz_to_mat(xyz), columns=['i', 'j', 'k'])
    activations = pd.concat([activations, ijk], axis=1)
    return activations
"resource": ""
} |
def create_image_table(self, r=None):
    """ Create and store a new ImageTable instance based on the current
    Dataset. Will generally be called privately, but may be useful as a
    convenience method in cases where the user wants to re-generate the
    table with a new smoothing kernel of different radius.

    Args:
        r (int): An optional integer indicating the radius of the smoothing
            kernel. By default, this is None, which will keep whatever
            value is currently set in the Dataset instance.
    """
    logger.info("Creating image table...")
    # Persist the new radius (when given) so later rebuilds reuse it.
    if r is not None:
        self.r = r
    # ImageTable reads everything it needs from the Dataset itself.
    self.image_table = ImageTable(self)
"resource": ""
} |
def get_studies(self, features=None, expression=None, mask=None,
                peaks=None, frequency_threshold=0.001,
                activation_threshold=0.0, func=np.sum, return_type='ids',
                r=6
                ):
    """ Get IDs or data for studies that meet specific criteria.

    If multiple criteria are passed, the set intersection is returned.
    For example, passing expression='emotion' and mask='my_mask.nii.gz'
    returns only those studies that are associated with emotion AND
    report activation within the voxels indicated in the passed image.

    Args:
        features (list or str): The name of a feature, or a list of
            features, to use for selecting studies.
        expression (str): A string expression to pass to the PEG for
            study retrieval.
        mask: the mask image (see Masker documentation for valid data
            types).
        peaks (ndarray or list): Either an n x 3 numpy array, or a list
            of lists or tuples (e.g., [(-10, 22, 14)]) specifying the
            world (x/y/z) coordinates of the target location(s).
        frequency_threshold (float): For feature- or expression-based
            selection, the cut-off for a study to be included. Must be a
            float in range [0, 1].
        activation_threshold (int or float): For mask-based selection: an
            integer is the absolute number of active voxels required
            within the mask; a float is the required proportion of active
            voxels.
        func (Callable): The function used when aggregating over the list
            of features (see FeatureTable.get_ids()). Only used for
            feature- or expression-based selection.
        return_type (str): What to return:
            'ids': a list of IDs of selected studies.
            'data' (alias 'images'): a voxel x study matrix of data for
                all selected studies.
            'weights': a dict of study ID -> computed weight. Only valid
                when performing feature-based selection alone.
        r (int): For peak-based selection, the distance cut-off in mm
            (only studies with one or more activations within r mm of a
            passed focus are returned).

    Returns:
        A list of study IDs, a 2D numpy array (voxels x studies), or a
        dict of weights, depending on return_type (see above).

    Raises:
        ValueError: If return_type is 'weights' and feature-based search
            is combined with any other search mode.
    """
    results = []
    # Feature-based selection
    if features is not None:
        # Weights must be handled specially: they cannot be recovered
        # later from the IDs alone.
        if return_type == 'weights':
            if expression is not None or mask is not None or \
                    peaks is not None:
                raise ValueError(
                    "return_type cannot be 'weights' when feature-based "
                    "search is used in conjunction with other search "
                    "modes.")
            return self.feature_table.get_ids(
                features, frequency_threshold, func, get_weights=True)
        else:
            results.append(self.feature_table.get_ids(
                features, frequency_threshold, func))
    # Logical expression-based selection
    if expression is not None:
        _ids = self.feature_table.get_ids_by_expression(
            expression, frequency_threshold, func)
        results.append(list(_ids))
    # Mask-based selection
    if mask is not None:
        mask = self.masker.mask(mask, in_global_mask=True).astype(bool)
        num_vox = np.sum(mask)
        prop_mask_active = self.image_table.data.T.dot(mask).astype(float)
        # A float threshold means "proportion of voxels"; normalize.
        if isinstance(activation_threshold, float):
            prop_mask_active /= num_vox
        indices = np.where(prop_mask_active > activation_threshold)[0]
        results.append([self.image_table.ids[ind] for ind in indices])
    # Peak-based selection
    if peaks is not None:
        r = float(r)
        found = set()
        for p in peaks:
            xyz = np.array(p, dtype=float)
            x = self.activations['x']
            y = self.activations['y']
            z = self.activations['z']
            dists = np.sqrt(np.square(x - xyz[0]) + np.square(y - xyz[1]) +
                            np.square(z - xyz[2]))
            # (Removed two leftover debug lines that computed an unused
            # 5.5-6.5 mm distance band.)
            found |= set(self.activations[dists <= r]['id'].unique())
        results.append(found)
    # Get intersection of all sets
    ids = list(reduce(lambda x, y: set(x) & set(y), results))
    if return_type == 'ids':
        return ids
    elif return_type in ('data', 'images'):
        # The docstring historically advertised 'images' while the code
        # checked 'data'; accept both for backward compatibility.
        return self.get_image_data(ids)
"resource": ""
} |
def add_features(self, features, append=True, merge='outer',
                 duplicates='ignore', min_studies=0.0, threshold=0.001):
    """ Construct a new FeatureTable from file or DataFrame.

    Args:
        features: Feature data to add. Either:
            (a) the path of a text file where each row is a study in the
                database with features in columns, the first column
                holding the study IDs; or
            (b) a pandas DataFrame with studies in rows, features in
                columns, and study IDs as the index.
        append (bool): When True, new features are added to existing ones
            incrementally; when False, old features are replaced.
        merge, duplicates, min_studies, threshold: Additional arguments
            passed through to FeatureTable.add_features().
    """
    # A fresh table is needed when replacing, or when none exists yet.
    if not (append and hasattr(self, 'feature_table')):
        self.feature_table = FeatureTable(self)
    self.feature_table.add_features(features, merge=merge,
                                    duplicates=duplicates,
                                    min_studies=min_studies,
                                    threshold=threshold)
"resource": ""
} |
def get_feature_names(self, features=None):
    """ Return feature names.

    With no argument (or an empty list), returns all feature names in
    the dataset; otherwise returns the requested features in the order
    they appear in the dataset.
    """
    if not features:
        return self.feature_table.feature_names
    return self.feature_table.get_ordered_names(features)
"resource": ""
} |
def get_feature_counts(self, threshold=0.001):
    """ Return a dict mapping each feature name to the number of studies
    whose loading on that feature meets the threshold. """
    tagged = (self.get_feature_data() >= threshold).sum(axis=0)
    return dict(zip(self.get_feature_names(), list(tagged)))
"resource": ""
} |
def load(cls, filename):
    """ Load a pickled Dataset instance from file.

    Args:
        filename (str): Path to the pickle file.

    Returns:
        The unpickled Dataset instance, with its feature table converted
        back from CSR storage when present.
    """
    # Use context managers so the file handle is always closed (the
    # original left it open, leaking a descriptor per call).
    try:
        with open(filename, 'rb') as f:
            dataset = pickle.load(f)
    except UnicodeDecodeError:
        # Pickles written under Python 2 need an explicit encoding here.
        with open(filename, 'rb') as f:
            dataset = pickle.load(f, encoding='latin')
    if hasattr(dataset, 'feature_table'):
        dataset.feature_table._csr_to_sdf()
    return dataset
"resource": ""
} |
def save(self, filename):
    """ Pickle the Dataset instance to the provided file.

    The feature table (when present) is converted to CSR form for
    compact storage and restored afterwards.
    """
    has_ft = hasattr(self, 'feature_table')
    if has_ft:
        self.feature_table._sdf_to_csr()
    try:
        # Context manager guarantees the handle is flushed and closed
        # (the original leaked an open file object).
        with open(filename, 'wb') as f:
            pickle.dump(self, f, -1)
    finally:
        # Restore the in-memory representation even if pickling fails.
        if has_ft:
            self.feature_table._csr_to_sdf()
"resource": ""
} |
def get_image_data(self, ids=None, voxels=None, dense=True):
    """ Slice and return a subset of image data.

    Args:
        ids (list, array): Study ids (columns) to return; None for all.
        voxels (list, array): Voxel indices (rows) to return; None for
            all.
        dense (bool): When True (default), convert the result to a dense
            array before returning; when False, keep the sparse matrix.

    Returns:
        A 2D numpy array (or sparse matrix when dense=False) with voxels
        in rows and studies in columns.
    """
    if dense and ids is None and voxels is None:
        logger.warning(
            "Warning: get_image_data() is being called without specifying "
            "a subset of studies or voxels to retrieve. This may result in"
            " a very large amount of data (several GB) being read into "
            "memory. If you experience any problems, consider returning a "
            "sparse matrix by passing dense=False, or pass in a list of "
            "ids of voxels to retrieve only a portion of the data.")
    result = self.data
    if ids is not None:
        keep = np.in1d(np.array(self.ids), np.array(ids))
        result = result[:, np.where(keep)[0]]
    if voxels is not None:
        result = result[voxels, :]
    if dense:
        return result.toarray()
    return result
"resource": ""
} |
def get_feature_data(self, ids=None, features=None, dense=True):
    """ Slice and return a subset of feature data.

    Args:
        ids (list, array): Study ids (rows) to return; None for all
            studies.
        features (list, array): Named features (columns) to return; None
            for all features.
        dense (bool): When True (default), convert the result to a dense
            representation before returning; when False, keep it sparse.

    Returns:
        A pandas DataFrame with study IDs in rows and features in
        columns.
    """
    result = self.data
    # Fix: the .ix indexer was deprecated in pandas 0.20 and removed in
    # 1.0; label-based .loc is the direct replacement for these lookups.
    if ids is not None:
        result = result.loc[ids]
    if features is not None:
        result = result.loc[:, features]
    return result.to_dense() if dense else result
"resource": ""
} |
def get_ordered_names(self, features):
    """ Given a list of features, return those features in the order
    they appear in the database.

    Args:
        features (list): Feature names to order.

    Returns:
        A list of the requested features, database-ordered.
    """
    columns = self.data.columns.values
    present = np.where(np.in1d(columns, np.array(features)))[0]
    return list(self.data.columns[present].values)
"resource": ""
} |
def get_ids(self, features, threshold=0.0, func=np.sum, get_weights=False):
    """ Return all studies in the table that meet the desired
    feature-based criteria.

    Most commonly used to retrieve studies that use one or more features
    with some minimum frequency, e.g.:
        get_ids(['fear', 'anxiety'], threshold=0.001)

    Args:
        features (list or str): Feature name(s) to search on; wildcards
            are expanded via search_features().
        threshold (float): Threshold the aggregated feature weights must
            meet (>=) for a study to be included.
        func (Callable): Numpy function applied across the selected
            features per study before thresholding (default: sum). E.g.
            max means ANY feature must pass, min means ALL must pass,
            sum thresholds the total weight.
        get_weights (bool): When True, return the id -> weight mapping.

    Returns:
        A list of study names, or (when get_weights is True) a pandas
        Series indexed by study name holding the aggregated weights.
    """
    if isinstance(features, str):
        features = [features]
    features = self.search_features(features)  # Expand wild cards
    # Fix: pandas removed the .ix indexer; .loc performs the same
    # label-based column selection here.
    feature_weights = self.data.loc[:, features]
    weights = feature_weights.apply(func, 1)
    above_thresh = weights[weights >= threshold]
    return above_thresh if get_weights else list(above_thresh.index)
"resource": ""
} |
def search_features(self, search):
    ''' Return all features that match any of the elements in the input
    list.

    Args:
        search (str, list): A string or list of strings defining the
            query; '*' acts as a wildcard.

    Returns:
        A list of matching feature names (deduplicated, unordered).
    '''
    if isinstance(search, string_types):
        search = [search]
    patterns = [term.replace('*', '.*') for term in search]
    names = list(self.data.columns)
    matches = set()
    for pattern in patterns:
        matches.update(f for f in names if re.match(pattern + '$', f))
    return list(matches)
"resource": ""
} |
def get_ids_by_expression(self, expression, threshold=0.001, func=np.sum):
    """ Parse a logical feature expression with the PEG parser and
    return the IDs of matching studies. """
    lexer = lp.Lexer()
    lexer.build()
    parser = lp.Parser(lexer, self.dataset, threshold=threshold, func=func)
    parser.build()
    parsed = parser.parse(expression)
    return parsed.keys().values
"resource": ""
} |
def _sdf_to_csr(self):
    """ Convert the feature data from a (sparse) DataFrame to a dict
    holding a SciPy CSR matrix plus the index/column labels, for compact
    pickling. """
    dense = self.data.to_dense()
    self.data = {
        'columns': list(dense.columns),
        'index': list(dense.index),
        'values': sparse.csr_matrix(dense.values),
    }
"resource": ""
} |
def deprecated(*args):
    """ Deprecation warning decorator.

    Usable either bare (@deprecated) or with a custom message
    (@deprecated("use X instead")). Without a message, a generic warning
    naming the decorated function is emitted on every call.
    """
    import functools

    def wrap(func):
        # functools.wraps preserves __name__/__doc__ of the target,
        # which the original decorator discarded.
        @functools.wraps(func)
        def wrapped_func(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return func(*args, **kwargs)
        return wrapped_func
    if len(args) == 1 and callable(args[0]):
        # Bare usage: @deprecated
        msg = "Function '%s' will be deprecated in future versions of " \
              "Neurosynth." % args[0].__name__
        return wrap(args[0])
    else:
        # Parameterized usage: @deprecated("message")
        msg = args[0]
        return wrap
"resource": ""
} |
def transform(foci, mat):
    """ Convert coordinates from one space to another using provided
    transformation matrix.

    Args:
        foci: An N x 3 array of coordinates.
        mat: A 4 x 4 transformation matrix; its pseudoinverse is
            applied to the homogeneous coordinates.

    Returns:
        An N x 3 array of transformed coordinates.
    """
    inverse = linalg.pinv(mat)
    n_points = foci.shape[0]
    # Append a column of ones to work in homogeneous coordinates.
    homogeneous = np.hstack((foci, np.ones((n_points, 1))))
    return np.dot(homogeneous, inverse)[:, 0:3]
"resource": ""
} |
def xyz_to_mat(foci, xyz_dims=None, mat_dims=None):
    """ Convert an N x 3 array of XYZ coordinates to matrix indices.

    Note: xyz_dims and mat_dims are accepted for interface
    compatibility but are not used; a fixed affine is applied.
    """
    # Homogeneous coordinates: append a column of ones.
    homogeneous = np.hstack((foci, np.ones((foci.shape[0], 1))))
    affine = np.array([[-0.5, 0, 0, 45],
                       [0, 0.5, 0, 63],
                       [0, 0, 0.5, 36]]).T
    # Multiply, then reverse the column order.
    indices = np.dot(homogeneous, affine)[:, ::-1]
    return np.round_(indices).astype(int)
"resource": ""
} |
def apply(self, name, foci):
    """ Apply a named transformation to a set of foci.

    If the named transformation doesn't exist, return foci untransformed.
    """
    # Guard clause: unknown name -> log and pass the foci through.
    if name not in self.transformations:
        logger.info(
            "No transformation named '%s' found; coordinates left "
            "untransformed." % name)
        return foci
    return transform(foci, self.transformations[name])
"resource": ""
} |
def mask(self, image, nan_to_num=True, layers=None, in_global_mask=False):
    """ Vectorize an image and mask out all invalid voxels.

    Args:
        images: The image to vectorize and mask. Input can be any object
            handled by get_image().
        layers: Which mask layers to use (specified as int, string, or
            list of ints and strings). When None, applies the conjunction
            of all layers.
        nan_to_num: boolean indicating whether to convert NaNs to 0.
        in_global_mask: Whether to return the resulting masked vector in
            the globally masked space (i.e., n_voxels =
            len(self.global_mask)). If False (default), returns in the full
            image space (i.e., n_voxels = len(self.volume)).
    Returns:
        A 1D NumPy array of in-mask voxels.
    """
    # Make the requested layer conjunction the active mask before
    # vectorizing the input image.
    self.set_mask(layers)
    image = self.get_image(image, output='vector')
    if in_global_mask:
        # Restrict to global-mask voxels, then zero out voxels excluded
        # by the current layer conjunction (rather than dropping them).
        masked_data = image[self.global_mask]
        masked_data[~self.get_mask(in_global_mask=True)] = 0
    else:
        masked_data = image[self.current_mask]
    if nan_to_num:
        masked_data = np.nan_to_num(masked_data)
    return masked_data
"resource": ""
} |
def get_mask(self, layers=None, output='vector', in_global_mask=True):
    """ Set the current mask by taking the conjunction of all specified
    layers.

    Args:
        layers: Which layers to include. See documentation for add() for
            format.
        include_global_mask: Whether or not to automatically include the
            global mask (i.e., self.volume) in the conjunction.

    Returns:
        A boolean mask, as a vector or image depending on `output`
        (forced to 'vector' when in_global_mask is True).
    """
    if in_global_mask:
        # Global-mask indexing below only makes sense for vectors.
        output = 'vector'
    if layers is None:
        layers = self.layers.keys()
    elif not isinstance(layers, list):
        layers = [layers]
    # Resolve positional (int) layer references to names via the stack;
    # string references pass through unchanged.
    layers = map(lambda x: x if isinstance(x, string_types)
                 else self.stack[x], layers)
    layers = [self.layers[l] for l in layers if l in self.layers]
    # Always include the original volume
    layers.append(self.full)
    # Conjunction: a voxel is kept only if it is set in every layer.
    layers = np.vstack(layers).T.astype(bool)
    mask = layers.all(axis=1)
    mask = self.get_image(mask, output)
    return mask[self.global_mask] if in_global_mask else mask
"resource": ""
} |
def load_imgs(filenames, masker, nan_to_num=True):
    """ Load multiple images from file into an ndarray.

    Args:
        filenames: A single filename or list of filenames pointing to valid
            images.
        masker: A Masker instance.
        nan_to_num: Optional boolean indicating whether to convert NaNs
            to zero.

    Returns:
        An m x n 2D numpy array, where m = number of voxels in mask and
        n = number of images passed.
    """
    if isinstance(filenames, string_types):
        filenames = [filenames]
    n_images = len(filenames)
    # One column per image, one row per in-mask voxel.
    data = np.zeros((masker.n_vox_in_mask, n_images))
    for col, filename in enumerate(filenames):
        data[:, col] = masker.mask(filename, nan_to_num)
    return data
"resource": ""
} |
def save_img(data, filename, masker, header=None):
    """ Save a vectorized image to file.

    Args:
        data: A 1D array of in-mask voxel values.
        filename: Output path for the NIfTI image.
        masker: A Masker instance used to unmask the vector back into
            volume space (and to supply a header when none is given).
        header: Optional NIfTI header; defaults to the masker's header.
    """
    if not header:
        header = masker.get_header()
    header.set_data_dtype(data.dtype)  # Avoids loss of precision
    # Update min/max -- this should happen on save, but doesn't seem to
    header['cal_max'] = data.max()
    header['cal_min'] = data.min()
    img = nifti1.Nifti1Image(masker.unmask(data), None, header)
    img.to_filename(filename)
"resource": ""
} |
def set_logging_level(level=None):
    """Set neurosynth's logging level

    Args
        level : str
            Name of the logging level (warning, error, info, etc) known
            to logging module. If no level provided, it would get that one
            from environment variable NEUROSYNTH_LOGLEVEL

    Returns:
        The module logger's effective level (an int) after the update.
    """
    if level is None:
        # Fall back to the environment; defaults to 'warn'.
        level = os.environ.get('NEUROSYNTH_LOGLEVEL', 'warn')
    # NOTE(review): level can no longer be None here because of the
    # 'warn' default above; the guard is redundant but harmless.
    if level is not None:
        logger.setLevel(getattr(logging, level.upper()))
    return logger.getEffectiveLevel()
"resource": ""
} |
def expand_address(address, languages=None, **kw):
    """
    Expand the given address into one or more normalized strings.

    Required
    --------
    @param address: the address as either Unicode or a UTF-8 encoded string

    Options
    -------
    @param languages: a tuple or list of ISO language code strings (e.g. "en", "fr", "de", etc.)
                      to use in expansion. If None is passed, use language classifier
                      to detect language automatically.
    @param address_components: an integer (bit-set) of address component expansions
                               to use e.g. ADDRESS_NAME | ADDRESS_STREET would use
                               only expansions which apply to venue names or streets.
    @param latin_ascii: use the Latin to ASCII transliterator, which normalizes e.g. æ => ae
    @param transliterate: use any available transliterators for non-Latin scripts, e.g.
                          for the Greek phrase διαφορετικούς becomes diaphoretikoús̱
    @param strip_accents: strip accented characters e.g. é => e, ç => c. This loses some
                          information in various languages, but in general improves
                          matching across alternate spellings.
    @param decompose: perform Unicode normalization (NFD form)
    @param lowercase: UTF-8 lowercase the string
    @param trim_string: trim spaces on either side of the string
    @param replace_word_hyphens: add version of the string replacing hyphens with space
    @param delete_word_hyphens: add version of the string with hyphens deleted
    @param replace_numeric_hyphens: add version of the string with numeric hyphens replaced
                                    e.g. 12345-6789 => 12345 6789
    @param delete_numeric_hyphens: add version of the string with numeric hyphens removed
                                   e.g. 12345-6789 => 123456789
    @param split_alpha_from_numeric: split tokens like CR17 into CR 17, helps with expansion
                                     of certain types of highway abbreviations
    @param delete_final_periods: remove final periods on abbreviations e.g. St. => St
    @param delete_acronym_periods: remove periods in acronyms e.g. U.S.A. => USA
    @param drop_english_possessives: normalize possessives e.g. Mark's => Marks
    @param delete_apostrophes: delete other types of hyphens e.g. O'Malley => OMalley
    @param expand_numex: converts numeric expressions e.g. Twenty sixth => 26th,
                         using either the supplied languages or the result of
                         automated language classification.
    @param roman_numerals: normalize Roman numerals e.g. IX => 9. Since these can be
                           ambiguous (especially I and V), turning this on simply
                           adds another version of the string if any potential
                           Roman numerals are found.
    """
    # Ensure a unicode string before handing off to the C extension.
    address = safe_decode(address, 'utf-8')
    return _expand.expand_address(address, languages=languages, **kw)
"resource": ""
} |
def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,
                      token_options=DEFAULT_TOKEN_OPTIONS,
                      strip_parentheticals=True, whitespace=False,
                      languages=None):
    '''
    Normalizes a string, tokenizes, and normalizes each token
    with string and token-level options.

    This version only uses libpostal's deterministic normalizations
    i.e. methods with a single output. The string tree version will
    return multiple normalized strings, each with tokens.

    Usage:
        normalized_tokens(u'St.-Barthélemy')

    Returns:
        A list of (token, token_type) tuples; token_type ids are
        resolved to enum values via token_types.from_id.
    '''
    s = safe_decode(s)
    normalized_tokens = _normalize.normalized_tokens(s, string_options, token_options, whitespace, languages=languages)
    # Optionally drop tokens inside parentheses before mapping ids.
    if strip_parentheticals:
        normalized_tokens = remove_parens(normalized_tokens)

    return [(s, token_types.from_id(token_type)) for s, token_type in normalized_tokens]
"resource": ""
} |
def parse_address(address, language=None, country=None):
    """
    Parse address into components.

    @param address: the address as either Unicode or a UTF-8 encoded string
    @param language (optional): language code
    @param country (optional): country code

    @return: the parsed components as produced by the underlying
        libpostal parser extension.
    """
    # Ensure a unicode string before handing off to the C extension.
    address = safe_decode(address, 'utf-8')
    return _parser.parse_address(address, language=language, country=country)
"resource": ""
} |
def near_dupe_hashes(labels, values, languages=None, **kw):
    """
    Hash the given address into normalized strings that can be used to group similar
    addresses together for more detailed pairwise comparison. This can be thought of
    as the blocking function in record linkage or locally-sensitive hashing in the
    document near-duplicate detection.

    Required
    --------
    @param labels: array of component labels as either Unicode or UTF-8 encoded strings
                   e.g. ["house_number", "road", "postcode"]
    @param values: array of component values as either Unicode or UTF-8 encoded strings
                   e.g. ["123", "Broadway", "11216"]. Note len(values) must be equal to
                   len(labels).

    Options
    -------
    @param languages: a tuple or list of ISO language code strings (e.g. "en", "fr", "de", etc.)
                      to use in expansion. If None is passed, use language classifier
                      to detect language automatically.
    @param with_name: use name in the hashes
    @param with_address: use house_number & street in the hashes
    @param with_unit: use secondary unit as part of the hashes
    @param with_city_or_equivalent: use the city, city_district, suburb, or island name as one of
                                    the geo qualifiers
    @param with_small_containing_boundaries: use small containing boundaries (currently state_district)
                                             as one of the geo qualifiers
    @param with_postal_code: use postal code as one of the geo qualifiers
    @param with_latlon: use geohash + neighbors as one of the geo qualifiers
    @param latitude: latitude (Y coordinate)
    @param longitude: longitude (X coordinate)
    @param geohash_precision: geohash tile size (default = 6)
    @param name_and_address_keys: include keys with name + address + geo
    @param name_only_keys: include keys with name + geo
    @param address_only_keys: include keys with address + geo

    @return: the near-dupe hash keys produced by the underlying
        libpostal implementation.
    """
    return _near_dupe.near_dupe_hashes(labels, values, languages=languages, **kw)
"resource": ""
} |
def dict_to_object(item, object_name):
    """Converts a python dict to a namedtuple, saving memory.

    The object_hook runs bottom-up during JSON decoding, so nested
    dicts are converted too. BUG FIX: the original closed over the
    top-level dict's keys()/values() and reused them for every nested
    dict, producing wrong fields for nested structures; each dict is
    now converted with its own keys and values.

    Args:
        item: The dict to convert. Must be JSON-serializable, and its
            keys must be valid Python identifiers.
        object_name: Name given to the generated namedtuple class.

    Returns:
        A namedtuple instance mirroring `item`.
    """
    return json.loads(
        json.dumps(item),
        object_hook=lambda d: namedtuple(object_name, d.keys())(*d.values()))
"resource": ""
} |
def get_ticker_price(self, ticker,
                     startDate=None, endDate=None,
                     fmt='json', frequency='daily'):
    """By default, return latest EOD Composite Price for a stock ticker.
    On average, each feed contains 3 data sources.

    Supported tickers + Available Day Ranges are here:
    https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip

    Args:
        ticker (string): Unique identifier for stock ticker
        startDate (string): Start of ticker range in YYYY-MM-DD format
        endDate (string): End of ticker range in YYYY-MM-DD format
        fmt (string): 'csv', 'json', or 'object'
        frequency (string): Resample frequency

    Returns:
        Parsed JSON (fmt='json'), a list of namedtuples (fmt='object'),
        or the raw response body as text (any other fmt, e.g. 'csv').
    """
    url = self._get_url(ticker, frequency)
    params = {
        # 'object' is a local conversion; the API is still asked for JSON.
        'format': fmt if fmt != "object" else 'json',  # conversion local
        'resampleFreq': frequency
    }

    if startDate:
        params['startDate'] = startDate
    if endDate:
        params['endDate'] = endDate

    # TODO: evaluate whether to stream CSV to cache on disk, or
    # load as array in memory, or just pass plain text
    response = self._request('GET', url, params=params)
    if fmt == "json":
        return response.json()
    elif fmt == "object":
        data = response.json()
        return [dict_to_object(item, "TickerPrice") for item in data]
    else:
        return response.content.decode("utf-8")
"resource": ""
} |
def get_dataframe(self, tickers,
                  startDate=None, endDate=None, metric_name=None, frequency='daily'):
    """ Return a pandas.DataFrame of historical prices for one or more ticker symbols.

    By default, return latest EOD Composite Price for a list of stock tickers.
    On average, each feed contains 3 data sources.

    Supported tickers + Available Day Ranges are here:
    https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
    or from the TiingoClient.list_tickers() method.

    Args:
        tickers (string/list): One or more unique identifiers for a stock ticker.
        startDate (string): Start of ticker range in YYYY-MM-DD format.
        endDate (string): End of ticker range in YYYY-MM-DD format.
        metric_name (string): Optional parameter specifying metric to be returned for each
            ticker. In the event of a single ticker, this is optional and if not specified
            all of the available data will be returned. In the event of a list of tickers,
            this parameter is required.
        frequency (string): Resample frequency (defaults to daily).

    Raises:
        APIColumnNameError: If metric_name is not a valid column.
        InstallPandasException: If pandas is not available.
    """
    valid_columns = ['open', 'high', 'low', 'close', 'volume', 'adjOpen', 'adjHigh', 'adjLow',
                     'adjClose', 'adjVolume', 'divCash', 'splitFactor']

    if metric_name is not None and metric_name not in valid_columns:
        raise APIColumnNameError('Valid data items are: ' + str(valid_columns))

    params = {
        'format': 'json',
        'resampleFreq': frequency
    }
    if startDate:
        params['startDate'] = startDate
    if endDate:
        params['endDate'] = endDate

    if pandas_is_installed:
        if type(tickers) is str:
            # Single ticker: return either one metric column (Series)
            # or the full frame indexed by date.
            stock = tickers
            url = self._get_url(stock, frequency)
            response = self._request('GET', url, params=params)
            df = pd.DataFrame(response.json())
            if metric_name is not None:
                prices = df[metric_name]
                prices.index = df['date']
            else:
                prices = df
                prices.index = df['date']
                del (prices['date'])
        else:
            # Multiple tickers: one column per ticker, all holding
            # metric_name. NOTE(review): metric_name is effectively
            # required here -- the rename below uses it as a key.
            prices = pd.DataFrame()
            for stock in tickers:
                url = self._get_url(stock, frequency)
                response = self._request('GET', url, params=params)
                df = pd.DataFrame(response.json())
                df.index = df['date']
                df.rename(index=str, columns={metric_name: stock}, inplace=True)
                prices = pd.concat([prices, df[stock]], axis=1)
        # Normalize the string dates into a DatetimeIndex.
        prices.index = pd.to_datetime(prices.index)
        return prices
    else:
        error_message = ("Pandas is not installed, but .get_ticker_price() was "
                         "called with fmt=pandas. In order to install tiingo with "
                         "pandas, reinstall with pandas as an optional dependency. \n"
                         "Install tiingo with pandas dependency: \'pip install tiingo[pandas]\'\n"
                         "Alternatively, just install pandas: pip install pandas.")
        raise InstallPandasException(error_message)
"resource": ""
} |
def get_bulk_news(self, file_id=None, fmt='json'):
    """Only available to institutional clients.

    If ID is NOT provided, return array of available file_ids.
    If ID is provided, provides URL which you can use to download your
    file, as well as some metadata about that file.
    """
    if file_id:
        url = "tiingo/news/bulk_download/{}".format(file_id)
    else:
        url = "tiingo/news/bulk_download"

    payload = self._request('GET', url).json()
    if fmt == 'object':
        return dict_to_object(payload, "BulkNews")
    if fmt == 'json':
        return payload
"resource": ""
} |
def _request(self, method, url, **kwargs):
    """Make HTTP request and return response object

    Args:
        method (str): GET, POST, PUT, DELETE
        url (str): path appended to the base_url to create request
        **kwargs: passed directly to a requests.request object

    Returns:
        The successful response object.

    Raises:
        RestClientError: wraps any HTTPError raised for error
            status codes.
    """
    resp = self._session.request(method,
                                 '{}/{}'.format(self._base_url, url),
                                 headers=self._headers,
                                 **kwargs)
    try:
        resp.raise_for_status()
    except HTTPError as e:
        # Log the response body for debugging, then re-raise as the
        # library's own exception type.
        logging.error(resp.content)
        raise RestClientError(e)
    return resp
"resource": ""
} |
async def get_bearer_info(self):
    """Get the application bearer token from client_id and client_secret.

    Returns
    -------
    dict
        The decoded token payload (contains 'access_token', etc.).

    Raises
    ------
    SpotifyException
        If client_id or client_secret is not set.
    """
    if self.client_id is None:
        raise SpotifyException(_GET_BEARER_ERR % 'client_id')

    elif self.client_secret is None:
        raise SpotifyException(_GET_BEARER_ERR % 'client_secret')

    # HTTP basic auth credential: base64("client_id:client_secret").
    token = b64encode(':'.join((self.client_id, self.client_secret)).encode())

    kwargs = {
        'url': 'https://accounts.spotify.com/api/token',
        'data': {'grant_type': 'client_credentials'},
        'headers': {'Authorization': 'Basic ' + token.decode()}
    }

    async with self._session.post(**kwargs) as resp:
        return json.loads(await resp.text(encoding='utf-8'))
"resource": ""
} |
async def request(self, route, **kwargs):
    """Make a request to the spotify API with the current bearer credentials.

    Parameters
    ----------
    route : Union[tuple[str, str], Route]
        A tuple of the method and url or a :class:`Route` object.
    kwargs : Any
        keyword arguments to pass into :class:`aiohttp.ClientSession.request`

    Returns
    -------
    dict
        The decoded JSON payload (empty dict if the body is not JSON).

    Raises
    ------
    Forbidden, NotFound, HTTPException
        On 403, 404, or when all retries are exhausted, respectively.
    """
    if isinstance(route, tuple):
        method, url = route
    else:
        method = route.method
        url = route.url

    # Lazily fetch an application bearer token on first use.
    if self.bearer_info is None:
        self.bearer_info = bearer_info = await self.get_bearer_info()
        access_token = bearer_info['access_token']
    else:
        access_token = self.bearer_info['access_token']

    headers = {
        'Authorization': 'Bearer ' + access_token,
        'Content-Type': kwargs.get('content_type', 'application/json'),
        **kwargs.pop('headers', {})
    }

    for _ in range(self.RETRY_AMOUNT):
        r = await self._session.request(method, url, headers=headers, **kwargs)
        try:
            status = r.status
            try:
                data = json.loads(await r.text(encoding='utf-8'))
            except json.decoder.JSONDecodeError:
                data = {}

            if 300 > status >= 200:
                return data

            if status == 401:
                # Token expired: refresh and retry with new credentials.
                self.bearer_info = bearer_info = await self.get_bearer_info()
                headers['Authorization'] = 'Bearer ' + bearer_info['access_token']
                continue

            if status == 429:
                # we're being rate limited.
                # NOTE(review): assumes the Retry-After header is always
                # present on 429s -- int(None) would raise otherwise.
                amount = r.headers.get('Retry-After')
                await asyncio.sleep(int(amount), loop=self.loop)
                continue

            if status in (502, 503):
                # unconditional retry
                continue

            if status == 403:
                raise Forbidden(r, data)
            elif status == 404:
                raise NotFound(r, data)
        finally:
            await r.release()
    else:
        # for/else: all retries exhausted without a successful return;
        # surface the last response and decoded body.
        raise HTTPException(r, data)
"resource": ""
} |
def album_tracks(self, spotify_id, limit=20, offset=0, market='US'):
    """Get an albums tracks by an ID.

    Parameters
    ----------
    spotify_id : str
        The spotify_id to search by.
    limit : Optional[int]
        The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
    offset : Optional[int]
        The offset of which Spotify should start yielding from.
    market : Optional[str]
        An ISO 3166-1 alpha-2 country code.

    Returns
    -------
    An awaitable (see :meth:`request`) resolving to the JSON payload.
    """
    route = Route('GET', '/albums/{spotify_id}/tracks', spotify_id=spotify_id)
    payload = {'limit': limit, 'offset': offset}

    # market defaults to 'US'; it is only omitted when passed as falsy.
    if market:
        payload['market'] = market

    return self.request(route, params=payload)
"resource": ""
} |
def artist(self, spotify_id):
    """Fetch a single Spotify artist by ID.

    Parameters
    ----------
    spotify_id : str
        The spotify_id to search by.
    """
    return self.request(
        Route('GET', '/artists/{spotify_id}', spotify_id=spotify_id))
"resource": ""
} |
def artist_albums(self, spotify_id, include_groups=None, limit=20, offset=0, market='US'):
    """Get an artists tracks by their ID.

    Parameters
    ----------
    spotify_id : str
        The spotify_id to search by.
    include_groups : INCLUDE_GROUPS_TP
        INCLUDE_GROUPS
    limit : Optional[int]
        The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
    offset : Optional[int]
        The offset of which Spotify should start yielding from.
    market : Optional[str]
        An ISO 3166-1 alpha-2 country code.

    Returns
    -------
    An awaitable (see :meth:`request`) resolving to the JSON payload.
    """
    route = Route('GET', '/artists/{spotify_id}/albums', spotify_id=spotify_id)
    payload = {'limit': limit, 'offset': offset}

    # Optional filters are only added when provided/truthy.
    if include_groups:
        payload['include_groups'] = include_groups

    if market:
        payload['market'] = market

    return self.request(route, params=payload)
"resource": ""
} |
def artist_top_tracks(self, spotify_id, country):
    """Fetch an artist's top tracks for a given country.

    Parameters
    ----------
    spotify_id : str
        The spotify_id to search by.
    country : COUNTRY_TP
        COUNTRY
    """
    return self.request(
        Route('GET', '/artists/{spotify_id}/top-tracks', spotify_id=spotify_id),
        params={'country': country})
"resource": ""
} |
def artist_related_artists(self, spotify_id):
    """Fetch artists related to the given artist.

    Parameters
    ----------
    spotify_id : str
        The spotify_id to search by.
    """
    return self.request(
        Route('GET', '/artists/{spotify_id}/related-artists',
              spotify_id=spotify_id))
"resource": ""
} |
def artists(self, spotify_ids):
    """Fetch several Spotify artists by their IDs.

    Parameters
    ----------
    spotify_ids : List[str]
        The spotify_ids to search with.
    """
    query = {'ids': spotify_ids}
    return self.request(Route('GET', '/artists'), params=query)
"resource": ""
} |
def category(self, category_id, country=None, locale=None):
    """Get a single category used to tag items in Spotify.

    Parameters
    ----------
    category_id : str
        The Spotify category ID for the category.
    country : COUNTRY_TP
        COUNTRY
    locale : LOCALE_TP
        LOCALE

    Returns
    -------
    An awaitable (see :meth:`request`) resolving to the JSON payload.
    """
    route = Route('GET', '/browse/categories/{category_id}', category_id=category_id)
    payload = {}

    # Optional localization parameters.
    if country:
        payload['country'] = country

    if locale:
        payload['locale'] = locale

    return self.request(route, params=payload)
"resource": ""
} |
def category_playlists(self, category_id, limit=20, offset=0, country=None):
    """Get a list of Spotify playlists tagged with a particular category.

    Parameters
    ----------
    category_id : str
        The Spotify category ID for the category.
    limit : Optional[int]
        The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
    offset : Optional[int]
        The index of the first item to return. Default: 0
    country : COUNTRY_TP
        COUNTRY

    Returns
    -------
    An awaitable (see :meth:`request`) resolving to the JSON payload.
    """
    route = Route('GET', '/browse/categories/{category_id}/playlists', category_id=category_id)
    payload = {'limit': limit, 'offset': offset}

    if country:
        payload['country'] = country

    return self.request(route, params=payload)
"resource": ""
} |
def categories(self, limit=20, offset=0, country=None, locale=None):
    """Get a list of categories used to tag items in Spotify.

    Parameters
    ----------
    limit : Optional[int]
        The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
    offset : Optional[int]
        The index of the first item to return. Default: 0
    country : COUNTRY_TP
        COUNTRY
    locale : LOCALE_TP
        LOCALE

    Returns
    -------
    An awaitable (see :meth:`request`) resolving to the JSON payload.
    """
    route = Route('GET', '/browse/categories')
    payload = {'limit': limit, 'offset': offset}

    # Optional localization parameters.
    if country:
        payload['country'] = country

    if locale:
        payload['locale'] = locale

    return self.request(route, params=payload)
"resource": ""
} |
def featured_playlists(self, locale=None, country=None, timestamp=None, limit=20, offset=0):
    """Get a list of Spotify featured playlists.

    Parameters
    ----------
    locale : LOCALE_TP
        LOCALE
    country : COUNTRY_TP
        COUNTRY
    timestamp : TIMESTAMP_TP
        TIMESTAMP
    limit : Optional[int]
        The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
    offset : Optional[int]
        The index of the first item to return. Default: 0

    Returns
    -------
    An awaitable (see :meth:`request`) resolving to the JSON payload.
    """
    route = Route('GET', '/browse/featured-playlists')
    payload = {'limit': limit, 'offset': offset}

    # Optional localization/time parameters.
    if country:
        payload['country'] = country

    if locale:
        payload['locale'] = locale

    if timestamp:
        payload['timestamp'] = timestamp

    return self.request(route, params=payload)
"resource": ""
} |
def new_releases(self, *, country=None, limit=20, offset=0):
    """Get a list of new album releases featured in Spotify.

    Parameters
    ----------
    limit : Optional[int]
        The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
    offset : Optional[int]
        The index of the first item to return. Default: 0
    country : COUNTRY_TP
        COUNTRY

    Returns
    -------
    An awaitable (see :meth:`request`) resolving to the JSON payload.
    """
    route = Route('GET', '/browse/new-releases')
    payload = {'limit': limit, 'offset': offset}

    if country:
        payload['country'] = country

    return self.request(route, params=payload)
"resource": ""
} |
def recommendations(self, seed_artists, seed_genres, seed_tracks, *, limit=20, market=None, **filters):
    """Get Recommendations Based on Seeds.

    Parameters
    ----------
    seed_artists : str
        A comma separated list of Spotify IDs for seed artists. Up to 5 seed values may be provided.
    seed_genres : str
        A comma separated list of any genres in the set of available genre seeds. Up to 5 seed values may be provided.
    seed_tracks : str
        A comma separated list of Spotify IDs for a seed track. Up to 5 seed values may be provided.
    limit : Optional[int]
        The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
    market : Optional[str]
        An ISO 3166-1 alpha-2 country code.
    max_* : Optional[Keyword arguments]
        For each tunable track attribute, a hard ceiling on the selected track attribute’s value can be provided.
    min_* : Optional[Keyword arguments]
        For each tunable track attribute, a hard floor on the selected track attribute’s value can be provided.
    target_* : Optional[Keyword arguments]
        For each of the tunable track attributes (below) a target value may be provided.
    """
    route = Route('GET', '/recommendations')
    payload = {'seed_artists': seed_artists, 'seed_genres': seed_genres, 'seed_tracks': seed_tracks, 'limit': limit}

    if market:
        payload['market'] = market

    if filters:
        payload.update(filters)

    # BUG FIX: was `param=payload` -- an unknown keyword, so the query
    # string was never sent. Every sibling endpoint uses `params=`.
    return self.request(route, params=payload)
"resource": ""
} |
def following_artists_or_users(self, ids, *, type='artist'):
    """Check whether the current user follows the given artists or users.

    Parameters
    ----------
    ids : List[str]
        A comma-separated list of the artist or the user Spotify IDs to check.
        A maximum of 50 IDs can be sent in one request.
    type : Optional[str]
        The ID type: either "artist" or "user".
        Default: "artist"
    """
    query = {'ids': ids, 'type': type}
    return self.request(Route('GET', '/me/following/contains'), params=query)
"resource": ""
} |
async def get_albums(self, *, limit: Optional[int] = 20, offset: Optional[int] = 0, include_groups=None, market: Optional[str] = None) -> List[Album]:
    """Get the albums of a Spotify artist.

    Parameters
    ----------
    limit : Optional[int]
        The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
    offset : Optional[int]
        The offset of which Spotify should start yielding from.
    include_groups : INCLUDE_GROUPS_TP
        INCLUDE_GROUPS
    market : Optional[str]
        An ISO 3166-1 alpha-2 country code.

    Returns
    -------
    albums : List[Album]
        The albums of the artist.
    """
    # Local import -- presumably avoids a circular dependency between
    # the artist and album modules; confirm.
    from .album import Album

    data = await self.__client.http.artist_albums(self.id, limit=limit, offset=offset, include_groups=include_groups, market=market)
    return list(Album(self.__client, item) for item in data['items'])
"resource": ""
} |
async def get_all_albums(self, *, market='US') -> List[Album]:
    """loads all of the artists albums, depending on how many the artist has this may be a long operation.

    Parameters
    ----------
    market : Optional[str]
        An ISO 3166-1 alpha-2 country code.

    Returns
    -------
    albums : List[Album]
        The albums of the artist.
    """
    # Local import -- presumably avoids a circular dependency between
    # the artist and album modules; confirm.
    from .album import Album

    albums = []
    offset = 0
    total = await self.total_albums(market=market)

    # Page through the API 50 albums at a time (the endpoint's page
    # size cap) until the known total has been collected.
    while len(albums) < total:
        data = await self.__client.http.artist_albums(self.id, limit=50, offset=offset, market=market)
        offset += 50
        albums += list(Album(self.__client, item) for item in data['items'])

    return albums
"resource": ""
} |
async def total_albums(self, *, market: str = None) -> int:
    """Return the artist's total album count.

    Parameters
    ----------
    market : Optional[str]
        An ISO 3166-1 alpha-2 country code.

    Returns
    -------
    total : int
        The total amount of albums.
    """
    # A minimal one-item page is enough: only the 'total' field is read.
    payload = await self.__client.http.artist_albums(self.id, limit=1, offset=0, market=market)
    return payload['total']
"resource": ""
} |
async def related_artists(self) -> List[Artist]:
    """Get Spotify catalog information about artists similar to a given artist.

    Similarity is based on analysis of the Spotify community’s listening history.

    Returns
    -------
    artists : List[Artist]
        The artists deemed similar.
    """
    payload = await self.__client.http.artist_related_artists(self.id)
    return [Artist(self.__client, entry) for entry in payload['artists']]
"resource": ""
} |
async def currently_playing(self) -> Tuple[Context, Track]:
    """Get the users currently playing track.

    Returns
    -------
    data : dict
        The API payload. When a track is playing, data['item'] is
        wrapped in a Track and a data['Context'] key is added holding
        a Context object.

    NOTE(review): the return annotation (Tuple) does not match the
    actual dict return, and the wrapped context is stored under
    'Context' (capital C) while the raw 'context' key is left
    untouched -- confirm whether this key name is intentional.
    """
    data = await self.http.currently_playing()

    if data.get('item'):
        data['Context'] = Context(data.get('context'))
        data['item'] = Track(self.__client, data.get('item'))

    return data
"resource": ""
} |
async def get_player(self) -> Player:
    """Get information about the users current playback.

    Returns
    -------
    player : Player
        A player object representing the current playback.
    """
    # The player is cached on the instance (self._player) in addition
    # to being returned.
    self._player = player = Player(self.__client, self, await self.http.current_player())
    return player
"resource": ""
} |
async def get_devices(self) -> List[Device]:
    """Fetch the playback devices available to the current user.

    Returns
    -------
    devices : List[Device]
        The devices the user has available.
    """
    payload = await self.http.available_devices()
    return [Device(entry) for entry in payload['devices']]
"resource": ""
} |
async def recently_played(self) -> List[Dict[str, Union[Track, Context, str]]]:
    """Get tracks from the current users recently played tracks.

    Returns
    -------
    playlist_history : List[Dict[str, Union[Track, Context, str]]]
        A list of playlist history object.
        Each object is a dict with a timestamp, track and context field.
    """
    data = await self.http.recently_played()

    def wrap(entry):
        # Wrap the raw context/track payloads in their model classes.
        return {'context': Context(entry.get('context')),
                'track': Track(self.__client, entry.get('track'))}

    return [{'timestamp': entry['timestamp'], **wrap(entry)}
            for entry in data['items']]
"resource": ""
} |
async def replace_tracks(self, playlist, *tracks) -> None:
    """Replace all the tracks in a playlist, overwriting its existing tracks.

    This powerful request can be useful for replacing tracks, re-ordering existing tracks, or clearing the playlist.

    Parameters
    ----------
    playlist : Union[str, Playlist]
        The playlist to modify
    tracks : Sequence[Union[str, Track]]
        Tracks to place in the playlist

    Returns
    -------
    None. (The previous annotation said ``str``, but no value is
    returned -- the API's snapshot id is discarded.)
    """
    # str() accepts either raw ids or model objects (via __str__).
    tracks = [str(track) for track in tracks]
    await self.http.replace_playlist_tracks(self.id, str(playlist), tracks=','.join(tracks))
"resource": ""
} |
async def reorder_tracks(self, playlist, start, insert_before, length=1, *, snapshot_id=None):
    """Reorder a track or a group of tracks in a playlist.

    Parameters
    ----------
    playlist : Union[str, Playlist]
        The playlist to modify
    start : int
        The position of the first track to be reordered.
    insert_before : int
        The position where the tracks should be inserted.
    length : Optional[int]
        The amount of tracks to be reordered. Defaults to 1 if not set.
    snapshot_id : str
        The playlist’s snapshot ID against which you want to make the changes.

    Returns
    -------
    snapshot_id : str
        The snapshot id of the playlist.
    """
    # Note the positional order expected by the HTTP layer:
    # (user_id, playlist_id, range_start, range_length, insert_before).
    data = await self.http.reorder_playlists_tracks(self.id, str(playlist), start, length, insert_before, snapshot_id=snapshot_id)
    return data['snapshot_id']
"resource": ""
} |
async def create_playlist(self, name, *, public=True, collaborative=False, description=None):
    """Create a playlist for a Spotify user.

    Parameters
    ----------
    name : str
        The name of the playlist.
    public : Optional[bool]
        The public/private status of the playlist.
        `True` for public, `False` for private.
    collaborative : Optional[bool]
        If `True`, the playlist will become collaborative and other users will be able to modify the playlist.
    description : Optional[str]
        The playlist description

    Returns
    -------
    playlist : Playlist
        The playlist that was created.
    """
    data = {
        'name': name,
        'public': public,
        'collaborative': collaborative
    }

    # description is only sent when provided.
    if description:
        data['description'] = description

    playlist_data = await self.http.create_playlist(self.id, data)
    return Playlist(self.__client, playlist_data)
"resource": ""
} |
async def get_playlists(self, *, limit=20, offset=0):
    """Fetch this user's playlists from spotify.

    Parameters
    ----------
    limit : Optional[int]
        The limit on how many playlists to retrieve for this user (default is 20).
    offset : Optional[int]
        The offset from where the api should start from in the playlists.

    Returns
    -------
    playlists : List[Playlist]
        A list of the users playlists.
    """
    # Prefer the user's own HTTP session when one is attached,
    # otherwise fall back to the shared client session.
    http = self.http if hasattr(self, 'http') else self.__client.http
    data = await http.get_playlists(self.id, limit=limit, offset=offset)
    return [Playlist(self.__client, item) for item in data['items']]
async def get_tracks(self, *, limit: Optional[int] = 20, offset: Optional[int] = 0) -> List[Track]:
    """Fetch a single page of this album's tracks from spotify.

    Parameters
    ----------
    limit : Optional[int]
        The limit on how many tracks to retrieve for this album (default is 20).
    offset : Optional[int]
        The offset from where the api should start from in the tracks.

    Returns
    -------
    tracks : List[Track]
        The requested page of album tracks.
    """
    data = await self.__client.http.album_tracks(self.id, limit=limit, offset=offset)
    return [Track(self.__client, item) for item in data['items']]
async def get_all_tracks(self, *, market: Optional[str] = 'US') -> List[Track]:
    """Load all of the album's tracks; for a large album this may take a while.

    Parameters
    ----------
    market : Optional[str]
        An ISO 3166-1 alpha-2 country code. Provide this parameter if you
        want to apply Track Relinking.

    Returns
    -------
    tracks : List[Track]
        The tracks of the album.
    """
    tracks = []
    offset = 0
    total = self.total_tracks or None
    while True:
        data = await self.__client.http.album_tracks(self.id, limit=50, offset=offset, market=market)
        items = data['items']
        if total is None:
            # The album object did not know its track count; trust the API.
            total = data['total']
        offset += 50
        tracks += [Track(self.__client, item) for item in items]
        # Stop once enough tracks were collected.  Also stop on an empty
        # page: the previous implementation looped forever whenever the
        # reported total exceeded the number of tracks actually returned.
        if len(tracks) >= total or not items:
            break
    return tracks
def oauth2_url(self, redirect_uri: str, scope: Optional[str] = None, state: Optional[str] = None) -> str:
    """Generate an OAuth2 url for user authentication.

    Parameters
    ----------
    redirect_uri : str
        Where spotify should redirect the user to after authentication.
    scope : Optional[str]
        Space seperated spotify scopes for different levels of access.
    state : Optional[str]
        Using a state value can increase your assurance that an incoming
        connection is the result of an authentication request.

    Returns
    -------
    url : str
        The OAuth2 url.
    """
    client_id = self.http.client_id
    return OAuth2.url_(client_id, redirect_uri, scope=scope, state=state)
async def get_album(self, spotify_id: str, *, market: str = 'US') -> Album:
    """Retrieve an album with a spotify ID.

    Parameters
    ----------
    spotify_id : str
        The ID to search for.
    market : Optional[str]
        An ISO 3166-1 alpha-2 country code

    Returns
    -------
    album : Album
        The album from the ID
    """
    raw = await self.http.album(to_id(spotify_id), market=market)
    return Album(self, raw)
async def get_artist(self, spotify_id: str) -> Artist:
    """Retrieve an artist with a spotify ID.

    Parameters
    ----------
    spotify_id : str
        The ID to search for.

    Returns
    -------
    artist : Artist
        The artist from the ID
    """
    raw = await self.http.artist(to_id(spotify_id))
    return Artist(self, raw)
async def get_track(self, spotify_id: str) -> Track:
    """Retrieve a track with a spotify ID.

    Parameters
    ----------
    spotify_id : str
        The ID to search for.

    Returns
    -------
    track : Track
        The track from the ID
    """
    raw = await self.http.track(to_id(spotify_id))
    return Track(self, raw)
async def get_user(self, spotify_id: str) -> User:
    """Retrieve a user with a spotify ID.

    Parameters
    ----------
    spotify_id : str
        The ID to search for.

    Returns
    -------
    user : User
        The user from the ID
    """
    raw = await self.http.user(to_id(spotify_id))
    return User(self, raw)
async def get_albums(self, *ids: List[str], market: str = 'US') -> List[Album]:
    """Retrieve multiple albums with a list of spotify IDs.

    Parameters
    ----------
    ids : List[str]
        the ID to look for
    market : Optional[str]
        An ISO 3166-1 alpha-2 country code

    Returns
    -------
    albums : List[Album]
        The albums from the IDs
    """
    joined = ','.join(to_id(value) for value in ids)
    data = await self.http.albums(joined, market=market)
    return [Album(self, item) for item in data['albums']]
async def get_artists(self, *ids: List[str]) -> List[Artist]:
    """Retrieve multiple artists with a list of spotify IDs.

    Parameters
    ----------
    ids : List[str]
        the IDs to look for

    Returns
    -------
    artists : List[Artist]
        The artists from the IDs
    """
    joined = ','.join(to_id(value) for value in ids)
    data = await self.http.artists(joined)
    return [Artist(self, item) for item in data['artists']]
async def search(self, q: str, *, types: Optional[Iterable[str]] = ('track', 'playlist', 'artist', 'album'), limit: Optional[int] = 20, offset: Optional[int] = 0, market: Optional[str] = None) -> Dict[str, List[Union[Track, Playlist, Artist, Album]]]:
    """Access the spotify search functionality.

    Parameters
    ----------
    q : str
        the search query
    types : Optional[Iterable[str]]
        A sequence of search types (can be any of `track`, `playlist`,
        `artist` or `album`) to refine the search request.
        A `ValueError` may be raised if a search type is found that is not valid.
    limit : Optional[int]
        The limit of search results to return when searching.
        Maximum limit is 50, any larger may raise a :class:`HTTPException`
    offset : Optional[int]
        The offset from where the api should start from in the search results.
    market : Optional[str]
        An ISO 3166-1 alpha-2 country code. Provide this parameter if you
        want to apply Track Relinking.

    Returns
    -------
    results : Dict[str, List[Union[Track, Playlist, Artist, Album]]]
        The results of the search.
    """
    if not hasattr(types, '__iter__'):
        raise TypeError('types must be an iterable.')
    # The default is now an immutable tuple (a mutable default list is a
    # classic shared-state pitfall); normalise whatever we got to a list.
    types = list(types)

    requested = set(types)
    if not requested.issubset(_SEARCH_TYPES):
        raise ValueError(_SEARCH_TYPE_ERR % requested.difference(_SEARCH_TYPES).pop())

    kwargs = {
        'q': q.replace(' ', '+'),
        # NOTE(review): 'queary_type' is the (misspelled) keyword argument
        # the HTTP layer expects -- confirm against http.search before
        # renaming it here.
        'queary_type': ','.join(tp.strip() for tp in types),
        'market': market,
        'limit': limit,
        'offset': offset
    }

    data = await self.http.search(**kwargs)
    return {key: [_TYPES[obj['type']](self, obj) for obj in value['items']] for key, value in data.items()}
def to_id(string: str) -> str:
    """Get a spotify ID from a URI or open.spotify URL.

    Paramters
    ---------
    string : str
        The string to operate on.

    Returns
    -------
    id : str
        The Spotify ID from the string.
    """
    candidate = string.strip()
    # Try the URI form first, then the open.spotify URL form; the ID lives
    # in a different capture group for each pattern.
    for pattern, group in ((_URI_RE, 1), (_OPEN_RE, 2)):
        found = pattern.match(candidate)
        if found is not None:
            return found.group(group)
    # Neither pattern matched: assume the input is already a bare ID.
    return candidate
def assert_hasattr(attr: str, msg: str, tp: BaseException = SpotifyException) -> Callable:
    """Decorator factory: raise ``tp(msg)`` when the decorated method's
    instance lacks the attribute ``attr``.

    Works for both plain methods and coroutine methods.

    Parameters
    ----------
    attr : str
        Attribute name the instance must have.
    msg : str
        Message passed to the raised exception.
    tp : BaseException
        Exception type to raise (defaults to SpotifyException).
    """
    def decorator(func: Callable) -> Callable:
        if inspect.iscoroutinefunction(func):
            # Previous implementation shadowed the sync wrapper with an
            # async function that awaited *itself* -> infinite recursion,
            # and the hasattr check never ran.  Check, then await func.
            @functools.wraps(func)
            async def decorated(self, *args, **kwargs):
                if not hasattr(self, attr):
                    raise tp(msg)
                return await func(self, *args, **kwargs)
        else:
            @functools.wraps(func)
            def decorated(self, *args, **kwargs):
                if not hasattr(self, attr):
                    raise tp(msg)
                return func(self, *args, **kwargs)
        return decorated
    return decorator
def from_client(cls, client, *args, **kwargs):
    """Build an OAuth2 object from a `spotify.Client`'s application id."""
    client_id = client.http.client_id
    return cls(client_id, *args, **kwargs)
def url_(client_id: str, redirect_uri: str, *, scope: str = None, state: str = None, secure: bool = True) -> str:
    """Construct an OAuth2 URL directly, without an OAuth2 instance."""
    params = {'client_id': client_id, 'redirect_uri': quote(redirect_uri)}
    # Optional query parameters; note only scope/redirect_uri are url-quoted.
    if scope is not None:
        params['scope'] = quote(scope)
    if state is not None:
        params['state'] = state
    query = '&'.join('{0}={1}'.format(key, value) for key, value in params.items())
    return OAuth2._BASE.format(parameters=query)
def attrs(self):
    """Attributes used when constructing url parameters."""
    params = {
        'client_id': self.client_id,
        'redirect_uri': quote(self.redirect_uri),
    }
    # Optional parameters are appended only when present; the scope is
    # url-quoted, the state value is sent verbatim.
    for key, value, needs_quote in (('scope', self.scope, True), ('state', self.state, False)):
        if value is not None:
            params[key] = quote(value) if needs_quote else value
    return params
def parameters(self) -> str:
    """URL query string assembled from `attrs`."""
    pairs = ('{0}={1}'.format(key, value) for key, value in self.attrs.items())
    return '&'.join(pairs)
async def build(self):
    """Resolve the deferred track data into full playlist track objects.

    Returns
    -------
    tracks : List[PlaylistTrack]
        The tracks
    """
    payload = await self.__func()
    return [PlaylistTrack(self.__client, track) for track in payload['items']]
async def get_all_tracks(self) -> List[PlaylistTrack]:
    """Get all playlist tracks from the playlist.

    Returns
    -------
    tracks : List[PlaylistTrack]
        The playlists tracks.
    """
    if isinstance(self._tracks, PartialTracks):
        return await self._tracks.build()

    # Page through the playlist 50 tracks at a time.  The previous
    # implementation appended to a *local* list while its loop condition
    # read `self.tracks` (never updated inside the loop) and it returned
    # the stale `self._tracks` -- i.e. it looped forever or returned the
    # wrong data.
    tracks = []
    offset = 0
    while len(tracks) < self.total_tracks:
        data = await self.__client.http.get_playlist_tracks(self.owner.id, self.id, limit=50, offset=offset)
        items = data['items']
        if not items:
            # Defensive stop: the API returned fewer tracks than reported.
            break
        tracks += [PlaylistTrack(self.__client, item) for item in items]
        offset += 50

    self._tracks = tracks
    self.total_tracks = len(tracks)
    return list(tracks)
async def resume(self, *, device: Optional[SomeDevice] = None):
    """Resume playback on the user's account.

    Parameters
    ----------
    device : Optional[:obj:`SomeDevice`]
        The Device object or id of the device this command is targeting.
        If not supplied, the user’s currently active device is the target.
    """
    # Only forward a device id when one was actually given.  The previous
    # implementation always called str(device), so the default sent the
    # literal string 'None' as the device id instead of omitting it.
    device_id = str(device) if device is not None else None
    await self._user.http.play_playback(None, device_id=device_id)
async def transfer(self, device: SomeDevice, ensure_playback: bool = False):
    """Hand playback over to a new device, optionally starting playback.

    Parameters
    ----------
    device : :obj:`SomeDevice`
        The device on which playback should be started/transferred.
    ensure_playback : bool
        if `True` ensure playback happens on new device.
        else keep the current playback state.
    """
    target = str(device)
    await self._user.http.transfer_player(target, play=ensure_playback)
async def from_href(self):
    """Fetch the full object from spotify through the `href` attribute."""
    if not hasattr(self, 'href'):
        raise TypeError('Spotify object has no `href` attribute, therefore cannot be retrived')

    # Objects that carry their own HTTP session return the raw payload.
    if hasattr(self, 'http'):
        return await self.http.request(('GET', self.href))

    # Otherwise reach the client through the name-mangled private slot
    # (`_<ClassName>__client`) and rebuild a typed object from the payload.
    cls = type(self)
    try:
        client = getattr(self, '_{0}__client'.format(cls.__name__))
    except AttributeError:
        raise TypeError('Spotify object has no way to access a HTTPClient.')

    data = await client.http.request(('GET', self.href))
    return cls(client, data)
def get(self): # pragma: no cover
    """
    Execute the logic behind the meaning of ExpirationDate + return the matched status.

    :return:
        The status of the tested domain.
        Can be one of the official status.
    :rtype: str
    """

    # We get the status of the domain validation.
    domain_validation = self.checker.is_domain_valid()
    # We get the status of the IPv4 validation.
    ip_validation = self.checker.is_ip_valid()

    if "current_test_data" in PyFunceble.INTERN:
        # The end-user want more information whith his test.

        # We update some index.
        PyFunceble.INTERN["current_test_data"].update(
            {
                "domain_syntax_validation": domain_validation,
                "ip4_syntax_validation": ip_validation,
            }
        )

    # NOTE(review): `A and not B or A` reduces to `A`, so the ip_validation
    # clause below is redundant -- confirm whether a different grouping was
    # intended before simplifying.
    if (
        domain_validation
        and not ip_validation
        or domain_validation
        or PyFunceble.CONFIGURATION["local"]
    ):
        # * The element is a valid domain.
        # and
        # * The element is not ahe valid IPv4.
        # or
        # * The element is a valid domain.

        # * We get the HTTP status code of the currently tested element.
        # and
        # * We try to get the element status from the IANA database.
        PyFunceble.INTERN.update(
            {"http_code": HTTPCode().get(), "referer": Referer().get()}
        )

        if not PyFunceble.INTERN["referer"]:
            # We could not get the referer.

            # NOTE(review): this returns the falsy referer value itself
            # (None/False/empty) rather than a status constant -- verify
            # that the caller expects that.
            # We parse the referer status into the upstream call.
            return PyFunceble.INTERN["referer"]

        # The WHOIS record status is not into our list of official status.

        if PyFunceble.INTERN["referer"] and not self.checker.is_subdomain():
            # * The iana database comparison status is not None.
            # and
            # * The domain we are testing is not a subdomain.

            # We try to extract the expiration date from the WHOIS record.
            # And we return the matched status.
            return self._extract()

        # The iana database comparison status is None.

        # We log our whois record if the debug mode is activated.
        Logs().whois(self.whois_record)

        # And we return None, we could not extract the expiration date.
        return None

    # NOTE(review): as above, `B and not A or B` reduces to `B`; with the
    # `local` configuration flag both branches can be reached for the same
    # input depending on order -- confirm the intended precedence.
    if (
        ip_validation
        and not domain_validation
        or ip_validation
        or PyFunceble.CONFIGURATION["local"]
    ):
        # * The element is a valid IPv4.
        # and
        # * The element is not a valid domain.
        # or
        # * The element is a valid IPv4.

        # We get the HTTP status code.
        PyFunceble.INTERN["http_code"] = HTTPCode().get()

        # We log our whois record if the debug mode is activated.
        Logs().whois(self.whois_record)

        # And we return None, there is no expiration date to look for.
        return None

    # The validation was not passed.

    # We log our whois record if the debug mode is activated.
    Logs().whois(self.whois_record)

    # And we return False, the domain could not pass the IP and domains syntax validation.
    return False
q270296 | ExpirationDate._convert_or_shorten_month | test | def _convert_or_shorten_month(cls, data):
"""
Convert a given month into our unified format.
:param data: The month to convert or shorten.
:type data: str
:return: The unified month name.
:rtype: str
"""
# We map the different month and their possible representation.
short_month = {
"jan": [str(1), "01", "Jan", "January"],
"feb": [str(2), "02", "Feb", "February"],
"mar": [str(3), "03", "Mar", "March"],
"apr": [str(4), "04", "Apr", "April"],
"may": [str(5), "05", "May"],
"jun": [str(6), "06", "Jun", "June"],
"jul": [str(7), "07", "Jul", "July"],
"aug": [str(8), "08", "Aug", "August"],
"sep": [str(9), "09", "Sep", "September"],
"oct": [str(10), "Oct", "October"],
"nov": [str(11), "Nov", "November"],
"dec": [str(12), "Dec", "December"],
}
for month in short_month:
# We loop through our map.
if data in short_month[month]:
# If the parsed data (or month if you prefer) is into our map.
# We return the element (or key if you prefer) assigned to
# the month.
return month
# The element is not into our map.
# We return the parsed element (or month if you prefer).
return data | python | {
"resource": ""
} |
def _update_code_urls(self):
    """
    Read the code and update all links.
    """

    # Files whose content must never be rewritten.
    to_ignore = [".gitignore", ".keep"]

    # Both source trees get exactly the same treatment, so walk them in
    # a single loop instead of duplicating it per directory.
    for directory in ("PyFunceble", "tests"):
        base = (
            PyFunceble.CURRENT_DIRECTORY
            + PyFunceble.directory_separator
            + directory
            + PyFunceble.directory_separator
        )

        for root, _, files in PyFunceble.walk(base):
            # We loop through every file of the currently read directory.
            for file in files:
                if file in to_ignore or "__pycache__" in root:
                    # Ignored file or bytecode cache: nothing to update.
                    continue

                # Ensure the path ends with exactly one separator before
                # appending the file name.
                if not root.endswith(PyFunceble.directory_separator):
                    root += PyFunceble.directory_separator

                # We fix the links in the currently read file.
                self._update_docs(root + file)
def _is_version_greater(self):
    """
    Check if the current version is greater as the older older one.
    """

    # Compare the two versions.  Per the comparison helper's contract in
    # the original code, a non-None falsy result means the current
    # version is ahead of the previously published one.
    comparison = Version(True).check_versions(
        self.current_version[0], self.version_yaml
    )

    return comparison is not None and not comparison
def is_dev_version(cls):
    """
    Check if the current branch is `dev`.
    """

    # `git branch` prefixes the active branch with an asterisk; the
    # current branch is `dev` when such a marked line mentions "dev".
    output = Command("git branch").execute()

    return any(
        line.startswith("*") and "dev" in line
        for line in output.split("\n")
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.