| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 31-13.1k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q270200 | feature_selection | test | def feature_selection(feat_select, X, y):
"""" Implements various kinds of feature selection """
# K-best
if re.match('.*-best', feat_select) is not None:
n = int(feat_select.split('-')[0])
selector = SelectKBest(k=n)
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=UserWarning)
features_selected = np.where(
| python | {
"resource": ""
} |
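The `feature_selection` snippet above is cut off right after the k-best branch. A minimal, self-contained sketch of that branch, assuming scikit-learn's ANOVA F-score (`f_classif`) as the scoring function:

```python
import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif

def k_best_selection(X, y, feat_select='10-best'):
    """Keep the k highest-scoring features and report which columns survived."""
    n = int(feat_select.split('-')[0])          # e.g. '10-best' -> 10
    selector = SelectKBest(f_classif, k=n)
    X_new = selector.fit_transform(X, y)
    features_selected = np.where(selector.get_support())[0]
    return X_new, features_selected
```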
q270201 | get_studies_by_regions | test | def get_studies_by_regions(dataset, masks, threshold=0.08, remove_overlap=True,
studies=None, features=None,
regularization="scale"):
""" Set up data for a classification task given a set of masks
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features, and returns studies by feature matrix
(X) and class labels (y)
Args:
dataset: a Neurosynth dataset
masks: a list of paths to Nifti masks
threshold: percentage of voxels active within the mask for study
to be included
remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
features: An optional list of feature names used to constrain the
| python | {
"resource": ""
} |
q270202 | get_feature_order | test | def get_feature_order(dataset, features):
""" Returns a list with the order that features requested appear in
dataset """
| python | {
"resource": ""
} |
q270203 | classify_regions | test | def classify_regions(dataset, masks, method='ERF', threshold=0.08,
remove_overlap=True, regularization='scale',
output='summary', studies=None, features=None,
class_weight='auto', classifier=None,
cross_val='4-Fold', param_grid=None, scoring='accuracy'):
""" Perform classification on specified regions
Given a set of masks, this function retrieves studies associated with
each mask at the specified threshold, optionally removes overlap and
filters by studies and features. Then it trains an algorithm to
classify studies based on features and tests performance.
Args:
dataset: a Neurosynth dataset
masks: a list of paths to Nifti masks
method: a string indicating which method to use.
'SVM': Support Vector Classifier with rbf kernel
'ERF': Extremely Randomized Forest classifier
'Dummy': A dummy classifier using stratified classes as
predictor
threshold: percentage of voxels active within the mask for study
to be included
remove_overlap: A boolean indicating if studies that
appear in more than one mask should be excluded
regularization: A string indicating type of regularization to use.
If None, performs no regularization.
'scale': Unit scale without demeaning
output: A string indicating output type
'summary': Dictionary with summary statistics including score
and n
'summary_clf': Same as above but also includes classifier
'clf': Only returns classifier
Warning: using cv without grid will return an untrained
classifier
studies: An optional list of study names used to constrain the set
used in classification. If None, will use all features in the
dataset.
| python | {
"resource": ""
} |
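A hypothetical call showing how `classify_regions` might be used; the dataset pickle, the mask filenames, the module path, and the result keys (`score`, `n`) are assumptions based on the docstring above, not files or guarantees shipped with Neurosynth:

```python
from neurosynth.base.dataset import Dataset
from neurosynth.analysis.classify import classify_regions  # assumed module path

dataset = Dataset.load('dataset.pkl')             # placeholder pickle
masks = ['amygdala.nii.gz', 'insula.nii.gz']      # placeholder ROI masks

# Train an Extremely Randomized Forest to tell amygdala studies from insula studies.
summary = classify_regions(dataset, masks, method='ERF',
                           threshold=0.08, cross_val='4-Fold')
print(summary['score'], summary['n'])             # assumed summary keys
```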
q270204 | classify | test | def classify(X, y, clf_method='ERF', classifier=None, output='summary_clf',
cross_val=None, class_weight=None, regularization=None,
param_grid=None, scoring='accuracy', refit_all=True,
feat_select=None):
""" Wrapper for scikit-learn classification functions
Implements various types of classification and cross-validation """
# Build classifier
clf = Classifier(clf_method, classifier, param_grid)
# Fit & test model with or without cross-validation
if cross_val is not None:
score = clf.cross_val_fit(X, y, cross_val, scoring=scoring,
| python | {
"resource": ""
} |
q270205 | Classifier.fit | test | def fit(self, X, y, cv=None, class_weight='auto'):
""" Fits X to outcomes y, using clf """
# Incorporate error checking such as:
# if isinstance(self.classifier, ScikitClassifier):
# do one thing
# otherwise do something else.
| python | {
"resource": ""
} |
q270206 | Classifier.set_class_weight | test | def set_class_weight(self, class_weight='auto', y=None):
""" Sets the class_weight of the classifier to match y """
if class_weight is None:
cw = None
try:
self.clf.set_params(class_weight=cw)
except ValueError:
pass
elif class_weight == 'auto':
c = np.bincount(y)
ii = np.nonzero(c)[0]
c = c / float(c.sum())
cw = dict(zip(ii[::-1], c[ii]))
| python | {
"resource": ""
} |
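The 'auto' branch above is easier to see with concrete numbers. A standalone sketch of the same computation for binary labels: each class ends up weighted by the other class's relative frequency, so the minority class is up-weighted:

```python
import numpy as np

y = np.array([0, 0, 0, 0, 1, 1])
c = np.bincount(y)                   # counts per class: [4, 2]
ii = np.nonzero(c)[0]                # classes present: [0, 1]
c = c / float(c.sum())               # proportions: [0.667, 0.333]
cw = dict(zip(ii[::-1], c[ii]))      # {1: 0.667, 0: 0.333}
print(cw)                            # the rarer class gets the larger weight
```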
q270207 | Classifier.cross_val_fit | test | def cross_val_fit(self, X, y, cross_val='4-Fold', scoring='accuracy',
feat_select=None, class_weight='auto'):
""" Fits X to outcomes y, using clf and cv_method """
from sklearn import cross_validation
self.X = X
self.y = y
self.set_class_weight(class_weight=class_weight, y=y)
# Set cross validator
if isinstance(cross_val, string_types):
if re.match('.*-Fold', cross_val) is not None:
n = int(cross_val.split('-')[0])
self.cver = cross_validation.StratifiedKFold(self.y, n)
else:
raise Exception('Unrecognized cross validation method')
else:
self.cver = cross_val
if feat_select is not None:
self.features_selected = []
# Perform cross-validated classification
from sklearn.grid_search import GridSearchCV
if isinstance(self.clf, GridSearchCV):
import warnings
if feat_select is not None:
warnings.warn(
"Cross-validated feature selection not supported with "
| python | {
"resource": ""
} |
q270208 | Classifier.fit_dataset | test | def fit_dataset(self, dataset, y, features=None,
feature_type='features'):
""" Given a dataset, fits either features or voxels to y """
# Get data from dataset
if feature_type == 'features':
X = np.rot90(dataset.feature_table.data.toarray())
| python | {
"resource": ""
} |
q270209 | average_within_regions | test | def average_within_regions(dataset, regions, masker=None, threshold=None,
remove_zero=True):
""" Aggregates over all voxels within each ROI in the input image.
Takes a Dataset and a Nifti image that defines distinct regions, and
returns a numpy matrix of ROIs x mappables, where the value at each
ROI is the proportion of active voxels in that ROI. Each distinct ROI
must have a unique value in the image; non-contiguous voxels with the
same value will be assigned to the same ROI.
Args:
dataset: Either a Dataset instance from which image data are
extracted, or a Numpy array containing image data to use. If
the latter, the array contains voxels in rows and
features/studies in columns. The number of voxels must be equal
to the length of the vectorized image mask in the regions
image.
regions: An image defining the boundaries of the regions to use.
Can be one of:
1) A string name of the NIFTI or Analyze-format image
2) A NiBabel SpatialImage
3) A list of NiBabel images
4) A 1D numpy array of the same length as the mask vector in
the Dataset's current Masker.
masker: Optional masker used to load image if regions is not a
numpy array. Must be passed if dataset is a numpy array.
threshold: An optional float in the range of 0 - 1 or integer. If
passed, the array will be binarized, with ROI values above the
threshold assigned to True and values below the threshold
assigned to False. (E.g., if threshold = 0.05, only ROIs in
which more than 5% of voxels are active will be considered
active.) If threshold is integer, studies will only be
considered active if they activate more than that number of
voxels in the ROI.
remove_zero: An optional boolean; when True, assume that voxels
with value of 0 should not be considered as a separate ROI, and
will be ignored.
Returns:
A 2D numpy array with ROIs in rows and mappables in columns.
"""
if masker is not None:
masker = masker
else:
if isinstance(dataset, Dataset):
masker = dataset.masker
else:
if not type(regions).__module__.startswith('numpy'):
| python | {
"resource": ""
} |
q270210 | get_random_voxels | test | def get_random_voxels(dataset, n_voxels):
""" Returns mappable data for a random subset of voxels.
May be useful as a baseline in predictive analyses--e.g., to compare
performance of a more principled feature selection method with simple
random selection.
Args:
dataset: A Dataset instance
n_voxels: An integer specifying the number of random voxels to select.
| python | {
"resource": ""
} |
q270211 | _get_top_words | test | def _get_top_words(model, feature_names, n_top_words=40):
""" Return top forty words from each topic in trained topic model.
"""
topic_words = []
for topic in model.components_:
| python | {
"resource": ""
} |
q270212 | pearson | test | def pearson(x, y):
""" Correlates row vector x with each row vector in 2D array y. """
data = np.vstack((x, y))
ms = data.mean(axis=1)[(slice(None, None, None), None)]
datam = data - ms
datass = | python | {
"resource": ""
} |
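The `pearson` snippet is truncated at the sum-of-squares line. One way to finish it; the reconstruction below is a sketch that matches the docstring's contract (correlate row vector `x` with each row of `y`), not the verified original:

```python
import numpy as np

def pearson(x, y):
    """Correlates row vector x with each row vector in 2D array y."""
    data = np.vstack((x, y))
    ms = data.mean(axis=1)[:, None]
    datam = data - ms                                # demean every row
    datass = np.sqrt((datam ** 2).sum(axis=1))       # per-row norm
    temp = np.dot(datam[1:], datam[0].T)             # covariance with x
    return temp / (datass[1:] * datass[0])           # normalize -> Pearson r
```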
q270213 | fdr | test | def fdr(p, q=.05):
""" Determine FDR threshold given a p value array and desired false
discovery rate q. """
s = np.sort(p)
nvox = p.shape[0]
null = np.array(range(1, nvox + | python | {
"resource": ""
} |
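The `fdr` snippet stops mid-expression. A sketch completing it with the standard Benjamini-Hochberg step-up rule; the `-1` sentinel for "no p-value survives" is an assumption:

```python
import numpy as np

def fdr(p, q=.05):
    """Determine the FDR threshold for a p-value array p at rate q."""
    s = np.sort(p)
    nvox = p.shape[0]
    null = np.array(range(1, nvox + 1), dtype='float') * q / nvox
    below = np.where(s <= null)[0]       # p-values under the BH line
    return s[below[-1]] if len(below) else -1
```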
q270214 | Dataset._load_activations | test | def _load_activations(self, filename):
""" Load activation data from a text file.
Args:
filename (str): a string pointing to the location of the txt file
to read from.
"""
logger.info("Loading activation data from %s..." % filename)
activations = pd.read_csv(filename, sep='\t')
activations.columns = [col.lower()
for col in list(activations.columns)]
# Make sure all mandatory columns exist
mc = ['x', 'y', 'z', 'id', 'space']
| python | {
"resource": ""
} |
q270215 | Dataset.create_image_table | test | def create_image_table(self, r=None):
""" Create and store a new ImageTable instance based on the current
Dataset. Will generally be called privately, but may be useful as a
convenience method in cases where the user wants to re-generate the
table with a new smoothing kernel of | python | {
"resource": ""
} |
q270216 | Dataset.get_studies | test | def get_studies(self, features=None, expression=None, mask=None,
peaks=None, frequency_threshold=0.001,
activation_threshold=0.0, func=np.sum, return_type='ids',
r=6
):
""" Get IDs or data for studies that meet specific criteria.
If multiple criteria are passed, the set intersection is returned. For
example, passing expression='emotion' and mask='my_mask.nii.gz' would
return only those studies that are associated with emotion AND report
activation within the voxels indicated in the passed image.
Args:
ids (list): A list of IDs of studies to retrieve.
features (list or str): The name of a feature, or a list of
features, to use for selecting studies.
expression (str): A string expression to pass to the PEG for study
retrieval.
mask: the mask image (see Masker documentation for valid data
types).
peaks (ndarray or list): Either an n x 3 numpy array, or a list of
lists or tuples (e.g., [(-10, 22, 14)]) specifying the world
(x/y/z) coordinates of the target location(s).
frequency_threshold (float): For feature-based or expression-based
selection, the threshold for selecting studies--i.e., the
cut-off for a study to be included. Must be a float in range
[0, 1].
activation_threshold (int or float): For mask-based selection,
threshold for a study to be included based on amount of
activation displayed. If an integer, represents the absolute
number of voxels that must be active within the mask in order
for a study to be selected. If a float, it represents the
proportion of voxels that must be active.
func (Callable): The function to use when aggregating over the list
of features. See documentation in FeatureTable.get_ids() for a
full explanation. Only used for feature- or expression-based
selection.
return_type (str): A string specifying what data to return. Valid
options are:
'ids': returns a list of IDs of selected studies.
'images': returns a voxel x study matrix of data for all
selected studies.
'weights': returns a dict where the keys are study IDs and the
values are the computed weights. Only valid when performing
feature-based selection.
r (int): For peak-based selection, the distance cut-off (in mm)
for inclusion (i.e., only studies with one or more activations
within r mm of one of the passed foci will be returned).
Returns:
When return_type is 'ids' (default), returns a list of IDs of the
selected studies. When return_type is 'data', returns a 2D numpy
array, with voxels in rows and studies in columns. When return_type
is 'weights' (valid only for expression-based selection), returns
a dict, where the keys are study IDs, and the values are the
computed weights.
Examples
--------
Select all studies tagged with the feature 'emotion':
>>> ids = dataset.get_studies(features='emotion')
Select all studies that activate at least 20% of voxels in an amygdala
mask, and retrieve activation data rather than IDs:
>>> data = dataset.get_studies(mask='amygdala_mask.nii.gz',
threshold=0.2, return_type='images')
Select studies that report at least one activation within 12 mm of at
least one of three specific foci:
>>> ids = dataset.get_studies(peaks=[[12, -20, 30], [-26, 22, 22],
[0, 36, -20]], r=12)
"""
results = []
# Feature-based selection
if features is not None:
# Need to handle weights as a special case, because we can't
# retrieve the weights later using just the IDs.
if return_type == 'weights':
| python | {
"resource": ""
} |
q270217 | Dataset.add_features | test | def add_features(self, features, append=True, merge='outer',
duplicates='ignore', min_studies=0.0, threshold=0.001):
""" Construct a new FeatureTable from file.
Args:
features: Feature data to add. Can be:
(a) A text file containing the feature data, where each row is
a study in the database, with features in columns. The first
column must contain the IDs of the studies to match up with the
image data.
| python | {
"resource": ""
} |
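A hypothetical end-to-end sketch of building a `Dataset` and attaching features; the module path is assumed, and `database.txt` / `features.txt` are placeholder file names in the formats the docstrings above describe:

```python
from neurosynth.base.dataset import Dataset   # assumed module path

dataset = Dataset('database.txt')        # tab-separated activation coordinates
dataset.add_features('features.txt')     # study x feature matrix, study IDs in the first column
print(dataset.get_feature_names()[:10])  # peek at the first few feature names
dataset.save('dataset.pkl')              # pickle for later reuse with Dataset.load()
```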
q270218 | Dataset.get_feature_names | test | def get_feature_names(self, features=None):
""" Returns names of features. If features is None, returns all
features. Otherwise assumes the user is trying to find the order of the
features. """
if features:
| python | {
"resource": ""
} |
q270219 | Dataset.get_feature_counts | test | def get_feature_counts(self, threshold=0.001):
""" Returns a dictionary, where the keys are the feature names
and the values are the number of studies tagged with the feature. """
| python | {
"resource": ""
} |
q270220 | Dataset.load | test | def load(cls, filename):
""" Load a pickled Dataset instance from file. """
try:
dataset = pickle.load(open(filename, 'rb'))
except UnicodeDecodeError:
| python | {
"resource": ""
} |
q270221 | Dataset.save | test | def save(self, filename):
""" Pickle the Dataset instance to the provided file.
"""
if hasattr(self, 'feature_table'):
self.feature_table._sdf_to_csr()
| python | {
"resource": ""
} |
q270222 | ImageTable.get_image_data | test | def get_image_data(self, ids=None, voxels=None, dense=True):
""" Slices and returns a subset of image data.
Args:
ids (list, array): A list or 1D numpy array of study ids to
return. If None, returns data for all studies.
voxels (list, array): A list or 1D numpy array of voxel indices
(i.e., rows) to return. If None, returns data for all voxels.
dense (bool): Optional boolean. When True (default), convert the
result to a dense array before returning. When False, keep as
sparse matrix.
Returns:
A 2D numpy array with voxels in rows and studies in columns.
"""
if dense and ids is None and voxels is None:
logger.warning(
"Warning: get_image_data() is being called without specifying "
"a subset of studies or voxels to retrieve. This may result in"
" a very large amount of data (several GB) being read into "
| python | {
"resource": ""
} |
q270223 | FeatureTable.get_feature_data | test | def get_feature_data(self, ids=None, features=None, dense=True):
""" Slices and returns a subset of feature data.
Args:
ids (list, array): A list or 1D numpy array of study ids to
return rows for. If None, returns data for all studies
(i.e., all rows in array).
features (list, array): A list or 1D numpy array of named features
to return. If None, returns data for all features (i.e., all
columns in array).
dense (bool): Optional boolean. When True (default), convert the
result to a dense array before returning. When False, keep as
| python | {
"resource": ""
} |
q270224 | FeatureTable.get_ordered_names | test | def get_ordered_names(self, features):
""" Given a list of features, returns features in order that they
appear in database.
Args:
features (list): A list or 1D numpy array of named features to
return.
Returns:
| python | {
"resource": ""
} |
q270225 | FeatureTable.get_ids | test | def get_ids(self, features, threshold=0.0, func=np.sum, get_weights=False):
""" Returns a list of all studies in the table that meet the desired
feature-based criteria.
Will most commonly be used to retrieve studies that use one or more
features with some minimum frequency; e.g.,:
get_ids(['fear', 'anxiety'], threshold=0.001)
Args:
features (list): a list of feature names to search on.
threshold (float): optional float indicating threshold features
must pass to be included.
func (Callable): any numpy function to use for thresholding
(default: sum). The function will be applied to the list of
features and the result compared to the threshold. This can be
used to change the meaning of the query in powerful ways. E.g.:
max: any of the features have to pass threshold
(i.e., max > thresh)
min: all features must each individually pass threshold
(i.e., min > thresh)
| python | {
"resource": ""
} |
q270226 | FeatureTable.search_features | test | def search_features(self, search):
''' Returns all features that match any of the elements in the input
list.
Args:
search (str, list): A string or list of strings defining the query.
Returns:
| python | {
"resource": ""
} |
q270227 | FeatureTable.get_ids_by_expression | test | def get_ids_by_expression(self, expression, threshold=0.001, func=np.sum):
""" Use a PEG to parse expression and return study IDs."""
lexer = lp.Lexer()
lexer.build()
parser = lp.Parser(
| python | {
"resource": ""
} |
q270228 | FeatureTable._sdf_to_csr | test | def _sdf_to_csr(self):
""" Convert FeatureTable to SciPy CSR matrix. """
data = self.data.to_dense()
self.data = {
'columns': list(data.columns),
| python | {
"resource": ""
} |
q270229 | deprecated | test | def deprecated(*args):
""" Deprecation warning decorator. Takes optional deprecation message,
otherwise will use a generic warning. """
def wrap(func):
def wrapped_func(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return func(*args, **kwargs)
return wrapped_func
if len(args) == 1 and | python | {
"resource": ""
} |
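The decorator above is cut off in the argument-dispatch line. A self-contained sketch of how it could be completed so it works both bare (`@deprecated`) and with a custom message (`@deprecated("use X instead")`); the default message text is an assumption:

```python
import functools
import warnings

def deprecated(*args):
    """Deprecation warning decorator with an optional custom message."""
    def wrap(func, msg="This function is deprecated."):
        @functools.wraps(func)
        def wrapped_func(*f_args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return func(*f_args, **kwargs)
        return wrapped_func

    if len(args) == 1 and callable(args[0]):
        return wrap(args[0])                      # used bare: @deprecated
    msg = args[0] if args else "This function is deprecated."
    return lambda func: wrap(func, msg)           # used as @deprecated("...")
```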
q270230 | transform | test | def transform(foci, mat):
""" Convert coordinates from one space to another using provided
| python | {
"resource": ""
} |
q270231 | xyz_to_mat | test | def xyz_to_mat(foci, xyz_dims=None, mat_dims=None):
""" Convert an N x 3 array of XYZ coordinates to matrix indices. """
foci = np.hstack((foci, np.ones((foci.shape[0], 1))))
mat = np.array([[-0.5, 0, 0, 45], [0, 0.5, 0, 63], [0, 0, | python | {
"resource": ""
} |
q270232 | Transformer.apply | test | def apply(self, name, foci):
""" Apply a named transformation to a set of foci.
If the named transformation doesn't exist, return foci untransformed.
"""
if name in self.transformations:
return transform(foci, self.transformations[name])
else:
| python | {
"resource": ""
} |
q270233 | Masker.mask | test | def mask(self, image, nan_to_num=True, layers=None, in_global_mask=False):
""" Vectorize an image and mask out all invalid voxels.
Args:
images: The image to vectorize and mask. Input can be any object
handled by get_image().
layers: Which mask layers to use (specified as int, string, or
list of ints and strings). When None, applies the conjunction
of all layers.
nan_to_num: boolean indicating whether to convert NaNs to 0.
| python | {
"resource": ""
} |
q270234 | Masker.get_mask | test | def get_mask(self, layers=None, output='vector', in_global_mask=True):
""" Set the current mask by taking the conjunction of all specified
layers.
Args:
layers: Which layers to include. See documentation for add() for
format.
include_global_mask: Whether or not to automatically include the
global mask (i.e., self.volume) in the conjunction.
"""
if | python | {
"resource": ""
} |
q270235 | load_imgs | test | def load_imgs(filenames, masker, nan_to_num=True):
""" Load multiple images from file into an ndarray.
Args:
filenames: A single filename or list of filenames pointing to valid
images.
masker: A Masker instance.
nan_to_num: Optional boolean indicating whether to convert NaNs to zero.
Returns:
An m x n 2D numpy array, where m = number of voxels in mask and
| python | {
"resource": ""
} |
q270236 | save_img | test | def save_img(data, filename, masker, header=None):
""" Save a vectorized image to file. """
if not header:
header = masker.get_header()
header.set_data_dtype(data.dtype) # Avoids loss of precision
# Update min/max -- this should happen on | python | {
"resource": ""
} |
q270237 | set_logging_level | test | def set_logging_level(level=None):
"""Set neurosynth's logging level
Args
level : str
Name of the logging level (warning, error, info, etc.) known
to the logging module. If no level is provided, it is read from
the environment variable NEUROSYNTH_LOGLEVEL
"""
if level is None:
| python | {
"resource": ""
} |
q270238 | expand_address | test | def expand_address(address, languages=None, **kw):
"""
Expand the given address into one or more normalized strings.
Required
--------
@param address: the address as either Unicode or a UTF-8 encoded string
Options
-------
@param languages: a tuple or list of ISO language code strings (e.g. "en", "fr", "de", etc.)
to use in expansion. If None is passed, use language classifier
to detect language automatically.
@param address_components: an integer (bit-set) of address component expansions
to use e.g. ADDRESS_NAME | ADDRESS_STREET would use
only expansions which apply to venue names or streets.
@param latin_ascii: use the Latin to ASCII transliterator, which normalizes e.g. æ => ae
@param transliterate: use any available transliterators for non-Latin scripts, e.g.
for the Greek phrase διαφορετικούς becomes diaphoretikoús̱
@param strip_accents: strip accented characters e.g. é => e, ç => c. This loses some
information in various languages, but in general we want
@param decompose: perform Unicode normalization (NFD form)
@param lowercase: UTF-8 lowercase the string
@param trim_string: trim spaces on either side of the string
@param replace_word_hyphens: add version of the string replacing hyphens with space
@param delete_word_hyphens: add version of the string with hyphens deleted
@param replace_numeric_hyphens: add version of the string with numeric hyphens replaced
e.g. 12345-6789 => 12345 6789
@param delete_numeric_hyphens: add version of the string with numeric hyphens removed
| python | {
"resource": ""
} |
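A hypothetical usage of `expand_address` from the pypostal bindings the docstring above documents; the address strings are just example inputs:

```python
from postal.expand import expand_address

# Every normalized variant of the input, e.g. "quatre-vingt-douze" -> "92".
for variant in expand_address('Quatre-vingt-douze Ave des Champs-Élysées'):
    print(variant)

# Restricting expansion to a known language skips the language classifier.
expand_address('781 Franklin Ave', languages=['en'])
```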
q270239 | normalized_tokens | test | def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,
token_options=DEFAULT_TOKEN_OPTIONS,
strip_parentheticals=True, whitespace=False,
languages=None):
'''
Normalizes a string, tokenizes, and normalizes each token
with string and token-level options.
This version only uses libpostal's deterministic normalizations
i.e. methods with a single output. The string tree version will
return multiple normalized strings, each with tokens.
Usage:
| python | {
"resource": ""
} |
q270240 | parse_address | test | def parse_address(address, language=None, country=None):
"""
Parse address into components.
@param address: the address as either Unicode or a UTF-8 encoded string
@param language (optional): language code
@param country (optional): | python | {
"resource": ""
} |
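A hypothetical `parse_address` call; the function returns a list of `(value, label)` tuples, with labels drawn from libpostal's component set (`house`, `house_number`, `road`, `postcode`, ...):

```python
from postal.parser import parse_address

parse_address('Barboncino 781 Franklin Ave, Crown Heights, Brooklyn, NY 11216')
# e.g. [('barboncino', 'house'), ('781', 'house_number'),
#       ('franklin ave', 'road'), ('crown heights', 'suburb'), ...]
```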
q270241 | near_dupe_hashes | test | def near_dupe_hashes(labels, values, languages=None, **kw):
"""
Hash the given address into normalized strings that can be used to group similar
addresses together for more detailed pairwise comparison. This can be thought of
as the blocking function in record linkage or locality-sensitive hashing in
document near-duplicate detection.
Required
--------
@param labels: array of component labels as either Unicode or UTF-8 encoded strings
e.g. ["house_number", "road", "postcode"]
@param values: array of component values as either Unicode or UTF-8 encoded strings
e.g. ["123", "Broadway", "11216"]. Note len(values) must be equal to
len(labels).
Options
-------
@param languages: a tuple or list of ISO language code strings (e.g. "en", "fr", "de", etc.)
to use in expansion. If None is passed, use language classifier
to detect language automatically.
@param with_name: use name in the hashes
@param with_address: use house_number & street in the hashes
@param with_unit: use secondary unit as part of the hashes
@param with_city_or_equivalent: use the city, city_district, suburb, or island name as one of
| python | {
"resource": ""
} |
q270242 | dict_to_object | test | def dict_to_object(item, object_name):
"""Converts a python dict to a namedtuple, saving memory."""
fields = item.keys()
values = item.values()
return json.loads(json.dumps(item),
| python | {
"resource": ""
} |
q270243 | TiingoClient.get_ticker_price | test | def get_ticker_price(self, ticker,
startDate=None, endDate=None,
fmt='json', frequency='daily'):
"""By default, return latest EOD Composite Price for a stock ticker.
On average, each feed contains 3 data sources.
Supported tickers + Available Day Ranges are here:
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
Args:
ticker (string): Unique identifier for stock ticker
startDate (string): Start of ticker range in YYYY-MM-DD format
endDate (string): End of ticker range in YYYY-MM-DD format
fmt (string): 'csv' or 'json'
frequency (string): Resample frequency
"""
url = self._get_url(ticker, frequency)
params = {
'format': fmt if fmt != "object" else 'json', # conversion local
'resampleFreq': frequency
}
if startDate:
| python | {
"resource": ""
} |
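A hypothetical `get_ticker_price` call; the API key is a placeholder that must come from a Tiingo account:

```python
from tiingo import TiingoClient

client = TiingoClient({'api_key': 'YOUR_TIINGO_API_KEY'})   # placeholder key
prices = client.get_ticker_price('GOOGL',
                                 startDate='2018-01-02',
                                 endDate='2018-01-31',
                                 fmt='json',
                                 frequency='daily')
```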
q270244 | TiingoClient.get_dataframe | test | def get_dataframe(self, tickers,
startDate=None, endDate=None, metric_name=None, frequency='daily'):
""" Return a pandas.DataFrame of historical prices for one or more ticker symbols.
By default, return latest EOD Composite Price for a list of stock tickers.
On average, each feed contains 3 data sources.
Supported tickers + Available Day Ranges are here:
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
or from the TiingoClient.list_tickers() method.
Args:
tickers (string/list): One or more unique identifiers for a stock ticker.
startDate (string): Start of ticker range in YYYY-MM-DD format.
endDate (string): End of ticker range in YYYY-MM-DD format.
metric_name (string): Optional parameter specifying metric to be returned for each
ticker. In the event of a single ticker, this is optional and if not specified
all of the available data will be returned. In the event of a list of tickers,
this parameter is required.
frequency (string): Resample frequency (defaults to daily).
"""
valid_columns = ['open', 'high', 'low', 'close', 'volume', 'adjOpen', 'adjHigh', 'adjLow',
'adjClose', 'adjVolume', 'divCash', 'splitFactor']
if metric_name is not None and metric_name not in valid_columns:
raise APIColumnNameError('Valid data items are: ' + str(valid_columns))
params = {
'format': 'json',
'resampleFreq': frequency
}
if startDate:
params['startDate'] = startDate
if endDate:
params['endDate'] = endDate
if pandas_is_installed:
if type(tickers) is str:
stock = tickers
url = self._get_url(stock, frequency)
response = self._request('GET', url, params=params)
df = pd.DataFrame(response.json())
if metric_name is not None:
| python | {
"resource": ""
} |
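A hypothetical multi-ticker `get_dataframe` call; as the docstring notes, `metric_name` is required when more than one ticker is passed (the key is again a placeholder):

```python
from tiingo import TiingoClient

client = TiingoClient({'api_key': 'YOUR_TIINGO_API_KEY'})   # placeholder key
df = client.get_dataframe(['GOOGL', 'AAPL'],
                          metric_name='adjClose',
                          startDate='2018-01-02',
                          endDate='2018-01-31',
                          frequency='daily')
print(df.head())   # one column per ticker, indexed by date
```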
q270245 | TiingoClient.get_bulk_news | test | def get_bulk_news(self, file_id=None, fmt='json'):
"""Only available to institutional clients.
If ID is NOT provided, return array of available file_ids.
If ID is provided, provides URL which you can use to download your
file, as well as some metadata | python | {
"resource": ""
} |
q270246 | RestClient._request | test | def _request(self, method, url, **kwargs):
"""Make HTTP request and return response object
Args:
method (str): GET, POST, PUT, DELETE
url (str): path appended to the base_url to create request
**kwargs: passed directly to a requests.request object
"""
resp = self._session.request(method,
'{}/{}'.format(self._base_url, url),
| python | {
"resource": ""
} |
q270247 | HTTPClient.get_bearer_info | test | async def get_bearer_info(self):
"""Get the application bearer token from client_id and client_secret."""
if self.client_id is None:
raise SpotifyException(_GET_BEARER_ERR % 'client_id')
elif self.client_secret is None:
raise SpotifyException(_GET_BEARER_ERR % 'client_secret')
token = b64encode(':'.join((self.client_id, self.client_secret)).encode())
kwargs = {
| python | {
"resource": ""
} |
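A minimal sketch of the client-credentials flow the truncated method above appears to implement; the token endpoint is Spotify's public one, while session handling and error checking are simplified for illustration:

```python
import aiohttp
from base64 import b64encode

async def get_bearer_info(client_id: str, client_secret: str) -> dict:
    """Exchange client credentials for an application bearer token."""
    token = b64encode(f'{client_id}:{client_secret}'.encode()).decode()
    headers = {'Authorization': f'Basic {token}'}
    data = {'grant_type': 'client_credentials'}
    async with aiohttp.ClientSession() as session:
        async with session.post('https://accounts.spotify.com/api/token',
                                headers=headers, data=data) as resp:
            return await resp.json()   # contains 'access_token', 'token_type', 'expires_in'
```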
q270248 | HTTPClient.request | test | async def request(self, route, **kwargs):
"""Make a request to the spotify API with the current bearer credentials.
Parameters
----------
route : Union[tuple[str, str], Route]
A tuple of the method and url or a :class:`Route` object.
kwargs : Any
keyword arguments to pass into :class:`aiohttp.ClientSession.request`
"""
if isinstance(route, tuple):
method, url = route
else:
method = route.method
url = route.url
if self.bearer_info is None:
self.bearer_info = bearer_info = await self.get_bearer_info()
access_token = bearer_info['access_token']
else:
access_token = self.bearer_info['access_token']
headers = {
'Authorization': 'Bearer ' + access_token,
'Content-Type': kwargs.get('content_type', 'application/json'),
**kwargs.pop('headers', {})
| python | {
"resource": ""
} |
q270249 | HTTPClient.album_tracks | test | def album_tracks(self, spotify_id, limit=20, offset=0, market='US'):
"""Get an albums tracks by an ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The offset of which Spotify should start yielding from.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
| python | {
"resource": ""
} |
q270250 | HTTPClient.artist | test | def artist(self, spotify_id):
"""Get a spotify artist by their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
"""
| python | {
"resource": ""
} |
q270251 | HTTPClient.artist_albums | test | def artist_albums(self, spotify_id, include_groups=None, limit=20, offset=0, market='US'):
"""Get an artists tracks by their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
include_groups : INCLUDE_GROUPS_TP
INCLUDE_GROUPS
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The offset of which Spotify should start yielding from.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
| python | {
"resource": ""
} |
q270252 | HTTPClient.artist_top_tracks | test | def artist_top_tracks(self, spotify_id, country):
"""Get an artists top tracks per country with their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
country : COUNTRY_TP
COUNTRY | python | {
"resource": ""
} |
q270253 | HTTPClient.artist_related_artists | test | def artist_related_artists(self, spotify_id):
"""Get related artists for an artist by their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
| python | {
"resource": ""
} |
q270254 | HTTPClient.artists | test | def artists(self, spotify_ids):
"""Get a spotify artists by their IDs.
Parameters
----------
spotify_id : List[str]
The spotify_ids to search with.
"""
| python | {
"resource": ""
} |
q270255 | HTTPClient.category | test | def category(self, category_id, country=None, locale=None):
"""Get a single category used to tag items in Spotify.
Parameters
----------
category_id : str
The Spotify category ID for the category.
country : COUNTRY_TP
| python | {
"resource": ""
} |
q270256 | HTTPClient.category_playlists | test | def category_playlists(self, category_id, limit=20, offset=0, country=None):
"""Get a list of Spotify playlists tagged with a particular category.
Parameters
----------
category_id : str
The Spotify category ID for the category.
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
country : COUNTRY_TP
COUNTRY
| python | {
"resource": ""
} |
q270257 | HTTPClient.categories | test | def categories(self, limit=20, offset=0, country=None, locale=None):
"""Get a list of categories used to tag items in Spotify.
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
country : COUNTRY_TP
COUNTRY
locale : LOCALE_TP
LOCALE
| python | {
"resource": ""
} |
q270258 | HTTPClient.featured_playlists | test | def featured_playlists(self, locale=None, country=None, timestamp=None, limit=20, offset=0):
"""Get a list of Spotify featured playlists.
Parameters
----------
locale : LOCALE_TP
LOCALE
country : COUNTRY_TP
COUNTRY
timestamp : TIMESTAMP_TP
TIMESTAMP
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
"""
| python | {
"resource": ""
} |
q270259 | HTTPClient.new_releases | test | def new_releases(self, *, country=None, limit=20, offset=0):
"""Get a list of new album releases featured in Spotify.
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0
country : COUNTRY_TP
COUNTRY
"""
| python | {
"resource": ""
} |
q270260 | HTTPClient.recommendations | test | def recommendations(self, seed_artists, seed_genres, seed_tracks, *, limit=20, market=None, **filters):
"""Get Recommendations Based on Seeds.
Parameters
----------
seed_artists : str
A comma separated list of Spotify IDs for seed artists. Up to 5 seed values may be provided.
seed_genres : str
A comma separated list of any genres in the set of available genre seeds. Up to 5 seed values may be provided.
seed_tracks : str
A comma separated list of Spotify IDs for a seed track. Up to 5 seed values may be provided.
limit : Optional[int]
The maximum number of items to return. Default: 20. | python | {
"resource": ""
} |
q270261 | HTTPClient.following_artists_or_users | test | def following_artists_or_users(self, ids, *, type='artist'):
"""Check to see if the current user is following one or more artists or other Spotify users.
Parameters
----------
ids : List[str]
A comma-separated list of the artist or | python | {
"resource": ""
} |
q270262 | Artist.get_albums | test | async def get_albums(self, *, limit: Optional[int] = 20, offset: Optional[int] = 0, include_groups=None, market: Optional[str] = None) -> List[Album]:
"""Get the albums of a Spotify artist.
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The offset of which Spotify should start yielding from.
include_groups : INCLUDE_GROUPS_TP
INCLUDE_GROUPS
market : Optional[str]
An ISO 3166-1 alpha-2 country | python | {
"resource": ""
} |
q270263 | Artist.get_all_albums | test | async def get_all_albums(self, *, market='US') -> List[Album]:
"""loads all of the artists albums, depending on how many the artist has this may be a long operation.
Parameters
----------
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
Returns
-------
albums : List[Album]
The albums of the artist.
"""
from .album import Album
albums = []
offset = 0
| python | {
"resource": ""
} |
q270264 | Artist.total_albums | test | async def total_albums(self, *, market: str = None) -> int:
"""get the total amout of tracks in the album.
Parameters
----------
market : Optional[str]
| python | {
"resource": ""
} |
q270265 | Artist.related_artists | test | async def related_artists(self) -> List[Artist]:
"""Get Spotify catalog information about artists similar to a given artist.
Similarity is based on analysis of the Spotify community’s listening history.
| python | {
"resource": ""
} |
q270266 | User.currently_playing | test | async def currently_playing(self) -> Tuple[Context, Track]:
"""Get the users currently playing track.
Returns
-------
context, track : Tuple[Context, Track]
A tuple of the context and track.
"""
| python | {
"resource": ""
} |
q270267 | User.get_player | test | async def get_player(self) -> Player:
"""Get information about the users current playback.
Returns
-------
player : Player
A player object representing the current playback.
"""
| python | {
"resource": ""
} |
q270268 | User.get_devices | test | async def get_devices(self) -> List[Device]:
"""Get information about the users avaliable devices.
Returns
-------
devices : List[Device]
The devices the user has available.
| python | {
"resource": ""
} |
q270269 | User.recently_played | test | async def recently_played(self) -> List[Dict[str, Union[Track, Context, str]]]:
"""Get tracks from the current users recently played tracks.
Returns
-------
playlist_history : List[Dict[str, Union[Track, Context, str]]]
A list of playlist history object.
Each object is a dict with a timestamp, track and context field.
"""
| python | {
"resource": ""
} |
q270270 | User.replace_tracks | test | async def replace_tracks(self, playlist, *tracks) -> str:
"""Replace all the tracks in a playlist, overwriting its existing tracks.
This powerful request can be useful for replacing tracks, re-ordering existing tracks, or clearing the playlist.
| python | {
"resource": ""
} |
q270271 | User.reorder_tracks | test | async def reorder_tracks(self, playlist, start, insert_before, length=1, *, snapshot_id=None):
"""Reorder a track or a group of tracks in a playlist.
Parameters
----------
playlist : Union[str, Playlist]
The playlist to modify
start : int
The position of the first track to be reordered.
insert_before : int
The position where the tracks should be inserted.
length : Optional[int]
The amount of tracks to be reordered. Defaults to 1 if not set.
snapshot_id : str
| python | {
"resource": ""
} |
q270272 | User.create_playlist | test | async def create_playlist(self, name, *, public=True, collaborative=False, description=None):
"""Create a playlist for a Spotify user.
Parameters
----------
name : str
The name of the playlist.
public : Optional[bool]
The public/private status of the playlist.
`True` for public, `False` for private.
collaborative : Optional[bool]
If `True`, the playlist will become collaborative and other users will be able to modify the playlist.
description : Optional[str]
The playlist description
Returns
------- | python | {
"resource": ""
} |
q270273 | User.get_playlists | test | async def get_playlists(self, *, limit=20, offset=0):
"""get the users playlists from spotify.
Parameters
----------
limit : Optional[int]
The limit on how many playlists to retrieve for this user (default is 20).
offset : Optional[int]
The offset from where the api should start from in the playlists.
Returns
-------
playlists : List[Playlist]
A list of the users playlists.
"""
| python | {
"resource": ""
} |
q270274 | Album.get_tracks | test | async def get_tracks(self, *, limit: Optional[int] = 20, offset: Optional[int] = 0) -> List[Track]:
"""get the albums tracks from spotify.
Parameters
----------
limit : Optional[int]
The limit on how many tracks to retrieve for this album (default is 20).
offset : Optional[int]
The offset from where the api should start from in the tracks.
Returns
-------
| python | {
"resource": ""
} |
q270275 | Album.get_all_tracks | test | async def get_all_tracks(self, *, market: Optional[str] = 'US') -> List[Track]:
"""loads all of the albums tracks, depending on how many the album has this may be a long operation.
Parameters
----------
market : Optional[str]
An ISO 3166-1 alpha-2 country code. Provide this parameter if you want to apply Track Relinking.
Returns
-------
tracks : List[Track]
The tracks of the artist.
"""
tracks = []
offset = 0
| python | {
"resource": ""
} |
q270276 | Client.oauth2_url | test | def oauth2_url(self, redirect_uri: str, scope: Optional[str] = None, state: Optional[str] = None) -> str:
"""Generate an outh2 url for user authentication.
Parameters
----------
redirect_uri : str
Where spotify should redirect the user to after authentication.
| python | {
"resource": ""
} |
q270277 | Client.get_album | test | async def get_album(self, spotify_id: str, *, market: str = 'US') -> Album:
"""Retrive an album with a spotify ID.
Parameters
----------
spotify_id : str
The ID to search for.
market : Optional[str]
An ISO 3166-1 alpha-2 country code
| python | {
"resource": ""
} |
q270278 | Client.get_artist | test | async def get_artist(self, spotify_id: str) -> Artist:
"""Retrive an artist with a spotify ID.
Parameters
----------
spotify_id : str
The ID to search for.
Returns
| python | {
"resource": ""
} |
q270279 | Client.get_track | test | async def get_track(self, spotify_id: str) -> Track:
"""Retrive an track with a spotify ID.
Parameters
----------
spotify_id : str
The ID to search for.
Returns
| python | {
"resource": ""
} |
q270280 | Client.get_user | test | async def get_user(self, spotify_id: str) -> User:
"""Retrive an user with a spotify ID.
Parameters
----------
spotify_id : str
The ID to search for.
Returns
| python | {
"resource": ""
} |
q270281 | Client.get_albums | test | async def get_albums(self, *ids: List[str], market: str = 'US') -> List[Album]:
"""Retrive multiple albums with a list of spotify IDs.
Parameters
----------
ids : List[str]
the ID to look for
market : Optional[str]
An ISO 3166-1 alpha-2 country code
Returns
-------
albums : List[Album]
The | python | {
"resource": ""
} |
q270282 | Client.get_artists | test | async def get_artists(self, *ids: List[str]) -> List[Artist]:
"""Retrive multiple artists with a list of spotify IDs.
Parameters
----------
ids : List[str]
| python | {
"resource": ""
} |
q270283 | Client.search | test | async def search(self, q: str, *, types: Optional[Iterable[str]] = ['track', 'playlist', 'artist', 'album'], limit: Optional[int] = 20, offset: Optional[int] = 0, market: Optional[str] = None) -> Dict[str, List[Union[Track, Playlist, Artist, Album]]]:
"""Access the spotify search functionality.
Parameters
----------
q : str
the search query
types : Optional[Iterable[str]]
A sequence of search types (can be any of `track`, `playlist`, `artist` or `album`) to refine the search request.
A `ValueError` may be raised if a search type is found that is not valid.
limit : Optional[int]
The limit of search results to return when searching.
Maximum limit is 50, any larger may raise a :class:`HTTPException`
offset : Optional[int]
The offset from where the api should start from in the search results.
market : Optional[str]
An ISO 3166-1 alpha-2 country code. Provide this parameter if you want to apply Track Relinking.
Returns
-------
results : Dict[str, List[Union[Track, Playlist, Artist, Album]]]
The results of the search.
"""
if | python | {
"resource": ""
} |
q270284 | to_id | test | def to_id(string: str) -> str:
"""Get a spotify ID from a URI or open.spotify URL.
Parameters
---------
string : str
The string to operate on.
Returns
-------
id : str
The Spotify ID from the string.
"""
string = string.strip()
match = _URI_RE.match(string)
| python | {
"resource": ""
} |
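A sketch of what `to_id` plausibly does; the regular expressions below are illustrative guesses, not the library's actual `_URI_RE` pattern:

```python
import re

_URI_RE = re.compile(r'^spotify:(?:\w+:)*(\w+)$')                     # assumed pattern
_OPEN_URL_RE = re.compile(r'^https?://open\.spotify\.com/\w+/(\w+)')  # assumed pattern

def to_id(string: str) -> str:
    """Extract a Spotify ID from a URI or an open.spotify.com URL."""
    string = string.strip()
    match = _URI_RE.match(string) or _OPEN_URL_RE.match(string)
    return match.group(1) if match else string

to_id('spotify:track:6rqhFgbbKwnb9MLmUQDhG6')   # -> '6rqhFgbbKwnb9MLmUQDhG6'
```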
q270285 | assert_hasattr | test | def assert_hasattr(attr: str, msg: str, tp: BaseException = SpotifyException) -> Callable:
"""decorator to assert an object has an attribute when run."""
def decorator(func: Callable) -> Callable:
@functools.wraps(func)
def decorated(self, *args, **kwargs):
if not hasattr(self, attr):
raise tp(msg) | python | {
"resource": ""
} |
q270286 | OAuth2.from_client | test | def from_client(cls, client, *args, **kwargs):
"""Construct a OAuth2 object from a `spotify.Client`."""
| python | {
"resource": ""
} |
q270287 | OAuth2.url_ | test | def url_(client_id: str, redirect_uri: str, *, scope: str = None, state: str = None, secure: bool = True) -> str:
"""Construct a OAuth2 URL instead of an OAuth2 object."""
attrs = {
'client_id': client_id,
| python | {
"resource": ""
} |
q270288 | OAuth2.attrs | test | def attrs(self):
"""Attributes used when constructing url parameters."""
data = {
'client_id': self.client_id,
'redirect_uri': quote(self.redirect_uri),
}
if self.scope is not None:
| python | {
"resource": ""
} |
q270289 | OAuth2.parameters | test | def parameters(self) -> str:
"""URL parameters used."""
return | python | {
"resource": ""
} |
q270290 | PartialTracks.build | test | async def build(self):
"""get the track object for each link in the partial tracks data
Returns
-------
tracks : List[Track]
The tracks
"""
| python | {
"resource": ""
} |
q270291 | Playlist.get_all_tracks | test | async def get_all_tracks(self) -> List[PlaylistTrack]:
"""Get all playlist tracks from the playlist.
Returns
-------
tracks : List[PlaylistTrack]
The playlists tracks.
"""
if isinstance(self._tracks, PartialTracks):
return await self._tracks.build()
_tracks = []
offset = 0
while len(self.tracks) | python | {
"resource": ""
} |
q270292 | Player.resume | test | async def resume(self, *, device: Optional[SomeDevice] = None):
"""Resume playback on the user's account.
Parameters
----------
device : Optional[:obj:`SomeDevice`]
The Device object or id of the device this command is targeting.
| python | {
"resource": ""
} |
q270293 | Player.transfer | test | async def transfer(self, device: SomeDevice, ensure_playback: bool = False):
"""Transfer playback to a new device and determine if it should start playing.
Parameters
----------
device : :obj:`SomeDevice`
The device on which playback should be started/transferred.
ensure_playback : bool
| python | {
"resource": ""
} |
q270294 | SpotifyBase.from_href | test | async def from_href(self):
"""Get the full object from spotify with a `href` attribute."""
if not hasattr(self, 'href'):
raise TypeError('Spotify object has no `href` attribute, therefore cannot be retrieved')
elif hasattr(self, 'http'):
return await self.http.request(('GET', | python | {
"resource": ""
} |
q270295 | ExpirationDate.get | test | def get(self): # pragma: no cover
"""
Execute the logic behind the meaning of ExpirationDate + return the matched status.
:return:
The status of the tested domain.
Can be one of the official status.
:rtype: str
"""
# We get the status of the domain validation.
domain_validation = self.checker.is_domain_valid()
# We get the status of the IPv4 validation.
ip_validation = self.checker.is_ip_valid()
if "current_test_data" in PyFunceble.INTERN:
# The end-user wants more information with their test.
# We update some index.
PyFunceble.INTERN["current_test_data"].update(
{
"domain_syntax_validation": domain_validation,
"ip4_syntax_validation": ip_validation,
}
)
if (
domain_validation
and not ip_validation
or domain_validation
or PyFunceble.CONFIGURATION["local"]
):
# * The element is a valid domain.
# and
# * The element is not a valid IPv4.
# or
# * The element is a valid domain.
# * We get the HTTP status code of the currently tested element.
# and
# * We try to get the element status from the IANA database.
PyFunceble.INTERN.update(
{"http_code": HTTPCode().get(), "referer": Referer().get()}
)
if not PyFunceble.INTERN["referer"]:
# We could not get the referer.
# We parse the referer status into the upstream call.
return PyFunceble.INTERN["referer"]
# The WHOIS record status is not into our list of official status.
if PyFunceble.INTERN["referer"] and not self.checker.is_subdomain():
# * The iana database comparison status is not None.
# and
# * The domain we are testing is not a subdomain.
# We try to extract the expiration date from the WHOIS record.
| python | {
"resource": ""
} |
q270296 | ExpirationDate._convert_or_shorten_month | test | def _convert_or_shorten_month(cls, data):
"""
Convert a given month into our unified format.
:param data: The month to convert or shorten.
:type data: str
:return: The unified month name.
:rtype: str
"""
# We map the different month and their possible representation.
short_month = {
"jan": [str(1), "01", "Jan", "January"],
"feb": [str(2), "02", "Feb", "February"],
"mar": [str(3), "03", "Mar", "March"],
"apr": [str(4), "04", "Apr", "April"],
"may": [str(5), "05", "May"],
"jun": [str(6), "06", "Jun", "June"],
"jul": [str(7), "07", "Jul", "July"],
"aug": [str(8), "08", "Aug", "August"],
"sep": [str(9), "09", "Sep", "September"],
"oct": [str(10), "Oct", "October"],
"nov": [str(11), "Nov", "November"],
"dec": [str(12), "Dec", "December"],
}
for month | python | {
"resource": ""
} |
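A plausible completion of the loop the snippet truncates: return the canonical short name whose list contains the given representation, otherwise hand the input back unchanged. This is a sketch, not the verified original:

```python
def convert_or_shorten_month(data, short_month):
    """Map any known representation of a month onto its canonical short name."""
    for month, possibilities in short_month.items():
        if data in possibilities:
            return month
    return data   # nothing matched; return the input unchanged

convert_or_shorten_month('February', {'feb': ['2', '02', 'Feb', 'February']})  # -> 'feb'
```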
q270297 | Production._update_code_urls | test | def _update_code_urls(self):
"""
Read the code and update all links.
"""
to_ignore = [".gitignore", ".keep"]
for root, _, files in PyFunceble.walk(
PyFunceble.CURRENT_DIRECTORY
+ PyFunceble.directory_separator
+ "PyFunceble"
+ PyFunceble.directory_separator
):
# We loop through every directory and file in the `PyFunceble` directory.
for file in files:
# We loop through the list of files of the currently read directory.
if file not in to_ignore and "__pycache__" not in root:
# * The filename is not into the list of file to ignore.
# and
# * The directory we are reading is not `__pycache__`.
if root.endswith(PyFunceble.directory_separator):
# The root directory ends with the directory separator.
# We fix the path in the currently read file.
self._update_docs(root + file)
else:
# The root directory does not end with the directory separator.
# We fix the path in the currently read file.
# (after appending the directory separator between the root and file)
self._update_docs(root + PyFunceble.directory_separator + file)
for root, _, files in PyFunceble.walk(
PyFunceble.CURRENT_DIRECTORY
+ PyFunceble.directory_separator
+ "tests"
+ PyFunceble.directory_separator
):
# We loop through every directory and file in the `tests` directory.
for file in files:
# We loop through the list of files of the currently read directory.
| python | {
"resource": ""
} |
q270298 | Production._is_version_greater | test | def _is_version_greater(self):
"""
Check if the current version is greater than the older one.
"""
# we compare the 2 versions.
checked = Version(True).check_versions(
self.current_version[0], self.version_yaml
)
if checked is not | python | {
"resource": ""
} |
q270299 | Production.is_dev_version | test | def is_dev_version(cls):
"""
Check if the current branch is `dev`.
"""
# We initiate the command we have to run in order to
# get the branch we are currently working with.
command = "git branch"
# We execute and get the command output.
command_result = Command(command).execute()
for branch in command_result.split("\n"):
| python | {
"resource": ""
} |