INSTRUCTION stringlengths 1 8.43k | RESPONSE stringlengths 75 104k |
|---|---|
Get IDs or data for studies that meet specific criteria. | def get_studies(self, features=None, expression=None, mask=None,
peaks=None, frequency_threshold=0.001,
activation_threshold=0.0, func=np.sum, return_type='ids',
r=6
):
""" Get IDs or data for studies that meet specific criteria.
... |
Construct a new FeatureTable from file. | def add_features(self, features, append=True, merge='outer',
duplicates='ignore', min_studies=0.0, threshold=0.001):
""" Construct a new FeatureTable from file.
Args:
features: Feature data to add. Can be:
(a) A text file containing the feature data, whe... |
A convenience wrapper for ImageTable. get_image_data (). | def get_image_data(self, ids=None, voxels=None, dense=True):
""" A convenience wrapper for ImageTable.get_image_data().
Args:
ids (list, array): A list or 1D numpy array of study ids to
return. If None, returns data for all studies.
voxels (list, array): A list o... |
Returns names of features. If features is None returns all features. Otherwise assumes the user is trying to find the order of the features. | def get_feature_names(self, features=None):
""" Returns names of features. If features is None, returns all
features. Otherwise assumes the user is trying to find the order of the
features. """
if features:
return self.feature_table.get_ordered_names(features)
else:
... |
Returns a dictionary where the keys are the feature names and the values are the number of studies tagged with the feature. | def get_feature_counts(self, threshold=0.001):
""" Returns a dictionary, where the keys are the feature names
and the values are the number of studies tagged with the feature. """
counts = np.sum(self.get_feature_data() >= threshold, 0)
return dict(zip(self.get_feature_names(), list(coun... |
Load a pickled Dataset instance from file. | def load(cls, filename):
""" Load a pickled Dataset instance from file. """
try:
dataset = pickle.load(open(filename, 'rb'))
except UnicodeDecodeError:
# Need to try this for python3
dataset = pickle.load(open(filename, 'rb'), encoding='latin')
if has... |
Pickle the Dataset instance to the provided file. | def save(self, filename):
""" Pickle the Dataset instance to the provided file.
"""
if hasattr(self, 'feature_table'):
self.feature_table._sdf_to_csr()
pickle.dump(self, open(filename, 'wb'), -1)
if hasattr(self, 'feature_table'):
self.feature_table._csr... |
Slices and returns a subset of image data. | def get_image_data(self, ids=None, voxels=None, dense=True):
""" Slices and returns a subset of image data.
Args:
ids (list, array): A list or 1D numpy array of study ids to
return. If None, returns data for all studies.
voxels (list, array): A list or 1D numpy a... |
Trim ImageTable to keep only the passed studies. This is a convenience method and should generally be avoided in favor of non - destructive alternatives that don t require slicing ( e. g. matrix multiplication ). | def trim(self, ids):
""" Trim ImageTable to keep only the passed studies. This is a
convenience method, and should generally be avoided in favor of
non-destructive alternatives that don't require slicing (e.g.,
matrix multiplication). """
self.data = self.get_image_data(ids, ... |
Add new features to FeatureTable. Args: features ( str DataFrame ): A filename to load data from or a pandas DataFrame. In either case studies are in rows and features are in columns. Values in cells reflect the weight of the intersecting feature for the intersecting study. Feature names and study IDs should be include... | def add_features(self, features, merge='outer', duplicates='ignore',
min_studies=0, threshold=0.0001):
""" Add new features to FeatureTable.
Args:
features (str, DataFrame): A filename to load data from, or a
pandas DataFrame. In either case, studies are ... |
Slices and returns a subset of feature data. | def get_feature_data(self, ids=None, features=None, dense=True):
""" Slices and returns a subset of feature data.
Args:
ids (list, array): A list or 1D numpy array of study ids to
return rows for. If None, returns data for all studies
(i.e., all rows in array... |
Given a list of features returns features in order that they appear in database. | def get_ordered_names(self, features):
""" Given a list of features, returns features in order that they
appear in database.
Args:
features (list): A list or 1D numpy array of named features to
return.
Returns:
A list of features in order they appear... |
Returns a list of all studies in the table that meet the desired feature - based criteria. | def get_ids(self, features, threshold=0.0, func=np.sum, get_weights=False):
""" Returns a list of all studies in the table that meet the desired
feature-based criteria.
Will most commonly be used to retrieve studies that use one or more
features with some minimum frequency; e.g.,:
... |
Returns all features that match any of the elements in the input list. | def search_features(self, search):
''' Returns all features that match any of the elements in the input
list.
Args:
search (str, list): A string or list of strings defining the query.
Returns:
A list of matching feature names.
'''
if isinstance(s... |
Use a PEG to parse expression and return study IDs. | def get_ids_by_expression(self, expression, threshold=0.001, func=np.sum):
""" Use a PEG to parse expression and return study IDs."""
lexer = lp.Lexer()
lexer.build()
parser = lp.Parser(
lexer, self.dataset, threshold=threshold, func=func)
parser.build()
retur... |
Returns features for which the mean loading across all specified studies ( in ids ) is > = threshold. | def get_features_by_ids(self, ids=None, threshold=0.0001, func=np.mean,
get_weights=False):
''' Returns features for which the mean loading across all specified
studies (in ids) is >= threshold. '''
weights = self.data.ix[ids].apply(func, 0)
above_thresh = wei... |
Convert FeatureTable to SciPy CSR matrix. | def _sdf_to_csr(self):
""" Convert FeatureTable to SciPy CSR matrix. """
data = self.data.to_dense()
self.data = {
'columns': list(data.columns),
'index': list(data.index),
'values': sparse.csr_matrix(data.values)
} |
Inverse of _sdf_to_csr (). | def _csr_to_sdf(self):
""" Inverse of _sdf_to_csr(). """
self.data = pd.DataFrame(self.data['values'].todense(),
index=self.data['index'],
columns=self.data['columns']).to_sparse() |
Deprecation warning decorator. Takes optional deprecation message otherwise will use a generic warning. | def deprecated(*args):
""" Deprecation warning decorator. Takes optional deprecation message,
otherwise will use a generic warning. """
def wrap(func):
def wrapped_func(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return func(*args, **kwargs)
retu... |
Execute a full clustering analysis pipeline. Args: dataset: a Dataset instance to extract all data from. method ( str ): the overall clustering approach to use. Valid options: coactivation ( default ): Clusters voxel within the ROI mask based on shared pattern of coactivation with the rest of the brain. studies: Treat ... | def magic(dataset, method='coactivation', roi_mask=None,
coactivation_mask=None, features=None, feature_threshold=0.05,
min_voxels_per_study=None, min_studies_per_voxel=None,
reduce_reference='pca', n_components=100,
distance_metric='correlation', clustering_algorithm='kmeans',
... |
Apply a transformation to the Clusterable instance. Accepts any scikit - learn - style class that implements a fit_transform () method. | def transform(self, transformer, transpose=False):
''' Apply a transformation to the Clusterable instance. Accepts any
scikit-learn-style class that implements a fit_transform() method. '''
data = self.data.T if transpose else self.data
data = transformer.fit_transform(data)
self... |
Generate meta - analysis images for a set of features. Args: dataset: A Dataset instance containing feature and activation data. features: A list of named features to generate meta - analysis maps for. If None analyzes all features in the current dataset. image_type: The type of image to return. Specify one of the exte... | def analyze_features(dataset, features=None, image_type='association-test_z',
threshold=0.001, q=0.01, output_dir=None, prefix=None):
""" Generate meta-analysis images for a set of features.
Args:
dataset: A Dataset instance containing feature and activation data.
features: ... |
Write out any images generated by the meta - analysis. Args: output_dir ( str ): folder to write images to prefix ( str ): all image files will be prepended with this string prefix_sep ( str ): glue between the prefix and rest of filename image_list ( list ): optional list of images to save -- e. g. [ pFgA_z pAgF ]. If... | def save_results(self, output_dir='.', prefix='', prefix_sep='_',
image_list=None):
""" Write out any images generated by the meta-analysis.
Args:
output_dir (str): folder to write images to
prefix (str): all image files will be prepended with this string
... |
Convert coordinates from one space to another using provided transformation matrix. | def transform(foci, mat):
""" Convert coordinates from one space to another using provided
transformation matrix. """
t = linalg.pinv(mat)
foci = np.hstack((foci, np.ones((foci.shape[0], 1))))
return np.dot(foci, t)[:, 0:3] |
Convert an N x 3 array of XYZ coordinates to matrix indices. | def xyz_to_mat(foci, xyz_dims=None, mat_dims=None):
""" Convert an N x 3 array of XYZ coordinates to matrix indices. """
foci = np.hstack((foci, np.ones((foci.shape[0], 1))))
mat = np.array([[-0.5, 0, 0, 45], [0, 0.5, 0, 63], [0, 0, 0.5, 36]]).T
result = np.dot(foci, mat)[:, ::-1] # multiply and revers... |
Apply a named transformation to a set of foci. | def apply(self, name, foci):
""" Apply a named transformation to a set of foci.
If the named transformation doesn't exist, return foci untransformed.
"""
if name in self.transformations:
return transform(foci, self.transformations[name])
else:
logger.info... |
Reset/ remove all layers keeping only the initial volume. | def reset(self):
""" Reset/remove all layers, keeping only the initial volume. """
self.layers = {}
self.stack = []
self.set_mask()
self.n_vox_in_vol = len(np.where(self.current_mask)[0]) |
Add one or more layers to the stack of masking layers. Args: layers: A string NiBabel image list or dict. If anything other than a dict is passed assigns sequential layer names based on the current position in stack ; if a dict uses key as the name and value as the mask image. | def add(self, layers, above=None, below=None):
""" Add one or more layers to the stack of masking layers.
Args:
layers: A string, NiBabel image, list, or dict. If anything other
than a dict is passed, assigns sequential layer names based on
the current positio... |
Remove one or more layers from the stack of masking layers. Args: layers: An int string or list of strings and/ or ints. Ints are interpreted as indices in the stack to remove ; strings are interpreted as names of layers to remove. Negative ints will also work -- i. e. remove ( - 1 ) will drop the last layer added. | def remove(self, layers):
""" Remove one or more layers from the stack of masking layers.
Args:
layers: An int, string or list of strings and/or ints. Ints are
interpreted as indices in the stack to remove; strings are
interpreted as names of layers to remove.... |
A flexible method for transforming between different representations of image data. Args: image: The input image. Can be a string ( filename of image ) NiBabel image N - dimensional array ( must have same shape as self. volume ) or vectorized image data ( must have same length as current conjunction mask ). output: The... | def get_image(self, image, output='vector'):
""" A flexible method for transforming between different
representations of image data.
Args:
image: The input image. Can be a string (filename of image),
NiBabel image, N-dimensional array (must have same shape as
... |
Vectorize an image and mask out all invalid voxels. | def mask(self, image, nan_to_num=True, layers=None, in_global_mask=False):
""" Vectorize an image and mask out all invalid voxels.
Args:
images: The image to vectorize and mask. Input can be any object
handled by get_image().
layers: Which mask layers to use (spe... |
Reconstruct a masked vector into the original 3D volume. Args: data: The 1D vector to reconstruct. ( Can also be a 2D vector where the second dimension is time but then output will always be set to array -- i. e. a 4D image will be returned. ) layers: Which mask layers to use ( specified as int string or list of ints a... | def unmask(self, data, layers=None, output='array'):
""" Reconstruct a masked vector into the original 3D volume.
Args:
data: The 1D vector to reconstruct. (Can also be a 2D vector where
the second dimension is time, but then output will always
be set to 'arra... |
Set the current mask by taking the conjunction of all specified layers. | def get_mask(self, layers=None, output='vector', in_global_mask=True):
""" Set the current mask by taking the conjunction of all specified
layers.
Args:
layers: Which layers to include. See documentation for add() for
format.
include_global_mask: Whether ... |
# Return all points within r mm of coordinates. Generates a cube and then discards all points outside sphere. Only returns values that fall within the dimensions of the image. | def get_sphere(coords, r=4, vox_dims=(2, 2, 2), dims=(91, 109, 91)):
""" # Return all points within r mm of coordinates. Generates a cube
and then discards all points outside sphere. Only returns values that
fall within the dimensions of the image."""
r = float(r)
xx, yy, zz = [slice(-r / vox_dims[i... |
Take a set of discrete foci ( i. e. 2 - D array of xyz coordinates ) and generate a corresponding image convolving each focus with a hard sphere of radius r. | def map_peaks_to_image(peaks, r=4, vox_dims=(2, 2, 2), dims=(91, 109, 91),
header=None):
""" Take a set of discrete foci (i.e., 2-D array of xyz coordinates)
and generate a corresponding image, convolving each focus with a
hard sphere of radius r."""
data = np.zeros(dims)
for ... |
Load multiple images from file into an ndarray. | def load_imgs(filenames, masker, nan_to_num=True):
""" Load multiple images from file into an ndarray.
Args:
filenames: A single filename or list of filenames pointing to valid
images.
masker: A Masker instance.
nan_to_num: Optional boolean indicating whether to convert NaNs to zero.
... |
Save a vectorized image to file. | def save_img(data, filename, masker, header=None):
""" Save a vectorized image to file. """
if not header:
header = masker.get_header()
header.set_data_dtype(data.dtype) # Avoids loss of precision
# Update min/max -- this should happen on save, but doesn't seem to
header['cal_max'] = data.m... |
Threshold data setting all values in the array above/ below threshold to zero. Args: data ( ndarray ): The image data to threshold. threshold ( float ): Numeric threshold to apply to image. mask ( ndarray ): Optional 1D - array with the same length as the data. If passed the threshold is first applied to the mask and t... | def threshold_img(data, threshold, mask=None, mask_out='below'):
""" Threshold data, setting all values in the array above/below threshold
to zero.
Args:
data (ndarray): The image data to threshold.
threshold (float): Numeric threshold to apply to image.
mask (ndarray): Optional 1D-a... |
Creates an image containing labeled cells in a 3D grid. Args: image: String or nibabel image. The image used to define the grid dimensions. Also used to define the mask to apply to the grid. Only voxels with non - zero values in the mask will be retained ; all other voxels will be zeroed out in the returned image. scal... | def create_grid(image, scale=4, apply_mask=True, save_file=None):
""" Creates an image containing labeled cells in a 3D grid.
Args:
image: String or nibabel image. The image used to define the grid
dimensions. Also used to define the mask to apply to the grid.
Only voxels with no... |
Set neurosynth s logging level | def set_logging_level(level=None):
"""Set neurosynth's logging level
Args
level : str
Name of the logging level (warning, error, info, etc) known
to logging module. If no level provided, it would get that one
from environment variable NEUROSYNTH_LOGLEVEL
"""
if level is N... |
Expand the given address into one or more normalized strings. | def expand_address(address, languages=None, **kw):
"""
Expand the given address into one or more normalized strings.
Required
--------
@param address: the address as either Unicode or a UTF-8 encoded string
Options
-------
@param languages: a tuple or list of ISO language code strings ... |
Normalizes a string tokenizes and normalizes each token with string and token - level options. | def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,
token_options=DEFAULT_TOKEN_OPTIONS,
strip_parentheticals=True, whitespace=False,
languages=None):
'''
Normalizes a string, tokenizes, and normalizes each token
with string and t... |
Parse address into components. | def parse_address(address, language=None, country=None):
"""
Parse address into components.
@param address: the address as either Unicode or a UTF-8 encoded string
@param language (optional): language code
@param country (optional): country code
"""
address = safe_decode(address, 'utf-8')
... |
Hash the given address into normalized strings that can be used to group similar addresses together for more detailed pairwise comparison. This can be thought of as the blocking function in record linkage or locally - sensitive hashing in the document near - duplicate detection. | def near_dupe_hashes(labels, values, languages=None, **kw):
"""
Hash the given address into normalized strings that can be used to group similar
addresses together for more detailed pairwise comparison. This can be thought of
as the blocking function in record linkage or locally-sensitive hashing in the... |
Detect whether the file contains an api key in the Token object that is not 40 * 0. See issue #86.: param file: path - to - file to check: return: boolean | def has_api_key(file_name):
"""
Detect whether the file contains an api key in the Token object that is not 40*'0'.
See issue #86.
:param file: path-to-file to check
:return: boolean
"""
f = open(file_name, 'r')
text = f.read()
if re.search(real_api_regex, text) is not None and \
... |
Change the api key in the Token object to 40 * 0. See issue #86.: param file: path - to - file to change | def remove_api_key(file_name):
"""
Change the api key in the Token object to 40*'0'. See issue #86.
:param file: path-to-file to change
"""
with open(file_name, 'r') as fp:
text = fp.read()
text = re.sub(real_api_regex, zero_token_string, text)
with open(file_name, 'w') as fp:
... |
Converts a python dict to a namedtuple saving memory. | def dict_to_object(item, object_name):
"""Converts a python dict to a namedtuple, saving memory."""
fields = item.keys()
values = item.values()
return json.loads(json.dumps(item),
object_hook=lambda d:
namedtuple(object_name, fields)(*values)) |
Return a list of dicts of metadata tickers for all supported tickers of the specified asset type as well as metadata about each ticker. This includes supported date range the exchange the ticker is traded on and the currency the stock is traded on. Tickers for unrelated products are omitted. https:// apimedia. tiingo. ... | def list_tickers(self, assetType):
"""Return a list of dicts of metadata tickers for all supported tickers
of the specified asset type, as well as metadata about each ticker.
This includes supported date range, the exchange the ticker is traded
on, and the currency the stock ... |
Return metadata for 1 ticker Use TiingoClient. list_tickers () to get available options | def get_ticker_metadata(self, ticker, fmt='json'):
"""Return metadata for 1 ticker
Use TiingoClient.list_tickers() to get available options
Args:
ticker (str) : Unique identifier for stock
"""
url = "tiingo/daily/{}".format(ticker)
response = self.... |
Check to see that frequency was specified correctly: param frequency ( string ): frequency string: return ( boolean ): | def _invalid_frequency(self, frequency):
"""
Check to see that frequency was specified correctly
:param frequency (string): frequency string
:return (boolean):
"""
is_valid = self._is_eod_frequency(frequency) or re.match(self._frequency_pattern, frequency)
return ... |
Return url based on frequency. Daily weekly or yearly use Tiingo EOD api ; anything less than daily uses the iex intraday api.: param ticker ( string ): ticker to be embedded in the url: param frequency ( string ): valid frequency per Tiingo api: return ( string ): url | def _get_url(self, ticker, frequency):
"""
Return url based on frequency. Daily, weekly, or yearly use Tiingo
EOD api; anything less than daily uses the iex intraday api.
:param ticker (string): ticker to be embedded in the url
:param frequency (string): valid frequency per Tiin... |
By default return latest EOD Composite Price for a stock ticker. On average each feed contains 3 data sources. | def get_ticker_price(self, ticker,
startDate=None, endDate=None,
fmt='json', frequency='daily'):
"""By default, return latest EOD Composite Price for a stock ticker.
On average, each feed contains 3 data sources.
Supported tickers + Avail... |
Return a pandas. DataFrame of historical prices for one or more ticker symbols. | def get_dataframe(self, tickers,
startDate=None, endDate=None, metric_name=None, frequency='daily'):
""" Return a pandas.DataFrame of historical prices for one or more ticker symbols.
By default, return latest EOD Composite Price for a list of stock tickers.
On av... |
Return list of news articles matching given search terms https:// api. tiingo. com/ docs/ tiingo/ news | def get_news(self, tickers=[], tags=[], sources=[], startDate=None,
endDate=None, limit=100, offset=0, sortBy="publishedDate",
fmt='json'):
"""Return list of news articles matching given search terms
https://api.tiingo.com/docs/tiingo/news
# Dates are i... |
Only available to institutional clients. If ID is NOT provided return array of available file_ids. If ID is provided provides URL which you can use to download your file as well as some metadata about that file. | def get_bulk_news(self, file_id=None, fmt='json'):
"""Only available to institutional clients.
If ID is NOT provided, return array of available file_ids.
If ID is provided, provides URL which you can use to download your
file, as well as some metadata about that file.
... |
Make HTTP request and return response object | def _request(self, method, url, **kwargs):
"""Make HTTP request and return response object
Args:
method (str): GET, POST, PUT, DELETE
url (str): path appended to the base_url to create request
**kwargs: passed directly to a requests.request object
... |
Get the application bearer token from client_id and client_secret. | async def get_bearer_info(self):
"""Get the application bearer token from client_id and client_secret."""
if self.client_id is None:
raise SpotifyException(_GET_BEARER_ERR % 'client_id')
elif self.client_secret is None:
raise SpotifyException(_GET_BEARER_ERR % 'client_se... |
Make a request to the spotify API with the current bearer credentials. | async def request(self, route, **kwargs):
"""Make a request to the spotify API with the current bearer credentials.
Parameters
----------
route : Union[tuple[str, str], Route]
A tuple of the method and url or a :class:`Route` object.
kwargs : Any
keyword ... |
Get a spotify album by its ID. | def album(self, spotify_id, market='US'):
"""Get a spotify album by its ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
Returns
-------
album : Dic... |
Get an albums tracks by an ID. | def album_tracks(self, spotify_id, limit=20, offset=0, market='US'):
"""Get an albums tracks by an ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1... |
Get a spotify album by its ID. | def albums(self, spotify_ids, market='US'):
"""Get a spotify album by its ID.
Parameters
----------
spotify_ids : List[str]
The spotify_ids to search by.
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
"""
route = Route('GET', '... |
Get a spotify artist by their ID. | def artist(self, spotify_id):
"""Get a spotify artist by their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
"""
route = Route('GET', '/artists/{spotify_id}', spotify_id=spotify_id)
return self.request(route) |
Get an artists tracks by their ID. | def artist_albums(self, spotify_id, include_groups=None, limit=20, offset=0, market='US'):
"""Get an artists tracks by their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
include_groups : INCLUDE_GROUPS_TP
INCLUDE_GROUPS
... |
Get an artists top tracks per country with their ID. | def artist_top_tracks(self, spotify_id, country):
"""Get an artists top tracks per country with their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
country : COUNTRY_TP
COUNTRY
"""
route = Route('GET', '/artists/{... |
Get related artists for an artist by their ID. | def artist_related_artists(self, spotify_id):
"""Get related artists for an artist by their ID.
Parameters
----------
spotify_id : str
The spotify_id to search by.
"""
route = Route('GET', '/artists/{spotify_id}/related-artists', spotify_id=spotify_id)
... |
Get a spotify artists by their IDs. | def artists(self, spotify_ids):
"""Get a spotify artists by their IDs.
Parameters
----------
spotify_id : List[str]
The spotify_ids to search with.
"""
route = Route('GET', '/artists')
payload = {'ids': spotify_ids}
return self.request(route, ... |
Get a single category used to tag items in Spotify. | def category(self, category_id, country=None, locale=None):
"""Get a single category used to tag items in Spotify.
Parameters
----------
category_id : str
The Spotify category ID for the category.
country : COUNTRY_TP
COUNTRY
locale : LOCALE_TP
... |
Get a list of Spotify playlists tagged with a particular category. | def category_playlists(self, category_id, limit=20, offset=0, country=None):
"""Get a list of Spotify playlists tagged with a particular category.
Parameters
----------
category_id : str
The Spotify category ID for the category.
limit : Optional[int]
The ... |
Get a list of categories used to tag items in Spotify. | def categories(self, limit=20, offset=0, country=None, locale=None):
"""Get a list of categories used to tag items in Spotify.
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[i... |
Get a list of Spotify featured playlists. | def featured_playlists(self, locale=None, country=None, timestamp=None, limit=20, offset=0):
"""Get a list of Spotify featured playlists.
Parameters
----------
locale : LOCALE_TP
LOCALE
country : COUNTRY_TP
COUNTRY
timestamp : TIMESTAMP_TP
... |
Get a list of new album releases featured in Spotify. | def new_releases(self, *, country=None, limit=20, offset=0):
"""Get a list of new album releases featured in Spotify.
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
... |
Get Recommendations Based on Seeds. | def recommendations(self, seed_artists, seed_genres, seed_tracks, *, limit=20, market=None, **filters):
"""Get Recommendations Based on Seeds.
Parameters
----------
seed_artists : str
A comma separated list of Spotify IDs for seed artists. Up to 5 seed values may be provided... |
Check to see if the current user is following one or more artists or other Spotify users. | def following_artists_or_users(self, ids, *, type='artist'):
"""Check to see if the current user is following one or more artists or other Spotify users.
Parameters
----------
ids : List[str]
A comma-separated list of the artist or the user Spotify IDs to check.
... |
Get the albums of a Spotify artist. | async def get_albums(self, *, limit: Optional[int] = 20, offset: Optional[int] = 0, include_groups=None, market: Optional[str] = None) -> List[Album]:
"""Get the albums of a Spotify artist.
Parameters
----------
limit : Optional[int]
The maximum number of items to return. De... |
loads all of the artists albums depending on how many the artist has this may be a long operation. | async def get_all_albums(self, *, market='US') -> List[Album]:
"""loads all of the artists albums, depending on how many the artist has this may be a long operation.
Parameters
----------
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
Returns
---... |
get the total amout of tracks in the album. | async def total_albums(self, *, market: str = None) -> int:
"""get the total amout of tracks in the album.
Parameters
----------
market : Optional[str]
An ISO 3166-1 alpha-2 country code.
Returns
-------
total : int
The total amount of al... |
Get Spotify catalog information about an artist’s top tracks by country. | async def top_tracks(self, country: str = 'US') -> List[Track]:
"""Get Spotify catalog information about an artist’s top tracks by country.
Parameters
----------
country : str
The country to search for, it defaults to 'US'.
Returns
-------
tracks : L... |
Get Spotify catalog information about artists similar to a given artist. | async def related_artists(self) -> List[Artist]:
"""Get Spotify catalog information about artists similar to a given artist.
Similarity is based on analysis of the Spotify community’s listening history.
Returns
-------
artists : List[Artits]
The artists deemed simil... |
Get the users currently playing track. | async def currently_playing(self) -> Tuple[Context, Track]:
"""Get the users currently playing track.
Returns
-------
context, track : Tuple[Context, Track]
A tuple of the context and track.
"""
data = await self.http.currently_playing()
if data.get(... |
Get information about the users current playback. | async def get_player(self) -> Player:
"""Get information about the users current playback.
Returns
-------
player : Player
A player object representing the current playback.
"""
self._player = player = Player(self.__client, self, await self.http.current_playe... |
Get information about the users avaliable devices. | async def get_devices(self) -> List[Device]:
"""Get information about the users avaliable devices.
Returns
-------
devices : List[Device]
The devices the user has available.
"""
data = await self.http.available_devices()
return [Device(item) for item ... |
Get tracks from the current users recently played tracks. | async def recently_played(self) -> List[Dict[str, Union[Track, Context, str]]]:
"""Get tracks from the current users recently played tracks.
Returns
-------
playlist_history : List[Dict[str, Union[Track, Context, str]]]
A list of playlist history object.
Each obj... |
Add one or more tracks to a user’s playlist. | async def add_tracks(self, playlist: Union[str, Playlist], *tracks) -> str:
"""Add one or more tracks to a user’s playlist.
Parameters
----------
playlist : Union[str, Playlist]
The playlist to modify
tracks : Sequence[Union[str, Track]]
Tracks to add to ... |
Replace all the tracks in a playlist overwriting its existing tracks. This powerful request can be useful for replacing tracks re - ordering existing tracks or clearing the playlist. | async def replace_tracks(self, playlist, *tracks) -> str:
"""Replace all the tracks in a playlist, overwriting its existing tracks.
This powerful request can be useful for replacing tracks, re-ordering existing tracks, or clearing the playlist.
Parameters
----------
playlist : ... |
Remove one or more tracks from a user’s playlist. | async def remove_tracks(self, playlist, *tracks):
"""Remove one or more tracks from a user’s playlist.
Parameters
----------
playlist : Union[str, Playlist]
The playlist to modify
tracks : Sequence[Union[str, Track]]
Tracks to remove from the playlist
... |
Reorder a track or a group of tracks in a playlist. | async def reorder_tracks(self, playlist, start, insert_before, length=1, *, snapshot_id=None):
"""Reorder a track or a group of tracks in a playlist.
Parameters
----------
playlist : Union[str, Playlist]
The playlist to modify
start : int
The position of ... |
Change a playlist’s name and public/ private collaborative state and description. | async def edit_playlist(self, playlist, *, name=None, public=None, collaborative=None, description=None):
"""Change a playlist’s name and public/private, collaborative state and description.
Parameters
----------
playlist : Union[str, Playlist]
The playlist to modify
... |
Create a playlist for a Spotify user. | async def create_playlist(self, name, *, public=True, collaborative=False, description=None):
"""Create a playlist for a Spotify user.
Parameters
----------
name : str
The name of the playlist.
public : Optional[bool]
The public/private status of the play... |
Get the user’s playlists from Spotify.
"""get the users playlists from spotify.
Parameters
----------
limit : Optional[int]
The limit on how many playlists to retrieve for this user (default is 20).
offset : Optional[int]
The offset fro... |
Get the album’s tracks from Spotify.
"""get the albums tracks from spotify.
Parameters
----------
limit : Optional[int]
The limit on how many tracks to retrieve for this album (default is 20).
offset : Op... |
Load all of the album’s tracks; depending on how many the album has, this may be a long operation.
"""loads all of the albums tracks, depending on how many the album has this may be a long operation.
Parameters
----------
market : Optional[str]
An ISO 3166-1 alpha-2 country code. Provide this ... |
Generate an OAuth2 URL for user authentication.
"""Generate an outh2 url for user authentication.
Parameters
----------
redirect_uri : str
Where spotify should redirect the user to after authentication.
scope ... |
Retrieve an album with a Spotify ID.
"""Retrive an album with a spotify ID.
Parameters
----------
spotify_id : str
The ID to search for.
market : Optional[str]
An ISO 3166-1 alpha-2 country code
Returns
... |
Retrieve an artist with a Spotify ID.
"""Retrive an artist with a spotify ID.
Parameters
----------
spotify_id : str
The ID to search for.
Returns
-------
artist : Artist
The artist from the ID
"""
data = ... |
Retrieve a track with a Spotify ID.
"""Retrive an track with a spotify ID.
Parameters
----------
spotify_id : str
The ID to search for.
Returns
-------
track : Track
The track from the ID
"""
data = await ... |
Retrieve a user with a Spotify ID.
"""Retrive an user with a spotify ID.
Parameters
----------
spotify_id : str
The ID to search for.
Returns
-------
user : User
The user from the ID
"""
data = await self.h... |
Retrieve multiple albums with a list of Spotify IDs.
"""Retrive multiple albums with a list of spotify IDs.
Parameters
----------
ids : List[str]
the ID to look for
market : Optional[str]
An ISO 3166-1 alpha-2 country code
... |
Retrieve multiple artists with a list of Spotify IDs.
"""Retrive multiple artists with a list of spotify IDs.
Parameters
----------
ids : List[str]
the IDs to look for
Returns
-------
artists : List[Artist]
The artists from the ID... |
Access the spotify search functionality. | async def search(self, q: str, *, types: Optional[Iterable[str]] = ['track', 'playlist', 'artist', 'album'], limit: Optional[int] = 20, offset: Optional[int] = 0, market: Optional[str] = None) -> Dict[str, List[Union[Track, Playlist, Artist, Album]]]:
"""Access the spotify search functionality.
Paramet... |
Check if one or more albums are already saved in the current Spotify user’s ‘Your Music’ library.
"""Check if one or more albums is already saved in the current Spotify user’s ‘Your Music’ library.
Parameters
----------
albums : Union[Album, str]
A sequence of artist objects or spotify I... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.