| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q264400 | MedicalImage.to_file | validation | def to_file(self, outpath):
"""Save this object instance in outpath.
Parameters
----------
outpath: str
Output file path
"""
if not self.has_mask() and not self.is_smoothed():
save_niigz(outpath, self.img)
else:
save_niigz(outpath, self.get_data(masked=True, smoothed=True),
self.get_header(), self.get_affine()) | python | {
"resource": ""
} |
q264401 | setup_logging | validation | def setup_logging(log_config_file=op.join(op.dirname(__file__), 'logger.yml'),
log_default_level=LOG_LEVEL,
env_key=MODULE_NAME.upper() + '_LOG_CFG'):
"""Setup logging configuration."""
path = log_config_file
value = os.getenv(env_key, None)
if value:
path = value
if op.exists(path):
log_cfg = yaml.load(read(path).format(MODULE_NAME))
logging.config.dictConfig(log_cfg)
#print('Started logging using config file {0}.'.format(path))
else:
logging.basicConfig(level=log_default_level)
#print('Started default logging. Could not find config file '
# 'in {0}.'.format(path))
log = logging.getLogger(__name__)
log.debug('Start logging.') | python | {
"resource": ""
} |
q264402 | get_3D_from_4D | validation | def get_3D_from_4D(filename, vol_idx=0):
"""Return a 3D volume from a 4D nifti image file
Parameters
----------
filename: str
Path to the 4D .mhd file
vol_idx: int
Index of the 3D volume to be extracted from the 4D volume.
Returns
-------
vol, hdr
The data array and the new 3D image header.
"""
def remove_4th_element_from_hdr_string(hdr, fieldname):
if fieldname in hdr:
hdr[fieldname] = ' '.join(hdr[fieldname].split()[:3])
vol, hdr = load_raw_data_with_mhd(filename)
if vol.ndim != 4:
raise ValueError('Volume in {} does not have 4 dimensions.'.format(op.join(op.dirname(filename),
hdr['ElementDataFile'])))
if not 0 <= vol_idx < vol.shape[3]:
        raise IndexError('The 4th dimension of volume {} has {} volumes; requested index {} is out of range.'.format(
            filename, vol.shape[3], vol_idx))
new_vol = vol[:, :, :, vol_idx].copy()
hdr['NDims'] = 3
remove_4th_element_from_hdr_string(hdr, 'ElementSpacing')
remove_4th_element_from_hdr_string(hdr, 'DimSize')
return new_vol, hdr | python | {
"resource": ""
} |
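A minimal usage sketch for `get_3D_from_4D`; the file path is a placeholder and the function assumes `load_raw_data_with_mhd` from the same module is available.

# Hypothetical example; '/data/pet_dynamic.mhd' is a placeholder path.
vol, hdr = get_3D_from_4D('/data/pet_dynamic.mhd', vol_idx=2)
print(vol.shape)        # shape of the extracted 3D volume
print(hdr['DimSize'])   # header now describes only 3 dimensions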
q264403 | _safe_cache | validation | def _safe_cache(memory, func, **kwargs):
""" A wrapper for mem.cache that flushes the cache if the version
number of nibabel has changed.
"""
cachedir = memory.cachedir
if cachedir is None or cachedir in __CACHE_CHECKED:
return memory.cache(func, **kwargs)
version_file = os.path.join(cachedir, 'module_versions.json')
versions = dict()
if os.path.exists(version_file):
with open(version_file, 'r') as _version_file:
versions = json.load(_version_file)
modules = (nibabel, )
# Keep only the major + minor version numbers
my_versions = dict((m.__name__, LooseVersion(m.__version__).version[:2])
for m in modules)
commons = set(versions.keys()).intersection(set(my_versions.keys()))
collisions = [m for m in commons if versions[m] != my_versions[m]]
# Flush cache if version collision
if len(collisions) > 0:
if nilearn.CHECK_CACHE_VERSION:
warnings.warn("Incompatible cache in %s: "
"different version of nibabel. Deleting "
"the cache. Put nilearn.CHECK_CACHE_VERSION "
"to false to avoid this behavior."
% cachedir)
try:
tmp_dir = (os.path.split(cachedir)[:-1]
+ ('old_%i' % os.getpid(), ))
tmp_dir = os.path.join(*tmp_dir)
# We use rename + unlink to be more robust to race
# conditions
os.rename(cachedir, tmp_dir)
shutil.rmtree(tmp_dir)
except OSError:
# Another process could have removed this dir
pass
try:
os.makedirs(cachedir)
except OSError:
# File exists?
pass
else:
warnings.warn("Incompatible cache in %s: "
"old version of nibabel." % cachedir)
# Write json files if configuration is different
if versions != my_versions:
with open(version_file, 'w') as _version_file:
json.dump(my_versions, _version_file)
__CACHE_CHECKED[cachedir] = True
return memory.cache(func, **kwargs) | python | {
"resource": ""
} |
q264404 | spatialimg_to_hdfgroup | validation | def spatialimg_to_hdfgroup(h5group, spatial_img):
"""Saves a Nifti1Image into an HDF5 group.
Parameters
----------
    h5group: h5py Group
        Group where the image data will be saved.
        Datasets will be created inside the given group:
        'data', 'extra', 'affine'; the header information will
        be set as attributes of the 'data' dataset.
    spatial_img: nibabel SpatialImage
        Image to be saved
"""
try:
h5group['data'] = spatial_img.get_data()
h5group['affine'] = spatial_img.get_affine()
        if hasattr(spatial_img, 'get_extra'):
h5group['extra'] = spatial_img.get_extra()
hdr = spatial_img.get_header()
for k in list(hdr.keys()):
h5group['data'].attrs[k] = hdr[k]
except ValueError as ve:
raise Exception('Error creating group ' + h5group.name) from ve | python | {
"resource": ""
} |
q264405 | spatialimg_to_hdfpath | validation | def spatialimg_to_hdfpath(file_path, spatial_img, h5path=None, append=True):
"""Saves a Nifti1Image into an HDF5 file.
Parameters
----------
file_path: string
Output HDF5 file path
spatial_img: nibabel SpatialImage
Image to be saved
h5path: string
HDF5 group path where the image data will be saved.
Datasets will be created inside the given group path:
'data', 'extra', 'affine', the header information will
be set as attributes of the 'data' dataset.
Default: '/img'
append: bool
True if you don't want to erase the content of the file
if it already exists, False otherwise.
Note
----
HDF5 open modes
>>> 'r' Readonly, file must exist
>>> 'r+' Read/write, file must exist
>>> 'w' Create file, truncate if exists
>>> 'w-' Create file, fail if exists
>>> 'a' Read/write if exists, create otherwise (default)
"""
if h5path is None:
h5path = '/img'
mode = 'w'
if os.path.exists(file_path):
if append:
mode = 'a'
with h5py.File(file_path, mode) as f:
try:
h5img = f.create_group(h5path)
spatialimg_to_hdfgroup(h5img, spatial_img)
except ValueError as ve:
raise Exception('Error creating group ' + h5path) from ve | python | {
"resource": ""
} |
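A short usage sketch for `spatialimg_to_hdfpath`, assuming numpy, h5py and a nibabel version that still provides `get_data`/`get_affine`; the output file name is a placeholder.

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.zeros((4, 4, 4), dtype=np.float32), affine=np.eye(4))
# Writes '/anat/data', '/anat/affine' and the header attributes into subject01.h5
spatialimg_to_hdfpath('subject01.h5', img, h5path='/anat', append=False)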
q264406 | get_nifti1hdr_from_h5attrs | validation | def get_nifti1hdr_from_h5attrs(h5attrs):
"""Transforms an H5py Attributes set to a dict.
Converts unicode string keys into standard strings
and each value into a numpy array.
Parameters
----------
h5attrs: H5py Attributes
Returns
--------
dict
"""
hdr = nib.Nifti1Header()
for k in list(h5attrs.keys()):
hdr[str(k)] = np.array(h5attrs[k])
return hdr | python | {
"resource": ""
} |
q264407 | all_childnodes_to_nifti1img | validation | def all_childnodes_to_nifti1img(h5group):
"""Returns in a list all images found under h5group.
Parameters
----------
h5group: h5py.Group
HDF group
Returns
-------
list of nifti1Image
"""
child_nodes = []
def append_parent_if_dataset(name, obj):
if isinstance(obj, h5py.Dataset):
if name.split('/')[-1] == 'data':
child_nodes.append(obj.parent)
vols = []
h5group.visititems(append_parent_if_dataset)
for c in child_nodes:
vols.append(hdfgroup_to_nifti1image(c))
return vols | python | {
"resource": ""
} |
q264408 | insert_volumes_in_one_dataset | validation | def insert_volumes_in_one_dataset(file_path, h5path, file_list, newshape=None,
concat_axis=0, dtype=None, append=True):
"""Inserts all given nifti files from file_list into one dataset in fname.
This will not check if the dimensionality of all files match.
Parameters
----------
file_path: string
HDF5 file path
h5path: string
file_list: list of strings
newshape: tuple or lambda function
If None, it will not reshape the images.
If a lambda function, this lambda will receive only the shape array.
e.g., newshape = lambda x: (np.prod(x[0:3]), x[3])
If a tuple, it will try to reshape all the images with the same shape.
It must work for all the images in file_list.
concat_axis: int
Axis of concatenation after reshaping
dtype: data type
Dataset data type
If not set, will use the type of the first file.
append: bool
Raises
------
ValueError if concat_axis is bigger than data dimensionality.
Note
----
For now, this only works if the dataset ends up being a 2D matrix.
I haven't tested for multi-dimensionality concatenations.
"""
def isalambda(v):
return isinstance(v, type(lambda: None)) and v.__name__ == '<lambda>'
mode = 'w'
if os.path.exists(file_path):
if append:
mode = 'a'
#loading the metadata into spatialimages
imgs = [nib.load(vol) for vol in file_list]
#getting the shapes of all volumes
shapes = [np.array(img.get_shape()) for img in imgs]
    #getting the reshaped shapes
    if newshape is None:
        nushapes = np.array(shapes)
    elif isalambda(newshape):
        nushapes = np.array([newshape(shape) for shape in shapes])
    else:
        nushapes = np.array([newshape for _ in shapes])
    #checking that concat_axis exists in these new shapes
    for nushape in nushapes:
        if concat_axis > len(nushape) - 1:
            raise ValueError('concat_axis {} is out of range for shape {}.'.format(concat_axis, nushape))
#calculate the shape of the new dataset
n_dims = nushapes.shape[1]
    ds_shape = np.zeros(n_dims, dtype=int)
for a in list(range(n_dims)):
if a == concat_axis:
ds_shape[a] = np.sum(nushapes[:, concat_axis])
else:
ds_shape[a] = np.max(nushapes[:, a])
#get the type of the new dataset
#dtypes = [img.get_data_dtype() for img in imgs]
if dtype is None:
dtype = imgs[0].get_data_dtype()
with h5py.File(file_path, mode) as f:
try:
ic = 0
h5grp = f.create_group(os.path.dirname(h5path))
h5ds = h5grp.create_dataset(os.path.basename(h5path),
ds_shape, dtype)
for img in imgs:
#get the shape of the current image
nushape = nushapes[ic, :]
def append_to_dataset(h5ds, idx, data, concat_axis):
"""
@param h5ds: H5py DataSet
@param idx: int
@param data: ndarray
@param concat_axis: int
@return:
"""
shape = data.shape
ndims = len(shape)
                    if ndims == 1:
                        if concat_axis == 0:
                            h5ds[idx] = data
                    elif ndims == 2:
                        if concat_axis == 0:
                            h5ds[idx] = data
                        elif concat_axis == 1:
                            h5ds[idx] = data
                    elif ndims == 3:
                        if concat_axis == 0:
                            h5ds[idx] = data
                        elif concat_axis == 1:
                            h5ds[idx] = data
                        elif concat_axis == 2:
                            h5ds[idx] = data
#appending the reshaped image into the dataset
append_to_dataset(h5ds, ic,
np.reshape(img.get_data(), tuple(nushape)),
concat_axis)
ic += 1
except ValueError as ve:
raise Exception('Error creating group {} in hdf file {}'.format(h5path, file_path)) from ve | python | {
"resource": ""
} |
q264409 | treefall | validation | def treefall(iterable):
"""
Generate all combinations of the elements of iterable and its subsets.
Parameters
----------
iterable: list, set or dict or any iterable object
Returns
-------
A generator of all possible combinations of the iterable.
    Example
    -------
    >>> for i in treefall([1, 2, 3]): print(i)
    (1, 2, 3)
    (1, 2)
    (1, 3)
    (2, 3)
    (1,)
    (2,)
    (3,)
    ()
"""
num_elems = len(iterable)
for i in range(num_elems, -1, -1):
for c in combinations(iterable, i):
yield c | python | {
"resource": ""
} |
q264410 | get_reliabledictionary_list | validation | def get_reliabledictionary_list(client, application_name, service_name):
"""List existing reliable dictionaries.
List existing reliable dictionaries and respective schema for given application and service.
:param application_name: Name of the application.
:type application_name: str
:param service_name: Name of the service.
:type service_name: str
"""
cluster = Cluster.from_sfclient(client)
service = cluster.get_application(application_name).get_service(service_name)
for dictionary in service.get_dictionaries():
print(dictionary.name) | python | {
"resource": ""
} |
q264411 | get_reliabledictionary_schema | validation | def get_reliabledictionary_schema(client, application_name, service_name, dictionary_name, output_file=None):
"""Query Schema information for existing reliable dictionaries.
Query Schema information existing reliable dictionaries for given application and service.
:param application_name: Name of the application.
:type application_name: str
:param service_name: Name of the service.
:type service_name: str
    :param dictionary_name: Name of the reliable dictionary.
    :type dictionary_name: str
:param output_file: Optional file to save the schema.
"""
cluster = Cluster.from_sfclient(client)
dictionary = cluster.get_application(application_name).get_service(service_name).get_dictionary(dictionary_name)
result = json.dumps(dictionary.get_information(), indent=4)
    if output_file is None:
output_file = "{}-{}-{}-schema-output.json".format(application_name, service_name, dictionary_name)
with open(output_file, "w") as output:
output.write(result)
print('Printed schema information to: ' + output_file)
print(result) | python | {
"resource": ""
} |
q264412 | query_reliabledictionary | validation | def query_reliabledictionary(client, application_name, service_name, dictionary_name, query_string, partition_key=None, partition_id=None, output_file=None):
"""Query existing reliable dictionary.
Query existing reliable dictionaries for given application and service.
:param application_name: Name of the application.
:type application_name: str
:param service_name: Name of the service.
:type service_name: str
:param dictionary_name: Name of the reliable dictionary.
:type dictionary_name: str
:param query_string: An OData query string. For example $top=10. Check https://www.odata.org/documentation/ for more information.
:type query_string: str
    :param partition_key: Optional partition key of the desired partition, either a string if named schema or int if Int64 schema
    :type partition_key: str
:param partition_id: Optional partition GUID of the owning reliable dictionary.
:type partition_id: str
:param output_file: Optional file to save the schema.
"""
cluster = Cluster.from_sfclient(client)
dictionary = cluster.get_application(application_name).get_service(service_name).get_dictionary(dictionary_name)
start = time.time()
    if partition_id is not None:
result = dictionary.query(query_string, PartitionLookup.ID, partition_id)
    elif partition_key is not None:
result = dictionary.query(query_string, PartitionLookup.KEY, partition_key)
else:
result = dictionary.query(query_string)
if type(result) is str:
print(result)
return
else:
result = json.dumps(result.get("value"), indent=4)
print("Query took " + str(time.time() - start) + " seconds")
    if output_file is None:
output_file = "{}-{}-{}-query-output.json".format(application_name, service_name, dictionary_name)
with open(output_file, "w") as output:
output.write(result)
print()
print('Printed output to: ' + output_file)
print(result) | python | {
"resource": ""
} |
q264413 | execute_reliabledictionary | validation | def execute_reliabledictionary(client, application_name, service_name, input_file):
"""Execute create, update, delete operations on existing reliable dictionaries.
    Carry out create, update and delete operations on existing reliable dictionaries for the given application and service.
:param application_name: Name of the application.
:type application_name: str
:param service_name: Name of the service.
:type service_name: str
    :param input_file: Input file with a list of JSON objects that provide the operation information for the reliable dictionaries.
"""
cluster = Cluster.from_sfclient(client)
service = cluster.get_application(application_name).get_service(service_name)
# call get service with headers and params
with open(input_file) as json_file:
json_data = json.load(json_file)
service.execute(json_data)
return | python | {
"resource": ""
} |
q264414 | select_arg_verify | validation | def select_arg_verify(endpoint, cert, key, pem, ca, aad, no_verify): #pylint: disable=invalid-name,too-many-arguments
"""Verify arguments for select command"""
if not (endpoint.lower().startswith('http')
or endpoint.lower().startswith('https')):
raise CLIError('Endpoint must be HTTP or HTTPS')
usage = ('Valid syntax : --endpoint [ [ --key --cert | --pem | --aad] '
'[ --ca | --no-verify ] ]')
if ca and not (pem or all([key, cert])):
raise CLIError(usage)
if no_verify and not (pem or all([key, cert]) or aad):
raise CLIError(usage)
if no_verify and ca:
raise CLIError(usage)
if any([cert, key]) and not all([cert, key]):
raise CLIError(usage)
if aad and any([pem, cert, key]):
raise CLIError(usage)
if pem and any([cert, key]):
raise CLIError(usage) | python | {
"resource": ""
} |
q264415 | get_aad_token | validation | def get_aad_token(endpoint, no_verify):
#pylint: disable-msg=too-many-locals
"""Get AAD token"""
from azure.servicefabric.service_fabric_client_ap_is import (
ServiceFabricClientAPIs
)
from sfctl.auth import ClientCertAuthentication
from sfctl.config import set_aad_metadata
auth = ClientCertAuthentication(None, None, no_verify)
client = ServiceFabricClientAPIs(auth, base_url=endpoint)
aad_metadata = client.get_aad_metadata()
if aad_metadata.type != "aad":
raise CLIError("Not AAD cluster")
aad_resource = aad_metadata.metadata
tenant_id = aad_resource.tenant
authority_uri = aad_resource.login + '/' + tenant_id
context = adal.AuthenticationContext(authority_uri,
api_version=None)
cluster_id = aad_resource.cluster
client_id = aad_resource.client
set_aad_metadata(authority_uri, cluster_id, client_id)
code = context.acquire_user_code(cluster_id, client_id)
print(code['message'])
token = context.acquire_token_with_device_code(
cluster_id, code, client_id)
print("Succeed!")
return token, context.cache | python | {
"resource": ""
} |
q264416 | _openpyxl_read_xl | validation | def _openpyxl_read_xl(xl_path: str):
""" Use openpyxl to read an Excel file. """
try:
wb = load_workbook(filename=xl_path, read_only=True)
except:
raise
else:
return wb | python | {
"resource": ""
} |
q264417 | _check_xl_path | validation | def _check_xl_path(xl_path: str):
""" Return the expanded absolute path of `xl_path` if
if exists and 'xlrd' or 'openpyxl' depending on
which module should be used for the Excel file in `xl_path`.
Parameters
----------
xl_path: str
Path to an Excel file
Returns
-------
xl_path: str
User expanded and absolute path to `xl_path`
module: str
The name of the module you should use to process the
Excel file.
        Choices: 'xlrd', 'openpyxl'
Raises
------
IOError
If the file does not exist
    RuntimeError
If a suitable reader for xl_path is not found
"""
xl_path = op.abspath(op.expanduser(xl_path))
if not op.isfile(xl_path):
raise IOError("Could not find file in {}.".format(xl_path))
return xl_path, _use_openpyxl_or_xlrf(xl_path) | python | {
"resource": ""
} |
q264418 | read_xl | validation | def read_xl(xl_path: str):
""" Return the workbook from the Excel file in `xl_path`."""
xl_path, choice = _check_xl_path(xl_path)
reader = XL_READERS[choice]
return reader(xl_path) | python | {
"resource": ""
} |
q264419 | get_sheet_list | validation | def get_sheet_list(xl_path: str) -> List:
"""Return a list with the name of the sheets in
the Excel file in `xl_path`.
"""
wb = read_xl(xl_path)
if hasattr(wb, 'sheetnames'):
return wb.sheetnames
else:
return wb.sheet_names() | python | {
"resource": ""
} |
q264420 | concat_sheets | validation | def concat_sheets(xl_path: str, sheetnames=None, add_tab_names=False):
""" Return a pandas DataFrame with the concat'ed
content of the `sheetnames` from the Excel file in
`xl_path`.
Parameters
----------
xl_path: str
Path to the Excel file
sheetnames: list of str
List of existing sheet names of `xl_path`.
If None, will use all sheets from `xl_path`.
add_tab_names: bool
If True will add a 'Tab' column which says from which
tab the row comes from.
Returns
-------
df: pandas.DataFrame
"""
xl_path, choice = _check_xl_path(xl_path)
if sheetnames is None:
sheetnames = get_sheet_list(xl_path)
sheets = pd.read_excel(xl_path, sheetname=sheetnames)
if add_tab_names:
for tab in sheets:
sheets[tab]['Tab'] = [tab] * len(sheets[tab])
return pd.concat([sheets[tab] for tab in sheets]) | python | {
"resource": ""
} |
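A usage sketch for `concat_sheets`, assuming a pandas version that accepts the `sheetname` argument used above; 'workbook.xlsx' is a placeholder path.

# Concatenate every sheet and keep track of the originating tab.
df = concat_sheets('workbook.xlsx', add_tab_names=True)
print(df['Tab'].value_counts())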
q264421 | _check_cols | validation | def _check_cols(df, col_names):
""" Raise an AttributeError if `df` does not have a column named as an item of
the list of strings `col_names`.
"""
for col in col_names:
if not hasattr(df, col):
raise AttributeError("DataFrame does not have a '{}' column, got {}.".format(col,
df.columns)) | python | {
"resource": ""
} |
q264422 | col_values | validation | def col_values(df, col_name):
""" Return a list of not null values from the `col_name` column of `df`."""
_check_cols(df, [col_name])
if 'O' in df[col_name] or pd.np.issubdtype(df[col_name].dtype, str): # if the column is of strings
return [nom.lower() for nom in df[pd.notnull(df)][col_name] if not pd.isnull(nom)]
else:
return [nom for nom in df[pd.notnull(df)][col_name] if not pd.isnull(nom)] | python | {
"resource": ""
} |
q264423 | duplicated_rows | validation | def duplicated_rows(df, col_name):
""" Return a DataFrame with the duplicated values of the column `col_name`
in `df`."""
_check_cols(df, [col_name])
dups = df[pd.notnull(df[col_name]) & df.duplicated(subset=[col_name])]
return dups | python | {
"resource": ""
} |
q264424 | duplicated | validation | def duplicated(values: Sequence):
""" Return the duplicated items in `values`"""
vals = pd.Series(values)
return vals[vals.duplicated()] | python | {
"resource": ""
} |
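For example, `duplicated` can flag repeated subject identifiers before building an index:

dups = duplicated(['s01', 's02', 's01', 's03', 's02'])
# `dups` is a pandas Series holding the repeated occurrences: ['s01', 's02']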
q264425 | _to_string | validation | def _to_string(data):
""" Convert to string all values in `data`.
Parameters
----------
data: dict[str]->object
Returns
-------
string_data: dict[str]->str
"""
sdata = data.copy()
for k, v in data.items():
if isinstance(v, datetime):
sdata[k] = timestamp_to_date_str(v)
elif not isinstance(v, (string_types, float, int)):
sdata[k] = str(v)
return sdata | python | {
"resource": ""
} |
q264426 | search_unique | validation | def search_unique(table, sample, unique_fields=None):
""" Search for items in `table` that have the same field sub-set values as in `sample`.
Expecting it to be unique, otherwise will raise an exception.
Parameters
----------
table: tinydb.table
sample: dict
        Sample data
    unique_fields: list of str
        Name of fields (keys) from `sample` used to build the query.
        If None, will use every key in `sample`.
Returns
-------
search_result: tinydb.database.Element
Unique item result of the search.
Raises
------
    MoreThanOneItemError
        If the search returns more than one entry.
"""
if unique_fields is None:
unique_fields = list(sample.keys())
query = _query_data(sample, field_names=unique_fields, operators='__eq__')
items = table.search(query)
if len(items) == 1:
return items[0]
if len(items) == 0:
return None
raise MoreThanOneItemError('Expected to find zero or one items, but found '
'{} items.'.format(len(items))) | python | {
"resource": ""
} |
q264427 | find_unique | validation | def find_unique(table, sample, unique_fields=None):
"""Search in `table` an item with the value of the `unique_fields` in the `sample` sample.
    Check if the obtained result is unique. If nothing is found it will return None;
    if more than one item is found, it will raise a MoreThanOneItemError.
Parameters
----------
table: tinydb.table
sample: dict
Sample data
unique_fields: list of str
Name of fields (keys) from `data` which are going to be used to build
a sample to look for exactly the same values in the database.
If None, will use every key in `data`.
Returns
-------
eid: int
Id of the object found with same `unique_fields`.
None if none is found.
Raises
------
MoreThanOneItemError
If more than one example is found.
"""
res = search_unique(table, sample, unique_fields)
if res is not None:
return res.eid
else:
return res | python | {
"resource": ""
} |
q264428 | _query_sample | validation | def _query_sample(sample, operators='__eq__'):
"""Create a TinyDB query that looks for items that have each field in `sample` with a value
compared with the correspondent operation in `operators`.
Parameters
----------
sample: dict
The sample data
operators: str or list of str
A list of comparison operations for each field value in `sample`.
If this is a str, will use the same operator for all `sample` fields.
If you want different operators for each field, remember to use an OrderedDict for `sample`.
Check TinyDB.Query class for possible choices.
Returns
-------
query: tinydb.database.Query
"""
if isinstance(operators, str):
operators = [operators] * len(sample)
if len(sample) != len(operators):
raise ValueError('Expected `operators` to be a string or a list with the same'
' length as `field_names` ({}), got {}.'.format(len(sample),
operators))
queries = []
for i, fn in enumerate(sample):
fv = sample[fn]
op = operators[i]
queries.append(_build_query(field_name=fn,
field_value=fv,
operator=op))
return _concat_queries(queries, operators='__and__') | python | {
"resource": ""
} |
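A sketch of combining `_query_sample` with different operators per field, assuming tinydb is installed, `db` is an already opened TinyDB instance, and the `_build_query` helper used above applies the operator name to a tinydb Query field; the field names are placeholders.

from collections import OrderedDict

# age >= 18 AND sex == 'F'; OrderedDict keeps the fields aligned with the operators.
sample = OrderedDict([('age', 18), ('sex', 'F')])
query = _query_sample(sample, operators=['__ge__', '__eq__'])
adults = db.table('subjects').search(query)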
q264429 | _query_data | validation | def _query_data(data, field_names=None, operators='__eq__'):
"""Create a tinyDB Query object that looks for items that confirms the correspondent operator
from `operators` for each `field_names` field values from `data`.
Parameters
----------
data: dict
The data sample
field_names: str or list of str
The name of the fields in `data` that will be used for the query.
operators: str or list of str
A list of comparison operations for each field value in `field_names`.
If this is a str, will use the same operator for all `field_names`.
If you want different operators for each field, remember to use an OrderedDict for `data`.
Check TinyDB.Query class for possible choices.
Returns
-------
query: tinydb.database.Query
"""
if field_names is None:
field_names = list(data.keys())
if isinstance(field_names, str):
field_names = [field_names]
# using OrderedDict by default, in case operators has different operators for each field.
sample = OrderedDict([(fn, data[fn]) for fn in field_names])
return _query_sample(sample, operators=operators) | python | {
"resource": ""
} |
q264430 | _concat_queries | validation | def _concat_queries(queries, operators='__and__'):
"""Create a tinyDB Query object that is the concatenation of each query in `queries`.
The concatenation operator is taken from `operators`.
Parameters
----------
queries: list of tinydb.Query
The list of tinydb.Query to be joined.
operators: str or list of str
List of binary operators to join `queries` into one query.
Check TinyDB.Query class for possible choices.
Returns
-------
query: tinydb.database.Query
"""
# checks first
if not queries:
raise ValueError('Expected some `queries`, got {}.'.format(queries))
if len(queries) == 1:
return queries[0]
if isinstance(operators, str):
operators = [operators] * (len(queries) - 1)
if len(queries) - 1 != len(operators):
raise ValueError('Expected `operators` to be a string or a list with the same'
' length as `field_names` ({}), got {}.'.format(len(queries),
operators))
# recursively build the query
first, rest, end = queries[0], queries[1:-1], queries[-1:][0]
bigop = getattr(first, operators[0])
for i, q in enumerate(rest):
bigop = getattr(bigop(q), operators[i])
return bigop(end) | python | {
"resource": ""
} |
q264431 | PetitDB.search_by_eid | validation | def search_by_eid(self, table_name, eid):
"""Return the element in `table_name` with Object ID `eid`.
If None is found will raise a KeyError exception.
Parameters
----------
table_name: str
The name of the table to look in.
eid: int
The Object ID of the element to look for.
Returns
-------
elem: tinydb.database.Element
Raises
------
KeyError
If the element with ID `eid` is not found.
"""
elem = self.table(table_name).get(eid=eid)
if elem is None:
raise KeyError('Could not find {} with eid {}.'.format(table_name, eid))
return elem | python | {
"resource": ""
} |
q264432 | PetitDB.search_unique | validation | def search_unique(self, table_name, sample, unique_fields=None):
""" Search in `table` an item with the value of the `unique_fields` in the `data` sample.
        Check if the obtained result is unique. If nothing is found it will return None;
        if more than one item is found, it will raise a MoreThanOneItemError.
Parameters
----------
table_name: str
sample: dict
Sample data
unique_fields: list of str
Name of fields (keys) from `data` which are going to be used to build
a sample to look for exactly the same values in the database.
If None, will use every key in `data`.
Returns
-------
eid: int
Id of the object found with same `unique_fields`.
None if none is found.
Raises
------
MoreThanOneItemError
If more than one example is found.
"""
return search_unique(table=self.table(table_name),
sample=sample,
unique_fields=unique_fields) | python | {
"resource": ""
} |
q264433 | PetitDB.is_unique | validation | def is_unique(self, table_name, sample, unique_fields=None):
"""Return True if an item with the value of `unique_fields`
from `data` is unique in the table with `table_name`.
False if no sample is found or more than one is found.
See function `find_unique` for more details.
Parameters
----------
table_name: str
sample: dict
Sample data for query
unique_fields: str or list of str
Returns
-------
is_unique: bool
"""
try:
eid = find_unique(self.table(table_name),
sample=sample,
unique_fields=unique_fields)
except:
return False
else:
return eid is not None | python | {
"resource": ""
} |
q264434 | PetitDB.update_unique | validation | def update_unique(self, table_name, fields, data, cond=None, unique_fields=None,
*, raise_if_not_found=False):
"""Update the unique matching element to have a given set of fields.
Parameters
----------
table_name: str
fields: dict or function[dict -> None]
new data/values to insert into the unique element
or a method that will update the elements.
data: dict
Sample data for query
cond: tinydb.Query
which elements to update
unique_fields: list of str
raise_if_not_found: bool
Will raise an exception if the element is not found for update.
Returns
-------
eid: int
The eid of the updated element if found, None otherwise.
"""
eid = find_unique(self.table(table_name), data, unique_fields)
if eid is None:
if raise_if_not_found:
msg = 'Could not find {} with {}'.format(table_name, data)
if cond is not None:
msg += ' where {}.'.format(cond)
raise IndexError(msg)
else:
self.table(table_name).update(_to_string(fields), cond=cond, eids=[eid])
return eid | python | {
"resource": ""
} |
q264435 | PetitDB.count | validation | def count(self, table_name, sample):
"""Return the number of items that match the `sample` field values
in table `table_name`.
Check function search_sample for more details.
"""
return len(list(search_sample(table=self.table(table_name),
sample=sample))) | python | {
"resource": ""
} |
q264436 | is_img | validation | def is_img(obj):
""" Check for get_data and get_affine method in an object
Parameters
----------
obj: any object
Tested object
Returns
-------
is_img: boolean
True if get_data and get_affine methods are present and callable,
False otherwise.
"""
try:
get_data = getattr(obj, 'get_data')
get_affine = getattr(obj, 'get_affine')
return isinstance(get_data, collections.Callable) and \
isinstance(get_affine, collections.Callable)
except AttributeError:
return False | python | {
"resource": ""
} |
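A quick sketch of `is_img` in action, assuming numpy and a nibabel version that still exposes `get_affine`:

import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.zeros((2, 2, 2)), affine=np.eye(4))
is_img(img)                  # True: get_data and get_affine are callable
is_img(np.zeros((2, 2, 2)))  # False: plain arrays lack those methods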
q264437 | get_data | validation | def get_data(img):
"""Get the data in the image without having a side effect on the Nifti1Image object
Parameters
----------
img: Nifti1Image
Returns
-------
np.ndarray
"""
if hasattr(img, '_data_cache') and img._data_cache is None:
# Copy locally the nifti_image to avoid the side effect of data
# loading
img = copy.deepcopy(img)
# force garbage collector
gc.collect()
return img.get_data() | python | {
"resource": ""
} |
q264438 | get_shape | validation | def get_shape(img):
"""Return the shape of img.
    Parameters
    ----------
img:
Returns
-------
shape: tuple
"""
if hasattr(img, 'shape'):
shape = img.shape
else:
shape = img.get_data().shape
return shape | python | {
"resource": ""
} |
q264439 | check_img_compatibility | validation | def check_img_compatibility(one_img, another_img, only_check_3d=False):
"""Return true if one_img and another_img have the same shape.
False otherwise.
If both are nibabel.Nifti1Image will also check for affine matrices.
Parameters
----------
one_img: nibabel.Nifti1Image or np.ndarray
another_img: nibabel.Nifti1Image or np.ndarray
only_check_3d: bool
If True will check only the 3D part of the affine matrices when they have more dimensions.
Raises
------
NiftiFilesNotCompatible
"""
nd_to_check = None
if only_check_3d:
nd_to_check = 3
if hasattr(one_img, 'shape') and hasattr(another_img, 'shape'):
if not have_same_shape(one_img, another_img, nd_to_check=nd_to_check):
msg = 'Shape of the first image: \n{}\n is different from second one: \n{}'.format(one_img.shape,
another_img.shape)
raise NiftiFilesNotCompatible(repr_imgs(one_img), repr_imgs(another_img), message=msg)
if hasattr(one_img, 'get_affine') and hasattr(another_img, 'get_affine'):
if not have_same_affine(one_img, another_img, only_check_3d=only_check_3d):
msg = 'Affine matrix of the first image: \n{}\n is different ' \
'from second one:\n{}'.format(one_img.get_affine(), another_img.get_affine())
raise NiftiFilesNotCompatible(repr_imgs(one_img), repr_imgs(another_img), message=msg) | python | {
"resource": ""
} |
q264440 | have_same_affine | validation | def have_same_affine(one_img, another_img, only_check_3d=False):
"""Return True if the affine matrix of one_img is close to the affine matrix of another_img.
False otherwise.
Parameters
----------
one_img: nibabel.Nifti1Image
another_img: nibabel.Nifti1Image
only_check_3d: bool
If True will extract only the 3D part of the affine matrices when they have more dimensions.
Returns
-------
bool
Raises
------
ValueError
"""
img1 = check_img(one_img)
img2 = check_img(another_img)
ndim1 = len(img1.shape)
ndim2 = len(img2.shape)
    if ndim1 < 3:
        raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img1), ndim1))
    if ndim2 < 3:
        raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img2), ndim2))
affine1 = img1.get_affine()
affine2 = img2.get_affine()
if only_check_3d:
affine1 = affine1[:3, :3]
affine2 = affine2[:3, :3]
try:
return np.allclose(affine1, affine2)
except ValueError:
return False
except:
raise | python | {
"resource": ""
} |
q264441 | repr_imgs | validation | def repr_imgs(imgs):
"""Printing of img or imgs"""
if isinstance(imgs, string_types):
return imgs
if isinstance(imgs, collections.Iterable):
return '[{}]'.format(', '.join(repr_imgs(img) for img in imgs))
# try get_filename
try:
filename = imgs.get_filename()
if filename is not None:
img_str = "{}('{}')".format(imgs.__class__.__name__, filename)
else:
img_str = "{}(shape={}, affine={})".format(imgs.__class__.__name__,
repr(get_shape(imgs)),
repr(imgs.get_affine()))
except Exception as exc:
log.error('Error reading attributes from img.get_filename()')
return repr(imgs)
else:
return img_str | python | {
"resource": ""
} |
q264442 | have_same_shape | validation | def have_same_shape(array1, array2, nd_to_check=None):
"""
Returns true if array1 and array2 have the same shapes, false
otherwise.
Parameters
----------
array1: numpy.ndarray
array2: numpy.ndarray
nd_to_check: int
Number of the dimensions to check, i.e., if == 3 then will check only the 3 first numbers of array.shape.
Returns
-------
bool
"""
shape1 = array1.shape
shape2 = array2.shape
if nd_to_check is not None:
        if len(shape1) < nd_to_check:
            msg = 'Number of dimensions to check {} is out of bounds for the shape of the first image: \n{}\n.'.format(nd_to_check, shape1)
            raise ValueError(msg)
        elif len(shape2) < nd_to_check:
            msg = 'Number of dimensions to check {} is out of bounds for the shape of the second image: \n{}\n.'.format(nd_to_check, shape2)
            raise ValueError(msg)
shape1 = shape1[:nd_to_check]
shape2 = shape2[:nd_to_check]
return shape1 == shape2 | python | {
"resource": ""
} |
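For instance, `nd_to_check=3` lets a 3D mask be compared against only the spatial dimensions of a 4D functional image:

import numpy as np

mask = np.zeros((64, 64, 30))
bold = np.zeros((64, 64, 30, 120))
have_same_shape(mask, bold, nd_to_check=3)  # True: spatial shapes match
have_same_shape(mask, bold)                 # False: full shapes differ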
q264443 | dir_match | validation | def dir_match(regex, wd=os.curdir):
"""Create a list of regex matches that result from the match_regex
of all file names within wd.
The list of files will have wd as path prefix.
@param regex: string
@param wd: string
working directory
@return:
"""
ls = os.listdir(wd)
filt = re.compile(regex).match
return filter_list(ls, filt) | python | {
"resource": ""
} |
q264444 | recursive_dir_match | validation | def recursive_dir_match(folder_path, regex=''):
"""
Returns absolute paths of folders that match the regex within folder_path and
all its children folders.
Note: The regex matching is done using the match function
of the re module.
Parameters
----------
folder_path: string
regex: string
Returns
-------
A list of strings.
"""
outlist = []
for root, dirs, files in os.walk(folder_path):
outlist.extend([op.join(root, f) for f in dirs
if re.match(regex, f)])
return outlist | python | {
"resource": ""
} |
q264445 | get_file_list | validation | def get_file_list(file_dir, regex=''):
"""
    Creates a list of files that match `regex` within `file_dir`.
    The list of files will have `file_dir` as path prefix.
    Parameters
    ----------
    file_dir: str
    regex: str
    Returns
    -------
    List of paths to files that match `regex`
"""
file_list = os.listdir(file_dir)
file_list.sort()
if regex:
file_list = search_list(file_list, regex)
file_list = [op.join(file_dir, fname) for fname in file_list]
return file_list | python | {
"resource": ""
} |
q264446 | recursive_find_search | validation | def recursive_find_search(folder_path, regex=''):
"""
    Returns absolute paths of files that match the regex within folder_path and
all its children folders.
Note: The regex matching is done using the search function
of the re module.
Parameters
----------
folder_path: string
regex: string
Returns
-------
A list of strings.
"""
outlist = []
for root, dirs, files in os.walk(folder_path):
outlist.extend([op.join(root, f) for f in files
if re.search(regex, f)])
return outlist | python | {
"resource": ""
} |
q264447 | iter_recursive_find | validation | def iter_recursive_find(folder_path, *regex):
"""
    Returns absolute paths of files that match the regexes within folder_path and
all its children folders.
This is an iterator function that will use yield to return each set of
file_paths in one iteration.
Will only return value if all the strings in regex match a file name.
Note: The regex matching is done using the search function
of the re module.
Parameters
----------
folder_path: string
regex: strings
    Yields
    ------
    A list of strings, one matched file path per regex in `regex`.
"""
for root, dirs, files in os.walk(folder_path):
if len(files) > 0:
outlist = []
for f in files:
for reg in regex:
if re.search(reg, f):
outlist.append(op.join(root, f))
if len(outlist) == len(regex):
yield outlist | python | {
"resource": ""
} |
q264448 | get_all_files | validation | def get_all_files(folder):
"""
Generator that loops through all absolute paths of the files within folder
Parameters
----------
folder: str
Root folder start point for recursive search.
Yields
------
fpath: str
Absolute path of one file in the folders
"""
for path, dirlist, filelist in os.walk(folder):
for fn in filelist:
yield op.join(path, fn) | python | {
"resource": ""
} |
q264449 | recursive_glob | validation | def recursive_glob(base_directory, regex=''):
"""
Uses glob to find all files or folders that match the regex
starting from the base_directory.
Parameters
----------
base_directory: str
regex: str
Returns
-------
files: list
"""
files = glob(op.join(base_directory, regex))
for path, dirlist, filelist in os.walk(base_directory):
for dir_name in dirlist:
files.extend(glob(op.join(path, dir_name, regex)))
return files | python | {
"resource": ""
} |
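A usage sketch; the directory is a placeholder.

# Collect every compressed NIfTI file under a study folder, at any depth.
nii_files = recursive_glob('/data/study', regex='*.nii.gz')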
q264450 | compose_err_msg | validation | def compose_err_msg(msg, **kwargs):
"""Append key-value pairs to msg, for display.
Parameters
----------
msg: string
arbitrary message
kwargs: dict
arbitrary dictionary
Returns
-------
updated_msg: string
msg, with "key: value" appended. Only string values are appended.
Example
-------
>>> compose_err_msg('Error message with arguments...', arg_num=123, \
arg_str='filename.nii', arg_bool=True)
'Error message with arguments...\\narg_str: filename.nii'
>>>
"""
updated_msg = msg
for k, v in sorted(kwargs.items()):
if isinstance(v, _basestring): # print only str-like arguments
updated_msg += "\n" + k + ": " + v
return updated_msg | python | {
"resource": ""
} |
q264451 | group_dicom_files | validation | def group_dicom_files(dicom_file_paths, header_fields):
"""
    Gets a list of DICOM file absolute paths and returns a dict of
    DicomFileSets. Each group contains a set of DICOM files that have
exactly the same headers.
Parameters
----------
dicom_file_paths: list of str
List or set of DICOM file paths
header_fields: list of str
List of header field names to check on the comparisons of the DICOM files.
Returns
-------
dict of DicomFileSets
The key is one filepath representing the group (the first found).
"""
dist = SimpleDicomFileDistance(field_weights=header_fields)
path_list = dicom_file_paths.copy()
path_groups = DefaultOrderedDict(DicomFileSet)
while len(path_list) > 0:
file_path1 = path_list.pop()
file_subgroup = [file_path1]
dist.set_dicom_file1(file_path1)
j = len(path_list)-1
while j >= 0:
file_path2 = path_list[j]
dist.set_dicom_file2(file_path2)
if dist.transform():
file_subgroup.append(file_path2)
path_list.pop(j)
j -= 1
path_groups[file_path1].from_set(file_subgroup, check_if_dicoms=False)
return path_groups | python | {
"resource": ""
} |
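A sketch combining `recursive_find_search` (defined above) with `group_dicom_files`; the path and header field names are placeholders and the DICOM helper classes from this module are assumed to be importable.

dcm_paths = recursive_find_search('/data/dicom', regex=r'\.dcm$')
groups = group_dicom_files(dcm_paths, ['PatientID', 'SeriesInstanceUID'])
for representative, file_set in groups.items():
    print(representative, len(file_set))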
q264452 | copy_groups_to_folder | validation | def copy_groups_to_folder(dicom_groups, folder_path, groupby_field_name):
"""Copy the DICOM file groups to folder_path. Each group will be copied into
a subfolder with named given by groupby_field.
Parameters
----------
dicom_groups: boyle.dicom.sets.DicomFileSet
folder_path: str
Path to where copy the DICOM files.
groupby_field_name: str
DICOM field name. Will get the value of this field to name the group
folder.
"""
if dicom_groups is None or not dicom_groups:
raise ValueError('Expected a boyle.dicom.sets.DicomFileSet.')
if not os.path.exists(folder_path):
os.makedirs(folder_path, exist_ok=False)
for dcmg in dicom_groups:
if groupby_field_name is not None and len(groupby_field_name) > 0:
dfile = DicomFile(dcmg)
dir_name = ''
for att in groupby_field_name:
dir_name = os.path.join(dir_name, dfile.get_attributes(att))
dir_name = str(dir_name)
else:
dir_name = os.path.basename(dcmg)
group_folder = os.path.join(folder_path, dir_name)
os.makedirs(group_folder, exist_ok=False)
log.debug('Copying files to {}.'.format(group_folder))
import shutil
dcm_files = dicom_groups[dcmg]
for srcf in dcm_files:
destf = os.path.join(group_folder, os.path.basename(srcf))
while os.path.exists(destf):
destf += '+'
shutil.copy2(srcf, destf) | python | {
"resource": ""
} |
q264453 | calculate_file_distances | validation | def calculate_file_distances(dicom_files, field_weights=None,
dist_method_cls=None, **kwargs):
"""
    Calculates the DicomFileDistance between all files in dicom_files, using a
    weighted Levenshtein measure between all field names in field_weights and
    their corresponding weights.
Parameters
----------
dicom_files: iterable of str
Dicom file paths
field_weights: dict of str to float
A dict with header field names to float scalar values, that
indicate a distance measure ratio for the levenshtein distance
averaging of all the header field names in it. e.g., {'PatientID': 1}
dist_method_cls: DicomFileDistance class
Distance method object to compare the files.
If None, the default DicomFileDistance method using Levenshtein
        distance between the field_weights will be used.
kwargs: DicomFileDistance instantiation named arguments
        Apart from the field_weights argument.
Returns
-------
file_dists: np.ndarray or scipy.sparse.lil_matrix of shape NxN
Levenshtein distances between each of the N items in dicom_files.
"""
if dist_method_cls is None:
dist_method = LevenshteinDicomFileDistance(field_weights)
else:
try:
dist_method = dist_method_cls(field_weights=field_weights, **kwargs)
except:
log.exception('Could not instantiate {} object with field_weights '
'and {}'.format(dist_method_cls, kwargs))
dist_dtype = np.float16
n_files = len(dicom_files)
try:
file_dists = np.zeros((n_files, n_files), dtype=dist_dtype)
except MemoryError as mee:
import scipy.sparse
file_dists = scipy.sparse.lil_matrix((n_files, n_files),
dtype=dist_dtype)
for idxi in range(n_files):
dist_method.set_dicom_file1(dicom_files[idxi])
for idxj in range(idxi+1, n_files):
dist_method.set_dicom_file2(dicom_files[idxj])
if idxi != idxj:
file_dists[idxi, idxj] = dist_method.transform()
return file_dists | python | {
"resource": ""
} |
q264454 | SimpleDicomFileDistance.transform | validation | def transform(self):
"""Check the field values in self.dcmf1 and self.dcmf2 and returns True
if all the field values are the same, False otherwise.
Returns
-------
bool
"""
if self.dcmf1 is None or self.dcmf2 is None:
return np.inf
for field_name in self.field_weights:
if (str(getattr(self.dcmf1, field_name, ''))
!= str(getattr(self.dcmf2, field_name, ''))):
return False
return True | python | {
"resource": ""
} |
q264455 | DicomFilesClustering.levenshtein_analysis | validation | def levenshtein_analysis(self, field_weights=None):
"""
Updates the status of the file clusters comparing the cluster
key files with a levenshtein weighted measure using either the
header_fields or self.header_fields.
Parameters
----------
field_weights: dict of strings with floats
A dict with header field names to float scalar values, that indicate a distance measure
ratio for the levenshtein distance averaging of all the header field names in it.
e.g., {'PatientID': 1}
"""
if field_weights is None:
if not isinstance(self.field_weights, dict):
raise ValueError('Expected a dict for `field_weights` parameter, '
'got {}'.format(type(self.field_weights)))
key_dicoms = list(self.dicom_groups.keys())
file_dists = calculate_file_distances(key_dicoms, field_weights, self._dist_method_cls)
return file_dists | python | {
"resource": ""
} |
q264456 | DicomFilesClustering.dist_percentile_threshold | validation | def dist_percentile_threshold(dist_matrix, perc_thr=0.05, k=1):
"""Thresholds a distance matrix and returns the result.
Parameters
----------
dist_matrix: array_like
Input array or object that can be converted to an array.
perc_thr: float in range of [0,100]
Percentile to compute which must be between 0 and 100 inclusive.
k: int, optional
Diagonal above which to zero elements.
k = 0 (the default) is the main diagonal,
k < 0 is below it and k > 0 is above.
Returns
-------
array_like
"""
triu_idx = np.triu_indices(dist_matrix.shape[0], k=k)
upper = np.zeros_like(dist_matrix)
upper[triu_idx] = dist_matrix[triu_idx] < np.percentile(dist_matrix[triu_idx], perc_thr)
return upper | python | {
"resource": ""
} |
q264457 | DicomFilesClustering.get_groups_in_same_folder | validation | def get_groups_in_same_folder(self, folder_depth=3):
"""
Returns a list of 2-tuples with pairs of dicom groups that
are in the same folder within given depth.
Parameters
----------
folder_depth: int
Path depth to check for folder equality.
Returns
-------
list of tuples of str
"""
group_pairs = []
key_dicoms = list(self.dicom_groups.keys())
idx = len(key_dicoms)
while idx > 0:
group1 = key_dicoms.pop()
dir_group1 = get_folder_subpath(group1, folder_depth)
for group in key_dicoms:
if group.startswith(dir_group1):
group_pairs.append((group1, group))
idx -= 1
return group_pairs | python | {
"resource": ""
} |
q264458 | DicomFilesClustering.merge_groups | validation | def merge_groups(self, indices):
"""Extend the lists within the DICOM groups dictionary.
The indices will indicate which list have to be extended by which
other list.
Parameters
----------
        indices: list or tuple of 2 iterables of int, both having the same length
The indices of the lists that have to be merged, both iterables
items will be read pair by pair, the first is the index to the
list that will be extended with the list of the second index.
The indices can be constructed with Numpy e.g.,
indices = np.where(square_matrix)
"""
try:
merged = merge_dict_of_lists(self.dicom_groups, indices,
pop_later=True, copy=True)
self.dicom_groups = merged
except IndexError:
raise IndexError('Index out of range to merge DICOM groups.') | python | {
"resource": ""
} |
q264459 | DicomFilesClustering.move_to_folder | validation | def move_to_folder(self, folder_path, groupby_field_name=None):
"""Copy the file groups to folder_path. Each group will be copied into
a subfolder with named given by groupby_field.
Parameters
----------
folder_path: str
Path to where copy the DICOM files.
groupby_field_name: str
DICOM field name. Will get the value of this field to name the group
folder. If empty or None will use the basename of the group key file.
"""
try:
copy_groups_to_folder(self.dicom_groups, folder_path, groupby_field_name)
except IOError as ioe:
raise IOError('Error moving dicom groups to {}.'.format(folder_path)) from ioe | python | {
"resource": ""
} |
q264460 | DicomFilesClustering.get_unique_field_values_per_group | validation | def get_unique_field_values_per_group(self, field_name,
field_to_use_as_key=None):
"""Return a dictionary where the key is the group key file path and
the values are sets of unique values of the field name of all DICOM
files in the group.
Parameters
----------
field_name: str
Name of the field to read from all files
field_to_use_as_key: str
Name of the field to get the value and use as key.
If None, will use the same key as the dicom_groups.
Returns
-------
Dict of sets
"""
unique_vals = DefaultOrderedDict(set)
for dcmg in self.dicom_groups:
for f in self.dicom_groups[dcmg]:
field_val = DicomFile(f).get_attributes(field_name)
key_val = dcmg
if field_to_use_as_key is not None:
try:
key_val = str(DicomFile(dcmg).get_attributes(field_to_use_as_key))
except KeyError as ke:
raise KeyError('Error getting field {} from '
'file {}'.format(field_to_use_as_key,
dcmg)) from ke
unique_vals[key_val].add(field_val)
return unique_vals | python | {
"resource": ""
} |
q264461 | get_config_value | validation | def get_config_value(name, fallback=None):
"""Gets a config by name.
In the case where the config name is not found, will use fallback value."""
cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX)
return cli_config.get('servicefabric', name, fallback) | python | {
"resource": ""
} |
q264462 | get_config_bool | validation | def get_config_bool(name):
"""Checks if a config value is set to a valid bool value."""
cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX)
return cli_config.getboolean('servicefabric', name, False) | python | {
"resource": ""
} |
q264463 | set_config_value | validation | def set_config_value(name, value):
"""Set a config by name to a value."""
cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX)
cli_config.set_value('servicefabric', name, value) | python | {
"resource": ""
} |
q264464 | cert_info | validation | def cert_info():
"""Path to certificate related files, either a single file path or a
tuple. In the case of no security, returns None."""
sec_type = security_type()
if sec_type == 'pem':
return get_config_value('pem_path', fallback=None)
if sec_type == 'cert':
cert_path = get_config_value('cert_path', fallback=None)
key_path = get_config_value('key_path', fallback=None)
return cert_path, key_path
return None | python | {
"resource": ""
} |
q264465 | set_aad_cache | validation | def set_aad_cache(token, cache):
"""Set AAD token cache."""
set_config_value('aad_token', jsonpickle.encode(token))
set_config_value('aad_cache', jsonpickle.encode(cache)) | python | {
"resource": ""
} |
q264466 | set_aad_metadata | validation | def set_aad_metadata(uri, resource, client):
"""Set AAD metadata."""
set_config_value('authority_uri', uri)
set_config_value('aad_resource', resource)
set_config_value('aad_client', client) | python | {
"resource": ""
} |
q264467 | set_auth | validation | def set_auth(pem=None, cert=None, key=None, aad=False):
"""Set certificate usage paths"""
if any([cert, key]) and pem:
raise ValueError('Cannot specify both pem and cert or key')
if any([cert, key]) and not all([cert, key]):
raise ValueError('Must specify both cert and key')
if pem:
set_config_value('security', 'pem')
set_config_value('pem_path', pem)
elif cert or key:
set_config_value('security', 'cert')
set_config_value('cert_path', cert)
set_config_value('key_path', key)
elif aad:
set_config_value('security', 'aad')
else:
set_config_value('security', 'none') | python | {
"resource": ""
} |
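For example, a client PEM certificate could be registered like this (the paths are placeholders); later calls to `security_type` and `cert_info` read the values back:

set_auth(pem='/home/user/cluster-client.pem')
# Or a split certificate/key pair:
set_auth(cert='/home/user/client.crt', key='/home/user/client.key')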
q264468 | filter_objlist | validation | def filter_objlist(olist, fieldname, fieldval):
"""
Returns a list with of the objects in olist that have a fieldname valued as fieldval
Parameters
----------
olist: list of objects
fieldname: string
fieldval: anything
Returns
-------
list of objets
"""
return [x for x in olist if getattr(x, fieldname) == fieldval] | python | {
"resource": ""
} |
q264469 | is_valid_regex | validation | def is_valid_regex(string):
"""
Checks whether the re module can compile the given regular expression.
Parameters
----------
string: str
Returns
-------
boolean
"""
try:
re.compile(string)
is_valid = True
except re.error:
is_valid = False
return is_valid | python | {
"resource": ""
} |
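For example, user-supplied patterns can be filtered before compiling them:

patterns = [r'subj-\d+', '([unbalanced']
valid = [p for p in patterns if is_valid_regex(p)]  # ['subj-\\d+']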
q264470 | is_fnmatch_regex | validation | def is_fnmatch_regex(string):
"""
Returns True if the given string is considered a fnmatch
regular expression, False otherwise.
    It will look for the characters '!', '*' and '$' in the string.
:param string: str
"""
is_regex = False
regex_chars = ['!', '*', '$']
for c in regex_chars:
if string.find(c) > -1:
return True
return is_regex | python | {
"resource": ""
} |
q264471 | where_is | validation | def where_is(strings, pattern, n=1, lookup_func=re.match):
"""Return index of the nth match found of pattern in strings
Parameters
----------
strings: list of str
List of strings
pattern: str
Pattern to be matched
    n: int
        Number of times the match must happen to return the item index.
lookup_func: callable
Function to match each item in strings to the pattern, e.g., re.match or re.search.
Returns
-------
index: int
Index of the nth item that matches the pattern.
If there are no n matches will return -1
"""
count = 0
for idx, item in enumerate(strings):
if lookup_func(pattern, item):
count += 1
if count == n:
return idx
return -1 | python | {
"resource": ""
} |
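A small example of `where_is` with the default `re.match` lookup:

names = ['sub-01_T1w.nii', 'sub-02_T1w.nii', 'sub-02_bold.nii']
where_is(names, 'sub-02', n=2)  # 2: index of the second match
where_is(names, 'sub-03')       # -1: no match found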
q264472 | generate_config | validation | def generate_config(output_directory):
""" Generate a dcm2nii configuration file that disable the interactive
mode.
"""
if not op.isdir(output_directory):
os.makedirs(output_directory)
config_file = op.join(output_directory, "config.ini")
open_file = open(config_file, "w")
open_file.write("[BOOL]\nManualNIfTIConv=0\n")
open_file.close()
return config_file | python | {
"resource": ""
} |
q264473 | call_dcm2nii | validation | def call_dcm2nii(work_dir, arguments=''):
"""Converts all DICOM files within `work_dir` into one or more
NifTi files by calling dcm2nii on this folder.
Parameters
----------
work_dir: str
Path to the folder that contain the DICOM files
arguments: str
String containing all the flag arguments for `dcm2nii` CLI.
Returns
-------
sys_code: int
dcm2nii execution return code
"""
if not op.exists(work_dir):
raise IOError('Folder {} not found.'.format(work_dir))
cmd_line = 'dcm2nii {0} "{1}"'.format(arguments, work_dir)
log.info(cmd_line)
return subprocess.check_call(cmd_line, shell=True) | python | {
"resource": ""
} |
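A usage sketch; the paths are placeholders, `dcm2nii` must be on the PATH, and `-o` is the output-folder flag also used by `convert_dcm2nii` below.

call_dcm2nii('/data/dicom/series01', arguments='-o "/data/nifti"')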
q264474 | convert_dcm2nii | validation | def convert_dcm2nii(input_dir, output_dir, filename):
""" Call MRICron's `dcm2nii` to convert the DICOM files inside `input_dir`
to Nifti and save the Nifti file in `output_dir` with a `filename` prefix.
Parameters
----------
input_dir: str
Path to the folder that contains the DICOM files
output_dir: str
Path to the folder where to save the NifTI file
filename: str
Output file basename
Returns
-------
filepaths: list of str
List of file paths created in `output_dir`.
"""
# a few checks before doing the job
if not op.exists(input_dir):
raise IOError('Expected an existing folder in {}.'.format(input_dir))
if not op.exists(output_dir):
raise IOError('Expected an existing output folder in {}.'.format(output_dir))
# create a temporary folder for dcm2nii export
tmpdir = tempfile.TemporaryDirectory(prefix='dcm2nii_')
# call dcm2nii
arguments = '-o "{}" -i y'.format(tmpdir.name)
try:
call_out = call_dcm2nii(input_dir, arguments)
except:
raise
else:
log.info('Converted "{}" to nifti.'.format(input_dir))
# get the filenames of the files that dcm2nii produced
filenames = glob(op.join(tmpdir.name, '*.nii*'))
# cleanup `filenames`, using only the post-processed (reoriented, cropped, etc.) images by dcm2nii
cleaned_filenames = remove_dcm2nii_underprocessed(filenames)
# copy files to the output_dir
filepaths = []
for srcpath in cleaned_filenames:
dstpath = op.join(output_dir, filename)
realpath = copy_w_plus(srcpath, dstpath)
filepaths.append(realpath)
# copy any other file produced by dcm2nii that is not a NifTI file, e.g., *.bvals, *.bvecs, etc.
basename = op.basename(remove_ext(srcpath))
aux_files = set(glob(op.join(tmpdir.name, '{}.*' .format(basename)))) - \
set(glob(op.join(tmpdir.name, '{}.nii*'.format(basename))))
for aux_file in aux_files:
aux_dstpath = copy_w_ext(aux_file, output_dir, remove_ext(op.basename(realpath)))
filepaths.append(aux_dstpath)
return filepaths | python | {
"resource": ""
} |
q264475 | remove_dcm2nii_underprocessed | validation | def remove_dcm2nii_underprocessed(filepaths):
""" Return a subset of `filepaths`. Keep only the files that have a basename longer than the
others with same suffix.
This works based on that dcm2nii appends a preffix character for each processing
step it does automatically in the DICOM to NifTI conversion.
Parameters
----------
filepaths: iterable of str
Returns
-------
cleaned_paths: iterable of str
"""
cln_flist = []
# sort the file paths by length
len_sorted = sorted(filepaths, key=len)
for idx, fpath in enumerate(len_sorted):
remove = False
# get the basename and the rest of the files
fname = op.basename(fpath)
rest = len_sorted[idx+1:]
# check if the basename is in the basename of the rest of the files
for rest_fpath in rest:
rest_file = op.basename(rest_fpath)
if rest_file.endswith(fname):
remove = True
break
if not remove:
cln_flist.append(fpath)
return cln_flist | python | {
"resource": ""
} |
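A small illustration of the filtering rule, assuming dcm2nii's usual single-letter prefixes (e.g. 'o' for reoriented, 'c' for cropped); the file names are made up:
files = ['/tmp/s01.nii.gz', '/tmp/os01.nii.gz', '/tmp/cos01.nii.gz']
print(remove_dcm2nii_underprocessed(files))
# ['/tmp/cos01.nii.gz'] -- only the most processed file is kept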
q264476 | dictify | validation | def dictify(a_named_tuple):
"""Transform a named tuple into a dictionary"""
return dict((s, getattr(a_named_tuple, s)) for s in a_named_tuple._fields) | python | {
"resource": ""
} |
q264477 | merge_dict_of_lists | validation | def merge_dict_of_lists(adict, indices, pop_later=True, copy=True):
"""Extend the within a dict of lists. The indices will indicate which
list have to be extended by which other list.
Parameters
----------
adict: OrderedDict
An ordered dictionary of lists
indices: list or tuple of 2 iterables of int, both having the same length
The indices of the lists that have to be merged. Both iterables are
read pair by pair: the first is the index of the list that
will be extended with the list at the second index.
The indices can be constructed with Numpy e.g.,
indices = np.where(square_matrix)
pop_later: bool
If True, will pop out the lists that are indicated in the second
list of indices.
copy: bool
If True will perform a deep copy of the input adict before
modifying it, hence not changing the original input.
Returns
-------
Dictionary of lists
Raises
------
IndexError
If the indices are out of range
"""
def check_indices(idxs, x):
for i in chain(*idxs):
if i < 0 or i >= x:
raise IndexError("Given indices are out of dict range.")
check_indices(indices, len(adict))
rdict = adict.copy() if copy else adict
dict_keys = list(rdict.keys())
for i, j in zip(*indices):
rdict[dict_keys[i]].extend(rdict[dict_keys[j]])
if pop_later:
for i, j in zip(*indices):
rdict.pop(dict_keys[j], '')
return rdict | python | {
"resource": ""
} |
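A short sketch of how the index pairs drive the merge; the data is made up:
from collections import OrderedDict
import numpy as np

adict = OrderedDict([('a', [1, 2]), ('b', [3]), ('c', [4, 5])])
# merge the list at position 2 ('c') into the list at position 0 ('a')
indices = (np.array([0]), np.array([2]))
print(merge_dict_of_lists(adict, indices))
# OrderedDict([('a', [1, 2, 4, 5]), ('b', [3])])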
q264478 | append_dict_values | validation | def append_dict_values(list_of_dicts, keys=None):
"""
Return a dict of lists from a list of dicts with the same keys.
For each dict in list_of_dicts we look for the values of the
given keys and append them to the output dict.
Parameters
----------
list_of_dicts: list of dicts
keys: list of str
List of keys to create in the output dict
If None will use all keys in the first element of list_of_dicts
Returns
-------
DefaultOrderedDict of lists
"""
if keys is None:
keys = list(list_of_dicts[0].keys())
dict_of_lists = DefaultOrderedDict(list)
for d in list_of_dicts:
for k in keys:
dict_of_lists[k].append(d[k])
return dict_of_lists | python | {
"resource": ""
} |
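An illustrative call, assuming every record carries the same keys:
records = [{'name': 'ana', 'age': 30}, {'name': 'bob', 'age': 25}]
cols = append_dict_values(records)
print(cols['name'], cols['age'])  # ['ana', 'bob'] [30, 25]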
q264479 | import_pyfile | validation | def import_pyfile(filepath, mod_name=None):
"""
Imports the contents of filepath as a Python module.
:param filepath: string
:param mod_name: string
Name of the module when imported
:return: module
Imported module
"""
import sys
if sys.version_info.major == 3:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader('', filepath)
mod = loader.load_module(mod_name)
else:
import imp
mod = imp.load_source(mod_name, filepath)
return mod | python | {
"resource": ""
} |
q264480 | copy | validation | def copy(configfile='', destpath='', overwrite=False, sub_node=''):
"""Copies the files in the built file tree map
to destpath.
:param configfile: string
Path to the FileTreeMap config file
:param destpath: string
Path to the files destination
:param overwrite: bool
Overwrite files if they already exist.
:param sub_node: string
Tree map configuration sub path.
Will copy only the contents within this sub-node
"""
log.info('Running {0} {1} {2}'.format(os.path.basename(__file__),
whoami(),
locals()))
assert(os.path.isfile(configfile))
if os.path.exists(destpath):
if os.listdir(destpath):
raise FolderAlreadyExists('Folder {0} already exists. Please clean '
'it or change destpath.'.format(destpath))
else:
log.info('Creating folder {0}'.format(destpath))
path(destpath).makedirs_p()
from boyle.files.file_tree_map import FileTreeMap
file_map = FileTreeMap()
try:
file_map.from_config_file(configfile)
except Exception as e:
raise FileTreeMapError(str(e))
if sub_node:
sub_map = file_map.get_node(sub_node)
if not sub_map:
raise FileTreeMapError('Could not find sub node '
'{0}'.format(sub_node))
file_map._filetree = {}
file_map._filetree[sub_node] = sub_map
try:
file_map.copy_to(destpath, overwrite=overwrite)
except Exception as e:
raise FileTreeMapError(str(e)) | python | {
"resource": ""
} |
q264481 | convert_sav | validation | def convert_sav(inputfile, outputfile=None, method='rpy2', otype='csv'):
""" Transforms the input .sav SPSS file into other format.
If you don't specify an outputfile, it will use the
inputfile and change its extension to .csv
"""
assert(os.path.isfile(inputfile))
assert(method=='rpy2' or method=='savread')
if method == 'rpy2':
df = sav_to_pandas_rpy2(inputfile)
elif method == 'savread':
df = sav_to_pandas_savreader(inputfile)
otype_exts = {'csv': '.csv',
'hdf': '.h5',
'stata': '.dta',
'json': '.json',
'pickle': '.pickle',
'excel': '.xls',
'html': '.html'}
if outputfile is None:
outputfile = inputfile.replace(path(inputfile).ext, '')
outputfile = add_extension_if_needed(outputfile, otype_exts[otype])
if otype == 'csv':
df.to_csv(outputfile)
elif otype == 'hdf':
df.to_hdf(outputfile, os.path.basename(outputfile))
elif otype == 'stata':
df.to_stata(outputfile)
elif otype == 'json':
df.to_json(outputfile)
elif otype == 'pickle':
df.to_pickle(outputfile)
elif otype == 'excel':
df.to_excel(outputfile)
elif otype == 'html':
df.to_html(outputfile)
else:
df.to_csv(outputfile) | python | {
"resource": ""
} |
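A minimal call sketch; the input path is hypothetical and the default 'rpy2' method requires R and rpy2 to be installed:
# writes /data/survey.csv next to the input file
convert_sav('/data/survey.sav', otype='csv')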
q264482 | load_mask | validation | def load_mask(image, allow_empty=True):
"""Load a Nifti mask volume.
Parameters
----------
image: img-like object or boyle.nifti.NeuroImage or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
allow_empty: boolean, optional
Allow loading an empty mask (full of 0 values)
Returns
-------
nibabel.Nifti1Image with boolean data.
"""
img = check_img(image, make_it_3d=True)
values = np.unique(img.get_data())
if len(values) == 1:
# We accept a single value if it is not 0 (full true mask).
if values[0] == 0 and not allow_empty:
raise ValueError('Given mask is invalid because it masks all data')
elif len(values) == 2:
# If there are 2 different values, one of them must be 0 (background)
if 0 not in values:
raise ValueError('Background of the mask must be represented with 0.'
' Given mask contains: {}.'.format(values))
elif len(values) != 2:
# If there are more than 2 values, the mask is invalid
raise ValueError('Given mask is not made of 2 values: {}. '
'Cannot interpret as true or false'.format(values))
return nib.Nifti1Image(as_ndarray(get_img_data(img), dtype=bool), img.get_affine(), img.get_header()) | python | {
"resource": ""
} |
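A short usage sketch; the mask path is hypothetical:
mask_img = load_mask('/data/std/brain_mask.nii.gz')
print(mask_img.get_data().dtype)  # bool
print(mask_img.shape)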
q264483 | load_mask_data | validation | def load_mask_data(image, allow_empty=True):
"""Load a Nifti mask volume and return its data matrix as boolean and affine.
Parameters
----------
image: img-like object or boyle.nifti.NeuroImage or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
allow_empty: boolean, optional
Allow loading an empty mask (full of 0 values)
Returns
-------
numpy.ndarray with dtype==bool, numpy.ndarray of affine transformation
"""
mask = load_mask(image, allow_empty=allow_empty)
return get_img_data(mask), mask.get_affine() | python | {
"resource": ""
} |
q264484 | union_mask | validation | def union_mask(filelist):
"""
Creates a binarised mask with the union of the files in filelist.
Parameters
----------
filelist: list of img-like object or boyle.nifti.NeuroImage or str
List of paths to the volume files containing the ROIs.
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
Returns
-------
ndarray of bools
Mask volume
Raises
------
ValueError
"""
firstimg = check_img(filelist[0])
mask = np.zeros_like(firstimg.get_data())
# create space for all features and read from subjects
try:
for volf in filelist:
roiimg = check_img(volf)
check_img_compatibility(firstimg, roiimg)
mask += get_img_data(roiimg)
except Exception as exc:
raise ValueError('Error joining mask {} and {}.'.format(repr_imgs(firstimg), repr_imgs(volf))) from exc
else:
return as_ndarray(mask > 0, dtype=bool) | python | {
"resource": ""
} |
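A hedged example joining two ROI files into a single boolean volume; the paths are made up and all files must share shape and affine:
rois = ['/data/rois/amygdala.nii.gz', '/data/rois/hippocampus.nii.gz']
joint = union_mask(rois)
print(joint.dtype, joint.sum())  # bool, number of voxels in the union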
q264485 | apply_mask | validation | def apply_mask(image, mask_img):
"""Read a Nifti file nii_file and a mask Nifti file.
Returns the voxels in nii_file that are within the mask and the mask
indices.
Parameters
----------
image: img-like object or boyle.nifti.NeuroImage or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
mask_img: img-like object or boyle.nifti.NeuroImage or str
3D mask array: True where a voxel should be used.
See img description.
Returns
-------
vol[mask_indices], mask_indices
Note
----
nii_file and mask_file must have the same shape.
Raises
------
NiftiFilesNotCompatible, ValueError
"""
img = check_img(image)
mask = check_img(mask_img)
check_img_compatibility(img, mask)
vol = img.get_data()
mask_data, _ = load_mask_data(mask)
return vol[mask_data], mask_data | python | {
"resource": ""
} |
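A usage sketch; the image and mask paths are hypothetical and must have the same 3D shape:
values, mask_data = apply_mask('/data/subj01/gm.nii.gz', '/data/masks/brain.nii.gz')
print(values.shape)     # (number of voxels inside the mask,)
print(mask_data.sum())  # the same count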
q264486 | apply_mask_4d | validation | def apply_mask_4d(image, mask_img): # , smooth_mm=None, remove_nans=True):
"""Read a Nifti file nii_file and a mask Nifti file.
Extract the signals in nii_file that are within the mask, together with
the mask data.
Parameters
----------
image: img-like object or boyle.nifti.NeuroImage or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
mask_img: img-like object or boyle.nifti.NeuroImage or str
3D mask array: True where a voxel should be used.
See img description.
smooth_mm: float #TBD
(optional) The size in mm of the FWHM Gaussian kernel to smooth the signal.
If True, remove_nans is True.
remove_nans: bool #TBD
If remove_nans is True (default), the non-finite values (NaNs and
infs) found in the images will be replaced by zeros.
Returns
-------
session_series, mask_data
session_series: numpy.ndarray
2D array of series with shape (voxel number, image number)
Note
----
nii_file and mask_file must have the same shape.
Raises
------
FileNotFound, NiftiFilesNotCompatible
"""
img = check_img(image)
mask = check_img(mask_img)
check_img_compatibility(img, mask, only_check_3d=True)
vol = get_data(img)
series, mask_data = _apply_mask_to_4d_data(vol, mask)
return series, mask_data | python | {
"resource": ""
} |
q264487 | vector_to_volume | validation | def vector_to_volume(arr, mask, order='C'):
"""Transform a given vector to a volume. This is a reshape function for
3D flattened and maybe masked vectors.
Parameters
----------
arr: np.array
1-Dimensional array
mask: numpy.ndarray
Mask image. Must have 3 dimensions, bool dtype.
Returns
-------
np.ndarray
"""
if mask.dtype != np.bool:
raise ValueError("mask must be a boolean array")
if arr.ndim == 2 and any(v == 1 for v in arr.shape):
log.debug('Got an array of shape {}, flattening for my purposes.'.format(arr.shape))
arr = arr.flatten()
if arr.ndim != 1:
raise ValueError("vector must be a 1-dimensional array")
volume = np.zeros(mask.shape[:3], dtype=arr.dtype, order=order)
volume[mask] = arr
return volume | python | {
"resource": ""
} |
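A round trip with apply_mask, mapping the masked vector back onto the 3D grid; file paths are hypothetical:
values, mask_data = apply_mask('/data/subj01/gm.nii.gz', '/data/masks/brain.nii.gz')
vol = vector_to_volume(values, mask_data)
print(vol.shape == mask_data.shape)  # True; voxels outside the mask stay 0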
q264488 | matrix_to_4dvolume | validation | def matrix_to_4dvolume(arr, mask, order='C'):
"""Transform a given vector to a volume. This is a reshape function for
4D flattened masked matrices where the second dimension of the matrix
corresponds to the original 4th dimension.
Parameters
----------
arr: numpy.array
2D numpy.array
mask: numpy.ndarray
Mask image. Must have 3 dimensions, bool dtype.
order: {'C', 'F'}, optional
Memory layout of the output array, passed to numpy.zeros.
Returns
-------
data: numpy.ndarray
Unmasked data.
Shape: (mask.shape[0], mask.shape[1], mask.shape[2], X.shape[1])
"""
if mask.dtype != np.bool:
raise ValueError("mask must be a boolean array")
if arr.ndim != 2:
raise ValueError("X must be a 2-dimensional array")
if mask.sum() != arr.shape[0]:
# raise an error if the shape of arr is not what expected
raise ValueError('Expected arr of shape ({}, samples). Got {}.'.format(mask.sum(), arr.shape))
data = np.zeros(mask.shape + (arr.shape[1],), dtype=arr.dtype,
order=order)
data[mask, :] = arr
return data | python | {
"resource": ""
} |
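The matching round trip for 4D data, assuming a 4D functional image and a 3D mask at hypothetical paths:
series, mask_data = apply_mask_4d('/data/subj01/rest.nii.gz', '/data/masks/brain.nii.gz')
data4d = matrix_to_4dvolume(series, mask_data)
print(data4d.shape)  # (x, y, z, number of volumes)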
q264489 | niftilist_mask_to_array | validation | def niftilist_mask_to_array(img_filelist, mask_file=None, outdtype=None):
"""From the list of absolute paths to nifti files, creates a Numpy array
with the masked data.
Parameters
----------
img_filelist: list of str
List of absolute file paths to nifti files. All nifti files must have
the same shape.
mask_file: str
Path to a Nifti mask file.
Should be the same shape as the files in nii_filelist.
outdtype: dtype
Type of the elements of the array, if not set will obtain the dtype from
the first nifti file.
Returns
-------
outmat:
Numpy array of shape (N, number of voxels in the mask) containing the
masked data of the N files as flat vectors.
mask_data:
Boolean numpy array with the mask volume, useful for mapping the row
vectors back into volumes.
"""
img = check_img(img_filelist[0])
if not outdtype:
outdtype = img.dtype
mask_data, _ = load_mask_data(mask_file)
indices = np.where(mask_data)
mask = check_img(mask_file)
outmat = np.zeros((len(img_filelist), np.count_nonzero(mask_data)),
dtype=outdtype)
for i, img_item in enumerate(img_filelist):
img = check_img(img_item)
if not are_compatible_imgs(img, mask):
raise NiftiFilesNotCompatible(repr_imgs(img), repr_imgs(mask_file))
vol = get_img_data(img)
outmat[i, :] = vol[indices]
return outmat, mask_data | python | {
"resource": ""
} |
q264490 | create | validation | def create(_):
"""Create a client for Service Fabric APIs."""
endpoint = client_endpoint()
if not endpoint:
raise CLIError("Connection endpoint not found. "
"Before running sfctl commands, connect to a cluster using "
"the 'sfctl cluster select' command.")
no_verify = no_verify_setting()
if security_type() == 'aad':
auth = AdalAuthentication(no_verify)
else:
cert = cert_info()
ca_cert = ca_cert_info()
auth = ClientCertAuthentication(cert, ca_cert, no_verify)
return ServiceFabricClientAPIs(auth, base_url=endpoint) | python | {
"resource": ""
} |
q264491 | DataFrame.aggregate | validation | def aggregate(self, clazz, new_col, *args):
"""
Aggregate the rows of the DataFrame into a single value.
:param clazz: name of a class that extends class Callable
:type clazz: class
:param new_col: name of the new column
:type new_col: str
:param args: list of column names of the object that function
should be applied to
:type args: tuple
:return: returns a new dataframe object with the aggregated value
:rtype: DataFrame
"""
if is_callable(clazz) and not is_none(new_col) and has_elements(*args):
return self.__do_aggregate(clazz, new_col, *args) | python | {
"resource": ""
} |
q264492 | group | validation | def group(*args):
"""
Pipeable grouping method.
Takes either
- a dataframe and a tuple of strings for grouping,
- a tuple of strings if a dataframe has already been piped into.
:Example:
group(dataframe, "column")
:Example:
dataframe >> group("column")
:param args: tuple of arguments
:type args: tuple
:return: returns a grouped dataframe object
:rtype: GroupedDataFrame
"""
if args and isinstance(args[0], dataframe.DataFrame):
return args[0].group(*args[1:])
elif not args:
raise ValueError("No arguments provided")
else:
return pipeable.Pipeable(pipeable.PipingMethod.GROUP, *args) | python | {
"resource": ""
} |
q264493 | aggregate | validation | def aggregate(*args):
"""
Pipeable aggregation method.
Takes either
- a dataframe and a tuple of arguments required for aggregation,
- a tuple of arguments if a dataframe has already been piped into.
In any case one argument has to be a class that extends callable.
:Example:
aggregate(dataframe, Function, "new_col_name", "old_col_name")
:Example:
dataframe >> aggregate(Function, "new_col_name", "old_col_name")
:param args: tuple of arguments
:type args: tuple
:return: returns a dataframe object
:rtype: DataFrame
"""
if args and isinstance(args[0], dataframe.DataFrame):
return args[0].aggregate(args[1], args[2], *args[3:])
elif not args:
raise ValueError("No arguments provided")
else:
return pipeable.Pipeable(pipeable.PipingMethod.AGGREGATE, *args) | python | {
"resource": ""
} |
q264494 | subset | validation | def subset(*args):
"""
Pipeable subsetting method.
Takes either
- a dataframe and a tuple of arguments required for subsetting,
- a tuple of arguments if a dataframe has already been piped into.
:Example:
subset(dataframe, "column")
:Example:
dataframe >> subset("column")
:param args: tuple of arguments
:type args: tuple
:return: returns a dataframe object
:rtype: DataFrame
"""
if args and isinstance(args[0], dataframe.DataFrame):
return args[0].subset(*args[1:])
elif not args:
raise ValueError("No arguments provided")
else:
return pipeable.Pipeable(pipeable.PipingMethod.SUBSET, *args) | python | {
"resource": ""
} |
q264495 | modify | validation | def modify(*args):
"""
Pipeable modification method
Takes either
- a dataframe and a tuple of arguments required for modification,
- a tuple of arguments if a dataframe has already been piped into.
In any case one argument has to be a class that extends callable.
:Example:
modify(dataframe, Function, "new_col_name", "old_col_name")
:Example:
dataframe >> modify(Function, "new_col_name", "old_col_name")
:param args: tuple of arguments
:type args: tuple
:return: returns a dataframe object
:rtype: DataFrame
"""
if args and isinstance(args[0], dataframe.DataFrame):
return args[0].modify(args[1], args[2], *args[3:])
elif not args:
raise ValueError("No arguments provided")
else:
return pipeable.Pipeable(pipeable.PipingMethod.MODIFY, *args) | python | {
"resource": ""
} |
q264496 | _escape_char | validation | def _escape_char(c, escape_char=ESCAPE_CHAR):
"""Escape a single character"""
buf = []
for byte in c.encode('utf8'):
buf.append(escape_char)
buf.append('%X' % _ord(byte))
return ''.join(buf) | python | {
"resource": ""
} |
q264497 | escape | validation | def escape(to_escape, safe=SAFE, escape_char=ESCAPE_CHAR, allow_collisions=False):
"""Escape a string so that it only contains characters in a safe set.
Characters outside the safe list will be escaped with _%x_,
where %x is the hex value of the character.
If `allow_collisions` is True, occurrences of `escape_char`
in the input will not be escaped.
In this case, `unescape` cannot be used to reverse the transform
because occurrences of the escape char in the resulting string are ambiguous.
Only use this mode when:
1. collisions cannot occur or do not matter, and
2. unescape will never be called.
.. versionadded: 1.0
allow_collisions argument.
Prior to 1.0, behavior was the same as allow_collisions=False (default).
"""
if isinstance(to_escape, bytes):
# always work on text
to_escape = to_escape.decode('utf8')
if not isinstance(safe, set):
safe = set(safe)
if allow_collisions:
safe.add(escape_char)
elif escape_char in safe:
# escape char can't be in safe list
safe.remove(escape_char)
chars = []
for c in to_escape:
if c in safe:
chars.append(c)
else:
chars.append(_escape_char(c, escape_char))
return u''.join(chars) | python | {
"resource": ""
} |
q264498 | unescape | validation | def unescape(escaped, escape_char=ESCAPE_CHAR):
"""Unescape a string escaped with `escape`
escape_char must be the same as that used in the call to escape.
"""
if isinstance(escaped, bytes):
# always work on text
escaped = escaped.decode('utf8')
escape_pat = re.compile(re.escape(escape_char).encode('utf8') + b'([a-z0-9]{2})', re.IGNORECASE)
buf = escape_pat.subn(_unescape_char, escaped.encode('utf8'))[0]
return buf.decode('utf8') | python | {
"resource": ""
} |
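A round-trip sketch for the two helpers above, using the default safe set and escape character:
name = 'user@example.com'
safe_name = escape(name)            # characters outside the safe set become _XX hex codes
print(unescape(safe_name) == name)  # True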
q264499 | BaseBackend.can_send | validation | def can_send(self, user, notice_type):
"""
Determines whether this backend is allowed to send a notification to
the given user and notice_type.
"""
from notification.models import NoticeSetting
return NoticeSetting.for_user(user, notice_type, self.medium_id).send | python | {
"resource": ""
} |