docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
Authenticate the gmusicapi Musicmanager instance.
Parameters:
oauth_filename (str): The filename of the oauth credentials file to use/create for login.
Default: ``oauth``
uploader_id (str): A unique id as a MAC address (e.g. ``'00:11:22:33:AA:BB'``).
This should only be provided in cases where the def... | def login(self, oauth_filename="oauth", uploader_id=None):
cls_name = type(self).__name__
oauth_cred = os.path.join(os.path.dirname(OAUTH_FILEPATH), oauth_filename + '.cred')
try:
if not self.api.login(oauth_credentials=oauth_cred, uploader_id=uploader_id):
try:
self.api.perform_oauth(storage_fi... | 846,342 |
Download Google Music songs.
Parameters:
songs (list or dict): Google Music song dict(s).
template (str): A filepath which can include template patterns.
Returns:
A list of result dictionaries.
::
[
{'result': 'downloaded', 'id': song_id, 'filepath': downloaded[song_id]}, # downloaded
... | def download(self, songs, template=None):
if not template:
template = os.getcwd()
songnum = 0
total = len(songs)
results = []
errors = {}
pad = len(str(total))
for result in self._download(songs, template):
song_id = songs[songnum]['id']
songnum += 1
downloaded, error = result
if do... | 846,345 |
Configure root logger using a standard stream handler.
Args:
level (string, optional): lowest level to log to the console
Returns:
logging.RootLogger: root logger instance with attached handler | def configure_stream(level='WARNING'):
# get the root logger
root_logger = logging.getLogger()
# set the logger level to the same as will be used by the handler
root_logger.setLevel(level)
# customize formatter, align each column
template = "[%(asctime)s] %(name)-25s %(levelname)-8s %(mess... | 846,352 |
Create a Transcript based on the vep annotation
Args:
transcript_info (dict): A dict with vep info
Returns:
transcript (puzzle.models.Transcript): A Transcripts | def _get_vep_transcript(self, transcript_info):
transcript = Transcript(
hgnc_symbol = transcript_info.get('SYMBOL'),
transcript_id = transcript_info.get('Feature'),
ensembl_id = transcript_info.get('Gene'),
biotype = transcript_info.get('... | 846,392 |
Create a transcript based on the snpeff annotation
Args:
transcript_info (dict): A dict with snpeff info
Returns:
transcript (puzzle.models.Transcript): A Transcripts | def _get_snpeff_transcript(self, transcript_info):
transcript = Transcript(
hgnc_symbol = transcript_info.get('Gene_Name'),
transcript_id = transcript_info.get('Feature'),
ensembl_id = transcript_info.get('Gene_ID'),
biotype = transcript_i... | 846,393 |
Generates parsed VcfRecord objects.
Typically called in a for loop to process each vcf record in a
VcfReader. VcfReader must be opened in advanced and closed when
complete. Skips all headers.
Args:
qualified: When True, sample names are prefixed with file name
Retu... | def vcf_records(self, format_tags=None, qualified=False):
if qualified:
sample_names = self.qualified_sample_names
else:
sample_names = self.sample_names
for line in self._file_reader.read_lines():
if line.startswith("#"):
continue
... | 846,442 |
Extracts a single clip according to audioClipSpec.
Arguments:
audioClipSpec (AudioClipSpec): Clip specification
showLogs (bool): Show ffmpeg output | def _extractClipData(self, audioClipSpec, showLogs=False):
command = [self._ffmpegPath]
if not showLogs:
command += ['-nostats', '-loglevel', '0']
command += [
'-i', self._audioFilePath,
'-ss', '%.3f' % audioClipSpec.start,
'-t', '%.3f' ... | 846,489 |
Return a gemini query
Args:
name (str) | def gemini_query(self, query_id):
logger.debug("Looking for query with id {0}".format(query_id))
return self.query(GeminiQuery).filter_by(id=query_id).first() | 846,504 |
Add a user defined gemini query
Args:
name (str)
query (str) | def add_gemini_query(self, name, query):
logger.info("Adding query {0} with text {1}".format(name, query))
new_query = GeminiQuery(name=name, query=query)
self.session.add(new_query)
self.save()
return new_query | 846,505 |
Delete a gemini query
Args:
name (str) | def delete_gemini_query(self, query_id):
query_obj = self.gemini_query(query_id)
logger.debug("Delete query: {0}".format(query_obj.name_query))
self.session.delete(query_obj)
self.save() | 846,506 |
Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
variant_dict (dict): A variant dictionary
case_obj (puzzle.models.Case) | def _add_genotype_calls(self, variant_obj, variant_line, case_obj):
variant_line = variant_line.split('\t')
#if there is gt calls we have no individuals to add
if len(variant_line) > 8:
gt_format = variant_line[8].split(':')
for individual in case_obj.individuals... | 846,547 |
Add a case obj with individuals to adapter
Args:
case_obj (puzzle.models.Case) | def add_case(self, case_obj):
for ind_obj in case_obj.individuals:
self._add_individual(ind_obj)
logger.debug("Adding case {0} to plugin".format(case_obj.case_id))
self.case_objs.append(case_obj)
if case_obj.tabix_index:
logger.debug("Setting filters.can_... | 846,584 |
Return a Case object
If no case_id is given return one case
Args:
case_id (str): A case id
Returns:
A Case object | def case(self, case_id=None):
if case_id:
for case in self.case_objs:
if case.case_id == case_id:
return case
else:
if self.cases:
return list(self.case_objs)[0]
return Case(case_id='unknown') | 846,585 |
Return a individual object
Args:
ind_id (str): A individual id
Returns:
individual (puzzle.models.individual) | def individual(self, ind_id=None):
for ind_obj in self.individual_objs:
if ind_obj.ind_id == ind_id:
return ind_obj
return None | 846,586 |
Return information about individuals
Args:
ind_ids (list(str)): List of individual ids
Returns:
individuals (Iterable): Iterable with Individuals | def individuals(self, ind_ids=None):
if ind_ids:
for ind_id in ind_ids:
for ind in self.individual_objs:
if ind.ind_id == ind_id:
yield ind
else:
for ind in self.individual_objs:
yield ind | 846,587 |
Append sql to a gemini query
Args:
query(str): The gemini query
extra_info(str): The text that should be added
Return:
extended_query(str) | def build_gemini_query(self, query, extra_info):
if 'WHERE' in query:
return "{0} AND {1}".format(query, extra_info)
else:
return "{0} WHERE {1}".format(query, extra_info) | 846,602 |
Return a specific variant.
We solve this by building a gemini query and send it to _variants
Args:
case_id (str): Path to a gemini database
variant_id (int): A gemini variant id
Returns:
variant_obj (dict): A puzzle variant | def variant(self, case_id, variant_id):
#Use the gemini id for fast lookup
variant_id = int(variant_id)
gemini_query = "SELECT * from variants WHERE variant_id = {0}".format(
variant_id
)
individuals = []
# Get the individuals for the case
ca... | 846,604 |
Return variants found in the gemini database
Args:
case_id (str): The case for which we want to see information
gemini_query (str): What variants should be chosen
filters (dict): A dictionary with filters
Yields:
variant_obj (dict... | def _variants(self, case_id, gemini_query):
individuals = []
# Get the individuals for the case
case_obj = self.case(case_id)
for individual in case_obj.individuals:
individuals.append(individual)
self.db = case_obj.variant_source
self.variant_type =... | 846,605 |
Make a puzzle variant from a gemini variant
Args:
case_id (str): related case id
gemini_variant (GeminiQueryRow): The gemini variant
individual_objs (list(dict)): A list of Individuals
index(int): The index of the variant
Returns:... | def _format_variant(self, case_id, gemini_variant, individual_objs,
index=0, add_all_info=False):
chrom = gemini_variant['chrom']
if chrom.startswith('chr') or chrom.startswith('CHR'):
chrom = chrom[3:]
variant_dict = {
'CHROM':chrom,
... | 846,606 |
Check if the variant is a variation in any of the individuals
Args:
gemini_variant (GeminiQueryRow): The gemini variant
ind_objs (list(puzzle.models.individual)): A list of individuals to check
Returns:
bool : If any of the individuals has the variant | def _is_variant(self, gemini_variant, ind_objs):
indexes = (ind.ind_index for ind in ind_objs)
#Check if any individual have a heterozygous or homozygous variant call
for index in indexes:
gt_call = gemini_variant['gt_types'][index]
if (gt_call == 1 or gt_call =... | 846,607 |
Add the consequences found in all transcripts
Args:
variant_obj (puzzle.models.Variant) | def _add_consequences(self, variant_obj):
consequences = set()
for transcript in variant_obj.transcripts:
for consequence in transcript.consequence.split('&'):
consequences.add(consequence)
variant_obj.consequences = list(consequences) | 846,670 |
Add the impact severity for the most severe consequence
Args:
variant_obj (puzzle.models.Variant)
gemini_variant (GeminiQueryRow) | def _add_impact_severity(self, variant_obj, gemini_variant):
gemini_impact = gemini_variant['impact_severity']
if gemini_impact == 'MED':
gemini_impact = 'MEDIUM'
variant_obj.impact_severity = gemini_impact | 846,671 |
Compare two song collections to find missing songs.
Parameters:
src_songs (list): Google Music song dicts or filepaths of local songs.
dest_songs (list): Google Music song dicts or filepaths of local songs.
Returns:
A list of Google Music song dicts or local song filepaths from source missing in destination. | def compare_song_collections(src_songs, dst_songs):
def gather_field_values(song):
return tuple((_normalize_metadata(song[field]) for field in _filter_comparison_fields(song)))
dst_songs_criteria = {gather_field_values(_normalize_song(dst_song)) for dst_song in dst_songs}
return [src_song for src_song in src_... | 846,680 |
Get filepaths with supported extensions from given filepaths.
Parameters:
filepaths (list or str): Filepath(s) to check.
supported_extensions (tuple or str): Supported file extensions or a single file extension.
max_depth (int): The depth in the directory tree to walk.
A depth of '0' limits the walk to the... | def get_supported_filepaths(filepaths, supported_extensions, max_depth=float('inf')):
supported_filepaths = []
for path in filepaths:
if os.name == 'nt' and CYGPATH_RE.match(path):
path = convert_cygwin_path(path)
if os.path.isdir(path):
for root, __, files in walk_depth(path, max_depth):
for f in ... | 846,681 |
Exclude file paths based on regex patterns.
Parameters:
filepaths (list or str): Filepath(s) to check.
exclude_patterns (list): Python regex patterns to check filepaths against.
Returns:
A list of filepaths to include and a list of filepaths to exclude. | def exclude_filepaths(filepaths, exclude_patterns=None):
if not exclude_patterns:
return filepaths, []
exclude_re = re.compile("|".join(pattern for pattern in exclude_patterns))
included_songs = []
excluded_songs = []
for filepath in filepaths:
if exclude_patterns and exclude_re.search(filepath):
excl... | 846,682 |
Generate a filename for a song based on metadata.
Parameters:
metadata (dict): A metadata dict.
Returns:
A filename. | def get_suggested_filename(metadata):
if metadata.get('title') and metadata.get('track_number'):
suggested_filename = '{track_number:0>2} {title}'.format(**metadata)
elif metadata.get('title') and metadata.get('trackNumber'):
suggested_filename = '{trackNumber:0>2} {title}'.format(**metadata)
elif metadata.ge... | 846,687 |
Create directory structure and file name based on metadata template.
Parameters:
template (str): A filepath which can include template patterns as defined by :param template_patterns:.
metadata (dict): A metadata dict.
template_patterns (dict): A dict of ``pattern: field`` pairs used to replace patterns with ... | def template_to_filepath(template, metadata, template_patterns=None):
if template_patterns is None:
template_patterns = TEMPLATE_PATTERNS
metadata = metadata if isinstance(metadata, dict) else _mutagen_fields_to_single_value(metadata)
assert isinstance(metadata, dict)
suggested_filename = get_suggested_filen... | 846,689 |
Walk a directory tree with configurable depth.
Parameters:
path (str): A directory path to walk.
max_depth (int): The depth in the directory tree to walk.
A depth of '0' limits the walk to the top directory.
Default: No limit. | def walk_depth(path, max_depth=float('inf')):
start_level = os.path.abspath(path).count(os.path.sep)
for dir_entry in os.walk(path):
root, dirs, _ = dir_entry
level = root.count(os.path.sep) - start_level
yield dir_entry
if level >= max_depth:
dirs[:] = [] | 846,690 |
Check what kind of file variant source is
Args:
variant_source (str): Path to variant source
Returns:
file_type (str): 'vcf', 'gemini' or 'unknown' | def get_file_type(variant_source):
file_type = 'unknown'
valid_vcf_suffixes = ('.vcf', '.vcf.gz')
if variant_source:
logger.debug("Check file type with file: {0}".format(variant_source))
if variant_source.endswith('.db'):
file_type = 'gemini'
logger.debug("File {... | 846,726 |
Try to find out what type of variants that exists in a variant source
Args:
variant_source (str): Path to variant source
source_mode (str): 'vcf' or 'gemini'
Returns:
variant_type (str): 'sv' or 'snv' | def get_variant_type(variant_source):
file_type = get_file_type(variant_source)
variant_type = 'sv'
if file_type == 'vcf':
variants = VCF(variant_source)
elif file_type == 'gemini':
variants = GeminiQuery(variant_source)
gemini_query = "SELECT * from variants"
varian... | 846,727 |
Recognizes and claims Strelka VCFs form the set of all input VCFs.
Each defined caller has a chance to evaluate and claim all the incoming
files as something that it can process.
Args:
file_readers: the collection of currently unclaimed files
Returns:
A tuple o... | def claim(self, file_readers):
(prefix_to_reader,
unclaimed_readers) = self._find_strelka_files(file_readers)
prefix_by_patients = self._split_prefix_by_patient(prefix_to_reader)
self._validate_vcf_readers(prefix_by_patients)
vcf_readers = self._create_vcf_readers(prefi... | 846,797 |
Add a frequency that will be displayed on the variant level
Args:
name (str): The name of the frequency field | def add_frequency(self, name, value):
logger.debug("Adding frequency {0} with value {1} to variant {2}".format(
name, value, self['variant_id']))
self['frequencies'].append({'label': name, 'value': value}) | 846,825 |
Set the max frequency for the variant
If max_freq use this, otherwise go through all frequencies and
set the highest as self['max_freq']
Args:
max_freq (float): The max frequency | def set_max_freq(self, max_freq=None):
if max_freq:
self['max_freq'] = max_freq
else:
for frequency in self['frequencies']:
if self['max_freq']:
if frequency['value'] > self['max_freq']:
self['max_freq'] = frequ... | 846,826 |
Add a severity to the variant
Args:
name (str): The name of the severity
value : The value of the severity | def add_severity(self, name, value):
logger.debug("Adding severity {0} with value {1} to variant {2}".format(
name, value, self['variant_id']))
self['severities'].append({name: value}) | 846,827 |
Add the information for a individual
This adds a genotype dict to variant['individuals']
Args:
genotype (dict): A genotype dictionary | def add_individual(self, genotype):
logger.debug("Adding genotype {0} to variant {1}".format(
genotype, self['variant_id']))
self['individuals'].append(genotype) | 846,828 |
Add the information transcript
This adds a transcript dict to variant['transcripts']
Args:
transcript (dict): A transcript dictionary | def add_transcript(self, transcript):
logger.debug("Adding transcript {0} to variant {1}".format(
transcript, self['variant_id']))
self['transcripts'].append(transcript) | 846,829 |
Add the information of a gene
This adds a gene dict to variant['genes']
Args:
gene (dict): A gene dictionary | def add_gene(self, gene):
logger.debug("Adding gene {0} to variant {1}".format(
gene, self['variant_id']))
self['genes'].append(gene) | 846,830 |
Add the information of a compound variant
This adds a compound dict to variant['compounds']
Args:
compound (dict): A compound dictionary | def add_compound(self, compound):
logger.debug("Adding compound {0} to variant {1}".format(
compound, self['variant_id']))
self['compounds'].append(compound) | 846,831 |
Generates parsed VcfRecord objects.
Typically called in a for loop to process each vcf record in a
VcfReader. VcfReader must be opened in advanced and closed when
complete. Skips all headers.
Args:
qualified: When True, sample names are prefixed with file name
Retu... | def vcf_records(self, qualified=False):
if qualified:
sample_names = self.qualified_sample_names
else:
sample_names = self.sample_names
for line in self._file_reader.read_lines():
if line.startswith("#"):
continue
yield Vc... | 846,852 |
Adds new info field (flag or key=value pair).
Args:
field: String flag (e.g. "SOMATIC") or key-value ("NEW_DP=42")
Raises:
KeyError: if info field already exists | def add_info_field(self, field):
if field in self.info_dict:
msg = "New info field [{}] already exists.".format(field)
raise KeyError(msg)
if "=" in field:
key, value = field.split("=")
self.info_dict[key] = value
else:
self.i... | 846,860 |
Appends a new format tag-value for all samples.
Args:
tag_name: string tag name; must not already exist
new_sample
Raises:
KeyError: if tag_name to be added already exists | def add_sample_tag_value(self, tag_name, new_sample_values):
if tag_name in self.format_tags:
msg = "New format value [{}] already exists.".format(tag_name)
raise KeyError(msg)
if not self._samples_match(new_sample_values):
raise KeyError("Sample name values... | 846,867 |
Add all transcripts for a variant
Go through all transcripts found for the variant
Args:
gemini_variant (GeminiQueryRow): The gemini variant
Yields:
transcript (puzzle.models.Transcript) | def _add_transcripts(self, variant_obj, gemini_variant):
query = "SELECT * from variant_impacts WHERE variant_id = {0}".format(
gemini_variant['variant_id']
)
gq = GeminiQuery(self.db)
gq.run(query)
for gemini_transcript in gq:
transcrip... | 846,872 |
Return a specific variant.
Args:
case_id (str): Path to vcf file
variant_id (str): A variant id
Returns:
variant (Variant): The variant object for the given id | def variant(self, case_id, variant_id):
case_obj = self.case(case_id=case_id)
vcf_file_path = case_obj.variant_source
self.head = get_header(vcf_file_path)
self.vep_header = self.head.vep_columns
self.snpeff_header = self.head.snpeff_columns
handle = VCF(vcf_fi... | 846,960 |
Check if variants follows the filters
This function will try to make filters faster for the vcf adapter
Args:
vcf_file_path(str): Path to vcf
filters (dict): A dictionary with filters
Yields:
varian_line (str): A vcf variant line | def _get_filtered_variants(self, vcf_file_path, filters={}):
genes = set()
consequences = set()
sv_types = set()
if filters.get('gene_ids'):
genes = set([gene_id.strip() for gene_id in filters['gene_ids']])
if filters.get('consequence'):
conseq... | 846,962 |
Return a Variant object
Format variant make a variant that includes enough information for
the variant view.
If add_all_info then all transcripts will be parsed
Args:
variant (cython2.Variant): A variant object
index (int): The index of the variant
c... | def _format_variants(self, variant, index, case_obj, add_all_info=False):
header_line = self.head.header
# Get the individual ids for individuals in vcf file
vcf_individuals = set([ind_id for ind_id in self.head.individuals])
#Create a info dict:
info_dict = dict(varian... | 846,963 |
Add the genes for a variant
Get the hgnc symbols from all transcripts and add them
to the variant
Args:
variant (dict): A variant dictionary
Returns:
genes (list): A list of Genes | def _get_genes(self, variant):
ensembl_ids = []
hgnc_symbols = []
for transcript in variant.transcripts:
if transcript.ensembl_id:
ensembl_ids.append(transcript.ensembl_id)
if transcript.hgnc_symbol:
hgnc_symbols.append(tr... | 847,000 |
Add the neccesary sv coordinates for a variant
Args:
variant (puzzle.models.variant) | def _add_sv_coordinates(self, variant):
variant.stop_chrom = variant.CHROM
variant.start = int(variant.POS)
# If we have a translocation:
if ':' in variant.ALT:
other_coordinates = variant.ALT.strip('ACGTN[]').split(':')
variant.stop_chrom = othe... | 847,001 |
Initialize a vcf adapter.
When instansiating all cases are found.
Args:
variant_type(str) : 'snv' or 'sv' | def __init__(self, variant_type='snv'):
super(VcfPlugin, self).__init__()
self.individual_objs = []
self.case_objs = []
self.variant_type = variant_type
logger.info("Setting variant type to {0}".format(variant_type))
self.variant_columns = ['CHROM', 'POS', 'ID... | 847,002 |
Parse the header and return a header object
Args:
vcf_file_path(str): Path to vcf
Returns:
head: A HeaderParser object | def get_header(vcf_file_path):
logger.info("Parsing header of file {0}".format(vcf_file_path))
head = HeaderParser()
handle = get_vcf_handle(infile=vcf_file_path)
# Parse the header
for line in handle:
line = line.rstrip()
if line.startswith('#'):
if line.startswith(... | 847,021 |
Calculate the 2D integral of the 1D surface brightness profile
(i.e, the flux) between rmin and rmax (elliptical radii).
Parameters:
-----------
rmin : minimum integration radius (deg)
rmax : maximum integration radius (deg)
Returns:
--------
integral ... | def integrate(self, rmin=0, rmax=np.inf):
if rmin < 0: raise Exception('rmin must be >= 0')
integrand = lambda r: self._pdf(r) * 2*np.pi * r
return scipy.integrate.quad(integrand,rmin,rmax,full_output=True,epsabs=0)[0] | 847,034 |
Calculate Jenks natural breaks.
Adapted from http://danieljlewis.org/files/2010/06/Jenks.pdf
Credit: Daniel Lewis
Arguments:
data -- Array of values to classify.
num_breaks -- Number of breaks to perform. | def jenks(data, num_breaks):
data = numpy.ma.compressed(data)
if len(data) > 1000:
data.sort()
ls = numpy.linspace(0, len(data)-1, 1000)
ls = [int(round(x)) for x in ls]
data_list = data[ls]
else:
data_list = data
data_list.sort()
mat1 = []
for i i... | 847,171 |
Calculate quantile breaks.
Arguments:
data -- Array of values to classify.
num_breaks -- Number of breaks to perform. | def quantile(data, num_breaks):
def scipy_mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()):
def _quantiles1D(data,m,p):
x = numpy.sort(data.compressed())
n = len(x)
if n == 0:
return numpy.ma.array(numpy.empt... | 847,172 |
Calculate equal interval breaks.
Arguments:
data -- Array of values to classify.
num_breaks -- Number of breaks to perform. | def equal(data, num_breaks):
step = (numpy.amax(data) - numpy.amin(data)) / num_breaks
return numpy.linspace(numpy.amin(data) + step, numpy.amax(data), num_breaks) | 847,173 |
Read a generic input file into a recarray.
Accepted file formats: [.fits,.fz,.npy,.csv,.txt,.dat]
Parameters:
filename : input file name
kwargs : keyword arguments for the reader
Returns:
recarray : data array | def read(filename,**kwargs):
base,ext = os.path.splitext(filename)
if ext in ('.fits','.fz'):
# Abstract fits here...
return fitsio.read(filename,**kwargs)
elif ext in ('.npy'):
return np.load(filename,**kwargs)
elif ext in ('.csv'):
return np.recfromcsv(filename,**k... | 847,196 |
Write a recarray to a specific format.
Accepted file formats: [.fits,.fz,.npy,.csv,.txt,.dat]
Parameters:
filename : output file name
data : the recarray data
kwargs : keyword arguments for the writer
Returns:
ret : writer return (usually None) | def write(filename,data,**kwargs):
base,ext = os.path.splitext(filename)
if ext in ('.fits','.fz'):
# Abstract fits here...
return fitsio.write(filename,data,**kwargs)
elif ext in ('.npy'):
return np.save(filename,data,**kwargs)
elif ext in ('.csv'):
return np.savetx... | 847,197 |
Write a catalog file of the likelihood region including
membership properties.
Parameters:
-----------
loglike : input loglikelihood object
filename : output filename
Returns:
--------
None | def write_membership(loglike,filename):
ra,dec = gal2cel(loglike.catalog.lon,loglike.catalog.lat)
name_objid = loglike.config['catalog']['objid_field']
name_mag_1 = loglike.config['catalog']['mag_1_field']
name_mag_2 = loglike.config['catalog']['mag_2_field']
name_mag_err_1 = loglike.... | 847,204 |
Take the value from a two-dimensional histogram from the bin corresponding to (x, y).
Parameters:
-----------
histogram : The values in the histogram (n,m) (ADW: is this ordering right?)
x : the x-value to take from the hist
y : the y-value to take from the hist
bins_x : the xbin edges, includi... | def take2D(histogram, x, y, bins_x, bins_y):
histogram = np.array(histogram)
if np.isscalar(x):
x = [x]
if np.isscalar(y):
y = [y]
bins_x[-1] += 1.e-10 * (bins_x[-1] - bins_x[-2]) # Numerical stability
bins_y[-1] += 1.e-10 * (bins_y[-1] - bins_y[-2])
#return np.take(h... | 847,205 |
Numerical Riemannn integral of the IMF (stupid simple).
Parameters:
-----------
mass_min: minimum mass bound for integration (solar masses)
mass_max: maximum mass bound for integration (solar masses)
log_mode[True]: use logarithmic steps in stellar mass as oppose to linear
... | def integrate(self, mass_min, mass_max, log_mode=True, weight=False, steps=1e4):
if log_mode:
d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
mass = 10.**log_mass
... | 847,320 |
New method.
Args:
cls (str): class name.
bases (tuple): base classes to inherit from.
dct (dict): class attributes.
Returns:
class: the new created class. | def __new__(mcs, cls, bases, dct):
super_new = super(_Metaclass, mcs).__new__
# Also ensure initialization is only performed for subclasses
# of AppSettings (excluding AppSettings class itself).
parents = [b for b in bases if isinstance(b, _Metaclass)]
if not parents:
... | 847,362 |
Return a setting object if it is in the ``_meta.settings`` dictionary.
Args:
item (str):
the name of the setting variable (not the setting's name).
Returns:
``Setting``: the setting object.
Raises:
AttributeError if the setting does not exis... | def __getattr__(cls, item):
if item in cls._meta.settings.keys():
return cls._meta.settings[item]
raise AttributeError("'%s' class has no attribute '%s'" % (cls.__name__, item)) | 847,363 |
r"""Wrap a string (tyically a regex) with a prefix and suffix (usually a nonconuming word break)
Arguments:
prefix, suffix (str): strings to append to the front and back of the provided string
grouper (2-len str or 2-tuple): characters or strings to separate prefix and suffix from the middle
>>> wra... | def wrap(s, prefix=r'\b', suffix=r'\b', grouper='()'):
r
return ((prefix or '') + try_get(grouper, 0, '') + (s or '') +
try_get(grouper, 1, try_get(grouper, 0, '')) + (suffix or '')) | 847,449 |
Merge a list of Catalogs.
Parameters:
-----------
catalog_list : List of Catalog objects.
Returns:
--------
catalog : Combined Catalog object | def mergeCatalogs(catalog_list):
# Check the columns
for c in catalog_list:
if c.data.dtype.names != catalog_list[0].data.dtype.names:
msg = "Catalog data columns not the same."
raise Exception(msg)
data = np.concatenate([c.data for c in catalog_list])
config = catal... | 847,457 |
Class to store information about detected objects. This class
augments the raw data array with several aliases and derived
quantities.
Parameters:
-----------
config : Configuration object
roi : Region of Interest to load catalog data for
data : Dat... | def __init__(self, config, roi=None, data=None, filenames=None):
self.config = Config(config)
if data is None:
self._parse(roi,filenames)
else:
self.data = data
self._defineVariables() | 847,458 |
Write the current object catalog to FITS file.
Parameters:
-----------
filename : the FITS file to write.
clobber : remove existing file
kwargs : passed to fitsio.write
Returns:
--------
None | def write(self, outfile, clobber=True, **kwargs):
fitsio.write(outfile,self.data,clobber=True,**kwargs) | 847,463 |
Parse catalog FITS files into recarray.
Parameters:
-----------
roi : The region of interest; if 'roi=None', read all catalog files
Returns:
--------
None | def _parse(self, roi=None, filenames=None):
if (roi is not None) and (filenames is not None):
msg = "Cannot take both roi and filenames"
raise Exception(msg)
if roi is not None:
pixels = roi.getCatalogPixels()
filenames = self.config.getFilenames... | 847,464 |
Calculate the surface intensity for each pixel in the interior
region of the ROI. Pixels are adaptively subsampled around the
kernel centroid out to a radius of 'factor * max_pixrad'.
Parameters:
-----------
factor : the radius of the oversample region in units of max_pixrad
... | def calc_surface_intensity(self, factor=10):
# First we calculate the surface intensity at native resolution
pixels = self.roi.pixels_interior
nside_in = self.config['coords']['nside_pixel']
surface_intensity = self.kernel.pdf(pixels.lon,pixels.lat)
# Then we recalculat... | 847,555 |
Calculate the spatial signal probability for each catalog object.
Parameters:
-----------
None
Returns:
--------
u_spatial : array of spatial probabilities per object | def calc_signal_spatial(self):
# Calculate the surface intensity
self.surface_intensity_sparse = self.calc_surface_intensity()
# Calculate the probability per object-by-object level
self.surface_intensity_object = self.kernel.pdf(self.catalog.lon,
... | 847,556 |
Maximize the log-likelihood as a function of richness.
ADW 2018-06-04: Does it make sense to set the richness to the mle?
Parameters:
-----------
atol : absolute tolerence for conversion
maxiter : maximum number of iterations
Returns:
--------
loglike, ... | def fit_richness(self, atol=1.e-3, maxiter=50):
# Check whether the signal probability for all objects are zero
# This can occur for finite kernels on the edge of the survey footprint
if np.isnan(self.u).any():
logger.warning("NaN signal probability found")
retur... | 847,557 |
Write a catalog file of the likelihood region including
membership properties.
Parameters:
-----------
filename : output filename
Returns:
--------
None | def write_membership(self,filename):
# Column names
name_objid = self.config['catalog']['objid_field']
name_mag_1 = self.config['catalog']['mag_1_field']
name_mag_2 = self.config['catalog']['mag_2_field']
name_mag_err_1 = self.config['catalog']['mag_err_1_field']
... | 847,559 |
Decorator that stores the result of the stored function in the
user's results cache until the batch completes. Keyword arguments are
not yet supported.
Arguments:
func (callable(*a)): The function whose results we want
to store. The positional arguments, ``a``, are u... | def memoise(cls, func):
@functools.wraps(func)
def f(*a):
for arg in a:
if isinstance(arg, User):
user = arg
break
else:
raise ValueError("One position argument must be a User")
func_k... | 847,565 |
def __call__(self, name, value):
    """Check that ``value`` is an instance of ``self.base_type``.

    Args:
        name (str): the value's name (used only in the error message).
        value (object): the value to check.

    Raises:
        ValueError: if ``value`` is not an instance of ``self.base_type``.
    """
    # Accept early when the type matches; otherwise report the mismatch.
    if isinstance(value, self.base_type):
        return
    raise ValueError("%s must be %s, not %s" % (name, self.base_type, value.__class__))
def __init__(self, minimum=None, maximum=None):
    """Initialize an integer checker with optional inclusive bounds.

    Args:
        minimum (int): a minimum value (included), or None for no lower bound.
        maximum (int): a maximum value (included), or None for no upper bound.
    """
    super(IntegerTypeChecker, self).__init__(base_type=int)
    self.maximum = maximum
    self.minimum = minimum
Call method.
Args:
name (str): the value's name.
value (int): the value to check.
Raises:
ValueError: if value is not type int.
ValueError: if value is less than minimum.
ValueError: if value is more than maximum. | def __call__(self, name, value):
super(IntegerTypeChecker, self).__call__(name, value)
if isinstance(self.minimum, int):
if value < self.minimum:
raise ValueError("%s must be greater or equal %s" % (name, self.minimum))
if isinstance(self.maximum, int):
... | 847,808 |
def __init__(self, minimum=None, maximum=None):
    """Initialize a float checker with optional inclusive bounds.

    Args:
        minimum (float): a minimum value (included), or None for no lower bound.
        maximum (float): a maximum value (included), or None for no upper bound.
    """
    super(FloatTypeChecker, self).__init__(base_type=float)
    self.maximum = maximum
    self.minimum = minimum
Call method.
Args:
name (str): the value's name.
value (float): the value to check.
Raises:
ValueError: if value is not type float.
ValueError: if value is less than minimum.
ValueError: if value is more than maximum. | def __call__(self, name, value):
super(FloatTypeChecker, self).__call__(name, value)
if isinstance(self.minimum, float):
if value < self.minimum:
raise ValueError("%s must be greater or equal %s" % (name, self.minimum))
if isinstance(self.maximum, float):
... | 847,810 |
def __init__(self, iter_type, item_type=None, min_length=None, max_length=None, empty=True):
    """Initialize an iterable checker.

    Args:
        iter_type (type): the type of the iterable object itself.
        item_type (type): the type of the items inside the object.
        min_length (int): a minimum length (included).
        max_length (int): a maximum length (included).
        empty (bool): presumably whether an empty iterable is allowed,
            as in the sibling checkers -- TODO confirm.
    """
    super(IterableTypeChecker, self).__init__(base_type=iter_type)
    # Stash the per-item and length constraints for use at call time.
    self.empty = empty
    self.max_length = max_length
    self.min_length = min_length
    self.item_type = item_type
Call method.
Args:
name (str): the value's name.
value (iterable): the value to check.
Raises:
ValueError: if value is not type iter_type.
ValueError: if any item in value is not type item_type.
ValueError: if value's length is less than min_... | def __call__(self, name, value):
super(IterableTypeChecker, self).__call__(name, value)
if isinstance(self.item_type, type):
if not all(isinstance(o, self.item_type) for o in value):
raise ValueError("All elements of %s must be %s" % (name, self.item_type))
i... | 847,812 |
def __init__(self, min_length=None, max_length=None, empty=True):
    """Initialize a string checker.

    Args:
        min_length (int): minimum length of the string (included).
        max_length (int): maximum length of the string (included).
        empty (bool): whether an empty string is allowed.
    """
    # A string is just an iterable of characters as far as checking goes.
    super(StringTypeChecker, self).__init__(
        iter_type=str,
        min_length=min_length,
        max_length=max_length,
        empty=empty,
    )
def __init__(self, item_type=None, min_length=None, max_length=None, empty=True):
    """Initialize a list checker.

    Args:
        item_type (type): the type of the items inside the list.
        min_length (int): minimum length of the list (included).
        max_length (int): maximum length of the list (included).
        empty (bool): whether an empty list is allowed.
    """
    super(ListTypeChecker, self).__init__(
        iter_type=list,
        item_type=item_type,
        min_length=min_length,
        max_length=max_length,
        empty=empty,
    )
def __init__(self, item_type=None, min_length=None, max_length=None, empty=True):
    """Initialize a set checker.

    Args:
        item_type (type): the type of the items inside the set.
        min_length (int): minimum length of the set (included).
        max_length (int): maximum length of the set (included).
        empty (bool): whether an empty set is allowed.
    """
    super(SetTypeChecker, self).__init__(
        iter_type=set,
        item_type=item_type,
        min_length=min_length,
        max_length=max_length,
        empty=empty,
    )
def __init__(self, item_type=None, min_length=None, max_length=None, empty=True):
    """Initialize a tuple checker.

    Args:
        item_type (type): the type of the items inside the tuple.
        min_length (int): minimum length of the tuple (included).
        max_length (int): maximum length of the tuple (included).
        empty (bool): whether an empty tuple is allowed.
    """
    super(TupleTypeChecker, self).__init__(
        iter_type=tuple,
        item_type=item_type,
        min_length=min_length,
        max_length=max_length,
        empty=empty,
    )
Initialization method.
Args:
key_type (type): the type of the dict keys.
value_type (type): the type of the dict values.
min_length (int): minimum length of the dict (included).
max_length (int): maximum length of the dict (included).
empty (bool): wh... | def __init__(self, key_type=None, value_type=None, min_length=None, max_length=None, empty=True):
super(DictTypeChecker, self).__init__(base_type=dict)
self.key_type = key_type
self.value_type = value_type
self.min_length = min_length
self.max_length = max_length
... | 847,817 |
def __init__(self, empty=True):
    """Initialize an object checker.

    Args:
        empty (bool): whether an empty value is allowed
            (forwarded unchanged to the parent checker).
    """
    super(ObjectTypeChecker, self).__init__(empty=empty)
def __call__(self, name, value):
    """Check ``value`` by delegating to the parent checker.

    Args:
        name (str): the value's name.
        value (str): the value to check.

    Raises:
        ValueError: if the parent checker rejects ``value``.
    """
    # NOTE(review): this override adds nothing beyond the parent's check;
    # it is kept only to preserve the existing interface.
    super(ObjectTypeChecker, self).__call__(name, value)
def sum_mags(mags, weights=None):
    """Sum an array of magnitudes in flux space.

    Parameters
    ----------
    mags : array of magnitudes
    weights : optional array of weights for each magnitude (i.e. from a pdf)

    Returns
    -------
    sum_mag : the summed magnitude of all the stars
    """
    # Convert magnitudes to fluxes, optionally weight, sum, convert back.
    flux = 10**(-np.asarray(mags) / 2.5)
    if weights is not None:
        flux = weights * flux
    return -2.5 * np.log10(np.sum(flux))
Compute the stellar mass (Msun; average per star). PDF comes
from IMF, but weight by actual stellar mass.
Parameters:
-----------
mass_min : Minimum mass to integrate the IMF
steps : Number of steps to sample the isochrone
Returns:
--------
mass :... | def stellar_mass(self, mass_min=0.1, steps=10000):
mass_max = self.mass_init_upper_bound
d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
mass = 10.**log_mass
if mass... | 847,851 |
Calculate the absolute visual magnitude (Mv) from the richness
by transforming the isochrone in the SDSS system and using the
g,r -> V transform equations from Jester 2005
[astro-ph/0506022].
Parameters:
-----------
richness : isochrone normalization parameter
s... | def absolute_magnitude(self, richness=1, steps=1e4):
# Using the SDSS g,r -> V from Jester 2005 [astro-ph/0506022]
# for stars with R-I < 1.15
# V = g_sdss - 0.59*(g_sdss - r_sdss) - 0.01
# Create a copy of the isochrone in the SDSS system
params = {k:v.value for k,v in... | 847,854 |
Simulate a set of stellar magnitudes (no uncertainty) for a
satellite of a given stellar mass and distance.
Parameters:
-----------
stellar_mass : the total stellar mass of the system (Msun)
distance_modulus : distance modulus of the system (if None takes from isochrone)
... | def simulate(self, stellar_mass, distance_modulus=None, **kwargs):
if distance_modulus is None: distance_modulus = self.distance_modulus
# Total number of stars in system
n = int(round(stellar_mass / self.stellar_mass()))
f_1 = scipy.interpolate.interp1d(self.mass_init, self.mag... | 847,856 |
Return a 2D histogram the isochrone in mag-mag space.
Parameters:
-----------
distance_modulus : distance modulus to calculate histogram at
delta_mag : magnitude bin size
mass_steps : number of steps to sample isochrone at
Returns:
--------
bins_mag_1 : ... | def histogram2d(self,distance_modulus=None,delta_mag=0.03,steps=10000):
if distance_modulus is not None:
self.distance_modulus = distance_modulus
# Isochrone will be binned, so might as well sample lots of points
mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_st... | 847,862 |
Calculate the separation between a specific point and the
isochrone in magnitude-magnitude space. Uses an interpolation
ADW: Could speed this up...
Parameters:
-----------
mag_1 : The magnitude of the test points in the first band
mag_2 : The magnitude of the test point... | def separation(self, mag_1, mag_2):
iso_mag_1 = self.mag_1 + self.distance_modulus
iso_mag_2 = self.mag_2 + self.distance_modulus
def interp_iso(iso_mag_1,iso_mag_2,mag_1,mag_2):
interp_1 = scipy.interpolate.interp1d(iso_mag_1,iso_mag_2,bounds_error=False)
... | 847,866 |
Aggregates the items that this user has purchased.
Arguments:
cart_status (int or Iterable(int)): etc
category (Optional[models.inventory.Category]): the category
of items to restrict to.
Returns:
[ProductAndQuantity, ...]: A list of product-quantity... | def _items(self, cart_status, category=None):
if not isinstance(cart_status, Iterable):
cart_status = [cart_status]
status_query = (
Q(productitem__cart__status=status) for status in cart_status
)
in_cart = Q(productitem__cart__user=self.user)
... | 847,881 |
def items_purchased(self, category=None):
    """Aggregate the items that this user has purchased.

    Arguments:
        category (Optional[models.inventory.Category]): the category
            of items to restrict to.

    Returns:
        [ProductAndQuantity, ...]: a list of product-quantity pairs,
            aggregating like products from paid carts.
    """
    paid_status = commerce.Cart.STATUS_PAID
    return self._items(paid_status, category=category)
Render a color map (image) of a matrix or sequence of Matrix objects
A color map is like a contour map except the "height" or "value" of each matrix element
is used to select a color from a continuous spectrum of colors (for heatmap white is max and red is medium)
Arguments:
mat (n... | def __init__(self, mat, **kwargs):
# try:
# self.colormaps = [ColorMap(m, cmap=cmap, pixelspervalue=pixelspervalue,
# minvalue=minvalue, maxvalue=maxvalue) for m in mat]
# except:
# pass
# # raise ValueError("Don't know how to di... | 848,326 |
Initialize a configuration object from a filename or a dictionary.
Provides functionality to merge with a default configuration.
Parameters:
config: filename, dict, or Config object (deep copied)
default: default configuration to merge
Returns:
config | def __init__(self, config, default=None):
self.update(self._load(default))
self.update(self._load(config))
self._formatFilepaths()
# For back-compatibility...
self.params = self
# Run some basic validation
# ADW: This should be run after creating filen... | 848,369 |
Load this config from an existing config
Parameters:
-----------
config : filename, config object, or dict to load
Returns:
--------
params : configuration parameters | def _load(self, config):
if isstring(config):
self.filename = config
params = yaml.load(open(config))
elif isinstance(config, Config):
# This is the copy constructor...
self.filename = config.filename
params = copy.deepcopy(config)
... | 848,370 |
Write a copy of this config object.
Parameters:
-----------
outfile : output filename
Returns:
--------
None | def write(self, filename):
ext = os.path.splitext(filename)[1]
writer = open(filename, 'w')
if ext == '.py':
writer.write(pprint.pformat(self))
elif ext == '.yaml':
writer.write(yaml.dump(self))
else:
writer.close()
raise E... | 848,373 |
Create a masked records array of all filenames for the given set of
pixels and store the existence of those files in the mask values.
Parameters:
-----------
None
Returns:
--------
recarray : pixels and mask value | def _createFilenames(self):
nside_catalog = self['coords']['nside_catalog']
npix = hp.nside2npix(nside_catalog)
pixels = np.arange(npix)
catalog_dir = self['catalog']['dirname']
catalog_base = self['catalog']['basename']
catalog_path = os.path.join(catalog_dir,c... | 848,375 |
def getFilenames(self, pixels=None):
    """Return the requested filenames.

    Parameters
    ----------
    pixels : requested pixels, or None to return all filenames

    Returns
    -------
    filenames : recarray
    """
    logger.debug("Getting filenames...")
    filenames = self.filenames
    if pixels is None:
        return filenames
    # Keep only the rows whose pixel index appears in the request.
    selection = np.in1d(filenames['pix'], pixels)
    return filenames[selection]
def d_grade_ipix(ipix, nside_in, nside_out, nest=False):
    """Return the indices of the super-pixels which contain each of the
    sub-pixels (nside_in > nside_out).

    Parameters
    ----------
    ipix : index of the input subpixels
    nside_in : nside of the input subpixels
    nside_out : nside of the desired superpixels
    nest : whether the pixel indices use the NESTED ordering

    Returns
    -------
    ipix_out : superpixel index containing each input subpixel
    """
    if nside_in == nside_out:
        return ipix
    if nside_out >= nside_in:
        raise ValueError("nside_out must be less than nside_in")
    # Map each subpixel to its center vector, then look that vector up
    # at the coarser resolution.
    vec = hp.pix2vec(nside_in, ipix, nest)
    return hp.vec2pix(nside_out, *vec, nest=nest)
Return the indices of sub-pixels (resolution nside_subpix) within
the super-pixel(s) (resolution nside_superpix).
Parameters:
-----------
ipix : index of the input superpixel(s)
nside_in : nside of the input superpixel
nside_out : nside of the desired subpixels
Returns:
-----... | def u_grade_ipix(ipix, nside_in, nside_out, nest=False):
if nside_in==nside_out: return ipix
if not (nside_in < nside_out):
raise ValueError("nside_in must be less than nside_out")
if nest: nest_ipix = ipix
else: nest_ipix = hp.ring2nest(nside_in, ipix)
factor = (nside_out//nside... | 848,388 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.