_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def parse_ensembl_genes(lines):
    """Yield parsed genes from an Ensembl biomart dump.

    The first line is expected to be a tab-separated header; every
    following line describes one gene. Mandatory columns are
    'Gene ID', 'Chromosome', 'Gene Start', 'Gene End' and 'HGNC symbol'.

    Args:
        lines(iterable(str)): An iterable with ensembl formated genes

    Yields:
        ensembl_gene(dict): A dictionary with the relevant information
    """
    LOG.info("Parsing ensembl genes from file")
    header = []
    for index, line in enumerate(lines):
        if index == 0:
            # The dump always starts with a header line
            header = line.rstrip().split('\t')
            continue
        # Every following line represents a single gene
        yield parse_ensembl_line(line, header)
def parse_ensembl_exons(lines):
    """Yield parsed exons from an Ensembl biomart dump.

    The first line is expected to be a tab-separated header; every
    following line describes one exon. Start and end coordinates are
    recalculated so that UTR regions are trimmed off terminal exons.

    Args:
        lines(iterable(str)): An iterable with ensembl formated exons

    Yields:
        dict: Relevant information for one exon

    Raises:
        ValueError: If the recalculated start lies after the end
    """
    LOG.debug("Parsing ensembl exons...")
    header = []
    for index, line in enumerate(lines):
        if index == 0:
            # The dump always starts with a header line
            header = line.rstrip().split('\t')
            continue
        exon_info = parse_ensembl_line(line, header)
        start = exon_info['exon_start']
        end = exon_info['exon_end']
        strand = exon_info['strand']
        # Recalculate start and stop, trimming UTRs off terminal exons.
        # The fallbacks (-1 / inf) guarantee the exon coordinate wins
        # whenever no UTR is annotated.
        if strand == 1:
            # highest position: start of exon or end of 5' UTR
            start = max(start, exon_info.get('utr_5_end') or -1)
            # lowest position: end of exon or start of 3' UTR
            end = min(end, exon_info.get('utr_3_start') or float('inf'))
        elif strand == -1:
            # highest position: start of exon or end of 3' UTR
            start = max(start, exon_info.get('utr_3_end') or -1)
            # lowest position: end of exon or start of 5' UTR
            end = min(end, exon_info.get('utr_5_start') or float('inf'))
        chrom = exon_info['chrom']
        exon_id = "-".join([chrom, str(start), str(end)])
        if start > end:
            raise ValueError("ERROR: %s" % exon_id)
        yield {
            "exon_id": exon_id,
            "chrom": chrom,
            "start": start,
            "end": end,
            "transcript": exon_info['ensembl_transcript_id'],
            "gene": exon_info['ensembl_gene_id'],
            "rank": exon_info['exon_rank'],
        }
def parse_ensembl_exon_request(result):
    """Parse a dataframe with ensembl exon information

    Args:
        result(pandas.DataFrame): Columns as produced by the Ensembl
            biomart exon query (chromosome, gene/transcript/exon ids,
            exon coordinates, UTR coordinates, strand and exon rank)

    Yields:
        ensembl_info(dict): One dictionary per exon with recalculated
            'start' and 'end' (UTR regions trimmed from terminal exons)
    """
    keys = [
        'chrom',
        'gene',
        'transcript',
        'exon_id',
        'exon_chrom_start',
        'exon_chrom_end',
        '5_utr_start',
        '5_utr_end',
        '3_utr_start',
        '3_utr_end',
        'strand',
        'rank'
    ]
    for res in zip(result['Chromosome/scaffold name'],
                   result['Gene stable ID'],
                   result['Transcript stable ID'],
                   result['Exon stable ID'],
                   result['Exon region start (bp)'],
                   result['Exon region end (bp)'],
                   result["5' UTR start"],
                   result["5' UTR end"],
                   result["3' UTR start"],
                   result["3' UTR end"],
                   result["Strand"],
                   result["Exon rank in transcript"]):
        ensembl_info = dict(zip(keys, res))
        # BUG FIX: default to the raw exon coordinates so start/end are
        # always defined, even for an unexpected strand value (the
        # original raised NameError when strand was neither 1 nor -1).
        start = ensembl_info['exon_chrom_start']
        end = ensembl_info['exon_chrom_end']
        # Recalculate start and stop (taking UTR regions into account for
        # end exons). The fallbacks (-1 / inf) make sure the exon
        # coordinate wins when no UTR is annotated.
        if ensembl_info['strand'] == 1:
            # highest position: start of exon or end of 5' UTR
            start = max(start, ensembl_info['5_utr_end'] or -1)
            # lowest position: end of exon or start of 3' UTR
            end = min(end, ensembl_info['3_utr_start'] or float('inf'))
        elif ensembl_info['strand'] == -1:
            # highest position: start of exon or end of 3' UTR
            start = max(start, ensembl_info['3_utr_end'] or -1)
            # lowest position: end of exon or start of 5' UTR
            end = min(end, ensembl_info['5_utr_start'] or float('inf'))
        ensembl_info['start'] = start
        ensembl_info['end'] = end
        yield ensembl_info
def init_log(logger, filename=None, loglevel=None):
    """Initialize a logger with the project's standard format.

    Arguments:
        logger (logging.Logger): The logger to configure.
        filename (str): Path to a log file. Or None if logging is to
            be disabled.
        loglevel (str): Determines the level of the log output.
    """
    formatter = logging.Formatter(
        '[%(asctime)s] %(levelname)-8s: %(name)-25s: %(message)s'
    )
    if loglevel:
        logger.setLevel(getattr(logging, loglevel))
    # Warnings and higher always go to stderr
    console = logging.StreamHandler()
    console.setLevel('WARNING')
    console.setFormatter(formatter)
    if filename:
        file_handler = logging.FileHandler(filename, encoding='utf-8')
        if loglevel:
            file_handler.setLevel(getattr(logging, loglevel))
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    else:
        # No logfile: everything the user asked for goes to stderr
        if loglevel:
            console.setLevel(getattr(logging, loglevel))
        logger.addHandler(console)
def parse_omim_line(line, header):
    """Map a tab-separated OMIM line onto the given header columns."""
    return dict(zip(header, line.split('\t')))
def parse_omim_morbid(lines):
    """Yield parsed entries from an OMIM morbidmap file.

    Comment lines start with '#'; the header is expected among the first
    ten lines as a line starting with '# Phenotype'. All non-comment
    lines are parsed against that header.
    """
    header = []
    for i, line in enumerate(lines):
        line = line.rstrip()
        if not line.startswith('#'):
            yield parse_omim_line(line, header)
        elif i < 10 and line.startswith('# Phenotype'):
            header = line[2:].split('\t')
def get_mim_phenotypes(genemap_lines):
    """Get a dictionary with phenotypes

    Use the mim numbers for phenotypes as keys and phenotype information
    as values.

    Args:
        genemap_lines(iterable(str))

    Returns:
        phenotypes_found(dict): A dictionary with mim_numbers as keys and
            dictionaries with phenotype information as values.

            {
                'description': str, # Description of the phenotype
                'hgnc_symbols': set(), # Associated hgnc symbols
                'inheritance': set(),  # Associated phenotypes
                'mim_number': int, # mim number of phenotype
            }
    """
    phenotypes_found = {}
    # genemap2 holds one entry per gene; each entry lists the phenotypes
    # the gene is associated with, including inheritance patterns. From
    # this we collect the inheritance and the hgnc symbols per phenotype.
    for entry in parse_genemap2(genemap_lines):
        hgnc_symbol = entry['hgnc_symbol']
        for phenotype in entry['phenotypes']:
            mim_nr = phenotype['mim_number']
            existing = phenotypes_found.get(mim_nr)
            if existing is None:
                # First time we see this phenotype
                phenotype['hgnc_symbols'] = {hgnc_symbol}
                phenotypes_found[mim_nr] = phenotype
            else:
                existing['hgnc_symbols'].add(hgnc_symbol)
                existing['inheritance'] = existing['inheritance'].union(
                    phenotype['inheritance'])
    return phenotypes_found
def cli(context, morbid, genemap, mim2gene, mim_titles, phenotypes):
    """Parse the omim files"""
    from scout.utils.handle import get_file_handle
    from pprint import pprint as pp
    print("Morbid file: %s" % morbid)
    print("Genemap file: %s" % genemap)
    print("mim2gene file: %s" % mim2gene)
    print("MimTitles file: %s" % mim_titles)
    if morbid:
        morbid_handle = get_file_handle(morbid)
    if genemap:
        genemap_handle = get_file_handle(genemap)
    if mim2gene:
        mim2gene_handle = get_file_handle(mim2gene)
    if mim_titles:
        mimtitles_handle = get_file_handle(mim_titles)
        # Debug output for a known gene, then stop
        mim_genes = get_mim_genes(genemap_handle, mim2gene_handle)
        for entry in mim_genes:
            if entry == 'C10orf11':
                pp(mim_genes[entry])
        context.abort()
    if phenotypes:
        if not genemap:
            click.echo("Please provide the genemap file")
            context.abort()
        phenotypes = get_mim_phenotypes(genemap_handle)
        for i, mim_term in enumerate(phenotypes):
            pass
        print("Number of phenotypes found: %s" % i)
        context.abort()
    genes = get_mim_genes(genemap_handle, mim2gene_handle)
    for hgnc_symbol in genes:
        if hgnc_symbol == 'OPA1':
            print(genes[hgnc_symbol])
def convert_number(string):
    """Convert a string to a number.

    Returns an int when possible, otherwise a float, otherwise None.
    """
    if isint(string):
        return int(string)
    if isfloat(string):
        return float(string)
    return None
def formatmonth(self, theyear, themonth, withyear=True, net=None, qs=None, template='happenings/partials/calendar/month_table.html'):
    """Return a formatted month as a table."""
    context = self.get_context()
    context['month_start_date'] = date(self.yr, self.mo, 1)
    # One row of rendered day cells per calendar week
    week_rows = []
    for week in self.monthdays2calendar(theyear, themonth):
        week_rows.append([self.formatday(day, weekday) for day, weekday in week])
    context['week_rows'] = week_rows
    # Querystrings used by the previous/next month navigation links
    nxt, prev = get_next_and_prev(net)
    extra_qs = ('&' + '&'.join(qs)) if qs else ''
    context['prev_qs'] = mark_safe('?cal_prev=%d%s' % (prev, extra_qs))
    context['next_qs'] = mark_safe('?cal_next=%d%s' % (nxt, extra_qs))
    context['withyear'] = withyear
    return render_to_string(template, context)
def formatday(self, day, weekday):
    """Set some commonly used variables."""
    css_class = self.cssclasses[weekday]
    self.wkday_not_today = '<td class="%s"><div class="td-inner">' % css_class
    self.wkday_today = '<td class="%s calendar-today"><div class="td-inner">' % css_class
    # Resolve the day-list URL, honouring an optional URL namespace
    if URLS_NAMESPACE:
        url_name = '%s:day_list' % (URLS_NAMESPACE)
    else:
        url_name = 'day_list'
    self.day_url = reverse(url_name, args=(self.yr, self.mo, day))
    self.day = day
    self.anch = '<a href="%s">%d</a>' % (self.day_url, day)
    self.end = '</div></td>'
def formatmonthname(self, theyear, themonth, withyear=True):
    """
    Change colspan to "5", add "today" button, and return a month
    name as a table row.
    """
    display_month = month_name[themonth]
    # On Python 2 the month name may be a byte string in a locale encoding
    if isinstance(display_month, six.binary_type) and self.encoding:
        display_month = display_month.decode(self.encoding)
    s = u'%s %s' % (display_month, theyear) if withyear else u'%s' % display_month
    return ('<tr><th colspan="5" class="month">'
            '<button id="cal-today-btn" class="btn btn-small">'
            'Today</button> %s</th></tr>' % s)
def popover_helper(self):
    """Populate variables used to build popovers."""
    event = self.event
    # "When" section: month, day and start/end times
    display_month = month_name[self.mo]
    if isinstance(display_month, six.binary_type) and self.encoding:
        display_month = display_month.decode('utf-8')
    start_time = event.l_start_date.strftime(LEGACY_CALENDAR_TIME_FORMAT).lstrip('0')
    end_time = event.l_end_date.strftime(LEGACY_CALENDAR_TIME_FORMAT).lstrip('0')
    self.when = ('<p><b>When:</b> ' + display_month + ' ' + str(self.day) +
                 ', ' + start_time + ' - ' + end_time + '</p>')
    # "Where" section: concatenate all location names, if any
    if event.location.exists():
        self.where = '<p><b>Where:</b> '
        for l in event.location.all():
            self.where += l.name
        self.where += '</p>'
    else:
        self.where = ''
    # Description, truncated at 100 characters
    self.desc = '<p><b>Description:</b> ' + event.description[:100]
    self.desc += '...</p>' if len(event.description) > 100 else '</p>'
    self.event_url = event.get_absolute_url()
    # Title prefixed with the start time (hour-only format on the hour)
    t = LEGACY_CALENDAR_TIME_FORMAT if event.l_start_date.minute else LEGACY_CALENDAR_HOUR_FORMAT
    self.title2 = event.l_start_date.strftime(t).lstrip('0') + ' ' + self.title
def get_panel_info(panel_lines=None, panel_id=None, institute=None, version=None, date=None,
                   display_name=None):
    """Parse metadata for a gene panel

    For historical reasons it is possible to include all information about
    a gene panel in '##key=value' header lines of a panel file. This
    function parses that header. Explicit keyword arguments take
    precedence over values found in the file.

    Args:
        panel_lines(iterable(str)): Lines of the panel file, may be None
        panel_id(str)
        institute(str)
        version(str)
        date(str)
        display_name(str)

    Returns:
        panel_info(dict): Dictionary with panel information; 'date' is
            converted with get_date()
    """
    panel_info = {
        'panel_id': panel_id,
        'institute': institute,
        'version': version,
        'date': date,
        'display_name': display_name,
    }
    if panel_lines:
        for line in panel_lines:
            line = line.rstrip()
            if not line.startswith('##'):
                # Header lines only appear at the top of the file
                break
            # BUG FIX: split on the first '=' only, so values that
            # themselves contain '=' are preserved intact
            field, _, value = line[2:].partition('=')
            if not panel_info.get(field):
                panel_info[field] = value
    panel_info['date'] = get_date(panel_info['date'])
    return panel_info
def parse_gene(gene_info):
    """Parse a gene line with information from a panel file

    Args:
        gene_info(dict): dictionary with gene info

    Returns:
        gene(dict): A dictionary with the gene information
        {
            'hgnc_id': int,
            'hgnc_symbol': str,
            'identifier': int or str,
            'transcripts': list(str),
            'inheritance_models': list(str),
            'mosaicism': bool,
            'reduced_penetrance': bool,
            'database_entry_version': str,
        }

    Raises:
        SyntaxError: If the hgnc id is not an integer or if no gene
            identifier could be found
    """
    gene = {}
    # The hgnc id may appear under several historical column names
    raw_hgnc_id = None
    for key in ('hgnc_id', 'hgnc_idnumber', 'hgncid'):
        if key in gene_info:
            raw_hgnc_id = gene_info[key]
            break
    hgnc_id = None
    if raw_hgnc_id is not None:
        try:
            hgnc_id = int(raw_hgnc_id)
        except ValueError:
            # BUG FIX: report the offending value; the original formatted
            # the (still None) converted value into the message
            raise SyntaxError("Invalid hgnc id: {0}".format(raw_hgnc_id))
    gene['hgnc_id'] = hgnc_id
    # The hgnc symbol also appears under several historical column names
    hgnc_symbol = None
    for key in ('hgnc_symbol', 'hgncsymbol', 'symbol'):
        if key in gene_info:
            hgnc_symbol = gene_info[key]
            break
    gene['hgnc_symbol'] = hgnc_symbol
    # Prefer the hgnc id as identifier, fall back to the symbol
    identifier = hgnc_id
    if not identifier:
        if not hgnc_symbol:
            raise SyntaxError("No gene identifier could be found")
        identifier = hgnc_symbol
    gene['identifier'] = identifier
    # Disease associated transcripts is a ','-separated list of
    # manually curated transcripts
    transcripts = ""
    for key in ('disease_associated_transcripts',
                'disease_associated_transcript', 'transcripts'):
        if key in gene_info:
            transcripts = gene_info[key]
            break
    gene['transcripts'] = [
        transcript.strip() for transcript in transcripts.split(',') if transcript
    ]
    # Genetic disease models is a ','-separated list of manually curated
    # inheritance patterns that are followed for a gene
    models = ""
    for key in ('genetic_disease_models', 'genetic_disease_model',
                'inheritance_models', 'genetic_inheritance_models'):
        if key in gene_info:
            models = gene_info[key]
            break
    gene['inheritance_models'] = [
        model.strip() for model in models.split(',')
        if model.strip() in VALID_MODELS
    ]
    # If a gene is known to be associated with mosaicism this is annotated
    gene['mosaicism'] = bool(gene_info.get('mosaicism'))
    # If a gene is known to have reduced penetrance this is annotated
    gene['reduced_penetrance'] = bool(gene_info.get('reduced_penetrance'))
    # The database entry version tracks when a gene was added or modified
    gene['database_entry_version'] = gene_info.get('database_entry_version')
    return gene
def parse_genes(gene_lines):
    """Parse a file with genes and return the hgnc ids

    Args:
        gene_lines(iterable(str)): Stream with genes

    Returns:
        genes(list(dict)): Dictionaries with relevant gene info
    """
    genes = []
    header = []
    hgnc_identifiers = set()
    # The delimiter can be '\t', ' ' or ';'
    delimiter = '\t'
    delimiters = ['\t', ' ', ';']
    # Some files mark the header with '#'; others start with a plain
    # header line without any special symbol.
    for i, line in enumerate(gene_lines):
        line = line.rstrip()
        if not len(line) > 0:
            continue
        if line.startswith('#'):
            if not line.startswith('##'):
                # Sniff the delimiter: prefer whichever splits the
                # header into the most columns
                line_length = 0
                delimiter = None
                for alt in delimiters:
                    head_line = line.split(alt)
                    if len(head_line) > line_length:
                        line_length = len(head_line)
                        delimiter = alt
                header = [word.lower() for word in line[1:].split(delimiter)]
        else:
            if i == 0:
                # No '#' header symbol: assume the first line is a header
                line_length = 0
                for alt in delimiters:
                    head_line = line.split(alt)
                    if len(head_line) > line_length:
                        line_length = len(head_line)
                        delimiter = alt
                if ('hgnc' in line or 'HGNC' in line):
                    header = [word.lower() for word in line.split(delimiter)]
                    continue
                # First line is data: sniff what the first column holds
                if line.split(delimiter)[0].isdigit():
                    header = ['hgnc_id']
                else:
                    header = ['hgnc_symbol']
            splitted_line = line.split(delimiter)
            gene_info = dict(zip(header, splitted_line))
            # Excel sometimes exports empty lines that look like ';;;;;;'
            # - skip lines where no column holds any information
            if not any(gene_info.values()):
                continue
            try:
                gene = parse_gene(gene_info)
            except Exception as e:
                LOG.warning(e)
                raise SyntaxError("Line {0} is malformed".format(i + 1))
            identifier = gene.pop('identifier')
            if identifier not in hgnc_identifiers:
                hgnc_identifiers.add(identifier)
                genes.append(gene)
    return genes
def parse_gene_panel(path, institute='cust000', panel_id='test', panel_type='clinical', date=None,
                     version=1.0, display_name=None, genes=None):
    """Parse the panel info and return a gene panel

    Args:
        path(str): Path to panel file
        institute(str): Name of institute that owns the panel
        panel_id(str): Panel id
        panel_type(str)
        date(datetime.datetime): Date of creation, defaults to call time
        version(float)
        display_name(str): Option to have a long name
        genes(iterable(str)): Gene lines, used when no path is given

    Returns:
        gene_panel(dict)
    """
    LOG.info("Parsing gene panel %s", panel_id)
    gene_panel = {}
    gene_panel['path'] = path
    gene_panel['type'] = panel_type
    # BUG FIX: the original default `date=datetime.now()` was evaluated
    # once at import time, so every panel got the same stale timestamp
    gene_panel['date'] = date or datetime.now()
    gene_panel['panel_id'] = panel_id
    gene_panel['institute'] = institute
    gene_panel['version'] = float(version or 1.0)
    gene_panel['display_name'] = display_name or panel_id
    # Read the genes from the file, or fall back to the provided lines
    if not path:
        panel_handle = genes
    else:
        panel_handle = get_file_handle(gene_panel['path'])
    gene_panel['genes'] = parse_genes(gene_lines=panel_handle)
    return gene_panel
def diseases(context):
    """Show all diseases in the database"""
    LOG.info("Running scout view diseases")
    adapter = context.obj['adapter']
    # Materialize once; the original queried the adapter twice and
    # called list.count() without an argument (a TypeError on a list)
    disease_objs = adapter.disease_terms()
    nr_diseases = len(disease_objs)
    if nr_diseases == 0:
        click.echo("No diseases found")
    else:
        click.echo("Disease")
        for disease_obj in disease_objs:
            click.echo("{0}".format(disease_obj['_id']))
    LOG.info("{0} diseases found".format(nr_diseases))
def hpo(context):
    """
    Update the hpo terms in the database. Fetch the latest release and update terms.
    """
    LOG.info("Running scout update hpo")
    adapter = context.obj['adapter']
    # Remove the existing terms before loading the new release
    LOG.info("Dropping HPO terms")
    adapter.hpo_term_collection.drop()
    LOG.debug("HPO terms dropped")
    load_hpo_terms(adapter)
def users(store):
    """Display a list of all users and which institutes they belong to."""
    user_objs = list(store.users())
    total_events = store.user_events().count()
    for user_obj in user_objs:
        # Resolve institute ids to full institute documents
        institute_ids = user_obj.get('institutes') or []
        user_obj['institutes'] = [store.institute(inst_id) for inst_id in institute_ids]
        user_obj['events'] = store.user_events(user_obj).count()
        user_obj['events_rank'] = event_rank(user_obj['events'])
    # Most active users first
    return dict(
        users=sorted(user_objs, key=lambda user: -user['events']),
        total_events=total_events,
    )
def parse_conservations(variant):
    """Parse the conservation predictors

    Args:
        variant(dict): A variant dictionary

    Returns:
        conservations(dict): A dictionary with the conservations
    """
    # Map each predictor name to its INFO key in the VCF
    info_keys = {
        'gerp': 'dbNSFP_GERP___RS',
        'phast': 'dbNSFP_phastCons100way_vertebrate',
        'phylop': 'dbNSFP_phyloP100way_vertebrate',
    }
    return {name: parse_conservation(variant, key)
            for name, key in info_keys.items()}
def parse_conservation(variant, info_key):
    """Get the conservation prediction

    Args:
        variant(dict): A variant dictionary
        info_key(str)

    Returns:
        conservations(list): List of conservation terms
    """
    raw_score = variant.INFO.get(info_key)
    conservations = []
    if not raw_score:
        return conservations
    # A single number is treated as a one-element tuple
    if isinstance(raw_score, numbers.Number):
        raw_score = (raw_score,)
    threshold = CONSERVATION[info_key]['conserved_min']
    for score in raw_score:
        conservations.append('Conserved' if score >= threshold else 'NotConserved')
    return conservations
def get_general_case_info(adapter, institute_id=None, slice_query=None):
    """Return general information about cases

    Args:
        adapter(adapter.MongoAdapter)
        institute_id(str)
        slice_query(str): Query to filter cases to obtain statistics for.

    Returns:
        general(dict)
    """
    # Potentially sensitive slice queries are assumed allowed if we have got this far
    cases = adapter.cases(owner=institute_id, name_query=slice_query)
    phenotype_cases = 0
    causative_cases = 0
    pinned_cases = 0
    cohort_cases = 0
    pedigree = {
        1: {'title': 'Single', 'count': 0},
        2: {'title': 'Duo', 'count': 0},
        3: {'title': 'Trio', 'count': 0},
        'many': {'title': 'Many', 'count': 0},
    }
    case_ids = set()
    total_cases = 0
    for total_cases, case in enumerate(cases, 1):
        # When looking at a single institute we also keep the case ids
        if institute_id:
            case_ids.add(case['_id'])
        phenotype_cases += 1 if case.get('phenotype_terms') else 0
        causative_cases += 1 if case.get('causatives') else 0
        pinned_cases += 1 if case.get('suspects') else 0
        cohort_cases += 1 if case.get('cohorts') else 0
        nr_individuals = len(case.get('individuals', []))
        if nr_individuals == 0:
            continue
        bucket = 'many' if nr_individuals > 3 else nr_individuals
        pedigree[bucket]['count'] += 1
    return {
        'total_cases': total_cases,
        'phenotype_cases': phenotype_cases,
        'causative_cases': causative_cases,
        'pinned_cases': pinned_cases,
        'cohort_cases': cohort_cases,
        'pedigree': pedigree,
        'case_ids': case_ids,
    }
def get_case_groups(adapter, total_cases, institute_id=None, slice_query=None):
    """Return the information about case groups

    Args:
        adapter(adapter.MongoAdapter)
        total_cases(int): Total number of cases
        institute_id(str)
        slice_query(str): Query to filter cases to obtain statistics for.

    Returns:
        cases(list(dict)): One entry per status group, plus an 'all' group
    """
    # Create a group with all cases in the database
    cases = [{'status': 'all', 'count': total_cases, 'percent': 1}]
    # Group the cases based on their status
    pipeline = []
    group = {'$group': {'_id': '$status', 'count': {'$sum': 1}}}
    subquery = {}
    if institute_id and slice_query:
        subquery = adapter.cases(owner=institute_id, name_query=slice_query,
                                 yield_query=True)
    elif institute_id:
        subquery = adapter.cases(owner=institute_id, yield_query=True)
    elif slice_query:
        subquery = adapter.cases(name_query=slice_query, yield_query=True)
    if subquery:
        pipeline.append({'$match': subquery})
    pipeline.append(group)
    res = adapter.case_collection.aggregate(pipeline)
    for status_group in res:
        # BUG FIX: guard against ZeroDivisionError when the database
        # holds no cases
        percent = status_group['count'] / total_cases if total_cases else 0
        cases.append({'status': status_group['_id'],
                      'count': status_group['count'],
                      'percent': percent})
    return cases
def render_to_json_response(self, context, **kwargs):
    """
    Returns a JSON response, transforming 'context' to make the payload.
    """
    payload = self.convert_context_to_json(context)
    return HttpResponse(payload, content_type='application/json', **kwargs)
def get_year_and_month(self, net, qs, **kwargs):
    """
    Get the year and month. First tries from kwargs, then from
    querystrings. If none, or if cal_ignore qs is specified,
    sets year and month to this year and this month.
    """
    now = c.get_now()
    year = now.year
    month = now.month + net
    month_orig = None
    if 'cal_ignore=true' not in qs:
        # BUG FIX: the original tested `'year' and 'month' in self.kwargs`,
        # which — because 'year' is a truthy literal — only checked for
        # 'month'. Both keys must be present before using the kwargs.
        if 'year' in self.kwargs and 'month' in self.kwargs:
            year, month_orig = map(
                int, (self.kwargs['year'], self.kwargs['month'])
            )
            month = month_orig + net
        else:
            try:  # fall back to the querystring
                year = int(self.request.GET['cal_year'])
                month_orig = int(self.request.GET['cal_month'])
                month = month_orig + net
            except Exception:
                pass
    # Return the year and month, and any errors that may have occurred
    # due to an invalid month/year being given.
    return c.clean_year_month(year, month, month_orig)
def check_for_cancelled_events(self, d):
    """Flag events cancelled on the given date 'd' by amending their title."""
    for event in self.events:
        for cancellation in event.cancellations.all():
            if cancellation.date == d:
                event.title += ' (CANCELLED)'
def hpo_term(self, hpo_id):
    """Fetch a hpo term

    Args:
        hpo_id(str)

    Returns:
        hpo_obj(dict): The matching term, or None if it does not exist
    """
    LOG.debug("Fetching hpo term %s", hpo_id)
    query = {'_id': hpo_id}
    return self.hpo_term_collection.find_one(query)
def hpo_terms(self, query=None, hpo_term=None, text=None, limit=None):
    """Return all HPO terms

    If a query is sent hpo_terms will try to match with regex on term or
    description.

    Args:
        query(str): Part of a hpoterm or description
        hpo_term(str): Search for a specific hpo term
        text(str): Full text search on the term collection
        limit(int): the number of desired results

    Returns:
        result(pymongo.Cursor): A cursor with hpo terms
    """
    query_dict = {}
    search_term = None
    if query:
        # Case-insensitive regex match on id or description
        regex = {'$regex': query, '$options': 'i'}
        query_dict = {'$or': [{'hpo_id': regex}, {'description': regex}]}
        search_term = query
    elif text:
        # Quote every word except the first so the text search
        # requires all of them
        words = text.split(' ')
        new_string = ' '.join([words[0]] + ['\"{0}\"'.format(word) for word in words[1:]])
        LOG.info("Search HPO terms with %s", new_string)
        query_dict['$text'] = {'$search': new_string}
        search_term = text
    elif hpo_term:
        query_dict['hpo_id'] = hpo_term
        search_term = hpo_term
    # Effectively "no limit" when none was requested
    limit = limit or int(10e10)
    res = self.hpo_term_collection.find(query_dict).limit(limit).sort('hpo_number', ASCENDING)
    LOG.info("Found {0} terms with search word {1}".format(res.count(), search_term))
    return res
def disease_term(self, disease_identifier):
    """Return a disease term

    Checks if the identifier is a disease number or a id

    Args:
        disease_identifier(str)

    Returns:
        disease_obj(dict)
    """
    query = {}
    try:
        # A purely numeric identifier is a disease number
        query['disease_nr'] = int(disease_identifier)
    except ValueError:
        # Otherwise treat it as the document id
        query['_id'] = disease_identifier
    return self.disease_term_collection.find_one(query)
def disease_terms(self, hgnc_id=None):
    """Return all disease terms that overlaps a gene

    If no gene, return all disease terms

    Args:
        hgnc_id(int)

    Returns:
        list(dict): A list with all disease terms that match
    """
    if hgnc_id:
        LOG.debug("Fetching all diseases for gene %s", hgnc_id)
        query = {'genes': hgnc_id}
    else:
        LOG.info("Fetching all disease terms")
        query = {}
    return list(self.disease_term_collection.find(query))
def load_disease_term(self, disease_obj):
    """Load a disease term into the database

    Args:
        disease_obj(dict)

    Raises:
        IntegrityError: If the disease term already exists in the database
    """
    LOG.debug("Loading disease term %s into database", disease_obj['_id'])
    try:
        self.disease_term_collection.insert_one(disease_obj)
    except DuplicateKeyError as err:
        # BUG FIX: the original mixed %-style ("%s") with str.format, so
        # the id was never interpolated into the message
        raise IntegrityError(
            "Disease term {0} already exists in database".format(disease_obj['_id'])
        )
    LOG.debug("Disease term saved")
def generate_hpo_gene_list(self, *hpo_terms):
    """Generate a sorted list of (hgnc_id, count) tuples

    Each tuple holds a gene and the number of given HPO terms it is
    associated with; the list is sorted on that count, highest first.

    Args:
        hpo_terms(iterable(str))

    Returns:
        list(tuple): (hgnc_id, nr_matching_terms), descending by count
    """
    genes = {}
    for term in hpo_terms:
        hpo_obj = self.hpo_term(term)
        if not hpo_obj:
            LOG.warning("Term %s could not be found", term)
            continue
        for hgnc_id in hpo_obj['genes']:
            genes[hgnc_id] = genes.get(hgnc_id, 0) + 1
    return sorted(genes.items(), key=operator.itemgetter(1), reverse=True)
def read_hdf5(self, filename, f_start=None, f_stop=None,
              t_start=None, t_stop=None, load_data=True):
    """ Populate Filterbank instance with data from HDF5 file

    Note:
        This is to be deprecated in future, please use Waterfall() to open files.
    """
    print("Warning: this function will be deprecated in the future. Please use Waterfall to open HDF5 files.")
    self.header = {}
    self.filename = filename
    self.h5 = h5py.File(filename)
    # Copy the header attributes, wrapping coordinates in Angle objects
    for key, val in self.h5[b'data'].attrs.items():
        if six.PY3:
            key = bytes(key, 'ascii')
        if key == b'src_raj':
            val = Angle(val, unit='hr')
        elif key == b'src_dej':
            val = Angle(val, unit='deg')
        self.header[key] = val
    self.n_ints_in_file = self.h5[b"data"].shape[0]
    # Translate the frequency / time selections into index bounds
    i_start, i_stop, chan_start_idx, chan_stop_idx = self._setup_freqs(f_start=f_start, f_stop=f_stop)
    ii_start, ii_stop, n_ints = self._setup_time_axis(t_start=t_start, t_stop=t_stop)
    if load_data:
        self.data = self.h5[b"data"][ii_start:ii_stop, :, chan_start_idx:chan_stop_idx]
    else:
        print("Skipping data load...")
        self.data = np.array([0])
        self.n_ints_in_file = 0
    self.file_size_bytes = os.path.getsize(self.filename)
def _setup_freqs(self, f_start=None, f_stop=None):
    """ Set up the frequency axis (self.freqs) from the header.

    Args:
        f_start (float): requested start frequency in MHz (defaults to channel 0)
        f_stop (float): requested stop frequency in MHz (defaults to last channel)

    Returns:
        tuple: (i_start, i_stop, chan_start_idx, chan_stop_idx), where the
        chan_*_idx pair is returned in ascending order.
    """
    f0 = self.header[b'fch1']
    f_delt = self.header[b'foff']
    i_start, i_stop = 0, self.header[b'nchans']
    if f_start:
        i_start = int((f_start - f0) / f_delt)
    if f_stop:
        i_stop = int((f_stop - f0) / f_delt)
    # Calculate closest true index value.
    # Bugfix: use builtin int() -- np.int was removed in NumPy 1.20+.
    chan_start_idx = int(i_start)
    chan_stop_idx = int(i_stop)
    # Create freq array; with negative foff the raw indexes come out reversed.
    if i_start < i_stop:
        i_vals = np.arange(chan_start_idx, chan_stop_idx)
    else:
        i_vals = np.arange(chan_stop_idx, chan_start_idx)
    self.freqs = f_delt * i_vals + f0
    # if f_delt < 0:
    #     self.freqs = self.freqs[::-1]
    if chan_stop_idx < chan_start_idx:
        chan_stop_idx, chan_start_idx = chan_start_idx, chan_stop_idx
    return i_start, i_stop, chan_start_idx, chan_stop_idx
"resource": ""
} |
def _setup_time_axis(self, t_start=None, t_stop=None):
    """ Build self.timestamps (MJD) for the selected integration range.

    Args:
        t_start (int): first integration to include (defaults to 0)
        t_stop (int): stop integration, exclusive (defaults to end of file)

    Returns:
        tuple: (ii_start, ii_stop, n_ints)
    """
    # Default to the full range of integrations in the file.
    ii_start, ii_stop = 0, self.n_ints_in_file
    if t_start:
        ii_start = t_start
    if t_stop:
        ii_stop = t_stop
    n_ints = ii_stop - ii_start
    # Convert the sample period (seconds) to days, offset from tstart (MJD).
    mjd0 = self.header[b'tstart']
    step = self.header[b'tsamp']
    self.timestamps = np.arange(0, n_ints) * step / 24. / 60. / 60 + mjd0
    return ii_start, ii_stop, n_ints
"resource": ""
} |
def read_filterbank(self, filename=None, f_start=None, f_stop=None,
                    t_start=None, t_stop=None, load_data=True):
    """ Populate Filterbank instance with data from Filterbank file.

    Args:
        filename (str): path to the .fil file (defaults to self.filename)
        f_start (float): start frequency, in MHz
        f_stop (float): stop frequency, in MHz
        t_start (int): first integration to read
        t_stop (int): last integration to read
        load_data (bool): if False, only header and axes are set up

    Note:
        This is to be deprecated in future, please use Waterfall() to open files.
    """
    if filename is None:
        filename = self.filename
    else:
        self.filename = filename
    self.header = read_header(filename)
    # Convert input frequencies into what their corresponding index would be.
    i_start, i_stop, chan_start_idx, chan_stop_idx = self._setup_freqs(f_start=f_start, f_stop=f_stop)
    n_bits = self.header[b'nbits']
    n_bytes = int(self.header[b'nbits'] / 8)
    n_chans = self.header[b'nchans']
    n_chans_selected = self.freqs.shape[0]
    n_ifs = self.header[b'nifs']
    # Load binary data: skip past the sigproc header first.
    self.idx_data = len_header(filename)
    f = open(filename, 'rb')
    f.seek(self.idx_data)
    filesize = os.path.getsize(self.filename)
    n_bytes_data = filesize - self.idx_data
    # Finally add some other info to the class as objects.
    self.n_ints_in_file = calc_n_ints_in_file(self.filename)
    self.file_size_bytes = filesize
    ## Setup time axis
    ii_start, ii_stop, n_ints = self._setup_time_axis(t_start=t_start, t_stop=t_stop)
    # Seek to first integration (1 = relative to current position).
    f.seek(int(ii_start * n_bits * n_ifs * n_chans / 8), 1)
    # Set up indexes used in file read (taken out of loop for speed).
    i0 = np.min((chan_start_idx, chan_stop_idx))
    i1 = np.max((chan_start_idx, chan_stop_idx))
    # Set up the data type (taken out of loop for speed).
    # NOTE(review): if nbits is none of {2, 8, 16, 32}, dd_type is never
    # assigned and the code below raises NameError -- confirm expected nbits.
    if n_bits == 2:
        dd_type = b'uint8'
        # 2-bit samples are packed four to a byte, so read a quarter as many.
        n_chans_selected = int(n_chans_selected/4)
    elif n_bytes == 4:
        dd_type = b'float32'
    elif n_bytes == 2:
        dd_type = b'uint16'
    elif n_bytes == 1:
        dd_type = b'uint8'
    if load_data:
        if n_ints * n_ifs * n_chans_selected > MAX_DATA_ARRAY_SIZE:
            print("[Filterbank] Error: data array is too large to load. Either select fewer points or manually increase MAX_DATA_ARRAY_SIZE. Large files are now handle with Waterfall .")
            sys.exit()
        if n_bits == 2:
            # Each packed byte expands to four 8-bit samples after unpacking.
            self.data = np.zeros((n_ints, n_ifs, n_chans_selected*4), dtype=dd_type)
        else:
            self.data = np.zeros((n_ints, n_ifs, n_chans_selected), dtype=dd_type)
        for ii in range(n_ints):
            """d = f.read(n_bytes * n_chans * n_ifs)
            """
            for jj in range(n_ifs):
                # Skip channels below the selection, read the selection,
                # then skip to the start of the next IF/integration block.
                f.seek(n_bytes * i0, 1)  # 1 = from current location
                #d = f.read(n_bytes * n_chans_selected)
                #bytes_to_read = n_bytes * n_chans_selected
                dd = np.fromfile(f, count=n_chans_selected, dtype=dd_type)
                # Reverse array if frequency axis is flipped
                # if f_delt < 0:
                #     dd = dd[::-1]
                if n_bits == 2:
                    dd = unpack_2to8(dd)
                self.data[ii, jj] = dd
                f.seek(n_bytes * (n_chans - i1), 1)  # Seek to start of next block
    else:
        print("Skipping data load...")
        self.data = np.array([0], dtype=dd_type)
"resource": ""
} |
def compute_lst(self):
    """ Compute the local apparent sidereal time (radians) at observation start.

    Returns:
        float: LAST in radians, in [0, 2*pi).

    Raises:
        RuntimeError: if the telescope is not GBT/Parkes, or pySLALIB is missing.
    """
    # Pick telescope coordinates by sigproc telescope_id (6 = GBT, 4 = Parkes).
    tel_id = self.header[b'telescope_id']
    if tel_id == 6:
        self.coords = gbt_coords
    elif tel_id == 4:
        self.coords = parkes_coords
    else:
        raise RuntimeError("Currently only Parkes and GBT supported")
    if not HAS_SLALIB:
        raise RuntimeError("This method requires pySLALIB")
    # dut1 = (0.2 /3600.0) * np.pi/12.0
    dut1 = 0.0
    mjd = self.header[b'tstart']
    tellong = np.deg2rad(self.coords[1])
    # LAST = GMST - east longitude + equation of the equinoxes + dUT1
    last = s.sla_gmst(mjd) - tellong + s.sla_eqeqx(mjd) + dut1
    # lmst = s.sla_gmst(mjd) - tellong
    if last < 0.0:
        last = last + 2.0 * np.pi
    return last
"resource": ""
} |
def blank_dc(self, n_coarse_chan):
    """ Blank DC bins in coarse channels.

    Note: currently only works if entire file is read.

    Args:
        n_coarse_chan (int): number of coarse channels spanned by self.data
    """
    # Guard against unusable coarse-channel counts: warn and bail out.
    if n_coarse_chan < 1:
        logger.warning('Coarse channel number < 1, unable to blank DC bin.')
        return None
    if not n_coarse_chan % int(n_coarse_chan) == 0:
        logger.warning('Selection does not contain an interger number of coarse channels, unable to blank DC bin.')
        return None
    n_coarse_chan = int(n_coarse_chan)
    total_chans = self.data.shape[-1]
    chans_per_coarse = int(total_chans / n_coarse_chan)
    centre = int(chans_per_coarse / 2)
    # Replace each coarse channel's centre (DC) bin with the median of a few
    # nearby bins so the DC spike does not dominate the spectrum.
    for coarse_idx in range(n_coarse_chan):
        offset = coarse_idx * chans_per_coarse
        dc_bin = offset + centre
        self.data[..., dc_bin] = np.median(self.data[..., dc_bin + 5:dc_bin + 10])
"resource": ""
} |
def info(self):
    """ Print header information and a short data summary to stdout.

    Values are pretty-printed with astropy units where appropriate.
    """
    for key, val in self.header.items():
        if key == b'src_raj':
            val = val.to_string(unit=u.hour, sep=':')
        if key == b'src_dej':
            val = val.to_string(unit=u.deg, sep=':')
        if key == b'tsamp':
            val *= u.second
        # Bugfix: header keys are bytes throughout this class, so the old
        # ('foff', 'fch1') str comparison never matched under Python 3.
        if key in (b'foff', b'fch1'):
            val *= u.MHz
        if key == b'tstart':
            print("%16s : %32s" % ("tstart (ISOT)", Time(val, format='mjd').isot))
            key = "tstart (MJD)"
        print("%16s : %32s" % (key, val))
    print("\n%16s : %32s" % ("Num ints in file", self.n_ints_in_file))
    print("%16s : %32s" % ("Data shape", self.data.shape))
    print("%16s : %32s" % ("Start freq (MHz)", self.freqs[0]))
    print("%16s : %32s" % ("Stop freq (MHz)", self.freqs[-1]))
"resource": ""
} |
def _calc_extent(self, plot_f=None, plot_t=None, MJD_time=False):
    """ Compute the (left, right, bottom, top) extent tuple for plotting.

    Args:
        plot_f (array): frequency values (MHz) of the selected channels
        plot_t (array): timestamps -- currently unused; self.timestamps is used
        MJD_time (bool): time axis in MJD (True) or elapsed seconds (False)

    Returns:
        tuple: (f_begin, f_end, t_begin, t_end) suitable for imshow's extent.
    """
    plot_f_begin = plot_f[0]
    # Pad the upper edges by one bin width so the last pixel is included.
    plot_f_end = plot_f[-1] + (plot_f[1] - plot_f[0])
    plot_t_begin = self.timestamps[0]
    plot_t_end = self.timestamps[-1] + (self.timestamps[1] - self.timestamps[0])
    if MJD_time:
        # Bugfix: this branch previously referenced the undefined name
        # 'plot_f_begin_end', raising NameError whenever MJD_time=True.
        extent = (plot_f_begin, plot_f_end, plot_t_begin, plot_t_end)
    else:
        # Express time as seconds elapsed since the first sample.
        extent = (plot_f_begin, plot_f_end, 0.0, (plot_t_end - plot_t_begin) * 24. * 60. * 60)
    return extent
"resource": ""
} |
def plot_waterfall(self, f_start=None, f_stop=None, if_id=0, logged=True, cb=True, MJD_time=False, **kwargs):
    """ Plot waterfall of data.

    Args:
        f_start (float): start frequency, in MHz
        f_stop (float): stop frequency, in MHz
        if_id (int): IF (polarization) index to plot
        logged (bool): Plot in linear (False) or dB units (True)
        cb (bool): for plotting the colorbar
        MJD_time (bool): label the time axis in MJD instead of elapsed seconds
        kwargs: keyword args to be passed to matplotlib imshow()
    """
    plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
    # Using ascending frequency for all plots: flip data when foff < 0.
    if self.header[b'foff'] < 0:
        plot_data = plot_data[..., ::-1]  # Reverse data
        plot_f = plot_f[::-1]
    if logged:
        plot_data = db(plot_data)
    # Make sure waterfall plot is under 4k*4k: decimate by integer factors
    # until both axes fit within MAX_IMSHOW_POINTS.
    dec_fac_x, dec_fac_y = 1, 1
    if plot_data.shape[0] > MAX_IMSHOW_POINTS[0]:
        dec_fac_x = int(plot_data.shape[0] / MAX_IMSHOW_POINTS[0])
    if plot_data.shape[1] > MAX_IMSHOW_POINTS[1]:
        dec_fac_y = int(plot_data.shape[1] / MAX_IMSHOW_POINTS[1])
    plot_data = rebin(plot_data, dec_fac_x, dec_fac_y)
    # Prefer the source name as title; fall back to the file name.
    try:
        plt.title(self.header[b'source_name'])
    except KeyError:
        plt.title(self.filename)
    extent = self._calc_extent(plot_f=plot_f, plot_t=self.timestamps, MJD_time=MJD_time)
    plt.imshow(plot_data,
               aspect='auto',
               origin='lower',
               rasterized=True,
               interpolation='nearest',
               extent=extent,
               cmap='viridis',
               **kwargs
               )
    if cb:
        plt.colorbar()
    plt.xlabel("Frequency [MHz]")
    if MJD_time:
        plt.ylabel("Time [MJD]")
    else:
        plt.ylabel("Time [s]")
"resource": ""
} |
def plot_time_series(self, f_start=None, f_stop=None, if_id=0, logged=True, orientation='h', MJD_time=False, **kwargs):
    """ Plot the time series (power averaged over the selected band).

    Args:
        f_start (float): start frequency, in MHz
        f_stop (float): stop frequency, in MHz
        if_id (int): IF (polarization) index to plot
        logged (bool): Plot in linear (False) or dB units (True)
        orientation (str): 'h' = time on the x axis, 'v' = time on the y axis
        MJD_time (bool): label time in MJD instead of elapsed seconds
        kwargs: keyword args to be passed to matplotlib plot()
    """
    ax = plt.gca()
    plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
    # Only convert to dB for >= 8-bit data.
    if logged and self.header[b'nbits'] >= 8:
        plot_data = db(plot_data)
    # Since the data has been squeezed, the axis for time goes away if only
    # one bin, causing a bug with axis=1.
    if len(plot_data.shape) > 1:
        plot_data = plot_data.mean(axis=1)
    else:
        plot_data = plot_data.mean()
    # Make proper time axis for plotting (but only for plotting!). Note that
    # this makes the values inclusive.
    extent = self._calc_extent(plot_f=plot_f, plot_t=self.timestamps, MJD_time=MJD_time)
    plot_t = np.linspace(extent[2], extent[3], len(self.timestamps))
    if MJD_time:
        tlabel = "Time [MJD]"
    else:
        tlabel = "Time [s]"
    if logged:
        plabel = "Power [dB]"
    else:
        plabel = "Power [counts]"
    # Reverse order if vertical orientation.
    if 'v' in orientation:
        plt.plot(plot_data, plot_t, **kwargs)
        plt.xlabel(plabel)
    else:
        plt.plot(plot_t, plot_data, **kwargs)
        plt.xlabel(tlabel)
        plt.ylabel(plabel)
    ax.autoscale(axis='both', tight=True)
"resource": ""
} |
def write_to_filterbank(self, filename_out):
    """ Write header + data to a SIGPROC filterbank (.fil) file.

    Args:
        filename_out (str): Name of output file
    """
    print("[Filterbank] Warning: Non-standard function to write in filterbank (.fil) format. Please use Waterfall.")
    sample_bytes = int(self.header[b'nbits'] / 8)
    with open(filename_out, "wb") as fileh:
        # Binary sigproc header first, then the raw samples in file order.
        fileh.write(generate_sigproc_header(self))
        flat = self.data
        if sample_bytes == 4:
            np.float32(flat.ravel()).tofile(fileh)
        elif sample_bytes == 2:
            np.int16(flat.ravel()).tofile(fileh)
        elif sample_bytes == 1:
            np.int8(flat.ravel()).tofile(fileh)
"resource": ""
} |
def calibrate_band_pass_N1(self):
    """ Simple band-pass calibration: divide every fine channel by its
    median value over time.
    """
    per_channel_median = np.median(self.data.squeeze(), axis=0)
    self.data = self.data / per_channel_median
"resource": ""
} |
def convert_to_coarse(data, chan_per_coarse):
    """ Average an array of fine channels down to coarse channels.

    Args:
        data (np.array): per-fine-channel values; length must be a multiple
            of chan_per_coarse
        chan_per_coarse (int): number of fine channels per coarse channel

    Returns:
        np.array: one averaged value per coarse channel. The first two and
        the last fine channel of each coarse channel are excluded from the
        average (band edges / DC bin).
    """
    # Bugfix: use integer division -- data.size / chan_per_coarse is a float
    # under Python 3, which np.reshape rejects.
    num_coarse = data.size // chan_per_coarse
    data_shaped = np.array(np.reshape(data, (num_coarse, chan_per_coarse)))
    # Return the average over each coarse channel, skipping its edge bins.
    return np.mean(data_shaped[:, 2:-1], axis=1)
"resource": ""
} |
def apply_Mueller(I, Q, U, V, gain_offsets, phase_offsets, chan_per_coarse, feedtype='l'):
    """ Apply the inverse Mueller matrix of the electronics chain and return
    calibrated Stokes parameters.

    Args:
        I, Q, U, V (np.array): Stokes data cubes of shape (time, pol, chan)
        gain_offsets (np.array): per-coarse-channel differential gain
        phase_offsets (np.array): per-coarse-channel phase difference (radians)
        chan_per_coarse (int): fine channels per coarse channel
        feedtype (str): 'l' for linear feeds, 'c' for circular feeds

    Returns:
        tuple: calibrated (I, Q, U, V), same shape as the inputs.
    """
    # Find shape of data arrays and calculate number of coarse channels.
    shape = I.shape
    ax0 = I.shape[0]
    ax1 = I.shape[1]
    nchans = I.shape[2]
    # Bugfix: use integer division -- a float channel count breaks
    # np.reshape under Python 3.
    ncoarse = nchans // chan_per_coarse
    # Split the channel axis into (coarse, fine) ...
    I = np.reshape(I, (ax0, ax1, ncoarse, chan_per_coarse))
    Q = np.reshape(Q, (ax0, ax1, ncoarse, chan_per_coarse))
    U = np.reshape(U, (ax0, ax1, ncoarse, chan_per_coarse))
    V = np.reshape(V, (ax0, ax1, ncoarse, chan_per_coarse))
    # ... and swap axes 2 and 3 so the per-coarse-channel corrections
    # broadcast correctly over the fine channels.
    I = np.swapaxes(I, 2, 3)
    Q = np.swapaxes(Q, 2, 3)
    U = np.swapaxes(U, 2, 3)
    V = np.swapaxes(V, 2, 3)
    # Apply top left corner of electronics chain inverse Mueller matrix
    # (differential gain correction).
    a = 1 / (1 - gain_offsets**2)
    if feedtype == 'l':
        Icorr = a * (I - gain_offsets * Q)
        Qcorr = a * (-1 * gain_offsets * I + Q)
        I = None
        Q = None
    if feedtype == 'c':
        Icorr = a * (I - gain_offsets * V)
        Vcorr = a * (-1 * gain_offsets * I + V)
        I = None
        V = None
    # Apply bottom right corner of electronics chain inverse Mueller matrix
    # (phase rotation between the two feeds).
    if feedtype == 'l':
        Ucorr = U * np.cos(phase_offsets) - V * np.sin(phase_offsets)
        Vcorr = U * np.sin(phase_offsets) + V * np.cos(phase_offsets)
        U = None
        V = None
    if feedtype == 'c':
        Qcorr = Q * np.cos(phase_offsets) + U * np.sin(phase_offsets)
        Ucorr = -1 * Q * np.sin(phase_offsets) + U * np.cos(phase_offsets)
        Q = None
        U = None
    # Undo the swap/reshape to restore the original (time, pol, chan) layout.
    Icorr = np.reshape(np.swapaxes(Icorr, 2, 3), shape)
    Qcorr = np.reshape(np.swapaxes(Qcorr, 2, 3), shape)
    Ucorr = np.reshape(np.swapaxes(Ucorr, 2, 3), shape)
    Vcorr = np.reshape(np.swapaxes(Vcorr, 2, 3), shape)
    return Icorr, Qcorr, Ucorr, Vcorr
"resource": ""
} |
def calibrate_pols(cross_pols, diode_cross, obsI=None, onefile=True, feedtype='l', **kwargs):
    '''
    Write Stokes-calibrated filterbank file for a given observation
    with a calibrator noise diode measurement on the source

    Parameters
    ----------
    cross_pols : string
        Path to cross polarization filterbank file (rawspec output) for observation to be calibrated
    diode_cross : string
        Path to cross polarization filterbank file of noise diode measurement ON the target
    obsI : string
        Path to Stokes I filterbank file of main observation (only needed if onefile=False)
    onefile : boolean
        True writes all calibrated Stokes parameters to a single filterbank file,
        False writes four separate files
    feedtype : 'l' or 'c'
        Basis of antenna dipoles. 'c' for circular, 'l' for linear
    '''
    # Obtain time sample length, frequencies, and noise diode data.
    obs = Waterfall(diode_cross, max_load=150)
    cross_dat = obs.data
    tsamp = obs.header['tsamp']
    # Calculate number of coarse channels in the noise diode measurement
    # (usually 8). Integer division: a float count breaks downstream
    # reshapes under Python 3.
    dio_ncoarse = obs.calc_n_coarse_chan()
    dio_nchans = obs.header['nchans']
    dio_chan_per_coarse = dio_nchans // dio_ncoarse
    obs = None
    Idat, Qdat, Udat, Vdat = get_stokes(cross_dat, feedtype)
    cross_dat = None
    # Calculate differential gain and phase from noise diode measurements.
    print('Calculating Mueller Matrix variables')
    gams = gain_offsets(Idat, Qdat, Udat, Vdat, tsamp, dio_chan_per_coarse, feedtype, **kwargs)
    psis = phase_offsets(Idat, Qdat, Udat, Vdat, tsamp, dio_chan_per_coarse, feedtype, **kwargs)
    # Clear data arrays to save memory.
    Idat = None
    Qdat = None
    Udat = None
    Vdat = None
    # Get corrected Stokes parameters.
    print('Opening '+cross_pols)
    cross_obs = Waterfall(cross_pols, max_load=150)
    obs_ncoarse = cross_obs.calc_n_coarse_chan()
    obs_nchans = cross_obs.header['nchans']
    obs_chan_per_coarse = obs_nchans // obs_ncoarse
    print('Grabbing Stokes parameters')
    I, Q, U, V = get_stokes(cross_obs.data, feedtype)
    print('Applying Mueller Matrix')
    I, Q, U, V = apply_Mueller(I, Q, U, V, gams, psis, obs_chan_per_coarse, feedtype)
    # Use onefile (default) to produce one filterbank file containing all
    # Stokes information.
    if onefile:
        cross_obs.data[:, 0, :] = np.squeeze(I)
        cross_obs.data[:, 1, :] = np.squeeze(Q)
        cross_obs.data[:, 2, :] = np.squeeze(U)
        cross_obs.data[:, 3, :] = np.squeeze(V)
        cross_obs.write_to_fil(cross_pols[:-15]+'.SIQUV.polcal.fil')
        print('Calibrated Stokes parameters written to '+cross_pols[:-15]+'.SIQUV.polcal.fil')
        return
    # Write corrected Stokes parameters to four filterbank files if onefile==False.
    # Bugfix: this previously referenced the undefined name 'obs_I' (the
    # parameter is obsI), so the four-file path always raised NameError.
    obs = Waterfall(obsI, max_load=150)
    obs.data = I
    obs.write_to_fil(cross_pols[:-15]+'.SI.polcal.fil')  # assuming file is named *.cross_pols.fil
    print('Calibrated Stokes I written to '+cross_pols[:-15]+'.SI.polcal.fil')
    obs.data = Q
    obs.write_to_fil(cross_pols[:-15]+'.Q.polcal.fil')  # assuming file is named *.cross_pols.fil
    print('Calibrated Stokes Q written to '+cross_pols[:-15]+'.Q.polcal.fil')
    obs.data = U
    obs.write_to_fil(cross_pols[:-15]+'.U.polcal.fil')  # assuming file is named *.cross_pols.fil
    print('Calibrated Stokes U written to '+cross_pols[:-15]+'.U.polcal.fil')
    obs.data = V
    obs.write_to_fil(cross_pols[:-15]+'.V.polcal.fil')  # assuming file is named *.cross_pols.fil
    print('Calibrated Stokes V written to '+cross_pols[:-15]+'.V.polcal.fil')
"resource": ""
} |
def fracpols(str, **kwargs):
    '''Output fractional linear and circular polarizations for a
    rawspec cross polarization .fil file. NOT STANDARD USE

    Returns:
        tuple: (L/I, V/I) -- fractional linear and circular polarization.

    NOTE(review): this expects get_stokes() to return five values
    (I, Q, U, V, L), while other call sites in this module unpack only
    four -- confirm the intended get_stokes signature. The first
    parameter also shadows the builtin ``str``.
    '''
    I,Q,U,V,L=get_stokes(str, **kwargs)
    return L/I,V/I
"resource": ""
} |
def write_polfils(str, str_I, **kwargs):
    '''Writes two new filterbank files containing fractional linear and
    circular polarization data'''
    lin_frac, circ_frac = fracpols(str, **kwargs)
    # Reuse the Stokes-I observation as a template for header + shape,
    # swapping in each fractional-polarization array before writing.
    wf = Waterfall(str_I, max_load=150)
    wf.data = lin_frac
    wf.write_to_fil(str[:-15]+'.linpol.fil')  # assuming file is named *.cross_pols.fil
    wf.data = circ_frac
    wf.write_to_fil(str[:-15]+'.circpol.fil')
"resource": ""
} |
def closest(xarr, val):
    """ Return the index of the entry in xarr nearest to val. """
    deltas = np.abs(np.array(xarr) - val)
    return np.argmin(deltas)
"resource": ""
} |
def rebin(d, n_x, n_y=None):
    """ Rebin data by averaging bins together.

    Args:
        d (np.array): data, 1-D or 2-D
        n_x (int): number of bins in x dir to rebin into one (None -> 1)
        n_y (int): number of bins in y dir to rebin into one (None -> 1, 2-D only)

    Returns:
        d: rebinned data; trailing bins that do not fill a complete group
        are dropped.

    Raises:
        RuntimeError: if d has more than 2 dimensions.
    """
    # Robustness: normalize None -> 1 up front. Previously None was only
    # handled in the 2-D branch, so a 1-D call with n_x=None crashed.
    if n_x is None:
        n_x = 1
    if n_y is None:
        n_y = 1
    if d.ndim == 2:
        # Trim so each axis divides evenly, then average n_x * n_y groups.
        d = d[:int(d.shape[0] // n_x) * n_x, :int(d.shape[1] // n_y) * n_y]
        d = d.reshape((d.shape[0] // n_x, n_x, d.shape[1] // n_y, n_y))
        d = d.mean(axis=3)
        d = d.mean(axis=1)
    elif d.ndim == 1:
        d = d[:int(d.shape[0] // n_x) * n_x]
        d = d.reshape((d.shape[0] // n_x, n_x))
        d = d.mean(axis=1)
    else:
        raise RuntimeError("Only NDIM <= 2 supported")
    return d
"resource": ""
} |
def unpack(data, nbit):
    """Expand packed nbit-wide samples into 8-bit values.

    Args:
        data (np.array): uint8/int8 array of packed samples
        nbit (int): bits per sample; must divide 8 and be <= 8

    Notes: Pretty sure this function is a little broken!
    """
    # Validate arguments before dispatching to the unpackers.
    if nbit > 8:
        raise ValueError("unpack: nbit must be <= 8")
    if 8 % nbit != 0:
        raise ValueError("unpack: nbit must divide into 8")
    if data.dtype not in (np.uint8, np.int8):
        raise TypeError("unpack: dtype must be 8-bit")
    # 8-bit data is already in the target width; return it untouched.
    if nbit == 8:
        return data
    if nbit == 4:
        return unpack_4to8(data)
    if nbit == 2:
        return unpack_2to8(data)
    if nbit == 1:
        return unpack_1to8(data)
"resource": ""
} |
def get_diff(dio_cross, feedtype, **kwargs):
    '''
    Returns ON-OFF for all Stokes parameters given a cross_pols noise diode measurement
    '''
    # Load the noise diode observation and pull out what foldcal needs.
    wf = Waterfall(dio_cross, max_load=150)
    freqs = wf.populate_freqs()
    tsamp = wf.header['tsamp']
    data = wf.data
    wf = None
    I, Q, U, V = get_stokes(data, feedtype)
    # Fold each Stokes parameter on the diode cycle to get OFF/ON spectra.
    I_OFF, I_ON = foldcal(I, tsamp, **kwargs)
    Q_OFF, Q_ON = foldcal(Q, tsamp, **kwargs)
    U_OFF, U_ON = foldcal(U, tsamp, **kwargs)
    V_OFF, V_ON = foldcal(V, tsamp, **kwargs)
    # ON-OFF isolates the diode's contribution in each parameter.
    return I_ON - I_OFF, Q_ON - Q_OFF, U_ON - U_OFF, V_ON - V_OFF, freqs
"resource": ""
} |
def plot_Stokes_diode(dio_cross, diff=True, feedtype='l', **kwargs):
    '''
    Plots the uncalibrated full stokes spectrum of the noise diode.
    Use diff=False to plot both ON and OFF, or diff=True for ON-OFF

    Args:
        dio_cross (str): path to the cross-pols noise diode filterbank file
        diff (bool): plot ON-OFF difference (True) or both ON and OFF (False)
        feedtype (str): 'l' for linear feeds, 'c' for circular feeds
        kwargs: passed through to the folding routines
    '''
    # If diff=True, get ON-OFF. If not get ON and OFF separately.
    if diff==True:
        Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
    else:
        obs = Waterfall(dio_cross,max_load=150)
        freqs = obs.populate_freqs()
        tsamp = obs.header['tsamp']
        data = obs.data
        I,Q,U,V = get_stokes(data,feedtype)
        # Fold on the diode cycle to separate diode-ON and diode-OFF spectra.
        I_OFF,I_ON = foldcal(I,tsamp,**kwargs)
        Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)
        U_OFF,U_ON = foldcal(U,tsamp,**kwargs)
        V_OFF,V_ON = foldcal(V,tsamp,**kwargs)
    # Plot spectra (solid = ON or ON-OFF, dashed = OFF).
    if diff==True:
        plt.plot(freqs,Idiff,'k-',label='I')
        plt.plot(freqs,Qdiff,'r-',label='Q')
        plt.plot(freqs,Udiff,'g-',label='U')
        plt.plot(freqs,Vdiff,'m-',label='V')
    else:
        plt.plot(freqs,I_ON,'k-',label='I ON')
        plt.plot(freqs,I_OFF,'k--',label='I OFF')
        plt.plot(freqs,Q_ON,'r-',label='Q ON')
        plt.plot(freqs,Q_OFF,'r--',label='Q OFF')
        plt.plot(freqs,U_ON,'g-',label='U ON')
        plt.plot(freqs,U_OFF,'g--',label='U OFF')
        plt.plot(freqs,V_ON,'m-',label='V ON')
        plt.plot(freqs,V_OFF,'m--',label='V OFF')
    plt.legend()
    plt.xlabel('Frequency (MHz)')
    plt.title('Uncalibrated Full Stokes Noise Diode Spectrum')
    plt.ylabel('Power (Counts)')
"resource": ""
} |
def plot_calibrated_diode(dio_cross, chan_per_coarse=8, feedtype='l', **kwargs):
    '''
    Plots the corrected noise diode spectrum for a given noise diode measurement
    after application of the inverse Mueller matrix for the electronics chain.

    Args:
        dio_cross (str): path to the cross-pols noise diode filterbank file
        chan_per_coarse (int): fine channels per coarse channel
        feedtype (str): 'l' for linear feeds, 'c' for circular feeds
        kwargs: passed through to the folding routines
    '''
    # Get full stokes data for the ND observation.
    obs = Waterfall(dio_cross,max_load=150)
    freqs = obs.populate_freqs()
    tsamp = obs.header['tsamp']
    data = obs.data
    obs = None
    I,Q,U,V = get_stokes(data,feedtype)
    data = None
    # Calculate Mueller Matrix variables for each coarse channel.
    psis = phase_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs)
    G = gain_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs)
    # Apply the Mueller matrix to original noise diode data and refold.
    I,Q,U,V = apply_Mueller(I,Q,U,V,G,psis,chan_per_coarse,feedtype)
    I_OFF,I_ON = foldcal(I,tsamp,**kwargs)
    Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)
    U_OFF,U_ON = foldcal(U,tsamp,**kwargs)
    V_OFF,V_ON = foldcal(V,tsamp,**kwargs)
    # Delete data arrays for space.
    I = None
    Q = None
    U = None
    V = None
    # Plot new ON-OFF spectra.
    plt.plot(freqs,I_ON-I_OFF,'k-',label='I')
    plt.plot(freqs,Q_ON-Q_OFF,'r-',label='Q')
    plt.plot(freqs,U_ON-U_OFF,'g-',label='U')
    plt.plot(freqs,V_ON-V_OFF,'m-',label='V')
    plt.legend()
    plt.xlabel('Frequency (MHz)')
    plt.title('Calibrated Full Stokes Noise Diode Spectrum')
    plt.ylabel('Power (Counts)')
"resource": ""
} |
def plot_gain_offsets(dio_cross, dio_chan_per_coarse=8, feedtype='l', ax1=None, ax2=None, legend=True, **kwargs):
    '''
    Plots the calculated gain offsets of each coarse channel along with
    the time averaged power spectra of the X and Y feeds

    Args:
        dio_cross (str): path to the cross-pols noise diode filterbank file
        dio_chan_per_coarse (int): fine channels per coarse channel
        feedtype (str): 'l' for linear feeds, 'c' for circular feeds
        ax1, ax2 (matplotlib axes): optional axes for the two panels
        legend (bool): draw a legend on the spectrum panel

    NOTE(review): when ax1 is None but ax2 is given, the 'sharex=axG'
    reference below hits an unassigned name (axG is only set in the
    ax1 is not None branch) -- confirm both axes are always passed together.
    '''
    # Get ON-OFF ND spectra.
    Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
    obs = Waterfall(dio_cross,max_load=150)
    tsamp = obs.header['tsamp']
    data = obs.data
    obs = None
    I,Q,U,V = get_stokes(data,feedtype)
    # Get gain offsets per coarse channel and the matching coarse frequencies.
    coarse_G = gain_offsets(I,Q,U,V,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
    coarse_freqs = convert_to_coarse(freqs,dio_chan_per_coarse)
    # Get X and Y spectra for the noise diode ON and OFF.
    # If using circular feeds these correspond to LL and RR.
    XX_OFF,XX_ON = foldcal(np.expand_dims(data[:,0,:],axis=1),tsamp,**kwargs)
    YY_OFF,YY_ON = foldcal(np.expand_dims(data[:,1,:],axis=1),tsamp,**kwargs)
    if ax1==None:
        plt.subplot(211)
    else:
        axG = plt.axes(ax1)
        plt.setp(axG.get_xticklabels(),visible=False)
    plt.plot(coarse_freqs,coarse_G,'ko',markersize=2)
    plt.ylabel(r'$\frac{\Delta G}{2}$',rotation=90)
    if feedtype=='l':
        plt.title('XY Gain Difference')
    if feedtype=='c':
        plt.title('LR Gain Difference')
    plt.grid(True)
    if ax2==None:
        plt.subplot(212)
    else:
        axXY = plt.axes(ax2,sharex=axG)
    if feedtype=='l':
        plt.plot(freqs,XX_OFF,'b-',label='XX')
        plt.plot(freqs,YY_OFF,'r-',label='YY')
    if feedtype=='c':
        plt.plot(freqs,XX_OFF,'b-',label='LL')
        plt.plot(freqs,YY_OFF,'r-',label='RR')
    plt.xlabel('Frequency (MHz)')
    plt.ylabel('Power (Counts)')
    if legend==True:
        plt.legend()
"resource": ""
} |
def open_file(filename, f_start=None, f_stop=None, t_start=None, t_stop=None, load_data=True, max_load=1.):
    """Open a HDF5 or filterbank file

    Returns instance of a Reader to read data from file.

    ================== ==================================================
    Filename extension File type
    ================== ==================================================
    h5, hdf5           HDF5 format
    fil                fil format
    *other*            Will raise NotImplementedError
    ================== ==================================================
    """
    # Bugfix: expand '~' and environment variables *before* testing
    # existence; previously paths such as '~/data.fil' failed the
    # isfile() check even though the file exists.
    filename = os.path.expandvars(os.path.expanduser(filename))
    if not os.path.isfile(filename):
        raise IOError("No such file or directory: " + filename)
    # Decide by content rather than extension: h5py sniffs the HDF5
    # signature and sigproc sniffs a filterbank header.
    if h5py.is_hdf5(filename):
        # Open HDF5 file
        return H5Reader(filename, f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop,
                        load_data=load_data, max_load=max_load)
    elif sigproc.is_filterbank(filename):
        # Open FIL file
        return FilReader(filename, f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop,
                         load_data=load_data, max_load=max_load)
    else:
        raise NotImplementedError('Cannot open this type of file with Waterfall')
"resource": ""
} |
def _setup_selection_range(self, f_start=None, f_stop=None, t_start=None, t_stop=None, init=False):
    """Making sure the selection if time and frequency are within the file limits.

    Clamps t_start/t_stop to [t_begin, t_end] and f_start/f_stop to
    [f_begin, f_end], warns when a given value is invalid, and finally
    refreshes self.selection_shape.

    Args:
        f_start (float): requested start frequency (MHz)
        f_stop (float): requested stop frequency (MHz)
        t_start (int): requested first integration
        t_stop (int): requested stop integration
        init (bool): If call during __init__ (missing values default to the
            file limits rather than the previous selection)
    """
    # This avoids resetting values: on init, fall back to the file limits;
    # afterwards, fall back to the current selection.
    if init is True:
        if t_start is None:
            t_start = self.t_begin
        if t_stop is None:
            t_stop = self.t_end
        if f_start is None:
            f_start = self.f_begin
        if f_stop is None:
            f_stop = self.f_end
    else:
        if f_start is None:
            f_start = self.f_start
        if f_stop is None:
            f_stop = self.f_stop
        if t_start is None:
            t_start = self.t_start
        if t_stop is None:
            t_stop = self.t_stop
    # By now, all values start/stop are populated.
    # Swap reversed ranges rather than rejecting them.
    if t_stop >= 0 and t_start >= 0 and t_stop < t_start:
        t_stop, t_start = t_start, t_stop
        logger.warning('Given t_stop < t_start, assuming reversed values.')
    if f_stop and f_start and f_stop < f_start:
        f_stop, f_start = f_start, f_stop
        logger.warning('Given f_stop < f_start, assuming reversed values.')
    # Clamp each bound to the file limits, warning when an explicit value
    # was out of range (silent only when defaulting during init).
    if t_start >= self.t_begin and t_start < self.t_end:
        self.t_start = int(t_start)
    else:
        if init is False or t_start != None:
            logger.warning('Setting t_start = %f, since t_start not given or not valid.'%self.t_begin)
        self.t_start = self.t_begin
    if t_stop <= self.t_end and t_stop > self.t_begin:
        self.t_stop = int(t_stop)
    else:
        if init is False or t_stop:
            logger.warning('Setting t_stop = %f, since t_stop not given or not valid.'%self.t_end)
        self.t_stop = self.t_end
    if f_start >= self.f_begin and f_start < self.f_end:
        self.f_start = f_start
    else:
        if init is False or f_start:
            logger.warning('Setting f_start = %f, since f_start not given or not valid.'%self.f_begin)
        self.f_start = self.f_begin
    if f_stop <= self.f_end and f_stop > self.f_begin:
        self.f_stop = f_stop
    else:
        if init is False or f_stop:
            logger.warning('Setting f_stop = %f, since f_stop not given or not valid.'%self.f_end)
        self.f_stop = self.f_end
    # Now we have setup bounds, we can calculate shape of selection.
    self.selection_shape = self._calc_selection_shape()
"resource": ""
} |
def _calc_selection_size(self):
    """Return the size, in bytes, of the current time/frequency selection."""
    # integrations requested * channels requested * bytes per sample
    n_ints = self.t_stop - self.t_start
    n_chan = (self.f_stop - self.f_start) / abs(self.header[b'foff'])
    return int(n_ints * n_chan * self._n_bytes)
"resource": ""
} |
def _calc_selection_shape(self):
    """Return the (n_ints, n_ifs, n_chan) shape of the current selection."""
    n_ints = int(self.t_stop - self.t_start)
    # Round to the nearest whole channel before truncating to int.
    n_chan = int(np.round((self.f_stop - self.f_start) / abs(self.header[b'foff'])))
    return (n_ints, int(self.header[b'nifs']), n_chan)
"resource": ""
} |
def _setup_chans(self):
    """Compute self.chan_start_idx / self.chan_stop_idx (stored ascending)
    from the currently selected frequency range.
    """
    # The reference frequency is whichever band edge channel 0 maps to,
    # which depends on the sign of foff.
    if self.header[b'foff'] < 0:
        f0 = self.f_end
    else:
        f0 = self.f_begin
    i_start, i_stop = 0, self.n_channels_in_file
    if self.f_start:
        i_start = np.round((self.f_start - f0) / self.header[b'foff'])
    if self.f_stop:
        i_stop = np.round((self.f_stop - f0) / self.header[b'foff'])
    # Calculate closest true index value.
    # Bugfix: use builtin int() -- np.int was removed in NumPy 1.20+.
    chan_start_idx = int(i_start)
    chan_stop_idx = int(i_stop)
    # With negative foff the indexes come out reversed; store ascending.
    if chan_stop_idx < chan_start_idx:
        chan_stop_idx, chan_start_idx = chan_start_idx, chan_stop_idx
    self.chan_start_idx = chan_start_idx
    self.chan_stop_idx = chan_stop_idx
"resource": ""
} |
def _setup_freqs(self):
    """Refresh self.f_start / self.f_stop from the channel index borders."""
    chan_bw = abs(self.header[b'foff'])
    if self.header[b'foff'] > 0:
        # Ascending band: offsets measured up from the low edge.
        self.f_start = self.f_begin + self.chan_start_idx * chan_bw
        self.f_stop = self.f_begin + self.chan_stop_idx * chan_bw
    else:
        # Descending band: offsets measured down from the high edge.
        self.f_start = self.f_end - self.chan_stop_idx * chan_bw
        self.f_stop = self.f_end - self.chan_start_idx * chan_bw
"resource": ""
} |
def populate_timestamps(self, update_header=False):
    """ Return the MJD time axis for the current selection.

    If update_header is True, return only the MJD of the first selected
    sample (a scalar) instead of the full array.
    """
    # Default to the full range of integrations in the file.
    sel_start, sel_stop = 0, self.n_ints_in_file
    if self.t_start:
        sel_start = self.t_start
    if self.t_stop:
        sel_stop = self.t_stop
    # Convert the sample period (seconds) to days, offset from tstart (MJD).
    mjd0 = self.header[b'tstart']
    step = self.header[b'tsamp']
    if update_header:
        return sel_start * step / 24. / 60. / 60. + mjd0
    return np.arange(sel_start, sel_stop) * step / 24. / 60. / 60. + mjd0
"resource": ""
} |
def populate_freqs(self):
    """
    Return the frequency (MHz) of every channel in the current selection.
    """
    # Channel 0 maps to the high edge when the band is descending (foff < 0).
    if self.header[b'foff'] < 0:
        f0 = self.f_end
    else:
        f0 = self.f_begin
    # Refresh channel borders before building the axis.
    self._setup_chans()
    chan_idx = np.arange(self.chan_start_idx, self.chan_stop_idx)
    return self.header[b'foff'] * chan_idx + f0
"resource": ""
} |
def calc_n_coarse_chan(self, chan_bw=None):
    """ This makes an attempt to calculate the number of coarse channels in a given file.

    Note:
        This is unlikely to work on non-Breakthrough Listen data, as a-priori knowledge of
        the digitizer system is required.
    """
    nchans = int(self.header[b'nchans'])
    bandwidth = abs(self.f_stop - self.f_start)
    # Caller supplied the coarse channel bandwidth directly.
    if chan_bw is not None:
        return int(bandwidth / chan_bw)
    # Do we have a file with enough channels that it has coarse channelization?
    if nchans >= 2**20:
        # The common FFT length of 2^20 should divide through without a
        # remainder for most GBT and all Parkes hires data.
        if nchans % 2**20 == 0:
            return nchans // 2**20
        # Early GBT data has non-2^N FFT length; fall back to the known
        # GBT coarse channel bandwidth.
        if self.header[b'telescope_id'] == 6:
            return int(bandwidth / 2.9296875)
        logger.warning("Couldn't figure out n_coarse_chan")
        return None
    # Non-hires GBT data.
    if self.header[b'telescope_id'] == 6:
        return int(bandwidth / 2.9296875)
    logger.warning("This function currently only works for hires BL Parkes or GBT data.")
    return None
"resource": ""
} |
def calc_n_blobs(self, blob_dim):
    """ Return how many blobs of shape blob_dim fit in the data selection. """
    selection_elems = np.prod(self.selection_shape)
    blob_elems = np.prod(blob_dim)
    # Round up: a partial blob still counts as one read.
    return int(np.ceil(1.0 * selection_elems / blob_elems))
"resource": ""
} |
def isheavy(self):
    """ Return True when the current selection exceeds MAX_DATA_ARRAY_SIZE. """
    return self._calc_selection_size() > self.MAX_DATA_ARRAY_SIZE
"resource": ""
} |
def read_data(self, f_start=None, f_stop=None, t_start=None, t_stop=None):
    """ Read data for the given time/frequency selection into self.data.

    Args:
        f_start (float): start frequency, in MHz
        f_stop (float): stop frequency, in MHz
        t_start (int): first integration to read
        t_stop (int): stop integration (exclusive)

    If the selection exceeds MAX_DATA_ARRAY_SIZE, self.data is left as a
    one-element placeholder and a warning is logged.
    """
    self._setup_selection_range(f_start=f_start, f_stop=f_stop, t_start=t_start, t_stop=t_stop)
    # Check if selection is small enough.
    if self.isheavy():
        logger.warning("Selection size of %.2f GB, exceeding our size limit %.2f GB. Instance created, "
                       "header loaded, but data not loaded, please try another (t,v) selection." % (self._calc_selection_size() / (1024. ** 3), self.MAX_DATA_ARRAY_SIZE / (1024. ** 3)))
        self.data = np.array([0], dtype=self._d_type)
        return None
    # Convert input frequencies into what their corresponding channel number would be.
    self._setup_chans()
    # Update frequencies ranges from channel number.
    self._setup_freqs()
    n_chans = self.header[b'nchans']
    n_chans_selected = self.selection_shape[self.freq_axis]
    n_ifs = self.header[b'nifs']
    # Load binary data: skip past the sigproc header.
    f = open(self.filename, 'rb')
    f.seek(int(self.idx_data))
    # now check to see how many integrations requested
    n_ints = self.t_stop - self.t_start
    # Seek to first integration (whence=1: relative to current position).
    f.seek(int(self.t_start * self._n_bytes * n_ifs * n_chans), 1)
    # Loading data: for every integration/IF, skip the channels below the
    # selection, read the selected channels, then skip to the next block.
    self.data = np.zeros((n_ints, n_ifs, n_chans_selected), dtype=self._d_type)
    for ii in range(n_ints):
        for jj in range(n_ifs):
            f.seek(int(self._n_bytes * self.chan_start_idx), 1)  # 1 = from current location
            dd = np.fromfile(f, count=n_chans_selected, dtype=self._d_type)
            # Reverse array if frequency axis is flipped
            # if self.header[b'foff'] < 0:
            #     dd = dd[::-1]
            self.data[ii, jj] = dd
            f.seek(int(self._n_bytes * (n_chans - self.chan_stop_idx)), 1)
"resource": ""
} |
q273569 | FilReader.read_all | test | def read_all(self,reverse=True):
""" read all the data.
If reverse=True the x axis is flipped.
"""
raise NotImplementedError('To be implemented')
# go to start of the data
self.filfile.seek(int(self.datastart))
# read data into 2-D numpy array
# data=np.fromfile(self.filfile,dtype=self.dtype).reshape(self.channels,self.blocksize,order='F')
data=np.fromfile(self.filfile,dtype=self.dtype).reshape(self.blocksize, self.channels)
if reverse:
data = data[:,::-1]
return data | python | {
"resource": ""
} |
q273570 | FilReader.read_row | test | def read_row(self,rownumber,reverse=True):
""" Read a block of data. The number of samples per row is set in self.channels
If reverse=True the x axis is flipped.
"""
raise NotImplementedError('To be implemented')
# go to start of the row
self.filfile.seek(int(self.datastart+self.channels*rownumber*(int(self.nbits/8))))
# read data into 2-D numpy array
data=np.fromfile(self.filfile,count=self.channels,dtype=self.dtype).reshape(1, self.channels)
if reverse:
data = data[:,::-1]
return data | python | {
"resource": ""
} |
q273571 | Waterfall.read_data | test | def read_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None):
""" Reads data selection if small enough.
"""
self.container.read_data(f_start=f_start, f_stop=f_stop,t_start=t_start, t_stop=t_stop)
self.__load_data() | python | {
"resource": ""
} |
q273572 | Waterfall.__update_header | test | def __update_header(self):
""" Updates the header information from the original file to the selection.
"""
#Updating frequency of first channel from selection
if self.header[b'foff'] < 0:
self.header[b'fch1'] = self.container.f_stop
else:
self.header[b'fch1'] = self.container.f_start
#Updating number of coarse channels.
self.header[b'nchans'] = self.container.selection_shape[self.freq_axis]
#Updating time stamp for first time bin from selection
self.header[b'tstart'] = self.container.populate_timestamps(update_header=True) | python | {
"resource": ""
} |
q273573 | Waterfall.info | test | def info(self):
""" Print header information and other derived information. """
print("\n--- File Info ---")
for key, val in self.file_header.items():
if key == 'src_raj':
val = val.to_string(unit=u.hour, sep=':')
if key == 'src_dej':
val = val.to_string(unit=u.deg, sep=':')
print("%16s : %32s" % (key, val))
print("\n%16s : %32s" % ("Num ints in file", self.n_ints_in_file))
print("%16s : %32s" % ("File shape", self.file_shape))
print("--- Selection Info ---")
print("%16s : %32s" % ("Data selection shape", self.selection_shape))
print("%16s : %32s" % ("Minimum freq (MHz)", self.container.f_start))
print("%16s : %32s" % ("Maximum freq (MHz)", self.container.f_stop)) | python | {
"resource": ""
} |
q273574 | Waterfall.write_to_fil | test | def write_to_fil(self, filename_out, *args, **kwargs):
""" Write data to .fil file.
It check the file size then decides how to write the file.
Args:
filename_out (str): Name of output file
"""
#For timing how long it takes to write a file.
t0 = time.time()
#Update header
self.__update_header()
if self.container.isheavy():
self.__write_to_fil_heavy(filename_out)
else:
self.__write_to_fil_light(filename_out)
t1 = time.time()
logger.info('Conversion time: %2.2fsec' % (t1- t0)) | python | {
"resource": ""
} |
q273575 | Waterfall.write_to_hdf5 | test | def write_to_hdf5(self, filename_out, *args, **kwargs):
""" Write data to HDF5 file.
It check the file size then decides how to write the file.
Args:
filename_out (str): Name of output file
"""
#For timing how long it takes to write a file.
t0 = time.time()
#Update header
self.__update_header()
if self.container.isheavy():
self.__write_to_hdf5_heavy(filename_out)
else:
self.__write_to_hdf5_light(filename_out)
t1 = time.time()
logger.info('Conversion time: %2.2fsec' % (t1- t0)) | python | {
"resource": ""
} |
q273576 | Waterfall.__write_to_hdf5_light | test | def __write_to_hdf5_light(self, filename_out, *args, **kwargs):
""" Write data to HDF5 file in one go.
Args:
filename_out (str): Name of output file
"""
block_size = 0
with h5py.File(filename_out, 'w') as h5:
h5.attrs[b'CLASS'] = b'FILTERBANK'
h5.attrs[b'VERSION'] = b'1.0'
if HAS_BITSHUFFLE:
bs_compression = bitshuffle.h5.H5FILTER
bs_compression_opts = (block_size, bitshuffle.h5.H5_COMPRESS_LZ4)
else:
bs_compression = None
bs_compression_opts = None
logger.warning("Warning: bitshuffle not found. No compression applied.")
dset = h5.create_dataset('data',
data=self.data,
# compression='lzf')
compression=bs_compression,
compression_opts=bs_compression_opts)
dset_mask = h5.create_dataset('mask',
shape=self.file_shape,
# compression='lzf',
compression=bs_compression,
compression_opts=bs_compression_opts,
dtype='uint8')
dset.dims[0].label = b"frequency"
dset.dims[1].label = b"feed_id"
dset.dims[2].label = b"time"
dset_mask.dims[0].label = b"frequency"
dset_mask.dims[1].label = b"feed_id"
dset_mask.dims[2].label = b"time"
# Copy over header information as attributes
for key, value in self.header.items():
dset.attrs[key] = value | python | {
"resource": ""
} |
q273577 | Waterfall.__get_blob_dimensions | test | def __get_blob_dimensions(self, chunk_dim):
""" Sets the blob dimmentions, trying to read around 1024 MiB at a time.
This is assuming a chunk is about 1 MiB.
"""
#Taking the size into consideration, but avoiding having multiple blobs within a single time bin.
if self.selection_shape[self.freq_axis] > chunk_dim[self.freq_axis]*MAX_BLOB_MB:
freq_axis_size = self.selection_shape[self.freq_axis]
# while freq_axis_size > chunk_dim[self.freq_axis]*MAX_BLOB_MB:
# freq_axis_size /= 2
time_axis_size = 1
else:
freq_axis_size = self.selection_shape[self.freq_axis]
time_axis_size = np.min([chunk_dim[self.time_axis] * MAX_BLOB_MB * chunk_dim[self.freq_axis] / freq_axis_size, self.selection_shape[self.time_axis]])
blob_dim = (int(time_axis_size), 1, freq_axis_size)
return blob_dim | python | {
"resource": ""
} |
q273578 | Waterfall.__get_chunk_dimensions | test | def __get_chunk_dimensions(self):
""" Sets the chunking dimmentions depending on the file type.
"""
#Usually '.0000.' is in self.filename
if np.abs(self.header[b'foff']) < 1e-5:
logger.info('Detecting high frequency resolution data.')
chunk_dim = (1,1,1048576) #1048576 is the number of channels in a coarse channel.
return chunk_dim
#Usually '.0001.' is in self.filename
elif np.abs(self.header[b'tsamp']) < 1e-3:
logger.info('Detecting high time resolution data.')
chunk_dim = (2048,1,512) #512 is the total number of channels per single band (ie. blc00)
return chunk_dim
#Usually '.0002.' is in self.filename
elif np.abs(self.header[b'foff']) < 1e-2 and np.abs(self.header[b'foff']) >= 1e-5:
logger.info('Detecting intermediate frequency and time resolution data.')
chunk_dim = (10,1,65536) #65536 is the total number of channels per single band (ie. blc00)
# chunk_dim = (1,1,65536/4)
return chunk_dim
else:
logger.warning('File format not known. Will use minimum chunking. NOT OPTIMAL.')
chunk_dim = (1,1,512)
return chunk_dim | python | {
"resource": ""
} |
q273579 | Waterfall.grab_data | test | def grab_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None, if_id=0):
""" Extract a portion of data by frequency range.
Args:
f_start (float): start frequency in MHz
f_stop (float): stop frequency in MHz
if_id (int): IF input identification (req. when multiple IFs in file)
Returns:
(freqs, data) (np.arrays): frequency axis in MHz and data subset
"""
self.freqs = self.populate_freqs()
self.timestamps = self.populate_timestamps()
if f_start is None:
f_start = self.freqs[0]
if f_stop is None:
f_stop = self.freqs[-1]
i0 = np.argmin(np.abs(self.freqs - f_start))
i1 = np.argmin(np.abs(self.freqs - f_stop))
if i0 < i1:
plot_f = self.freqs[i0:i1 + 1]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i0:i1 + 1])
else:
plot_f = self.freqs[i1:i0 + 1]
plot_data = np.squeeze(self.data[t_start:t_stop, ..., i1:i0 + 1])
return plot_f, plot_data | python | {
"resource": ""
} |
q273580 | cmd_tool | test | def cmd_tool(args=None):
""" Command line tool for plotting and viewing info on guppi raw files """
from argparse import ArgumentParser
parser = ArgumentParser(description="Command line utility for creating spectra from GuppiRaw files.")
parser.add_argument('filename', type=str, help='Name of file to read')
parser.add_argument('-o', dest='outdir', type=str, default='./', help='output directory for PNG files')
args = parser.parse_args()
r = GuppiRaw(args.filename)
r.print_stats()
bname = os.path.splitext(os.path.basename(args.filename))[0]
bname = os.path.join(args.outdir, bname)
r.plot_histogram(filename="%s_hist.png" % bname)
r.plot_spectrum(filename="%s_spec.png" % bname) | python | {
"resource": ""
} |
q273581 | GuppiRaw.read_first_header | test | def read_first_header(self):
""" Read first header in file
Returns:
header (dict): keyword:value pairs of header metadata
"""
self.file_obj.seek(0)
header_dict, pos = self.read_header()
self.file_obj.seek(0)
return header_dict | python | {
"resource": ""
} |
q273582 | GuppiRaw.find_n_data_blocks | test | def find_n_data_blocks(self):
""" Seek through the file to find how many data blocks there are in the file
Returns:
n_blocks (int): number of data blocks in the file
"""
self.file_obj.seek(0)
header0, data_idx0 = self.read_header()
self.file_obj.seek(data_idx0)
block_size = int(header0['BLOCSIZE'])
n_bits = int(header0['NBITS'])
self.file_obj.seek(int(header0['BLOCSIZE']), 1)
n_blocks = 1
end_found = False
while not end_found:
try:
header, data_idx = self.read_header()
self.file_obj.seek(data_idx)
self.file_obj.seek(header['BLOCSIZE'], 1)
n_blocks += 1
except EndOfFileError:
end_found = True
break
self.file_obj.seek(0)
return n_blocks | python | {
"resource": ""
} |
q273583 | GuppiRaw.print_stats | test | def print_stats(self):
""" Compute some basic stats on the next block of data """
header, data = self.read_next_data_block()
data = data.view('float32')
print("AVG: %2.3f" % data.mean())
print("STD: %2.3f" % data.std())
print("MAX: %2.3f" % data.max())
print("MIN: %2.3f" % data.min())
import pylab as plt | python | {
"resource": ""
} |
q273584 | GuppiRaw.plot_histogram | test | def plot_histogram(self, filename=None):
""" Plot a histogram of data values """
header, data = self.read_next_data_block()
data = data.view('float32')
plt.figure("Histogram")
plt.hist(data.flatten(), 65, facecolor='#cc0000')
if filename:
plt.savefig(filename)
plt.show() | python | {
"resource": ""
} |
q273585 | GuppiRaw.generate_filterbank_header | test | def generate_filterbank_header(self, nchans=1, ):
""" Generate a blimpy header dictionary """
gp_head = self.read_first_header()
fb_head = {}
telescope_str = gp_head.get("TELESCOP", "unknown")
if telescope_str in ('GBT', 'GREENBANK'):
fb_head["telescope_id"] = 6
elif telescope_str in ('PKS', 'PARKES'):
fb_head["telescop_id"] = 7
else:
fb_head["telescop_id"] = 0
# Using .get() method allows us to fill in default values if not present
fb_head["source_name"] = gp_head.get("SRC_NAME", "unknown")
fb_head["az_start"] = gp_head.get("AZ", 0)
fb_head["za_start"] = gp_head.get("ZA", 0)
fb_head["src_raj"] = Angle(str(gp_head.get("RA", 0.0)) + "hr")
fb_head["src_dej"] = Angle(str(gp_head.get("DEC", 0.0)) + "deg")
fb_head["rawdatafile"] = self.filename
# hardcoded
fb_head["machine_id"] = 20
fb_head["data_type"] = 1 # blio datatype
fb_head["barycentric"] = 0
fb_head["pulsarcentric"] = 0
fb_head["nbits"] = 32
# TODO - compute these values. Need to figure out the correct calcs
fb_head["tstart"] = 0.0
fb_head["tsamp"] = 1.0
fb_head["fch1"] = 0.0
fb_head["foff"] = 187.5 / nchans
# Need to be updated based on output specs
fb_head["nchans"] = nchans
fb_head["nifs"] = 1
fb_head["nbeams"] = 1
return fb_head | python | {
"resource": ""
} |
q273586 | find_header_size | test | def find_header_size(filename):
''' Script to find the header size of a filterbank file'''
# open datafile
filfile=open(filename,'rb')
# go to the start of the file
filfile.seek(0)
#read some region larger than the header.
round1 = filfile.read(1000)
headersize = round1.find('HEADER_END')+len('HEADER_END')
return headersize | python | {
"resource": ""
} |
q273587 | cmd_tool | test | def cmd_tool(args=None):
""" Command line tool to make a md5sum comparison of two .fil files. """
if 'bl' in local_host:
header_loc = '/usr/local/sigproc/bin/header' #Current location of header command in GBT.
else:
raise IOError('Script only able to run in BL systems.')
p = OptionParser()
p.set_usage('matchfils <FIL_FILE1> <FIL_FILE2>')
opts, args = p.parse_args(sys.argv[1:])
file1 = args[0]
file2 = args[1]
#------------------------------------
#Create batch script
make_batch_script()
#------------------------------------
#First checksum
headersize1 = find_header_size(file1)
file_size1 = os.path.getsize(file1)
#Strip header from file, and calculate the md5sum of the rest.
#command=['tail','-c',str(file_size1-headersize1),file1,'|','md5sum']
command=['./tail_sum.sh',file1,str(file_size1-headersize1)]
print('[matchfils] '+' '.join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
check_sum1 = out.split()[0]
print('[matchfils] Checksum is:', check_sum1)
if err:
raise Error('There is an error.')
#---
out,err = reset_outs()
command=[header_loc,file1]
print('[matchfils] Header information:')
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
header1 = out
print(header1)
#------------------------------------
#Second checksum
out,err = reset_outs()
headersize2 = find_header_size(file2)
file_size2 = os.path.getsize(file2)
#Strip header from file, and calculate the md5sum of the rest.
command=['./tail_sum.sh',file2,str(file_size2-headersize2)]
print('[matchfils] '+' '.join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
check_sum2 = out.split()[0]
print('[matchfils] Checksum is:', check_sum2)
if err:
raise Error('There is an error.')
#---
out,err = reset_outs()
command=[header_loc,file2]
print('[matchfils] Header information:')
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
header2 = out
print(header2)
#------------------------------------
#check the checksums
if check_sum1 != check_sum2:
print('[matchfils] Booo! Checksum does not match between files.')
else:
print('[matchfils] Hooray! Checksum matches between files.')
#------------------------------------
#Remove batch script
os.remove('tail_sum.sh') | python | {
"resource": ""
} |
q273588 | cmd_tool | test | def cmd_tool(args=None):
""" Command line tool for converting guppi raw into HDF5 versions of guppi raw """
from argparse import ArgumentParser
if not HAS_BITSHUFFLE:
print("Error: the bitshuffle library is required to run this script.")
exit()
parser = ArgumentParser(description="Command line utility for creating HDF5 Raw files.")
parser.add_argument('filename', type=str, help='Name of filename to read')
args = parser.parse_args()
fileroot = args.filename.split('.0000.raw')[0]
filelist = glob.glob(fileroot + '*.raw')
filelist = sorted(filelist)
# Read first file
r = GuppiRaw(filelist[0])
header, data = r.read_next_data_block()
dshape = data.shape #r.read_next_data_block_shape()
print(dshape)
n_blocks_total = 0
for filename in filelist:
print(filename)
r = GuppiRaw(filename)
n_blocks_total += r.n_blocks
print(n_blocks_total)
full_dshape = np.concatenate(((n_blocks_total,), dshape))
# Create h5py file
h5 = h5py.File(fileroot + '.h5', 'w')
h5.attrs['CLASS'] = 'GUPPIRAW'
block_size = 0 # This is chunk block size
dset = h5.create_dataset('data',
shape=full_dshape,
#compression=bitshuffle.h5.H5FILTER,
#compression_opts=(block_size, bitshuffle.h5.H5_COMPRESS_LZ4),
dtype=data.dtype)
h5_idx = 0
for filename in filelist:
print("\nReading %s header..." % filename)
r = GuppiRaw(filename)
h5 = h5py.File(filename + '.h5', 'w')
header, data = r.read_next_data_block()
for ii in range(0, r.n_blocks):
t0 = time.time()
print("Reading block %i of %i" % (h5_idx+1, full_dshape[0]))
header, data = r.read_next_data_block()
t1 = time.time()
t2 = time.time()
print("Writing block %i of %i" % (h5_idx+1, full_dshape[0]))
dset[h5_idx, :] = data
t3 = time.time()
print("Read: %2.2fs, Write %2.2fs" % ((t1-t0), (t3-t2)))
h5_idx += 1
# Copy over header information as attributes
for key, value in header.items():
dset.attrs[key] = value
h5.close()
t1 = time.time()
print("Conversion time: %2.2fs" % (t1- t0)) | python | {
"resource": ""
} |
q273589 | foldcal | test | def foldcal(data,tsamp, diode_p=0.04,numsamps=1000,switch=False,inds=False):
'''
Returns time-averaged spectra of the ON and OFF measurements in a
calibrator measurement with flickering noise diode
Parameters
----------
data : 2D Array object (float)
2D dynamic spectrum for data (any Stokes parameter) with flickering noise diode.
tsamp : float
Sampling time of data in seconds
diode_p : float
Period of the flickering noise diode in seconds
numsamps : int
Number of samples over which to average noise diode ON and OFF
switch : boolean
Use switch=True if the noise diode "skips" turning from OFF to ON once or vice versa
inds : boolean
Use inds=True to also return the indexes of the time series where the ND is ON and OFF
'''
halfper = diode_p/2.0
foldt = halfper/tsamp #number of time samples per diode switch
onesec = 1/tsamp #number of time samples in the first second
#Find diode switches in units of time samples and round down to the nearest int
ints = np.arange(0,numsamps)
t_switch = (onesec+ints*foldt)
t_switch = t_switch.astype('int')
ONints = np.array(np.reshape(t_switch[:],(numsamps/2,2)))
ONints[:,0] = ONints[:,0]+1 #Find index ranges of ON time samples
OFFints = np.array(np.reshape(t_switch[1:-1],(numsamps/2-1,2)))
OFFints[:,0] = OFFints[:,0]+1 #Find index ranges of OFF time samples
av_ON = []
av_OFF = []
#Average ON and OFF spectra separately with respect to time
for i in ONints:
if i[1]!=i[0]:
av_ON.append(np.sum(data[i[0]:i[1],:,:],axis=0)/(i[1]-i[0]))
for i in OFFints:
if i[1]!=i[0]:
av_OFF.append(np.sum(data[i[0]:i[1],:,:],axis=0)/(i[1]-i[0]))
#If switch=True, flip the return statement since ON is actually OFF
if switch==False:
if inds==False:
return np.squeeze(np.mean(av_ON,axis=0)), np.squeeze(np.mean(av_OFF,axis=0))
else:
return np.squeeze(np.mean(av_ON,axis=0)), np.squeeze(np.mean(av_OFF,axis=0)),ONints,OFFints
if switch==True:
if inds==False:
return np.squeeze(np.mean(av_OFF,axis=0)), np.squeeze(np.mean(av_ON,axis=0))
else:
return np.squeeze(np.mean(av_OFF,axis=0)), np.squeeze(np.mean(av_ON,axis=0)),OFFints,ONints | python | {
"resource": ""
} |
q273590 | integrate_calib | test | def integrate_calib(name,chan_per_coarse,fullstokes=False,**kwargs):
'''
Folds Stokes I noise diode data and integrates along coarse channels
Parameters
----------
name : str
Path to noise diode filterbank file
chan_per_coarse : int
Number of frequency bins per coarse channel
fullstokes : boolean
Use fullstokes=True if data is in IQUV format or just Stokes I, use fullstokes=False if
it is in cross_pols format
'''
#Load data
obs = Waterfall(name,max_load=150)
data = obs.data
#If the data has cross_pols format calculate Stokes I
if fullstokes==False and data.shape[1]>1:
data = data[:,0,:]+data[:,1,:]
data = np.expand_dims(data,axis=1)
#If the data has IQUV format get Stokes I
if fullstokes==True:
data = data[:,0,:]
data = np.expand_dims(data,axis=1)
tsamp = obs.header['tsamp']
#Calculate ON and OFF values
OFF,ON = foldcal(data,tsamp,**kwargs)
freqs = obs.populate_freqs()
#Find ON and OFF spectra by coarse channel
ON_int = integrate_chans(ON,freqs,chan_per_coarse)
OFF_int = integrate_chans(OFF,freqs,chan_per_coarse)
#If "ON" is actually "OFF" switch them
if np.sum(ON_int)<np.sum(OFF_int):
temp = ON_int
ON_int = OFF_int
OFF_int = temp
#Return coarse channel spectrum of OFF and ON
return OFF_int,ON_int | python | {
"resource": ""
} |
q273591 | get_calfluxes | test | def get_calfluxes(calflux,calfreq,spec_in,centerfreqs,oneflux):
'''
Given properties of the calibrator source, calculate fluxes of the source
in a particular frequency range
Parameters
----------
calflux : float
Known flux of calibrator source at a particular frequency
calfreq : float
Frequency where calibrator source has flux calflux (see above)
spec_in : float
Known power-law spectral index of calibrator source. Use convention flux(frequency) = constant * frequency^(spec_in)
centerfreqs : 1D Array (float)
Central frequency values of each coarse channel
oneflux : boolean
Use oneflux to choose between calculating the flux for each core channel (False)
or using one value for the entire frequency range (True)
'''
const = calflux/np.power(calfreq,spec_in)
if oneflux==False:
return const*np.power(centerfreqs,spec_in)
else:
return const*np.power(np.mean(centerfreqs),spec_in) | python | {
"resource": ""
} |
q273592 | get_centerfreqs | test | def get_centerfreqs(freqs,chan_per_coarse):
'''
Returns central frequency of each coarse channel
Parameters
----------
freqs : 1D Array (float)
Frequency values for each bin of the spectrum
chan_per_coarse: int
Number of frequency bins per coarse channel
'''
num_coarse = freqs.size/chan_per_coarse
freqs = np.reshape(freqs,(num_coarse,chan_per_coarse))
return np.mean(freqs,axis=1) | python | {
"resource": ""
} |
q273593 | f_ratios | test | def f_ratios(calON_obs,calOFF_obs,chan_per_coarse,**kwargs):
'''
Calculate f_ON, and f_OFF as defined in van Straten et al. 2012 equations 2 and 3
Parameters
----------
calON_obs : str
Path to filterbank file (any format) for observation ON the calibrator source
calOFF_obs : str
Path to filterbank file (any format) for observation OFF the calibrator source
'''
#Calculate noise diode ON and noise diode OFF spectra (H and L) for both observations
L_ON,H_ON = integrate_calib(calON_obs,chan_per_coarse,**kwargs)
L_OFF,H_OFF = integrate_calib(calOFF_obs,chan_per_coarse,**kwargs)
f_ON = H_ON/L_ON-1
f_OFF = H_OFF/L_OFF-1
return f_ON, f_OFF | python | {
"resource": ""
} |
q273594 | diode_spec | test | def diode_spec(calON_obs,calOFF_obs,calflux,calfreq,spec_in,average=True,oneflux=False,**kwargs):
'''
Calculate the coarse channel spectrum and system temperature of the noise diode in Jy given two noise diode
measurements ON and OFF the calibrator source with the same frequency and time resolution
Parameters
----------
calON_obs : str
(see f_ratios() above)
calOFF_obs : str
(see f_ratios() above)
calflux : float
Known flux of calibrator source at a particular frequency
calfreq : float
Frequency where calibrator source has flux calflux (see above)
spec_in : float
Known power-law spectral index of calibrator source. Use convention flux(frequency) = constant * frequency^(spec_in)
average : boolean
Use average=True to return noise diode and Tsys spectra averaged over frequencies
'''
#Load frequencies and calculate number of channels per coarse channel
obs = Waterfall(calON_obs,max_load=150)
freqs = obs.populate_freqs()
ncoarse = obs.calc_n_coarse_chan()
nchans = obs.header['nchans']
chan_per_coarse = nchans/ncoarse
f_ON, f_OFF = f_ratios(calON_obs,calOFF_obs,chan_per_coarse,**kwargs)
#Obtain spectrum of the calibrator source for the given frequency range
centerfreqs = get_centerfreqs(freqs,chan_per_coarse)
calfluxes = get_calfluxes(calflux,calfreq,spec_in,centerfreqs,oneflux)
#C_o and Tsys as defined in van Straten et al. 2012
C_o = calfluxes/(1/f_ON-1/f_OFF)
Tsys = C_o/f_OFF
#return coarse channel diode spectrum
if average==True:
return np.mean(C_o),np.mean(Tsys)
else:
return C_o,Tsys | python | {
"resource": ""
} |
q273595 | get_Tsys | test | def get_Tsys(calON_obs,calOFF_obs,calflux,calfreq,spec_in,oneflux=False,**kwargs):
'''
Returns frequency dependent system temperature given observations on and off a calibrator source
Parameters
----------
(See diode_spec())
'''
return diode_spec(calON_obs,calOFF_obs,calflux,calfreq,spec_in,average=False,oneflux=False,**kwargs)[1] | python | {
"resource": ""
} |
q273596 | calibrate_fluxes | test | def calibrate_fluxes(main_obs_name,dio_name,dspec,Tsys,fullstokes=False,**kwargs):
'''
Produce calibrated Stokes I for an observation given a noise diode
measurement on the source and a diode spectrum with the same number of
coarse channels
Parameters
----------
main_obs_name : str
Path to filterbank file containing final data to be calibrated
dio_name : str
Path to filterbank file for observation on the target source with flickering noise diode
dspec : 1D Array (float) or float
Coarse channel spectrum (or average) of the noise diode in Jy (obtained from diode_spec())
Tsys : 1D Array (float) or float
Coarse channel spectrum (or average) of the system temperature in Jy
fullstokes: boolean
Use fullstokes=True if data is in IQUV format or just Stokes I, use fullstokes=False if
it is in cross_pols format
'''
#Find folded spectra of the target source with the noise diode ON and OFF
main_obs = Waterfall(main_obs_name,max_load=150)
ncoarse = main_obs.calc_n_coarse_chan()
dio_obs = Waterfall(dio_name,max_load=150)
dio_chan_per_coarse = dio_obs.header['nchans']/ncoarse
dOFF,dON = integrate_calib(dio_name,dio_chan_per_coarse,fullstokes,**kwargs)
#Find Jy/count for each coarse channel using the diode spectrum
main_dat = main_obs.data
scale_facs = dspec/(dON-dOFF)
print(scale_facs)
nchans = main_obs.header['nchans']
obs_chan_per_coarse = nchans/ncoarse
ax0_size = np.size(main_dat,0)
ax1_size = np.size(main_dat,1)
#Reshape data array of target observation and multiply coarse channels by the scale factors
main_dat = np.reshape(main_dat,(ax0_size,ax1_size,ncoarse,obs_chan_per_coarse))
main_dat = np.swapaxes(main_dat,2,3)
main_dat = main_dat*scale_facs
main_dat = main_dat-Tsys
main_dat = np.swapaxes(main_dat,2,3)
main_dat = np.reshape(main_dat,(ax0_size,ax1_size,nchans))
#Write calibrated data to a new filterbank file with ".fluxcal" extension
main_obs.data = main_dat
main_obs.write_to_filterbank(main_obs_name[:-4]+'.fluxcal.fil')
print('Finished: calibrated product written to ' + main_obs_name[:-4]+'.fluxcal.fil') | python | {
"resource": ""
} |
q273597 | len_header | test | def len_header(filename):
""" Return the length of the blimpy header, in bytes
Args:
filename (str): name of file to open
Returns:
idx_end (int): length of header, in bytes
"""
with open(filename, 'rb') as f:
header_sub_count = 0
eoh_found = False
while not eoh_found:
header_sub = f.read(512)
header_sub_count += 1
if b'HEADER_END' in header_sub:
idx_end = header_sub.index(b'HEADER_END') + len(b'HEADER_END')
eoh_found = True
break
idx_end = (header_sub_count -1) * 512 + idx_end
return idx_end | python | {
"resource": ""
} |
q273598 | is_filterbank | test | def is_filterbank(filename):
""" Open file and confirm if it is a filterbank file or not. """
with open(filename, 'rb') as fh:
is_fil = True
# Check this is a blimpy file
try:
keyword, value, idx = read_next_header_keyword(fh)
try:
assert keyword == b'HEADER_START'
except AssertionError:
is_fil = False
except KeyError:
is_fil = False
return is_fil | python | {
"resource": ""
} |
q273599 | fix_header | test | def fix_header(filename, keyword, new_value):
""" Apply a quick patch-up to a Filterbank header by overwriting a header value
Args:
filename (str): name of file to open and fix. WILL BE MODIFIED.
keyword (stt): header keyword to update
new_value (long, double, angle or string): New value to write.
Notes:
This will overwrite the current value of the blimpy with a desired
'fixed' version. Note that this has limited support for patching
string-type values - if the length of the string changes, all hell will
break loose.
"""
# Read header data and return indexes of data offsets in file
hd = read_header(filename)
hi = read_header(filename, return_idxs=True)
idx = hi[keyword]
# Find out the datatype for the given keyword
dtype = header_keyword_types[keyword]
dtype_to_type = {b'<l' : np.int32,
b'str' : bytes,
b'<d' : np.float64,
b'angle' : to_sigproc_angle}
value_dtype = dtype_to_type[dtype]
# Generate the new string
if isinstance(value_dtype, bytes):
if len(hd[keyword]) == len(new_value):
val_str = np.int32(len(new_value)).tostring() + new_value
else:
raise RuntimeError("String size mismatch. Cannot update without rewriting entire file.")
else:
val_str = value_dtype(new_value).tostring()
# Write the new string to file
with open(filename, 'rb+') as fh:
fh.seek(idx)
fh.write(val_str) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.